| code (string, length 3–1.05M) | repo_name (string, length 5–104) | path (string, length 4–251) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
from collections import defaultdict
from django_bulk_update.helper import bulk_update
from django.conf import settings as django_settings
from django.db import transaction
from django.http import JsonResponse
from rest_framework import generics
from rest_framework import permissions as drf_permissions
from rest_framework import status
from rest_framework.decorators import api_view, throttle_classes
from rest_framework.exceptions import ValidationError, NotFound
from rest_framework.mixins import ListModelMixin
from rest_framework.response import Response
from api.base import permissions as base_permissions
from api.base import utils
from api.base.exceptions import RelationshipPostMakesNoChanges
from api.base.filters import ListFilterMixin
from api.base.parsers import JSONAPIRelationshipParser
from api.base.parsers import JSONAPIRelationshipParserForRegularJSON
from api.base.requests import EmbeddedRequest
from api.base.serializers import (
MaintenanceStateSerializer,
LinkedNodesRelationshipSerializer,
LinkedRegistrationsRelationshipSerializer
)
from api.base.throttling import RootAnonThrottle, UserRateThrottle
from api.base.utils import is_bulk_request, get_user_auth
from api.nodes.utils import get_file_object
from api.nodes.permissions import ContributorOrPublic
from api.nodes.permissions import ContributorOrPublicForRelationshipPointers
from api.nodes.permissions import ReadOnlyIfRegistration
from api.users.serializers import UserSerializer
from framework.auth.oauth_scopes import CoreScopes
from osf.models import Contributor, MaintenanceState, BaseFileNode
from waffle.models import Flag
from waffle import flag_is_active
class JSONAPIBaseView(generics.GenericAPIView):
def __init__(self, **kwargs):
assert getattr(self, 'view_name', None), 'Must specify view_name on view.'
assert getattr(self, 'view_category', None), 'Must specify view_category on view.'
self.view_fqn = ':'.join([self.view_category, self.view_name])
super(JSONAPIBaseView, self).__init__(**kwargs)
def _get_embed_partial(self, field_name, field):
"""Create a partial function to fetch the values of an embedded field. A basic
example is to include a Node's children in a single response.
:param str field_name: Name of field of the view's serializer_class to load
results for
:return function object -> dict:
"""
if getattr(field, 'field', None):
field = field.field
def partial(item):
# resolve must be implemented on the field
v, view_args, view_kwargs = field.resolve(item, field_name, self.request)
if not v:
return None
if isinstance(self.request, EmbeddedRequest):
request = EmbeddedRequest(self.request._request)
else:
request = EmbeddedRequest(self.request)
if not hasattr(request._request._request, '_embed_cache'):
request._request._request._embed_cache = {}
cache = request._request._request._embed_cache
request.parents.setdefault(type(item), {})[item._id] = item
view_kwargs.update({
'request': request,
'is_embedded': True,
})
# Set up a view ourselves to avoid all the junk DRF throws in.
# v is a function that hides everything; v.cls is the actual view class.
view = v.cls()
view.args = view_args
view.kwargs = view_kwargs
view.request = request
view.request.parser_context['kwargs'] = view_kwargs
view.format_kwarg = view.get_format_suffix(**view_kwargs)
if not isinstance(view, ListModelMixin):
try:
item = view.get_object()
except Exception as e:
with transaction.atomic():
ret = view.handle_exception(e).data
return ret
_cache_key = (v.cls, field_name, view.get_serializer_class(), (type(item), item.id))
if _cache_key in cache:
# We already have the result for this embed, return it
return cache[_cache_key]
# Cache serializers. to_representation of a serializer should NOT augment its fields, so resetting the context
# should be sufficient for reuse
if not view.get_serializer_class() in cache:
cache[view.get_serializer_class()] = view.get_serializer_class()(many=isinstance(view, ListModelMixin), context=view.get_serializer_context())
ser = cache[view.get_serializer_class()]
try:
ser._context = view.get_serializer_context()
if not isinstance(view, ListModelMixin):
ret = ser.to_representation(item)
else:
queryset = view.filter_queryset(view.get_queryset())
page = view.paginate_queryset(getattr(queryset, '_results_cache', None) or queryset)
ret = ser.to_representation(page or queryset)
if page is not None:
request.parser_context['view'] = view
request.parser_context['kwargs'].pop('request')
view.paginator.request = request
ret = view.paginator.get_paginated_response(ret).data
except Exception as e:
with transaction.atomic():
ret = view.handle_exception(e).data
# Allow request to be gc'd
ser._context = None
# Cache our final result
cache[_cache_key] = ret
return ret
return partial
def get_serializer_context(self):
"""Inject request into the serializer context. Additionally, inject partial functions
(request, object -> embed items) if the query string contains embeds. Allows
multiple levels of nesting.
"""
context = super(JSONAPIBaseView, self).get_serializer_context()
if self.kwargs.get('is_embedded'):
embeds = []
else:
embeds = self.request.query_params.getlist('embed') or self.request.query_params.getlist('embed[]')
fields_check = self.get_serializer_class()._declared_fields.copy()
if 'fields[{}]'.format(self.serializer_class.Meta.type_) in self.request.query_params:
# Check only requested and mandatory fields
sparse_fields = self.request.query_params['fields[{}]'.format(self.serializer_class.Meta.type_)]
for field in fields_check.copy().keys():
if field not in ('type', 'id', 'links') and field not in sparse_fields:
fields_check.pop(field)
for field in fields_check:
if getattr(fields_check[field], 'field', None):
fields_check[field] = fields_check[field].field
for field in fields_check:
if getattr(fields_check[field], 'always_embed', False) and field not in embeds:
embeds.append(unicode(field))
if getattr(fields_check[field], 'never_embed', False) and field in embeds:
embeds.remove(field)
embeds_partials = {}
for embed in embeds:
embed_field = fields_check.get(embed)
embeds_partials[embed] = self._get_embed_partial(embed, embed_field)
context.update({
'enable_esi': (
utils.is_truthy(self.request.query_params.get('esi', django_settings.ENABLE_ESI)) and
self.request.accepted_renderer.media_type in django_settings.ESI_MEDIA_TYPES
),
'embed': embeds_partials,
'envelope': self.request.query_params.get('envelope', 'data'),
})
return context
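# A minimal illustration (hypothetical URL and field names) of how the context
# above is consumed: a request such as
#     GET /v2/nodes/abcd1/?embed=children&embed=contributors
# yields context['embed'] == {'children': <partial>, 'contributors': <partial>},
# where each partial is a function built by _get_embed_partial that takes the
# parent object and returns the serialized embedded result.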
class LinkedNodesRelationship(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, generics.CreateAPIView):
""" Relationship Endpoint for Linked Node relationships
Used to set, remove, update and retrieve the ids of the linked nodes attached to this collection. For each id, there
exists a node link that contains that node.
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 201
This requires both edit permission on the collection and that the user making
the request be able to read the nodes requested. Data can contain any number of
node identifiers. This will create a node_link for each node_id in the request that
does not currently have a corresponding node_link in this collection.
###Update
Method: PUT || PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 200
This requires both edit permission on the collection and that the user making
the request be able to read the nodes requested. Data can contain any number of
node identifiers. This will replace the contents of the node_links for this collection with
the contents of the request. It will delete all node links that don't have a node_id in the data
array, create node links for the node_ids that don't currently have a corresponding node_link, and do nothing
for node_ids that already have one. This means an update request with
{"data": []} will remove all node_links in this collection.
###Destroy
Method: DELETE
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 204
This requires edit permission on the node. This will delete any node_links that have a
corresponding node_id in the request.
"""
permission_classes = (
ContributorOrPublicForRelationshipPointers,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ReadOnlyIfRegistration,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NODE_LINKS_WRITE]
serializer_class = LinkedNodesRelationshipSerializer
parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON, )
def get_object(self):
object = self.get_node(check_object_permissions=False)
auth = utils.get_user_auth(self.request)
obj = {'data': [
pointer for pointer in
object.linked_nodes.filter(is_deleted=False, type='osf.node')
if pointer.can_view(auth)
], 'self': object}
self.check_object_permissions(self.request, obj)
return obj
def perform_destroy(self, instance):
data = self.request.data['data']
auth = utils.get_user_auth(self.request)
current_pointers = {pointer._id: pointer for pointer in instance['data']}
collection = instance['self']
for val in data:
if val['id'] in current_pointers:
collection.rm_pointer(current_pointers[val['id']], auth)
def create(self, *args, **kwargs):
try:
ret = super(LinkedNodesRelationship, self).create(*args, **kwargs)
except RelationshipPostMakesNoChanges:
return Response(status=status.HTTP_204_NO_CONTENT)
return ret
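# A hedged sketch (hypothetical host and ids, not part of the view) of
# exercising the endpoint documented above with the `requests` library:
def _example_post_linked_nodes():
    import requests  # assumed available in the example environment
    return requests.post(
        'https://api.example.com/v2/collections/abcd1/relationships/linked_nodes/',
        json={'data': [{'type': 'linked_nodes', 'id': 'efgh2'}]},
        headers={'Content-Type': 'application/vnd.api+json'},
    )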
class LinkedRegistrationsRelationship(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, generics.CreateAPIView):
""" Relationship Endpoint for Linked Registrations relationships
Used to set, remove, update and retrieve the ids of the linked registrations attached to this collection. For each id, there
exists a node link that contains that registration.
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_registrations", # required
"id": <node_id> # required
}]
}
Success: 201
This requires both edit permission on the collection and that the user making
the request be able to read the registrations requested. Data can contain any number of
node identifiers. This will create a node_link for each node_id in the request that
does not currently have a corresponding node_link in this collection.
###Update
Method: PUT || PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_registrations", # required
"id": <node_id> # required
}]
}
Success: 200
This requires both edit permission on the collection and that the user making
the request be able to read the registrations requested. Data can contain any number of
node identifiers. This will replace the contents of the node_links for this collection with
the contents of the request. It will delete all node links that don't have a node_id in the data
array, create node links for the node_ids that don't currently have a corresponding node_link, and do nothing
for node_ids that already have one. This means an update request with
{"data": []} will remove all node_links in this collection.
###Destroy
Method: DELETE
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_registrations", # required
"id": <node_id> # required
}]
}
Success: 204
This requires edit permission on the node. This will delete any node_links that have a
corresponding node_id in the request.
"""
permission_classes = (
ContributorOrPublicForRelationshipPointers,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ReadOnlyIfRegistration,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NODE_LINKS_WRITE]
serializer_class = LinkedRegistrationsRelationshipSerializer
parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON, )
def get_object(self):
object = self.get_node(check_object_permissions=False)
auth = utils.get_user_auth(self.request)
obj = {'data': [
pointer for pointer in
object.linked_nodes.filter(is_deleted=False, type='osf.registration')
if pointer.can_view(auth)
], 'self': object}
self.check_object_permissions(self.request, obj)
return obj
def perform_destroy(self, instance):
data = self.request.data['data']
auth = utils.get_user_auth(self.request)
current_pointers = {pointer._id: pointer for pointer in instance['data']}
collection = instance['self']
for val in data:
if val['id'] in current_pointers:
collection.rm_pointer(current_pointers[val['id']], auth)
else:
raise NotFound(detail='Pointer with id "{}" not found in pointers list'.format(val['id']))
def create(self, *args, **kwargs):
try:
ret = super(LinkedRegistrationsRelationship, self).create(*args, **kwargs)
except RelationshipPostMakesNoChanges:
return Response(status=status.HTTP_204_NO_CONTENT)
return ret
@api_view(('GET',))
@throttle_classes([RootAnonThrottle, UserRateThrottle])
def root(request, format=None, **kwargs):
"""
The documentation for the Open Science Framework API can be found at [developer.osf.io](https://developer.osf.io).
The contents of this endpoint are variable and subject to change without notification.
"""
if request.user and not request.user.is_anonymous:
user = request.user
current_user = UserSerializer(user, context={'request': request}).data
else:
current_user = None
flags = [name for name in Flag.objects.values_list('name', flat=True) if flag_is_active(request, name)]
kwargs = request.parser_context['kwargs']
return_val = {
'meta': {
'message': 'Welcome to the OSF API.',
'version': request.version,
'current_user': current_user,
'active_flags': flags,
},
'links': {
'nodes': utils.absolute_reverse('nodes:node-list', kwargs=kwargs),
'users': utils.absolute_reverse('users:user-list', kwargs=kwargs),
'collections': utils.absolute_reverse('collections:collection-list', kwargs=kwargs),
'registrations': utils.absolute_reverse('registrations:registration-list', kwargs=kwargs),
'institutions': utils.absolute_reverse('institutions:institution-list', kwargs=kwargs),
'licenses': utils.absolute_reverse('licenses:license-list', kwargs=kwargs),
'metaschemas': utils.absolute_reverse('metaschemas:registration-metaschema-list', kwargs=kwargs),
'addons': utils.absolute_reverse('addons:addon-list', kwargs=kwargs),
}
}
if utils.has_admin_scope(request):
return_val['meta']['admin'] = True
return Response(return_val)
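# A short usage sketch (hypothetical host): the root endpoint is a plain GET,
# e.g. requests.get('https://api.example.com/v2/'), and the JSON body carries
# the 'meta' and 'links' structure assembled above.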
@api_view(('GET',))
@throttle_classes([RootAnonThrottle, UserRateThrottle])
def status_check(request, format=None, **kwargs):
maintenance = MaintenanceState.objects.all().first()
return Response({
'maintenance': MaintenanceStateSerializer(maintenance).data if maintenance else None
})
def error_404(request, format=None, *args, **kwargs):
return JsonResponse(
{'errors': [{'detail': 'Not found.'}]},
status=404,
content_type='application/vnd.api+json; application/json'
)
class BaseContributorDetail(JSONAPIBaseView, generics.RetrieveAPIView):
# overrides RetrieveAPIView
def get_object(self):
node = self.get_node()
user = self.get_user()
# May raise a permission denied
self.check_object_permissions(self.request, user)
try:
return node.contributor_set.get(user=user)
except Contributor.DoesNotExist:
raise NotFound('{} cannot be found in the list of contributors.'.format(user))
class BaseContributorList(JSONAPIBaseView, generics.ListAPIView, ListFilterMixin):
ordering = ('-modified',)
def get_default_queryset(self):
node = self.get_node()
return node.contributor_set.all().include('user__guids')
def get_queryset(self):
queryset = self.get_queryset_from_request()
# If bulk request, queryset only contains contributors in request
if is_bulk_request(self.request):
contrib_ids = []
for item in self.request.data:
try:
contrib_ids.append(item['id'].split('-')[1])
except AttributeError:
raise ValidationError('Contributor identifier not provided.')
except IndexError:
raise ValidationError('Contributor identifier incorrectly formatted.')
queryset[:] = [contrib for contrib in queryset if contrib._id in contrib_ids]
return queryset
class BaseNodeLinksDetail(JSONAPIBaseView, generics.RetrieveAPIView):
pass
class BaseNodeLinksList(JSONAPIBaseView, generics.ListAPIView):
ordering = ('-modified',)
def get_queryset(self):
auth = get_user_auth(self.request)
query = self.get_node()\
.node_relations.select_related('child')\
.filter(is_node_link=True, child__is_deleted=False)\
.exclude(child__type='osf.collection')
return sorted([
node_link for node_link in query
if node_link.child.can_view(auth) and not node_link.child.is_retracted
], key=lambda node_link: node_link.child.modified, reverse=True)
class BaseLinkedList(JSONAPIBaseView, generics.ListAPIView):
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NULL]
# subclass must set
serializer_class = None
view_category = None
view_name = None
ordering = ('-modified',)
# TODO: This class no longer exists
# model_class = Pointer
def get_queryset(self):
auth = get_user_auth(self.request)
return self.get_node().linked_nodes.filter(is_deleted=False).exclude(type='osf.collection').can_view(user=auth.user, private_link=auth.private_link).order_by('-modified')
class WaterButlerMixin(object):
path_lookup_url_kwarg = 'path'
provider_lookup_url_kwarg = 'provider'
def bulk_get_file_nodes_from_wb_resp(self, files_list):
"""Takes a list of file data from wb response, touches/updates metadata for each, and returns list of file objects.
This function mirrors all the actions of get_file_node_from_wb_resp except the create and updates are done in bulk.
The bulk_update and bulk_create do not call the base class update and create so the actions of those functions are
done here where needed
"""
node = self.get_node(check_object_permissions=False)
objs_to_create = defaultdict(lambda: [])
file_objs = []
for item in files_list:
attrs = item['attributes']
base_class = BaseFileNode.resolve_class(
attrs['provider'],
BaseFileNode.FOLDER if attrs['kind'] == 'folder'
else BaseFileNode.FILE
)
# mirrors BaseFileNode get_or_create
try:
file_obj = base_class.objects.get(node=node, _path='/' + attrs['path'].lstrip('/'))
except base_class.DoesNotExist:
# the create method on BaseFileNode appends the provider; bulk_create bypasses this step, so it is added here
file_obj = base_class(node=node, _path='/' + attrs['path'].lstrip('/'), provider=base_class._provider)
objs_to_create[base_class].append(file_obj)
else:
file_objs.append(file_obj)
file_obj.update(None, attrs, user=self.request.user, save=False)
bulk_update(file_objs)
for base_class in objs_to_create:
base_class.objects.bulk_create(objs_to_create[base_class])
file_objs += objs_to_create[base_class]
return file_objs
def get_file_node_from_wb_resp(self, item):
"""Takes file data from wb response, touches/updates metadata for it, and returns file object"""
attrs = item['attributes']
file_node = BaseFileNode.resolve_class(
attrs['provider'],
BaseFileNode.FOLDER if attrs['kind'] == 'folder'
else BaseFileNode.FILE
).get_or_create(self.get_node(check_object_permissions=False), attrs['path'])
file_node.update(None, attrs, user=self.request.user)
return file_node
def fetch_from_waterbutler(self):
node = self.get_node(check_object_permissions=False)
path = self.kwargs[self.path_lookup_url_kwarg]
provider = self.kwargs[self.provider_lookup_url_kwarg]
return self.get_file_object(node, path, provider)
def get_file_object(self, node, path, provider, check_object_permissions=True):
obj = get_file_object(node=node, path=path, provider=provider, request=self.request)
if provider == 'osfstorage':
if check_object_permissions:
self.check_object_permissions(self.request, obj)
return obj
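# For reference, a hedged sketch of the shape of the WaterButler response items
# consumed by get_file_node_from_wb_resp / bulk_get_file_nodes_from_wb_resp
# above (the real payload may carry more attributes):
#
#     {
#         'attributes': {
#             'provider': 'osfstorage',
#             'kind': 'file',  # or 'folder'
#             'path': '/abc123',
#         }
#     }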
class DeprecatedView(JSONAPIBaseView):
""" Mixin for deprecating old views
Subclasses must define `max_version`
"""
@property
def max_version(self):
raise NotImplementedError()
def __init__(self, *args, **kwargs):
super(DeprecatedView, self).__init__(*args, **kwargs)
self.is_deprecated = False
def determine_version(self, request, *args, **kwargs):
version, scheme = super(DeprecatedView, self).determine_version(request, *args, **kwargs)
if version > self.max_version:
self.is_deprecated = True
raise NotFound(detail='This route has been deprecated. It was last available in version {}'.format(self.max_version))
return version, scheme
def finalize_response(self, request, response, *args, **kwargs):
response = super(DeprecatedView, self).finalize_response(request, response, *args, **kwargs)
if self.is_deprecated:
# Already has the error message
return response
if response.status_code == 204:
response.status_code = 200
response.data = {}
deprecation_warning = 'This route is deprecated and will be unavailable after version {}'.format(self.max_version)
if response.data.get('meta', False):
if response.data['meta'].get('warnings', False):
response.data['meta']['warnings'].append(deprecation_warning)
else:
response.data['meta']['warnings'] = [deprecation_warning]
else:
response.data['meta'] = {'warnings': [deprecation_warning]}
return response
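# A minimal sketch of subclassing the view above (hypothetical view and
# version; concrete max_version values live in the real OSF views):
#
#     class OldWidgetList(DeprecatedView, generics.ListAPIView):
#         view_category = 'widgets'
#         view_name = 'widget-list'
#         max_version = '2.7'  # requests negotiating a newer version get a 404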
| icereval/osf.io | api/base/views.py | Python | apache-2.0 | 26,351 |
#!/usr/bin/env python3
import re
import hashlib
import sys
pattern = re.compile(r"\*\n(Builtin protocol feature: (.*)(?:\*[^/]|[^\*])*)\*/")
def main():
with open(sys.argv[1], "r") as f:
contents = f.read()
print('#include <eosio/chain/protocol_feature_manager.hpp>')
print('#include <map>')
print('#include <boost/test/unit_test.hpp>')
print('using namespace eosio::chain;')
print('BOOST_AUTO_TEST_CASE(protocol_feature_digest_tests) {')
print(' std::map<std::string, std::string> digests;')
for match in re.finditer(pattern, contents):
print(' digests.emplace("%s", "%s");' % (match.group(2), hashlib.sha256(match.group(1).encode('utf8')).hexdigest()))
print(' for(const auto& [id, spec] : builtin_protocol_feature_codenames) {')
print(' BOOST_TEST(digests[spec.codename] == fc::variant(spec.description_digest).as<std::string>());')
print(' }')
print('}')
if __name__ == "__main__":
main()
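# A hedged sketch of the kind of comment block the regex above is meant to
# match in the input file (exact formatting in the C++ source may differ):
#
#     /**
#     *
#     Builtin protocol feature: PREACTIVATE_FEATURE
#     <description text whose SHA-256 becomes the expected digest>
#     */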
| EOSIO/eos | unittests/gen_protocol_feature_digest_tests.py | Python | mit | 1,030 |
import keyedcache
import random
from django.test import TestCase
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from keyedcache.views import stats_page, view_page, delete_page
import time
CACHE_HIT=0
def cachetest(a,b,c):
global CACHE_HIT
CACHE_HIT += 1
r = [random.randrange(0,1000) for x in range(0,3)]
ret = [r, a + r[0], b + r[1], c + r[2]]
return ret
cachetest = keyedcache.cache_function(2)(cachetest)
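# Equivalent to the wrapping above, written as decorator syntax (a sketch;
# cache_function is applied with a 2-second timeout either way):
#
#     @keyedcache.cache_function(2)
#     def cachetest(a, b, c):
#         ...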
class DecoratorTest(TestCase):
def testCachePut(self):
d = cachetest(1,2,3)
self.assertEqual(CACHE_HIT,1)
d2 = cachetest(1,2,3)
self.assertEqual(CACHE_HIT,1)
self.assertEqual(d, d2)
seeds = d[0]
self.assertEqual(seeds[0] + 1, d[1])
self.assertEqual(seeds[1] + 2, d[2])
self.assertEqual(seeds[2] + 3, d[3])
time.sleep(3)
d3 = cachetest(1,2,3)
self.assertEqual(CACHE_HIT,2)
self.assertNotEqual(d, d3)
def testDeleteCachedFunction(self):
orig = cachetest(10,20,30)
keyedcache.cache_delete_function(cachetest)
after = cachetest(10,20,30)
self.assertNotEqual(orig, after)
class CachingTest(TestCase):
def testCacheGetFail(self):
try:
keyedcache.cache_get('x')
self.fail('should have raised NotCachedError')
except keyedcache.NotCachedError:
pass
def testCacheGetOK(self):
one = [1,2,3,4]
keyedcache.cache_set('ok', value=one, length=2)
two = keyedcache.cache_get('ok')
self.assertEqual(one, two)
time.sleep(5)
try:
three = keyedcache.cache_get('ok')
self.fail('should have raised NotCachedError, got %s' % three)
except keyedcache.NotCachedError:
pass
def testCacheGetDefault(self):
chk = keyedcache.cache_get('default',default='-')
self.assertEqual(chk, '-')
def testDelete(self):
keyedcache.cache_set('del', value=True)
for x in range(0,10):
keyedcache.cache_set('del', 'x', x, value=True)
for y in range(0,5):
keyedcache.cache_set('del', 'x', x, 'y', y, value=True)
# check to make sure all the values are in the cache
self.assert_(keyedcache.cache_get('del', default=False))
for x in range(0,10):
self.assert_(keyedcache.cache_get('del', 'x', x, default=False))
for y in range(0,5):
self.assert_(keyedcache.cache_get('del', 'x', x, 'y', y, default=False))
# try to delete just one
killed = keyedcache.cache_delete('del','x',1)
self.assertEqual(["del::x::1"], killed)
self.assertFalse(keyedcache.cache_get('del', 'x', 1, default=False))
# but the others are still there
self.assert_(keyedcache.cache_get('del', 'x', 2, default=False))
# now kill all of del::x::1
killed = keyedcache.cache_delete('del','x', 1, children=True)
for y in range(0,5):
self.assertFalse(keyedcache.cache_get('del', 'x', 1, 'y', y, default=False))
# but del::x::2 and children are there
self.assert_(keyedcache.cache_get('del','x',2,'y',1, default=False))
# kill the rest
killed = keyedcache.cache_delete('del', children=True)
self.assertFalse(keyedcache.cache_get('del',default=False))
for x in range(0,10):
self.assertFalse(keyedcache.cache_get('del', 'x', x, default=False))
for y in range(0,5):
self.assertFalse(keyedcache.cache_get('del', 'x', x, 'y', y, default=False))
class TestCacheDisable(TestCase):
def testDisable(self):
keyedcache.cache_set('disabled', value=False)
v = keyedcache.cache_get('disabled')
self.assertEqual(v, False)
keyedcache.cache_enable(False)
keyedcache.cache_set('disabled', value=True)
try:
keyedcache.cache_get('disabled')
self.fail('should have raised NotCachedError')
except keyedcache.NotCachedError, nce:
key = keyedcache.cache_key('disabled')
self.assertEqual(nce.key, key)
keyedcache.cache_enable()
v2 = keyedcache.cache_get('disabled')
# should still be False, since the cache was disabled
self.assertEqual(v2, False)
class TestKeyMaker(TestCase):
def testSimpleKey(self):
v = keyedcache.cache_key('test')
self.assertEqual(v, 'test')
def testDualKey(self):
v = keyedcache.cache_key('test', 2)
self.assertEqual(v, 'test::2')
def testPairedKey(self):
v = keyedcache.cache_key('test', more='yes')
self.assertEqual(v, 'test::more::yes')
def testPairedDualKey(self):
v = keyedcache.cache_key('test', 3, more='yes')
self.assertEqual(v, 'test::3::more::yes')
class TestClient(TestCase):
urls = 'keyedcache.tests_urls'
def test_basic_views(self):
# An authenticated user is not enough
user = User.objects.create_user('alice', 'alice@example.com', 'secret')
user.save()
self.client.login(username='alice', password='secret')
response = self.client.get(reverse(stats_page))
self.assertEqual(response.status_code, 302)
self.assertTrue('/login/' in response._headers['location'][1])
# User must be staff
user.is_staff = True
user.save()
response = self.client.get(reverse(stats_page))
self.assertContains(response, 'Cache Hit Rate')
response = self.client.get(reverse(view_page))
self.assertContains(response, 'Cache Keys')
response = self.client.get(reverse(delete_page))
self.assertContains(response, 'Key to delete:')
| aronysidoro/django-keyedcache | keyedcache/tests.py | Python | bsd-3-clause | 5,800 |
# @license
# Copyright 2016 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import os
def make_random_token():
"""Return a 20-byte (40 character) random hex string."""
return binascii.hexlify(os.urandom(20)).decode()
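# Example usage (a quick sanity check, not part of the library):
if __name__ == "__main__":
    token = make_random_token()
    assert len(token) == 40  # 20 random bytes hex-encode to 40 characters
    print(token)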
| janelia-flyem/neuroglancer | python/neuroglancer/random_token.py | Python | apache-2.0 | 753 |
import markdown
from qtk.common import TemplateBase, NameBase, Category as C
from qtk.fields import Field as F
class GenericTemplate(NameBase, TemplateBase):
_id_map = {}
def __init__(self, name, category, sub_category=C.MAIN, convention_keys=(F.CURRENCY,)):
name_id = "%s.%s" % (sub_category.id, self.toid(name))
super(GenericTemplate, self).__init__(name, name_id=name_id, prefix=category.id)
TemplateBase.__init__(self, category.id, convention_keys)
def info(self):
creator = self.get_creator()
return creator.class_info() + "\n\n" + creator.field_info(self)
def sample_data(self):
creator = self.get_creator()
return creator.sample_data(self)
def help(self):
print self.info()
def _repr_html_(self):
return markdown.markdown(self.info())
class Instrument(NameBase, TemplateBase):
_id_map = {}
def __init__(self, instrument_name, asset_type, security_type, security_subtype,
convention_keys=(F.CURRENCY,)):
self._asset_type = asset_type
self._security_type = security_type
self._security_subtype = security_subtype
inst_id = "%s.%s.%s" % (security_type.id, security_subtype.id, self.toid(instrument_name))
prefix = self.__class__.__name__
super(Instrument, self).__init__(instrument_name, name_id=inst_id, prefix=prefix)
TemplateBase.__init__(self, prefix, convention_keys)
@property
def asset_type(self):
return self._asset_type
@property
def security_type(self):
return self._security_type
@property
def security_subtype(self):
return self._security_subtype
class Template(object):
# Instruments
INSTRUMENT_BOND_TBOND = GenericTemplate("Treasury Bond", C.INSTRUMENT, C.BOND)
INSTRUMENT_BOND_TBILL = GenericTemplate("Treasury Bill", C.INSTRUMENT, C.BOND)
INSTRUMENT_DERIVATIVE_EUROPEANOPTION = GenericTemplate("European Option", C.INSTRUMENT, C.DERIVATIVE,
convention_keys=())
INSTRUMENT_DERIVATIVE_AMERICANOPTION = GenericTemplate("American Option", C.INSTRUMENT, C.DERIVATIVE,
convention_keys=())
INSTRUMENT_DERIVATIVE_BERMUDANOPTION = GenericTemplate("Bermudan Option", C.INSTRUMENT, C.DERIVATIVE,
convention_keys=())
# Instrument Helpers in Building Term Structures
INSTRUMENT_BOND_TBOND_HELPER = GenericTemplate("Treasury Bond Helper", C.INSTRUMENT, C.BOND)
INSTRUMENT_BOND_TBILL_HELPER = GenericTemplate("Treasury Bill Helper", C.INSTRUMENT, C.BOND)
INSTRUMENT_DERIVATIVE_SWAPTION_HELPER = GenericTemplate("Swaption Helper", C.INSTRUMENT, C.DERIVATIVE)
# All Term Structures
TS_YIELD_BOND = GenericTemplate("Bond Curve", C.TERM_STRUCTURE, C.YIELD)
TS_YIELD_ZERO = GenericTemplate("Zero Curve", C.TERM_STRUCTURE, C.YIELD)
TS_YIELD_DISCOUNT = GenericTemplate("Discount Curve", C.TERM_STRUCTURE, C.YIELD)
TS_YIELD_FLAT = GenericTemplate("Flat Curve", C.TERM_STRUCTURE, C.YIELD)
TS_VOLATILITY_BLACKCONSTANT = GenericTemplate("Black Constant", C.TERM_STRUCTURE, C.VOLATILITY, convention_keys=())
TS_VOLATILITY_BLACKCURVE = GenericTemplate("Black Curve", C.TERM_STRUCTURE, C.VOLATILITY, convention_keys=())
TS_VOLATILITY_BLACKSURFACE = GenericTemplate("Black Surface", C.TERM_STRUCTURE, C.VOLATILITY, convention_keys=())
# All Models
MODEL_YIELD_HW1F = GenericTemplate("Hull White 1 Factor", C.MODEL, C.YIELD, convention_keys=())
# All Engines
ENGINE_BOND_DISCOUNTING = GenericTemplate("Discounting", C.ENGINE, C.BOND, convention_keys=())
ENGINE_EQUITY_ANALYTICEUROPEAN = GenericTemplate("Analytic European", C.ENGINE, C.EQUITY, convention_keys=())
ENGINE_EQUITY_FDAMERICAN = GenericTemplate("FD American", C.ENGINE, C.EQUITY, convention_keys=())
ENGINE_EQUITY_FDBERMUDAN = GenericTemplate("FD Bermudan", C.ENGINE, C.EQUITY, convention_keys=())
# Time Module
TIME_MAIN_SCHEDULE = GenericTemplate("Schedule", C.TIME, C.MAIN)
# Market Report
REPORT_MARKET_ALL = GenericTemplate("All", C.REPORT, C.MARKET)
# Analytics
ANALYTIC_MARKET_BOND = GenericTemplate("Bond", C.ANALYTIC, C.MARKET, convention_keys=())
# Indexes
INDEX_IBOR_USDLIBOR = GenericTemplate("USD Libor", C.INDEX, C.IBOR, convention_keys=())
INDEX_IBOR_CADLIBOR = GenericTemplate("CAD Libor", C.INDEX, C.IBOR, convention_keys=())
INDEX_IBOR_AUDLIBOR = GenericTemplate("AUD Libor", C.INDEX, C.IBOR, convention_keys=())
INDEX_IBOR_JPYLIBOR = GenericTemplate("JPY Libor", C.INDEX, C.IBOR, convention_keys=())
INDEX_IBOR_GBPLIBOR = GenericTemplate("GBP Libor", C.INDEX, C.IBOR, convention_keys=())
INDEX_IBOR_EURLIBOR = GenericTemplate("EUR Libor", C.INDEX, C.IBOR, convention_keys=())
# Processes
PROCESS_EQUITY_BLACKSCHOLESMERTON = GenericTemplate("Black Scholes Merton", C.PROCESS, C.EQUITY,convention_keys=())
| gouthambs/qtk-python | qtk/templates.py | Python | mit | 5,066 |
# Copyright 2017 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decrypts and logs a process's SSL traffic.
Hooks the functions SSL_read() and SSL_write() in a given process and logs the
decrypted data to the console and/or to a pcap file.
Typical usage example:
ssl_log("wget", "log.pcap", True)
Dependencies:
frida (https://www.frida.re/):
sudo pip install frida
hexdump (https://bitbucket.org/techtonik/hexdump/) if using verbose output:
sudo pip install hexdump
"""
__author__ = "geffner@google.com (Jason Geffner)"
__version__ = "1.0"
import argparse
import os
import platform
import pprint
import random
import signal
import socket
import struct
import time
import frida
try:
import hexdump # pylint: disable=g-import-not-at-top
except ImportError:
pass
_FRIDA_SCRIPT = """
/**
* Initializes 'addresses' dictionary and NativeFunctions.
*/
function initializeGlobals()
{
addresses = {};
var resolver = new ApiResolver("module");
var exps = [
["*libssl*",
["SSL_read", "SSL_write", "SSL_get_fd", "SSL_get_session",
"SSL_SESSION_get_id"]],
[Process.platform == "darwin" ? "*libsystem*" : "*libc*",
["getpeername", "getsockname", "ntohs", "ntohl"]]
];
for (var i = 0; i < exps.length; i++)
{
var lib = exps[i][0];
var names = exps[i][1];
for (var j = 0; j < names.length; j++)
{
var name = names[j];
var matches = resolver.enumerateMatchesSync("exports:" + lib + "!" +
name);
if (matches.length == 0)
{
throw "Could not find " + lib + "!" + name;
}
else if (matches.length != 1)
{
// Sometimes Frida returns duplicates.
var address = 0;
var s = "";
var duplicates_only = true;
for (var k = 0; k < matches.length; k++)
{
if (s.length != 0)
{
s += ", ";
}
s += matches[k].name + "@" + matches[k].address;
if (address == 0)
{
address = matches[k].address;
}
else if (!address.equals(matches[k].address))
{
duplicates_only = false;
}
}
if (!duplicates_only)
{
throw "More than one match found for " + lib + "!" + name + ": " +
s;
}
}
addresses[name] = matches[0].address;
}
}
SSL_get_fd = new NativeFunction(addresses["SSL_get_fd"], "int",
["pointer"]);
SSL_get_session = new NativeFunction(addresses["SSL_get_session"],
"pointer", ["pointer"]);
SSL_SESSION_get_id = new NativeFunction(addresses["SSL_SESSION_get_id"],
"pointer", ["pointer", "pointer"]);
getpeername = new NativeFunction(addresses["getpeername"], "int", ["int",
"pointer", "pointer"]);
getsockname = new NativeFunction(addresses["getsockname"], "int", ["int",
"pointer", "pointer"]);
ntohs = new NativeFunction(addresses["ntohs"], "uint16", ["uint16"]);
ntohl = new NativeFunction(addresses["ntohl"], "uint32", ["uint32"]);
}
initializeGlobals();
/**
* Returns a dictionary of a sockfd's "src_addr", "src_port", "dst_addr", and
* "dst_port".
* @param {int} sockfd The file descriptor of the socket to inspect.
* @param {boolean} isRead If true, the context is an SSL_read call. If
* false, the context is an SSL_write call.
* @return {dict} Dictionary of sockfd's "src_addr", "src_port", "dst_addr",
* and "dst_port".
*/
function getPortsAndAddresses(sockfd, isRead)
{
var message = {};
var addrlen = Memory.alloc(4);
var addr = Memory.alloc(16);
var src_dst = ["src", "dst"];
for (var i = 0; i < src_dst.length; i++)
{
Memory.writeU32(addrlen, 16);
if ((src_dst[i] == "src") ^ isRead)
{
getsockname(sockfd, addr, addrlen);
}
else
{
getpeername(sockfd, addr, addrlen);
}
message[src_dst[i] + "_port"] = ntohs(Memory.readU16(addr.add(2)));
message[src_dst[i] + "_addr"] = ntohl(Memory.readU32(addr.add(4)));
}
return message;
}
/**
* Get the session_id of SSL object and return it as a hex string.
* @param {!NativePointer} ssl A pointer to an SSL object.
* @return {dict} A string representing the session_id of the SSL object's
* SSL_SESSION. For example,
* "59FD71B7B90202F359D89E66AE4E61247954E28431F6C6AC46625D472FF76336".
*/
function getSslSessionId(ssl)
{
var session = SSL_get_session(ssl);
if (session == 0)
{
return 0;
}
var len = Memory.alloc(4);
var p = SSL_SESSION_get_id(session, len);
len = Memory.readU32(len);
var session_id = "";
for (var i = 0; i < len; i++)
{
// Read a byte, convert it to a hex string (0xAB ==> "AB"), and append
// it to session_id.
session_id +=
("0" + Memory.readU8(p.add(i)).toString(16).toUpperCase()).substr(-2);
}
return session_id;
}
Interceptor.attach(addresses["SSL_read"],
{
onEnter: function (args)
{
var message = getPortsAndAddresses(SSL_get_fd(args[0]), true);
message["ssl_session_id"] = getSslSessionId(args[0]);
message["function"] = "SSL_read";
this.message = message;
this.buf = args[1];
},
onLeave: function (retval)
{
retval |= 0; // Cast retval to 32-bit integer.
if (retval <= 0)
{
return;
}
send(this.message, Memory.readByteArray(this.buf, retval));
}
});
Interceptor.attach(addresses["SSL_write"],
{
onEnter: function (args)
{
var message = getPortsAndAddresses(SSL_get_fd(args[0]), false);
message["ssl_session_id"] = getSslSessionId(args[0]);
message["function"] = "SSL_write";
send(message, Memory.readByteArray(args[1], parseInt(args[2])));
},
onLeave: function (retval)
{
}
});
"""
# ssl_session[<SSL_SESSION id>] = (<bytes sent by client>,
# <bytes sent by server>)
ssl_sessions = {}
def ssl_log(process, pcap=None, verbose=False):
"""Decrypts and logs a process's SSL traffic.
Hooks the functions SSL_read() and SSL_write() in a given process and logs
the decrypted data to the console and/or to a pcap file.
Args:
process: The target process's name (as a string) or process ID (as an int).
pcap: The file path to which the pcap file should be written.
verbose: If True, log the decrypted traffic to the console.
Raises:
NotImplementedError: Not running on a Linux or macOS system.
"""
if platform.system() not in ("Darwin", "Linux"):
raise NotImplementedError("This function is only implemented for Linux and "
"macOS systems.")
def log_pcap(pcap_file, ssl_session_id, function, src_addr, src_port,
dst_addr, dst_port, data):
"""Writes the captured data to a pcap file.
Args:
pcap_file: The opened pcap file.
ssl_session_id: The SSL session ID for the communication.
function: The function that was intercepted ("SSL_read" or "SSL_write").
src_addr: The source address of the logged packet.
src_port: The source port of the logged packet.
dst_addr: The destination address of the logged packet.
dst_port: The destination port of the logged packet.
data: The decrypted packet data.
"""
t = time.time()
if ssl_session_id not in ssl_sessions:
ssl_sessions[ssl_session_id] = (random.randint(0, 0xFFFFFFFF),
random.randint(0, 0xFFFFFFFF))
client_sent, server_sent = ssl_sessions[ssl_session_id]
if function == "SSL_read":
seq, ack = (server_sent, client_sent)
else:
seq, ack = (client_sent, server_sent)
for writes in (
# PCAP record (packet) header
("=I", int(t)), # Timestamp seconds
("=I", (t * 1000000) % 1000000), # Timestamp microseconds
("=I", 40 + len(data)), # Number of octets saved
("=i", 40 + len(data)), # Actual length of packet
# IPv4 header
(">B", 0x45), # Version and Header Length
(">B", 0), # Type of Service
(">H", 40 + len(data)), # Total Length
(">H", 0), # Identification
(">H", 0x4000), # Flags and Fragment Offset
(">B", 0xFF), # Time to Live
(">B", 6), # Protocol
(">H", 0), # Header Checksum
(">I", src_addr), # Source Address
(">I", dst_addr), # Destination Address
# TCP header
(">H", src_port), # Source Port
(">H", dst_port), # Destination Port
(">I", seq), # Sequence Number
(">I", ack), # Acknowledgment Number
(">H", 0x5018), # Header Length and Flags
(">H", 0xFFFF), # Window Size
(">H", 0), # Checksum
(">H", 0)): # Urgent Pointer
pcap_file.write(struct.pack(writes[0], writes[1]))
pcap_file.write(data)
if function == "SSL_read":
server_sent += len(data)
else:
client_sent += len(data)
ssl_sessions[ssl_session_id] = (client_sent, server_sent)
def on_message(message, data):
"""Callback for errors and messages sent from Frida-injected JavaScript.
Logs captured packet data received from JavaScript to the console and/or a
pcap file. See https://www.frida.re/docs/messages/ for more detail on
Frida's messages.
Args:
message: A dictionary containing the message "type" and other fields
dependent on message type.
data: The string of captured decrypted data.
"""
if message["type"] == "error":
pprint.pprint(message)
os.kill(os.getpid(), signal.SIGTERM)
return
if len(data) == 0:
return
p = message["payload"]
if verbose:
src_addr = socket.inet_ntop(socket.AF_INET,
struct.pack(">I", p["src_addr"]))
dst_addr = socket.inet_ntop(socket.AF_INET,
struct.pack(">I", p["dst_addr"]))
print "SSL Session: " + p["ssl_session_id"]
print "[%s] %s:%d --> %s:%d" % (
p["function"],
src_addr,
p["src_port"],
dst_addr,
p["dst_port"])
hexdump.hexdump(data)
print
if pcap:
log_pcap(pcap_file, p["ssl_session_id"], p["function"], p["src_addr"],
p["src_port"], p["dst_addr"], p["dst_port"], data)
session = frida.attach(process)
if pcap:
pcap_file = open(pcap, "wb", 0)
for writes in (
("=I", 0xa1b2c3d4), # Magic number
("=H", 2), # Major version number
("=H", 4), # Minor version number
("=i", time.timezone), # GMT to local correction
("=I", 0), # Accuracy of timestamps
("=I", 65535), # Max length of captured packets
("=I", 228)): # Data link type (LINKTYPE_IPV4)
pcap_file.write(struct.pack(writes[0], writes[1]))
script = session.create_script(_FRIDA_SCRIPT)
script.on("message", on_message)
script.load()
print "Press Ctrl+C to stop logging."
try:
signal.pause()
except KeyboardInterrupt:
pass
session.detach()
if pcap:
pcap_file.close()
if __name__ == "__main__":
class ArgParser(argparse.ArgumentParser):
def error(self, message):
print "ssl_logger v" + __version__
print "by " + __author__
print
print "Error: " + message
print
print self.format_help().replace("usage:", "Usage:")
self.exit(0)
parser = ArgParser(
add_help=False,
description="Decrypts and logs a process's SSL traffic.",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=r"""
Examples:
%(prog)s -pcap ssl.pcap openssl
%(prog)s -verbose 31337
%(prog)s -pcap log.pcap -verbose wget
""")
args = parser.add_argument_group("Arguments")
args.add_argument("-pcap", metavar="<path>", required=False,
help="Name of PCAP file to write")
args.add_argument("-verbose", required=False, action="store_const",
const=True, help="Show verbose output")
args.add_argument("process", metavar="<process name | process id>",
help="Process whose SSL calls to log")
parsed = parser.parse_args()
ssl_log(int(parsed.process) if parsed.process.isdigit() else parsed.process,
parsed.pcap, parsed.verbose)
| google/ssl_logger | ssl_logger.py | Python | apache-2.0 | 13,453 |
"""Common file operations."""
import io
import os
import sys
from gam import controlflow
from gam import display
from gam.var import GM_Globals
from gam.var import GM_SYS_ENCODING
from gam.var import UTF8_SIG
def _open_file(filename, mode, encoding=None, newline=None):
"""Opens a file with no error handling."""
# Determine which encoding to use
if 'b' in mode:
encoding = None
elif not encoding:
encoding = GM_Globals[GM_SYS_ENCODING]
elif 'r' in mode and encoding.lower().replace('-', '') == 'utf8':
encoding = UTF8_SIG
return open(os.path.expanduser(filename),
mode,
newline=newline,
encoding=encoding)
def open_file(filename,
mode='r',
encoding=None,
newline=None,
strip_utf_bom=False):
"""Opens a file.
Args:
filename: String, the name of the file to open, or '-' to use stdin/stdout,
to read/write, depending on the mode param, respectively.
mode: String, the common file mode to open the file with. Default is read.
encoding: String, the name of the encoding used to decode or encode the
file. This should only be used in text mode.
newline: See param description in
https://docs.python.org/3.7/library/functions.html#open
strip_utf_bom: Boolean, True if the file being opened should seek past the
UTF Byte Order Mark before being returned.
See more: https://en.wikipedia.org/wiki/UTF-8#Byte_order_mark
Returns:
The opened file.
"""
try:
if filename == '-':
# Read from stdin, rather than a file
if 'r' in mode:
return io.StringIO(str(sys.stdin.read()))
return sys.stdout
# Open a file on disk
f = _open_file(filename, mode, newline=newline, encoding=encoding)
if strip_utf_bom:
utf_bom = '\ufeff'
has_bom = False
if 'b' in mode:
has_bom = f.read(3).decode('UTF-8') == utf_bom
elif f.encoding and not f.encoding.lower().startswith('utf'):
# Convert UTF BOM into ISO-8859-1 via Bytes
utf8_bom_bytes = utf_bom.encode('UTF-8')
iso_8859_1_bom = utf8_bom_bytes.decode('iso-8859-1').encode(
'iso-8859-1')
has_bom = f.read(3).encode('iso-8859-1',
'replace') == iso_8859_1_bom
else:
has_bom = f.read(1) == utf_bom
if not has_bom:
f.seek(0)
return f
except OSError as e:
controlflow.system_error_exit(6, e)
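# A short usage sketch (hypothetical file names), following the conventions
# documented above:
#
#     f = open_file('-')                            # mode 'r': wraps stdin in a StringIO
#     f = open_file('-', mode='w')                  # write mode: returns sys.stdout
#     f = open_file('data.csv', strip_utf_bom=True) # seeks past a UTF-8 BOM if present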
def close_file(f, force_flush=False):
"""Closes a file.
Args:
f: The file to close
force_flush: Flush file to disk emptying Python and OS caches. See:
https://stackoverflow.com/a/13762137/1503886
Returns:
Boolean, True if the file was successfully closed. False if an error
was encountered while closing.
"""
if force_flush:
f.flush()
os.fsync(f.fileno())
try:
f.close()
return True
except OSError as e:
display.print_error(e)
return False
def read_file(filename,
mode='r',
encoding=None,
newline=None,
continue_on_error=False,
display_errors=True):
"""Reads a file from disk.
Args:
filename: String, the path of the file to open from disk, or "-" to read
from stdin.
mode: String, the mode in which to open the file.
encoding: String, the name of the encoding used to decode or encode the
file. This should only be used in text mode.
newline: See param description in
https://docs.python.org/3.7/library/functions.html#open
continue_on_error: Boolean, If True, suppresses any IO errors and returns to
the caller without any externalities.
display_errors: Boolean, If True, prints error messages when errors are
encountered and continue_on_error is True.
Returns:
The contents of the file, or stdin if filename == "-". Returns None if
an error is encountered and continue_on_errors is True.
"""
try:
if filename == '-':
# Read from stdin, rather than a file.
return str(sys.stdin.read())
with _open_file(filename, mode, newline=newline,
encoding=encoding) as f:
return f.read()
except OSError as e:
if continue_on_error:
if display_errors:
display.print_warning(e)
return None
controlflow.system_error_exit(6, e)
except (LookupError, UnicodeDecodeError, UnicodeError) as e:
controlflow.system_error_exit(2, str(e))
def write_file(filename,
data,
mode='w',
continue_on_error=False,
display_errors=True):
"""Writes data to a file.
Args:
filename: String, the path of the file to write to disk.
data: Serializable data to write to the file.
mode: String, the mode in which to open the file and write to it.
continue_on_error: Boolean, If True, suppresses any IO errors and returns to
the caller without any externalities.
display_errors: Boolean, If True, prints error messages when errors are
encountered and continue_on_error is True.
Returns:
Boolean, True if the write operation succeeded, or False if not.
"""
try:
with _open_file(filename, mode) as f:
f.write(data)
return True
except OSError as e:
if continue_on_error:
if display_errors:
display.print_error(e)
return False
else:
controlflow.system_error_exit(6, e)
| GAM-team/GAM | src/gam/fileutils.py | Python | apache-2.0 | 5,834 |
import os
import sys
import json
from urllib.parse import urlparse
from gruve import io
class WebServiceCache():
"""
Implements a local file system cache to prevent overusing a web service
"""
def __init__(self, webServiceProxy):
self.webServiceProxy = webServiceProxy
# -------------------------------------------------------------------------
def _removeBadFileNameCharacters(self, x):
value = x.replace('.', '-')
for c in '\/:*?"<>|':
value = value.replace(c, '')
return value
def _createFileName(self, url):
c = urlparse(url)
dir = str(c.netloc).replace('.', '-')
file = self._removeBadFileNameCharacters(c.query) + '.json'
relative = os.path.join('../cache/', dir, file)
return io.relativeToAbsolute(relative)
# -------------------------------------------------------------------------
def get(self, url, maxRecords=-1, start=0):
fileName = self._createFileName(url)
if not os.path.exists(fileName):
data = self.webServiceProxy.get(url, maxRecords, start)
if not data or self.webServiceProxy.status_code != 200:
return []
if not os.path.exists(os.path.dirname(fileName)):
os.makedirs(os.path.dirname(fileName))
with open(fileName, 'w') as f:
json.dump(data, f)
return data
else:
with open(fileName, 'r') as f:
data = json.load(f)
return data
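# A hedged usage sketch: webServiceProxy is assumed to expose get(url,
# maxRecords, start) and a status_code attribute, as used above:
#
#     cache = WebServiceCache(webServiceProxy=proxy)
#     records = cache.get('https://api.example.com/search.json?q=term')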
| STSILABS/fda-sandbox | gruve/gruve/web_service_cache.py | Python | mit | 1,562 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the PE file parser."""
import unittest
from plaso.formatters import pe # pylint: disable=unused-import
from plaso.lib import timelib
from plaso.parsers import pe
from tests import test_lib as shared_test_lib
from tests.parsers import test_lib
class PECOFFTest(test_lib.ParserTestCase):
"""Tests for the PE file parser."""
@shared_test_lib.skipUnlessHasTestFile([u'test_pe.exe'])
def testParseFileObjectOnExecutable(self):
"""Tests the ParseFileObject on a PE executable (EXE) file."""
parser_object = pe.PEParser()
storage_writer = self._ParseFile([u'test_pe.exe'], parser_object)
self.assertEqual(len(storage_writer.events), 3)
event_object = storage_writer.events[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2015-04-21 14:53:56')
self.assertEqual(event_object.pe_type, u'Executable (EXE)')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.data_type, u'pe:compilation:compilation_time')
event_object = storage_writer.events[1]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2015-04-21 14:53:55')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.data_type, u'pe:import:import_time')
event_object = storage_writer.events[2]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2015-04-21 14:53:54')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.data_type, u'pe:delay_import:import_time')
@shared_test_lib.skipUnlessHasTestFile([u'test_driver.sys'])
def testParseFileObjectOnDriver(self):
"""Tests the ParseFileObject on a PE driver (SYS) file."""
parser_object = pe.PEParser()
storage_writer = self._ParseFile([u'test_driver.sys'], parser_object)
self.assertEqual(len(storage_writer.events), 1)
event_object = storage_writer.events[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2015-04-21 14:53:54')
self.assertEqual(event_object.pe_type, u'Driver (SYS)')
self.assertEqual(event_object.timestamp, expected_timestamp)
if __name__ == '__main__':
unittest.main()
| dc3-plaso/plaso | tests/parsers/pe.py | Python | apache-2.0 | 2,247 |
import json
import logging
import os
import random
import shutil
from uuid import uuid4
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.db import models
from foresite import utils, Aggregation, URIRef, AggregatedResource, RdfLibSerializer
from rdflib import Namespace
from hs_core.models import ResourceFile
from hs_core.signals import post_remove_file_aggregation
from hs_file_types.models import AbstractLogicalFile
from hs_file_types.models.base import FileTypeContext, SCHEMA_JSON_FILE_ENDSWITH
from hydroshare import settings
class AbstractModelLogicalFile(AbstractLogicalFile):
# folder path relative to {resource_id}/data/contents/ that represents this aggregation;
# the folder becomes the name of the aggregation. Where folder is not set, the one file that is part
# of this aggregation becomes the aggregation name
folder = models.CharField(max_length=4096, null=True, blank=True)
# metadata schema (in json format) for model instance aggregation
# metadata for the model instance aggregation is validated based on this schema
metadata_schema_json = JSONField(default=dict)
class Meta:
abstract = True
@property
def aggregation_name(self):
"""Returns aggregation name as per the aggregation naming rule defined in issue#2568"""
if self.folder:
# this model program/instance aggregation has been created from a folder
# aggregation folder path is the aggregation name
return self.folder
else:
# this model program/instance aggregation has been created from a single resource file
# the path of the resource file is the aggregation name
single_res_file = self.files.first()
if single_res_file:
return single_res_file.short_path
return ""
@property
def schema_short_file_path(self):
"""File path of the aggregation metadata schema file relative to {resource_id}/data/contents/
"""
json_file_name = self.aggregation_name
if not json_file_name:
return json_file_name
if "/" in json_file_name:
json_file_name = os.path.basename(json_file_name)
json_file_name, _ = os.path.splitext(json_file_name)
json_file_name += SCHEMA_JSON_FILE_ENDSWITH
if self.folder:
file_folder = self.folder
else:
file_folder = ''
aggr_file = self.files.first()
if aggr_file is not None:
file_folder = aggr_file.file_folder
if file_folder:
json_file_name = os.path.join(file_folder, json_file_name)
return json_file_name
@property
def schema_file_path(self):
"""Full path of the aggregation metadata schema json file starting with {resource_id}/data/contents/
"""
return os.path.join(self.resource.file_path, self.schema_short_file_path)
@property
def schema_file_url(self):
"""URL to the aggregation metadata schema json file
"""
from hs_core.hydroshare.utils import current_site_url
return "{}/resource/{}".format(current_site_url(), self.schema_file_path)
@classmethod
def get_main_file_type(cls):
"""The main file type for this aggregation - no specific main file"""
return ".*"
@classmethod
def check_files_for_aggregation_type(cls, files):
"""Checks if the specified files can be used to set this aggregation type
:param files: a list of ResourceFile objects
:return If the files meet the requirements of this aggregation type, then returns this
aggregation class name, otherwise empty string.
"""
if len(files) == 0:
# no files
return ""
return cls.__name__
@classmethod
def get_primary_resouce_file(cls, resource_files):
"""Gets any one resource file from the list of files *resource_files* """
return resource_files[0] if resource_files else None
@classmethod
def set_file_type(cls, resource, user, file_id=None, folder_path=''):
"""Makes all physical files that are in a folder (*folder_path*) part of a model program/instance
aggregation type or a single file (*file_id*) part of this aggregation type.
Note: parameter file_id is ignored here and a value for folder_path is required
"""
log = logging.getLogger()
with FileTypeContext(aggr_cls=cls, user=user, resource=resource, file_id=file_id,
folder_path=folder_path,
post_aggr_signal=None,
is_temp_file=False) as ft_ctx:
if folder_path:
res_files = []
dataset_name = folder_path
if '/' in folder_path:
dataset_name = os.path.basename(folder_path)
else:
res_file = ft_ctx.res_file
res_files = [res_file]
folder_path = res_file.file_folder
dataset_name, _ = os.path.splitext(res_file.file_name)
# remove any previously associated logical files from the files
# before making them part of this new logical file
for res_file in res_files:
if res_file.has_logical_file:
res_file.logical_file_content_object = None
res_file.save()
# create a model program/instance logical file object
logical_file = cls.create_aggregation(dataset_name=dataset_name,
resource=resource,
res_files=res_files,
new_files_to_upload=[],
folder_path=folder_path)
try:
if folder_path and file_id is None:
logical_file.folder = folder_path
logical_file.save()
# make all the files in the selected folder as part of the aggregation
logical_file.add_resource_files_in_folder(resource, folder_path)
log.info("{0} aggregation was created for folder:{1}.".format(logical_file.data_type, folder_path))
else:
log.info("{0} aggregation was created for file:{1}.".format(logical_file.data_type,
res_file.storage_path))
ft_ctx.logical_file = logical_file
except Exception as ex:
msg = "{} aggregation. Error when creating aggregation. Error:{}".format(logical_file.data_type,
str(ex))
log.exception(msg)
logical_file.remove_aggregation()
raise ValidationError(msg)
return logical_file
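    # Illustrative usage sketch (resource/user are hypothetical objects; a value for
    # folder_path is required for this aggregation type, per the docstring above):
    #   logical_file = cls.set_file_type(resource=resource, user=user,
    #                                    folder_path='model_files')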
def generate_map_xml(self):
"""Generates the xml needed to write to the aggregation map xml document"""
from hs_core.hydroshare import encode_resource_url
from hs_core.hydroshare.utils import current_site_url, get_file_mime_type
current_site_url = current_site_url()
# This is the qualified resource url.
hs_res_url = os.path.join(current_site_url, 'resource', self.resource.file_path)
# this is the path to the resource metadata file for download
aggr_metadata_file_path = self.metadata_short_file_path
metadata_url = os.path.join(hs_res_url, aggr_metadata_file_path)
metadata_url = encode_resource_url(metadata_url)
# this is the path to the aggregation resourcemap file for download
aggr_map_file_path = self.map_short_file_path
res_map_url = os.path.join(hs_res_url, aggr_map_file_path)
res_map_url = encode_resource_url(res_map_url)
# make the resource map:
utils.namespaces['citoterms'] = Namespace('http://purl.org/spar/cito/')
utils.namespaceSearchOrder.append('citoterms')
ag_url = res_map_url + '#aggregation'
a = Aggregation(ag_url)
# Set properties of the aggregation
a._dc.title = self.dataset_name
agg_type_url = "{site}/terms/{aggr_type}"\
.format(site=current_site_url, aggr_type=self.get_aggregation_type_name())
a._dcterms.type = URIRef(agg_type_url)
a._citoterms.isDocumentedBy = metadata_url
a._ore.isDescribedBy = res_map_url
res_type_aggregation = AggregatedResource(agg_type_url)
res_type_aggregation._rdfs.label = self.get_aggregation_term_label()
res_type_aggregation._rdfs.isDefinedBy = current_site_url + "/terms"
a.add_resource(res_type_aggregation)
# Create a description of the metadata document that describes the whole resource and add it
# to the aggregation
resMetaFile = AggregatedResource(metadata_url)
resMetaFile._citoterms.documents = ag_url
resMetaFile._ore.isAggregatedBy = ag_url
resMetaFile._dc.format = "application/rdf+xml"
# Create a description of the content file and add it to the aggregation
files = self.files.all()
resFiles = []
for n, f in enumerate(files):
res_uri = '{hs_url}/resource/{res_id}/data/contents/{file_name}'.format(
hs_url=current_site_url,
res_id=self.resource.short_id,
file_name=f.short_path)
res_uri = encode_resource_url(res_uri)
resFiles.append(AggregatedResource(res_uri))
resFiles[n]._ore.isAggregatedBy = ag_url
resFiles[n]._dc.format = get_file_mime_type(os.path.basename(f.short_path))
# Add the resource files to the aggregation
a.add_resource(resMetaFile)
for f in resFiles:
a.add_resource(f)
# Create a description of the contained aggregations and add it to the aggregation
child_ore_aggregations = []
for n, child_aggr in enumerate(self.get_children()):
res_uri = '{hs_url}/resource/{res_id}/data/contents/{aggr_name}'.format(
hs_url=current_site_url,
res_id=self.resource.short_id,
aggr_name=child_aggr.map_short_file_path + '#aggregation')
res_uri = encode_resource_url(res_uri)
child_ore_aggr = Aggregation(res_uri)
child_ore_aggregations.append(child_ore_aggr)
child_ore_aggregations[n]._ore.isAggregatedBy = ag_url
child_agg_type_url = "{site}/terms/{aggr_type}"
child_agg_type_url = child_agg_type_url.format(
site=current_site_url, aggr_type=child_aggr.get_aggregation_type_name())
child_ore_aggregations[n]._dcterms.type = URIRef(child_agg_type_url)
# Add contained aggregations to the aggregation
for aggr in child_ore_aggregations:
a.add_resource(aggr)
# Register a serializer with the aggregation, which creates a new ResourceMap that
# needs a URI
serializer = RdfLibSerializer('xml')
# resMap = a.register_serialization(serializer, res_map_url)
a.register_serialization(serializer, res_map_url)
# Fetch the serialization
remdoc = a.get_serialization()
# remove this additional xml element - not sure why it gets added
# <ore:aggregates rdf:resource="https://www.hydroshare.org/terms/[aggregation name]"/>
xml_element_to_replace = '<ore:aggregates rdf:resource="{}"/>\n'.format(agg_type_url)
xml_string = remdoc.data.replace(xml_element_to_replace, '')
return xml_string
def xml_file_short_path(self, resmap=True):
"""File path of the aggregation metadata or map xml file relative
to {resource_id}/data/contents/
:param resmap If true file path for aggregation resmap xml file, otherwise file path for
aggregation metadata file is returned
"""
xml_file_name = self.get_xml_file_name(resmap=resmap)
if self.folder is not None:
file_folder = self.folder
else:
file_folder = ''
aggr_file = self.files.first()
if aggr_file is not None:
file_folder = aggr_file.file_folder
if file_folder:
xml_file_name = os.path.join(file_folder, xml_file_name)
return xml_file_name
def logical_delete(self, user, delete_res_files=True):
"""
Deletes the logical file as well as all resource files associated with this logical file.
        This function is primarily used by the system to delete the logical file object and
        associated metadata as part of deleting a resource file object. Any time a request is
        made to delete a specific resource file object, if the requested file is part of a
        logical file, then all files in the same logical file group will be deleted. If custom
        logic requires deleting the logical file object (LFO), then instead of using
        LFO.delete(), you must use LFO.logical_delete().
:param user user who is deleting file type/aggregation
:param delete_res_files If True all resource files that are part of this logical file will
be deleted
"""
from hs_core.hydroshare.resource import delete_resource_file
parent_aggr = self.get_parent()
resource = self.resource
# delete associated metadata and map xml documents
istorage = resource.get_irods_storage()
if istorage.exists(self.metadata_file_path):
istorage.delete(self.metadata_file_path)
if istorage.exists(self.map_file_path):
istorage.delete(self.map_file_path)
        # delete schema json file if this is a model aggregation
if istorage.exists(self.schema_file_path):
istorage.delete(self.schema_file_path)
# delete all resource files associated with this instance of logical file
if delete_res_files:
for f in self.files.all():
delete_resource_file(resource.short_id, f.id, user, delete_logical_file=False)
# delete logical file first then delete the associated metadata file object
# deleting the logical file object will not automatically delete the associated
# metadata file object
metadata = self.metadata if self.has_metadata else None
# if we are deleting a model program aggregation, then we need to set the
# metadata of all the associated model instances to dirty
if self.is_model_program:
self.set_model_instances_dirty()
self.delete()
if metadata is not None:
# this should also delete on all metadata elements that have generic relations with
# the metadata object
metadata.delete()
        # if this deleted aggregation has a parent aggregation - xml files for the parent
# aggregation need to be regenerated at the time of download - so need to set metadata to dirty
if parent_aggr is not None:
parent_aggr.set_metadata_dirty()
resource.cleanup_aggregations()
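    # Illustrative sketch (hypothetical objects): delete an aggregation together with
    # all of its resource files, as the docstring above prescribes:
    #   res_file.logical_file.logical_delete(user=user, delete_res_files=True)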
def remove_aggregation(self):
"""Deletes the aggregation object (logical file) *self* and the associated metadata
object. However, it doesn't delete any resource files that are part of the aggregation."""
# delete associated metadata and map xml document
istorage = self.resource.get_irods_storage()
if istorage.exists(self.metadata_file_path):
istorage.delete(self.metadata_file_path)
if istorage.exists(self.map_file_path):
istorage.delete(self.map_file_path)
        # delete schema json file if this is a model aggregation
if istorage.exists(self.schema_file_path):
istorage.delete(self.schema_file_path)
# find if there is a parent aggregation - files in this (self) aggregation
# need to be added to parent if exists
parent_aggr = self.get_parent()
res_files = []
res_files.extend(self.files.all())
# first need to set the aggregation for each of the associated resource files to None
# so that deleting the aggregation (logical file) does not cascade to deleting of
# resource files associated with the aggregation
for res_file in self.files.all():
res_file.logical_file_content_object = None
res_file.save()
# delete logical file (aggregation) first then delete the associated metadata file object
# deleting the logical file object will not automatically delete the associated
# metadata file object
metadata = self.metadata if self.has_metadata else None
# if we are removing a model program aggregation, then we need to set the
# metadata of all the associated model instances to dirty
if self.is_model_program:
self.set_model_instances_dirty()
self.delete()
if metadata is not None:
# this should also delete on all metadata elements that have generic relations with
# the metadata object
metadata.delete()
# make all the resource files of this (self) aggregation part of the parent aggregation
if parent_aggr is not None:
for res_file in res_files:
parent_aggr.add_resource_file(res_file)
# need to regenerate the xml files for the parent at the time of download so that the references
# to this deleted aggregation can be removed from the parent xml files - so need to set metadata to dirty
parent_aggr.set_metadata_dirty()
post_remove_file_aggregation.send(
sender=self.__class__,
resource=self.resource,
res_files=self.files.all()
)
self.resource.setAVU("bag_modified", True)
self.resource.setAVU('metadata_dirty', 'true')
def create_aggregation_xml_documents(self, create_map_xml=True):
super(AbstractModelLogicalFile, self).create_aggregation_xml_documents(create_map_xml)
self.metadata.is_dirty = False
self.metadata.save()
self.create_metadata_schema_json_file()
def create_metadata_schema_json_file(self):
"""Creates aggregation metadata schema json file """
if not self.metadata_schema_json:
return
# create a temp dir where the json file will be temporarily saved before copying to iRODS
tmpdir = os.path.join(settings.TEMP_FILE_DIR, str(random.getrandbits(32)), uuid4().hex)
istorage = self.resource.get_irods_storage()
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
os.makedirs(tmpdir)
# create json schema file for the aggregation
json_from_file_name = os.path.join(tmpdir, 'schema.json')
try:
with open(json_from_file_name, 'w') as out:
json_schema = json.dumps(self.metadata_schema_json, indent=4)
out.write(json_schema)
to_file_name = self.schema_file_path
istorage.saveFile(json_from_file_name, to_file_name, True)
finally:
shutil.rmtree(tmpdir)
def can_be_deleted_on_file_delete(self):
"""model aggregation based on folder is not deleted on delete of any or all of the resource files that
are part of the model aggregation"""
return self.folder is None
@classmethod
def can_set_folder_to_aggregation(cls, resource, dir_path):
"""helper to check if the specified folder *dir_path* can be set to ModelProgram or ModelInstance aggregation
"""
# checking target folder for any aggregation
if resource.get_folder_aggregation_object(dir_path) is not None:
# target folder is already an aggregation
return False
aggregation_path = dir_path
if dir_path.startswith(resource.file_path):
aggregation_path = dir_path[len(resource.file_path) + 1:]
# checking sub-folders for fileset aggregation
# check that we don't have any sub folder of dir_path representing a fileset aggregation
# so that we can avoid nesting a fileset aggregation inside a model program or model instance aggregation
if resource.filesetlogicalfile_set.filter(folder__startswith=aggregation_path).exists():
return False
if cls.__name__ == "ModelProgramLogicalFile":
# checking sub-folders for model program aggregation
# check that we don't have any sub folder of dir_path representing a model program aggregation
# so that we can avoid nesting a model program aggregation inside a model
# program aggregation
if resource.modelprogramlogicalfile_set.filter(folder__startswith=aggregation_path).exists():
return False
# checking sub-folders for model instance aggregation
# check that we don't have any sub folder of dir_path representing a model instance aggregation
# so that we can avoid nesting a model instance aggregation inside a model program aggregation
if resource.modelinstancelogicalfile_set.filter(folder__startswith=aggregation_path).exists():
return False
# check the first parent folder that represents an aggregation
irods_path = dir_path
if resource.is_federated:
irods_path = os.path.join(resource.resource_federation_path, irods_path)
# get the parent folder path
path = os.path.dirname(dir_path)
parent_aggregation = None
while '/' in path:
if path == resource.file_path:
break
parent_aggregation = resource.get_folder_aggregation_object(path)
if parent_aggregation is not None:
# this is the first parent folder that represents an aggregation
break
# get the next parent folder path
path = os.path.dirname(path)
if parent_aggregation is not None:
if parent_aggregation.is_fileset:
# check that all resource files under the target folder 'dir_path' are associated with fileset only
files_in_path = ResourceFile.list_folder(resource, folder=irods_path, sub_folders=True)
# if all the resource files are associated with fileset then we can set the folder to model program
# or model instance aggregation
if files_in_path:
return all(res_file.has_logical_file and res_file.logical_file.is_fileset for
res_file in files_in_path)
return False
else:
return False
else:
# none of the parent folders represents an aggregation
# check the files in the target path
files_in_path = ResourceFile.list_folder(resource, folder=irods_path, sub_folders=True)
if files_in_path:
# if none of the resource files in the target path has logical file then we can set the folder
# to model program or model instance aggregation
if cls.__name__ == "ModelProgramLogicalFile":
# if none of the resource files in the target path has logical file then we can set the folder
# to model program aggregation
return not any(res_file.has_logical_file for res_file in files_in_path)
else:
# if any of the files is part of a model instance aggr or fileset - folder can't be
# set to model instance
return not any(res_file.has_logical_file and (res_file.logical_file.is_model_instance or
res_file.logical_file.is_fileset) for
res_file in files_in_path)
# path has no files - can't set the folder to aggregation
return False
|
hydroshare/hydroshare
|
hs_file_types/models/base_model_program_instance.py
|
Python
|
bsd-3-clause
| 24,351
|
# Copyright (c) 2015-2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from mhctools import NetMHCpan
from nose.tools import eq_, raises
from pyensembl import ensembl_grch37
from topiary import TopiaryPredictor
from varcode import Variant, VariantCollection
# TODO: find out about these variants,
# what do we expect from them? Are they SNVs?
variants = VariantCollection([
Variant(
contig=10,
start=100018900,
ref='C',
alt='T',
ensembl=ensembl_grch37),
Variant(
contig=11,
start=32861682,
ref='G',
alt='A',
ensembl=ensembl_grch37)])
alleles = [
'A02:01',
'a0204',
'B*07:02',
'HLA-B14:02',
'HLA-C*07:02',
'hla-c07:01'
]
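# NOTE: the allele names above intentionally mix several spellings; mhctools is
# expected to normalize them (illustratively, to forms like "HLA-A*02:01")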
mhc_model = NetMHCpan(
alleles=alleles,
default_peptide_lengths=[9])
def test_epitope_prediction_without_padding():
output_without_padding = TopiaryPredictor(
mhc_model=mhc_model,
only_novel_epitopes=True).predict_from_variants(variants=variants)
# one prediction for each variant * number of alleles
strong_binders = output_without_padding[output_without_padding.affinity <= 500]
eq_(len(strong_binders), 5)
@raises(ValueError)
def test_epitope_prediction_with_invalid_padding():
TopiaryPredictor(
mhc_model=mhc_model,
padding_around_mutation=7).predict_from_variants(variants=variants)
@raises(ValueError)
def test_epitope_prediction_with_invalid_zero_padding():
TopiaryPredictor(
mhc_model=mhc_model,
        padding_around_mutation=0).predict_from_variants(variants=variants)  # zero padding is invalid, per the test name
def test_epitope_prediction_with_valid_padding():
predictor = TopiaryPredictor(
mhc_model=mhc_model,
padding_around_mutation=8,
only_novel_epitopes=True)
output_with_padding = predictor.predict_from_variants(variants=variants)
# 6 alleles * 2 mutations * 9 distinct windows = 108
eq_(len(output_with_padding), 108)
|
hammerlab/topiary
|
test/test_mutant_epitope_predictions_class1.py
|
Python
|
apache-2.0
| 2,550
|
"""
Custom manager for HelpEntry objects.
"""
from django.db import models
from evennia.utils import logger, utils
from evennia.typeclasses.managers import TypedObjectManager
__all__ = ("HelpEntryManager",)
class HelpEntryManager(TypedObjectManager):
"""
This HelpEntryManager implements methods for searching
and manipulating HelpEntries directly from the database.
These methods will all return database objects
(or QuerySets) directly.
Evennia-specific:
find_topicmatch
find_apropos
find_topicsuggestions
find_topics_with_category
all_to_category
search_help (equivalent to evennia.search_helpentry)
"""
def find_topicmatch(self, topicstr, exact=False):
"""
Searches for matching topics based on player's input.
Args:
            topicstr (str): Help topic to search for.
            exact (bool, optional): Require exact match
                (case-insensitive). If `False` (default), match
sub-parts of the string.
Returns:
matches (HelpEntries): Query results.
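        Example (illustrative; assumes a help entry keyed "combat" exists):
            HelpEntry.objects.find_topicmatch("comb")  # falls back to istartswith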
"""
dbref = utils.dbref(topicstr)
if dbref:
return self.filter(id=dbref)
topics = self.filter(db_key__iexact=topicstr)
if not topics and not exact:
topics = self.filter(db_key__istartswith=topicstr)
if not topics:
topics = self.filter(db_key__icontains=topicstr)
return topics
def find_apropos(self, topicstr):
"""
Do a very loose search, returning all help entries containing
the search criterion in their titles.
Args:
topicstr (str): Search criterion.
Returns:
matches (HelpEntries): Query results.
"""
return self.filter(db_key__icontains=topicstr)
def find_topicsuggestions(self, topicstr):
"""
Do a fuzzy match, preferably within the category of the
current topic.
Args:
topicstr (str): Search criterion.
Returns:
matches (Helpentries): Query results.
"""
return self.filter(db_key__icontains=topicstr).exclude(db_key__iexact=topicstr)
def find_topics_with_category(self, help_category):
"""
Search topics having a particular category.
Args:
help_category (str): Category query criterion.
Returns:
matches (HelpEntries): Query results.
"""
return self.filter(db_help_category__iexact=help_category)
def get_all_topics(self):
"""
Get all topics.
Returns:
all (HelpEntries): All topics.
"""
return self.all()
def get_all_categories(self):
"""
Return all defined category names with at least one topic in
them.
Returns:
matches (list): Unique list of category names across all
topics.
"""
return list(set(topic.help_category for topic in self.all()))
def all_to_category(self, default_category):
"""
Shifts all help entries in database to default_category. This
action cannot be reverted. It is used primarily by the engine
when importing a default help database, making sure this ends
up in one easily separated category.
Args:
default_category (str): Category to move entries to.
"""
topics = self.all()
for topic in topics:
topic.help_category = default_category
topic.save()
string = "Help database moved to category %s" % default_category
logger.log_info(string)
def search_help(self, ostring, help_category=None):
"""
Retrieve a search entry object.
Args:
ostring (str): The help topic to look for.
            help_category (str): Limit the search to a particular help category.
"""
ostring = ostring.strip().lower()
if help_category:
return self.filter(db_key__iexact=ostring,
db_help_category__iexact=help_category)
else:
return self.filter(db_key__iexact=ostring)
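# Illustrative usage sketch (assumes the default HelpEntry typeclass; names are
# examples, not part of this module):
#   from evennia.help.models import HelpEntry
#   HelpEntry.objects.search_help("combat", help_category="General")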
|
titeuf87/evennia
|
evennia/help/manager.py
|
Python
|
bsd-3-clause
| 4,222
|
#!/usr/bin/env vpython3
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import json
import os
import sys
import unittest
if sys.version_info[0] == 2:
import mock
else:
import unittest.mock as mock
from pyfakefs import fake_filesystem_unittest
from unexpected_passes_common import builders
from unexpected_passes_common import constants
from unexpected_passes_common import data_types
from unexpected_passes_common import multiprocessing_utils
from unexpected_passes_common import unittest_utils
class GetCiBuildersUnittest(fake_filesystem_unittest.TestCase):
def setUp(self):
self._builders_instance = unittest_utils.GenericBuilders()
self._isolate_patcher = mock.patch.object(
self._builders_instance,
'GetIsolateNames',
return_value={'telemetry_gpu_integration_test'})
self._isolate_mock = self._isolate_patcher.start()
self.addCleanup(self._isolate_patcher.stop)
def CreateFile(self, *args, **kwargs):
# TODO(crbug.com/1156806): Remove this and just use fs.create_file() when
# Catapult is updated to a newer version of pyfakefs that is compatible with
# Chromium's version.
if hasattr(self.fs, 'create_file'):
self.fs.create_file(*args, **kwargs)
else:
self.fs.CreateFile(*args, **kwargs)
def testJsonContentLoaded(self):
"""Tests that the correct JSON data is loaded in."""
self.setUpPyfakefs()
gpu_json = {
'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
'Android Release (Nexus 5X)': {
'isolated_scripts': [{
'args': [
'webgl_conformance',
],
'isolate_name':
'telemetry_gpu_integration_test',
}],
},
'GPU Linux Builder': {},
}
gpu_fyi_json = {
'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
'ANGLE GPU Android Release (Nexus 5X)': {
'isolated_scripts': [{
'args': [
'webgl_conformance',
],
'isolate_name':
'telemetry_gpu_integration_test',
}],
},
'GPU FYI Linux Builder': {},
}
self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
'chromium.gpu.json'),
contents=json.dumps(gpu_json))
self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
'chromium.gpu.fyi.json'),
contents=json.dumps(gpu_fyi_json))
gpu_builders = self._builders_instance.GetCiBuilders('webgl_conformance')
self.assertEqual(
gpu_builders,
set([
data_types.BuilderEntry('Android Release (Nexus 5X)',
constants.BuilderTypes.CI, False),
data_types.BuilderEntry('ANGLE GPU Android Release (Nexus 5X)',
constants.BuilderTypes.CI, False),
data_types.BuilderEntry('GPU Linux Builder',
constants.BuilderTypes.CI, False),
data_types.BuilderEntry('GPU FYI Linux Builder',
constants.BuilderTypes.CI, False),
]))
def testFilterBySuite(self):
"""Tests that only builders that run the given suite are returned."""
def SideEffect(tm, s):
tests = tm.get('isolated_scripts', [])
for t in tests:
if t.get('isolate_name') == 'foo_integration_test':
if s in t.get('args', []):
return True
return False
self.setUpPyfakefs()
gpu_json = {
'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
'Android Tester': {
'isolated_scripts': [
{
'args': [
'bar_conformance',
],
'isolate_name': 'not_telemetry',
},
],
},
'Linux Tester': {
'isolated_scripts': [
{
'args': [
'not_a_suite',
],
'isolate_name': 'foo_integration_test',
},
],
},
'Windows Tester': {
'isolated_scripts': [
{
'args': [
'bar_conformance',
],
'isolate_name': 'foo_integration_test',
},
],
},
}
self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
'chromium.json'),
contents=json.dumps(gpu_json))
with mock.patch.object(self._builders_instance,
'_BuilderRunsTestOfInterest',
side_effect=SideEffect):
gpu_builders = self._builders_instance.GetCiBuilders('bar_conformance')
self.assertEqual(
gpu_builders,
set([
data_types.BuilderEntry('Windows Tester', constants.BuilderTypes.CI,
False)
]))
def testRealContentCanBeLoaded(self):
"""Tests that *something* from the real JSON files can be loaded."""
# This directory is not available on swarming, so if it doesn't exist, just
# skip the test.
if not os.path.exists(builders.TESTING_BUILDBOT_DIR):
return
self.assertNotEqual(
len(self._builders_instance.GetCiBuilders('webgl_conformance')), 0)
class GetMirroredBuildersForCiBuilderUnittest(unittest.TestCase):
def setUp(self):
self._builders_instance = builders.Builders(False)
self._bb_patcher = mock.patch.object(self._builders_instance,
'_GetBuildbucketOutputForCiBuilder')
self._bb_mock = self._bb_patcher.start()
self.addCleanup(self._bb_patcher.stop)
self._fake_ci_patcher = mock.patch.object(self._builders_instance,
'GetFakeCiBuilders',
return_value={})
self._fake_ci_mock = self._fake_ci_patcher.start()
self.addCleanup(self._fake_ci_patcher.stop)
self._non_chromium_patcher = mock.patch.object(
self._builders_instance,
'GetNonChromiumBuilders',
return_value={'foo_non_chromium'})
self._non_chromium_mock = self._non_chromium_patcher.start()
self.addCleanup(self._non_chromium_patcher.stop)
def testFakeCiBuilder(self):
"""Tests that a fake CI builder gets properly mapped."""
self._fake_ci_mock.return_value = {
data_types.BuilderEntry('foo_ci', constants.BuilderTypes.CI, False):
{data_types.BuilderEntry('foo_try', constants.BuilderTypes.TRY, False)}
}
try_builder, found_mirror = (
self._builders_instance._GetMirroredBuildersForCiBuilder(
data_types.BuilderEntry('foo_ci', constants.BuilderTypes.CI,
False)))
self.assertTrue(found_mirror)
self.assertEqual(
try_builder,
set([
data_types.BuilderEntry('foo_try', constants.BuilderTypes.TRY,
False)
]))
self._bb_mock.assert_not_called()
def testNoBuildbucketOutput(self):
"""Tests that a failure to get Buildbucket output is surfaced."""
self._bb_mock.return_value = ''
builder_entry = data_types.BuilderEntry('nonexistent',
constants.BuilderTypes.CI, False)
try_builder, found_mirror = (
self._builders_instance._GetMirroredBuildersForCiBuilder(builder_entry))
self.assertFalse(found_mirror)
self.assertEqual(try_builder, set([builder_entry]))
def testBuildbucketOutput(self):
"""Tests that Buildbucket output is parsed correctly."""
self._bb_mock.return_value = json.dumps({
'output': {
'properties': {
'mirrored_builders': [
'try:foo_try',
'try:bar_try',
]
}
}
})
try_builders, found_mirror = (
self._builders_instance._GetMirroredBuildersForCiBuilder(
data_types.BuilderEntry('foo_ci', constants.BuilderTypes.CI,
False)))
self.assertTrue(found_mirror)
self.assertEqual(
try_builders,
set([
data_types.BuilderEntry('foo_try', constants.BuilderTypes.TRY,
False),
data_types.BuilderEntry('bar_try', constants.BuilderTypes.TRY,
False)
]))
def testBuildbucketOutputInternal(self):
"""Tests that internal Buildbucket output is parsed correctly."""
self._bb_mock.return_value = json.dumps({
'output': {
'properties': {
'mirrored_builders': [
'try:foo_try',
'try:bar_try',
]
}
}
})
try_builders, found_mirror = (
self._builders_instance._GetMirroredBuildersForCiBuilder(
data_types.BuilderEntry('foo_ci', constants.BuilderTypes.CI, True)))
self.assertTrue(found_mirror)
self.assertEqual(
try_builders,
set([
data_types.BuilderEntry('foo_try', constants.BuilderTypes.TRY,
True),
data_types.BuilderEntry('bar_try', constants.BuilderTypes.TRY, True)
]))
class GetTryBuildersUnittest(unittest.TestCase):
def setUp(self):
self._builders_instance = builders.Builders(False)
self._get_patcher = mock.patch.object(self._builders_instance,
'_GetMirroredBuildersForCiBuilder')
self._get_mock = self._get_patcher.start()
self.addCleanup(self._get_patcher.stop)
self._pool_patcher = mock.patch.object(multiprocessing_utils,
'GetProcessPool')
self._pool_mock = self._pool_patcher.start()
self._pool_mock.return_value = unittest_utils.FakePool()
self.addCleanup(self._pool_patcher.stop)
def testNoOutputCausesFailure(self):
"""Tests that a failure to get Buildbot output raises an exception."""
self._get_mock.return_value = (set(['foo_ci']), False)
with self.assertRaises(RuntimeError):
self._builders_instance.GetTryBuilders(['foo_ci'])
def testOutputReturned(self):
"""Tests that parsed builders get returned on success."""
def SideEffect(ci_builder):
b = [
ci_builder.replace('ci', 'try'),
ci_builder.replace('ci', 'try2'),
]
return set(b), True
self._get_mock.side_effect = SideEffect
mirrored_builders = self._builders_instance.GetTryBuilders(
['foo_ci', 'bar_ci'])
self.assertEqual(mirrored_builders,
set(['foo_try', 'foo_try2', 'bar_try', 'bar_try2']))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
chromium/chromium
|
testing/unexpected_passes_common/builders_unittest.py
|
Python
|
bsd-3-clause
| 11,096
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import time
# ============= standard library imports ========================
from threading import Thread
from pyface.tasks.action.schema import SToolBar
from pyface.tasks.task_layout import TaskLayout, PaneItem, Splitter, VSplitter
from pyface.ui.qt4.tasks.advanced_editor_area_pane import EditorWidget
from traits.api import Any, Instance, on_trait_change
# ============= local library imports ==========================
from pychron.core.ui.gui import invoke_in_main_thread
from pychron.envisage.tasks.editor_task import EditorTask
from pychron.spectrometer.tasks.editor import PeakCenterEditor, ScanEditor, CoincidenceEditor, ScannerEditor
from pychron.spectrometer.tasks.spectrometer_actions import StopScanAction
from pychron.spectrometer.tasks.spectrometer_panes import ControlsPane, \
ReadoutPane, IntensitiesPane, RecordControlsPane, DACScannerPane, MassScannerPane
class SpectrometerTask(EditorTask):
scan_manager = Any
name = 'Spectrometer'
id = 'pychron.spectrometer'
_scan_editor = Instance(ScanEditor)
tool_bars = [SToolBar(StopScanAction(), )]
def info(self, msg, *args, **kw):
super(SpectrometerTask, self).info(msg)
def spy_position_magnet(self, *args, **kw):
self.scan_manager.position_magnet(*args, **kw)
def spy_peak_center(self, name):
peak_kw = dict(confirm_save=False, warn=True,
new_thread=False,
message='spectrometer script peakcenter',
on_end=self._on_peak_center_end)
setup_kw = dict(config_name=name)
return self._peak_center(setup_kw=setup_kw, peak_kw=peak_kw)
def populate_mftable(self):
sm = self.scan_manager
cfg = sm.setup_populate_mftable()
if cfg:
def func():
refiso = cfg.isotope
ion = sm.ion_optics_manager
ion.backup_mftable()
odefl = []
dets = cfg.get_detectors()
self.debug('setting deflections')
for det, defl in dets:
odefl.append((det, sm.spectrometer.get_deflection(det)))
sm.spectrometer.set_deflection(det, defl)
for di in dets:
ion.setup_peak_center(detector=[di.name], isotope=refiso,
config_name=cfg.peak_center_config.active_item.name,
standalone_graph=False,
new=True,
show_label=True, use_configuration_dac=False)
ion.peak_center.update_others = False
name = 'Pop MFTable {}-{}'.format(di.name, refiso)
invoke_in_main_thread(self._open_editor, PeakCenterEditor(model=ion.peak_center,
name=name))
self._on_peak_center_start()
ion.do_peak_center(new_thread=False, save=True, warn=True)
self._on_peak_center_end()
if not ion.peak_center.isAlive():
break
self.debug('unset deflections')
for det, defl in odefl:
sm.spectrometer.set_deflection(det, defl)
fp = cfg.get_finish_position()
self.debug('move to end position={}'.format(fp))
if fp:
iso, det = fp
if iso and det:
ion.position(iso, det)
t = Thread(target=func)
t.start()
def stop_scan(self):
self.debug('stop scan fired')
editor = self.active_editor
self.debug('active editor {}'.format(editor))
if editor:
if isinstance(editor, (ScanEditor, PeakCenterEditor, CoincidenceEditor)):
self.debug('editor stop')
editor.stop()
def do_coincidence(self):
es = [int(e.name.split(' ')[-1])
for e in self.editor_area.editors
if isinstance(e, CoincidenceEditor)]
i = max(es) + 1 if es else 1
man = self.scan_manager.ion_optics_manager
name = 'Coincidence {:02d}'.format(i)
if man.setup_coincidence():
self._open_editor(CoincidenceEditor(model=man.coincidence, name=name))
man.do_coincidence_scan()
def do_peak_center(self):
peak_kw = dict(confirm_save=True, warn=True,
message='manual peakcenter',
on_end=self._on_peak_center_end)
self._peak_center(peak_kw=peak_kw)
def define_peak_center(self):
from pychron.spectrometer.ion_optics.define_peak_center_view import DefinePeakCenterView
man = self.scan_manager.ion_optics_manager
spec = man.spectrometer
dets = spec.detector_names
isos = spec.isotopes
dpc = DefinePeakCenterView(detectors=dets,
isotopes=isos,
detector=dets[0],
isotope=isos[0])
info = dpc.edit_traits()
if info.result:
det = dpc.detector
isotope = dpc.isotope
dac = dpc.dac
self.debug('manually setting mftable to {}:{}:{}'.format(det, isotope, dac))
message = 'manually define peak center {}:{}:{}'.format(det, isotope, dac)
man.spectrometer.magnet.update_field_table(det, isotope, dac, message)
def _on_peak_center_start(self):
self.scan_manager.log_events_enabled = False
self.scan_manager.scan_enabled = False
def _on_peak_center_end(self):
self.scan_manager.log_events_enabled = True
self.scan_manager.scan_enabled = True
def send_configuration(self):
self.scan_manager.spectrometer.send_configuration()
def prepare_destroy(self):
for e in self.editor_area.editors:
if hasattr(e, 'stop'):
e.stop()
self.scan_manager.prepare_destroy()
super(SpectrometerTask, self).prepare_destroy()
# def activated(self):
# self.scan_manager.activate()
# self._scan_factory()
# super(SpectrometerTask, self).activated()
def create_dock_panes(self):
panes = [
ControlsPane(model=self.scan_manager),
RecordControlsPane(model=self.scan_manager),
MassScannerPane(model=self.scan_manager),
DACScannerPane(model=self.scan_manager),
ReadoutPane(model=self.scan_manager),
IntensitiesPane(model=self.scan_manager)]
panes = self._add_canvas_pane(panes)
return panes
# def _active_editor_changed(self, new):
# if not new:
# try:
# self._scan_factory()
# except AttributeError:
# pass
# private
def _peak_center(self, setup_kw=None, peak_kw=None):
if setup_kw is None:
setup_kw = {}
if peak_kw is None:
peak_kw = {}
es = []
for e in self.editor_area.editors:
if isinstance(e, PeakCenterEditor):
try:
es.append(int(e.name.split(' ')[-1]))
except ValueError:
pass
i = max(es) + 1 if es else 1
ret = -1
ion = self.scan_manager.ion_optics_manager
self._peak_center_start_hook()
time.sleep(2)
name = 'Peak Center {:02d}'.format(i)
if ion.setup_peak_center(new=True, **setup_kw):
self._on_peak_center_start()
invoke_in_main_thread(self._open_editor, PeakCenterEditor(model=ion.peak_center, name=name))
ion.do_peak_center(**peak_kw)
ret = ion.peak_center_result
self._peak_center_stop_hook()
return ret
def _peak_center_start_hook(self):
pass
def _peak_center_stop_hook(self):
pass
def _scan_factory(self):
sim = self.scan_manager.spectrometer.simulation
name = 'Scan (Simulation)' if sim else 'Scan'
# self._open_editor(ScanEditor(model=self.scan_manager, name=name))
# print 'asdfas', self.editor_area.control
# print [e for e in self.editor_area.control.children() if isinstance(e, EditorWidget)]
# super(SpectrometerTask, self).activated()
se = ScanEditor(model=self.scan_manager, name=name)
self._open_editor(se)
def _default_layout_default(self):
return TaskLayout(
left=Splitter(
PaneItem('pychron.spectrometer.controls'),
orientation='vertical'),
right=VSplitter(PaneItem('pychron.spectrometer.intensities'),
PaneItem('pychron.spectrometer.readout')))
# def create_central_pane(self):
# g = ScanPane(model=self.scan_manager)
# return g
@on_trait_change('scan_manager:mass_scanner:new_scanner')
def _handle_mass_scan_event(self):
self._scan_event(self.scan_manager.mass_scanner)
@on_trait_change('scan_manager:dac_scanner:new_scanner')
def _handle_dac_scan_event(self):
self._scan_event(self.scan_manager.dac_scanner)
def _scan_event(self, scanner):
sim = self.scan_manager.spectrometer.simulation
name = 'Magnet Scan (Simulation)' if sim else 'Magnet Scan'
editor = next((e for e in self.editor_area.editors if e.id == 'pychron.scanner'), None)
if editor is not None:
scanner.reset()
else:
editor = ScannerEditor(model=scanner, name=name, id='pychron.scanner')
self._open_editor(editor, activate=False)
self.split_editors(0, 1, h2=300, orientation='vertical')
self.activate_editor(editor)
@on_trait_change('window:opened')
def _opened(self):
self.scan_manager.activate()
self._scan_factory()
ee = [e for e in self.editor_area.control.children() if isinstance(e, EditorWidget)][0]
# print int(ee.features())
# ee.setFeatures(QtGui.QDockWidget.NoDockWidgetFeatures)
# print int(ee.features())
# ee.update_title()
# ============= EOF =============================================
|
UManPychron/pychron
|
pychron/spectrometer/tasks/spectrometer_task.py
|
Python
|
apache-2.0
| 11,184
|
#
# THIS IS WORK IN PROGRESS.
#
# The Python Imaging Library.
# $Id: //modules/pil/PIL/WmfImagePlugin.py#4 $
#
# WMF support for PIL
#
# history:
# 96-12-14 fl Created
#
# notes:
# This code currently supports placeable metafiles only, and
# just a few graphics operations are implemented.
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.1"
import Image, ImageDraw, ImageFile
import string
#
# --------------------------------------------------------------------
def i16(c):
return ord(c[0]) + (ord(c[1])<<8)
def i32(c):
return ord(c[0]) + (ord(c[1])<<8) + (ord(c[2])<<16) + (ord(c[3])<<24)
# --------------------------------------------------------------------
# The following codes are taken from the wingdi.h header file.
# Copyright (c) 1985-1996, Microsoft Corp. All rights reserved.
META_ANIMATEPALETTE = 0x0436
META_ARC = 0x0817
META_BITBLT = 0x0922
META_CHORD = 0x0830
META_CREATEBRUSHINDIRECT = 0x02FC
META_CREATEFONTINDIRECT = 0x02FB
META_CREATEPALETTE = 0x00f7
META_CREATEPATTERNBRUSH = 0x01F9
META_CREATEPENINDIRECT = 0x02FA
META_CREATEREGION = 0x06FF
META_DELETEOBJECT = 0x01f0
META_DIBBITBLT = 0x0940
META_DIBCREATEPATTERNBRUSH = 0x0142
META_DIBSTRETCHBLT = 0x0b41
META_ELLIPSE = 0x0418
META_ESCAPE = 0x0626
META_EXCLUDECLIPRECT = 0x0415
META_EXTFLOODFILL = 0x0548
META_EXTTEXTOUT = 0x0a32
META_FILLREGION = 0x0228
META_FLOODFILL = 0x0419
META_FRAMEREGION = 0x0429
META_INTERSECTCLIPRECT = 0x0416
META_INVERTREGION = 0x012A
META_LINETO = 0x0213
META_MOVETO = 0x0214
META_OFFSETCLIPRGN = 0x0220
META_OFFSETVIEWPORTORG = 0x0211
META_OFFSETWINDOWORG = 0x020F
META_PAINTREGION = 0x012B
META_PATBLT = 0x061D
META_PIE = 0x081A
META_POLYGON = 0x0324
META_POLYLINE = 0x0325
META_POLYPOLYGON = 0x0538
META_REALIZEPALETTE = 0x0035
META_RECTANGLE = 0x041B
META_RESIZEPALETTE = 0x0139
META_RESTOREDC = 0x0127
META_ROUNDRECT = 0x061C
META_SAVEDC = 0x001E
META_SCALEVIEWPORTEXT = 0x0412
META_SCALEWINDOWEXT = 0x0410
META_SELECTCLIPREGION = 0x012C
META_SELECTOBJECT = 0x012D
META_SELECTPALETTE = 0x0234
META_SETBKCOLOR = 0x0201
META_SETBKMODE = 0x0102
META_SETDIBTODEV = 0x0d33
META_SETMAPMODE = 0x0103
META_SETMAPPERFLAGS = 0x0231
META_SETPALENTRIES = 0x0037
META_SETPIXEL = 0x041F
META_SETPOLYFILLMODE = 0x0106
META_SETRELABS = 0x0105
META_SETROP2 = 0x0104
META_SETSTRETCHBLTMODE = 0x0107
META_SETTEXTALIGN = 0x012E
META_SETTEXTCHAREXTRA = 0x0108
META_SETTEXTCOLOR = 0x0209
META_SETTEXTJUSTIFICATION = 0x020A
META_SETVIEWPORTEXT = 0x020E
META_SETVIEWPORTORG = 0x020D
META_SETWINDOWEXT = 0x020C
META_SETWINDOWORG = 0x020B
META_STRETCHBLT = 0x0B23
META_STRETCHDIB = 0x0f43
META_TEXTOUT = 0x0521
# create a code to name dictionary (for debugging)
NAME = {}
for k, v in vars().items():
if k[:5] == "META_":
NAME[v] = k[5:]
#
# --------------------------------------------------------------------
# Read WMF file
def _accept(prefix):
return prefix[:6] == "\327\315\306\232\000\000"
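# (the 6-byte magic above is the 32-bit placeable-WMF key 0x9AC6CDD7 in
# little-endian byte order, followed by a two-byte HWmf field that must be zero)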
##
# Image plugin for Windows metafiles. This plugin can identify a
# metafile, but the loader only supports a small number of primitives,
# and isn't very usable.
class WmfImageFile(ImageFile.ImageFile):
format = "WMF"
format_description = "Windows Metafile"
def _open(self):
        # check placeable header
s = self.fp.read(22)
if s[:6] != "\327\315\306\232\000\000":
raise SyntaxError, "Not a placable WMF file"
# position on output device
bbox = i16(s[6:8]), i16(s[8:10]), i16(s[10:12]), i16(s[12:14])
# FIXME: should take the scale into account
self.mode = "P"
self.size = (bbox[2]-bbox[0]) / 20, (bbox[3]-bbox[1]) / 20
# FIXME: while hacking
self.size = (bbox[2] + bbox[0])/10, (bbox[3] + bbox[1])/10
self.bbox = bbox
# check standard header
s = self.fp.read(18)
if s[:6] != "\001\000\011\000\000\003":
raise SyntaxError, "Not a WMF file"
def _ink(self, rgb):
# lookup colour in current palette
try:
return self.palette[rgb]
except KeyError:
# hmm. what if the palette becomes full?
ink = len(self.palette)
self.palette[rgb] = ink
return ink
def load(self):
if self.im:
return
#
# windows standard palette
self.palette = {
'\000\000\000': 0,
'\200\000\000': 1,
'\000\200\000': 2,
'\200\200\000': 3,
'\000\000\200': 4,
'\200\000\200': 5,
'\000\200\200': 6,
'\300\300\300': 7,
'\300\334\300': 8,
'\246\312\360': 9,
'\377\373\360': 246,
'\240\240\244': 247,
'\200\200\200': 248,
'\377\000\000': 249,
'\000\377\000': 250,
'\377\377\000': 251,
'\000\000\377': 252,
'\377\000\377': 253,
'\000\377\377': 254,
'\377\377\377': 255,
}
fill = 0
pen = brush = self._ink("\000\000\000")
paper = self._ink("\377\377\377")
self.im = Image.core.fill(self.mode, self.size, paper)
#
# render metafile into image, using the standard palette
id = ImageDraw.ImageDraw(self)
while 1:
s = self.fp.read(6)
size = i32(s)*2
func = i16(s[4:])
if not func:
break
s = self.fp.read(size-6)
if func == META_SETPOLYFILLMODE:
fill = i16(s)
id.setfill(fill)
elif func == META_CREATEBRUSHINDIRECT:
brush = self._ink(s[2:5])
elif func == META_CREATEPENINDIRECT:
pen = self._ink(s[6:9])
elif func == META_POLYGON:
xy = map(lambda i,s=s: i16(s[i:i+2])/10, range(2, len(s), 2))
if fill:
id.setink(brush)
id.polygon(xy)
id.setink(pen)
id.setfill(0)
id.polygon(xy)
id.setfill(1)
else:
id.setink(pen)
id.polygon(xy)
elif func == META_POLYLINE:
xy = map(lambda i,s=s: i16(s[i:i+2])/10, range(2, len(s), 2))
id.setink(pen)
id.line(xy)
elif func == META_RECTANGLE:
xy = (i16(s[2:4])/10, i16(s[0:2])/10,
i16(s[6:8])/10, i16(s[4:6])/10)
if fill:
id.setink(brush)
id.rectangle(xy)
id.setink(pen)
id.setfill(0)
id.rectangle(xy)
id.setfill(1)
else:
id.setink(pen)
id.rectangle(xy)
else:
if Image.DEBUG:
print size, hex(func), NAME[func]
pass
#
# attach palette to image
palette = ["\0\0\0"] * 256
for rgb, i in self.palette.items():
if i < 256:
palette[i] = rgb
self.im.putpalette("RGB", string.join(palette, ""))
#
# --------------------------------------------------------------------
# Registry stuff
Image.register_open("WMF", WmfImageFile, _accept)
Image.register_extension("WMF", ".wmf")
|
fxia22/ASM_xf
|
PythonD/lib/python2.4/site-packages/display/PIL/WmfImagePlugin.py
|
Python
|
gpl-2.0
| 7,556
|
# Copyright (c) 2013, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module implements some basic help invocation functions along
# with the bulk of the help topic text for the OE Core Image Tools.
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
import subprocess
import logging
from wic.pluginbase import PluginMgr, PLUGIN_TYPES
logger = logging.getLogger('wic')
def subcommand_error(args):
logger.info("invalid subcommand %s", args[0])
def display_help(subcommand, subcommands):
"""
Display help for subcommand.
"""
if subcommand not in subcommands:
return False
hlp = subcommands.get(subcommand, subcommand_error)[2]
if callable(hlp):
hlp = hlp()
pager = subprocess.Popen('less', stdin=subprocess.PIPE)
pager.communicate(hlp.encode('utf-8'))
return True
def wic_help(args, usage_str, subcommands):
"""
Subcommand help dispatcher.
"""
    if args.help_topic is None or not display_help(args.help_topic, subcommands):
print(usage_str)
def get_wic_plugins_help():
"""
Combine wic_plugins_help with the help for every known
source plugin.
"""
result = wic_plugins_help
for plugin_type in PLUGIN_TYPES:
result += '\n\n%s PLUGINS\n\n' % plugin_type.upper()
for name, plugin in PluginMgr.get_plugins(plugin_type).items():
result += "\n %s plugin:\n" % name
if plugin.__doc__:
result += plugin.__doc__
else:
result += "\n %s is missing docstring\n" % plugin
return result
def invoke_subcommand(args, parser, main_command_usage, subcommands):
"""
Dispatch to subcommand handler borrowed from combo-layer.
Should use argparse, but has to work in 2.6.
"""
if not args.command:
logger.error("No subcommand specified, exiting")
parser.print_help()
return 1
elif args.command == "help":
wic_help(args, main_command_usage, subcommands)
elif args.command not in subcommands:
logger.error("Unsupported subcommand %s, exiting\n", args.command)
parser.print_help()
return 1
else:
subcmd = subcommands.get(args.command, subcommand_error)
usage = subcmd[1]
subcmd[0](args, usage)
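# Illustrative dispatch sketch (do_create is hypothetical; the (handler, usage, help)
# tuple shape follows from the indexing above):
#   subcommands = {'create': (do_create, wic_create_usage, wic_create_help)}
#   invoke_subcommand(args, parser, wic_usage, subcommands)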
##
# wic help and usage strings
##
wic_usage = """
Create a customized OpenEmbedded image
usage: wic [--version] | [--help] | [COMMAND [ARGS]]
Current 'wic' commands are:
help Show help for command or one of the topics (see below)
create Create a new OpenEmbedded image
list List available canned images and source plugins
Help topics:
overview wic overview - General overview of wic
plugins wic plugins - Overview and API
kickstart wic kickstart - wic kickstart reference
"""
wic_help_usage = """
usage: wic help <subcommand>
This command displays detailed help for the specified subcommand.
"""
wic_create_usage = """
Create a new OpenEmbedded image
usage: wic create <wks file or image name> [-o <DIRNAME> | --outdir <DIRNAME>]
[-e | --image-name] [-s, --skip-build-check] [-D, --debug]
[-r, --rootfs-dir] [-b, --bootimg-dir]
[-k, --kernel-dir] [-n, --native-sysroot] [-f, --build-rootfs]
[-c, --compress-with] [-m, --bmap]
This command creates an OpenEmbedded image based on the 'OE kickstart
commands' found in the <wks file>.
The -o option can be used to place the image in a directory with a
different name and location.
See 'wic help create' for more detailed instructions.
"""
wic_create_help = """
NAME
wic create - Create a new OpenEmbedded image
SYNOPSIS
wic create <wks file or image name> [-o <DIRNAME> | --outdir <DIRNAME>]
[-e | --image-name] [-s, --skip-build-check] [-D, --debug]
[-r, --rootfs-dir] [-b, --bootimg-dir]
[-k, --kernel-dir] [-n, --native-sysroot] [-f, --build-rootfs]
[-c, --compress-with] [-m, --bmap] [--no-fstab-update]
DESCRIPTION
This command creates an OpenEmbedded image based on the 'OE
kickstart commands' found in the <wks file>.
In order to do this, wic needs to know the locations of the
various build artifacts required to build the image.
Users can explicitly specify the build artifact locations using
the -r, -b, -k, and -n options. See below for details on where
the corresponding artifacts are typically found in a normal
OpenEmbedded build.
Alternatively, users can use the -e option to have 'wic' determine
those locations for a given image. If the -e option is used, the
user needs to have set the appropriate MACHINE variable in
local.conf, and have sourced the build environment.
The -e option is used to specify the name of the image to use the
artifacts from e.g. core-image-sato.
The -r option is used to specify the path to the /rootfs dir to
use as the .wks rootfs source.
The -b option is used to specify the path to the dir containing
the boot artifacts (e.g. /EFI or /syslinux dirs) to use as the
.wks bootimg source.
The -k option is used to specify the path to the dir containing
the kernel to use in the .wks bootimg.
The -n option is used to specify the path to the native sysroot
containing the tools to use to build the image.
    The -f option is used to build the rootfs by running "bitbake <image>"
The -s option is used to skip the build check. The build check is
a simple sanity check used to determine whether the user has
sourced the build environment so that the -e option can operate
correctly. If the user has specified the build artifact locations
explicitly, 'wic' assumes the user knows what he or she is doing
and skips the build check.
The -D option is used to display debug information detailing
exactly what happens behind the scenes when a create request is
fulfilled (or not, as the case may be). It enumerates and
displays the command sequence used, and should be included in any
bug report describing unexpected results.
When 'wic -e' is used, the locations for the build artifacts
values are determined by 'wic -e' from the output of the 'bitbake
-e' command given an image name e.g. 'core-image-minimal' and a
given machine set in local.conf. In that case, the image is
created as if the following 'bitbake -e' variables were used:
-r: IMAGE_ROOTFS
-k: STAGING_KERNEL_DIR
-n: STAGING_DIR_NATIVE
-b: empty (plugin-specific handlers must determine this)
If 'wic -e' is not used, the user needs to select the appropriate
value for -b (as well as -r, -k, and -n).
The -o option can be used to place the image in a directory with a
different name and location.
The -c option is used to specify compressor utility to compress
an image. gzip, bzip2 and xz compressors are supported.
The -m option is used to produce .bmap file for the image. This file
can be used to flash image using bmaptool utility.
    The --no-fstab-update option is used to keep the fstab file unchanged. When
    using this option, the final fstab file will be the same as the one in the
    rootfs; wic doesn't update the file, e.g. by adding a new mount point. Users
    can control the fstab file content in the base-files recipe.
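    For example (illustrative; 'mkefidisk' is one of the canned images, and the
    set available depends on your build):
      $ wic create mkefidisk -e core-image-minimal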
"""
wic_list_usage = """
List available OpenEmbedded images and source plugins
usage: wic list images
wic list <image> help
wic list source-plugins
This command enumerates the set of available canned images as well as
help for those images. It can also be used to list the available source
plugins.
The first form enumerates all the available 'canned' images.
The second form lists the detailed help information for a specific
'canned' image.
The third form enumerates all the available --sources (source
plugins).
See 'wic help list' for more details.
"""
wic_list_help = """
NAME
wic list - List available OpenEmbedded images and source plugins
SYNOPSIS
wic list images
wic list <image> help
wic list source-plugins
DESCRIPTION
This command enumerates the set of available canned images as well
as help for those images. It also can be used to list available
source plugins.
The first form enumerates all the available 'canned' images.
These are actually just the set of .wks files that have been moved
    into the /scripts/lib/wic/canned-wks directory.
The second form lists the detailed help information for a specific
'canned' image.
The third form enumerates all the available --sources (source
plugins). The contents of a given partition are driven by code
defined in 'source plugins'. Users specify a specific plugin via
the --source parameter of the partition .wks command. Normally
this is the 'rootfs' plugin but can be any of the more specialized
sources listed by the 'list source-plugins' command. Users can
also add their own source plugins - see 'wic help plugins' for
details.
"""
wic_ls_usage = """
List content of a partitioned image
usage: wic ls <image>[:<partition>[<path>]] [--native-sysroot <path>]
This command outputs either a list of image partitions or the directory contents
of vfat and ext* partitions.
See 'wic help ls' for more detailed instructions.
"""
wic_ls_help = """
NAME
wic ls - List contents of partitioned image or partition
SYNOPSIS
wic ls <image>
wic ls <image>:<vfat or ext* partition>
wic ls <image>:<vfat or ext* partition><path>
wic ls <image>:<vfat or ext* partition><path> --native-sysroot <path>
DESCRIPTION
This command lists either partitions of the image or directory contents
of vfat or ext* partitions.
    In its first form, it lists the partitions of the image.
For example:
$ wic ls tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic
Num Start End Size Fstype
1 1048576 24438783 23390208 fat16
2 25165824 50315263 25149440 ext4
    The second and third forms list the directory contents of the partition:
$ wic ls tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1
Volume in drive : is boot
Volume Serial Number is 2DF2-5F02
Directory for ::/
efi <DIR> 2017-05-11 10:54
startup nsh 26 2017-05-11 10:54
vmlinuz 6922288 2017-05-11 10:54
3 files 6 922 314 bytes
15 818 752 bytes free
$ wic ls tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1/EFI/boot/
Volume in drive : is boot
Volume Serial Number is 2DF2-5F02
Directory for ::/EFI/boot
. <DIR> 2017-05-11 10:54
.. <DIR> 2017-05-11 10:54
grub cfg 679 2017-05-11 10:54
bootx64 efi 571392 2017-05-11 10:54
4 files 572 071 bytes
15 818 752 bytes free
The -n option is used to specify the path to the native sysroot
    containing the tools (parted and mtools) to use.
"""
wic_cp_usage = """
Copy files and directories to the vfat or ext* partition
usage: wic cp <src> <image>:<partition>[<path>] [--native-sysroot <path>]
This command copies local files or directories to the vfat or ext* partitions
of partitioned image.
See 'wic help cp' for more detailed instructions.
"""
wic_cp_help = """
NAME
wic cp - copy files and directories to the vfat or ext* partitions
SYNOPSIS
wic cp <src> <image>:<partition>
wic cp <src> <image>:<partition><path>
wic cp <src> <image>:<partition><path> --native-sysroot <path>
DESCRIPTION
This command copies files and directories to the vfat or ext* partition of
the partitioned image.
    The first form copies a file or directory to the root directory of
the partition:
$ wic cp test.wks tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1
$ wic ls tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1
Volume in drive : is boot
Volume Serial Number is DB4C-FD4C
Directory for ::/
efi <DIR> 2017-05-24 18:15
loader <DIR> 2017-05-24 18:15
startup nsh 26 2017-05-24 18:15
vmlinuz 6926384 2017-05-24 18:15
test wks 628 2017-05-24 21:22
5 files 6 927 038 bytes
15 677 440 bytes free
    The second form copies a file or directory to the specified directory
on the partition:
$ wic cp test tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1/efi/
$ wic ls tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1/efi/
Volume in drive : is boot
Volume Serial Number is DB4C-FD4C
Directory for ::/efi
. <DIR> 2017-05-24 18:15
.. <DIR> 2017-05-24 18:15
boot <DIR> 2017-05-24 18:15
test <DIR> 2017-05-24 21:27
4 files 0 bytes
15 675 392 bytes free
The -n option is used to specify the path to the native sysroot
    containing the tools (parted and mtools) to use.
"""
wic_rm_usage = """
Remove files or directories from the vfat or ext* partitions
usage: wic rm <image>:<partition><path> [--native-sysroot <path>]
This command removes files or directories from the vfat or ext* partitions of
the partitioned image.
See 'wic help rm' for more detailed instructions.
"""
wic_rm_help = """
NAME
wic rm - remove files or directories from the vfat or ext* partitions
SYNOPSIS
wic rm <src> <image>:<partition><path>
wic rm <src> <image>:<partition><path> --native-sysroot <path>
DESCRIPTION
This command removes files or directories from the vfat or ext* partition of the
partitioned image:
$ wic ls ./tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1
Volume in drive : is boot
Volume Serial Number is 11D0-DE21
Directory for ::/
libcom32 c32 186500 2017-06-02 15:15
libutil c32 24148 2017-06-02 15:15
syslinux cfg 209 2017-06-02 15:15
vesamenu c32 27104 2017-06-02 15:15
vmlinuz 6926384 2017-06-02 15:15
5 files 7 164 345 bytes
16 582 656 bytes free
$ wic rm ./tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1/libutil.c32
$ wic ls ./tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1
Volume in drive : is boot
Volume Serial Number is 11D0-DE21
Directory for ::/
libcom32 c32 186500 2017-06-02 15:15
syslinux cfg 209 2017-06-02 15:15
vesamenu c32 27104 2017-06-02 15:15
vmlinuz 6926384 2017-06-02 15:15
4 files 7 140 197 bytes
16 607 232 bytes free
The -n option is used to specify the path to the native sysroot
containing the tools (parted and mtools) to use.
"""
wic_write_usage = """
Write image to a device
usage: wic write <image> <target device> [--expand [rules]] [--native-sysroot <path>]
This command writes a partitioned image to a target device (USB stick, SD card, etc.).
See 'wic help write' for more detailed instructions.
"""
wic_write_help = """
NAME
wic write - write an image to a device
SYNOPSIS
wic write <image> <target>
wic write <image> <target> --expand auto
wic write <image> <target> --expand 1:100M,2:300M
wic write <image> <target> --native-sysroot <path>
DESCRIPTION
This command writes an image to a target device (USB stick, SD card, etc.):
$ wic write ./tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic /dev/sdb
The --expand option is used to resize image partitions.
--expand auto expands partitions to occupy all free space available on the target device.
It's also possible to specify expansion rules in a format
<partition>:<size>[,<partition>:<size>...] for one or more partitions.
Specifying size 0 will keep the partition unmodified.
Note: resizing the boot partition can result in a non-bootable image for
non-EFI images. It is recommended to use size 0 for the boot partition to
keep the image bootable.
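For example, to grow only the second partition while leaving the first
(boot) partition untouched, a rule of this form can be used (the target
device name here is illustrative):
$ wic write ./tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic /dev/sdb --expand 1:0,2:600M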
The --native-sysroot option is used to specify the path to the native sysroot
containing the tools (parted, resize2fs) to use.
"""
wic_plugins_help = """
NAME
wic plugins - Overview and API
DESCRIPTION
Plugins allow wic functionality to be extended and specialized by
users. This section documents the plugin interface, which is
currently restricted to 'source' plugins.
'Source' plugins provide a mechanism to customize various aspects
of the image generation process in wic, mainly the contents of
partitions.
Source plugins provide a mechanism for mapping values specified in
.wks files using the --source keyword to a particular plugin
implementation that populates a corresponding partition.
A source plugin is created as a subclass of SourcePlugin (see
scripts/lib/wic/pluginbase.py) and the plugin file containing it
is added to scripts/lib/wic/plugins/source/ to make the plugin
implementation available to the wic implementation.
Source plugins can also be implemented and added by external
layers - any plugins found in a scripts/lib/wic/plugins/source/
directory in an external layer will also be made available.
When the wic implementation needs to invoke a partition-specific
implementation, it looks for the plugin that has the same name as
the --source param given to that partition. For example, if the
partition is set up like this:
part /boot --source bootimg-pcbios ...
then the methods defined as class members of the plugin whose
.name class member matches 'bootimg-pcbios' would be used.
To be more concrete, here's the plugin definition that would match
a '--source bootimg-pcbios' usage, along with an example method
that would be called by the wic implementation when it needed to
invoke an implementation-specific partition-preparation function:
class BootimgPcbiosPlugin(SourcePlugin):
name = 'bootimg-pcbios'
@classmethod
def do_prepare_partition(cls, part, ...):
If the subclass itself doesn't implement a function, a 'default'
version in a superclass will be located and used, which is why all
plugins must be derived from SourcePlugin.
The SourcePlugin class defines the following methods, which make up
the current set of methods that can be implemented/overridden by
--source plugins. Any methods not implemented by a SourcePlugin
subclass inherit the implementations present in the SourcePlugin
class (see the SourcePlugin source for details):
do_prepare_partition()
Called to do the actual content population for a
partition. In other words, it 'prepares' the final partition
image which will be incorporated into the disk image.
do_post_partition()
Called after the partition is created. It is useful for adding
post-processing operations, e.g. signing the partition.
do_configure_partition()
Called before do_prepare_partition(), typically used to
create custom configuration files for a partition, for
example syslinux or grub config files.
do_install_disk()
Called after all partitions have been prepared and assembled
into a disk image. This provides a hook to allow
finalization of a disk image, for example to write an MBR to
it.
do_stage_partition()
Special content-staging hook called before
do_prepare_partition(), normally empty.
Typically, a partition will just use the passed-in
parameters, for example the unmodified value of bootimg_dir.
In some cases however, things may need to be more tailored.
As an example, certain files may additionally need to be
taken from bootimg_dir + /boot. This hook allows those files
to be staged in a customized fashion. Note that
get_bitbake_var() allows you to access non-standard
variables that you might want to use for these types of
situations.
This scheme is extensible - adding more hooks is a simple matter
of adding more plugin methods to SourcePlugin and derived classes.
Please see the implementation for details.
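As an illustration only (the plugin name 'my-source' is hypothetical,
and the parameter lists are elided just as in the snippet above - see
scripts/lib/wic/pluginbase.py for the exact signatures), a minimal
source plugin skeleton could look like this:

class MySourcePlugin(SourcePlugin):
    name = 'my-source'

    @classmethod
    def do_configure_partition(cls, part, ...):
        # create custom configuration files for the partition here
        pass

    @classmethod
    def do_prepare_partition(cls, part, ...):
        # populate the final partition image here
        pass

Dropping this file into scripts/lib/wic/plugins/source/ (or the
equivalent directory of an external layer) makes '--source my-source'
usable from a .wks file.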
"""
wic_overview_help = """
NAME
wic overview - General overview of wic
DESCRIPTION
The 'wic' command generates partitioned images from existing
OpenEmbedded build artifacts. Image generation is driven by
partitioning commands contained in an 'OpenEmbedded kickstart'
(.wks) file (see 'wic help kickstart') specified either directly
on the command-line or as one of a selection of canned .wks files
(see 'wic list images'). When applied to a given set of build
artifacts, the result is an image or set of images that can be
directly written onto media and used on a particular system.
The 'wic' command and the infrastructure it's based on is by
definition incomplete - its purpose is to allow the generation of
customized images, and as such was designed to be completely
extensible via a plugin interface (see 'wic help plugins').
Background and Motivation
wic is meant to be a completely independent standalone utility
that initially provides easier-to-use and more flexible
replacements for a couple of bits of existing functionality in
oe-core: directdisk.bbclass and mkefidisk.sh. The difference
between wic and those examples is that with wic the functionality
of those scripts is implemented by a general-purpose partitioning
'language' based on Red Hat kickstart syntax.
The initial motivation and design considerations that led to the
current tool are described exhaustively in Yocto Bug #3847
(https://bugzilla.yoctoproject.org/show_bug.cgi?id=3847).
Implementation and Examples
wic can be used in two different modes, depending on how much
control the user needs in specifying the OpenEmbedded build
artifacts that will be used in creating the image: 'raw' and
'cooked'.
If used in 'raw' mode, artifacts are explicitly specified via
command-line arguments (see example below).
The more easily usable 'cooked' mode uses the current MACHINE
setting and a specified image name to automatically locate the
artifacts used to create the image.
OE kickstart files (.wks) can of course be specified directly on
the command-line, but the user can also choose from a set of
'canned' .wks files available via the 'wic list images' command
(example below).
In any case, the prerequisite for generating any image is to have
the build artifacts already available. The examples below assume
the user has already built a 'core-image-minimal' for a specific
machine (future versions won't require this redundant step, but
for now that's typically how build artifacts get generated).
The other prerequisite is to source the build environment:
$ source oe-init-build-env
To start out with, we'll generate an image from one of the canned
.wks files. The following generates a list of available
images:
$ wic list images
mkefidisk Create an EFI disk image
directdisk Create a 'pcbios' direct disk image
You can get more information about any of the available images by
typing 'wic list xxx help', where 'xxx' is one of the image names:
$ wic list mkefidisk help
Creates a partitioned EFI disk image that the user can directly dd
to boot media.
At any time, you can get help on the 'wic' command or any
subcommand (currently 'list' and 'create'). For instance, to get
the description of 'wic create' command and its parameters:
$ wic create
Usage:
Create a new OpenEmbedded image
usage: wic create <wks file or image name> [-o <DIRNAME> | ...]
[-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY FILE>]
[-e | --image-name] [-s, --skip-build-check] [-D, --debug]
[-r, --rootfs-dir] [-b, --bootimg-dir] [-k, --kernel-dir]
[-n, --native-sysroot] [-f, --build-rootfs]
This command creates an OpenEmbedded image based on the 'OE
kickstart commands' found in the <wks file>.
The -o option can be used to place the image in a directory
with a different name and location.
See 'wic help create' for more detailed instructions.
...
As mentioned in the command, you can get even more detailed
information by adding 'help' to the above:
$ wic help create
So, the easiest way to create an image is to use the -e option
with a canned .wks file. To use the -e option, you need to
specify the image used to generate the artifacts and you actually
need to have the MACHINE used to build them specified in your
local.conf (these requirements aren't necessary if you aren't
using the -e option). Below, we generate a directdisk image,
pointing the process at the core-image-minimal artifacts for the
current MACHINE:
$ wic create directdisk -e core-image-minimal
Checking basic build environment...
Done.
Creating image(s)...
Info: The new image(s) can be found here:
/var/tmp/wic/build/directdisk-201309252350-sda.direct
The following build artifacts were used to create the image(s):
ROOTFS_DIR: ...
BOOTIMG_DIR: ...
KERNEL_DIR: ...
NATIVE_SYSROOT: ...
The image(s) were created using OE kickstart file:
.../scripts/lib/wic/canned-wks/directdisk.wks
The output shows the name and location of the image created and,
so that you know exactly what was used to generate the image, each
of the artifacts and the kickstart file used.
Similarly, you can create a 'mkefidisk' image in the same way
(notice that this example uses a different machine - because it's
using the -e option, you need to change the MACHINE in your
local.conf):
$ wic create mkefidisk -e core-image-minimal
Checking basic build environment...
Done.
Creating image(s)...
Info: The new image(s) can be found here:
/var/tmp/wic/build/mkefidisk-201309260027-sda.direct
...
Here's an example that doesn't take the easy way out and manually
specifies each build artifact, along with a non-canned .wks file,
and also uses the -o option to have wic create the output
somewhere other than the default /var/tmp/wic:
$ wic create ./test.wks -o ./out --rootfs-dir
tmp/work/qemux86_64-poky-linux/core-image-minimal/1.0-r0/rootfs
--bootimg-dir tmp/sysroots/qemux86-64/usr/share
--kernel-dir tmp/deploy/images/qemux86-64
--native-sysroot tmp/sysroots/x86_64-linux
Creating image(s)...
Info: The new image(s) can be found here:
out/build/test-201507211313-sda.direct
The following build artifacts were used to create the image(s):
ROOTFS_DIR: tmp/work/qemux86_64-poky-linux/core-image-minimal/1.0-r0/rootfs
BOOTIMG_DIR: tmp/sysroots/qemux86-64/usr/share
KERNEL_DIR: tmp/deploy/images/qemux86-64
NATIVE_SYSROOT: tmp/sysroots/x86_64-linux
The image(s) were created using OE kickstart file:
./test.wks
Here are the contents of test.wks:
part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
part / --source rootfs --ondisk sda --fstype=ext3 --label platform --align 1024
bootloader --timeout=0 --append="rootwait rootfstype=ext3 video=vesafb vga=0x318 console=tty0"
Finally, here's an example of the actual partition language
commands used to generate the mkefidisk image, i.e. these are the
contents of the mkefidisk.wks OE kickstart file:
# short-description: Create an EFI disk image
# long-description: Creates a partitioned EFI disk image that the user
# can directly dd to boot media.
part /boot --source bootimg-efi --ondisk sda --fstype=efi --active
part / --source rootfs --ondisk sda --fstype=ext3 --label platform
part swap --ondisk sda --size 44 --label swap1 --fstype=swap
bootloader --timeout=10 --append="rootwait console=ttyPCH0,115200"
You can get a complete listing and description of all the
kickstart commands available for use in .wks files from 'wic help
kickstart'.
"""
wic_kickstart_help = """
NAME
wic kickstart - wic kickstart reference
DESCRIPTION
This section provides the definitive reference to the wic
kickstart language. It also provides documentation on the list of
--source plugins available for use from the 'part' command (see
the 'Platform-specific Plugins' section below).
The current wic implementation supports only the basic kickstart
partitioning commands: partition (or part for short) and
bootloader.
The following is a listing of the commands, their syntax, and
meanings. The commands are based on the Fedora kickstart
documentation but with modifications to reflect wic capabilities.
http://fedoraproject.org/wiki/Anaconda/Kickstart#part_or_partition
http://fedoraproject.org/wiki/Anaconda/Kickstart#bootloader
Commands
* 'part' or 'partition'
This command creates a partition on the system and uses the
following syntax:
part [<mountpoint>]
The <mountpoint> is where the partition will be mounted and
must take one of the following forms:
/<path>: For example: /, /usr, or /home
swap: The partition will be used as swap space.
If a <mountpoint> is not specified, the partition will be created
but will not be mounted.
Partitions with a <mountpoint> specified will be automatically mounted.
This is achieved by wic adding entries to the fstab during image
generation. In order for a valid fstab to be generated one of the
--ondrive, --ondisk, --use-uuid or --use-label partition options must
be used for each partition that specifies a mountpoint. Note that with
--use-{uuid,label} and non-root <mountpoint>, including swap, the mount
program must understand the PARTUUID or LABEL syntax. This currently
excludes the busybox versions of these applications.
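For example (the disk name and size are illustrative), a non-root
partition that will be mounted automatically via its PARTUUID could be
declared as:
part /home --ondisk sda --fstype=ext4 --size 512 --use-uuid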
The following are supported 'part' options:
--size: The minimum partition size. Specify an integer value
such as 500. Multipliers k, M and G can be used. If
no multiplier is specified, the size is taken to be in MB.
You do not need this option if you use --source.
--fixed-size: Exact partition size. Value format is the same
as for the --size option. This option cannot be
specified along with --size. If the partition data
is larger than --fixed-size, an error will be
raised when assembling the disk image.
--source: This option is a wic-specific option that names the
source of the data that will populate the
partition. The most common value for this option
is 'rootfs', but can be any value which maps to a
valid 'source plugin' (see 'wic help plugins').
If '--source rootfs' is used, it tells the wic
command to create a partition as large as needed
and to fill it with the contents of the root
filesystem pointed to by the '-r' wic command-line
option (or the equivalent rootfs derived from the
'-e' command-line option). The filesystem type
that will be used to create the partition is driven
by the value of the --fstype option specified for
the partition (see --fstype below).
If '--source <plugin-name>' is used, it tells the
wic command to create a partition as large as
needed and to fill with the contents of the
partition that will be generated by the specified
plugin name using the data pointed to by the '-r'
wic command-line option (or the equivalent rootfs
derived from the '-e' command-line option).
Exactly what those contents and filesystem type end
up being are dependent on the given plugin
implementation.
If the --source option is not used, the wic command
will create an empty partition. The --size parameter
has to be used to specify the size of the empty partition.
--ondisk or --ondrive: Forces the partition to be created on
a particular disk.
--fstype: Sets the file system type for the partition. This
applies to partitions created using '--source rootfs' (see
--source above). Valid values are:
vfat
msdos
ext2
ext3
ext4
btrfs
squashfs
swap
--fsoptions: Specifies a free-form string of options to be
used when mounting the filesystem. This string
will be copied into the /etc/fstab file of the
installed system and should be enclosed in
quotes. If not specified, the default string is
"defaults".
--label label: Specifies the label to give to the filesystem
to be made on the partition. If the given
label is already in use by another filesystem,
a new label is created for the partition.
--use-label: This option is specific to wic. It makes wic use the
label in /etc/fstab to specify a partition. If
--use-label and --use-uuid are used at the same time,
the uuid is preferred because it is less likely to cause
name conflicts. We don't support using this parameter
on the root partition since it requires an initramfs to
parse this value and we do not currently support that.
--active: Marks the partition as active.
--align (in KBytes): This option is specific to wic and says
to start a partition on an x KBytes
boundary.
--no-table: This option is specific to wic. Space will be
reserved for the partition and it will be
populated but it will not be added to the
partition table. It may be useful for
bootloaders.
--exclude-path: This option is specific to wic. It excludes the given
relative path from the resulting image. If the path
ends with a slash, only the content of the directory
is omitted, not the directory itself. This option only
has an effect with the rootfs source plugin.
--extra-space: This option is specific to wic. It adds extra
space after the space filled by the content
of the partition. The final size can go
beyond the size specified by --size.
By default, 10MB. This option cannot be used
with the --fixed-size option.
--overhead-factor: This option is specific to wic. The
size of the partition is multiplied by
this factor. It has to be greater than or
equal to 1. The default value is 1.3.
This option cannot be used with the --fixed-size
option.
--part-name: This option is specific to wic. It specifies a name for GPT partitions.
--part-type: This option is specific to wic. It specifies the partition
type GUID for GPT partitions.
A list of partition type GUIDs can be found here:
http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs
--use-uuid: This option is specific to wic. It makes wic generate a
random globally unique identifier (GUID) for the partition
and use it in the bootloader configuration to specify the root partition.
--uuid: This option is specific to wic. It specifies the partition UUID.
It's useful if a preconfigured partition UUID is added to the kernel command
line in the bootloader configuration before running wic. In this case the
.wks file can be generated or modified to set the preconfigured partition
UUID using this option.
--fsuuid: This option is specific to wic. It specifies the filesystem UUID.
It's useful if a preconfigured filesystem UUID is added to the kernel command
line in the bootloader configuration before running wic. In this case the
.wks file can be generated or modified to set the preconfigured filesystem
UUID using this option.
--system-id: This option is specific to wic. It specifies the partition system id. It's
useful for hardware that requires non-default partition system ids. The
parameter is a one-byte-long hex number, with or without the 0x prefix.
--mkfs-extraopts: This option specifies extra options to pass to the mkfs utility.
NOTE that wic uses default options for some filesystems, for example
'-S 512' for mkfs.fat or '-F -i 8192' for mkfs.ext. Those options will
not take effect when --mkfs-extraopts is used; this should be taken
into account when using --mkfs-extraopts.
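As an illustration (the disk name, label and size are hypothetical),
several of these options can be combined on a single partition line:
part /data --ondisk sda --fstype=ext4 --label data --size 512 --align 1024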
* bootloader
This command allows the user to specify various bootloader
options. The following are supported 'bootloader' options:
--timeout: Specifies the number of seconds before the
bootloader times out and boots the default option.
--append: Specifies kernel parameters. These will be added to
the bootloader command line - for example, the syslinux
APPEND or grub kernel command line.
--configfile: Specifies a user-defined configuration file for
the bootloader. This file must be located in the
canned-wks folder or be given as a full path to the
file. Using this option will override any other
bootloader option.
Note that bootloader functionality and boot partitions are
implemented by the various --source plugins that implement
bootloader functionality; the bootloader command essentially
provides a means of modifying bootloader configuration.
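For example, a typical bootloader line (the kernel arguments shown are
illustrative) might be:
bootloader --timeout=5 --append="rootwait console=ttyS0,115200"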
* include
This command allows the user to include the content of a .wks file
into the original .wks file.
Command uses the following syntax:
include <file>
The <file> is either the path to the file or its name. If only a
name is specified, wic will try to find the file in the directories
containing the canned .wks files.
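For example (the included file name is hypothetical):
include common-disk.wks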
"""
wic_help_help = """
NAME
wic help - display a help topic
DESCRIPTION
Specify a help topic to display it. Topics are shown above.
"""
|
schleichdi2/OPENNFR-6.3-CORE
|
opennfr-openembedded-core/scripts/lib/wic/help.py
|
Python
|
gpl-2.0
| 40,562
|
__author__ = 'Giacomo Tanganelli'
|
kuggenhoffen/CoAPthon
|
httpcoapforwardproxy.py
|
Python
|
mit
| 34
|
# coding: utf8
{
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"Uaktualnij" jest dodatkowym wyra\xc5\xbceniem postaci "pole1=\'nowawarto\xc5\x9b\xc4\x87\'". Nie mo\xc5\xbcesz uaktualni\xc4\x87 lub usun\xc4\x85\xc4\x87 wynik\xc3\xb3w z JOIN:',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': 'Wierszy usuni\xc4\x99tych: %s',
'%s rows updated': 'Wierszy uaktualnionych: %s',
'Available databases and tables': 'Dost\xc4\x99pne bazy danych i tabele',
'Cannot be empty': 'Nie mo\xc5\xbce by\xc4\x87 puste',
'Change Password': 'Change Password',
'Check to delete': 'Zaznacz aby usun\xc4\x85\xc4\x87',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'Aktualne \xc5\xbc\xc4\x85danie',
'Current response': 'Aktualna odpowied\xc5\xba',
'Current session': 'Aktualna sesja',
'DB Model': 'DB Model',
'Database': 'Database',
'Delete:': 'Usu\xc5\x84:',
'Edit': 'Edit',
'Edit Profile': 'Edit Profile',
'Edit This App': 'Edit This App',
'Edit current record': 'Edytuj aktualny rekord',
'Hello World': 'Witaj \xc5\x9awiecie',
'Import/Export': 'Importuj/eksportuj',
'Index': 'Index',
'Internal State': 'Stan wewn\xc4\x99trzny',
'Invalid Query': 'B\xc5\x82\xc4\x99dne zapytanie',
'Layout': 'Layout',
'Login': 'Zaloguj',
'Logout': 'Logout',
'Lost Password': 'Przypomnij has\xc5\x82o',
'Main Menu': 'Main Menu',
'Menu Model': 'Menu Model',
'New Record': 'Nowy rekord',
'No databases in this application': 'Brak baz danych w tej aplikacji',
'Powered by': 'Powered by',
'Query:': 'Zapytanie:',
'Register': 'Zarejestruj',
'Rows in table': 'Wiersze w tabeli',
'Rows selected': 'Wybrane wiersze',
'Stylesheet': 'Stylesheet',
'Sure you want to delete this object?': 'Czy na pewno chcesz usun\xc4\x85\xc4\x87 ten obiekt?',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Zapytanie" jest warunkiem postaci "db.tabela1.pole1==\'warto\xc5\x9b\xc4\x87\'". Takie co\xc5\x9b jak "db.tabela1.pole1==db.tabela2.pole2" oznacza SQL JOIN.',
'Update:': 'Uaktualnij:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'U\xc5\xbcyj (...)&(...) jako AND, (...)|(...) jako OR oraz ~(...) jako NOT do tworzenia bardziej skomplikowanych zapyta\xc5\x84.',
'View': 'View',
'Welcome %s': 'Welcome %s',
'Welcome to web2py': 'Witaj w web2py',
'appadmin is disabled because insecure channel': 'appadmin is disabled because insecure channel',
'cache': 'cache',
'change password': 'change password',
'Online examples': 'Kliknij aby przej\xc5\x9b\xc4\x87 do interaktywnych przyk\xc5\x82ad\xc3\xb3w',
'Administrative interface': 'Kliknij aby przej\xc5\x9b\xc4\x87 do panelu administracyjnego',
'customize me!': 'dostosuj mnie!',
'data uploaded': 'dane wys\xc5\x82ane',
'database': 'baza danych',
'database %s select': 'wyb\xc3\xb3r z bazy danych %s',
'db': 'baza danych',
'design': 'projektuj',
'done!': 'zrobione!',
'edit profile': 'edit profile',
'export as csv file': 'eksportuj jako plik csv',
'insert new': 'wstaw nowy rekord tabeli',
'insert new %s': 'wstaw nowy rekord do tabeli %s',
'invalid request': 'B\xc5\x82\xc4\x99dne \xc5\xbc\xc4\x85danie',
'login': 'login',
'logout': 'logout',
'new record inserted': 'nowy rekord zosta\xc5\x82 wstawiony',
'next 100 rows': 'nast\xc4\x99pne 100 wierszy',
'or import from csv file': 'lub zaimportuj z pliku csv',
'previous 100 rows': 'poprzednie 100 wierszy',
'record': 'record',
'record does not exist': 'rekord nie istnieje',
'record id': 'id rekordu',
'register': 'register',
'selected': 'wybranych',
'state': 'stan',
'table': 'tabela',
'unable to parse csv file': 'nie mo\xc5\xbcna sparsowa\xc4\x87 pliku csv',
}
|
montaggroup/montag-token-redeemer
|
web2py/applications/token_redeemer/languages/pl-pl.py
|
Python
|
gpl-3.0
| 3,788
|
"""
Simple wrapper around elasticsearch-py to index/search a django Model.
"""
|
leotsem/django-elasticsearch
|
django_elasticsearch/__init__.py
|
Python
|
mit
| 79
|
# oracle.py
# Copyright (C) 2005, 2006, 2007 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re, warnings, random
from sqlalchemy import util, sql, schema, exceptions, logging
from sqlalchemy.engine import default, base
from sqlalchemy.sql import compiler, visitors
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import types as sqltypes
import datetime
class OracleNumeric(sqltypes.Numeric):
def get_col_spec(self):
if self.precision is None:
return "NUMERIC"
else:
return "NUMERIC(%(precision)s, %(length)s)" % {'precision': self.precision, 'length' : self.length}
class OracleInteger(sqltypes.Integer):
def get_col_spec(self):
return "INTEGER"
class OracleSmallInteger(sqltypes.Smallinteger):
def get_col_spec(self):
return "SMALLINT"
class OracleDate(sqltypes.Date):
def get_col_spec(self):
return "DATE"
def bind_processor(self, dialect):
return None
def result_processor(self, dialect):
def process(value):
if not isinstance(value, datetime.datetime):
return value
else:
return value.date()
return process
class OracleDateTime(sqltypes.DateTime):
def get_col_spec(self):
return "DATE"
def result_processor(self, dialect):
def process(value):
if value is None or isinstance(value,datetime.datetime):
return value
else:
# convert cx_oracle datetime object returned pre-python 2.4
return datetime.datetime(value.year,value.month,
value.day,value.hour, value.minute, value.second)
return process
# Note:
# Oracle DATE == DATETIME
# Oracle does not allow milliseconds in DATE
# Oracle does not support TIME columns
# only if cx_oracle contains TIMESTAMP
class OracleTimestamp(sqltypes.TIMESTAMP):
def get_col_spec(self):
return "TIMESTAMP"
def get_dbapi_type(self, dialect):
return dialect.TIMESTAMP
def result_processor(self, dialect):
def process(value):
if value is None or isinstance(value,datetime.datetime):
return value
else:
# convert cx_oracle datetime object returned pre-python 2.4
return datetime.datetime(value.year,value.month,
value.day,value.hour, value.minute, value.second)
return process
class OracleString(sqltypes.String):
def get_col_spec(self):
return "VARCHAR(%(length)s)" % {'length' : self.length}
class OracleText(sqltypes.TEXT):
def get_dbapi_type(self, dbapi):
return dbapi.CLOB
def get_col_spec(self):
return "CLOB"
def result_processor(self, dialect):
super_process = super(OracleText, self).result_processor(dialect)
def process(value):
if value is None:
return None
elif hasattr(value, 'read'):
# cx_oracle doesn't seem to be consistent with CLOB returning LOB or str
if super_process:
return super_process(value.read())
else:
return value.read()
else:
if super_process:
return super_process(value)
else:
return value
return process
class OracleRaw(sqltypes.Binary):
def get_col_spec(self):
return "RAW(%(length)s)" % {'length' : self.length}
class OracleChar(sqltypes.CHAR):
def get_col_spec(self):
return "CHAR(%(length)s)" % {'length' : self.length}
class OracleBinary(sqltypes.Binary):
def get_dbapi_type(self, dbapi):
return dbapi.BLOB
def get_col_spec(self):
return "BLOB"
def bind_processor(self, dialect):
return None
def result_processor(self, dialect):
def process(value):
if value is None:
return None
else:
return value.read()
return process
class OracleBoolean(sqltypes.Boolean):
def get_col_spec(self):
return "SMALLINT"
def result_processor(self, dialect):
def process(value):
if value is None:
return None
return value and True or False
return process
def bind_processor(self, dialect):
def process(value):
if value is True:
return 1
elif value is False:
return 0
elif value is None:
return None
else:
return value and True or False
return process
colspecs = {
sqltypes.Integer : OracleInteger,
sqltypes.Smallinteger : OracleSmallInteger,
sqltypes.Numeric : OracleNumeric,
sqltypes.Float : OracleNumeric,
sqltypes.DateTime : OracleDateTime,
sqltypes.Date : OracleDate,
sqltypes.String : OracleString,
sqltypes.Binary : OracleBinary,
sqltypes.Boolean : OracleBoolean,
sqltypes.TEXT : OracleText,
sqltypes.TIMESTAMP : OracleTimestamp,
sqltypes.CHAR: OracleChar,
}
ischema_names = {
'VARCHAR2' : OracleString,
'DATE' : OracleDate,
'DATETIME' : OracleDateTime,
'NUMBER' : OracleNumeric,
'BLOB' : OracleBinary,
'CLOB' : OracleText,
'TIMESTAMP' : OracleTimestamp,
'RAW' : OracleRaw,
'FLOAT' : OracleNumeric,
'DOUBLE PRECISION' : OracleNumeric,
'LONG' : OracleText,
}
def descriptor():
return {'name':'oracle',
'description':'Oracle',
'arguments':[
('dsn', 'Data Source Name', None),
('user', 'Username', None),
('password', 'Password', None)
]}
class OracleExecutionContext(default.DefaultExecutionContext):
def pre_exec(self):
super(OracleExecutionContext, self).pre_exec()
if self.dialect.auto_setinputsizes:
self.set_input_sizes()
if self.compiled_parameters is not None and len(self.compiled_parameters) == 1:
for key in self.compiled_parameters[0]:
(bindparam, name, value) = self.compiled_parameters[0].get_parameter(key)
if bindparam.isoutparam:
dbtype = bindparam.type.dialect_impl(self.dialect).get_dbapi_type(self.dialect.dbapi)
if not hasattr(self, 'out_parameters'):
self.out_parameters = {}
self.out_parameters[name] = self.cursor.var(dbtype)
self.parameters[0][name] = self.out_parameters[name]
def get_result_proxy(self):
if hasattr(self, 'out_parameters'):
if self.compiled_parameters is not None and len(self.compiled_parameters) == 1:
for k in self.out_parameters:
type = self.compiled_parameters[0].get_type(k)
self.out_parameters[k] = type.dialect_impl(self.dialect).result_processor(self.dialect)(self.out_parameters[k].getvalue())
else:
for k in self.out_parameters:
self.out_parameters[k] = self.out_parameters[k].getvalue()
if self.cursor.description is not None:
for column in self.cursor.description:
type_code = column[1]
if type_code in self.dialect.ORACLE_BINARY_TYPES:
return base.BufferedColumnResultProxy(self)
return base.ResultProxy(self)
class OracleDialect(default.DefaultDialect):
supports_alter = True
supports_unicode_statements = False
max_identifier_length = 30
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
preexecute_sequences = True
def __init__(self, use_ansi=True, auto_setinputsizes=True, auto_convert_lobs=True, threaded=True, allow_twophase=True, **kwargs):
default.DefaultDialect.__init__(self, default_paramstyle='named', **kwargs)
self.use_ansi = use_ansi
self.threaded = threaded
self.allow_twophase = allow_twophase
self.supports_timestamp = self.dbapi is None or hasattr(self.dbapi, 'TIMESTAMP' )
self.auto_setinputsizes = auto_setinputsizes
self.auto_convert_lobs = auto_convert_lobs
if self.dbapi is not None:
self.ORACLE_BINARY_TYPES = [getattr(self.dbapi, k) for k in ["BFILE", "CLOB", "NCLOB", "BLOB"] if hasattr(self.dbapi, k)]
else:
self.ORACLE_BINARY_TYPES = []
def dbapi_type_map(self):
if self.dbapi is None or not self.auto_convert_lobs:
return {}
else:
# only use this for LOB objects. using it for strings, dates
# etc. leads to a little too much magic, reflection doesn't know if it should
# expect encoded strings or unicodes, etc.
return {
self.dbapi.CLOB: OracleText(),
self.dbapi.BLOB: OracleBinary(),
self.dbapi.BINARY: OracleRaw(),
}
def dbapi(cls):
import cx_Oracle
return cx_Oracle
dbapi = classmethod(dbapi)
def create_connect_args(self, url):
dialect_opts = dict(url.query)
for opt in ('use_ansi', 'auto_setinputsizes', 'auto_convert_lobs',
'threaded', 'allow_twophase'):
if opt in dialect_opts:
util.coerce_kw_type(dialect_opts, opt, bool)
setattr(self, opt, dialect_opts[opt])
if url.database:
# if we have a database, then we have a remote host
port = url.port
if port:
port = int(port)
else:
port = 1521
dsn = self.dbapi.makedsn(url.host, port, url.database)
else:
# we have a local tnsname
dsn = url.host
opts = dict(
user=url.username,
password=url.password,
dsn=dsn,
threaded=self.threaded,
twophase=self.allow_twophase,
)
if 'mode' in url.query:
opts['mode'] = url.query['mode']
if isinstance(opts['mode'], basestring):
mode = opts['mode'].upper()
if mode == 'SYSDBA':
opts['mode'] = self.dbapi.SYSDBA
elif mode == 'SYSOPER':
opts['mode'] = self.dbapi.SYSOPER
else:
util.coerce_kw_type(opts, 'mode', int)
# Can't set 'handle' or 'pool' via URL query args, use connect_args
return ([], opts)
def type_descriptor(self, typeobj):
return sqltypes.adapt_type(typeobj, colspecs)
def oid_column_name(self, column):
if not isinstance(column.table, (sql.TableClause, sql.Select)):
return None
else:
return "rowid"
def create_xid(self):
"""create a two-phase transaction ID.
this id will be passed to do_begin_twophase(), do_rollback_twophase(),
do_commit_twophase(). its format is unspecified."""
id = random.randint(0,2**128)
return (0x1234, "%032x" % 9, "%032x" % id)
def do_release_savepoint(self, connection, name):
# Oracle does not support RELEASE SAVEPOINT
pass
def do_begin_twophase(self, connection, xid):
connection.connection.begin(*xid)
def do_prepare_twophase(self, connection, xid):
connection.connection.prepare()
def do_rollback_twophase(self, connection, xid, is_prepared=True, recover=False):
self.do_rollback(connection.connection)
def do_commit_twophase(self, connection, xid, is_prepared=True, recover=False):
self.do_commit(connection.connection)
def do_recover_twophase(self, connection):
pass
def create_execution_context(self, *args, **kwargs):
return OracleExecutionContext(self, *args, **kwargs)
def has_table(self, connection, table_name, schema=None):
cursor = connection.execute("""select table_name from all_tables where table_name=:name""", {'name':self._denormalize_name(table_name)})
return bool( cursor.fetchone() is not None )
def has_sequence(self, connection, sequence_name):
cursor = connection.execute("""select sequence_name from all_sequences where sequence_name=:name""", {'name':self._denormalize_name(sequence_name)})
return bool( cursor.fetchone() is not None )
def _locate_owner_row(self, owner, name, rows, raiseerr=False):
"""return the row in the given list of rows which references the given table name and owner name."""
if not rows:
if raiseerr:
raise exceptions.NoSuchTableError(name)
else:
return None
else:
if owner is not None:
for row in rows:
if owner.upper() in row[0]:
return row
else:
if raiseerr:
raise exceptions.AssertionError("Specified owner %s does not own table %s" % (owner, name))
else:
return None
else:
if len(rows)==1:
return rows[0]
else:
if raiseerr:
raise exceptions.AssertionError("There are multiple tables with name '%s' visible to the schema, you must specifiy owner" % name)
else:
return None
def _resolve_table_owner(self, connection, name, table, dblink=''):
"""Locate the given table in the ``ALL_TAB_COLUMNS`` view,
including searching for equivalent synonyms and dblinks.
"""
c = connection.execute ("select distinct OWNER from ALL_TAB_COLUMNS%(dblink)s where TABLE_NAME = :table_name" % {'dblink':dblink}, {'table_name':name})
rows = c.fetchall()
try:
row = self._locate_owner_row(table.owner, name, rows, raiseerr=True)
return name, row['OWNER'], ''
except exceptions.SQLAlchemyError:
# locate synonyms
c = connection.execute ("""select OWNER, TABLE_OWNER, TABLE_NAME, DB_LINK
from ALL_SYNONYMS%(dblink)s
where SYNONYM_NAME = :synonym_name
and (DB_LINK IS NOT NULL
or ((TABLE_NAME, TABLE_OWNER) in
(select TABLE_NAME, OWNER from ALL_TAB_COLUMNS%(dblink)s)))""" % {'dblink':dblink},
{'synonym_name':name})
rows = c.fetchall()
row = self._locate_owner_row(table.owner, name, rows)
if row is None:
row = self._locate_owner_row("PUBLIC", name, rows)
if row is not None:
owner, name, dblink = row['TABLE_OWNER'], row['TABLE_NAME'], row['DB_LINK']
if dblink:
dblink = '@' + dblink
if not owner:
# re-resolve table owner using new dblink variable
t1, owner, t2 = self._resolve_table_owner(connection, name, table, dblink=dblink)
else:
dblink = ''
return name, owner, dblink
raise
def _normalize_name(self, name):
if name is None:
return None
elif name.upper() == name and not self.identifier_preparer._requires_quotes(name.lower().decode(self.encoding)):
return name.lower().decode(self.encoding)
else:
return name.decode(self.encoding)
def _denormalize_name(self, name):
if name is None:
return None
elif name.lower() == name and not self.identifier_preparer._requires_quotes(name.lower()):
return name.upper().encode(self.encoding)
else:
return name.encode(self.encoding)
def table_names(self, connection, schema):
# note that table_names() isn't loading DBLINKed or synonym'ed tables
s = "select table_name from all_tables where tablespace_name NOT IN ('SYSTEM', 'SYSAUX')"
return [self._normalize_name(row[0]) for row in connection.execute(s)]
def reflecttable(self, connection, table, include_columns):
preparer = self.identifier_preparer
# search for table, including across synonyms and dblinks.
# locate the actual name of the table, the real owner, and any dblink clause needed.
actual_name, owner, dblink = self._resolve_table_owner(connection, self._denormalize_name(table.name), table)
c = connection.execute ("select COLUMN_NAME, DATA_TYPE, DATA_LENGTH, DATA_PRECISION, DATA_SCALE, NULLABLE, DATA_DEFAULT from ALL_TAB_COLUMNS%(dblink)s where TABLE_NAME = :table_name and OWNER = :owner" % {'dblink':dblink}, {'table_name':actual_name, 'owner':owner})
while True:
row = c.fetchone()
if row is None:
break
found_table = True
#print "ROW:" , row
(colname, coltype, length, precision, scale, nullable, default) = (self._normalize_name(row[0]), row[1], row[2], row[3], row[4], row[5]=='Y', row[6])
if include_columns and colname not in include_columns:
continue
# INTEGER if the scale is 0 and precision is null
# NUMBER if the scale and precision are both null
# NUMBER(9,2) if the precision is 9 and the scale is 2
# NUMBER(3) if the precision is 3 and scale is 0
#length is ignored except for CHAR and VARCHAR2
if coltype=='NUMBER' :
if precision is None and scale is None:
coltype = OracleNumeric
elif precision is None and scale == 0 :
coltype = OracleInteger
else :
coltype = OracleNumeric(precision, scale)
elif coltype=='CHAR' or coltype=='VARCHAR2':
coltype = ischema_names.get(coltype, OracleString)(length)
else:
coltype = re.sub(r'\(\d+\)', '', coltype)
try:
coltype = ischema_names[coltype]
except KeyError:
warnings.warn(RuntimeWarning("Did not recognize type '%s' of column '%s'" % (coltype, colname)))
coltype = sqltypes.NULLTYPE
colargs = []
if default is not None:
colargs.append(schema.PassiveDefault(sql.text(default)))
table.append_column(schema.Column(colname, coltype, nullable=nullable, *colargs))
if not table.columns:
raise exceptions.AssertionError("Couldn't find any column information for table %s" % actual_name)
c = connection.execute("""SELECT
ac.constraint_name,
ac.constraint_type,
loc.column_name AS local_column,
rem.table_name AS remote_table,
rem.column_name AS remote_column,
rem.owner AS remote_owner
FROM all_constraints%(dblink)s ac,
all_cons_columns%(dblink)s loc,
all_cons_columns%(dblink)s rem
WHERE ac.table_name = :table_name
AND ac.constraint_type IN ('R','P')
AND ac.owner = :owner
AND ac.owner = loc.owner
AND ac.constraint_name = loc.constraint_name
AND ac.r_owner = rem.owner(+)
AND ac.r_constraint_name = rem.constraint_name(+)
-- order multiple primary keys correctly
ORDER BY ac.constraint_name, loc.position, rem.position"""
% {'dblink':dblink}, {'table_name' : actual_name, 'owner' : owner})
fks = {}
while True:
row = c.fetchone()
if row is None:
break
#print "ROW:" , row
(cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = row[0:2] + tuple([self._normalize_name(x) for x in row[2:]])
if cons_type == 'P':
table.primary_key.add(table.c[local_column])
elif cons_type == 'R':
try:
fk = fks[cons_name]
except KeyError:
fk = ([], [])
fks[cons_name] = fk
if remote_table is None:
# ticket 363
warnings.warn("Got 'None' querying 'table_name' from all_cons_columns%(dblink)s - does the user have proper rights to the table?" % {'dblink':dblink})
continue
refspec = ".".join([remote_table, remote_column])
schema.Table(remote_table, table.metadata, autoload=True, autoload_with=connection, owner=remote_owner)
if local_column not in fk[0]:
fk[0].append(local_column)
if refspec not in fk[1]:
fk[1].append(refspec)
for name, value in fks.iteritems():
table.append_constraint(schema.ForeignKeyConstraint(value[0], value[1], name=name))
OracleDialect.logger = logging.class_logger(OracleDialect)
class _OuterJoinColumn(sql.ClauseElement):
__visit_name__ = 'outer_join_column'
def __init__(self, column):
self.column = column
class OracleCompiler(compiler.DefaultCompiler):
"""Oracle compiler modifies the lexical structure of Select
statements to work under non-ANSI configured Oracle databases, if
the use_ansi flag is False.
"""
operators = compiler.DefaultCompiler.operators.copy()
operators.update(
{
sql_operators.mod : lambda x, y:"mod(%s, %s)" % (x, y)
}
)
def __init__(self, *args, **kwargs):
super(OracleCompiler, self).__init__(*args, **kwargs)
self.__wheres = {}
def default_from(self):
"""Called when a ``SELECT`` statement has no froms, and no ``FROM`` clause is to be appended.
The Oracle compiler tacks a "FROM DUAL" to the statement.
"""
return " FROM DUAL"
def apply_function_parens(self, func):
return len(func.clauses) > 0
def visit_join(self, join, **kwargs):
if self.dialect.use_ansi:
return compiler.DefaultCompiler.visit_join(self, join, **kwargs)
(where, parentjoin) = self.__wheres.get(join, (None, None))
class VisitOn(visitors.ClauseVisitor):
def visit_binary(s, binary):
if binary.operator == sql_operators.eq:
if binary.left.table is join.right:
binary.left = _OuterJoinColumn(binary.left)
elif binary.right.table is join.right:
binary.right = _OuterJoinColumn(binary.right)
if join.isouter:
if where is not None:
self.__wheres[join.left] = self.__wheres[parentjoin] = (sql.and_(VisitOn().traverse(join.onclause, clone=True), where), parentjoin)
else:
self.__wheres[join.left] = self.__wheres[join] = (VisitOn().traverse(join.onclause, clone=True), join)
else:
if where is not None:
self.__wheres[join.left] = self.__wheres[parentjoin] = (sql.and_(join.onclause, where), parentjoin)
else:
self.__wheres[join.left] = self.__wheres[join] = (join.onclause, join)
return self.process(join.left, asfrom=True) + ", " + self.process(join.right, asfrom=True)
def get_whereclause(self, f):
if f in self.__wheres:
return self.__wheres[f][0]
else:
return None
def visit_outer_join_column(self, vc):
return self.process(vc.column) + "(+)"
def visit_sequence(self, seq):
return self.dialect.identifier_preparer.format_sequence(seq) + ".nextval"
def visit_alias(self, alias, asfrom=False, **kwargs):
"""Oracle doesn't like ``FROM table AS alias``. Is the AS standard SQL??"""
if asfrom:
return self.process(alias.original, asfrom=asfrom, **kwargs) + " " + self.preparer.format_alias(alias, self._anonymize(alias.name))
else:
return self.process(alias.original, **kwargs)
def _TODO_visit_compound_select(self, select):
"""Need to determine how to get ``LIMIT``/``OFFSET`` into a ``UNION`` for Oracle."""
pass
def visit_select(self, select, **kwargs):
"""Look for ``LIMIT`` and OFFSET in a select statement, and if
so tries to wrap it in a subquery with ``row_number()`` criterion.
"""
if not getattr(select, '_oracle_visit', None) and (select._limit is not None or select._offset is not None):
# to use ROW_NUMBER(), an ORDER BY is required.
orderby = self.process(select._order_by_clause)
if not orderby:
orderby = select.oid_column
self.traverse(orderby)
orderby = self.process(orderby)
oldselect = select
select = select.column(sql.literal_column("ROW_NUMBER() OVER (ORDER BY %s)" % orderby).label("ora_rn")).order_by(None)
select._oracle_visit = True
limitselect = sql.select([c for c in select.c if c.key!='ora_rn'])
if select._offset is not None:
limitselect.append_whereclause("ora_rn>%d" % select._offset)
if select._limit is not None:
limitselect.append_whereclause("ora_rn<=%d" % (select._limit + select._offset))
else:
limitselect.append_whereclause("ora_rn<=%d" % select._limit)
return self.process(limitselect, **kwargs)
else:
return compiler.DefaultCompiler.visit_select(self, select, **kwargs)
def limit_clause(self, select):
return ""
def for_update_clause(self, select):
if select.for_update=="nowait":
return " FOR UPDATE NOWAIT"
else:
return super(OracleCompiler, self).for_update_clause(select)
class OracleSchemaGenerator(compiler.SchemaGenerator):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column)
colspec += " " + column.type.dialect_impl(self.dialect).get_col_spec()
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
return colspec
def visit_sequence(self, sequence):
if not self.checkfirst or not self.dialect.has_sequence(self.connection, sequence.name):
self.append("CREATE SEQUENCE %s" % self.preparer.format_sequence(sequence))
self.execute()
class OracleSchemaDropper(compiler.SchemaDropper):
def visit_sequence(self, sequence):
if not self.checkfirst or self.dialect.has_sequence(self.connection, sequence.name):
self.append("DROP SEQUENCE %s" % self.preparer.format_sequence(sequence))
self.execute()
class OracleDefaultRunner(base.DefaultRunner):
def visit_sequence(self, seq):
return self.execute_string("SELECT " + self.dialect.identifier_preparer.format_sequence(seq) + ".nextval FROM DUAL", {})
class OracleIdentifierPreparer(compiler.IdentifierPreparer):
def format_savepoint(self, savepoint):
name = re.sub(r'^_+', '', savepoint.ident)
return super(OracleIdentifierPreparer, self).format_savepoint(savepoint, name)
dialect = OracleDialect
dialect.statement_compiler = OracleCompiler
dialect.schemagenerator = OracleSchemaGenerator
dialect.schemadropper = OracleSchemaDropper
dialect.preparer = OracleIdentifierPreparer
dialect.defaultrunner = OracleDefaultRunner
|
santisiri/popego
|
envs/ALPHA-POPEGO/lib/python2.5/site-packages/SQLAlchemy-0.4.0-py2.5.egg/sqlalchemy/databases/oracle.py
|
Python
|
bsd-3-clause
| 28,221
|
# -*- coding:utf-8 -*-
# Created by Hans-Thomas on 2011-05-29.
#=============================================================================
# extra_matchers.py --- Should-DSL extra matchers
#=============================================================================
from six.moves import zip_longest
from should_dsl import matcher
@matcher
class EachEqual(object):
name = 'each_be_equal_to'
def __call__(self, expected):
self._expected = expected
return self
def differ(self, given):
for n, (left, right) in enumerate(zip_longest(given, self._expected)):
if left != right:
yield n + 1, left, right
def match(self, given):
diff = list(self.differ(given))
self.diff = '\n\t'.join('%d: %r is not equal to %r' % item for item in diff)
return not diff
def message_for_failed_should(self):
return 'sequences differ\n\t' + self.diff
@matcher
def be_equal_to():
return (lambda x, y: x == y, '%r is %sequal to %r')
@matcher
def be_in():
return (lambda item, container: item in container, '%r is %sinto %r')
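# Hypothetical usage with should_dsl's |should| operator (shown as a
# comment only; assumes the matchers above have been registered):
#   [1, 2, 3] |should| each_be_equal_to([1, 2, 3])
#   2 |should| be_in([1, 2, 3])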
#.............................................................................
# extra_matchers.py
|
htmue/python-wishes
|
vows/extra_matchers.py
|
Python
|
unlicense
| 1,246
|
# Given an integer n, return the number of trailing zeroes in n!.
class Solution(object):
    def trailingZeroes(self, n):
        # Trailing zeroes come from factors of 10 = 2 * 5; factors of 5 are
        # rarer than factors of 2, so count multiples of 5, 25, 125, ... in 1..n.
        fives = 0
        while n > 0:
            fives += n // 5  # floor division works on both Python 2 and 3
            n //= 5
        return fives
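# Quick sanity checks (counting factors of 5):
#   Solution().trailingZeroes(10) == 2    # 10! = 3628800
#   Solution().trailingZeroes(100) == 24  # 20 multiples of 5 + 4 multiples of 25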
|
SakiFu/leetcode
|
python/factorial_trailing_zeroes.py
|
Python
|
mit
| 232
|
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2012 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo (info@vauxoo.com)
############################################################################
# Coded by: moylop260 (moylop260@vauxoo.com)
# Isaac Lopez (isaac@vauxoo.com)
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name':'Purchase - supplier',
'version':'1.0',
'depends':["base","account","purchase"],
'author' : 'Vauxoo',
"description": """Purchse supplier, whe you validate a purchase, the partner is converted in product supplier
""",
'category' : 'Purchases',
'website': 'http://www.vauxoo.com',
'init_xml': [
],
'update_xml': [
#~ 'partner_view.xml',
#~ 'purchase_workflow.xml',
],
'demo_xml': [
],
'installable': True,
'active': False,
}
|
lertech/extra-addons
|
purchase_supplier/__openerp__.py
|
Python
|
gpl-3.0
| 1,813
|
# This will contain all items that are obtainable by players in the game
# Armor Types, Skill to use, and armor class
armortypes = {'Padded Cloth': ('light armor', 10),
'Leather Armor': ('light armor', 12),
'Chain Mail': ('medium armor', 13),
'Half Plate Armor': ('medium armor', 15),
'Splint Armor': ('heavy armor', 16),
'Full Plate Armor': ('heavy armor', 18),
'Round Shield': ('hybrid', 5),
'Tower Shield': ('hybrid', 10)}
weapontypes = {'Long Sword': ('melee', 5),
'Wooden Staff': ('magic', 3),
'Gemmed Staff': ('magic', 5),
'One-Handed Sword': ('hybrid', 4)}
"""
for k, (val1, val2) in armortypes.items():
if val1 == 'light armor':
print(k, '-', val2, 'AC')
"""
|
ebeaber/RPG-The-Game
|
game_files/items.py
|
Python
|
gpl-3.0
| 824
|
#!/usr/bin/env python
'''
tables for STM32L476RG
'''
# additional build information for ChibiOS
build = {
"CHIBIOS_STARTUP_MK" : "os/common/startup/ARMCMx/compilers/GCC/mk/startup_stm32l4xx.mk",
"CHIBIOS_PLATFORM_MK" : "os/hal/ports/STM32/STM32L4xx/platform.mk"
}
pincount = {
'A': 16,
'B': 16,
'C': 16,
'D': 16,
'E': 0,
'F': 0,
'G': 0,
'H': 0,
'I': 0,
'J': 0,
'K': 0
}
# MCU parameters
mcu = {
# ram map, as list of (address, size-kb, flags)
# flags of 1 means DMA-capable
# flags of 2 means faster memory for CPU intensive work
'RAM_MAP' : [
# use 2nd mapping of SRAM2 to allow a single memory space
(0x20000000, 96, 1), # SRAM1
(0x10000000, 32, 2), # SRAM2
],
'EXPECTED_CLOCK' : 80000000,
}
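# Illustrative helper (not part of the ChibiOS build tables; the function
# name is hypothetical). It shows how the RAM_MAP flags documented above
# can be interpreted:
def dma_capable_regions(ram_map):
    """Return (address, size-kb) tuples for regions whose flags mark them DMA-capable."""
    return [(addr, kb) for (addr, kb, flags) in ram_map if flags & 1]

# For the table above this yields only the SRAM1 entry (0x20000000, 96).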
DMA_Map = {
# format is (DMA_TABLE, StreamNum, Channel)
# extracted from tabula-STM32L431-DMA.csv
"ADC1" : [(1,1,0),(2,3,0)],
"ADC2" : [(1,2,0),(2,4,0)],
"AES_IN" : [(2,1,6),(2,5,6)],
"AES_OUT" : [(2,2,6),(2,3,6)],
"DAC_CH1" : [(1,3,6),(2,4,3)],
"DAC_CH2" : [(1,4,5),(2,5,3)],
"DFSDM1_FLT0" : [(1,5,0)],
"DFSDM1_FLT1" : [(1,6,0)],
"I2C1_RX" : [(1,7,3),(2,6,5)],
"I2C1_TX" : [(1,6,3),(2,7,5)],
"I2C2_RX" : [(1,5,3)],
"I2C2_TX" : [(1,4,3)],
"I2C3_RX" : [(1,3,3)],
"I2C3_TX" : [(1,2,3)],
"I2C4_RX" : [(2,1,0)],
"I2C4_TX" : [(2,2,0)],
"LPUART1_RX" : [(2,7,4)],
"LPUART1_TX" : [(2,6,4)],
"QUADSPI" : [(1,5,5),(2,7,3)],
"SAI1_A" : [(2,1,1),(2,6,1)],
"SAI1_B" : [(2,2,1),(2,7,1)],
"SAI2_A" : [(1,6,1)],
"SAI2_B" : [(1,7,1)],
"SDMMC1" : [(2,4,7),(2,5,7)],
"SPI1_RX" : [(1,2,1),(2,3,4)],
"SPI1_TX" : [(1,3,1),(2,4,4)],
"SPI2_RX" : [(1,4,1)],
"SPI2_TX" : [(1,5,1)],
"SPI3_RX" : [(2,1,3)],
"SPI3_TX" : [(2,2,3)],
"SWPMI1_RX" : [(2,1,4)],
"SWPMI1_TX" : [(2,2,4)],
"TIM15_CH1" : [(1,5,7)],
"TIM15_COM" : [(1,5,7)],
"TIM15_TRIG" : [(1,5,7)],
"TIM15_UP" : [(1,5,7)],
"TIM16_CH1,TIM16_UP" : [(1,3,4),(1,6,4)],
"TIM1_CH1" : [(1,2,7)],
"TIM1_CH2" : [(1,3,7)],
"TIM1_CH3" : [(1,7,7)],
"TIM1_CH4" : [(1,4,7)],
"TIM1_COM" : [(1,4,7)],
"TIM1_TRIG" : [(1,4,7)],
"TIM1_UP" : [(1,6,7)],
"TIM2_CH1" : [(1,5,4)],
"TIM2_CH2,TIM2_CH4" : [(1,7,4)],
"TIM2_CH3" : [(1,1,4)],
"TIM2_UP" : [(1,2,4)],
"TIM3_CH1" : [(1,6,5)],
"TIM3_CH3" : [(1,2,5)],
"TIM3_CH4" : [(1,3,5)],
"TIM3_TRIG" : [(1,6,5)],
"TIM3_UP" : [(1,3,5)],
"TIM6_UP" : [(1,3,6),(2,4,3)],
"TIM7_UP" : [(2,5,3)],
"TIM7_UP." : [(1,4,5)],
"UART4_RX" : [(2,5,2)],
"UART4_TX" : [(2,3,2)],
"USART1_RX" : [(1,5,2),(2,7,2)],
"USART1_TX" : [(1,4,2),(2,6,2)],
"USART2_RX" : [(1,6,2)],
"USART2_TX" : [(1,7,2)],
"USART3_RX" : [(1,3,2)],
"USART3_TX" : [(1,2,2)],
}
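# Illustrative reading of the table above: DMA_Map["SPI1_RX"] is
# [(1,2,1),(2,3,4)], i.e. SPI1 RX can be served by DMA1 stream 2
# channel 1 or by DMA2 stream 3 channel 4.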
AltFunction_map = {
# format is PIN:FUNCTION : AFNUM
# extracted from l476-af.txt
"PA0:EVENTOUT" : 15,
"PA0:SAI1_EXTCLK" : 13,
"PA0:TIM2_CH1" : 1,
"PA0:TIM2_ETR" : 14,
"PA0:TIM5_CH1" : 2,
"PA0:TIM8_ETR" : 3,
"PA0:UART4_TX" : 8,
"PA0:USART2_CTS" : 7,
"PA1:EVENTOUT" : 15,
"PA1:LCD_SEG0" : 11,
"PA1:TIM15_CH1N" : 14,
"PA1:TIM2_CH2" : 1,
"PA1:TIM5_CH2" : 2,
"PA1:UART4_RX" : 8,
"PA1:USART2_DE" : 7,
"PA1:USART2_RTS" : 7,
"PA2:EVENTOUT" : 15,
"PA2:LCD_SEG1" : 11,
"PA2:SAI2_EXTCLK" : 13,
"PA2:TIM15_CH1" : 14,
"PA2:TIM2_CH3" : 1,
"PA2:TIM5_CH3" : 2,
"PA2:USART2_TX" : 7,
"PA3:EVENTOUT" : 15,
"PA3:LCD_SEG2" : 11,
"PA3:TIM15_CH2" : 14,
"PA3:TIM2_CH4" : 1,
"PA3:TIM5_CH4" : 2,
"PA3:USART2_RX" : 7,
"PA4:EVENTOUT" : 15,
"PA4:LPTIM2_OUT" : 14,
"PA4:SAI1_FS_B" : 13,
"PA4:SPI1_NSS" : 5,
"PA4:SPI3_NSS" : 6,
"PA4:USART2_CK" : 7,
"PA5:EVENTOUT" : 15,
"PA5:LPTIM2_ETR" : 14,
"PA5:SPI1_SCK" : 5,
"PA5:TIM2_CH1" : 1,
"PA5:TIM2_ETR" : 2,
"PA5:TIM8_CH1N" : 3,
"PA6:EVENTOUT" : 15,
"PA6:LCD_SEG3" : 11,
"PA6:QUADSPI_BK1_IO3" : 10,
"PA6:SPI1_MISO" : 5,
"PA6:TIM16_CH1" : 14,
"PA6:TIM1_BKIN" : 1,
"PA6:TIM1_BKIN_COMP2" : 12,
"PA6:TIM3_CH1" : 2,
"PA6:TIM8_BKIN" : 3,
"PA6:TIM8_BKIN_COMP2" : 13,
"PA6:USART3_CTS" : 7,
"PA7:EVENTOUT" : 15,
"PA7:LCD_SEG4" : 11,
"PA7:QUADSPI_BK1_IO2" : 10,
"PA7:SPI1_MOSI" : 5,
"PA7:TIM17_CH1" : 14,
"PA7:TIM1_CH1N" : 1,
"PA7:TIM3_CH2" : 2,
"PA7:TIM8_CH1N" : 3,
"PA8:EVENTOUT" : 15,
"PA8:LCD_COM0" : 11,
"PA8:LPTIM2_OUT" : 14,
"PA8:RCC_MCO" : 0,
"PA8:TIM1_CH1" : 1,
"PA8:USART1_CK" : 7,
"PA8:USB_OTG_FS_SOF" : 10,
"PA9:EVENTOUT" : 15,
"PA9:LCD_COM1" : 11,
"PA9:TIM15_BKIN" : 14,
"PA9:TIM1_CH2" : 1,
"PA9:USART1_TX" : 7,
"PA10:EVENTOUT" : 15,
"PA10:LCD_COM2" : 11,
"PA10:TIM17_BKIN" : 14,
"PA10:TIM1_CH3" : 1,
"PA10:USART1_RX" : 7,
"PA10:USB_OTG_FS_ID" : 10,
"PA11:CAN1_RX" : 9,
"PA11:EVENTOUT" : 15,
"PA11:TIM1_BKIN2" : 2,
"PA11:TIM1_BKIN2_COMP1" : 12,
"PA11:TIM1_CH4" : 1,
"PA11:USART1_CTS" : 7,
"PA11:USB_OTG_FS_DM" : 10,
"PA12:CAN1_TX" : 9,
"PA12:EVENTOUT" : 15,
"PA12:TIM1_ETR" : 1,
"PA12:USART1_DE" : 7,
"PA12:USART1_RTS" : 7,
"PA12:USB_OTG_FS_DP" : 10,
"PA13:EVENTOUT" : 15,
"PA13:IR_OUT" : 1,
"PA13:JTMS-SWDIO" : 0,
"PA13:USB_OTG_FS_NOE" : 10,
"PA14:EVENTOUT" : 15,
"PA14:JTCK-SWCLK" : 0,
"PA15:EVENTOUT" : 15,
"PA15:LCD_SEG17" : 11,
"PA15:SAI2_FS_B" : 13,
"PA15:SPI1_NSS" : 5,
"PA15:SPI3_NSS" : 6,
"PA15:SYS_JTDI" : 0,
"PA15:TIM2_CH1" : 1,
"PA15:TIM2_ETR" : 2,
"PA15:TSC_G3_IO1" : 9,
"PA15:UART4_DE" : 8,
"PA15:UART4_RTS" : 8,
"PB0:COMP1_OUT" : 12,
"PB0:EVENTOUT" : 15,
"PB0:LCD_SEG5" : 11,
"PB0:QUADSPI_BK1_IO1" : 10,
"PB0:TIM1_CH2N" : 1,
"PB0:TIM3_CH3" : 2,
"PB0:TIM8_CH2N" : 3,
"PB0:USART3_CK" : 7,
"PB1:DFSDM1_DATIN0" : 6,
"PB1:EVENTOUT" : 15,
"PB1:LCD_SEG6" : 11,
"PB1:LPTIM2_IN1" : 14,
"PB1:QUADSPI_BK1_IO0" : 10,
"PB1:TIM1_CH3N" : 1,
"PB1:TIM3_CH4" : 2,
"PB1:TIM8_CH3N" : 3,
"PB1:USART3_DE" : 7,
"PB1:USART3_RTS" : 7,
"PB2:DFSDM1_CKIN0" : 6,
"PB2:EVENTOUT" : 15,
"PB2:I2C3_SMBA" : 4,
"PB2:LPTIM1_OUT" : 1,
"PB2:RTC_OUT_ALARM" : 0,
"PB2:RTC_OUT_CALIB" : 0,
"PB3:EVENTOUT" : 15,
"PB3:LCD_SEG7" : 11,
"PB3:SAI1_SCK_B" : 13,
"PB3:SPI1_SCK" : 5,
"PB3:SPI3_SCK" : 6,
"PB3:SYS_JTDO-SWO" : 0,
"PB3:TIM2_CH2" : 1,
"PB3:USART1_DE" : 7,
"PB3:USART1_RTS" : 7,
"PB4:EVENTOUT" : 15,
"PB4:LCD_SEG8" : 11,
"PB4:SAI1_MCLK_B" : 13,
"PB4:SPI1_MISO" : 5,
"PB4:SPI3_MISO" : 6,
"PB4:SYS_JTRST" : 0,
"PB4:TIM17_BKIN" : 14,
"PB4:TIM3_CH1" : 2,
"PB4:TSC_G2_IO1" : 9,
"PB4:UART5_DE" : 8,
"PB4:UART5_RTS" : 8,
"PB4:USART1_CTS" : 7,
"PB5:COMP2_OUT" : 12,
"PB5:EVENTOUT" : 15,
"PB5:I2C1_SMBA" : 4,
"PB5:LCD_SEG9" : 11,
"PB5:LPTIM1_IN1" : 1,
"PB5:SAI1_SD_B" : 13,
"PB5:SPI1_MOSI" : 5,
"PB5:SPI3_MOSI" : 6,
"PB5:TIM16_BKIN" : 14,
"PB5:TIM3_CH2" : 2,
"PB5:TSC_G2_IO2" : 9,
"PB5:UART5_CTS" : 8,
"PB5:USART1_CK" : 7,
"PB6:DFSDM1_DATIN5" : 6,
"PB6:EVENTOUT" : 15,
"PB6:I2C1_SCL" : 4,
"PB6:LPTIM1_ETR" : 1,
"PB6:SAI1_FS_B" : 13,
"PB6:TIM16_CH1N" : 14,
"PB6:TIM4_CH1" : 2,
"PB6:TIM8_BKIN2" : 3,
"PB6:TIM8_BKIN2_COMP2" : 12,
"PB6:TSC_G2_IO3" : 9,
"PB6:USART1_TX" : 7,
"PB7:DFSDM1_CKIN5" : 6,
"PB7:EVENTOUT" : 15,
"PB7:I2C1_SDA" : 4,
"PB7:LCD_SEG21" : 11,
"PB7:LPTIM1_IN2" : 1,
"PB7:TIM17_CH1N" : 14,
"PB7:TIM4_CH2" : 2,
"PB7:TIM8_BKIN" : 3,
"PB7:TIM8_BKIN_COMP1" : 13,
"PB7:TSC_G2_IO4" : 9,
"PB7:UART4_CTS" : 8,
"PB7:USART1_RX" : 7,
"PB8:CAN1_RX" : 9,
"PB8:DFSDM1_DATIN6" : 6,
"PB8:EVENTOUT" : 15,
"PB8:I2C1_SCL" : 4,
"PB8:LCD_SEG16" : 11,
"PB8:SAI1_MCLK_A" : 13,
"PB8:SDMMC1_D4" : 12,
"PB8:TIM16_CH1" : 14,
"PB8:TIM4_CH3" : 2,
"PB9:CAN1_TX" : 9,
"PB9:DFSDM1_CKIN6" : 6,
"PB9:EVENTOUT" : 15,
"PB9:I2C1_SDA" : 4,
"PB9:IR_OUT" : 1,
"PB9:LCD_COM3" : 11,
"PB9:SAI1_FS_A" : 13,
"PB9:SDMMC1_D5" : 12,
"PB9:SPI2_NSS" : 5,
"PB9:TIM17_CH1" : 14,
"PB9:TIM4_CH4" : 2,
"PB10:COMP1_OUT" : 12,
"PB10:DFSDM1_DATIN7" : 6,
"PB10:EVENTOUT" : 15,
"PB10:I2C2_SCL" : 4,
"PB10:LCD_SEG10" : 11,
"PB10:LPUART1_RX" : 8,
"PB10:QUADSPI_CLK" : 10,
"PB10:SAI1_SCK_A" : 13,
"PB10:SPI2_SCK" : 5,
"PB10:TIM2_CH3" : 1,
"PB10:USART3_TX" : 7,
"PB11:COMP2_OUT" : 12,
"PB11:DFSDM1_CKIN7" : 6,
"PB11:EVENTOUT" : 15,
"PB11:I2C2_SDA" : 4,
"PB11:LCD_SEG11" : 11,
"PB11:LPUART1_TX" : 8,
"PB11:QUADSPI_NCS" : 10,
"PB11:TIM2_CH4" : 1,
"PB11:USART3_RX" : 7,
"PB12:DFSDM1_DATIN1" : 6,
"PB12:EVENTOUT" : 15,
"PB12:I2C2_SMBA" : 4,
"PB12:LCD_SEG12" : 11,
"PB12:LPUART1_DE" : 8,
"PB12:LPUART1_RTS" : 8,
"PB12:SAI2_FS_A" : 13,
"PB12:SPI2_NSS" : 5,
"PB12:SWPMI1_IO" : 12,
"PB12:TIM15_BKIN" : 14,
"PB12:TIM1_BKIN" : 1,
"PB12:TIM1_BKIN_COMP2" : 3,
"PB12:TSC_G1_IO1" : 9,
"PB12:USART3_CK" : 7,
"PB13:DFSDM1_CKIN1" : 6,
"PB13:EVENTOUT" : 15,
"PB13:I2C2_SCL" : 4,
"PB13:LCD_SEG13" : 11,
"PB13:LPUART1_CTS" : 8,
"PB13:SAI2_SCK_A" : 13,
"PB13:SPI2_SCK" : 5,
"PB13:SWPMI1_TX" : 12,
"PB13:TIM15_CH1N" : 14,
"PB13:TIM1_CH1N" : 1,
"PB13:TSC_G1_IO2" : 9,
"PB13:USART3_CTS" : 7,
"PB14:DFSDM1_DATIN2" : 6,
"PB14:EVENTOUT" : 15,
"PB14:I2C2_SDA" : 4,
"PB14:LCD_SEG14" : 11,
"PB14:SAI2_MCLK_A" : 13,
"PB14:SPI2_MISO" : 5,
"PB14:SWPMI1_RX" : 12,
"PB14:TIM15_CH1" : 14,
"PB14:TIM1_CH2N" : 1,
"PB14:TIM8_CH2N" : 3,
"PB14:TSC_G1_IO3" : 9,
"PB14:USART3_DE" : 7,
"PB14:USART3_RTS" : 7,
"PB15:DFSDM1_CKIN2" : 6,
"PB15:EVENTOUT" : 15,
"PB15:LCD_SEG15" : 11,
"PB15:RTC_REFIN" : 0,
"PB15:SAI2_SD_A" : 13,
"PB15:SPI2_MOSI" : 5,
"PB15:SWPMI1_SUSPEND" : 12,
"PB15:TIM15_CH2" : 14,
"PB15:TIM1_CH3N" : 1,
"PB15:TIM8_CH3N" : 3,
"PB15:TSC_G1_IO4" : 9,
"PC0:DFSDM1_DATIN4" : 6,
"PC0:EVENTOUT" : 15,
"PC0:I2C3_SCL" : 4,
"PC0:LCD_SEG18" : 11,
"PC0:LPTIM1_IN1" : 1,
"PC0:LPTIM2_IN1" : 14,
"PC0:LPUART1_RX" : 8,
"PC1:DFSDM1_CKIN4" : 6,
"PC1:EVENTOUT" : 15,
"PC1:I2C3_SDA" : 4,
"PC1:LCD_SEG19" : 11,
"PC1:LPTIM1_OUT" : 1,
"PC1:LPUART1_TX" : 8,
"PC2:DFSDM1_CKOUT" : 6,
"PC2:EVENTOUT" : 15,
"PC2:LCD_SEG20" : 11,
"PC2:LPTIM1_IN2" : 1,
"PC2:SPI2_MISO" : 5,
"PC3:EVENTOUT" : 15,
"PC3:LCD_VLCD" : 11,
"PC3:LPTIM1_ETR" : 1,
"PC3:LPTIM2_ETR" : 14,
"PC3:SAI1_SD_A" : 13,
"PC3:SPI2_MOSI" : 5,
"PC4:EVENTOUT" : 15,
"PC4:LCD_SEG22" : 11,
"PC4:USART3_TX" : 7,
"PC5:EVENTOUT" : 15,
"PC5:LCD_SEG23" : 11,
"PC5:USART3_RX" : 7,
"PC6:DFSDM1_CKIN3" : 6,
"PC6:EVENTOUT" : 15,
"PC6:LCD_SEG24" : 11,
"PC6:SAI2_MCLK_A" : 13,
"PC6:SDMMC1_D6" : 12,
"PC6:TIM3_CH1" : 2,
"PC6:TIM8_CH1" : 3,
"PC6:TSC_G4_IO1" : 9,
"PC7:DFSDM1_DATIN3" : 6,
"PC7:EVENTOUT" : 15,
"PC7:LCD_SEG25" : 11,
"PC7:SAI2_MCLK_B" : 13,
"PC7:SDMMC1_D7" : 12,
"PC7:TIM3_CH2" : 2,
"PC7:TIM8_CH2" : 3,
"PC7:TSC_G4_IO2" : 9,
"PC8:EVENTOUT" : 15,
"PC8:LCD_SEG26" : 11,
"PC8:SDMMC1_D0" : 12,
"PC8:TIM3_CH3" : 2,
"PC8:TIM8_CH3" : 3,
"PC8:TSC_G4_IO3" : 9,
"PC9:EVENTOUT" : 15,
"PC9:LCD_SEG27" : 11,
"PC9:SAI2_EXTCLK" : 13,
"PC9:SDMMC1_D1" : 12,
"PC9:TIM3_CH4" : 2,
"PC9:TIM8_BKIN2" : 1,
"PC9:TIM8_BKIN2_COMP1" : 14,
"PC9:TIM8_CH4" : 3,
"PC9:TSC_G4_IO4" : 9,
"PC9:USB_OTG_FS_NOE" : 10,
"PC10:EVENTOUT" : 15,
"PC10:LCD_COM4" : 11,
"PC10:LCD_SEG28" : 11,
"PC10:LCD_SEG40" : 11,
"PC10:SAI2_SCK_B" : 13,
"PC10:SDMMC1_D2" : 12,
"PC10:SPI3_SCK" : 6,
"PC10:TSC_G3_IO2" : 9,
"PC10:UART4_TX" : 8,
"PC10:USART3_TX" : 7,
"PC11:EVENTOUT" : 15,
"PC11:LCD_COM5" : 11,
"PC11:LCD_SEG29" : 11,
"PC11:LCD_SEG41" : 11,
"PC11:SAI2_MCLK_B" : 13,
"PC11:SDMMC1_D3" : 12,
"PC11:SPI3_MISO" : 6,
"PC11:TSC_G3_IO3" : 9,
"PC11:UART4_RX" : 8,
"PC11:USART3_RX" : 7,
"PC12:EVENTOUT" : 15,
"PC12:LCD_COM6" : 11,
"PC12:LCD_SEG30" : 11,
"PC12:LCD_SEG42" : 11,
"PC12:SAI2_SD_B" : 13,
"PC12:SDMMC1_CK" : 12,
"PC12:SPI3_MOSI" : 6,
"PC12:TSC_G3_IO4" : 9,
"PC12:UART5_TX" : 8,
"PC12:USART3_CK" : 7,
"PC13:EVENTOUT" : 15,
"PC14:EVENTOUT" : 15,
"PC15:EVENTOUT" : 15,
"PD2:EVENTOUT" : 15,
"PD2:LCD_COM7" : 11,
"PD2:LCD_SEG31" : 11,
"PD2:LCD_SEG43" : 11,
"PD2:SDMMC1_CMD" : 12,
"PD2:TIM3_ETR" : 2,
"PD2:TSC_SYNC" : 9,
"PD2:UART5_RX" : 8,
"PD2:USART3_DE" : 7,
"PD2:USART3_RTS" : 7,
"PH0:EVENTOUT" : 15,
"PH1:EVENTOUT" : 15,
}
ADC1_map = {
# format is PIN : ADC1_CHAN
"PA0" : 5,
"PA1" : 6,
"PA2" : 7,
"PA3" : 8,
"PA4" : 9,
"PA5" : 10,
"PA6" : 11,
"PA7" : 12,
"PC4" : 13,
"PC5" : 14,
"PB0" : 15,
"PB1" : 16,
"PC0" : 1,
"PC1" : 2,
"PC2" : 3,
"PC3" : 4,
}
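# Illustrative lookups (a sketch, not part of the generated tables; the pin
# and peripheral names below are examples only):
def _example_lookups():
    af_num = AltFunction_map["PA9:USART1_TX"]  # alternate-function mux number (7)
    adc_chan = ADC1_map["PA0"]                 # ADC1 channel for pin PA0 (5)
    return af_num, adc_chan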
|
diydrones/ardupilot
|
libraries/AP_HAL_ChibiOS/hwdef/scripts/STM32L476xx.py
|
Python
|
gpl-3.0
| 13,731
|
# -*- encoding: utf-8 -*-
from abjad import *
def test_selectiontools_LogicalTie_leaves_01():
staff = Staff("c' ~ c'16")
assert inspect_(staff[0]).get_logical_tie().leaves == tuple(staff[:])
def test_selectiontools_LogicalTie_leaves_02():
staff = Staff("c'")
assert inspect_(staff[0]).get_logical_tie().leaves == (staff[0], )
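# Illustrative note (not part of the original tests): in "c' ~ c'16" the tilde
# ties the two notes into a single logical tie, which is why the logical tie's
# leaves span every leaf of the staff in the first test above.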
|
mscuthbert/abjad
|
abjad/tools/selectiontools/test/test_selectiontools_LogicalTie_leaves.py
|
Python
|
gpl-3.0
| 349
|
list1 = ["Jan", "I am the best", "Mar", "Apr", "May", "June", "July", "August", "Sep", "Oct", "Nov", "Dec"]
months = ["Jan", "Feb", "Mar", "Apr", "May", "June", "July", "August", "Sep", "Oct", "Nov", "Dec"]
a = int(input("Enter the year: "))
# Leap-year rule: divisible by 4, except century years, which must also be divisible by 400.
if a % 4 == 0 and (a % 100 != 0 or a % 400 == 0):
for x in list1:
print(x)
else:
for y in months:
print(y)
|
samas-tech/Python-calculator
|
question6.py
|
Python
|
gpl-3.0
| 351
|
import os.path as op
import scipy.optimize as so
import numpy as np
import h5py
from nipype.utils.filemanip import split_filename
from scipy.spatial.distance import cdist
def write_search_points(locations, geo_file):
print('Writing search point .geo file to {f}'.format(f=geo_file))
f = open(geo_file, "w")
f.write('View "Simplex Search Points" {\n')
for idx, location in enumerate(locations):
x,y,z = location
pt_str = (
'SP(%5.2f, %5.2f, %5.2f){%5i};' % (x, y, z, idx))
f.write(pt_str + '\n')
f.write('};\n')
f.write('View "Simplex Search Point Names" {\n')
for idx, location in enumerate(locations):
x,y,z = location
pt_str = (
'T3(%5.2f, %5.2f, %5.2f,0){"%s"};' % (x, y, z, "Point" + str(idx)))
f.write(pt_str + '\n')
f.write('};\n')
f.close()
return geo_file
def random_initial_guess(centroids):
C = np.array(centroids)
mx = np.max(C,0)
mn = np.min(C,0)
    # sample a starting point uniformly within the centroid bounding box
    p0 = mn + np.random.rand(3) * (mx - mn)
bounds = [(mn[0],mx[0]), (mn[1],mx[1]), (mn[2],mx[2])]
return p0, bounds
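# Cost of a candidate source position p: project the measured potentials V out
# of the column space of the local leadfield L and take the normalized
# residual, cost = sqrt(V' (I - L L+) V / ||V||); a smaller cost means a
# dipole at p explains more of the measured potential.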
def calculate_element_cost(p, leadfield_matrix, V, centroids, element_ids):
p = np.array([p])
distances = cdist(p, centroids)
    # index of the mesh element whose centroid is closest to p
    lf_idx = int(np.argmin(distances))
L = np.transpose(leadfield_matrix[lf_idx * 3:lf_idx * 3 + 3])
I = np.eye(len(V))
Linv = np.linalg.pinv(L)
val = np.dot(V.T, I - np.dot(L,Linv))
R = np.dot(val,V)
#cost = np.linalg.norm(R)
cost = np.sqrt(R / np.linalg.norm(V))
if np.isnan(cost):
cost = 0
print(p,cost)
return cost
def single_dipole_search(electrode_potential, leadfield, mesh_file, elements_to_consider=[1002]):
from forward.mesh import read_mesh, get_closest_element_to_point
from nipype import logging
iflogger = logging.getLogger('interface')
geo_file = op.abspath("search_points.geo")
data_name = "leadfield"
lf_data_file = h5py.File(leadfield, "r")
lf_data = lf_data_file.get(data_name)
    leadfield_matrix = lf_data[()]  # read the full leadfield dataset into memory
_, lf_name, _ = split_filename(leadfield)
bounds = (0,leadfield_matrix.shape[0]/3)
mesh_data, _, _, _ = read_mesh(mesh_file, elements_to_consider)
centroids = []
element_ids = []
for poly in mesh_data:
element_ids.append(poly["element_id"])
centroids.append(poly["centroid"])
p0, bounds = random_initial_guess(centroids)
#p0 = np.array([0,0,60])
mw = {}
args = (leadfield_matrix, electrode_potential, centroids, element_ids)
mw['args'] = args
#mw['bounds'] = bounds
#res = so.basinhopping(calculate_element_cost, p0, T=0.01, stepsize = 3, minimizer_kwargs=mw, disp=True, niter_success=20)
# Perform downhill search, minimizing cost function
    xopt, fopt, n_iter, funcalls, _, allvecs = so.fmin(
        calculate_element_cost, p0, ftol=1e-8,
        args=(leadfield_matrix, electrode_potential, centroids, element_ids),
        xtol=1, disp=True, full_output=True, retall=True)
write_search_points(allvecs, geo_file)
# Need to minimize cost by changing values of orientation and magnitude:
_, element_idx, centroid, element_data, lf_idx = get_closest_element_to_point(mesh_file, elements_to_consider, np.array([xopt]))
L = np.transpose(leadfield_matrix[lf_idx * 3:lf_idx * 3 + 3])
V = electrode_potential
qopt, _, _, _ = np.linalg.lstsq(L, V)
return xopt, qopt, element_data, geo_file
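# Illustrative usage (a sketch with hypothetical file names; assumes a
# precomputed HDF5 leadfield and a Gmsh head model):
#
#   xopt, qopt, element_data, geo_file = single_dipole_search(
#       electrode_potential=V, leadfield='leadfield.hdf5',
#       mesh_file='head_model.msh', elements_to_consider=[1002])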
|
CyclotronResearchCentre/forward
|
forward/search.py
|
Python
|
gpl-2.0
| 3,531
|
"""Tests that our control algorithms find same policy on test problems."""
import itertools
from typing import Callable
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
from differential_value_iteration.algorithms import dvi
from differential_value_iteration.algorithms import mdvi
from differential_value_iteration.algorithms import rvi
from differential_value_iteration.environments import garet
from differential_value_iteration.environments import micro
from differential_value_iteration.environments import structure
_MDPS = (micro.create_mdp1, micro.create_mdp2, garet.GARET1, garet.GARET2,
garet.GARET3)
_DTYPES = (np.float64, )
class PolicyTest(parameterized.TestCase):
@parameterized.parameters(itertools.product(_MDPS, _DTYPES))
def test_identical_policies_sync(self,
mdp_constructor: Callable[[np.dtype], structure.MarkovDecisionProcess],
dtype: np.dtype):
environment = mdp_constructor(dtype=dtype)
rvi_control = rvi.Control(
mdp=environment,
step_size=.75,
initial_values=np.zeros(environment.num_states, dtype=dtype),
reference_index=0,
synchronized=True)
dvi_control = dvi.Control(
mdp=environment,
step_size=.1,
beta=.1,
initial_r_bar=0.,
initial_values=np.zeros(environment.num_states, dtype=dtype),
synchronized=True)
mdvi_control_1 = mdvi.Control1(
mdp=environment,
step_size=.1,
beta=.1,
threshold=.1,
initial_r_bar=0.,
initial_values=np.zeros(environment.num_states, dtype=dtype),
synchronized=True)
mdvi_control_2 = mdvi.Control2(
mdp=environment,
step_size=.1,
beta=.1,
threshold=.1,
initial_r_bar=0.,
initial_values=np.zeros(environment.num_states, dtype=dtype),
synchronized=True)
    for _ in range(500):
rvi_control.update()
dvi_control.update()
mdvi_control_1.update()
mdvi_control_2.update()
with self.subTest('rvi vs dvi'):
np.testing.assert_array_equal(rvi_control.greedy_policy(),
dvi_control.greedy_policy())
with self.subTest('rvi vs mdvi1'):
np.testing.assert_array_equal(rvi_control.greedy_policy(),
mdvi_control_1.greedy_policy())
with self.subTest('mdvi1 vs mdvi2'):
np.testing.assert_array_equal(mdvi_control_1.greedy_policy(),
mdvi_control_2.greedy_policy())
@parameterized.parameters(itertools.product(_MDPS, _DTYPES))
def test_identical_policies_async(self,
mdp_constructor: Callable[[np.dtype], structure.MarkovDecisionProcess],
dtype: np.dtype):
environment = mdp_constructor(dtype=dtype)
rvi_control = rvi.Control(
mdp=environment,
step_size=.75,
initial_values=np.zeros(environment.num_states, dtype=dtype),
reference_index=0,
synchronized=False)
dvi_control = dvi.Control(
mdp=environment,
step_size=.1,
beta=.1,
initial_r_bar=0.,
initial_values=np.zeros(environment.num_states, dtype=dtype),
synchronized=False)
mdvi_control_1 = mdvi.Control1(
mdp=environment,
step_size=.1,
beta=.1,
threshold=.1,
initial_r_bar=0.,
initial_values=np.zeros(environment.num_states, dtype=dtype),
synchronized=False)
mdvi_control_2 = mdvi.Control2(
mdp=environment,
step_size=.1,
beta=.1,
threshold=.1,
initial_r_bar=0.,
initial_values=np.zeros(environment.num_states, dtype=dtype),
synchronized=False)
for _ in range(500):
for _ in range(environment.num_states):
rvi_control.update()
dvi_control.update()
mdvi_control_1.update()
mdvi_control_2.update()
with self.subTest('rvi vs dvi'):
np.testing.assert_array_equal(rvi_control.greedy_policy(),
dvi_control.greedy_policy())
with self.subTest('rvi vs mdvi1'):
np.testing.assert_array_equal(rvi_control.greedy_policy(),
mdvi_control_1.greedy_policy())
with self.subTest('mdvi1 vs mdvi2'):
np.testing.assert_array_equal(mdvi_control_1.greedy_policy(),
mdvi_control_2.greedy_policy())
@parameterized.parameters(itertools.product((garet.GARET1,),_DTYPES))
def test_identical_policy_values_sync(self,
mdp_constructor: Callable[[np.dtype], structure.MarkovDecisionProcess],
dtype: np.dtype):
"""Not useful now, can be used to compare return from different policies."""
environment = mdp_constructor(dtype=dtype)
mdvi_control_1 = mdvi.Control1(
mdp=environment,
step_size=.1,
beta=.1,
threshold=.1,
initial_r_bar=0.,
initial_values=np.zeros(environment.num_states, dtype=dtype),
synchronized=True)
mdvi_control_2 = mdvi.Control2(
mdp=environment,
step_size=.1,
beta=.1,
threshold=.1,
initial_r_bar=0.,
initial_values=np.zeros(environment.num_states, dtype=dtype),
synchronized=True)
    for _ in range(500):
mdvi_control_1.update()
mdvi_control_2.update()
control_1_policy = mdvi_control_1.greedy_policy()
control_2_policy = mdvi_control_2.greedy_policy()
differences = np.sum(np.where(control_1_policy == control_2_policy, 0, 1))
with self.subTest('policies match'):
self.assertEqual(differences, 0)
# Tuples are better for array indexing.
control_1_policy = tuple(control_1_policy)
control_2_policy = tuple(control_2_policy)
initial_state_distribution = np.zeros(environment.num_states, np.float32)
    # Start from the first state (index 0)
initial_state_distribution[0] = 1.
iterations = 100
control_1_return = generate_rewards(environment,
control_1_policy,
iterations=iterations,
initial_state_distribution=initial_state_distribution)
control_2_return = generate_rewards(environment,
control_2_policy,
iterations=iterations,
initial_state_distribution=initial_state_distribution)
with self.subTest('returns match'):
self.assertAlmostEqual(control_1_return, control_2_return)
def generate_rewards(environment: structure.MarkovDecisionProcess,
policy: np.ndarray, iterations: int,
initial_state_distribution: np.ndarray):
total_return = 0.
policy_rewards = environment.rewards[
policy, np.arange(0, environment.num_states)]
  policy_transitions = environment.transitions[
      policy, np.arange(0, environment.num_states)]
state_distribution = initial_state_distribution.copy()
for _ in range(iterations):
total_return += np.dot(policy_rewards, state_distribution)
state_distribution = np.dot(policy_transitions, state_distribution)
return total_return / iterations
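# Illustrative check (not part of the tests): for a deterministic two-state
# cycle with rewards (1., 0.), the state distribution alternates between the
# two states, so the value returned above approaches the average reward 0.5
# as `iterations` grows.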
if __name__ == '__main__':
absltest.main()
|
abhisheknaik96/differential-value-iteration
|
src/differential_value_iteration/algorithms/policy_test.py
|
Python
|
mit
| 7,233
|
########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import errno
import datetime
import os
import tarfile
import tempfile
import yaml
from distutils.version import LooseVersion
from shutil import rmtree
from ansible.errors import AnsibleError
from ansible.module_utils.urls import open_url
from ansible.playbook.role.requirement import RoleRequirement
from ansible.galaxy.api import GalaxyAPI
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyRole(object):
SUPPORTED_SCMS = set(['git', 'hg'])
META_MAIN = os.path.join('meta', 'main.yml')
META_INSTALL = os.path.join('meta', '.galaxy_install_info')
ROLE_DIRS = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests')
def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None):
self._metadata = None
self._install_info = None
self._validate_certs = not galaxy.options.ignore_certs
display.debug('Validate TLS certificates: %s' % self._validate_certs)
self.options = galaxy.options
self.galaxy = galaxy
self.name = name
self.version = version
self.src = src or name
self.scm = scm
if path is not None:
if self.name not in path:
path = os.path.join(path, self.name)
self.path = path
else:
# use the first path by default
self.path = os.path.join(galaxy.roles_paths[0], self.name)
# create list of possible paths
        self.paths = [os.path.join(x, self.name) for x in galaxy.roles_paths]
def __repr__(self):
"""
Returns "rolename (version)" if version is not null
Returns "rolename" otherwise
"""
if self.version:
return "%s (%s)" % (self.name, self.version)
else:
return self.name
def __eq__(self, other):
return self.name == other.name
@property
def metadata(self):
"""
Returns role metadata
"""
        if self._metadata is None:
            meta_path = os.path.join(self.path, self.META_MAIN)
            if os.path.isfile(meta_path):
                try:
                    with open(meta_path, 'r') as f:
                        self._metadata = yaml.safe_load(f)
                except Exception:
                    display.vvvvv("Unable to load metadata for %s" % self.name)
                    return False
        return self._metadata
@property
def install_info(self):
"""
Returns role install info
"""
        if self._install_info is None:
            info_path = os.path.join(self.path, self.META_INSTALL)
            if os.path.isfile(info_path):
                try:
                    with open(info_path, 'r') as f:
                        self._install_info = yaml.safe_load(f)
                except Exception:
                    display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
                    return False
        return self._install_info
def _write_galaxy_install_info(self):
"""
Writes a YAML-formatted file to the role's meta/ directory
(named .galaxy_install_info) which contains some information
we can use later for commands like 'list' and 'info'.
"""
info = dict(
version=self.version,
install_date=datetime.datetime.utcnow().strftime("%c"),
)
if not os.path.exists(os.path.join(self.path, 'meta')):
os.makedirs(os.path.join(self.path, 'meta'))
info_path = os.path.join(self.path, self.META_INSTALL)
with open(info_path, 'w+') as f:
try:
self._install_info = yaml.safe_dump(info, f)
            except Exception:
return False
return True
def remove(self):
"""
Removes the specified role from the roles path.
There is a sanity check to make sure there's a meta/main.yml file at this
path so the user doesn't blow away random directories.
"""
if self.metadata:
try:
rmtree(self.path)
return True
            except Exception:
pass
return False
def fetch(self, role_data):
"""
Downloads the archived role from github to a temp location
"""
if role_data:
# first grab the file and save it to a temp location
if "github_user" in role_data and "github_repo" in role_data:
archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
else:
archive_url = self.src
display.display("- downloading role from %s" % archive_url)
try:
url_file = open_url(archive_url, validate_certs=self._validate_certs)
temp_file = tempfile.NamedTemporaryFile(delete=False)
data = url_file.read()
while data:
temp_file.write(data)
data = url_file.read()
temp_file.close()
return temp_file.name
except Exception as e:
display.error("failed to download the file: %s" % str(e))
return False
def install(self):
# the file is a tar, so open it that way and extract it
# to the specified (or default) roles directory
local_file = False
if self.scm:
# create tar file from scm url
tmp_file = RoleRequirement.scm_archive_role(**self.spec)
elif self.src:
if os.path.isfile(self.src):
# installing a local tar.gz
local_file = True
tmp_file = self.src
elif '://' in self.src:
role_data = self.src
tmp_file = self.fetch(role_data)
else:
api = GalaxyAPI(self.galaxy)
role_data = api.lookup_role_by_name(self.src)
if not role_data:
raise AnsibleError("- sorry, %s was not found on %s." % (self.src, api.api_server))
if role_data.get('role_type') == 'APP':
# Container Role
display.warning("%s is a Container App role, and should only be installed using Ansible "
"Container" % self.name)
role_versions = api.fetch_role_related('versions', role_data['id'])
if not self.version:
# convert the version names to LooseVersion objects
# and sort them to get the latest version. If there
# are no versions in the list, we'll grab the head
# of the master branch
if len(role_versions) > 0:
loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions]
try:
loose_versions.sort()
except TypeError:
raise AnsibleError(
'Unable to compare role versions (%s) to determine the most recent version due to incompatible version formats. '
'Please contact the role author to resolve versioning conflicts, or specify an explicit role version to '
'install.' % ', '.join([v.vstring for v in loose_versions])
)
self.version = str(loose_versions[-1])
elif role_data.get('github_branch', None):
self.version = role_data['github_branch']
else:
self.version = 'master'
elif self.version != 'master':
if role_versions and str(self.version) not in [a.get('name', None) for a in role_versions]:
raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version,
self.name,
role_versions))
tmp_file = self.fetch(role_data)
else:
raise AnsibleError("No valid role data found")
if tmp_file:
display.debug("installing from %s" % tmp_file)
if not tarfile.is_tarfile(tmp_file):
raise AnsibleError("the file downloaded was not a tar.gz")
else:
if tmp_file.endswith('.gz'):
role_tar_file = tarfile.open(tmp_file, "r:gz")
else:
role_tar_file = tarfile.open(tmp_file, "r")
# verify the role's meta file
meta_file = None
members = role_tar_file.getmembers()
# next find the metadata file
for member in members:
if self.META_MAIN in member.name:
# Look for parent of meta/main.yml
# Due to possibility of sub roles each containing meta/main.yml
# look for shortest length parent
meta_parent_dir = os.path.dirname(os.path.dirname(member.name))
if not meta_file:
archive_parent_dir = meta_parent_dir
meta_file = member
else:
if len(meta_parent_dir) < len(archive_parent_dir):
archive_parent_dir = meta_parent_dir
meta_file = member
if not meta_file:
raise AnsibleError("this role does not appear to have a meta/main.yml file.")
else:
try:
self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
                except Exception:
raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")
# we strip off any higher-level directories for all of the files contained within
# the tar file here. The default is 'github_repo-target'. Gerrit instances, on the other
            # hand, do not have a parent directory at all.
installed = False
while not installed:
display.display("- extracting %s to %s" % (self.name, self.path))
try:
if os.path.exists(self.path):
if not os.path.isdir(self.path):
raise AnsibleError("the specified roles path exists and is not a directory.")
elif not getattr(self.options, "force", False):
raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name)
else:
# using --force, remove the old path
if not self.remove():
raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really "
"want to put the role here." % self.path)
else:
os.makedirs(self.path)
# now we do the actual extraction to the path
for member in members:
# we only extract files, and remove any relative path
# bits that might be in the file for security purposes
# and drop any containing directory, as mentioned above
if member.isreg() or member.issym():
parts = member.name.replace(archive_parent_dir, "", 1).split(os.sep)
final_parts = []
for part in parts:
if part != '..' and '~' not in part and '$' not in part:
final_parts.append(part)
member.name = os.path.join(*final_parts)
role_tar_file.extract(member, self.path)
# write out the install info file for later use
self._write_galaxy_install_info()
installed = True
except OSError as e:
error = True
if e.errno == errno.EACCES and len(self.paths) > 1:
current = self.paths.index(self.path)
                        if current + 1 < len(self.paths):
self.path = self.paths[current + 1]
error = False
if error:
raise AnsibleError("Could not update files in %s: %s" % (self.path, str(e)))
# return the parsed yaml metadata
display.display("- %s was installed successfully" % str(self))
if not local_file:
try:
os.unlink(tmp_file)
except (OSError, IOError) as e:
display.warning("Unable to remove tmp file (%s): %s" % (tmp_file, str(e)))
return True
return False
@property
def spec(self):
"""
Returns role spec info
{
'scm': 'git',
'src': 'http://git.example.com/repos/repo.git',
'version': 'v1.0',
'name': 'repo'
}
"""
return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
|
jnerin/ansible
|
lib/ansible/galaxy/role.py
|
Python
|
gpl-3.0
| 15,222
|
#!/usr/bin/env python
WEBSERVER_PORT = 8001
WEBSERVER_ROOT = './fixtures/legacy-benchmarks/'
WEBSERVER_URL = 'http://localhost:%s' % WEBSERVER_PORT
import unittest
from harness.environment import WebServer
from harness.artemis import execute_artemis
def events_configuration_report(uuid, url, exclude):
report = execute_artemis(uuid, url,
iterations=100,
strategy_form_input='random',
strategy_priority='constant',
exclude=exclude)
return report
def const_configuration_report(uuid, url, exclude):
report = execute_artemis(uuid, url,
iterations=100,
strategy_form_input='javascript-constants',
strategy_priority='constant',
exclude=exclude)
return report
def cov_configuration_report(uuid, url, exclude):
report = execute_artemis(uuid, url,
iterations=100,
strategy_form_input='javascript-constants',
strategy_priority='coverage',
exclude=exclude)
return report
def all_configuration_report(uuid, url, exclude):
report = execute_artemis(uuid, url,
iterations=100,
strategy_form_input='javascript-constants',
strategy_priority='all',
exclude=exclude)
return report
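# The tolerance below widens when the benchmark has grown relative to the line
# count reported in the paper: coverage is computed against the current
# linesOfCode, so a larger paper/current discrepancy earns up to 0.1 of extra
# margin on top of minMargin.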
def assert_coverage_is_circa_expected(testCase, report, expected, linesOfCode, paperLinesOfCode, minMargin=0.1):
covered = float(report.get("WebKit::coverage::covered-unique", -1)) / linesOfCode
testCase.assertAlmostEqual(expected, covered,
                               delta=minMargin + (1 - max(0, min(float(paperLinesOfCode) / linesOfCode, 1))) * 0.1)
class HtmlEditTest(unittest.TestCase):
url = '%s/htmledit/demo_full.html' % WEBSERVER_URL
uuid = 'htmledit'
loc = 734
paperLoc = 568
filesToExclude = ["%s/htmledit/htmlbox.min.js" % WEBSERVER_URL,
"%s/htmledit/htmlbox.undoredomanager.js" % WEBSERVER_URL,
"%s/htmledit/jquery-1.3.2.min.js" % WEBSERVER_URL]
def test_events_configuration(self):
report = events_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.44, self.loc, self.paperLoc)
def test_const_configuration(self):
report = const_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.44, self.loc, self.paperLoc)
def test_cov_configuration(self):
report = cov_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.44, self.loc, self.paperLoc)
def test_all_configuration(self):
report = all_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.44, self.loc, self.paperLoc)
class T3dModelTest(unittest.TestCase):
url = '%s/3dmodel/index.html' % WEBSERVER_URL
uuid = '3dmodel'
loc = 492
paperLoc = 393
filesToExclude = []
def test_events_configuration(self):
report = events_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.74, self.loc, self.paperLoc)
def test_const_configuration(self):
report = const_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.74, self.loc, self.paperLoc)
def test_cov_configuration(self):
report = cov_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.74, self.loc, self.paperLoc)
def test_all_configuration(self):
report = all_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.74, self.loc, self.paperLoc)
class AjaxPollerTest(unittest.TestCase):
url = '%s/ajax-poller/ajax-poller.php' % WEBSERVER_URL
uuid = 'ajaxPoller'
filesToExclude = []
loc = 349
paperLoc = 250
def test_events_configuration(self):
report = events_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.78, self.loc, self.paperLoc)
def test_const_configuration(self):
report = const_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.78, self.loc, self.paperLoc)
def test_cov_configuration(self):
report = cov_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.78, self.loc, self.paperLoc)
def test_all_configuration(self):
report = all_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.78, self.loc, self.paperLoc)
class AjaxTabsTest(unittest.TestCase):
url = '%s/ajaxtabs/demo.htm' % WEBSERVER_URL
uuid = 'ajaxTabs'
loc = 208
paperLoc = 156
filesToExclude = []
def test_events_configuration(self):
report = events_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.88, self.loc, self.paperLoc)
def test_const_configuration(self):
report = const_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.88, self.loc, self.paperLoc)
def test_cov_configuration(self):
report = cov_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.89, self.loc, self.paperLoc)
def test_all_configuration(self):
report = all_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.89, self.loc, self.paperLoc)
class BallPoolTest(unittest.TestCase):
url = '%s/ball_pool/index.html' % WEBSERVER_URL
uuid = 'ballpool'
loc = 327
paperLoc = 256
filesToExclude = ["%s/ball_pool/js/box2d.js" % WEBSERVER_URL,
"%s/ball_pool/js/protoclass.js" % WEBSERVER_URL]
def test_events_configuration(self):
report = events_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.89, self.loc, self.paperLoc)
def test_const_configuration(self):
report = const_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.89, self.loc, self.paperLoc)
def test_cov_configuration(self):
report = cov_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.90, self.loc, self.paperLoc)
def test_all_configuration(self):
report = all_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.90, self.loc, self.paperLoc)
class DragableBoxesTest(unittest.TestCase):
url = '%s/dragable-boxes/dragable-boxes.html' % WEBSERVER_URL
uuid = 'dragableBoxes'
loc = 961
paperLoc = 697
filesToExclude = []
def test_events_configuration(self):
report = events_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.61, self.loc, self.paperLoc)
def test_const_configuration(self):
report = const_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.61, self.loc, self.paperLoc)
def test_cov_configuration(self):
report = cov_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.62, self.loc, self.paperLoc)
def test_all_configuration(self):
report = all_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.62, self.loc, self.paperLoc)
class DynamicArticlesTest(unittest.TestCase):
url = '%s/dynamicArticles/index.html' % WEBSERVER_URL
uuid = 'dynamicArticles'
loc = 171
paperLoc = 156
filesToExclude = []
def test_events_configuration(self):
report = events_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.82, self.loc, self.paperLoc)
def test_const_configuration(self):
report = const_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.82, self.loc, self.paperLoc)
def test_cov_configuration(self):
report = cov_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.75, self.loc, self.paperLoc)
def test_all_configuration(self):
report = all_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.82, self.loc, self.paperLoc)
class FractalViewerTest(unittest.TestCase):
url = '%s/fractal_viewer/index.html' % WEBSERVER_URL
uuid = 'fractalViewer'
loc = 1298
paperLoc = 750
filesToExclude = ['%s/fractal_viewer/js/lib/jquery-1.3.js' % WEBSERVER_URL]
def test_events_configuration(self):
report = events_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.62, self.loc, self.paperLoc)
def test_const_configuration(self):
report = const_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.63, self.loc, self.paperLoc)
def test_cov_configuration(self):
report = cov_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.75, self.loc, self.paperLoc)
def test_all_configuration(self):
report = all_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.75, self.loc, self.paperLoc)
class HomeostasisTest(unittest.TestCase):
url = '%s/homeostasis/index.html' % WEBSERVER_URL
uuid = 'homeostasis'
loc = 3303
paperLoc = 2037
filesToExclude = []
def test_events_configuration(self):
report = events_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.62, self.loc, self.paperLoc)
def test_const_configuration(self):
report = const_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.62, self.loc, self.paperLoc)
def test_cov_configuration(self):
report = cov_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.62, self.loc, self.paperLoc)
def test_all_configuration(self):
report = all_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.63, self.loc, self.paperLoc)
class PacmanTest(unittest.TestCase):
url = '%s/pacman/index.html' % WEBSERVER_URL
uuid = 'pacman'
loc = 3471
paperLoc = 1857
filesToExclude = ["%s/pacman/src/js/pacman10-hp.2.js" % WEBSERVER_URL,
"%s/pacman/src/js/pacman10-hp.js" % WEBSERVER_URL]
def test_events_configuration(self):
report = events_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.44, self.loc, self.paperLoc)
def test_const_configuration(self):
report = const_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.44, self.loc, self.paperLoc)
def test_cov_configuration(self):
report = cov_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.44, self.loc, self.paperLoc)
def test_all_configuration(self):
report = all_configuration_report(self.uuid, self.url, self.filesToExclude)
assert_coverage_is_circa_expected(self, report, 0.44, self.loc, self.paperLoc)
if __name__ == '__main__':
    server = WebServer(WEBSERVER_ROOT, WEBSERVER_PORT)
    # exit=False keeps unittest.main() from calling sys.exit(), so the
    # server cleanup below still runs
    unittest.main(exit=False)
    del server
|
cs-au-dk/Artemis
|
artemis-code/tests/system/benchmarks.py
|
Python
|
gpl-3.0
| 12,831
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the base Layer class, from which all layers inherit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import re
import weakref
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import utils as layers_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
class Layer(object):
"""Base layer class.
This is the class from which all layers inherit, implementing common
infrastructure functionality.
A layer is a class implementing common neural networks operations, such
as convolution, batch norm, etc. These operations require managing variables,
losses, and updates, as well as applying TensorFlow ops to input tensors.
Users will just instantiate it and then treat it as a callable.
We recommend that descendants of Layer implement the following methods:
* `__init__()`: Save configuration in member variables
* `build()`: Called once from `__call__`, when we know the shapes of inputs
and `dtype`. Should have the calls to `add_variable()`, and then
call the super's `build()` (which sets `self.built = True`, which is
nice in case the user wants to call `build()` manually before the
first `__call__`).
* `call()`: Called in `__call__` after making sure `build()` has been called
once. Should actually perform the logic of applying the layer to the
input tensors (which should be passed in as the first argument).
Read-only properties:
`name`: The name of the layer (string).
`dtype`: Default dtype of the layer (default of `None` means use the
type of the first input).
`trainable_variables`: List of trainable variables.
`non_trainable_variables`: List of non-trainable variables.
`variables`: List of all variables of this layer, trainable and
non-trainable.
`updates`: List of update ops of this layer.
`losses`: List of losses added by this layer.
Mutable properties:
`trainable`: Whether the layer should be trained (boolean).
`input_spec`: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
"""
def __init__(self, trainable=True, name=None, dtype=None,
activity_regularizer=None, **kwargs):
# We use a kwargs dict here because these kwargs only exist
# for compatibility reasons.
# The list of kwargs is subject to changes in the future.
# We do not want to commit to it or to expose the list to users at all.
# Note this is exactly as safe as defining kwargs in the function signature,
# the only difference being that the list of valid kwargs is defined
    # below rather than in the signature, and default values are defined
# in calls to kwargs.get().
allowed_kwargs = {
'_scope',
'_reuse',
'input_shape', # For compatibility with Keras `Sequential` model.
'batch_size', # For compatibility with Keras `Sequential` model.
}
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise TypeError('Keyword argument not understood:', kwarg)
# Mutable properties
self.trainable = trainable
self.built = False
self.input_spec = None
if activity_regularizer and context.in_eager_mode():
raise ValueError(
('Activity regularization is not supported when executing eagerly. '
'Got activity_regularizer=%s') % (activity_regularizer,))
self._activity_regularizer = activity_regularizer
self._trainable_weights = []
self._non_trainable_weights = []
self._updates = []
# When executing eagerly, _losses is a list of zero-argument lambdas which
# return tensors. When using graph execution, _losses is a list of ops.
self._losses = []
self._reuse = kwargs.get('_reuse')
self._graph = ops.get_default_graph()
self._per_input_losses = {}
self._per_input_updates = {}
self._dtype = None if dtype is None else dtypes.as_dtype(dtype).name
call_fn_args = estimator_util.fn_args(self.call)
self._compute_previous_mask = ('mask' in call_fn_args or
hasattr(self, 'compute_mask'))
self._call_has_scope_arg = 'scope' in call_fn_args
# These lists will be filled via successive calls
# to self._add_inbound_node().
self._inbound_nodes = []
self._outbound_nodes = []
self._init_set_name(name)
# Holds functions for creating regularizer ops.
self._regularizer_factories = []
# Determine variable scope.
scope = kwargs.get('_scope')
if scope:
with vs.variable_scope(scope) as captured_scope:
self._scope = captured_scope
else:
self._scope = None
# Set `_batch_input_shape` attribute
# for compatibility with Keras `Sequential` model.
if 'input_shape' in kwargs:
batch_size = kwargs.get('batch_size')
self._batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
def _init_set_name(self, name):
# Determine layer name (non-unique).
if isinstance(name, vs.VariableScope):
base_name = name.name
else:
base_name = name
self._name = name
if not name:
self._name, base_name = self._make_unique_name()
self._base_name = base_name
@property
def dtype(self):
return self._dtype
@property
def name(self):
return self._name
@property
def activity_regularizer(self):
"""Optional regularizer function for the output of this layer."""
return self._activity_regularizer
@property
def scope_name(self):
if not self._scope:
raise ValueError('No name available for layer scope because the layer "' +
self._name + '" has not been used yet. The scope name ' +
                       'is determined the first time the layer instance is ' +
'called. You must therefore call the layer before ' +
'querying `scope_name`.')
return self._scope.name
@property
def trainable_weights(self):
return self._trainable_weights if self.trainable else []
@property
def non_trainable_weights(self):
if self.trainable:
return self._non_trainable_weights
else:
return self._trainable_weights + self._non_trainable_weights
@property
def trainable_variables(self):
return self.trainable_weights
@property
def non_trainable_variables(self):
return self.non_trainable_weights
@property
def weights(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self.trainable_weights + self.non_trainable_weights
@property
def variables(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self.weights
@property
def updates(self):
if context.in_eager_mode():
raise RuntimeError('Layer.updates not supported in Eager mode.')
return self._updates
def add_update(self, updates, inputs=None):
"""Add update op(s), potentially dependent on layer inputs.
Weight updates (for instance, the updates of the moving mean and variance
in a BatchNormalization layer) may be dependent on the inputs passed
when calling a layer. Hence, when reusing the same layer on
different inputs `a` and `b`, some entries in `layer.updates` may be
dependent on `a` and some on `b`. This method automatically keeps track
of dependencies.
    The `get_updates_for` method allows you to retrieve the updates relevant to a
specific set of inputs.
This call is ignored in Eager mode.
Arguments:
updates: Update op, or list/tuple of update ops.
inputs: Optional input tensor(s) that the update(s) depend on. Must
match the `inputs` argument passed to the `__call__` method at the time
the updates are created. If `None` is passed, the updates are assumed
to be unconditional, and will apply across all dataflows of the layer.
"""
if context.in_eager_mode():
return # Updates already applied when in eager mode.
updates = _to_list(updates)
if not updates:
return
self._updates += updates
if inputs is not None:
inputs = nest.flatten(inputs)
if not inputs:
inputs = None
if inputs is not None:
# We compute an ID that uniquely identifies the list of tensors.
# This ID is order-sensitive.
inputs_hash = layers_util.object_list_uid(inputs)
else:
inputs_hash = None
if inputs_hash not in self._per_input_updates:
self._per_input_updates[inputs_hash] = []
self._per_input_updates[inputs_hash] += updates
def get_updates_for(self, inputs):
"""Retrieves updates relevant to a specific set of inputs.
Arguments:
inputs: Input tensor or list/tuple of input tensors.
Must match the `inputs` argument passed to the `__call__` method
at the time the updates were created.
If you pass `inputs=None`, unconditional updates are returned.
Returns:
List of update ops of the layer that depend on `inputs`.
Raises:
RuntimeError: If called in Eager mode.
"""
if context.in_eager_mode():
raise RuntimeError('Layer.get_updates_for not supported in Eager mode.')
if inputs is not None:
inputs = nest.flatten(inputs)
if not inputs:
inputs = None
if inputs is not None:
inputs_hash = layers_util.object_list_uid(inputs)
else:
inputs_hash = None
return self._per_input_updates.get(inputs_hash, [])
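  # Illustrative sketch (hypothetical tensors and ops, not from the original
  # file):
  #
  #   layer.add_update(moving_mean.assign(new_mean), inputs=x)
  #   layer.get_updates_for(x)     # -> [the conditional assign op]
  #   layer.get_updates_for(None)  # -> unconditional updates only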
def _get_regularizer_factories(self):
try:
# Some subclasses of Layer do not use its constructor.
return self._regularizer_factories
except AttributeError:
self._regularizer_factories = []
return self._regularizer_factories
def _maybe_create_variable_regularizers(self):
"""Creates added but uninstantiated regularizers."""
factories = self._get_regularizer_factories()
if factories:
for factory in factories:
factory()
factories[:] = []
@property
def losses(self):
"""Losses which are associated with this `Layer`.
Note that when executing eagerly, getting this property evaluates
regularizers. When using graph execution, variable regularization ops have
already been created and are simply returned here.
Returns:
A list of tensors.
"""
self._maybe_create_variable_regularizers()
if context.in_eager_mode():
# _losses may only contain variable regularization losses when executing
# eagerly, and they have been saved as lambdas to be executed when
# requested.
return [regularizer() for regularizer in self._losses]
else:
return self._losses
def add_loss(self, losses, inputs=None):
"""Add loss tensor(s), potentially dependent on layer inputs.
Some losses (for instance, activity regularization losses) may be dependent
on the inputs passed when calling a layer. Hence, when reusing the same
layer on different inputs `a` and `b`, some entries in `layer.losses` may
be dependent on `a` and some on `b`. This method automatically keeps track
of dependencies.
    The `get_losses_for` method allows you to retrieve the losses relevant to a
specific set of inputs.
Note that `add_loss` is not supported when executing eagerly. Instead,
variable regularizers may be added through `add_variable`. Activity
regularization is not supported directly (but such losses may be returned
from `Layer.call()`).
Arguments:
losses: Loss tensor, or list/tuple of tensors.
inputs: Optional input tensor(s) that the loss(es) depend on. Must
match the `inputs` argument passed to the `__call__` method at the time
the losses are created. If `None` is passed, the losses are assumed
to be unconditional, and will apply across all dataflows of the layer
(e.g. weight regularization losses).
Raises:
RuntimeError: If called in Eager mode.
"""
if context.in_eager_mode():
raise RuntimeError('Layer.add_loss not supported in Eager mode.')
losses = _to_list(losses)
if not losses:
return
self._losses += losses
if inputs is not None:
inputs = nest.flatten(inputs)
if not inputs:
inputs = None
if inputs is not None:
# We compute an ID that uniquely identifies the list of tensors.
# This ID is order-sensitive.
inputs_hash = layers_util.object_list_uid(inputs)
else:
inputs_hash = None
if inputs_hash not in self._per_input_losses:
self._per_input_losses[inputs_hash] = []
self._per_input_losses[inputs_hash] += losses
_add_elements_to_collection(losses, ops.GraphKeys.REGULARIZATION_LOSSES)
def get_losses_for(self, inputs):
"""Retrieves losses relevant to a specific set of inputs.
Arguments:
inputs: Input tensor or list/tuple of input tensors.
Must match the `inputs` argument passed to the `__call__`
method at the time the losses were created.
If you pass `inputs=None`, unconditional losses are returned,
such as weight regularization losses.
Returns:
List of loss tensors of the layer that depend on `inputs`.
Raises:
RuntimeError: If called in Eager mode.
"""
if context.in_eager_mode():
raise RuntimeError('Layer.get_losses_for not supported in Eager mode.')
if inputs is not None:
inputs = nest.flatten(inputs)
if not inputs:
inputs = None
if inputs is not None:
inputs_hash = layers_util.object_list_uid(inputs)
else:
inputs_hash = None
self._maybe_create_variable_regularizers()
return self._per_input_losses.get(inputs_hash, [])
def build(self, _):
"""Creates the variables of the layer."""
self.built = True
def call(self, inputs, **kwargs): # pylint: disable=unused-argument
"""The logic of the layer lives here.
Arguments:
inputs: input tensor(s).
**kwargs: additional keyword arguments.
Returns:
Output tensor(s).
"""
return inputs
def _name_scope_name(self, current_variable_scope):
"""Determines op naming for the Layer."""
return current_variable_scope.original_name_scope
def compute_output_shape(self, input_shape):
"""Computes the output shape of the layer given the input shape.
Args:
input_shape: A (possibly nested tuple of) `TensorShape`. It need not
be fully defined (e.g. the batch size may be unknown).
Returns:
A (possibly nested tuple of) `TensorShape`.
Raises:
TypeError: if `input_shape` is not a (possibly nested tuple of)
`TensorShape`.
      ValueError: if `input_shape` is incomplete or is incompatible with
the layer.
"""
raise NotImplementedError
def _make_unique_name(self, name_uid_map=None, avoid_names=None,
namespace='', zero_based=False):
base_name = _to_snake_case(self.__class__.__name__)
name = _unique_layer_name(base_name, name_uid_map=name_uid_map,
avoid_names=avoid_names, namespace=namespace,
zero_based=zero_based)
return (name, base_name)
def _set_scope(self, scope=None):
if self._scope is None:
# If constructed with _scope=None, lazy setting of scope.
if self._reuse:
with vs.variable_scope(
scope if scope is not None else self._base_name) as captured_scope:
self._scope = captured_scope
else:
with vs.variable_scope(
scope, default_name=self._base_name) as captured_scope:
self._scope = captured_scope
def add_variable(self, name, shape, dtype=None,
initializer=None, regularizer=None,
trainable=True, constraint=None,
partitioner=None):
"""Adds a new variable to the layer, or gets an existing one; returns it.
Arguments:
name: variable name.
shape: variable shape.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
initializer: initializer instance (callable).
regularizer: regularizer instance (callable).
trainable: whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
Note, if the current variable scope is marked as non-trainable
then this parameter is ignored and any added variables are also
marked as non-trainable.
constraint: constraint instance (callable).
partitioner: (optional) partitioner instance (callable). If
provided, when the requested variable is created it will be split
into multiple partitions according to `partitioner`. In this case,
an instance of `PartitionedVariable` is returned. Available
partitioners include `tf.fixed_size_partitioner` and
`tf.variable_axis_size_partitioner`. For more details, see the
documentation of `tf.get_variable` and the "Variable Partitioners
and Sharding" section of the API guide.
Returns:
The created variable. Usually either a `Variable` or `ResourceVariable`
instance. If `partitioner` is not `None`, a `PartitionedVariable`
instance is returned.
Raises:
      RuntimeError: If called in Eager mode with partitioned variable
regularization.
"""
in_graph_mode = context.in_graph_mode()
if in_graph_mode:
existing_variables = set(tf_variables.global_variables())
if dtype is None:
dtype = self.dtype or dtypes.float32
self._set_scope(None)
reuse = self.built or self._reuse
with vs.variable_scope(
self._scope, reuse=reuse, auxiliary_name_scope=False) as scope:
with ops.name_scope(self._name_scope_name(scope)):
variable = vs.get_variable(name,
shape=shape,
initializer=initializer,
dtype=dtypes.as_dtype(dtype),
constraint=constraint,
trainable=trainable and self.trainable,
partitioner=partitioner)
if in_graph_mode:
if (trainable and self.trainable
and variable not in tf_variables.trainable_variables()):
# A custom getter / variable scope overrode the trainable flag.
trainable = False
if variable in existing_variables:
# To match the behavior of tf.get_variable(), we only apply
# regularization if the variable is newly created.
return variable
if regularizer:
def regularizer_factory():
if context.in_graph_mode():
with vs.variable_scope(scope, reuse=reuse,
auxiliary_name_scope=False):
with ops.name_scope(self._name_scope_name(scope)):
if isinstance(variable, tf_variables.PartitionedVariable):
for v in variable:
with ops.colocate_with(v.op):
with ops.name_scope(name + '/Regularizer'):
regularization = regularizer(v)
if regularization is not None:
self.add_loss(regularization)
else:
with ops.colocate_with(variable.op):
with ops.name_scope(name + '/Regularizer'):
regularization = regularizer(variable)
if regularization is not None:
self.add_loss(regularization)
else:
if isinstance(variable, tf_variables.PartitionedVariable):
raise RuntimeError(
'Partitioned variable regularization is not yet '
                'supported when executing eagerly. File a feature request '
'if this is important to you.')
# Save a zero-argument lambda which runs the regularizer on the
# variable, to be executed when `Layer.losses` is requested.
# This makes losses responsive to variable updates when
# executing eagerly.
self._losses.append(lambda: regularizer(variable))
if hasattr(self, '_defer_regularizers') and self._defer_regularizers:
# _defer_regularizers exists and is set to True if `build` was
# invoked in `__call__`: deferring regularizer construction
# prevents the regularizer from being created in an `init_scope`.
self._get_regularizer_factories().append(regularizer_factory)
else:
regularizer_factory()
if trainable:
self._trainable_weights.append(variable)
else:
self._non_trainable_weights.append(variable)
return variable
def __call__(self, inputs, *args, **kwargs):
"""Wraps `call`, applying pre- and post-processing steps.
Arguments:
inputs: input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
**Note**: kwarg `scope` is reserved for use by the layer.
Returns:
Output tensor(s).
Note:
- If the layer's `call` method takes a `scope` keyword argument,
this argument will be automatically set to the current variable scope.
- If the layer's `call` method takes a `mask` argument (as some Keras
layers do), its default value will be set to the mask generated
for `inputs` by the previous layer (if `inputs` did come from
a layer that generated a corresponding mask, i.e. if it came from
a Keras layer with masking support).
Raises:
ValueError: if the layer's `call` method returns None (an invalid value).
"""
self._set_scope(kwargs.pop('scope', None))
input_list = nest.flatten(inputs)
in_graph_mode = context.in_graph_mode()
in_deferred_mode = isinstance(input_list[0], _DeferredTensor)
# Ensure the Layer, if being reused, is working with inputs from
# the same graph as where it was created.
if in_graph_mode:
try:
ops._get_graph_from_inputs(input_list, graph=self.graph) # pylint: disable=protected-access
except ValueError as e:
raise ValueError('Input graph and Layer graph are not the same: %s' % e)
if in_graph_mode or in_deferred_mode:
user_kwargs = copy.copy(kwargs)
# Handle Keras mask propagation from previous layer to current layer.
previous_mask = None
if (not hasattr(self, '_compute_previous_mask') or
self._compute_previous_mask):
previous_mask = _collect_previous_mask(inputs)
if ('mask' in estimator_util.fn_args(self.call) and
'mask' not in kwargs and
not _is_all_none(previous_mask)):
# The previous layer generated a mask, and mask was not explicitly passed
# to __call__, hence we set previous_mask as the default value.
kwargs['mask'] = previous_mask
if self.built:
try:
# Some classes which inherit from Layer do not use its constructor, so
# rather than initializing to None we check for an AttributeError.
scope_context_manager = self._always_reuse_variable_scope
except AttributeError:
# From this point we will always set reuse=True, so create a "final"
# variable scope with this setting. We avoid re-creating variable scopes
# after this point as an optimization.
self._always_reuse_variable_scope = vs.variable_scope(
self._scope, reuse=True, auxiliary_name_scope=False)
scope_context_manager = self._always_reuse_variable_scope
else:
scope_context_manager = vs.variable_scope(
self._scope, reuse=self._reuse, auxiliary_name_scope=False)
with scope_context_manager as scope:
with ops.name_scope(self._name_scope_name(scope)):
if not self.built:
if not in_graph_mode:
# Activity regularization is currently unsupported in Eager mode.
if self._activity_regularizer:
raise ValueError('activity_regularizer currently unsupported in '
'Eager mode. Found an activity_regularizer in '
'%s(%s).' % (self.__class__.__name__, self))
if not in_graph_mode and not in_deferred_mode:
# TODO(agarwal): support _keras_history in Eager mode.
for x in input_list:
if hasattr(x, '_keras_history'):
raise ValueError('_keras_history currently unsupported in '
'Eager mode. Found _keras_history in %s while '
'executing __call__ for %s(%s)' %
(x, self.__class__.__name__, self))
# Check input assumptions set before layer building, e.g. input rank.
self._assert_input_compatibility(inputs)
if input_list and self._dtype is None:
try:
self._dtype = input_list[0].dtype.base_dtype.name
except AttributeError:
pass
input_shapes = nest.map_structure(lambda x: x.get_shape(), inputs)
# Signal to `add_variable` that regularizer construction should be
# deferred.
self._defer_regularizers = True
with ops.init_scope():
self.build(input_shapes)
# Create any regularizers added by `build`.
self._maybe_create_variable_regularizers()
self._defer_regularizers = False
try:
# Note: not all sub-classes of Layer call Layer.__init__ (especially
# the ones under tensorflow/python/keras). Hence we recompute this
# attribute here if it is not set.
# TODO(agarwal): Fix the sub-classes and avoid this complexity.
call_has_scope_arg = self._call_has_scope_arg
except AttributeError:
call_has_scope_arg = 'scope' in estimator_util.fn_args(self.call)
if call_has_scope_arg:
kwargs['scope'] = scope
# Check input assumptions set after layer building, e.g. input shape.
if in_graph_mode or in_deferred_mode:
self._assert_input_compatibility(inputs)
if not in_deferred_mode:
outputs = self.call(inputs, *args, **kwargs)
if outputs is None:
raise ValueError('A layer\'s `call` method should return a Tensor '
'or a list of Tensors, not None.')
else:
# Deferred mode behavior: use `compute_output_shape` to
# infer the number of outputs of the layer and their shapes.
output_shapes = self.compute_output_shape(input_shapes)
output_shapes = nest.flatten(output_shapes)
outputs = [
# TODO(fchollet): name the deferred tensors?
_DeferredTensor(shape=shape, dtype=self._dtype)
for shape in output_shapes
]
if len(outputs) == 1:
outputs = outputs[0]
if in_graph_mode:
# Apply activity regularization.
# Note that it should be applied every time the layer creates a new
# output, since it is output-specific.
if self._activity_regularizer:
output_list = nest.flatten(outputs)
for output in output_list:
with ops.name_scope('ActivityRegularizer'):
activity_regularization = self._activity_regularizer(output)
self.add_loss(activity_regularization, inputs=inputs)
if not in_deferred_mode:
# TODO(fchollet): consider how masking will work with deferred mode.
# Handle mask computation and propagation to the next layer.
if hasattr(self, 'compute_mask'):
output_mask = self.compute_mask(inputs, previous_mask)
if isinstance(outputs, list):
if output_mask is None:
output_mask = [None for _ in range(len(outputs))]
for x, m in zip(outputs, output_mask):
x._keras_mask = m # pylint: disable=protected-access
else:
outputs._keras_mask = output_mask # pylint: disable=protected-access
if in_graph_mode:
# If all input tensors have history metadata,
# we update the output tensors
# with corresponding history metadata, thus eventually allowing these
# tensors to be used to instantiate a Network.
if _have_all_keras_metadata(inputs):
# If the layer returns tensors from its inputs, unmodified,
# we copy them to avoid loss of tensor metadata.
output_ls = nest.flatten(outputs)
output_ls_copy = []
for x in output_ls:
if x in input_list:
with ops.name_scope(scope.original_name_scope):
x = array_ops.identity(x)
output_ls_copy.append(x)
if len(output_ls_copy) == 1:
outputs = output_ls_copy[0]
else:
outputs = output_ls_copy
# Update global default collections.
_add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
if in_deferred_mode or in_graph_mode:
if _have_all_keras_metadata(inputs):
# Add an inbound node to the layer, so it can keep track of this call.
# This updates the layer history of the output tensor(s).
self._add_inbound_node(
input_tensors=inputs, output_tensors=outputs, arguments=user_kwargs)
self.built = True
return outputs
@property
def graph(self):
if context.in_eager_mode():
raise RuntimeError('Layer.graph not supported in Eager mode.')
return self._graph
def __deepcopy__(self, memo):
no_copy = set(['_graph'])
shallow_copy = set(['_scope', '_always_reuse_variable_scope'])
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k in no_copy:
setattr(result, k, v)
elif k in shallow_copy:
setattr(result, k, copy.copy(v))
elif _is_tensor_or_tensor_list(v):
setattr(result, k, v)
else:
setattr(result, k, copy.deepcopy(v, memo))
return result
def apply(self, inputs, *args, **kwargs):
"""Apply the layer on a input.
This simply wraps `self.__call__`.
Arguments:
inputs: Input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
"""
return self.__call__(inputs, *args, **kwargs)
def _add_inbound_node(self,
input_tensors,
output_tensors,
arguments=None):
"""Internal method to create an inbound node for the layer.
Arguments:
input_tensors: list of input tensors.
output_tensors: list of output tensors.
arguments: dictionary of keyword arguments that were passed to the
`call` method of the layer at the call that created the node.
"""
input_tensors = nest.flatten(input_tensors)
output_tensors = nest.flatten(output_tensors)
# Collect input tensor(s) coordinates.
inbound_layers = []
node_indices = []
tensor_indices = []
for x in input_tensors:
assert hasattr(x, '_keras_history')
inbound_layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
inbound_layers.append(inbound_layer)
node_indices.append(node_index)
tensor_indices.append(tensor_index)
# Create node, add it to inbound nodes.
Node(
self,
inbound_layers=inbound_layers,
node_indices=node_indices,
tensor_indices=tensor_indices,
input_tensors=input_tensors,
output_tensors=output_tensors,
arguments=arguments)
# Update tensor history metadata.
for i in range(len(output_tensors)):
# The metadata attribute consists of 1) a layer instance
# 2) a node index for the layer, 3) a tensor index for the node.
# This allows layer reuse (multiple nodes per layer) and multi-output
# or multi-input layers (e.g. a layer can return multiple tensors,
# and each can be sent to a different layer).
output_tensors[i]._keras_history = (self, len(self._inbound_nodes) - 1, i) # pylint: disable=protected-access
def _get_node_attribute_at_index(self, node_index, attr, attr_name):
"""Private utility to retrieves an attribute (e.g. inputs) from a node.
This is used to implement the methods:
- get_input_shape_at
- get_output_shape_at
- get_input_at
etc...
Arguments:
node_index: Integer index of the node from which
to retrieve the attribute.
attr: Exact node attribute name.
attr_name: Human-readable attribute name, for error messages.
Returns:
The layer's attribute `attr` at the node of index `node_index`.
Raises:
RuntimeError: If the layer has no inbound nodes, or if called in Eager
mode.
ValueError: If the index provided does not match any node.
"""
assert context.in_graph_mode()
if not self._inbound_nodes:
raise RuntimeError('The layer has never been called '
'and thus has no defined ' + attr_name + '.')
if node_index >= len(self._inbound_nodes):
raise ValueError('Asked to get ' + attr_name + ' at node ' +
str(node_index) + ', but the layer has only ' +
str(len(self._inbound_nodes)) + ' inbound nodes.')
values = getattr(self._inbound_nodes[node_index], attr)
if len(values) == 1:
return values[0]
else:
return values
def get_input_shape_at(self, node_index):
"""Retrieves the input shape(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A shape tuple
(or list of shape tuples if the layer has multiple inputs).
Raises:
RuntimeError: If called in Eager mode.
"""
if context.in_eager_mode():
raise RuntimeError(
'Layer.get_input_shape_at not supported in Eager mode.')
return self._get_node_attribute_at_index(node_index, 'input_shapes',
'input shape')
def get_output_shape_at(self, node_index):
"""Retrieves the output shape(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A shape tuple
(or list of shape tuples if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode.
"""
if context.in_eager_mode():
raise RuntimeError(
'Layer.get_output_shape_at not supported in Eager mode.')
return self._get_node_attribute_at_index(node_index, 'output_shapes',
'output shape')
def get_input_at(self, node_index):
"""Retrieves the input tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A tensor (or list of tensors if the layer has multiple inputs).
Raises:
RuntimeError: If called in Eager mode.
"""
if context.in_eager_mode():
raise RuntimeError('Layer.get_input_at not supported in Eager mode.')
return self._get_node_attribute_at_index(node_index, 'input_tensors',
'input')
def get_output_at(self, node_index):
"""Retrieves the output tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A tensor (or list of tensors if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode.
"""
if context.in_eager_mode():
raise RuntimeError('Layer.get_output_at not supported in Eager mode.')
return self._get_node_attribute_at_index(node_index, 'output_tensors',
'output')
@property
def input(self):
"""Retrieves the input tensor(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer.
Returns:
Input tensor or list of input tensors.
Raises:
RuntimeError: If called in Eager mode.
AttributeError: If the layer is connected to more than one incoming
layer, or if no inbound nodes are found.
"""
if context.in_eager_mode():
raise RuntimeError('Layer.input not supported in Eager mode.')
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name +
' is not connected, no input to return.')
return self._get_node_attribute_at_index(0, 'input_tensors', 'input')
@property
def output(self):
"""Retrieves the output tensor(s) of a layer.
Only applicable if the layer has exactly one output,
i.e. if it is connected to one incoming layer.
Returns:
Output tensor or list of output tensors.
Raises:
AttributeError: if the layer is connected to more than one incoming
layers.
RuntimeError: if called in Eager mode.
"""
if context.in_eager_mode():
raise RuntimeError('Layer.output not supported in Eager mode.')
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')
return self._get_node_attribute_at_index(0, 'output_tensors', 'output')
@property
def input_shape(self):
"""Retrieves the input shape(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer, or if all inputs
have the same shape.
Returns:
Input shape, as an integer shape tuple
(or list of shape tuples, one tuple per input tensor).
Raises:
AttributeError: if the layer has no defined input_shape.
RuntimeError: if called in Eager mode.
"""
if context.in_eager_mode():
raise RuntimeError('Layer.input_shape not supported in Eager mode.')
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined input shape.')
all_input_shapes = set(
[str(node.input_shapes) for node in self._inbound_nodes])
if len(all_input_shapes) == 1:
input_shapes = self._inbound_nodes[0].input_shapes
if len(input_shapes) == 1:
return tuple(tensor_shape.TensorShape(input_shapes[0]).as_list())
else:
return [
tuple(tensor_shape.TensorShape(shape).as_list())
for shape in input_shapes
]
else:
raise AttributeError('The layer "' + str(self.name) +
' has multiple inbound nodes, '
'with different input shapes. Hence '
'the notion of "input shape" is '
'ill-defined for the layer. '
'Use `get_input_shape_at(node_index)` '
'instead.')
def count_params(self):
"""Count the total number of scalars composing the weights.
Returns:
An integer count.
Raises:
ValueError: if the layer isn't yet built
(in which case its weights aren't yet defined).
"""
if not self.built:
if self.__class__.__name__ == 'Sequential':
self.build() # pylint: disable=no-value-for-parameter
else:
raise ValueError('You tried to call `count_params` on ' + self.name +
', but the layer isn\'t built. '
'You can build it manually via: `' + self.name +
'.build(batch_input_shape)`.')
weight_shapes = [w.get_shape().as_list() for w in self.weights]
return int(sum([np.prod(w) for w in weight_shapes]))
@property
def output_shape(self):
"""Retrieves the output shape(s) of a layer.
Only applicable if the layer has one output,
or if all outputs have the same shape.
Returns:
Output shape, as an integer shape tuple
(or list of shape tuples, one tuple per output tensor).
Raises:
AttributeError: if the layer has no defined output shape.
RuntimeError: if called in Eager mode.
"""
if context.in_eager_mode():
raise RuntimeError('Layer.output_shape not supported in Eager mode.')
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined output shape.')
all_output_shapes = set(
[str(node.output_shapes) for node in self._inbound_nodes])
if len(all_output_shapes) == 1:
output_shapes = self._inbound_nodes[0].output_shapes
if len(output_shapes) == 1:
return tuple(tensor_shape.TensorShape(output_shapes[0]).as_list())
else:
return [
tuple(tensor_shape.TensorShape(shape).as_list())
for shape in output_shapes
]
else:
raise AttributeError('The layer "%s"'
' has multiple inbound nodes, '
'with different output shapes. Hence '
'the notion of "output shape" is '
'ill-defined for the layer. '
'Use `get_output_shape_at(node_index)` '
'instead.' % self.name)
@property
def inbound_nodes(self):
"""Deprecated, do NOT use! Only for compatibility with external Keras."""
return self._inbound_nodes
@property
def outbound_nodes(self):
"""Deprecated, do NOT use! Only for compatibility with external Keras."""
return self._outbound_nodes
def _assert_input_compatibility(self, inputs):
"""Checks compatibility between the layer and provided inputs.
This checks that the tensor(s) `inputs` verify the input assumptions
of the layer (if any). If not, a clear and actionable exception gets raised.
Arguments:
inputs: input tensor or list of input tensors.
Raises:
ValueError: in case of mismatch between
the provided inputs and the expectations of the layer.
"""
if not self.input_spec:
return
if not isinstance(self.input_spec, (list, tuple)):
input_spec = nest.flatten(self.input_spec)
else:
input_spec = self.input_spec
inputs = nest.flatten(inputs)
if len(inputs) != len(input_spec):
raise ValueError('Layer ' + self.name + ' expects ' +
str(len(input_spec)) + ' inputs, '
'but it received ' + str(len(inputs)) +
' input tensors. Inputs received: ' + str(inputs))
for input_index, (x, spec) in enumerate(zip(inputs, input_spec)):
if spec is None:
continue
if (spec.ndim is not None or
spec.min_ndim is not None or
spec.max_ndim is not None):
if x.get_shape().ndims is None:
raise ValueError('Input ' + str(input_index) + ' of layer ' +
self.name + ' is incompatible with the layer: '
'its rank is undefined, but the layer requires a '
'defined rank.')
# Check ndim.
if spec.ndim is not None:
ndim = x.get_shape().ndims
if ndim != spec.ndim:
raise ValueError('Input ' + str(input_index) + ' of layer ' +
self.name + ' is incompatible with the layer: '
'expected ndim=' + str(spec.ndim) + ', found ndim=' +
str(ndim) + '. Full shape received: ' +
str(x.get_shape().as_list()))
if spec.max_ndim is not None:
ndim = x.get_shape().ndims
if ndim is not None and ndim > spec.max_ndim:
raise ValueError('Input ' + str(input_index) + ' of layer ' +
self.name + ' is incompatible with the layer: '
'expected max_ndim=' + str(spec.max_ndim) +
', found ndim=' + str(ndim))
if spec.min_ndim is not None:
ndim = x.get_shape().ndims
if ndim is not None and ndim < spec.min_ndim:
raise ValueError('Input ' + str(input_index) + ' of layer ' +
self.name + ' is incompatible with the layer: '
': expected min_ndim=' + str(spec.min_ndim) +
', found ndim=' + str(ndim) +
'. Full shape received: ' +
str(x.get_shape().as_list()))
# Check dtype.
if spec.dtype is not None:
if x.dtype != spec.dtype:
raise ValueError('Input ' + str(input_index) + ' of layer ' +
self.name + ' is incompatible with the layer: '
'expected dtype=' + str(spec.dtype) +
', found dtype=' + str(x.dtype))
# Check specific shape axes.
if spec.axes:
shape = x.get_shape().as_list()
if shape is not None:
for axis, value in spec.axes.items():
if hasattr(value, 'value'):
value = value.value
if value is not None and shape[int(axis)] not in {value, None}:
raise ValueError(
'Input ' + str(input_index) + ' of layer ' + self.name + ' is'
' incompatible with the layer: expected axis ' + str(axis) +
' of input shape to have value ' + str(value) +
' but received input with shape ' + str(shape))
# Check shape.
if spec.shape is not None:
shape = x.get_shape().as_list()
if shape is not None:
for spec_dim, dim in zip(spec.shape, shape):
if spec_dim is not None and dim is not None:
if spec_dim != dim:
raise ValueError('Input ' + str(input_index) +
' is incompatible with layer ' + self.name +
': expected shape=' + str(spec.shape) +
', found shape=' + str(shape))
class InputSpec(object):
"""Specifies the ndim, dtype and shape of every input to a layer.
Every layer should expose (if appropriate) an `input_spec` attribute:
a list of instances of InputSpec (one per input tensor).
A None entry in a shape is compatible with any dimension,
a None shape is compatible with any shape.
Arguments:
dtype: Expected DataType of the input.
shape: Shape tuple, expected shape of the input
(may include None for unchecked axes).
ndim: Integer, expected rank of the input.
max_ndim: Integer, maximum rank of the input.
min_ndim: Integer, minimum rank of the input.
axes: Dictionary mapping integer axes to
a specific dimension value.
"""
def __init__(self,
dtype=None,
shape=None,
ndim=None,
max_ndim=None,
min_ndim=None,
axes=None):
self.dtype = dtype
self.shape = shape
if shape is not None:
self.ndim = len(shape)
else:
self.ndim = ndim
self.max_ndim = max_ndim
self.min_ndim = min_ndim
self.axes = axes or {}
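# Illustrative usage (a sketch, not part of the original module): a layer
# expecting rank-4 float32 inputs with exactly 3 channels could declare
#   self.input_spec = InputSpec(ndim=4, dtype=dtypes.float32, axes={-1: 3})
# and Layer._assert_input_compatibility would then reject mismatched inputs.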
class Node(object):
"""A `Node` describes the connectivity between two layers.
Each time a layer is connected to some new input,
a node is added to `layer._inbound_nodes`.
Each time the output of a layer is used by another layer,
a node is added to `layer._outbound_nodes`.
Arguments:
outbound_layer: the layer that takes
`input_tensors` and turns them into `output_tensors`
(the node gets created when the `call`
method of the layer is called).
inbound_layers: a list of layers, the same length as `input_tensors`,
the layers from where `input_tensors` originate.
node_indices: a list of integers, the same length as `inbound_layers`.
`node_indices[i]` is the origin node of `input_tensors[i]`
(necessary since each inbound layer might have several nodes,
e.g. if the layer is being shared with a different data stream).
tensor_indices: a list of integers,
the same length as `inbound_layers`.
`tensor_indices[i]` is the index of `input_tensors[i]` within the
output of the inbound layer
(necessary since each inbound layer might
have multiple tensor outputs, with each one being
independently manipulable).
input_tensors: list of input tensors.
output_tensors: list of output tensors.
arguments: dictionary of keyword arguments that were passed to the
`call` method of the layer at the call that created the node.
`node_indices` and `tensor_indices` are basically fine-grained coordinates
describing the origin of the `input_tensors`.
A node from layer A to layer B is added to:
- A._outbound_nodes
- B._inbound_nodes
"""
def __init__(self,
outbound_layer,
inbound_layers,
node_indices,
tensor_indices,
input_tensors,
output_tensors,
arguments=None):
# Layer instance (NOT a list).
if isinstance(outbound_layer, list):
raise ValueError(
'`outbound_layer` should be a layer instance, not a list.')
# this is the layer that takes a list of input tensors
# and turns them into a list of output tensors.
# the current node will be added to
# the inbound_nodes of outbound_layer.
self.outbound_layer = outbound_layer
# The following 3 properties describe where
# the input tensors come from: which layers,
# and for each layer, which node and which
# tensor output of each node.
# List of layer instances.
self.inbound_layers = inbound_layers
# List of integers, 1:1 mapping with inbound_layers.
self.node_indices = node_indices
# List of integers, 1:1 mapping with inbound_layers.
self.tensor_indices = tensor_indices
# Following 2 properties:
# tensor inputs and outputs of outbound_layer.
# List of tensors. 1:1 mapping with inbound_layers.
self.input_tensors = input_tensors
# List of tensors, created by outbound_layer.call().
self.output_tensors = output_tensors
# Following 2 properties: input and output shapes.
# List of shape tuples, shapes of input_tensors.
self.input_shapes = [layers_util.static_shape(x) for x in input_tensors]
# List of shape tuples, shapes of output_tensors.
self.output_shapes = [layers_util.static_shape(x) for x in output_tensors]
# Optional keyword arguments to layer's `call`.
self.arguments = arguments
# Add nodes to all layers involved.
for layer in inbound_layers:
if layer is not None:
# For compatibility with external Keras, we use the deprecated
# accessor here.
layer.outbound_nodes.append(self)
# For compatibility with external Keras, we use the deprecated
# accessor here.
outbound_layer.inbound_nodes.append(self)
def get_config(self):
inbound_names = []
for layer in self.inbound_layers:
if layer:
inbound_names.append(layer.name)
else:
inbound_names.append(None)
return {
'outbound_layer': self.outbound_layer.name,
'inbound_layers': inbound_names,
'node_indices': self.node_indices,
'tensor_indices': self.tensor_indices
}
class _DeferredTensor(object):
"""Tensor-like object used to build graphs of layers in Eager mode.
When calling a layer on a DeferredTensor, the layer will not perform any
computation and will simply perform shape inference to return new
DeferredTensors with appropriate shape information. Thus DeferredTensor
behaves like a graph-mode Tensor when manipulated by layers.
"""
def __init__(self, shape, dtype, name=None):
self.shape = tensor_shape.TensorShape(shape)
self.dtype = dtypes.as_dtype(dtype)
self.name = name
def get_shape(self):
return self.shape
def __str__(self):
return "DeferredTensor('%s', shape=%s, dtype=%s)" % (self.name,
self.get_shape(),
self.dtype.name)
def __repr__(self):
return "<_DeferredTensor '%s' shape=%s dtype=%s>" % (self.name,
self.get_shape(),
self.dtype.name)
def _is_tensor_or_tensor_list(v):
v = nest.flatten(v)
if v and isinstance(v[0], ops.Tensor):
return True
else:
return False
def _to_snake_case(name):
intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
# If the class is private the name starts with "_" which is not secure
# for creating scopes. We prefix the name with "private" in this case.
if insecure[0] != '_':
return insecure
return 'private' + insecure
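# Example behavior (illustrative):
#   _to_snake_case('MyDenseLayer')  # -> 'my_dense_layer'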
def _to_list(x):
"""This normalizes a list/tuple or single element into a list.
If a single element is passed, we return
a list of size 1 containing the element.
Arguments:
x: list or tuple or single element.
Returns:
A list.
"""
if isinstance(x, (list, tuple)):
return list(x)
return [x]
def _add_elements_to_collection(elements, collection_list):
if context.in_eager_mode():
raise RuntimeError('Using collections from Layers not supported in Eager '
'mode. Tried to add %s to %s' % (elements,
collection_list))
elements = nest.flatten(elements)
collection_list = nest.flatten(collection_list)
for name in collection_list:
collection = ops.get_collection_ref(name)
collection_set = set(collection)
for element in elements:
if element not in collection_set:
collection.append(element)
def _is_all_none(iterable_or_element):
if not isinstance(iterable_or_element, (list, tuple)):
iterable = [iterable_or_element]
else:
iterable = iterable_or_element
# We cannot use Python's `any` because the iterable may return Tensors.
for element in iterable:
if element is not None:
return False
return True
def _have_all_keras_metadata(iterable_or_element):
if not isinstance(iterable_or_element, (list, tuple)):
iterable = [iterable_or_element]
else:
iterable = iterable_or_element
return all([hasattr(x, '_keras_history') for x in iterable])
def _collect_previous_mask(input_tensors):
"""Retrieves the output mask(s) of the previous node.
Arguments:
input_tensors: A tensor or list of tensors.
Returns:
A mask tensor or list of mask tensors.
"""
input_tensors = nest.flatten(input_tensors)
masks = []
for x in input_tensors:
if hasattr(x, '_keras_mask'):
mask = x._keras_mask # pylint: disable=protected-access
masks.append(mask)
else:
masks.append(None)
if len(masks) == 1:
return masks[0]
return masks
# A global dictionary mapping graph objects to an index of counters used
# for various layer names in each graph.
# Allows giving unique autogenerated names to layers, in a graph-specific way.
PER_GRAPH_LAYER_NAME_UIDS = weakref.WeakKeyDictionary()
def _get_default_graph_uid_map():
graph = ops.get_default_graph()
name_uid_map = PER_GRAPH_LAYER_NAME_UIDS.get(graph, None)
if name_uid_map is None:
name_uid_map = collections.defaultdict(int)
PER_GRAPH_LAYER_NAME_UIDS[graph] = name_uid_map
return name_uid_map
def _unique_layer_name(name, name_uid_map=None, avoid_names=None, namespace='',
zero_based=False):
"""Makes a layer name (or arbitrary string) unique within a TensorFlow graph.
Arguments:
name: String name to make unique.
name_uid_map: An optional defaultdict(int) to use when creating unique
names. If None (default), uses a per-Graph dictionary.
avoid_names: An optional set or dict with names which should not be used. If
None (default) does not avoid any names.
namespace: Gets a name which is unique within the (graph, namespace). Layers
which are not Networks use a blank namespace and so get graph-global
names.
zero_based: If True, name sequences start with no suffix (e.g. "dense",
"dense_1"). If False, naming is one-based ("dense_1", "dense_2").
Returns:
Unique string name.
Example:
```python
_unique_layer_name('dense') # dense_1
_unique_layer_name('dense') # dense_2
```
"""
if name_uid_map is None:
name_uid_map = _get_default_graph_uid_map()
if avoid_names is None:
avoid_names = set()
proposed_name = None
while proposed_name is None or proposed_name in avoid_names:
name_key = (namespace, name)
if zero_based:
number = name_uid_map[name_key]
if number:
proposed_name = name + '_' + str(number)
else:
proposed_name = name
name_uid_map[name_key] += 1
else:
name_uid_map[name_key] += 1
proposed_name = name + '_' + str(name_uid_map[name_key])
return proposed_name
|
jwlawson/tensorflow
|
tensorflow/python/layers/base.py
|
Python
|
apache-2.0
| 59,631
|
# coding=utf-8
"""Request handler for series and episodes."""
from __future__ import unicode_literals
import logging
from medusa.helper.exceptions import EpisodeDeletedException
from medusa.logger.adapters.style import BraceAdapter
from medusa.server.api.v2.base import (
BaseRequestHandler,
BooleanField,
IntegerField,
iter_nested_items,
set_nested_value,
)
from medusa.server.api.v2.series import SeriesHandler
from medusa.tv.episode import Episode, EpisodeNumber
from medusa.tv.series import Series, SeriesIdentifier
from six import iteritems
from tornado.escape import json_decode
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class EpisodeHandler(BaseRequestHandler):
"""Episodes request handler."""
#: parent resource handler
parent_handler = SeriesHandler
#: resource name
name = 'episodes'
#: identifier
identifier = ('episode_slug', r'[\w-]+')
#: path param
path_param = ('path_param', r'\w+')
#: allowed HTTP methods
allowed_methods = ('GET', 'PATCH', 'DELETE',)
def get(self, series_slug, episode_slug, path_param):
"""Query episode information.
:param series_slug: series slug. E.g.: tvdb1234
:param episode_slug:
:param path_param:
"""
series_identifier = SeriesIdentifier.from_slug(series_slug)
if not series_identifier:
return self._bad_request('Invalid series slug')
series = Series.find_by_identifier(series_identifier)
if not series:
return self._not_found('Series not found')
if not episode_slug:
detailed = self._parse_boolean(self.get_argument('detailed', default=False))
season = self._parse(self.get_argument('season', None), int)
data = [e.to_json(detailed=detailed) for e in series.get_all_episodes(season=season)]
return self._paginate(data, sort='airDate')
episode_number = EpisodeNumber.from_slug(episode_slug)
if not episode_number:
return self._bad_request('Invalid episode number')
episode = Episode.find_by_series_and_episode(series, episode_number)
if not episode:
return self._not_found('Episode not found')
detailed = self._parse_boolean(self.get_argument('detailed', default=True))
data = episode.to_json(detailed=detailed)
if path_param:
if path_param == 'metadata':
data = episode.metadata() if episode.is_location_valid() else {}
elif path_param in data:
data = data[path_param]
else:
return self._bad_request("Invalid path parameter '{0}'".format(path_param))
return self._ok(data=data)
def patch(self, series_slug, episode_slug=None, path_param=None):
"""Patch episode."""
series_identifier = SeriesIdentifier.from_slug(series_slug)
if not series_identifier:
return self._bad_request('Invalid series slug')
series = Series.find_by_identifier(series_identifier)
if not series:
return self._not_found('Series not found')
data = json_decode(self.request.body)
# Multi-patch request
if not episode_slug:
return self._patch_multi(series, data)
episode_number = EpisodeNumber.from_slug(episode_slug)
if not episode_number:
return self._bad_request('Invalid episode number')
episode = Episode.find_by_series_and_episode(series, episode_number)
if not episode:
return self._not_found('Episode not found')
accepted = self._patch_episode(episode, data)
return self._ok(data=accepted)
def _patch_multi(self, series, request_data):
"""Patch multiple episodes."""
statuses = {}
for slug, data in iteritems(request_data):
episode_number = EpisodeNumber.from_slug(slug)
if not episode_number:
statuses[slug] = {'status': 400}
continue
episode = Episode.find_by_series_and_episode(series, episode_number)
if not episode:
statuses[slug] = {'status': 404}
continue
self._patch_episode(episode, data)
statuses[slug] = {'status': 200}
return self._multi_status(data=statuses)
@staticmethod
def _patch_episode(episode, data):
"""Patch episode and save the changes to DB."""
accepted = {}
ignored = {}
patches = {
'status': IntegerField(episode, 'status'),
'quality': IntegerField(episode, 'quality'),
'watched': BooleanField(episode, 'watched'),
}
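# Illustrative payload (hypothetical values): a decoded request body such as
# {'status': 3, 'watched': True} would update both fields below; keys
# without a matching patch field end up in `ignored` and are logged.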
for key, value in iter_nested_items(data):
patch_field = patches.get(key)
if patch_field and patch_field.patch(episode, value):
set_nested_value(accepted, key, value)
else:
set_nested_value(ignored, key, value)
# Save patched attributes in db.
episode.save_to_db()
if ignored:
log.warning(
'Episode patch for {episode} ignored {items!r}',
{'episode': episode.identifier, 'items': ignored},
)
return accepted
def delete(self, series_slug, episode_slug, **kwargs):
"""Delete the episode."""
if not series_slug:
return self._method_not_allowed('Deleting multiple series is not allowed')
identifier = SeriesIdentifier.from_slug(series_slug)
if not identifier:
return self._bad_request('Invalid series identifier')
series = Series.find_by_identifier(identifier)
if not series:
return self._not_found('Series not found')
episode_number = EpisodeNumber.from_slug(episode_slug)
if not episode_number:
return self._bad_request('Invalid episode number')
episode = Episode.find_by_series_and_episode(series, episode_number)
if not episode:
return self._not_found('Episode not found')
try:
episode.delete_episode()
except EpisodeDeletedException:
return self._no_content()
else:
return self._conflict('Unable to delete episode')
|
pymedusa/SickRage
|
medusa/server/api/v2/episodes.py
|
Python
|
gpl-3.0
| 6,368
|
"""
hill_cipher.py
@author Elliot and Erica
"""
import random
import numpy as np
from cryptography_utilities import (gcd, extended_gcd)
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
ALPHABET_SIZE = len(ALPHABET)
N = 3
TOTIENT_26 = 12
def random_matrix(size=N):
"""Generate a NxN matrix filled with random integers"""
array = [[random.randint(0, ALPHABET_SIZE - 1)
for _ in range(size)]
for _ in range(size)]
return np.matrix(array)
def valid_key(matrix):
"""Determine if the given matrix can be used as a key"""
return gcd(int(np.linalg.det(matrix)), ALPHABET_SIZE) == 1
def generate_key(size=N):
"""Return a NxN matrix filled with random integers that qualifies
as a valid key.
"""
matrix = random_matrix(size)
while not valid_key(matrix):
matrix = random_matrix(size)
return matrix
def to_int(char):
"""Using ALPHABET, find the numeric representation of the given
character.
"""
char_map = {char: number for number, char in enumerate(ALPHABET)}
return char_map[char]
def to_char(integer):
"""Using ALPHABET, find the character representation of the given
integer.
"""
integer_map = {number: char for number, char in enumerate(ALPHABET)}
return integer_map[integer]
def number_matrix(string):
"""Convert a string into an integer matrix."""
row = 0
array = [[] for _ in range(N)]
for char in make_correct_length(string):
array[row].append(to_int(char))
row = (row + 1) % N
return np.matrix(array)
def make_correct_length(string):
"""Add x characters if the input string cannot be split into even
blocks.
"""
if not len(string) % N == 0:
string += 'x' * (N - (len(string) % N))
return string
def modular_inverse(number):
"""Calculate the modular multiplicative inverse of a number."""
gcd, x, y = extended_gcd(ALPHABET_SIZE, number)
if y < 0:
return y + ALPHABET_SIZE
else:
return y
def switch_key(key):
"""Change an encryption key into a decryption key or
vice-versa.
"""
key_determinant = int(np.linalg.det(key))
inverse = np.linalg.inv(key)
adjugate = np.multiply(inverse, key_determinant)
raw_switch = np.multiply(modular_inverse(key_determinant),
adjugate)
# Round before taking the modulus to guard against floating-point error
# in the matrix inverse.
modder = np.vectorize(lambda n: int(round(n)) % ALPHABET_SIZE)
return modder(raw_switch)
def hill_cipher_encrypt(plain_text, key):
"""Determine cipher text given plain text. The key should be an
NxN matrix.
"""
cipher_matrix = np.dot(key, number_matrix(plain_text))
cipher_text = ''
for number in np.nditer(cipher_matrix):
cipher_text += to_char(number % ALPHABET_SIZE)
return cipher_text
def hill_cipher_decrypt(cipher_text, key, encryption_key=False):
"""Determine plain text given cipher text. The key should be an
NxN matrix.
"""
if encryption_key:
key = switch_key(key)
plain_matrix = np.dot(key, number_matrix(cipher_text))
plain_text = ''
for number in np.nditer(plain_matrix):
plain_text += to_char(number % ALPHABET_SIZE)
return plain_text
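if __name__ == '__main__':
    # Illustrative round trip (a sketch, not part of the original module).
    # A single N-character lowercase block encrypts and decrypts cleanly;
    # encryption_key=True makes hill_cipher_decrypt derive the decryption
    # key itself via switch_key().
    demo_key = generate_key()
    demo_cipher = hill_cipher_encrypt('dog', demo_key)
    print('cipher text:', demo_cipher)
    print('plain text:', hill_cipher_decrypt(demo_cipher, demo_key,
                                             encryption_key=True))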
|
ElliotPenson/cryptography
|
hill_cipher.py
|
Python
|
mit
| 3,152
|
# coding: utf-8
from __future__ import unicode_literals
import datetime
import re
import time
import base64
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_ord,
)
from ..utils import (
determine_ext,
ExtractorError,
parse_iso8601,
sanitized_Request,
int_or_none,
str_or_none,
encode_data_uri,
url_basename,
)
class LetvIE(InfoExtractor):
IE_DESC = '乐视网'
_VALID_URL = r'http://www\.letv\.com/ptv/vplay/(?P<id>\d+).html'
_TESTS = [{
'url': 'http://www.letv.com/ptv/vplay/22005890.html',
'md5': 'edadcfe5406976f42f9f266057ee5e40',
'info_dict': {
'id': '22005890',
'ext': 'mp4',
'title': '第87届奥斯卡颁奖礼完美落幕 《鸟人》成最大赢家',
'description': 'md5:a9cb175fd753e2962176b7beca21a47c',
},
'params': {
'hls_prefer_native': True,
},
}, {
'url': 'http://www.letv.com/ptv/vplay/1415246.html',
'info_dict': {
'id': '1415246',
'ext': 'mp4',
'title': '美人天下01',
'description': 'md5:f88573d9d7225ada1359eaf0dbf8bcda',
},
'params': {
'hls_prefer_native': True,
},
}, {
'note': 'This video is available only in Mainland China, thus a proxy is needed',
'url': 'http://www.letv.com/ptv/vplay/1118082.html',
'md5': '2424c74948a62e5f31988438979c5ad1',
'info_dict': {
'id': '1118082',
'ext': 'mp4',
'title': '与龙共舞 完整版',
'description': 'md5:7506a5eeb1722bb9d4068f85024e3986',
},
'params': {
'hls_prefer_native': True,
},
'skip': 'Only available in China',
}]
@staticmethod
def urshift(val, n):
return val >> n if val >= 0 else (val + 0x100000000) >> n
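# Illustrative: urshift(-2, 1) == 0x7fffffff, i.e. the unsigned 32-bit
# right shift of a value that Python treats as negative.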
# ror() and calc_time_key() are reverse-engineered from an embedded swf file in KLetvPlayer.swf
def ror(self, param1, param2):
_loc3_ = 0
while _loc3_ < param2:
param1 = self.urshift(param1, 1) + ((param1 & 1) << 31)
_loc3_ += 1
return param1
def calc_time_key(self, param1):
_loc2_ = 773625421
_loc3_ = self.ror(param1, _loc2_ % 13)
_loc3_ = _loc3_ ^ _loc2_
_loc3_ = self.ror(_loc3_, _loc2_ % 17)
return _loc3_
# see M3U8Encryption class in KLetvPlayer.swf
@staticmethod
def decrypt_m3u8(encrypted_data):
if encrypted_data[:5].decode('utf-8').lower() != 'vc_01':
return encrypted_data
encrypted_data = encrypted_data[5:]
_loc4_ = bytearray()
while encrypted_data:
b = compat_ord(encrypted_data[0])
_loc4_.extend([b // 16, b & 0x0f])
encrypted_data = encrypted_data[1:]
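# Rotate the nibble sequence: the last 11 nibbles move to the front,
# then consecutive nibble pairs are re-packed into bytes below.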
idx = len(_loc4_) - 11
_loc4_ = _loc4_[idx:] + _loc4_[:idx]
_loc7_ = bytearray()
while _loc4_:
_loc7_.append(_loc4_[0] * 16 + _loc4_[1])
_loc4_ = _loc4_[2:]
return bytes(_loc7_)
def _real_extract(self, url):
media_id = self._match_id(url)
page = self._download_webpage(url, media_id)
params = {
'id': media_id,
'platid': 1,
'splatid': 101,
'format': 1,
'tkey': self.calc_time_key(int(time.time())),
'domain': 'www.letv.com'
}
play_json_req = sanitized_Request(
'http://api.letv.com/mms/out/video/playJson?' + compat_urllib_parse.urlencode(params)
)
cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
if cn_verification_proxy:
play_json_req.add_header('Ytdl-request-proxy', cn_verification_proxy)
play_json = self._download_json(
play_json_req,
media_id, 'Downloading playJson data')
# Check for errors
playstatus = play_json['playstatus']
if playstatus['status'] == 0:
flag = playstatus['flag']
if flag == 1:
msg = 'Country %s auth error' % playstatus['country']
else:
msg = 'Generic error. flag = %d' % flag
raise ExtractorError(msg, expected=True)
playurl = play_json['playurl']
formats = ['350', '1000', '1300', '720p', '1080p']
dispatch = playurl['dispatch']
urls = []
for format_id in formats:
if format_id in dispatch:
media_url = playurl['domain'][0] + dispatch[format_id][0]
media_url += '&' + compat_urllib_parse.urlencode({
'm3v': 1,
'format': 1,
'expect': 3,
'rateid': format_id,
})
nodes_data = self._download_json(
media_url, media_id,
'Download JSON metadata for format %s' % format_id)
req = self._request_webpage(
nodes_data['nodelist'][0]['location'], media_id,
note='Downloading m3u8 information for format %s' % format_id)
m3u8_data = self.decrypt_m3u8(req.read())
url_info_dict = {
'url': encode_data_uri(m3u8_data, 'application/vnd.apple.mpegurl'),
'ext': determine_ext(dispatch[format_id][1]),
'format_id': format_id,
'protocol': 'm3u8',
}
if format_id[-1:] == 'p':
url_info_dict['height'] = int_or_none(format_id[:-1])
urls.append(url_info_dict)
publish_time = parse_iso8601(self._html_search_regex(
r'发布时间 ([^<>]+) ', page, 'publish time', default=None),
delimiter=' ', timezone=datetime.timedelta(hours=8))
description = self._html_search_meta('description', page, fatal=False)
return {
'id': media_id,
'formats': urls,
'title': playurl['title'],
'thumbnail': playurl['pic'],
'description': description,
'timestamp': publish_time,
}
class LetvTvIE(InfoExtractor):
_VALID_URL = r'http://www.letv.com/tv/(?P<id>\d+).html'
_TESTS = [{
'url': 'http://www.letv.com/tv/46177.html',
'info_dict': {
'id': '46177',
'title': '美人天下',
'description': 'md5:395666ff41b44080396e59570dbac01c'
},
'playlist_count': 35
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
page = self._download_webpage(url, playlist_id)
media_urls = list(set(re.findall(
r'http://www.letv.com/ptv/vplay/\d+.html', page)))
entries = [self.url_result(media_url, ie='Letv')
for media_url in media_urls]
title = self._html_search_meta('keywords', page,
fatal=False).split(',')[0]
description = self._html_search_meta('description', page, fatal=False)
return self.playlist_result(entries, playlist_id, playlist_title=title,
playlist_description=description)
class LetvPlaylistIE(LetvTvIE):
_VALID_URL = r'http://tv.letv.com/[a-z]+/(?P<id>[a-z]+)/index.s?html'
_TESTS = [{
'url': 'http://tv.letv.com/izt/wuzetian/index.html',
'info_dict': {
'id': 'wuzetian',
'title': '武媚娘传奇',
'description': 'md5:e12499475ab3d50219e5bba00b3cb248'
},
# This playlist contains some extra videos other than the drama itself
'playlist_mincount': 96
}, {
'url': 'http://tv.letv.com/pzt/lswjzzjc/index.shtml',
'info_dict': {
'id': 'lswjzzjc',
# The title should be "劲舞青春", but I can't find a simple way to
# determine the playlist title
'title': '乐视午间自制剧场',
'description': 'md5:b1eef244f45589a7b5b1af9ff25a4489'
},
'playlist_mincount': 7
}]
class LetvCloudIE(InfoExtractor):
IE_DESC = '乐视云'
_VALID_URL = r'https?://yuntv\.letv\.com/bcloud.html\?.+'
_TESTS = [{
'url': 'http://yuntv.letv.com/bcloud.html?uu=p7jnfw5hw9&vu=467623dedf',
'md5': '26450599afd64c513bc77030ad15db44',
'info_dict': {
'id': 'p7jnfw5hw9_467623dedf',
'ext': 'mp4',
'title': 'Video p7jnfw5hw9_467623dedf',
},
}, {
'url': 'http://yuntv.letv.com/bcloud.html?uu=p7jnfw5hw9&vu=ec93197892&pu=2c7cd40209&auto_play=1&gpcflag=1&width=640&height=360',
'info_dict': {
'id': 'p7jnfw5hw9_ec93197892',
'ext': 'mp4',
'title': 'Video p7jnfw5hw9_ec93197892',
},
}, {
'url': 'http://yuntv.letv.com/bcloud.html?uu=p7jnfw5hw9&vu=187060b6fd',
'info_dict': {
'id': 'p7jnfw5hw9_187060b6fd',
'ext': 'mp4',
'title': 'Video p7jnfw5hw9_187060b6fd',
},
}]
def _real_extract(self, url):
uu_mobj = re.search(r'uu=([\w]+)', url)
vu_mobj = re.search(r'vu=([\w]+)', url)
if not uu_mobj or not vu_mobj:
raise ExtractorError('Invalid URL: %s' % url, expected=True)
uu = uu_mobj.group(1)
vu = vu_mobj.group(1)
media_id = uu + '_' + vu
play_json_req = sanitized_Request(
'http://api.letvcloud.com/gpc.php?cf=html5&sign=signxxxxx&ver=2.2&format=json&' +
'uu=' + uu + '&vu=' + vu)
play_json = self._download_json(play_json_req, media_id, 'Downloading playJson data')
if not play_json.get('data'):
if play_json.get('message'):
raise ExtractorError('Letv cloud said: %s' % play_json['message'], expected=True)
elif play_json.get('code'):
raise ExtractorError('Letv cloud returned error %d' % play_json['code'], expected=True)
else:
raise ExtractorError('Letv cloud returned an unknown error')
def b64decode(s):
return base64.b64decode(s.encode('utf-8')).decode('utf-8')
formats = []
for media in play_json['data']['video_info']['media'].values():
play_url = media['play_url']
url = b64decode(play_url['main_url'])
decoded_url = b64decode(url_basename(url))
formats.append({
'url': url,
'ext': determine_ext(decoded_url),
'format_id': int_or_none(play_url.get('vtype')),
'format_note': str_or_none(play_url.get('definition')),
'width': int_or_none(play_url.get('vwidth')),
'height': int_or_none(play_url.get('vheight')),
})
self._sort_formats(formats)
return {
'id': media_id,
'title': 'Video %s' % media_id,
'formats': formats,
}
|
dyn888/youtube-dl
|
youtube_dl/extractor/letv.py
|
Python
|
unlicense
| 11,107
|
#!/usr/bin/env python
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
# run in project examples directory with:
# sudo ./total_clone_per_second.py"
from bcc import BPF
from time import sleep
# bpf program in restricted C language.
prog = """
BPF_TABLE("array", u32, u32, stats, 1);
int hello_world(void *ctx) {
u32 key = 0, value = 0, *val;
val = stats.lookup_or_init(&key, &value);
lock_xadd(val, 1);
return 0;
}
"""
b = BPF(text=prog)
# getting shared kernel map
stats_map = b.get_table("stats")
# attaching hello_world function to sys_clone system call.
b.attach_kprobe(event="sys_clone", fn_name="hello_world")
for x in range(0, 10):
stats_map[ stats_map.Key(0) ] = stats_map.Leaf(0)
sleep(1)
print "Total sys_clone per second =", stats_map[ stats_map.Key(0) ].value;
|
zaafar/ebpf_turtle
|
bpf_demo/total_clone_per_second.py
|
Python
|
apache-2.0
| 841
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import tokenize
from builtins import range
from collections import defaultdict
from pants.contrib.python.checks.tasks.checkstyle.common import CheckstylePlugin
class TrailingWhitespace(CheckstylePlugin):
"""Warn on invalid trailing whitespace."""
@classmethod
def build_exception_map(cls, tokens):
"""Generates a set of ranges where we accept trailing slashes, specifically within comments
and strings.
"""
exception_ranges = defaultdict(list)
for token in tokens:
token_type, _, token_start, token_end = token[0:4]
if token_type in (tokenize.COMMENT, tokenize.STRING):
if token_start[0] == token_end[0]:
exception_ranges[token_start[0]].append((token_start[1], token_end[1]))
else:
exception_ranges[token_start[0]].append((token_start[1], sys.maxsize))
for line in range(token_start[0] + 1, token_end[0]):
exception_ranges[line].append((0, sys.maxsize))
exception_ranges[token_end[0]].append((0, token_end[1]))
return exception_ranges
def __init__(self, *args, **kw):
super(TrailingWhitespace, self).__init__(*args, **kw)
self._exception_map = self.build_exception_map(self.python_file.tokens)
def has_exception(self, line_number, exception_start, exception_end=None):
exception_end = exception_end or exception_start
for start, end in self._exception_map.get(line_number, ()):
if start <= exception_start and exception_end <= end:
return True
return False
def nits(self):
for line_number, line in self.python_file.enumerate():
stripped_line = line.rstrip()
if stripped_line != line and not self.has_exception(line_number,
len(stripped_line), len(line)):
yield self.error('T200', 'Line has trailing whitespace.', line_number)
if line.rstrip().endswith('\\'):
if not self.has_exception(line_number, len(line.rstrip()) - 1):
yield self.error('T201', 'Line has trailing slashes.', line_number)
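# Illustrative nits (not from the original source): a line ending in spaces
# yields T200, and a line whose stripped form ends with a bare backslash
# outside a comment or string yields T201.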
|
foursquare/pants
|
contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/trailing_whitespace.py
|
Python
|
apache-2.0
| 2,252
|
from django.db import models
class DeveloperInfo(models.Model):
name = models.CharField(max_length=100, blank=True)
city = models.CharField(max_length=50, blank=True)
|
teamjomigi/django-backend
|
src/api/v1/hackatrain/models.py
|
Python
|
gpl-3.0
| 177
|
import sys
from html import unescape
from pathlib import Path
import pytest
@pytest.fixture
def compile_template(scratch_env):
def compile_template(source, name="tmpl.html"):
Path(scratch_env.root_path, "templates", name).write_text(
source, encoding="utf-8"
)
return scratch_env.jinja_env.get_template(name)
return compile_template
def test_jinja2_feature_autoescape(compile_template):
tmpl = compile_template("{{ value }}", "tmpl.html")
rendered = tmpl.render(value="<tag>")
assert unescape(rendered) == "<tag>"
assert "<" not in rendered
def test_jinja2_feature_with(compile_template):
tmpl = compile_template("{% with x = 'good' %}{{ x }}{% endwith %}")
assert tmpl.render() == "good"
def test_jinja2_feature_do(compile_template):
tmpl = compile_template(
"{% set x = ['a'] %}{% do x.append('b') %}{{ x|join('-') }}"
)
assert tmpl.render() == "a-b"
def test_no_reference_cycle_in_environment(project):
env = project.make_env(load_plugins=False)
# reference count should be two: one from our `env` variable, and
# another from the argument to sys.getrefcount
assert sys.getrefcount(env) == 2
|
lektor/lektor
|
tests/test_environment.py
|
Python
|
bsd-3-clause
| 1,211
|
"""
set some general constants in CGS units
"""
import numpy as np
import scipy.constants as sc
pi = np.pi; # noqa - PI
k_b = sc.k*1e7; # noqa - Boltzmann constant in erg/K
m_p = sc.proton_mass*1e3; # noqa - proton mass in g
Grav = sc.G*1e3; # noqa - gravitational constant in cm^3 g^-1 s^-2
AU = sc.au*1e2; # noqa - astronomical unit in cm
year = sc.Julian_year; # noqa - year in s
mu = 2.3e0; # noqa - mean molecular mass in proton masses
M_sun = 1.9891e+33; # noqa - mass of the sun in g
R_sun = 69550800000.0; # noqa - radius of the sun in cm
sig_h2 = 2e-15; # noqa - cross section of H2 [cm^2]
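# Illustrative consistency check (a sketch, not part of the original module):
# with these constants, the Keplerian orbital period at 1 AU comes out close
# to one year: 2 * pi * np.sqrt(AU**3 / (Grav * M_sun)) / year ~= 1.0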
|
birnstiel/two-pop-py
|
twopoppy/const.py
|
Python
|
gpl-3.0
| 819
|
# -*- coding: utf-8 -*-
"""Public section, including homepage and signup."""
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask_login import login_required, login_user, logout_user
from haha.extensions import login_manager
from haha.public.forms import LoginForm
from haha.user.forms import RegisterForm
from haha.user.models import User
from haha.utils import flash_errors
blueprint = Blueprint('public', __name__, static_folder='../static')
@login_manager.user_loader
def load_user(user_id):
"""Load user by ID."""
return User.get_by_id(int(user_id))
@blueprint.route('/', methods=['GET', 'POST'])
def home():
"""Home page."""
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash('You are logged in.', 'success')
redirect_url = request.args.get('next') or url_for('user.members')
return redirect(redirect_url)
else:
flash_errors(form)
return render_template('public/home.html', form=form)
@blueprint.route('/logout/')
@login_required
def logout():
"""Logout."""
logout_user()
flash('You are logged out.', 'info')
return redirect(url_for('public.home'))
@blueprint.route('/register/', methods=['GET', 'POST'])
def register():
"""Register new user."""
form = RegisterForm(request.form)
if form.validate_on_submit():
User.create(username=form.username.data, email=form.email.data, password=form.password.data, active=True)
flash('Thank you for registering. You can now log in.', 'success')
return redirect(url_for('public.home'))
else:
flash_errors(form)
return render_template('public/register.html', form=form)
@blueprint.route('/about/')
def about():
"""About page."""
form = LoginForm(request.form)
return render_template('public/about.html', form=form)
|
starduliang/haha
|
haha/public/views.py
|
Python
|
bsd-3-clause
| 1,978
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on
the `iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for Documentation merge by Jaques Grobler
# License: BSD
import numpy as np
import pylab as pl
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure(1, figsize=(4, 3))
pl.pcolormesh(xx, yy, Z, cmap=pl.cm.Paired)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=pl.cm.Paired)
pl.xlabel('Sepal length')
pl.ylabel('Sepal width')
pl.xlim(xx.min(), xx.max())
pl.ylim(yy.min(), yy.max())
pl.xticks(())
pl.yticks(())
pl.show()
|
jmargeta/scikit-learn
|
examples/linear_model/plot_iris_logistic.py
|
Python
|
bsd-3-clause
| 1,647
|
# -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2014 Yorik van Havre <yorik@uncreated.net> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import FreeCAD
import FreeCADGui
import Path
from PySide import QtCore
__doc__ = """Path Hop object and FreeCAD command"""
# Qt translation handling
def translate(context, text, disambig=None):
return QtCore.QCoreApplication.translate(context, text, disambig)
class ObjectHop:
def __init__(self, obj):
obj.addProperty("App::PropertyLink", "NextObject", "Path", QtCore.QT_TRANSLATE_NOOP("App::Property","The object to be reached by this hop"))
obj.addProperty("App::PropertyDistance", "HopHeight", "Path", QtCore.QT_TRANSLATE_NOOP("App::Property","The Z height of the hop"))
obj.Proxy = self
def __getstate__(self):
return None
def __setstate__(self, state):
return None
def execute(self, obj):
nextpoint = FreeCAD.Vector()
if obj.NextObject:
if obj.NextObject.isDerivedFrom("Path::Feature"):
# look for the first position of the next path
for c in obj.NextObject.Path.Commands:
if c.Name in ["G0", "G00", "G1", "G01", "G2", "G02", "G3", "G03"]:
nextpoint = c.Placement.Base
break
# absolute coords, millimeters, cancel offsets
output = "G90\nG21\nG40\n"
# go up to the given height
output += "G0 Z" + str(obj.HopHeight.Value) + "\n"
# go horizontally to the position of nextpoint
output += "G0 X" + str(nextpoint.x) + " Y" + str(nextpoint.y) + "\n"
# print output
path = Path.Path(output)
obj.Path = path
class ViewProviderPathHop:
def __init__(self, vobj):
self.Object = vobj.Object
vobj.Proxy = self
def attach(self, vobj):
self.Object = vobj.Object
def getIcon(self):
return ":/icons/Path_Hop.svg"
def __getstate__(self):
return None
def __setstate__(self, state):
return None
class CommandPathHop:
def GetResources(self):
return {'Pixmap': 'Path_Hop',
'MenuText': QtCore.QT_TRANSLATE_NOOP("Path_Hop", "Hop"),
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Path_Hop", "Creates a Path Hop object")}
def IsActive(self):
if FreeCAD.ActiveDocument is not None:
for o in FreeCAD.ActiveDocument.Objects:
if o.Name[:3] == "Job":
return True
return False
def Activated(self):
# check that the selection contains exactly what we want
selection = FreeCADGui.Selection.getSelection()
if len(selection) != 1:
FreeCAD.Console.PrintError(
translate("Path_Hop", "Please select one path object")+"\n")
return
if not selection[0].isDerivedFrom("Path::Feature"):
FreeCAD.Console.PrintError(
translate("Path_Hop", "The selected object is not a path")+"\n")
return
FreeCAD.ActiveDocument.openTransaction(
translate("Path_Hop", "Create Hop"))
FreeCADGui.addModule("PathScripts.PathHop")
FreeCADGui.addModule("PathScripts.PathUtils")
FreeCADGui.doCommand(
'obj = FreeCAD.ActiveDocument.addObject("Path::FeaturePython","Hop")')
FreeCADGui.doCommand('PathScripts.PathHop.ObjectHop(obj)')
FreeCADGui.doCommand(
'PathScripts.PathHop.ViewProviderPathHop(obj.ViewObject)')
FreeCADGui.doCommand(
'obj.NextObject = FreeCAD.ActiveDocument.' + selection[0].Name)
FreeCADGui.doCommand('PathScripts.PathUtils.addToJob(obj)')
FreeCAD.ActiveDocument.commitTransaction()
FreeCAD.ActiveDocument.recompute()
if FreeCAD.GuiUp:
# register the FreeCAD command
FreeCADGui.addCommand('Path_Hop', CommandPathHop())
FreeCAD.Console.PrintLog("Loading PathHop... done\n")
|
sanguinariojoe/FreeCAD
|
src/Mod/Path/PathScripts/PathHop.py
|
Python
|
lgpl-2.1
| 5,422
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
# Copyright (C) 2022 RERO.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio-Admin Flask extension."""
from __future__ import absolute_import, print_function
import warnings
import importlib_metadata
from flask_admin import Admin, AdminIndexView
from invenio_db import db
from werkzeug.utils import import_string
from . import config
from .views import protected_adminview_factory
class _AdminState(object):
"""State for Invenio-Admin."""
def __init__(self, app, admin, permission_factory, view_class_factory):
"""Initialize state.
:param app: The Flask application.
:param admin: The Flask-Admin application.
:param permission_factory: The permission factory to restrict access.
:param view_class_factory: The view class factory to initialize them.
"""
# Create admin instance.
self.app = app
self.admin = admin
self.permission_factory = permission_factory
self.view_class_factory = view_class_factory
def register_view(self, view_class, *args, **kwargs):
"""Register an admin view on this admin instance.
:param view_class: The view class name passed to the view factory.
        :param args: Positional arguments for the view class.
:param kwargs: Keyword arguments to view class.
"""
protected_view_class = self.view_class_factory(view_class)
if 'endpoint' not in kwargs:
kwargs['endpoint'] = view_class(*args, **kwargs).endpoint
self.admin.add_view(protected_view_class(*args, **kwargs))
def load_entry_point_group(self, entry_point_group):
"""Load administration interface from entry point group.
:param str entry_point_group: Name of the entry point group.
"""
for ep in set(importlib_metadata.entry_points(
group=entry_point_group
)):
admin_ep = dict(ep.load())
keys = tuple(
k in admin_ep for k in ('model', 'modelview', 'view_class'))
if keys == (False, False, True):
self.register_view(
admin_ep.pop('view_class'),
*admin_ep.pop('args', []),
**admin_ep.pop('kwargs', {})
)
elif keys == (True, True, False):
warnings.warn(
'Usage of model and modelview kwargs are deprecated in '
'favor of view_class, args and kwargs.',
PendingDeprecationWarning
)
self.register_view(
admin_ep.pop('modelview'),
admin_ep.pop('model'),
admin_ep.pop('session', db.session),
**admin_ep
)
else:
raise Exception(
'Admin entry point dictionary must contain '
'either "view_class" OR "model" and "modelview" keys.')
class InvenioAdmin(object):
"""Invenio-Admin extension."""
def __init__(self, app=None, **kwargs):
"""Invenio-Admin extension initialization.
:param app: The Flask application. (Default: ``None``)
:param kwargs: Passed to :meth:`init_app`.
"""
if app:
self._state = self.init_app(app, **kwargs)
def init_app(self,
app,
entry_point_group='invenio_admin.views',
permission_factory=None,
view_class_factory=protected_adminview_factory,
index_view_class=AdminIndexView):
"""Flask application initialization.
:param app: The Flask application.
:param entry_point_group: Name of entry point group to load
views/models from. (Default: ``'invenio_admin.views'``)
:param permission_factory: Default permission factory to use when
protecting an admin view. (Default:
:func:`~.permissions.admin_permission_factory`)
:param view_class_factory: Factory for creating admin view classes on
the fly. Used to protect admin views with authentication and
authorization. (Default:
:func:`~.views.protected_adminview_factory`)
:param index_view_class: Specify administrative interface index page.
(Default: :class:`flask_admin.base.AdminIndexView`)
:param kwargs: Passed to :class:`flask_admin.base.Admin`.
:returns: Extension state.
"""
self.init_config(app)
default_permission_factory = app.config['ADMIN_PERMISSION_FACTORY']
permission_factory = permission_factory or \
import_string(default_permission_factory)
# Create administration app.
admin = Admin(
app,
name=app.config['ADMIN_APPNAME'],
template_mode=app.config['ADMIN_TEMPLATE_MODE'],
index_view=view_class_factory(index_view_class)(),
)
@app.before_first_request
def lazy_base_template():
"""Initialize admin base template lazily."""
base_template = app.config.get('ADMIN_BASE_TEMPLATE')
if base_template:
admin.base_template = base_template
# Create admin state
state = _AdminState(app, admin, permission_factory, view_class_factory)
if entry_point_group:
state.load_entry_point_group(entry_point_group)
app.extensions['invenio-admin'] = state
return state
@staticmethod
def init_config(app):
"""Initialize configuration.
:param app: The Flask application.
"""
# Set default configuration
for k in dir(config):
if k == 'ADMIN_BASE_TEMPLATE' and getattr(config, k) is None:
continue
if k.startswith('ADMIN_'):
app.config.setdefault(k, getattr(config, k))
def __getattr__(self, name):
"""Proxy to state object.
:param name: Attribute name of the state.
"""
return getattr(self._state, name, None)
|
inveniosoftware/invenio-admin
|
invenio_admin/ext.py
|
Python
|
mit
| 6,287
|
#!/usr/bin/python
#
# Copyright (C) 2016 sssd-qe contributors.
#
from setuptools import setup
REQUIRES = [
'paramiko',
'PyYAML',
'python-ldap',
'pytest_multihost',
'pytest']
with open('README.rst', 'r') as f:
README = f.read()
setup_args = dict(
name='sssd.testlib',
version='0.1-11',
description='System Services Security Daemon python test suite',
long_description=README,
author=u'SSSD QE Team',
url='http://git.app.eng.bos.redhat.com/git/sssd-qe-tests.git/',
packages=[
'sssd',
'sssd.testlib',
'sssd.testlib.common',
],
package_data={'': ['LICENSE']},
install_requires=REQUIRES,
license='GNU GPL v3.0',
classifiers=(
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
),
)
if __name__ == '__main__':
setup(**setup_args)
|
npmccallum/sssd
|
src/tests/python/setup.py
|
Python
|
gpl-3.0
| 871
|
# This file is part of EventGhost.
# Copyright (C) 2005 Lars-Peter Voss <bitmonster@eventghost.org>
#
# EventGhost is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# EventGhost is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EventGhost; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import eg
import wx
import base64
import pickle
from ActionItem import ActionItem
from TreeItem import TreeItem
class PluginItem(ActionItem):
xmlTag = "Plugin"
icon = eg.Icons.PLUGIN_ICON
isRenameable = False
info = None
def GetData(self):
attr, text = TreeItem.GetData(self)
del attr[0]
attr.append(('File', self.pluginFile))
attr.append(('Identifier', self.executable.info.evalName))
text = base64.b64encode(pickle.dumps(self.info.args, 2))
return attr, text
def __init__(self, parent, node):
TreeItem.__init__(self, parent, node)
if node.text:
try:
args = pickle.loads(base64.b64decode(node.text))
            except Exception:
args = ()
else:
args = ()
ident = node.attrib.get('identifier', None)
pluginStr = node.attrib['file']
self.pluginFile = pluginStr
self.info = info = eg.PluginInfo.Open(pluginStr, ident, args, self)
self.name = eg.text.General.pluginLabel % info.label
if info.icon != self.icon:
self.icon = eg.Icons.PluginSubIcon(info.icon)
#self.icon = info.icon
self.executable = info.instance
def GetLabel(self):
return self.name
def GetTypeName(self):
return self.executable.info.name
@eg.LogIt
def RestoreState(self):
if self.isEnabled:
eg.actionThread.Call(self.info.Start)
def SetAttributes(self, tree, itemId):
if self.info.lastException or self.info.initFailed:
tree.SetItemTextColour(itemId, eg.colour.pluginError)
def _SetColour(self, colour):
if self.HasValidId():
self.tree.SetItemTextColour(self.id, colour)
def SetErrorState(self):
wx.CallAfter(self._SetColour, eg.colour.pluginError)
def ClearErrorState(self):
wx.CallAfter(self._SetColour, eg.colour.treeItem)
def Refresh(self):
pass
def Execute(self):
if not self.isEnabled:
return None, None
if eg.config.logActions:
self.Print(self.name)
if self.shouldSelectOnExecute:
#self.Select()
wx.CallAfter(self.Select)
eg.indent += 1
self.info.Start()
eg.indent -= 1
eg.result = self.executable
return None, None
def Enable(self, flag=True):
ActionItem.Enable(self, flag)
if flag:
eg.actionThread.Call(self.info.Start)
else:
eg.actionThread.Call(self.info.Stop)
def _Delete(self):
info = self.info
def DoIt():
info.Close()
info.instance.OnDelete()
info.RemovePluginInstance()
eg.actionThread.Call(DoIt)
ActionItem._Delete(self)
self.executable = None
self.info = None
def AskDelete(self):
actionItemCls = self.document.ActionItem
def SearchFunc(obj):
if obj.__class__ == actionItemCls:
if obj.executable and obj.executable.plugin == self.executable:
return True
return None
if self.root.Traverse(SearchFunc) is not None:
eg.MessageBox(
eg.text.General.deletePlugin,
eg.APP_NAME,
wx.NO_DEFAULT|wx.OK|wx.ICON_EXCLAMATION
)
return False
if not TreeItem.AskDelete(self):
return False
return True
def AskCut(self):
return self.AskDelete()
def NeedsStartupConfiguration(self):
if self.info.instance.Configure.im_func != eg.PluginBase.Configure.im_func:
return True
return False
@eg.LogIt
def ShowHelp(self, parent=None):
if self.helpDialog:
self.helpDialog.Raise()
return
plugin = self.info.instance
self.helpDialog = eg.HtmlDialog(
parent,
eg.text.General.pluginLabel % plugin.name,
eg.Utils.MergeUrl(plugin.description, plugin.info.url),
plugin.info.icon.GetWxIcon(),
basePath=plugin.info.GetPath()
)
def OnClose(dummyEvent):
self.helpDialog.Destroy()
del self.helpDialog
self.helpDialog.Bind(wx.EVT_CLOSE, OnClose)
self.helpDialog.okButton.Bind(wx.EVT_BUTTON, OnClose)
self.helpDialog.Show()
def GetArgs(self):
return self.info.args
@eg.LogIt
def SetArgs(self, args):
info = self.info
if not info.lastException and args == self.info.args:
return
self.info.args = args
label = info.instance.GetLabel(*args)
if label != info.label:
info.label = label
self.name = eg.text.General.pluginLabel % label
if self.id:
self.tree.SetItemText(
self.id,
self.name
)
self.RefreshAllVisibleActions()
if self.isEnabled:
eg.actionThread.Call(self.info.Stop)
eg.actionThread.Call(self.info.Start)
def RefreshAllVisibleActions(self):
"""
Calls Refresh() for all currently visible actions of this plugin.
"""
actionItemCls = self.document.ActionItem
plugin = self.info.instance
def Traverse(item):
if item.__class__ == actionItemCls:
if item.executable.plugin == plugin:
item.Refresh()
else:
if item.childs and item.isExpanded:
for child in item.childs:
Traverse(child)
Traverse(self.root)
|
garbear/EventGhost
|
eg/Classes/PluginItem.py
|
Python
|
gpl-2.0
| 6,747
|
from django.test import TestCase
from django.conf import settings
from restclients.dao import *
import re
class SWSTestFileDAO(TestCase):
def test_dao_response(self):
with self.settings(RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File'):
dao = SWS_DAO()
response = dao.getURL("/file_doesnt_exist", {})
self.assertEqual(response.status, 404, "File DAO returns a 404 for missing files")
response = dao.getURL("/student/", {})
self.assertEqual(response.status, 200, "File DAO returns 200 for found files")
html = response.data
            if not re.search('student/v4', html):
                self.fail("Doesn't contain a link to v4")
            if re.search('student/v2', html):
                self.fail("Shouldn't contain a link to v2")
|
UWIT-IAM/uw-restclients
|
restclients/test/sws/file_implementation/dao.py
|
Python
|
apache-2.0
| 845
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Script to search vcf files for mutations within specific coordinates
# Input:
# -A vcf file
#
# Output:
# -A Roary-like file with mutations sorted in rows, strains as columns and presence/absence in cells
# -Columns: Chromosome, Position, variant (eg C->T), type (eg missense, synonymous, frameshift etc)
# Reading VCF
# File metainfo starts as ##key=value
# These are always formed and should be caught
# example ##fileformat=VCFv4.3 - give warning if format is off
# Columns 8 MANDATORY
# CHROM POS ID REF ALT QUAL FILTER INFO
# OPTIONAL COLUMNS
# FORMAT SAMPLE1 SAMPLE2 etc
# All data lines are tab-delimited
# CHROM : string, no whitespace
# POS : integer. Can have many lines with same pos. Pos=0 or N+1 for telomere positions
# ID : semicolon-delimited list of strings
# REF : string, ACGTN (can be multiple)
# ALT : comma-separated list, ACGTN* (* = allele is missing due to overlapping deletion)
# (NOTE: Suggest splitting ALT variants into different lines to preserve binarity)
# QUAL : float
# FILTER : PASS or semicolon-delimited list
# INFO : semicolon-delimited list of key=value pairs or flags
# FORMAT (optional) : colon-delimited list.
# Genotype fields - Genotype always first field
# GT encoded as allele values separated by | or /. 0 = reference. 1 = first ALT. 2 = second alt etc
# NOTE: Haploid calls (bacteria) have only 1 value
# NOTE: / means genotype unphased. | means genotype phased
# INFO field SVtypes : DELetion, INSertion, DUPlication, INVersion, CNV
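# Illustrative sketch of the conversion (hypothetical data): a tab-delimited
# data line such as
#   chr1  1234  .  C  T  99.0  PASS  TYPE=snp  GT  1  0
# becomes one row of the presence/absence matrix, with "1" for samples
# carrying the ALT allele and "0" otherwise.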
import sys
import argparse
import os
import csv
import re
import traceback
__version__ = '0.1b'
__author__ = 'Ola Brynildsrud'
__credits__ = ['Ola Brynildsrud']
__email__ = 'olbb@fhi.no'
def main():
"""
Converts VCF files (version 4.x) to Scoary format
"""
##########################################################################
# Parse command line arguments
parser = argparse.ArgumentParser(
description='This script takes in vcf files and creates a '
'presence/absence matrix of mutations in the '
'Roary/Scoary format',
epilog='by Ola Brynildsrud (olbb@fhi.no)')
parser.add_argument(
'--out',
action='store',
default='./mutations_presence_absence.csv',
help='The path to the output file')
parser.add_argument(
'--types',
action='store',
default='ALL',
help='The types of variants to include in the output. NOTE: This '
'works if TYPE=XX can be found in the INFO column of the vcf '
'file. The special keyword ALL includes all types. This is '
'the default setting. Common types are snp, mnp, ins, del '
'and complex. Give as comma-separated list. '
'Example: --types snp,ins,del')
parser.add_argument(
'--version',
action='version',
version=__version__)
parser.add_argument(
'--force',
action='store_true',
default=False,
help='Force overwriting of output file. (If it already '
'exists)')
parser.add_argument(
'vcf',
action='store',
metavar='<VCF_file>',
help='The VCF file to convert to Roary/Scoary format')
args = parser.parse_args()
if args.types is not "ALL":
args.types = args.types.split(",")
if os.path.isfile(args.out) and not args.force:
sys.exit("Outfile already exists. Change name of outfile or "
"run with --force")
if not os.path.isfile(args.vcf):
sys.exit("Unable to locate input file %s" % args.vcf)
    with open(args.vcf, 'r') as vcffile, open(args.out, 'w') as outfile:
lines = csv.reader(vcffile, delimiter='\t', quotechar='"')
metainfo = {"##INFO" : {},
"##FILTER" : {},
"##FORMAT" : {},
"##ALT" : {},
"##contig" : {},
"##META" : {},
"##SAMPLE" : {},
"##PEDIGREE" : {}
}
#for line in lines:
while True:
try:
line = next(lines)
except StopIteration:
            traceback.print_exc()
sys.exit("ERROR: There appears to be only metainformation "
"(lines starting with ##) in your VCF file.")
# Get metainfo from file
if line[0][:2] == '##':
infoline = re.split('=',line[0], maxsplit=1)
# Capture list output for complex tags
if infoline[0] in metainfo:
ID=re.search(r'ID=(\w+)',infoline[1]).group(1)
infolist = re.split(',(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)',infoline[1].strip("<>"))
metainfo[infoline[0]][ID] = {}
# Enter all elements in infolist into appropriate dic
for e in infolist:
esplit = e.split("=")
metainfo[infoline[0]][ID][esplit[0]] = esplit[1]
else:
metainfo[infoline[0]] = infoline[1]
else:
# Have reached the data section of the file
data = {"header": line}
break
try:
vcfversion = metainfo["##fileformat"].split("v")[1]
if int(vcfversion[0]) != 4:
print("WARNING: A VCF format other than 4.x detected."
" File parsing may proceed with errors.")
else:
print("VCF version %s detected" % vcfversion)
    except Exception:
        print("WARNING: Could not detect VCF format. Expected "
              "v4.x. File parsing may proceed with errors.")
        traceback.print_exc()
# Check that genotype fields have a single allele
if metainfo["##FORMAT"]["GT"]["Number"] != "1":
sys.exit("ERROR: Expected a single allele per genotype. Scoary "
"only works for haploid organisms.")
# Have now caught all metainformation. Now get column information
#header = next(line)
#print header
data["header"] = data["header"][:9] + ["DUMMY"] + data["header"][9:]
outfile.write(','.join('"' + c + '"' for c in data["header"]) + '\n')
while True:
try:
line = next(lines)
except StopIteration:
print("Reached the end of the file")
sys.exit(0)
# Check if line is allowed:
if args.types is not "ALL":
vartype = re.search(r'TYPE=(\w+)',line[7]).group(1)
if vartype not in args.types:
continue
# Split line if ALT contains more than one variant
if "," in line[4]:
orgline = line[:]
alts = line[4].split(",")
c = 1
for a in alts:
newline = orgline[:]
newline[4] = a
# Only get GT
newline[9:] = \
[cell.split(":")[0] for cell in orgline[9:]]
# Fix dummy comparisons
newline[9:] = fixdummy(newline[9:], c)
newline = newline[:9] + ["True"] + newline[9:]
c += 1
writeLine(newline, outfile)
# Genotype fields need to be 0 or 1
# GT is always first in colon-separated list
else:
newline = line[:9] + ["False"] + line[9:]
writeLine(newline, outfile)
def writeLine(line, outfile):
writeline = line[:9] + [cell.split(":")[0] for cell in line[9:]]
outfile.write(','.join('"' + c + '"' for c in writeline) + '\n')
def fixdummy(line,c):
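    # Worked example (from the logic below): fixdummy(['0', '1', '2', '.'], 2)
    # returns ['0', '0', '1', '0'] -- only the allele matching variant index c
    # is marked present, and missing data ('.') is treated as absent.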
newline = line[:]
try:
for x in range(len(line)):
if line[x] == ".":
# Missing data get entered as reference / no presence
newline[x] = "0"
elif int(line[x]) == c:
newline[x] = "1"
else:
newline[x] = "0"
except ValueError:
print(newline, c)
sys.exit(-1)
return newline
########
# MAIN #
########
if __name__ == '__main__':
main()
|
AdmiralenOla/Scoary
|
scoary/vcf2scoary.py
|
Python
|
gpl-3.0
| 8,390
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystoneclient.auth.identity.generic import password
from keystoneclient.auth.identity import v2
from keystoneclient.auth.identity import v3
from keystoneclient.tests.unit.auth import utils
class PasswordTests(utils.GenericPluginTestCase):
PLUGIN_CLASS = password.Password
V2_PLUGIN_CLASS = v2.Password
V3_PLUGIN_CLASS = v3.Password
def new_plugin(self, **kwargs):
kwargs.setdefault('username', uuid.uuid4().hex)
kwargs.setdefault('password', uuid.uuid4().hex)
return super(PasswordTests, self).new_plugin(**kwargs)
def test_with_user_domain_params(self):
self.stub_discovery()
self.assertCreateV3(domain_id=uuid.uuid4().hex,
user_domain_id=uuid.uuid4().hex)
def test_v3_user_params_v2_url(self):
self.stub_discovery(v3=False)
self.assertDiscoveryFailure(user_domain_id=uuid.uuid4().hex)
def test_options(self):
opts = [o.name for o in self.PLUGIN_CLASS.get_options()]
allowed_opts = ['user-name',
'user-domain-id',
'user-domain-name',
'user-id',
'password',
'domain-id',
'domain-name',
'tenant-id',
'tenant-name',
'project-id',
'project-name',
'project-domain-id',
'project-domain-name',
'trust-id',
'auth-url']
self.assertEqual(set(allowed_opts), set(opts))
self.assertEqual(len(allowed_opts), len(opts))
|
ging/python-keystoneclient
|
keystoneclient/tests/unit/auth/test_password.py
|
Python
|
apache-2.0
| 2,262
|
from __future__ import print_function
from nltk import pos_tag, word_tokenize
import textblob, json
text = []
usda = {}
with open('USDA-food-db/usda-reduced.json') as f:
for line in f:
ingredient = json.loads(line)
usda[ingredient["name"]["long"]] = ingredient
translation = {}
print("Getting text")
for ingredient in usda:
text.append(ingredient)
text.append(ingredient.lower())
text.append("I eat "+ingredient+".")
text.append("I eat "+ingredient.lower()+".")
text.append(ingredient+" tastes good.")
text.append(ingredient.lower()+" tastes good.")
translation[ingredient.lower()] = ingredient
tags = []
done = 0.0
print("Getting tags")
for phrase in text:
tags.append((phrase, "NLTK", [tag[0].lower() for tag in pos_tag(word_tokenize(phrase)) if tag[1] in ["NN","NNP","NNS", "NNPS", "NP"] and len(tag[0].lower())>1]))
tags.append((phrase, "TextBlob", [tag[0].lower() for tag in textblob.TextBlob(phrase).tags if tag[1] in ["NN","NNP","NNS", "NNPS", "NP"] and len(tag[0].lower())>1]))
done += 1
print("%.1f %%" % (100*done/len(text)))
nltk_nouns = {}
textblob_nouns = {}
print("Grouping tags")
for tag in tags:
if "I eat " in tag[0]:
key = tag[0][6:-1].lower()
elif " tastes good." in tag[0]:
key = tag[0][:-13].lower()
else:
key = tag[0].lower()
if tag[1] == "NLTK":
if key not in nltk_nouns:
nltk_nouns[key] = set()
nltk_nouns[key].update(tag[2])
else:
if key not in textblob_nouns:
textblob_nouns[key] = set()
textblob_nouns[key].update(tag[2])
print("Adding tags")
done = 0.0
for ingredient in nltk_nouns:
usda[translation[ingredient]]["name"]["split"] = list(nltk_nouns[ingredient].union(textblob_nouns[ingredient]))
print("Saving USDA")
with open("USDA-food-db/usda-full.json", 'wb+') as f:
for ingredient_name, ingredient in usda.iteritems():
f.write(json.dumps(ingredient)+'\n')
|
moiri-gamboni/diet
|
recipe-parser/usda_nouns.py
|
Python
|
gpl-3.0
| 1,840
|
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from hacking import core
from neutron_lib.hacking import checks
def flake8ext(f):
"""Decorator to indicate flake8 extension.
    This is borrowed from hacking.core.flake8ext(), but for now it is used
    only by unit tests to identify neutron's flake8 extensions.
"""
f.name = __name__
return f
# Guidelines for writing new hacking checks
#
# - Use only for Neutron specific tests. OpenStack general tests
# should be submitted to the common 'hacking' module.
# - Pick numbers in the range N3xx. Find the current test with
# the highest allocated number and then pick the next value.
# - Keep the test method code in the source file ordered based
# on the N3xx value.
# - List the new rule in the top level HACKING.rst file
# - Add test cases for each new rule to
# neutron/tests/unit/hacking/test_checks.py
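# A hypothetical skeleton for a new check following these guidelines
# (illustrative only, not a registered rule):
#
#     @flake8ext
#     def check_example(logical_line, filename):
#         """N3xx - One-line description of the rule."""
#         if 'neutron/tests/' in filename and 'bad_pattern' in logical_line:
#             yield (0, "N3xx: message describing the violation")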
unittest_imports_dot = re.compile(r"\bimport[\s]+unittest\b")
unittest_imports_from = re.compile(r"\bfrom[\s]+unittest\b")
filter_match = re.compile(r".*filter\(lambda ")
tests_imports_dot = re.compile(r"\bimport[\s]+neutron.tests\b")
tests_imports_from1 = re.compile(r"\bfrom[\s]+neutron.tests\b")
tests_imports_from2 = re.compile(r"\bfrom[\s]+neutron[\s]+import[\s]+tests\b")
@flake8ext
def check_assert_called_once_with(logical_line, filename):
"""N322 - Try to detect unintended calls of nonexistent mock methods like:
assert_called_once
assertCalledOnceWith
assert_has_called
called_once_with
"""
if 'neutron/tests/' in filename:
if '.assert_called_once_with(' in logical_line:
return
uncased_line = logical_line.lower().replace('_', '')
check_calls = ['.assertcalledonce', '.calledoncewith']
if any(x for x in check_calls if x in uncased_line):
msg = ("N322: Possible use of no-op mock method. "
"please use assert_called_once_with.")
yield (0, msg)
if '.asserthascalled' in uncased_line:
msg = ("N322: Possible use of no-op mock method. "
"please use assert_has_calls.")
yield (0, msg)
@flake8ext
def check_asserttruefalse(logical_line, filename):
"""N328 - Don't use assertEqual(True/False, observed)."""
if 'neutron/tests/' in filename:
if re.search(r"assertEqual\(\s*True,[^,]*(,[^,]*)?", logical_line):
msg = ("N328: Use assertTrue(observed) instead of "
"assertEqual(True, observed)")
yield (0, msg)
if re.search(r"assertEqual\([^,]*,\s*True(,[^,]*)?", logical_line):
msg = ("N328: Use assertTrue(observed) instead of "
"assertEqual(True, observed)")
yield (0, msg)
if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?", logical_line):
msg = ("N328: Use assertFalse(observed) instead of "
"assertEqual(False, observed)")
yield (0, msg)
if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?", logical_line):
msg = ("N328: Use assertFalse(observed) instead of "
"assertEqual(False, observed)")
yield (0, msg)
@flake8ext
def check_assertempty(logical_line, filename):
"""N330 - Enforce using assertEqual parameter ordering in case of empty
objects.
"""
if 'neutron/tests/' in filename:
msg = ("N330: Use assertEqual(*empty*, observed) instead of "
"assertEqual(observed, *empty*). *empty* contains "
"{}, [], (), set(), '', \"\"")
empties = r"(\[\s*\]|\{\s*\}|\(\s*\)|set\(\s*\)|'\s*'|\"\s*\")"
reg = r"assertEqual\(([^,]*,\s*)+?%s\)\s*$" % empties
if re.search(reg, logical_line):
yield (0, msg)
@flake8ext
def check_assertisinstance(logical_line, filename):
"""N331 - Enforce using assertIsInstance."""
if 'neutron/tests/' in filename:
if re.search(r"assertTrue\(\s*isinstance\(\s*[^,]*,\s*[^,]*\)\)",
logical_line):
msg = ("N331: Use assertIsInstance(observed, type) instead "
"of assertTrue(isinstance(observed, type))")
yield (0, msg)
@flake8ext
def check_assertequal_for_httpcode(logical_line, filename):
"""N332 - Enforce correct oredering for httpcode in assertEqual."""
msg = ("N332: Use assertEqual(expected_http_code, observed_http_code) "
"instead of assertEqual(observed_http_code, expected_http_code)")
if 'neutron/tests/' in filename:
if re.search(r"assertEqual\(\s*[^,]*,[^,]*HTTP[^\.]*\.code\s*\)",
logical_line):
yield (0, msg)
@flake8ext
def check_oslo_i18n_wrapper(logical_line, filename, noqa):
"""N340 - Check for neutron.i18n usage.
Okay(neutron/foo/bar.py): from neutron._i18n import _
Okay(neutron_lbaas/foo/bar.py): from neutron_lbaas._i18n import _
N340(neutron/foo/bar.py): from neutron.i18n import _
N340(neutron_lbaas/foo/bar.py): from neutron_lbaas.i18n import _
N340(neutron_lbaas/foo/bar.py): from neutron.i18n import _
N340(neutron_lbaas/foo/bar.py): from neutron._i18n import _
Okay(neutron/foo/bar.py): from neutron.i18n import _ # noqa
"""
if noqa:
return
split_line = logical_line.split()
modulename = os.path.normpath(filename).split('/')[0]
bad_i18n_module = '%s.i18n' % modulename
if (len(split_line) > 1 and split_line[0] in ('import', 'from')):
if (split_line[1] == bad_i18n_module or
modulename != 'neutron' and split_line[1] in ('neutron.i18n',
'neutron._i18n')):
msg = ("N340: %(found)s is found. Use %(module)s._i18n instead."
% {'found': split_line[1], 'module': modulename})
yield (0, msg)
@flake8ext
def check_builtins_gettext(logical_line, tokens, filename, lines, noqa):
"""N341 - Check usage of builtins gettext _().
Okay(neutron/foo.py): from neutron._i18n import _\n_('foo')
N341(neutron/foo.py): _('foo')
Okay(neutron/_i18n.py): _('foo')
Okay(neutron/i18n.py): _('foo')
Okay(neutron/foo.py): _('foo') # noqa
"""
if noqa:
return
modulename = os.path.normpath(filename).split('/')[0]
if '%s/tests' % modulename in filename:
return
if os.path.basename(filename) in ('i18n.py', '_i18n.py'):
return
token_values = [t[1] for t in tokens]
i18n_wrapper = '%s._i18n' % modulename
if '_' in token_values:
i18n_import_line_found = False
for line in lines:
split_line = [elm.rstrip(',') for elm in line.split()]
if (len(split_line) > 1 and split_line[0] == 'from' and
split_line[1] == i18n_wrapper and
'_' in split_line):
i18n_import_line_found = True
break
if not i18n_import_line_found:
msg = ("N341: _ from python builtins module is used. "
"Use _ from %s instead." % i18n_wrapper)
yield (0, msg)
@core.flake8ext
@core.off_by_default
def check_unittest_imports(logical_line):
"""N334 - Use unittest2 instead of unittest"""
if (re.match(unittest_imports_from, logical_line) or
re.match(unittest_imports_dot, logical_line)):
msg = "N334: '%s' must be used instead of '%s'." % (
logical_line.replace('unittest', 'unittest2'), logical_line)
yield (0, msg)
@flake8ext
def check_no_imports_from_tests(logical_line, filename, noqa):
"""N343 Production code must not import from neutron.tests.*
"""
msg = ("N343 Production code must not import from neutron.tests.*")
if noqa:
return
if 'neutron/tests/' in filename:
return
for regex in tests_imports_dot, tests_imports_from1, tests_imports_from2:
if re.match(regex, logical_line):
            yield (0, msg)
@flake8ext
def check_python3_no_filter(logical_line):
"""N344 - Use list comprehension instead of filter(lambda)."""
msg = ("N344: Use list comprehension instead of "
"filter(lambda obj: test(obj), data) on python3.")
if filter_match.match(logical_line):
        yield (0, msg)
@flake8ext
def check_no_sqlalchemy_event_import(logical_line, filename, noqa):
"""N346 - Use neutron.db.api.sqla_listen instead of sqlalchemy event."""
if noqa:
return
is_import = (logical_line.startswith('import') or
logical_line.startswith('from'))
if not is_import:
return
for kw in ('sqlalchemy', 'event'):
if kw not in logical_line:
return
yield (0, "N346: Register sqlalchemy events through "
"neutron.db.api.sqla_listen so they can be cleaned up between "
"unit tests")
def factory(register):
checks.factory(register)
register(check_assert_called_once_with)
register(check_asserttruefalse)
register(check_assertempty)
register(check_assertisinstance)
register(check_assertequal_for_httpcode)
register(check_oslo_i18n_wrapper)
register(check_builtins_gettext)
register(check_unittest_imports)
register(check_no_imports_from_tests)
register(check_python3_no_filter)
register(check_no_sqlalchemy_event_import)
|
eayunstack/neutron
|
neutron/hacking/checks.py
|
Python
|
apache-2.0
| 9,976
|
import pytest
import os
import numpy as np
from threeML.plugins.XYLike import XYLike
from threeML import Model, DataList, JointLikelihood, PointSource
from threeML import BayesianAnalysis, Uniform_prior, Log_uniform_prior
from threeML.analysis_results import MLEResults, load_analysis_results, AnalysisResultsSet
from astromodels import Line, Gaussian
_cache = {}
# These are the same simulated dataset we use in the test of the XY plugin
x = np.linspace(0, 10, 50)
poiss_sig = [44, 43, 38, 25, 51, 37, 46, 47, 55, 36, 40, 32, 46, 37, 44, 42, 50, 48, 52, 47, 39, 55, 80, 93, 123, 135,
96, 74, 43, 49, 43, 51, 27, 32, 35, 42, 43, 49, 38, 43, 59, 54, 50, 40, 50, 57, 55, 47, 38, 64]
def _get_mle_analysis_results():
global _cache
if 'ar' in _cache:
return _cache['ar']
y = np.array(poiss_sig)
xy = XYLike("test", x, y, poisson_data=True)
fitfun = Line() + Gaussian()
fitfun.a_1.bounds = (-10, 10.0)
fitfun.b_1.bounds = (-100, 100.0)
fitfun.F_2 = 60.0
fitfun.F_2.bounds = (1e-3, 200.0)
fitfun.mu_2 = 5.0
fitfun.mu_2.bounds = (0.0, 100.0)
fitfun.sigma_2.bounds = (1e-3, 10.0)
model = Model(PointSource('fake',0.0, 0.0, fitfun))
data = DataList(xy)
jl = JointLikelihood(model, data)
_ = jl.fit()
ar = jl.results
# Cache it so we don't continue doing it
_cache['ar'] = ar
return ar
def _get_bayes_analysis_results():
global _cache
if 'arb' in _cache:
return _cache['arb']
y = np.array(poiss_sig)
xy = XYLike("test", x, y, poisson_data=True)
fitfun = Line() + Gaussian()
fitfun.a_1.bounds = (-10, 10.0)
fitfun.b_1.bounds = (-100, 100.0)
fitfun.F_2 = 60.0
fitfun.F_2.bounds = (1e-3, 200.0)
fitfun.mu_2 = 5.0
fitfun.mu_2.bounds = (0.0, 100.0)
fitfun.sigma_2.bounds = (1e-3, 10.0)
model = Model(PointSource('fake',0.0, 0.0, fitfun))
data = DataList(xy)
# Exactly the same can be done for a Bayesian analysis
# Let's run it first
ar = _get_mle_analysis_results()
for parameter in ar.optimized_model:
model[parameter.path].value = parameter.value
model.fake.spectrum.main.composite.a_1.set_uninformative_prior(Uniform_prior)
model.fake.spectrum.main.composite.b_1.set_uninformative_prior(Uniform_prior)
model.fake.spectrum.main.composite.F_2.set_uninformative_prior(Log_uniform_prior)
model.fake.spectrum.main.composite.mu_2.set_uninformative_prior(Uniform_prior)
model.fake.spectrum.main.composite.sigma_2.set_uninformative_prior(Log_uniform_prior)
bs = BayesianAnalysis(model, data)
_ = bs.sample(20, 100, 1000)
arb = bs.results
_cache['arb'] = arb
return arb
def _results_are_same(res1, res2, bayes=False):
# Check that they are the same
if not bayes:
# Check covariance
assert np.allclose(res1.covariance_matrix, res2.covariance_matrix)
else:
# Check samples
        assert np.allclose(res1.samples, res2.samples)
frame1 = res1.get_data_frame()
frame2 = res2.get_data_frame()
# Remove the units (which cannot be checked with np.allclose)
unit1 = frame1.pop('unit')
unit2 = frame2.pop('unit')
assert np.allclose(frame1.values, frame2.values, rtol=0.15)
assert np.all(unit1 == unit2)
# Now check the values for the statistics
s1 = res1.optimal_statistic_values
s2 = res2.optimal_statistic_values
assert np.allclose(s1.values, s2.values)
def test_analysis_results_input_output():
ar = _get_mle_analysis_results() # type: MLEResults
temp_file = "__test_mle.fits"
ar.write_to(temp_file, overwrite=True)
ar_reloaded = load_analysis_results(temp_file)
os.remove(temp_file)
_results_are_same(ar, ar_reloaded)
def test_analysis_set_input_output():
ar = _get_mle_analysis_results()
ar2 = _get_mle_analysis_results()
analysis_set = AnalysisResultsSet([ar, ar2])
analysis_set.set_bins("testing", [-1, 1], [3, 5], unit = 's')
temp_file = "_analysis_set_test"
analysis_set.write_to(temp_file, overwrite=True)
analysis_set_reloaded = load_analysis_results(temp_file)
os.remove(temp_file)
# Test they are the same
assert len(analysis_set_reloaded) == len(analysis_set)
for res1, res2 in zip(analysis_set, analysis_set_reloaded):
_results_are_same(res1, res2)
def test_error_propagation():
ar = _get_mle_analysis_results()
# You can use the results for propagating errors non-linearly for analytical functions
p1 = ar.get_variates("fake.spectrum.main.composite.a_1")
p2 = ar.get_variates("fake.spectrum.main.composite.b_1")
print(p1)
print(p2)
res = p1 + p2
assert abs(res.value - (p1.value + p2.value)) / (p1.value + p2.value) < 0.01
# Make ratio with error 0
res = p1 / p1
low_b, hi_b = res.equal_tail_confidence_interval()
assert low_b == 1
assert hi_b == 1
# Now with a function
fitfun = ar.optimized_model.fake.spectrum.main.shape
arguments = {}
for par in fitfun.parameters.values():
if par.free:
this_name = par.name
this_variate = ar.get_variates(par.path)
# Do not use more than 1000 values (would make computation too slow for nothing)
if len(this_variate) > 1000:
this_variate = np.random.choice(this_variate, size=1000)
arguments[this_name] = this_variate
# Prepare the error propagator function
pp = ar.propagate(ar.optimized_model.fake.spectrum.main.shape.evaluate_at, **arguments)
new_variate = pp(5.0)
assert abs(new_variate.median - 130.0) < 20
low_b, hi_b = new_variate.equal_tail_confidence_interval()
assert abs(low_b - 120) < 20
assert abs(hi_b - 140) < 20
def test_bayesian_input_output():
rb1 = _get_bayes_analysis_results()
temp_file = "_test_bayes.fits"
rb1.write_to(temp_file, overwrite=True)
rb2 = load_analysis_results(temp_file)
os.remove(temp_file)
_results_are_same(rb1, rb2, bayes=True)
def test_corner_plotting():
ar = _get_bayes_analysis_results()
ar.corner_plot()
|
volodymyrss/3ML
|
threeML/test/test_analysis_results.py
|
Python
|
bsd-3-clause
| 6,185
|
"""
Gaussian quadrature utilities for use with the Python Active-subspaces Utility
Library.
"""
import numpy as np
import misc as mi
def r_hermite(N):
"""Recurrence coefficients for the Hermite orthogonal polynomials.
Parameters
----------
N : int
the number of recurrence coefficients
Returns
-------
ab : ndarray
an `N`-by-2 array of the recurrence coefficients
See Also
--------
utils.quadrature.jacobi_matrix
utils.quadrature.gauss_hermite
Notes
-----
This computation is inspired by Walter Gautschi's code at
https://www.cs.purdue.edu/archives/2002/wxg/codes/OPQ.html.
"""
if not isinstance(N, int):
raise TypeError('N must be an int')
if N <= 0:
raise ValueError('Parameters out of range.')
if N == 1:
return np.array([[0.0, 1.0]])
else:
n = np.array(range(1, N+1))
b = np.vstack((1.0, n.reshape((N, 1))))
a = np.zeros(b.shape)
ab = np.hstack((a, b))
return ab
def r_jacobi(N,l,r,a,b):
"""Recurrence coefficients for the Legendre orthogonal polynomials.
Parameters
----------
N : int
the number of recurrence coefficients
l : float
the left endpoint of the interval
r : float
the right endpoint of the interval
a : float
Jacobi weight parameter
b : float
Jacobi weight parameter
Returns
-------
ab : ndarray
an `N`-by-2 array of the recurrence coefficients
See Also
--------
utils.quadrature.jacobi_matrix
utils.quadrature.gauss_legendre
Notes
-----
This computation is inspired by Walter Gautschi's code at
https://www.cs.purdue.edu/archives/2002/wxg/codes/OPQ.html.
"""
if not isinstance(N, int):
raise TypeError('N must be an int')
if N <= 0:
raise ValueError('Parameters out of range.')
a0 = (b-a)/(a+b+2.0)
ab = np.zeros((N+1,2))
b2a2 = b**2 - a**2
s, o = (r-l)/2.0, l + (r-l)/2.0
# first row
ab[0,0] = s*a0 + o
ab[0,1] = 1
for k in range(1,N+1):
ab[k,0] = s*b2a2/((2*(k)+a+b)*(2*(k+1) + a+b)) + o
if k==1:
ab[k,1] = ((r-l)**2*(k)*(k+a)*(k+b)) / ((2.0*(k)+a+b)**2*(2.0*(k)+a+b+1))
else:
ab[k,1] = ((r-l)**2*(k)*(k+a)*(k+b)*(k+a+b)) / ((2.0*(k)+a+b)**2*(2.0*(k)+a+b+1)*(2.0*(k)+a+b-1))
return ab
def jacobi_matrix(ab):
"""Tri-diagonal Jacobi matrix of recurrence coefficients.
Parameters
----------
ab : ndarray
N-by-2 array of recurrence coefficients
Returns
-------
J : ndarray
(N-1)-by-(N-1) symmetric, tridiagonal Jacobi matrix associated with the
orthogonal polynomials
See Also
--------
utils.quadrature.r_hermite
utils.quadrature.gauss_hermite
Notes
-----
This computation is inspired by Walter Gautschi's code at
https://www.cs.purdue.edu/archives/2002/wxg/codes/OPQ.html.
"""
if len(ab.shape) != 2:
raise ValueError('ab must be 2 dimensional')
if ab.shape[1] != 2:
raise ValueError('ab must have two columns')
n = ab.shape[0] - 1
if n == 0:
return ab[0,0]
else:
J = np.zeros((n, n))
J[0,0] = ab[0,0]
J[0,1] = np.sqrt(ab[1,1])
for i in range(1,n-1):
J[i,i] = ab[i,0]
J[i,i-1] = np.sqrt(ab[i,1])
J[i,i+1] = np.sqrt(ab[i+1,1])
J[n-1,n-1] = ab[n-1,0]
J[n-1,n-2] = np.sqrt(ab[n-1,1])
return J
def gl1d(N):
"""One-dimensional Gauss-Legendre quadrature rule.
Parameters
----------
N : int
number of nodes in the quadrature rule
Returns
-------
x : ndarray
N-by-1 array of quadrature nodes
w : ndarray
N-by-1 array of quadrature weights
See Also
--------
utils.quadrature.gauss_legendre
Notes
-----
This computation is inspired by Walter Gautschi's code at
https://www.cs.purdue.edu/archives/2002/wxg/codes/OPQ.html.
"""
return g1d(N, 'Legendre')
def gh1d(N):
"""One-dimensional Gauss-Hermite quadrature rule.
Parameters
----------
N : int
number of nodes in the quadrature rule
Returns
-------
x : ndarray
N-by-1 array of quadrature nodes
w : ndarray
N-by-1 array of quadrature weights
See Also
--------
utils.quadrature.gauss_hermite
Notes
-----
This computation is inspired by Walter Gautschi's code at
https://www.cs.purdue.edu/archives/2002/wxg/codes/OPQ.html.
"""
return g1d(N, 'Hermite')
def g1d(N, quadtype):
"""One-dimensional Gaussian quadrature rule.
Parameters
----------
N : int
number of nodes in the quadrature rule
quadtype : str
type of quadrature rule {'Legendre', 'Hermite'}
Returns
-------
x : ndarray
N-by-1 array of quadrature nodes
w : ndarray
N-by-1 array of quadrature weights
See Also
--------
utils.quadrature.gauss_hermite
Notes
-----
This computation is inspired by Walter Gautschi's code at
https://www.cs.purdue.edu/archives/2002/wxg/codes/OPQ.html.
"""
if N > 1:
if quadtype == 'Hermite':
ab = r_hermite(N)
elif quadtype == 'Legendre':
ab = r_jacobi(N, -1, 1, 0, 0)
else:
raise ValueError('quadtype must be Legendre or Hermite')
J = jacobi_matrix(ab)
e, V = np.linalg.eig(J)
ind = np.argsort(e)
x = e[ind].reshape((N, 1))
x[np.fabs(x) < 1e-12] = 0.0
w = (V[0,ind]*V[0,ind]).reshape((N, 1))
else:
x, w = np.array([[0.0]]),np.array([[1.0]])
return x, w
def gauss_hermite(N):
"""Tensor product Gauss-Hermite quadrature rule.
Parameters
----------
N : int[]
number of nodes in each dimension of the quadrature rule
Returns
-------
x : ndarray
N-by-1 array of quadrature nodes
w : ndarray
N-by-1 array of quadrature weights
Notes
-----
This computation is inspired by Walter Gautschi's code at
https://www.cs.purdue.edu/archives/2002/wxg/codes/OPQ.html.
"""
if isinstance(N, int):
N = [N]
if type(N) is not list:
raise TypeError('N must be a list.')
if len(N) == 1:
x, w = gh1d(N[0])
else:
x = np.array([[1.0]])
w = np.array([[1.0]])
for n in N:
xi, wi = gh1d(n)
xL = np.kron(x.copy(), np.ones(xi.shape))
xU = np.kron(np.ones((x.shape[0],1)), xi)
x = np.hstack((xL, xU))
w = np.kron(w.copy(), wi)
x, w = np.atleast_2d(x[:,1:]), mi.atleast_2d_col(w)
return x, w
def gauss_legendre(N):
"""Tensor product Gauss-Legendre quadrature rule.
Parameters
----------
N : int[]
number of nodes in each dimension of the quadrature rule
Returns
-------
x : ndarray
N-by-1 array of quadrature nodes
w : ndarray
N-by-1 array of quadrature weights
Notes
-----
This computation is inspired by Walter Gautschi's code at
https://www.cs.purdue.edu/archives/2002/wxg/codes/OPQ.html.
"""
if isinstance(N, int):
N = [N]
if type(N) is not list:
raise TypeError('N must be a list.')
if len(N) == 1:
x, w = gl1d(N[0])
else:
x = np.array([[1.0]])
w = np.array([[1.0]])
for n in N:
xi, wi = gl1d(n)
xL = np.kron(x.copy(), np.ones(xi.shape))
xU = np.kron(np.ones((x.shape[0],1)), xi)
x = np.hstack((xL, xU))
w = np.kron(w.copy(), wi)
x, w = np.atleast_2d(x[:,1:]), mi.atleast_2d_col(w)
return x, w
|
paulcon/active_subspaces
|
active_subspaces/utils/quadrature.py
|
Python
|
mit
| 7,875
|
# encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python3/dist-packages/PyKDE4/kdeui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg
class KSeparator(__PyQt4_QtGui.QFrame):
# no doc
def orientation(self, *args, **kwargs): # real signature unknown
pass
def setOrientation(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247971765/PyKDE4/kdeui/KSeparator.py
|
Python
|
gpl-2.0
| 621
|
import datetime
from flask import request
from flask.ext.restful import reqparse, fields, marshal
import cred.config
from cred.exceptions import ClientNotFound
from cred.common import util
from cred.models.client import Client as ClientModel
full_client_fields = {
'id': fields.Integer,
'device': fields.String,
'location': fields.String,
'uri': fields.Url('clients_item', absolute=True),
}
simple_client_fields = {
'id': fields.Integer,
'uri': fields.Url('clients_item', absolute=True),
}
class Clients(util.AuthenticatedResource):
"""Methods going to the /clients route."""
def get(self):
"""
Get a list of all active clients, based on pingtimeout configuration.
Also accepts query parameters:
full=<bool>
before=<int>
after=<int>
limit=<int>
offset=<int>
        which allow for more fine-grained control.
"""
self.require_read_permission()
clients = util.get_db_items(
request,
Model=ClientModel,
default_fields=simple_client_fields,
full_fields=full_client_fields,
base_query=ClientModel.query.filter(
ClientModel.last_active > (datetime.datetime.utcnow() - datetime.timedelta(seconds=cred.config.loaded_configuration['pingtimeout']))
)
)
return {
'status': 200,
'message': 'OK',
'clients': clients
}, 200
class ClientsMe(util.AuthenticatedResource):
"""Methods going to the /clients/me route."""
def get(self):
"""Fetch information about the client itself."""
self.require_read_permission()
client = self.client
return {
'status': 200,
'message': 'OK',
'client': marshal(client, full_client_fields)
}, 200
class ClientsItem(util.AuthenticatedResource):
"""Methods going to the /clients/<int:id> route."""
def get(self, id):
"""Fetch information about a specific client."""
self.require_read_permission()
client = ClientModel.query.filter_by(id=id).first()
if not client:
raise ClientNotFound()
return {
'status': 200,
'message': 'OK',
'client': marshal(client, full_client_fields)
}, 200
|
Tehnix/cred-server
|
cred/resources/clients.py
|
Python
|
bsd-3-clause
| 2,387
|
import logging
import redis
from django.conf import settings
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.views import PasswordChangeView
from django.http import HttpResponseForbidden, JsonResponse, HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import redirect, render, render_to_response
from django.urls import reverse_lazy, reverse
from app.config.settings import AppSettings
from app.productdb.utils import login_required_if_login_only_mode
from django_project.celery import app as celery, TaskState, get_meta_data_for_task
from django_project import context_processors
logger = logging.getLogger("productdb")
def custom_page_not_found_view(request, exception):
response = render(request, 'django_project/custom_404_page.html', {})
response.status_code = 404
return response
def custom_error_view(request):
response = render(request, 'django_project/custom_500_page.html', {})
response.status_code = 500
return response
def custom_bad_request_view(request, exception):
response = render(request, 'django_project/custom_400_page.html', {})
response.status_code = 400
return response
def custom_permission_denied_view(request, exception):
response = render(request, 'django_project/custom_403_page.html', {})
response.status_code = 403
return response
def custom_csrf_failure_page(request, reason=""):
context = {
"message": "Form expired" if reason == "" else reason
}
return render_to_response('django_project/custom_csrf_failure_page.html', context)
class ChangePasswordView(LoginRequiredMixin, PasswordChangeView):
template_name = "django_project/change_password.html"
success_url = reverse_lazy("custom_password_change_done")
def get(self, request, *args, **kwargs):
if context_processors.is_ldap_authenticated_user(request)["IS_LDAP_ACCOUNT"]:
return HttpResponseForbidden("You're not allowed to change your password in this application")
return super().get(request, *args, **kwargs)
@login_required
def custom_password_change_done(request):
"""thank you page with link to homepage"""
# check if the request comes from an LDAP account, if so, raise a PermissionDenied exception
if context_processors.is_ldap_authenticated_user(request)["IS_LDAP_ACCOUNT"]:
return HttpResponseForbidden("You're not allowed to change your password in this application")
else:
return render(request, "django_project/password_change_done.html", context={})
def login_user(request):
"""login user
:param request:
:return:
"""
app_config = AppSettings()
context = {
"login_only_mode": app_config.is_login_only_mode()
}
if request.user.is_authenticated:
return HttpResponseRedirect(reverse("productdb:home"))
if request.GET:
context["next"] = request.GET['next']
else:
context["next"] = None
if request.method == 'POST':
# authenticate user
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
if context["next"] and not context["next"].startswith("/productdb/login"):
return HttpResponseRedirect(context["next"])
else:
return HttpResponseRedirect(reverse("productdb:home"))
else:
context["message"] = "User account was disabled.<br>Please contact the administrator."
else:
context["message"] = "Login failed, invalid credentials"
return render(request, "django_project/login.html", context=context)
@login_required
def logout_user(request):
"""logout user
:param request:
:return:
"""
if request.user.is_authenticated:
logout(request)
return redirect(reverse("login"))
def task_progress_view(request, task_id):
"""Progress view for an asynchronous task"""
if login_required_if_login_only_mode(request):
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
default_title = "Please wait..."
redirect_default = reverse("productdb:home")
meta_data = get_meta_data_for_task(task_id)
# title of the progress view
if "title" in meta_data.keys():
title = meta_data["title"]
else:
title = default_title
# redirect after task is completed
if "redirect_to" in meta_data.keys():
redirect_to = meta_data["redirect_to"]
auto_redirect = meta_data.get("auto_redirect", False)
else:
logger.warning("Cannot find redirect link to task meta data, use homepage")
redirect_to = redirect_default
auto_redirect = False
context = {
"task_id": task_id,
"title": title,
"redirect_to": redirect_to,
"auto_redirect": auto_redirect
}
return render(request, "django_project/task_progress_view.html", context=context)
def task_status_ajax(request, task_id):
"""returns a JSON representation of the task state"""
if settings.DEBUG: # show results for task in debug mode
valid_request = True
else:
valid_request = request.is_ajax()
if valid_request:
try:
task = celery.AsyncResult(task_id)
if task.state == TaskState.PENDING:
response = {
"state": "pending",
"status_message": "try to start task"
}
elif task.state == TaskState.STARTED or task.state.lower() == TaskState.PROCESSING:
response = {
"state": "processing",
"status_message": task.info.get("status_message", "")
}
elif task.state == TaskState.SUCCESS:
response = {
"state": "success",
"status_message": task.info.get("status_message", "")
}
if "error_message" in task.info:
response["error_message"] = task.info["error_message"]
if "data" in task.info:
response["data"] = task.info["data"]
else:
                # something went wrong within the task
response = {
"state": "failed",
"error_message": str(task.info), # this is the exception that was raised
}
except redis.ConnectionError:
logger.error("cannot get task update", exc_info=True)
response = {
"state": "failed",
"error_message": "A server process (redis) is not running, please contact the administrator"
}
except Exception: # catch any exception
logger.error("cannot get task update", exc_info=True)
response = {
"state": "failed",
"error_message": "Unknown error: " + str(task.info), # this is the exception raised
}
logger.debug("task state for %s is\n%s" % (task_id, str(response)))
return JsonResponse(response)
else:
return HttpResponse("Bad Request", status=400)
|
hoelsner/product-database
|
django_project/views.py
|
Python
|
mit
| 7,479
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'IBAN Bank Accounts',
'version': '1.0',
'category': 'Hidden/Dependency',
'description': """
This module installs the base for IBAN (International Bank Account Number) bank accounts and checks their validity.
======================================================================================================================
The ability to extract the correctly represented local accounts from IBAN accounts
with a single statement.
""",
'depends': ['account_accountant'],
'demo': ['demo/iban_demo.xml'],
'installable': True,
'auto_install': False,
}
|
minhphung171093/GreenERP
|
openerp/addons/base_iban/__openerp__.py
|
Python
|
gpl-3.0
| 702
|
# -*- coding: utf-8 -*-
import argparse
import io
import datetime
import logging
import os
import manage
import mock
import time
os.environ['SECUREDROP_ENV'] = 'test' # noqa
from models import Journalist, db
from .utils import db_helper
YUBIKEY_HOTP = ['cb a0 5f ad 41 a2 ff 4e eb 53 56 3a 1b f7 23 2e ce fc dc',
'cb a0 5f ad 41 a2 ff 4e eb 53 56 3a 1b f7 23 2e ce fc dc d7']
def test_parse_args():
# just test that the arg parser is stable
manage.get_args()
def test_not_verbose(caplog):
args = manage.get_args().parse_args(['run'])
manage.setup_verbosity(args)
manage.log.debug('INVISIBLE')
assert 'INVISIBLE' not in caplog.text
def test_verbose(caplog):
args = manage.get_args().parse_args(['--verbose', 'run'])
manage.setup_verbosity(args)
manage.log.debug('VISIBLE')
assert 'VISIBLE' in caplog.text
def test_get_username_success():
with mock.patch("manage.obtain_input", return_value='jen'):
assert manage._get_username() == 'jen'
def test_get_username_fail():
bad_username = 'a' * (Journalist.MIN_USERNAME_LEN - 1)
with mock.patch("manage.obtain_input",
side_effect=[bad_username, 'jen']):
assert manage._get_username() == 'jen'
def test_get_yubikey_usage_yes():
with mock.patch("manage.obtain_input", return_value='y'):
assert manage._get_yubikey_usage()
def test_get_yubikey_usage_no():
with mock.patch("manage.obtain_input", return_value='n'):
assert not manage._get_yubikey_usage()
# Note: we use the `journalist_app` fixture because it creates the DB
def test_handle_invalid_secret(journalist_app, config, mocker, capsys):
"""Regression test for bad secret logic in manage.py"""
mocker.patch("manage._get_username", return_value='ntoll'),
mocker.patch("manage._get_yubikey_usage", return_value=True),
mocker.patch("manage.obtain_input", side_effect=YUBIKEY_HOTP),
original_config = manage.config
try:
# We need to override the config to point at the per-test DB
manage.config = config
# We will try to provide one invalid and one valid secret
return_value = manage._add_user()
out, err = capsys.readouterr()
assert return_value == 0
assert 'Try again.' in out
assert 'successfully added' in out
finally:
manage.config = original_config
# Note: we use the `journalist_app` fixture because it creates the DB
def test_exception_handling_when_duplicate_username(journalist_app,
config,
mocker, capsys):
"""Regression test for duplicate username logic in manage.py"""
mocker.patch("manage._get_username", return_value='foo-bar-baz')
mocker.patch("manage._get_yubikey_usage", return_value=False)
original_config = manage.config
try:
# We need to override the config to point at the per-test DB
manage.config = config
# Inserting the user for the first time should succeed
return_value = manage._add_user()
out, err = capsys.readouterr()
assert return_value == 0
assert 'successfully added' in out
# Inserting the user for a second time should fail
return_value = manage._add_user()
out, err = capsys.readouterr()
assert return_value == 1
assert 'ERROR: That username is already taken!' in out
finally:
manage.config = original_config
# Note: we use the `journalist_app` fixture because it creates the DB
def test_delete_user(journalist_app, config, mocker):
mocker.patch("manage._get_username", return_value='test-user-56789')
mocker.patch("manage._get_yubikey_usage", return_value=False)
mocker.patch("manage._get_username_to_delete",
return_value='test-user-56789')
mocker.patch('manage._get_delete_confirmation', return_value=True)
original_config = manage.config
try:
# We need to override the config to point at the per-test DB
manage.config = config
return_value = manage._add_user()
assert return_value == 0
return_value = manage.delete_user(args=None)
assert return_value == 0
finally:
manage.config = original_config
# Note: we use the `journalist_app` fixture because it creates the DB
def test_delete_non_existent_user(journalist_app, config, mocker, capsys):
mocker.patch("manage._get_username_to_delete",
return_value='does-not-exist')
mocker.patch('manage._get_delete_confirmation', return_value=True)
original_config = manage.config
try:
# We need to override the config to point at the per-test DB
manage.config = config
return_value = manage.delete_user(args=None)
out, err = capsys.readouterr()
assert return_value == 0
assert 'ERROR: That user was not found!' in out
finally:
manage.config = original_config
def test_get_username_to_delete(mocker):
mocker.patch("manage.obtain_input", return_value='test-user-12345')
return_value = manage._get_username_to_delete()
assert return_value == 'test-user-12345'
def test_reset(journalist_app, test_journo, alembic_config, config):
original_config = manage.config
try:
# We need to override the config to point at the per-test DB
manage.config = config
# Override the hardcoded alembic.ini value
manage.config.TEST_ALEMBIC_INI = alembic_config
args = argparse.Namespace(store_dir=config.STORE_DIR)
return_value = manage.reset(args=args)
assert return_value == 0
assert os.path.exists(config.DATABASE_FILE)
assert os.path.exists(config.STORE_DIR)
# Verify journalist user present in the database is gone
with journalist_app.app_context():
res = Journalist.query \
.filter_by(username=test_journo['username']).one_or_none()
assert res is None
finally:
manage.config = original_config
def test_get_username(mocker):
mocker.patch("manage.obtain_input", return_value='foo-bar-baz')
assert manage._get_username() == 'foo-bar-baz'
def test_clean_tmp_do_nothing(caplog):
args = argparse.Namespace(days=0,
directory=' UNLIKELY::::::::::::::::: ',
verbose=logging.DEBUG)
manage.setup_verbosity(args)
manage.clean_tmp(args)
assert 'does not exist, do nothing' in caplog.text
def test_clean_tmp_too_young(config, caplog):
args = argparse.Namespace(days=24*60*60,
directory=config.TEMP_DIR,
verbose=logging.DEBUG)
# create a file
io.open(os.path.join(config.TEMP_DIR, 'FILE'), 'a').close()
manage.setup_verbosity(args)
manage.clean_tmp(args)
assert 'modified less than' in caplog.text
def test_clean_tmp_removed(config, caplog):
args = argparse.Namespace(days=0,
directory=config.TEMP_DIR,
verbose=logging.DEBUG)
fname = os.path.join(config.TEMP_DIR, 'FILE')
with io.open(fname, 'a'):
old = time.time() - 24*60*60
os.utime(fname, (old, old))
manage.setup_verbosity(args)
manage.clean_tmp(args)
assert 'FILE removed' in caplog.text
def test_were_there_submissions_today(source_app, config):
original_config = manage.config
try:
# We need to override the config to point at the per-test DB
manage.config = config
data_root = config.SECUREDROP_DATA_ROOT
args = argparse.Namespace(data_root=data_root,
verbose=logging.DEBUG)
with source_app.app_context():
count_file = os.path.join(data_root, 'submissions_today.txt')
source, codename = db_helper.init_source_without_keypair()
source.last_updated = (datetime.datetime.utcnow() -
datetime.timedelta(hours=24*2))
db.session.commit()
manage.were_there_submissions_today(args)
assert io.open(count_file).read() == "0"
source.last_updated = datetime.datetime.utcnow()
db.session.commit()
manage.were_there_submissions_today(args)
assert io.open(count_file).read() == "1"
finally:
manage.config = original_config
|
ehartsuyker/securedrop
|
securedrop/tests/test_manage.py
|
Python
|
agpl-3.0
| 8,503
|
# -*- coding: utf-8 -*-
#
# codimension - graphics python two-way code editor and analyzer
# Copyright (C) 2010-2017 Sergey Satskiy <sergey.satskiy@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Codimension main window status bar"""
import os.path
from utils.pixmapcache import getIcon
from plugins.vcssupport.intervaldlg import VCSUpdateIntervalConfigDialog
from .qt import Qt, QPalette, QColor, QMenu, QDialog, QApplication
from .labels import (StatusBarPixmapLabel, StatusBarPathLabel,
StatusBarFramedLabel)
class MainWindowStatusBarMixin:
"""Main window status bar mixin"""
def __init__(self):
self.__statusBar = None
self.sbLanguage = None
self.sbFile = None
self.sbEol = None
self.sbPos = None
self.sbLine = None
self.sbWritable = None
self.sbEncoding = None
self.sbPyflakes = None
self.sbCC = None
self.sbVCSStatus = None
self.sbDebugState = None
self.__createStatusBar()
def __createStatusBar(self):
"""Creates status bar"""
self.__statusBar = self.statusBar()
self.__statusBar.setSizeGripEnabled(True)
self.sbVCSStatus = StatusBarPixmapLabel('ignore', self.__statusBar)
self.__statusBar.addPermanentWidget(self.sbVCSStatus)
self.sbVCSStatus.setVisible(False)
self.sbVCSStatus.setContextMenuPolicy(Qt.CustomContextMenu)
self.sbVCSStatus.customContextMenuRequested.connect(
self._showVCSLabelContextMenu)
self.sbDebugState = StatusBarFramedLabel(
text='Debugger: unknown', callback=None, parent=self.__statusBar)
dbgPalette = self.sbDebugState.palette()
dbgPalette.setColor(QPalette.Background, QColor(255, 255, 127))
self.sbDebugState.setPalette(dbgPalette)
self.__statusBar.addPermanentWidget(self.sbDebugState)
self.sbDebugState.setVisible(False)
self.sbLanguage = StatusBarFramedLabel(parent=self.__statusBar)
self.__statusBar.addPermanentWidget(self.sbLanguage)
self.sbEncoding = StatusBarFramedLabel(parent=self.__statusBar)
self.__statusBar.addPermanentWidget(self.sbEncoding)
self.sbEol = StatusBarFramedLabel(parent=self.__statusBar)
self.__statusBar.addPermanentWidget(self.sbEol)
self.sbWritable = StatusBarFramedLabel(parent=self.__statusBar)
self.__statusBar.addPermanentWidget(self.sbWritable)
self.sbPyflakes = StatusBarPixmapLabel('signal', self.__statusBar)
self.__statusBar.addPermanentWidget(self.sbPyflakes)
self.sbCC = StatusBarPixmapLabel('signal', self.__statusBar)
self.__statusBar.addPermanentWidget(self.sbCC)
self.sbFile = StatusBarPathLabel(
callback=self._onPathLabelDoubleClick,
parent=self.__statusBar)
self.sbFile.setMaximumWidth(512)
self.sbFile.setMinimumWidth(128)
self.__statusBar.addPermanentWidget(self.sbFile, True)
self.sbFile.setContextMenuPolicy(Qt.CustomContextMenu)
self.sbFile.customContextMenuRequested.connect(
self._showPathLabelContextMenu)
self.sbLine = StatusBarFramedLabel(callback=self.copyLine,
parent=self.__statusBar)
self.sbLine.setMinimumWidth(72)
self.sbLine.setAlignment(Qt.AlignCenter)
self.__statusBar.addPermanentWidget(self.sbLine)
self.sbPos = StatusBarFramedLabel(callback=self.copyPos,
parent=self.__statusBar)
self.sbPos.setMinimumWidth(72)
self.sbPos.setAlignment(Qt.AlignCenter)
self.__statusBar.addPermanentWidget(self.sbPos)
def copyLine(self):
"""Copies the line number to the buffer"""
self.__copyLinePos(self.sbLine)
def copyPos(self):
"""Copies the pos number to the buffer"""
self.__copyLinePos(self.sbPos)
@staticmethod
def __copyLinePos(label):
"""Copies the line/pos label content to the buffer"""
txt = label.text().strip().lower()
if not txt.endswith('n/a'):
txt = txt[txt.find(':') + 1:].strip()
QApplication.clipboard().setText(txt)
def showStatusBarMessage(self, msg, timeout=10000):
"""Shows a temporary status bar message, default 10sec"""
self.__statusBar.showMessage(msg, timeout)
def clearStatusBarMessage(self):
"""Clears the status bar message in the given slot"""
self.__statusBar.clearMessage()
def getCurrentStatusBarMessage(self):
"""Provides the current status bar message"""
return self.__statusBar.currentMessage()
def _showVCSLabelContextMenu(self, pos):
"""Triggered when a context menu is requested for a VCS label"""
contextMenu = QMenu(self)
contextMenu.addAction(getIcon("vcsintervalmenu.png"),
"Configure monitor interval",
self.__onVCSMonitorInterval)
contextMenu.popup(self.sbVCSStatus.mapToGlobal(pos))
def __onVCSMonitorInterval(self):
"""Runs the VCS monitor interval setting dialog"""
dlg = VCSUpdateIntervalConfigDialog(
self.settings['vcsstatusupdateinterval'], self)
if dlg.exec_() == QDialog.Accepted:
self.settings['vcsstatusupdateinterval'] = dlg.interval
def _showPathLabelContextMenu(self, pos):
"""Triggered when a context menu is requested for the path label"""
contextMenu = QMenu(self)
contextMenu.addAction(getIcon('copymenu.png'),
'Copy full path to clipboard (double click)',
self._onPathLabelDoubleClick)
contextMenu.addSeparator()
contextMenu.addAction(getIcon(''), 'Copy directory path to clipboard',
self._onCopyDirToClipboard)
contextMenu.addAction(getIcon(''), 'Copy file name to clipboard',
self._onCopyFileNameToClipboard)
contextMenu.popup(self.sbFile.mapToGlobal(pos))
def _onPathLabelDoubleClick(self):
"""Double click on the status bar path label"""
txt = self.__getPathLabelFilePath()
if txt.lower() not in ['', 'n/a']:
QApplication.clipboard().setText(txt)
def _onCopyDirToClipboard(self):
"""Copies the dir path of the current file into the clipboard"""
txt = self.__getPathLabelFilePath()
if txt.lower() not in ['', 'n/a']:
try:
QApplication.clipboard().setText(os.path.dirname(txt) +
os.path.sep)
            except Exception:
                # clipboard access may fail; ignore silently
                pass
def _onCopyFileNameToClipboard(self):
"""Copies the file name of the current file into the clipboard"""
txt = self.__getPathLabelFilePath()
if txt.lower() not in ['', 'n/a']:
try:
QApplication.clipboard().setText(os.path.basename(txt))
            except Exception:
                # clipboard access may fail; ignore silently
                pass
def __getPathLabelFilePath(self):
"""Provides undecorated path label content"""
txt = str(self.sbFile.getPath())
if txt.startswith('File: '):
txt = txt.replace('File: ', '')
return txt
|
SergeySatskiy/codimension
|
codimension/ui/mainstatusbar.py
|
Python
|
gpl-3.0
| 7,924
|
# -*- coding: utf-8 -*-
# Authors: Natalia B Bidart <natalia.bidart@canonical.com>
#
# Copyright 2010 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""The GTK graphical interface for the control panel for Ubuntu One."""
DBUS_BUS_NAME = 'com.ubuntuone.controlpanel.gui'
DBUS_PATH = '/gui'
DBUS_IFACE_GUI = 'com.ubuntuone.controlpanel.gui'
# Unused import main
# pylint: disable=W0611
from ubuntuone.controlpanel.gui.gtk.gui import main
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/ubuntuone-control-panel/ubuntuone/controlpanel/gui/gtk/__init__.py
|
Python
|
gpl-3.0
| 1,007
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Role
====
Defines the basic interfaces for a plugin. These interfaces are
inherited by the *core* class of a plugin. The *core* class of a
plugin is then the one that will be notified the
activation/deactivation of a plugin via the ``activate/deactivate``
methods.
For simple (near trivial) plugin systems, one can directly use the
following interfaces.
Extensibility
=============
In your own software, you'll probably want to build derived classes of
the ``IPlugin`` class as it is a mere interface with no specific
functionality.
Your software's plugins should then inherit your very own plugin class
(itself derived from ``IPlugin``).
Where and how to code these plugins is explained in the section about
the :doc:`PluginManager`.
API
===
"""
class IPlugin(object):
"""
The most simple interface to be inherited when creating a plugin.
"""
def __init__(self):
"""
Set the basic variables.
"""
self.is_activated = False
def activate(self):
"""
Called at plugin activation.
"""
self.is_activated = True
def deactivate(self):
"""
Called when the plugin is disabled.
"""
self.is_activated = False
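# --- Hedged example (not part of the original file) ---
# As the module docstring suggests, applications derive their own plugin base
# class from IPlugin and have concrete plugins inherit from it. The class
# names and the `on_file_saved` hook below are illustrative only.
class EditorPlugin(IPlugin):
    """Application-specific plugin base adding one extra hook."""
    def on_file_saved(self, path):
        """Called by the host application; concrete plugins override this."""
        pass

class SpellCheckPlugin(EditorPlugin):
    def on_file_saved(self, path):
        if self.is_activated:
            print("spell-checking %s" % path)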
|
cgroza/gEcrit
|
yapsy/IPlugin.py
|
Python
|
gpl-3.0
| 1,190
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Copyright © 2011: Lattyware <gareth@lattyware.co.uk>
This file is part of unrest.
unrest is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
unrest is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
Just as a quick note, please remember if you are reading this, 48 hours. It's
not clean, well documented or particularly well done in general, but that wasn't
the point. Hopefully it's a bit of fun.
"""
import os
import math as maths
import random
import sf
import scenes
class Entity:
def __init__(self, *args):
self.interactive = False
self.init(*args)
self.image = sf.Texture.load_from_file(bytes(self.image_path, 'UTF-8'))
self.sprite = sf.Sprite(self.image)
self.sprite.origin = (self.sprite.width/2, self.sprite.height)
self.sprite.position = (args[0], 500)
self.postinit()
def _set_x(self, x):
self.sprite.x = x
def _get_x(self):
return self.sprite.x
x = property(_get_x, _set_x)
def postinit(self):
pass
def init(self):
raise NotImplementedError
def handle_event(self, scene, cursor, event):
if event.type == sf.Event.MOUSE_BUTTON_RELEASED:
if self.contains(cursor.position) and self.interactive and not scene.fade:
scene.player.set_callback(cursor.x, self.interact, scene)
return True
return False
def interact(self, scene):
raise NotImplementedError
def draw(self, target):
target.draw(self.sprite)
def contains(self, point):
x, y = point
        hx, y2 = self.sprite.position
        x1 = hx - self.sprite.width/2
        x2 = hx + self.sprite.width/2
        y1 = y2 - self.sprite.height
if x > x1 and y > y1 and x < x2 and y < y2:
return True
else:
return False
def update(self, time):
raise NotImplementedError
class Boundary(Entity):
def init(self, direction, to, frm=None):
self.name = to.__name__
self.image_path = os.path.join("assets", direction+".png")
self.text = sf.Text(to.__name__, sf.Font.DEFAULT_FONT, 20)
self.direction = direction
self.interactive = "Go To"
self.to = to
self.frm = frm
def postinit(self):
if self.direction == "right":
self.sprite.position = (784, 600)
self.text.position = (800, 200)
self.text.rotate(90)
else:
self.sprite.position = (16, 600)
self.text.rotate(-90)
self.text.position = (0, 200)
def update(self, time):
pass
def interact(self, scene):
if self.frm:
begin = self.frm
else:
if self.direction == "right":
begin = 60
else:
begin = 740
scene.finish(self.to, begin)
def draw(self, target):
Entity.draw(self, target)
target.draw(self.text)
class Person(Entity):
people = [
os.path.join("assets", "man.png"),
os.path.join("assets", "woman.png"),
os.path.join("assets", "girl.png"),
os.path.join("assets", "boy.png"),
]
adults = [
os.path.join("assets", "man.png"),
os.path.join("assets", "woman.png"),
]
def init(self, x, adults=False, speeds=(0.05, 0.15)):
self.speed = random.uniform(*speeds)
if adults:
self.image_path = random.choice(self.adults)
else:
self.image_path = random.choice(self.people)
if x < 400:
self.direction = 1
else:
self.direction = -1
def postinit(self):
if self.direction == -1:
self.sprite.flip_x(True)
def update(self, time):
self.x += self.direction*self.speed*time
if self.x < -20 or self.x > 820:
            # NOTE: `del self` only removes the local name binding; the
            # entity itself is not removed from the scene here.
            del self
def _set_x(self, x):
self.sprite.x = x
def _get_x(self):
return self.sprite.x
x = property(_get_x, _set_x)
class Player(Entity):
def init(self, x):
self.target = None
self.callback = None
self.speed = 0.3
self.right = True
self.image_path = os.path.join("assets", "you.png")
self.action = False
self.wait = None
self.time = 0
def update(self, time):
if self.action:
if not self.target:
if self.wait:
self.time += time
if self.time > self.wait:
self.wait = None
self.time = 0
else:
try:
call, *args = next(self.action)
getattr(self, call)(*args)
except StopIteration:
self.action = None
if self.target:
if maths.fabs(self.x-self.target) <= self.speed*time:
if self.callback:
func, args = self.callback
func(*args)
self.callback = None
self.target = None
else:
if self.x > self.target:
mod = -self.speed*time
self.sprite.flip_x(True)
self.right = False
else:
mod = self.speed*time
self.sprite.flip_x(not self.right)
self.right = True
self.x += mod
def handle_event(self, scene, cursor, event):
if event.type == sf.Event.MOUSE_BUTTON_RELEASED and not self.action and not scene.fade:
self.target = cursor.position[0]
if self.target < 0:
self.target = 0
elif self.target > 800:
self.target = 800
return True
else:
return False
def perform(self, action):
self.action = iter(action)
self.target = None
def set(self, attr, x):
#Used for actions.
setattr(self, attr, x)
def set_other(self, other, attr, x):
#Used for actions.
setattr(other, attr, x)
def call_other(self, other, func, *args):
#Used for actions.
getattr(other, func)(*args)
def change_scene(self, scene, to, *args):
#Used for actions.
scene.finish(to, *args)
def reload(self, scene, *args):
scene.fadeout(scene.__class__, *args)
def change_image(self, image):
pos = self.sprite.x, self.sprite.y
right = self.right
self.sprite = sf.Sprite(image)
self.sprite.x, self.sprite.y = pos
self.sprite.flip_x(not right)
#self.sprite.texture = image
#self.sprite.resize(image.width, image.height)
self.sprite.origin = (self.sprite.width/2, self.sprite.height)
def change_other_image(self, other, image):
pos = other.sprite.x, other.sprite.y
right = other.right
other.sprite = sf.Sprite(image)
other.sprite.x, other.sprite.y = pos
other.sprite.flip_x(not right)
#other.sprite.texture = image
#other.sprite.resize(image.width, image.height)
other.sprite.origin = (other.sprite.width/2, other.sprite.height)
def set_callback(self, x, func, *args):
if not self.action:
self.callback = (func, args)
self.target = x
class Her(Entity):
def init(self, x, scene):
if scene.state.achievements["Girlfriend"]:
self.name = "Girlfriend"
else:
self.name = "Girl"
self.speed = 0.3
if scene.__class__.__name__ == "Bar":
self.interactive = "Offer A Drink"
elif scene.state.achievements["Father"]:
self.interactive = "\"Spend Some Time Together\""
else:
self.interactive = "Have A Child"
self.image_path = os.path.join("assets", "her.png")
self.target = None
self.right = True
def interact(self, scene):
if scene.__class__.__name__ == "Bar":
scene.state.increase(scene, "Poor", 1)
scene.state.decrease(scene, "Lonely", 3)
scene.state.decrease(scene, "Stressed", 1)
scene.state.increase(scene, "Addiction", 1)
pdrink = sf.Texture.load_from_file(bytes(os.path.join("assets", "drink.png"), 'UTF-8'))
hdrink = sf.Texture.load_from_file(bytes(os.path.join("assets", "herdrink.png"), 'UTF-8'))
speed = scene.player.speed
if scene.state.achievements["Have Home"] and \
scene.state.issues["Uneducated"] < 8 and scene.state.issues["Overweight"] < 6:
scene.state.achievements["Girlfriend"] = True
parts = [
("set", "speed", speed/3),
("set_other", self, "speed", speed/4),
("set_other", self, "target", 535),
("set", "target", 460),
("set", "target", 470),
("change_other_image", self, hdrink),
("change_image", pdrink),
("set", "wait", 4000),
("set_other", self, "speed", speed/3),
("change_image", scene.player.image),
("change_other_image", self, self.image),
("set_other", self, "target", 1),
("set", "target", 1),
("change_scene", scene, scenes.House)
]
else:
parts = [
("set", "speed", speed/3),
("set_other", self, "speed", speed/4),
("set_other", self, "target", 535),
("set", "target", 460),
("set", "target", 470),
("change_other_image", self, hdrink),
("change_image", pdrink),
("set", "wait", 2000),
("change_image", scene.player.image),
("set", "target", 1),
("change_scene", scene, scenes.City, 300)
]
scene.player.perform(parts)
else:
speed = scene.player.speed
if not scene.state.achievements["Father"]:
scene.state.achievements["Father"] = True
scene.state.decrease(scene, "Lonely", 3)
scene.state.decrease(scene, "Stressed", 3)
scene.state.decrease(scene, "Bored", 1)
parts = [
("set", "speed", speed/5),
("set_other", self, "speed", speed/3),
("set_other", self, "target", 175),
("set", "target", 125),
("reload", scene),
]
scene.player.perform(parts)
def update(self, time):
if self.target:
if maths.fabs(self.x-self.target) <= self.speed*time:
self.target = None
else:
if self.x >= self.target:
mod = -self.speed*time
self.sprite.flip_x(True)
self.right = False
else:
mod = self.speed*time
self.sprite.flip_x(not self.right)
self.right = True
self.x += mod
class Him(Entity):
def init(self, x):
self.name = "Son"
self.interactive = "Play"
self.image_path = os.path.join("assets", "him.png")
def interact(self, scene):
scene.state.decrease(scene, "Lonely", 2)
scene.state.decrease(scene, "Stressed", 1)
scene.state.decrease(scene, "Bored", 1)
parts = [
("reload", scene, self.x),
]
scene.player.perform(parts)
def update(self, time):
pass
class JobCentre(Entity):
def init(self, x, scene):
self.name = "Job Centre"
if scene.state.achievements["Educated"]:
self.interactive = "Complex Job (Freelance Business Adviser)"
else:
self.interactive = "Simple Job (Hand Out Leaflets)"
self.image_path = os.path.join("assets", "jobcentre.png")
def interact(self, scene):
if scene.state.achievements["Educated"]:
parts = [
("reload", scene, self.x),
]
scene.state.increase(scene, "Stressed", 3)
scene.state.decrease(scene, "Poor", 5)
scene.player.perform(parts)
else:
x = scene.player.x
speed = scene.player.speed
image = sf.Texture.load_from_file(bytes(os.path.join("assets", "leaflets.png"), 'UTF-8'))
parts = [
("set", "speed", speed/2),
("change_image", image),
("set", "target", x-200),
("set", "target", x+200),
("set", "target", x-200),
("set", "target", x+200),
("set", "speed", speed),
("change_image", scene.player.image),
("set", "target", x),
]
scene.state.increase(scene, "Bored", 1)
scene.state.increase(scene, "Stressed", 1)
scene.state.decrease(scene, "Poor", 3)
scene.player.perform(parts)
def update(self, time):
pass
class EstateAgent(Entity):
def init(self, x, scene):
self.name = "Estate Agent"
if scene.state.achievements["Have Home"]:
self.interactive = False
else:
self.interactive = "Rent House"
self.image_path = os.path.join("assets", "estateagent.png")
def interact(self, scene):
scene.state.increase(scene, "Poor", 4)
scene.house.interactive = "Enter House"
self.interactive = False
scene.state.achievements["Have Home"] = True
image = sf.Texture.load_from_file(bytes(os.path.join("assets", "key.png"), 'UTF-8'))
speed = scene.player.speed
parts = [
("change_image", image),
("set", "speed", speed/4),
("set", "target", 600),
("change_image", scene.player.image),
("set", "speed", speed),
("change_scene", scene, scenes.House),
]
scene.player.perform(parts)
def update(self, time):
pass
class Bar(Entity):
def init(self, x):
self.interactive = "Enter"
self.image_path = os.path.join("assets", "bar.png")
def interact(self, scene):
scene.finish(scenes.Bar)
def update(self, time):
pass
class Fridge(Entity):
def init(self, x, scene):
self.interactive = "Eat"
if scene.state.achievements["Father"]:
self.image_path = os.path.join("assets", "fridgehim.png")
else:
self.image_path = os.path.join("assets", "fridge.png")
def interact(self, scene):
parts = [
("reload", scene, self.x),
]
scene.state.decrease(scene, "Stressed", 1)
scene.state.increase(scene, "Overweight", 2)
scene.player.perform(parts)
def update(self, time):
pass
class Gym(Entity):
def init(self, x):
self.interactive = "Work Out"
self.image_path = os.path.join("assets", "gym.png")
def interact(self, scene):
parts = [
("reload", scene, self.x),
]
scene.state.increase(scene, "Bored", 2)
scene.state.increase(scene, "Poor", 1)
scene.state.decrease(scene, "Overweight", 2)
scene.player.perform(parts)
def update(self, time):
pass
class DrugDealer(Entity):
def init(self, x):
self.name= "Drug Dealer"
self.interactive = "Buy Drugs"
self.image_path = os.path.join("assets", "drugdealer.png")
def interact(self, scene):
parts = [
("reload", scene, self.x),
]
scene.state.increase(scene, "Addiction", 5)
scene.state.increase(scene, "Poor", 2)
scene.state.decrease(scene, "Stressed", 4)
scene.state.decrease(scene, "Bored", 4)
scene.state.increase(scene, "Guilty", 3)
scene.player.perform(parts)
def update(self, time):
pass
class College(Entity):
def init(self, x):
self.interactive = "Take Course"
self.image_path = os.path.join("assets", "college.png")
def interact(self, scene):
parts = [
("reload", scene, self.x),
]
scene.state.decrease(scene, "Uneducated", 3)
scene.state.increase(scene, "Poor", 2)
scene.state.increase(scene, "Stressed", 1)
scene.player.perform(parts)
def update(self, time):
pass
class BarInside(Entity):
def init(self, x):
self.name = "Bar"
self.interactive = "Get Drink"
self.image_path = os.path.join("assets", "barinside.png")
def interact(self, scene):
parts = [
("reload", scene, self.x),
]
scene.state.increase(scene, "Addiction", 1)
scene.state.increase(scene, "Poor", 1)
scene.state.decrease(scene, "Stressed", 1)
scene.player.perform(parts)
def update(self, time):
pass
class House(Entity):
def init(self, x, scene):
if scene.state.achievements["Have Home"]:
self.interactive = "Enter"
else:
self.interactive = False
self.image_path = os.path.join("assets", "house.png")
def interact(self, scene):
scene.finish(scenes.House)
def update(self, time):
pass
class TV(Entity):
def init(self, x):
self.interactive = "Watch TV"
self.image_path = os.path.join("assets", "tv.png")
def interact(self, scene):
parts = [
("reload", scene, self.x),
]
scene.state.increase(scene, "Lonely", 1)
scene.state.decrease(scene, "Stressed", 1)
scene.state.decrease(scene, "Bored", 2)
scene.player.perform(parts)
def update(self, time):
pass
class Bed(Entity):
def init(self, x, asleep=False):
self.interactive = "Sleep"
self.image_path = os.path.join("assets", "bed.png")
if asleep:
self.image_path = os.path.join("assets", "beduse.png")
def interact(self, scene):
parts = [
("reload", scene, self.x),
]
scene.state.increase(scene, "Lonely", 2)
scene.state.decrease(scene, "Stressed", 2)
scene.state.decrease(scene, "Addiction", 1)
scene.state.decrease(scene, "Guilty", 1)
scene.player.perform(parts)
def update(self, time):
pass
|
Lattyware/unrest
|
entities.py
|
Python
|
gpl-3.0
| 15,667
|
'''
test adapted from https://wiki.fysik.dtu.dk/ase/tutorials/neb/idpp.html#example-1-ethane
'''
from __future__ import print_function
from espresso import iEspresso, NEBEspresso
from ase.build import molecule
from ase.neb import NEBTools
from ase.optimize.fire import FIRE as QuasiNewton
from asetools import smart_cell
def test_ethene_rotation(tmpdir):
tmpdir.chdir()
# Optimise molecule
initial = molecule('C2H6')
smart_cell(initial, vac=4.0, h=0.01)
initial.set_calculator(iEspresso(pw=300, dw=4000, kpts='gamma'))
qn = QuasiNewton(initial, 'initial.traj')
qn.run(fmax=0.01)
# Create final state
final = initial.copy()
final.positions[2:5] = initial.positions[[3, 4, 2]]
final.set_calculator(iEspresso(pw=300, dw=4000, kpts='gamma'))
final.get_potential_energy()
# Generate blank images
images = [initial]
nimage = 7
for i in range(nimage):
image = initial.copy()
image.set_calculator(iEspresso(pw=300, dw=4000, kpts='gamma'))
images.append(image)
images.append(final)
# Run IDPP interpolation
neb = NEBEspresso(images)
neb.interpolate('idpp')
# Run NEB calculation
qn = QuasiNewton(neb, logfile='ethane_linear.log', trajectory='neb.traj')
qn.run(fmax=0.05)
nt = NEBTools(neb.images)
print('fmax: ', nt.get_fmax())
print('Ef, dE: ', nt.get_barrier())
|
lmmentel/ase-espresso
|
tests/test_nebespresso.py
|
Python
|
gpl-3.0
| 1,396
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# FollowMe Butia - FollowMe
# Copyright (C) 2010-2013
# This program was created to use with the robot Butia.
# Butia is a project from Facultad de Ingenieria - Uruguay
# Facultad de Ingenieria web site: <http://www.fing.edu.uy/>
# Butia project web site: <http://www.fing.edu.uy/inco/proyectos/butia/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Alan Aguiar <alanjas@gmail.com>
# Aylen Ricca <ar18_90@hotmail.com>
# Rodrigo Dearmas <piegrande46@hotmail.com>
import pygame
import pygame.camera
from gettext import gettext as _
class FollowMe(object):
def __init__(self, parent):
pygame.init()
pygame.camera.init()
if parent:
self.display = pygame.display.get_surface()
else:
self.display = pygame.display.set_mode((1200, 900))
self.use_threshold_view = True
self.use_outline_view = True
self.use_rects_view = True
self.tamanioc = (320, 240)
self.brightness = 128
self.cam = None
self.get_camera()
self.calc((960, 720))
self.show_grid = False
def get_camera(self, mode='RGB'):
self.stop_camera()
self.lcamaras = pygame.camera.list_cameras()
if self.lcamaras:
self.cam = pygame.camera.Camera(self.lcamaras[0], self.tamanioc, mode)
try:
self.cam.start()
self.set_camera_flags()
self.captura = pygame.surface.Surface(self.tamanioc, 0, self.display)
self.captura_aux = pygame.surface.Surface(self.tamanioc, 0, self.display)
self.captura_aux2 = pygame.surface.Surface(self.tamanioc, 0, self.display)
self.captura_to_show = pygame.surface.Surface(self.tamanioc, 0, self.display)
except:
            print _('Error initializing the camera')
else:
        print _('No cameras were found')
def stop_camera(self):
if self.cam:
try:
self.cam.stop()
self.cam = None
except:
                print _('Error stopping the camera')
def set_camera_flags(self):
self.cam.set_controls(True, False, self.brightness)
res = self.cam.get_controls()
self.flip = res[0]
self.tamanioc = self.cam.get_size()
def calc(self, tamanio):
self.show_size = tamanio
pantalla_x, pantalla_y = self.display.get_size()
self.c1 = (self.show_size[0] / self.tamanioc[0])
self.c2 = (self.show_size[1] / self.tamanioc[1])
self.xc = (self.tamanioc[0] - 50) / 2.0
self.yc = (self.tamanioc[1] - 50) / 2.0
self.xcm = (pantalla_x - 50) / 2.0
self.ycm = (pantalla_y - 50) / 2.0
self.xblit = (pantalla_x - self.show_size[0]) / 2
self.yblit = (pantalla_y - self.show_size[1]) / 2
self.txd = self.show_size[0] / 15.0
self.tyd = self.show_size[1] / 3.0
self.x_c_square = int(self.tamanioc[0] / 2)
self.y_c_square = int(self.tamanioc[1] / 2)
def calibrate(self):
self.captura = self.cam.get_image(self.captura)
if not(self.flip):
self.captura = pygame.transform.flip(self.captura,True,False)
# Obtiene un color un poco mas "oscuro" que lo que es
#color = pygame.transform.average_color(self.captura, (self.xc,self.yc,50,50))
# Obtengo el color del pixel ubicado en el centro
color = self.captura.get_at((self.x_c_square, self.y_c_square))
self.display.fill((84,185,72))
self.captura_to_show = pygame.transform.scale(self.captura, (int(self.show_size[0]), int(self.show_size[1])))
self.display.blit(self.captura_to_show, (self.xblit, self.yblit))
pygame.draw.rect(self.display, (255,0,0), (self.xcm,self.ycm,50,50), 4)
self.display.fill(color, (0,0,120,120))
pygame.draw.rect(self.display, (0,0,0), (0,0,120,120), 4)
return color
def get_position(self, color, threshold, pixels):
self.captura = self.cam.get_image(self.captura)
if not(self.flip):
self.captura = pygame.transform.flip(self.captura, True, False)
if self.use_threshold_view:
pygame.transform.threshold(self.captura_aux2, self.captura, color, (threshold[0],threshold[1], threshold[2]), (0,0,0))
pygame.transform.threshold(self.captura_aux, self.captura_aux2, (0, 0, 0), (10, 10, 10), (255, 255, 255))
mascara = pygame.mask.from_threshold(self.captura_aux, (255, 255, 255), (10, 10, 10))
else:
mascara = pygame.mask.from_threshold(self.captura, color, (10, 10, 10))
conexa = mascara.connected_component()
if (conexa.count() > pixels):
x, y = conexa.centroid()
else:
x, y = (-1, -1)
return conexa, (x, y)
def generate_capture_to_show(self):
if self.use_threshold_view:
self.captura_to_show = pygame.transform.scale(self.captura_aux, (int(self.show_size[0]), int(self.show_size[1])))
else:
self.captura_to_show = pygame.transform.scale(self.captura, (int(self.show_size[0]), int(self.show_size[1])))
def show_centroid_position(self, pos):
pygame.draw.rect(self.captura_to_show, (255,0,0), (pos[0]*self.c1, pos[1]*self.c2, 20, 20), 16)
def show_outline(self, mask):
points = mask.outline()
for p in points:
pygame.draw.rect(self.captura_to_show, (0,0,255), (p[0]*self.c1, p[1]*self.c2, 5, 5), 5)
def show_rects(self, mask):
rects = mask.get_bounding_rects()
for r in rects:
pygame.draw.rect(self.captura_to_show, (0,255,0), (r[0]*self.c1, r[1]*self.c2, r[2]*self.c1, r[3]*self.c2), 5)
def show_in_screen(self, color):
self.display.fill((84,185,72))
        if self.show_grid:
self.draw_grid()
self.display.blit(self.captura_to_show, (self.xblit, self.yblit))
pygame.draw.rect(self.display, (0,0,0), (0,0,120,120), 4)
def draw_grid(self):
        # draw horizontals (constant y)
pygame.draw.line(self.captura_to_show, (250, 40, 40), (0, self.tyd), (self.show_size[0],self.tyd), 3)
pygame.draw.line(self.captura_to_show, (250, 40, 40), (0, 2*self.tyd), (self.show_size[0], 2*self.tyd), 3)
        # draw verticals (constant x)
pygame.draw.line(self.captura_to_show, (250, 40, 40), (2*self.txd, 0), (2*self.txd, self.show_size[1]), 3)
pygame.draw.line(self.captura_to_show, (250, 40, 40), (4*self.txd, 0), (4*self.txd, self.show_size[1]), 3)
pygame.draw.line(self.captura_to_show, (250, 40, 40), (6*self.txd, 0), (6*self.txd, self.show_size[1]), 3)
pygame.draw.line(self.captura_to_show, (250, 40, 40), (9*self.txd, 0), (9*self.txd, self.show_size[1]), 3)
pygame.draw.line(self.captura_to_show, (250, 40, 40), (11*self.txd, 0), (11*self.txd, self.show_size[1]), 3)
pygame.draw.line(self.captura_to_show, (250, 40, 40), (13*self.txd, 0), (13*self.txd, self.show_size[1]), 3)
def clean(self):
self.display.fill((84, 185, 72))
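# --- Hedged usage sketch (not part of the original file) ---
# How the pieces fit together: calibrate() samples the colour under the
# centre square, then get_position() is polled every frame. The threshold
# and minimum-pixel values below are illustrative; only FollowMe's own
# methods come from this file.
if __name__ == '__main__':
    fm = FollowMe(None)
    color = fm.calibrate()
    threshold = (30, 30, 30)  # per-channel tolerance (illustrative)
    min_pixels = 50           # ignore blobs smaller than this (illustrative)
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        mask, (x, y) = fm.get_position(color, threshold, min_pixels)
        fm.generate_capture_to_show()
        if x >= 0:
            fm.show_centroid_position((x, y))
        fm.show_in_screen(color)
        pygame.display.flip()
    fm.stop_camera()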
|
AlanJAS/followme
|
followme.py
|
Python
|
gpl-3.0
| 7,788
|
# Copyright 2011-2019 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
# make pytest happy:
# in a pytest context module dependencies are not loaded,
# so the geo fields would otherwise be unknown
from odoo.addons import base_geoengine # noqa
class DummyAbstractModel(models.AbstractModel):
_name = 'test.abstract.dummy'
_description = 'test.abstract.dummy'
geo_line = fields.GeoLine(string="Line")
class DummyInheritAbstract(models.Model):
_name = 'test.dummy.from_abstract'
_inherit = 'test.abstract.dummy'
name = fields.Char()
class DummyModel(models.Model):
_name = 'test.dummy'
_description = 'test.dummy'
name = fields.Char()
geo_multipolygon = fields.GeoMultiPolygon()
geo_point = fields.GeoPoint()
class DummyModelRelated(models.Model):
_name = 'test.dummy.related'
_description = 'test.dummy.related'
dummy_test_id = fields.Many2one(
comodel_name='test.dummy', string='Dummy test')
dummy_geo_multipolygon = fields.GeoMultiPolygon(
related='dummy_test_id.geo_multipolygon', string='Related Geom')
|
OCA/geospatial
|
test_base_geoengine/models.py
|
Python
|
agpl-3.0
| 1,137
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import atexit
import datetime
import os
from typing import List, Union
from qcelemental.util import which, which_import
from . import core
# Numpy place holder for files and cleanup
numpy_files = []
def register_numpy_file(filename):
if not filename.endswith('.npy'): filename += '.npy'
if filename not in numpy_files:
numpy_files.append(filename)
def clean_numpy_files():
for nfile in numpy_files:
os.unlink(nfile)
atexit.register(clean_numpy_files)
def exit_printing(start_time: datetime.datetime = None, success: bool = None) -> None:
"""Prints the exit time and status.
Parameters
----------
start_time
starting time from which the elapsed time is computed.
success
Provides a success flag, otherwise uses the ``_success_flag_`` global variable
Returns
-------
None
"""
end_time = datetime.datetime.now()
core.print_out("\n Psi4 stopped on: {}".format(end_time.strftime('%A, %d %B %Y %I:%M%p')))
if start_time is not None:
run_time = end_time - start_time
run_time = str(run_time).split('.')
run_time = run_time[0] + '.' + run_time[1][:2]
core.print_out("\n Psi4 wall time for execution: {}\n".format(run_time))
if success is None:
success = _success_flag_
if success:
core.print_out("\n*** Psi4 exiting successfully. Buy a developer a beer!\n")
else:
core.print_out("\n*** Psi4 encountered an error. Buy a developer more coffee!\n")
core.print_out("*** Resources and help at github.com/psi4/psi4.\n")
_success_flag_ = False
# Working directory
_input_dir_ = os.getcwd()
def get_input_directory():
return _input_dir_
# Add-Ons
def _CMake_to_Py_boolean(cmakevar):
if cmakevar.upper() in ["1", "ON", "YES", "TRUE", "Y"]:
return True
else:
return False
def psi4_which(command, *, return_bool: bool = False, raise_error: bool = False,
raise_msg: str = None) -> Union[bool, None, str]:
"""Test to see if a command is available in Psi4 search path.
Returns
-------
str or None
By default, returns command path if command found or `None` if not.
Environment is $PSIPATH:$PATH, less any None values.
bool
When `return_bool=True`, returns whether or not found.
Raises
------
ModuleNotFoundError
When `raises_error=True` and command not found.
"""
lenv = (os.pathsep.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(os.pathsep) if x != '']) +
os.pathsep + os.environ.get('PATH', ''))
return which(command=command, return_bool=return_bool, raise_error=raise_error, raise_msg=raise_msg, env=lenv)
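# Hedged usage sketch (not part of the original module): how psi4_which()
# behaves for an external binary; "dftd3" is used purely as an example name.
def _demo_psi4_which():
    path = psi4_which("dftd3")                     # full path, or None if absent
    found = psi4_which("dftd3", return_bool=True)  # True/False
    return path, found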
_addons_ = {
"ambit": _CMake_to_Py_boolean("@ENABLE_ambit@"),
"chemps2": _CMake_to_Py_boolean("@ENABLE_CheMPS2@"),
"dkh": _CMake_to_Py_boolean("@ENABLE_dkh@"),
"libefp": which_import("pylibefp", return_bool=True),
"erd": _CMake_to_Py_boolean("@ENABLE_erd@"),
"gdma": _CMake_to_Py_boolean("@ENABLE_gdma@"),
"ipi": which_import("ipi", return_bool=True),
"pcmsolver": _CMake_to_Py_boolean("@ENABLE_PCMSolver@"),
"cppe": which_import("cppe", return_bool=True),
"simint": _CMake_to_Py_boolean("@ENABLE_simint@"),
"dftd3": psi4_which("dftd3", return_bool=True),
"cfour": psi4_which("xcfour", return_bool=True),
"mrcc": psi4_which("dmrcc", return_bool=True),
"gcp": psi4_which("gcp", return_bool=True),
"v2rdm_casscf": which_import("v2rdm_casscf", return_bool=True),
"gpu_dfcc": which_import("gpu_dfcc", return_bool=True),
"forte": which_import("forte", return_bool=True),
"snsmp2": which_import("snsmp2", return_bool=True),
"resp": which_import("resp", return_bool=True),
"psi4fockci": which_import("psi4fockci", return_bool=True),
"adcc": which_import("adcc", return_bool=True),
"mdi": which_import("mdi", return_bool=True),
"cct3": which_import("cct3", return_bool=True),
"dftd4": which_import("dftd4", return_bool=True),
}
def addons(request: str = None) -> Union[bool, List[str]]:
"""Returns boolean of whether Add-On *request* is available to Psi4,
either compiled in or searchable in $PSIPATH:$PATH, as relevant. If
*request* not passed, returns list of available Add-Ons.
"""
if request is None:
return sorted([k for k, v in _addons_.items() if v])
return _addons_[request.lower()]
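# Hedged usage sketch (not part of the original module); lookups are
# case-insensitive because addons() lowercases the request.
def _demo_addons():
    available = addons()             # e.g. ['dftd3', 'gcp', ...] on a given build
    has_chemps2 = addons("CheMPS2")  # boolean for one Add-On
    return available, has_chemps2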
# Testing
def test(extent: str = "full", extras: List = None) -> int:
"""Runs a test suite through pytest.
Parameters
----------
extent
{'smoke', 'quick', 'full', 'long'}
All choices are defined, but choices may be redundant in some projects.
* _smoke_ will be minimal "is-working?" test(s).
* _quick_ will be as much coverage as can be got quickly, approx. 1/3 tests.
* _full_ will be the whole test suite, less some exceedingly long outliers.
* _long_ will be the whole test suite.
extras
Additional arguments to pass to `pytest`.
Returns
-------
int
Return code from `pytest.main()`. 0 for pass, 1 for fail.
"""
try:
import pytest
except ImportError:
raise RuntimeError('Testing module `pytest` is not installed. Run `conda install pytest`')
abs_test_dir = os.path.sep.join([os.path.abspath(os.path.dirname(__file__)), "tests"])
command = ['-rws', '-v']
if extent.lower() == 'smoke':
command.extend(['-m', 'smoke'])
elif extent.lower() == 'quick':
command.extend(['-m', 'quick or smoke'])
elif extent.lower() == 'full':
command.extend(['-m', 'not long'])
elif extent.lower() == 'long':
pass
if extras is not None:
command.extend(extras)
command.extend(['--capture=sys', abs_test_dir])
retcode = pytest.main(command)
return retcode
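# Hedged usage sketch (not part of the original module): run the quick
# subset with one extra pytest flag, per the docstring above.
def _demo_test_suite():
    return test(extent="quick", extras=["-x"])  # 0 on pass, 1 on failure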
|
jturney/psi4
|
psi4/extras.py
|
Python
|
lgpl-3.0
| 6,830
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.vision.v1p1beta1 ImageAnnotator API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import grpc
from google.cloud.vision_v1p1beta1.gapic import enums
from google.cloud.vision_v1p1beta1.gapic import image_annotator_client_config
from google.cloud.vision_v1p1beta1.gapic.transports import (
image_annotator_grpc_transport,
)
from google.cloud.vision_v1p1beta1.proto import image_annotator_pb2
from google.cloud.vision_v1p1beta1.proto import image_annotator_pb2_grpc
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-vision").version
class ImageAnnotatorClient(object):
"""
Service that performs Google Cloud Vision API detection tasks over client
images, such as face, landmark, logo, label, and text detection. The
ImageAnnotator service returns detected entities from the images.
"""
SERVICE_ADDRESS = "vision.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.vision.v1p1beta1.ImageAnnotator"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ImageAnnotatorClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
):
"""Constructor.
Args:
transport (Union[~.ImageAnnotatorGrpcTransport,
Callable[[~.Credentials, type], ~.ImageAnnotatorGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = image_annotator_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=image_annotator_grpc_transport.ImageAnnotatorGrpcTransport,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = image_annotator_grpc_transport.ImageAnnotatorGrpcTransport(
address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def batch_annotate_images(
self,
requests,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Run image detection and annotation for a batch of images.
Example:
>>> from google.cloud import vision_v1p1beta1
>>>
>>> client = vision_v1p1beta1.ImageAnnotatorClient()
>>>
>>> # TODO: Initialize `requests`:
>>> requests = []
>>>
>>> response = client.batch_annotate_images(requests)
Args:
requests (list[Union[dict, ~google.cloud.vision_v1p1beta1.types.AnnotateImageRequest]]): Individual image annotation requests for this batch.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.vision_v1p1beta1.types.AnnotateImageRequest`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.vision_v1p1beta1.types.BatchAnnotateImagesResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "batch_annotate_images" not in self._inner_api_calls:
self._inner_api_calls[
"batch_annotate_images"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_annotate_images,
default_retry=self._method_configs["BatchAnnotateImages"].retry,
default_timeout=self._method_configs["BatchAnnotateImages"].timeout,
client_info=self._client_info,
)
request = image_annotator_pb2.BatchAnnotateImagesRequest(requests=requests)
return self._inner_api_calls["batch_annotate_images"](
request, retry=retry, timeout=timeout, metadata=metadata
)
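# --- Hedged usage sketch (not part of the original file) ---
# Constructing the client from a service-account key and sending one label
# detection request. The key path and image URI are placeholders; the
# request shape follows the batch_annotate_images() docstring above, and
# enums.Feature.Type.LABEL_DETECTION is assumed available in this version.
def _demo_batch_annotate():
    client = ImageAnnotatorClient.from_service_account_file("key.json")
    requests = [{
        "image": {"source": {"image_uri": "gs://my-bucket/image.jpg"}},
        "features": [{"type": enums.Feature.Type.LABEL_DETECTION}],
    }]
    return client.batch_annotate_images(requests)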
|
dhermes/google-cloud-python
|
vision/google/cloud/vision_v1p1beta1/gapic/image_annotator_client.py
|
Python
|
apache-2.0
| 9,918
|
import sys
import argparse
from twilight.lib import tweets
import logging
logger = logging.getLogger(__name__)
def main(parser):
'''
Search TTV2 lines for matches within a certain bounding box
'''
parser.add_argument('north', type=float)
parser.add_argument('east', type=float)
parser.add_argument('south', type=float)
parser.add_argument('west', type=float)
parser.add_argument('input', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
opts = parser.parse_args()
from geo.types import BoundingBox
bounding_box = BoundingBox(opts.west, opts.south, opts.east, opts.north)
for i, line in enumerate(opts.input):
tweet = tweets.TTV2.from_line(line)
if tweet.coordinates != '':
lon, lat = map(float, tweet.coordinates.split(','))
if bounding_box.contains(lon, lat):
print >> opts.output, line.encode('utf8')
if i % 1000 == 0:
logger.debug('Progress: line # %d', i)
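# --- Hedged illustration (not part of the original file) ---
# A minimal stand-in for geo.types.BoundingBox with the interface used above
# (constructor order west, south, east, north; contains(lon, lat)), in case
# the real class is unavailable. The real implementation may differ.
class SimpleBoundingBox(object):
    def __init__(self, west, south, east, north):
        self.west, self.south, self.east, self.north = west, south, east, north

    def contains(self, lon, lat):
        return self.west <= lon <= self.east and self.south <= lat <= self.north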
|
chbrown/tweetjobs
|
tweetjobs/cli/bbox.py
|
Python
|
mit
| 1,095
|
# -*- coding: UTF-8 -*-
"""Hash-related checking functions and argument types.
"""
import re
__all__ = __features__ = []
# hash check functions
__all__ += ["is_hash", "is_md5", "is_sha1", "is_sha224", "is_sha256",
"is_sha384", "is_sha512"]
is_hash = lambda h: any(__check_hash(h, a, False) is not None for a in \
HASH_LEN.keys())
is_md5 = lambda h: __check_hash(h, "md5", False) is not None
is_sha1 = lambda h: __check_hash(h, "sha1", False) is not None
is_sha224 = lambda h: __check_hash(h, "sha224", False) is not None
is_sha256 = lambda h: __check_hash(h, "sha256", False) is not None
is_sha384 = lambda h: __check_hash(h, "sha384", False) is not None
is_sha512 = lambda h: __check_hash(h, "sha512", False) is not None
# hash-related argument types
__all__ += ["any_hash", "md5_hash", "sha1_hash", "sha224_hash", "sha256_hash",
"sha512_hash"]
HASH_LEN = {'md5': 32, 'sha1': 40, 'sha224': 56, 'sha256': 64, 'sha384': 96,
'sha512': 128}
def __check_hash(s, algo, fail=True):
l = HASH_LEN[algo]
if re.match(r"(?i)^[a-f0-9]{%d}$" % l, s) is None:
if fail:
raise ValueError("Bad {} hash".format(algo))
return
return s
md5_hash = lambda h: __check_hash(h, "md5")
sha1_hash = lambda h: __check_hash(h, "sha1")
sha224_hash = lambda h: __check_hash(h, "sha224")
sha256_hash = lambda h: __check_hash(h, "sha256")
sha384_hash = lambda h: __check_hash(h, "sha384")
sha512_hash = lambda h: __check_hash(h, "sha512")
def any_hash(h):
if not any(__check_hash(h, a, False) is not None for a in HASH_LEN.keys()):
raise ValueError("Bad hash")
return h
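# --- Hedged usage sketch (not part of the original module) ---
# The is_* helpers return booleans; the *_hash argument types return the
# value unchanged or raise ValueError, matching what argparse expects of a
# `type=` callable. The digest below is the MD5 of the empty string.
def _demo_hash_types():
    d = "d41d8cd98f00b204e9800998ecf8427e"
    assert is_md5(d) and is_hash(d)
    assert md5_hash(d) == d
    try:
        md5_hash("not-a-hash")
    except ValueError:
        pass  # raised as expected for malformed input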
|
dhondta/tinyscript
|
tinyscript/helpers/data/types/hash.py
|
Python
|
agpl-3.0
| 1,678
|
# Takes an eQTL file and outputs allele frequencies and deviation from HWE.
#0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
#scaf pac locus freq fold effect dom d_over_a f p hom1 het hom2 Rsquared f(hom1/het/hom2)
#scaf8 20911662 9414108 0.801075268817 0.198924731183 1852.76344538 -2564.06193821 -1.38391220132 2.88186576785 0.0612160359523 3705.52689076 -711.298492832 0 0.0601869981805 59,31,3
import sys
from scipy import stats
import numpy
# hwe takes observed genotype counts (as a list) and the ref-allele frequency (p); returns the chi-square p-value
def hwe(obs, freq):
hwObs = numpy.array([float(x) for x in obs])
sampleSize = sum(hwObs)
p = float(freq)
hwExp = numpy.array([x*sampleSize for x in [p*p, 2*p*(1.0-p),(1.0-p)*(1.0-p)]])
chisq = stats.chisquare(hwObs, hwExp, 1)
return(chisq[1])
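# Hedged usage sketch (not part of the original script): genotype counts and
# ref-allele frequency taken from the example header line at the top.
def _demo_hwe():
    return hwe([59, 31, 3], 0.801075268817)  # chi-square p-value, 1 d.o.f.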
def __main__():
eqtlFile = open(sys.argv[1],'r')
out = open(sys.argv[1]+".hwe",'w')
out.write("maf obs exp p\n")
for line in eqtlFile:
ent = line.split()
if ent[0] == "scaf":
continue
p = float(ent[3])
maf = float(ent[4])
#hwFreqs = [maf*maf, 2.0*maf*(1.0-maf), (1.0-maf)*(1.0-maf)]
hwObs = numpy.array([float(x) for x in ent[14].split(',')])
sampleSize = sum(hwObs)
hwExp = numpy.array([x*sampleSize for x in [p*p, 2*p*(1.0-p),(1.0-p)*(1.0-p)]])
chisq = stats.chisquare(hwObs, hwExp,1)
out.write(" ".join([str(maf),','.join([str(x) for x in hwObs]),','.join([str(x) for x in hwExp]),str(chisq[1]),"\n"]))
|
emjosephs/eQTL
|
calc_HW_deviations.py
|
Python
|
mit
| 1,527
|
import random
class Customer:
wallet = 1000
bought = False
def see(self,prev_customer):
if prev_customer is not None:
return prev_customer.bought
else:
return False
def decide(self,price,prev_customer):
if self.see(prev_customer):
self.wtp = self.wtp + 10
if self.wtp >= price:
self.wallet = self.wallet - price
self.bought = True
#print "bought"
def __init__(self):
self.wtp = random.randint(0,100)
num_customer = 100
customers = []
for index in range(0,num_customer):
customer = Customer()
customers.append(customer)
price = 50
prev_customer = None
for customer in customers:
customer.decide(price,prev_customer)
prev_customer = customer
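# --- Hedged follow-up (not part of the original script) ---
# Summarise the cascade: each purchase raises the next customer's
# willingness to pay by 10, so sales tend to cluster once someone buys.
num_bought = sum(1 for c in customers if c.bought)
print "bought:", num_bought, "of", num_customer, "at price", price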
|
omsktransmash/ScientificPythonTutorial
|
jamaica/purchase_oop.py
|
Python
|
apache-2.0
| 841
|
__author__ = 'alexei'
from util import *
import gensim
import datagen
import math
from gensim import corpora, models, similarities
import operator
# from sent2vec.word2vec import Sent2Vec
def train_paragraph_vectors():
p = Parser()
set = build_dataset(p, "labeled", inject=True)
print set[:2]
exit(0)
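    # NOTE (editor's comment): everything below is unreachable -- exit(0) above
    # returns first, and the Sent2Vec import is commented out at module level.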
model = Sent2Vec(set, model_file='../data/word2vec_augumented.model')
model.save_sent2vec_format("../paragraph_model.vec")
def build_corpus():
p = Parser()
ans = []
set0 = build_dataset(p, "rotten")
set1 = build_dataset(p, "labeled", inject=True)
set2 = build_dataset(p, "unlabeled")
set3 = build_dataset(p, "test")
ans += set0
ans += set1
ans += set2
ans += set3
counter = {}
res = set()
for phrase in ans:
for word in phrase:
if word in res:
pass
else:
if word in counter:
counter[word] += 1
if counter[word] > 10:
res.add(word)
else:
counter[word] = 1
    # Dictionary expects an iterable of token lists, not a flat list of words;
    # wrap the vocabulary in a single document so each word becomes one token
    dictionary = corpora.Dictionary([list(res)])
dictionary.save('../data/full_dataset.dict')
print dictionary
return ans
def word2vec_train():
sentences = build_corpus()
print "TrainWord2Vec: ", len(sentences)
t = Timer()
model = gensim.models.Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
t.measure("word2vec model built in: ")
model.save('../data/word2vec_augumented.model')
return model
def get_feature_cluster(model, seed_list):
seed_set = set(seed_list)
features = set()
while len(seed_list) > 0:
seed = seed_list.pop(0)
features.add(seed)
# print seed
try:
sim_words = model.most_similar_cosmul(positive=list(features), topn=20)
except KeyError:
return features
for feat in sim_words:
word, score = feat
if score < 0.89:
break
if not "_" in word:
if word not in seed_set:
seed_list.append(word)
seed_set.add(word)
return features
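# Usage sketch (editor's addition): starting from one seed word, the loop above
# keeps absorbing underscore-free words whose cosmul similarity to the growing
# set stays >= 0.89, e.g. (assuming the trained model from word2vec_fun()):
#   model = word2vec_fun()
#   print get_feature_cluster(model, ["great"])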
def word2vec_fun():
model = gensim.models.Word2Vec.load('../data/word2vec_augumented.model')
model.init_sims(replace=True)
# print model.most_similar_cosmul(positive=['begin'], topn=20)
# print nltk.pos_tag(["completely", "undeserved", "prize"])
# print model["great"]
# for actor in good_actors:
# print model.most_similar_cosmul(positive=['floundering'], topn=20)
# print model.most_similar_cosmul(positive=['actress'], negative=["man"], topn=10)
# print model.similarity('enthralls', 'great')
# print model.similarity('NEG', 'great')
return model
def cluster_wordvectors():
model = word2vec_fun()
dictionary = corpora.Dictionary.load('../data/full_dataset.dict')
prior = load_words_prior()
(clusters, word_cluster) = feature_extraction()
new_words = set()
for word in dictionary.token2id:
if "_" in word or word in word_cluster:
continue
label = 0
if word in prior:
label = prior[word]
best_score = 0
best_idx = -1
cluster = get_feature_cluster(model, [word])
if len(cluster) == 1:
new_words.add(word)
else:
for idx in clusters:
other = clusters[idx]
if label == 0 or label == other[0]:
features = other[1]
common = cluster & features
if len(common) >= 2 and len(common) > best_score:
best_score = len(common)
best_idx = idx
# print label, word
# print cluster
#
# print best_score
# print best_idx
# print clusters[best_idx][1]
if best_idx == -1:
if len(cluster) > 1:
# print "new cluster", cluster
                    new_idx = len(clusters)  # capture before insertion, else the index is off by one
                    clusters[new_idx] = (label, cluster)
                    for w in cluster:
                        word_cluster[w] = new_idx
else:
new_words.add(word)
else:
# print "append word ", word, "to cluster", best_idx
clusters[best_idx][1].add(word)
clusters[len(clusters)] = (0, new_words)
with open("../data/full_clusters.csv", "w") as outfile:
for idx in clusters:
cluster = clusters[idx]
outfile.write("%d" % int(cluster[0]))
for word in cluster[1]:
try:
outfile.write("\t%s" % word)
except UnicodeEncodeError:
continue
outfile.write("\n")
import nltk
def spawn_feature_vectors():
model = word2vec_fun()
with open("../data/feature_vectors.csv", "a") as outfile:
feature_vector = get_feature_cluster(model, ["insight"])
string = ""
for feature in feature_vector:
string += "\t" + feature
outfile.write("%s\n" % string)
def feature_extraction(path="new_feature_vectors.csv"):
    # default restored from the stray comment that used to sit below; callers
    # in this file invoke feature_extraction() with no argument
    feature_vectors = {}
    feature_map = {}
    path = "../data/" + path
for idx, line in enumerate(open(path, "rb")):
elems = line.split()
feats = set(elems[1:])
feature_vectors[idx] = (elems[0], feats)
for elem in feats:
if elem in feature_map:
print "Duplicate seed!!!"
print elem, feats, feature_vectors[feature_map[elem]]
exit(0)
feature_map[elem] = idx
return feature_vectors, feature_map
from spacy.en import English
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import TanhLayer
import numpy as np
def nn_attempt():
#(feature_vector, feature_map) = feature_extraction("full_clusters.csv")
# data = extract_data("labeled")
# parser = Parser()
# num_reviews = len(data["review"])
model = word2vec_fun()
num_features = len(model["very"])
ds = SupervisedDataSet(100, 1)
# "actor" index is 1
empty = np.zeros((num_features,), dtype="float32")
positive = ["fantastic", "outstanding", "awesome", "magnificent", "marvelous", "fabulous", "fine", "impressive", "quality", "tremendous", "phenomenal",
"superb", "terrific", "amazing", "wonderful", "amusing", "pretty", "nice", "good", "exceptional", "excellent", "talented", "great", "splendid",
"brilliant", "neat", "interesting", "commendable", "greatest",
"finest", "best", "beautiful", "lovely", "wonderfully", "brilliantly", "superbly", "beautifully", "gorgeous", "excellently", "capably", "masterfully",
"expertly", "brilliantly", "incredible"]
positive2 = ["9/10", "5/5", "4/5", "8/10", "7/10", "10/10"]
negative2 = ["1/10", "2/10", "3/10", "4/10", "5/10", "6/10"]
prefix = np.add(empty, model["rating"])
prefix = np.add(prefix, model["movie"])
for w in positive2:
ds.addSample(tuple(list(np.add(prefix, model[w]))), (1,))
for w in positive2:
ds.addSample(tuple(list(np.add(prefix, model[w]))), (1,))
for w in negative2:
ds.addSample(tuple(list(np.add(prefix, model[w]))), (0,))
for w in negative2:
ds.addSample(tuple(list(np.add(prefix, model[w]))), (0,))
negative = []
# not_vec = np.add(empty, model["not"])
# idx = 0
# for w in positive:
# ds.addSample(tuple(list(np.add(empty, model[w]))), (1,))
# ds.addSample(tuple(list(np.add(not_vec, model[w]))), (0,))
# ds.addSample(tuple(list(np.add(features, model["good"]))), (1,))
# ds.addSample(tuple(list(np.add(features, model["great"]))), (1,))
# ds.addSample(tuple(list(np.add(features, model["amazing"]))), (1,))
# ds.addSample(tuple(list(np.add(features, model["terrible"]))), (0,))
# ds.addSample(tuple(list(np.add(features, model["terrible"]))), (0,))
# ds.addSample(tuple(list(np.add(features, model["bad"]))), (0,))
# ds.addSample(tuple(list(np.add(features, model["best"]))), (1,))
# ds.addSample(tuple(list(np.add(t1, model["good"]))), (0,))
# ds.addSample(tuple(list(np.add(t1, model["great"]))), (0,))
# ds.addSample(tuple(list(np.add(t1, model["amazing"]))), (0,))
# ds.addSample(tuple(list(np.add(t1, model["bad"]))), (1,))
print len(ds)
net = buildNetwork(100, 500, 1, bias=True)#, hiddenclass=TanhLayer)
trainer = BackpropTrainer(net, ds)
trainer.trainUntilConvergence(verbose=True, validationProportion=0.15, maxEpochs=100000, continueEpochs=10)
ds2 = SupervisedDataSet(100, 1)
ds2.addSample(tuple(list(np.add(prefix, model["7/10"]))), (1,))
print net.activateOnDataset(ds)
print net.activateOnDataset(ds2)
# for idx in xrange(num_reviews):
# sentences = parser.extract_terms(data["review"][idx])
# label = data["sentiment"][idx]
#
#
#
#
# print sentences, label
# exit(0)
def main():
# build_corpus()
# word2vec_train()
# word2vec_fun()
# print feature_extraction()
# cluster_wordvectors()
# test()
# spawn_feature_vectors()
# feature_builder()
# nn_attempt()
train_paragraph_vectors()
pass
def test():
model = word2vec_fun()
p = Parser()
ref = datagen.reference_labels()
data = extract_data("test")
num_reviews = len(data["review"])
reviews = []
for idx in xrange(num_reviews):
sentences = p.extract_terms(data["review"][idx])
print "label", ref[data["id"][idx].replace("\"", "")]
if idx == 1:
for sentence in sentences:
print sentence
exit(0)
# print get_feature_cluster(model, sentence)
# exit(0)
#print sentences
# exit(0)
def update_dict(fdict, word, label):
if word in fdict:
(neg, pos) = fdict[word]
else:
(neg, pos) = (0, 0)
if label:
fdict[word] = (neg, pos + 1)
else:
fdict[word] = (neg + 1, pos)
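# Example (editor's note): update_dict(d, "w", 1) followed by update_dict(d, "w", 0)
# leaves d["w"] == (1, 1), i.e. one negative and one positive observation.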
def feature_builder():
feature_dict = {}
feature_vectors, feature_map = feature_extraction()
num_vectors = len(feature_vectors) + 1
feature_vectors[num_vectors] = set()
print "number of feature vectors", num_vectors
model = word2vec_fun()
parser = Parser()
data = extract_data("labeled")
num_reviews = len(data["review"])
samples = []
sample_count = [0, 0]
for idx in xrange(num_reviews):
sentences = parser.extract_terms(data["review"][idx])
label = int(data["sentiment"][idx])
sample_count[label] += 1
for sentence in sentences:
prior = -1
sample = []
for word in sentence:
if word in feature_map and feature_map[word] != num_vectors:
sample.append((feature_map[word], word))
prior = feature_map[word]
else:
best_score = 0.0
best_id = 0
# for jdx in feature_vectors:
# vec = feature_vectors[jdx]
# score = 0.0
# for elem in vec:
# score += model.similarity(word, elem)
# score /= len(vec)
# if score > best_score:
# best_score, best_id = score, jdx
#
# print best_score, best_id
#
# if best_score > 0.6:
# sample.append((best_id, word))
# feature_map[word] = best_id
# else:
if prior == num_vectors:
(feat_id, gram) = sample[len(sample) - 1]
sample[len(sample) - 1] = (feat_id, gram + "_" + word)
else:
sample.append((num_vectors, word))
feature_map[word] = num_vectors
feature_vectors[num_vectors].add(word)
prior = num_vectors
# for (_, w) in sample:
# update_dict(feature_dict, w, label)
if len(sample) > 1:
samples.append((sample, label))
# print samples
# exit(0)
# break
# with open("../data/content_structure.csv", "a") as outfile:
# for sample in samples:
# line = ""
# for feature in sample:
# line += "\t" + str(feature[0])
# outfile.write("%s\n" % line)
with open("../data/feature_sequence.csv", "wb") as outfile:
result_dict = {}
for sample in samples:
feat_list = []
label = sample[1]
for feature in sample[0]:
feat_list.append(feature[0])
_len = len(feat_list)
if _len > 5:
feat_list.pop(0)
if _len >= 4:
v = (feat_list[-4], feat_list[-3], feat_list[-2], feat_list[-1])
update_dict(result_dict, v, label)
if _len >= 3:
v = (feat_list[-3], feat_list[-2], feat_list[-1])
update_dict(result_dict, v, label)
if _len >= 2:
v = (feat_list[-2], feat_list[-1])
update_dict(result_dict, v, label)
outfile.write("\t%s\t%s\t\n" % (sample_count[0], sample_count[1]))
for value in result_dict:
(neg, pos) = result_dict[value]
outfile.write("%s\t%s\t%s\t%s\t\n" % (value, neg, pos, pos - neg))
#
# total_count = sample_count[0] + sample_count[1]
# sample_count[0] /= total_count
# sample_count[1] /= total_count
#
# print sample_count
#
# for feature in feature_dict:
# (neg, pos) = feature_dict[feature]
# if neg + pos >= 5:
# outfile.write("%s\t%s\t%s\n" % (feature, neg, pos))
# # t = Timer()
# for idx in xrange(num_reviews):
#
# # if idx % 10 == 0:
# print (idx + 1), len(feature_vectors), "..."
#
# sentences = parser.extract_terms(data["review"][idx])
#
# for sentence in sentences:
# for word in sentence:
# if word not in feature_map:
# vector = get_feature_cluster(model, [word])
#
# if len(vector) == 1:
# continue
#
# jdx = len(feature_vectors) + 1
# feature_map[word] = jdx
# feature_vectors[jdx] = vector
# feature_count[jdx] = 1
# else:
# feature_count[feature_map[word]] += 1
#
# print feature_vectors
# print feature_count
# exit(0)
# reviews += sentences
# with open("../data/feature_vectors.csv", "wb") as outfile:
# for idx in feature_count:
# outfile.write("%s\t%s\n" % (feature_count[idx], feature_vectors[idx]))
# t.measure("labeled" + " dataset built in: ")
# return reviews
if __name__ == "__main__":
main()
|
johnthebrave/sentiment-mining
|
src/old_scrips/main.py
|
Python
|
gpl-2.0
| 15,630
|
import unittest
from test.support import (verbose, refcount_test, run_unittest,
strip_python_stderr)
import sys
import time
import gc
import weakref
try:
import threading
except ImportError:
threading = None
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
def __init__(self, i):
self.i = i
self.loop = self
class GC_Detector(object):
# Create an instance I. Then gc hasn't happened again so long as
# I.gc_happened is false.
def __init__(self):
self.gc_happened = False
def it_happened(ignored):
self.gc_happened = True
# Create a piece of cyclic trash that triggers it_happened when
# gc collects it.
self.wr = weakref.ref(C1055820(666), it_happened)
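# Usage sketch (editor's addition): any collection that reclaims the detector's
# cyclic trash fires the weakref callback, so
#   det = GC_Detector()
#   gc.collect()
#   assert det.gc_happened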
class Uncollectable(object):
"""Create a reference cycle with multiple __del__ methods.
An object in a reference cycle will never have zero references,
and so must be garbage collected. If one or more objects in the
cycle have __del__ methods, the gc refuses to guess an order,
and leaves the cycle uncollected."""
def __init__(self, partner=None):
if partner is None:
self.partner = Uncollectable(partner=self)
else:
self.partner = partner
def __del__(self):
pass
### Tests
###############################################################################
class GCTests(unittest.TestCase):
def test_list(self):
l = []
l.append(l)
gc.collect()
del l
self.assertEqual(gc.collect(), 1)
def test_dict(self):
d = {}
d[1] = d
gc.collect()
del d
self.assertEqual(gc.collect(), 1)
def test_tuple(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.collect()
del t
del l
self.assertEqual(gc.collect(), 2)
def test_class(self):
class A:
pass
A.a = A
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
class A(object):
pass
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
class A:
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_newinstance(self):
class A(object):
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
class B(list):
pass
class C(B, A):
pass
a = C()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
del B, C
self.assertNotEqual(gc.collect(), 0)
A.a = A()
del A
self.assertNotEqual(gc.collect(), 0)
self.assertEqual(gc.collect(), 0)
def test_method(self):
# Tricky: self.__init__ is a bound method, it references the instance.
class A:
def __init__(self):
self.init = self.__init__
a = A()
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
class A:
def __del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
class A(object):
def __del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_function(self):
# Tricky: f -> d -> f, code should call d.clear() after the exec to
# break the cycle.
d = {}
exec("def f(): pass\n", d)
gc.collect()
del d
self.assertEqual(gc.collect(), 2)
@refcount_test
def test_frame(self):
def f():
frame = sys._getframe()
gc.collect()
f()
self.assertEqual(gc.collect(), 1)
def test_saveall(self):
# Verify that cyclic garbage like lists show up in gc.garbage if the
# SAVEALL option is enabled.
# First make sure we don't save away other stuff that just happens to
# be waiting for collection.
gc.collect()
# if this fails, someone else created immortal trash
self.assertEqual(gc.garbage, [])
L = []
L.append(L)
id_L = id(L)
debug = gc.get_debug()
gc.set_debug(debug | gc.DEBUG_SAVEALL)
del L
gc.collect()
gc.set_debug(debug)
self.assertEqual(len(gc.garbage), 1)
obj = gc.garbage.pop()
self.assertEqual(id(obj), id_L)
def test_del(self):
        # __del__ methods can trigger collection; make this happen
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A:
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
def test_del_newclass(self):
        # __del__ methods can trigger collection; make this happen
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A(object):
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example, disposed tuples are not freed, but reused.
# To minimize variations, though, we first store the get_count() results
# and check them at the end.
@refcount_test
def test_get_count(self):
gc.collect()
a, b, c = gc.get_count()
x = []
d, e, f = gc.get_count()
self.assertEqual((b, c), (0, 0))
self.assertEqual((e, f), (0, 0))
# This is less fragile than asserting that a equals 0.
self.assertLess(a, 5)
# Between the two calls to get_count(), at least one object was
# created (the list).
self.assertGreater(d, a)
@refcount_test
def test_collect_generations(self):
gc.collect()
# This object will "trickle" into generation N + 1 after
# each call to collect(N)
x = []
gc.collect(0)
# x is now in gen 1
a, b, c = gc.get_count()
gc.collect(1)
# x is now in gen 2
d, e, f = gc.get_count()
gc.collect(2)
# x is now in gen 3
g, h, i = gc.get_count()
# We don't check a, d, g since their exact values depends on
# internal implementation details of the interpreter.
self.assertEqual((b, c), (1, 0))
self.assertEqual((e, f), (0, 1))
self.assertEqual((h, i), (0, 0))
def test_trashcan(self):
class Ouch:
n = 0
def __del__(self):
Ouch.n = Ouch.n + 1
if Ouch.n % 17 == 0:
gc.collect()
# "trashcan" is a hack to prevent stack overflow when deallocating
# very deeply nested tuples etc. It works in part by abusing the
# type pointer and refcount fields, and that can yield horrible
# problems when gc tries to traverse the structures.
# If this test fails (as it does in 2.0, 2.1 and 2.2), it will
# most likely die via segfault.
# Note: In 2.3 the possibility for compiling without cyclic gc was
# removed, and that in turn allows the trashcan mechanism to work
# via much simpler means (e.g., it never abuses the type pointer or
# refcount fields anymore). Since it's much less likely to cause a
# problem now, the various constants in this expensive (we force a lot
# of full collections) test are cut back from the 2.2 version.
gc.enable()
N = 150
for count in range(2):
t = []
for i in range(N):
t = [t, Ouch()]
u = []
for i in range(N):
u = [u, Ouch()]
v = {}
for i in range(N):
v = {1: v, 2: Ouch()}
gc.disable()
@unittest.skipUnless(threading, "test meaningless on builds without threads")
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
NESTING = 60
N_THREADS = 2
def sleeper_gen():
"""A generator that releases the GIL when closed or dealloc'ed."""
try:
yield
finally:
time.sleep(0.000001)
class C(list):
# Appending to a list is atomic, which avoids the use of a lock.
inits = []
dels = []
def __init__(self, alist):
self[:] = alist
C.inits.append(None)
def __del__(self):
# This __del__ is called by subtype_dealloc().
C.dels.append(None)
# `g` will release the GIL when garbage-collected. This
# helps assert subtype_dealloc's behaviour when threads
# switch in the middle of it.
g = sleeper_gen()
next(g)
# Now that __del__ is finished, subtype_dealloc will proceed
# to call list_dealloc, which also uses the trashcan mechanism.
def make_nested():
"""Create a sufficiently nested container object so that the
trashcan mechanism is invoked when deallocating it."""
x = C([])
for i in range(NESTING):
x = [C([x])]
del x
def run_thread():
"""Exercise make_nested() in a loop."""
while not exit:
make_nested()
old_switchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-5)
try:
exit = False
threads = []
for i in range(N_THREADS):
t = threading.Thread(target=run_thread)
threads.append(t)
for t in threads:
t.start()
time.sleep(1.0)
exit = True
for t in threads:
t.join()
finally:
sys.setswitchinterval(old_switchinterval)
gc.collect()
self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom()
b = Boom()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# a<->b are in a trash cycle now. Collection will invoke
# Boom.__getattr__ (to see whether a and b have __del__ methods), and
# __getattr__ deletes the internal "attr" attributes as a side effect.
# That causes the trash cycle to get reclaimed via refcounts falling to
# 0, thus mutating the trash graph as a side effect of merely asking
# whether __del__ exists. This used to (before 2.3b1) crash Python.
# Now __getattr__ isn't called.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
class Boom2:
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2()
b = Boom2()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# Much like test_boom(), except that __getattr__ doesn't break the
# cycle until the second time gc checks for __del__. As of 2.3b1,
# there isn't a second time, so this simply cleans up the trash cycle.
# We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
# reclaimed this way.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
# boom__new and boom2_new are exactly like boom and boom2, except use
# new-style classes.
class Boom_New(object):
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom_New()
b = Boom_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
class Boom2_New(object):
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2_New()
b = Boom2_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = gc.get_referents(alist)
got.sort()
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + list(range(5)))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
# Atomic built-in types are not tracked, user-defined objects and
# mutable containers are.
# NOTE: types with special optimizations (e.g. tuple) have tests
# in their own test files instead.
self.assertFalse(gc.is_tracked(None))
self.assertFalse(gc.is_tracked(1))
self.assertFalse(gc.is_tracked(1.0))
self.assertFalse(gc.is_tracked(1.0 + 5.0j))
self.assertFalse(gc.is_tracked(True))
self.assertFalse(gc.is_tracked(False))
self.assertFalse(gc.is_tracked(b"a"))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked(bytearray(b"a")))
self.assertFalse(gc.is_tracked(type))
self.assertFalse(gc.is_tracked(int))
self.assertFalse(gc.is_tracked(object))
self.assertFalse(gc.is_tracked(object()))
class UserClass:
pass
self.assertTrue(gc.is_tracked(gc))
self.assertTrue(gc.is_tracked(UserClass))
self.assertTrue(gc.is_tracked(UserClass()))
self.assertTrue(gc.is_tracked([]))
self.assertTrue(gc.is_tracked(set()))
def test_bug1055820b(self):
# Corresponds to temp2b.py in the bug report.
ouch = []
def callback(ignored):
ouch[:] = [wr() for wr in WRs]
Cs = [C1055820(i) for i in range(2)]
WRs = [weakref.ref(c, callback) for c in Cs]
c = None
gc.collect()
self.assertEqual(len(ouch), 0)
# Make the two instances trash, and collect again. The bug was that
# the callback materialized a strong reference to an instance, but gc
# cleared the instance's dict anyway.
Cs = None
gc.collect()
self.assertEqual(len(ouch), 2) # else the callbacks didn't run
for x in ouch:
# If the callback resurrected one of these guys, the instance
# would be damaged, with an empty __dict__.
self.assertEqual(x, None)
def test_garbage_at_shutdown(self):
import subprocess
code = """if 1:
import gc
class X:
def __init__(self, name):
self.name = name
def __repr__(self):
return "<X %%r>" %% self.name
def __del__(self):
pass
x = X('first')
x.x = x
x.y = X('second')
del x
gc.set_debug(%s)
"""
def run_command(code):
p = subprocess.Popen([sys.executable, "-Wd", "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
p.stdout.close()
p.stderr.close()
self.assertEqual(p.returncode, 0)
self.assertEqual(stdout.strip(), b"")
return strip_python_stderr(stderr)
stderr = run_command(code % "0")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown; use", stderr)
self.assertNotIn(b"<X 'first'>", stderr)
# With DEBUG_UNCOLLECTABLE, the garbage list gets printed
stderr = run_command(code % "gc.DEBUG_UNCOLLECTABLE")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown", stderr)
self.assertTrue(
(b"[<X 'first'>, <X 'second'>]" in stderr) or
(b"[<X 'second'>, <X 'first'>]" in stderr), stderr)
# With DEBUG_SAVEALL, no additional message should get printed
# (because gc.garbage also contains normally reclaimable cyclic
# references, and its elements get printed at runtime anyway).
stderr = run_command(code % "gc.DEBUG_SAVEALL")
self.assertNotIn(b"uncollectable objects at shutdown", stderr)
class GCCallbackTests(unittest.TestCase):
def setUp(self):
# Save gc state and disable it.
self.enabled = gc.isenabled()
gc.disable()
self.debug = gc.get_debug()
gc.set_debug(0)
gc.callbacks.append(self.cb1)
gc.callbacks.append(self.cb2)
self.othergarbage = []
def tearDown(self):
# Restore gc state
del self.visit
gc.callbacks.remove(self.cb1)
gc.callbacks.remove(self.cb2)
gc.set_debug(self.debug)
if self.enabled:
gc.enable()
# destroy any uncollectables
gc.collect()
for obj in gc.garbage:
if isinstance(obj, Uncollectable):
obj.partner = None
del gc.garbage[:]
del self.othergarbage
gc.collect()
def preclean(self):
# Remove all fluff from the system. Invoke this function
# manually rather than through self.setUp() for maximum
# safety.
self.visit = []
gc.collect()
garbage, gc.garbage[:] = gc.garbage[:], []
self.othergarbage.append(garbage)
self.visit = []
def cb1(self, phase, info):
self.visit.append((1, phase, dict(info)))
def cb2(self, phase, info):
self.visit.append((2, phase, dict(info)))
if phase == "stop" and hasattr(self, "cleanup"):
# Clean Uncollectable from garbage
uc = [e for e in gc.garbage if isinstance(e, Uncollectable)]
gc.garbage[:] = [e for e in gc.garbage
if not isinstance(e, Uncollectable)]
for e in uc:
e.partner = None
def test_collect(self):
self.preclean()
gc.collect()
# Algorithmically verify the contents of self.visit
# because it is long and tortuous.
# Count the number of visits to each callback
n = [v[0] for v in self.visit]
n1 = [i for i in n if i == 1]
n2 = [i for i in n if i == 2]
self.assertEqual(n1, [1]*2)
self.assertEqual(n2, [2]*2)
# Count that we got the right number of start and stop callbacks.
n = [v[1] for v in self.visit]
n1 = [i for i in n if i == "start"]
n2 = [i for i in n if i == "stop"]
self.assertEqual(n1, ["start"]*2)
self.assertEqual(n2, ["stop"]*2)
# Check that we got the right info dict for all callbacks
for v in self.visit:
info = v[2]
self.assertTrue("generation" in info)
self.assertTrue("collected" in info)
self.assertTrue("uncollectable" in info)
def test_collect_generation(self):
self.preclean()
gc.collect(2)
for v in self.visit:
info = v[2]
self.assertEqual(info["generation"], 2)
def test_collect_garbage(self):
self.preclean()
        # Each of these causes four objects to be garbage: two
        # Uncollectables and their instance dicts.
Uncollectable()
Uncollectable()
C1055820(666)
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 2)
self.assertEqual(info["uncollectable"], 8)
# We should now have the Uncollectables in gc.garbage
self.assertEqual(len(gc.garbage), 4)
for e in gc.garbage:
self.assertIsInstance(e, Uncollectable)
# Now, let our callback handle the Uncollectable instances
self.cleanup=True
self.visit = []
gc.garbage[:] = []
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 0)
self.assertEqual(info["uncollectable"], 4)
# Uncollectables should be gone
self.assertEqual(len(gc.garbage), 0)
class GCTogglingTests(unittest.TestCase):
def setUp(self):
gc.enable()
def tearDown(self):
gc.disable()
def test_bug1055820c(self):
# Corresponds to temp2c.py in the bug report. This is pretty
# elaborate.
c0 = C1055820(0)
# Move c0 into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_c0_alive = c0
del c0.loop # now only c1 keeps c0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
ouch = []
def callback(ignored):
ouch[:] = [c2wr()]
# The callback gets associated with a wr on an object in generation 2.
c0wr = weakref.ref(c0, callback)
c0 = c1 = c2 = None
# What we've set up: c0, c1, and c2 are all trash now. c0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's a
# global weakref to c2 (c2wr), but that weakref has no callback.
# There's also a global weakref to c0 (c0wr), and that does have a
# callback, and that callback references c2 via c2wr().
#
# c0 has a wr with callback, which references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
# the only object that has a weakref with a callback. gc clears c1
# and c2. Clearing c1 has the side effect of dropping the refcount on
# c0 to 0, so c0 goes away (despite that it's in an older generation)
# and c0's wr callback triggers. That in turn materializes a reference
# to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
junk = []
i = 0
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
for x in ouch:
# If the callback resurrected c2, the instance would be damaged,
# with an empty __dict__.
self.assertEqual(x, None)
def test_bug1055820d(self):
# Corresponds to temp2d.py in the bug report. This is very much like
# test_bug1055820c, but uses a __del__ method instead of a weakref
# callback to sneak in a resurrection of cyclic trash.
ouch = []
class D(C1055820):
def __del__(self):
ouch[:] = [c2wr()]
d0 = D(0)
# Move all the above into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_d0_alive = d0
del d0.loop # now only c1 keeps d0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
d0 = c1 = c2 = None
# What we've set up: d0, c1, and c2 are all trash now. d0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's
# a global weakref to c2 (c2wr), but that weakref has no callback.
# There are no other weakrefs.
#
# d0 has a __del__ method that references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see d0 at all. gc clears
# c1 and c2. Clearing c1 has the side effect of dropping the refcount
# on d0 to 0, so d0 goes away (despite that it's in an older
# generation) and d0's __del__ triggers. That in turn materializes
# a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
detector = GC_Detector()
junk = []
i = 0
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
for x in ouch:
# If __del__ resurrected c2, the instance would be damaged, with an
# empty __dict__.
self.assertEqual(x, None)
def test_main():
enabled = gc.isenabled()
gc.disable()
assert not gc.isenabled()
debug = gc.get_debug()
gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
try:
gc.collect() # Delete 2nd generation garbage
run_unittest(GCTests, GCTogglingTests, GCCallbackTests)
finally:
gc.set_debug(debug)
# test gc.enable() even if GC is disabled by default
if verbose:
print("restoring automatic collection")
# make sure to always test gc.enable()
gc.enable()
assert gc.isenabled()
if not enabled:
gc.disable()
if __name__ == "__main__":
test_main()
|
firmlyjin/brython
|
www/tests/unittests/test/test_gc.py
|
Python
|
bsd-3-clause
| 29,494
|
## pygame - Python Game Library
## Copyright (C) 2000-2003 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## pete@shinners.org
"sysfont, used in the font module to find system fonts"
import os, sys
#create simple version of the font name
def _simplename(name):
for char in '_ -':
name = name.replace(char, '')
name = name.lower()
name = name.replace('-', '')
name = name.replace("'", '')
return name
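# e.g. (editor's note) _simplename("Times New Roman") -> 'timesnewroman'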
#insert a font and style into the font dictionary
def _addfont(name, bold, italic, font, fontdict):
if not fontdict.has_key(name):
fontdict[name] = {}
fontdict[name][bold, italic] = font
#read the fonts on windows
def initsysfonts_win32():
import _winreg
fonts = {}
mods = 'demibold', 'narrow', 'light', 'unicode', 'bt', 'mt'
fontdir = os.path.join(os.environ['WINDIR'], "Fonts")
#this is a list of registry keys containing information
#about fonts installed on the system.
keys = []
#find valid registry keys containing font information.
possible_keys = [
r"SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts",
r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts"
]
for key_name in possible_keys:
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key_name)
keys.append(key)
except WindowsError:
pass
for key in keys:
fontdict = {}
for i in range(_winreg.QueryInfoKey(key)[1]):
try: name, font, t = _winreg.EnumValue(key,i)
except EnvironmentError: break
# try and handle windows unicode strings for some file names.
# here are two documents with some information about it:
# http://www.python.org/peps/pep-0277.html
# https://www.microsoft.com/technet/archive/interopmigration/linux/mvc/lintowin.mspx#ECAA
try:
font = str(font)
except UnicodeEncodeError:
# MBCS is the windows encoding for unicode file names.
try:
font = font.encode('MBCS')
except:
# no goodness with str or MBCS encoding... skip this font.
continue
if font[-4:].lower() not in [".ttf", ".ttc"]:
continue
if os.sep not in font:
font = os.path.join(fontdir, font)
if name[-10:] == '(TrueType)':
name = name[:-11]
name = name.lower().split()
bold = italic = 0
for m in mods:
if m in name:
name.remove(m)
if 'bold' in name:
name.remove('bold')
bold = 1
if 'italic' in name:
name.remove('italic')
italic = 1
name = ''.join(name)
name=_simplename(name)
_addfont(name, bold, italic, font, fonts)
return fonts
#read the fonts on osx (fill me in!)
def initsysfonts_darwin():
paths = ['/Library/Fonts',
'~/Library/Fonts',
'/Local/Library/Fonts',
'/Network/Library/Fonts']
fonts = {}
for p in paths:
if os.path.isdir(p):
pass
#os.path.walk(p, _fontwalk, fonts)
return fonts
#read the fonts on unix
def initsysfonts_unix():
fonts = {}
# we use the fc-list from fontconfig to get a list of fonts.
try:
        # note: we use popen3 so that if fc-list isn't there, stderr isn't printed.
flin, flout, flerr = os.popen3('fc-list : file family style')
except:
return fonts
try:
for line in flout:
try:
filename, family, style = line.split(':', 2)
if filename[-4:].lower() in ['.ttf', '.ttc']:
bold = style.find('Bold') >= 0
italic = style.find('Italic') >= 0
oblique = style.find('Oblique') >= 0
_addfont(_simplename(family), bold, italic or oblique, filename, fonts)
except:
# try the next one.
pass
except:
pass
return fonts
#create alias entries
def create_aliases():
aliases = (
('monospace', 'misc-fixed', 'courier', 'couriernew', 'console',
'fixed', 'mono', 'freemono', 'bitstreamverasansmono',
'verasansmono', 'monotype', 'lucidaconsole'),
('sans', 'arial', 'helvetica', 'swiss', 'freesans',
'bitstreamverasans', 'verasans', 'verdana', 'tahoma'),
('serif', 'times', 'freeserif', 'bitstreamveraserif', 'roman',
'timesroman', 'timesnewroman', 'dutch', 'veraserif',
'georgia'),
('wingdings', 'wingbats'),
)
for set in aliases:
found = None
fname = None
for name in set:
if Sysfonts.has_key(name):
found = Sysfonts[name]
fname = name
break
if not found:
continue
for name in set:
if not Sysfonts.has_key(name):
Sysalias[name] = found
Sysfonts = {}
Sysalias = {}
#initialize it all, called once
def initsysfonts():
if sys.platform == 'win32':
fonts = initsysfonts_win32()
elif sys.platform == 'darwin':
fonts = initsysfonts_darwin()
else:
fonts = initsysfonts_unix()
Sysfonts.update(fonts)
create_aliases()
if not Sysfonts: #dummy so we don't try to reinit
Sysfonts[None] = None
#the exported functions
def SysFont(name, size, bold=False, italic=False):
"""pygame.font.SysFont(name, size, bold=False, italic=False) -> Font
create a pygame Font from system font resources
This will search the system fonts for the given font
name. You can also enable bold or italic styles, and
the appropriate system font will be selected if available.
This will always return a valid Font object, and will
    fall back on the builtin pygame font if the given font
is not found.
Name can also be a comma separated list of names, in
    which case the set of names will be searched in order. Pygame
    uses a small set of common font aliases; if the specific
font you ask for is not available, a reasonable alternative
may be used.
"""
import pygame.font
if not Sysfonts:
initsysfonts()
gotbold = gotitalic = False
fontname = None
if name:
allnames = name
for name in allnames.split(','):
name = _simplename(name)
styles = Sysfonts.get(name)
if not styles:
styles = Sysalias.get(name)
if styles:
while not fontname:
plainname = styles.get((False, False))
fontname = styles.get((bold, italic))
if not fontname:
fontname = plainname
elif plainname != fontname:
gotbold = bold
gotitalic = italic
if fontname: break
font = pygame.font.Font(fontname, size)
if bold and not gotbold:
font.set_bold(1)
if italic and not gotitalic:
font.set_italic(1)
return font
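# Usage sketch (editor's addition), relying only on the documented behaviour
# above; pygame.init() and a display are assumed to be set up by the caller:
#   font = SysFont('arial,freesans', 24, bold=True)
#   surface = font.render('hello', 1, (255, 255, 255))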
def get_fonts():
"""pygame.font.get_fonts() -> list
get a list of system font names
Returns the list of all found system fonts. Note that
the names of the fonts will be all lowercase with spaces
removed. This is how pygame internally stores the font
names for matching.
"""
if not Sysfonts:
initsysfonts()
return Sysfonts.keys()
def match_font(name, bold=0, italic=0):
"""pygame.font.match_font(name, bold=0, italic=0) -> name
find the filename for the named system font
This performs the same font search as the SysFont()
function, only it returns the path to the TTF file
that would be loaded. The font name can be a comma
separated list of font names to try.
If no match is found, None is returned.
"""
if not Sysfonts:
initsysfonts()
fontname = None
allnames = name
for name in allnames.split(','):
name = _simplename(name)
styles = Sysfonts.get(name)
if not styles:
styles = Sysalias.get(name)
if styles:
while not fontname:
fontname = styles.get((bold, italic))
if italic:
italic = 0
elif bold:
bold = 0
elif not fontname:
fontname = styles.values()[0]
if fontname: break
return fontname
|
Daksh/Colors
|
pygame/sysfont.py
|
Python
|
gpl-3.0
| 9,569
|
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool by AusAid -**InaSAFE Wizard**
This module provides: Function Centric Wizard Step: Exposure Layer From Canvas
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'qgis@borysjurgiel.pl'
__revision__ = '$Format:%H$'
__date__ = '16/03/2016'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
# noinspection PyPackageRequirements
from PyQt4 import QtCore, QtGui
# noinspection PyPackageRequirements
from PyQt4.QtCore import pyqtSignature
# noinspection PyPackageRequirements
from PyQt4.QtGui import QListWidgetItem
from qgis.core import QgsMapLayerRegistry
from safe.gui.tools.wizard.wizard_step import get_wizard_step_ui_class
from safe.gui.tools.wizard.wizard_step import WizardStep
from safe.gui.tools.wizard.wizard_utils import layers_intersect
FORM_CLASS = get_wizard_step_ui_class(__file__)
class StepFcExpLayerFromCanvas(WizardStep, FORM_CLASS):
"""Function Centric Wizard Step: Exposure Layer From Canvas"""
def is_ready_to_next_step(self):
"""Check if the step is complete. If so, there is
no reason to block the Next button.
:returns: True if new step may be enabled.
:rtype: bool
"""
return bool(self.selected_canvas_explayer())
def get_previous_step(self):
"""Find the proper step when user clicks the Previous button.
:returns: The step to be switched to
:rtype: WizardStep instance or None
"""
new_step = self.parent.step_fc_explayer_origin
return new_step
def get_next_step(self):
"""Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep instance or None
"""
if self.parent.is_selected_layer_keywordless:
# insert keyword creation thread here
self.parent.parent_step = self
self.parent.existing_keywords = None
self.parent.set_mode_label_to_keywords_creation()
new_step = self.parent.step_kw_purpose
else:
if layers_intersect(self.parent.hazard_layer,
self.parent.exposure_layer):
new_step = self.parent.step_fc_agglayer_origin
else:
new_step = self.parent.step_fc_disjoint_layers
return new_step
# prevents actions being handled twice
# noinspection PyPep8Naming
@pyqtSignature('')
def on_lstCanvasExpLayers_itemSelectionChanged(self):
"""Update layer description label
.. note:: This is an automatic Qt slot
executed when the category selection changes.
"""
self.parent.exposure_layer = self.selected_canvas_explayer()
lblText = self.parent.get_layer_description_from_canvas(
self.parent.exposure_layer, 'exposure')
self.lblDescribeCanvasExpLayer.setText(lblText)
self.parent.pbnNext.setEnabled(True)
def selected_canvas_explayer(self):
"""Obtain the canvas exposure layer selected by user.
:returns: The currently selected map layer in the list.
:rtype: QgsMapLayer
"""
if self.lstCanvasExpLayers.selectedItems():
item = self.lstCanvasExpLayers.currentItem()
else:
return None
try:
layer_id = item.data(QtCore.Qt.UserRole)
except (AttributeError, NameError):
layer_id = None
layer = QgsMapLayerRegistry.instance().mapLayer(layer_id)
return layer
def list_compatible_canvas_layers(self):
"""Fill the list widget with compatible layers.
:returns: Metadata of found layers.
:rtype: list of dicts
"""
italic_font = QtGui.QFont()
italic_font.setItalic(True)
list_widget = self.lstCanvasExpLayers
# Add compatible layers
list_widget.clear()
for layer in self.parent.get_compatible_canvas_layers('exposure'):
item = QListWidgetItem(layer['name'], list_widget)
item.setData(QtCore.Qt.UserRole, layer['id'])
if not layer['keywords']:
item.setFont(italic_font)
list_widget.addItem(item)
def set_widgets(self):
"""Set widgets on the Exposure Layer From Canvas tab"""
# The list is already populated in the previous step, but now we
# need to do it again in case we're back from the Keyword Wizard.
# First, preserve self.parent.layer before clearing the list
last_layer = self.parent.layer and self.parent.layer.id() or None
self.lblDescribeCanvasExpLayer.clear()
self.list_compatible_canvas_layers()
self.auto_select_one_item(self.lstCanvasExpLayers)
# Try to select the last_layer, if found:
if last_layer:
layers = []
for indx in xrange(self.lstCanvasExpLayers.count()):
item = self.lstCanvasExpLayers.item(indx)
layers += [item.data(QtCore.Qt.UserRole)]
if last_layer in layers:
self.lstCanvasExpLayers.setCurrentRow(layers.index(last_layer))
|
Samweli/inasafe
|
safe/gui/tools/wizard/step_fc35_explayer_from_canvas.py
|
Python
|
gpl-3.0
| 5,476
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
from webassets.filter import Filter
import dukpy
__all__ = ('BabelJSX', )
class BabelJSX(Filter):
name = 'babeljsx'
max_debug_level = None
options = {
'loader': 'BABEL_MODULES_LOADER'
}
def input(self, _in, out, **kw):
options = {'filename': os.path.basename(kw['source_path'])}
if self.loader == 'systemjs':
options['plugins'] = ['transform-es2015-modules-systemjs']
elif self.loader == 'umd':
options['plugins'] = ['transform-es2015-modules-umd']
src = dukpy.jsx_compile(_in.read(), **options)
out.write(src)
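# Usage sketch (editor's addition): registering the filter with a standard
# webassets Bundle; the directory, URL, and bundle names are illustrative only.
#   from webassets import Environment, Bundle
#   from webassets.filter import register_filter
#   register_filter(BabelJSX)
#   env = Environment('./static', '/static')
#   env.register('app_js', Bundle('app.jsx', filters='babeljsx', output='gen/app.js'))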
|
amol-/dukpy
|
dukpy/webassets/jsxfilter.py
|
Python
|
mit
| 700
|
from jobrunner.cli.show import show
from tests.testcase import TestCase
class TestShow(TestCase):
def setUp(self):
self.parse_show_arguments = self.set_up_patch(
'jobrunner.cli.show.parse_show_arguments'
)
self.show_logbook = self.set_up_patch(
'jobrunner.cli.show.show_logbook'
)
def test_show_parses_show_arguments(self):
show()
self.parse_show_arguments.assert_called_once_with()
def test_show_shows_logbooks(self):
show()
self.show_logbook.assert_called_once_with()
|
vdloo/jobrunner
|
tests/unit/jobrunner/cli/show/test_show.py
|
Python
|
apache-2.0
| 576
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-26 00:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0002_auto_20161025_0712'),
]
operations = [
migrations.AlterField(
model_name='language',
name='language_text',
field=models.CharField(max_length=30, unique=True),
),
migrations.AlterField(
model_name='userinfo',
name='age',
field=models.SmallIntegerField(default=20),
preserve_default=False,
),
]
|
hamasho/endojo
|
apps/registration/migrations/0003_auto_20161026_0005.py
|
Python
|
gpl-3.0
| 671
|
from sys import argv
import os
from glob import glob
from ppci.api import asm, cc, link, objcopy, get_arch
from ppci.binutils.objectfile import merge_memories
from ppci.lang.c import COptions
from ppci.utils.reporting import HtmlReportGenerator
def get_sources(folder, extension):
resfiles = []
resdirs = []
for x in os.walk(folder):
for y in glob(os.path.join(x[0], extension)):
resfiles.append(y)
resdirs.append(x[0])
    return resdirs, resfiles
with open('report.html', 'w') as f:
arch = get_arch('riscv')
o1 = asm("startdbg.s", arch)
reporter = HtmlReportGenerator(f)
path = os.path.join('.','csrc',argv[1])
dirs, srcs = get_sources(path, '*.c')
srcs += [os.path.join('.','csrc','bsp.c')] + [os.path.join('.','csrc','lib.c')] + [os.path.join('.','csrc','gdbstub.c')]
dirs += [os.path.join('.','csrc')]
obj = []
coptions = COptions()
for dir in dirs:
coptions.add_include_path(dir)
    for src in srcs:
        # use a distinct name so the report handle `f` opened above is not shadowed
        with open(src) as src_file:
            obj.append(cc(src_file, "riscv", coptions=coptions, debug=True, reporter=reporter))
obj = link([o1] + obj, "firmware.mmap", use_runtime=True, reporter=reporter, debug=True)
with open("firmware.oj", "w") as of:
obj.save(of)
objcopy(obj, "flash", "elf", "firmware.elf")
objcopy(obj, "flash", "bin", "code.bin")
objcopy(obj, "ram", "bin", "data.bin")
size = 0x8000
cimg = obj.get_image('flash')
dimg = obj.get_image('ram')
img = merge_memories(cimg, dimg, 'img')
imgdata = img.data
with open("firmware.hex", "w") as f:
for i in range(size):
if i < len(imgdata) // 4:
w = imgdata[4 * i: 4 * i + 4]
print("%02x%02x%02x%02x" % (w[3], w[2], w[1], w[0]), file=f)
else:
print("00000000", file=f)
|
windelbouwman/ppci-mirror
|
examples/riscvpicorv32/mkfwdbgc.py
|
Python
|
bsd-2-clause
| 1,833
|
from ethereum import abi
import time
import os
import json
from ethereum.utils import encode_hex, str_to_bytes
FILL = 1
VERIFY = 2
TIME = 3
fixture_path = os.path.join(os.path.dirname(__file__), '../..', 'fixtures')
fill_abi_test = lambda params: run_abi_test(params, FILL)
check_abi_test = lambda params: run_abi_test(params, VERIFY)
def bytesify(li):
return [str_to_bytes(x) if isinstance(x, str) else x for x in li]
def run_abi_test(params, mode):
types, args = params['types'], params['args']
out = abi.encode_abi(types, args)
assert bytesify(abi.decode_abi(types, out)) == bytesify(args)
if mode == FILL:
params['result'] = encode_hex(out)
return params
elif mode == VERIFY:
assert params['result'] == encode_hex(out)
elif mode == TIME:
x = time.time()
abi.encode_abi(types, args)
y = time.time()
        abi.decode_abi(types, out)  # argument order fixed to match the call above
return {
'encoding': y - x,
'decoding': time.time() - y
}
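# Usage sketch (editor's addition), exercising the helpers defined above on a
# toy parameter set:
#   params = {'types': ['uint256'], 'args': [42]}
#   filled = fill_abi_test(params)   # encodes and stores hex 'result'
#   check_abi_test(filled)           # asserts the stored result round-trips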
def generate_test_params(testsource, metafunc, skip_func=None, exclude_func=None):
import pytest
if ['filename', 'testname', 'testdata'] != metafunc.fixturenames:
return
fixtures = get_tests_from_file_or_dir(
os.path.join(fixture_path, testsource))
base_dir = os.path.dirname(os.path.dirname(__file__))
params = []
for filename, tests in fixtures.items():
if isinstance(tests, dict):
filename = os.path.relpath(filename, base_dir)
for testname, testdata in tests.items():
if exclude_func and exclude_func(filename, testname, testdata):
continue
if skip_func:
skipif = pytest.mark.skipif(
skip_func(filename, testname, testdata),
reason="Excluded"
)
params.append(skipif((filename, testname, testdata)))
else:
params.append((filename, testname, testdata))
metafunc.parametrize(
('filename', 'testname', 'testdata'),
params
)
return params[::-1]
def get_tests_from_file_or_dir(dname, json_only=False):
if os.path.isfile(dname):
if dname[-5:] == '.json' or not json_only:
with open(dname) as f:
return {dname: json.load(f)}
else:
return {}
else:
o = {}
for f in os.listdir(dname):
fullpath = os.path.join(dname, f)
for k, v in list(get_tests_from_file_or_dir(fullpath, True).items()):
o[k] = v
return o
|
nirenzang/Serpent-Pyethereum-Tutorial
|
pyethereum/ethereum/tools/testutils.py
|
Python
|
gpl-3.0
| 2,645
|
# -*- coding: UTF-8 -*-
"""
Este trasto actúa como relé, reenvía los datos de los sensores al servdiro de test.
"""
"""
El tema que recibe los datos (ya sea serie o cualquier otra cosa)
- Este se puede quedar dormido o algo leyendo de un fichero o algo así
El tema que procesa esos datos y los envia al server
- A este tema le puedo meter una cola o algo
"""
import serial
import requests
import time
import sys
import json
import queue
import threading
import random
# This class is in charge of obtaining the data
class Worker(threading.Thread):
def generate_rnd_msg(self):
self.rnd_msg = {}
msgs = ['!action>', '!data>']
rnd = random.randint(0,1)
self.rnd_msg['msg'] = msgs[rnd]
if rnd == 0:
self.rnd_msg['action'] = 'water'
if rnd == 1:
self.rnd_msg['data'] = 'soil!500:temp!24:light!800'
def get_msg(self):
if self.source == 'serial':
            msg = self.ser.readline().decode()[:-2]
if self.source == 'mock':
time.sleep(5)
self.generate_rnd_msg()
msg = self.rnd_msg['msg']
return msg
def get_action(self):
if self.source == 'serial':
            action = self.ser.readline().decode()[:-2]
if self.source == 'mock':
action = self.rnd_msg['action']
return action
def get_data(self):
if self.source == 'serial':
            data = self.ser.readline().decode()[:-2]
if self.source == 'mock':
data = self.rnd_msg['data']
return data
def run(self):
        # source indicates which backend is used to obtain the data (arduino over serial, or a mock)
self.source = self._kwargs['source']
if self.source == 'mock':
self.rnd_msg = {}
if self.source == 'serial':
self.port = input("Serial port: ")
            self.ser = serial.Serial("/dev/tty" + self.port, 9600)
while True:
msg = self.get_msg()
if msg == "!action>":
action = self.get_action()
cmd = {'msg':'action', 'data': action}
q.put(cmd)
if msg == "!data>":
cmd = {'msg': 'data', 'data':{}}
data = self.get_data()
for pair in data.split(":"):
k,v = pair.split("!")
cmd['data'][k] = v
q.put(cmd)
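# Example (editor's note): a mock data message 'soil!500:temp!24:light!800' is
# parsed by run() into {'msg': 'data', 'data': {'soil': '500', 'temp': '24', 'light': '800'}}.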
# The part that sends the data to the server
with open("relay_cfg.json", "r") as f:
cfg = json.load(f)
baseurl = cfg['URL']
soil_endpoint = baseurl+'/insertar'
#action_endpoint = baseurl+'/action/'
def new_measure(data):
r = requests.post(soil_endpoint, data=json.dumps({'data':data}))
print(r.text)
#TODO
def new_action(data):
r = requests.post(action_endpoint, data=json.dumps(data))
print(r.text)
# job queue
q = queue.Queue()
# launch the worker thread
t = Worker(kwargs={'source': 'mock'})
t.start()
while True:
cmd = q.get()
print(cmd)
if cmd['msg'] == "action":
#TODO
#print(cmd['data'])
#new_action(cmd['data'])
pass
if cmd['msg'] == "data":
#print(cmd['data'])
new_measure(cmd['data'])
q.task_done()
|
OpenComunCas/PWS
|
arduino/testserver/relay/relay.py
|
Python
|
gpl-3.0
| 3,299
|
"""
Utilities for Caltech BE/Bi 103.
Author: Justin Bois
"""
import collections
import random
import warnings
import numbers
import matplotlib.path as path
import numpy as np
import pandas as pd
import scipy.odr
import scipy.stats as st
import statsmodels.tools.numdiff as smnd
import skimage.io
import skimage.measure
import emcee
import bokeh.models
import bokeh.palettes
import bokeh.plotting
import seaborn as sns
# ########################################################################## #
# COLOR CONVERSION UTILITIES #
# ########################################################################## #
def rgb_frac_to_hex(rgb_frac):
"""
    Convert fractional RGB values to a hexadecimal color string.
Parameters
----------
rgb_frac : array_like, shape (3,)
Fractional RGB values; each entry is between 0 and 1.
Returns
-------
str
        Hexadecimal string for the given RGB color.
Examples
--------
>>> rgb_frac_to_hex((0.65, 0.23, 1.0))
'#a53aff'
>>> rgb_frac_to_hex((1.0, 1.0, 1.0))
'#ffffff'
"""
if len(rgb_frac) != 3:
raise RuntimeError('`rgb_frac` must have exactly three entries.')
if (np.array(rgb_frac) < 0).any() or (np.array(rgb_frac) > 1).any():
raise RuntimeError('RGB values must be between 0 and 1.')
return '#{0:02x}{1:02x}{2:02x}'.format(int(rgb_frac[0] * 255),
int(rgb_frac[1] * 255),
int(rgb_frac[2] * 255))
def data_to_hex_color(x, palette, x_range=[0, 1], na_value='#000000'):
"""
    Convert a value to a hexadecimal color according to a
    color palette.
Parameters
----------
x : float or int
        Value to be converted to a hexadecimal color.
palette : list of 3-tuples
Color palette as returned from seaborn.color_palette().
List of 3-tuples containing fractional RGB values.
    x_range : array_like, shape (2,), default = [0, 1]
Low and high value of the range of values `x` may
assume.
Returns
-------
str
        Hexadecimal string.
Examples
--------
    >>> data_to_hex_color(0.7, sns.color_palette())
'#ccb974'
>>> data_to_hex_color(7.1, [(1, 0, 0), (0, 1, 0), (0, 0, 1)], [0, 10])
'#0000ff'
"""
if x is None or np.isnan(x):
return na_value
elif x > x_range[1] or x < x_range[0]:
raise RuntimeError('data outside of range')
elif x == x_range[1]:
return rgb_frac_to_hex(palette[-1])
# Fractional position of x in x_range
f = (x - x_range[0]) / (x_range[1] - x_range[0])
return rgb_frac_to_hex(palette[int(f * len(palette))])
def im_merge_cmy(im_cyan, im_magenta, im_yellow=None):
"""
Merge channels to make RGB image that has cyan, magenta, and
yellow.
Parameters
----------
im_cyan: array_like
Image represented in cyan channel. Must be same shape
as `im_magenta` and `im_yellow`.
im_magenta: array_like
Image represented in magenta channel. Must be same shape
        as `im_cyan` and `im_yellow`.
im_yellow: array_like
Image represented in yellow channel. Must be same shape
as `im_cyan` and `im_magenta`.
Returns
-------
output : array_like, dtype float, shape (*im_cyan.shape, 3)
        RGB image giving the CMY coloring of the image.
Notes
-----
    .. All input images are stretched so that their pixel intensities
go from 0 to 1.
"""
im_cyan_scaled = \
(im_cyan - im_cyan.min()) / (im_cyan.max() - im_cyan.min())
im_magenta_scaled = \
(im_magenta - im_magenta.min()) / (im_magenta.max() - im_magenta.min())
if im_yellow is None:
im_yellow_scaled = np.zeros_like(im_cyan)
else:
im_yellow_scaled = \
(im_yellow - im_yellow.min()) / (im_yellow.max() - im_yellow.min())
# Convert images to RGB with magenta, cyan, and yellow channels
im_cyan_scaled_rgb = np.dstack((np.zeros_like(im_cyan_scaled),
im_cyan_scaled,
im_cyan_scaled))
im_magenta_scaled_rgb = np.dstack((im_magenta_scaled,
np.zeros_like(im_magenta_scaled),
im_magenta_scaled))
im_yellow_scaled_rgb = np.dstack((im_yellow_scaled,
im_yellow_scaled,
np.zeros_like(im_yellow_scaled)))
# Merge together
merged_image = \
im_cyan_scaled_rgb + im_magenta_scaled_rgb + im_yellow_scaled_rgb
# Scale each channel to be between zero and 1
merged_image[:, :, 0] /= merged_image[:, :, 0].max()
merged_image[:, :, 1] /= merged_image[:, :, 1].max()
merged_image[:, :, 2] /= merged_image[:, :, 2].max()
return merged_image
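# --- Added usage sketch (not part of the original module) ---
# Demonstrates im_merge_cmy on synthetic data; the demo function name and
# the random images are assumptions for illustration only.
def _demo_im_merge_cmy():
    im_c = np.random.rand(64, 64)
    im_m = np.random.rand(64, 64)
    merged = im_merge_cmy(im_c, im_m)
    # With im_yellow omitted, the yellow channel is all zeros; the result
    # is an RGB float image with each channel scaled to [0, 1].
    assert merged.shape == (64, 64, 3)
    return merged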
# ########################################################################## #
# BOKEH UTILITIES #
# ########################################################################## #
def bokeh_matplot(df, i_col, j_col, data_col, data_range=None, n_colors=21,
label_ticks=True, colormap='RdBu_r', plot_width=1000,
plot_height=1000, x_axis_location='auto',
toolbar_location='left',
tools='reset,resize,hover,save,pan,box_zoom,wheel_zoom',
**kwargs):
"""
Create Bokeh plot of a matrix.
Parameters
----------
df : Pandas DataFrame
Tidy DataFrame to be plotted as a matrix.
i_col : hashable object
Column in `df` to be used for row indices of matrix.
j_col : hashable object
Column in `df` to be used for column indices of matrix.
data_col : hashable object
Column containing values to be plotted. These values
set which color is displayed in the plot and also are
displayed in the hover tool.
data_range : array_like, shape (2,)
Low and high values that data may take, used for scaling
the color. Default is the range of the inputted data.
n_colors : int, default = 21
Number of colors to be used in colormap.
label_ticks : bool, default = True
If False, do not put tick labels
colormap : str, default = 'RdBu_r'
Any of the allowed seaborn colormaps.
plot_width : int, default 1000
Width of plot in pixels.
plot_height : int, default 1000
Height of plot in pixels.
    x_axis_location : str, default = 'auto'
        Location of the x-axis around the plot. If 'auto' and the first
        element of `df[j_col]` is numerical, the x-axis will be placed below
        with the lower left corner as the origin. Otherwise, above
        with the upper left corner as the origin.
toolbar_location : str, default = 'left'
Location of the Bokeh toolbar around the plot
tools : str, default = 'reset,resize,hover,save,pan,box_zoom,wheel_zoom'
Tools to show in the Bokeh toolbar
**kwargs
Arbitrary keyword arguments passed to bokeh.plotting.figure
Returns
-------
Bokeh plotting object
Examples
--------
>>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> data = np.array(np.unravel_index(range(9), a.shape) + (a.ravel(),)).T
>>> df = pd.DataFrame(data, columns=['i', 'j', 'data'])
>>> bokeh.plotting.output_file('test_matplot.html')
    >>> p = bokeh_matplot(df, 'i', 'j', 'data', n_colors=21,
colormap='RdBu_r', plot_width=1000,
plot_height=1000)
>>> bokeh.plotting.show(p)
"""
# Copy the DataFrame
df_ = df.copy()
# Convert i, j to strings so not interpreted as physical space
df_[i_col] = df_[i_col].astype(str)
df_[j_col] = df_[j_col].astype(str)
# Get data range
if data_range is None:
data_range = (df[data_col].min(), df[data_col].max())
elif (data_range[0] > df[data_col].min()) \
or (data_range[1] < df[data_col].max()):
raise RuntimeError('Data out of specified range.')
# Get colors
palette = sns.color_palette(colormap, n_colors)
# Compute colors for squares
df_['color'] = df_[data_col].apply(data_to_hex_color,
args=(palette, data_range))
# Data source
source = bokeh.plotting.ColumnDataSource(df_)
# only reverse the y-axis and put the x-axis on top
# if the x-axis is categorical:
if x_axis_location == 'auto':
if isinstance(df[j_col].iloc[0], numbers.Number):
y_range = list(df_[i_col].unique())
x_axis_location = 'below'
else:
y_range = list(reversed(list(df_[i_col].unique())))
x_axis_location = 'above'
elif x_axis_location == 'above':
y_range = list(reversed(list(df_[i_col].unique())))
elif x_axis_location == 'below':
y_range = list(df_[i_col].unique())
# Set up figure
p = bokeh.plotting.figure(x_range=list(df_[j_col].unique()),
y_range=y_range,
x_axis_location=x_axis_location,
plot_width=plot_width,
plot_height=plot_height,
toolbar_location=toolbar_location,
tools=tools, **kwargs)
# Populate colored squares
p.rect(j_col, i_col, 1, 1, source=source, color='color', line_color=None)
# Set remaining properties
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
if label_ticks:
p.axis.major_label_text_font_size = '8pt'
else:
p.axis.major_label_text_color = None
p.axis.major_label_text_font_size = '0pt'
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = np.pi / 3
# Build hover tool
hover = p.select(dict(type=bokeh.models.HoverTool))
hover.tooltips = collections.OrderedDict([('i', ' @' + i_col),
('j', ' @' + j_col),
(data_col, ' @' + data_col)])
return p
def bokeh_boxplot(df, value, label, ylabel=None, sort=True, plot_width=650,
                  plot_height=450, box_fill_color='mediumpurple',
background_fill_color='#DFDFE5',
tools='reset,resize,hover,save,pan,box_zoom,wheel_zoom',
**kwargs):
"""
Make a Bokeh box plot from a tidy DataFrame.
Parameters
----------
df : tidy Pandas DataFrame
DataFrame to be used for plotting
value : hashable object
Column of DataFrame containing data to be used.
label : hashable object
Column of DataFrame use to categorize.
ylabel : str, default None
Text for y-axis label
sort : Boolean, default True
If True, sort DataFrame by label so that x-axis labels are
alphabetical.
plot_width : int, default 650
Width of plot in pixels.
plot_height : int, default 450
Height of plot in pixels.
    box_fill_color : string, default = 'mediumpurple'
        Fill color of boxes.
background_fill_color : str, default = '#DFDFE5'
Fill color of the plot background
tools : str, default = 'reset,resize,hover,save,pan,box_zoom,wheel_zoom'
Tools to show in the Bokeh toolbar
**kwargs
Arbitrary keyword arguments passed to bokeh.plotting.figure
Returns
-------
Bokeh plotting object
Example
-------
>>> cats = list('ABCD')
>>> values = np.random.randn(200)
>>> labels = np.random.choice(cats, 200)
>>> df = pd.DataFrame({'label': labels, 'value': values})
>>> bokeh.plotting.output_file('test_boxplot.html')
>>> p = bokeh_boxplot(df, value='value', label='label')
>>> bokeh.plotting.show(p)
Notes
-----
.. Based largely on example code found here:
https://github.com/bokeh/bokeh/blob/master/examples/plotting/file/boxplot.py
"""
# Sort DataFrame by labels for alphabetical x-labeling
if sort:
df_sort = df.sort_values(label)
else:
df_sort = df.copy()
# Convert labels to string to allow categorical axis labels
df_sort[label] = df_sort[label].astype(str)
# Get the categories
cats = list(df_sort[label].unique())
# Group Data frame
df_gb = df_sort.groupby(label)
# Compute quartiles for each group
q1 = df_gb[value].quantile(q=0.25)
q2 = df_gb[value].quantile(q=0.5)
q3 = df_gb[value].quantile(q=0.75)
# Compute interquartile region and upper and lower bounds for outliers
iqr = q3 - q1
upper_cutoff = q3 + 1.5 * iqr
lower_cutoff = q1 - 1.5 * iqr
# Find the outliers for each category
def outliers(group):
cat = group.name
outlier_inds = (group[value] > upper_cutoff[cat]) | \
(group[value] < lower_cutoff[cat])
return group[value][outlier_inds]
# Apply outlier finder
out = df_gb.apply(outliers).dropna()
# Points of outliers for plotting
outx = []
outy = []
if not out.empty:
for cat in cats:
if not out[cat].empty:
for val in out[cat]:
outx.append(cat)
outy.append(val)
# Shrink whiskers to smallest and largest non-outlier
qmin = df_gb[value].min()
qmax = df_gb[value].max()
upper = upper_cutoff.combine(qmax, min)
lower = lower_cutoff.combine(qmin, max)
# Reindex to make sure ordering is right when plotting
upper = upper.reindex(cats)
lower = lower.reindex(cats)
q1 = q1.reindex(cats)
q2 = q2.reindex(cats)
q3 = q3.reindex(cats)
# Build figure
p = bokeh.plotting.figure(x_range=cats,
background_fill_color=background_fill_color,
plot_width=plot_width, plot_height=plot_height,
tools=tools,
**kwargs)
p.ygrid.grid_line_color = 'white'
p.xgrid.grid_line_color = None
p.ygrid.grid_line_width = 2
p.yaxis.axis_label = ylabel
# stems
p.segment(cats, upper, cats, q3, line_width=2, line_color="black")
p.segment(cats, lower, cats, q1, line_width=2, line_color="black")
# boxes
    p.rect(cats, (q3 + q1) / 2, 0.5, q3 - q1, fill_color=box_fill_color,
           alpha=0.7, line_width=2, line_color="black")
# median (almost-0 height rects simpler than segments)
y_range = qmax.max() - qmin.min()
p.rect(cats, q2, 0.5, 0.0001 * y_range, line_color="black",
line_width=2, fill_color='black')
# whiskers (almost-0 height rects simpler than segments with
# categorial x-axis)
p.rect(cats, lower, 0.2, 0.0001 * y_range, line_color='black',
fill_color='black')
p.rect(cats, upper, 0.2, 0.0001 * y_range, line_color='black',
fill_color='black')
# outliers
p.circle(outx, outy, size=6, color='black')
return p
def bokeh_imrgb(im, plot_height=400, plot_width=None,
tools='pan,box_zoom,wheel_zoom,reset,resize'):
"""
Make a Bokeh Figure instance displaying an RGB image.
If the image is already 32 bit, just display it
"""
# Make 32 bit image
if len(im.shape) == 2 and im.dtype == np.uint32:
im_disp = im
else:
im_disp = rgb_to_rgba32(im)
# Get shape
n, m = im_disp.shape
# Determine plot height and width
if plot_height is not None and plot_width is None:
plot_width = int(m/n * plot_height)
elif plot_height is None and plot_width is not None:
plot_height = int(n/m * plot_width)
elif plot_height is None and plot_width is None:
        plot_height = 400
plot_width = int(m/n * plot_height)
# Set up figure with appropriate dimensions
p = bokeh.plotting.figure(plot_height=plot_height, plot_width=plot_width,
x_range=[0, m], y_range=[0, n], tools=tools)
# Display the image, setting the origin and heights/widths properly
p.image_rgba(image=[im_disp], x=0, y=0, dw=m, dh=n)
return p
def bokeh_im(im, plot_height=400, plot_width=None,
color_palette=bokeh.palettes.gray(256),
tools='pan,box_zoom,wheel_zoom,reset,resize'):
"""
"""
# Get shape
n, m = im.shape
# Determine plot height and width
if plot_height is not None and plot_width is None:
plot_width = int(m/n * plot_height)
elif plot_height is None and plot_width is not None:
plot_height = int(n/m * plot_width)
elif plot_height is None and plot_width is None:
        plot_height = 400
plot_width = int(m/n * plot_height)
p = bokeh.plotting.figure(plot_height=plot_height, plot_width=plot_width,
x_range=[0, m], y_range=[0, n], tools=tools)
# Set color mapper
color = bokeh.models.LinearColorMapper(color_palette)
# Display the image
p.image(image=[im], x=0, y=0, dw=m, dh=n, color_mapper=color)
return p
# ########################################################################## #
# MCMC UTILITIES #
# ########################################################################## #
def generic_log_posterior(log_prior, log_likelihood, params, logpargs=(),
loglargs=()):
"""
Generic log posterior for MCMC calculations
Parameters
----------
log_prior : function
Function to compute the log prior.
Call signature: log_prior(params, *logpargs)
log_likelihood : function
        Function to compute the log likelihood.
        Call signature: log_likelihood(params, *loglargs)
params : ndarray
Numpy array containing the parameters of the posterior.
logpargs : tuple, default ()
Tuple of parameters to be passed to log_prior.
loglargs : tuple, default ()
Tuple of parameters to be passed to log_likelihood.
Returns
-------
output : float
The logarithm of the posterior evaluated at `params`.
"""
# Compute log prior
lp = log_prior(params, *logpargs)
# If log prior is -inf, return that
if lp == -np.inf:
return -np.inf
# Compute and return posterior
return lp + log_likelihood(params, *loglargs)
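# --- Added usage sketch (not part of the original module) ---
# generic_log_posterior is meant to be partially applied so a sampler sees
# only a function of the parameters. The prior, likelihood, and data below
# are illustrative assumptions.
def _demo_generic_log_posterior():
    import functools

    def log_prior(params):
        # Flat prior on (-10, 10)
        return 0.0 if -10 < params[0] < 10 else -np.inf

    def log_likelihood(params, data):
        return -0.5 * np.sum((data - params[0])**2)

    data = np.array([0.9, 1.1, 1.0])
    log_post = functools.partial(generic_log_posterior, log_prior,
                                 log_likelihood, loglargs=(data,))
    return log_post(np.array([1.0]))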
def sampler_to_dataframe(sampler, columns=None):
"""
Convert output of an emcee sampler to a Pandas DataFrame.
Parameters
----------
    sampler : emcee.EnsembleSampler or emcee.PTSampler instance
        Sampler instance from which MCMC has already been run.
    columns : list of strings, default None
        Names for the parameter columns. If None, sequential
        integers are used.
Returns
-------
output : DataFrame
Pandas DataFrame containing the samples. Each column is
a variable, except: 'lnprob' and 'chain' for an
EnsembleSampler, and 'lnlike', 'lnprob', 'beta_ind',
'beta', and 'chain' for a PTSampler. These contain obvious
values.
"""
    if columns is None:
        columns = list(range(sampler.chain.shape[-1]))
    invalid_column_names = ['lnprob', 'chain', 'lnlike', 'beta',
                            'beta_ind']
    if np.any([x in columns for x in invalid_column_names]):
        raise RuntimeError('You cannot name columns with any of these: '
                           + ' '.join(invalid_column_names))
if isinstance(sampler, emcee.EnsembleSampler):
n_walkers, n_steps, n_dim = sampler.chain.shape
df = pd.DataFrame(data=sampler.flatchain, columns=columns)
df['lnprob'] = sampler.flatlnprobability
df['chain'] = np.concatenate([i * np.ones(n_steps, dtype=int)
for i in range(n_walkers)])
elif isinstance(sampler, emcee.PTSampler):
n_temps, n_walkers, n_steps, n_dim = sampler.chain.shape
df = pd.DataFrame(
data=sampler.flatchain.reshape(
(n_temps * n_walkers * n_steps, n_dim)),
columns=columns)
df['lnlike'] = sampler.lnlikelihood.flatten()
df['lnprob'] = sampler.lnprobability.flatten()
beta_inds = [i * np.ones(n_steps * n_walkers, dtype=int)
for i, _ in enumerate(sampler.betas)]
df['beta_ind'] = np.concatenate(beta_inds)
df['beta'] = sampler.betas[df['beta_ind']]
chain_inds = [j * np.ones(n_steps, dtype=int)
for i, _ in enumerate(sampler.betas)
for j in range(n_walkers)]
df['chain'] = np.concatenate(chain_inds)
else:
        raise RuntimeError('Invalid sampler input.')
return df
def run_ensemble_emcee(log_post=None, n_burn=100, n_steps=100,
n_walkers=None, p_dict=None, p0=None, columns=None,
args=(), threads=None, thin=1, return_sampler=False,
return_pos=False):
"""
Run emcee.
Parameters
----------
log_post : function
The function that computes the log posterior. Must be of
the form log_post(p, *args), where p is a NumPy array of
parameters that are sampled by the MCMC sampler.
n_burn : int, default 100
Number of burn steps
n_steps : int, default 100
Number of MCMC samples to take
n_walkers : int
Number of walkers, ignored if p0 is None
p_dict : collections.OrderedDict
Each entry is a tuple with the function used to generate
starting points for the parameter and the arguments for
the function. The starting point function must have the
call signature f(*args_for_function, n_walkers). Ignored
if p0 is not None.
p0 : array
n_walkers by n_dim array of initial starting values.
p0[i,j] is the starting point for walk i along variable j.
If provided, p_dict is ignored.
columns : list of strings
Name of parameters. These will be the column headings in the
returned DataFrame. If None, either inferred from p_dict or
assigned sequential integers.
args : tuple
Arguments passed to log_post
threads : int
Number of cores to use in calculation
thin : int
The number of iterations to perform between saving the
state to the internal chain.
return_sampler : bool, default False
If True, return sampler as well as DataFrame with results.
return_pos : bool, default False
If True, additionally return position of the sampler.
Returns
-------
df : pandas.DataFrame
First columns give flattened MCMC chains, with columns
named with the variable being sampled as a string.
Other columns are:
'chain': ID of chain
'lnprob': Log posterior probability
sampler : emcee.EnsembleSampler instance, optional
The sampler instance.
pos : ndarray, shape (nwalkers, ndim), optional
Last position of the walkers.
"""
if p0 is None and p_dict is None:
raise RuntimeError('Must supply either p0 or p_dict.')
# Infer n_dim and n_walkers (and check inputs)
if p0 is None:
if n_walkers is None:
raise RuntimeError('n_walkers must be specified if p0 is None')
if type(p_dict) is not collections.OrderedDict:
raise RuntimeError('p_dict must be collections.OrderedDict.')
n_dim = len(p_dict)
else:
n_walkers, n_dim = p0.shape
if p_dict is not None:
            warnings.warn('p_dict is being ignored.', RuntimeWarning)
# Infer columns
if columns is None:
if p_dict is not None:
columns = list(p_dict.keys())
else:
columns = list(range(n_dim))
elif len(columns) != n_dim:
raise RuntimeError('len(columns) must equal number of parameters.')
# Check for invalid column names
invalid_column_names = ['lnprob', 'chain', 'lnlike', 'beta',
'beta_ind']
if np.any([x in columns for x in invalid_column_names]):
raise RuntimeError('You cannot name columns with any of these: '
+ ' '.join(invalid_column_names))
# Build starting points of walkers
if p0 is None:
p0 = np.empty((n_walkers, n_dim))
for i, key in enumerate(p_dict):
p0[:, i] = p_dict[key][0](*(p_dict[key][1] + (n_walkers,)))
# Set up the EnsembleSampler instance
if threads is not None:
sampler = emcee.EnsembleSampler(n_walkers, n_dim, log_post,
args=args, threads=threads)
else:
sampler = emcee.EnsembleSampler(n_walkers, n_dim, log_post,
args=args)
# Do burn-in
if n_burn > 0:
pos, _, _ = sampler.run_mcmc(p0, n_burn, storechain=False)
else:
pos = p0
# Sample again, starting from end burn-in state
pos, _, _ = sampler.run_mcmc(pos, n_steps, thin=thin)
# Make DataFrame for results
df = sampler_to_dataframe(sampler, columns=columns)
# Set up return
return_vals = (df, sampler, pos)
return_bool = (True, return_sampler, return_pos)
ret = tuple([rv for rv, rb in zip(return_vals, return_bool) if rb])
if len(ret) == 1:
return ret[0]
return ret
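# --- Added usage sketch (not part of the original module) ---
# Minimal run of run_ensemble_emcee on a 1-D standard normal posterior;
# the log-posterior, walker count, and starting points are illustrative
# assumptions.
def _demo_run_ensemble_emcee():
    def log_post(p):
        return -0.5 * np.sum(p**2)

    p0 = np.random.normal(0, 1, size=(50, 1))   # 50 walkers, 1 parameter
    df = run_ensemble_emcee(log_post=log_post, n_burn=50, n_steps=200,
                            p0=p0, columns=['x'])
    # df has columns 'x', 'lnprob', and 'chain'; the mean should be near 0
    # and the standard deviation near 1.
    return df['x'].mean(), df['x'].std()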
def run_pt_emcee(log_like, log_prior, n_burn, n_steps, n_temps=None,
n_walkers=None, p_dict=None, p0=None, columns=None,
loglargs=(), logpargs=(), threads=None, thin=1,
return_lnZ=False, return_sampler=False, return_pos=False):
"""
Run emcee.
Parameters
----------
log_like : function
The function that computes the log likelihood. Must be of
the form log_like(p, *llargs), where p is a NumPy array of
parameters that are sampled by the MCMC sampler.
log_prior : function
The function that computes the log prior. Must be of
the form log_post(p, *lpargs), where p is a NumPy array of
parameters that are sampled by the MCMC sampler.
n_burn : int
Number of burn steps
n_steps : int
Number of MCMC samples to take
n_temps : int
The number of temperatures to use in PT sampling.
n_walkers : int
Number of walkers
p_dict : collections.OrderedDict
Each entry is a tuple with the function used to generate
starting points for the parameter and the arguments for
the function. The starting point function must have the
call signature f(*args_for_function, n_walkers). Ignored
if p0 is not None.
p0 : array
n_walkers by n_dim array of initial starting values.
p0[k,i,j] is the starting point for walk i along variable j
for temperature k. If provided, p_dict is ignored.
columns : list of strings
Name of parameters. These will be the column headings in the
returned DataFrame. If None, either inferred from p_dict or
assigned sequential integers.
    loglargs : tuple
        Arguments passed to log_like
    logpargs : tuple
        Arguments passed to log_prior
threads : int
Number of cores to use in calculation
thin : int
The number of iterations to perform between saving the
state to the internal chain.
return_lnZ : bool, default False
If True, additionally return lnZ and dlnZ.
return_sampler : bool, default False
If True, additionally return sampler.
return_pos : bool, default False
If True, additionally return position of the sampler.
Returns
-------
df : pandas.DataFrame
First columns give flattened MCMC chains, with columns
named with the variable being sampled as a string.
Other columns are:
'chain': ID of chain
'beta': Inverse temperature
'beta_ind': Index of beta in list of betas
'lnlike': Log likelihood
'lnprob': Log posterior probability (with beta multiplying
log likelihood)
lnZ : float, optional
ln Z(1), which is equal to the evidence of the
parameter estimation problem.
dlnZ : float, optional
The estimated error in the lnZ calculation.
sampler : emcee.PTSampler instance, optional
The sampler instance.
pos : ndarray, shape (ntemps, nwalkers, ndim), optional
Last position of the walkers.
"""
if p0 is None and p_dict is None:
raise RuntimeError('Must supply either p0 or p_dict.')
# Infer n_dim and n_walkers (and check inputs)
if p0 is None:
        if n_walkers is None:
            raise RuntimeError('n_walkers must be specified if p0 is None')
        if n_temps is None:
            raise RuntimeError('n_temps must be specified if p0 is None')
if type(p_dict) is not collections.OrderedDict:
raise RuntimeError('p_dict must be collections.OrderedDict.')
n_dim = len(p_dict)
else:
n_temps, n_walkers, n_dim = p0.shape
if p_dict is not None:
            warnings.warn('p_dict is being ignored.', RuntimeWarning)
# Infer columns
if columns is None:
if p_dict is not None:
columns = list(p_dict.keys())
else:
columns = list(range(n_dim))
elif len(columns) != n_dim:
raise RuntimeError('len(columns) must equal number of parameters.')
# Check for invalid column names
invalid_column_names = ['lnprob', 'chain', 'lnlike', 'beta',
'beta_ind']
if np.any([x in columns for x in invalid_column_names]):
raise RuntimeError('You cannot name columns with any of these: '
+ ' '.join(invalid_column_names))
# Build starting points of walkers
if p0 is None:
p0 = np.empty((n_temps, n_walkers, n_dim))
for i, key in enumerate(p_dict):
p0[:, :, i] = p_dict[key][0](
*(p_dict[key][1] + ((n_temps, n_walkers),)))
# Set up the PTSampler instance
if threads is not None:
sampler = emcee.PTSampler(n_temps, n_walkers, n_dim, log_like,
log_prior, loglargs=loglargs,
logpargs=logpargs, threads=threads)
else:
sampler = emcee.PTSampler(n_temps, n_walkers, n_dim, log_like,
log_prior, loglargs=loglargs,
logpargs=logpargs)
# Do burn-in
if n_burn > 0:
pos, _, _ = sampler.run_mcmc(p0, n_burn, storechain=False)
else:
pos = p0
# Sample again, starting from end burn-in state
pos, _, _ = sampler.run_mcmc(pos, n_steps, thin=thin)
# Compute thermodynamic integral
lnZ, dlnZ = sampler.thermodynamic_integration_log_evidence(fburnin=0)
# Make DataFrame for results
df = sampler_to_dataframe(sampler, columns=columns)
# Set up return
return_vals = (df, lnZ, dlnZ, sampler, pos)
return_bool = (True, return_lnZ, return_lnZ, return_sampler, return_pos)
ret = tuple([rv for rv, rb in zip(return_vals, return_bool) if rb])
if len(ret) == 1:
return ret[0]
return ret
def lnZ(df_mcmc):
"""
Compute log Z(1) from PTMCMC traces stored in DataFrame.
Parameters
----------
    df_mcmc : pandas DataFrame, as outputted from run_pt_emcee.
        DataFrame containing output of a parallel tempering MCMC
        run. Only needs to contain the columns pertinent to computing
        ln Z, which are 'beta_ind', 'lnlike', and 'beta'.
Returns
-------
output : float
ln Z as computed by thermodynamic integration. This is
equivalent to what is obtained by calling
`sampler.thermodynamic_integration_log_evidence(fburnin=0)`
where `sampler` is an emcee.PTSampler instance.
Notes
-----
.. This is useful when the DataFrame from a PTSampler is too
large to store in RAM.
"""
# Average the log likelihood over the samples
log_mean = np.zeros(len(df_mcmc['beta_ind'].unique()))
for i, b in enumerate(df_mcmc['beta_ind'].unique()):
log_mean[i] = df_mcmc['lnlike'][df_mcmc['beta_ind']==b].mean()
# Set of betas (temperatures)
betas = np.concatenate((np.array(df_mcmc['beta'].unique()), (0,)))
# Approximate quadrature
return np.dot(log_mean, -np.diff(betas))
def extract_1d_hist(samples, nbins=100, density=True):
"""
Compute a 1d histogram with x-values at bin centers.
Meant to be used with MCMC samples.
Parameters
----------
samples : array
1D array of MCMC samples
nbins : int
Number of bins in histogram
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Returns
-------
count : array, shape (nbins,)
The counts, appropriately weighted depending on the
`density` kwarg, for the histogram.
x : array, shape (nbins,)
The positions of the bin centers.
"""
# Obtain histogram
    count, bins = np.histogram(samples, bins=nbins, density=density)
# Make the bins into the bin centers, not the edges
x = (bins[:-1] + bins[1:]) / 2.0
return count, x
def extract_2d_hist(samples_x, samples_y, bins=100, density=True,
meshgrid=False):
"""
Compute a 2d histogram with x,y-values at bin centers.
Meant to be used with MCMC samples.
Parameters
----------
samples_x : array
1D array of MCMC samples for x-axis
samples_y : array
1D array of MCMC samples for y-axis
bins : int
Number of bins in histogram. The same binning is
used in the x and y directions.
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
    meshgrid : bool, optional
If True, the returned `x` and `y` arrays are two-dimensional
as constructed with np.meshgrid(). If False, `x` and `y`
are returned as 1D arrays.
Returns
-------
count : array, shape (nbins, nbins)
The counts, appropriately weighted depending on the
`density` kwarg, for the histogram.
x : array, shape either (nbins,) or (nbins, nbins)
The positions of the bin centers on the x-axis.
y : array, shape either (nbins,) or (nbins, nbins)
The positions of the bin centers on the y-axis.
"""
# Obtain histogram
    count, x_bins, y_bins = np.histogram2d(samples_x, samples_y, bins=bins,
                                           density=density)
# Make the bins into the bin centers, not the edges
x = (x_bins[:-1] + x_bins[1:]) / 2.0
y = (y_bins[:-1] + y_bins[1:]) / 2.0
# Make mesh grid out of x_bins and y_bins
if meshgrid:
        x, y = np.meshgrid(x, y)
return count.transpose(), x, y
def norm_cumsum_2d(sample_x, sample_y, bins=100, meshgrid=False):
"""
Returns 1 - the normalized cumulative sum of two sets of samples.
Parameters
----------
samples_x : array
1D array of MCMC samples for x-axis
samples_y : array
1D array of MCMC samples for y-axis
bins : int
Number of bins in histogram. The same binning is
used in the x and y directions.
    meshgrid : bool, optional
If True, the returned `x` and `y` arrays are two-dimensional
as constructed with np.meshgrid(). If False, `x` and `y`
are returned as 1D arrays.
Returns
-------
    norm_cumsum : array, shape (nbins, nbins)
1 - the normalized cumulative sum of two sets of samples.
I.e., an isocontour on this surface at level alpha encompasses
a fraction alpha of the total probability.
x : array, shape either (nbins,) or (nbins, nbins)
The positions of the bin centers on the x-axis.
y : array, shape either (nbins,) or (nbins, nbins)
The positions of the bin centers on the y-axis.
Notes
-----
.. To make a contour plot with contour lines drawn to contain
68.27, 95.45, and 99.73% of the total probability, use the
output of this function as:
plt.contourf(x, y, norm_cumsum, levels=(0.6827, 0.9545, 0.9973))
"""
# Compute the histogram
count, x, y = extract_2d_hist(sample_x, sample_y, bins=bins,
density=False, meshgrid=meshgrid)
# Remember the shape
shape = count.shape
count = count.ravel()
# Inverse sort the histogram
isort = np.argsort(count)[::-1]
unsort = np.argsort(isort)
# Compute the cumulative sum and normalize
count_cumsum = count[isort].cumsum()
count_cumsum /= count_cumsum[-1]
# Normalized, reshaped cumulative sum
return count_cumsum[unsort].reshape(shape), x, y
def hpd(trace, mass_frac):
"""
Returns highest probability density region given by
a set of samples.
Parameters
----------
trace : array
1D array of MCMC samples for a single variable
mass_frac : float with 0 < mass_frac <= 1
The fraction of the probability to be included in
        the HPD. For example, `mass_frac` = 0.95 gives a
95% HPD.
Returns
-------
output : array, shape (2,)
The bounds of the HPD
"""
# Get sorted list
d = np.sort(np.copy(trace))
# Number of total samples taken
n = len(trace)
# Get number of samples that should be included in HPD
n_samples = np.floor(mass_frac * n).astype(int)
# Get width (in units of data) of all intervals with n_samples samples
int_width = d[n_samples:] - d[:n - n_samples]
# Pick out minimal interval
min_int = np.argmin(int_width)
# Return interval
return np.array([d[min_int], d[min_int + n_samples]])
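# --- Added usage sketch (not part of the original module) ---
# For a unimodal, symmetric distribution the HPD matches the central
# interval, so the 95% HPD of standard normal draws should be close to
# (-1.96, 1.96); the sample size is an illustrative assumption.
def _demo_hpd():
    trace = np.random.normal(0, 1, size=100000)
    return hpd(trace, 0.95)   # approximately array([-1.96, 1.96])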
# ########################################################################## #
# IMAGE PROCESSING UTILITIES #
# ########################################################################## #
class SimpleImageCollection(object):
"""
Load a collection of images.
Parameters
----------
load_pattern : string or list
If string, uses glob to generate list of files containing
images. If list, this is the list of files containing images.
load_func : callable, default skimage.io.imread
Function to be called to load images.
conserve_memory : bool, default True
If True, do not load all images into RAM. If False, load
all into a list.
Returns
-------
ic : SimpleImageCollection instance
ic[n] gives image n of the image collection.
Notes
-----
.. Any keyword arguments except those listed above are passed into
load_func as kwargs.
.. This is a much simplified (and therefore faster) version of
skimage.io.ImageCollection.
"""
def __init__(self, load_pattern, load_func=skimage.io.imread,
conserve_memory=True, **load_func_kwargs):
if isinstance(load_pattern, str):
self.fnames = glob.glob(load_pattern)
else:
self.fnames = load_pattern
self.conserve_memory = conserve_memory
if self.conserve_memory:
self.load_func = load_func
            self.load_func_kwargs = load_func_kwargs
else:
self.ims = [load_func(f, **load_func_kwargs) for f in self.fnames]
def __getitem__(self, n):
"""
Return selected image.
"""
if self.conserve_memory:
return self.load_func(self.fnames[n], **self.load_func_kwargs)
else:
return self.ims[n]
def simple_image_collection(im_glob, load_func=skimage.io.imread,
conserve_memory=True, **load_func_kwargs):
"""
Load a collection of images.
Parameters
----------
load_pattern : string or list
If string, uses glob to generate list of files containing
images. If list, this is the list of files containing images.
load_func : callable, default skimage.io.imread
Function to be called to load images.
conserve_memory : bool, default True
If True, do not load all images into RAM. If False, load
all into a list.
Returns
-------
ic : SimpleImageCollection instance
ic[n] gives image n of the image collection.
Notes
-----
.. Any keyword arguments except those listed above are passed into
load_func as kwargs.
.. This is a much simplified (and therefore faster) version of
skimage.io.ImageCollection.
"""
return SimpleImageCollection(im_glob, load_func=load_func,
conserve_memory=conserve_memory,
**load_func_kwargs)
def rgb_to_rgba32(im, flip=True):
"""
Convert an RGB image to a 32 bit-encoded RGBA image.
Parameters
----------
im : nd_array, shape (m, n, 3)
Input m by n RGB image.
flip : bool, default True
        If True, up-down flip the image. This is useful
for display with Bokeh.
Returns
-------
output : nd_array, shape (m, n), dtype int32
RGB image encoded as 32-bit integers.
Notes
-----
.. The input image is converted to 8-bit and then encoded
as 32-bit. The main use for this function is encoding images
for display with Bokeh, so this data loss is ok.
"""
# Ensure it has three channels
    if len(im.shape) != 3 or im.shape[2] != 3:
raise RuntimeError('Input image is not RGB.')
# Get image shape
n, m, _ = im.shape
# Convert to 8-bit, which is expected for viewing
im_8 = skimage.img_as_ubyte(im)
# Add the alpha channel, which is expected by Bokeh
im_rgba = np.dstack((im_8, 255*np.ones_like(im_8[:,:,0])))
# Reshape into 32 bit. Must flip up/down for proper orientation
return np.flipud(im_rgba.view(dtype=np.int32).reshape(n, m))
def verts_to_roi(verts, size_i, size_j):
"""
Converts list of vertices to an ROI and ROI bounding box
Parameters
----------
verts : array_like, shape (n_verts, 2)
List of vertices of a polygon with no crossing lines. The units
describing the positions of the vertices are interpixel spacing.
size_i : int
Number of pixels in the i-direction (number of rows) in
the image
size_j : int
Number of pixels in the j-direction (number of columns) in
the image
Returns
-------
roi : array_like, Boolean, shape (size_i, size_j)
roi[i,j] is True if pixel (i,j) is in the ROI.
roi[i,j] is False otherwise
roi_bbox : tuple of slice objects
To get a subimage with the bounding box of the ROI, use
im[roi_bbox].
roi_box : array_like, shape is size of bounding box or ROI
A mask for the ROI with the same dimension as the bounding
box. The indexing starts at zero at the upper right corner
of the box.
"""
# Make list of all points in the image in units of pixels
i = np.arange(size_i)
j = np.arange(size_j)
ii, jj = np.meshgrid(j, i)
pts = np.array(list(zip(ii.ravel(), jj.ravel())))
# Make a path object from vertices
p = path.Path(verts)
# Get list of points that are in roi
in_roi = p.contains_points(pts)
# Convert it to an image
    roi = in_roi.reshape((size_i, size_j)).astype(bool)
# Get bounding box of ROI
    regions = skimage.measure.regionprops(roi.astype(int))
bbox = regions[0].bbox
roi_bbox = np.s_[bbox[0]:bbox[2] + 1, bbox[1]:bbox[3] + 1]
# Get ROI mask for just within bounding box
roi_box = roi[roi_bbox]
# Return boolean in same shape as image
return (roi, roi_bbox, roi_box)
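# --- Added usage sketch (not part of the original module) ---
# A square ROI in a 100x100 image; the (x, y) vertices are illustrative
# assumptions.
def _demo_verts_to_roi():
    verts = [(20, 20), (80, 20), (80, 80), (20, 80)]
    roi, roi_bbox, roi_box = verts_to_roi(verts, 100, 100)
    # roi is a Boolean mask over the full image; roi_bbox slices out the
    # bounding box; roi_box is the mask restricted to that box.
    return roi.sum(), roi_box.shape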
class CostesColocalization(object):
"""
Generic class just to store attributes
"""
def __init__(self, **kw):
self.__dict__ = kw
def costes_coloc(im_1, im_2, psf_width=3, n_scramble=1000, thresh_r=0.0,
roi=None, roi_method='all', do_manders=True):
"""
Perform Costes colocalization analysis on a pair of images.
Parameters
----------
    im_1: array_like
        Intensity image for colocalization. Must be the
        same shape as `im_2`.
    im_2: array_like
        Intensity image for colocalization. Must be the
        same shape as `im_1`.
psf_width: int, default 3
Width, in pixels of the point spread function.
n_scramble: int, default 1000
        Number of scrambled image comparisons to do to get statistics.
thresh_r: float, default 0.0
Threshold Pearson r value to be considered colocalized.
roi: array_like, dtype bool, default None
Boolean image the same shape as `im_1` and `im_2` that
is True for pixels within the ROI.
roi_method: str, default 'all'
If 'all', all pixels of a given subimage must be within
the ROI for the subimage itself to be considered part
of the ROI. If 'any', if any one pixel is within the ROI,
the subimage is considered part of the ROI.
do_manders: bool, default True
If True, compute the Manders coefficients.
Returns
-------
output: A CostesColocalization instance.
The CostesColocalization instance has the following attributes.
im_1, im_2, psf_width, n_scramble, thresh_r, roi,
roi_method: As in the input parameters.
a: slope of the regression line I_2 = a * I_1 + b
b: intercept of regression line I_2 = a * I_1 + b
M_1: Manders coefficient for image 1
M_2: Manders coefficient for image 2
        pearson_r: Pearson correlation coefficient of the pixels
in the two images.
p_coloc: The probability of colocalization being present
in the two images.
"""
# Make mirrored boundaries in preparation for scrambling
im_1_mirror = mirror_edges(im_1, psf_width)
im_2_mirror = mirror_edges(im_2, psf_width)
# Set up ROI
if roi is None:
roi = np.ones_like(im_1, dtype='bool')
# Rename images to be sliced ROI
im_1 = im_1[roi]
im_2 = im_2[roi]
# Mirror ROI at edges
roi_mirror = mirror_edges(roi, psf_width)
# Compute the blocks that we'll scramble
blocks_1 = im_to_blocks(im_1_mirror, psf_width, roi_mirror, roi_method)
blocks_2 = im_to_blocks(im_2_mirror, psf_width, roi_mirror, roi_method)
# Flatten second list of blocks for Pearson calculations
blocks_2_flat = np.array(blocks_2).flatten()
# Compute the Pearson coefficient
pearson_r, _ = st.pearsonr(np.array(blocks_1).ravel(), blocks_2_flat)
# Do image scrambling and r calculations
r_scr = np.empty(n_scramble)
for i in range(n_scramble):
random.shuffle(blocks_1)
        r, _ = st.pearsonr(np.array(blocks_1).ravel(), blocks_2_flat)
r_scr[i] = r
# Compute percent chance of coloc
p_coloc = (r_scr < pearson_r).sum() / n_scramble
# Now do work to compute adjusted Manders's coefficients
if do_manders:
# Get the linear relationship between im_2 and im_1
a, b = odr_linear(im_1.ravel(), im_2.ravel())
# Perform threshold calculation
thresh_1 = find_thresh(im_1, im_2, a, b, thresh_r=thresh_r)
thresh_2 = a * thresh_1 + b
# Compute Costes's update to the Manders's coefficients
inds = (im_1 > thresh_1) & (im_2 > thresh_2)
M_1 = im_1[inds].sum() / im_1.sum()
M_2 = im_2[inds].sum() / im_2.sum()
# Toss results into class for returning
return CostesColocalization(
im_1=im_1, im_2=im_2, roi=roi, roi_method=roi_method,
psf_width=psf_width, n_scramble=n_scramble, thresh_r=thresh_r,
thresh_1=thresh_1, thresh_2=thresh_2, a=a, b=b, M_1=M_1,
M_2=M_2, r_scr=r_scr, pearson_r=pearson_r, p_coloc=p_coloc)
else:
return CostesColocalization(
im_1=im_1, im_2=im_2, roi=roi, roi_method=roi_method,
psf_width=psf_width, n_scramble=n_scramble, thresh_r=None,
thresh_1=None, thresh_2=None, a=None, b=None, M_1=None,
M_2=None, r_scr=r_scr, pearson_r=pearson_r, p_coloc=p_coloc)
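# --- Added usage sketch (not part of the original module) ---
# Colocalization of two positively correlated synthetic images, skipping
# the Manders calculation for speed; the image sizes, noise level, and
# scramble count are illustrative assumptions.
def _demo_costes_coloc():
    im_1 = np.random.rand(60, 60)
    im_2 = 0.8 * im_1 + 0.2 * np.random.rand(60, 60)
    coloc = costes_coloc(im_1, im_2, psf_width=3, n_scramble=100,
                         do_manders=False)
    # pearson_r should be clearly positive and p_coloc close to 1.
    return coloc.pearson_r, coloc.p_coloc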
def odr_linear(x, y, intercept=None, beta0=None):
"""
Performs orthogonal linear regression on x, y data.
Parameters
----------
    x: array_like
        x-data, 1D array. Must be the same length as `y`.
    y: array_like
        y-data, 1D array. Must be the same length as `x`.
intercept: float, default None
If not None, fixes the intercept.
beta0: array_like, shape (2,)
Guess at the slope and intercept, respectively.
Returns
-------
output: ndarray, shape (2,)
Array containing slope and intercept of ODR line.
"""
def linear_fun(p, x):
return p[0] * x + p[1]
def linear_fun_fixed(p, x):
return p[0] * x + intercept
# Set the model to be used for the ODR fitting
if intercept is None:
model = scipy.odr.Model(linear_fun)
if beta0 is None:
beta0 = (0.0, 1.0)
else:
model = scipy.odr.Model(linear_fun_fixed)
if beta0 is None:
beta0 = (1.0,)
# Make a Data instance
data = scipy.odr.Data(x, y)
# Instantiate ODR
odr = scipy.odr.ODR(data, model, beta0=beta0)
# Perform ODR fit
try:
result = odr.run()
except scipy.odr.odr_error:
        raise scipy.odr.odr_error('ODR failed.')
return result.beta
def find_thresh(im_1, im_2, a, b, thresh_r=0.0):
"""
Find the threshold pixel intensity of `im_1` where
the Pearson correlation between the images goes below `thresh_r`.
Parameters
----------
    im_1: array_like
        Intensity image for colocalization. Must be the
        same shape as `im_2`.
    im_2: array_like
        Intensity image for colocalization. Must be the
        same shape as `im_1`.
    a: float
        Slope of the ODR regression of `im_2` vs. `im_1`.
    b: float
        Intercept of the ODR regression of `im_2` vs. `im_1`.
thresh_r: float, default 0.0
Threshold Pearson correlation
Returns
-------
output: int or float
The threshold pixel intensity for colocalization
(see notes below).
Notes
-----
.. To determine which pixels are colocalized in two images, we
do the following:
        1. Perform a regression based on all points to give
I_2 = a * I_1 + b.
2. Define T = I_1.max().
3. Compute the Pearson r value considering all pixels with
I_1 < T and I_2 < a * T + b.
        4. If r > thresh_r, decrement T and go to 3. Otherwise,
           save T_1 = T and T_2 = a * T + b.
5. Pixels with I_2 > T_2 and I_1 > T_1 are colocalized.
This function returns T.
"""
if im_1.dtype not in [np.uint16, np.uint8]:
incr = (im_1.max() - im_1.min()) / 256.0
else:
incr = 1
thresh_max = im_1.max()
thresh_min = im_1.min()
thresh = thresh_max
r = pearsonr_below_thresh(thresh, im_1, im_2, a, b)
min_r = r
min_thresh = thresh
while thresh > thresh_min and r > thresh_r:
thresh -= incr
r = pearsonr_below_thresh(thresh, im_1, im_2, a, b)
if min_r > r:
min_r = r
min_thresh = thresh
if thresh == thresh_min:
thresh = min_thresh
return thresh
def pearsonr_below_thresh(thresh, im_1, im_2, a, b):
"""
The Pearson r between two images for pixel values below
threshold.
Parameters
----------
thresh: float or int
The threshold value of pixel intensities to consider for
`im_1`.
    im_1: array_like
        Intensity image for colocalization. Must be the
        same shape as `im_2`.
    im_2: array_like
        Intensity image for colocalization. Must be the
        same shape as `im_1`.
    a: float
        Slope of the ODR regression of `im_2` vs. `im_1`.
    b: float
        Intercept of the ODR regression of `im_2` vs. `im_1`.
"""
inds = (im_1 <= thresh) | (im_2 <= a * thresh + b)
r, _ = st.pearsonr(im_1[inds], im_2[inds])
return r
def mirror_edges(im, psf_width):
"""
    Given a 2D image, pads the boundaries by mirroring so that the
    dimensions of the image are multiples of the width of the
    point spread function.
Parameters
----------
im: array_like
Image to mirror edges
psf_width: int
The width, in pixels, of the point spread function
Returns
-------
output: array_like
Image with mirrored edges
"""
# How much we need to pad
pad_i = psf_width - (im.shape[0] % psf_width)
pad_j = psf_width - (im.shape[1] % psf_width)
# Get widths
pad_top = pad_i // 2
pad_bottom = pad_i - pad_top
pad_left = pad_j // 2
pad_right = pad_j - pad_left
# Do the padding
return np.pad(im, ((pad_top, pad_bottom), (pad_left, pad_right)),
mode='reflect')
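# --- Added usage sketch (not part of the original module) ---
# Padding a 10x13 image for psf_width 4 yields dimensions that are
# multiples of 4 (12x16); the input values are illustrative.
def _demo_mirror_edges():
    im = np.arange(130, dtype=float).reshape(10, 13)
    padded = mirror_edges(im, 4)
    assert padded.shape == (12, 16)
    return padded.shape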
def im_to_blocks(im, width, roi=None, roi_method='all'):
"""
Converts image to list of square subimages called "blocks."
Parameters
----------
im: array_like
Image to convert to a list of blocks.
width: int
Width of square blocks in units of pixels.
    roi: array_like, dtype bool, default None
        Boolean image the same shape as `im` that is True for
        pixels within the ROI.
roi_method: str, default 'all'
If 'all', all pixels of a given subimage must be within
the ROI for the subimage itself to be considered part
of the ROI. If 'any', if any one pixel is within the ROI,
the subimage is considered part of the ROI.
Returns
-------
output: list of ndarrays
Each entry is a `width` by `width` NumPy array containing
a block.
"""
# Initialize ROI
if roi is None:
roi = np.ones_like(im)
# Specify method for determining if in ROI or not
if roi_method == 'all':
roi_test = np.all
else:
roi_test = np.any
# Construct list of blocks
return [im[i:i + width, j:j + width]
for i in range(0, im.shape[0], width)
for j in range(0, im.shape[1], width)
if roi_test(roi[i:i + width, j:j + width])]
# ########################################################################## #
# GENERAL UTILITIES #
# ########################################################################## #
def ecdf(data, conventional=False, buff=0.1, min_x=None, max_x=None):
"""
Computes the x and y values for an ECDF of a one-dimensional
data set.
Parameters
----------
data : array_like
Array of data to be plotted as an ECDF.
    conventional : bool, default False
        If True, generates x,y values for a "conventional" ECDF, which
        gives a staircase-style ECDF when plotted as plt.plot(x, y, '-').
        Otherwise, gives points x,y corresponding to the concave
        corners of the conventional ECDF, to be plotted as
        plt.plot(x, y, '.').
    buff : float, default 0.1
        How long the tails at y = 0 and y = 1 should extend as a
        fraction of the total range of the data. Ignored if
        `conventional` is False.
    min_x : float, default -np.inf
        If min_x is greater than extent computed from `buff`, tail at
        y = 0 extends to min_x. Ignored if `conventional` is False.
    max_x : float, default np.inf
        If max_x is less than extent computed from `buff`, tail at
        y = 1 extends to max_x. Ignored if `conventional` is False.
Returns
-------
x : array_like, shape (n_data, )
The x-values for plotting the ECDF.
y : array_like, shape (n_data, )
The y-values for plotting the ECDF.
"""
# Get x and y values for data points
x, y = np.sort(data), np.arange(1, len(data)+1) / len(data)
if conventional:
# Set defaults for min and max tails
if min_x is None:
min_x = -np.inf
if max_x is None:
max_x = np.inf
# Set up output arrays
x_conv = np.empty(2*(len(x) + 1))
y_conv = np.empty(2*(len(x) + 1))
# y-values for steps
y_conv[:2] = 0
y_conv[2::2] = y
y_conv[3::2] = y
# x- values for steps
x_conv[0] = max(min_x, x[0] - (x[-1] - x[0])*buff)
x_conv[1] = x[0]
x_conv[2::2] = x
x_conv[3:-1:2] = x[1:]
x_conv[-1] = min(max_x, x[-1] + (x[-1] - x[0])*buff)
return x_conv, y_conv
return x, y
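# --- Added usage sketch (not part of the original module) ---
# "Dot" style ECDF values for a tiny sample; the data are illustrative.
def _demo_ecdf():
    data = np.array([3.0, 1.0, 2.0])
    x, y = ecdf(data)
    # x is the sorted data; y climbs from 1/3 to 1 in equal steps:
    # (array([1., 2., 3.]), array([0.333..., 0.666..., 1.]))
    return x, y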
def approx_hess(x, f, epsilon=None, args=(), kwargs={}):
"""
Parameters
----------
x : array_like
value at which function derivative is evaluated
f : function
function of one array f(x, `*args`, `**kwargs`)
epsilon : float or array-like, optional
Stepsize used, if None, then stepsize is automatically chosen
according to EPS**(1/4)*x.
args : tuple
Arguments for function `f`.
kwargs : dict
Keyword arguments for function `f`.
Returns
-------
hess : ndarray
array of partial second derivatives, Hessian
Notes
-----
    Equation (9) in Ridout. Computes the Hessian as::
        1/(4*d_j*d_k) * ((f(x + d[j]*e[j] + d[k]*e[k])
                          - f(x + d[j]*e[j] - d[k]*e[k]))
                         - (f(x - d[j]*e[j] + d[k]*e[k])
                            - f(x - d[j]*e[j] - d[k]*e[k])))
    where e[j] is a vector with element j == 1 and the rest are zero and
    d[i] is epsilon[i].
References
    ----------
Ridout, M.S. (2009) Statistical applications of the complex-step method
of numerical differentiation. The American Statistician, 63, 66-74
Copyright
---------
This is an adaptation of the function approx_hess3() in
statsmodels.tools.numdiff. That code is BSD (3 clause) licensed as
follows:
Copyright (C) 2006, Jonathan E. Taylor
All rights reserved.
Copyright (c) 2006-2008 Scipy Developers.
All rights reserved.
Copyright (c) 2009-2012 Statsmodels Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of Statsmodels nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL STATSMODELS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
n = len(x)
h = smnd._get_epsilon(x, 4, epsilon, n)
ee = np.diag(h)
hess = np.outer(h,h)
for i in range(n):
for j in range(i, n):
hess[i, j] = (f(*((x + ee[i, :] + ee[j, :],) + args), **kwargs)
- f(*((x + ee[i, :] - ee[j, :],) + args), **kwargs)
- (f(*((x - ee[i, :] + ee[j, :],) + args), **kwargs)
- f(*((x - ee[i, :] - ee[j, :],) + args), **kwargs))
)/(4.*hess[i, j])
hess[j, i] = hess[i, j]
return hess
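# --- Added usage sketch (not part of the original module) ---
# For a quadratic f(x) = x.T A x / 2 the Hessian is exactly A, so the
# finite-difference result should match it closely; A and the evaluation
# point are illustrative assumptions.
def _demo_approx_hess():
    A = np.array([[2.0, 0.5], [0.5, 1.0]])
    f = lambda x: 0.5 * x @ A @ x
    H = approx_hess(np.array([0.3, -0.2]), f)
    assert np.allclose(H, A, atol=1e-4)
    return H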
|
justinbois/bebi103_utils
|
legacy/bebi103.py
|
Python
|
mit
| 61,231
|
import numpy, os, csv, logging
from math import log
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
path = os.path.dirname(__file__)
def precision(expected, found):
tp = 0 # true positives
fp = 0 # false positives
for doc in found:
if doc in expected:
tp += 1
else:
fp += 1
p = tp / (tp + fp) # Total of relevant docs retrieved (true positives) / total retrieved by the search engine
return p
def recall(expected, found):
tp = 0
for doc in found:
if doc in expected:
tp += 1
rec = tp / len(expected) # Total of relevant docs retrieved (true positives) / total relevant docs
return rec
def F1(precision, recall):
return 2 * ((precision * recall) / (precision + recall))
def average(array):
if len(array) == 0:
return 0
return sum(array) / len(array)
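# --- Added usage sketch (not part of the original file) ---
# Toy query with 3 relevant docs and 4 retrieved, 2 of them relevant;
# the doc ids are illustrative assumptions.
def _demo_metrics():
    expected_docs = ['d1', 'd2', 'd3']
    found_docs = ['d1', 'd4', 'd2', 'd5']
    p = precision(expected_docs, found_docs)   # 2 / 4 = 0.5
    r = recall(expected_docs, found_docs)      # 2 / 3
    return F1(p, r)                            # harmonic mean, ~0.571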
class result:
def __init__(self, position, docs):
self.position = position
self.docs = docs
def compare(expected, search_result, stemmer = False):
docs_expected = []
docs_found = []
for e in expected:
arr = []
for doc in e.docs:
arr.append(doc[0])
docs_expected.append(arr)
for sr in search_result:
arr = []
for doc in sr.docs:
similarity = float(doc[2])
            # Consider only docs the search engine retrieved with a similarity > 0
if(similarity > 0):
arr.append(doc[1])
docs_found.append(arr)
number_of_queries = len(docs_found)
# Arrays
precision_10_arr = []
precision_arr = []
recall_arr = []
recall_10_arr = []
f1_arr = []
dcg_arr = []
ndcg_arr = []
# Arrays to store 11 points for Precision Recall chart
chart_p1 = []
chart_p2 = []
chart_p3 = []
chart_p4 = []
chart_p5 = []
chart_p6 = []
chart_p7 = []
chart_p8 = []
chart_p9 = []
chart_p10 = []
chart_p11 = []
# For each query let's calculate the metrics...
for i in range(0, number_of_queries):
precision_10 = precision(docs_expected[i], docs_found[i][:10])
precision_10_arr.append(precision_10)
p = precision(docs_expected[i], docs_found[i])
precision_arr.append(p)
recall_10 = recall(docs_expected[i], docs_found[i][:10])
recall_10_arr.append(recall_10)
rec = recall(docs_expected[i], docs_found[i])
recall_arr.append(rec)
f1 = F1(p, rec)
f1_arr.append(f1)
        # Half-open recall bins so boundary values are not dropped
        if 0 < rec <= 0.05:
            chart_p1.append(p)
        if 0.05 < rec <= 0.15:
            chart_p2.append(p)
        if 0.15 < rec <= 0.25:
            chart_p3.append(p)
        if 0.25 < rec <= 0.35:
            chart_p4.append(p)
        if 0.35 < rec <= 0.45:
            chart_p5.append(p)
        if 0.45 < rec <= 0.55:
            chart_p6.append(p)
        if 0.55 < rec <= 0.65:
            chart_p7.append(p)
        if 0.65 < rec <= 0.75:
            chart_p8.append(p)
        if 0.75 < rec <= 0.85:
            chart_p9.append(p)
        if 0.85 < rec <= 0.95:
            chart_p10.append(p)
        if rec > 0.95:
            chart_p11.append(p)
# Calculating averages...
chart_p1 = average(chart_p1)
chart_p2 = average(chart_p2)
chart_p3 = average(chart_p3)
chart_p4 = average(chart_p4)
chart_p5 = average(chart_p5)
chart_p6 = average(chart_p6)
chart_p7 = average(chart_p7)
chart_p8 = average(chart_p8)
chart_p9 = average(chart_p9)
chart_p10 = average(chart_p10)
chart_p11 = average(chart_p11)
# Report.txt text file
text_file = open(os.path.join(path, "../output", "report.txt"), "a" if stemmer else "w")
text_file.write("With Potter Stemmer:\n" if stemmer else "Without Potter Stemmer:\n")
text_file.write("Precision@10: " + str(average(precision_10_arr)) + "\n")
text_file.write("MAP: " + str(average(precision_arr)) + "\n")
text_file.write("F1: " + str(average(f1_arr)) + "\n\n")
text_file.close()
csv_name = ("11points_with_stemmer" if stemmer else "11points_without_stemmer") + ".csv"
with open(os.path.join(path, "../output", csv_name), 'w', newline = '') as _11points_CSV:
writer = csv.writer(_11points_CSV, delimiter = ';', quotechar = '|', quoting = csv.QUOTE_MINIMAL)
writer.writerow([0 , chart_p1])
writer.writerow([0.1, chart_p2])
writer.writerow([0.2, chart_p3])
writer.writerow([0.3, chart_p4])
writer.writerow([0.4, chart_p5])
writer.writerow([0.5, chart_p6])
writer.writerow([0.6, chart_p7])
writer.writerow([0.7, chart_p8])
writer.writerow([0.8, chart_p9])
writer.writerow([0.9, chart_p10])
writer.writerow([1.0, chart_p11])
def main():
# Configuring Logger
logger = logging.getLogger('Exercise 2')
logger.setLevel(logging.DEBUG)
# Create handlers
file_handler = logging.FileHandler(os.path.join(path, '../log/results_comparer.log'), mode = 'w')
file_handler.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
# Create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)
# Add the handlers to the Logger
logger.addHandler(file_handler)
logger.addHandler(console_handler)
logger.info("Results Comparer started...")
expected = []
results_without_stemmer = []
results_with_stemmer = []
    # Read expected and results CSV files; the second column holds a
    # Python-literal list of docs, parsed with eval (trusted input)
with open(os.path.join(path, "../output/expected.csv")) as expected_CSV:
reader = csv.reader(expected_CSV, delimiter = ';', quotechar = '|')
for row in reader:
expected.append(result(row[0], eval(row[1])))
logger.info("Read expected.csv file")
with open(os.path.join(path, "../output/results_without_stemmer.csv")) as results_CSV:
reader = csv.reader(results_CSV, delimiter = ';', quotechar = '|')
for row in reader:
results_without_stemmer.append(result(row[0], eval(row[1])))
logger.info("Read results_without_stemmer.csv file")
with open(os.path.join(path, "../output/results_with_stemmer.csv")) as results_CSV:
reader = csv.reader(results_CSV, delimiter = ';', quotechar = '|')
for row in reader:
results_with_stemmer.append(result(row[0], eval(row[1])))
logger.info("Read results_with_stemmer.csv file")
compare(expected, results_without_stemmer)
compare(expected, results_with_stemmer, True)
logger.info("Results Comparer finished")
main()
|
leniel/InformationSearchRetrieval
|
Exercise 2/modules/results_comparer.py
|
Python
|
mit
| 7,121
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pk15.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
rolandgeider/pk15-orakel
|
manage.py
|
Python
|
agpl-3.0
| 247
|
import amo
import amo.tests
import waffle
from users.models import UserProfile
from mkt.purchase.utils import payments_enabled
from mkt.site.fixtures import fixture
from test_utils import RequestFactory
class TestUtils(amo.tests.TestCase):
fixtures = fixture('user_2519')
def setUp(self):
self.req = RequestFactory().get('/')
def test_settings(self):
with self.settings(PAYMENT_LIMITED=False):
assert payments_enabled(self.req)
def test_not_flag(self):
with self.settings(PAYMENT_LIMITED=True):
assert not payments_enabled(self.req)
def test_flag(self):
profile = UserProfile.objects.get(pk=2519)
flag = waffle.models.Flag.objects.create(name='override-app-payments')
flag.everyone = None
flag.users.add(profile.user)
flag.save()
self.req.user = profile.user
with self.settings(PAYMENT_LIMITED=True):
assert payments_enabled(self.req)
|
Joergen/zamboni
|
mkt/purchase/tests/test_utils_.py
|
Python
|
bsd-3-clause
| 983
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"This is some interesting educational program"
# wxRays (C) 2012 Serhii Lysovenko
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from __future__ import print_function, unicode_literals
import __builtin__
import os
from addons import Addons
import locale
osp = os.path
VERSION = '0.1.1'
def install_gt():
try:
from gettext import install
LOCALEDIR = osp.join(osp.dirname(__file__), 'locale')
if osp.isdir(LOCALEDIR):
install('wxRays', LOCALEDIR, True)
else:
install('wxRays', unicode=True)
except ImportError:
__builtin__.__dict__["_"] = unicode
class Settings:
def __init__(self):
import ConfigParser
self.__config = ConfigParser.RawConfigParser()
if os.name == 'posix':
aphom = osp.expanduser("~/.config")
if osp.isdir(aphom):
self.__app_home = aphom + "/wxRays"
else:
self.__app_home = osp.expanduser("~/.wxRays")
elif os.name == 'nt':
if osp.isdir(osp.expanduser("~/Application Data")):
self.__app_home = osp.expanduser("~/Application Data/wxRays")
else:
self.__app_home = osp.expanduser("~/wxRays")
else:
self.__app_home = osp.normpath(osp.expanduser("~/wxRays"))
if osp.isfile(self.__app_home):
os.remove(self.__app_home)
if not osp.isdir(self.__app_home):
os.mkdir(self.__app_home, 0755)
self.__config.read(osp.join(self.__app_home, "wxRays.cfg"))
def declare_section(self, section):
if not self.__config.has_section(section):
self.__config.add_section(section)
def get(self, name, default, section='DEFAULT'):
if not self.__config.has_option(section, name):
return default
deft = type(default)
try:
if deft == float:
return self.__config.getfloat(section, name)
if deft == int:
return self.__config.getint(section, name)
if deft == bool:
return self.__config.getboolean(section, name)
except ValueError:
return default
return self.__config.get(section, name)
def set(self, name, val, section='DEFAULT'):
# work around ConfigParser's awkward handling of boolean values
if type(val) == bool:
val = str(val)
self.__config.set(section, name, val)
def get_home(self, name=''):
if name:
return osp.join(self.__app_home, name)
return self.__app_home
def save(self):
with open(self.get_home("wxRays.cfg"), "w") as fobj:
self.__config.write(fobj)
def prog_init():
__builtin__.__dict__['APP_SETT'] = Settings()
__builtin__.__dict__['PROG_NAME'] = u"wxRays"
APP_SETT.addons = Addons()
APP_SETT.addons.set_active()
locale.setlocale(locale.LC_NUMERIC, "")
install_gt()
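# Minimal usage sketch (hypothetical option names; nothing in this module
# calls it this way): the typed get/set round-trip provided by Settings.
#
#   sett = Settings()
#   sett.declare_section("plot")
#   width = sett.get("width", 640, "plot")   # typed lookup, falls back to 640
#   sett.set("width", width + 80, "plot")
#   sett.save()                              # writes <app_home>/wxRays.cfg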
|
Lysovenko/wxRays
|
settings.py
|
Python
|
gpl-3.0
| 3,701
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
**ui_constants.py**
**Platform:**
Windows, Linux, Mac Os X.
**Description:**
Defines **sIBL_GUI** package ui constants through the :class:`UiConstants` class.
**Others:**
"""
from __future__ import unicode_literals
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2014 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "thomas.mansencal@gmail.com"
__status__ = "Production"
__all__ = ["UiConstants"]
class UiConstants():
"""
Defines **sIBL_GUI** package ui constants.
"""
ui_file = "sIBL_GUI.ui"
"""
:param ui_file: Application ui file.
:type ui_file: unicode
"""
windows_stylesheet_file = "styles/Windows_styleSheet.qss"
"""
:param windows_stylesheet_file: Application Windows Os stylesheet file.
:type windows_stylesheet_file: unicode
"""
darwin_stylesheet_file = "styles/Darwin_styleSheet.qss"
"""
:param darwin_stylesheet_file: Application Mac Os X Os stylesheet file.
:type darwin_stylesheet_file: unicode
"""
linux_stylesheet_file = "styles/Linux_styleSheet.qss"
"""
:param linux_stylesheet_file: Application Linux Os stylesheet file.
:type linux_stylesheet_file: unicode
"""
windows_style = "plastique"
"""
:param windows_style: Application Windows Os style.
:type windows_style: unicode
"""
darwin_style = "plastique"
"""
:param darwin_style: Application Mac Os X Os style.
:type darwin_style: unicode
"""
linux_style = "plastique"
"""
:param linux_style: Application Linux Os style.
:type linux_style: unicode
"""
settings_file = "preferences/Default_Settings.rc"
"""
:param settings_file: Application defaults settings file.
:type settings_file: unicode
"""
layouts_file = "layouts/Default_Layouts.rc"
"""
:param layouts_file: Application defaults layouts file.
:type layouts_file: unicode
"""
application_windows_icon = "images/Icon_Light.png"
"""
:param application_windows_icon: Application icon file.
:type application_windows_icon: unicode
"""
splash_screen_image = "images/sIBL_GUI_SpashScreen.png"
"""
:param splash_screen_image: Application splashscreen image.
:type splash_screen_image: unicode
"""
logo_image = "images/sIBL_GUI_Logo.png"
"""
:param logo_image: Application logo image.
:type logo_image: unicode
"""
default_toolbar_icon_size = 32
"""
:param default_toolbar_icon_size: Application toolbar icons size.
:type default_toolbar_icon_size: int
"""
central_widget_icon = "images/Central_Widget.png"
"""
:param central_widget_icon: Application **Central Widget** icon.
:type central_widget_icon: unicode
"""
central_widget_hover_icon = "images/Central_Widget_Hover.png"
"""
:param central_widget_hover_icon: Application **Central Widget** hover icon.
:type central_widget_hover_icon: unicode
"""
central_widget_active_icon = "images/Central_Widget_Active.png"
"""
:param central_widget_active_icon: Application **Central Widget** active icon.
:type central_widget_active_icon: unicode
"""
custom_layouts_icon = "images/Custom_Layouts.png"
"""
:param custom_layouts_icon: Application **Custom Layouts** icon.
:type custom_layouts_icon: unicode
"""
custom_layouts_hover_icon = "images/Custom_Layouts_Hover.png"
"""
:param custom_layouts_hover_icon: Application **Custom Layouts** hover icon.
:type custom_layouts_hover_icon: unicode
"""
custom_layouts_active_icon = "images/Custom_Layouts_Active.png"
"""
:param custom_layouts_active_icon: Application **Custom Layouts** active icon.
:type custom_layouts_active_icon: unicode
"""
miscellaneous_icon = "images/Miscellaneous.png"
"""
:param miscellaneous_icon: Application **Miscellaneous** icon.
:type miscellaneous_icon: unicode
"""
miscellaneous_hover_icon = "images/Miscellaneous_Hover.png"
"""
:param miscellaneous_hover_icon: Application **Miscellaneous** hover icon.
:type miscellaneous_hover_icon: unicode
"""
miscellaneous_active_icon = "images/Miscellaneous_Active.png"
"""
:param miscellaneous_active_icon: Application **Miscellaneous** active icon.
:type miscellaneous_active_icon: unicode
"""
library_icon = "images/Library.png"
"""
:param library_icon: Application **Library** icon.
:type library_icon: unicode
"""
library_hover_icon = "images/Library_Hover.png"
"""
:param library_hover_icon: Application **Library** hover icon.
:type library_hover_icon: unicode
"""
library_active_icon = "images/Library_Active.png"
"""
:param library_active_icon: Application **Library** active icon.
:type library_active_icon: unicode
"""
inspect_icon = "images/Inspect.png"
"""
:param inspect_icon: Application **Inspect** icon.
:type inspect_icon: unicode
"""
inspect_hover_icon = "images/Inspect_Hover.png"
"""
:param inspect_hover_icon: Application **Inspect** hover icon.
:type inspect_hover_icon: unicode
"""
inspect_active_icon = "images/Inspect_Active.png"
"""
:param inspect_active_icon: Application **Inspect** active icon.
:type inspect_active_icon: unicode
"""
export_icon = "images/Export.png"
"""
:param export_icon: Application **Export** icon.
:type export_icon: unicode
"""
export_hover_icon = "images/Export_Hover.png"
"""
:param export_hover_icon: Application **Export** hover icon.
:type export_hover_icon: unicode
"""
export_active_icon = "images/Export_Active.png"
"""
:param export_active_icon: Application **Export** active icon.
:type export_active_icon: unicode
"""
edit_icon = "images/Edit.png"
"""
:param edit_icon: Application **Edit** icon.
:type edit_icon: unicode
"""
edit_hover_icon = "images/Edit_Hover.png"
"""
:param edit_hover_icon: Application **Edit** hover icon.
:type edit_hover_icon: unicode
"""
edit_active_icon = "images/Edit_Active.png"
"""
:param edit_active_icon: Application **Edit** active icon.
:type edit_active_icon: unicode
"""
preferences_icon = "images/Preferences.png"
"""
:param preferences_icon: Application **Preferences** icon.
:type preferences_icon: unicode
"""
preferences_hover_icon = "images/Preferences_Hover.png"
"""
:param preferences_hover_icon: Application **Preferences** hover icon.
:type preferences_hover_icon: unicode
"""
preferences_active_icon = "images/Preferences_Active.png"
"""
:param preferences_active_icon: Application **Preferences** active icon.
:type preferences_active_icon: unicode
"""
format_error_image = "images/Thumbnail_Format_Not_Supported_Yet.png"
"""
:param format_error_image: Application format error image thumbnail.
:type format_error_image: unicode
"""
missing_image = "images/Thumbnail_Not_Found.png"
"""
:param missing_image: Application missing image thumbnail.
:type missing_image: unicode
"""
loading_image = "images/Loading.png"
"""
:param loading_image: Application loading image thumbnail.
:type loading_image: unicode
"""
startup_layout = "startup_centric"
"""
:param startup_layout: Application startup layout.
:type startup_layout: unicode
"""
development_layout = "edit_centric"
"""
:param development_layout: Application development layout.
:type development_layout: unicode
"""
help_file = "http://kelsolaar.hdrlabs.com/sIBL_GUI/Support/Documentation/Help/index.html"
"""Application online help file:
'**http://kelsolaar.hdrlabs.com/sIBL_GUI/Support/Documentation/Help/index.html**' ( String )"""
api_file = "http://kelsolaar.hdrlabs.com/sIBL_GUI/Support/Documentation/Api/index.html"
"""Application online api file:
'**http://kelsolaar.hdrlabs.com/sIBL_GUI/Support/Documentation/Api/index.html**' ( String )"""
make_donation_file = "http://kelsolaar.hdrlabs.com/sIBL_GUI/Support/Donations/Make_A_Donation.html"
"""Application online donation file:
'**http://kelsolaar.hdrlabs.com/sIBL_GUI/Support/Donations/Make_A_Donation.html**' ( String )"""
native_image_formats = {"Bmp": "\.bmp$",
"Jpeg": "\.jpeg$",
"Jpg": "\.jpg$",
"Png": "\.png$"}
"""
:param native_image_formats: Application native image file formats.
:type native_image_formats: dict
"""
third_party_image_formats = {"Exr": ("\.exr$"),
"Hdr": ("\.hdr$"),
"Tif": ("\.tif$"),
"Tiff": ("\.tiff$"),
"Tga": ("\.tga$")}
"""
:param third_party_image_formats: Application third party image file formats.
:type third_party_image_formats: dict
"""
thumbnails_sizes = {"Default": None,
"XLarge": 512,
"Large": 256,
"Medium": 128,
"Small": 64,
"XSmall": 32,
"Special1": 600}
"""
:param thumbnails_sizes: Application thumbnails sizes.
:type thumbnails_sizes: dict
"""
thumbnails_cache_directory = "thumbnails"
"""Thumbnails cache directory."""
crittercism_id = "5075c158d5f9b9796b000002"
"""
:param crittercism_id: Crittercism Id.
:type crittercism_id: unicode
"""
|
KelSolaar/sIBL_GUI
|
sibl_gui/globals/ui_constants.py
|
Python
|
gpl-3.0
| 9,825
|
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.ConfigurationSystem.Client.PathFinder import getAgentSection
from DIRAC.Core.Utilities.CFG import CFG
from DIRAC.Core.Utilities import List
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities.JDL import loadJDLAsCFG, dumpCFGAsJDL
class JobManifest( object ):
def __init__( self, manifest = "" ):
self.__manifest = CFG()
self.__dirty = False
self.__ops = False
if manifest:
result = self.load( manifest )
if not result[ 'OK' ]:
raise Exception( result[ 'Message' ] )
def clone( self ):
manifest = JobManifest()
manifest.__manifest = self.__manifest.clone()
manifest.__dirty = self.__dirty
manifest.__ops = self.__ops
return manifest
def isDirty( self ):
return self.__dirty
def setDirty( self ):
self.__dirty = True
def clearDirty( self ):
self.__dirty = False
def load( self, dataString ):
"""
Auto discover format type based on [ .. ] of JDL
"""
dataString = dataString.strip()
if dataString[0] == "[" and dataString[-1] == "]":
return self.loadJDL( dataString )
else:
return self.loadCFG( dataString )
def loadJDL( self, jdlString ):
"""
Load job manifest from JDL format
"""
result = loadJDLAsCFG( jdlString.strip() )
if not result[ 'OK' ]:
self.__manifest = CFG()
return result
self.__manifest = result[ 'Value' ][0]
return S_OK()
def loadCFG( self, cfgString ):
"""
Load job manifest from CFG format
"""
try:
self.__manifest.loadFromBuffer( cfgString )
except Exception, e:
return S_ERROR( "Can't load manifest from cfg: %s" % str( e ) )
return S_OK()
def dumpAsCFG( self ):
return str( self.__manifest )
def getAsCFG( self ):
return self.__manifest.clone()
def dumpAsJDL( self ):
return dumpCFGAsJDL( self.__manifest )
def __getCSValue( self, varName, defaultVal = None ):
if not self.__ops:
self.__ops = Operations( group = self.__manifest[ 'OwnerGroup' ], setup = self.__manifest[ 'DIRACSetup' ] )
if varName[0] != "/":
varName = "JobDescription/%s" % varName
return self.__ops.getValue( varName, defaultVal )
def __checkNumericalVar( self, varName, defaultVal, minVal, maxVal ):
"""
Check a numerical var
"""
initialVal = False
if varName not in self.__manifest:
varValue = self.__getCSValue( "Default%s" % varName , defaultVal )
else:
varValue = self.__manifest[ varName ]
initialVal = varValue
try:
varValue = long( varValue )
except ( ValueError, TypeError ):
return S_ERROR( "%s must be a number" % varName )
minVal = self.__getCSValue( "Min%s" % varName, minVal )
maxVal = self.__getCSValue( "Max%s" % varName, maxVal )
varValue = max( minVal, min( varValue, maxVal ) )
if initialVal != varValue:
self.__manifest.setOption( varName, varValue )
return S_OK( varValue )
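# Worked example for the clamping above (hypothetical values, assuming the
# CS does not override Min/Max): with defaultVal = 86400, minVal = 100 and
# maxVal = 500000, a manifest carrying CPUTime = 10 is clamped to 100 and
# written back via setOption; 10**7 is clamped to 500000; a non-numeric
# value returns S_ERROR( "CPUTime must be a number" ).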
def __checkChoiceVar( self, varName, defaultVal, choices ):
"""
Check a choice var
"""
initialVal = False
if varName not in self.__manifest:
varValue = self.__getCSValue( "Default%s" % varName , defaultVal )
else:
varValue = self.__manifest[ varName ]
initialVal = varValue
if varValue not in self.__getCSValue( "Choices%s" % varName , choices ):
return S_ERROR( "%s is not a valid value for %s" % ( varValue, varName ) )
if initialVal != varValue:
self.__manifest.setOption( varName, varValue )
return S_OK( varValue )
def __checkMultiChoice( self, varName, choices ):
"""
Check a multi choice var
"""
initialVal = False
if varName not in self.__manifest:
return S_OK()
else:
varValue = self.__manifest[ varName ]
initialVal = varValue
choices = self.__getCSValue( "Choices%s" % varName , choices )
for v in List.fromChar( varValue ):
if v not in choices:
return S_ERROR( "%s is not a valid value for %s" % ( v, varName ) )
if initialVal != varValue:
self.__manifest.setOption( varName, varValue )
return S_OK( varValue )
def __checkMaxInputData( self, maxNumber ):
"""
Check Maximum Number of Input Data files allowed
"""
initialVal = False
varName = "InputData"
if varName not in self.__manifest:
return S_OK()
varValue = self.__manifest[ varName ]
if len( List.fromChar( varValue ) ) > maxNumber:
return S_ERROR( 'Number of Input Data Files (%s) greater than current limit: %s' % ( len( List.fromChar( varValue ) ) , maxNumber ) )
return S_OK()
def __contains__( self, key ):
""" Check if the manifest has the required key
"""
return key in self.__manifest
def setOptionsFromDict( self, varDict ):
for k in sorted( varDict ):
self.setOption( k, varDict[ k ] )
def check( self ):
"""
Check that the manifest is OK
"""
for k in [ 'OwnerName', 'OwnerDN', 'OwnerGroup', 'DIRACSetup' ]:
if k not in self.__manifest:
return S_ERROR( "Missing var %s in manifest" % k )
#Check CPUTime
result = self.__checkNumericalVar( "CPUTime", 86400, 100, 500000 )
if not result[ 'OK' ]:
return result
result = self.__checkNumericalVar( "Priority", 1, 0, 10 )
if not result[ 'OK' ]:
return result
allowedSubmitPools = []
for option in [ "DefaultSubmitPools", "SubmitPools", "AllowedSubmitPools" ]:
allowedSubmitPools += gConfig.getValue( "%s/%s" % ( getAgentSection( "WorkloadManagement/TaskQueueDirector" ),
option ),
[] )
result = self.__checkMultiChoice( "SubmitPools", list( set( allowedSubmitPools ) ) )
if not result[ 'OK' ]:
return result
result = self.__checkMultiChoice( "PilotTypes", [ 'private' ] )
if not result[ 'OK' ]:
return result
maxInputData = Operations().getValue( "JobDescription/MaxInputData", 500 )
result = self.__checkMaxInputData( maxInputData )
if not result[ 'OK' ]:
return result
transformationTypes = Operations().getValue( "Transformations/DataProcessing", [] )
result = self.__checkMultiChoice( "JobType", ['User', 'Test', 'Hospital'] + transformationTypes )
if not result[ 'OK' ]:
return result
return S_OK()
def createSection( self, secName, contents = False ):
if secName not in self.__manifest:
if contents and not isinstance( contents, CFG ):
return S_ERROR( "Contents for section %s is not a cfg object" % secName )
self.__dirty = True
return S_OK( self.__manifest.createNewSection( secName, contents = contents ) )
return S_ERROR( "Section %s already exists" % secName )
def getSection( self, secName ):
self.__dirty = True
sec = self.__manifest[ secName ]
if not sec:
return S_ERROR( "%s does not exist" )
return S_OK( sec )
def setSectionContents( self, secName, contents ):
if contents and not isinstance( contents, CFG ):
return S_ERROR( "Contents for section %s is not a cfg object" % secName )
self.__dirty = True
if secName in self.__manifest:
self.__manifest[ secName ].reset()
self.__manifest[ secName ].mergeWith( contents )
else:
self.__manifest.createNewSection( secName, contents = contents )
def setOption( self, varName, varValue ):
"""
Set a var in job manifest
"""
self.__dirty = True
levels = List.fromChar( varName, "/" )
cfg = self.__manifest
for l in levels[:-1]:
if l not in cfg:
cfg.createNewSection( l )
cfg = cfg[ l ]
cfg.setOption( levels[-1], varValue )
def remove( self, opName ):
levels = List.fromChar( opName, "/" )
cfg = self.__manifest
for l in levels[:-1]:
if l not in cfg:
return S_ERROR( "%s does not exist" % opName )
cfg = cfg[ l ]
if cfg.deleteKey( levels[ -1 ] ):
self.__dirty = True
return S_OK()
return S_ERROR( "%s does not exist" % opName )
def getOption( self, varName, defaultValue = None ):
"""
Get a variable from the job manifest
"""
cfg = self.__manifest
return cfg.getOption( varName, defaultValue )
def getOptionList( self, section = "" ):
"""
Get a list of variables in a section of the job manifest
"""
cfg = self.__manifest.getRecursive( section )
if not cfg or 'value' not in cfg:
return []
cfg = cfg[ 'value' ]
return cfg.listOptions()
def isOption( self, opName ):
"""
Check if it is a valid option
"""
return self.__manifest.isOption( opName )
def getSectionList( self, section = "" ):
"""
Get a list of sections in the job manifest
"""
cfg = self.__manifest.getRecursive( section )
if not cfg or 'value' not in cfg:
return []
cfg = cfg[ 'value' ]
return cfg.listSections()
def expand( self ):
"""
Expand all options into themselves
"""
self.__manifest.expand()
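# Minimal usage sketch (hypothetical JDL; check() needs a configured DIRAC
# CS and the Owner*/DIRACSetup fields, so it is not exercised here):
#
#   manifest = JobManifest()
#   result = manifest.load( '[ Executable = "my_script.sh"; CPUTime = 3600; ]' )
#   if result[ 'OK' ]:
#     manifest.setOption( 'Priority', 5 )
#     print manifest.dumpAsJDL()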
|
miloszz/DIRAC
|
WorkloadManagementSystem/Client/JobState/JobManifest.py
|
Python
|
gpl-3.0
| 9,078
|
# -*- coding: utf-8 -*-
"""
The rrule module offers a small, complete, and very fast implementation of
the recurrence rules documented in the
`iCalendar RFC <https://tools.ietf.org/html/rfc5545>`_,
including support for caching of results.
"""
import itertools
import datetime
import calendar
import re
import sys
try:
from math import gcd
except ImportError:
from fractions import gcd
from six import advance_iterator, integer_types
from six.moves import _thread, range
import heapq
from ._common import weekday as weekdaybase
from .tz import tzutc, tzlocal
# For warning about deprecation of until and count
from warnings import warn
__all__ = ["rrule", "rruleset", "rrulestr",
"YEARLY", "MONTHLY", "WEEKLY", "DAILY",
"HOURLY", "MINUTELY", "SECONDLY",
"MO", "TU", "WE", "TH", "FR", "SA", "SU"]
# Every mask is 7 days longer to handle cross-year weekly periods.
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
[7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)
FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY']
(YEARLY,
MONTHLY,
WEEKLY,
DAILY,
HOURLY,
MINUTELY,
SECONDLY) = list(range(7))
# Imported on demand.
easter = None
parser = None
class weekday(weekdaybase):
"""
This version of weekday does not allow n = 0.
"""
def __init__(self, wkday, n=None):
if n == 0:
raise ValueError("Can't create weekday with n==0")
super(weekday, self).__init__(wkday, n)
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
def _invalidates_cache(f):
"""
Decorator for rruleset methods which may invalidate the
cached length.
"""
def inner_func(self, *args, **kwargs):
rv = f(self, *args, **kwargs)
self._invalidate_cache()
return rv
return inner_func
class rrulebase(object):
def __init__(self, cache=False):
if cache:
self._cache = []
self._cache_lock = _thread.allocate_lock()
self._invalidate_cache()
else:
self._cache = None
self._cache_complete = False
self._len = None
def __iter__(self):
if self._cache_complete:
return iter(self._cache)
elif self._cache is None:
return self._iter()
else:
return self._iter_cached()
def _invalidate_cache(self):
if self._cache is not None:
self._cache = []
self._cache_complete = False
self._cache_gen = self._iter()
if self._cache_lock.locked():
self._cache_lock.release()
self._len = None
def _iter_cached(self):
i = 0
gen = self._cache_gen
cache = self._cache
acquire = self._cache_lock.acquire
release = self._cache_lock.release
while gen:
if i == len(cache):
acquire()
if self._cache_complete:
break
try:
for j in range(10):
cache.append(advance_iterator(gen))
except StopIteration:
self._cache_gen = gen = None
self._cache_complete = True
break
release()
yield cache[i]
i += 1
while i < self._len:
yield cache[i]
i += 1
def __getitem__(self, item):
if self._cache_complete:
return self._cache[item]
elif isinstance(item, slice):
if item.step and item.step < 0:
return list(iter(self))[item]
else:
return list(itertools.islice(self,
item.start or 0,
item.stop or sys.maxsize,
item.step or 1))
elif item >= 0:
gen = iter(self)
try:
for i in range(item+1):
res = advance_iterator(gen)
except StopIteration:
raise IndexError
return res
else:
return list(iter(self))[item]
def __contains__(self, item):
if self._cache_complete:
return item in self._cache
else:
for i in self:
if i == item:
return True
elif i > item:
return False
return False
# __len__() introduces a large performance penalty.
def count(self):
""" Returns the number of recurrences in this set. It will have go
trough the whole recurrence, if this hasn't been done before. """
if self._len is None:
for x in self:
pass
return self._len
def before(self, dt, inc=False):
""" Returns the last recurrence before the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. """
if self._cache_complete:
gen = self._cache
else:
gen = self
last = None
if inc:
for i in gen:
if i > dt:
break
last = i
else:
for i in gen:
if i >= dt:
break
last = i
return last
def after(self, dt, inc=False):
""" Returns the first recurrence after the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. """
if self._cache_complete:
gen = self._cache
else:
gen = self
if inc:
for i in gen:
if i >= dt:
return i
else:
for i in gen:
if i > dt:
return i
return None
def xafter(self, dt, count=None, inc=False):
"""
Generator which yields up to `count` recurrences after the given
datetime instance, equivalent to `after`.
:param dt:
The datetime at which to start generating recurrences.
:param count:
The maximum number of recurrences to generate. If `None` (default),
dates are generated until the recurrence rule is exhausted.
:param inc:
If `dt` is an instance of the rule and `inc` is `True`, it is
included in the output.
:yields: Yields a sequence of `datetime` objects.
"""
if self._cache_complete:
gen = self._cache
else:
gen = self
# Select the comparison function
if inc:
comp = lambda dc, dtc: dc >= dtc
else:
comp = lambda dc, dtc: dc > dtc
# Generate dates
n = 0
for d in gen:
if comp(d, dt):
if count is not None:
n += 1
if n > count:
break
yield d
def between(self, after, before, inc=False, count=1):
""" Returns all the occurrences of the rrule between after and before.
The inc keyword defines what happens if after and/or before are
themselves occurrences. With inc=True, they will be included in the
list, if they are found in the recurrence set. """
if self._cache_complete:
gen = self._cache
else:
gen = self
started = False
l = []
if inc:
for i in gen:
if i > before:
break
elif not started:
if i >= after:
started = True
l.append(i)
else:
l.append(i)
else:
for i in gen:
if i >= before:
break
elif not started:
if i > after:
started = True
l.append(i)
else:
l.append(i)
return l
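# Usage sketch for the search helpers above (illustrative dates; uses the
# rrule class defined below):
#
#   r = rrule(DAILY, dtstart=datetime.datetime(2020, 1, 1), count=10)
#   r.before(datetime.datetime(2020, 1, 5))           # Jan 4
#   r.after(datetime.datetime(2020, 1, 5))            # Jan 6
#   r.after(datetime.datetime(2020, 1, 5), inc=True)  # Jan 5 itself
#   r.between(datetime.datetime(2020, 1, 3),
#             datetime.datetime(2020, 1, 7))          # Jan 4, 5 and 6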
class rrule(rrulebase):
"""
That's the base of the rrule operation. It accepts all the keywords
defined in the RFC as its constructor parameters (except byday,
which was renamed to byweekday) and more. The constructor prototype is::
rrule(freq)
Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
or SECONDLY.
.. note::
Per RFC section 3.3.10, recurrence instances falling on invalid dates
and times are ignored rather than coerced:
Recurrence rules may generate recurrence instances with an invalid
date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM
on a day where the local time is moved forward by an hour at 1:00
AM). Such recurrence instances MUST be ignored and MUST NOT be
counted as part of the recurrence set.
This can lead to possibly surprising behavior when, for example, the
start date occurs at the end of the month:
>>> from dateutil.rrule import rrule, MONTHLY
>>> from datetime import datetime
>>> start_date = datetime(2014, 12, 31)
>>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date))
... # doctest: +NORMALIZE_WHITESPACE
[datetime.datetime(2014, 12, 31, 0, 0),
datetime.datetime(2015, 1, 31, 0, 0),
datetime.datetime(2015, 3, 31, 0, 0),
datetime.datetime(2015, 5, 31, 0, 0)]
Additionally, it supports the following keyword arguments:
:param cache:
If given, it must be a boolean value specifying to enable or disable
caching of results. If you will use the same rrule instance multiple
times, enabling caching will improve the performance considerably.
:param dtstart:
The recurrence start. Besides being the base for the recurrence,
missing parameters in the final recurrence instances will also be
extracted from this date. If not given, datetime.now() will be used
instead.
:param interval:
The interval between each freq iteration. For example, when using
YEARLY, an interval of 2 means once every two years, but with HOURLY,
it means once every two hours. The default interval is 1.
:param wkst:
The week start day. Must be one of the MO, TU, WE constants, or an
integer, specifying the first day of the week. This will affect
recurrences based on weekly periods. The default week start is
retrieved from calendar.firstweekday(), and may be modified by
calendar.setfirstweekday().
:param count:
How many occurrences will be generated.
.. note::
As of version 2.5.0, the use of the ``until`` keyword together
with the ``count`` keyword is deprecated per RFC-5545 Sec. 3.3.10.
:param until:
If given, this must be a datetime instance, that will specify the
limit of the recurrence. The last recurrence in the rule is the greatest
datetime that is less than or equal to the value specified in the
``until`` parameter.
.. note::
As of version 2.5.0, the use of the ``until`` keyword together
with the ``count`` keyword is deprecated per RFC-5545 Sec. 3.3.10.
:param bysetpos:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each given integer will specify an occurrence
number, corresponding to the nth occurrence of the rule inside the
frequency period. For example, a bysetpos of -1 if combined with a
MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
result in the last work day of every month.
:param bymonth:
If given, it must be either an integer, or a sequence of integers,
meaning the months to apply the recurrence to.
:param bymonthday:
If given, it must be either an integer, or a sequence of integers,
meaning the month days to apply the recurrence to.
:param byyearday:
If given, it must be either an integer, or a sequence of integers,
meaning the year days to apply the recurrence to.
:param byweekno:
If given, it must be either an integer, or a sequence of integers,
meaning the week numbers to apply the recurrence to. Week numbers
have the meaning described in ISO8601, that is, the first week of
the year is that containing at least four days of the new year.
:param byweekday:
If given, it must be either an integer (0 == MO), a sequence of
integers, one of the weekday constants (MO, TU, etc), or a sequence
of these constants. When given, these variables will define the
weekdays where the recurrence will be applied. It's also possible to
use an argument n for the weekday instances, which will mean the nth
occurrence of this weekday in the period. For example, with MONTHLY,
or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
first friday of the month where the recurrence happens. Notice that in
the RFC documentation, this is specified as BYDAY, but was renamed to
avoid the ambiguity of that keyword.
:param byhour:
If given, it must be either an integer, or a sequence of integers,
meaning the hours to apply the recurrence to.
:param byminute:
If given, it must be either an integer, or a sequence of integers,
meaning the minutes to apply the recurrence to.
:param bysecond:
If given, it must be either an integer, or a sequence of integers,
meaning the seconds to apply the recurrence to.
:param byeaster:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each integer will define an offset from the
Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
Sunday itself. This is an extension to the RFC specification.
"""
def __init__(self, freq, dtstart=None,
interval=1, wkst=None, count=None, until=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None,
byhour=None, byminute=None, bysecond=None,
cache=False):
super(rrule, self).__init__(cache)
global easter
if not dtstart:
dtstart = datetime.datetime.now().replace(microsecond=0)
elif not isinstance(dtstart, datetime.datetime):
dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
else:
dtstart = dtstart.replace(microsecond=0)
self._dtstart = dtstart
self._tzinfo = dtstart.tzinfo
self._freq = freq
self._interval = interval
self._count = count
# Cache the original byxxx rules, if they are provided, as the _byxxx
# attributes do not necessarily map to the inputs, and this can be
# a problem in generating the strings. Only store things if they've
# been supplied (the string retrieval will just use .get())
self._original_rule = {}
if until and not isinstance(until, datetime.datetime):
until = datetime.datetime.fromordinal(until.toordinal())
self._until = until
if self._dtstart and self._until:
if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None):
# According to RFC5545 Section 3.3.10:
# https://tools.ietf.org/html/rfc5545#section-3.3.10
#
# > If the "DTSTART" property is specified as a date with UTC
# > time or a date with local time and time zone reference,
# > then the UNTIL rule part MUST be specified as a date with
# > UTC time.
raise ValueError(
'RRULE UNTIL values must be specified in UTC when DTSTART '
'is timezone-aware'
)
if count is not None and until:
warn("Using both 'count' and 'until' is inconsistent with RFC 5545"
" and has been deprecated in dateutil. Future versions will "
"raise an error.", DeprecationWarning)
if wkst is None:
self._wkst = calendar.firstweekday()
elif isinstance(wkst, integer_types):
self._wkst = wkst
else:
self._wkst = wkst.weekday
if bysetpos is None:
self._bysetpos = None
elif isinstance(bysetpos, integer_types):
if bysetpos == 0 or not (-366 <= bysetpos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
self._bysetpos = (bysetpos,)
else:
self._bysetpos = tuple(bysetpos)
for pos in self._bysetpos:
if pos == 0 or not (-366 <= pos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
if self._bysetpos:
self._original_rule['bysetpos'] = self._bysetpos
if (byweekno is None and byyearday is None and bymonthday is None and
byweekday is None and byeaster is None):
if freq == YEARLY:
if bymonth is None:
bymonth = dtstart.month
self._original_rule['bymonth'] = None
bymonthday = dtstart.day
self._original_rule['bymonthday'] = None
elif freq == MONTHLY:
bymonthday = dtstart.day
self._original_rule['bymonthday'] = None
elif freq == WEEKLY:
byweekday = dtstart.weekday()
self._original_rule['byweekday'] = None
# bymonth
if bymonth is None:
self._bymonth = None
else:
if isinstance(bymonth, integer_types):
bymonth = (bymonth,)
self._bymonth = tuple(sorted(set(bymonth)))
if 'bymonth' not in self._original_rule:
self._original_rule['bymonth'] = self._bymonth
# byyearday
if byyearday is None:
self._byyearday = None
else:
if isinstance(byyearday, integer_types):
byyearday = (byyearday,)
self._byyearday = tuple(sorted(set(byyearday)))
self._original_rule['byyearday'] = self._byyearday
# byeaster
if byeaster is not None:
if not easter:
from dateutil import easter
if isinstance(byeaster, integer_types):
self._byeaster = (byeaster,)
else:
self._byeaster = tuple(sorted(byeaster))
self._original_rule['byeaster'] = self._byeaster
else:
self._byeaster = None
# bymonthday
if bymonthday is None:
self._bymonthday = ()
self._bynmonthday = ()
else:
if isinstance(bymonthday, integer_types):
bymonthday = (bymonthday,)
bymonthday = set(bymonthday) # Ensure it's unique
self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0))
self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0))
# Storing positive numbers first, then negative numbers
if 'bymonthday' not in self._original_rule:
self._original_rule['bymonthday'] = tuple(
itertools.chain(self._bymonthday, self._bynmonthday))
# byweekno
if byweekno is None:
self._byweekno = None
else:
if isinstance(byweekno, integer_types):
byweekno = (byweekno,)
self._byweekno = tuple(sorted(set(byweekno)))
self._original_rule['byweekno'] = self._byweekno
# byweekday / bynweekday
if byweekday is None:
self._byweekday = None
self._bynweekday = None
else:
# If it's one of the valid non-sequence types, convert to a
# single-element sequence before the iterator that builds the
# byweekday set.
if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"):
byweekday = (byweekday,)
self._byweekday = set()
self._bynweekday = set()
for wday in byweekday:
if isinstance(wday, integer_types):
self._byweekday.add(wday)
elif not wday.n or freq > MONTHLY:
self._byweekday.add(wday.weekday)
else:
self._bynweekday.add((wday.weekday, wday.n))
if not self._byweekday:
self._byweekday = None
elif not self._bynweekday:
self._bynweekday = None
if self._byweekday is not None:
self._byweekday = tuple(sorted(self._byweekday))
orig_byweekday = [weekday(x) for x in self._byweekday]
else:
orig_byweekday = ()
if self._bynweekday is not None:
self._bynweekday = tuple(sorted(self._bynweekday))
orig_bynweekday = [weekday(*x) for x in self._bynweekday]
else:
orig_bynweekday = ()
if 'byweekday' not in self._original_rule:
self._original_rule['byweekday'] = tuple(itertools.chain(
orig_byweekday, orig_bynweekday))
# byhour
if byhour is None:
if freq < HOURLY:
self._byhour = {dtstart.hour}
else:
self._byhour = None
else:
if isinstance(byhour, integer_types):
byhour = (byhour,)
if freq == HOURLY:
self._byhour = self.__construct_byset(start=dtstart.hour,
byxxx=byhour,
base=24)
else:
self._byhour = set(byhour)
self._byhour = tuple(sorted(self._byhour))
self._original_rule['byhour'] = self._byhour
# byminute
if byminute is None:
if freq < MINUTELY:
self._byminute = {dtstart.minute}
else:
self._byminute = None
else:
if isinstance(byminute, integer_types):
byminute = (byminute,)
if freq == MINUTELY:
self._byminute = self.__construct_byset(start=dtstart.minute,
byxxx=byminute,
base=60)
else:
self._byminute = set(byminute)
self._byminute = tuple(sorted(self._byminute))
self._original_rule['byminute'] = self._byminute
# bysecond
if bysecond is None:
if freq < SECONDLY:
self._bysecond = (dtstart.second,)
else:
self._bysecond = None
else:
if isinstance(bysecond, integer_types):
bysecond = (bysecond,)
if freq == SECONDLY:
self._bysecond = self.__construct_byset(start=dtstart.second,
byxxx=bysecond,
base=60)
else:
self._bysecond = set(bysecond)
self._bysecond = tuple(sorted(self._bysecond))
self._original_rule['bysecond'] = self._bysecond
if self._freq >= HOURLY:
self._timeset = None
else:
self._timeset = []
for hour in self._byhour:
for minute in self._byminute:
for second in self._bysecond:
self._timeset.append(
datetime.time(hour, minute, second,
tzinfo=self._tzinfo))
self._timeset.sort()
self._timeset = tuple(self._timeset)
def __str__(self):
"""
Output a string that would generate this RRULE if passed to rrulestr.
This is mostly compatible with RFC5545, except for the
dateutil-specific extension BYEASTER.
"""
output = []
h, m, s = [None] * 3
if self._dtstart:
output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S'))
h, m, s = self._dtstart.timetuple()[3:6]
parts = ['FREQ=' + FREQNAMES[self._freq]]
if self._interval != 1:
parts.append('INTERVAL=' + str(self._interval))
if self._wkst:
parts.append('WKST=' + repr(weekday(self._wkst))[0:2])
if self._count is not None:
parts.append('COUNT=' + str(self._count))
if self._until:
parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S'))
if self._original_rule.get('byweekday') is not None:
# The str() method on weekday objects doesn't generate
# RFC5545-compliant strings, so we should modify that.
original_rule = dict(self._original_rule)
wday_strings = []
for wday in original_rule['byweekday']:
if wday.n:
wday_strings.append('{n:+d}{wday}'.format(
n=wday.n,
wday=repr(wday)[0:2]))
else:
wday_strings.append(repr(wday))
original_rule['byweekday'] = wday_strings
else:
original_rule = self._original_rule
partfmt = '{name}={vals}'
for name, key in [('BYSETPOS', 'bysetpos'),
('BYMONTH', 'bymonth'),
('BYMONTHDAY', 'bymonthday'),
('BYYEARDAY', 'byyearday'),
('BYWEEKNO', 'byweekno'),
('BYDAY', 'byweekday'),
('BYHOUR', 'byhour'),
('BYMINUTE', 'byminute'),
('BYSECOND', 'bysecond'),
('BYEASTER', 'byeaster')]:
value = original_rule.get(key)
if value:
parts.append(partfmt.format(name=name, vals=(','.join(str(v)
for v in value))))
output.append('RRULE:' + ';'.join(parts))
return '\n'.join(output)
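# Round-trip sketch (illustrative; assumes the default Monday week start so
# WKST is omitted): the output is accepted by the module-level rrulestr
# helper. For example,
#   str(rrule(WEEKLY, count=3, byweekday=(MO,),
#             dtstart=datetime.datetime(2020, 1, 6)))
# yields roughly:
#   DTSTART:20200106T000000
#   RRULE:FREQ=WEEKLY;COUNT=3;BYDAY=MO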
def replace(self, **kwargs):
"""Return new rrule with same attributes except for those attributes given new
values by whichever keyword arguments are specified."""
new_kwargs = {"interval": self._interval,
"count": self._count,
"dtstart": self._dtstart,
"freq": self._freq,
"until": self._until,
"wkst": self._wkst,
"cache": False if self._cache is None else True }
new_kwargs.update(self._original_rule)
new_kwargs.update(kwargs)
return rrule(**new_kwargs)
def _iter(self):
year, month, day, hour, minute, second, weekday, yearday, _ = \
self._dtstart.timetuple()
# Some local variables to speed things up a bit
freq = self._freq
interval = self._interval
wkst = self._wkst
until = self._until
bymonth = self._bymonth
byweekno = self._byweekno
byyearday = self._byyearday
byweekday = self._byweekday
byeaster = self._byeaster
bymonthday = self._bymonthday
bynmonthday = self._bynmonthday
bysetpos = self._bysetpos
byhour = self._byhour
byminute = self._byminute
bysecond = self._bysecond
ii = _iterinfo(self)
ii.rebuild(year, month)
getdayset = {YEARLY: ii.ydayset,
MONTHLY: ii.mdayset,
WEEKLY: ii.wdayset,
DAILY: ii.ddayset,
HOURLY: ii.ddayset,
MINUTELY: ii.ddayset,
SECONDLY: ii.ddayset}[freq]
if freq < HOURLY:
timeset = self._timeset
else:
gettimeset = {HOURLY: ii.htimeset,
MINUTELY: ii.mtimeset,
SECONDLY: ii.stimeset}[freq]
if ((freq >= HOURLY and
self._byhour and hour not in self._byhour) or
(freq >= MINUTELY and
self._byminute and minute not in self._byminute) or
(freq >= SECONDLY and
self._bysecond and second not in self._bysecond)):
timeset = ()
else:
timeset = gettimeset(hour, minute, second)
total = 0
count = self._count
while True:
# Get dayset with the right frequency
dayset, start, end = getdayset(year, month, day)
# Do the "hard" work ;-)
filtered = False
for i in dayset[start:end]:
if ((bymonth and ii.mmask[i] not in bymonth) or
(byweekno and not ii.wnomask[i]) or
(byweekday and ii.wdaymask[i] not in byweekday) or
(ii.nwdaymask and not ii.nwdaymask[i]) or
(byeaster and not ii.eastermask[i]) or
((bymonthday or bynmonthday) and
ii.mdaymask[i] not in bymonthday and
ii.nmdaymask[i] not in bynmonthday) or
(byyearday and
((i < ii.yearlen and i+1 not in byyearday and
-ii.yearlen+i not in byyearday) or
(i >= ii.yearlen and i+1-ii.yearlen not in byyearday and
-ii.nextyearlen+i-ii.yearlen not in byyearday)))):
dayset[i] = None
filtered = True
# Output results
if bysetpos and timeset:
poslist = []
for pos in bysetpos:
if pos < 0:
daypos, timepos = divmod(pos, len(timeset))
else:
daypos, timepos = divmod(pos-1, len(timeset))
try:
i = [x for x in dayset[start:end]
if x is not None][daypos]
time = timeset[timepos]
except IndexError:
pass
else:
date = datetime.date.fromordinal(ii.yearordinal+i)
res = datetime.datetime.combine(date, time)
if res not in poslist:
poslist.append(res)
poslist.sort()
for res in poslist:
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
if count is not None:
count -= 1
if count < 0:
self._len = total
return
total += 1
yield res
else:
for i in dayset[start:end]:
if i is not None:
date = datetime.date.fromordinal(ii.yearordinal + i)
for time in timeset:
res = datetime.datetime.combine(date, time)
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
if count is not None:
count -= 1
if count < 0:
self._len = total
return
total += 1
yield res
# Handle frequency and interval
fixday = False
if freq == YEARLY:
year += interval
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == MONTHLY:
month += interval
if month > 12:
div, mod = divmod(month, 12)
month = mod
year += div
if month == 0:
month = 12
year -= 1
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == WEEKLY:
if wkst > weekday:
day += -(weekday+1+(6-wkst))+self._interval*7
else:
day += -(weekday-wkst)+self._interval*7
weekday = wkst
fixday = True
elif freq == DAILY:
day += interval
fixday = True
elif freq == HOURLY:
if filtered:
# Jump to one iteration before next day
hour += ((23-hour)//interval)*interval
if byhour:
ndays, hour = self.__mod_distance(value=hour,
byxxx=self._byhour,
base=24)
else:
ndays, hour = divmod(hour+interval, 24)
if ndays:
day += ndays
fixday = True
timeset = gettimeset(hour, minute, second)
elif freq == MINUTELY:
if filtered:
# Jump to one iteration before next day
minute += ((1439-(hour*60+minute))//interval)*interval
valid = False
rep_rate = (24*60)
for j in range(rep_rate // gcd(interval, rep_rate)):
if byminute:
nhours, minute = \
self.__mod_distance(value=minute,
byxxx=self._byminute,
base=60)
else:
nhours, minute = divmod(minute+interval, 60)
div, hour = divmod(hour+nhours, 24)
if div:
day += div
fixday = True
filtered = False
if not byhour or hour in byhour:
valid = True
break
if not valid:
raise ValueError('Invalid combination of interval and ' +
'byhour resulting in empty rule.')
timeset = gettimeset(hour, minute, second)
elif freq == SECONDLY:
if filtered:
# Jump to one iteration before next day
second += (((86399 - (hour * 3600 + minute * 60 + second))
// interval) * interval)
rep_rate = (24 * 3600)
valid = False
for j in range(0, rep_rate // gcd(interval, rep_rate)):
if bysecond:
nminutes, second = \
self.__mod_distance(value=second,
byxxx=self._bysecond,
base=60)
else:
nminutes, second = divmod(second+interval, 60)
div, minute = divmod(minute+nminutes, 60)
if div:
hour += div
div, hour = divmod(hour, 24)
if div:
day += div
fixday = True
if ((not byhour or hour in byhour) and
(not byminute or minute in byminute) and
(not bysecond or second in bysecond)):
valid = True
break
if not valid:
raise ValueError('Invalid combination of interval, ' +
'byhour and byminute resulting in empty' +
' rule.')
timeset = gettimeset(hour, minute, second)
if fixday and day > 28:
daysinmonth = calendar.monthrange(year, month)[1]
if day > daysinmonth:
while day > daysinmonth:
day -= daysinmonth
month += 1
if month == 13:
month = 1
year += 1
if year > datetime.MAXYEAR:
self._len = total
return
daysinmonth = calendar.monthrange(year, month)[1]
ii.rebuild(year, month)
def __construct_byset(self, start, byxxx, base):
"""
If a `BYXXX` sequence is passed to the constructor at the same level as
`FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some
specifications which cannot be reached given some starting conditions.
This occurs whenever the interval is not coprime with the base of a
given unit and the difference between the starting position and the
ending position is not coprime with the greatest common divisor
between the interval and the base. For example, with a FREQ of hourly
starting at 17:00 and an interval of 4, the only valid values for
BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not
coprime.
:param start:
Specifies the starting position.
:param byxxx:
An iterable containing the list of allowed values.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
This does not preserve the type of the iterable, returning a set
instead, since the values should be unique and the order is
irrelevant; this also speeds up later lookups.
In the event of an empty set, raises a :exception:`ValueError`, as this
results in an empty rrule.
"""
cset = set()
# Support a single byxxx value.
if isinstance(byxxx, integer_types):
byxxx = (byxxx, )
for num in byxxx:
i_gcd = gcd(self._interval, base)
# Use divmod rather than % because we need to wrap negative nums.
if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0:
cset.add(num)
if len(cset) == 0:
raise ValueError("Invalid rrule byxxx generates an empty set.")
return cset
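# Worked example for the docstring above: with self._interval == 4 (so
# gcd(4, 24) == 4) and start == 17, a candidate hour h is kept only when
# (h - 17) % 4 == 0, i.e. the reachable BYHOUR values are
# {1, 5, 9, 13, 17, 21}; a byxxx set containing none of them raises
# ValueError.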
def __mod_distance(self, value, byxxx, base):
"""
Calculates the next value in a sequence where the `FREQ` parameter is
specified along with a `BYXXX` parameter at the same "level"
(e.g. `HOURLY` specified with `BYHOUR`).
:param value:
The old value of the component.
:param byxxx:
The `BYXXX` set, which should have been generated by
`rrule._construct_byset`, or something else which checks that a
valid rule is present.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
If a valid value is not found after `base` iterations (the maximum
number before the sequence would start to repeat), this raises a
:exception:`ValueError`, as no valid values were found.
This returns a tuple of `divmod(n*interval, base)`, where `n` is the
smallest number of `interval` repetitions until the next specified
value in `byxxx` is found.
"""
accumulator = 0
for ii in range(1, base + 1):
# Using divmod() over % to account for negative intervals
div, value = divmod(value + self._interval, base)
accumulator += div
if value in byxxx:
return (accumulator, value)
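# Worked example: value=17, interval=4, base=24, byxxx={1}. The loop walks
# 17 -> 21 -> 1; the second step wraps past 24 and adds 1 to the
# accumulator, so the method returns (1, 1): advance one day, next hour 1.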
class _iterinfo(object):
__slots__ = ["rrule", "lastyear", "lastmonth",
"yearlen", "nextyearlen", "yearordinal", "yearweekday",
"mmask", "mrange", "mdaymask", "nmdaymask",
"wdaymask", "wnomask", "nwdaymask", "eastermask"]
def __init__(self, rrule):
for attr in self.__slots__:
setattr(self, attr, None)
self.rrule = rrule
def rebuild(self, year, month):
# Every mask is 7 days longer to handle cross-year weekly periods.
rr = self.rrule
if year != self.lastyear:
self.yearlen = 365 + calendar.isleap(year)
self.nextyearlen = 365 + calendar.isleap(year + 1)
firstyday = datetime.date(year, 1, 1)
self.yearordinal = firstyday.toordinal()
self.yearweekday = firstyday.weekday()
wday = datetime.date(year, 1, 1).weekday()
if self.yearlen == 365:
self.mmask = M365MASK
self.mdaymask = MDAY365MASK
self.nmdaymask = NMDAY365MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M365RANGE
else:
self.mmask = M366MASK
self.mdaymask = MDAY366MASK
self.nmdaymask = NMDAY366MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M366RANGE
if not rr._byweekno:
self.wnomask = None
else:
self.wnomask = [0]*(self.yearlen+7)
# no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7
if no1wkst >= 4:
no1wkst = 0
# Number of days in the year, plus the days we got
# from last year.
wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7
else:
# Number of days in the year, minus the days we
# left in last year.
wyearlen = self.yearlen-no1wkst
div, mod = divmod(wyearlen, 7)
numweeks = div+mod//4
for n in rr._byweekno:
if n < 0:
n += numweeks+1
if not (0 < n <= numweeks):
continue
if n > 1:
i = no1wkst+(n-1)*7
if no1wkst != firstwkst:
i -= 7-firstwkst
else:
i = no1wkst
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if 1 in rr._byweekno:
# Check week number 1 of next year as well
# TODO: Check -numweeks for next year.
i = no1wkst+numweeks*7
if no1wkst != firstwkst:
i -= 7-firstwkst
if i < self.yearlen:
# If week starts in next year, we
# don't care about it.
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if no1wkst:
# Check last week number of last year as
# well. If no1wkst is 0, either the year
# started on week start, or week number 1
# got days from last year, so there are no
# days from last year's last week number in
# this year.
if -1 not in rr._byweekno:
lyearweekday = datetime.date(year-1, 1, 1).weekday()
lno1wkst = (7-lyearweekday+rr._wkst) % 7
lyearlen = 365+calendar.isleap(year-1)
if lno1wkst >= 4:
lno1wkst = 0
lnumweeks = 52+(lyearlen +
(lyearweekday-rr._wkst) % 7) % 7//4
else:
lnumweeks = 52+(self.yearlen-no1wkst) % 7//4
else:
lnumweeks = -1
if lnumweeks in rr._byweekno:
for i in range(no1wkst):
self.wnomask[i] = 1
if (rr._bynweekday and (month != self.lastmonth or
year != self.lastyear)):
ranges = []
if rr._freq == YEARLY:
if rr._bymonth:
for month in rr._bymonth:
ranges.append(self.mrange[month-1:month+1])
else:
ranges = [(0, self.yearlen)]
elif rr._freq == MONTHLY:
ranges = [self.mrange[month-1:month+1]]
if ranges:
# Weekly frequency won't get here, so we may not
# care about cross-year weekly periods.
self.nwdaymask = [0]*self.yearlen
for first, last in ranges:
last -= 1
for wday, n in rr._bynweekday:
if n < 0:
i = last+(n+1)*7
i -= (self.wdaymask[i]-wday) % 7
else:
i = first+(n-1)*7
i += (7-self.wdaymask[i]+wday) % 7
if first <= i <= last:
self.nwdaymask[i] = 1
if rr._byeaster:
self.eastermask = [0]*(self.yearlen+7)
eyday = easter.easter(year).toordinal()-self.yearordinal
for offset in rr._byeaster:
self.eastermask[eyday+offset] = 1
self.lastyear = year
self.lastmonth = month
def ydayset(self, year, month, day):
return list(range(self.yearlen)), 0, self.yearlen
def mdayset(self, year, month, day):
dset = [None]*self.yearlen
start, end = self.mrange[month-1:month+1]
for i in range(start, end):
dset[i] = i
return dset, start, end
def wdayset(self, year, month, day):
# We need to handle cross-year weeks here.
dset = [None]*(self.yearlen+7)
i = datetime.date(year, month, day).toordinal()-self.yearordinal
start = i
for j in range(7):
dset[i] = i
i += 1
# if (not (0 <= i < self.yearlen) or
# self.wdaymask[i] == self.rrule._wkst):
# This will cross the year boundary, if necessary.
if self.wdaymask[i] == self.rrule._wkst:
break
return dset, start, i
def ddayset(self, year, month, day):
dset = [None] * self.yearlen
i = datetime.date(year, month, day).toordinal() - self.yearordinal
dset[i] = i
return dset, i, i + 1
def htimeset(self, hour, minute, second):
tset = []
rr = self.rrule
for minute in rr._byminute:
for second in rr._bysecond:
tset.append(datetime.time(hour, minute, second,
tzinfo=rr._tzinfo))
tset.sort()
return tset
def mtimeset(self, hour, minute, second):
tset = []
rr = self.rrule
for second in rr._bysecond:
tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
tset.sort()
return tset
def stimeset(self, hour, minute, second):
return (datetime.time(hour, minute, second,
tzinfo=self.rrule._tzinfo),)
class rruleset(rrulebase):
""" The rruleset type allows more complex recurrence setups, mixing
multiple rules, dates, exclusion rules, and exclusion dates. The type
constructor takes the following keyword arguments:
:param cache: If True, caching of results will be enabled, improving
performance of multiple queries considerably. """
class _genitem(object):
def __init__(self, genlist, gen):
try:
self.dt = advance_iterator(gen)
genlist.append(self)
except StopIteration:
pass
self.genlist = genlist
self.gen = gen
def __next__(self):
try:
self.dt = advance_iterator(self.gen)
except StopIteration:
if self.genlist[0] is self:
heapq.heappop(self.genlist)
else:
self.genlist.remove(self)
heapq.heapify(self.genlist)
next = __next__
def __lt__(self, other):
return self.dt < other.dt
def __gt__(self, other):
return self.dt > other.dt
def __eq__(self, other):
return self.dt == other.dt
def __ne__(self, other):
return self.dt != other.dt
def __init__(self, cache=False):
super(rruleset, self).__init__(cache)
self._rrule = []
self._rdate = []
self._exrule = []
self._exdate = []
@_invalidates_cache
def rrule(self, rrule):
""" Include the given :py:class:`rrule` instance in the recurrence set
generation. """
self._rrule.append(rrule)
@_invalidates_cache
def rdate(self, rdate):
""" Include the given :py:class:`datetime` instance in the recurrence
set generation. """
self._rdate.append(rdate)
@_invalidates_cache
def exrule(self, exrule):
""" Include the given rrule instance in the recurrence set exclusion
list. Dates which are part of the given recurrence rules will not
be generated, even if some inclusive rrule or rdate matches them.
"""
self._exrule.append(exrule)
@_invalidates_cache
def exdate(self, exdate):
""" Include the given datetime instance in the recurrence set
exclusion list. Dates included that way will not be generated,
even if some inclusive rrule or rdate matches them. """
self._exdate.append(exdate)
def _iter(self):
rlist = []
self._rdate.sort()
self._genitem(rlist, iter(self._rdate))
for gen in [iter(x) for x in self._rrule]:
self._genitem(rlist, gen)
exlist = []
self._exdate.sort()
self._genitem(exlist, iter(self._exdate))
for gen in [iter(x) for x in self._exrule]:
self._genitem(exlist, gen)
lastdt = None
total = 0
heapq.heapify(rlist)
heapq.heapify(exlist)
while rlist:
ritem = rlist[0]
if not lastdt or lastdt != ritem.dt:
while exlist and exlist[0] < ritem:
exitem = exlist[0]
advance_iterator(exitem)
if exlist and exlist[0] is exitem:
heapq.heapreplace(exlist, exitem)
if not exlist or ritem != exlist[0]:
total += 1
yield ritem.dt
lastdt = ritem.dt
advance_iterator(ritem)
if rlist and rlist[0] is ritem:
heapq.heapreplace(rlist, ritem)
self._len = total
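# Usage sketch (illustrative dates): mix an inclusion rule with an
# exclusion rule. Jan 6, 2020 is a Monday, so it is generated by the daily
# rule but removed by the weekly MO exrule:
#
#   rs = rruleset()
#   rs.rrule(rrule(DAILY, count=7, dtstart=datetime.datetime(2020, 1, 1)))
#   rs.exrule(rrule(WEEKLY, byweekday=MO, dtstart=datetime.datetime(2020, 1, 1)))
#   list(rs)   # Jan 1-7, 2020 without Jan 6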
class _rrulestr(object):
_freq_map = {"YEARLY": YEARLY,
"MONTHLY": MONTHLY,
"WEEKLY": WEEKLY,
"DAILY": DAILY,
"HOURLY": HOURLY,
"MINUTELY": MINUTELY,
"SECONDLY": SECONDLY}
_weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
"FR": 4, "SA": 5, "SU": 6}
def _handle_int(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = int(value)
def _handle_int_list(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
_handle_INTERVAL = _handle_int
_handle_COUNT = _handle_int
_handle_BYSETPOS = _handle_int_list
_handle_BYMONTH = _handle_int_list
_handle_BYMONTHDAY = _handle_int_list
_handle_BYYEARDAY = _handle_int_list
_handle_BYEASTER = _handle_int_list
_handle_BYWEEKNO = _handle_int_list
_handle_BYHOUR = _handle_int_list
_handle_BYMINUTE = _handle_int_list
_handle_BYSECOND = _handle_int_list
def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
rrkwargs["freq"] = self._freq_map[value]
def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
global parser
if not parser:
from dateutil import parser
try:
rrkwargs["until"] = parser.parse(value,
ignoretz=kwargs.get("ignoretz"),
tzinfos=kwargs.get("tzinfos"))
except ValueError:
raise ValueError("invalid until date")
def _handle_WKST(self, rrkwargs, name, value, **kwargs):
rrkwargs["wkst"] = self._weekday_map[value]
def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
"""
Two ways to specify this: +1MO or MO(+1)
"""
l = []
for wday in value.split(','):
if '(' in wday:
# If it's of the form TH(+1), etc.
splt = wday.split('(')
w = splt[0]
n = int(splt[1][:-1])
elif len(wday):
# If it's of the form +1MO
for i in range(len(wday)):
if wday[i] not in '+-0123456789':
break
n = wday[:i] or None
w = wday[i:]
if n:
n = int(n)
else:
raise ValueError("Invalid (empty) BYDAY specification.")
l.append(weekdays[self._weekday_map[w]](n))
rrkwargs["byweekday"] = l
_handle_BYDAY = _handle_BYWEEKDAY
def _parse_rfc_rrule(self, line,
dtstart=None,
cache=False,
ignoretz=False,
tzinfos=None):
if line.find(':') != -1:
name, value = line.split(':')
if name != "RRULE":
raise ValueError("unknown parameter name")
else:
value = line
rrkwargs = {}
for pair in value.split(';'):
name, value = pair.split('=')
name = name.upper()
value = value.upper()
try:
getattr(self, "_handle_"+name)(rrkwargs, name, value,
ignoretz=ignoretz,
tzinfos=tzinfos)
except AttributeError:
raise ValueError("unknown parameter '%s'" % name)
except (KeyError, ValueError):
raise ValueError("invalid '%s': %s" % (name, value))
return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
def _parse_rfc(self, s,
dtstart=None,
cache=False,
unfold=False,
forceset=False,
compatible=False,
ignoretz=False,
tzids=None,
tzinfos=None):
global parser
if compatible:
forceset = True
unfold = True
TZID_NAMES = dict(map(
lambda x: (x.upper(), x),
re.findall('TZID=(?P<name>[^:]+):', s)
))
s = s.upper()
if not s.strip():
raise ValueError("empty string")
if unfold:
lines = s.splitlines()
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
else:
lines = s.split()
if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
s.startswith('RRULE:'))):
return self._parse_rfc_rrule(lines[0], cache=cache,
dtstart=dtstart, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
rrulevals = []
rdatevals = []
exrulevals = []
exdatevals = []
for line in lines:
if not line:
continue
if line.find(':') == -1:
name = "RRULE"
value = line
else:
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError("empty property name")
name = parms[0]
parms = parms[1:]
if name == "RRULE":
for parm in parms:
raise ValueError("unsupported RRULE parm: "+parm)
rrulevals.append(value)
elif name == "RDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError("unsupported RDATE parm: "+parm)
rdatevals.append(value)
elif name == "EXRULE":
for parm in parms:
raise ValueError("unsupported EXRULE parm: "+parm)
exrulevals.append(value)
elif name == "EXDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError("unsupported EXDATE parm: "+parm)
exdatevals.append(value)
elif name == "DTSTART":
                    # RFC 5545 3.8.2.4: The VALUE parameter is optional, but
                    # may be found only once.
value_found = False
TZID = None
valid_values = {"VALUE=DATE-TIME", "VALUE=DATE"}
for parm in parms:
if parm.startswith("TZID="):
try:
tzkey = TZID_NAMES[parm.split('TZID=')[-1]]
except KeyError:
continue
if tzids is None:
from . import tz
tzlookup = tz.gettz
elif callable(tzids):
tzlookup = tzids
else:
tzlookup = getattr(tzids, 'get', None)
if tzlookup is None:
msg = ('tzids must be a callable, ' +
'mapping, or None, ' +
'not %s' % tzids)
raise ValueError(msg)
TZID = tzlookup(tzkey)
continue
if parm not in valid_values:
raise ValueError("unsupported DTSTART parm: "+parm)
else:
if value_found:
msg = ("Duplicate value parameter found in " +
"DTSTART: " + parm)
raise ValueError(msg)
value_found = True
if not parser:
from dateutil import parser
dtstart = parser.parse(value, ignoretz=ignoretz,
tzinfos=tzinfos)
if TZID is not None:
if dtstart.tzinfo is None:
dtstart = dtstart.replace(tzinfo=TZID)
else:
raise ValueError('DTSTART specifies multiple timezones')
else:
raise ValueError("unsupported property: "+name)
if (forceset or len(rrulevals) > 1 or rdatevals
or exrulevals or exdatevals):
if not parser and (rdatevals or exdatevals):
from dateutil import parser
rset = rruleset(cache=cache)
for value in rrulevals:
rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in rdatevals:
for datestr in value.split(','):
rset.rdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exrulevals:
rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exdatevals:
for datestr in value.split(','):
rset.exdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
if compatible and dtstart:
rset.rdate(dtstart)
return rset
else:
return self._parse_rfc_rrule(rrulevals[0],
dtstart=dtstart,
cache=cache,
ignoretz=ignoretz,
tzinfos=tzinfos)
def __call__(self, s, **kwargs):
return self._parse_rfc(s, **kwargs)
rrulestr = _rrulestr()
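# Illustrative sketch (not part of upstream dateutil): parsing an RFC 5545
# style string with the rrulestr instance defined above.  The helper name is
# hypothetical.
def _example_rrulestr():  # pragma: no cover
    rule = rrulestr("DTSTART:20000101T000000\n"
                    "RRULE:FREQ=DAILY;COUNT=3")
    return list(rule)  # January 1st, 2nd and 3rd of 2000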
# vim:ts=4:sw=4:et
|
ledtvavs/repository.ledtv
|
script.tvguide.Vader/resources/lib/dateutil/rrule.py
|
Python
|
gpl-3.0
| 64,642
|
#!/usr/bin/env python
'''@package docstring
A mapping of BSM signal dataset names to (short name, sample type, weight)
tuples; the weight is typically a cross section, sometimes multiplied by a
branching fraction.
'''
processes = {
'TTbarDMJets_pseudoscalar_Mchi-1_Mphi-100_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('TTbarDM','MC',1),
'thq':('thq','MC',0.7927),
'thw':('thw','MC',0.147),
'GluGlu_HToInvisible_M125_13TeV_powheg_pythia8':('ggFHinv_m125','MC',48.6),
'VBF_HToInvisible_M125_13TeV_powheg_pythia8':('vbfHinv_m125','MC',3.78),
'VBF_HToInvisible_M110_13TeV_powheg_pythia8':('vbfHinv_m110','MC',4.34),
'VBF_HToInvisible_M150_13TeV_powheg_pythia8':('vbfHinv_m150','MC',3.239),
'VBF_HToInvisible_M200_13TeV_powheg_pythia8':('vbfHinv_m200','MC',2.282),
'VBF_HToInvisible_M300_13TeV_powheg_pythia8':('vbfHinv_m300','MC',1.256),
'VBF_HToInvisible_M500_13TeV_powheg_pythia8':('vbfHinv_m500','MC',0.4872),
'VBF_HToInvisible_M600_13TeV_powheg_pythia8':('vbfHinv_m600','MC',0.3274),
'VBF_HToInvisible_M800_13TeV_powheg_pythia8':('vbfHinv_m800','MC',0.1622),
'VBF_HToInvisible_M1000_13TeV_powheg_pythia8':('vbfHinv_m1000','MC',0.08732),
'GluGlu_HToInvisible_M110_13TeV_powheg_pythia8':('ggFHinv_m110','MC',57.90),
'GluGlu_HToInvisible_M150_13TeV_powheg_pythia8':('ggFHinv_m150','MC',31.29),
'GluGlu_HToInvisible_M200_13TeV_powheg_pythia8':('ggFHinv_m200','MC',16.94),
'GluGlu_HToInvisible_M300_13TeV_powheg_pythia8':('ggFHinv_m300','MC',6.590),
'GluGlu_HToInvisible_M500_13TeV_powheg_pythia8':('ggFHinv_m500','MC',1.709),
'GluGlu_HToInvisible_M600_13TeV_powheg_pythia8':('ggFHinv_m600','MC',1.001),
'GluGlu_HToInvisible_M800_13TeV_powheg_pythia8':('ggFHinv_m800','MC',0.4015),
'GluGlu_HToInvisible_M1000_13TeV_powheg_pythia8':('ggFHinv_m1000','MC',0.1845),
'ZprimeToTTJet_M-500_TuneCUETP8M1_13TeV-amcatnlo-pythia8':('ZpTT_med-500','MC',1),
'ZprimeToTTJet_M-750_TuneCUETP8M1_13TeV-amcatnlo-pythia8':('ZpTT_med-750','MC',1),
'ZprimeToTTJet_M-1000_TuneCUETP8M1_13TeV-amcatnlo-pythia8':('ZpTT_med-1000','MC',1),
'ZprimeToTTJet_M-1250_TuneCUETP8M1_13TeV-amcatnlo-pythia8':('ZpTT_med-1250','MC',1),
'ZprimeToTTJet_M-1500_TuneCUETP8M1_13TeV-amcatnlo-pythia8':('ZpTT_med-1500','MC',1),
'ZprimeToTTJet_M-2000_TuneCUETP8M1_13TeV-amcatnlo-pythia8':('ZpTT_med-2000','MC',1),
'ZprimeToTTJet_M-2500_TuneCUETP8M1_13TeV-amcatnlo-pythia8':('ZpTT_med-2500','MC',1),
'ZprimeToTTJet_M-3000_TuneCUETP8M1_13TeV-amcatnlo-pythia8':('ZpTT_med-3000','MC',1),
'ZprimeToTTJet_M-3500_TuneCUETP8M1_13TeV-amcatnlo-pythia8':('ZpTT_med-3500','MC',1),
'ZprimeToTTJet_M-4000_TuneCUETP8M1_13TeV-amcatnlo-pythia8':('ZpTT_med-4000','MC',1),
'ZprimeToTT_M-1000_W-100_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-1000_w-100','MC',1),
'ZprimeToTT_M-1000_W-10_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-1000_w-10','MC',1),
'ZprimeToTT_M-1000_W-300_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-1000_w-300','MC',1),
'ZprimeToTT_M-1250_W-125_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-1250_w-125','MC',1),
'ZprimeToTT_M-1250_W-12p5_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-1250_w-12p5','MC',1),
'ZprimeToTT_M-1500_W-150_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-1500_w-150','MC',1),
'ZprimeToTT_M-1500_W-15_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-1500_w-15','MC',1),
'ZprimeToTT_M-2000_W-200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-2000_w-200','MC',1),
'ZprimeToTT_M-2000_W-20_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-2000_w-20','MC',1),
'ZprimeToTT_M-2000_W-600_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-2000_w-600','MC',1),
'ZprimeToTT_M-2500_W-250_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-2500_w-250','MC',1),
'ZprimeToTT_M-2500_W-25_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-2500_w-25','MC',1),
'ZprimeToTT_M-3000_W-300_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-3000_w-300','MC',1),
'ZprimeToTT_M-3000_W-30_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-3000_w-30','MC',1),
'ZprimeToTT_M-3500_W-350_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-3500_w-350','MC',1),
'ZprimeToTT_M-3500_W-35_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-3500_w-35','MC',1),
'ZprimeToTT_M-4000_W-1200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-4000_w-1200','MC',1),
'ZprimeToTT_M-4000_W-400_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-4000_w-400','MC',1),
'ZprimeToTT_M-4000_W-40_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-4000_w-40','MC',1),
'ZprimeToTT_M-4500_W-450_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-4500_w-450','MC',1),
'ZprimeToTT_M-4500_W-45_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-4500_w-45','MC',1),
'ZprimeToTT_M-5000_W-1500_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-5000_w-1500','MC',1),
'ZprimeToTT_M-5000_W-500_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-5000_w-500','MC',1),
'ZprimeToTT_M-5000_W-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-5000_w-50','MC',1),
'ZprimeToTT_M-500_W-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-500_w-50','MC',1),
'ZprimeToTT_M-500_W-5_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-500_w-5','MC',1),
'ZprimeToTT_M-750_W-75_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-750_w-75','MC',1),
'ZprimeToTT_M-750_W-7p5_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':('ZpTT_lo_med-750_w-7p5','MC',1),
'ZprimeToWW_narrow_M-800_13TeV-madgraph':('ZpWW_med-800','MC',1),
'ZprimeToWW_narrow_M-1000_13TeV-madgraph':('ZpWW_med-1000','MC',1),
'ZprimeToWW_narrow_M-1200_13TeV-madgraph':('ZpWW_med-1200','MC',1),
'ZprimeToWW_narrow_M-1400_13TeV-madgraph':('ZpWW_med-1400','MC',1),
'ZprimeToWW_narrow_M-1600_13TeV-madgraph':('ZpWW_med-1600','MC',1),
'ZprimeToWW_narrow_M-1800_13TeV-madgraph':('ZpWW_med-1800','MC',1),
'ZprimeToWW_narrow_M-2000_13TeV-madgraph':('ZpWW_med-2000','MC',1),
'ZprimeToWW_narrow_M-2500_13TeV-madgraph':('ZpWW_med-2500','MC',1),
'ST_tch_DM-scalar_LO-100_1-13_TeV':('ST_tch_DM-scalar_LO-100_1-13_TeV','MC',0.293*0.68),
'ST_tch_DM-scalar_LO-300_1-13_TeV':('ST_tch_DM-scalar_LO-300_1-13_TeV','MC',0.03202*0.68),
'ST_tch_DM-scalar_LO-500_1-13_TeV':('ST_tch_DM-scalar_LO-500_1-13_TeV','MC',0.004996*0.68),
'ST_tch_DM-scalar_LO-1000_1-13_TeV':('ST_tch_DM-scalar_LO-1000_1-13_TeV','MC',0.0003009*0.68),
'TT_DM-scalar_LO-300_1-13_TeV':('TT_DM-scalar_LO-300_1-13_TeV','MC',0.03045),
'TT_DM-scalar_LO-500_1-13_TeV':('TT_DM-scalar_LO-500_1-13_TeV','MC',0.004947),
'TT_DM-scalar_LO-1000_1-13_TeV':('TT_DM-scalar_LO-1000_1-13_TeV','MC',0.000736),
    # NOTE: the MonoHbb_ZpBaryonic and ZprimeToA0h blocks below are re-defined
    # later in this dict with different weights; in a Python dict literal the
    # last occurrence of a key wins, so these earlier values are ignored.
    'MonoHbb_ZpBaryonic_MZp-10_MChi-1_13TeV-madgraph':('ZpBaryonic_med-10_dm-1','MC',1.4971719048020),
'MonoHbb_ZpBaryonic_MZp-10_MChi-10_13TeV-madgraph':('ZpBaryonic_med-10_dm-10','MC',0.0068827725015),
'MonoHbb_ZpBaryonic_MZp-10_MChi-50_13TeV-madgraph':('ZpBaryonic_med-10_dm-50','MC',0.0001182107976),
'MonoHbb_ZpBaryonic_MZp-10_MChi-150_13TeV-madgraph':('ZpBaryonic_med-10_dm-150','MC',0.0000102513973),
'MonoHbb_ZpBaryonic_MZp-10_MChi-500_13TeV-madgraph':('ZpBaryonic_med-10_dm-500','MC',0.0000000026006),
'MonoHbb_ZpBaryonic_MZp-10_MChi-1000_13TeV-madgraph':('ZpBaryonic_med-10_dm-1000','MC',0.0000000000160),
'MonoHbb_ZpBaryonic_MZp-15_MChi-10_13TeV-madgraph':('ZpBaryonic_med-15_dm-10','MC',0.0241243264082),
'MonoHbb_ZpBaryonic_MZp-20_MChi-1_13TeV-madgraph':('ZpBaryonic_med-20_dm-1','MC',1.5708303614837),
'MonoHbb_ZpBaryonic_MZp-50_MChi-1_13TeV-madgraph':('ZpBaryonic_med-50_dm-1','MC',1.8787525219888),
'MonoHbb_ZpBaryonic_MZp-50_MChi-10_13TeV-madgraph':('ZpBaryonic_med-50_dm-10','MC',1.8686472610676),
'MonoHbb_ZpBaryonic_MZp-50_MChi-50_13TeV-madgraph':('ZpBaryonic_med-50_dm-50','MC',0.0041445173799),
'MonoHbb_ZpBaryonic_MZp-95_MChi-50_13TeV-madgraph':('ZpBaryonic_med-95_dm-50','MC',0.1070881411920),
'MonoHbb_ZpBaryonic_MZp-100_MChi-1_13TeV-madgraph':('ZpBaryonic_med-100_dm-1','MC',1.8350014490170),
'MonoHbb_ZpBaryonic_MZp-100_MChi-10_13TeV-madgraph':('ZpBaryonic_med-100_dm-10','MC',1.8311374853540),
'MonoHbb_ZpBaryonic_MZp-200_MChi-1_13TeV-madgraph':('ZpBaryonic_med-200_dm-1','MC',1.4723542212437),
'MonoHbb_ZpBaryonic_MZp-200_MChi-50_13TeV-madgraph':('ZpBaryonic_med-200_dm-50','MC',1.2073355514877),
'MonoHbb_ZpBaryonic_MZp-200_MChi-150_13TeV-madgraph':('ZpBaryonic_med-200_dm-150','MC',0.0018402642784),
'MonoHbb_ZpBaryonic_MZp-295_MChi-150_13TeV-madgraph':('ZpBaryonic_med-295_dm-150','MC',0.0772562835636),
'MonoHbb_ZpBaryonic_MZp-300_MChi-1_13TeV-madgraph':('ZpBaryonic_med-300_dm-1','MC',1.3081597528204),
'MonoHbb_ZpBaryonic_MZp-300_MChi-50_13TeV-madgraph':('ZpBaryonic_med-300_dm-50','MC',1.1234969709678),
'MonoHbb_ZpBaryonic_MZp-500_MChi-1_13TeV-madgraph':('ZpBaryonic_med-500_dm-1','MC',0.6310480800692),
'MonoHbb_ZpBaryonic_MZp-500_MChi-150_13TeV-madgraph':('ZpBaryonic_med-500_dm-150','MC',0.3922022706199),
'MonoHbb_ZpBaryonic_MZp-500_MChi-500_13TeV-madgraph':('ZpBaryonic_med-500_dm-500','MC',0.0000118690907),
'MonoHbb_ZpBaryonic_MZp-995_MChi-500_13TeV-madgraph':('ZpBaryonic_med-995_dm-500','MC',0.0000000466117),
'MonoHbb_ZpBaryonic_MZp-1000_MChi-1_13TeV-madgraph':('ZpBaryonic_med-1000_dm-1','MC',0.1164211478301),
'MonoHbb_ZpBaryonic_MZp-1000_MChi-150_13TeV-madgraph':('ZpBaryonic_med-1000_dm-150','MC',0.1130981455808),
'MonoHbb_ZpBaryonic_MZp-1000_MChi-1000_13TeV-madgraph':('ZpBaryonic_med-1000_dm-1000','MC',0.0000003182419),
'MonoHbb_ZpBaryonic_MZp-1995_MChi-1000_13TeV-madgraph':('ZpBaryonic_med-1995_dm-1000','MC',0.0004210309298),
'MonoHbb_ZpBaryonic_MZp-2000_MChi-1_13TeV-madgraph':('ZpBaryonic_med-2000_dm-1','MC',0.0080403926209),
'MonoHbb_ZpBaryonic_MZp-2000_MChi-500_13TeV-madgraph':('ZpBaryonic_med-2000_dm-500','MC',0.0073116588302),
'MonoHbb_ZpBaryonic_MZp-10000_MChi-1_13TeV-madgraph':('ZpBaryonic_med-10000_dm-1','MC',0.0000000068756),
'MonoHbb_ZpBaryonic_MZp-10000_MChi-10_13TeV-madgraph':('ZpBaryonic_med-10000_dm-10','MC',0.0000000068844),
'MonoHbb_ZpBaryonic_MZp-10000_MChi-50_13TeV-madgraph':('ZpBaryonic_med-10000_dm-50','MC',0.0000000068310),
'MonoHbb_ZpBaryonic_MZp-10000_MChi-150_13TeV-madgraph':('ZpBaryonic_med-10000_dm-150','MC',0.0000000066844),
'MonoHbb_ZpBaryonic_MZp-10000_MChi-500_13TeV-madgraph':('ZpBaryonic_med-10000_dm-500','MC',0.0000000055185),
'MonoHbb_ZpBaryonic_MZp-10000_MChi-1000_13TeV-madgraph':('ZpBaryonic_med-10000_dm-1000','MC',0.0000000038070),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-800_MA0-600_13TeV-madgraph':('ZpA0h_med-800_dm-600','MC',0.010101*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-800_MA0-500_13TeV-madgraph':('ZpA0h_med-800_dm-500','MC',0.0367*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-800_MA0-400_13TeV-madgraph':('ZpA0h_med-800_dm-400','MC',0.091124*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-800_MA0-300_13TeV-madgraph':('ZpA0h_med-800_dm-300','MC',0.27765*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-600_MA0-400_13TeV-madgraph':('ZpA0h_med-600_dm-400','MC',0.063916*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-600_MA0-300_13TeV-madgraph':('ZpA0h_med-600_dm-300','MC',0.45127*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2500_MA0-800_13TeV-madgraph':('ZpA0h_med-2500_dm-800','MC',0.0007806*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2500_MA0-700_13TeV-madgraph':('ZpA0h_med-2500_dm-700','MC',0.0008699*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2500_MA0-600_13TeV-madgraph':('ZpA0h_med-2500_dm-600','MC',0.0009678*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2500_MA0-500_13TeV-madgraph':('ZpA0h_med-2500_dm-500','MC',0.0010859*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2500_MA0-400_13TeV-madgraph':('ZpA0h_med-2500_dm-400','MC',0.0013246*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2500_MA0-300_13TeV-madgraph':('ZpA0h_med-2500_dm-300','MC',0.0025458*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2000_MA0-800_13TeV-madgraph':('ZpA0h_med-2000_dm-800','MC',0.0020981*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2000_MA0-700_13TeV-madgraph':('ZpA0h_med-2000_dm-700','MC',0.0024651*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2000_MA0-600_13TeV-madgraph':('ZpA0h_med-2000_dm-600','MC',0.0028691*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2000_MA0-500_13TeV-madgraph':('ZpA0h_med-2000_dm-500','MC',0.003341*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2000_MA0-400_13TeV-madgraph':('ZpA0h_med-2000_dm-400','MC',0.0041934*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2000_MA0-300_13TeV-madgraph':('ZpA0h_med-2000_dm-300','MC',0.0082317*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1700_MA0-800_13TeV-madgraph':('ZpA0h_med-1700_dm-800','MC',0.0036537*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1700_MA0-700_13TeV-madgraph':('ZpA0h_med-1700_dm-700','MC',0.0045823*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1700_MA0-600_13TeV-madgraph':('ZpA0h_med-1700_dm-600','MC',0.0056149*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1700_MA0-500_13TeV-madgraph':('ZpA0h_med-1700_dm-500','MC',0.0068221*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1700_MA0-400_13TeV-madgraph':('ZpA0h_med-1700_dm-400','MC',0.0088466*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1700_MA0-300_13TeV-madgraph':('ZpA0h_med-1700_dm-300','MC',0.017786*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1400_MA0-800_13TeV-madgraph':('ZpA0h_med-1400_dm-800','MC',0.0055221*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1400_MA0-700_13TeV-madgraph':('ZpA0h_med-1400_dm-700','MC',0.0079659*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1400_MA0-600_13TeV-madgraph':('ZpA0h_med-1400_dm-600','MC',0.010822*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1400_MA0-500_13TeV-madgraph':('ZpA0h_med-1400_dm-500','MC',0.014225*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1400_MA0-400_13TeV-madgraph':('ZpA0h_med-1400_dm-400','MC',0.019609*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1400_MA0-300_13TeV-madgraph':('ZpA0h_med-1400_dm-300','MC',0.041208*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1200_MA0-800_13TeV-madgraph':('ZpA0h_med-1200_dm-800','MC',0.005655*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1200_MA0-700_13TeV-madgraph':('ZpA0h_med-1200_dm-700','MC',0.010028*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1200_MA0-600_13TeV-madgraph':('ZpA0h_med-1200_dm-600','MC',0.015763*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1200_MA0-500_13TeV-madgraph':('ZpA0h_med-1200_dm-500','MC',0.022935*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1200_MA0-400_13TeV-madgraph':('ZpA0h_med-1200_dm-400','MC',0.034039*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1200_MA0-300_13TeV-madgraph':('ZpA0h_med-1200_dm-300','MC',0.075451*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1000_MA0-800_13TeV-madgraph':('ZpA0h_med-1000_dm-800','MC',0.0027483*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1000_MA0-700_13TeV-madgraph':('ZpA0h_med-1000_dm-700','MC',0.0085776*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1000_MA0-600_13TeV-madgraph':('ZpA0h_med-1000_dm-600','MC',0.01897*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1000_MA0-500_13TeV-madgraph':('ZpA0h_med-1000_dm-500','MC',0.03431*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1000_MA0-400_13TeV-madgraph':('ZpA0h_med-1000_dm-400','MC',0.058824*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1000_MA0-300_13TeV-madgraph':('ZpA0h_med-1000_dm-300','MC',0.14383*0.578),
'MonoHbb_ZpBaryonic_MZp-10_MChi-1_13TeV-madgraph':('ZpBaryonic_med-10_dm-1','MC',2.594752001*0.578),
'MonoHbb_ZpBaryonic_MZp-10_MChi-10_13TeV-madgraph':('ZpBaryonic_med-10_dm-10','MC',0.0119285485*0.578),
'MonoHbb_ZpBaryonic_MZp-10_MChi-50_13TeV-madgraph':('ZpBaryonic_med-10_dm-50','MC',0.00020487139962*0.578),
'MonoHbb_ZpBaryonic_MZp-10_MChi-150_13TeV-madgraph':('ZpBaryonic_med-10_dm-150','MC',3.14857697501e-06*0.578),
'MonoHbb_ZpBaryonic_MZp-10_MChi-500_13TeV-madgraph':('ZpBaryonic_med-10_dm-500','MC',4.5071850285e-09*0.578),
'MonoHbb_ZpBaryonic_MZp-10_MChi-1000_13TeV-madgraph':('ZpBaryonic_med-10_dm-1000','MC',2.7728742863e-11*0.578),
'MonoHbb_ZpBaryonic_MZp-15_MChi-10_13TeV-madgraph':('ZpBaryonic_med-15_dm-10','MC',0.041809924451*0.578),
'MonoHbb_ZpBaryonic_MZp-20_MChi-1_13TeV-madgraph':('ZpBaryonic_med-20_dm-1','MC',2.72240963862*0.578),
'MonoHbb_ZpBaryonic_MZp-50_MChi-1_13TeV-madgraph':('ZpBaryonic_med-50_dm-1','MC',3.25607022875*0.578),
'MonoHbb_ZpBaryonic_MZp-50_MChi-10_13TeV-madgraph':('ZpBaryonic_med-50_dm-10','MC',3.23855677828*0.578),
'MonoHbb_ZpBaryonic_MZp-50_MChi-50_13TeV-madgraph':('ZpBaryonic_med-50_dm-50','MC',0.00718287240877*0.578),
'MonoHbb_ZpBaryonic_MZp-95_MChi-50_13TeV-madgraph':('ZpBaryonic_med-95_dm-50','MC',0.185594698773*0.578),
'MonoHbb_ZpBaryonic_MZp-100_MChi-1_13TeV-madgraph':('ZpBaryonic_med-100_dm-1','MC',3.18024514561*0.578),
'MonoHbb_ZpBaryonic_MZp-100_MChi-10_13TeV-madgraph':('ZpBaryonic_med-100_dm-10','MC',3.17354850148*0.578),
'MonoHbb_ZpBaryonic_MZp-200_MChi-1_13TeV-madgraph':('ZpBaryonic_med-200_dm-1','MC',2.5517404181*0.578),
'MonoHbb_ZpBaryonic_MZp-200_MChi-50_13TeV-madgraph':('ZpBaryonic_med-200_dm-50','MC',2.09243596445*0.578),
'MonoHbb_ZpBaryonic_MZp-200_MChi-150_13TeV-madgraph':('ZpBaryonic_med-200_dm-150','MC',0.00318936616712*0.578),
'MonoHbb_ZpBaryonic_MZp-295_MChi-150_13TeV-madgraph':('ZpBaryonic_med-295_dm-150','MC',0.133893039105*0.578),
'MonoHbb_ZpBaryonic_MZp-300_MChi-1_13TeV-madgraph':('ZpBaryonic_med-300_dm-1','MC',2.26717461494*0.578),
'MonoHbb_ZpBaryonic_MZp-300_MChi-50_13TeV-madgraph':('ZpBaryonic_med-300_dm-50','MC',1.94713513166*0.578),
'MonoHbb_ZpBaryonic_MZp-500_MChi-1_13TeV-madgraph':('ZpBaryonic_med-500_dm-1','MC',1.09367084934*0.578),
'MonoHbb_ZpBaryonic_MZp-500_MChi-150_13TeV-madgraph':('ZpBaryonic_med-500_dm-150','MC',0.679726638856*0.578),
'MonoHbb_ZpBaryonic_MZp-500_MChi-500_13TeV-madgraph':('ZpBaryonic_med-500_dm-500','MC',2.05703477766e-05*0.578),
'MonoHbb_ZpBaryonic_MZp-995_MChi-500_13TeV-madgraph':('ZpBaryonic_med-995_dm-500','MC',0.0111215826866*0.578),
'MonoHbb_ZpBaryonic_MZp-1000_MChi-1_13TeV-madgraph':('ZpBaryonic_med-1000_dm-1','MC',0.201769753605*0.578),
'MonoHbb_ZpBaryonic_MZp-1000_MChi-150_13TeV-madgraph':('ZpBaryonic_med-1000_dm-150','MC',0.19601065092*0.578),
'MonoHbb_ZpBaryonic_MZp-1000_MChi-1000_13TeV-madgraph':('ZpBaryonic_med-1000_dm-1000','MC',5.51545710471e-07*0.578),
'MonoHbb_ZpBaryonic_MZp-1995_MChi-1000_13TeV-madgraph':('ZpBaryonic_med-1995_dm-1000','MC',0.000729689653045*0.578),
'MonoHbb_ZpBaryonic_MZp-2000_MChi-1_13TeV-madgraph':('ZpBaryonic_med-2000_dm-1','MC',0.0139348225665*0.578),
'MonoHbb_ZpBaryonic_MZp-2000_MChi-500_13TeV-madgraph':('ZpBaryonic_med-2000_dm-500','MC',0.012671852392*0.578),
'MonoHbb_ZpBaryonic_MZp-10000_MChi-1_13TeV-madgraph':('ZpBaryonic_med-10000_dm-1','MC',1.19161364309e-08*0.578),
'MonoHbb_ZpBaryonic_MZp-10000_MChi-10_13TeV-madgraph':('ZpBaryonic_med-10000_dm-10','MC',1.19314052769e-08*0.578),
'MonoHbb_ZpBaryonic_MZp-10000_MChi-50_13TeV-madgraph':('ZpBaryonic_med-10000_dm-50','MC',1.18388857948e-08*0.578),
'MonoHbb_ZpBaryonic_MZp-10000_MChi-150_13TeV-madgraph':('ZpBaryonic_med-10000_dm-150','MC',1.15848042141e-08*0.578),
'MonoHbb_ZpBaryonic_MZp-10000_MChi-500_13TeV-madgraph':('ZpBaryonic_med-10000_dm-500','MC',9.56417074962e-09*0.578),
'MonoHbb_ZpBaryonic_MZp-10000_MChi-1000_13TeV-madgraph':('ZpBaryonic_med-10000_dm-1000','MC',6.59793071766e-09*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2750_MA0-300_13TeV-madgraph':('ZpA0h_med-2750_dm-300','MC',0.0015511*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2750_MA0-500_13TeV-madgraph':('ZpA0h_med-2750_dm-500','MC',0.00067388*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2750_MA0-800_13TeV-madgraph':('ZpA0h_med-2750_dm-800','MC',0.00050351*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-3000_MA0-300_13TeV-madgraph':('ZpA0h_med-3000_dm-300','MC',0.00092077*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-3000_MA0-500_13TeV-madgraph':('ZpA0h_med-3000_dm-500','MC',0.0004022*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-3000_MA0-800_13TeV-madgraph':('ZpA0h_med-3000_dm-800','MC',0.00030894*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-3500_MA0-300_13TeV-madgraph':('ZpA0h_med-3500_dm-300','MC',0.00033718*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-3500_MA0-500_13TeV-madgraph':('ZpA0h_med-3500_dm-500','MC',0.00014885*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-3500_MA0-800_13TeV-madgraph':('ZpA0h_med-3500_dm-800','MC',0.00011855*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-4000_MA0-300_13TeV-madgraph':('ZpA0h_med-4000_dm-300','MC',0.00012964*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-4000_MA0-500_13TeV-madgraph':('ZpA0h_med-4000_dm-500','MC',5.7149e-05*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-4000_MA0-800_13TeV-madgraph':('ZpA0h_med-4000_dm-800','MC',4.6312e-05*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-800_MA0-600_13TeV-madgraph':('ZpA0h_med-800_dm-600','MC',0.010350*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-800_MA0-500_13TeV-madgraph':('ZpA0h_med-800_dm-500','MC',0.037616*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-800_MA0-400_13TeV-madgraph':('ZpA0h_med-800_dm-400','MC',0.093376*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-800_MA0-300_13TeV-madgraph':('ZpA0h_med-800_dm-300','MC',0.2848*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-600_MA0-400_13TeV-madgraph':('ZpA0h_med-600_dm-400','MC',0.065307*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-600_MA0-300_13TeV-madgraph':('ZpA0h_med-600_dm-300','MC',0.46223*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2500_MA0-800_13TeV-madgraph':('ZpA0h_med-2500_dm-800','MC',0.00082449*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2500_MA0-700_13TeV-madgraph':('ZpA0h_med-2500_dm-700','MC',0.00091718*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2500_MA0-600_13TeV-madgraph':('ZpA0h_med-2500_dm-600','MC',0.0010187*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2500_MA0-500_13TeV-madgraph':('ZpA0h_med-2500_dm-500','MC',0.0011446*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2500_MA0-400_13TeV-madgraph':('ZpA0h_med-2500_dm-400','MC',0.0013922*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2500_MA0-300_13TeV-madgraph':('ZpA0h_med-2500_dm-300','MC',0.0026787*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2000_MA0-800_13TeV-madgraph':('ZpA0h_med-2000_dm-800','MC',0.0021931*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2000_MA0-700_13TeV-madgraph':('ZpA0h_med-2000_dm-700','MC',0.0025816*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2000_MA0-600_13TeV-madgraph':('ZpA0h_med-2000_dm-600','MC',0.0029991*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2000_MA0-500_13TeV-madgraph':('ZpA0h_med-2000_dm-500','MC',0.0034998*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2000_MA0-400_13TeV-madgraph':('ZpA0h_med-2000_dm-400','MC',0.0043899*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-2000_MA0-300_13TeV-madgraph':('ZpA0h_med-2000_dm-300','MC',0.0086008*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1700_MA0-800_13TeV-madgraph':('ZpA0h_med-1700_dm-800','MC',0.0038043*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1700_MA0-700_13TeV-madgraph':('ZpA0h_med-1700_dm-700','MC',0.0047758*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1700_MA0-600_13TeV-madgraph':('ZpA0h_med-1700_dm-600','MC',0.0058468*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1700_MA0-500_13TeV-madgraph':('ZpA0h_med-1700_dm-500','MC',0.0071071*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1700_MA0-400_13TeV-madgraph':('ZpA0h_med-1700_dm-400','MC',0.0092082*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1700_MA0-300_13TeV-madgraph':('ZpA0h_med-1700_dm-300','MC',0.018545*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1400_MA0-800_13TeV-madgraph':('ZpA0h_med-1400_dm-800','MC',0.0057179*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1400_MA0-700_13TeV-madgraph':('ZpA0h_med-1400_dm-700','MC',0.0082681*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1400_MA0-600_13TeV-madgraph':('ZpA0h_med-1400_dm-600','MC',0.011224*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1400_MA0-500_13TeV-madgraph':('ZpA0h_med-1400_dm-500','MC',0.014723*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1400_MA0-400_13TeV-madgraph':('ZpA0h_med-1400_dm-400','MC',0.020306*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1400_MA0-300_13TeV-madgraph':('ZpA0h_med-1400_dm-300','MC',0.042687*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1200_MA0-800_13TeV-madgraph':('ZpA0h_med-1200_dm-800','MC',0.0058353*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1200_MA0-700_13TeV-madgraph':('ZpA0h_med-1200_dm-700','MC',0.010364*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1200_MA0-600_13TeV-madgraph':('ZpA0h_med-1200_dm-600','MC',0.016254*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1200_MA0-500_13TeV-madgraph':('ZpA0h_med-1200_dm-500','MC',0.023706*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1200_MA0-400_13TeV-madgraph':('ZpA0h_med-1200_dm-400','MC',0.03524*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1200_MA0-300_13TeV-madgraph':('ZpA0h_med-1200_dm-300','MC',0.078164*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1000_MA0-800_13TeV-madgraph':('ZpA0h_med-1000_dm-800','MC',0.00283922*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1000_MA0-700_13TeV-madgraph':('ZpA0h_med-1000_dm-700','MC',0.00881814*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1000_MA0-600_13TeV-madgraph':('ZpA0h_med-1000_dm-600','MC',0.019542*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1000_MA0-500_13TeV-madgraph':('ZpA0h_med-1000_dm-500','MC',0.035347*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1000_MA0-400_13TeV-madgraph':('ZpA0h_med-1000_dm-400','MC',0.060531*0.578),
'ZprimeToA0hToA0chichihbb_2HDM_MZp-1000_MA0-300_13TeV-madgraph':('ZpA0h_med-1000_dm-300','MC',0.14771*0.578),
'TTbarDMJets_scalar_Mchi-10_Mphi-100' : ('TTbarDMJets_scalar_Mchi-10_Mphi-100','MC',1),
'TTbarDMJets_scalar_Mchi-10_Mphi-10' : ('TTbarDMJets_scalar_Mchi-10_Mphi-10','MC',1),
'TTbarDMJets_scalar_Mchi-10_Mphi-15' : ('TTbarDMJets_scalar_Mchi-10_Mphi-15','MC',1),
'TTbarDMJets_scalar_Mchi-10_Mphi-50' : ('TTbarDMJets_scalar_Mchi-10_Mphi-50','MC',1),
'TTbarDMJets_scalar_Mchi-1_Mphi-10000' : ('TTbarDMJets_scalar_Mchi-1_Mphi-10000','MC',1),
'TTbarDMJets_scalar_Mchi-1_Mphi-100' : ('TTbarDMJets_scalar_Mchi-1_Mphi-100','MC',1),
'TTbarDMJets_scalar_Mchi-1_Mphi-10' : ('TTbarDMJets_scalar_Mchi-1_Mphi-10','MC',1),
'TTbarDMJets_scalar_Mchi-1_Mphi-200' : ('TTbarDMJets_scalar_Mchi-1_Mphi-200','MC',1),
'TTbarDMJets_scalar_Mchi-1_Mphi-20' : ('TTbarDMJets_scalar_Mchi-1_Mphi-20','MC',1),
'TTbarDMJets_scalar_Mchi-1_Mphi-300' : ('TTbarDMJets_scalar_Mchi-1_Mphi-300','MC',1),
'TTbarDMJets_scalar_Mchi-1_Mphi-500' : ('TTbarDMJets_scalar_Mchi-1_Mphi-500','MC',1),
'TTbarDMJets_scalar_Mchi-1_Mphi-50' : ('TTbarDMJets_scalar_Mchi-1_Mphi-50','MC',1),
'TTbarDMJets_scalar_Mchi-50_Mphi-10' : ('TTbarDMJets_scalar_Mchi-50_Mphi-10','MC',1),
'TTbarDMJets_scalar_Mchi-50_Mphi-200' : ('TTbarDMJets_scalar_Mchi-50_Mphi-200','MC',1),
'TTbarDMJets_scalar_Mchi-50_Mphi-300' : ('TTbarDMJets_scalar_Mchi-50_Mphi-300','MC',1),
'TTbarDMJets_scalar_Mchi-50_Mphi-50' : ('TTbarDMJets_scalar_Mchi-50_Mphi-50','MC',1),
'TTbarDMJets_scalar_Mchi-50_Mphi-95' : ('TTbarDMJets_scalar_Mchi-50_Mphi-95','MC',1),
}
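# Illustrative sketch (not part of the original file): each value above is a
# (short_name, sample_type, weight) tuple, so callers typically unpack it.
# The helper name is hypothetical.
def _example_lookup(dataset='thq'):
    short_name, sample_type, weight = processes[dataset]
    return short_name, sample_type, weight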
|
sidnarayanan/PandaCore
|
Tools/python/processes/BSM.py
|
Python
|
mit
| 27,294
|
#!/usr/bin/env blender
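# Usage (illustrative; assumes a Blender build with the io_EDM addon on its
# addon path -- the exact invocation may differ):
#   blender --background --python utils/write.py -- /path/to/output.edm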
import sys
import os
import bpy
import addon_utils
def _main(args):
default, state = addon_utils.check("io_EDM")
if not state:
import io_EDM
io_EDM.register()
  try:
    myArgumentIndex = next(i for i, v in enumerate(sys.argv) if v == "--")
    args = args[myArgumentIndex+1:]
    filepath = args[0]
  except (StopIteration, IndexError):
    # No "--" separator, or no filename after it: fall back to a default.
    filepath = "test.edm"
  print("Writing", filepath)
  # Call the export operator
  bpy.ops.export_mesh.edm(filepath=filepath)
if __name__ == "__main__":
if _main(sys.argv) == -1:
sys.exit()
|
ndevenish/Blender_ioEDM
|
utils/write.py
|
Python
|
mit
| 564
|
#!/usr/bin/env python3
import collections
import pathlib
root = pathlib.Path('../SWMH-BETA/SWMH')
globs = [
'common/**/*.*',
'events/*.*',
'history/**/*.*',
'interface/*.gui',
'localisation/*.*',
'map/*.csv',
'map/*.map',
'map/*.txt'
]
charlocs = collections.defaultdict(list)
for glob in globs:
for path in sorted(root.glob(glob)):
with path.open('rb') as f:
            for i, line in enumerate(f, 1):  # 1-based line numbers for the report
                for char in line:
                    # 0x80-0x9F is where Windows-1252 differs from Latin-1
                    # (the C1 control range), the telltale sign of cp1252 text.
                    if char in range(0x80, 0xA0):
                        charlocs[char].append((path.name, i))
for char, locs in sorted(charlocs.items()):
print('{:x}:'.format(char))
for loc in locs:
print('\tFile "{}", line {}'.format(*loc))
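# Illustrative sketch (not part of the original script): bytes in 0x80-0x9F
# decode to punctuation under cp1252 but to C1 control characters under
# latin-1, which is why the scan above singles out that range.  The helper
# name is hypothetical.
def _example_decode(byte_value=0x92):
    raw = bytes([byte_value])
    return raw.decode('cp1252'), raw.decode('latin-1')  # ('\u2019', '\x92')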
|
zijistark/ck2utils
|
esc/old/cp1252test.py
|
Python
|
gpl-2.0
| 743
|
'''
<Run reachability test on Internet2>
Copyright 2012, Stanford University. This file is licensed under GPL v2 plus
a special exception, as described in included LICENSE_EXCEPTION.txt.
Created on Mar 11, 2012
@author: James Hongyi Zeng
'''
from examples.load_internet2_backbone import *
from config_parser.juniper_parser import juniperRouter
from headerspace.hs import *
from headerspace.applications import *
from time import time
#from multiprocessing import Pool
ntf_global = []
ttf_global = []
dst_port_ids_global = []
def find_reachability_multiprocess(in_port, input_pkt):
paths = []
propagation = []
p_node = {}
p_node["hdr"] = input_pkt
p_node["port"] = in_port
p_node["visits"] = []
p_node["hs_history"] = []
propagation.append(p_node)
#loop_count = 0
while len(propagation)>0:
#get the next node in propagation graph and apply it to NTF and TTF
print "Propagation has length: %d"%len(propagation)
results = map(two_step, propagation)
tmp_propagate = []
for result in results:
(local_propagation, local_paths) = result
tmp_propagate.extend(local_propagation)
paths.extend(local_paths)
propagation = tmp_propagate
return paths
def two_step (p_node):
propagation = []
paths = []
next_hp = ntf_global.T(p_node["hdr"],p_node["port"])
for (next_h,next_ps) in next_hp:
for next_p in next_ps:
if next_p in dst_port_ids_global:
reached = {}
reached["hdr"] = next_h
reached["port"] = next_p
reached["visits"] = p_node["visits"]+[p_node["port"]]
#reached["visits"] = list(p_node["visits"])
#reached["visits"].append(p_node["port"])
reached["hs_history"] = list(p_node["hs_history"])
paths.append(reached)
else:
linked = ttf_global.T(next_h,next_p)
for (linked_h,linked_ports) in linked:
for linked_p in linked_ports:
new_p_node = {}
new_p_node["hdr"] = linked_h
new_p_node["port"] = linked_p
new_p_node["visits"] = p_node["visits"]+[p_node["port"]]
#new_p_node["visits"].append(p_node["port"])
#new_p_node["visits"].append(next_p)
new_p_node["hs_history"] = list(p_node["hs_history"])
new_p_node["hs_history"].append(p_node["hdr"])
if linked_p in dst_port_ids_global:
paths.append(new_p_node)
elif linked_p in new_p_node["visits"]:
#loop_count += 1
pass
#print "WARNING: detected a loop - branch aborted: \nHeaderSpace: %s\n Visited Ports: %s\nLast Port %d "%(\
# new_p_node["hdr"],new_p_node["visits"],new_p_node["port"])
else:
#tmp_propagate.append(new_p_node)
propagation.append(new_p_node)
return (propagation, paths)
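# Illustrative sketch (not part of the original script): every node in the
# propagation graph above is a plain dict with the same four keys; a
# hypothetical constructor makes that shape explicit.
def _make_p_node(hdr, port, visits=None, hs_history=None):
    return {"hdr": hdr,
            "port": port,
            "visits": visits if visits is not None else [],
            "hs_history": hs_history if hs_history is not None else []}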
def main():
global ntf_global
global ttf_global
global dst_port_ids_global
ntf_global = load_internet2_backbone_ntf()
ttf_global = load_internet2_backbone_ttf()
(port_map,port_reverse_map) = load_internet2_backbone_port_to_id_map()
cs = juniperRouter(1)
output_port_addition = cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST
all_x = byte_array_get_all_x(ntf_global.length)
#cs.set_field(all_x, "vlan", 32, 0)
#cs.set_field(all_x, "ip_dst", dotted_ip_to_int("64.57.28.243"), 8)
#cs.set_field(all_x, "ip_src", dotted_ip_to_int("172.27.76.233"), 0)
#cs.set_field(all_x, "tcp_dst", 22, 0)
#cs.set_field(all_x, "ip_proto", 6, 0)
test_pkt = headerspace(ntf_global.length)
test_pkt.add_hs(all_x)
src_port_id = port_map["atla"]["xe-0/1/1"]
dst_port_ids_global = [port_map["atla"]["xe-1/0/2"]+output_port_addition]
st = time()
paths = find_reachability_multiprocess(src_port_id,test_pkt)
#paths = find_reachability(ntf_global, ttf_global, src_port_id, dst_port_ids_global, test_pkt)
en = time()
print_loops(paths, port_reverse_map)
print len(paths)
#loops = detect_loop(ntf,ttf,loop_port_ids,port_reverse_map,None,output_port_addition)
#en = time()
#print_loops(loops, port_reverse_map)
#print len(loops)
    print "elapsed: %.3f s" % (en - st)
if __name__ == "__main__":
main()
|
Br1an6/ACS_Netplumber_Implementation
|
hsa-python/net_plumbing/examples/i2/run_reachability_internet2_bb.py
|
Python
|
gpl-2.0
| 4,737
|
# Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from jacket.compute import exception
from jacket.i18n import _LW
from jacket.objects import compute as objects
from jacket.objects.compute import fields
from jacket.compute.pci import stats
from jacket.compute.pci import whitelist
from jacket.compute.virt import hardware
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class PciDevTracker(object):
"""Manage pci devices in a compute node.
This class fetches pci passthrough information from hypervisor
and tracks the usage of these devices.
It's called by compute node resource tracker to allocate and free
devices to/from instances, and to update the available pci passthrough
devices information from hypervisor periodically. The devices
information is updated to DB when devices information is changed.
"""
def __init__(self, context, node_id=None):
"""Create a pci device tracker.
If a node_id is passed in, it will fetch pci devices information
from database, otherwise, it will create an empty devices list
and the resource tracker will update the node_id information later.
"""
super(PciDevTracker, self).__init__()
self.stale = {}
self.node_id = node_id
self.stats = stats.PciDeviceStats()
self.dev_filter = whitelist.Whitelist(CONF.pci_passthrough_whitelist)
self._context = context
if node_id:
self.pci_devs = objects.PciDeviceList.get_by_compute_node(
context, node_id)
else:
self.pci_devs = objects.PciDeviceList(objects=[])
self._initial_instance_usage()
def _initial_instance_usage(self):
self.allocations = collections.defaultdict(list)
self.claims = collections.defaultdict(list)
for dev in self.pci_devs:
uuid = dev.instance_uuid
if dev.status == fields.PciDeviceStatus.CLAIMED:
self.claims[uuid].append(dev)
elif dev.status == fields.PciDeviceStatus.ALLOCATED:
self.allocations[uuid].append(dev)
elif dev.status == fields.PciDeviceStatus.AVAILABLE:
self.stats.add_device(dev)
@property
def all_devs(self):
return self.pci_devs
def save(self, context):
for dev in self.pci_devs:
if dev.obj_what_changed():
with dev.obj_alternate_context(context):
dev.save()
if dev.status == fields.PciDeviceStatus.DELETED:
self.pci_devs.objects.remove(dev)
@property
def pci_stats(self):
return self.stats
def update_devices_from_hypervisor_resources(self, devices_json):
"""Sync the pci device tracker with hypervisor information.
To support pci device hot plug, we sync with the hypervisor
periodically, fetching all devices information from hypervisor,
update the tracker and sync the DB information.
Devices should not be hot-plugged when assigned to a guest,
but possibly the hypervisor has no such guarantee. The best
we can do is to give a warning if a device is changed
or removed while assigned.
:param devices_json: The JSON-ified string of device information
that is returned from the virt driver's
get_available_resource() call in the
pci_passthrough_devices key.
"""
devices = []
for dev in jsonutils.loads(devices_json):
if self.dev_filter.device_assignable(dev):
devices.append(dev)
self._set_hvdevs(devices)
def _set_hvdevs(self, devices):
exist_addrs = set([dev.address for dev in self.pci_devs])
new_addrs = set([dev['address'] for dev in devices])
for existed in self.pci_devs:
if existed.address in exist_addrs - new_addrs:
try:
existed.remove()
except exception.PciDeviceInvalidStatus as e:
LOG.warning(_LW("Trying to remove device with %(status)s "
"ownership %(instance_uuid)s because of "
"%(pci_exception)s"),
{'status': existed.status,
'instance_uuid': existed.instance_uuid,
'pci_exception': e.format_message()})
# Note(yjiang5): remove the device by force so that
# db entry is cleaned in next sync.
existed.status = fields.PciDeviceStatus.REMOVED
else:
# Note(yjiang5): no need to update stats if an assigned
# device is hot removed.
self.stats.remove_device(existed)
else:
new_value = next((dev for dev in devices if
dev['address'] == existed.address))
new_value['compute_node_id'] = self.node_id
if existed.status in (fields.PciDeviceStatus.CLAIMED,
fields.PciDeviceStatus.ALLOCATED):
# Pci properties may change while assigned because of
# hotplug or config changes. Although normally this should
# not happen.
# As the devices have been assigned to an instance,
# we defer the change till the instance is destroyed.
# We will not sync the new properties with database
# before that.
# TODO(yjiang5): Not sure if this is a right policy, but
# at least it avoids some confusion and, if needed,
# we can add more action like killing the instance
# by force in future.
self.stale[new_value['address']] = new_value
else:
existed.update_device(new_value)
for dev in [dev for dev in devices if
dev['address'] in new_addrs - exist_addrs]:
dev['compute_node_id'] = self.node_id
dev_obj = objects.PciDevice.create(self._context, dev)
self.pci_devs.objects.append(dev_obj)
self.stats.add_device(dev_obj)
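    # Illustrative note (not part of the original module): _set_hvdevs above
    # partitions devices by address with plain set arithmetic --
    #   exist_addrs - new_addrs: devices that vanished from the hypervisor,
    #   new_addrs - exist_addrs: devices newly reported by the hypervisor,
    # and addresses in both sets are updated in place (or deferred while the
    # device is still assigned to an instance).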
def _claim_instance(self, context, instance, prefix=''):
pci_requests = objects.InstancePCIRequests.get_by_instance(
context, instance)
if not pci_requests.requests:
return None
instance_numa_topology = hardware.instance_topology_from_instance(
instance)
instance_cells = None
if instance_numa_topology:
instance_cells = instance_numa_topology.cells
devs = self.stats.consume_requests(pci_requests.requests,
instance_cells)
if not devs:
return None
for dev in devs:
dev.claim(instance)
if instance_numa_topology and any(
dev.numa_node is None for dev in devs):
LOG.warning(_LW("Assigning a pci device without numa affinity to"
"instance %(instance)s which has numa topology"),
{'instance': instance['uuid']})
return devs
def _allocate_instance(self, instance, devs):
for dev in devs:
dev.allocate(instance)
def allocate_instance(self, instance):
devs = self.claims.pop(instance['uuid'], [])
self._allocate_instance(instance, devs)
if devs:
self.allocations[instance['uuid']] += devs
def claim_instance(self, context, instance):
if not self.pci_devs:
return
devs = self._claim_instance(context, instance)
if devs:
self.claims[instance['uuid']] = devs
return devs
return None
def _free_device(self, dev, instance=None):
freed_devs = dev.free(instance)
stale = self.stale.pop(dev.address, None)
if stale:
dev.update_device(stale)
for dev in freed_devs:
self.stats.add_device(dev)
def _free_instance(self, instance):
# Note(yjiang5): When an instance is resized, the devices in the
# destination node are claimed to the instance in prep_resize stage.
# However, the instance contains only allocated devices
# information, not the claimed one. So we can't use
# instance['pci_devices'] to check the devices to be freed.
for dev in self.pci_devs:
if dev.status in (fields.PciDeviceStatus.CLAIMED,
fields.PciDeviceStatus.ALLOCATED):
if dev.instance_uuid == instance['uuid']:
self._free_device(dev)
def free_instance(self, context, instance):
if self.allocations.pop(instance['uuid'], None):
self._free_instance(instance)
elif self.claims.pop(instance['uuid'], None):
self._free_instance(instance)
def update_pci_for_instance(self, context, instance, sign):
"""Update PCI usage information if devices are de/allocated.
"""
if not self.pci_devs:
return
if sign == -1:
self.free_instance(context, instance)
if sign == 1:
self.allocate_instance(instance)
def update_pci_for_migration(self, context, instance, sign=1):
"""Update instance's pci usage information when it is migrated.
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock.
:param sign: claim devices for instance when sign is 1, remove
the claims when sign is -1
"""
uuid = instance['uuid']
if sign == 1 and uuid not in self.claims:
devs = self._claim_instance(context, instance, 'new_')
if devs:
self.claims[uuid] = devs
if sign == -1 and uuid in self.claims:
self._free_instance(instance)
def clean_usage(self, instances, migrations, orphans):
"""Remove all usages for instances not passed in the parameter.
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock
"""
existed = set(inst['uuid'] for inst in instances)
existed |= set(mig['instance_uuid'] for mig in migrations)
existed |= set(inst['uuid'] for inst in orphans)
        for uuid in list(self.claims.keys()):  # copy: we pop inside the loop
if uuid not in existed:
devs = self.claims.pop(uuid, [])
for dev in devs:
self._free_device(dev)
        for uuid in list(self.allocations.keys()):  # copy: we pop inside the loop
if uuid not in existed:
devs = self.allocations.pop(uuid, [])
for dev in devs:
self._free_device(dev)
def get_instance_pci_devs(inst, request_id=None):
"""Get the devices allocated to one or all requests for an instance.
- For generic PCI request, the request id is None.
- For sr-iov networking, the request id is a valid uuid
- There are a couple of cases where all the PCI devices allocated to an
instance need to be returned. Refer to libvirt driver that handles
soft_reboot and hard_boot of 'xen' instances.
"""
pci_devices = inst.pci_devices
return [device for device in pci_devices if
device.request_id == request_id or request_id == 'all']
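# Illustrative sketch (not part of the original module): the two common calls
# into get_instance_pci_devs above -- a generic request (request_id=None) and
# the 'all' form used when every allocated device is needed.  The helper name
# is hypothetical.
def _example_get_devs(inst):
    generic = get_instance_pci_devs(inst)
    everything = get_instance_pci_devs(inst, request_id='all')
    return generic, everything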
|
HybridF5/jacket
|
jacket/compute/pci/manager.py
|
Python
|
apache-2.0
| 12,313
|
from collections import Counter
from micall.utils.translation import translate, ambig_dict
AMINO_ALPHABET = 'ACDEFGHIKLMNPQRSTVWY*'
MAX_CUTOFF = 'MAX'
FIRST_CUTOFF = 'FIRST'
class SeedAmino(object):
"""
Records the frequencies of amino acids at a given position of the
aligned reads as determined by the consensus sequence.
"""
def __init__(self, consensus_nuc_index, counts=None):
self.v3_overlap = 0
self.consensus_nuc_index = consensus_nuc_index
self.all_consensus_nuc_indexes = set()
if consensus_nuc_index is not None:
self.all_consensus_nuc_indexes.add(consensus_nuc_index)
self.counts = counts or Counter() # {amino: count}
self.codon_counts = Counter() # {codon_nucs: count}
self.nucleotides = []
for i in range(3):
seed_nuc = SeedNucleotide()
if consensus_nuc_index is not None:
seed_nuc.consensus_index = consensus_nuc_index + i
self.nucleotides.append(seed_nuc)
self.low_quality = 0
self.partial = 0
self.deletions = 0
self.read_count = 0
self.ref_offset = 0
self.nucleotides_to_skip = 0
def __repr__(self):
if self.counts:
return 'SeedAmino({!r}, {!r})'.format(self.consensus_nuc_index,
dict(self.counts))
return 'SeedAmino({})'.format(self.consensus_nuc_index)
def count_aminos(self, codon_seq, count):
""" Record a set of reads at this position in the seed reference.
@param codon_seq: a string of three nucleotides that were read at this
position, may be padded with spaces at the start
or end of a sequence, or dashes for deletions
@param count: the number of times they were read
"""
self.read_count += count
self.codon_counts[codon_seq] += count
if 'N' in codon_seq:
self.low_quality += count
elif '---' == codon_seq:
self.deletions += count
elif '-' in codon_seq:
self.partial += count # Partial deletion
elif ' ' not in codon_seq and 'n' not in codon_seq and len(codon_seq) == 3:
amino = translate(codon_seq.upper())
self.counts[amino] += count
elif 'nnn' == codon_seq:
# Don't count the gap between forward and reverse reads in a pair.
self.read_count -= count
for i, nuc in enumerate(codon_seq):
if nuc != ' ':
seed_nucleotide = self.nucleotides[i]
seed_nucleotide.count_nucleotides(nuc, count)
def add(self, other: 'SeedAmino', start_nuc: int = 0, end_nuc: int = 2):
""" Add counts from another SeedAmino to this one.
:param other: source to copy from
:param start_nuc: first nucleotide index to copy: 0, 1, or 2.
:param end_nuc: last nucleotide index to copy: 0, 1, or 2.
"""
self.all_consensus_nuc_indexes.update(other.all_consensus_nuc_indexes)
if (self.read_count and other.read_count and
self.consensus_nuc_index != other.consensus_nuc_index):
self.consensus_nuc_index = None
elif other.read_count:
self.consensus_nuc_index = other.consensus_nuc_index
if 0 < start_nuc or end_nuc < 2:
prefix = ' ' * start_nuc
for nucs, count in other.codon_counts.items():
self.count_aminos(prefix + nucs[start_nuc:end_nuc+1], count)
if self.consensus_nuc_index == other.consensus_nuc_index:
for seed_nuc, other_nuc in zip(self.nucleotides,
other.nucleotides):
seed_nuc.consensus_index = other_nuc.consensus_index
else:
self.counts += other.counts
for nuc, other_nuc in zip(self.nucleotides, other.nucleotides):
nuc.add(other_nuc)
self.partial += other.partial
self.deletions += other.deletions
self.read_count += other.read_count
self.low_quality += other.low_quality
self.nucleotides_to_skip = other.nucleotides_to_skip
self.ref_offset = other.ref_offset
def get_report(self) -> str:
""" Build a report string with the counts of each amino acid.
Report how many times each amino acid was seen in count_aminos().
@return: comma-separated list of counts in the same order as the
AMINO_ALPHABET list
"""
return ','.join([str(self.counts[amino])
for amino in AMINO_ALPHABET])
def apply_repeat(self, repeated_nuc: int) -> 'SeedAmino':
new_amino = SeedAmino(self.consensus_nuc_index)
for codon, count in self.codon_counts.items():
new_codon = codon[:repeated_nuc + 1] + codon[repeated_nuc:2]
new_amino.count_aminos(new_codon, count)
return new_amino
def get_consensus(self) -> str:
""" Find the amino acid that was seen most often in count_aminos().
If there is a tie, just pick one of the tied amino acids.
@return: the letter of the most common amino acid
"""
consensus = self.counts.most_common(1)
if consensus:
return consensus[0][0]
if self.read_count:
return '?'
return '-'
def count_overlap(self, other):
for nuc1, nuc2 in zip(self.nucleotides, other.nucleotides):
nuc1.count_overlap(nuc2)
self.v3_overlap = max(self.v3_overlap, nuc1.v3_overlap)
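# Illustrative sketch (not part of the original module): recording three reads
# of the codon ATG on a SeedAmino counts one methionine three times.  The
# helper name is hypothetical.
def _example_count_aminos():
    amino = SeedAmino(consensus_nuc_index=0)
    amino.count_aminos('ATG', 3)
    return amino.get_consensus()  # 'M'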
class SeedNucleotide(object):
"""
Records the frequencies of nucleotides at a given position of the
aligned reads as determined by the consensus sequence.
"""
COUNTED_NUCS = 'ACTG-'
def __init__(self, counts=None):
self.v3_overlap = self.clip_count = self.insertion_count = 0
self.counts = counts or Counter()
self.consensus_index = None
def __repr__(self):
return 'SeedNucleotide({!r})'.format(dict(self.counts))
def count_nucleotides(self, nuc_seq, count=1):
""" Record a set of reads at this position in the seed reference.
@param nuc_seq: a single nucleotide letter that was read at this
position
@param count: the number of times it was read
"""
        if nuc_seq == 'n':
            # 'n' marks the gap between forward and reverse reads; ignore it.
            pass
        else:
            self.counts[nuc_seq] += count
def add(self, other):
total_count = sum(self.counts.values())
other_count = sum(other.counts.values())
if total_count and other_count:
self.consensus_index = None
elif other_count:
self.consensus_index = other.consensus_index
self.counts += other.counts
self.clip_count += other.clip_count
self.insertion_count += other.insertion_count
def get_report(self):
""" Build a report string with the counts of each nucleotide.
Report how many times each nucleotide was seen in count_nucleotides().
@return: comma-separated list of counts for A, C, G, and T.
"""
return ','.join(map(str, [self.counts[nuc] for nuc in 'ACGT']))
def get_coverage(self):
return sum(self.counts[nuc] for nuc in self.COUNTED_NUCS)
def get_consensus(self, mixture_cutoff, no_coverage='', discard_deletions=False):
""" Choose consensus nucleotide or mixture from the counts.
@param mixture_cutoff: the minimum fraction of reads
that a nucleotide must be found in for it to be considered,
or MAX_CUTOFF to consider only the most common nucleotide.
@param no_coverage: what to return when there are no reads mapped to
this position.
@param discard_deletions: whether to return nothing for the deletions (if False: '-')
@return: The letter for the consensus nucleotide or mixture.
Nucleotide mixtures are encoded by IUPAC symbols, and the most common
nucleotide can be a mixture if there is a tie.
"""
if not self.counts:
return no_coverage
coverage = self.get_coverage()
if mixture_cutoff not in (MAX_CUTOFF, FIRST_CUTOFF):
min_count = coverage * mixture_cutoff
else:
min_count = 0
mixture = []
for nuc, count in self.counts.most_common():
if count < min_count:
break
if nuc in self.COUNTED_NUCS:
mixture.append(nuc)
if mixture_cutoff in (MAX_CUTOFF, FIRST_CUTOFF):
# Catch any ties before breaking out.
min_count = count
has_deletion = '-' in mixture
if has_deletion:
mixture.remove('-')
if len(mixture) > 1:
mixture.sort()
if mixture_cutoff == FIRST_CUTOFF:
consensus = mixture[0]
else:
consensus = ambig_dict[''.join(mixture)]
elif len(mixture) == 1:
# no ambiguity
consensus = mixture[0]
else:
# Nothing left to go in the mixture.
consensus = '-' if has_deletion else 'N'
if has_deletion:
consensus = consensus.lower()
if consensus == '-' and discard_deletions:
consensus = ''
return consensus
def count_overlap(self, other):
for nuc in 'ACGT':
self.v3_overlap += other.counts[nuc]
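# Illustrative sketch (not part of the original module): with 6 A reads and
# 4 G reads, MAX_CUTOFF keeps only the most common base, while a 0.3 mixture
# cutoff keeps both and reports them as one IUPAC code (assuming ambig_dict
# follows the IUPAC ambiguity table, 'AG' -> 'R').  The helper name is
# hypothetical.
def _example_consensus():
    nuc = SeedNucleotide(Counter({'A': 6, 'G': 4}))
    return nuc.get_consensus(MAX_CUTOFF), nuc.get_consensus(0.3)  # ('A', 'R')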
class ReportNucleotide:
def __init__(self, position: int, seed_nucleotide: SeedNucleotide = None):
self.position = position
if seed_nucleotide is None:
self.seed_nucleotide = SeedNucleotide()
else:
self.seed_nucleotide = seed_nucleotide
def __repr__(self):
return f'ReportNucleotide({self.position!r}, {self.seed_nucleotide!r})'
class ReportAmino(object):
def __init__(self, seed_amino: SeedAmino, position: int):
""" Create a new instance.
        @param seed_amino: Counts for the amino acid at this position.
        @param position: the amino acid position within the report.
        """
self.seed_amino = seed_amino
self.position = position
self.max_clip_count = 0
self.insertion_count = 0
def __repr__(self):
return 'ReportAmino({!r}, {})'.format(self.seed_amino, self.position)
|
cfe-lab/MiCall
|
micall/utils/report_amino.py
|
Python
|
agpl-3.0
| 10,418
|
#!/usr/bin/python -tt
# An incredibly simple agent. All we do is find the closest enemy tank, drive
# towards it, and shoot. Note that if friendly fire is allowed, you will very
# often kill your own tanks with this code.
#################################################################
# NOTE TO STUDENTS
# This is a starting point for you. You will need to greatly
# modify this code if you want to do anything useful. But this
# should help you to know how to interact with BZRC in order to
# get the information you need.
#
# After starting the bzrflag server, this is one way to start
# this code:
# python agent0.py [hostname] [port]
#
# Often this translates to something like the following (with the
# port name being printed out by the bzrflag server):
# python agent0.py localhost 49857
#################################################################
import sys
import math
import time
import random
from bzrc import BZRC, Command
class Agent(object):
"""Class handles all command and control logic for a teams tanks."""
def __init__(self, bzrc):
self.bzrc = bzrc
self.constants = self.bzrc.get_constants()
self.commands = []
self.num_ticks = 0
self.MAXTICKS = 100
def tick(self, time_diff):
"""Some time has passed; decide what to do next."""
mytanks, othertanks, flags, shots = self.bzrc.get_lots_o_stuff()
self.mytanks = mytanks
self.othertanks = othertanks
self.flags = [flag for flag in flags if flag.color != self.constants['team']]
self.shots = shots
self.enemies = [tank for tank in othertanks if tank.color !=
self.constants['team']]
self.obstacles = self.bzrc.get_obstacles()
self.commands = []
if self.num_ticks % self.MAXTICKS == 0:
for tank in mytanks:
# make sure the velocity is between 0.5 and 1
magnitude = random.random() * 0.5 + 0.5
relative_angle = 0.5
command = Command(tank.index, magnitude, 2 * relative_angle, False)
self.commands.append(command)
results = self.bzrc.do_commands(self.commands)
self.num_ticks = self.num_ticks + 1
def main():
# Process CLI arguments.
try:
execname, host, port = sys.argv
except ValueError:
execname = sys.argv[0]
print >>sys.stderr, '%s: incorrect number of arguments' % execname
print >>sys.stderr, 'usage: %s hostname port' % sys.argv[0]
sys.exit(-1)
# Connect.
#bzrc = BZRC(host, int(port), debug=True)
bzrc = BZRC(host, int(port))
agent = Agent(bzrc)
prev_time = time.time()
# Run the agent
try:
while True:
time_diff = time.time() - prev_time
agent.tick(time_diff)
except KeyboardInterrupt:
print "Exiting due to keyboard interrupt."
bzrc.close()
if __name__ == '__main__':
main()
# vim: et sw=4 sts=4
|
adamjchristiansen/CS470
|
bzagents/other_pigeons/wild_pigeon.py
|
Python
|
gpl-3.0
| 2,746
|
from functools import reduce as ft_reduce
from .errors import CraftAiError
from .formatters import format_decision_rules
from .operators import OPERATORS
def _is_is_reducer(rule_1, rule_2):
if rule_1["operand"] and (rule_1["operand"] != rule_2["operand"]):
raise CraftAiError(
"Operator '{}' can't have different value. Set to '{}' and receive '{}'".format(
OPERATORS["IS"], rule_1["operand"], rule_2["operand"]
)
)
return {
"property": rule_1["property"],
"operator": OPERATORS["IS"],
"operand": rule_2["operand"],
}
def _in_in_reducer(rule_1, rule_2):
op_1_from = rule_1["operand"][0]
op_1_to = rule_1["operand"][1]
op_2_from = rule_2["operand"][0]
op_2_to = rule_2["operand"][1]
op_1_is_cyclic = op_1_from > op_1_to
op_2_is_cyclic = op_2_from > op_2_to
op_2_from_in_op_1 = (
(op_2_from >= op_1_from or op_2_from <= op_1_to)
if op_1_is_cyclic
else (op_2_from >= op_1_from and op_2_from <= op_1_to)
)
op_2_to_in_op_1 = (
(op_2_to >= op_1_from or op_2_to <= op_1_to)
if op_1_is_cyclic
else (op_2_to >= op_1_from and op_2_to <= op_1_to)
)
op_1_from_in_op_2 = (
(op_1_from >= op_2_from or op_1_from <= op_2_to)
if op_2_is_cyclic
else (op_1_from >= op_2_from and op_1_from <= op_2_to)
)
op_1_to_in_op_2 = (
(op_1_to >= op_2_from or op_1_to <= op_2_to)
if op_2_is_cyclic
else (op_1_to >= op_2_from and op_1_to <= op_2_to)
)
if op_1_from_in_op_2 and op_1_to_in_op_2:
# op_1 belongs to op_2
# | op_1 |
# | op_2 |
return rule_1
if op_2_from_in_op_1 and op_2_to_in_op_1:
# op_2 belongs to op_1
# | op_1 |
# | op_2 |
return rule_2
if op_2_from_in_op_1 and op_1_to_in_op_2:
# overlap 1
# | op_1 |
# | op_2 |
return {
"property": rule_1["property"],
"operator": OPERATORS["IN_INTERVAL"],
"operand": [op_2_from, op_1_to],
}
if op_2_to_in_op_1 and op_1_from_in_op_2:
# overlap 2
# | op_1 |
# | op_2 |
return {
"property": rule_1["property"],
"operator": OPERATORS["IN_INTERVAL"],
"operand": [op_1_from, op_2_to],
}
# disjointed
# | op_1 |
# | op_2 |
raise CraftAiError(
"""Unable to reduce decision rules '{}' and '{}': """
"""the resulting rule is not fulfillable.""".format(
format_decision_rules([rule_1]), format_decision_rules([rule_2])
)
)
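# A worked example (added for illustration, not from the original source):
# with cyclic time-of-day intervals, [22, 6] wraps past midnight because
# from > to, and [23, 2] lies entirely inside it, so the tighter rule wins:
#   _in_in_reducer(
#       {"property": "t", "operator": OPERATORS["IN_INTERVAL"], "operand": [22, 6]},
#       {"property": "t", "operator": OPERATORS["IN_INTERVAL"], "operand": [23, 2]},
#   )  # -> returns the second rule, operand [23, 2]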
def _in_gte_reducer(rule_1, rule_2):
op_1_from = rule_1["operand"][0]
op_1_to = rule_1["operand"][1]
op_2 = rule_2["operand"]
op_1_is_cyclic = op_1_from > op_1_to
if op_1_is_cyclic:
        # Cyclic intervals make no sense with single-bound limits
raise CraftAiError(
"""Unable to reduce decision rules '{}' and '{}': """
"""the resulting rule is not fulfillable.""".format(
format_decision_rules([rule_1]), format_decision_rules([rule_2])
)
)
if op_2 >= op_1_to:
# op_2 after op_1, disjointed
# | op_1 |
# |op_2
raise CraftAiError(
"""Unable to reduce decision rules '{}' and '{}': """
"""the resulting rule is not fulfillable.""".format(
format_decision_rules([rule_1]), format_decision_rules([rule_2])
)
)
if op_2 >= op_1_from and op_2 < op_1_to:
# op_2 belongs to op_1
# | op_1 |
# |op_2
return {
"property": rule_1["property"],
"operator": OPERATORS["IN_INTERVAL"],
"operand": [op_2, op_1_to],
}
# op_2 before op_1
# | op_1 |
# |op_2
return rule_1
def _in_lt_reducer(rule_1, rule_2):
op_1_from = rule_1["operand"][0]
op_1_to = rule_1["operand"][1]
op_2 = rule_2["operand"]
op_1_is_cyclic = op_1_from > op_1_to
if op_1_is_cyclic:
        # Cyclic intervals make no sense with single-bound limits
raise CraftAiError(
"""Unable to reduce decision rules '{}' and '{}': """
"""the resulting rule is not fulfillable.""".format(
format_decision_rules([rule_1]), format_decision_rules([rule_2])
)
)
if op_2 < op_1_from:
# op_2 before op_1, disjointed
# | op_1 |
# op_2|
raise CraftAiError(
"""Unable to reduce decision rules '{}' and '{}': """
"""the resulting rule is not fulfillable.""".format(
format_decision_rules([rule_1]), format_decision_rules([rule_2])
)
)
if op_2 >= op_1_from and op_2 < op_1_to:
# op_2 belongs to op_1
# | op_1 |
# |op_2
return {
"property": rule_1["property"],
"operator": OPERATORS["IN_INTERVAL"],
"operand": [op_1_from, op_2],
}
# op_2 after op_1
# | op_1 |
# op_2|
return rule_1
def _gte_lt_reducer(rule_1, rule_2):
new_lower_bound = rule_1["operand"]
new_upper_bound = rule_2["operand"]
if new_upper_bound < new_lower_bound:
raise CraftAiError(
"""Unable to reduce decision rules '{}' and '{}': """
"""the resulting rule is not fulfillable.""".format(
format_decision_rules([rule_1]), format_decision_rules([rule_2])
)
)
return {
"property": rule_1["property"],
"operator": OPERATORS["IN_INTERVAL"],
"operand": [new_lower_bound, new_upper_bound],
}
REDUCER_FROM_DECISION_RULE = {
OPERATORS["IS"]: {OPERATORS["IS"]: _is_is_reducer},
OPERATORS["IN_INTERVAL"]: {
OPERATORS["IN_INTERVAL"]: _in_in_reducer,
OPERATORS["GTE"]: _in_gte_reducer,
OPERATORS["LT"]: _in_lt_reducer,
},
OPERATORS["GTE"]: {
OPERATORS["IN_INTERVAL"]: lambda rule_1, rule_2: _in_gte_reducer(
rule_2, rule_1
),
OPERATORS["GTE"]: lambda rule_1, rule_2: {
"property": rule_1["property"],
"operator": OPERATORS["GTE"],
"operand": max(rule_1["operand"], rule_2["operand"]),
},
OPERATORS["LT"]: _gte_lt_reducer,
},
OPERATORS["LT"]: {
OPERATORS["IN_INTERVAL"]: lambda rule_1, rule_2: _in_lt_reducer(rule_2, rule_1),
OPERATORS["GTE"]: lambda rule_1, rule_2: _gte_lt_reducer(rule_2, rule_1),
OPERATORS["LT"]: lambda rule_1, rule_2: {
"property": rule_1["property"],
"operator": OPERATORS["LT"],
"operand": min(rule_1["operand"], rule_2["operand"]),
},
},
}
def _decision_rules_reducer(rule_1, rule_2):
if rule_1 is None or rule_2 is None:
return rule_1 if rule_1 is not None else rule_2
if (
rule_1["operator"] not in REDUCER_FROM_DECISION_RULE
or rule_2["operator"] not in REDUCER_FROM_DECISION_RULE[rule_1["operator"]]
):
raise CraftAiError(
"""Unable to reduce decision rules '{}' and '{}': """
"""incompatible operators.""".format(
format_decision_rules([rule_1]), format_decision_rules([rule_2])
)
)
return REDUCER_FROM_DECISION_RULE[rule_1["operator"]][rule_2["operator"]](
rule_1, rule_2
)
def _unique_seq(seq):
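    # Order-preserving de-duplication: keep the first occurrence of each item.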
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def reduce_decision_rules(rules):
properties = _unique_seq([rule["property"] for rule in rules])
return [
ft_reduce(
_decision_rules_reducer, [rule for rule in rules if rule["property"] == p]
)
for p in properties
]
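# A minimal, hedged demo (added for illustration, not part of the original
# module): a GTE rule and an LT rule on the same property collapse into a
# single IN_INTERVAL rule via _gte_lt_reducer. The operator strings behind
# OPERATORS[...] live in .operators and are only referenced symbolically here.
if __name__ == "__main__":
    example_rules = [
        {"property": "age", "operator": OPERATORS["GTE"], "operand": 18},
        {"property": "age", "operator": OPERATORS["LT"], "operand": 65},
    ]
    # Expected: one reduced rule, IN_INTERVAL with operand [18, 65]
    print(reduce_decision_rules(example_rules))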
|
craft-ai/craft-ai-client-python
|
craft_ai/reducer.py
|
Python
|
bsd-3-clause
| 8,085
|
#coding: utf-8
from scapy.all import *
class WILDCARD:
""" Used to indicate that some fields in a scapy packet should be ignored when comparing """
pass
class NO_PKT:
""" Indicate that a sent packet should have no reply """
pass
def pkt_match(expected, actual):
""" Check if all fields described in packet `expected` match the fields of pkt `actual`' """
if expected == NO_PKT and actual == NO_PKT:
return True
elif expected == NO_PKT or actual == NO_PKT:
return False
if expected.oif != WILDCARD and expected.oif != actual.oif:
# This can't be added to `fields` because it's not a proper scapy field
return False
fields = {
IPv6: ('src', 'dst'),
IPv6ExtHdrSegmentRouting: ('addresses', 'lastentry', 'segleft', 'tag',
'unused1', 'protected', 'oam', 'alert', 'hmac', 'unused2'), # Flags
IPv6ExtHdrSegmentRoutingTLVHMAC : ('hmac', 'keyid'),
IPv6ExtHdrSegmentRoutingTLVIngressNode : ('ingress_node',),
IPv6ExtHdrSegmentRoutingTLVEgressNode : ('egress_node',),
IPv6ExtHdrSegmentRoutingTLVOpaque : ('container',),
IPv6ExtHdrSegmentRoutingTLVPadding : ('len',),
IPv6ExtHdrSegmentRoutingTLVNSHCarrier : ('nsh_object',),
IPv6ExtHdrSegmentRoutingTLV : ('type', 'value'),
TCP: ('sport', 'dport'),
UDP: ('sport', 'dport'),
Raw: ('load',)
}
layer = 0
    while True:
sub_expected, sub_actual = expected.getlayer(layer), actual.getlayer(layer)
if sub_expected.__class__ != sub_actual.__class__:
return False
        if sub_actual is None:  # compared all layers
return True
if sub_actual.__class__ not in fields: # Unknown layer ..
return False
for field in fields[sub_expected.__class__]:
# Don't care if field not set in expected packet
if getattr(sub_expected, field) != WILDCARD and \
getattr(sub_expected, field) != getattr(sub_actual, field):
return False
layer += 1
def pkt_str(pkt):
if pkt == NO_PKT:
return "none"
_ = lambda x: x if x != WILDCARD else "*"
def srh_str(srh):
from collections import OrderedDict
segs = list(srh.addresses)
if srh.segleft and srh.segleft < len(segs):
segs[srh.segleft] = "+"+segs[srh.segleft]
options = OrderedDict((('sl',srh.segleft), ('le',srh.lastentry)))
if srh.tag:
options['tag'] = srh.tag
flags = ""
        fl_mapping = {'oam': 'O', 'hmac': 'H', 'alert': 'A', 'protected': 'P'}  # TODO: order the flags as in the draft
for key,val in fl_mapping.items():
if getattr(srh,key) == 1:
flags += val
if flags != "":
options['fl'] = flags
tlvs = []
for tlv in srh.tlv_objects:
if isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVHMAC):
tlvs.append('{{HMAC: {}, {}}}'.format(tlv.hmac.encode('hex'), tlv.keyid))
elif isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVPadding):
tlvs.append('{{Pad: {}}}'.format(tlv.len))
elif isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVIngressNode):
tlvs.append('{{Ingr: {}}}'.format(tlv.ingress_node))
elif isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVEgressNode):
tlvs.append('{{Egr: {}}}'.format(tlv.egress_node))
elif isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVOpaque):
tlvs.append('{{Opaq: {}}}'.format(tlv.container.encode('hex')))
elif isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVNSHCarrier):
tlvs.append('{{NSH: {}}}'.format(tlv.nsh_object.encode('hex')))
else:
tlvs.append('{{Type:{} Value:{}}}'.format(tlv.type, tlv.value.encode('hex')))
return "[{}] <{}>{}".format(",".join(segs), ",".join(map(lambda key: "{} {}".format(key, options[key]),options)), "" if not tlvs else " "+" ".join(tlvs))
def ip_str(ip):
return "{} -> {}".format(_(ip.src), _(ip.dst))
def udp_str(udp):
if udp.sport or udp.dport:
return "UDP({},{})".format(_(udp.sport), _(udp.dport))
return "UDP"
def tcp_str(tcp):
if tcp.sport or tcp.dport:
return "TCP({},{})".format(_(tcp.sport), _(tcp.dport))
return "TCP"
def payload_str(raw):
if raw.load == WILDCARD:
return "*"
return '"{}"'.format(raw.load)
fcts = {
IPv6: ip_str,
IPv6ExtHdrSegmentRouting: srh_str,
UDP: udp_str,
TCP: tcp_str,
Raw: payload_str
}
i = 0
protos = []
    while True:
layer = pkt.getlayer(i)
        if layer is None:
break
elif isinstance(layer, IPv6ExtHdrSegmentRoutingTLV):
pass
elif layer.__class__ in fcts:
protos.append(fcts[layer.__class__](layer))
else:
protos.append(layer.name)
i += 1
iface = ""
if pkt.oif and pkt.oif != "dum0" and pkt.oif != WILDCARD:
iface = "({}) ".format(pkt.oif)
return iface+" / ".join(protos)
class Event:
type = None
    cmd = None  # only used if CMD
pkt = None # only used if PKT
answer = None
expected_answer = None
oif = None # only used if OIF
PKT = 1
CMD = 2
OIF = 3
def __unicode__(self):
return self.__str__()
def __str__(self):
if self.type == Event.PKT:
s = "> {}".format(self.pkt)
if self.expected_answer:
s += "\n< {}".format(self.expected_answer)
return s
elif self.type == Event.CMD:
return "`"+self.cmd+"`"
elif self.type == Event.OIF:
return "if add {}".format(self.oif)
else:
return "Unknown event"
def __repr__(self):
return self.__str__()
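# A hedged usage sketch (added for illustration, not part of the original
# module): any field set to WILDCARD in the expected packet is skipped by
# pkt_match(), so these packets match despite differing destinations, source
# ports and payloads. The oif attribute is assumed to be attached by the
# surrounding test framework, since pkt_match() compares it separately.
if __name__ == '__main__':
    expected = IPv6(src="fc00::1", dst=WILDCARD) / UDP(sport=WILDCARD, dport=53) / Raw(load=WILDCARD)
    actual = IPv6(src="fc00::1", dst="fc00::2") / UDP(sport=4242, dport=53) / Raw(load="x")
    expected.oif = WILDCARD
    actual.oif = "dum0"
    print(pkt_match(expected, actual))  # True under these assumptions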
|
Zashas/segway
|
structs.py
|
Python
|
gpl-3.0
| 5,933
|
"""
This script plots two curves from data stored in an SQLite database. Each
curve shows percentage correct as a function of the assumed hazard rate.
"""
import dataset
import matplotlib.pyplot as plt
def plot_clicks_perfs(low_rate, high_rate, data, fignum):
"""
Generate two curves (one per parameter pair)
Plot curves in order to visualize the cross-over effect
:param pair1: 2-element tuple (snr, h)
:snr: float
:h: float in Hz
:param pair2: like pair1, for the second curve
:param data: database object from dataset module
:param columnnames: dict containing column names from db
columnnames = {'commit': 'comm',
'trial_duration': 'dur',
'snr': 'snr',
'h': 'h',
'seed': 'seed',
'bin_number': 'numb',
'bin_width': 'bwidth',
'init_state': 'init',
'end_state': 'end',
'decision': 'dec',
'correctness': 'correct'}
:param fignum: figure number (integer)
:return: produces cross-over plot for the two given curves
"""
result1 = data.query('SELECT {} AS assumedrate, AVG({}) AS performance FROM perf \
WHERE {} = {} AND {} = {} \
GROUP BY {} \
ORDER BY {};'.format('assumedh',
'score',
'lowrate', low_rate[0],
'highrate', high_rate[0],
'assumedh',
'assumedh'))
curve1 = []
x1 = []
maxpoints = 4
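    # Plot at most `maxpoints` assumed-rate values per curve.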
points = 1
for row_data in result1:
if points > maxpoints:
break
else:
points += 1
x1 += [row_data['assumedrate']]
curve1 += [row_data['performance']]
result2 = data.query('SELECT {} AS assumedrate, AVG({}) AS performance FROM perf \
WHERE {} = {} AND {} = {} \
GROUP BY {} \
ORDER BY {};'.format('assumedh',
'score',
'lowrate', low_rate[1],
'highrate', high_rate[1],
'assumedh',
'assumedh'))
curve2 = []
x2 = []
points = 1
for row_data in result2:
if points > maxpoints:
break
else:
points += 1
x2 += [row_data['assumedrate']]
curve2 += [row_data['performance']]
plt.figure(fignum)
plt.plot(x1, curve1, x2, curve2, linewidth=3.0)
plt.title('ODE - 10,000 trials per point - low/high rate = 14/26')
plt.legend(['low ' + str(low_rate[0]) + '; high ' + str(high_rate[0]),
'low ' + str(low_rate[1]) + '; high ' + str(high_rate[1])])
plt.xlabel('assumed hazard rate')
plt.ylabel('percentage correct')
plt.ylim(.5,1)
plt.show()
if __name__ == "__main__":
# name of SQLite db
dbname = 'compute11_10000_lowsnr_h1_clicks'
# create connection to SQLite db
db = dataset.connect('sqlite:///' + dbname + '.db')
# get handle for specific table of the db
table = db['perf']
plot_clicks_perfs([14, 8], [26, 32], db, 1)
|
aernesto/change-rate-inference
|
ClickTask/Python/Performances/plot_2perfs_clicks.py
|
Python
|
mit
| 3,716
|
"""This module provides datastructures and algorithms for scaling."""
from __future__ import annotations
|
dials/dials
|
algorithms/scaling/__init__.py
|
Python
|
bsd-3-clause
| 106
|