commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
1518347c2c1ceb482031ca091d54dcae25eed083 | Refactor flip | zl/indicators/flip.py | zl/indicators/flip.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Jason Koelker
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import numbers
from zipline.transforms import utils as transforms
BULL = 'Bull'
BEAR = 'Bear'
def flip(events, field):
Yp = events[-1][field]
Xp = events[-2][field]
X = events[0][field]
Y = events[1][field]
if (Xp > X) and (Yp < Y):
return BEAR
if (Xp < X) and (Yp > Y):
return BULL
class Flip(object):
__metaclass__ = transforms.TransformMeta
def __init__(self, period=4, field='close_price'):
self.period = period
self.field = field
self.sid_windows = collections.defaultdict(self.create_window)
def create_window(self):
return FlipWindow(self.period, self.field)
def update(self, event):
window = self.sid_windows[event.sid]
window.update(event)
return window()
class FlipWindow(transforms.EventWindow):
def __init__(self, period, field):
transforms.EventWindow.__init__(self, window_length=period + 2)
self.period = period
self.field = field
def handle_add(self, event):
assert self.field in event
assert isinstance(event[self.field], numbers.Number)
def handle_remove(self, event):
pass
def __call__(self):
if len(self.ticks) < self.window_length:
return
return flip(self.ticks, self.field)
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Jason Koelker
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import numbers
from zipline.transforms import utils as transforms
BULL = 'Bull'
BEAR = 'Bear'
class Flip(object):
__metaclass__ = transforms.TransformMeta
def __init__(self, period=4, setup_price='close_price'):
self.period = period
self.setup_price = setup_price
self.sid_windows = collections.defaultdict(self.create_window)
def create_window(self):
return FlipWindow(self.period, self.setup_price)
def update(self, event):
window = self.sid_windows[event.sid]
window.update(event)
return window()
class FlipWindow(transforms.EventWindow):
def __init__(self, period, setup_price):
transforms.EventWindow.__init__(self, window_length=period + 2)
self.period = period
self.setup_price = setup_price
def handle_add(self, event):
assert self.setup_price in event
assert isinstance(event[self.setup_price], numbers.Number)
def handle_remove(self, event):
pass
def __call__(self):
if len(self.ticks) < self.window_length:
return
Yp = self.ticks[-1][self.setup_price]
Xp = self.ticks[-2][self.setup_price]
X = self.ticks[0][self.setup_price]
Y = self.ticks[1][self.setup_price]
if (Xp > X) and (Yp < Y):
return BEAR
if (Xp < X) and (Yp > Y):
return BULL
| Python | 0 |
0be54cb28387c535bea17e6c3a1a277151b9648a | Add the url name for students_info view to gci.views.helper.url_names. | app/soc/modules/gci/views/helper/url_names.py | app/soc/modules/gci/views/helper/url_names.py | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for storing GCI related URL names.
"""
GCI_LIST_ORG_INVITES = 'gci_list_org_invites'
GCI_LIST_INVITES = 'gci_list_invites'
GCI_MANAGE_INVITE = 'gci_manage_invite'
GCI_RESPOND_INVITE = 'gci_respond_invite'
GCI_SEND_INVITE = 'gci_send_invite'
GCI_MANAGE_REQUEST = 'gci_manage_request'
GCI_RESPOND_REQUEST = 'gci_respond_request'
GCI_SEND_REQUEST = 'gci_send_request'
GCI_LEADERBOARD = 'gci_leaderboard'
GCI_STUDENT_TASKS = 'gci_student_tasks'
GCI_STUDENT_FORM_DOWNLOAD = 'gci_student_form_download'
CREATE_GCI_ORG_PROFILE = 'create_gci_org_profile'
EDIT_GCI_ORG_PROFILE = 'edit_gci_org_profile'
GCI_ORG_HOME = 'gci_org_home'
GCI_VIEW_TASK = 'gci_view_task'
# GET PARAMETERS WHICH ARE USED THROUGHOUT THE MODULE
#TODO(dhans): consider creation of a separate module for that
"""GET parameter which should be set in order to download Consent Form.
"""
CONSENT_FORM_GET_PARAM = 'consent_form'
"""GET parameter which should be set in order to download Student ID Form.
"""
STUDENT_ID_FORM_GET_PARAM = 'student_id_form'
GCI_STUDENTS_INFO = 'gci_students_info' | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for storing GCI related URL names.
"""
GCI_LIST_ORG_INVITES = 'gci_list_org_invites'
GCI_LIST_INVITES = 'gci_list_invites'
GCI_MANAGE_INVITE = 'gci_manage_invite'
GCI_RESPOND_INVITE = 'gci_respond_invite'
GCI_SEND_INVITE = 'gci_send_invite'
GCI_MANAGE_REQUEST = 'gci_manage_request'
GCI_RESPOND_REQUEST = 'gci_respond_request'
GCI_SEND_REQUEST = 'gci_send_request'
GCI_LEADERBOARD = 'gci_leaderboard'
GCI_STUDENT_TASKS = 'gci_student_tasks'
GCI_STUDENT_FORM_DOWNLOAD = 'gci_student_form_download'
CREATE_GCI_ORG_PROFILE = 'create_gci_org_profile'
EDIT_GCI_ORG_PROFILE = 'edit_gci_org_profile'
GCI_ORG_HOME = 'gci_org_home'
GCI_VIEW_TASK = 'gci_view_task'
# GET PARAMETERS WHICH ARE USED THROUGHOUT THE MODULE
#TODO(dhans): consider creation of a separate module for that
"""GET parameter which should be set in order to download Consent Form.
"""
CONSENT_FORM_GET_PARAM = 'consent_form'
"""GET parameter which should be set in order to download Student ID Form.
"""
STUDENT_ID_FORM_GET_PARAM = 'student_id_form' | Python | 0 |
65c6c0b5ac47caac71c6c1284d84c1004d348c01 | Fix imports at top of file. | partner_relations/model/__init__.py | partner_relations/model/__init__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import res_partner
from . import res_partner_relation
from . import res_partner_relation_type
from . import res_partner_relation_all
from . import res_partner_relation_type_selection
PADDING = 10
def get_partner_type(partner):
"""Get partner type for relation.
:param partner: a res.partner either a company or not
:return: 'c' for company or 'p' for person
:rtype: str
"""
return 'c' if partner.is_company else 'p'
| # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
PADDING = 10
def get_partner_type(partner):
"""Get partner type for relation.
:param partner: a res.partner either a company or not
:return: 'c' for company or 'p' for person
:rtype: str
"""
return 'c' if partner.is_company else 'p'
from . import res_partner
from . import res_partner_relation
from . import res_partner_relation_type
from . import res_partner_relation_all
from . import res_partner_relation_type_selection
| Python | 0 |
59ce82a3a98be7eb68c6d41117bced6802a84ee1 | fix for latest jwt | pivportal/lib/pivportal/security.py | pivportal/lib/pivportal/security.py | """ Command Line Interface Module """
from flask import Response, request
import json
import re
import jwt
import datetime
from functools import wraps
# Redis "requests" hash
# {"12345678": { "username": X, "client_ip": X, "authorized": False, "time": time.time()}}
# {"dn1": "user1", "dn2": "user2"}
dn_to_username = {}
register_ticket_timeout = 60
def dn_is_valid(dn):
if re.match(r'^[a-zA-Z0-9_\-\,\(\)\+\=\:\s\. ]+$', dn):
return True
return False
def username_is_valid(username):
if re.match(r'^[a-zA-Z0-9_\-]+$', username) and username in dn_to_username.values():
return True
return False
def requestid_is_valid(requestid):
if re.match(r'^[a-zA-Z0-9]+$', requestid) and len(requestid) == 16:
return True
return False
def ip_is_valid(ip):
if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', ip):
return True
return False
def is_duplicate_register(username, requestid, auth_requests):
if requestid in auth_requests:
this_request = json.loads(auth_requests[requestid])
if this_request["username"] == username:
# Request Is Already Registered
return True
return False
def create_token(user, secret_key):
payload = {
# subject
'sub': user,
#issued at
'iat': datetime.datetime.utcnow(),
#expiry
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1)
}
token = jwt.encode(payload, secret_key, algorithm='HS256')
return token.encode().decode('unicode_escape')
def parse_token(token, secret_key):
return jwt.encode().decode(token, secret_key, algorithms='HS256')
def token_required(secret_key):
def token_required_decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
g = f.__globals__
if not request.headers.get('Authorization'):
return Response(response="Missing authorization header", status=401)
try:
payload = parse_token(request.headers.get('Authorization').split()[1], secret_key)
except jwt.exceptions.DecodeError:
return Response(response="Token is invalid", status=401)
except jwt.exceptions.ExpiredSignatureError:
return Response(response="Token has expired", status=401)
# Set username for decorated func
g["username"] = payload['sub']
return f(*args, **kwargs)
return decorated_function
return token_required_decorator
def valid_client_cert_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
g = f.__globals__
if not request.headers.get('SSL_CLIENT_S_DN'):
return Response(response="Missing Client DN Header", status=401)
# Get Client DN
user_dn = request.headers.get('SSL_CLIENT_S_DN')
# Valid DN
if not dn_is_valid(user_dn):
return Response(response=json.dumps({"response": " Invalid Request DN %s" % user_dn}), status=400, mimetype="application/json")
# Authorize User
if user_dn not in dn_to_username:
return Response(response=json.dumps({"response": "Authentication Failure for DN %s" % user_dn}), status=401, mimetype="application/json")
username = dn_to_username[user_dn]
# Verify Request
if not username_is_valid(username):
return Response(response=json.dumps({"response": " Invalid Request Username"}), status=400, mimetype="application/json")
# Set username for decorated func
g["username"] = username
return f(*args, **kwargs)
return decorated_function
| """ Command Line Interface Module """
from flask import Response, request
import json
import re
import jwt
import datetime
from functools import wraps
# Redis "requests" hash
# {"12345678": { "username": X, "client_ip": X, "authorized": False, "time": time.time()}}
# {"dn1": "user1", "dn2": "user2"}
dn_to_username = {}
register_ticket_timeout = 60
def dn_is_valid(dn):
if re.match(r'^[a-zA-Z0-9_\-\,\(\)\+\=\:\s\. ]+$', dn):
return True
return False
def username_is_valid(username):
if re.match(r'^[a-zA-Z0-9_\-]+$', username) and username in dn_to_username.values():
return True
return False
def requestid_is_valid(requestid):
if re.match(r'^[a-zA-Z0-9]+$', requestid) and len(requestid) == 16:
return True
return False
def ip_is_valid(ip):
if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', ip):
return True
return False
def is_duplicate_register(username, requestid, auth_requests):
if requestid in auth_requests:
this_request = json.loads(auth_requests[requestid])
if this_request["username"] == username:
# Request Is Already Registered
return True
return False
def create_token(user, secret_key):
payload = {
# subject
'sub': user,
#issued at
'iat': datetime.datetime.utcnow(),
#expiry
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1)
}
token = jwt.encode(payload, secret_key, algorithm='HS256')
return token.encode().decode('unicode_escape')
def parse_token(token, secret_key):
return jwt.encode().decode(token, secret_key, algorithms='HS256')
def token_required(secret_key):
def token_required_decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
g = f.__globals__
if not request.headers.get('Authorization'):
return Response(response="Missing authorization header", status=401)
try:
payload = parse_token(request.headers.get('Authorization').split()[1], secret_key)
except jwt.DecodeError:
return Response(response="Token is invalid", status=401)
except jwt.ExpiredSignatureError:
return Response(response="Token has expired", status=401)
# Set username for decorated func
g["username"] = payload['sub']
return f(*args, **kwargs)
return decorated_function
return token_required_decorator
def valid_client_cert_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
g = f.__globals__
if not request.headers.get('SSL_CLIENT_S_DN'):
return Response(response="Missing Client DN Header", status=401)
# Get Client DN
user_dn = request.headers.get('SSL_CLIENT_S_DN')
# Valid DN
if not dn_is_valid(user_dn):
return Response(response=json.dumps({"response": " Invalid Request DN %s" % user_dn}), status=400, mimetype="application/json")
# Authorize User
if user_dn not in dn_to_username:
return Response(response=json.dumps({"response": "Authentication Failure for DN %s" % user_dn}), status=401, mimetype="application/json")
username = dn_to_username[user_dn]
# Verify Request
if not username_is_valid(username):
return Response(response=json.dumps({"response": " Invalid Request Username"}), status=400, mimetype="application/json")
# Set username for decorated func
g["username"] = username
return f(*args, **kwargs)
return decorated_function
| Python | 0 |
a056ddc885d7eb333ab323f7552bfffd35635a8a | Add period at end of plug-in description | plugins/ChangeLogPlugin/__init__.py | plugins/ChangeLogPlugin/__init__.py | # Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from . import ChangeLog
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
return {
"plugin": {
"name": catalog.i18nc("@label", "Changelog"),
"author": "Ultimaker",
"version": "1.0",
"description": catalog.i18nc("@info:whatsthis", "Shows changes since latest checked version."),
"api": 2
}
}
def register(app):
return {"extension": ChangeLog.ChangeLog()} | # Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from . import ChangeLog
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
return {
"plugin": {
"name": catalog.i18nc("@label", "Changelog"),
"author": "Ultimaker",
"version": "1.0",
"description": catalog.i18nc("@info:whatsthis", "Shows changes since latest checked version"),
"api": 2
}
}
def register(app):
return {"extension": ChangeLog.ChangeLog()} | Python | 0 |
9d92862f903b4683f1365e7ae82dd48d60e86d34 | Add new urls, login and register | aeSupernova/urls.py | aeSupernova/urls.py | from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib import admin
from login import views
import login
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'aeSupernova.views.home', name='home'),
# url(r'^aeSupernova/', include('aeSupernova.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
#url(r'^$', 'aeSupernova.view.index'),
url(r'^index/$', TemplateView.as_view(template_name='index.html')),
url(r'^opticalSheet/', include('aeSupernova.opticalSheet.urls')),
url(r'^datafile/', include('aeSupernova.datafile.urls')),
url(r'^header/', include('aeSupernova.header.urls')),
url(r'^generator/', include('aeSupernova.generator.urls')),
url(r'^control/', include('aeSupernova.control.urls')),
url(r'^presentation/', include('aeSupernova.presentation.urls')),
url(r'^encoder/', include('aeSupernova.encoder.urls')),
url(r'^lerJupiter/', include('aeSupernova.lerJupiter.urls')),
url(r'^algeLin/', include('aeSupernova.algeLin.urls')),
url(r'^login/', include('login.urls')),
url(r'^register/$', views.register, name='register'),
)
| from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'aeSupernova.views.home', name='home'),
# url(r'^aeSupernova/', include('aeSupernova.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
#url(r'^$', 'aeSupernova.view.index'),
url(r'^index/$', TemplateView.as_view(template_name='index.html')),
url(r'^opticalSheet/', include('aeSupernova.opticalSheet.urls')),
url(r'^datafile/', include('aeSupernova.datafile.urls')),
url(r'^header/', include('aeSupernova.header.urls')),
url(r'^generator/', include('aeSupernova.generator.urls')),
url(r'^control/', include('aeSupernova.control.urls')),
url(r'^presentation/', include('aeSupernova.presentation.urls')),
url(r'^encoder/', include('aeSupernova.encoder.urls')),
url(r'^lerJupiter/', include('aeSupernova.lerJupiter.urls')),
url(r'^algeLin/', include('aeSupernova.algeLin.urls')),
)
| Python | 0 |
1fb2a774765bc46e1bc2474136f135c59006c787 | Return ConversationType in serializer | yunity/conversations/serializers.py | yunity/conversations/serializers.py | from rest_framework import serializers
from rest_framework.fields import CharField, DateTimeField, SerializerMethodField
from rest_framework.relations import PrimaryKeyRelatedField
from yunity.api.serializers import UserSerializer
from yunity.conversations.models import ConversationMessage as MessageModel, ConversationType
from yunity.conversations.models import Conversation as ConversationModel
from yunity.users.models import User as UserModel
class MessageSerializer(serializers.Serializer):
content = CharField(max_length=100000)
author = PrimaryKeyRelatedField(read_only=True)
time = DateTimeField(read_only=True, source='created_at')
def create(self, validated_data):
message = MessageModel.objects.create(
sent_by_id=self.context['request'].user.id,
in_conversation_id=self.context['request'].data['in_conversation_id'],
**validated_data)
return message
class ConversationSerializer(serializers.Serializer):
topic = CharField(max_length=150, required=False)
# Writing
with_participants = PrimaryKeyRelatedField(many=True, write_only=True, queryset=UserModel.objects.all())
message = CharField(max_length=100000, write_only=True)
# Reading
id = PrimaryKeyRelatedField(read_only=True)
type = SerializerMethodField(read_only=True)
participants = UserSerializer(many=True, read_only=True)
messages = MessageSerializer(many=True, read_only=True)
def get_type(self, obj):
return ConversationType.name(obj.type)
def create(self, validated_data):
"""
Create new conversation with other users and a message
"""
participant_ids = [_.id for _ in validated_data['with_participants']] + \
[self.context['request'].user.id, ]
if len(participant_ids) > 2:
chat_type = ConversationType.MULTICHAT
else:
chat_type = ConversationType.ONE_ON_ONE
chat = ConversationModel.objects.create(type=chat_type)
chat.participants = participant_ids
chat.save()
MessageModel.objects.create(
sent_by_id=self.context['request'].user.id,
in_conversation_id=chat.id,
content=validated_data['message']['content'],
)
return chat
def update(self, conversation, validated_data):
conversation.name = validated_data.get('name', conversation.name)
conversation.save()
return conversation
def validate_with_participants(self, value):
if len(value) < 1:
raise serializers.ValidationError("No chat participants given")
if len(value) == 1 and self.context['request'].user.id in value:
raise serializers.ValidationError("Requesting user is only participant")
return value
| from rest_framework import serializers
from rest_framework.fields import CharField, DateTimeField
from rest_framework.relations import PrimaryKeyRelatedField
from yunity.api.serializers import UserSerializer
from yunity.conversations.models import ConversationMessage as MessageModel, ConversationType
from yunity.conversations.models import Conversation as ConversationModel
from yunity.users.models import User as UserModel
class MessageSerializer(serializers.Serializer):
content = CharField(max_length=100000)
author = PrimaryKeyRelatedField(read_only=True)
time = DateTimeField(read_only=True, source='created_at')
def create(self, validated_data):
message = MessageModel.objects.create(
sent_by_id=self.context['request'].user.id,
in_conversation_id=self.context['request'].data['in_conversation_id'],
**validated_data)
return message
class ConversationSerializer(serializers.Serializer):
topic = CharField(max_length=150, required=False)
# Writing
with_participants = PrimaryKeyRelatedField(many=True, write_only=True, queryset=UserModel.objects.all())
message = CharField(max_length=100000, write_only=True)
# Reading
id = PrimaryKeyRelatedField(read_only=True)
participants = UserSerializer(many=True, read_only=True)
messages = MessageSerializer(many=True, read_only=True)
def create(self, validated_data):
"""
Create new conversation with other users and a message
"""
participant_ids = [_.id for _ in validated_data['with_participants']] + \
[self.context['request'].user.id, ]
if len(participant_ids) > 2:
chat_type = ConversationType.MULTICHAT
else:
chat_type = ConversationType.ONE_ON_ONE
chat = ConversationModel.objects.create(type=chat_type)
chat.participants = participant_ids
chat.save()
MessageModel.objects.create(
sent_by_id=self.context['request'].user.id,
in_conversation_id=chat.id,
content=validated_data['message']['content'],
)
return chat
def update(self, conversation, validated_data):
conversation.name = validated_data.get('name', conversation.name)
conversation.save()
return conversation
def validate_with_participants(self, value):
if len(value) < 1:
raise serializers.ValidationError("No chat participants given")
if len(value) == 1 and self.context['request'].user.id in value:
raise serializers.ValidationError("Requesting user is only participant")
return value
| Python | 0.000001 |
255ddb1a6910e590cb454a0d4e03f51b8d7b2092 | Update setup.py console script to use cli instead of main | {{cookiecutter.repo_name}}/setup.py | {{cookiecutter.repo_name}}/setup.py | import sys
import os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
setup(
name='{{cookiecutter.repo_name}}',
version='{{cookiecutter.version}}',
author='{{cookiecutter.full_name}}',
author_email='{{cookiecutter.email}}',
description='{{cookiecutter.short_description}}',
long_description=read('README.rst'),
license='MIT',
keywords=(
"Python, cookiecutter, kivy, buildozer, pytest, projects, project "
"templates, example, documentation, tutorial, setup.py, package, "
"android, touch, mobile, NUI"
),
url='https://github.com/{{cookiecutter.github_username}}/{{cookiecutter.repo_name}}',
install_requires=['kivy>={{cookiecutter.kivy_version}}'],
zip_safe=False,
packages=find_packages(),
include_package_data=True,
entry_points={
'console_scripts': [
'{{cookiecutter.repo_name}}={{cookiecutter.repo_name}}.cli:main'
]
},
tests_require=['pytest'],
cmdclass={'test': PyTest},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Artistic Software',
'Topic :: Multimedia :: Graphics :: Presentation',
'Topic :: Software Development :: User Interfaces',
],
)
| import sys
import os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
setup(
name='{{cookiecutter.repo_name}}',
version='{{cookiecutter.version}}',
author='{{cookiecutter.full_name}}',
author_email='{{cookiecutter.email}}',
description='{{cookiecutter.short_description}}',
long_description=read('README.rst'),
license='MIT',
keywords=(
"Python, cookiecutter, kivy, buildozer, pytest, projects, project "
"templates, example, documentation, tutorial, setup.py, package, "
"android, touch, mobile, NUI"
),
url='https://github.com/{{cookiecutter.github_username}}/{{cookiecutter.repo_name}}',
install_requires=['kivy>={{cookiecutter.kivy_version}}'],
zip_safe=False,
packages=find_packages(),
include_package_data=True,
entry_points={
'console_scripts': [
'{{cookiecutter.repo_name}}={{cookiecutter.repo_name}}.main:main'
]
},
tests_require=['pytest'],
cmdclass={'test': PyTest},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Artistic Software',
'Topic :: Multimedia :: Graphics :: Presentation',
'Topic :: Software Development :: User Interfaces',
],
)
| Python | 0 |
2a4e5ad6ac5e5400564d0dc9306c2ab30b9dba98 | bump version | pinax_theme_bootstrap/__init__.py | pinax_theme_bootstrap/__init__.py | __version__ = "0.1.4" | __version__ = "0.1.3" | Python | 0 |
e428bd3776257030c538e85fe94154686b3e4ff0 | Make gen_fh.py script always use the next available FH number (#151) | scripts/gen_fh.py | scripts/gen_fh.py | # Generate this week's friday hack
# To generate some other FH pass in a number as argument
# e.g python gen_fh.py 1 generates next week's
# e.g python gen_fh.py 3 generates next next next week's
# As for numbering, it will take the next number
# (e.g. if the previous post is FH #1000, the generated one will be FH #1001)
# Please first update data/friday_hacks.yml before running this
import yaml
from datetime import datetime, timedelta
from os import listdir
from os.path import isfile, join
from sys import argv
import re
with open('../data/friday_hacks.yml', 'r') as fin:
doc = yaml.load(fin)
start_date = datetime.strptime(doc['start_date'],
'%Y-%m-%d %H:%M:%S +0800')
# Time delta fixes weird bug
now = datetime.today() - timedelta(hours=3)
# Sick undocumented feature
if len(argv) > 1:
now += timedelta(days=7 * int(argv[1]))
hacks = doc['hacks']
cur = start_date
next_hack = None
next_date = None
for hack in hacks:
if cur > now:
next_hack = hack
next_date = cur
break
cur += timedelta(days=7)
if not next_hack:
print "Dude semester's over"
quit()
if not next_hack.get('topics'):
print "Dude no hackz"
quit()
date = cur
print "Creating FH post for " + str(cur)
name = raw_input("Your name? ")
# so future-proof it's sick
fhre = re.compile(
r'^20[0-9][0-9]-[01][0-9]-[0-3][0-9]-friday-hacks-([1-9][0-9]*)\.md$')
num = 0
# so.. tempted... to... use lazy evaluation
for f in listdir('../content/post/'):
result = fhre.search(f)
if result:
cur = int(result.group(1))
if cur > num:
num = cur
num += 1
# In case you want to skip FH numbers BUT WHYYY!?!?
# What is abstraction?
# if len(argv) > 1:
# num += int(argv[1])
print "Creating FH post for #" + str(num) + ", at " + str(date)
# In case you want a different name, BUT WHYYY!?!?
# name = raw_input("Your name? ")
# now witness templating in raw string
content = '''\
---
title: "Friday Hacks #{num}, {month} {day}"
date: {now}
author: {author}
url: /{year}/{no_of_month}/friday-hacks-{num}
---
--- say something as introduction ---
{{{{% friday_hack_header venue="{venue}" date="{month} {day}" %}}}}
'''.format(
num=num,
now=datetime.today(),
year=next_date.strftime("%Y"),
month=next_date.strftime("%B"),
no_of_month=next_date.strftime('%m'),
day=next_date.day,
author=name,
venue=next_hack['venue']) + '\n'.join([
'''
### {talk_name}
#### Talk Description:
--- describe ----
#### Speaker Profile
--- describe ----
'''.format(talk_name=topic['title']) for topic in next_hack['topics']
])
filename = '../content/post/{now}-friday-hacks-{num}.md'.format(
now=next_date.strftime("%Y-%m-%d"),
num=num,
month=next_date.strftime('%b'),
day=next_date.day,
)
with open(filename, 'a') as fout:
fout.write(content)
| # Generate this week's friday hack
# To generate some other FH pass in a number as argument
# e.g python gen_fh.py 1 generates next week's
# e.g python gen_fh.py 3 generates next next next week's
# Please first update data/friday_hacks.yml before running this
import yaml
from datetime import datetime, timedelta
from os import listdir
from os.path import isfile, join
from sys import argv
import re
with open('../data/friday_hacks.yml', 'r') as fin:
doc = yaml.load(fin)
start_date = datetime.strptime(doc['start_date'],
'%Y-%m-%d %H:%M:%S +0800')
# Time delta fixes weird bug
now = datetime.today() - timedelta(hours=3)
# Sick undocumented feature
if len(argv) > 1:
now += timedelta(days=7 * int(argv[1]))
hacks = doc['hacks']
cur = start_date
next_hack = None
next_date = None
for hack in hacks:
if cur > now:
next_hack = hack
next_date = cur
break
cur += timedelta(days=7)
if not next_hack:
print "Dude semester's over"
quit()
if not next_hack.get('topics'):
print "Dude no hackz"
quit()
date = cur
print "Creating FH post for " + str(cur)
name = raw_input("Your name? ")
# so future-proof it's sick
fhre = re.compile(
r'^20[0-9][0-9]-[01][0-9]-[0-3][0-9]-friday-hacks-([1-9][0-9]*)\.md$')
num = 0
# so.. tempted... to... use lazy evaluation
for f in listdir('../content/post/'):
result = fhre.search(f)
if result:
cur = int(result.group(1))
if cur > num:
num = cur
num += 1
# What is abstraction?
if len(argv) > 1:
num += int(argv[1])
print "Creating FH post for #" + str(num) + ", at " + str(date)
# In case you want a different name, BUT WHYYY!?!?
# name = raw_input("Your name? ")
# now witness templating in raw string
content = '''\
---
title: "Friday Hacks #{num}, {month} {day}"
date: {now}
author: {author}
url: /{year}/{no_of_month}/friday-hacks-{num}
---
--- say something as introduction ---
{{{{% friday_hack_header venue="{venue}" date="{month} {day}" %}}}}
'''.format(
num=num,
now=datetime.today(),
year=next_date.strftime("%Y"),
month=next_date.strftime("%B"),
no_of_month=next_date.strftime('%m'),
day=next_date.day,
author=name,
venue=next_hack['venue']) + '\n'.join([
'''
### {talk_name}
#### Talk Description:
--- describe ----
#### Speaker Profile
--- describe ----
'''.format(talk_name=topic['title']) for topic in next_hack['topics']
])
filename = '../content/post/{now}-friday-hacks-{num}.md'.format(
now=next_date.strftime("%Y-%m-%d"),
num=num,
month=next_date.strftime('%b'),
day=next_date.day,
)
with open(filename, 'a') as fout:
fout.write(content)
| Python | 0 |
7faa33c1eff79223252d6a7c4fe5ad033383df6c | Bump version | l10n_ch_payment_slip/__openerp__.py | l10n_ch_payment_slip/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Financial contributors: Hasa SA, Open Net SA,
# Prisme Solutions Informatique SA, Quod SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Switzerland - Payment Slip (BVR/ESR)',
'summary': 'Print ESR/BVR payment slip with your invoices',
'description': """
Swiss Payment slip known as ESR/BVR
===================================
This addon allows you to print the ESR/BVR report Using Qweb report.
The ESR/BVR is grenerated as an image and is availabe in a fields
of the `l10n_ch.payment_slip` Model.
The ESR/BVR is created each time an invoice is validated.
To modify it you have to cancel it and reconfirm the invoice.
You can adjust the print out of ESR/BVR, which depend on each printer,
for every company in the "BVR Data" tab.
This is especialy useful when using pre-printed paper.
An option also allow you to print the ESR/BVR in background when using
white paper.
This module will also allows you to import v11 files provided
by financial institute into a bank statement
To do so, use the wizard provided in bank statement.
This module also adds transaction_ref field on entries in order to manage
reconciliation in multi payment context (unique reference needed on
account.move.line). Many BVR can now be printed from on invoice for each
payment terms.
""",
'version': '8.0.2.1.1',
'author': "Camptocamp,Odoo Community Association (OCA)",
'category': 'Localization',
'website': 'http://www.camptocamp.com',
'license': 'AGPL-3',
'depends': ['base',
'account',
'account_payment',
'report',
'l10n_ch_base_bank',
'base_transaction_id'],
'data': ["company_view.xml",
"bank_view.xml",
"account_invoice_view.xml",
"wizard/bvr_import_view.xml",
"report/report_declaration.xml",
"security/ir.model.access.csv"],
'demo': [],
'test': [],
'auto_install': False,
'installable': True,
'images': []
}
| # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Financial contributors: Hasa SA, Open Net SA,
# Prisme Solutions Informatique SA, Quod SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Switzerland - Payment Slip (BVR/ESR)',
'summary': 'Print ESR/BVR payment slip with your invoices',
'description': """
Swiss Payment slip known as ESR/BVR
===================================
This addon allows you to print the ESR/BVR report Using Qweb report.
The ESR/BVR is grenerated as an image and is availabe in a fields
of the `l10n_ch.payment_slip` Model.
The ESR/BVR is created each time an invoice is validated.
To modify it you have to cancel it and reconfirm the invoice.
You can adjust the print out of ESR/BVR, which depend on each printer,
for every company in the "BVR Data" tab.
This is especialy useful when using pre-printed paper.
An option also allow you to print the ESR/BVR in background when using
white paper.
This module will also allows you to import v11 files provided
by financial institute into a bank statement
To do so, use the wizard provided in bank statement.
This module also adds transaction_ref field on entries in order to manage
reconciliation in multi payment context (unique reference needed on
account.move.line). Many BVR can now be printed from on invoice for each
payment terms.
""",
'version': '8.0.2.1.0',
'author': "Camptocamp,Odoo Community Association (OCA)",
'category': 'Localization',
'website': 'http://www.camptocamp.com',
'license': 'AGPL-3',
'depends': ['base',
'account',
'account_payment',
'report',
'l10n_ch_base_bank',
'base_transaction_id'],
'data': ["company_view.xml",
"bank_view.xml",
"account_invoice_view.xml",
"wizard/bvr_import_view.xml",
"report/report_declaration.xml",
"security/ir.model.access.csv"],
'demo': [],
'test': [],
'auto_install': False,
'installable': True,
'images': []
}
| Python | 0 |
2e608036c8611026f9fb47a762901700891e284e | use BufferedWriter for gzip files -- 30% faster writing | cutadapt/xopen.py | cutadapt/xopen.py | """
Open compressed files transparently.
"""
import gzip
import sys
import io
__author__ = 'Marcel Martin'
import sys
if sys.version_info[0] >= 3:
basestring = str
from codecs import getreader, getwriter
if sys.version_info < (2, 7):
buffered_reader = lambda x: x
buffered_writer = lambda x: x
else:
buffered_reader = io.BufferedReader
buffered_writer = io.BufferedWriter
def xopen(filename, mode='r'):
"""
Replacement for the "open" function that can also open
files that have been compressed with gzip. If the filename ends with .gz,
the file is opened with gzip.open(). If it doesn't, the regular open()
is used. If the filename is '-', standard output (mode 'w') or input
(mode 'r') is returned.
"""
assert isinstance(filename, basestring)
if filename == '-':
return sys.stdin if 'r' in mode else sys.stdout
if filename.endswith('.gz'):
if sys.version_info[0] < 3:
if 'r' in mode:
return buffered_reader(gzip.open(filename, mode))
else:
return buffered_writer(gzip.open(filename, mode))
else:
if 'r' in mode:
return getreader('ascii')(gzip.open(filename, mode))
else:
return getwriter('ascii')(gzip.open(filename, mode))
else:
return open(filename, mode)
| """
Open compressed files transparently.
"""
import gzip
import sys
import io
__author__ = 'Marcel Martin'
import sys
if sys.version_info[0] >= 3:
basestring = str
from codecs import getreader, getwriter
if sys.version_info < (2, 7):
buffered_reader = lambda x: x
else:
buffered_reader = io.BufferedReader
def xopen(filename, mode='r'):
"""
Replacement for the "open" function that can also open
files that have been compressed with gzip. If the filename ends with .gz,
the file is opened with gzip.open(). If it doesn't, the regular open()
is used. If the filename is '-', standard output (mode 'w') or input
(mode 'r') is returned.
"""
assert isinstance(filename, basestring)
if filename == '-':
return sys.stdin if 'r' in mode else sys.stdout
if filename.endswith('.gz'):
if sys.version_info[0] < 3:
if 'r' in mode:
return buffered_reader(gzip.open(filename, mode))
else:
return gzip.open(filename, mode)
else:
if 'r' in mode:
return getreader('ascii')(gzip.open(filename, mode))
else:
return getwriter('ascii')(gzip.open(filename, mode))
else:
return open(filename, mode)
| Python | 0 |
fc683685d7df05ee0acc63a216c5b8fd99462219 | use f strings | metaci/plan/templatetags/templatehelpers.py | metaci/plan/templatetags/templatehelpers.py | """
https://simpleisbetterthancomplex.com/snippet/2016/08/22/dealing-with-querystring-parameters.html
"""
from django import template
register = template.Library()
@register.simple_tag
def relative_url(value, field_name, urlencode=None):
url = f"?{field_name}={value}"
if urlencode:
querystring = urlencode.split("&")
filtered_querystring = [p for p in querystring if p.split("=")[0] != field_name]
encoded_querystring = "&".join(filtered_querystring)
url = f"{url}&{encoded_querystring}"
return url
| """
https://simpleisbetterthancomplex.com/snippet/2016/08/22/dealing-with-querystring-parameters.html
"""
from django import template
register = template.Library()
@register.simple_tag
def relative_url(value, field_name, urlencode=None):
url = "?{}={}".format(field_name, value)
if urlencode:
querystring = urlencode.split("&")
filtered_querystring = [p for p in querystring if p.split("=")[0] != field_name]
encoded_querystring = "&".join(filtered_querystring)
url = "{}&{}".format(url, encoded_querystring)
return url
| Python | 0.020803 |
01e2be42f93f4e68b79ecee21818881158ecb759 | fix the whitelist + blacklist when neither are specified | environment_kernels/core.py | environment_kernels/core.py | # -*- coding: utf-8 -*-
import os
import glob
import platform
from jupyter_client.kernelspec import KernelSpecManager, KernelSpec, NoSuchKernel
from traitlets import List
__all__ = ['EnvironmentKernelSpecManager']
try:
import conda.config
HAVE_CONDA = True
except ImportError:
HAVE_CONDA = False
class EnvironmentKernelSpecManager(KernelSpecManager):
"""
A Jupyter Kenel manager which dyamically checks for Environments
Given a list of base directories, this class searches for the pattern::
BASE_DIR/NAME/bin/ipython
where NAME is taken to be the name of the environment.
"""
# Take the default home DIR for conda and virtualenv as the default
_default_dirs = ['~/.conda/envs/', '~/.virtualenvs']
# Check for the windows specific CONDA_ENVS_PATH variable and add it to the
# list if set.
if os.environ.get('CONDA_ENVS_PATH', False):
_default_dirs.append(os.environ['CONDA_ENVS_PATH'])
# If we are running inside conda we can get all the env dirs:
if HAVE_CONDA:
_default_dirs += conda.config.envs_dirs
# Remove any duplicates
_default_dirs = list(set(map(os.path.expanduser, _default_dirs)))
env_dirs = List(_default_dirs, config=True)
extra_env_dirs = List([], config=True)
blacklist_envs = List([], config=True)
whitelist_envs = List([], config=True)
def validate_env(self, envname):
"""
Check the name of the environment against the black list and the
whitelist. If a whitelist is specified only it is checked.
"""
if self.whitelist_envs and envname in self.whitelist_envs:
return True
elif self.whitelist_envs:
return False
if self.blacklist_envs and envname not in self.blacklist_envs:
return True
elif self.blacklist_envs:
return False
else:
return True
def _get_env_paths(self):
if platform.system() == 'Windows':
search = '*/Scripts/ipython'
else:
search = '*/bin/ipython'
return [os.path.join(os.path.expanduser(base_dir), search)
for base_dir in self.env_dirs + self.extra_env_dirs]
def find_python_paths(self):
# find a python executeable
python_dirs = {}
for env_path in self._get_env_paths():
for python_exe in glob.glob(env_path):
venv_dir = os.path.split(os.path.split(python_exe)[0])[0]
venv_name = os.path.split(venv_dir)[1]
if self.validate_env(venv_name):
python_dirs.update({venv_name: venv_dir})
return python_dirs
def venv_kernel_specs(self):
python_dirs = self.find_python_paths()
kspecs = {}
for venv_name, venv_dir in python_dirs.items():
exe_name = os.path.join(venv_dir, 'bin/python')
kspec_dict = {"argv": [exe_name,
"-m",
"IPython.kernel",
"-f",
"{connection_file}"],
"display_name": "Environment ({})".format(venv_name),
"env": {}}
kspecs.update({venv_name: KernelSpec(**kspec_dict)})
return kspecs
def find_kernel_specs(self):
"""Returns a dict mapping kernel names to resource directories."""
d = super(EnvironmentKernelSpecManager, self).find_kernel_specs()
d.update(self.find_python_paths())
return d
def get_kernel_spec(self, kernel_name):
"""Returns a :class:`KernelSpec` instance for the given kernel_name.
Raises :exc:`NoSuchKernel` if the given kernel name is not found.
"""
try:
return super(EnvironmentKernelSpecManager, self).get_kernel_spec(kernel_name)
except (NoSuchKernel, FileNotFoundError):
if kernel_name.lower() in self.venv_kernel_specs():
return self.venv_kernel_specs()[kernel_name.lower()]
else:
raise NoSuchKernel(kernel_name)
| # -*- coding: utf-8 -*-
import os
import glob
import platform
from jupyter_client.kernelspec import KernelSpecManager, KernelSpec, NoSuchKernel
from traitlets import List
__all__ = ['EnvironmentKernelSpecManager']
try:
import conda.config
HAVE_CONDA = True
except ImportError:
HAVE_CONDA = False
class EnvironmentKernelSpecManager(KernelSpecManager):
"""
A Jupyter Kenel manager which dyamically checks for Environments
Given a list of base directories, this class searches for the pattern::
BASE_DIR/NAME/bin/ipython
where NAME is taken to be the name of the environment.
"""
# Take the default home DIR for conda and virtualenv as the default
_default_dirs = ['~/.conda/envs/', '~/.virtualenvs']
# Check for the windows specific CONDA_ENVS_PATH variable and add it to the
# list if set.
if os.environ.get('CONDA_ENVS_PATH', False):
_default_dirs.append(os.environ['CONDA_ENVS_PATH'])
# If we are running inside conda we can get all the env dirs:
if HAVE_CONDA:
_default_dirs += conda.config.envs_dirs
# Remove any duplicates
_default_dirs = list(set(map(os.path.expanduser, _default_dirs)))
env_dirs = List(_default_dirs, config=True)
extra_env_dirs = List([], config=True)
blacklist_envs = List([], config=True)
whitelist_envs = List([], config=True)
def validate_env(self, envname):
"""
Check the name of the environment against the black list and the
whitelist. If a whitelist is specified only it is checked.
"""
if self.whitelist_envs and envname in self.whitelist_envs:
return True
elif self.whitelist_envs:
return False
if self.blacklist_envs and envname not in self.blacklist_envs:
return True
else:
return False
def _get_env_paths(self):
if platform.system() == 'Windows':
search = '*/Scripts/ipython'
else:
search = '*/bin/ipython'
return [os.path.join(os.path.expanduser(base_dir), search)
for base_dir in self.env_dirs + self.extra_env_dirs]
def find_python_paths(self):
# find a python executeable
python_dirs = {}
for env_path in self._get_env_paths():
for python_exe in glob.glob(env_path):
venv_dir = os.path.split(os.path.split(python_exe)[0])[0]
venv_name = os.path.split(venv_dir)[1]
if self.validate_env(venv_name):
python_dirs.update({venv_name: venv_dir})
return python_dirs
def venv_kernel_specs(self):
python_dirs = self.find_python_paths()
kspecs = {}
for venv_name, venv_dir in python_dirs.items():
exe_name = os.path.join(venv_dir, 'bin/python')
kspec_dict = {"argv": [exe_name,
"-m",
"IPython.kernel",
"-f",
"{connection_file}"],
"display_name": "Environment ({})".format(venv_name),
"env": {}}
kspecs.update({venv_name: KernelSpec(**kspec_dict)})
return kspecs
def find_kernel_specs(self):
"""Returns a dict mapping kernel names to resource directories."""
d = super(EnvironmentKernelSpecManager, self).find_kernel_specs()
d.update(self.find_python_paths())
return d
def get_kernel_spec(self, kernel_name):
"""Returns a :class:`KernelSpec` instance for the given kernel_name.
Raises :exc:`NoSuchKernel` if the given kernel name is not found.
"""
try:
return super(EnvironmentKernelSpecManager, self).get_kernel_spec(kernel_name)
except (NoSuchKernel, FileNotFoundError):
if kernel_name.lower() in self.venv_kernel_specs():
return self.venv_kernel_specs()[kernel_name.lower()]
else:
raise NoSuchKernel(kernel_name)
| Python | 0.000018 |
0430957f2b65ee0e14821027a15cfb956e976c62 | make method static | RatS/tmdb/tmdb_ratings_inserter.py | RatS/tmdb/tmdb_ratings_inserter.py | import time
from RatS.base.base_ratings_uploader import RatingsUploader
from RatS.tmdb.tmdb_site import TMDB
class TMDBRatingsInserter(RatingsUploader):
def __init__(self, args):
super(TMDBRatingsInserter, self).__init__(TMDB(args), args)
self.url_for_csv_file_upload = self._get_url_for_csv_upload()
self.css_id_of_file_input_element = 'csv_file'
self.xpath_selector_for_submit_button = "//form[@name='import_csv']//input[@type='submit']"
@staticmethod
def _get_url_for_csv_upload():
return 'https://www.themoviedb.org/settings/import-list'
def pre_upload_action(self):
cookie_accept_button = self.site.browser.find_element_by_id('cookie_notice')\
.find_elements_by_class_name('accept')
if cookie_accept_button is not None and len(cookie_accept_button) > 0:
cookie_accept_button[0].click()
time.sleep(1)
| import time
from RatS.base.base_ratings_uploader import RatingsUploader
from RatS.tmdb.tmdb_site import TMDB
class TMDBRatingsInserter(RatingsUploader):
def __init__(self, args):
super(TMDBRatingsInserter, self).__init__(TMDB(args), args)
self.url_for_csv_file_upload = self._get_url_for_csv_upload()
self.css_id_of_file_input_element = 'csv_file'
self.xpath_selector_for_submit_button = "//form[@name='import_csv']//input[@type='submit']"
def _get_url_for_csv_upload(self):
return 'https://www.themoviedb.org/settings/import-list'
def pre_upload_action(self):
cookie_accept_button = self.site.browser.find_element_by_id('cookie_notice')\
.find_elements_by_class_name('accept')
if cookie_accept_button is not None and len(cookie_accept_button) > 0:
cookie_accept_button[0].click()
time.sleep(1)
| Python | 0.000023 |
35a0904506ff51946c9c26a4ca9f61ae2f08b63e | bump to 1.6 | eventdispatcher/__init__.py | eventdispatcher/__init__.py | __author__ = 'Calvin Lobo'
__version__ = '1.6'
from .property import Property
from .dictproperty import DictProperty
from .listproperty import ListProperty
from .unitproperty import UnitProperty
class BindError(Exception):
pass
class EventDispatcher(object):
def __init__(self, **kwargs):
self.event_dispatcher_event_callbacks = {}
bindings = {}
# Walk through the MRO looking for Property attributes in the classes. Then register and bind them to
# 'on_<prop_name>' if it exists.
for cls in self.__class__.__mro__:
for prop_name, prop in cls.__dict__.iteritems():
if isinstance(prop, Property):
prop.name = prop_name
prop.register(self, prop_name, prop.default_value)
if hasattr(self, 'on_{}'.format(prop_name)):
func = getattr(self, 'on_{}'.format(prop_name))
bindings.update({prop_name: func})
self.bind(**bindings)
def dispatch(self, key, *args):
for callback in self.event_dispatcher_properties[key]['callbacks']:
if callback(*args):
break
def dispatch_event(self, event, *args):
for callback in self.event_dispatcher_event_callbacks[event]:
if callback(*args):
break
def register_event(self, name):
if hasattr(self, 'on_{}'.format(name)):
self.event_dispatcher_event_callbacks[name] = [getattr(self, 'on_{}'.format(name))]
else:
self.event_dispatcher_event_callbacks[name] = []
def unbind(self, **kwargs):
all_properties = self.event_dispatcher_properties
for prop_name, callback in kwargs.iteritems():
if prop_name in all_properties:
try:
all_properties[prop_name]['callbacks'].remove(callback)
except ValueError:
raise BindError("No binding for {} in property '{}'".format(callback.__name__, prop_name))
elif prop_name in self.event_dispatcher_event_callbacks:
try:
self.event_dispatcher_event_callbacks[prop_name].remove(callback)
except ValueError:
raise BindError("No binding for {} in event '{}'".format(callback.__name__, prop_name))
else:
raise BindError('No property or event by the name of %s' % prop_name)
def unbind_all(self, *args):
all_properties = self.event_dispatcher_properties
for prop_name in args:
if prop_name in all_properties:
del all_properties[prop_name]['callbacks'][:]
elif prop_name in self.event_dispatcher_event_callbacks:
del self.event_dispatcher_event_callbacks[prop_name][:]
else:
raise BindError("No such property or event '%s'" % prop_name)
def bind(self, **kwargs):
for prop_name, callback in kwargs.iteritems():
if prop_name in self.event_dispatcher_properties:
# Queue the callback into the property
self.event_dispatcher_properties[prop_name]['callbacks'].append(callback)
elif prop_name in self.event_dispatcher_event_callbacks:
# If a property was not found, search in events
self.event_dispatcher_event_callbacks[prop_name].append(callback)
else:
raise BindError("No property or event by the name of '%s'" % prop_name)
def setter(self, prop_name):
return lambda inst, value: setattr(self, prop_name, value)
def get_dispatcher_property(self, prop_name):
return self.event_dispatcher_properties[prop_name]['property']
| __author__ = 'Calvin Lobo'
__version__ = '1.5'
from .property import Property
from .dictproperty import DictProperty
from .listproperty import ListProperty
from .unitproperty import UnitProperty
class BindError(Exception):
pass
class EventDispatcher(object):
def __init__(self, **kwargs):
self.event_dispatcher_event_callbacks = {}
bindings = {}
# Walk through the MRO looking for Property attributes in the classes. Then register and bind them to
# 'on_<prop_name>' if it exists.
for cls in self.__class__.__mro__:
for prop_name, prop in cls.__dict__.iteritems():
if isinstance(prop, Property):
prop.name = prop_name
prop.register(self, prop_name, prop.default_value)
if hasattr(self, 'on_{}'.format(prop_name)):
func = getattr(self, 'on_{}'.format(prop_name))
bindings.update({prop_name: func})
self.bind(**bindings)
def dispatch(self, key, *args):
for callback in self.event_dispatcher_properties[key]['callbacks']:
if callback(*args):
break
def dispatch_event(self, event, *args):
for callback in self.event_dispatcher_event_callbacks[event]:
if callback(*args):
break
def register_event(self, name):
if hasattr(self, 'on_{}'.format(name)):
self.event_dispatcher_event_callbacks[name] = [getattr(self, 'on_{}'.format(name))]
else:
self.event_dispatcher_event_callbacks[name] = []
def unbind(self, **kwargs):
all_properties = self.event_dispatcher_properties
for prop_name, callback in kwargs.iteritems():
if prop_name in all_properties:
try:
all_properties[prop_name]['callbacks'].remove(callback)
except ValueError:
raise BindError("No binding for {} in property '{}'".format(callback.__name__, prop_name))
elif prop_name in self.event_dispatcher_event_callbacks:
try:
self.event_dispatcher_event_callbacks[prop_name].remove(callback)
except ValueError:
raise BindError("No binding for {} in event '{}'".format(callback.__name__, prop_name))
else:
raise BindError('No property or event by the name of %s' % prop_name)
def unbind_all(self, *args):
all_properties = self.event_dispatcher_properties
for prop_name in args:
if prop_name in all_properties:
del all_properties[prop_name]['callbacks'][:]
elif prop_name in self.event_dispatcher_event_callbacks:
del self.event_dispatcher_event_callbacks[prop_name][:]
else:
raise BindError("No such property or event '%s'" % prop_name)
def bind(self, **kwargs):
for prop_name, callback in kwargs.iteritems():
if prop_name in self.event_dispatcher_properties:
# Queue the callback into the property
self.event_dispatcher_properties[prop_name]['callbacks'].append(callback)
elif prop_name in self.event_dispatcher_event_callbacks:
# If a property was not found, search in events
self.event_dispatcher_event_callbacks[prop_name].append(callback)
else:
raise BindError("No property or event by the name of '%s'" % prop_name)
def setter(self, prop_name):
return lambda inst, value: setattr(self, prop_name, value)
def get_dispatcher_property(self, prop_name):
return self.event_dispatcher_properties[prop_name]['property']
| Python | 0.000003 |
fe0f2b85af896b91001b39098a1a234399247293 | add `field_pk` for `parameter_name` for custom primary key in model | dal_admin_filters/__init__.py | dal_admin_filters/__init__.py | # -*- encoding: utf-8 -*-
from dal import autocomplete
from django import forms
from django.contrib.admin.filters import SimpleListFilter
from django.core.exceptions import ImproperlyConfigured
from django.forms.widgets import Media, MEDIA_TYPES
class AutocompleteFilter(SimpleListFilter):
template = "dal_admin_filters/autocomplete-filter.html"
title = ''
field_name = ''
field_pk = 'id'
autocomplete_url = ''
is_placeholder_title = False
widget_attrs = {}
class Media:
css = {
'all': (
'autocomplete_light/vendor/select2/dist/css/select2.css',
'autocomplete_light/select2.css',
'dal_admin_filters/css/autocomplete-fix.css'
)
}
js = (
'autocomplete_light/jquery.init.js',
'autocomplete_light/autocomplete.init.js',
'autocomplete_light/vendor/select2/dist/js/select2.full.js',
'autocomplete_light/select2.js',
'dal_admin_filters/js/querystring.js',
)
def __init__(self, request, params, model, model_admin):
if self.parameter_name:
raise AttributeError(
'Rename attribute `parameter_name` to '
'`field_name` for {}'.format(self.__class__)
)
self.parameter_name = '{}__{}__exact'.format(self.field_name, self.field_pk)
super(AutocompleteFilter, self).__init__(request, params, model, model_admin)
self._add_media(model_admin)
field = forms.ModelChoiceField(
queryset=getattr(model, self.field_name).get_queryset(),
widget=autocomplete.ModelSelect2(
url=self.autocomplete_url,
)
)
attrs = self.widget_attrs.copy()
attrs['id'] = 'id-%s-dal-filter' % self.field_name
if self.is_placeholder_title:
attrs['data-placeholder'] = "By " + self.title
self.rendered_widget = field.widget.render(
name=self.parameter_name,
value=self.used_parameters.get(self.parameter_name, ''),
attrs=attrs
)
def _add_media(self, model_admin):
if not hasattr(model_admin, 'Media'):
raise ImproperlyConfigured('Add empty Media class to %s. Sorry about this bug.' % model_admin)
def _get_media(obj):
return Media(media=getattr(obj, 'Media', None))
media = _get_media(model_admin) + _get_media(AutocompleteFilter) + _get_media(self)
for name in MEDIA_TYPES:
setattr(model_admin.Media, name, getattr(media, "_" + name))
def has_output(self):
return True
def lookups(self, request, model_admin):
return ()
def queryset(self, request, queryset):
if self.value():
return queryset.filter(**{self.parameter_name: self.value()})
else:
return queryset
| # -*- encoding: utf-8 -*-
from dal import autocomplete
from django import forms
from django.contrib.admin.filters import SimpleListFilter
from django.core.exceptions import ImproperlyConfigured
from django.forms.widgets import Media, MEDIA_TYPES
class AutocompleteFilter(SimpleListFilter):
template = "dal_admin_filters/autocomplete-filter.html"
title = ''
field_name = ''
autocomplete_url = ''
is_placeholder_title = False
widget_attrs = {}
class Media:
css = {
'all': (
'autocomplete_light/vendor/select2/dist/css/select2.css',
'autocomplete_light/select2.css',
'dal_admin_filters/css/autocomplete-fix.css'
)
}
js = (
'autocomplete_light/jquery.init.js',
'autocomplete_light/autocomplete.init.js',
'autocomplete_light/vendor/select2/dist/js/select2.full.js',
'autocomplete_light/select2.js',
'dal_admin_filters/js/querystring.js',
)
def __init__(self, request, params, model, model_admin):
if self.parameter_name:
raise AttributeError(
'Rename attribute `parameter_name` to '
'`field_name` for {}'.format(self.__class__)
)
self.parameter_name = '{}__id__exact'.format(self.field_name)
super(AutocompleteFilter, self).__init__(request, params, model, model_admin)
self._add_media(model_admin)
field = forms.ModelChoiceField(
queryset=getattr(model, self.field_name).get_queryset(),
widget=autocomplete.ModelSelect2(
url=self.autocomplete_url,
)
)
attrs = self.widget_attrs.copy()
attrs['id'] = 'id-%s-dal-filter' % self.field_name
if self.is_placeholder_title:
attrs['data-placeholder'] = "By " + self.title
self.rendered_widget = field.widget.render(
name=self.parameter_name,
value=self.used_parameters.get(self.parameter_name, ''),
attrs=attrs
)
def _add_media(self, model_admin):
if not hasattr(model_admin, 'Media'):
raise ImproperlyConfigured('Add empty Media class to %s. Sorry about this bug.' % model_admin)
def _get_media(obj):
return Media(media=getattr(obj, 'Media', None))
media = _get_media(model_admin) + _get_media(AutocompleteFilter) + _get_media(self)
for name in MEDIA_TYPES:
setattr(model_admin.Media, name, getattr(media, "_" + name))
def has_output(self):
return True
def lookups(self, request, model_admin):
return ()
def queryset(self, request, queryset):
if self.value():
return queryset.filter(**{self.parameter_name: self.value()})
else:
return queryset
| Python | 0 |
76f5e98aec0024fb6d015004e1f3f26434a01fc2 | Update _version.py | core/_version.py | core/_version.py | """Version information."""
# The following line *must* be the last in the module, exactly as formatted:
__version__ = "0.5.3"
| """Version information."""
# The following line *must* be the last in the module, exactly as formatted:
__version__ = "0.5.2"
| Python | 0.000002 |
92febbffb91943f13cfac8c00e55103b20645b70 | Update [MediaContainer] children with the correct `section` object | plex/objects/library/container.py | plex/objects/library/container.py | from plex.objects.core.base import Property
from plex.objects.container import Container
from plex.objects.library.section import Section
class MediaContainer(Container):
section = Property(resolver=lambda: MediaContainer.construct_section)
title1 = Property
title2 = Property
identifier = Property
art = Property
thumb = Property
view_group = Property('viewGroup')
view_mode = Property('viewMode', int)
media_tag_prefix = Property('mediaTagPrefix')
media_tag_version = Property('mediaTagVersion')
no_cache = Property('nocache', bool)
allow_sync = Property('allowSync', bool)
mixed_parents = Property('mixedParents', bool)
@staticmethod
def construct_section(client, node):
attribute_map = {
'key': 'librarySectionID',
'uuid': 'librarySectionUUID',
'title': 'librarySectionTitle'
}
return Section.construct(client, node, attribute_map, child=True)
def __iter__(self):
for item in super(MediaContainer, self).__iter__():
item.section = self.section
yield item
| from plex.objects.core.base import Property
from plex.objects.container import Container
from plex.objects.library.section import Section
class MediaContainer(Container):
section = Property(resolver=lambda: MediaContainer.construct_section)
title1 = Property
title2 = Property
identifier = Property
art = Property
thumb = Property
view_group = Property('viewGroup')
view_mode = Property('viewMode', int)
media_tag_prefix = Property('mediaTagPrefix')
media_tag_version = Property('mediaTagVersion')
no_cache = Property('nocache', bool)
allow_sync = Property('allowSync', bool)
mixed_parents = Property('mixedParents', bool)
@staticmethod
def construct_section(client, node):
attribute_map = {
'key': 'librarySectionID',
'uuid': 'librarySectionUUID',
'title': 'librarySectionTitle'
}
return Section.construct(client, node, attribute_map, child=True)
| Python | 0 |
d4aa45b39eab5ce4b06d6343344afb05a0bf8582 | Fix pep8. | tryfer/tests/test_formatters.py | tryfer/tests/test_formatters.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from twisted.trial.unittest import TestCase
from tryfer import formatters
class TestFormatters(TestCase):
def test_ipv4_to_int(self):
""" Thrift expects ipv4 address to be a signed 32-bit integer.
Previously this function converted ip addresses to an unsigned 32-bit
int. struct.pack is strict about integer overflows for signed 32-bit
integers, so this function very much needs to produce a signed integer
to allow IP addresses in the upper half to work
"""
# ip that doesn't overflow in signed 32-bit
low_ip = '127.0.0.1'
# ip that does overflow in signed 32-bit
high_ip = '172.17.1.1'
low_ip_as_int = formatters.ipv4_to_int(low_ip)
high_ip_as_int = formatters.ipv4_to_int(high_ip)
# both parsed ips should be packable as signed 32-bit int
struct.pack('!i', low_ip_as_int)
struct.pack('!i', high_ip_as_int)
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from twisted.trial.unittest import TestCase
from tryfer import formatters
class TestFormatters(TestCase):
def test_ipv4_to_int(self):
""" Thrift expects ipv4 address to be a signed 32-bit integer.
Previously this function converted ip addresses to an unsigned 32-bit
int. struct.pack is strict about integer overflows for signed 32-bit
integers, so this function very much needs to produce a signed integer
to allow IP addresses in the upper half to work
"""
# ip that doesn't overflow in signed 32-bit
low_ip = '127.0.0.1'
# ip that does overflow in signed 32-bit
high_ip = '172.17.1.1'
low_ip_as_int = formatters.ipv4_to_int(low_ip)
high_ip_as_int = formatters.ipv4_to_int(high_ip)
# both parsed ips should be packable as signed 32-bit int
struct.pack('!i', low_ip_as_int)
struct.pack('!i', high_ip_as_int)
| Python | 0 |
35293cecc99a629b3a185e69cf9ed3a339d9d1cf | Remove indentation level for easier review | automat/_introspection.py | automat/_introspection.py | """
Python introspection helpers.
"""
from types import CodeType as code, FunctionType as function
def copycode(template, changes):
if hasattr(code, "replace"):
return template.replace(**{"co_" + k : v for k, v in changes.items()})
names = [
"argcount", "nlocals", "stacksize", "flags", "code", "consts",
"names", "varnames", "filename", "name", "firstlineno", "lnotab",
"freevars", "cellvars"
]
if hasattr(code, "co_kwonlyargcount"):
names.insert(1, "kwonlyargcount")
if hasattr(code, "co_posonlyargcount"):
# PEP 570 added "positional only arguments"
names.insert(1, "posonlyargcount")
values = [
changes.get(name, getattr(template, "co_" + name))
for name in names
]
return code(*values)
def copyfunction(template, funcchanges, codechanges):
names = [
"globals", "name", "defaults", "closure",
]
values = [
funcchanges.get(name, getattr(template, "__" + name + "__"))
for name in names
]
return function(copycode(template.__code__, codechanges), *values)
def preserveName(f):
"""
Preserve the name of the given function on the decorated function.
"""
def decorator(decorated):
return copyfunction(decorated,
dict(name=f.__name__), dict(name=f.__name__))
return decorator
| """
Python introspection helpers.
"""
from types import CodeType as code, FunctionType as function
def copycode(template, changes):
if hasattr(code, "replace"):
return template.replace(**{"co_" + k : v for k, v in changes.items()})
else:
names = [
"argcount", "nlocals", "stacksize", "flags", "code", "consts",
"names", "varnames", "filename", "name", "firstlineno", "lnotab",
"freevars", "cellvars"
]
if hasattr(code, "co_kwonlyargcount"):
names.insert(1, "kwonlyargcount")
if hasattr(code, "co_posonlyargcount"):
# PEP 570 added "positional only arguments"
names.insert(1, "posonlyargcount")
values = [
changes.get(name, getattr(template, "co_" + name))
for name in names
]
return code(*values)
def copyfunction(template, funcchanges, codechanges):
names = [
"globals", "name", "defaults", "closure",
]
values = [
funcchanges.get(name, getattr(template, "__" + name + "__"))
for name in names
]
return function(copycode(template.__code__, codechanges), *values)
def preserveName(f):
"""
Preserve the name of the given function on the decorated function.
"""
def decorator(decorated):
return copyfunction(decorated,
dict(name=f.__name__), dict(name=f.__name__))
return decorator
| Python | 0.000004 |
9a1eb2dbe37c13c82477ed5787eeb985994cac8f | add Python2 shebang to helper.py | scripts/helper.py | scripts/helper.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
scripts.init_webhook
~~~~~~~~~~~~~~~~~~~~
A simple script to manage the webhook.
:copyright: (c) 2016 by Lujeni.
:license: BSD, see LICENSE for more details.
"""
import argparse
import sys
from trello import TrelloClient
from slugify import slugify
from matterllo.utils import config
from matterllo.utils import logger
SETTINGS = config()
LOGGING = logger()
def main():
try:
parser = argparse.ArgumentParser(description="Webhook helpers")
parser.add_argument('--cleanup', dest='cleanup', action='store_true', help='delete webhook from your SETTINGS.')
parser.add_argument('--update', dest='update', action='store_true', help='upsert webhook from your SETTINGS.')
parser.add_argument('--init', dest='init', action='store_true', help='delete and create webhook from your SETTINGS.')
args = parser.parse_args()
if not args.cleanup and not args.update and not args.init:
print parser.print_help()
sys.exit(0)
client = TrelloClient(api_key=SETTINGS['trello_api_key'], token=SETTINGS['trello_api_token'])
trello_boards = client.list_boards()
boards_name = [slugify(b['name']) for b in SETTINGS.get('boards', {}).values()]
# cleanup part
if args.cleanup or args.init:
result = [h.delete() for h in client.list_hooks()]
LOGGING.info('delete {} webhook'.format(len(result)))
# update / init part
if args.update or args.init:
for board in trello_boards:
board_name = slugify(board.name)
if board_name not in boards_name:
continue
LOGGING.info('try to create webhook board :: {}'.format(board_name))
url = SETTINGS['callback_url'] + '/trelloCallbacks/'
result = client.create_hook(url, board.id)
LOGGING.info('create webhook board :: {} :: {}'.format(board_name, result))
except Exception as e:
LOGGING.error('unable init webhook :: {}'.format(e))
sys.exit(1)
if __name__ == '__main__':
main()
| # -*- coding: utf-8 -*-
"""
scripts.init_webhook
~~~~~~~~~~~~~~~~~~~~
A simple script to manage the webhook.
:copyright: (c) 2016 by Lujeni.
:license: BSD, see LICENSE for more details.
"""
import argparse
import sys
from trello import TrelloClient
from slugify import slugify
from matterllo.utils import config
from matterllo.utils import logger
SETTINGS = config()
LOGGING = logger()
def main():
try:
parser = argparse.ArgumentParser(description="Webhook helpers")
parser.add_argument('--cleanup', dest='cleanup', action='store_true', help='delete webhook from your SETTINGS.')
parser.add_argument('--update', dest='update', action='store_true', help='upsert webhook from your SETTINGS.')
parser.add_argument('--init', dest='init', action='store_true', help='delete and create webhook from your SETTINGS.')
args = parser.parse_args()
if not args.cleanup and not args.update and not args.init:
print parser.print_help()
sys.exit(0)
client = TrelloClient(api_key=SETTINGS['trello_api_key'], token=SETTINGS['trello_api_token'])
trello_boards = client.list_boards()
boards_name = [slugify(b['name']) for b in SETTINGS.get('boards', {}).values()]
# cleanup part
if args.cleanup or args.init:
result = [h.delete() for h in client.list_hooks()]
LOGGING.info('delete {} webhook'.format(len(result)))
# update / init part
if args.update or args.init:
for board in trello_boards:
board_name = slugify(board.name)
if board_name not in boards_name:
continue
LOGGING.info('try to create webhook board :: {}'.format(board_name))
url = SETTINGS['callback_url'] + '/trelloCallbacks/'
result = client.create_hook(url, board.id)
LOGGING.info('create webhook board :: {} :: {}'.format(board_name, result))
except Exception as e:
LOGGING.error('unable init webhook :: {}'.format(e))
sys.exit(1)
if __name__ == '__main__':
main()
| Python | 0.000002 |
35ee18926743b6ab0356ef278da9cb14a3263246 | Print field in output | jjvm.py | jjvm.py | #!/usr/bin/python
import argparse
import os
import struct
import sys
CP_STRUCT_SIZES = { 7:3, 10:5 }
###############
### CLASSES ###
###############
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
###################
### SUBROUTINES ###
###################
def lenCpStruct(tag):
if tag in CP_STRUCT_SIZES:
return CP_STRUCT_SIZES[tag]
else:
return -1
############
### MAIN ###
############
parser = MyParser('Run bytecode in jjvm')
parser.add_argument('path', help='path to class')
args = parser.parse_args()
with open(args.path, "rb") as c:
c.seek(8)
cpCount = struct.unpack(">H", c.read(2))[0] - 1
cpIndex = 1
print "Constant pool count: %d" % cpCount;
while cpIndex <= cpCount:
cpTag = ord(c.read(1))
print "Field %d: %d" % (cpIndex, cpTag)
cpStructSize = lenCpStruct(cpTag)
if cpStructSize < 0:
print "ERROR: cpStructSize %d for tag %d" % (cpStructSize, cpTag)
sys.exit(1)
# print "Size: %d" % cpStructSize
cpIndex += 1
c.seek(cpStructSize - 1, os.SEEK_CUR)
| #!/usr/bin/python
import argparse
import os
import struct
import sys
CP_STRUCT_SIZES = { 7:3, 10:5 }
###############
### CLASSES ###
###############
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
###################
### SUBROUTINES ###
###################
def lenCpStruct(tag):
if tag in CP_STRUCT_SIZES:
return CP_STRUCT_SIZES[tag]
else:
return -1
############
### MAIN ###
############
parser = MyParser('Run bytecode in jjvm')
parser.add_argument('path', help='path to class')
args = parser.parse_args()
with open(args.path, "rb") as c:
c.seek(8)
cpCount = struct.unpack(">H", c.read(2))[0] - 1
print "Constant pool count: %d" % cpCount;
while cpCount >= 0:
cpTag = ord(c.read(1))
print "Got tag: %d" % cpTag
cpStructSize = lenCpStruct(cpTag)
if cpStructSize < 0:
print "ERROR: cpStructSize %d for tag %d" % (cpStructSize, cpTag)
sys.exit(1)
print "Size: %d" % cpStructSize
cpCount -= 1
c.seek(cpStructSize - 1, os.SEEK_CUR)
| Python | 0.000279 |
b7cdab4dea63b91bbc4840ec4f0f147ac9fce7b0 | Make tests for EvapReadFile | tests/test_evap_read_file.py | tests/test_evap_read_file.py | #!/usr/bin/env python
# Nosetests for the TopoFlow EvapReadFile component.
import os
from nose.tools import assert_is_not_none, assert_equals
from cmt.components import EvapReadFile as Component
from . import example_dir
cfg_file = os.path.join(example_dir, 'June_20_67_evap_read_file.cfg')
var_name = 'land_surface_water__evaporation_volume_flux'
def setup_module():
global component
component = Component()
def teardown_module():
pass
# The file June_20_67_2D-ETrate-in.nc is missing; I had to generate it
# with the EvapEnergyBalance component.
def test_irf():
component.initialize(cfg_file)
component.update(1.0)
component.finalize()
def test_get_component_name():
x = component.get_component_name()
assert_equals(x, 'TopoFlow_Evaporation_Read_File')
def test_get_start_time():
x = component.get_start_time()
assert_equals(x, 0.0)
def test_get_end_time():
x = component.get_end_time()
assert_equals(x, 36000.0)
def test_get_var_type():
x = component.get_var_type(var_name)
assert_equals(x, 'float64')
def test_get_var_units():
x = component.get_var_units(var_name)
assert_equals(x, 'm s-1')
def test_get_var_itemsize():
x = component.get_var_itemsize(var_name)
assert_equals(x, 8)
# The get_var_nbytes method isn't implemented in TopoFlow.
# def test_get_var_nbytes():
# x = component.get_var_nbytes(var_name)
def test_get_value():
x = component.get_value(var_name)
assert_is_not_none(x)
def test_get_var_grid():
x = component.get_var_grid(var_name)
assert_equals(x, 0)
def test_get_grid_type():
grid_id = component.get_var_grid(var_name)
x = component.get_grid_type(grid_id)
assert_equals(x, 'uniform')
def test_get_grid_rank():
grid_id = component.get_var_grid(var_name)
x = component.get_grid_rank(grid_id)
assert_equals(x, 2)
def test_get_grid_shape():
grid_id = component.get_var_grid(var_name)
x = component.get_grid_shape(grid_id)
assert_equals(x[0], 44)
assert_equals(x[1], 29)
def test_get_grid_size():
grid_id = component.get_var_grid(var_name)
x = component.get_grid_size(grid_id)
assert_equals(x, 44*29)
def test_get_grid_spacing():
grid_id = component.get_var_grid(var_name)
x = component.get_grid_spacing(grid_id)
assert_equals(x[0], 30.0)
assert_equals(x[1], 30.0)
def test_get_grid_origin():
grid_id = component.get_var_grid(var_name)
x = component.get_grid_origin(grid_id)
assert_equals(x[0], 4560090.42)
assert_equals(x[1], 277850.358)
| #!/usr/bin/env python
import os
from cmt.components import EvapReadFile as Component
from . import example_dir
cfg_file = os.path.join(example_dir, 'June_20_67_evap_read_file.cfg')
# Fails because June_20_67_2D-ETrate-in.nc is missing
def test_irf():
component = Component()
component.initialize(cfg_file)
component.update(1.0)
component.finalize()
| Python | 0.000001 |
78e4e0294673e71b552e2b793a2188e3f419206e | Update emon_AZURE_HA.py | azure/ha/emon_AZURE_HA.py | azure/ha/emon_AZURE_HA.py | #!/usr/bin/python
# F5 Networks - External Monitor: Azure HA
# https://github.com/ArtiomL/f5networks
# Artiom Lichtenstein
# v0.4, 04/08/2016
import json
import os
import requests
from signal import SIGKILL
from subprocess import call
import sys
# Log level to /var/log/ltm
intLogLevel = 2
strLogID = '[-v0.4.160804-] emon_AZURE_HA.py - '
# Azure RM Auth
strSubs = '<Subscription ID>'
strTenantID = '<TenantID>'
strAppID = '<App ID>'
strPass = '<Password>'
strTokenEP = 'https://login.microsoftonline.com/%s/oauth2/token' % strTenantID
strMgmtURI = 'https://management.azure.com/'
strBearer = ''
# Logger command
strLogger = 'logger -p local0.error '
class clsExCodes:
intArgs = 8
intArmAuth = 4
def funLog(intMesLevel, strMessage):
if intLogLevel >= intMesLevel:
lstCmd = strLogger.split(' ')
lstCmd.append(strLogID + strMessage)
call(lstCmd)
def funARMAuth():
objPayload = { 'grant_type': 'client_credentials', 'client_id': strAppID, 'client_secret': strPass, 'resource': strMgmtURI }
try:
objAuthResp = requests.post(url=strTokenEP, data=objPayload)
dicAJSON = json.loads(objAuthResp.content)
if 'access_token' in dicAJSON.keys():
return dicAJSON['access_token']
except requests.exceptions.RequestException as e:
funLog(2, str(e))
return 'BearERROR'
def funCurState():
funLog(1, 'Current local state: ')
def funFailover():
funLog(1, 'Azure failover...')
def main():
if len(sys.argv) < 3:
funLog(1, 'Not enough arguments!')
sys.exit(clsExCodes.intArgs)
# Remove IPv6/IPv4 compatibility prefix (LTM passes addresses in IPv6 format)
strIP = sys.argv[1].strip(':f')
strPort = sys.argv[2]
# PID file
strPFile = '_'.join(['/var/run/', os.path.basename(sys.argv[0]), strIP, strPort + '.pid'])
# PID
strPID = str(os.getpid())
funLog(2, strPFile + ' ' + strPID)
# Kill the last instance of this monitor if hung
if os.path.isfile(strPFile):
try:
os.kill(int(file(strPFile, 'r').read()), SIGKILL)
funLog(1, 'Killed the last hung instance of this monitor.')
except OSError:
pass
# Record current PID
file(strPFile, 'w').write(str(os.getpid()))
# Health Monitor
try:
objResp = requests.head(''.join(['https://', strIP, ':', strPort]), verify = False)
if objResp.status_code == 200:
os.unlink(strPFile)
# Any standard output stops the script from running. Clean up any temporary files before the standard output operation
funLog(2, 'Peer: ' + strIP + ' is up.' )
print 'UP'
sys.exit()
except requests.exceptions.RequestException as e:
funLog(2, str(e))
# Peer down, ARM action needed
global strBearer
strBearer = funARMAuth()
funLog(2, 'ARM Bearer: ' + strBearer)
if strBearer == 'BearERROR':
funLog(1, 'ARM Auth Error!')
sys.exit(clsExCodes.intArmAuth)
funCurState()
funFailover()
os.unlink(strPFile)
sys.exit(1)
if __name__ == '__main__':
main()
| #!/usr/bin/python
# F5 Networks - External Monitor: Azure HA
# https://github.com/ArtiomL/f5networks
# Artiom Lichtenstein
# v0.4, 04/08/2016
import json
import os
import requests
from signal import SIGKILL
from subprocess import call
import sys
# Log level to /var/log/ltm
intLogLevel = 2
strLogID = '[-v0.4.160804-] emon_AZURE_HA.py - '
# Azure RM Auth
strSubs = ''
strTenantID = ''
strAppID = ''
strPass = ''
strTokenEP = 'https://login.microsoftonline.com/%s/oauth2/token' % strTenantID
strMgmtURI = 'https://management.azure.com/'
strBearer = ''
# Logger command
strLogger = 'logger -p local0.error '
class clsExCodes:
intArgs = 8
intArmAuth = 4
def funLog(intMesLevel, strMessage):
if intLogLevel >= intMesLevel:
lstCmd = strLogger.split(' ')
lstCmd.append(strLogID + strMessage)
call(lstCmd)
def funARMAuth():
objPayload = { 'grant_type': 'client_credentials', 'client_id': strAppID, 'client_secret': strPass, 'resource': strMgmtURI }
try:
objAuthResp = requests.post(url=strTokenEP, data=objPayload)
dicAJSON = json.loads(objAuthResp.content)
if 'access_token' in dicAJSON.keys():
return dicAJSON['access_token']
except requests.exceptions.RequestException as e:
funLog(2, str(e))
return 'BearERROR'
def funCurState():
funLog(1, 'Current local state: ')
def funFailover():
funLog(1, 'Azure failover...')
def main():
if len(sys.argv) < 3:
funLog(1, 'Not enough arguments!')
sys.exit(clsExCodes.intArgs)
# Remove IPv6/IPv4 compatibility prefix (LTM passes addresses in IPv6 format)
strIP = sys.argv[1].strip(':f')
strPort = sys.argv[2]
# PID file
strPFile = '_'.join(['/var/run/', os.path.basename(sys.argv[0]), strIP, strPort + '.pid'])
# PID
strPID = str(os.getpid())
funLog(2, strPFile + ' ' + strPID)
# Kill the last instance of this monitor if hung
if os.path.isfile(strPFile):
try:
os.kill(int(file(strPFile, 'r').read()), SIGKILL)
funLog(1, 'Killed the last hung instance of this monitor.')
except OSError:
pass
# Record current PID
file(strPFile, 'w').write(str(os.getpid()))
# Health Monitor
try:
objResp = requests.head(''.join(['https://', strIP, ':', strPort]), verify = False)
if objResp.status_code == 200:
os.unlink(strPFile)
# Any standard output stops the script from running. Clean up any temporary files before the standard output operation
funLog(2, 'Peer: ' + strIP + ' is up.' )
print 'UP'
sys.exit()
except requests.exceptions.RequestException as e:
funLog(2, str(e))
# Peer down, ARM action needed
global strBearer
strBearer = funARMAuth()
funLog(2, 'ARM Bearer: ' + strBearer)
if strBearer == 'BearERROR':
funLog(1, 'ARM Auth Error!')
sys.exit(clsExCodes.intArmAuth)
funCurState()
funFailover()
os.unlink(strPFile)
sys.exit(1)
if __name__ == '__main__':
main()
| Python | 0.000001 |
43f502122ae617bae1c63d44692d590fbb5d53ab | fix weights | scripts/tester.py | scripts/tester.py | #!/usr/bin/env python
import itertools
import platform
import subprocess
import sys
import math
def normalize(x):
denom = sum(x)
return [e/denom for e in x]
def scale(x, a):
return [e * a for e in x]
def argcmp(x, comp, predicate):
idx = None
val = None
for i in xrange(len(x)):
if not predicate(x[i]):
continue
if idx is None or comp(x[i], val):
idx = i
val = x[i]
if idx is None:
# couldn't find it
raise Exception("no argmin satisfiying predicate")
return idx
def argmin(x, predicate):
return argcmp(x, lambda a, b: a < b, predicate)
def argmax(x, predicate):
return argcmp(x, lambda a, b: a > b, predicate)
def allocate(nworkers, weights):
approx = map(int, map(math.ceil, scale(weights, nworkers)))
diff = sum(approx) - nworkers
if diff > 0:
while diff > 0:
i = argmin(approx, predicate=lambda x: x > 0)
approx[i] -= 1
diff -= 1
elif diff < 0:
i = argmax(approx, lambda x: True)
approx[i] += -diff
acc = 0
ret = []
for x in approx:
ret.append(range(acc, acc + x))
acc += x
return ret
def run(cmd):
print >>sys.stderr, '[INFO] running command %s' % str(cmd)
p = subprocess.Popen(cmd, stdin=open('/dev/null', 'r'), stdout=subprocess.PIPE)
r = p.stdout.read()
p.wait()
return r
if __name__ == '__main__':
NCORES = [1, 2, 4, 8, 16, 24, 32]
WSET = [18]
node = platform.node()
if node == 'modis2':
LOGGERS = [
('data.log', 1.),
('/data/scidb/001/2/stephentu/data.log', 1.),
('/data/scidb/001/3/stephentu/data.log', 1.),
]
elif node == 'istc3':
LOGGERS = [
('data.log', 1./3.),
('/f0/stephentu/data.log', 2./3.),
]
else:
print "unknown node", node
assert False, "Unknown node!"
weights = normalize([x[1] for x in LOGGERS])
logfile_cmds = list(itertools.chain.from_iterable([['--logfile', f] for f, _ in LOGGERS]))
for ncores, ws in itertools.product(NCORES, WSET):
allocations = allocate(ncores, weights)
alloc_cmds = list(
itertools.chain.from_iterable([['--assignment', ','.join(map(str, alloc))] for alloc in allocations]))
cmd = ['./persist_test'] + \
logfile_cmds + \
alloc_cmds + \
['--num-threads', str(ncores),
'--strategy', 'epoch',
'--writeset', str(ws),
'--valuesize', '32']
output = run(cmd)
print output
| #!/usr/bin/env python
import itertools
import platform
import subprocess
import sys
import math
def normalize(x):
denom = sum(x)
return [e/denom for e in x]
def scale(x, a):
return [e * a for e in x]
def argcmp(x, comp, predicate):
idx = None
val = None
for i in xrange(len(x)):
if not predicate(x[i]):
continue
if idx is None or comp(x[i], val):
idx = i
val = x[i]
if idx is None:
# couldn't find it
raise Exception("no argmin satisfiying predicate")
return idx
def argmin(x, predicate):
return argcmp(x, lambda a, b: a < b, predicate)
def argmax(x, predicate):
return argcmp(x, lambda a, b: a > b, predicate)
def allocate(nworkers, weights):
approx = map(int, map(math.ceil, scale(weights, nworkers)))
diff = sum(approx) - nworkers
if diff > 0:
while diff > 0:
i = argmin(approx, predicate=lambda x: x > 0)
approx[i] -= 1
diff -= 1
elif diff < 0:
i = argmax(approx, lambda x: True)
approx[i] += -diff
acc = 0
ret = []
for x in approx:
ret.append(range(acc, acc + x))
acc += x
return ret
def run(cmd):
print >>sys.stderr, '[INFO] running command %s' % str(cmd)
p = subprocess.Popen(cmd, stdin=open('/dev/null', 'r'), stdout=subprocess.PIPE)
r = p.stdout.read()
p.wait()
return r
if __name__ == '__main__':
NCORES = [1, 2, 4, 8, 16, 24, 32]
WSET = [18]
node = platform.node()
if node == 'modis2':
LOGGERS = [
('data.log', 1.),
('/data/scidb/001/2/stephentu/data.log', 1.),
('/data/scidb/001/3/stephentu/data.log', 1.),
]
elif node == 'istc3':
LOGGERS = [
('data.log', 2./3.),
('/f0/stephentu/data.log', 1.),
]
else:
print "unknown node", node
assert False, "Unknown node!"
weights = normalize([x[1] for x in LOGGERS])
logfile_cmds = list(itertools.chain.from_iterable([['--logfile', f] for f, _ in LOGGERS]))
for ncores, ws in itertools.product(NCORES, WSET):
allocations = allocate(ncores, weights)
alloc_cmds = list(
itertools.chain.from_iterable([['--assignment', ','.join(map(str, alloc))] for alloc in allocations]))
cmd = ['./persist_test'] + \
logfile_cmds + \
alloc_cmds + \
['--num-threads', str(ncores),
'--strategy', 'epoch',
'--writeset', str(ws),
'--valuesize', '32']
output = run(cmd)
print output
| Python | 0.000006 |
2c6ccdacc2c4e54cf0a12618d60c963d9c67ef62 | Fix for DjangoCMS 3.5: get_cms_setting | djangocms_page_sitemap/settings.py | djangocms_page_sitemap/settings.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from cms.sitemaps import CMSSitemap
from cms.utils.conf import get_cms_setting
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
PAGE_SITEMAP_CHANGEFREQ_DEFAULT_LIST = {
'always': _('always'),
'hourly': _('hourly'),
'daily': _('daily'),
'weekly': _('weekly'),
'monthly': _('monthly'),
'yearly': _('yearly'),
'never': _('never'),
}
PAGE_SITEMAP_CHANGEFREQ_LIST = getattr(
settings, 'PAGE_SITEMAP_CHANGEFREQ_LIST', PAGE_SITEMAP_CHANGEFREQ_DEFAULT_LIST
)
PAGE_SITEMAP_DEFAULT_CHANGEFREQ = getattr(
settings, 'PAGE_SITEMAP_DEFAULT_CHANGEFREQ', CMSSitemap.changefreq
)
PAGE_SITEMAP_CACHE_DURATION = get_cms_setting('CACHE_DURATIONS')['menus']
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from cms.sitemaps import CMSSitemap
from cms.utils import get_cms_setting
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
PAGE_SITEMAP_CHANGEFREQ_DEFAULT_LIST = {
'always': _('always'),
'hourly': _('hourly'),
'daily': _('daily'),
'weekly': _('weekly'),
'monthly': _('monthly'),
'yearly': _('yearly'),
'never': _('never'),
}
PAGE_SITEMAP_CHANGEFREQ_LIST = getattr(
settings, 'PAGE_SITEMAP_CHANGEFREQ_LIST', PAGE_SITEMAP_CHANGEFREQ_DEFAULT_LIST
)
PAGE_SITEMAP_DEFAULT_CHANGEFREQ = getattr(
settings, 'PAGE_SITEMAP_DEFAULT_CHANGEFREQ', CMSSitemap.changefreq
)
PAGE_SITEMAP_CACHE_DURATION = get_cms_setting('CACHE_DURATIONS')['menus']
| Python | 0.00069 |
f603e8b394ea2b3ed9329b6948119970eb6aaa46 | add test for transition | lbworkflow/tests/test_transition.py | lbworkflow/tests/test_transition.py | from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from lbworkflow.core.transition import TransitionExecutor
from lbworkflow.views.helper import user_wf_info_as_dict
from .test_base import BaseTests
from .leave.models import Leave
User = get_user_model()
class TransitionExecutorTests(BaseTests):
def test_submit(self):
leave = self.leave
instance = self.leave.pinstance
leave.submit_process()
# A1 will auto agree
self.assertEqual(leave.pinstance.cur_activity.name, 'A2')
self.assertEqual(leave.pinstance.get_operators_display(), 'tom')
# A3 not auto agree
workitem = instance.get_todo_workitem()
transition = instance.get_agree_transition()
TransitionExecutor(self.users['tom'], instance, workitem, transition).execute()
self.assertEqual(leave.pinstance.cur_activity.name, 'A3')
class ViewTests(BaseTests):
def setUp(self):
super(ViewTests, self).setUp()
self.leave.submit_process()
leave = self.leave
ctx = user_wf_info_as_dict(leave, self.users['tom'])
transitions = ctx['transitions']
transition = transitions[0]
self.transition_url = transition.get_app_url(ctx['workitem'])
self.workitem = ctx['workitem']
self.client.login(username='tom', password='password')
def test_execute_transition(self):
resp = self.client.get(self.transition_url)
self.assertEqual(resp.status_code, 200)
def test_execute_transition(self):
resp = self.client.post(self.transition_url)
self.assertRedirects(resp, '/wf/todo/')
leave = Leave.objects.get(pk=self.leave.pk)
self.assertEqual('A3', leave.pinstance.cur_activity.name)
def test_simple_agree(self):
url = reverse('wf_agree')
resp = self.client.post('%s?wi_id=%s' % (url, self.workitem.pk))
self.assertRedirects(resp, '/wf/todo/')
leave = Leave.objects.get(pk=self.leave.pk)
self.assertEqual('A3', leave.pinstance.cur_activity.name)
| from django.contrib.auth import get_user_model
from lbworkflow.core.transition import TransitionExecutor
from lbworkflow.views.helper import user_wf_info_as_dict
from .test_base import BaseTests
User = get_user_model()
class TransitionExecutorTests(BaseTests):
def test_submit(self):
leave = self.leave
instance = self.leave.pinstance
leave.submit_process()
# A1 will auto agree
self.assertEqual(leave.pinstance.cur_activity.name, 'A2')
self.assertEqual(leave.pinstance.get_operators_display(), 'tom')
# A3 not auto agree
workitem = instance.get_todo_workitem()
transition = instance.get_agree_transition()
TransitionExecutor(self.users['tom'], instance, workitem, transition).execute()
self.assertEqual(leave.pinstance.cur_activity.name, 'A3')
class ViewTests(BaseTests):
def setUp(self):
super(ViewTests, self).setUp()
self.leave.submit_process()
def test_execute_transition(self):
self.client.login(username='tom', password='password')
leave = self.leave
ctx = user_wf_info_as_dict(leave, self.users['tom'])
transitions = ctx['transitions']
transition = transitions[0]
url = transition.get_app_url(ctx['workitem'])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
| Python | 0 |
51d8d354f1a75b83becad880eec7cbac86d52e74 | Convert test to pytest syntax | tests/test_generate_files.py | tests/test_generate_files.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_generate_files
-------------------
Test formerly known from a unittest residing in test_generate.py named
TestGenerateFiles.test_generate_files_nontemplated_exception
TestGenerateFiles.test_generate_files
"""
from __future__ import unicode_literals
import os
import io
import pytest
from cookiecutter import generate
from cookiecutter import exceptions
from cookiecutter import utils
@pytest.fixture(scope="function")
def clean_system_remove_additional_folders(request, clean_system):
"""
Use the global clean_system fixture and run additional teardown code to
remove some special folders.
For a better understanding - order of fixture calls:
clean_system setup code
clean_system_remove_additional_folders setup code
clean_system_remove_additional_folders teardown code
clean_system teardown code
"""
def remove_additional_folders():
if os.path.exists('inputpizzä'):
utils.rmtree('inputpizzä')
if os.path.exists('inputgreen'):
utils.rmtree('inputgreen')
if os.path.exists('inputbinary_files'):
utils.rmtree('inputbinary_files')
if os.path.exists('tests/custom_output_dir'):
utils.rmtree('tests/custom_output_dir')
if os.path.exists('inputpermissions'):
utils.rmtree('inputpermissions')
request.addfinalizer(remove_additional_folders)
@pytest.mark.usefixtures("clean_system_remove_additional_folders")
def test_generate_files_nontemplated_exception():
with pytest.raises(exceptions.NonTemplatedInputDirException):
generate.generate_files(
context={
'cookiecutter': {'food': 'pizza'}
},
repo_dir='tests/test-generate-files-nontemplated'
)
@pytest.mark.usefixtures("clean_system_remove_additional_folders")
def test_generate_files():
generate.generate_files(
context={
'cookiecutter': {'food': 'pizzä'}
},
repo_dir='tests/test-generate-files'
)
simple_file = 'inputpizzä/simple.txt'
assert os.path.isfile(simple_file)
simple_text = io.open(simple_file, 'rt', encoding='utf-8').read()
assert simple_text == u'I eat pizzä'
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_generate_files
-------------------
Test formerly known from a unittest residing in test_generate.py named
TestGenerateFiles.test_generate_files_nontemplated_exception
TestGenerateFiles.test_generate_files
"""
from __future__ import unicode_literals
import os
import io
import pytest
from cookiecutter import generate
from cookiecutter import exceptions
from cookiecutter import utils
@pytest.fixture(scope="function")
def clean_system_remove_additional_folders(request, clean_system):
"""
Use the global clean_system fixture and run additional teardown code to
remove some special folders.
For a better understanding - order of fixture calls:
clean_system setup code
clean_system_remove_additional_folders setup code
clean_system_remove_additional_folders teardown code
clean_system teardown code
"""
def remove_additional_folders():
if os.path.exists('inputpizzä'):
utils.rmtree('inputpizzä')
if os.path.exists('inputgreen'):
utils.rmtree('inputgreen')
if os.path.exists('inputbinary_files'):
utils.rmtree('inputbinary_files')
if os.path.exists('tests/custom_output_dir'):
utils.rmtree('tests/custom_output_dir')
if os.path.exists('inputpermissions'):
utils.rmtree('inputpermissions')
request.addfinalizer(remove_additional_folders)
@pytest.mark.usefixtures("clean_system_remove_additional_folders")
def test_generate_files_nontemplated_exception():
with pytest.raises(exceptions.NonTemplatedInputDirException):
generate.generate_files(
context={'cookiecutter': {'food': 'pizza'}},
repo_dir='tests/test-generate-files-nontemplated'
)
def test_generate_files(self):
generate.generate_files(
context={
'cookiecutter': {'food': 'pizzä'}
},
repo_dir='tests/test-generate-files'
)
self.assertTrue(os.path.isfile('inputpizzä/simple.txt'))
simple_text = io.open('inputpizzä/simple.txt', 'rt', encoding='utf-8').read()
self.assertEqual(simple_text, u'I eat pizzä')
| Python | 0.999999 |
c4963df740e82d476500d2d998b288d0213806ee | Allow searching in the authorization code admin. | src/commoner/promocodes/admin.py | src/commoner/promocodes/admin.py | from django.contrib import admin
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.forms.widgets import HiddenInput
from commoner.promocodes.models import PromoCode
class PromoCodeAdminForm(forms.ModelForm):
code = forms.CharField(initial='', widget=HiddenInput())
send_email = forms.BooleanField(label=_(u'Send invitation letter?'), required=False)
def __init__(self, *args, **kwargs):
# if not done here, unique_code_string is only loaded when admin is bootstrapped
if 'instance' not in kwargs:
kwargs['initial'] = {'code': PromoCode.objects.unique_code_string()}
super(PromoCodeAdminForm, self).__init__(*args, **kwargs)
def save(self, force_insert=False, force_update=False, commit=True):
code = super(PromoCodeAdminForm, self).save(commit)
if self.cleaned_data['send_email']:
PromoCode.objects.send_invite_letter(code)
return code
class Meta:
model = PromoCode
class PromoCodeAdmin(admin.ModelAdmin):
form = PromoCodeAdminForm
list_display = ('recipient', 'code', 'created', 'used')
fields = ('code', 'recipient', 'expires', 'transaction_id', 'contribution_id', 'send_email',)
ordering = ('-created',)
search_fields = ('recipient', 'transaction_id', 'contribution_id',)
# get the pretty admin boolean icons, still no filter abilities
def used(self, object):
return object.used
used.short_description = _(u'Redeemed code')
used.boolean = True
admin.site.register(PromoCode, PromoCodeAdmin)
| from django.contrib import admin
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.forms.widgets import HiddenInput
from commoner.promocodes.models import PromoCode
class PromoCodeAdminForm(forms.ModelForm):
code = forms.CharField(initial='', widget=HiddenInput())
send_email = forms.BooleanField(label=_(u'Send invitation letter?'), required=False)
def __init__(self, *args, **kwargs):
# if not done here, unique_code_string is only loaded when admin is bootstrapped
if 'instance' not in kwargs:
kwargs['initial'] = {'code': PromoCode.objects.unique_code_string()}
super(PromoCodeAdminForm, self).__init__(*args, **kwargs)
def save(self, force_insert=False, force_update=False, commit=True):
code = super(PromoCodeAdminForm, self).save(commit)
if self.cleaned_data['send_email']:
PromoCode.objects.send_invite_letter(code)
return code
class Meta:
model = PromoCode
class PromoCodeAdmin(admin.ModelAdmin):
form = PromoCodeAdminForm
list_display = ('recipient', 'code', 'created', 'used')
fields = ('code', 'recipient', 'expires', 'transaction_id', 'contribution_id', 'send_email',)
ordering = ('-created',)
# get the pretty admin boolean icons, still no filter abilities
def used(self, object):
return object.used
used.short_description = _(u'Redeemed code')
used.boolean = True
admin.site.register(PromoCode, PromoCodeAdmin)
| Python | 0 |
1037524e5dbeb4482beb41b2d951690c77ce2316 | Fix invalid syntax | lowfat/management/commands/load2018applications.py | lowfat/management/commands/load2018applications.py | import pandas as pd
from django.contrib.auth.models import User, BaseUserManager
from django.core.management.base import BaseCommand
from lowfat.models import Claimant
class Command(BaseCommand):
help = "Import CSV with 2018 applications."
def add_arguments(self, parser):
parser.add_argument('csv', nargs='?', default='2018.csv')
# pylint: disable=too-many-branches,too-many-locals
def handle(self, *args, **options):
fail_list = []
success_list = []
user_manager = BaseUserManager()
data = pd.read_csv(options['csv'])
for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable
try:
received_offer = line['Fellow'] == 'Yes'
jacs = line["Research Classification"][1:3]
applicants_dict = {
"application_year": 2017,
"fellow": False,
"received_offer": received_offer,
"forenames": line["First name"],
"surname": line["Surname"],
"affiliation": line["Home institution"],
"department": line["Department"] if pd.notnull(line["Department"]) else "",
"group": line["Group within Department (if any)"] if pd.notnull(line["Group within Department (if any)"]) else "",
"career_stage_when_apply": line["Career stage"][6],
"job_title_when_apply": line["Job Title"],
"research_area": line["Area of work"],
"research_area_code": jacs,
"email": line["Email Address"],
"phone": line["Telephone number"],
"gender": line["Gender"][0] if pd.notnull(line["Gender"]) else 'R',
"home_country": "GB",
"home_city": "Unknow",
"funding": line["Which primary funding body/charity/organisation would you normally turn to if seeking financial support for your research/work?"],
"funding_notes": line["Any additional funders?"] if pd.notnull(line["Any additional funders?"]) else "",
"claimantship_grant": 3000 if received_offer else 0,
"institutional_website": line["Institutional web page"] if pd.notnull(line["Institutional web page"]) else "",
"website": line["Personal web page"] if pd.notnull(line["Personal web page"]) else "",
"orcid": line["ORCID"] if pd.notnull(line["ORCID"]) else "",
"google_scholar": line["Google Scholar"] if pd.notnull(line["Google Scholar"]) else "",
"github": line["GitHub"] if pd.notnull(line["GitHub"]) else "",
"gitlab": line["GitLab"] if pd.notnull(line["GitLab"]) else "",
"twitter": line["Twitter handle"] if pd.notnull(line["Twitter handle"]) else "",
"is_into_training": line["Have training in plans - added by AN"] == "Yes",
"carpentries_instructor": line["Carpentry instructor - added by AN"] == "Yes",
"research_software_engineer": line["RSE - added by AN"] == "Yes",
"screencast_url": line["Application Screencast URL"] if pd.notnull(line["Application Screencast URL"]) else "",
"example_of_writing_url": line["Example of writing"] if pd.notnull(line["Example of writing"]) else "",
}
applicant = Claimant(**applicants_dict)
applicant.save()
success_list.append(index)
if received_offer:
new_user = User.objects.create_user(
username=applicant.slug,
email=applicant.email,
password=user_manager.make_random_password(),
first_name=line["First name"],
last_name=line["Surname"]
)
applicant.user = new_user
applicant.save()
except BaseException as exception:
print("Error: {}\n{}\n{}".format(exception, line, 80 * "-"))
fail_list.append(index)
print(80 * "-")
print("Success: {}".format(success_list))
print("Fail: {}".format(fail_list))
| import pandas as pd
from django.contrib.auth.models import User, BaseUserManager
from django.core.management.base import BaseCommand
from lowfat.models import Claimant
class Command(BaseCommand):
help = "Import CSV with 2018 applications."
def add_arguments(self, parser):
parser.add_argument('csv', nargs='?', default='2018.csv')
# pylint: disable=too-many-branches,too-many-locals
def handle(self, *args, **options):
fail_list = []
success_list = []
user_manager = BaseUserManager()
data = pd.read_csv(options['csv'])
for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable
try:
received_offer = line['Fellow'] == 'Yes'
jacs = line["Research Classification"][1:3]
applicants_dict = {
"application_year": 2017,
"fellow": False,
"received_offer": received_offer,
"forenames": line["First name"],
"surname": line["Surname"],
"affiliation": line["Home institution"],
"department": line["Department"] if pd.notnull(line["Department"]) else "",
"group": line["Group within Department (if any)"] if pd.notnull(line["Group within Department (if any)"]) else "",
"career_stage_when_apply": line["Career stage"][6],
"job_title_when_apply": line["Job Title"],
"research_area": line["Area of work"],
"research_area_code": jacs,
"email": line["Email Address"],
"phone": line["Telephone number"],
"gender": line["Gender"][0] if pd.notnull(line["Gender"]) else 'R',
"home_country": "GB",
"home_city": "Unknow",
"funding": line["Which primary funding body/charity/organisation would you normally turn to if seeking financial support for your research/work?"],
"funding_notes": line["Any additional funders?"] if pd.notnull(line["Any additional funders?"]) else "",
"claimantship_grant": 3000 if received_offer else 0,
"institutional_website": line["Institutional web page"] if pd.notnull(line["Institutional web page"]) else "",
"website": line["Personal web page"] if pd.notnull(line["Personal web page"]) else "",
"orcid": line["ORCID"] if pd.notnull(line["ORCID"]) else "",
"google_scholar": line["Google Scholar"] if pd.notnull(line["Google Scholar"]) else "",
"github": line["GitHub"] if pd.notnull(line["GitHub"]) else "",
"gitlab": line["GitLab"] if pd.notnull(line["GitLab"]) else "",
"twitter": line["Twitter handle"] if pd.notnull(line["Twitter handle"]) else "",
"is_into_training": line["Have training in plans - added by AN"] == "Yes",
"carpentries_instructor": line["Carpentry instructor - added by AN"] == "Yes",
"research_software_engineer": if line["RSE - added by AN"] == "Yes",
"screencast_url": line["Application Screencast URL"] if pd.notnull(line["Application Screencast URL"]) else "",
"example_of_writing_url": line["Example of writing"] if pd.notnull(line["Example of writing"]) else "",
}
applicant = Claimant(**applicants_dict)
applicant.save()
success_list.append(index)
if received_offer:
new_user = User.objects.create_user(
username=applicant.slug,
email=applicant.email,
password=user_manager.make_random_password(),
first_name=line["First name"],
last_name=line["Surname"]
)
applicant.user = new_user
applicant.save()
except BaseException as exception:
print("Error: {}\n{}\n{}".format(exception, line, 80 * "-"))
fail_list.append(index)
print(80 * "-")
print("Success: {}".format(success_list))
print("Fail: {}".format(fail_list))
| Python | 0.999586 |
07764dba867b7da57e4134237aeaf65429b8a0ef | Fix problem with nan for social ID | lowfat/management/commands/load2018applications.py | lowfat/management/commands/load2018applications.py | import pandas as pd
from django.contrib.auth.models import User, BaseUserManager
from django.core.management.base import BaseCommand
from lowfat.models import Claimant
class Command(BaseCommand):
help = "Import CSV with 2018 applications."
def add_arguments(self, parser):
parser.add_argument('csv', nargs='?', default='2018.csv')
def handle(self, *args, **options):
fail_list = []
success_list = []
user_manager = BaseUserManager()
data = pd.read_csv(options['csv'])
for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable
try:
received_offer = True if line['Fellow'] == 'Yes' else False
jacs = line["Research Classification"][1:3]
applicants_dict = {
"application_year": 2017,
"selected": False,
"received_offer": received_offer,
"forenames": line["First name"],
"surname": line["Surname"],
"affiliation": line["Home institution"],
"department": line["Department"],
"group": line["Group within Department (if any)"],
"career_stage_when_apply": line["Career stage"][6],
"job_title_when_apply": line["Job Title"],
"research_area": line["Area of work"],
"research_area_code": jacs,
"email": line["Email Address"],
"phone": line["Telephone number"],
"gender": line["Gender"][0] if pd.notnull(line["Gender"]) else 'R',
"home_country": "GB",
"home_city": "Unknow",
"funding": line["Which primary funding body/charity/organisation would you normally turn to if seeking financial support for your research/work?"],
"funding_notes": line["Any additional funders?"] if pd.notnull(line["Any additional funders?"]) else "",
"claimantship_grant": 3000 if received_offer else 0,
"institutional_website": line["Institutional web page"] if pd.notnull(line["Institutional web page"]) else "",
"website": line["Personal web page"] if pd.notnull(line["Personal web page"]) else "",
"orcid": line["ORCID"] if pd.notnull(line["ORCID"]) else "",
"google_scholar": line["Google Scholar"] if pd.notnull(line["Google Scholar"]) else "",
"github": line["GitHub"] if pd.notnull(line["GitHub"]) else "",
"gitlab": line["GitLab"] if pd.notnull(line["GitLab"]) else "",
"twitter": line["Twitter handle"] if pd.notnull(line["Twitter handle"]) else "",
"is_into_training": True if line["Have training in plans - added by AN"] == "Yes" else False,
"carpentries_instructor": True if line["Carpentry instructor - added by AN"] == "Yes" else False,
"research_software_engineer": True if line["RSE - added by AN"] == "Yes" else False,
"screencast_url": line["Application Screencast URL"] if pd.notnull(line["Application Screencast URL"]) else "",
"example_of_writing_url": line["Example of writing"] if pd.notnull(line["Example of writing"]) else "",
}
applicant = Claimant(**applicants_dict)
applicant.save()
success_list.append(index)
if received_offer:
new_user = User.objects.create_user(
username=applicant.slug,
email=applicant.email,
password=user_manager.make_random_password(),
first_name=line["First name"],
last_name=line["Surname"]
)
applicant.user = new_user
applicant.save()
except BaseException as exception:
print("Error: {}\n{}\n{}".format(exception, line, 80 * "-"))
fail_list.append(index)
print(80 * "-")
print("Success: {}".format(success_list))
print("Fail: {}".format(fail_list))
| import pandas as pd
from django.contrib.auth.models import User, BaseUserManager
from django.core.management.base import BaseCommand
from lowfat.models import Claimant
class Command(BaseCommand):
help = "Import CSV with 2018 applications."
def add_arguments(self, parser):
parser.add_argument('csv', nargs='?', default='2018.csv')
def handle(self, *args, **options):
fail_list = []
success_list = []
user_manager = BaseUserManager()
data = pd.read_csv(options['csv'])
for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable
try:
received_offer = True if line['Fellow'] == 'Yes' else False
jacs = line["Research Classification"][1:3]
applicants_dict = {
"application_year": 2017,
"selected": False,
"received_offer": received_offer,
"forenames": line["First name"],
"surname": line["Surname"],
"affiliation": line["Home institution"],
"department": line["Department"],
"group": line["Group within Department (if any)"],
"career_stage_when_apply": line["Career stage"][6],
"job_title_when_apply": line["Job Title"],
"research_area": line["Area of work"],
"research_area_code": jacs,
"email": line["Email Address"],
"phone": line["Telephone number"],
"gender": line["Gender"][0] if pd.notnull(line["Gender"]) else 'R',
"home_country": "GB",
"home_city": "Unknow",
"funding": line["Which primary funding body/charity/organisation would you normally turn to if seeking financial support for your research/work?"],
"funding_notes": line["Any additional funders?"] if pd.notnull(line["Any additional funders?"]) else "",
"claimantship_grant": 3000 if received_offer else 0,
"institutional_website": line["Institutional web page"] if pd.notnull(line["Institutional web page"]) else "",
"website": line["Personal web page"] if pd.notnull(line["Personal web page"]) else "",
"orcid": line["ORCID"],
"google_scholar": line["Google Scholar"],
"github": line["GitHub"],
"gitlab": line["GitLab"],
"twitter": line["Twitter handle"],
"is_into_training": True if line["Have training in plans - added by AN"] == "Yes" else False,
"carpentries_instructor": True if line["Carpentry instructor - added by AN"] == "Yes" else False,
"research_software_engineer": True if line["RSE - added by AN"] == "Yes" else False,
"screencast_url": line["Application Screencast URL"] if pd.notnull(line["Application Screencast URL"]) else "",
"example_of_writing_url": line["Example of writing"] if pd.notnull(line["Example of writing"]) else "",
}
applicant = Claimant(**applicants_dict)
applicant.save()
success_list.append(index)
if received_offer:
new_user = User.objects.create_user(
username=applicant.slug,
email=applicant.email,
password=user_manager.make_random_password(),
first_name=line["First name"],
last_name=line["Surname"]
)
applicant.user = new_user
applicant.save()
except BaseException as exception:
print("Error: {}\n{}\n{}".format(exception, line, 80 * "-"))
fail_list.append(index)
print(80 * "-")
print("Success: {}".format(success_list))
print("Fail: {}".format(fail_list))
| Python | 0.00012 |
442f6c9eae5c64c3438f89c2968b0343c1f4ed6e | Revise script docstring | alg_find_peak_1D.py | alg_find_peak_1D.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
"""Find a peak in 1D array.
Support a is an array of length n.
If a is an array of length 1, a[0] is a peak.
In general, a[k] is a peak iff a[k] >= a[k - 1] and a[k] >= a[k + 1].
If a[0] >= a[1], then a[0] is a peak.
If a[n - 1] >= a[n - 2], then a[n - 1] is a peak.
"""
def find_peak_naive(arr):
"""Find peak by naive iteration.
Time complexity: O(n).
"""
for i in range(len(arr)):
if i == 0:
if arr[i] >= arr[i + 1]:
return arr[i]
elif i == (len(arr) - 1):
if arr[i] >= arr[i - 1]:
return arr[i]
else:
if arr[i] >= arr[i - 1] and arr[i] >= arr[i + 1]:
return arr[i]
def find_peak(arr):
"""Find peak by divide-end-conquer algorithm.
Time complexity: O(logn).
"""
if len(arr) == 1:
return arr[0]
else:
mid = len(arr) // 2
if arr[mid] <= arr[mid - 1]:
return find_peak(arr[:mid-1])
elif arr[mid] <= arr[mid + 1]:
return find_peak(arr[mid+1:])
else:
return arr[mid]
def main():
import time
# Array of length 5 with peak 4.
arr = [0, 1, 4, 3, 2]
time_start = time.time()
peak = find_peak_naive(arr)
time_run = time.time() - time_start
print('Peak: {}'.format(peak))
print('Time for find_peak_naive(): {}'.format(time_run))
time_start = time.time()
peak = find_peak(arr)
time_run = time.time() - time_start
print('Peak: {}'.format(peak))
print('Time for find_peak_naive(): {}'.format(time_run))
# Array of long length.
arr = np.random.permutation(10000000)
time_start = time.time()
peak = find_peak_naive(arr)
time_run = time.time() - time_start
print('Peak: {}'.format(peak))
print('Time for find_peak_naive(): {}'.format(time_run))
time_start = time.time()
peak = find_peak(arr)
time_run = time.time() - time_start
print('Peak: {}'.format(peak))
print('Time for find_peak_naive(): {}'.format(time_run))
if __name__ == '__main__':
main()
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
"""Find a peak in 1D array.
Support a is an array of length n.
If a is an array of length 1, a[0] is a peak.
In general k, a[k] is a peak iff a[k] >= a[k - 1] and a[k] >= a[k + 1].
If a[0] >= a[1], then a[0] is a peak.
If a[n - 1] >= a[n - 2], then a[n - 1] is a peak.
"""
def find_peak_naive(arr):
"""Find peak by naive iteration.
Time complexity: O(n).
"""
for i in range(len(arr)):
if i == 0:
if arr[i] >= arr[i + 1]:
return arr[i]
elif i == (len(arr) - 1):
if arr[i] >= arr[i - 1]:
return arr[i]
else:
if arr[i] >= arr[i - 1] and arr[i] >= arr[i + 1]:
return arr[i]
def find_peak(arr):
"""Find peak by divide-end-conquer algorithm.
Time complexity: O(logn).
"""
if len(arr) == 1:
return arr[0]
else:
mid = len(arr) // 2
if arr[mid] <= arr[mid - 1]:
return find_peak(arr[:mid-1])
elif arr[mid] <= arr[mid + 1]:
return find_peak(arr[mid+1:])
else:
return arr[mid]
def main():
import time
# Array of length 5 with peak 4.
arr = [0, 1, 4, 3, 2]
time_start = time.time()
peak = find_peak_naive(arr)
time_run = time.time() - time_start
print('Peak: {}'.format(peak))
print('Time for find_peak_naive(): {}'.format(time_run))
time_start = time.time()
peak = find_peak(arr)
time_run = time.time() - time_start
print('Peak: {}'.format(peak))
print('Time for find_peak_naive(): {}'.format(time_run))
# Array of long length.
arr = np.random.permutation(10000000)
time_start = time.time()
peak = find_peak_naive(arr)
time_run = time.time() - time_start
print('Peak: {}'.format(peak))
print('Time for find_peak_naive(): {}'.format(time_run))
time_start = time.time()
peak = find_peak(arr)
time_run = time.time() - time_start
print('Peak: {}'.format(peak))
print('Time for find_peak_naive(): {}'.format(time_run))
if __name__ == '__main__':
main()
| Python | 0.000002 |
545f04982267a34daaacc3afb94cd50db3821550 | Update ghost.py | home/Humanoid/ghost.py | home/Humanoid/ghost.py | ###################################################
# This is a basic script to carry on a conversation
# with ghost
###################################################
# create service
ghost = Runtime.start("ghost", "WebGui")
ear = Runtime.start("ear", "WebkitSpeechRecognition")
ghostchat = Runtime.start("ghostchat", "ProgramAB")
htmlfilter = Runtime.start("htmlfilter", "HtmlFilter")
mouth = Runtime.start("mouth", "NaturalReaderSpeech")
# creating the connections and routes
# - I'll need to check on these - might
# need to just "attach" some services together
ear.addTextListener(ghostchat)
ghostchat.addTextListener(htmlfilter)
htmlfilter.addTextListener(mouth)
# start a chatbot session
ghostchat.startSession("ProgramAB/bots", "ghostchat")
voices = mouth.getVoices()
# I've also tried removing this because I got an iteration error for this line
# for voice in voices:
# NaturalReaderSpeech.setVoice("Ryan")
| ###################################################
# This is a basic script to carry on a conversation
# with ghost
###################################################
# create service
ghost = Runtime.start("ghost", "WebGui")
ear = Runtime.start("ear", "WebkitSpeechRecognition")
ghostchat = Runtime.start("ghostchat", "ProgramAB")
htmlfilter = Runtime.start("htmlfilter", "HtmlFilter")
mouth = Runtime.start("mouth", "NaturalReaderSpeech")
# start a chatbot session
ghostchat.startSession("ProgramAB/bots", "ghostchat")
voices = mouth.getVoices()
# I've also tried removing this because I got an iteration error for this line
# for voice in voices:
# NaturalReaderSpeech.setVoice("Ryan")
# - I'll need to check on these - might
# need to just "attach" some services together
ear.addTextListener(ghostchat)
ghostchat.addTextListener(htmlfilter)
htmlfilter.addTextListener(mouth)
| Python | 0.000001 |
64938b5bb185f7f38716c166a2aa59a0713bc989 | fix for sqlite test db | tests/runtests.py | tests/runtests.py | """
Test support harness for doing setup.py test.
See http://ericholscher.com/blog/2009/jun/29/enable-setuppy-test-your-django-apps/.
"""
import sys
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.test_settings'
# Bootstrap Django's settings.
from django.conf import settings
settings.DATABASES = {
'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'}
}
settings.TEST_RUNNER = "django_nose.NoseTestSuiteRunner"
settings.NOSE_PLUGINS = ['tests.noseplugins.TestDiscoveryPlugin']
def runtests():
"""Test runner for setup.py test."""
# Run you some tests.
import django.test.utils
runner_class = django.test.utils.get_runner(settings)
test_runner = runner_class(verbosity=1, interactive=True)
failures = test_runner.run_tests(['hyperadmin'])
# Okay, so this is a nasty hack. If this isn't here, `setup.py test` craps out
# when generating a coverage report via Nose. I have no idea why, or what's
# supposed to be going on here, but this seems to fix the problem, and I
# *really* want coverage, so, unless someone can tell me *why* I shouldn't
# do this, I'm going to just whistle innocently and keep on doing this.
sys.exitfunc = lambda: 0
sys.exit(failures)
| """
Test support harness for doing setup.py test.
See http://ericholscher.com/blog/2009/jun/29/enable-setuppy-test-your-django-apps/.
"""
import sys
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.test_settings'
# Bootstrap Django's settings.
from django.conf import settings
settings.DATABASES = {
'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory;'}
}
settings.TEST_RUNNER = "django_nose.NoseTestSuiteRunner"
settings.NOSE_PLUGINS = ['tests.noseplugins.TestDiscoveryPlugin']
def runtests():
"""Test runner for setup.py test."""
# Run you some tests.
import django.test.utils
runner_class = django.test.utils.get_runner(settings)
test_runner = runner_class(verbosity=1, interactive=True)
failures = test_runner.run_tests(['hyperadmin'])
# Okay, so this is a nasty hack. If this isn't here, `setup.py test` craps out
# when generating a coverage report via Nose. I have no idea why, or what's
# supposed to be going on here, but this seems to fix the problem, and I
# *really* want coverage, so, unless someone can tell me *why* I shouldn't
# do this, I'm going to just whistle innocently and keep on doing this.
sys.exitfunc = lambda: 0
sys.exit(failures)
| Python | 0 |
05f28064187c56d70d8f50c920676b81b7eb9f32 | make test run faster | bdot/tests/test_carray.py | bdot/tests/test_carray.py | import nose
import bdot
import bcolz
import numpy as np
from numpy.testing import assert_array_equal
def test_dot_int64():
matrix = np.random.random_integers(0, 12000, size=(30000, 100))
bcarray = bdot.carray(matrix, chunklen=2**13, cparams=bcolz.cparams(clevel=2))
v = bcarray[0]
result = bcarray.dot(v)
expected = matrix.dot(v)
assert_array_equal(expected, result) | import nose
import bdot
import bcolz
import numpy as np
from numpy.testing import assert_array_equal
def test_dot_int64():
matrix = np.random.random_integers(0, 12000, size=(300000, 100))
bcarray = bdot.carray(matrix, chunklen=2**13, cparams=bcolz.cparams(clevel=2))
v = bcarray[0]
result = bcarray.dot(v)
expected = matrix.dot(v)
assert_array_equal(expected, result) | Python | 0.000003 |
5e2f393238d976e576b390b668c7ce2f13a1e0c1 | Update to use Py3 print() (#1142) | example/scripts/add-line.py | example/scripts/add-line.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from __future__ import print_function
import sys
import getopt
import re
def findLine(pattern, fp):
line = fp.readline()
line_number = 1
while line:
#print("Line {}: {}".format(line_number, line.strip()))
if pattern in line:
return line_number
line = fp.readline()
line_number += 1
return -1
def insertBefore(filename, pattern, text):
with open(filename, 'r+') as fp:
line_number = findLine(pattern, fp)
if(line_number > 0):
print('Insert', text,'to line', line_number)
fp.seek(0)
lines = fp.readlines()
fp.seek(0)
lines.insert(line_number - 1, text + '\n')
fp.writelines(lines)
return
print('pattern',text,'not found!')
def replaceText(filename, pattern, text):
with open(filename, 'r') as fp:
lines = fp.read()
fp.close()
lines = (re.sub(pattern, text, lines))
print('Replace', pattern ,'to', text)
fp = open(filename, 'w')
fp.write(lines)
fp.close()
def main(argv):
inputfile = ''
string = ''
text = ''
replace = False
try:
opts, args = getopt.getopt(argv, "hi:s:t:r")
except getopt.GetoptError:
print('add-line.py -i <inputfile> -s <string> -t <text>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('add-line.py -i <inputfile> -s <string> -t <text>')
sys.exit()
elif opt in ("-i"):
inputfile = arg
elif opt in ("-s"):
string = arg
elif opt in ("-t"):
text = arg
elif opt in ("-r"):
replace = True
if(replace):
replaceText(inputfile, string, text)
else:
insertBefore(inputfile, string, text)
if __name__ == "__main__":
main(sys.argv[1:])
| #!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
import getopt
import re
def findLine(pattern, fp):
line = fp.readline()
line_number = 1
while line:
#print("Line {}: {}".format(line_number, line.strip()))
if pattern in line:
return line_number
line = fp.readline()
line_number += 1
return -1
def insertBefore(filename, pattern, text):
with open(filename, 'r+') as fp:
line_number = findLine(pattern, fp)
if(line_number > 0):
print 'Insert', text,'to line', line_number
fp.seek(0)
lines = fp.readlines()
fp.seek(0)
lines.insert(line_number - 1, text + '\n')
fp.writelines(lines)
return
print 'pattern',text,'not found!'
def replaceText(filename, pattern, text):
with open(filename, 'r') as fp:
lines = fp.read()
fp.close()
lines = (re.sub(pattern, text, lines))
print 'Replace', pattern ,'to', text
fp = open(filename, 'w')
fp.write(lines)
fp.close()
def main(argv):
inputfile = ''
string = ''
text = ''
replace = False
try:
opts, args = getopt.getopt(argv, "hi:s:t:r")
except getopt.GetoptError:
print 'add-line.py -i <inputfile> -s <string> -t <text>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'add-line.py -i <inputfile> -s <string> -t <text>'
sys.exit()
elif opt in ("-i"):
inputfile = arg
elif opt in ("-s"):
string = arg
elif opt in ("-t"):
text = arg
elif opt in ("-r"):
replace = True
if(replace):
replaceText(inputfile, string, text)
else:
insertBefore(inputfile, string, text)
if __name__ == "__main__":
main(sys.argv[1:])
| Python | 0.9995 |
06cb55639d2bc504d0ec1b9fb073c40e00751328 | Disable output example_pic.png if exists | doc/sample_code/demo_plot_state.py | doc/sample_code/demo_plot_state.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from pyogi.board import Board
from pyogi.plot import plot_board
if __name__ == '__main__':
board = Board()
board.set_initial_state()
board.players = ['先手', '後手']
board.move('+7776FU')
board.move('-3334FU')
board.move('+2868HI')
board.move('-2288UM')
board.move('+7988GI')
# Plot by materials
savepath = 'example_pic.png'
if os.path.exists(savepath):
savepath = None
plot_board(board, savepath=savepath, mode='pic')
# Plot using matplotlib
board.plot_state_mpl(figsize=(8, 9))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyogi.board import Board
from pyogi.plot import plot_board
if __name__ == '__main__':
board = Board()
board.set_initial_state()
board.players = ['先手', '後手']
board.move('+7776FU')
board.move('-3334FU')
board.move('+2868HI')
board.move('-2288UM')
board.move('+7988GI')
# Plot by materials
plot_board(board, savepath='example_pic.png', mode='pic')
# Plot using matplotlib
board.plot_state_mpl(figsize=(8, 9))
| Python | 0.999949 |
75f8a41c00e06f52102bf5f87a093d4ffef34f97 | simplify the saving/loading of the lists | lib/maintain_lists_of_entries.py | lib/maintain_lists_of_entries.py | from .load_data_from_file import load_data_from_file
from .save_data import save_data
from .paths import mappings_path
import json
import os
def maintain_lists_of_entries(all_courses):
data_sets = {
'departments': set(),
'instructors': set(),
'times': set(),
'locations': set(),
'gereqs': set(),
'types': set(),
}
for key in data_sets:
filename = os.path.join(mappings_path, 'valid_%s.json' % key)
data = load_data_from_file(filename)
data_sets[key] = set(json.loads(data))
for course in all_courses:
data_sets['departments'].update(course.get('depts', []))
data_sets['instructors'].update(course.get('instructors', []))
data_sets['times'].update(course.get('times', []))
data_sets['locations'].update(course.get('places', []))
data_sets['gereqs'].update(course.get('gereqs', []))
data_sets['types'].add(course.get('type', ''))
for key in data_sets:
data_sets[key] = sorted(data_sets[key])
for key, data in data_sets.items():
filename = os.path.join(mappings_path, 'valid_%s.json' % key)
json_data = json.dumps(data, indent='\t', separators=(',', ': '))
save_data(json_data + '\n', filename)
| from .load_data_from_file import load_data_from_file
from .save_data import save_data
from .paths import mappings_path
import json
import os
def maintain_lists_of_entries(all_courses):
data_sets = {
'departments': set(),
'instructors': set(),
'times': set(),
'locations': set(),
'gereqs': set(),
'types': set(),
}
for key in data_sets:
filename = os.path.join(mappings_path, 'valid_%s.json' % key)
data = load_data_from_file(filename)
data_sets[key] = set(json.loads(data)[key])
for course in all_courses:
data_sets['departments'].update(course.get('depts', []))
data_sets['instructors'].update(course.get('instructors', []))
data_sets['times'].update(course.get('times', []))
data_sets['locations'].update(course.get('places', []))
data_sets['gereqs'].update(course.get('gereqs', []))
data_sets['types'].add(course.get('type', ''))
for key in data_sets:
data_sets[key] = sorted(data_sets[key])
for key, data in data_sets.items():
filename = os.path.join(mappings_path, 'valid_%s.json' % key)
json_data = json.dumps({key: data},
indent='\t', separators=(',', ': '))
save_data(json_data, filename)
| Python | 0.000237 |
298f016de19ece68f3574400af77d8a0c38a1467 | Update tests. | Orange/widgets/tests/test_scatterplot_density.py | Orange/widgets/tests/test_scatterplot_density.py | from unittest import TestCase
from math import *
import numpy as np
from numpy.random import random, randint, uniform
from PyQt4.QtGui import QColor
from Orange.widgets.visualize.owscatterplotgraph import compute_density, grid_sample
class TestScatterplotDensity(TestCase):
def random_data(self, n_grid, n_colors, n_data):
mx, Mx = 200, 2000
my, My = 300, 3000
mr, Mr = 10, 500
x_grid = sorted(uniform(mx, Mx, n_grid))
y_grid = sorted(uniform(my, My, n_grid))
colors = [QColor(randint(256), randint(256), randint(256), randint(256)) for i in range(n_colors)]
cx = uniform(mx, Mx, n_colors)
cy = uniform(my, My, n_colors)
cr = uniform(mr, Mr, n_colors)
x_data, y_data, rgb_data = [], [], []
for i in range(n_data):
c = randint(n_colors)
r = uniform(1, cr[c])
a = random()*2*pi
x_data.append(cx[c]+r*cos(a))
y_data.append(cy[c]+r*sin(a))
rgb_data.append(colors[c].getRgb()[:3])
return x_grid, y_grid, x_data, y_data, rgb_data
def test_random(self):
x_grid, y_grid, x_data, y_data, rgb_data = self.random_data(n_grid=50, n_colors=5, n_data=121)
img = compute_density(x_grid, y_grid, x_data, y_data, rgb_data)
self.assertTrue(img.shape == (50, 50, 4))
self.assertTrue(np.all(0 <= img) and np.all(img < 256))
def test_single_class(self):
x_grid, y_grid, x_data, y_data, rgb_data = self.random_data(n_grid=50, n_colors=1, n_data=100)
img = compute_density(x_grid, y_grid, x_data, y_data, rgb_data)
self.assertTrue(np.all(img[:, :, 3] == 128))
def test_sampling(self):
x_data = [4, 1] + list(uniform(10, 20, 1000))
y_data = [95, 3] + list(uniform(15, 20, 1000))
sample = grid_sample(x_data, y_data, k=30, g=10)
self.assertIn(0, sample)
self.assertIn(1, sample)
| from unittest import TestCase
from collections import Counter
from math import *
import numpy as np
from numpy.random import random, randint, shuffle, uniform
from PyQt4.QtGui import QColor
from sklearn.neighbors import NearestNeighbors
from Orange.widgets.visualize.owscatterplotgraph import compute_density as compute_density_cpp
class TestScatterplotDensity(TestCase):
# reference Python implementation
def compute_density_py(self, x_grid, y_grid, x_data, y_data, rgb_data):
k = int(len(x_data)**0.5)
distinct_colors = len(set(rgb_data))
lo, hi = ceil(k/distinct_colors), k
# find nearest neighbours of all grid points
grid = [[x, y] for x in x_grid for y in y_grid]
clf = NearestNeighbors()
clf.fit(np.column_stack((x_data, y_data)))
dist, ind = clf.kneighbors(grid, k)
# combine colors of found neighbours
colors = []
for neigh in ind:
cnt = Counter(rgb_data[i] for i in neigh)
main_color, color_count = cnt.most_common(1)[0]
a = int(128*((color_count-lo)/(hi-lo))) if lo != hi else 128
colors += [(main_color[0], main_color[1], main_color[2], a)]
return np.array(colors).reshape((len(x_grid), len(y_grid), 4))
def random_data(self, n_grid, n_colors, n_data):
mx, Mx = 200, 2000
my, My = 300, 3000
mr, Mr = 10, 500
x_grid = sorted(uniform(mx, Mx, n_grid))
y_grid = sorted(uniform(my, My, n_grid))
colors = [QColor(randint(256), randint(256), randint(256), randint(256)) for i in range(n_colors)]
cx = uniform(mx, Mx, n_colors)
cy = uniform(my, My, n_colors)
cr = uniform(mr, Mr, n_colors)
x_data, y_data, rgb_data = [], [], []
for i in range(n_data):
c = randint(n_colors)
r = uniform(1, cr[c])
a = random()*2*pi
x_data.append(cx[c]+r*cos(a))
y_data.append(cy[c]+r*sin(a))
rgb_data.append(colors[c].getRgb()[:3])
return x_grid, y_grid, x_data, y_data, rgb_data
def test_random(self):
x_grid, y_grid, x_data, y_data, rgb_data = self.random_data(n_grid=50, n_colors=5, n_data=121)
img_py = self.compute_density_py(x_grid, y_grid, x_data, y_data, rgb_data)
img_cpp = compute_density_cpp(x_grid, y_grid, x_data, y_data, rgb_data)
self.assertGreater(np.sum(img_py == img_cpp)/img_py.size, 0.9)
def test_few_colors(self):
for c in [1, 2]:
x_grid, y_grid, x_data, y_data, rgb_data = self.random_data(n_grid=50, n_colors=c, n_data=121)
img_py = self.compute_density_py(x_grid, y_grid, x_data, y_data, rgb_data)
img_cpp = compute_density_cpp(x_grid, y_grid, x_data, y_data, rgb_data)
self.assertTrue(np.all(img_py == img_cpp))
def test_grid_data(self):
x_coord = uniform(-1, 1, 13)
y_coord = uniform(-1, 1, 13)
xy = [(x,y) for x in x_coord for y in y_coord]
xy = xy*3
shuffle(xy)
x_data, y_data = zip(*xy)
rgb_data = [(255,0,0) if x < y else (0,0,255) for x, y in xy]
x_grid = sorted(uniform(-2, 2, 31))
y_grid = sorted(uniform(-2, 2, 31))
img_py = self.compute_density_py(x_grid, y_grid, x_data, y_data, rgb_data)
img_cpp = compute_density_cpp(x_grid, y_grid, x_data, y_data, rgb_data)
self.assertTrue(np.all(img_py == img_cpp))
| Python | 0 |
f3c6a888b4462e2fab43faba6dbe2af4bafff1bb | Update add-snmpproxy-collector.py | scripts/monitoring/proxy_snmp/add-snmpproxy-collector.py | scripts/monitoring/proxy_snmp/add-snmpproxy-collector.py | from cloudify import ctx
from cloudify import exceptions
import diamond_agent.tasks as diamond
import os
workdir = ctx.plugin.workdir
paths = diamond.get_paths(workdir.replace("script","diamond"))
name = 'SNMPProxyCollector'
collector_dir = os.path.join(paths['collectors'], name)
if not os.path.exists(collector_dir):
os.mkdir(collector_dir)
collector_file = os.path.join(collector_dir, '{0}.py'.format(name))
ctx.download_resource('scripts/monitoring/proxy_snmp/snmpproxy.py', collector_file)
config = ctx.target.instance.runtime_properties.get('snmp_collector_config', {})
config.update({'enabled': True,
'hostname': '{0}.{1}.{2}'.format(diamond.get_host_id(ctx.target),
ctx.target.node.name,
ctx.target.instance.id)
})
config_full_path = os.path.join(paths['collectors_config'], '{0}.conf'.format(name))
diamond.write_config(config_full_path, config)
try:
diamond.stop_diamond(paths['config'])
except:
pass
try:
diamond.start_diamond(paths['config'])
except:
exceptions.RecoverableError("Failed to start diamond", 30)
pass
| from cloudify import ctx
from cloudify import exceptions
import diamond_agent.tasks as diamond
import os
paths = diamond.get_paths(ctx.plugin.workdir)
name = 'SNMPProxyCollector'
collector_dir = os.path.join(paths['collectors'], name)
if not os.path.exists(collector_dir):
os.mkdir(collector_dir)
collector_file = os.path.join(collector_dir, '{0}.py'.format(name))
ctx.download_resource('scripts/monitoring/proxy_snmp/snmpproxy.py', collector_file)
config = ctx.target.instance.runtime_properties.get('snmp_collector_config', {})
config.update({'enabled': True,
'hostname': '{0}.{1}.{2}'.format(diamond.get_host_id(ctx.target),
ctx.target.node.name,
ctx.target.instance.id)
})
config_full_path = os.path.join(paths['collectors_config'], '{0}.conf'.format(name))
diamond.write_config(config_full_path, config)
try:
diamond.stop_diamond(paths['config'])
except:
pass
try:
diamond.start_diamond(paths['config'])
except:
exceptions.RecoverableError("Failed to start diamond", 30)
pass
| Python | 0 |
c9027e8aebe853d1c85fcac24b09caeb8ea5f403 | Bump version to 0.3.0 | bands_inspect/__init__.py | bands_inspect/__init__.py | # -*- coding: utf-8 -*-
# (c) 2017-2019, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
A tool for modifying, comparing and plotting electronic bandstructures.
"""
from . import kpoints
from . import eigenvals
from . import compare
from . import lattice
from . import plot
__version__ = '0.3.0'
| # -*- coding: utf-8 -*-
# (c) 2017-2019, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
A tool for modifying, comparing and plotting electronic bandstructures.
"""
from . import kpoints
from . import eigenvals
from . import compare
from . import lattice
from . import plot
__version__ = '0.2.3'
| Python | 0.000001 |
889a2608d1d4038a8c7ee1c445530fd1750c00e0 | Optimize styling according to pylint | preprocessing/collect_unigrams.py | preprocessing/collect_unigrams.py | # -*- coding: utf-8 -*-
"""
File to collect all unigrams and all name-unigrams (label PER) from a corpus file.
The corpus file must have one document/article per line. The words must be labeled in the
form word/LABEL.
Example file content:
Yestarday John/PER Doe/PER said something amazing.
Washington/LOC D.C./LOC is the capital of the U.S.
The foobird is a special species of birds. It's commonly found on mars.
...
Execute via:
python -m preprocessing/collect_unigrams
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from model.unigrams import Unigrams
# All capitalized constants come from this file
import config as cfg
def main():
"""Main function. Gathers all unigrams and name-unigrams, see documantation at the top."""
# collect all unigrams (all labels, including "O")
print("Collecting unigrams...")
ug_all = Unigrams()
ug_all.fill_from_articles(cfg.ARTICLES_FILEPATH, verbose=True)
ug_all.write_to_file(cfg.UNIGRAMS_FILEPATH)
ug_all = None
# collect only unigrams of label PER
print("Collecting person names (label=PER)...")
ug_names = Unigrams()
ug_names.fill_from_articles_labels(cfg.ARTICLES_FILEPATH, ["PER"], verbose=True)
ug_names.write_to_file(cfg.UNIGRAMS_PERSON_FILEPATH)
print("Finished.")
# ---------------
if __name__ == "__main__":
main()
| # -*- coding: utf-8 -*-
"""
File to collect all unigrams and all name-unigrams (label PER) from a corpus file.
The corpus file must have one document/article per line. The words must be labeled in the
form word/LABEL.
Example file content:
Yestarday John/PER Doe/PER said something amazing.
Washington/LOC D.C./LOC is the capital of the U.S.
The foobird is a special species of birds. It's commonly found on mars.
...
Execute via:
python -m preprocessing/collect_unigrams
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from model.unigrams import Unigrams
# All capitalized constants come from this file
from config import *
def main():
"""Main function. Gathers all unigrams and name-unigrams, see documantation at the top."""
# collect all unigrams (all labels, including "O")
print("Collecting unigrams...")
ug_all = Unigrams()
ug_all.fill_from_articles(ARTICLES_FILEPATH, verbose=True)
ug_all.write_to_file(UNIGRAMS_FILEPATH)
ug_all = None
# collect only unigrams of label PER
print("Collecting person names (label=PER)...")
ug_names = Unigrams()
ug_names.fill_from_articles_labels(ARTICLES_FILEPATH, ["PER"], verbose=True)
ug_names.write_to_file(UNIGRAMS_PERSON_FILEPATH)
print("Finished.")
# ---------------
if __name__ == "__main__":
main()
| Python | 0 |
9b5fd8dba4885cd0cc2de10f7ff6c8066aee0277 | Fix possibles issues with pulseaudiowidget | barython/widgets/audio.py | barython/widgets/audio.py | #!/usr/bin/env python3
import logging
from .base import SubprocessWidget
from barython.hooks.audio import PulseAudioHook
logger = logging.getLogger("barython")
class PulseAudioWidget(SubprocessWidget):
def handler(self, event, *args, **kwargs):
"""
Filter events sent by notifications
"""
# Only notify if there is something changes in pulseaudio
event_change_msg = "Event 'change' on destination"
if event_change_msg in event:
logger.debug("PA: line \"{}\" catched.".format(event))
return self.update()
def organize_result(self, volume, output_mute=None, input_mute=None,
*args, **kwargs):
"""
Override this method to change the infos to print
"""
return "{}".format(volume)
def handle_result(self, output=None, *args, **kwargs):
# As pulseaudio-ctl add events in pactl subscribe, flush output
try:
if output != "" and output is not None:
output = self.organize_result(*output.split())
super().handle_result(output=output)
except Exception as e:
logger.error("Error in PulseAudioWidget: {}", e)
def __init__(self, cmd=["pulseaudio-ctl", "full-status"],
*args, **kwargs):
super().__init__(*args, **kwargs, cmd=cmd, infinite=False)
# Update the widget when PA volume changes
self.hooks.subscribe(self.handler, PulseAudioHook)
| #!/usr/bin/env python3
import logging
from .base import SubprocessWidget
from barython.hooks.audio import PulseAudioHook
logger = logging.getLogger("barython")
class PulseAudioWidget(SubprocessWidget):
def handler(self, event, *args, **kwargs):
"""
Filter events sent by the notifications
"""
# Only notify if there is something changes in pulseaudio
event_change_msg = "Event 'change' on destination"
if event_change_msg in event:
logger.debug("PA: line \"{}\" catched.".format(event))
return self.update()
def organize_result(self, volume, output_mute=None, input_mute=None,
*args, **kwargs):
"""
Override this method to change the infos to print
"""
return "{}".format(volume)
def handle_result(self, output=None, *args, **kwargs):
# As pulseaudio-ctl add events in pactl subscribe, flush output
try:
if output != "" and output is not None:
output = self.organize_result(*output.split())
super().handle_result(output=output)
except Exception as e:
logger.error("Error in PulseAudioWidget: {}", e)
def __init__(self, cmd=["pulseaudio-ctl", "full-status"],
*args, **kwargs):
super().__init__(cmd, infinite=False, *args, **kwargs)
# Update the widget when PA volume changes
self.hooks.subscribe(self.handler, PulseAudioHook)
| Python | 0.000002 |
7f113399e4277ecbbfdde41d683c22082f7e19bd | Add DOI parsing to identifiers | scrapi/harvesters/smithsonian.py | scrapi/harvesters/smithsonian.py | '''
Harvester for the Smithsonian Digital Repository for the SHARE project
Example API call: http://repository.si.edu/oai/request?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
import re
from scrapi.base import helpers
from scrapi.base import OAIHarvester
class SiHarvester(OAIHarvester):
short_name = 'smithsonian'
long_name = 'Smithsonian Digital Repository'
url = 'http://repository.si.edu/oai/request'
@property
def schema(self):
return helpers.updated_schema(self._schema, {
"uris": {
"objectUris": [('//dc:identifier/node()', get_doi_from_identifier)]
}
})
base_url = 'http://repository.si.edu/oai/request'
property_list = ['date', 'identifier', 'type', 'format', 'setSpec']
timezone_granularity = True
def get_doi_from_identifier(identifiers):
doi_re = re.compile(r'10\.\S*\/\S*')
identifiers = [identifiers] if not isinstance(identifiers, list) else identifiers
for identifier in identifiers:
try:
found_doi = doi_re.search(identifier).group()
return 'http://dx.doi.org/{}'.format(found_doi)
except AttributeError:
continue
| '''
Harvester for the Smithsonian Digital Repository for the SHARE project
Example API call: http://repository.si.edu/oai/request?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class SiHarvester(OAIHarvester):
short_name = 'smithsonian'
long_name = 'Smithsonian Digital Repository'
url = 'http://repository.si.edu/oai/request'
base_url = 'http://repository.si.edu/oai/request'
property_list = ['date', 'identifier', 'type', 'format', 'setSpec']
timezone_granularity = True
| Python | 0.00003 |
b65a2ee41d16efd1a056727e59c229eb8258070f | set deafult DB_host as localhost | tests/settings.py | tests/settings.py | import os
INSTALLED_APPS = (
'model_utils',
'tests',
)
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": os.environ.get("DB_NAME", "modelutils"),
"USER": os.environ.get("DB_USER", 'postgres'),
"PASSWORD": os.environ.get("DB_PASSWORD", ""),
"HOST": os.environ.get("DB_HOST", "localhost"),
"PORT": os.environ.get("DB_PORT", 5432)
},
}
SECRET_KEY = 'dummy'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
| import os
INSTALLED_APPS = (
'model_utils',
'tests',
)
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": os.environ.get("DB_NAME", "modelutils"),
"USER": os.environ.get("DB_USER", 'postgres'),
"PASSWORD": os.environ.get("DB_PASSWORD", ""),
"HOST": os.environ.get("DB_HOST", ""),
"PORT": os.environ.get("DB_PORT", 5432)
},
}
SECRET_KEY = 'dummy'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
| Python | 0.999466 |
dcf7af23fa237cd761f1a589e2e268875d296841 | Test settings updated | tests/settings.py | tests/settings.py | # -*- coding: utf-8 -*-
# Standard library imports
import os
# Third party imports
from django.conf import global_settings as default_settings
from django.conf import settings
# Local application / specific library imports
TEST_ROOT = os.path.abspath(os.path.dirname(__file__))
TEST_SETTINGS = {
'DEBUG': False,
'TEMPLATE_DEBUG': False,
'DATABASES': {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
},
'TEMPLATE_CONTEXT_PROCESSORS': default_settings.TEMPLATE_CONTEXT_PROCESSORS,
'INSTALLED_APPS': (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'precise_bbcode',
'tests',
),
'ROOT_URLCONF': 'tests._testsite.urls',
'MIDDLEWARE_CLASSES': default_settings.MIDDLEWARE_CLASSES,
'ADMINS': ('admin@example.com',),
'MEDIA_ROOT': os.path.join(TEST_ROOT, '_testdata/media/'),
'SITE_ID': 1,
# Setting this explicitly prevents Django 1.7+ from showing a
# warning regarding a changed default test runner. The test
# suite is run with nose, so it does not matter.
'SILENCED_SYSTEM_CHECKS': ['1_6.W001'],
}
def configure():
if not settings.configured:
settings.configure(**TEST_SETTINGS)
| # -*- coding: utf-8 -*-
# Standard library imports
import os
# Third party imports
from django.conf import global_settings as default_settings
from django.conf import settings
# Local application / specific library imports
TEST_ROOT = os.path.abspath(os.path.dirname(__file__))
TEST_SETTINGS = {
'DEBUG': False,
'TEMPLATE_DEBUG': False,
'DATABASES': {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
},
'TEMPLATE_CONTEXT_PROCESSORS': default_settings.TEMPLATE_CONTEXT_PROCESSORS,
'INSTALLED_APPS': (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'precise_bbcode',
'tests',
),
'ROOT_URLCONF': 'tests._testsite.urls',
'MIDDLEWARE_CLASSES': default_settings.MIDDLEWARE_CLASSES,
'ADMINS': ('admin@example.com',),
'MEDIA_ROOT': os.path.join(TEST_ROOT, '_testdata/media/'),
'SITE_ID': 1,
}
def configure():
if not settings.configured:
settings.configure(**TEST_SETTINGS)
| Python | 0 |
0b4cbd10ec6deb85603ad21a3ff1a7c3141da315 | Change matrix size for convolutionSeparable2 test | examples/convolution/run.py | examples/convolution/run.py | #!/usr/bin/env python
import subprocess
from xml.dom.minidom import parseString
stddevThreshold = 0.1 # tolerate up to 10% variation
nTrials = 7
nThreads = 2
def run(cmd):
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
ret = proc.poll()
if ret:
print '%s: returned failure %d' % (cmd[0], ret)
print '-----------------------------'
print '%s' % err
print '%s' % out
error = subprocess.CalledProcessError(ret, cmd)
error.output = out
raise error
return out
def compile(radius):
cmd = [
"../../src/pbc",
"--preproc=/usr/bin/cpp -DKERNEL_RADIUS=%d" % radius,
"convolutionSeparable2.pbcc"
]
run(cmd)
def test(size, mode):
# TODO: copy config to dest location?
cmd = [
"./convolutionSeparable2",
"--time",
"--trials=%d" % nTrials,
"--isolation",
"--threads=%d" % nThreads,
"-n", "%d" % size,
"--config=./convolutionSeparable2.%s.cfg" % mode
]
res = run(cmd)
x = parseString(res)
timing = x.getElementsByTagName('timing')[0]
stddev = float(timing.getAttribute('stddev'))
t = float(timing.getAttribute('median'))
# For now, just warn if it seems unreasonable
if stddev > t * stddevThreshold:
print 'WARNING: stddev for %s with n=%d, was high: %f' % (mode, size, stddev)
return t,stddev
def test_radius(radius, sizes=[2048,3500]):
print 'Testing radius=%d' % radius
compile(radius)
res = []
for size in sizes:
# TODO: add localmem option
for sep in ['2d', 'sep']:
for local in ['local', 'nolocal']:
mode = '%s.%s' % (sep,local)
t,stddev = test(size, mode)
res.append( (radius, size, mode, t, stddev) )
#print '%d^2, %s takes %f (stddev: %f)' % (size, mode, t, stddev)
return res
# Run all tests
res = []
for radius in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
t = test_radius(radius)
res.extend( t )
for r,s,m,t,dev in res:
print 'R=%d, %dx%d, %s takes:\t%f (stddev: %f)' % (r, s, s, m, t, dev)
| #!/usr/bin/env python
import subprocess
from xml.dom.minidom import parseString
stddevThreshold = 0.1 # tolerate up to 10% variation
nTrials = 5
nThreads = 2
def run(cmd):
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
ret = proc.poll()
if ret:
print '%s: returned failure %d' % (cmd[0], ret)
print '-----------------------------'
print '%s' % err
print '%s' % out
error = subprocess.CalledProcessError(ret, cmd)
error.output = out
raise error
return out
def compile(radius):
cmd = [
"../../src/pbc",
"--preproc=/usr/bin/cpp -DKERNEL_RADIUS=%d" % radius,
"convolutionSeparable2.pbcc"
]
run(cmd)
def test(size, mode):
# TODO: copy config to dest location?
cmd = [
"./convolutionSeparable2",
"--time",
"--trials=%d" % nTrials,
"--isolation",
"--threads=%d" % nThreads,
"-n", "%d" % size,
"--config=./convolutionSeparable2.%s.cfg" % mode
]
res = run(cmd)
x = parseString(res)
timing = x.getElementsByTagName('timing')[0]
stddev = float(timing.getAttribute('stddev'))
t = float(timing.getAttribute('median'))
# For now, just warn if it seems unreasonable
if stddev > t * stddevThreshold:
print 'WARNING: stddev for %s with n=%d, was high: %f' % (mode, size, stddev)
return t,stddev
def test_radius(radius, sizes=[2048]): # TODO: try 4096
print 'Testing radius=%d' % radius
compile(radius)
res = []
for size in sizes:
# TODO: add localmem option
for sep in ['2d', 'sep']:
for local in ['local', 'nolocal']:
mode = '%s.%s' % (sep,local)
t,stddev = test(size, mode)
res.append( (radius, size, mode, t, stddev) )
#print '%d^2, %s takes %f (stddev: %f)' % (size, mode, t, stddev)
return res
# Run all tests
res = []
for radius in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
t = test_radius(radius)
res.extend( t )
for r,s,m,t,dev in res:
print 'R=%d, %dx%d, %s takes:\t%f (stddev: %f)' % (r, s, s, m, t, dev)
| Python | 0 |
bd1e6eba5fa8f47606319dab6ae378383c31a366 | fix sdb unit tests | tests/unit/sdb/test_vault.py | tests/unit/sdb/test_vault.py | # -*- coding: utf-8 -*-
'''
Test case for the vault SDB module
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing libs
from tests.support.unit import TestCase
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import (
MagicMock,
call,
patch)
# Import Salt libs
import salt.sdb.vault as vault
class TestVaultSDB(LoaderModuleMockMixin, TestCase):
'''
Test case for the vault SDB module
'''
def setup_loader_modules(self):
return {
vault: {
'__opts__': {
'vault': {
'url': "http://127.0.0.1",
"auth": {
'token': 'test',
'method': 'token'
}
}
}
}
}
def test_set(self):
'''
Test salt.sdb.vault.set function
'''
version = {'v2': False, 'data': None, 'metadata': None, 'type': None}
mock_version = MagicMock(return_value=version)
mock_vault = MagicMock()
mock_vault.return_value.status_code = 200
with patch.dict(vault.__utils__, {'vault.make_request': mock_vault}), \
patch.dict(vault.__utils__, {'vault.is_v2': mock_version}):
vault.set_('sdb://myvault/path/to/foo/bar', 'super awesome')
assert mock_vault.call_args_list == [call('POST',
'v1/sdb://myvault/path/to/foo',
None, json={'bar': 'super awesome'})]
def test_set_question_mark(self):
'''
Test salt.sdb.vault.set_ while using the old
deprecated solution with a question mark.
'''
version = {'v2': False, 'data': None, 'metadata': None, 'type': None}
mock_version = MagicMock(return_value=version)
mock_vault = MagicMock()
mock_vault.return_value.status_code = 200
with patch.dict(vault.__utils__, {'vault.make_request': mock_vault}), \
patch.dict(vault.__utils__, {'vault.is_v2': mock_version}):
vault.set_('sdb://myvault/path/to/foo?bar', 'super awesome')
assert mock_vault.call_args_list == [call('POST',
'v1/sdb://myvault/path/to/foo',
None, json={'bar': 'super awesome'})]
def test_get(self):
'''
Test salt.sdb.vault.get function
'''
version = {'v2': False, 'data': None, 'metadata': None, 'type': None}
mock_version = MagicMock(return_value=version)
mock_vault = MagicMock()
mock_vault.return_value.status_code = 200
mock_vault.content.return_value = [{'data': {'bar', 'test'}}]
with patch.dict(vault.__utils__, {'vault.make_request': mock_vault}), \
patch.dict(vault.__utils__, {'vault.is_v2': mock_version}):
vault.get('sdb://myvault/path/to/foo/bar')
assert mock_vault.call_args_list == [call('GET',
'v1/sdb://myvault/path/to/foo',
None)]
def test_get_question_mark(self):
'''
Test salt.sdb.vault.get while using the old
deprecated solution with a question mark.
'''
version = {'v2': False, 'data': None, 'metadata': None, 'type': None}
mock_version = MagicMock(return_value=version)
mock_vault = MagicMock()
mock_vault.return_value.status_code = 200
mock_vault.content.return_value = [{'data': {'bar', 'test'}}]
with patch.dict(vault.__utils__, {'vault.make_request': mock_vault}), \
patch.dict(vault.__utils__, {'vault.is_v2': mock_version}):
vault.get('sdb://myvault/path/to/foo?bar')
assert mock_vault.call_args_list == [call('GET',
'v1/sdb://myvault/path/to/foo',
None)]
| # -*- coding: utf-8 -*-
'''
Test case for the vault SDB module
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing libs
from tests.support.unit import TestCase
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import (
MagicMock,
call,
patch)
# Import Salt libs
import salt.sdb.vault as vault
class TestVaultSDB(LoaderModuleMockMixin, TestCase):
'''
Test case for the vault SDB module
'''
def setup_loader_modules(self):
return {
vault: {
'__opts__': {
'vault': {
'url': "http://127.0.0.1",
"auth": {
'token': 'test',
'method': 'token'
}
}
}
}
}
def test_set(self):
'''
Test salt.sdb.vault.set function
'''
mock_vault = MagicMock()
mock_vault.return_value.status_code = 200
with patch.dict(vault.__utils__, {'vault.make_request': mock_vault}):
vault.set_('sdb://myvault/path/to/foo/bar', 'super awesome')
assert mock_vault.call_args_list == [call('POST',
'v1/sdb://myvault/path/to/foo',
None, json={'bar': 'super awesome'})]
def test_set_question_mark(self):
'''
Test salt.sdb.vault.set_ while using the old
deprecated solution with a question mark.
'''
mock_vault = MagicMock()
mock_vault.return_value.status_code = 200
with patch.dict(vault.__utils__, {'vault.make_request': mock_vault}):
vault.set_('sdb://myvault/path/to/foo?bar', 'super awesome')
assert mock_vault.call_args_list == [call('POST',
'v1/sdb://myvault/path/to/foo',
None, json={'bar': 'super awesome'})]
def test_get(self):
'''
Test salt.sdb.vault.get function
'''
mock_vault = MagicMock()
mock_vault.return_value.status_code = 200
mock_vault.content.return_value = [{'data': {'bar', 'test'}}]
with patch.dict(vault.__utils__, {'vault.make_request': mock_vault}):
vault.get('sdb://myvault/path/to/foo/bar')
assert mock_vault.call_args_list == [call('GET',
'v1/sdb://myvault/path/to/foo',
None)]
def test_get_question_mark(self):
'''
Test salt.sdb.vault.get while using the old
deprecated solution with a question mark.
'''
mock_vault = MagicMock()
mock_vault.return_value.status_code = 200
mock_vault.content.return_value = [{'data': {'bar', 'test'}}]
with patch.dict(vault.__utils__, {'vault.make_request': mock_vault}):
vault.get('sdb://myvault/path/to/foo?bar')
assert mock_vault.call_args_list == [call('GET',
'v1/sdb://myvault/path/to/foo',
None)]
| Python | 0.000001 |
5fa36e781729fbfe5e3343f921e52eebf0062e75 | Switch rackspace env variables to prettyconf | tests/settings.py | tests/settings.py | import hashlib
import os
from tempfile import mkdtemp
from time import time
from prettyconf.configuration import Configuration
config = Configuration()
# Append epoch to prevent test runs from clobbering each other.
CONTAINER_PREFIX = 'cloud-storage-test-' + str(int(time()))
SECRET = hashlib.sha1(os.urandom(128)).hexdigest()
SALT = hashlib.sha1(os.urandom(128)).hexdigest()
TEXT_FILENAME = 'flask.txt'
TEXT_STREAM_FILENAME = 'flask-stream.txt'
TEXT_FORM_FILENAME = 'flask-form.txt'
TEXT_MD5_CHECKSUM = '2a5a634f5c8d931350e83e41c9b3b0bb'
BINARY_FILENAME = 'avatar.png'
BINARY_FORM_FILENAME = 'avatar-form.png'
BINARY_STREAM_FILENAME = 'avatar-stream.png'
BINARY_MD5_CHECKSUM = '2f907a59924ad96b7478074ed96b05f0'
BINARY_OPTIONS = {
'meta_data': {
'owner-id': 'da17c32d-21c2-4bfe-b083-e2e78187d868',
'owner-email': 'user.one@startup.com'
},
'content_type': 'image/png',
'content_disposition': 'attachment; filename=avatar-attachment.png',
}
AMAZON_KEY = config('AMAZON_KEY', default=None)
AMAZON_SECRET = config('AMAZON_SECRET', default=None)
AMAZON_REGION = config('AMAZON_REGION', default='us-east-1')
GOOGLE_CREDENTIALS = config('GOOGLE_CREDENTIALS', default=None)
RACKSPACE_KEY = config('RACKSPACE_KEY', default=None)
RACKSPACE_SECRET = config('RACKSPACE_SECRET', default=None)
RACKSPACE_REGION = config('RACKSPACE_REGION', default='IAD')
LOCAL_KEY = config('LOCAL_KEY', default=mkdtemp(prefix='cloud-storage-test-'))
if not os.path.exists(LOCAL_KEY):
os.makedirs(LOCAL_KEY)
LOCAL_SECRET = config('LOCAL_SECRET', default='local-storage-secret')
| import hashlib
import os
from tempfile import mkdtemp
from time import time
from prettyconf.configuration import Configuration
config = Configuration()
# Append epoch to prevent test runs from clobbering each other.
CONTAINER_PREFIX = 'cloud-storage-test-' + str(int(time()))
SECRET = hashlib.sha1(os.urandom(128)).hexdigest()
SALT = hashlib.sha1(os.urandom(128)).hexdigest()
TEXT_FILENAME = 'flask.txt'
TEXT_STREAM_FILENAME = 'flask-stream.txt'
TEXT_FORM_FILENAME = 'flask-form.txt'
TEXT_MD5_CHECKSUM = '2a5a634f5c8d931350e83e41c9b3b0bb'
BINARY_FILENAME = 'avatar.png'
BINARY_FORM_FILENAME = 'avatar-form.png'
BINARY_STREAM_FILENAME = 'avatar-stream.png'
BINARY_MD5_CHECKSUM = '2f907a59924ad96b7478074ed96b05f0'
BINARY_OPTIONS = {
'meta_data': {
'owner-id': 'da17c32d-21c2-4bfe-b083-e2e78187d868',
'owner-email': 'user.one@startup.com'
},
'content_type': 'image/png',
'content_disposition': 'attachment; filename=avatar-attachment.png',
}
AMAZON_KEY = config('AMAZON_KEY', default=None)
AMAZON_SECRET = config('AMAZON_SECRET', default=None)
AMAZON_REGION = config('AMAZON_REGION', default='us-east-1')
GOOGLE_CREDENTIALS = config('GOOGLE_CREDENTIALS', default=None)
RACKSPACE_KEY = os.environ['RACKSPACE_KEY']
RACKSPACE_SECRET = os.environ['RACKSPACE_SECRET']
RACKSPACE_REGION = os.environ['RACKSPACE_REGION']
# RACKSPACE_KEY = config('RACKSPACE_KEY', default=None)
# RACKSPACE_SECRET = config('RACKSPACE_SECRET', default=None)
# RACKSPACE_REGION = config('RACKSPACE_REGION', default='IAD')
LOCAL_KEY = config('LOCAL_KEY', default=mkdtemp(prefix='cloud-storage-test-'))
if not os.path.exists(LOCAL_KEY):
os.makedirs(LOCAL_KEY)
LOCAL_SECRET = config('LOCAL_SECRET', default='local-storage-secret')
| Python | 0 |
7f5f11aa1922ba8094184c877c5f60d4b2b72f89 | use pkresource to find zip_file and pkunit.work_dir | tests/srw_test.py | tests/srw_test.py | # -*- coding: utf-8 -*-
u"""PyTest for :mod:`sirepo.template.srw.py`
:copyright: Copyright (c) 2016 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('srwl_bl')
from pykern import pkresource
from pykern import pkunit
import os
import py.path
import sirepo
import tempfile
_EPS = 1e-3
zip_file = pkresource.filename('static/dat/magnetic_measurements.zip', sirepo)
def test_find_tab_undulator_length_1():
from sirepo.template import srw
gap = 6.82
res = srw._find_tab_undulator_length(zip_file=zip_file, gap=gap)
assert res['dat_file'] == 'ivu21_srx_g6_8c.dat'
assert res['closest_gap'] == 6.8
assert abs(res['found_length'] - 2.5) < _EPS
def test_find_tab_undulator_length_1s():
from sirepo.template import srw
gap = '6.82'
res = srw._find_tab_undulator_length(zip_file=zip_file, gap=gap)
assert res['dat_file'] == 'ivu21_srx_g6_8c.dat'
assert res['closest_gap'] == 6.8
assert abs(res['found_length'] - 2.5) < _EPS
def test_find_tab_undulator_length_2():
from sirepo.template import srw
gap = 3
res = srw._find_tab_undulator_length(zip_file=zip_file, gap=gap)
assert res['dat_file'] == 'ivu21_srx_g6_2c.dat'
assert res['closest_gap'] == 6.2
assert abs(res['found_length'] - 2.5) < _EPS
def test_find_tab_undulator_length_3():
from sirepo.template import srw
gap = 45
res = srw._find_tab_undulator_length(zip_file=zip_file, gap=gap)
assert res['dat_file'] == 'ivu21_srx_g40_0c.dat'
assert res['closest_gap'] == 40
assert abs(res['found_length'] - 2.5) < _EPS
def test_prepare_aux_files_1():
from sirepo.template import srw
data = {
'models': {
'simulation': {
'sourceType': 't'
},
'tabulatedUndulator': {
'magneticFile': 'magnetic_measurements.zip',
'indexFile': '',
'magnMeasFolder': '',
}
}
}
srw.prepare_aux_files(pkunit.work_dir(), data)
assert data['models']['tabulatedUndulator']['magnMeasFolder'] == ''
assert data['models']['tabulatedUndulator']['indexFile'] == 'ivu21_srx_sum.txt'
| # -*- coding: utf-8 -*-
u"""PyTest for :mod:`sirepo.template.srw.py`
:copyright: Copyright (c) 2016 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import os
import py.path
import pytest
import tempfile
_EPS = 1e-3
zip_file = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../sirepo/package_data/static/dat/magnetic_measurements.zip',
)
)
pytest.importorskip('srwl_bl')
def test_find_tab_undulator_length_1():
from sirepo.template import srw
gap = 6.82
tab_parameters = srw._find_tab_undulator_length(zip_file=zip_file, gap=gap)
assert tab_parameters['dat_file'] == 'ivu21_srx_g6_8c.dat'
assert tab_parameters['closest_gap'] == 6.8
assert abs(tab_parameters['found_length'] - 2.5) < _EPS
def test_find_tab_undulator_length_1s():
from sirepo.template import srw
gap = '6.82'
tab_parameters = srw._find_tab_undulator_length(zip_file=zip_file, gap=gap)
assert tab_parameters['dat_file'] == 'ivu21_srx_g6_8c.dat'
assert tab_parameters['closest_gap'] == 6.8
assert abs(tab_parameters['found_length'] - 2.5) < _EPS
def test_find_tab_undulator_length_2():
from sirepo.template import srw
gap = 3
tab_parameters = srw._find_tab_undulator_length(zip_file=zip_file, gap=gap)
assert tab_parameters['dat_file'] == 'ivu21_srx_g6_2c.dat'
assert tab_parameters['closest_gap'] == 6.2
assert abs(tab_parameters['found_length'] - 2.5) < _EPS
def test_find_tab_undulator_length_3():
from sirepo.template import srw
gap = 45
tab_parameters = srw._find_tab_undulator_length(zip_file=zip_file, gap=gap)
assert tab_parameters['dat_file'] == 'ivu21_srx_g40_0c.dat'
assert tab_parameters['closest_gap'] == 40
assert abs(tab_parameters['found_length'] - 2.5) < _EPS
def test_prepare_aux_files_1():
from sirepo.template import srw
tmp_dir = _prepare_env()
data = {
'models': {
'simulation': {
'sourceType': 't'
},
'tabulatedUndulator': {
'magneticFile': 'magnetic_measurements.zip',
'indexFile': '',
'magnMeasFolder': '',
}
}
}
srw.prepare_aux_files(tmp_dir, data)
_clean_env(tmp_dir)
assert data['models']['tabulatedUndulator']['magnMeasFolder'] == ''
assert data['models']['tabulatedUndulator']['indexFile'] == 'ivu21_srx_sum.txt'
def _clean_env(tmp_dir):
try:
tmp_dir.remove(ignore_errors=True)
except:
pass
def _prepare_env():
return py.path.local(tempfile.mkdtemp())
| Python | 0 |
6e3ebff613254c7e13d89cd3599e030947a5072f | fix coverage report | tests/unittest/test_calls.py | tests/unittest/test_calls.py | import os
import sys
from unittest import TestCase, mock
from unittest.mock import patch
# I hate it but I can't get the coverage report to work without it, must be before RequestsLibrary import
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../src/')))
import RequestsLibrary
lib = RequestsLibrary.RequestsLibrary()
HTTP_LOCAL_SERVER = 'http://localhost:5000'
sess_headers = {'content-type': False}
post_headers = {'Content-Type': 'application/json'}
class TestCalls(TestCase):
def test_import_defaults(self):
RequestsLibrary.RequestsLibrary()
@patch('RequestsLibrary.RequestsLibrary._common_request')
def test_post_request_with_empty_data(self, common_request):
lib.create_session('http_server', HTTP_LOCAL_SERVER, sess_headers)
lib.post_request('http_server', '/anything', data="", headers=post_headers)
common_request.assert_called_with('post', mock.ANY, '/anything', allow_redirects=True, data='',
files=None, headers={'Content-Type': 'application/json'}, json=None,
params=None, timeout=None)
| from unittest import TestCase, mock
from unittest.mock import patch
import RequestsLibrary
lib = RequestsLibrary.RequestsLibrary()
HTTP_LOCAL_SERVER = 'http://localhost:5000'
sess_headers = {'content-type': False}
post_headers = {'Content-Type': 'application/json'}
class TestCalls(TestCase):
@patch('RequestsLibrary.RequestsLibrary._common_request')
def test_post_request_with_empty_data(self, common_request):
lib.create_session('http_server', HTTP_LOCAL_SERVER, sess_headers)
lib.post_request('http_server', '/anything', data="", headers=post_headers)
common_request.assert_called_with('post', mock.ANY, '/anything', allow_redirects=True, data='',
files=None, headers={'Content-Type': 'application/json'}, json=None,
params=None, timeout=None)
| Python | 0.000001 |
0179b6ce31856c18c9faaa58a55d8882e4a260ce | Configure Pool in if __name__ block | scripts/new_multiproc_manager.py | scripts/new_multiproc_manager.py | """
CPOL Level 1b main production line.
@title: CPOL_PROD_1b
@author: Valentin Louf <valentin.louf@monash.edu>
@institution: Bureau of Meteorology
@date: 1/03/2019
@version: 1
.. autosummary::
:toctree: generated/
timeout_handler
chunks
production_line_manager
production_line_multiproc
main
"""
# Python Standard Library
import os
import gc
import glob
import argparse
import traceback
from multiprocessing import Pool
import pandas as pd
import cpol_processing
def chunks(l, n):
"""
Yield successive n-sized chunks from l.
From http://stackoverflow.com/a/312464
"""
for i in range(0, len(l), n):
yield l[i:i + n]
def main(infile):
"""
It calls the production line and manages it. Buffer function that is used
to catch any problem with the processing line without screwing the whole
multiprocessing stuff.
Parameters:
===========
infile: str
Name of the input radar file.
outpath: str
Path for saving output data.
"""
try:
cpol_processing.process_and_save(infile, OUTPATH, sound_dir=SOUND_DIR)
except Exception:
traceback.print_exc()
# logging.error(f"Failed to process {infile}", exc_info=True)
return None
if __name__ == '__main__':
"""
Global variables definition.
"""
# Main global variables (Path directories).
INPATH = "/g/data/hj10/cpol_level_1a/ppi/"
OUTPATH = "/g/data/hj10/cpol_level_1b/"
SOUND_DIR = "/g/data2/rr5/CPOL_radar/DARWIN_radiosonde"
LOG_FILE_PATH = "/short/kl02/vhl548/"
# Parse arguments
parser_description = "Processing of radar data from level 1a to level 1b."
parser = argparse.ArgumentParser(description=parser_description)
parser.add_argument(
'-s',
'--start-date',
dest='start_date',
default=None,
type=str,
help='Starting date.',
required=True)
parser.add_argument(
'-e',
'--end-date',
dest='end_date',
default=None,
type=str,
help='Ending date.',
required=True)
args = parser.parse_args()
START_DATE = args.start_date
END_DATE = args.end_date
for day in pd.date_range(START_DATE, END_DATE):
input_dir = os.path.join(INPATH, str(day.year), day.strftime("%Y%m%d"), "*.*")
flist = sorted(glob.glob(input_dir))
if len(flist) == 0:
print('No file found for {}.'.format(day.strftime("%Y-%b-%d")))
continue
print(f'{len(flist)} files found for ' + day.strftime("%Y-%b-%d"))
for flist_chunk in chunks(flist, 32):
with Pool() as pool:
pool.map(main, flist)
| """
CPOL Level 1b main production line.
@title: CPOL_PROD_1b
@author: Valentin Louf <valentin.louf@monash.edu>
@institution: Bureau of Meteorology
@date: 1/03/2019
@version: 1
.. autosummary::
:toctree: generated/
timeout_handler
chunks
production_line_manager
production_line_multiproc
main
"""
# Python Standard Library
import os
import gc
import glob
import argparse
import traceback
import pandas as pd
import dask.bag as db
import cpol_processing
def chunks(l, n):
"""
Yield successive n-sized chunks from l.
From http://stackoverflow.com/a/312464
"""
for i in range(0, len(l), n):
yield l[i:i + n]
def production_line_manager(infile):
"""
The production line manager calls the production line and manages it ;-).
Buffer function that is used to catch any problem with the processing line
without screwing the whole multiprocessing stuff.
Parameters:
===========
infile: str
Name of the input radar file.
outpath: str
Path for saving output data.
"""
try:
cpol_processing.process_and_save(infile, OUTPATH, sound_dir=SOUND_DIR)
except Exception:
traceback.print_exc()
# logging.error(f"Failed to process {infile}", exc_info=True)
gc.collect()
return None
def main():
date_list = pd.date_range(START_DATE, END_DATE)
for day in date_list:
input_dir = os.path.join(INPATH, str(day.year), day.strftime("%Y%m%d"), "*.*")
flist = sorted(glob.glob(input_dir))
if len(flist) == 0:
print('No file found for {}.'.format(day.strftime("%Y-%b-%d")))
continue
print(f'{len(flist)} files found for ' + day.strftime("%Y-%b-%d"))
for flist_chunk in chunks(flist, 16):
bag = db.from_sequence(flist_chunk).map(production_line_manager)
bag.compute()
# with Pool(16) as pool:
# pool.map(production_line_manager, flist)
return None
if __name__ == '__main__':
"""
Global variables definition.
"""
# Main global variables (Path directories).
INPATH = "/g/data/hj10/cpol_level_1a/ppi/"
OUTPATH = "/g/data/hj10/cpol_level_1b/"
SOUND_DIR = "/g/data2/rr5/CPOL_radar/DARWIN_radiosonde"
LOG_FILE_PATH = "/short/kl02/vhl548/"
# Parse arguments
parser_description = "Processing of radar data from level 1a to level 1b."
parser = argparse.ArgumentParser(description=parser_description)
parser.add_argument(
'-s',
'--start-date',
dest='start_date',
default=None,
type=str,
help='Starting date.',
required=True)
parser.add_argument(
'-e',
'--end-date',
dest='end_date',
default=None,
type=str,
help='Ending date.',
required=True)
args = parser.parse_args()
START_DATE = args.start_date
END_DATE = args.end_date
# Creating the general log file.
# logname = "cpol_level1b_from_{}_to_{}.log".format(START_DATE, END_DATE)
# log_file_name = os.path.join(LOG_FILE_PATH, logname)
# logging.basicConfig(
# level=logging.WARNING,
# format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
# filename=log_file_name,
# filemode='w+')
# logger = logging.getLogger(__name__)
main()
| Python | 0.99994 |
3dd0ac13a5c2a3e0dc949d60e807b438c36636a9 | Fix for post_process. | core/tessagon.py | core/tessagon.py | from tessagon.core.grid_tile_generator import GridTileGenerator
from tessagon.core.rotate_tile_generator import RotateTileGenerator
class Tessagon:
def __init__(self, **kwargs):
if 'function' in kwargs:
self.f = kwargs['function']
else:
raise ValueError('Must specify a function')
self.tile_class = self.init_tile_class()
if 'tile_generator' in kwargs:
self.tile_generator = kwargs['tile_generator'](self, **kwargs)
elif 'rot_factor' in kwargs:
self.tile_generator = RotateTileGenerator(self, **kwargs)
else:
self.tile_generator = GridTileGenerator(self, **kwargs)
# Optional post processing function
self.post_process = None
if 'post_process' in kwargs:
self.post_process = kwargs['post_process']
if 'adaptor_class' in kwargs:
adaptor_class = kwargs['adaptor_class']
self.mesh_adaptor = adaptor_class(**kwargs)
else:
raise ValueError('Must provide a mesh adaptor class')
self.tiles = None
self.face_types = {}
self.vert_types = {}
def create_mesh(self):
self._initialize_tiles()
self.mesh_adaptor.create_empty_mesh()
self._calculate_verts()
self._calculate_faces()
self.mesh_adaptor.finish_mesh()
if self.post_process:
# Run user defined post-processing code
# Need to pass self here (this could be designed better)
self.post_process(self)
return self.mesh_adaptor.get_mesh()
def inspect(self):
print("\n=== %s ===\n" % (self.__class__.__name__))
for i in range(len(self.tiles)):
self.tiles[i].inspect(tile_number=i)
### Below are protected
def _initialize_tiles(self):
self.tiles = self.tile_generator.create_tiles()
def _calculate_verts(self):
for tile in self.tiles:
tile.calculate_verts()
def _calculate_faces(self):
for tile in self.tiles:
tile.calculate_faces()
| from tessagon.core.grid_tile_generator import GridTileGenerator
from tessagon.core.rotate_tile_generator import RotateTileGenerator
class Tessagon:
def __init__(self, **kwargs):
if 'function' in kwargs:
self.f = kwargs['function']
else:
raise ValueError('Must specify a function')
self.tile_class = self.init_tile_class()
if 'tile_generator' in kwargs:
self.tile_generator = kwargs['tile_generator'](self, **kwargs)
elif 'rot_factor' in kwargs:
self.tile_generator = RotateTileGenerator(self, **kwargs)
else:
self.tile_generator = GridTileGenerator(self, **kwargs)
# Optional post processing function
self.post_process = None
if 'post_process' in kwargs:
self.post_process = kwargs['post_process']
if 'adaptor_class' in kwargs:
adaptor_class = kwargs['adaptor_class']
self.mesh_adaptor = adaptor_class(**kwargs)
else:
raise ValueError('Must provide a mesh adaptor class')
self.tiles = None
self.face_types = {}
self.vert_types = {}
def create_mesh(self):
self._initialize_tiles()
self.mesh_adaptor.create_empty_mesh()
self._calculate_verts()
self._calculate_faces()
self.mesh_adaptor.finish_mesh()
if self.post_process:
self.post_process()
return self.mesh_adaptor.get_mesh()
def inspect(self):
print("\n=== %s ===\n" % (self.__class__.__name__))
for i in range(len(self.tiles)):
self.tiles[i].inspect(tile_number=i)
### Below are protected
def _initialize_tiles(self):
self.tiles = self.tile_generator.create_tiles()
def _calculate_verts(self):
for tile in self.tiles:
tile.calculate_verts()
def _calculate_faces(self):
for tile in self.tiles:
tile.calculate_faces()
| Python | 0 |
d71353d8d1e0778f121c3ec07067d617ab3ce932 | Add run() method to Backend and make start() a wrapper for it. Also set backend.running in backend.start and backend.stop. Whatever code runs in a loop in backend.run() needs to check self.running periodically to make sure it should still be running. | lib/rapidsms/backends/backend.py | lib/rapidsms/backends/backend.py | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
class Backend(object):
def __init__ (self, router):
self.router = router
self.running = False
def log(self, level, message):
self.router.log(level, message)
def start(self):
self.running = True
try:
self.run()
finally:
self.running = False
def run (self):
raise NotImplementedError
def stop(self):
self.running = False
def send(self):
raise NotImplementedError
def receive(self):
raise NotImplementedError
| #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
class Backend(object):
def __init__ (self, router):
self.router = router
def log(self, level, message):
self.router.log(level, message)
def start(self):
raise NotImplementedError
def stop(self):
raise NotImplementedError
def send(self):
raise NotImplementedError
def receive(self):
raise NotImplementedError
| Python | 0 |
3eacd6c42126f08c9f941f47bab59430d1180c59 | use default bulk actions | reversedict/indexer.py | reversedict/indexer.py | import contextlib
import collections
import nlp
import elastic
DEFAULT_SEEDS = ['philosophy','science','art','health','emotion']
def index_terms(seeds=None, max_count=5000):
'''
Index words by their definitions and synonyms.
Starts with a list of seed word, e.g. top 100 used terms.
Index the words, queue words occured in definitions for
indexing later. When dequeueing, pop the next most used word.
'''
with connect_search() as (index_term, indexed):
with init_queue(indexed, seeds) as (push_queue, pop_queue):
term = pop_queue()
while term:
print 'indexing', term
linked_terms = index_term(term)
push_queue(linked_terms)
if max_count and max_count <= len(indexed):
break
term = pop_queue()
print 'indexed', len(indexed), 'terms'
return True
@contextlib.contextmanager
def connect_search():
elastic.client.indices.create(index=elastic.SEARCH_INDEX, ignore=400)
actions = {}
def index_term(term):
'''
Look up definitions and synonyms of term,
then returns their tokens for indexing further
'''
definitions, synonyms = nlp.get_definitions_synonyms(term)
if not definitions:
return []
doc = {'term':term,
'definitions':definitions,
'synonyms':synonyms}
actions[term] = {'_op_type':'index',
'_id':hash(term),
'_index':elastic.SEARCH_INDEX,
'_type':'term',
'doc':doc
}
actions_count = len(actions)
if actions_count > 1000 and actions_count % 1000 == 0:
commit_index_actions()
return nlp.tokenize(*definitions + synonyms)
def commit_index_actions():
actionables = filter(None, actions.values())
results = elastic.helpers.bulk(elastic.client, actionables)
for is_success, response in results:
if not is_success:
print response
print 'committed', len(actionables), 'terms'; print
for term in actions:
actions[term] = None
return True
try:
yield index_term, actions.viewkeys()
finally:
if actions:
commit_index_actions()
elastic.client.indices.refresh(index=elastic.SEARCH_INDEX)
@contextlib.contextmanager
def init_queue(indexed, seeds=None):
seeds = seeds or DEFAULT_SEEDS
queue = collections.Counter()
is_not_indexed = lambda t: t not in indexed
def yield_terms():
while seeds:
yield seeds.pop(0)
while queue:
term,_ = queue.most_common(1)[0]
del queue[term]
yield term
def pop():
for term in yield_terms():
if is_not_indexed(term):
return term
def push(tokens):
queue.update(filter(is_not_indexed, tokens))
yield push, pop
| import contextlib
import collections
import nlp
import elastic
DEFAULT_SEEDS = ['philosophy','science','art','health','emotion']
def index_terms(seeds=None, max_count=5000):
'''
Index words by their definitions and synonyms.
Starts with a list of seed word, e.g. top 100 used terms.
Index the words, queue words occured in definitions for
indexing later. When dequeueing, pop the next most used word.
'''
with connect_search() as (index_term, indexed):
with init_queue(indexed, seeds) as (push_queue, pop_queue):
term = pop_queue()
while term:
print 'indexing', term
linked_terms = index_term(term)
push_queue(linked_terms)
if max_count and max_count <= len(indexed):
break
term = pop_queue()
print 'indexed', len(indexed), 'terms'
return True
@contextlib.contextmanager
def connect_search():
elastic.client.indices.create(index=elastic.SEARCH_INDEX, ignore=400)
actions = {}
def index_term(term):
'''
Look up definitions and synonyms of term,
then returns their tokens for indexing further
'''
definitions, synonyms = nlp.get_definitions_synonyms(term)
if not definitions:
return []
doc = {'term':term,
'definitions':definitions,
'synonyms':synonyms}
actions[term] = {'_op_type':'index',
'_id':hash(term),
'_index':elastic.SEARCH_INDEX,
'_type':'term',
'doc':doc
}
actions_count = len(actions)
if actions_count > 1000 and actions_count % 1000 == 0:
commit_index_actions()
return nlp.tokenize(*definitions + synonyms)
def commit_index_actions():
actionables = filter(None, actions.values())
results = elastic.helpers.parallel_bulk(elastic.client, actionables)
for is_success, response in results:
if not is_success:
print response
print 'committed', len(actionables), 'terms'; print
for term in actions:
actions[term] = None
return True
try:
yield index_term, actions.viewkeys()
finally:
if actions:
commit_index_actions()
elastic.client.indices.refresh(index=elastic.SEARCH_INDEX)
@contextlib.contextmanager
def init_queue(indexed, seeds=None):
seeds = seeds or DEFAULT_SEEDS
queue = collections.Counter()
is_not_indexed = lambda t: t not in indexed
def yield_terms():
while seeds:
yield seeds.pop(0)
while queue:
term,_ = queue.most_common(1)[0]
del queue[term]
yield term
def pop():
for term in yield_terms():
if is_not_indexed(term):
return term
def push(tokens):
queue.update(filter(is_not_indexed, tokens))
yield push, pop
| Python | 0 |
f81bc19a9627225113ff1a3fa2aa0e6446402acb | test that shorten is text/plain (answer no :S) | tests/test_api.py | tests/test_api.py | import unittest
from flask import url_for
import summerurlapp
import appconfig
import types
class SummerApiTestCase(unittest.TestCase):
"""Test that the API works as intended"""
testurl_http1 = "http://random.org"
testurl_bad = "random.org"
def setUp(self):
summerurlapp.app.config.from_object(appconfig.TestConfig)
self.app = summerurlapp.app.test_client()
summerurlapp.init_db()
def tearDown(self):
summerurlapp.init_db()
# use init_db() to clear the test db after testcase
def post_shorten(self, link):
return self.app.post("/api/shorten", data = dict(link = link))
def test_shorten(self):
resp = self.post_shorten(self.testurl_http1)
self.assertEqual(resp.headers['Content-Type'], "text/plain")
def test_getbyid_ok(self):
respPost = self.post_shorten(self.testurl_http1)
gotid = respPost.data[0]
respId = self.app.get('/api/' + gotid)
self.assertEqual(respId.status_code, 301)
self.assertEqual(respId.location, self.testurl_http1)
def test_getbyid_appendscheme(self):
respPost = self.post_shorten(self.testurl_bad)
gotid = respPost.data[0]
respId = self.app.get('/api/' + gotid)
self.assertEqual(respId.status_code, 301)
self.assertEqual(respId.location, "http://" + self.testurl_bad)
def test_getbyid_noid(self):
resp = self.app.get('/api/9000')
self.assertEqual(resp.status_code, 404)
resp = self.app.get('/api/nonexistentid')
self.assertEqual(resp.status_code, 404)
| import unittest
from flask import url_for
import summerurlapp
import appconfig
import types
class SummerApiTestCase(unittest.TestCase):
"""Test that the API works as intended"""
testurl_http1 = "http://random.org"
testurl_bad = "random.org"
def setUp(self):
summerurlapp.app.config.from_object(appconfig.TestConfig)
self.app = summerurlapp.app.test_client()
summerurlapp.init_db()
def tearDown(self):
summerurlapp.init_db()
# use init_db() to clear the test db after testcase
def post_shorten(self, link):
return self.app.post("/api/shorten", data = dict(link = link))
def test_shorten(self):
resp = self.post_shorten(self.testurl_http1)
self.assertEqual(resp.data[0], "1")
def test_getbyid_ok(self):
respPost = self.post_shorten(self.testurl_http1)
gotid = respPost.data[0]
respId = self.app.get('/api/' + gotid)
self.assertEqual(respId.status_code, 301)
self.assertEqual(respId.location, self.testurl_http1)
def test_getbyid_appendscheme(self):
respPost = self.post_shorten(self.testurl_bad)
gotid = respPost.data[0]
respId = self.app.get('/api/' + gotid)
self.assertEqual(respId.status_code, 301)
self.assertEqual(respId.location, "http://" + self.testurl_bad)
def test_getbyid_noid(self):
resp = self.app.get('/api/9000')
self.assertEqual(resp.status_code, 404)
resp = self.app.get('/api/nonexistentid')
self.assertEqual(resp.status_code, 404)
| Python | 0.999964 |
06ef2e114949e1e733227fb6ddad56f1ff4cbaed | Bump the version to 0.0.2 and add a __version_info__ tuple | llic.py | llic.py | """
Low-Level iCalendar library.
"""
from __future__ import unicode_literals
import pytz
__version__ = "0.0.2"
__version_info__ = tuple(int(n) for n in __version__.split("."))
DEFAULT_ICAL_LINE_LENGTH = 75
CRLF = b"\r\n"
CRLF_WRAP = b"\r\n "
NAME_VALUE_SEPARATOR = b":"
class BaseCalendarWriter(object):
def __init__(self, output, line_length=DEFAULT_ICAL_LINE_LENGTH):
self.output = output
self.line_length = line_length
self.line_position = 0
def write(self, octets):
assert self.line_position <= self.line_length
octets_len = len(octets)
if octets_len + self.line_position <= self.line_length:
self.output.write(octets)
self.line_position += octets_len
else:
self.__wrap_write(octets)
def __wrap_write(self, octets):
out = self.output
while octets:
write_count = self.line_length - self.line_position
out.write(octets[:write_count])
self.endline(True)
octets = octets[write_count:]
def endline(self, is_wrapping):
out = self.output
if is_wrapping:
out.write(CRLF_WRAP)
self.line_position = 1
else:
out.write(CRLF)
self.line_position = 0
def start_contentline(self, name):
self.write(name)
self.write(NAME_VALUE_SEPARATOR)
def value(self, value):
self.write(value)
def end_contentline(self):
self.endline(False)
TEXT_DELETE_CHARS = b"".join(chr(c) for c in range(0x0, 0x20))
class TypesCalendarWriterHelperMixin(object):
# The following range of chars cannot occur in iCalendar TEXT, so we
# just delete them.
def as_text(self, text):
"""
Write text escaped as an iCalendar TEXT value.
"""
if isinstance(text, unicode):
text = text.encode("utf-8")
# TEXT must be escaped as follows:
# \\ encodes \, \N or \n encodes newline
# \; encodes ;, \, encodes ,
text = text.replace(b"\\", b"\\\\") # escape \
text = text.replace(b"\n", b"\\n")
text = text.replace(b";", b"\\;")
text = text.replace(b",", b"\\,")
text = text.translate(None, TEXT_DELETE_CHARS)
return text
def as_datetime(self, dt):
if dt.tzinfo is None:
raise ValueError("dt must have a tzinfo, got: {!r}".format(dt))
if dt.tzinfo != pytz.utc:
dt = dt.astimezone(pytz.utc)
return dt.strftime("%Y%m%dT%H%M%SZ")
class CalendarWriterHelperMixin(object):
def contentline(self, name, value):
self.start_contentline(name)
self.value(value)
self.end_contentline()
def begin(self, section):
self.contentline("BEGIN", section)
def end(self, section):
self.contentline("END", section)
class CalendarWriter(TypesCalendarWriterHelperMixin, CalendarWriterHelperMixin, BaseCalendarWriter):
pass | """
Low-Level iCalendar library.
"""
from __future__ import unicode_literals
import pytz
__version__ = "0.0.1"
DEFAULT_ICAL_LINE_LENGTH = 75
CRLF = b"\r\n"
CRLF_WRAP = b"\r\n "
NAME_VALUE_SEPARATOR = b":"
class BaseCalendarWriter(object):
def __init__(self, output, line_length=DEFAULT_ICAL_LINE_LENGTH):
self.output = output
self.line_length = line_length
self.line_position = 0
def write(self, octets):
assert self.line_position <= self.line_length
octets_len = len(octets)
if octets_len + self.line_position <= self.line_length:
self.output.write(octets)
self.line_position += octets_len
else:
self.__wrap_write(octets)
def __wrap_write(self, octets):
out = self.output
while octets:
write_count = self.line_length - self.line_position
out.write(octets[:write_count])
self.endline(True)
octets = octets[write_count:]
def endline(self, is_wrapping):
out = self.output
if is_wrapping:
out.write(CRLF_WRAP)
self.line_position = 1
else:
out.write(CRLF)
self.line_position = 0
def start_contentline(self, name):
self.write(name)
self.write(NAME_VALUE_SEPARATOR)
def value(self, value):
self.write(value)
def end_contentline(self):
self.endline(False)
TEXT_DELETE_CHARS = b"".join(chr(c) for c in range(0x0, 0x20))
class TypesCalendarWriterHelperMixin(object):
# The following range of chars cannot occur in iCalendar TEXT, so we
# just delete them.
def as_text(self, text):
"""
Write text escaped as an iCalendar TEXT value.
"""
if isinstance(text, unicode):
text = text.encode("utf-8")
# TEXT must be escaped as follows:
# \\ encodes \, \N or \n encodes newline
# \; encodes ;, \, encodes ,
text = text.replace(b"\\", b"\\\\") # escape \
text = text.replace(b"\n", b"\\n")
text = text.replace(b";", b"\\;")
text = text.replace(b",", b"\\,")
text = text.translate(None, TEXT_DELETE_CHARS)
return text
def as_datetime(self, dt):
if dt.tzinfo is None:
raise ValueError("dt must have a tzinfo, got: {!r}".format(dt))
if dt.tzinfo != pytz.utc:
dt = dt.astimezone(pytz.utc)
return dt.strftime("%Y%m%dT%H%M%SZ")
class CalendarWriterHelperMixin(object):
def contentline(self, name, value):
self.start_contentline(name)
self.value(value)
self.end_contentline()
def begin(self, section):
self.contentline("BEGIN", section)
def end(self, section):
self.contentline("END", section)
class CalendarWriter(TypesCalendarWriterHelperMixin, CalendarWriterHelperMixin, BaseCalendarWriter):
pass | Python | 0.999962 |
d2a66d7251266e732246a67c4232e51464ef0952 | Support NZ geocoding | load.py | load.py | import json
import os
from datetime import date
import boto3
import requests
from googlemaps import Client
from twilio.rest import TwilioRestClient
s3 = boto3.resource('s3')
def run_poll(event, context):
""" Update S3 with latest trackpoints from Spot. """
s3_history = get_history_from_s3()
s3_latest_timeStamp = s3_history[-1]['timeStamp']
spot_new = get_history_from_spot( s3_latest_timeStamp )
if spot_new:
combined_history = s3_history + spot_new
write_to_s3(combined_history)
notify_by_text(s3_history[-1], combined_history[-1])
else:
pass
def latest_status():
""" Return a textual summary of the latest trackpoint. """
latest = get_history_from_s3()[-1]
return '{}\'s last tracked location was {} at {}.'.format(latest['name'], latest['location'], latest['timeStamp'])
def is_newer_than(s3_latest_timeStamp, spot_message):
return s3_latest_timeStamp < str(spot_message['dateTime'])
def get_history_from_spot(s3_latest_timeStamp):
gmaps = _get_gmaps()
def _build_track_point(spot_message):
def _reverse_geocode(lat, lon):
try:
#Hack
googleResp = gmaps.reverse_geocode((lat, lon))[0]['address_components']
for addr in googleResp:
if 'postal_town' in addr['types']:
return addr['long_name']
for addr in googleResp:
if 'locality' in addr['types']:
return addr['long_name']
except:
return '[Reverse Geocode Error]'
track_point = dict()
track_point['lat'] = spot_message['latitude']
track_point['lon'] = spot_message['longitude']
track_point['timeStamp'] = str(spot_message['dateTime'])
track_point['messageType'] = str(spot_message['messageType'])
track_point['location'] = _reverse_geocode(spot_message['latitude'], spot_message['longitude'])
track_point['name'] = str(spot_message['messengerName'])
return track_point
r = requests.get(
'https://api.findmespot.com/spot-main-web/consumer/rest-api/2.0/public/feed/0kh77fpkuvgEaVFm0LklfeKXetFB6Iqgr/message.json')
track = [_build_track_point(msg) for msg in r.json()['response']['feedMessageResponse']['messages']['message'] if is_newer_than(s3_latest_timeStamp, msg)]
track.reverse()
return track
def get_history_from_s3():
return json.loads(s3.Object('bikerid.es', 'track/history.json').get()['Body'].read())
def write_to_s3(history):
def _write_file_to_s3( name, content ):
s3.Object('bikerid.es', 'track/{}.json'.format(name)).put(Body=json.dumps(content),
ContentType='application/json')
_write_file_to_s3('latest',history[-1])
_write_file_to_s3('history',history)
def notify_by_text(previous_trackpoint, trackpoint):
if previous_trackpoint['timeStamp'].startswith(date.today().strftime('%Y-%m-%d')):
return
twilio = _get_twilio()
message = 'New location: {} at {}.'.format(trackpoint['location'], trackpoint['timeStamp'])
for number in os.environ.get('alerts_numbers', '').split(','):
twilio.messages.create(body=message, to=number, from_='+441631402022')
def _get_twilio():
return TwilioRestClient(os.environ['twilio_account'], os.environ['twilio_token'])
def _get_gmaps():
return Client(key=os.environ.get('gmaps_key', None))
if __name__ == '__main__':
run_poll(None, None)
| import json
import os
from datetime import date
import boto3
import requests
from googlemaps import Client
from twilio.rest import TwilioRestClient
s3 = boto3.resource('s3')
def run_poll(event, context):
""" Update S3 with latest trackpoints from Spot. """
s3_history = get_history_from_s3()
s3_latest_timeStamp = s3_history[-1]['timeStamp']
spot_new = get_history_from_spot( s3_latest_timeStamp )
if spot_new:
combined_history = s3_history + spot_new
write_to_s3(combined_history)
notify_by_text(s3_history[-1], combined_history[-1])
else:
pass
def latest_status():
""" Return a textual summary of the latest trackpoint. """
latest = get_history_from_s3()[-1]
return '{}\'s last tracked location was {} at {}.'.format(latest['name'], latest['location'], latest['timeStamp'])
def is_newer_than(s3_latest_timeStamp, spot_message):
return s3_latest_timeStamp < str(spot_message['dateTime'])
def get_history_from_spot(s3_latest_timeStamp):
gmaps = _get_gmaps()
def _build_track_point(spot_message):
def _reverse_geocode(lat, lon):
try:
for addr in gmaps.reverse_geocode((lat, lon))[0]['address_components']:
if 'postal_town' in addr['types']:
return addr['long_name']
except:
return '[Reverse Geocode Error]'
track_point = dict()
track_point['lat'] = spot_message['latitude']
track_point['lon'] = spot_message['longitude']
track_point['timeStamp'] = str(spot_message['dateTime'])
track_point['messageType'] = str(spot_message['messageType'])
track_point['location'] = _reverse_geocode(spot_message['latitude'], spot_message['longitude'])
track_point['name'] = str(spot_message['messengerName'])
return track_point
r = requests.get(
'https://api.findmespot.com/spot-main-web/consumer/rest-api/2.0/public/feed/0kh77fpkuvgEaVFm0LklfeKXetFB6Iqgr/message.json')
track = [_build_track_point(msg) for msg in r.json()['response']['feedMessageResponse']['messages']['message'] if is_newer_than(s3_latest_timeStamp, msg)]
track.reverse()
return track
def get_history_from_s3():
return json.loads(s3.Object('bikerid.es', 'track/history.json').get()['Body'].read())
def write_to_s3(history):
def _write_file_to_s3( name, content ):
s3.Object('bikerid.es', 'track/{}.json'.format(name)).put(Body=json.dumps(content),
ContentType='application/json')
_write_file_to_s3('latest',history[-1])
_write_file_to_s3('history',history)
def notify_by_text(previous_trackpoint, trackpoint):
if previous_trackpoint['timeStamp'].startswith(date.today().strftime('%Y-%m-%d')):
return
twilio = _get_twilio()
message = 'New location: {} at {}.'.format(trackpoint['location'], trackpoint['timeStamp'])
for number in os.environ.get('alerts_numbers', '').split(','):
twilio.messages.create(body=message, to=number, from_='+441631402022')
def _get_twilio():
return TwilioRestClient(os.environ['twilio_account'], os.environ['twilio_token'])
def _get_gmaps():
return Client(key=os.environ.get('gmaps_key', None))
if __name__ == '__main__':
run_poll(None, None)
| Python | 0 |
a396332ad66d31ac5caa1fcbf92ed564615fac85 | Use assert_raises in test_cli | tests/test_cli.py | tests/test_cli.py | import subprocess
import os
from nose.tools import eq_, assert_raises
# Get the filename of 'halibote.txt', which contains some mojibake about
# Harry Potter in Chinese
THIS_DIR = os.path.dirname(__file__)
TEST_FILENAME = os.path.join(THIS_DIR, 'halibote.txt')
CORRECT_OUTPUT = '【更新】《哈利波特》石堧卜才新婚娶初戀今痠逝\n'
FAILED_OUTPUT = '''ftfy error:
This input couldn't be decoded as 'windows-1252'. We got the following error:
'charmap' codec can't decode byte 0x90 in position 5: character maps to <undefined>
ftfy works best when its input is in a known encoding. You can use `ftfy -g`
to guess, if you're desperate. Otherwise, give the encoding name with the
`-e` option, such as `ftfy -e latin-1`.
'''
def get_command_output(args, stdin=None):
return subprocess.check_output(args, stdin=stdin, stderr=subprocess.STDOUT, timeout=5).decode('utf-8')
def test_basic():
output = get_command_output(['ftfy', TEST_FILENAME])
eq_(output, CORRECT_OUTPUT)
def test_guess_bytes():
output = get_command_output(['ftfy', '-g', TEST_FILENAME])
eq_(output, CORRECT_OUTPUT)
def test_alternate_encoding():
# The file isn't really in Windows-1252. But that's a problem ftfy
# can fix, if it's allowed to be sloppy when reading the file.
output = get_command_output(['ftfy', '-e', 'sloppy-windows-1252', TEST_FILENAME])
eq_(output, CORRECT_OUTPUT)
def test_wrong_encoding():
# It's more of a problem when the file doesn't actually decode.
with assert_raises(subprocess.CalledProcessError) as context:
get_command_output(['ftfy', '-e', 'windows-1252', TEST_FILENAME])
e = context.exception
eq_(e.output.decode('utf-8'), FAILED_OUTPUT)
def test_stdin():
with open(TEST_FILENAME, 'rb') as infile:
output = get_command_output(['ftfy'], stdin=infile)
eq_(output, CORRECT_OUTPUT)
| import subprocess
import os
from nose.tools import eq_
# Get the filename of 'halibote.txt', which contains some mojibake about
# Harry Potter in Chinese
THIS_DIR = os.path.dirname(__file__)
TEST_FILENAME = os.path.join(THIS_DIR, 'halibote.txt')
CORRECT_OUTPUT = '【更新】《哈利波特》石堧卜才新婚娶初戀今痠逝\n'
FAILED_OUTPUT = '''ftfy error:
This input couldn't be decoded as 'windows-1252'. We got the following error:
'charmap' codec can't decode byte 0x90 in position 5: character maps to <undefined>
ftfy works best when its input is in a known encoding. You can use `ftfy -g`
to guess, if you're desperate. Otherwise, give the encoding name with the
`-e` option, such as `ftfy -e latin-1`.
'''
def get_command_output(args, stdin=None):
return subprocess.check_output(args, stdin=stdin, stderr=subprocess.STDOUT, timeout=5).decode('utf-8')
def test_basic():
output = get_command_output(['ftfy', TEST_FILENAME])
eq_(output, CORRECT_OUTPUT)
def test_guess_bytes():
output = get_command_output(['ftfy', '-g', TEST_FILENAME])
eq_(output, CORRECT_OUTPUT)
def test_alternate_encoding():
# The file isn't really in Windows-1252. But that's a problem ftfy
# can fix, if it's allowed to be sloppy when reading the file.
output = get_command_output(['ftfy', '-e', 'sloppy-windows-1252', TEST_FILENAME])
eq_(output, CORRECT_OUTPUT)
def test_wrong_encoding():
# It's more of a problem when the file doesn't actually decode.
try:
get_command_output(['ftfy', '-e', 'windows-1252', TEST_FILENAME])
assert False, "Should have raised a CalledProcessError"
except subprocess.CalledProcessError as e:
eq_(e.output.decode('utf-8'), FAILED_OUTPUT)
def test_stdin():
with open(TEST_FILENAME, 'rb') as infile:
output = get_command_output(['ftfy'], stdin=infile)
eq_(output, CORRECT_OUTPUT)
| Python | 0.000001 |
83dc9b5f80268a5bd23a737d66a219067353f2b7 | change parameter handling | test_files.py | test_files.py | #!/usr/bin/env python
# Generate test directories to mess with from a list of filenames.
import argparse
import os
import sys
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input')
parser.add_argument('-t', '--target')
args = parser.parse_args()
base_dir = args.target if args.target else 'testing'
input_file = args.input or 'filenames.tsv'
with open(input_file) as fh:
[os.makedirs(os.path.join(base_dir, row.split()[0])) for row in fh.readlines()]
| #!/usr/bin/env python
# Generate test directories to mess with from a list of filenames.
import argparse
import os
import sys
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input')
parser.add_argument('-t', '--target')
args = parser.parse_args()
base_dir = args.target if args.target else 'testing'
input_file = args.input if args.input is not None else 'filenames.tsv'
with open(input_file) as fh:
[os.makedirs(os.path.join(base_dir, row.split()[0])) for row in fh.readlines()]
| Python | 0.000003 |
158eea6914360f45b456c2a4a15f9c9bcd26beca | fix incorrect test name | tests/test_svm.py | tests/test_svm.py | import unittest
from context import lily
from lily import svm, optimizer
from numpy import mat
import logging
logging.basicConfig(level=logging.WARNING, format="%(lineno)d\t%(message)s")
class TestSvm(unittest.TestCase):
def setUp(self):
self.tolerance = 0.001
self.C = 0.6
self.iterations = 40
self.data_matrix = []
self.label_matrix = []
self.test_data = [[-0.017612, 14.053064, 0],
[-1.395634, 4.662541, 1],
[-0.752157, 6.538620, 0],
[-1.322371, 7.152853, 0],
[0.423363, 11.054677, 0],
[0.406704, 7.067335, 1],
[0.667394, 12.741452, 0],
[-2.460150, 6.866805, 1],
[0.569411, 9.548755, 0],
[-0.026632, 10.427743, 0],
[0.850433, 6.920334, 1],
[1.347183, 13.175500, 0],
[1.176813, 3.167020, 1],
[-1.781871, 9.097953, 0],
[-0.566606, 5.749003, 1]]
for line in self.test_data:
self.data_matrix.append([float(line[0]), float(line[1])])
self.label_matrix.append(float(line[2]))
self.os = optimizer.Optimizer(mat(self.data_matrix),
mat(self.label_matrix).transpose(),
self.C,
self.tolerance)
def test_calculate_ek(self):
"""svm - calculate_ek calculates E value for a given alpha"""
for k in range(len(self.test_data)):
ek = svm.calculate_ek(self.os, k)
self.assertEqual(ek, -1.0)
def test_clip_alpha_greater_than_h(self):
"""svm - clip_alpha returns H when alpha greater than H"""
alpha = 8
H = 6
L = 5
self.assertEqual(svm.clip_alpha(alpha, H, L), 6)
def test_clip_alpha_less_than_l(self):
"""svm - clip_alpha returns L when alpha less than L"""
alpha = 8
H = 6
L = 7
self.assertEqual(svm.clip_alpha(alpha, H, L), 7)
def test_select_j_rand_doesnt_select_i(self):
"""svm - select_j_rand does not select i"""
i = 4
m = 76
self.assertNotEqual(svm.select_j_rand(i, m), i)
def test_needs_optimization_returns_false_for_low_ei(self):
"""
svm - needs_optimization returns false for small nonneg ei
"""
self.assertFalse(svm.needs_optimization(self.os, 5, 0.1))
def test_needs_optimization_returns_true_for_neg_ei(self):
"""
svm - needs_optimization returns true for small neg ei
"""
self.assertTrue(svm.needs_optimization(self.os, 5, -5.1))
if __name__ == '__main__':
unittest.main()
| import unittest
from context import lily
from lily import svm, optimizer
from numpy import mat
import logging
logging.basicConfig(level=logging.WARNING, format="%(lineno)d\t%(message)s")
class TestSvm(unittest.TestCase):
def setUp(self):
self.tolerance = 0.001
self.C = 0.6
self.iterations = 40
self.data_matrix = []
self.label_matrix = []
self.test_data = [[-0.017612, 14.053064, 0],
[-1.395634, 4.662541, 1],
[-0.752157, 6.538620, 0],
[-1.322371, 7.152853, 0],
[0.423363, 11.054677, 0],
[0.406704, 7.067335, 1],
[0.667394, 12.741452, 0],
[-2.460150, 6.866805, 1],
[0.569411, 9.548755, 0],
[-0.026632, 10.427743, 0],
[0.850433, 6.920334, 1],
[1.347183, 13.175500, 0],
[1.176813, 3.167020, 1],
[-1.781871, 9.097953, 0],
[-0.566606, 5.749003, 1]]
for line in self.test_data:
self.data_matrix.append([float(line[0]), float(line[1])])
self.label_matrix.append(float(line[2]))
self.os = optimizer.Optimizer(mat(self.data_matrix),
mat(self.label_matrix).transpose(),
self.C,
self.tolerance)
def test_calculate_ek(self):
"""svm - calculate_ek calculates E value for a given alpha"""
for k in range(len(self.test_data)):
ek = svm.calculate_ek(self.os, k)
self.assertEqual(ek, -1.0)
def test_clip_alpha_greater_than_h(self):
"""svm - clip_alpha returns H when alpha greater than H"""
alpha = 8
H = 6
L = 5
self.assertEqual(svm.clip_alpha(alpha, H, L), 6)
def test_clip_alpha_less_than_l(self):
"""svm - clip_alpha returns L when alpha less than L"""
alpha = 8
H = 6
L = 7
self.assertEqual(svm.clip_alpha(alpha, H, L), 7)
def test_select_j_rand_doesnt_select_i(self):
"""svm - select_j_rand does not select i"""
i = 4
m = 76
self.assertNotEqual(svm.select_j_rand(i, m), i)
def test_needs_optimization_returns_false_for_low_ei(self):
"""
svm - needs_optimization returns false for small nonneg ei
"""
self.assertFalse(svm.needs_optimization(self.os, 5, 0.1))
def test_needs_optimization_returns_false_for_high_ei(self):
"""
svm - needs_optimization returns true for small neg ei
"""
self.assertTrue(svm.needs_optimization(self.os, 5, -5.1))
if __name__ == '__main__':
unittest.main()
| Python | 0.9998 |
e94b2593424518632c704f4a440df3bc51cbcd3e | fix failing tests. | tests/test_uri.py | tests/test_uri.py | # encoding: utf-8
import unittest
from resources import URI
from resources import IRI
class TestURISnowman(unittest.TestCase):
def setUp(self):
idna = u"\N{SNOWMAN}".encode('idna')
uri = "http://u:p@www.%s:80/path" % idna
self.uri = URI(uri)
def testFail(self):
self.assertRaises(TypeError, URI, u"http://\u2603/")
def test_repr(self):
expect = "URI('http://www.xn--n3h/path', encoding='idna')".encode('ascii')
self.assertEquals(repr(self.uri), expect)
def test_netloc(self):
expect = "u:p@www.xn--n3h:80".encode('ascii')
self.assertEquals(self.uri.netloc, expect)
def test_hostname(self):
expect = "www.xn--n3h".encode('ascii')
self.assertEquals(self.uri.hostname, expect)
def test_port(self):
expect = "80"
self.assertEquals(self.uri.port, expect)
def test_path(self):
expect = "/path".encode('ascii')
self.assertEquals(self.uri.path, expect)
| # encoding: utf-8
import unittest
from resources import URI
from resources import IRI
class TestURISnowman(unittest.TestCase):
def setUp(self):
uri = "http://u:p@" + "www.\N{SNOWMAN}".encode('idna') + ":80/path"
self.uri = URI(uri)
def testFail(self):
self.assertRaises(TypeError, URI, u"http://\u2603/")
def test_repr(self):
expect = "URI('http://www.xn--n3h/path', encoding='idna')".encode('ascii')
self.assertEquals(repr(self.uri), expect)
def test_netloc(self):
expect = "u:p@www.xn--n3h:80".encode('ascii')
self.assertEquals(self.uri.netloc, expect)
def test_hostname(self):
expect = "www.xn--n3h".encode('ascii')
self.assertEquals(self.uri.hostname, expect)
def test_port(self):
expect = "80"
self.assertEquals(self.uri.port, expect)
def test_path(self):
expect = "/path".encode('ascii')
self.assertEquals(self.uri.path, expect)
| Python | 0.000001 |
4104ea04d75b400e7a2a4d71c259ceb0957f8992 | include the absolute url to the onsite page | crate_project/apps/packages/api.py | crate_project/apps/packages/api.py | from tastypie import fields
from tastypie.resources import ModelResource
from packages.models import Package, Release
class PackageResource(ModelResource):
releases = fields.ToManyField("packages.api.ReleaseResource", "releases")
class Meta:
allowed_methods = ["get"]
include_absolute_url = True
queryset = Package.objects.all()
resource_name = "package"
class ReleaseResource(ModelResource):
package = fields.ForeignKey(PackageResource, "package")
class Meta:
allowed_methods = ["get"]
fields = [
"author", "author_email", "created", "description", "download_uri",
"license", "maintainer", "maintainer_email", "package", "platform",
"requires_python", "summary", "version"
]
include_absolute_url = True
queryset = Release.objects.all()
resource_name = "release"
| from tastypie import fields
from tastypie.resources import ModelResource
from packages.models import Package, Release
class PackageResource(ModelResource):
releases = fields.ToManyField("packages.api.ReleaseResource", "releases")
class Meta:
allowed_methods = ["get"]
queryset = Package.objects.all()
resource_name = "package"
class ReleaseResource(ModelResource):
package = fields.ForeignKey(PackageResource, "package")
class Meta:
allowed_methods = ["get"]
fields = [
"author", "author_email", "created", "description", "download_uri",
"license", "maintainer", "maintainer_email", "package", "platform",
"requires_python", "summary", "version"
]
queryset = Release.objects.all()
resource_name = "release"
| Python | 0 |
6bc3e784828c1f339ab4fd5fe3ca6dc80a07bb46 | Enable logs | crawler/tasks.py | crawler/tasks.py | from __future__ import absolute_import, unicode_literals
from .celery import app
from celery.utils.log import get_task_logger
from .crawler import crawl_url
logger = get_task_logger(__name__)
@app.task(rate_limit="6/s", queue='crawler')
def crawl_url_task(url, value):
crawl_url(url, value)
response, status, redirected = crawl_url(url)
if response is not None:
logger.info(str(url) + " | " + str(response.status_code) + " | " + str(response.reason) +
" | " + str(response.headers['Content-Type']) + " | " + str(status) + " | Redirected: " + str(redirected))
else:
logger.info(url + " | " + str(status) + " | Redirected: " + str(redirected))
| from __future__ import absolute_import, unicode_literals
from .celery import app
# from celery.utils.log import get_task_logger
from .crawler import crawl_url
# logger = get_task_logger(__name__)
@app.task(rate_limit="6/s", queue='crawler')
def crawl_url_task(url, value):
crawl_url(url, value)
# response, status, redirected = crawl_url(url)
# if response is not None:
# logger.info(str(url) + " | " + str(response.status_code) + " | " + str(response.reason) +
# " | " + str(response.headers['Content-Type']) + " | " + str(status) + " | Redirected: " + str(redirected))
# else:
# logger.info(url + " | " + str(status) + " | Redirected: " + str(redirected))
| Python | 0.000001 |
89d70f5794969cb8d71201504b8645a8359f5b70 | read config file strings as unicode | credentials/settings/production.py | credentials/settings/production.py | from os import environ
import yaml
from credentials.settings.base import *
from credentials.settings.utils import get_env_setting, get_logger_config
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = ['*']
LOGGING = get_logger_config()
# Keep track of the names of settings that represent dicts. Instead of overriding the values in base.py,
# the values read from disk should UPDATE the pre-configured dicts.
DICT_UPDATE_KEYS = ('JWT_AUTH',)
# AMAZON S3 STORAGE CONFIGURATION
# See: https://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
# This may be overridden by the yaml in CREDENTIALS_CFG, but it should
# be here as a default.
FILE_STORAGE_BACKEND = {}
CONFIG_FILE = get_env_setting('CREDENTIALS_CFG')
with open(CONFIG_FILE, encoding='utf-8') as f:
config_from_yaml = yaml.load(f)
# Remove the items that should be used to update dicts, and apply them separately rather
# than pumping them into the local vars.
dict_updates = {key: config_from_yaml.pop(key, None) for key in DICT_UPDATE_KEYS}
for key, value in list(dict_updates.items()):
if value:
vars()[key].update(value)
vars().update(config_from_yaml)
# Load the files storage backend settings for django storages
vars().update(FILE_STORAGE_BACKEND)
if 'EXTRA_APPS' in locals():
INSTALLED_APPS += EXTRA_APPS
DB_OVERRIDES = dict(
PASSWORD=environ.get('DB_MIGRATION_PASS', DATABASES['default']['PASSWORD']),
ENGINE=environ.get('DB_MIGRATION_ENGINE', DATABASES['default']['ENGINE']),
USER=environ.get('DB_MIGRATION_USER', DATABASES['default']['USER']),
NAME=environ.get('DB_MIGRATION_NAME', DATABASES['default']['NAME']),
HOST=environ.get('DB_MIGRATION_HOST', DATABASES['default']['HOST']),
PORT=environ.get('DB_MIGRATION_PORT', DATABASES['default']['PORT']),
)
for override, value in DB_OVERRIDES.items():
DATABASES['default'][override] = value
| from os import environ
import yaml
from credentials.settings.base import *
from credentials.settings.utils import get_env_setting, get_logger_config
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = ['*']
LOGGING = get_logger_config()
# Keep track of the names of settings that represent dicts. Instead of overriding the values in base.py,
# the values read from disk should UPDATE the pre-configured dicts.
DICT_UPDATE_KEYS = ('JWT_AUTH',)
# AMAZON S3 STORAGE CONFIGURATION
# See: https://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
# This may be overridden by the yaml in CREDENTIALS_CFG, but it should
# be here as a default.
FILE_STORAGE_BACKEND = {}
CONFIG_FILE = get_env_setting('CREDENTIALS_CFG')
with open(CONFIG_FILE) as f:
config_from_yaml = yaml.load(f)
# Remove the items that should be used to update dicts, and apply them separately rather
# than pumping them into the local vars.
dict_updates = {key: config_from_yaml.pop(key, None) for key in DICT_UPDATE_KEYS}
for key, value in list(dict_updates.items()):
if value:
vars()[key].update(value)
vars().update(config_from_yaml)
# Load the files storage backend settings for django storages
vars().update(FILE_STORAGE_BACKEND)
if 'EXTRA_APPS' in locals():
INSTALLED_APPS += EXTRA_APPS
DB_OVERRIDES = dict(
PASSWORD=environ.get('DB_MIGRATION_PASS', DATABASES['default']['PASSWORD']),
ENGINE=environ.get('DB_MIGRATION_ENGINE', DATABASES['default']['ENGINE']),
USER=environ.get('DB_MIGRATION_USER', DATABASES['default']['USER']),
NAME=environ.get('DB_MIGRATION_NAME', DATABASES['default']['NAME']),
HOST=environ.get('DB_MIGRATION_HOST', DATABASES['default']['HOST']),
PORT=environ.get('DB_MIGRATION_PORT', DATABASES['default']['PORT']),
)
for override, value in DB_OVERRIDES.items():
DATABASES['default'][override] = value
| Python | 0.000344 |
8257411a58f03d8a353129f2813cbc516a0e40c6 | Make sure API tests are registered | editorsnotes/api/tests/__init__.py | editorsnotes/api/tests/__init__.py | from .serializers import *
from .views import *
| Python | 0 | |
314de0fd750e582f5156c29d623d9362f5c037e3 | Add missing "file=sys.stdout" [skip ci] | test_utils.py | test_utils.py | import _thread
import docker
import os
import socket
import sys
from http.server import HTTPServer, SimpleHTTPRequestHandler
class TestFixtureServer(object):
def __init__(self):
self.port = 9999
self.ip = self.get_python_server_ip()
def get_python_server_ip(self):
# https://stackoverflow.com/a/166589
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
host_ip = s.getsockname()[0]
s.close()
return host_ip
def _start_server(self):
server = HTTPServer((self.ip, self.port), SimpleHTTPRequestHandler)
server.serve_forever()
def start_server_in_background(self):
print(
"Starting Test Fixture Server on: http://{}:{}".format(
self.ip, self.port
), file=sys.stdout
)
# start the server in a background thread
_thread.start_new_thread(self._start_server, ())
class TestContainerRunner(object):
def __init__(self):
self.client = docker.from_env()
self.container_name = os.environ["CONTAINER_NAME"]
self.image_name = "image-" + os.environ["STAMP"]
self.repository = os.environ["REPO"]
self.containers = []
self.test_fixture_server = TestFixtureServer()
self.test_fixture_server.start_server_in_background()
self.outer_volume_path = "/tmp/" + self.container_name
self.inner_volume_path = "/refinery-data"
self._pull_image()
self._build_image()
def __enter__(self):
self.run()
def __exit__(self, *args):
if not os.environ.get("CONTINUOUS_INTEGRATION"):
self.docker_cleanup()
def _pull_image(self):
print("Pulling image: {}".format(self.image_name), file=sys.stdout)
self.client.images.pull(self.repository)
def _build_image(self):
print("Building image: {}".format(self.image_name), file=sys.stdout)
self.client.images.build(
path=".",
tag=self.image_name,
rm=True,
forcerm=True,
cache_from=[self.repository]
)
def run(self):
print("Creating container: {}".format(self.container_name),
file=sys.stdout)
container = self.client.containers.run(
self.image_name,
detach=True,
name=self.container_name,
environment={
"INPUT_JSON_URL":
"http://{}:{}/test-data/input.json".format(
self.test_fixture_server.ip,
self.test_fixture_server.port
)
},
ports={"80/tcp": None},
publish_all_ports=True,
extra_hosts={socket.gethostname(): self.test_fixture_server.ip},
volumes={
self.outer_volume_path: {
'bind': self.inner_volume_path, 'mode': 'rw'
}
}
)
self.containers.append(container)
def docker_cleanup(self):
print("Cleaning up TestContainerRunner containers/images...",
file=sys.stdout)
for container in self.containers:
container.remove(force=True, v=True)
self.client.images.remove(self.image_name)
| import _thread
import docker
import os
import socket
import sys
from http.server import HTTPServer, SimpleHTTPRequestHandler
class TestFixtureServer(object):
def __init__(self):
self.port = 9999
self.ip = self.get_python_server_ip()
def get_python_server_ip(self):
# https://stackoverflow.com/a/166589
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
host_ip = s.getsockname()[0]
s.close()
return host_ip
def _start_server(self):
server = HTTPServer((self.ip, self.port), SimpleHTTPRequestHandler)
server.serve_forever()
def start_server_in_background(self):
print(
"Starting Test Fixture Server on: http://{}:{}".format(
self.ip, self.port
), file=sys.stdout
)
# start the server in a background thread
_thread.start_new_thread(self._start_server, ())
class TestContainerRunner(object):
def __init__(self):
self.client = docker.from_env()
self.container_name = os.environ["CONTAINER_NAME"]
self.image_name = "image-" + os.environ["STAMP"]
self.repository = os.environ["REPO"]
self.containers = []
self.test_fixture_server = TestFixtureServer()
self.test_fixture_server.start_server_in_background()
self.outer_volume_path = "/tmp/" + self.container_name
self.inner_volume_path = "/refinery-data"
self._pull_image()
self._build_image()
def __enter__(self):
self.run()
def __exit__(self, *args):
if not os.environ.get("CONTINUOUS_INTEGRATION"):
self.docker_cleanup()
def _pull_image(self):
print("Pulling image: {}".format(self.image_name), file=sys.stdout)
self.client.images.pull(self.repository)
def _build_image(self):
print("Building image: {}".format(self.image_name))
self.client.images.build(
path=".",
tag=self.image_name,
rm=True,
forcerm=True,
cache_from=[self.repository]
)
def run(self):
print("Creating container: {}".format(self.container_name),
file=sys.stdout)
container = self.client.containers.run(
self.image_name,
detach=True,
name=self.container_name,
environment={
"INPUT_JSON_URL":
"http://{}:{}/test-data/input.json".format(
self.test_fixture_server.ip,
self.test_fixture_server.port
)
},
ports={"80/tcp": None},
publish_all_ports=True,
extra_hosts={socket.gethostname(): self.test_fixture_server.ip},
volumes={
self.outer_volume_path: {
'bind': self.inner_volume_path, 'mode': 'rw'
}
}
)
self.containers.append(container)
def docker_cleanup(self):
print("Cleaning up TestContainerRunner containers/images...",
file=sys.stdout)
for container in self.containers:
container.remove(force=True, v=True)
self.client.images.remove(self.image_name)
| Python | 0.000001 |
79bd1165ece909ad8c302f7127c8c8fa1cd67794 | Remove unused parameters. When calling, use named parameters to keep things clear. | api/base/filters.py | api/base/filters.py | import re
import functools
from modularodm import Q
from rest_framework.filters import OrderingFilter
from rest_framework import serializers as ser
class ODMOrderingFilter(OrderingFilter):
"""Adaptation of rest_framework.filters.OrderingFilter to work with modular-odm."""
# override
def filter_queryset(self, request, queryset, view):
ordering = self.get_ordering(request, queryset, view)
if ordering:
return queryset.sort(*ordering)
return queryset
query_pattern = re.compile(r'filter\[\s*(?P<field>\S*)\s*\]\s*')
def query_params_to_fields(query_params):
return {
query_pattern.match(key).groupdict()['field']: value
for key, value in query_params.items()
if query_pattern.match(key)
}
# Used to make intersection "reduce-able"
def intersect(x, y):
return x & y
class ODMFilterMixin(object):
"""View mixin that adds a get_query_from_request method which converts query params
of the form `filter[field_name]=value` into an ODM Query object.
Subclasses must define `get_default_odm_query()`.
Serializers that want to restrict which fields are used for filtering need to have a variable called
filterable_fields which is a list of strings representing the field names as they appear in the serialization.
"""
# TODO Handle simple and complex non-standard fields
TRUTHY = set(['true', 'True', 1, '1'])
FALSY = set(['false', 'False', 0, '0'])
DEFAULT_OPERATOR = 'eq'
# For the field_comparison_operators, instances can be a class or a tuple of classes
field_comparison_operators = [
{
'field_type': ser.CharField,
'comparison_operator': 'icontains'
},
{
'field_type': ser.ListField,
'comparison_operator': 'in'
}
]
def get_comparison_operator(self, key):
for operator in self.field_comparison_operators:
if isinstance(self.serializer_class._declared_fields[key], operator['field_type']):
return operator['comparison_operator']
return self.DEFAULT_OPERATOR
def is_filterable_field(self, key):
try:
return key.strip() in self.serializer_class.filterable_fields
except AttributeError:
return key.strip() in self.serializer_class._declared_fields
def get_default_odm_query(self):
raise NotImplementedError('Must define get_default_odm_query')
def get_query_from_request(self):
query = self.query_params_to_odm_query(self.request.QUERY_PARAMS)
if not query:
query = self.get_default_odm_query()
return query
def query_params_to_odm_query(self, query_params):
"""Convert query params to a modularodm Query object."""
fields_dict = query_params_to_fields(query_params)
if fields_dict:
query_parts = [
Q(self.convert_key(key=key), self.get_comparison_operator(key=key), self.convert_value(value=value))
for key, value in fields_dict.items() if self.is_filterable_field(key=key)
]
# TODO Ensure that if you try to filter on an invalid field, it returns a useful error.
try:
query = functools.reduce(intersect, query_parts)
except TypeError:
query = None
else:
query = None
return query
# Used so that that queries by _id will work
def convert_key(self, key):
key = key.strip()
if self.serializer_class._declared_fields[key].source:
return self.serializer_class._declared_fields[key].source
return key
# Used to convert string values from query params to Python booleans when necessary
def convert_value(self, value):
value = value.strip()
if value in self.TRUTHY:
return True
elif value in self.FALSY:
return False
# Convert me to current user's pk
elif value == 'me' and not self.request.user.is_anonymous():
return self.request.user.pk
else:
return value
| import re
import functools
from modularodm import Q
from rest_framework.filters import OrderingFilter
from rest_framework import serializers as ser
class ODMOrderingFilter(OrderingFilter):
    """Adaptation of rest_framework.filters.OrderingFilter to work with modular-odm."""

    def filter_queryset(self, request, queryset, view):
        # Override: modular-odm querysets sort via .sort(), not .order_by().
        sort_fields = self.get_ordering(request, queryset, view)
        if not sort_fields:
            return queryset
        return queryset.sort(*sort_fields)
# Matches query-param keys of the form ``filter[<field>]`` (whitespace
# inside the brackets is tolerated).
query_pattern = re.compile(r'filter\[\s*(?P<field>\S*)\s*\]\s*')


def query_params_to_fields(query_params):
    """Map ``filter[field]=value`` query params to a ``{field: value}`` dict.

    Params whose key does not look like ``filter[...]`` are ignored.
    """
    fields = {}
    for key, value in query_params.items():
        match = query_pattern.match(key)
        # Run the regex once per key; the original matched each key twice
        # (once in the value expression, once in the condition).
        if match:
            fields[match.group('field')] = value
    return fields
# Used to make intersection "reduce-able"
def intersect(first, second):
    """AND two ODM queries together; shaped for use with functools.reduce."""
    combined = first & second
    return combined
class ODMFilterMixin(object):
    """View mixin that adds a get_query_from_request method which converts query params
    of the form `filter[field_name]=value` into an ODM Query object.

    Subclasses must define `get_default_odm_query()`.

    Serializers that want to restrict which fields are used for filtering need to have
    a variable called ``filterable_fields``: a list of field names as they appear in
    the serialization.
    """
    # TODO Handle simple and complex non-standard fields
    # String spellings accepted as booleans in query-param values.
    TRUTHY = set(['true', 'True', 1, '1'])
    FALSY = set(['false', 'False', 0, '0'])
    DEFAULT_OPERATOR = 'eq'
    # For the field_comparison_operators, instances can be a class or a tuple of classes
    field_comparison_operators = [
        {
            'field_type': ser.CharField,
            'comparison_operator': 'icontains'
        },
        {
            'field_type': ser.ListField,
            'comparison_operator': 'in'
        }
    ]
    def get_comparison_operator(self, key, value):
        # First matching serializer field type wins; otherwise exact match.
        for operator in self.field_comparison_operators:
            if isinstance(self.serializer_class._declared_fields[key], operator['field_type']):
                return operator['comparison_operator']
        return self.DEFAULT_OPERATOR
    def is_filterable_field(self, key, value):
        # Whitelist via `filterable_fields` when present; otherwise any
        # declared serializer field is filterable.
        try:
            return key.strip() in self.serializer_class.filterable_fields
        except AttributeError:
            return key.strip() in self.serializer_class._declared_fields
    def get_default_odm_query(self):
        # Subclass hook: the fallback query when no filters were supplied.
        raise NotImplementedError('Must define get_default_odm_query')
    def get_query_from_request(self):
        # Fall back to the subclass default when no usable filter params exist.
        query = self.query_params_to_odm_query(self.request.QUERY_PARAMS)
        if not query:
            query = self.get_default_odm_query()
        return query
    def query_params_to_odm_query(self, query_params):
        """Convert query params to a modularodm Query object (None when nothing applies)."""
        fields_dict = query_params_to_fields(query_params)
        if fields_dict:
            query_parts = [
                Q(self.convert_key(key, value), self.get_comparison_operator(key, value), self.convert_value(key, value))
                for key, value in fields_dict.items() if self.is_filterable_field(key, value)
            ]
            # TODO Ensure that if you try to filter on an invalid field, it returns a useful error.
            try:
                query = functools.reduce(intersect, query_parts)
            except TypeError:
                # reduce() over an empty list raises TypeError: every
                # requested field was rejected as non-filterable.
                query = None
        else:
            query = None
        return query
    # Used so that that queries by _id will work
    def convert_key(self, key, value):
        key = key.strip()
        if self.serializer_class._declared_fields[key].source:
            return self.serializer_class._declared_fields[key].source
        return key
    # Used to convert string values from query params to Python booleans when necessary
    def convert_value(self, key, value):
        value = value.strip()
        if value in self.TRUTHY:
            return True
        elif value in self.FALSY:
            return False
        # Convert me to current user's pk
        elif value == 'me' and not self.request.user.is_anonymous():
            return self.request.user.pk
        else:
            return value
| Python | 0 |
2982d38d863e6f7654c4939a526d6e783525f8d6 | refactor compare_players | cribbage/main.py | cribbage/main.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
from cribbage.game import Game
from cribbage.randomplayer import RandomCribbagePlayer
from cribbage.simpleplayer import SimpleCribbagePlayer
def compare_players(players, num_games=1000):
    """Play ``num_games`` games between ``players`` and tally the wins.

    Args:
        players: sequence of player objects accepted by ``Game``.
        num_games: how many games to play.

    Returns:
        A list of win counts, one entry per player, in the same order as
        ``players``.  (Generalized from the original hard-coded ``[0, 0]``
        so player counts other than two are tallied correctly.)
    """
    stats = [0] * len(players)
    for _ in range(num_games):
        game = Game(players)
        game.play()
        # Game.winner is assumed to be the winning player's index — matches
        # how the callers below read stats; confirm against Game.play().
        stats[game.winner] += 1
    return stats
# ------------------------------------------------------------
# Cribbage Game
stats = compare_players([RandomCribbagePlayer(), RandomCribbagePlayer()])
# stats
# [487, 513]
stats = compare_players([RandomCribbagePlayer(), SimpleCribbagePlayer()], 500)
# with discard()
# stats
# [16, 484]
# with play_card()
# stats
# [12, 488]
# 0.976 success against random player
# http://www.socscistatistics.com/tests/chisquare/Default2.aspx
# The chi-square statistic is 0.5879. The p-value is .443236.
stats = compare_players([RandomCribbagePlayer(),
SimpleCribbagePlayer(estimate_discard=False)],
500)
# stats
# [161, 339]
stats = compare_players([SimpleCribbagePlayer(),
SimpleCribbagePlayer(estimate_playcard=False)],
500)
# stats
# [326, 174]
# stats (after optimizing code)
# [298, 202]
# [325, 175]
def myfunc():
    # cProfile entry point: run 100 games between a full SimpleCribbagePlayer
    # and one with play-card estimation disabled; the result is intentionally
    # unused — only the execution profile matters.
    stats = compare_players([SimpleCribbagePlayer(),
                             SimpleCribbagePlayer(estimate_playcard=False)],
                            100)
import cProfile
cProfile.run('myfunc()', sort='time')
# deck=make_deck()
# random.shuffle(deck)
# p=SimpleCribbagePlayer()
# hand=deck[:6]
# def wrap_discard():
# for i in range(1000):
# p.discard(hand,False)
# import hotshot
# prof = hotshot.Profile("stones.prof")
# prof.runcall(wrap_discard)
# prof.close()
# import hotshot.stats
# stats = hotshot.stats.load("stones.prof")
# stats.sort_stats('time', 'calls')
# stats.print_stats(20)
stats = compare_players([SimpleCribbagePlayer(estimate_discard=False),
SimpleCribbagePlayer(estimate_playcard=False)],
500)
# stats
# [48, 452]
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
from cribbage.game import Game
from cribbage.randomplayer import RandomCribbagePlayer
from cribbage.simpleplayer import SimpleCribbagePlayer
# ------------------------------------------------------------
# Cribbage Game
stats = [0,0]
for i in range(1000):
g = Game([RandomCribbagePlayer(), RandomCribbagePlayer()])
g.play()
stats[g.winner] += 1
# stats
# [487, 513]
stats = [0,0]
for i in range(500):
g = Game([RandomCribbagePlayer(), SimpleCribbagePlayer()])
g.play()
stats[g.winner] += 1
# with discard()
# stats
# [16, 484]
# with play_card()
# stats
# [12, 488]
# 0.976 success against random player
# http://www.socscistatistics.com/tests/chisquare/Default2.aspx
# The chi-square statistic is 0.5879. The p-value is .443236.
stats = [0,0]
for i in range(500):
g = Game([RandomCribbagePlayer(), SimpleCribbagePlayer(estimate_discard=False)])
g.play()
stats[g.winner] += 1
# stats
# [161, 339]
stats = [0,0]
for i in range(500):
g = Game([SimpleCribbagePlayer(), SimpleCribbagePlayer(estimate_playcard=False)])
g.play()
stats[g.winner] += 1
# stats
# [326, 174]
# stats (after optimizing code)
# [298, 202]
# [325, 175]
def myfunc():
stats = [0,0]
for i in range(100):
g = Game([SimpleCribbagePlayer(), SimpleCribbagePlayer(estimate_playcard=False)])
g.play()
stats[g.winner] += 1
import cProfile
cProfile.run('myfunc()', sort='time')
# deck=make_deck()
# random.shuffle(deck)
# p=SimpleCribbagePlayer()
# hand=deck[:6]
# def wrap_discard():
# for i in range(1000):
# p.discard(hand,False)
# import hotshot
# prof = hotshot.Profile("stones.prof")
# prof.runcall(wrap_discard)
# prof.close()
# import hotshot.stats
# stats = hotshot.stats.load("stones.prof")
# stats.sort_stats('time', 'calls')
# stats.print_stats(20)
stats = [0,0]
for i in range(500):
g = Game([SimpleCribbagePlayer(estimate_discard=False), SimpleCribbagePlayer(estimate_playcard=False)])
g.play()
stats[g.winner] += 1
# stats
# [48, 452]
| Python | 0.000004 |
7ec191ce0b82827013485a98db84cd66aa2ca1b4 | use the currently checked out branch | lib/spack/spack/cmd/bootstrap.py | lib/spack/spack/cmd/bootstrap.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
from subprocess import check_call
import llnl.util.tty as tty
from llnl.util.filesystem import join_path, mkdirp
import spack
from spack.util.executable import which
_SPACK_UPSTREAM = 'https://github.com/llnl/spack'
description = "Create a new installation of spack in another prefix"
def setup_parser(subparser):
subparser.add_argument('prefix', help="names of prefix where we should install spack")
def get_origin_info():
    """Return ``(origin_url, branch)`` describing this spack checkout.

    Falls back to the canonical upstream URL and the 'develop' branch when
    the local checkout has no usable git metadata.
    """
    git_dir = join_path(spack.prefix, '.git')
    git = which('git', required=True)
    try:
        branch = git('symbolic-ref', '--short', 'HEAD', output=str)
    except Exception:
        # Bug fix: the original caught ProcessError, a name this module
        # never imports — evaluating the except clause itself raised
        # NameError.  Catch broadly so the documented fallback happens.
        branch = 'develop'
        tty.warn('No branch found; using default branch: %s' % branch)
    try:
        origin_url = git(
            '--git-dir=%s' % git_dir,
            'config', '--get', 'remote.origin.url',
            output=str)
    except Exception:
        origin_url = _SPACK_UPSTREAM
        tty.warn('No git repository found; '
                 'using default upstream URL: %s' % origin_url)
    return (origin_url.strip(), branch.strip())
def bootstrap(parser, args):
    """Clone the currently running spack (same origin URL and branch) into
    ``args.prefix``.

    Refuses to run when the prefix is an existing file, already contains a
    git repository, or holds any other files.
    """
    origin_url, branch = get_origin_info()
    prefix = args.prefix
    tty.msg("Fetching spack from origin: %s" % origin_url)
    if os.path.isfile(prefix):
        tty.die("There is already a file at %s" % prefix)
    mkdirp(prefix)
    if os.path.exists(join_path(prefix, '.git')):
        tty.die("There already seems to be a git repository in %s" % prefix)
    files_in_the_way = os.listdir(prefix)
    if files_in_the_way:
        tty.die("There are already files there! Delete these files before boostrapping spack.",
                *files_in_the_way)
    tty.msg("Installing:",
            "%s/bin/spack" % prefix,
            "%s/lib/spack/..." % prefix)
    os.chdir(prefix)
    git = which('git', required=True)
    # Mirror only the needed branch into a fresh shared repo, then check it
    # out; '-n' skips tags, '-q' keeps the output quiet.
    git('init', '--shared', '-q')
    git('remote', 'add', 'origin', origin_url)
    git('fetch', 'origin', '%s:refs/remotes/origin/%s' % (branch, branch),
        '-n', '-q')
    git('reset', '--hard', 'origin/%s' % branch, '-q')
    tty.msg("Successfully created a new spack in %s" % prefix,
            "Run %s/bin/spack to use this installation." % prefix)
| ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
from subprocess import check_call
import llnl.util.tty as tty
from llnl.util.filesystem import join_path, mkdirp
import spack
from spack.util.executable import which
_SPACK_UPSTREAM = 'https://github.com/llnl/spack'
description = "Create a new installation of spack in another prefix"
def setup_parser(subparser):
subparser.add_argument('prefix', help="names of prefix where we should install spack")
def get_origin_url():
    """Return the git origin URL of this spack checkout.

    Falls back to the canonical upstream URL when there is no usable git
    metadata in the installation prefix.
    """
    git_dir = join_path(spack.prefix, '.git')
    git = which('git', required=True)
    try:
        origin_url = git(
            '--git-dir=%s' % git_dir,
            'config', '--get', 'remote.origin.url',
            output=str)
    except Exception:
        # Bug fix: the original caught ProcessError, a name this module
        # never imports — evaluating the except clause itself raised
        # NameError.  Catch broadly so the documented fallback happens.
        origin_url = _SPACK_UPSTREAM
        tty.warn('No git repository found; '
                 'using default upstream URL: %s' % origin_url)
    return origin_url.strip()
def bootstrap(parser, args):
origin_url = get_origin_url()
prefix = args.prefix
tty.msg("Fetching spack from origin: %s" % origin_url)
if os.path.isfile(prefix):
tty.die("There is already a file at %s" % prefix)
mkdirp(prefix)
if os.path.exists(join_path(prefix, '.git')):
tty.die("There already seems to be a git repository in %s" % prefix)
files_in_the_way = os.listdir(prefix)
if files_in_the_way:
tty.die("There are already files there! Delete these files before boostrapping spack.",
*files_in_the_way)
tty.msg("Installing:",
"%s/bin/spack" % prefix,
"%s/lib/spack/..." % prefix)
os.chdir(prefix)
git = which('git', required=True)
git('init', '--shared', '-q')
git('remote', 'add', 'origin', origin_url)
git('fetch', 'origin', 'master:refs/remotes/origin/master', '-n', '-q')
git('reset', '--hard', 'origin/master', '-q')
tty.msg("Successfully created a new spack in %s" % prefix,
"Run %s/bin/spack to use this installation." % prefix)
| Python | 0 |
1c432cd4cfb0d6f8bb54727e2b16b58fd7feb390 | Add more categorical data | tests/data.py | tests/data.py | import pytest
import numpy as np
@pytest.fixture
def continuous_data():
    """1-D regression data where y == x on the integers 0..4."""
    xs = np.arange(5)
    return xs, np.arange(5)
@pytest.fixture
def continuous_data_complicated():
    """x is 0..9; y tracks x for the first half, then jumps irregularly."""
    xs = np.arange(10)
    ys = np.array([0, 1, 2, 3, 4, 6, 6.1, 6.2, 7, 7.1])
    return xs, ys
@pytest.fixture
def categorical_data():
    """Six 1-D points: first three labelled 0, last three labelled 1."""
    xs = np.arange(6)
    ys = np.array([0] * 3 + [1] * 3)
    return xs, ys
@pytest.fixture
def tall_matrix_data():
    """A tall (4x3) matrix whose rows cluster near (1, 2, 3)."""
    rows = [[1, 2, 3],
            [1.1, 2.05, 3],
            [0.99, 2, 3],
            [0.98, 2.1, 3]]
    return np.array(rows)
@pytest.fixture
def tall_matrix_data_2():
    """The tall 4x3 matrix plus a 0..3 target vector."""
    rows = [[1, 2, 3],
            [1.1, 2.05, 3],
            [0.99, 2, 3],
            [0.98, 2.1, 3]]
    return np.array(rows), np.arange(4)
@pytest.fixture
def categorical_2Dmatrix_data():
    """Six 2-D points with binary labels (the last two points are class 1)."""
    points = [[1, 2], [3, 3], [5, 2], [1, 4], [9, 6], [8, 8]]
    labels = [0, 0, 0, 0, 1, 1]
    return np.array(points), np.array(labels)
@pytest.fixture
def categorical_2Dmatrix_data_big():
    """Eleven 2-D points with binary labels (five 0s, then six 1s)."""
    points = [[1.1, 1.5], [1, 2], [3, 3], [5, 2], [1, 4],
              [9, 6], [8, 8], [8.1, 9], [7.7, 7.1], [6, 12], [10, 6]]
    labels = [0] * 5 + [1] * 6
    return np.array(points), np.array(labels)
@pytest.fixture
def categorical_2Dmatrix_bernoulli_data():
    """Six binary 2-D feature rows with binary labels."""
    points = [[0, 0], [0, 0], [1, 1], [0, 0], [1, 1], [1, 1]]
    labels = [0, 0, 0, 0, 1, 1]
    return np.array(points), np.array(labels)
| import pytest
import numpy as np
@pytest.fixture
def continuous_data():
x = np.array(range(5))
y = np.array(range(5))
return x, y
@pytest.fixture
def continuous_data_complicated():
x = np.array(range(10))
y = np.array(list(range(5)) + [6, 6.1, 6.2, 7, 7.1])
return x, y
@pytest.fixture
def categorical_data():
x = np.array(range(6))
y = np.array([0, 0, 0, 1, 1, 1])
return x, y
@pytest.fixture
def tall_matrix_data():
x = np.array([[1, 2, 3],
[1.1, 2.05, 3],
[0.99, 2, 3],
[0.98, 2.1, 3]])
return x
@pytest.fixture
def categorical_2Dmatrix_data():
x = np.array([[1, 2],
[3, 3],
[5, 2],
[1, 4],
[9, 6],
[8, 8]])
y = np.array([0, 0, 0, 0, 1, 1])
return x, y
@pytest.fixture
def categorical_2Dmatrix_data_big():
    """Eleven 2-D points with binary labels (five 0s, then six 1s).

    Bug fix: the ``@pytest.fixture`` decorator was missing, so pytest
    treated this as a plain function and tests requesting the fixture by
    name could not resolve it (the sibling fixtures in this module all
    carry the decorator).
    """
    x = np.array([[1.1, 1.5],
                  [1, 2],
                  [3, 3],
                  [5, 2],
                  [1, 4],
                  [9, 6],
                  [8, 8],
                  [8.1, 9],
                  [7.7, 7.1],
                  [6, 12],
                  [10, 6]])
    y = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
    return x, y
@pytest.fixture
def categorical_2Dmatrix_bernoulli_data():
x = np.array([[0, 0],
[0, 0],
[1, 1],
[0, 0],
[1, 1],
[1, 1]])
y = np.array([0, 0, 0, 0, 1, 1])
return x, y
| Python | 0.000073 |
a0546aafb34d27b55a59fdf499727cb5ef44cca4 | Print table-header | performance/routine.py | performance/routine.py | from performance import web
import time
import threading
class Tool:
    """Drives a performance run: spawns one web.Client thread per configured
    client and blocks until they all finish (or the user aborts with CTRL-C).
    """
    def __init__(self, config):
        # Fail fast on anything that is not a routine.Config.
        if not isinstance(config, Config):
            raise TypeError('No performance.routine.Config object')
        self.config = config
    def run(self):
        """Start all client threads and wait for completion or CTRL-C."""
        if self.config.is_valid():
            # run_event stays set while clients should keep working; it is
            # cleared to ask them to stop early on CTRL-C.
            run_event = threading.Event()
            run_event.set()
            finish_event = FinishEvent()
            clients = []
            print(' > Starting tests')
            print(' > Stop tests with CTRL-C')
            print(' URL Time Code')
            for client_index in range(self.config.clients_count):
                client = web.Client(
                    host=self.config.host,
                    requests=self.config.requests,
                    do_requests_counter=self.config.requests_per_client,
                    event=run_event,
                    finish_event=finish_event
                )
                clients.append(client)
                client.start()
            try:
                # Poll until every client has reported in via finish_event.
                while finish_event.finished < self.config.clients_count:
                    time.sleep(.1)
                print(' > Finished tests')
            except KeyboardInterrupt:
                # Ask clients to stop, then wait for their threads to exit.
                run_event.clear()
                for client in clients:
                    client.join()
                print(' > Exited with CTRL-C')
        else:
            print('Invalid configuration')
class Config:
    """Holds the parameters of a performance run."""

    def __init__(self, host, requests_per_client=10, clients_count=1):
        self.host = host
        self.requests = []
        self.requests_per_client = requests_per_client
        self.clients_count = clients_count

    def add_request(self, request):
        """Register a request; only performance.web.Request objects are accepted."""
        if not isinstance(request, web.Request):
            raise TypeError('No performance.web.Request object')
        self.requests.append(request)

    def is_valid(self):
        """A run needs at least one request, one client, and one request per client."""
        has_requests = bool(self.requests)
        return (has_requests
                and self.clients_count >= 1
                and self.requests_per_client >= 1)
class FinishEvent:
    """Counts how many client threads have finished their work."""

    def __init__(self):
        self.finished = 0

    def finish(self):
        """Record that one more client has finished."""
        self.finished += 1
| from performance import web
import time
import threading
class Tool:
def __init__(self, config):
if not isinstance(config, Config):
raise TypeError('No performance.routine.Config object')
self.config = config
def run(self):
if self.config.is_valid():
run_event = threading.Event()
run_event.set()
finish_event = FinishEvent()
clients = []
print(' > Starting tests')
print(' > Stop tests with CTRL-C')
for client_index in range(self.config.clients_count):
client = web.Client(
host=self.config.host,
requests=self.config.requests,
do_requests_counter=self.config.requests_per_client,
event=run_event,
finish_event=finish_event
)
clients.append(client)
client.start()
try:
while finish_event.finished < self.config.clients_count:
time.sleep(.1)
print(' > Finished tests')
except KeyboardInterrupt:
run_event.clear()
for client in clients:
client.join()
print(' > Exited with CTRL-C')
else:
print('Invalid configuration')
class Config:
def __init__(self, host, requests_per_client=10, clients_count=1):
self.host = host
self.requests = []
self.requests_per_client = requests_per_client
self.clients_count = clients_count
def add_request(self, request):
if not isinstance(request, web.Request):
raise TypeError('No performance.web.Request object')
self.requests.append(request)
def is_valid(self):
return not(
not self.requests
or
self.clients_count < 1
or
self.requests_per_client < 1
)
class FinishEvent:
def __init__(self):
self.finished = 0
def finish(self):
self.finished = self.finished + 1
| Python | 0.999599 |
0415361dcd6171f0f407ee528fa0761bf1e914b0 | Add proc name to gunicorn conf. | mezzanine/project_template/deploy/gunicorn.conf.py | mezzanine/project_template/deploy/gunicorn.conf.py | import os
bind = "127.0.0.1:%(gunicorn_port)s"
workers = (os.sysconf("SC_NPROCESSORS_ONLN") * 2) + 1
loglevel = "error"
proc_name = "%(proj_name)s"
| import os
bind = "127.0.0.1:%(port)s"
workers = (os.sysconf("SC_NPROCESSORS_ONLN") * 2) + 1
loglevel = "error"
| Python | 0 |
56606d3234fbebc504feec201e4a99a3adcd5023 | Fix code for pyflake8 convention | mgmtsystem_hazard_risk/models/mgmtsystem_hazard.py | mgmtsystem_hazard_risk/models/mgmtsystem_hazard.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from .common import _parse_risk_formula
class MgmtsystemHazard(models.Model):
    """Extends mgmtsystem.hazard with a computed risk score.

    The score combines the hazard's probability, severity and usage values
    through the company-level risk-computation formula.
    """
    _inherit = "mgmtsystem.hazard"
    risk_type_id = fields.Many2one(
        'mgmtsystem.hazard.risk.type',
        'Risk Type',
        required=True,
    )
    # Derived from probability/severity/usage via the company formula below.
    risk = fields.Integer(compute="_compute_risk", string='Risk')
    residual_risk_ids = fields.One2many(
        'mgmtsystem.hazard.residual_risk',
        'hazard_id',
        'Residual Risk Evaluations',
    )
    @api.depends("probability_id", "severity_id", "usage_id")
    def _compute_risk(self):
        """Evaluate the company's risk formula for each hazard.

        Leaves ``risk`` as False when any of the three inputs is missing.
        """
        mycompany = self.env['res.users'].browse(self._uid).company_id
        for hazard in self:
            if hazard.probability_id and\
                    hazard.severity_id and\
                    hazard.usage_id:
                hazard.risk = _parse_risk_formula(
                    mycompany.risk_computation_id.name,
                    hazard.probability_id.value,
                    hazard.severity_id.value,
                    hazard.usage_id.value
                )
            else:
                hazard.risk = False
| # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from .common import _parse_risk_formula
class MgmtsystemHazard(models.Model):
_inherit = "mgmtsystem.hazard"
risk_type_id = fields.Many2one(
'mgmtsystem.hazard.risk.type',
'Risk Type',
required=True,
)
risk = fields.Integer(compute="_compute_risk", string='Risk')
residual_risk_ids = fields.One2many(
'mgmtsystem.hazard.residual_risk',
'hazard_id',
'Residual Risk Evaluations',
)
@api.depends("probability_id", "severity_id", "usage_id")
def _compute_risk(self):
mycompany = self.env['res.users'].browse(self._uid).company_id
for hazard in self:
if hazard.probability_id and hazard.severity_id and hazard.usage_id:
hazard.risk = _parse_risk_formula(
mycompany.risk_computation_id.name,
hazard.probability_id.value,
hazard.severity_id.value,
hazard.usage_id.value
)
else:
hazard.risk = False
| Python | 0.000009 |
7c70a7c4b8b4cb002bcb8d683268c91de26d11c4 | formatname must be a string | tldp/doctypes/docbook4xml.py | tldp/doctypes/docbook4xml.py | #! /usr/bin/python
from ..utils import logger
from .common import SignatureChecker
class Docbook4XML(SignatureChecker):
    """Doctype handler for DocBook 4.x XML sources.

    Sources are recognized by file extension plus any of the DTD public
    identifiers in ``signatures``; the create_* methods are output-format
    stubs that currently only log intent.
    """
    formatname = 'DocBook 4.x XML'
    extensions = ['.xml']
    # NOTE(review): the V4.2 identifier appears twice; V4.3 may have been
    # intended — confirm against the DocBook DTD catalog.
    signatures = ['-//OASIS//DTD DocBook XML V4.1.2//EN',
                  '-//OASIS//DTD DocBook XML V4.2//EN',
                  '-//OASIS//DTD DocBook XML V4.2//EN',
                  '-//OASIS//DTD DocBook XML V4.4//EN',
                  '-//OASIS//DTD DocBook XML V4.5//EN', ]
    # External tools required to build the output formats.
    tools = ['xsltproc', 'html2text', 'fop', 'dblatex']
    files = ['']
    def create_txt(self):
        logger.info("Creating txt for %s", self.source.stem)
    def create_pdf(self):
        logger.info("Creating PDF for %s", self.source.stem)
    def create_html(self):
        logger.info("Creating chunked HTML for %s", self.source.stem)
    def create_htmls(self):
        logger.info("Creating single page HTML for %s", self.source.stem)
#
# -- end of file
#
# -- end of file
| #! /usr/bin/python
from ..utils import logger
from .common import SignatureChecker
class Docbook4XML(SignatureChecker):
    """Doctype handler for DocBook 4.x XML sources."""
    # Bug fix: formatname is consumed as a display string, not a list; the
    # sibling definition of this class declares it as a plain string.
    formatname = 'DocBook 4.x XML'
    extensions = ['.xml']
    signatures = ['-//OASIS//DTD DocBook XML V4.1.2//EN',
                  '-//OASIS//DTD DocBook XML V4.2//EN',
                  '-//OASIS//DTD DocBook XML V4.2//EN',
                  '-//OASIS//DTD DocBook XML V4.4//EN',
                  '-//OASIS//DTD DocBook XML V4.5//EN', ]
    # External tools required to build the output formats.
    tools = ['xsltproc', 'html2text', 'fop', 'dblatex']
    files = ['']
    def create_txt(self):
        logger.info("Creating txt for %s", self.source.stem)
    def create_pdf(self):
        logger.info("Creating PDF for %s", self.source.stem)
    def create_html(self):
        logger.info("Creating chunked HTML for %s", self.source.stem)
    def create_htmls(self):
        logger.info("Creating single page HTML for %s", self.source.stem)
#
# -- end of file
#
# -- end of file
| Python | 0.999577 |
96113152179ca81f24b85c19420fae7078907035 | change to ipn ver | amazon_buttons/views.py | amazon_buttons/views.py | from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from amazon_buttons import models
import datetime
from django.conf import settings
import urllib
from amazon_buttons import buttonconf
from amazon_buttons import _crypt
@csrf_exempt
def ipn_handler(request):
ipn = models.ipn_response()
ipn.datetime = datetime.datetime.fromtimestamp(int(request.POST['transactionDate']))
for key, val in request.POST.iteritems():
attrib = getattr(ipn, key, None)
if attrib:
setattr(ipn, key, val)
if settings.AMAZON_IPN_VERIFY:
if settings.AMAZON_SANDBOX:
ver_url = buttonconf.SANDBOX_VERIFY
else:
ver_url = buttonconf.LIVE_VERIFY
prepd_data = buttonconf.DEFAULT_IPNVER_DATA
prepd_data['UrlEndPoint'] = settings.DOMAIN_FOR_AMAZON_IPN + reverse('amazon_ipn')
prepd_data['target_url'] = ver_url
prepd_data['HttpParameters'] = urllib.urlencode(request.POST)
prepd_data['AWSAccessKeyId'] = settings.AMAZON_ACCESS_KEY
prepd_data['Timestamp'] = datetime.datetime.now().isoformat()
s_key = settings.AMAZON_SECRET_KEY
prepd_data['Signature'] = _crypt.sig_maker(s_key, prepd_data,'GET')
del prepd_data['target_url']
fin_url = urllib.urlencode(prepd_data)
print fin_url
else:
ipn.save()
return HttpResponse('Done')
| from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from amazon_buttons import models
import datetime
from django.conf import settings
import urllib
from amazon_buttons import buttonconf
from amazon_buttons import _crypt
@csrf_exempt
def ipn_handler(request):
ipn = models.ipn_response()
ipn.datetime = datetime.datetime.fromtimestamp(int(request.POST['transactionDate']))
for key, val in request.POST.iteritems():
attrib = getattr(ipn, key, None)
if attrib:
setattr(ipn, key, val)
if settings.AMAZON_IPN_VERIFY:
if settings.AMAZON_SANDBOX:
ver_url = buttonconf.SANDBOX_VERIFY
else:
ver_url = buttonconf.LIVE_VERIFY
prepd_data = buttonconf.DEFAULT_IPNVER_DATA
prepd_data['UrlEndPoint'] = ver_url
prepd_data['target_url'] = ver_url
prepd_data['HttpParameters'] = urllib.urlencode(request.POST)
prepd_data['AWSAccessKeyId'] = settings.AMAZON_ACCESS_KEY
prepd_data['Timestamp'] = datetime.datetime.now().isoformat()
s_key = settings.AMAZON_SECRET_KEY
prepd_data['Signature'] = _crypt.sig_maker(s_key, prepd_data,'GET')
del prepd_data['target_url']
fin_url = urllib.urlencode(prepd_data)
print fin_url
else:
ipn.save()
return HttpResponse('Done')
| Python | 0.999401 |
15b0d333f434568b8c7a6f78b9773cd00d149638 | update loadxl tests | src/encoded/tests/test_loadxl.py | src/encoded/tests/test_loadxl.py | import pytest
from encoded import loadxl
import json
from unittest import mock
from pkg_resources import resource_filename
pytestmark = pytest.mark.working
def test_gen_access_keys(testapp, admin):
    """Locally stored access keys carry the local server URL plus key/secret."""
    raw = loadxl.generate_access_key(testapp,
                                     store_access_key='local',
                                     email=admin['email'])
    default = json.loads(raw)['default']
    assert default['server'] == 'http://localhost:8000'
    assert default['secret']
    assert default['key']
def test_gen_access_keys_on_server(testapp, admin):
    """When /health reports a beanstalk env, keys use the real server URL."""
    old_get = testapp.get

    def side_effect(path):
        # Fake only the /health endpoint; every other path hits the real app.
        from webtest.response import TestResponse
        if path == '/health?format=json':
            tr = TestResponse()
            tr.json_body = {"beanstalk_env": "fourfront-webprod"}
            tr.content_type = 'application/json'
            return tr
        else:
            return old_get(path)
    testapp.get = mock.Mock(side_effect=side_effect)
    with mock.patch('encoded.loadxl.get_beanstalk_real_url') as mocked_url:
        mocked_url.return_value = 'http://fourfront-hotseat'
        res = loadxl.generate_access_key(testapp,
                                         store_access_key='s3',
                                         email=admin['email'])
        res = json.loads(res)
        assert res['default']['server'] == 'http://fourfront-hotseat'
        assert res['default']['secret']
        assert res['default']['key']
        # Bug fix: `assert mocked_url.called_once()` always passed — Mock
        # auto-creates a `called_once` attribute whose call returns a truthy
        # Mock.  assert_called_once() performs the real check.
        mocked_url.assert_called_once()
def test_load_data_endpoint(testapp):
    """POSTing a valid fdn_dir/itype payload to /load_data succeeds."""
    payload = {
        'fdn_dir': 'master-inserts',
        'itype': ['award', 'lab', 'user'],
    }
    with mock.patch('encoded.loadxl.get_app') as mocked_app:
        mocked_app.return_value = testapp.app
        response = testapp.post_json('/load_data', payload, status=200)
        assert response.json['status'] == 'success'
def test_load_data_endpoint_returns_error_if_incorrect_keyword(testapp):
    """A misspelled 'fdn_dir' keyword ('mdn_dir') yields a 422 error."""
    payload = {
        'mdn_dir': 'master-inserts',
        'itype': ['user'],
    }
    with mock.patch('encoded.loadxl.get_app') as mocked_app:
        mocked_app.return_value = testapp.app
        response = testapp.post_json('/load_data', payload, status=422)
        assert response.json['status'] == 'error'
        assert response.json['@graph']
def test_load_data_endpoint_returns_error_if_incorrect_data(testapp):
    """Loading users without their lab/award dependencies yields a 422 error."""
    payload = {
        'fdn_dir': 'master-inserts',
        'itype': ['user'],
    }
    with mock.patch('encoded.loadxl.get_app') as mocked_app:
        mocked_app.return_value = testapp.app
        response = testapp.post_json('/load_data', payload, status=422)
        assert response.json['status'] == 'error'
        assert response.json['@graph']
def test_load_data_user_specified_config(testapp):
    """A caller-supplied config_uri is forwarded to get_app."""
    config_uri = 'test.ini'
    payload = {
        'fdn_dir': 'master-inserts',
        'itype': ['user', 'lab', 'award'],
        'config_uri': config_uri,
    }
    with mock.patch('encoded.loadxl.get_app') as mocked_app:
        mocked_app.return_value = testapp.app
        response = testapp.post_json('/load_data', payload, status=200)
        assert response.json['status'] == 'success'
        mocked_app.assert_called_once_with(config_uri, 'app')
def test_load_data_local_dir(testapp):
    """fdn_dir is resolved to the packaged inserts path and passed to load_all."""
    expected_dir = resource_filename('encoded', 'tests/data/perf-testing/')
    with mock.patch('encoded.loadxl.get_app') as mocked_app:
        with mock.patch('encoded.loadxl.load_all') as load_all:
            mocked_app.return_value = testapp.app
            load_all.return_value = None
            res = testapp.post_json('/load_data', {'fdn_dir': 'perf-testing'}, status=200)
            assert res.json['status'] == 'success'
            # mock.ANY matches the app object; only the directory and the
            # itype/overwrite keywords matter for this test.
            load_all.assert_called_once_with(mock.ANY, expected_dir, None, itype=None, overwrite=False)
| import pytest
from encoded import loadxl
import json
from unittest import mock
from pkg_resources import resource_filename
pytestmark = pytest.mark.working
def test_gen_access_keys(testapp, admin):
res = loadxl.generate_access_key(testapp,
store_access_key='local',
email=admin['email'])
res = json.loads(res)
assert res['default']['server'] == 'http://localhost:8000'
assert res['default']['secret']
assert res['default']['key']
def test_gen_access_keys_on_server(testapp, admin):
old_get = testapp.get
def side_effect(path):
from webtest.response import TestResponse
if path == '/health?format=json':
tr = TestResponse()
tr.json_body = {"beanstalk_env": "fourfront-webprod"}
tr.content_type = 'application/json'
return tr
else:
return old_get(path)
testapp.get = mock.Mock(side_effect=side_effect)
with mock.patch('encoded.loadxl.get_beanstalk_real_url') as mocked_url:
mocked_url.return_value = 'http://fourfront-hotseat'
res = loadxl.generate_access_key(testapp,
store_access_key='s3',
email=admin['email'])
res = json.loads(res)
assert res['default']['server'] == 'http://fourfront-hotseat'
assert res['default']['secret']
assert res['default']['key']
assert mocked_url.called_once()
def test_load_data_endpoint(testapp):
    """Loading a single valid sheet ('user') through /load_data succeeds."""
    master_inserts = resource_filename('encoded', 'tests/data/master-inserts/')
    data = {}
    data['user'] = loadxl.read_single_sheet(master_inserts, 'user')
    with mock.patch('encoded.loadxl.get_app') as mocked_app:
        mocked_app.return_value = testapp.app
        res = testapp.post_json('/load_data', data, status=200)
        assert res.json['status'] == 'success'
def test_load_data_endpoint_returns_error_if_incorrect_data(testapp):
    """Posting sheets in an order the loader cannot satisfy ('user' before
    'lab') should produce a 422 error response with a populated @graph."""
    master_inserts = resource_filename('encoded', 'tests/data/master-inserts/')
    data = {}
    data['user'] = loadxl.read_single_sheet(master_inserts, 'user')
    data['lab'] = loadxl.read_single_sheet(master_inserts, 'lab')
    with mock.patch('encoded.loadxl.get_app') as mocked_app:
        mocked_app.return_value = testapp.app
        res = testapp.post_json('/load_data', data, status=422)
        assert res.json['status'] == 'error'
        assert res.json['@graph']
def test_load_data_user_specified_config(testapp):
    """A caller-supplied config_uri must be forwarded to get_app verbatim."""
    config_uri= 'test.ini'
    with mock.patch('encoded.loadxl.get_app') as mocked_app:
        mocked_app.return_value = testapp.app
        res = testapp.post_json('/load_data', {'config_uri': config_uri}, status=200)
        assert res.json['status'] == 'success'
        mocked_app.assert_called_once_with(config_uri, 'app')
def test_load_data_local_dir(testapp):
    """POSTing a 'local_dir' name should resolve it under tests/data and pass
    the resolved path to load_all."""
    expected_dir = resource_filename('encoded', 'tests/data/perf-testing/')
    with mock.patch('encoded.loadxl.get_app') as mocked_app:
        with mock.patch('encoded.loadxl.load_all') as load_all:
            mocked_app.return_value = testapp.app
            load_all.return_value = None
            res = testapp.post_json('/load_data', {'local_dir': 'perf-testing'}, status=200)
            assert res.json['status'] == 'success'
            load_all.assert_called_once_with(mock.ANY, expected_dir, [])
| Python | 0 |
38b36bcf3ba639faff7563f96db9ed45ce9ae5a4 | normalize qgis jobname | eventkit_cloud/ui/helpers.py | eventkit_cloud/ui/helpers.py | from __future__ import absolute_import
from contextlib import contextmanager
import os
from django.conf import settings
from django.utils import timezone
from django.template.loader import get_template, render_to_string
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@contextmanager
def cd(newdir):
    """Context manager: temporarily switch the process working directory to
    *newdir*, restoring the previous directory on exit (even on error)."""
    original = os.getcwd()
    os.chdir(newdir)
    try:
        yield
    finally:
        # Always restore, regardless of what happened inside the block.
        os.chdir(original)
def get_style_files():
    """Return the mapping of bundled style assets (e.g. icons).

    :return: dict mapping absolute paths to paths relative to the styles dir,
        as produced by get_file_paths.
    """
    styles_dir = os.path.join(os.path.dirname(__file__), 'static', 'ui', 'styles')
    return get_file_paths(styles_dir)
def generate_qgs_style(run_uid=None, export_provider_task=None):
    """
    Task to create QGIS project file with styles for osm.

    :param run_uid: uid of the ExportRun whose outputs should be styled.
    :param export_provider_task: optional single provider task; when given,
        only that provider is referenced (with an empty file path) instead of
        scanning every completed provider task of the run.
    :return: path of the written .qgs style file in the staging directory.
    """
    # Imported lazily to avoid circular imports at module load time.
    from eventkit_cloud.tasks.models import ExportRun
    from ..tasks.export_tasks import TaskStates
    from ..tasks.task_runners import normalize_name
    run = ExportRun.objects.get(uid=run_uid)
    stage_dir = os.path.join(settings.EXPORT_STAGING_ROOT, str(run_uid))
    job_name = run.job.name.lower()
    provider_tasks = run.provider_tasks.all()
    provider_details = []
    if export_provider_task:
        provider_slug = export_provider_task.slug
        provider_detail = {'provider_slug': provider_slug, 'file_path': ''}
        provider_details += [provider_detail]
    else:
        # Collect every staged output file from providers that finished.
        for provider_task in provider_tasks:
            if TaskStates[provider_task.status] not in TaskStates.get_incomplete_states():
                provider_slug = provider_task.slug
                for export_task in provider_task.tasks.all():
                    try:
                        filename = export_task.result.filename
                    except Exception:
                        # Task produced no result record; nothing to style.
                        continue
                    full_file_path = os.path.join(settings.EXPORT_STAGING_ROOT, str(run_uid),
                                                  provider_task.slug, filename)
                    if not os.path.isfile(full_file_path):
                        logger.error("Could not find file {0} for export {1}.".format(full_file_path,
                                                                                      export_task.name))
                        continue
                    # Exclude zip files created by zip_export_provider
                    if not full_file_path.endswith(".zip"):
                        provider_detail = {'provider_slug': provider_slug, 'file_path': full_file_path}
                        provider_details += [provider_detail]
    # File name uses the normalized job name plus today's date stamp.
    style_file = os.path.join(stage_dir, '{0}-{1}.qgs'.format(normalize_name(job_name),
                                                              timezone.now().strftime("%Y%m%d")))
    with open(style_file, 'w') as open_file:
        open_file.write(render_to_string('styles/Style.qgs', context={'job_name': normalize_name(job_name),
                                                                      'job_date_time': '{0}'.format(
                                                                          timezone.now().strftime("%Y%m%d%H%M%S%f")[
                                                                          :-3]),
                                                                      'provider_details': provider_details,
                                                                      'bbox': run.job.extents}))
    return style_file
def get_file_paths(directory):
    """Walk *directory* and return a dict mapping each file's absolute path to
    its './'-relative path within *directory*."""
    mapping = {}
    with cd(directory):
        for dirpath, _, filenames in os.walk('./'):
            for name in filenames:
                relative = os.path.join(dirpath, name)
                # abspath resolves against *directory* because of cd() above.
                mapping[os.path.abspath(relative)] = relative
    return mapping
| from __future__ import absolute_import
from contextlib import contextmanager
import os
from django.conf import settings
from django.utils import timezone
from django.template.loader import get_template, render_to_string
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@contextmanager
def cd(newdir):
prevdir = os.getcwd()
os.chdir(newdir)
try:
yield
finally:
os.chdir(prevdir)
def get_style_files():
"""
:return: A list of all of the static files used for styles (e.g. icons)
"""
style_dir = os.path.join(os.path.dirname(__file__), 'static', 'ui', 'styles')
return get_file_paths(style_dir)
def generate_qgs_style(run_uid=None, export_provider_task=None):
"""
Task to create QGIS project file with styles for osm.
"""
from eventkit_cloud.tasks.models import ExportRun
from ..tasks.export_tasks import TaskStates
run = ExportRun.objects.get(uid=run_uid)
stage_dir = os.path.join(settings.EXPORT_STAGING_ROOT, str(run_uid))
job_name = run.job.name.lower()
provider_tasks = run.provider_tasks.all()
provider_details = []
if export_provider_task:
provider_slug = export_provider_task.slug
provider_detail = {'provider_slug': provider_slug, 'file_path': ''}
provider_details += [provider_detail]
else:
for provider_task in provider_tasks:
if TaskStates[provider_task.status] not in TaskStates.get_incomplete_states():
provider_slug = provider_task.slug
for export_task in provider_task.tasks.all():
try:
filename = export_task.result.filename
except Exception:
continue
full_file_path = os.path.join(settings.EXPORT_STAGING_ROOT, str(run_uid),
provider_task.slug, filename)
if not os.path.isfile(full_file_path):
logger.error("Could not find file {0} for export {1}.".format(full_file_path,
export_task.name))
continue
# Exclude zip files created by zip_export_provider
if not full_file_path.endswith(".zip"):
provider_detail = {'provider_slug': provider_slug, 'file_path': full_file_path}
provider_details += [provider_detail]
style_file = os.path.join(stage_dir, '{0}-{1}.qgs'.format(job_name,
timezone.now().strftime("%Y%m%d")))
with open(style_file, 'w') as open_file:
open_file.write(render_to_string('styles/Style.qgs', context={'job_name': job_name,
'job_date_time': '{0}'.format(
timezone.now().strftime("%Y%m%d%H%M%S%f")[
:-3]),
'provider_details': provider_details,
'bbox': run.job.extents}))
return style_file
def get_file_paths(directory):
paths = {}
with cd(directory):
for dirpath, _, filenames in os.walk('./'):
for f in filenames:
paths[os.path.abspath(os.path.join(dirpath, f))] = os.path.join(dirpath, f)
return paths
| Python | 0.999998 |
859f97e2ba209479b0e882946afdf235ccd9e648 | Fix #1 Busy loop | pigv2/backends/glue.py | pigv2/backends/glue.py | import threading
import ipaddr
import time
#Hub: takes one message from the input queue and replicates it across all output queues
#Hub: takes one message from the input queue and replicates it across all output queues
class hub(object):
    """Fan-out relay: a daemon thread pulls each message from *input* and
    forwards it to every callable in *output*."""

    def __init__(self, input, output):
        #Input and output functions (usually q1.get and [q2.put,q3.put....])
        self.input = input
        self.output = output
        worker = threading.Thread(target=self.process)
        worker.daemon = True
        worker.start()
        self.x = worker

    def process(self):
        """Run forever: read one message, push it to every output sink."""
        while True:
            message = self.input()
            for sink in self.output:
                sink(message)
#Network range gate: takes an IP packet from the input queue and passes it to the output queue if and only if the IP source is within a list of dymanically changing networks.
#Takes an input function, an output function and an update function (which returns a list of addresses, usually database.ip_network_table.ip_list)
class network_range_gate(object):
    """Gate for IP packets: forwards a packet from *input* to *output* only
    when its 'source' address is contained in a dynamically refreshed list
    of networks.

    Two daemon threads are started: one gates packets, the other refreshes
    the network list by calling *update* (usually
    database.ip_network_table.ip_list) every *update_frequency* seconds.
    """

    def __init__(self, input, output, update, update_frequency=0.1):
        self.input = input                         # callable returning the next packet dict
        self.output = output                       # callable receiving packets that pass
        self.addresses = []                        # current allowed networks
        self.db_semaphore = threading.Semaphore()  # guards self.addresses across threads
        self.passed = []
        self.update_function = update
        self.update_frequency = update_frequency   # seconds between refreshes
        self.x = threading.Thread(target=self.process)
        self.x.daemon = True
        self.x.start()
        self.y = threading.Thread(target=self.update_addresses)
        self.y.daemon = True
        self.y.start()

    def process(self):
        """Forward each incoming packet whose source matches an allowed network."""
        while True:
            data = self.input()
            self.db_semaphore.acquire()
            try:
                for network in self.addresses:
                    if network.Contains(data['source']):
                        self.output(data)
                        break
            except Exception:
                # NOTE(review): malformed packets / bad network entries are
                # silently dropped; consider logging instead of swallowing.
                pass
            self.db_semaphore.release()

    def update_addresses(self):
        """Periodically refresh the allowed-network list."""
        while True:
            # BUG FIX: honour the configured refresh interval instead of the
            # hard-coded 0.1s (also avoids a busy loop).
            time.sleep(self.update_frequency)
            self.db_semaphore.acquire()
            self.addresses = self.update_function()
            self.db_semaphore.release()
| import threading
import ipaddr
#Hub: takes one message from the input queue and replicates it across all output queues
class hub(object):
def __init__(self,input,output):
#Input and output functions (usually q1.get and [q2.put,q3.put....])
self.input = input;
self.output = output;
self.x=threading.Thread(target=self.process)
self.x.daemon=True
self.x.start()
def process(self):
while True:
data = self.input()
for i in self.output:
i(data)
#Network range gate: takes an IP packet from the input queue and passes it to the output queue if and only if the IP source is within a list of dymanically changing networks.
#Takes an input function, an output function and an update function (which returns a list of addresses, usually database.ip_network_table.ip_list)
class network_range_gate(object):
def __init__(self,input,output,update):
self.input = input;
self.output = output;
self.addresses = []
self.db_semaphore = threading.Semaphore()
self.passed = []
self.update_function = update
self.x=threading.Thread(target=self.process)
self.x.daemon=True
self.x.start()
self.y=threading.Thread(target=self.update_addresses)
self.y.daemon=True
self.y.start()
# def debug_data(self):
# print "Gating list", self.addresses
# print "Recently passed", self.passed
# self.passed = []
def process(self):
while True:
data = self.input()
self.db_semaphore.acquire()
try:
for i in self.addresses:
if i.Contains(data['source']):
self.output(data)
#self.passed.append(data['source'])
break
except:
pass
self.db_semaphore.release()
def update_addresses(self):
while True:
self.db_semaphore.acquire()
self.addresses = self.update_function()
self.db_semaphore.release()
| Python | 0 |
ce73fe56375bef32a0997bdbe4ab305f232d605e | rename variable | daemon/rpcservice/systemservice.py | daemon/rpcservice/systemservice.py | import psutil
import json
from rpcservice.rpcservice import RPCService
from decorator.serialize import json_decorate
from decorator.singleton import singleton
@singleton
@json_decorate
class SystemService(RPCService):
    """RPC service exposing basic host health metrics via psutil."""

    def get_server_status(self):
        """Return a one-element list holding current CPU and memory usage (%)."""
        return [{
            "cpu": psutil.cpu_percent(),
            "memory": psutil.virtual_memory().percent,
        }]

    def get_server_version(self):
        """Not implemented yet; returns None."""
        pass
| import psutil
import json
from rpcservice.rpcservice import RPCService
from decorator.serialize import json_decorate
from decorator.singleton import singleton
@singleton
@json_decorate
class SystemService(RPCService):
def get_server_status(self):
cpu_status = {
"cpu": psutil.cpu_percent(),
"memory": psutil.virtual_memory().percent,
}
json_obj = []
json_obj.append(cpu_status)
return json_obj
def get_server_version(self):
pass
| Python | 0.000018 |
369be9eb7e366c1e9150f600c0d35d2814677469 | Update Autoencoder.py | Autoencoderprediction/Autoencoder.py | Autoencoderprediction/Autoencoder.py | """ Auto Encoder Example.
Using a stacked autoencoder on MNIST handwritten digits, and evaluating its performance with different scores
References:
Tflearn.org/examples
Tensorflow.org
Links:
[MNIST Dataset] http://yann.lecun.com/exdb/mnist/
Method and Examples Used:
    [1] A simple example from Tflearn (a higher-level API for TensorFlow) ships with an autoencoder that reconstructs the
    input images, but the goal here is to evaluate the autoencoder with different scores so it can be fine-tuned for specific tasks later.
    The original example also used a decoder to reconstruct the images, which is not needed for this evaluation.
    [2] The last layer for classification should be a softmax layer, and it was changed here accordingly.
[3] I am not using Confusion matrix from tensorflow, rather I used sklearn library for that purpose.
[4] All the steps involved in this program is commented out for better understanding of this program.
By: Jay Vala with help from Navin F Henry
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import tflearn
import tensorflow as tf
from random import randint
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
Images, Lables, testImages, testLables = mnist.load_data(one_hot=True)
# Random integer to restrict the results
f = randint(0,20)
# Placeholders to hold data before feeding it into network
x = tf.placeholder("float",[None, 784]) #for images with shape of None,784
y = tf.placeholder("float",[None, 10]) #for lables with shape of None,10
# Building the encoder
encoder = tflearn.input_data(shape=[None, 784])
encoder = tflearn.fully_connected(encoder, 256)
encoder = tflearn.fully_connected(encoder, 64)
encoder = tflearn.fully_connected(encoder, 10, activation='softmax')
#For calculating Accuracy at every step of model training
acc= tflearn.metrics.Accuracy()
# Regression, with mean square error (learn about it more here http://tflearn.org/layers/estimator/)
net = tflearn.regression(encoder, optimizer='adam', learning_rate=0.001,
loss='mean_square', metric=acc, shuffle_batches=True)
# Mpdeling the Neural Network (for details http://tflearn.org/models/dnn/)
model = tflearn.DNN(net, tensorboard_verbose=0)
# Training the Neural Network (for details http://tflearn.org/models/dnn/)
model.fit(Images, Lables, n_epoch=1, validation_set=(testImages, testLables),
run_id="auto_encoder", batch_size=256,show_metric=True, snapshot_epoch=True)
# Here I evaluate the model with Test Images and Test Lables, calculating the Mean Accuracy of the model.
evaluation= model.evaluate(testImages,testLables)
print("\n")
print("\t"+"Mean accuracy of the model is :", evaluation)
# Prediction the Lables of the Images that we give to the model just to have a clear picture of Neural Netwok
lables = model.predict_label(testImages)
print("\n")
print("\t"+"The predicted labels are :",lables)
# Predicted probailites
y = model.predict(testImages)
print("\n")
print("\t"+"\t"+"\t"+"The predicted probabilities are :" )
print("\n")
print (y[f])
# Running a session to feed calculate the confusion matrix
sess = tf.Session()
# taking the argumented maximum of the predicted probabilities for generating confusion matrix
prediction = tf.argmax(y,1)
# displaying length of predictions and evaluating them in a session
with sess.as_default():
print (len(prediction.eval()))
predicted_labels = prediction.eval()
# Again importing the mnist data with one hot as false because we need to know the truepositive and other values for evaluation
Images, Lables, testImages, targetLables = mnist.load_data(one_hot=False)
# Used Sklearn library for evaluation as tensorflows library was not documented properly
# Generated the Confusion Matrix
confusionMatrix = confusion_matrix(targetLables, predicted_labels)
print (confusionMatrix)
# Classification_report in Sklearn provide all the necessary scores needed to succesfully evaluate the model.
classification = classification_report(targetLables,predicted_labels, digits=4,
target_names =['class 0','class 1','class 2','class 3','class 4','class 5','class 6','class 7','class 8','class 9'])
print (classification)
| from __future__ import division, print_function, absolute_import
import numpy as np
#import matplotlib.pyplot as plt
import tflearn
import tensorflow as tf
from random import randint
from sklearn.metrics import confusion_matrix
from tensorflow.contrib.metrics import streaming_accuracy
from tensorflow.contrib.metrics import streaming_precision
# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
Images, Lables, testImages, testLables = mnist.load_data(one_hot=True)
f = randint(0,20)
x = tf.placeholder("float",[None, 784])
y = tf.placeholder("float",[None, 10])
# Building the encoder
encoder = tflearn.input_data(shape=[None, 784])
encoder = tflearn.fully_connected(encoder, 256)
encoder = tflearn.fully_connected(encoder, 64)
encoder = tflearn.fully_connected(encoder, 10, activation='softmax')
acc= tflearn.metrics.Accuracy()
# Regression, with mean square error
net = tflearn.regression(encoder, optimizer='adam', learning_rate=0.001,
loss='mean_square', metric=acc, shuffle_batches=True)
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(Images, Lables, n_epoch=1, validation_set=(testImages, testLables),
run_id="auto_encoder", batch_size=256,show_metric=True, snapshot_epoch=True)
evali= model.evaluate(testImages,testLables)
print("\n")
print("\t"+"Mean accuracy of the model is :", evali)
lables = model.predict_label(testImages)
print("\n")
print("\t"+"The predicted labels are :",lables)
y = model.predict(testImages)
print("\n")
print("\t"+"\t"+"\t"+"The predicted probabilities are :" )
print("\n")
#print (prediction[10])
sess = tf.Session()
prediction = tf.argmax(y,1)
#classification = sess.run(tf.argmax(prediction), feed_dict={x: [testImages]})
with sess.as_default():
print (len(prediction.eval()))
predicted_labels = prediction.eval()
Images, Lables, testImages, targetLables = mnist.load_data(one_hot=False)
confusionMatrix = confusion_matrix(targetLables, predicted_labels)
print (confusionMatrix)
#flattenTestLable = tf.reshape(testLables,[-1])
#flattenprediction = tf.reshape(prediction,[-1])
# metrics_collections=None, updates_collections=None,
#name="Recall")
#precision = tf.metrics.precision(flattenTestLable,flattenprediction,weights=None, metrics_collections=None,
#updates_collections=None, name='precision')
#print("Precision:", precision)
#print("Recall :", recall)
#import tflearn.datasets.mnist as mnist
#Images, Lables, testImages, targetLables = mnist.load_data(one_hot=False)
#with sess.as_default():
# print ("shape of test labels ",tf.shape(testLables).eval())
# print("shape of predicted labels",tf.shape(lables).eval())
#confusionMatrix = confusion_matrix(targetLables, lables )
#with sess.as_default():
| Python | 0 |
6e8be0bf525d386cfd83ac1c0c3f66475e308234 | fix id tag | examples/RooUnfoldExample.py | examples/RooUnfoldExample.py | # ==============================================================================
# File and Version Information:
# $Id: RooUnfoldExample.py 248 2010-10-04 22:18:19Z T.J.Adye $
#
# Description:
# Simple example usage of the RooUnfold package using toy MC.
#
# Author: Tim Adye <T.J.Adye@rl.ac.uk>
#
# ==============================================================================
from ROOT import gRandom, TH1, TH1D, cout
from ROOT import RooUnfoldResponse
from ROOT import RooUnfold
from ROOT import RooUnfoldBayes
# from ROOT import RooUnfoldSvd
# from ROOT import RooUnfoldTUnfold
# ==============================================================================
# Gaussian smearing, systematic translation, and variable inefficiency
# ==============================================================================
def smear(xt):
  """Apply detector efficiency and Gaussian smearing to a truth value *xt*.

  Returns the smeared value, or None when the event fails the efficiency cut.
  """
  # Efficiency rises linearly from 0.3 at xt=-10 to 1.0 at xt=+10.
  efficiency = 0.3 + (1.0 - 0.3) / 20 * (xt + 10.0)
  if gRandom.Rndm() > efficiency:
    return None  # event lost to inefficiency
  # Shift by -2.5 (bias) and smear with sigma 0.2.
  return xt + gRandom.Gaus(-2.5, 0.2)
# ==============================================================================
# Example Unfolding
# ==============================================================================
print "==================================== TRAIN ===================================="
response= RooUnfoldResponse (40, -10.0, 10.0);
# Train with a Breit-Wigner, mean 0.3 and width 2.5.
for i in xrange(100000):
xt= gRandom.BreitWigner (0.3, 2.5);
x= smear (xt);
if x!=None:
response.Fill (x, xt);
else:
response.Miss (xt);
print "==================================== TEST ====================================="
hTrue= TH1D ("true", "Test Truth", 40, -10.0, 10.0);
hMeas= TH1D ("meas", "Test Measured", 40, -10.0, 10.0);
# Test with a Gaussian, mean 0 and width 2.
for i in xrange(10000):
xt= gRandom.Gaus (0.0, 2.0)
x= smear (xt);
hTrue.Fill(xt);
if x!=None: hMeas.Fill(x);
print "==================================== UNFOLD ==================================="
unfold= RooUnfoldBayes (response, hMeas, 4); # OR
# unfold= RooUnfoldSvd (response, hMeas, 20); # OR
# unfold= RooUnfoldTUnfold (response, hMeas);
hReco= unfold.Hreco();
unfold.PrintTable (cout, hTrue);
hReco.Draw();
hMeas.Draw("SAME");
hTrue.SetLineColor(8);
hTrue.Draw("SAME");
| # ==============================================================================
# File and Version Information:
# $Id: RooUnfoldExample.py 248 2010-10-04 22:18:19Z T.J.Adye $
#
# Description:
# Simple example usage of the RooUnfold package using toy MC.
#
# Author: Tim Adye <T.J.Adye@rl.ac.uk>
#
# ==============================================================================
from ROOT import gRandom, TH1, TH1D, cout
from ROOT import RooUnfoldResponse
from ROOT import RooUnfold
from ROOT import RooUnfoldBayes
# from ROOT import RooUnfoldSvd
# from ROOT import RooUnfoldTUnfold
# ==============================================================================
# Gaussian smearing, systematic translation, and variable inefficiency
# ==============================================================================
def smear(xt):
xeff= 0.3 + (1.0-0.3)/20*(xt+10.0); # efficiency
x= gRandom.Rndm();
if x>xeff: return None;
xsmear= gRandom.Gaus(-2.5,0.2); # bias and smear
return xt+xsmear;
# ==============================================================================
# Example Unfolding
# ==============================================================================
print "==================================== TRAIN ===================================="
response= RooUnfoldResponse (40, -10.0, 10.0);
# Train with a Breit-Wigner, mean 0.3 and width 2.5.
for i in xrange(100000):
xt= gRandom.BreitWigner (0.3, 2.5);
x= smear (xt);
if x!=None:
response.Fill (x, xt);
else:
response.Miss (xt);
print "==================================== TEST ====================================="
hTrue= TH1D ("true", "Test Truth", 40, -10.0, 10.0);
hMeas= TH1D ("meas", "Test Measured", 40, -10.0, 10.0);
# Test with a Gaussian, mean 0 and width 2.
for i in xrange(10000):
xt= gRandom.Gaus (0.0, 2.0)
x= smear (xt);
hTrue.Fill(xt);
if x!=None: hMeas.Fill(x);
print "==================================== UNFOLD ==================================="
unfold= RooUnfoldBayes (response, hMeas, 4); # OR
# unfold= RooUnfoldSvd (response, hMeas, 20); # OR
# unfold= RooUnfoldTUnfold (response, hMeas);
hReco= unfold.Hreco();
unfold.PrintTable (cout, hTrue);
hReco.Draw();
hMeas.Draw("SAME");
hTrue.SetLineColor(8);
hTrue.Draw("SAME");
| Python | 0.000005 |
4f8fe4c584f3dd4f5cc612c43534f6b2dc149a11 | Fix a bug that caused the from_config to ignore the base_path when method is set to local | simplefsabstraction/interface.py | simplefsabstraction/interface.py | import uuid
class SimpleFS:
    """Abstract base for simple file-system backends (local disk or S3)."""

    class BadExtensionError(Exception):
        """Raised when a file's extension is not in the allowed set."""

        def __init__(self):
            super().__init__('Extension not allowed')

    def exists(self, file_name):
        """
        Check whether a file exists in the file system
        :param file_name: the name of the file
        :return: true if the file exists, false otherwise
        """
        raise NotImplementedError

    def save(self, source_file, dest_name, randomize=False):
        """
        Save a file to the file system
        :param source_file: the source file
        :param dest_name: the destination name
        :param randomize: use a random file name
        :return the generated filename
        """
        raise NotImplementedError

    @staticmethod
    def _check_extension(filename, extensions):
        """
        Check is a filename has an allowed extension
        :param filename: the filename
        :return: true if allowed extension, false otherwise
        """
        for ext in extensions:
            if filename.endswith(".{}".format(ext)):
                return True
        return False

    @staticmethod
    def _random_filename():
        """
        Generate a random filename
        """
        return str(uuid.uuid4())

    @staticmethod
    def from_config(config):
        """Build a concrete backend ('s3' or 'local') from a config mapping."""
        from simplefsabstraction import S3FS, LocalFS

        def s3_from_config(config):
            """
            Create an instance of S3FS from the config
            """
            credentials = None
            if 'access_key' in config and 'secret_key' in config:
                credentials = {'access_key': config['access_key'],
                               'secret_key': config['secret_key']}
            try:
                bucket_name = config['bucket_name']
            except KeyError:
                raise Exception('Please specify the bucket name in the config')
            return S3FS(bucket_name,
                        allowed_extensions=config.get('allowed_extensions'),
                        credentials=credentials)

        def local_from_config(config):
            """
            Create an instance of LocalFS from the config
            """
            allowed = config.get('allowed_extensions')
            if 'base_path' in config:
                return LocalFS(allowed_extensions=allowed, base_path=config['base_path'])
            return LocalFS(allowed_extensions=allowed)

        try:
            method = config['method'].lower()
        except KeyError:
            raise Exception('Please specify the key "method" in the config')

        if method == 's3':
            return s3_from_config(config)
        if method == 'local':
            return local_from_config(config)
        raise Exception('Method "{}" not known'.format(method))
| import uuid
class SimpleFS:
class BadExtensionError(Exception):
def __init__(self):
super().__init__('Extension not allowed')
def exists(self, file_name):
"""
Check whether a file exists in the file system
:param file_name: the name of the file
:return: true if the file exists, false otherwise
"""
raise NotImplementedError
def save(self, source_file, dest_name, randomize=False):
"""
Save a file to the file system
:param source_file: the source file
:param dest_name: the destination name
:param randomize: use a random file name
:return the generated filename
"""
raise NotImplementedError
@staticmethod
def _check_extension(filename, extensions):
"""
Check is a filename has an allowed extension
:param filename: the filename
:return: true if allowed extension, false otherwise
"""
return any(filename.endswith(".{}".format(ext)) for ext in extensions)
@staticmethod
def _random_filename():
"""
Generate a random filename
"""
return str(uuid.uuid4())
@staticmethod
def from_config(config):
from simplefsabstraction import S3FS, LocalFS
def s3_from_config(config):
"""
Create an instance of S3FS from the config
"""
if 'access_key' in config and 'secret_key' in config:
credentials = {'access_key': config['access_key'],
'secret_key': config['secret_key']}
else:
credentials = None
try:
bucket_name = config['bucket_name']
except KeyError:
raise Exception('Please specify the bucket name in the config')
allowed_extensions = config['allowed_extensions'] if 'allowed_extensions' in config else None
return S3FS(bucket_name, allowed_extensions=allowed_extensions, credentials=credentials)
def local_from_config(config):
"""
Create an instance of LocalFS from the config
"""
allowed_extensions = config['allowed_extensions'] if 'allowed_extensions' in config else None
return LocalFS(allowed_extensions=allowed_extensions)
try:
method = config['method'].lower()
except KeyError:
raise Exception('Please specify the key "method" in the config')
if method == 's3':
return s3_from_config(config)
elif method == 'local':
return local_from_config(config)
else:
raise Exception('Method "{}" not known'.format(method))
| Python | 0.000001 |
b193a4035a0a77ba2555c41d977cf31975ac3b47 | Disable destructive action challenge for codelab. (#1059) | pylib/spinnaker/codelab_config.py | pylib/spinnaker/codelab_config.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from spinnaker.yaml_util import YamlBindings
def configure_codelab_igor_jenkins():
    """Configures Igor to be enabled and to point to the codelab jenkins instance.

    Rewrites spinnaker-local.yml in place: sets the default Jenkins master to
    the local codelab instance (port 9090, admin/admin) and enables igor.
    """
    YamlBindings.update_yml_source(
        '/opt/spinnaker/config/spinnaker-local.yml',
        {
            'jenkins': {
                'defaultMaster': {
                    'name': 'CodelabJenkins',
                    'baseUrl': 'http://localhost:9090',
                    'username': 'admin',
                    'password': 'admin'
                }
            },
            'igor': {
                'enabled': 'true'
            }
        }
    )
def disable_destructive_action_challenge():
    """Disables destructive action challenge for codelab.

    Clears clouddriver's challengeDestructiveActionsEnvironments list so the
    codelab is not prompted before destructive operations.
    """
    YamlBindings.update_yml_source(
        '/opt/spinnaker/config/clouddriver.yml',
        {
            'credentials': {
                'challengeDestructiveActionsEnvironments': ''
            }
        }
    )
# Script entry point: apply both codelab customisations in order.
if __name__ == '__main__':
    configure_codelab_igor_jenkins()
    disable_destructive_action_challenge()
| # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from spinnaker.yaml_util import YamlBindings
def configure_codelab_igor_jenkins():
"""Configures Igor to be enabled and to point to the codelab jenkins instance.
"""
YamlBindings.update_yml_source(
'/opt/spinnaker/config/spinnaker-local.yml',
{
'jenkins': {
'defaultMaster': {
'name': 'CodelabJenkins',
'baseUrl': 'http://localhost:9090',
'username': 'admin',
'password': 'admin'
}
},
'igor': {
'enabled': 'true'
}
}
)
if __name__ == '__main__':
configure_codelab_igor_jenkins()
| Python | 0 |
a766bfa315f7c91f672f59bdd1b606d50467c332 | Bump version. | src/flask_components/_version.py | src/flask_components/_version.py | # -*- coding: utf-8 -*-
# Package version as a tuple, for programmatic comparison.
__version_info__ = (0, 1, 1)
# Dotted human-readable form derived from the tuple, e.g. "0.1.1".
__version__ = '.'.join(str(part) for part in __version_info__)
| # -*- coding: utf-8 -*-
__version_info__ = (0, 1, 0)
__version__ = '.'.join(map(str, __version_info__))
| Python | 0 |
780cdf506060ad550355e4b8928743944198ac74 | Test config | tests/test.py | tests/test.py | import json
from unittest import TestCase
import requests
from PIL import Image
from httmock import urlmatch, HTTMock
from redis import Redis
from app import app
import os
# HTTP mock: any request to *.test.server.com is answered with the bytes of
# the bundled test image, so no real network access happens in these tests.
@urlmatch(netloc=r'(.*\.)?test.server\.com$')
def get_image_mock(url, request):
    return open('tests/test_resources/heman.png', 'r').read()
class ImageResizeTest(TestCase):
    """Shared fixture for resizer endpoint tests: builds the request URLs and
    cleans up the Redis cache entry and scratch file afterwards."""
    def setUp(self):
        self.r = Redis()  # cache backend the app stores resized images in
        self.app = app.test_client()
        self.test_image_name = 'test_image.jpg'  # scratch file for inspecting results
        self.base_url = '/v1/resizer/{}'
        # Resize request for the mocked remote image at width=0, height=100;
        # the cache key 'heman.png_0_100' used below must match these values.
        self.get_image_url = self.base_url.format(
            '?width={}&height={}&file={}'.format(
                0, 100, 'http://test.server.com/heman.png'
            )
        )
        # Request missing the width/height args -> expected to yield a 400.
        self.bad_argument_url = self.base_url.format('?file=http://test.server.com/heman.png')
    def tearDown(self):
        self.r.delete('heman.png_0_100')
        try:
            os.remove(self.test_image_name)
        except OSError:
            pass  # the scratch file is not created by every test
class TestResizeImage(ImageResizeTest):
    def make_request(self, width, height):
        """Issue the resize request, pull the cached result from Redis, and
        return the (width, height) of the produced image.

        NOTE(review): the *width* and *height* parameters are never used — the
        request always goes to self.get_image_url (fixed at 0x100) and the
        locals are immediately overwritten below. test_width/test_height are
        therefore exercising the same request; confirm whether the URL was
        meant to be built from the arguments.
        """
        with HTTMock(get_image_mock):
            response = self.app.get(self.get_image_url)
        image = self.r.get('heman.png_0_100')
        with open(self.test_image_name, 'w') as f:
            f.write(image)
        width, height = Image.open(self.test_image_name).size
        assert response.status_code == 200
        assert 'image/jpeg' in response.content_type
        assert 'heman.png_0_100' in self.r.keys('heman.png_0_100')
        assert image is not None
        return width, height
    def test_file_exists_already(self):
        # Second identical request should be served from the cache and still 200.
        with HTTMock(get_image_mock):
            response = self.app.get(self.get_image_url)
        with HTTMock(get_image_mock):
            response_2 = self.app.get(self.get_image_url)
        assert response_2.status_code == 200
    def test_height(self):
        width, height = self.make_request(0, 100)
        assert height is not None
        assert width is not None
    def test_width(self):
        width, height = self.make_request(100, 0)
        assert width is not None
        assert height is not None
    def test_bad_argument(self):
        # Missing width/height query parameters must be rejected with 400.
        with HTTMock(get_image_mock):
            response = self.app.get(self.bad_argument_url)
        assert response.status_code == 400
class TestUtils(TestCase):
    """Smoke tests for the utility endpoints."""

    def setUp(self):
        self.app = app.test_client()

    def test_ping(self):
        """/v1/utils/ping answers 200 with the literal body 'pong'."""
        resp = self.app.get('/v1/utils/ping')
        assert resp.status_code == 200
        assert resp.get_data() == 'pong'
import config
class TestConfig(TestCase):
    """Tests for loading statsd settings from a JSON config file."""

    def setUp(self):
        self.statsd_config = 'statsd_config'
        self._create_statsd_config()

    def tearDown(self):
        try:
            os.remove(self.statsd_config)
        except OSError:
            pass

    def _create_statsd_config(self):
        """Write a minimal statsd JSON config file for the test to load."""
        # Renamed from `config`: that local shadowed the `config` module
        # imported above, which test_statsd_config relies on.
        statsd_settings = {
            "statsd_host": "localhost",
            "statsd_port": 8125
        }
        with open(self.statsd_config, 'w') as f:
            # json.dump streams straight to the file (same output as
            # f.write(json.dumps(...))).
            json.dump(statsd_settings, f)

    def test_statsd_config(self):
        config._load_statsd_config(app, self.statsd_config)
        statsd = app.config['STATSD']
        assert isinstance(statsd, dict)
        assert statsd['host'] == 'localhost'
        assert statsd['port'] == 8125
| import json
from unittest import TestCase
import requests
from PIL import Image
from httmock import urlmatch, HTTMock
from redis import Redis
from app import app
import os
@urlmatch(netloc=r'(.*\.)?test.server\.com$')
def get_image_mock(url, request):
    """httmock handler: serve the bundled test PNG for any test.server.com URL.

    Uses a context manager so the file handle is closed deterministically
    (the previous bare open() leaked it to the garbage collector).
    """
    with open('tests/test_resources/heman.png', 'r') as f:
        return f.read()
class ImageResizeTest(TestCase):
def setUp(self):
self.r = Redis()
self.app = app.test_client()
self.test_image_name = 'test_image.jpg'
self.base_url = '/v1/resizer/{}'
self.get_image_url = self.base_url.format(
'?width={}&height={}&file={}'.format(
0, 100, 'http://test.server.com/heman.png'
)
)
self.bad_argument_url = self.base_url.format('?file=http://test.server.com/heman.png')
def tearDown(self):
self.r.delete('heman.png_0_100')
try:
os.remove(self.test_image_name)
except OSError:
pass
class TestResizeImage(ImageResizeTest):
def make_request(self, width, height):
with HTTMock(get_image_mock):
response = self.app.get(self.get_image_url)
image = self.r.get('heman.png_0_100')
with open(self.test_image_name, 'w') as f:
f.write(image)
width, height = Image.open(self.test_image_name).size
assert response.status_code == 200
assert 'image/jpeg' in response.content_type
assert 'heman.png_0_100' in self.r.keys('heman.png_0_100')
assert image is not None
return width, height
def test_file_exists_already(self):
with HTTMock(get_image_mock):
response = self.app.get(self.get_image_url)
with HTTMock(get_image_mock):
response_2 = self.app.get(self.get_image_url)
assert response_2.status_code == 200
def test_height(self):
width, height = self.make_request(0, 100)
assert height is not None
assert width is not None
def test_width(self):
width, height = self.make_request(100, 0)
assert width is not None
assert height is not None
def test_bad_argument(self):
with HTTMock(get_image_mock):
response = self.app.get(self.bad_argument_url)
assert response.status_code == 400
class TestUtils(TestCase):
def setUp(self):
self.app = app.test_client()
def test_ping(self):
response = self.app.get('/v1/utils/ping')
assert response.status_code == 200
assert response.get_data() == 'pong'
| Python | 0.000001 |
f84aa449780f2645a89c3fb015a2235389937ec5 | Clean up mongo fixtures a bit | blaze/tests/test_mongo.py | blaze/tests/test_mongo.py | from __future__ import absolute_import, division, print_function
import pytest
pymongo = pytest.importorskip('pymongo')
try:
pymongo.MongoClient()
except pymongo.errors.ConnectionFailure:
pytest.importorskip('fhskjfdskfhsf')
from datashape import discover, dshape
from blaze import drop, into, create_index
conn = pymongo.MongoClient()
db = conn.test_db
from pymongo import ASCENDING, DESCENDING
@pytest.yield_fixture
def empty_collec():
yield db.tmp_collection
db.tmp_collection.drop()
@pytest.yield_fixture
def bank_collec():
coll = into(db.tmp_collection, bank)
yield coll
coll.drop()
bank = [{'name': 'Alice', 'amount': 100},
{'name': 'Alice', 'amount': 200},
{'name': 'Bob', 'amount': 100},
{'name': 'Bob', 'amount': 200},
{'name': 'Bob', 'amount': 300}]
def test_discover(bank_collec):
assert discover(bank_collec) == dshape('5 * {amount: int64, name: string}')
def test_into(empty_collec):
    """Rows written through into() come back unchanged (order ignored)."""
    loaded = set(into([], into(empty_collec, bank), columns=['name', 'amount']))
    expected = {('Alice', 100), ('Alice', 200), ('Bob', 100), ('Bob', 200),
                ('Bob', 300)}
    assert loaded == expected
@pytest.yield_fixture
def mongo():
pymongo = pytest.importorskip('pymongo')
conn = pymongo.MongoClient()
db = conn.test_db
db.tmp_collection.insert(bank)
yield conn
conn.close()
def test_drop(mongo):
db = mongo.test_db
drop(db.tmp_collection)
assert db.tmp_collection.count() == 0
| from __future__ import absolute_import, division, print_function
import pytest
pymongo = pytest.importorskip('pymongo')
try:
pymongo.MongoClient()
except pymongo.errors.ConnectionFailure:
pytest.importorskip('fhskjfdskfhsf')
from datashape import discover, dshape
from contextlib import contextmanager
from toolz.curried import get
from blaze import drop, into
conn = pymongo.MongoClient()
db = conn.test_db
@contextmanager
def collection(data=None):
    """Yield db.tmp_collection (optionally pre-loaded with *data*) and
    drop it afterwards, even if the body raises."""
    coll = db.tmp_collection
    # None and [] both mean "start empty", so the previous normalization
    # of None to [] was dead code and is removed.
    if data:
        coll = into(coll, data)
    try:
        yield coll
    finally:
        coll.drop()
bank = [{'name': 'Alice', 'amount': 100},
{'name': 'Alice', 'amount': 200},
{'name': 'Bob', 'amount': 100},
{'name': 'Bob', 'amount': 200},
{'name': 'Bob', 'amount': 300}]
def test_discover():
with collection(bank) as coll:
assert discover(coll) == dshape('5 * {amount: int64, name: string}')
def test_into():
    """into() round-trips the bank rows through Mongo (order-insensitive)."""
    with collection([]) as coll:
        # The unused `key = get([...])` binding was dead code and is removed.
        loaded = set(into([], into(coll, bank), columns=['name', 'amount']))
        expected = {('Alice', 100), ('Alice', 200), ('Bob', 100),
                    ('Bob', 200), ('Bob', 300)}
        assert loaded == expected
@pytest.yield_fixture
def mongo():
pymongo = pytest.importorskip('pymongo')
conn = pymongo.MongoClient()
db = conn.test_db
db.tmp_collection.insert(bank)
yield conn
conn.close()
def test_drop(mongo):
db = mongo.test_db
drop(db.tmp_collection)
assert db.tmp_collection.count() == 0
| Python | 0 |
36408b92a74b8f9963686d215b26de57b429cd6c | Fix test_table.py record syntax. | blaze/tests/test_table.py | blaze/tests/test_table.py | from blaze import dshape
from blaze import NDTable, Table, NDArray, Array
def test_arrays():
    """Both toplevel array types pretty-print and carry the expected datashape."""
    expected_ds = dshape('3, int')
    # NDArray first, then Array — same order as the original checks.
    for cls in (NDArray, Array):
        arr = cls([1, 2, 3])
        str(arr)
        repr(arr)
        arr.datashape._equal(expected_ds)
def test_record():
expected_ds = dshape('1, {x: int32; y: float32}')
t = NDTable([(1, 2.1), (2, 3.1)], dshape='1, {x: int32; y: float32}')
t.datashape._equal(expected_ds)
str(t)
repr(t)
def test_record_consume():
expected_ds = dshape("4, {i: int64; f: float64}")
d = {
'i' : [1, 2, 3, 4],
'f' : [4., 3., 2., 1.]
}
t = NDTable(d)
t.datashape._equal(expected_ds)
def test_record_consume2():
d = {
'a' : ["foo", "bar"],
'b' : [4., 3., 2., 1.]
}
table = NDTable(d)
| from blaze import dshape
from blaze import NDTable, Table, NDArray, Array
def test_arrays():
# Assert that the pretty pritner works for all of the
# toplevel structures
expected_ds = dshape('3, int')
a = NDArray([1,2,3])
str(a)
repr(a)
a.datashape._equal(expected_ds)
a = Array([1,2,3])
str(a)
repr(a)
a.datashape._equal(expected_ds)
def test_record():
expected_ds = dshape('1, {x: int32; y: float32}')
t = NDTable([(1, 2.1), (2, 3.1)], dshape='1, {x: int32; y: float32}')
t.datashape._equal(expected_ds)
str(t)
repr(t)
def test_record_consume():
    """NDTable built from a dict of columns reports the expected datashape.

    Record fields in the datashape grammar are separated by ';' (',' is the
    dimension separator), so the expected shape string is corrected here.
    """
    expected_ds = dshape("4, {i: int64; f: float64}")
    d = {
        'i': [1, 2, 3, 4],
        'f': [4., 3., 2., 1.]
    }
    t = NDTable(d)
    t.datashape._equal(expected_ds)
def test_record_consume2():
d = {
'a' : ["foo", "bar"],
'b' : [4., 3., 2., 1.]
}
table = NDTable(d)
| Python | 0.000002 |
3f90d0ec25491eb64f164180139d4baf9ff238a9 | Sort the context list in alphabetical order | libravatar/context_processors.py | libravatar/context_processors.py | # Copyright (C) 2010 Jonathan Harker <jon@jon.geek.nz>
#
# This file is part of Libravatar
#
# Libravatar is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Libravatar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar. If not, see <http://www.gnu.org/licenses/>.
import settings
"""
Default useful variables for the base page template.
"""
def basepage(request):
    """Template context processor: expose site-wide settings to templates."""
    # A single dict literal instead of incremental assignment; same keys,
    # same values, alphabetical order preserved.
    return {
        'analytics_propertyid': settings.ANALYTICS_PROPERTYID,
        'avatar_url': settings.AVATAR_URL,
        'disable_signup': settings.DISABLE_SIGNUP,
        'libravatar_version': settings.LIBRAVATAR_VERSION,
        'media_url': settings.MEDIA_URL,
        'secure_avatar_url': settings.SECURE_AVATAR_URL,
        'site_name': settings.SITE_NAME,
        'site_url': settings.SITE_URL,
        'support_email': settings.SUPPORT_EMAIL,
    }
| # Copyright (C) 2010 Jonathan Harker <jon@jon.geek.nz>
#
# This file is part of Libravatar
#
# Libravatar is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Libravatar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar. If not, see <http://www.gnu.org/licenses/>.
import settings
"""
Default useful variables for the base page template.
"""
def basepage(request):
context = {}
context["site_name"] = settings.SITE_NAME
context["libravatar_version"] = settings.LIBRAVATAR_VERSION
context["avatar_url"] = settings.AVATAR_URL
context["secure_avatar_url"] = settings.SECURE_AVATAR_URL
context["media_url"] = settings.MEDIA_URL
context["site_url"] = settings.SITE_URL
context["disable_signup"] = settings.DISABLE_SIGNUP
context["analytics_propertyid"] = settings.ANALYTICS_PROPERTYID
context['support_email'] = settings.SUPPORT_EMAIL
return context
| Python | 1 |
34dc1c775e4808664dcdb5824b8f2ed5f12e94a1 | add jsonp renderer and route for graph build status | app/app/__init__.py | app/app/__init__.py | from pyramid.config import Configurator
from pyramid.renderers import JSONP
from sqlalchemy import engine_from_config
from .models import (
DBSession,
Base,
)
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.bind = engine
config = Configurator(settings=settings)
config.add_renderer('jsonp', JSONP(param_name='callback'))
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('site_graph', '/site/{code}')
config.add_route('entity_graph', '/entity/{id}')
config.add_route('status', '/status')
config.scan()
return config.make_wsgi_app()
| from pyramid.config import Configurator
from sqlalchemy import engine_from_config
from .models import (
DBSession,
Base,
)
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.bind = engine
config = Configurator(settings=settings)
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('site_graph', '/site/{code}')
config.add_route('entity_graph', '/entity/{id}')
config.scan()
return config.make_wsgi_app()
| Python | 0 |
55cfe1b3ce4c55eaeadbcedeba942cf6ed40f134 | revert changes in message lib | intelmq/lib/message.py | intelmq/lib/message.py | import json
import hashlib
class Event(object):
    """A dict-backed bag of event attributes with JSON (de)serialization.

    Falsy values are rejected by add()/update(); lookups on missing keys
    return None instead of raising.
    """

    def __init__(self, event=None):
        # A falsy argument (None / empty mapping) starts a fresh dict.
        self.event = event if event else dict()

    def add(self, key, value):
        """Set key only if value is truthy and the key is not present yet."""
        if value and key not in self.event:
            self.event[key] = value
            return True
        return False

    def update(self, key, value):
        """Set key unconditionally, provided the value is truthy."""
        if value:
            self.event[key] = value
            return True
        return False

    def discard(self, key, value):
        # The value argument is accepted but ignored, mirroring set.discard.
        self.clear(key)

    def clear(self, key):
        """Remove key and return its old value, or None if absent."""
        return self.event.pop(key, None)

    def value(self, key):
        """Return the value stored for key, or None if absent."""
        return self.event.get(key)

    def keys(self):
        return self.event.keys()

    def items(self):
        return self.event.items()

    def contains(self, key):
        """Alias of value(): the stored value, or None if absent."""
        return self.event.get(key)

    def to_dict(self):
        return dict(self.event)

    def to_unicode(self):
        return unicode(json.dumps(self.event))

    @staticmethod
    def from_unicode(event_string):
        return Event(json.loads(event_string))

    def __hash__(self):
        # SHA1 over the sorted key/value pairs, '\xc0'-delimited.
        digest = hashlib.sha1()
        for key, value in sorted(self.items()):
            digest.update(key.encode("utf-8"))
            digest.update("\xc0")
            digest.update(value.encode("utf-8"))
            digest.update("\xc0")
        # FIXME: the int conversion should be done by the cache layer.
        return int(digest.hexdigest(), 16)

    def __eq__(self, event2):
        return self.event == event2

    def __unicode__(self):
        return self.to_unicode()

    def __repr__(self):
        return repr(self.event)

    def __str__(self):
        return str(self.event)
| import json
import hashlib
class Event(object):
    """A dict-backed bag of event attributes with JSON (de)serialization."""

    def __init__(self, event=None):
        if event:
            self.event = event
        else:
            self.event = dict()

    def add(self, key, value):
        """Set key only if value is truthy and the key is absent."""
        if not value or key in self.event:
            return False
        self.event[key] = value
        return True

    def update(self, key, value):
        """Set key unconditionally, provided the value is truthy."""
        if not value:
            return False
        self.event[key] = value
        return True

    def discard(self, key, value):
        # value is accepted but ignored, mirroring set.discard's signature.
        self.clear(key)

    def clear(self, key):
        """Remove key and return its old value, or None if absent."""
        if key in self.event:
            return self.event.pop(key)
        else:
            return None

    def value(self, key):
        """Return the value stored for key, or None if absent."""
        if key in self.event:
            return self.event[key]
        else:
            return None

    def keys(self):
        return self.event.keys()

    def items(self):
        return self.event.items()

    def contains(self, key):
        """Same contract as value(): the stored value, or None."""
        if key in self.event:
            return self.event[key]
        else:
            return None

    def to_dict(self):
        return dict(self.event)

    def to_unicode(self):
        return unicode(json.dumps(self.event))

    @staticmethod
    def from_unicode(event_string):
        return Event(json.loads(event_string))

    def __hash__(self):
        evhash = hashlib.sha1()
        for key, value in sorted(self.items()):
            evhash.update(key.encode("utf-8"))
            evhash.update("\xc0")
            # isinstance instead of exact-type comparison: bool is an int
            # subclass and previously fell through to .encode(), crashing.
            if isinstance(value, (int, float)):
                evhash.update("%s" % value)
            else:
                evhash.update(value.encode("utf-8"))
            evhash.update("\xc0")
        return int(evhash.hexdigest(), 16)  # FIXME: the int stuff should be done by cache
        #return hash(self.event)

    def __eq__(self, event2):
        return self.event == event2

    def __unicode__(self):
        return self.to_unicode()

    def __repr__(self):
        return repr(self.event)

    def __str__(self):
        return str(self.event)
| Python | 0 |
17eb885097da7b2b2418f909e2f23058245be72c | Update spotify example (#276) | examples/spotify_example.py | examples/spotify_example.py | """
Example on how to use the Spotify Controller.
NOTE: You need to install the spotipy and spotify-token dependencies.
This can be done by running the following:
pip install spotify-token
pip install git+https://github.com/plamere/spotipy.git
"""
import logging
import sys
import pychromecast
from pychromecast.controllers.spotify import SpotifyController
import spotify_token as st
import spotipy
# Name of the target Chromecast as it appears on the network.
CAST_NAME = "My Chromecast"

debug = '--show-debug' in sys.argv
if debug:
    logging.basicConfig(level=logging.DEBUG)

# Discover all Chromecasts on the LAN and pick the one we want by name.
chromecasts = pychromecast.get_chromecasts()
cast = None
for _cast in chromecasts:
    if _cast.name == CAST_NAME:
        cast = _cast
        break

if cast:
    # Block until the device is ready to accept commands.
    cast.wait()
    device_id = None
    # Obtain a Spotify web-API token from username/password credentials.
    data = st.start_session("SPOTIFY_USERNAME", "SPOTIFY_PASSWORD")
    access_token = data[0]
    client = spotipy.Spotify(auth=access_token)
    # Launch the Spotify receiver app on the Chromecast.
    sp = SpotifyController(access_token)
    cast.register_handler(sp)
    sp.launch_app()
    # Locate the Chromecast among Spotify Connect devices so playback
    # can be routed to it (the start_playback call follows below).
    devices_available = client.devices()
    for device in devices_available['devices']:
        if device['name'] == CAST_NAME:
            device_id = device['id']
            break
client.start_playback(device_id=device_id, uris=["spotify:track:3Zwu2K0Qa5sT6teCCHPShP"]) | """
Example on how to use the Spotify Controller.
NOTE: You need to install the spotipy and spotify-token dependencies.
This can be done by running the following:
pip install spotify-token
pip install git+https://github.com/plamere/spotipy.git
"""
import pychromecast
from pychromecast.controllers.spotify import SpotifyController
import spotify_token as st
import spotipy
chromecasts = pychromecast.get_chromecasts()
cast = chromecasts[0]
cast.start()
CAST_NAME = "My Chromecast"
device_id = None
if cast.name == CAST_NAME:
data = st.start_session("SPOTIFY_USERNAME", "SPOTIFY_PASSWORD")
access_token = data[0]
client = spotipy.Spotify(auth=access_token)
sp = SpotifyController(access_token)
cast.register_handler(sp)
sp.launch_app()
devices_available = client.devices()
for device in devices_available['devices']:
if device['name'] == CAST_NAME and device['type'] == 'CastVideo':
device_id = device['id']
break
client.start_playback(device_id=device_id, uris=["spotify:track:3Zwu2K0Qa5sT6teCCHPShP"]) | Python | 0.000001 |
fd460c1b987354b01d306e2e96ab5c74f6b0d06f | add socket close call. | echo_server.py | echo_server.py | #!/usr/bin/env python
from __future__ import print_function
import socket
import email.utils
def server_socket_function():
    """Accept connections on 127.0.0.1:50000 and answer each message.

    Runs until KeyboardInterrupt. Fixes: `conn` was unbound (NameError)
    when Ctrl-C arrived before the first accept(), and neither the client
    connection nor the listening socket was reliably closed.
    """
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP)
    server_socket.bind(('127.0.0.1', 50000))
    server_socket.listen(1)
    conn = None
    try:
        while True:
            conn, addr = server_socket.accept()
            message = conn.recv(32)
            if message:
                conn.sendall("I recieved your message. Stop talking to me. You are annoying.")
            # Close each client connection once answered instead of leaking it.
            conn.close()
            conn = None
    except KeyboardInterrupt:
        if conn is not None:
            conn.close()
    finally:
        server_socket.close()
def response_ok():
    """Return a minimal HTTP/1.1 200 response header block.

    HTTP requires CRLF ("\\r\\n") line endings and a blank line after the
    headers; the previous literal '<CRLF>' placeholder and bare '\\n'
    separators produced a malformed response.
    """
    timestamp = email.utils.formatdate(usegmt=True)
    headers = [
        'HTTP/1.1 200 OK',
        'Date: {}'.format(timestamp),
        'Content-Type: text/plain',
    ]
    return '\r\n'.join(headers) + '\r\n\r\n'
def response_error():
    """Return a minimal HTTP/1.1 404 response header block.

    Uses proper CRLF line endings and a blank line terminating the headers
    (the '<CRLF>' placeholder and '\\n' separators were malformed HTTP).
    """
    error_code = '404'
    error_text = 'Not Found'
    timestamp = email.utils.formatdate(usegmt=True)
    headers = [
        'HTTP/1.1 {} {}'.format(error_code, error_text),
        'Date: {}'.format(timestamp),
        'Content-Type: text/plain',
    ]
    return '\r\n'.join(headers) + '\r\n\r\n'
def parse_request():
    """Parse an incoming HTTP request. Stub — not implemented yet (TODO)."""
    return
print(response_ok())
# if __name__ == '__main__':
# server_socket_function()
| #!/usr/bin/env python
from __future__ import print_function
import socket
import email.utils
def server_socket_function():
    """Accept connections on 127.0.0.1:50000 and answer each message.

    Runs until KeyboardInterrupt. Fixes: `conn` was unbound (NameError)
    when Ctrl-C arrived before the first accept(), and neither the client
    connection nor the listening socket was ever closed.
    """
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP)
    server_socket.bind(('127.0.0.1', 50000))
    server_socket.listen(1)
    conn = None
    try:
        while True:
            conn, addr = server_socket.accept()
            message = conn.recv(32)
            if message:
                conn.sendall("I recieved your message. Stop talking to me. You are annoying.")
            conn.close()
            conn = None
    except KeyboardInterrupt:
        if conn is not None:
            conn.close()
    finally:
        server_socket.close()
def response_ok():
    """Return a minimal HTTP/1.1 200 response header block.

    HTTP requires CRLF ("\\r\\n") line endings and a blank line after the
    headers; the '<CRLF>' placeholder and bare '\\n' were malformed.
    """
    timestamp = email.utils.formatdate(usegmt=True)
    headers = [
        'HTTP/1.1 200 OK',
        'Date: {}'.format(timestamp),
        'Content-Type: text/plain',
    ]
    return '\r\n'.join(headers) + '\r\n\r\n'
def response_error():
    """Return a minimal HTTP/1.1 404 response header block with proper
    CRLF line endings and a blank line terminating the headers."""
    error_code = '404'
    error_text = 'Not Found'
    timestamp = email.utils.formatdate(usegmt=True)
    headers = [
        'HTTP/1.1 {} {}'.format(error_code, error_text),
        'Date: {}'.format(timestamp),
        'Content-Type: text/plain',
    ]
    return '\r\n'.join(headers) + '\r\n\r\n'
def parse_request():
    """Parse an incoming HTTP request. Stub — not implemented yet (TODO)."""
    return
print(response_ok())
# if __name__ == '__main__':
# server_socket_function()
| Python | 0 |
dd80e81732afd5b39f7120c5951a53c218723998 | Fix importing of field "number" | apps/curia_vista/management/commands/update_councillors.py | apps/curia_vista/management/commands/update_councillors.py | from xml.etree import ElementTree
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import requests
from apps.curia_vista.models import Councillor
class Command(BaseCommand):
    """Management command mirroring councillors from the parlament.ch
    web service into the local Councillor table.

    The import runs inside a single transaction, so a mid-run failure
    leaves the table unchanged.
    """

    help = 'Import councillors from parlament.ch'

    @transaction.atomic
    def handle(self, *args, **options):
        # Paged XML API; the page number is appended per request below.
        source_base = 'http://ws.parlament.ch/councillors?format=xml&lang=de&pagenumber='
        # NOTE(review): presumably the service rejects non-browser
        # User-Agents — confirm against the API docs.
        headers = {'User-Agent': 'Mozilla'}
        cur_page = 1
        while True:
            source = source_base + str(cur_page)
            cur_page += 1
            try:
                self.stdout.write("Starting importing from {}".format(source))
                response = requests.get(source, headers=headers)
            except Exception as e:
                # `e` is intentionally unused; the URL identifies the failure.
                raise CommandError("Could not fetch file from {}".format(source))
            councillors = ElementTree.fromstring(response.content)
            if not councillors:
                # An Element with no children is falsy: empty page => abort.
                raise CommandError("Not a valid XML file: {}".format(source))
            more_pages = False
            for councillor in councillors:
                councillor_id = councillor.find('id').text
                councillor_updated = councillor.find('updated').text
                councillor_active = councillor.find('active').text == 'true'
                councillor_code = councillor.find('code').text
                councillor_first_name = councillor.find('firstName').text
                councillor_last_name = councillor.find('lastName').text
                # <number> is optional in the feed, hence the find() guard.
                councillor_number = councillor.find('number').text if councillor.find('number') is not None else None
                councillor_official_denomination = councillor.find('officialDenomination').text
                councillor_salutation_letter = councillor.find('salutationLetter').text
                councillor_salutation_title = councillor.find('salutationTitle').text
                # The pagination flag rides along on the item elements.
                if councillor.find('hasMorePages') is not None:
                    more_pages = 'true' == councillor.find('hasMorePages').text
                councillor_model, created = Councillor.objects.update_or_create(id=councillor_id,
                                                                                defaults={
                                                                                    'updated': councillor_updated,
                                                                                    'active': councillor_active,
                                                                                    'code': councillor_code,
                                                                                    'first_name': councillor_first_name,
                                                                                    'last_name': councillor_last_name,
                                                                                    'number': councillor_number,
                                                                                    'official_denomination': councillor_official_denomination,
                                                                                    'salutation_letter': councillor_salutation_letter,
                                                                                    'salutation_title': councillor_salutation_title})
                councillor_model.full_clean()
                councillor_model.save()
                print(councillor_model)
            self.stdout.write("Finished importing from {}".format(source))
            if not more_pages:
                break
        self.stdout.write("Done")
| from xml.etree import ElementTree
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import requests
from apps.curia_vista.models import Councillor
class Command(BaseCommand):
    """Management command mirroring councillors from the parlament.ch
    web service into the local Councillor table (single transaction)."""

    help = 'Import councillors from parlament.ch'

    @transaction.atomic
    def handle(self, *args, **options):
        # Paged XML API; the page number is appended per request below.
        source_base = 'http://ws.parlament.ch/councillors?format=xml&lang=de&pagenumber='
        headers = {'User-Agent': 'Mozilla'}
        cur_page = 1
        while True:
            source = source_base + str(cur_page)
            cur_page += 1
            try:
                self.stdout.write("Starting importing from {}".format(source))
                response = requests.get(source, headers=headers)
            except Exception as e:
                raise CommandError("Could not fetch file from {}".format(source))
            councillors = ElementTree.fromstring(response.content)
            if not councillors:
                raise CommandError("Not a valid XML file: {}".format(source))
            more_pages = False
            for councillor in councillors:
                councillor_id = councillor.find('id').text
                councillor_updated = councillor.find('updated').text
                councillor_active = councillor.find('active').text == 'true'
                councillor_code = councillor.find('code').text
                councillor_first_name = councillor.find('firstName').text
                councillor_last_name = councillor.find('lastName').text
                # Fixed: `'number' in councillor` compared the string against
                # the child Element objects and was always False, so the
                # number field was never imported. Use find() + None check.
                councillor_number = councillor.find('number').text if councillor.find('number') is not None else None
                councillor_official_denomination = councillor.find('officialDenomination').text
                councillor_salutation_letter = councillor.find('salutationLetter').text
                councillor_salutation_title = councillor.find('salutationTitle').text
                if councillor.find('hasMorePages') is not None:
                    more_pages = 'true' == councillor.find('hasMorePages').text
                councillor_model, created = Councillor.objects.update_or_create(id=councillor_id,
                                                                                defaults={
                                                                                    'updated': councillor_updated,
                                                                                    'active': councillor_active,
                                                                                    'code': councillor_code,
                                                                                    'first_name': councillor_first_name,
                                                                                    'last_name': councillor_last_name,
                                                                                    'number': councillor_number,
                                                                                    'official_denomination': councillor_official_denomination,
                                                                                    'salutation_letter': councillor_salutation_letter,
                                                                                    'salutation_title': councillor_salutation_title})
                councillor_model.full_clean()
                councillor_model.save()
                print(councillor_model)
            self.stdout.write("Finished importing from {}".format(source))
            if not more_pages:
                break
        self.stdout.write("Done")
| Python | 0.000002 |
add0af524dafa241d7bab64093ed45c857c66c0d | Rename cfg to settings | statsSend/teamCity/teamCityStatisticsSender.py | statsSend/teamCity/teamCityStatisticsSender.py | #!/usr/bin/env python3
from dateutil import parser
from statsSend.teamCity.teamCityConnection import TeamCityConnection
from statsSend.teamCity.teamCityUrlBuilder import TeamCityUrlBuilder
from statsSend.teamCity.teamCityProject import TeamCityProject
class TeamCityStatisticsSender:
    """Pulls build runs from a TeamCity project and feeds them to a reporter."""

    def __init__(self, settings, reporter):
        self.page_size = int(settings['page_size'])
        connection = TeamCityConnection(settings['user'], settings['password'])
        url_builder = TeamCityUrlBuilder(settings['server_url'], settings['api_url_prefix'])
        self.project = TeamCityProject(settings['project_id'], connection, url_builder, self.page_size)
        # Normalize to TeamCity's compact timestamp format.
        self.since_timestamp = parser.parse(settings['since_timestamp']).strftime('%Y%m%dT%H%M%S%z')
        self.reporter = reporter

    async def send(self):
        """Report categories (if the reporter supports it) and every build
        run since the configured cutoff."""
        # hasattr() is the idiomatic capability check and avoids building
        # the whole dir() list just to test a single name.
        if hasattr(self.reporter, 'report_categories'):
            categories = [build_configuration.toCategory() async for build_configuration in self.project.retrieve_build_configurations()]
            self.reporter.report_categories(categories)
        async for build_configuration in self.project.retrieve_build_configurations():
            async for build_run in build_configuration.retrieve_build_runs_since_timestamp(self.since_timestamp):
                job = build_run.toJob()
self.reporter.report_job(job) | #!/usr/bin/env python3
from dateutil import parser
from statsSend.teamCity.teamCityConnection import TeamCityConnection
from statsSend.teamCity.teamCityUrlBuilder import TeamCityUrlBuilder
from statsSend.teamCity.teamCityProject import TeamCityProject
class TeamCityStatisticsSender:
def __init__(self, cfg, reporter):
self.page_size = int(cfg['page_size'])
connection = TeamCityConnection(cfg['user'], cfg['password'])
url_builder = TeamCityUrlBuilder(cfg['server_url'], cfg['api_url_prefix'])
self.project = TeamCityProject(cfg['project_id'], connection, url_builder, self.page_size)
self.since_timestamp = parser.parse(cfg['since_timestamp']).strftime('%Y%m%dT%H%M%S%z')
self.reporter = reporter
async def send(self):
if ("report_categories" in dir(self.reporter)):
categories = [build_configuration.toCategory() async for build_configuration in self.project.retrieve_build_configurations()]
self.reporter.report_categories(categories)
async for build_configuration in self.project.retrieve_build_configurations():
async for build_run in build_configuration.retrieve_build_runs_since_timestamp(self.since_timestamp):
job = build_run.toJob()
self.reporter.report_job(job) | Python | 0.001188 |
daba0d7eb4b77e40790624e23938b2ebb6d04fca | fix notify loop | examples/multiworker2.py | examples/multiworker2.py | # -*- coding: utf-8 -
#
# This file is part of pistil released under the MIT license.
# See the NOTICE for more information.
import time
import urllib2
from pistil.arbiter import Arbiter
from pistil.worker import Worker
from pistil.tcp.sync_worker import TcpSyncWorker
from pistil.tcp.arbiter import TcpArbiter
from http_parser.http import HttpStream
from http_parser.reader import SocketReader
class MyTcpWorker(TcpSyncWorker):
    """Synchronous TCP worker answering every connection with a fixed page."""

    def handle(self, sock, addr):
        p = HttpStream(SocketReader(sock))
        path = p.path()
        data = "welcome wold"
        # sendall() instead of send(): a plain send() may transmit only
        # part of the buffer, truncating the HTTP response.
        sock.sendall("".join(["HTTP/1.1 200 OK\r\n",
                    "Content-Type: text/html\r\n",
                    "Content-Length:" + str(len(data)) + "\r\n",
                    "Connection: close\r\n\r\n",
                    data]))
class UrlWorker(Worker):
    """Worker that polls the TCP pool over HTTP every 100 ms and prints the body."""

    def run(self):
        print "ici"
        # `alive` is cleared by the arbiter when this worker should stop.
        while self.alive:
            time.sleep(0.1)
            f = urllib2.urlopen("http://localhost:5000")
            print f.read()
            # Heartbeat: tells the arbiter this worker is still healthy.
            self.notify()
class MyPoolArbiter(TcpArbiter):
def on_init(self, conf):
TcpArbiter.on_init(self, conf)
# we return a spec
return (MyTcpWorker, 30, "worker", {}, "http_welcome",)
if __name__ == '__main__':
conf = {"num_workers": 3, "address": ("127.0.0.1", 5000)}
specs = [
(MyPoolArbiter, 30, "supervisor", {}, "tcp_pool"),
(UrlWorker, 30, "worker", {}, "grabber")
]
arbiter = Arbiter(conf, specs)
arbiter.run()
| # -*- coding: utf-8 -
#
# This file is part of pistil released under the MIT license.
# See the NOTICE for more information.
import time
import urllib2
from pistil.arbiter import Arbiter
from pistil.worker import Worker
from pistil.tcp.sync_worker import TcpSyncWorker
from pistil.tcp.arbiter import TcpArbiter
from http_parser.http import HttpStream
from http_parser.reader import SocketReader
class MyTcpWorker(TcpSyncWorker):
    """Synchronous TCP worker answering every connection with a fixed page."""

    def handle(self, sock, addr):
        p = HttpStream(SocketReader(sock))
        path = p.path()
        data = "welcome wold"
        # sendall() instead of send(): a plain send() may transmit only
        # part of the buffer, truncating the HTTP response.
        sock.sendall("".join(["HTTP/1.1 200 OK\r\n",
                    "Content-Type: text/html\r\n",
                    "Content-Length:" + str(len(data)) + "\r\n",
                    "Connection: close\r\n\r\n",
                    data]))
class UrlWorker(Worker):
def run(self):
print "ici"
while self.alive:
time.sleep(0.1)
f = urllib2.urlopen("http://localhost:5000")
print f.read()
self.notify
class MyPoolArbiter(TcpArbiter):
def on_init(self, conf):
TcpArbiter.on_init(self, conf)
# we return a spec
return (MyTcpWorker, 30, "worker", {}, "http_welcome",)
if __name__ == '__main__':
conf = {"num_workers": 3, "address": ("127.0.0.1", 5000)}
specs = [
(MyPoolArbiter, 30, "supervisor", {}, "tcp_pool"),
(UrlWorker, 30, "worker", {}, "grabber")
]
arbiter = Arbiter(conf, specs)
arbiter.run()
| Python | 0.000001 |
878811a673625f9dbe0f41dd0196887f612ecf2e | Set default file extension to empty string | expand_region_handler.py | expand_region_handler.py | import re
try:
import javascript
import html
except:
from . import javascript
from . import html
def expand(string, start, end, extension=""):
    """Choose the HTML-aware region expander for markup files (html/htm/xml),
    falling through to the JavaScript expander otherwise. The "" default
    makes a missing extension safely match the JavaScript branch.
    """
    if(re.compile("html|htm|xml").search(extension)):
        return html.expand(string, start, end)
return javascript.expand(string, start, end) | import re
try:
import javascript
import html
except:
from . import javascript
from . import html
def expand(string, start, end, extension=None):
    """Choose the HTML-aware region expander for markup files (html/htm/xml),
    falling through to the JavaScript expander otherwise.

    Fix: re.search(None) raised TypeError when no extension was supplied;
    a falsy extension now simply selects the JavaScript branch.
    """
    if(extension and re.compile("html|htm|xml").search(extension)):
        return html.expand(string, start, end)
return javascript.expand(string, start, end) | Python | 0.000005 |
58d3e0712a35052d0016fa3c3b3ffda1ba56b305 | Add some locks | lightcontrol/server.py | lightcontrol/server.py | #!/usr/bin/env python3
import RPi.GPIO as GPIO
import time
import threading
import logging
from threading import RLock, Lock
from tzlocal import get_localzone
from flask import Flask, render_template, url_for, request, make_response
from lightcontrol.config import lights
from os.path import expanduser
import os.path
import json
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s')
GPIO.setmode(GPIO.BCM)
logger = logging.getLogger(__name__)
app = Flask("lightcontrol")
home_dir = expanduser('~')
class Preferences:
    """Thread-safe JSON-file-backed key/value store for light state."""

    def __init__(self, filename):
        self.filename = filename
        # Re-entrant so update() may call read()/write() while holding it.
        self.lock = RLock()

    def read(self):
        """Return the stored dict; {} if the file is missing or unreadable."""
        with self.lock:
            if not os.path.exists(self.filename):
                return dict()
            try:
                with open(self.filename, 'rb') as f:
                    return json.loads(f.read().decode('utf-8'))
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit still propagate instead of being swallowed.
                logger.exception("Error reading JSON. Resetting preferences")
                return dict()

    def write(self, d):
        """Serialize *d* to the preferences file; returns bytes written."""
        with self.lock:
            with open(self.filename, 'wb') as f:
                return f.write(json.dumps(d).encode('utf-8'))

    def update(self, key, value):
        """Atomically set one key (read-modify-write under the lock)."""
        with self.lock:
            p = self.read()
            p[key] = value
            self.write(p)
pref = Preferences(filename=home_dir + '/.lightcontrol')
switch_lock = Lock()
def toggle_switch(light_name, onoff):
with switch_lock:
pref.update(light_name, onoff)
line = lights[light_name][0 if onoff else 1]
GPIO.setup(line, GPIO.OUT)
GPIO.output(line, GPIO.HIGH)
time.sleep(0.5)
GPIO.output(line, GPIO.LOW)
@app.route("/")
def index():
return render_template("index.html", config=lights)
@app.route("/lights/<room_name>/<onoff>", methods=["POST"])
def control(room_name, onoff):
onoff = onoff == "on"
toggle_switch(room_name, onoff)
return make_response(str(onoff), 200)
@app.route("/lights/<room_name>/status", methods=["GET"])
def status(room_name):
stat = pref.read().get(room_name, False)
# update
#toggle_switch(room_name, stat)
return "1" if stat else "0"
#for name, val in pref.read().items():
# toggle_switch(name, val)
#import IPython
#IPython.embed()
| #!/usr/bin/env python3
import RPi.GPIO as GPIO
import time
import threading
import logging
from tzlocal import get_localzone
from flask import Flask, render_template, url_for, request, make_response
from lightcontrol.config import lights
from os.path import expanduser
import os.path
import json
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s')
GPIO.setmode(GPIO.BCM)
logger = logging.getLogger(__name__)
app = Flask("lightcontrol")
home_dir = expanduser('~')
class Preferences:
def __init__(self, filename):
self.filename = filename
def read(self):
if os.path.exists(self.filename):
try:
with open(self.filename, 'rb') as f:
return json.loads(f.read().decode('utf-8'))
except:
logger.exception("Error reading JSON. Resetting preferences")
return dict()
else:
return dict()
def write(self, d):
with open(self.filename, 'wb') as f:
return f.write(json.dumps(d).encode('utf-8'))
def update(self, key, value):
p = self.read()
p[key] = value
self.write(p)
pref = Preferences(filename=home_dir + '/.lightcontrol')
def toggle_switch(light_name, onoff):
line = lights[light_name][0 if onoff else 1]
GPIO.setup(line, GPIO.OUT)
GPIO.output(line, GPIO.HIGH)
time.sleep(0.5)
GPIO.output(line, GPIO.LOW)
pref.update(light_name, onoff)
@app.route("/")
def index():
return render_template("index.html", config=lights)
@app.route("/lights/<room_name>/<onoff>", methods=["POST"])
def control(room_name, onoff):
onoff = onoff == "on"
toggle_switch(room_name, onoff)
return make_response(str(onoff), 200)
@app.route("/lights/<room_name>/status", methods=["GET"])
def status(room_name):
stat = pref.read().get(room_name, False)
# update
#toggle_switch(room_name, stat)
return "1" if stat else "0"
#for name, val in pref.read().items():
# toggle_switch(name, val)
#import IPython
#IPython.embed()
| Python | 0.000018 |
c5ea9424bd6236677c2b9301629c693181b75fc2 | Fix pep8 issues. | md-to-toc.py | md-to-toc.py | # Author: Antonio Maiorano (amaiorano@gmail.com)
import sys
import re
TOC_LIST_PREFIX = "-"
# TOC_LIST_PREFIX = "*"
HEADER_LINE_RE = re.compile("^(#+)\s*(.*?)\s*(#+$|$)", re.IGNORECASE)
HEADER1_UNDERLINE_RE = re.compile("^-+$")
HEADER2_UNDERLINE_RE = re.compile("^=+$")
# Dictionary of anchor name to number of instances found so far
anchors = {}
def print_usage():
print("\nUsage: md-to-toc <markdown_file>")
def to_github_anchor(title):
'''
Converts markdown header title (without #s) to GitHub-formatted anchor.
Note that this function attempts to recreate GitHub's anchor-naming logic.
'''
# Convert to lower case and replace spaces with dashes
anchor_name = title.strip().lower().replace(' ', '-')
# Strip all invalid characters
anchor_name = re.sub("[^A-Za-z0-9\-_]", "", anchor_name)
# If we've encountered this anchor name before, append next instance count
count = anchors.get(anchor_name)
if count is None:
anchors[anchor_name] = 0
else:
count = count + 1
anchors[anchor_name] = count
anchor_name = anchor_name + '-' + str(count)
return '#' + anchor_name
def toggles_block_quote(line):
'''Returns true if line toggles block quotes on or off'''
'''(i.e. finds odd number of ```)'''
n = line.count("```")
return n > 0 and line.count("```") % 2 != 0
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) < 2:
print_usage()
return 0
filespec = argv[1]
in_block_quote = False
results = [] # list of (header level, title, anchor) tuples
last_line = ""
file = open(filespec)
for line in file.readlines():
if toggles_block_quote(line):
in_block_quote = not in_block_quote
if in_block_quote:
continue
found_header = False
header_level = 0
m = HEADER_LINE_RE.match(line)
if m is not None:
header_level = len(m.group(1))
title = m.group(2)
found_header = True
if not found_header:
m = HEADER1_UNDERLINE_RE.match(line)
if m is not None:
header_level = 1
title = last_line.rstrip()
found_header = True
if not found_header:
m = HEADER2_UNDERLINE_RE.match(line)
if m is not None:
header_level = 2
title = last_line.rstrip()
found_header = True
if found_header:
results.append((header_level, title, to_github_anchor(title)))
last_line = line
# Compute min header level so we can offset output to be flush with
# left edge
min_header_level = min(results, key=lambda e: e[0])[0]
for r in results:
header_level = r[0]
spaces = " " * (header_level - min_header_level)
print("{}{} [{}]({})".format(spaces, TOC_LIST_PREFIX, r[1], r[2]))
if __name__ == "__main__":
sys.exit(main())
| # Author: Antonio Maiorano (amaiorano@gmail.com)
import os
import sys
import re
TOC_LIST_PREFIX = "-"
# TOC_LIST_PREFIX = "*"
HEADER_LINE_RE = re.compile("^(#+)\s*(.*?)\s*(#+$|$)", re.IGNORECASE)
HEADER1_UNDERLINE_RE = re.compile("^-+$")
HEADER2_UNDERLINE_RE = re.compile("^=+$")
# Dictionary of anchor name to number of instances found so far
anchors = {}
def print_usage():
print("\nUsage: md-to-toc <markdown_file>")
def to_github_anchor(title):
'''
Converts markdown header title (without #s) to GitHub-formatted anchor.
Note that this function attempts to recreate GitHub's anchor-naming logic.
'''
# Convert to lower case and replace spaces with dashes
anchor_name = title.strip().lower().replace(' ', '-')
# Strip all invalid characters
anchor_name = re.sub("[^A-Za-z0-9\-_]", "", anchor_name)
# If we've encountered this anchor name before, append next instance count
count = anchors.get(anchor_name)
if count == None:
anchors[anchor_name] = 0
else:
count = count + 1
anchors[anchor_name] = count
anchor_name = anchor_name + '-' + str(count)
return '#' + anchor_name
def toggles_block_quote(line):
'''Returns true if line toggles block quotes on or off (i.e. finds odd number of ```)'''
n = line.count("```")
return n > 0 and line.count("```") % 2 != 0
def main(argv = None):
if argv is None:
argv = sys.argv
if len(argv) < 2:
print_usage()
return 0
filespec = argv[1]
in_block_quote = False
results = [] # list of (header level, title, anchor) tuples
last_line = ""
file = open(filespec)
for line in file.readlines():
if toggles_block_quote(line):
in_block_quote = not in_block_quote;
if in_block_quote:
continue
found_header = False
header_level = 0
m = HEADER_LINE_RE.match(line)
if m != None:
header_level = len(m.group(1))
title = m.group(2)
found_header = True
if not found_header:
m = HEADER1_UNDERLINE_RE.match(line)
if m != None:
header_level = 1
title = last_line.rstrip()
found_header = True
if not found_header:
m = HEADER2_UNDERLINE_RE.match(line)
if m != None:
header_level = 2
title = last_line.rstrip()
found_header = True
if found_header:
results.append( (header_level, title, to_github_anchor(title)) )
last_line = line
# Compute min header level so we can offset output to be flush with left edge
min_header_level = min(results, key=lambda e: e[0])[0]
for r in results:
header_level = r[0]
spaces = " " * (header_level - min_header_level)
print("{}{} [{}]({})".format(spaces, TOC_LIST_PREFIX, r[1], r[2]))
if __name__ == "__main__":
sys.exit(main())
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.