commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
026ba5fa78cb9916bffc23cf7dda1d1deb81b24c | Bump version 1.0.3 | story/__init__.py | story/__init__.py | """
Story - PySchool
"""
__author__ = 'PySchool'
__version__ = '1.0.3'
__licence__ = 'MIT'
| """
Story - PySchool
"""
__author__ = 'PySchool'
__version__ = '1.0.2'
__licence__ = 'MIT'
| Python | 0 |
8d0e3ae1f80e8b19292b18a20a338cbfd00364c7 | Bump to version number 1.6.0 | stream/release.py | stream/release.py | # coding=utf-8
"""
stream.release
~~~~~~~~~~~~~~
Include release information of the package.
:copyright: (c) 2016 by Ali Ghaffaari.
:license: MIT, see LICENSE for more details.
"""
# CONSTANTS ###################################################################
# Development statuses:
DS_PLANNING = 1
DS_PREALPHA = 2
DS_ALPHA = 3
DS_BETA = 4
DS_STABLE = 5
DS_MATURE = 6
DS_INACTIVE = 7
DS_STRING = {
DS_PLANNING: 'Development Status :: 1 - Planning',
DS_PREALPHA: 'Development Status :: 2 - Pre-Alpha',
DS_ALPHA: 'Development Status :: 3 - Alpha',
DS_BETA: 'Development Status :: 4 - Beta',
DS_STABLE: 'Development Status :: 5 - Production/Stable',
DS_MATURE: 'Development Status :: 6 - Mature',
DS_INACTIVE: 'Development Status :: 7 - Inactive'
}
###############################################################################
# Package release information.
__title__ = 'stream'
__description__ = 'Python implementation of stream library'
__author__ = 'Ali Ghaffaari'
__email__ = 'ali.ghaffaari@mpi-inf.mpg.de'
__license__ = 'MIT'
# Release
__version__ = '1.6.0'
__status__ = DS_STABLE
# PyPI-related information
__keywords__ = 'stream protocol buffer protobuf'
__classifiers__ = [
# Development status
DS_STRING[__status__],
# License
'License :: OSI Approved :: MIT License',
# Supported Python versions.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
# Intended Audience and Topic
'Intended Audience :: Developers',
]
__requires__ = ['protobuf>=3.4.0', 'async_generator>=1.10', 'click>=6.0.0', 'future']
__tests_require__ = []
__extras_require__ = {
'test': ['nose>=1.0', 'coverage'],
}
__setup_requires__ = ['nose>=1.0', 'coverage']
__entry_points__ = '''
[console_scripts]
varint=stream.varint:cli
'''
| # coding=utf-8
"""
stream.release
~~~~~~~~~~~~~~
Include release information of the package.
:copyright: (c) 2016 by Ali Ghaffaari.
:license: MIT, see LICENSE for more details.
"""
# CONSTANTS ###################################################################
# Development statuses:
DS_PLANNING = 1
DS_PREALPHA = 2
DS_ALPHA = 3
DS_BETA = 4
DS_STABLE = 5
DS_MATURE = 6
DS_INACTIVE = 7
DS_STRING = {
DS_PLANNING: 'Development Status :: 1 - Planning',
DS_PREALPHA: 'Development Status :: 2 - Pre-Alpha',
DS_ALPHA: 'Development Status :: 3 - Alpha',
DS_BETA: 'Development Status :: 4 - Beta',
DS_STABLE: 'Development Status :: 5 - Production/Stable',
DS_MATURE: 'Development Status :: 6 - Mature',
DS_INACTIVE: 'Development Status :: 7 - Inactive'
}
###############################################################################
# Package release information.
__title__ = 'stream'
__description__ = 'Python implementation of stream library'
__author__ = 'Ali Ghaffaari'
__email__ = 'ali.ghaffaari@mpi-inf.mpg.de'
__license__ = 'MIT'
# Release
__version__ = '1.5.2'
__status__ = DS_BETA
# PyPI-related information
__keywords__ = 'stream protocol buffer protobuf'
__classifiers__ = [
# Development status
DS_STRING[__status__],
# License
'License :: OSI Approved :: MIT License',
# Supported Python versions.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
# Intended Audience and Topic
'Intended Audience :: Developers',
]
__requires__ = ['protobuf>=3.4.0', 'async_generator>=1.10', 'click>=6.0.0', 'future']
__tests_require__ = []
__extras_require__ = {
'test': ['nose>=1.0', 'coverage'],
}
__setup_requires__ = ['nose>=1.0', 'coverage']
__entry_points__ = '''
[console_scripts]
varint=stream.varint:cli
'''
| Python | 0.000001 |
c8069fff1941d0739bca8716a5e26f5c02ccffe3 | Add South field tuple. | django_enumfield/fields.py | django_enumfield/fields.py | from django.db import models
class EnumField(models.Field):
__metaclass__ = models.SubfieldBase
def __init__(self, enumeration, *args, **kwargs):
self.enumeration = enumeration
kwargs.setdefault('choices', enumeration.get_choices())
super(EnumField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return 'IntegerField'
def to_python(self, value):
return self.enumeration.to_item(value)
def get_db_prep_save(self, value, connection=None):
if value is None:
return value
return self.to_python(value).value
def get_db_prep_lookup(self, lookup_type, value, connection=None, prepared=False):
def prepare(value):
v = self.to_python(value)
return self.get_db_prep_save(v, connection=connection)
if lookup_type == 'exact':
return [prepare(value)]
elif lookup_type == 'in':
return [prepare(v) for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError("Lookup type %r not supported." % lookup_type)
def south_field_triple(self):
from south.modelsinspector import introspector
args, kwargs = introspector(self)
return ('django.db.models.fields.Integerfield', args, kwargs)
| from django.db import models
class EnumField(models.Field):
__metaclass__ = models.SubfieldBase
def __init__(self, enumeration, *args, **kwargs):
self.enumeration = enumeration
kwargs.setdefault('choices', enumeration.get_choices())
super(EnumField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return 'IntegerField'
def to_python(self, value):
return self.enumeration.to_item(value)
def get_db_prep_save(self, value, connection=None):
if value is None:
return value
return self.to_python(value).value
def get_db_prep_lookup(self, lookup_type, value, connection=None, prepared=False):
def prepare(value):
v = self.to_python(value)
return self.get_db_prep_save(v, connection=connection)
if lookup_type == 'exact':
return [prepare(value)]
elif lookup_type == 'in':
return [prepare(v) for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError("Lookup type %r not supported." % lookup_type)
| Python | 0.000001 |
2c73fee5b0a3a527d0ee3c51291c7b4c01c9f688 | Revert "Создание скрипта изменения группы" | fixture/group.py | fixture/group.py | class GroupHelper:
def __init__(self, app):
self.app = app
def open_groups_page(self):
wd = self.app.wd
wd.find_element_by_link_text("groups").click()
def create(self, group):
wd = self.app.wd
self.open_groups_page()
# создание новой группы
wd.find_element_by_name("new").click()
# fill group form
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys(group.name)
if not wd.find_element_by_xpath("//div[@id='content']/form/select//option[1]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select//option[1]").click()
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys(group.header)
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys(group.footer)
# submit group creation
wd.find_element_by_name("submit").click()
self.return_to_groups_page()
def return_to_groups_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
def delete_first_group(self):
wd = self.app.wd
self.open_groups_page()
wd.find_element_by_name("selected[]").click()#select 1 group
wd.find_element_by_name("delete").click() #delete group
self.return_to_groups_page()
def change_group_properties(self):
wd = self.app.wd
| class GroupHelper:
def __init__(self, app):
self.app = app
def open_groups_page(self):
wd = self.app.wd
wd.find_element_by_link_text("groups").click()
def create(self, group):
wd = self.app.wd
self.open_groups_page()
# создание новой группы
wd.find_element_by_name("new").click()
# fill group form
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys(group.name)
if not wd.find_element_by_xpath("//div[@id='content']/form/select//option[1]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select//option[1]").click()
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys(group.header)
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys(group.footer)
# submit group creation
wd.find_element_by_name("submit").click()
self.return_to_groups_page()
def return_to_groups_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
def delete_first_group(self):
wd = self.app.wd
self.open_groups_page()
wd.find_element_by_name("selected[]").click()#select 1 group
wd.find_element_by_name("delete").click() #delete group
self.return_to_groups_page()
def change_group_properties(self):
wd = self.app.wd
self.open_groups_page()
wd.find_element_by_name("selected[]").click() # select 1 group
wd.find_element_by_name("edit").click() #delete group
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("Best group")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("Header")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("Footer")
wd.find_element_by_name("update").click()
self.return_to_groups_page()
| Python | 0 |
4e8177bca4335c34950adb54c0bca4bca59ef0c0 | fix error: has no attribute __subclass__ | app/auth/oauth.py | app/auth/oauth.py | from rauth import OAuth2Service
from flask import current_app, url_for, redirect, request, session
class OAuthSignIn(object):
providers = None
def __init__(self, provider_name):
self.provider_name = provider_name
credentials = current_app.config['OAUTH_CREDENTIALS'][provider_name]
self.consumer_id = credentials['id']
self.consumer_secret = credentials['secret']
def authorize(self):
pass
def callback(self):
pass
def get_callback_url(self):
return url_for('oauth_callback', provider=self.provider_name, _external=True)
@classmethod
def get_provider(self, provider_name):
if self.providers is None:
self.providers = {}
for provider_class in self.__subclasses__():
provider = proveder_class()
self.providers[provider.provider_name] = provider
return self.providers[provider_name]
class FacebookSignIn(OAuthSignIn):
def __init__(self):
super(FacebookSignIn, self).__init__('facebook')
self.service = OAuth2Service(
name = 'facebook',
client_id = self.consumer_id,
client_secret = self.consumer_secret,
authorize_url = 'https://graph.facebook.com/oauth/authorize',
access_token_url = 'https://graph.facebook.com/oauth/access_token',
base_url = 'https://graph.facebook.com/'
)
def authorize(self):
return redirect(self.service.get_authorize_url(
scope='email',
response_type='code',
redirect_uri= self.get_callback_url())
)
def callback(self):
if 'code' not in request.args:
return None, None, None
oauth_session = self.service.get_auth_session(
data={'code': request.args['code'],
'grant_type': 'authorization_code',
'redirect_uri': self.get_callback_url()}
)
me = oauth_session.get('me?fields=id,email').json()
return (
'facebook$' + me['id'],
me.get('email').split('@')[0], # Facebook does not provide
# username, so the email's user
# is used instead
me.get('email')
)
| from rauth import OAuth2Service
from flask import current_app, url_for, redirect, request, session
class OAuthSignIn(object):
providers = None
def __init__(self, provider_name):
self.provider_name = provider_name
credentials = current_app.config['OAUTH_CREDENTIALS'][provider_name]
self.consumer_id = credentials['id']
self.consumer_secret = credentials['secret']
def authorize(self):
pass
def callback(self):
pass
def get_callback_url(self):
return url_for('oauth_callback', provider=self.provider_name, _external=True)
@classmethod
def get_provider(self, provider_name):
if self.providers is None:
self.providers = {}
for provider_class in self.__subclass__():
provider = proveder_class()
self.providers[provider.provider_name] = provider
return self.providers[provider_name]
class FacebookSignIn(OAuthSignIn):
def __init__(self):
super(FacebookSignIn, self).__init__('facebook')
self.service = OAuth2Service(
name = 'facebook',
client_id = self.consumer_id,
client_secret = self.consumer_secret,
authorize_url = 'https://graph.facebook.com/oauth/authorize',
access_token_url = 'https://graph.facebook.com/oauth/access_token',
base_url = 'https://graph.facebook.com/'
)
def authorize(self):
return redirect(self.service.get_authorize_url(
scope='email',
response_type='code',
redirect_uri= self.get_callback_url())
)
def callback(self):
if 'code' not in request.args:
return None, None, None
oauth_session = self.service.get_auth_session(
data={'code': request.args['code'],
'grant_type': 'authorization_code',
'redirect_uri': self.get_callback_url()}
)
me = oauth_session.get('me?fields=id,email').json()
return (
'facebook$' + me['id'],
me.get('email').split('@')[0], # Facebook does not provide
# username, so the email's user
# is used instead
me.get('email')
)
| Python | 0.000033 |
65d91fe8857ab63827f1b85935d8a6647bd57543 | test refactoring | plenum/test/view_change/test_client_req_during_view_change.py | plenum/test/view_change/test_client_req_during_view_change.py | import pytest
from plenum.common.constants import NODE, TXN_TYPE, GET_TXN
from plenum.common.exceptions import RequestNackedException
from plenum.test.helper import sdk_send_random_and_check, \
sdk_send_random_requests, sdk_get_and_check_replies, sdk_gen_request, \
checkDiscardMsg
from plenum.test.pool_transactions.helper import sdk_build_get_txn_request, sdk_sign_and_send_prepared_request
from plenum.test.testing_utils import FakeSomething
@pytest.fixture(scope='function')
def test_node(test_node):
test_node.view_changer = FakeSomething(view_change_in_progress=True,
view_no=1)
return test_node
def test_client_write_request_discard_in_view_change_integration(txnPoolNodeSet,
looper,
sdk_pool_handle,
sdk_wallet_client):
'''
Check that client requests sent in view change will discard.
'''
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, 4)
for node in txnPoolNodeSet:
node.view_changer.view_change_in_progress = True
discard_reqs = sdk_send_random_requests(looper, sdk_pool_handle,
sdk_wallet_client, 1)
with pytest.raises(RequestNackedException) as e:
sdk_get_and_check_replies(looper, discard_reqs)
assert "Client request is discarded since view " \
"change is in progress" in e.args[0]
def test_client_get_request_not_discard_in_view_change_integration(txnPoolNodeSet,
looper,
sdk_pool_handle,
sdk_wallet_client):
'''
Check that client requests sent in view change will discard.
'''
for node in txnPoolNodeSet:
node.view_changer.view_change_in_progress = True
_, steward_did = sdk_wallet_client
request = sdk_build_get_txn_request(looper, steward_did, 1)
sdk_request = sdk_sign_and_send_prepared_request(looper,
sdk_wallet_client,
sdk_pool_handle,
request)
sdk_get_and_check_replies(looper, [sdk_request])
def test_client_write_request_discard_in_view_change_with_dict(test_node):
test_node.send_nack_to_client = check_nack_msg
msg = sdk_gen_request({TXN_TYPE: NODE}).as_dict
test_node.unpackClientMsg(msg, "frm")
checkDiscardMsg([test_node, ], msg, "view change in progress")
def test_client_get_request_not_discard_in_view_change_with_dict(test_node):
sender = "frm"
msg = sdk_gen_request({TXN_TYPE: GET_TXN}).as_dict
def post_to_client_in_box(received_msg, received_frm):
assert received_frm == sender
assert received_msg == msg
test_node.postToClientInBox = post_to_client_in_box
def discard(received_msg, reason, logLevel):
assert False, "Message {} was discard with '{}'".format(received_msg, reason)
test_node.discard = discard
test_node.unpackClientMsg(msg, sender)
def test_client_msg_discard_in_view_change_with_request(test_node):
test_node.send_nack_to_client = check_nack_msg
msg = sdk_gen_request({TXN_TYPE: NODE})
test_node.unpackClientMsg(msg, "frm")
checkDiscardMsg([test_node, ], msg.as_dict, "view change in progress")
def check_nack_msg(req_key, reason, to_client):
assert "Client request is discarded since view " \
"change is in progress" == reason
| import functools
import pytest
from plenum.common.constants import NODE, TXN_TYPE, GET_TXN
from plenum.common.exceptions import RequestNackedException
from plenum.test.helper import sdk_send_random_and_check, \
sdk_send_random_requests, sdk_get_and_check_replies, sdk_gen_request, \
checkDiscardMsg
from plenum.test.pool_transactions.helper import sdk_build_get_txn_request, sdk_sign_and_send_prepared_request
from plenum.test.testing_utils import FakeSomething
@pytest.fixture(scope='function')
def test_node(test_node):
test_node.view_changer = FakeSomething(view_change_in_progress=True,
view_no=1)
return test_node
def test_client_write_request_discard_in_view_change_integration(txnPoolNodeSet,
looper,
sdk_pool_handle,
sdk_wallet_client):
'''
Check that client requests sent in view change will discard.
'''
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, 4)
for node in txnPoolNodeSet:
node.view_changer.view_change_in_progress = True
discard_reqs = sdk_send_random_requests(looper, sdk_pool_handle,
sdk_wallet_client, 1)
with pytest.raises(RequestNackedException) as e:
sdk_get_and_check_replies(looper, discard_reqs)
assert "Client request is discarded since view " \
"change is in progress" in e.args[0]
def test_client_get_request_not_discard_in_view_change_integration(txnPoolNodeSet,
looper,
sdk_pool_handle,
sdk_wallet_client):
'''
Check that client requests sent in view change will discard.
'''
for node in txnPoolNodeSet:
node.view_changer.view_change_in_progress = True
_, steward_did = sdk_wallet_client
request = sdk_build_get_txn_request(looper, steward_did, 1)
sdk_request = sdk_sign_and_send_prepared_request(looper,
sdk_wallet_client,
sdk_pool_handle,
request)
sdk_get_and_check_replies(looper, [sdk_request])
def test_client_write_request_discard_in_view_change_with_dict(test_node):
test_node.send_nack_to_client = check_nack_msg
msg = sdk_gen_request({TXN_TYPE: NODE}).as_dict
test_node.unpackClientMsg(msg, "frm")
checkDiscardMsg([test_node, ], msg, "view change in progress")
def test_client_get_request_not_discard_in_view_change_with_dict(test_node):
sender = "frm"
msg = sdk_gen_request({TXN_TYPE: GET_TXN}).as_dict
def post_to_client_in_box(received_msg, received_frm):
assert received_frm == sender
assert received_msg == msg
test_node.postToClientInBox = post_to_client_in_box
def discard(received_msg, reason, logLevel):
assert False, "Message {} was discard with '{}'".format(received_msg, reason)
test_node.discard = discard
test_node.unpackClientMsg(msg, sender)
def test_client_msg_discard_in_view_change_with_request(test_node):
test_node.send_nack_to_client = check_nack_msg
msg = sdk_gen_request({TXN_TYPE: NODE})
test_node.unpackClientMsg(msg, "frm")
checkDiscardMsg([test_node, ], msg.as_dict, "view change in progress")
def check_nack_msg(req_key, reason, to_client):
assert "Client request is discarded since view " \
"change is in progress" == reason
| Python | 0 |
e8056e4e2c5ef55b46a99afaf7664a734b401443 | add pending as a "sent" state | tests/postman.py | tests/postman.py | import os
from notifications_python_client.errors import HTTPError
from config import config
from tests.test_utils import create_temp_csv, RetryException
def send_notification_via_api(client, template_id, to, message_type):
jenkins_build_id = os.getenv('BUILD_ID', 'No build id')
personalisation = {'build_id': jenkins_build_id}
if message_type == 'sms':
resp_json = client.send_sms_notification(to, template_id, personalisation)
elif message_type == 'email':
resp_json = client.send_email_notification(to, template_id, personalisation)
elif message_type == 'letter':
to.update(personalisation)
resp_json = client.send_letter_notification(template_id, to)
return resp_json['id']
def send_precompiled_letter_via_api(reference, client, pdf_file):
resp_json = client.send_precompiled_letter_notification(reference, pdf_file)
return resp_json['id']
def send_notification_via_csv(upload_csv_page, message_type, seeded=False):
service_id = config['service']['id'] if seeded else config['service']['id']
email = config['service']['seeded_user']['email'] if seeded else config['user']['email']
letter_contact = config['letter_contact_data']
if message_type == 'sms':
template_id = config['service']['templates']['sms']
directory, filename = create_temp_csv({'phone number': config['user']['mobile']})
elif message_type == 'email':
template_id = config['service']['templates']['email']
directory, filename = create_temp_csv({'email address': email})
elif message_type == 'letter':
template_id = config['service']['templates']['letter']
directory, filename = create_temp_csv(letter_contact)
upload_csv_page.go_to_upload_csv_for_service_and_template(service_id, template_id)
upload_csv_page.upload_csv(directory, filename)
notification_id = upload_csv_page.get_notification_id_after_upload()
return notification_id
class NotificationStatuses:
PENDING_VIRUS_CHECK = 'pending-virus-check'
RECEIVED = {'received'}
DELIVERED = {'delivered', 'temporary-failure', 'permanent-failure'}
SENT = RECEIVED | DELIVERED | {'sending', 'pending'}
def get_notification_by_id_via_api(client, notification_id, expected_statuses):
try:
resp = client.get_notification_by_id(notification_id)
notification_status = resp['status']
if notification_status not in expected_statuses:
raise RetryException(
(
'Notification in wrong status '
'id: {id} '
'status: {status} '
'created_at: {created_at} '
'sent_at: {sent_at} '
'completed_at: {completed_at}'
).format(**resp)
)
return resp
except HTTPError as e:
if e.status_code == 404:
message = 'Notification not created yet for id: {}'.format(notification_id)
raise RetryException(message)
else:
raise
| import os
from notifications_python_client.errors import HTTPError
from config import config
from tests.test_utils import create_temp_csv, RetryException
def send_notification_via_api(client, template_id, to, message_type):
jenkins_build_id = os.getenv('BUILD_ID', 'No build id')
personalisation = {'build_id': jenkins_build_id}
if message_type == 'sms':
resp_json = client.send_sms_notification(to, template_id, personalisation)
elif message_type == 'email':
resp_json = client.send_email_notification(to, template_id, personalisation)
elif message_type == 'letter':
to.update(personalisation)
resp_json = client.send_letter_notification(template_id, to)
return resp_json['id']
def send_precompiled_letter_via_api(reference, client, pdf_file):
resp_json = client.send_precompiled_letter_notification(reference, pdf_file)
return resp_json['id']
def send_notification_via_csv(upload_csv_page, message_type, seeded=False):
service_id = config['service']['id'] if seeded else config['service']['id']
email = config['service']['seeded_user']['email'] if seeded else config['user']['email']
letter_contact = config['letter_contact_data']
if message_type == 'sms':
template_id = config['service']['templates']['sms']
directory, filename = create_temp_csv({'phone number': config['user']['mobile']})
elif message_type == 'email':
template_id = config['service']['templates']['email']
directory, filename = create_temp_csv({'email address': email})
elif message_type == 'letter':
template_id = config['service']['templates']['letter']
directory, filename = create_temp_csv(letter_contact)
upload_csv_page.go_to_upload_csv_for_service_and_template(service_id, template_id)
upload_csv_page.upload_csv(directory, filename)
notification_id = upload_csv_page.get_notification_id_after_upload()
return notification_id
class NotificationStatuses:
PENDING_VIRUS_CHECK = 'pending-virus-check'
RECEIVED = {'received'}
DELIVERED = {'delivered', 'temporary-failure', 'permanent-failure'}
SENT = RECEIVED | DELIVERED | {'sending'}
def get_notification_by_id_via_api(client, notification_id, expected_statuses):
try:
resp = client.get_notification_by_id(notification_id)
notification_status = resp['status']
if notification_status not in expected_statuses:
raise RetryException(
(
'Notification in wrong status '
'id: {id} '
'status: {status} '
'created_at: {created_at} '
'sent_at: {sent_at} '
'completed_at: {completed_at}'
).format(**resp)
)
return resp
except HTTPError as e:
if e.status_code == 404:
message = 'Notification not created yet for id: {}'.format(notification_id)
raise RetryException(message)
else:
raise
| Python | 0.000001 |
b951c30a856611ba37bba4cc0e6ef294b55650c9 | allow code to be defined as an array of string | web/Language.py | web/Language.py | import json
import os
class Language:
def __init__(self, key):
"""
Initialize the Language object, which will contain concepts for a given structure
:param key: ID of the language in the meta_info.json file
"""
# Add an empty string to convert SafeString to str
self.key = str(key + "")
self.friendly_name = None
self.categories = None
self.concepts = None
def has_key(self):
"""
Returns a Boolean if the language key exists or not
:rtype: bool
"""
# Empty string is falsy, but text is truthy, but would return return text
return bool(self.key)
def lang_exists(self):
"""
Returns a Boolean if the language (self.key) exists in the thesauruses or not
:rtype: bool
"""
return os.path.exists(os.path.join("web", "thesauruses", self.key))
def load_structure(self, structure_key):
"""
Loads the structure file into the Language object
:param structure_key: the ID for the structure to load
"""
file_path = os.path.join(
"web", "thesauruses", self.key, structure_key) + ".json"
with open(file_path, 'r') as file:
data = file.read()
# parse file
file_json = json.loads(data)
self.friendly_name = file_json["meta"]["language_name"]
self.categories = file_json["categories"]
self.concepts = file_json[structure_key]
def concept(self, concept_key):
"""
Get the concept (including code and comment) from the concept file for that Language
:param concept_key: key for the concept to look up
:returns: a dict containing the code and comment, and possibly the 'not-implemented' flag. They are empty strings if not specified
:rtype: object
"""
if self.concepts.get(concept_key) is None:
return {
"code": "",
"comment": ""
}
if self.concepts.get(concept_key).get("not-implemented", False):
return {
"not-implemented": True,
"code": "",
"comment": self.concepts.get(concept_key).get("comment", "")
}
return self.concepts.get(concept_key)
def concept_unknown(self, concept_key):
"""
Returns a Boolean if the concept is not known
:param concept_key: ID for the concept
:return: Boolean if the concept is not known
"""
return self.concepts.get(concept_key) is None
def concept_implemented(self, concept_key):
"""
Returns a Boolean if the concept is implemented
:param concept_key: ID for the concept
:return: Boolean if the language defines this concept
"""
return self.concept(concept_key).get("not-implemented", False) is False
def concept_code(self, concept_key):
"""
Returns the code portion of the provided concept
:param concept_key: ID for the concept
:return: the string containing the concept's code
"""
code = self.concept(concept_key).get("code")
if isinstance(code, list):
code = "\n".join(code)
return code
def concept_comment(self, concept_key):
"""
Returns the comment portion of the provided concept
:param concept_key: ID for the concept
:return: the string containing the concept's comment
"""
return self.concept(concept_key).get("comment", "")
| import json
import os
class Language:
def __init__(self, key):
"""
Initialize the Language object, which will contain concepts for a given structure
:param key: ID of the language in the meta_info.json file
"""
# Add an empty string to convert SafeString to str
self.key = str(key + "")
self.friendly_name = None
self.categories = None
self.concepts = None
def has_key(self):
"""
Returns a Boolean if the language key exists or not
:rtype: bool
"""
# Empty string is falsy, but text is truthy, but would return return text
return bool(self.key)
def lang_exists(self):
"""
Returns a Boolean if the language (self.key) exists in the thesauruses or not
:rtype: bool
"""
return os.path.exists(os.path.join("web", "thesauruses", self.key))
def load_structure(self, structure_key):
"""
Loads the structure file into the Language object
:param structure_key: the ID for the structure to load
"""
file_path = os.path.join(
"web", "thesauruses", self.key, structure_key) + ".json"
with open(file_path, 'r') as file:
data = file.read()
# parse file
file_json = json.loads(data)
self.friendly_name = file_json["meta"]["language_name"]
self.categories = file_json["categories"]
self.concepts = file_json[structure_key]
def concept(self, concept_key):
"""
Get the concept (including code and comment) from the concept file for that Language
:param concept_key: key for the concept to look up
:returns: a dict containing the code and comment, and possibly the 'not-implemented' flag. They are empty strings if not specified
:rtype: object
"""
if self.concepts.get(concept_key) is None:
return {
"code": "",
"comment": ""
}
if self.concepts.get(concept_key).get("not-implemented", False):
return {
"not-implemented": True,
"code": "",
"comment": self.concepts.get(concept_key).get("comment", "")
}
return self.concepts.get(concept_key)
def concept_unknown(self, concept_key):
"""
Returns a Boolean if the concept is not known
:param concept_key: ID for the concept
:return: Boolean if the concept is not known
"""
return self.concepts.get(concept_key) is None
def concept_implemented(self, concept_key):
"""
Returns a Boolean if the concept is implemented
:param concept_key: ID for the concept
:return: Boolean if the language defines this concept
"""
return self.concept(concept_key).get("not-implemented", False) is False
def concept_code(self, concept_key):
"""
Returns the code portion of the provided concept
:param concept_key: ID for the concept
:return: the string containing the concept's code
"""
return self.concept(concept_key).get("code")
def concept_comment(self, concept_key):
"""
Returns the comment portion of the provided concept
:param concept_key: ID for the concept
:return: the string containing the concept's comment
"""
return self.concept(concept_key).get("comment", "")
| Python | 0.000028 |
0781b47512cbab5fc1a090ff68b5f9d434a864af | Update examples/API_v2/lookup_users_using_user_ids.py | examples/API_v2/lookup_users_using_user_ids.py | examples/API_v2/lookup_users_using_user_ids.py | import tweepy
# Replace bearer token value with your own
bearer_token = ""
# Initializing the Tweepy client
client = tweepy.Client(bearer_token)
# Replace User IDs
ids = [2244994945, 6253282]
# By default the user ID, name and username are returned. user_fields can be
# used to specify the additional user data that you want returned for each user
# e.g. profile_image_url
users = client.get_users(ids=ids, user_fields=["profile_image_url"])
# Print the username and the user's profile image url
for user in users.data:
print(user.username)
print(user.profile_image_url)
| import tweepy
# Replace bearer token value with your own
bearer_token = ""
# Initializing the Tweepy client
client = tweepy.Client(bearer_token)
# Replace User IDs
ids = [2244994945, 6253282]
# By default the user ID, name and username are returned. user_fields can be
# used to specify the additional user data that you want returned for each user
# e.g. profile_image_url
users = client.get_users(ids, user_fields=["profile_image_url"])
# Print the username and the user's profile image url
for user in users.data:
print(user.username)
print(user.profile_image_url)
| Python | 0 |
54c81494cbbe9a20db50596e68c57e1caa624043 | Add a User post_save hook for creating user profiles | src-django/authentication/signals/user_post_save.py | src-django/authentication/signals/user_post_save.py | from authentication.models import UserProfile
from django.contrib.auth.models import User, Group
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.conf import settings
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=User)
def on_user_post_save(sender, instance=None, created=False, **kwargs):
# Normally, users automatically get a Token created for them (if they do not
# already have one) when they hit
#
# rest_framework.authtoken.views.obtain_auth_token view
#
# This will create an authentication token for newly created users so the
# user registration endpoint can return a token back to Ember
# (thus avoiding the need to hit login endpoint)
if created:
user_profile = UserProfile.objects.create(user=instance, is_email_confirmed=False)
user_profile.save()
Token.objects.create(user=instance)
# Add new user to the proper user group
normal_users_group, created = Group.objects.get_or_create(name=settings.NORMAL_USER_GROUP)
instance.groups.add(normal_users_group)
| from django.contrib.auth.models import User, Group
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.conf import settings
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=User)
def on_user_post_save(sender, instance=None, created=False, **kwargs):
# Normally, users automatically get a Token created for them (if they do not
# already have one) when they hit
#
# rest_framework.authtoken.views.obtain_auth_token view
#
# This will create an authentication token for newly created users so the
# user registration endpoint can return a token back to Ember
# (thus avoiding the need to hit login endpoint)
if created:
Token.objects.create(user=instance)
# Add new user to the proper user group
normal_users_group, created = Group.objects.get_or_create(name=settings.NORMAL_USER_GROUP)
instance.groups.add(normal_users_group)
| Python | 0.000001 |
9b678e184a568baea857ca68fcacb5070db6792d | update modulation.py | examples/modulation.py | examples/modulation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import initExample
from lase.core import KClient
# Driver to use
from lase.drivers import Oscillo
# Modules to import
import numpy as np
import matplotlib.pyplot as plt
import time
# Connect to Lase
host = '192.168.1.4' # Lase IP address
client = KClient(host)
driver = Oscillo(client) # Replace with appropriate driver
# Enable laser
driver.start_laser()
# Set laser current
current = 15 # mA
driver.set_laser_current(current)
# Modulation on DAC
amp_mod = 0.2
freq_mod = 1e6
driver.dac[1, :] = amp_mod*np.sin(2 * np.pi * freq_mod * driver.sampling.t)
driver.set_dac()
# Signal on ADC
driver.get_adc()
signal = driver.adc[0,:]
# Plot
plt.plot(driver.sampling.t, signal)
plt.show()
# Plot
psd_signal = np.abs(np.fft.fft(signal)) ** 2
plt.semilogy(1e-6 * np.fft.fftshift(driver.sampling.f_fft), np.fft.fftshift(psd_signal))
plt.xlabel('Frequency (MHz)')
plt.show()
# Disable laser
driver.stop_laser()
driver.close() | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import initExample
from lase.core import KClient
# Driver to use
from lase.drivers import Oscillo
# Modules to import
import numpy as np
import matplotlib.pyplot as plt
import time
# Connect to Lase
host = '192.168.1.4' # Lase IP address
client = KClient(host)
driver = Oscillo(client) # Replace with appropriate driver
# Enable laser
driver.start_laser()
# Set laser current
current = 15 #mA
driver.set_laser_current(current)
# Modulation on DAC
amp_mod = 0.2
freq_mod = 1e6
driver.dac[1,:] = amp_mod*np.sin(2*np.pi*freq_mod*driver.sampling.t)
driver.set_dac()
# Signal on ADC
driver.get_adc()
signal = driver.adc[0,:]
# Plot
plt.plot(driver.sampling.t, signal)
plt.show()
# Plot
psd_signal = np.abs(np.fft.fft(signal))**2
plt.semilogy(1e-6 * np.fft.fftshift(driver.sampling.f_fft), np.fft.fftshift(psd_signal))
plt.xlabel('Frequency (MHz)')
plt.show()
# Disable laser
driver.stop_laser()
driver.close()
| Python | 0.000001 |
1014c809638157da85794223c4990b5ae20512fa | Add crawled_at field back | hackernews_scrapy/items.py | hackernews_scrapy/items.py | # -*- coding: utf-8 -*-
import scrapy
class HackernewsScrapyItem(scrapy.Item):
title = scrapy.Field()
url = scrapy.Field()
crawled_at = scrapy.Field(serializer=str)
| # -*- coding: utf-8 -*-
import scrapy
class HackernewsScrapyItem(scrapy.Item):
title = scrapy.Field()
url = scrapy.Field()
| Python | 0 |
d8cb4384f32f4d0e20f3212a36cc01915260f7a8 | Support custom actions in search router | tests/routers.py | tests/routers.py | """Search router."""
from rest_framework.routers import DefaultRouter, DynamicRoute, Route
class SearchRouter(DefaultRouter):
"""Custom router for search endpoints.
Search endpoints don't follow REST principles and thus don't need
routes that default router provides.
"""
routes = [
Route(
url=r"^{prefix}{trailing_slash}$",
mapping={"get": "list", "post": "list_with_post"},
name="{basename}",
initkwargs={},
detail=False,
),
# Dynamically generated list routes. Generated using
# @action(detail=False) decorator on methods of the viewset.
DynamicRoute(
url=r'^{prefix}/{url_path}{trailing_slash}$',
name='{basename}-{url_name}',
detail=False,
initkwargs={}
),
Route(
url=r'^{prefix}/{lookup}{trailing_slash}$',
mapping={
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
},
name='{basename}-detail',
detail=True,
initkwargs={'suffix': 'Instance'}
),
# Dynamically generated detail routes. Generated using
# @action(detail=True) decorator on methods of the viewset.
DynamicRoute(
url=r'^{prefix}/{lookup}/{url_path}{trailing_slash}$',
name='{basename}-{url_name}',
detail=True,
initkwargs={}
),
]
| """Search router."""
from rest_framework.routers import DefaultRouter, Route
class SearchRouter(DefaultRouter):
"""Custom router for search endpoints.
Search endpoints don't follow REST principles and thus don't need
routes that default router provides.
"""
routes = [
Route(
url=r"^{prefix}{trailing_slash}$",
mapping={"get": "list", "post": "list_with_post"},
name="{basename}",
initkwargs={},
detail=False,
)
]
| Python | 0 |
43922bb7cf5015cbf3538195d3d4f93ff8c9ec18 | Bump version | tomb_cli/__about__.py | tomb_cli/__about__.py | __title__ = 'tomb_cli'
__summary__ = 'Top level CLI command for tomb'
__uri__ = 'http://github.com/tomborine/tomb_cli'
__version__ = '0.0.2'
__author__ = 'John Anderson'
__email__ = 'sontek@gmail.com'
__license__ = 'MIT'
__copyright__ = '2015 John Anderson (sontek)'
| __title__ = 'tomb_cli'
__summary__ = 'Top level CLI command for tomb'
__uri__ = 'http://github.com/tomborine/tomb_cli'
__version__ = '0.0.1'
__author__ = 'John Anderson'
__email__ = 'sontek@gmail.com'
__license__ = 'MIT'
__copyright__ = '2015 John Anderson (sontek)'
| Python | 0 |
18f373ffc1e49b33708ae2303b61ccf76ffa686e | Use pylab.load to read in data. | examples/ortho_demo.py | examples/ortho_demo.py | from matplotlib.toolkits.basemap import Basemap
from pylab import *
# read in topo data from pickle (on a regular lat/lon grid)
etopo = array(load('etopo20data.gz'),'f')
lons = array(load('etopo20lons.gz'),'f')
lats = array(load('etopo20lats.gz'),'f')
# create Basemap instance for Orthographic (satellite view) projection.
lon_0 = float(raw_input('enter reference longitude (lon_0):'))
lat_0 = float(raw_input('enter reference latitude (lat_0):'))
fillcont = int(raw_input('fill continents? (1 for yes, 0 for no):'))
m = Basemap(projection='ortho',lon_0=lon_0,lat_0=lat_0)
# compute native map projection coordinates for lat/lon grid.
lons, lats = meshgrid(lons, lats)
x,y = m(lons,lats)
# create figure with same aspect ratio as map.
fig=m.createfigure().add_axes([0.05,0.05,0.9,0.9])
# make filled contour plot.
cs = m.contourf(x,y,etopo,30,cmap=cm.jet)
# draw coastlines.
m.drawcoastlines()
# draw a line around the map region.
m.drawmapboundary()
if fillcont:
m.fillcontinents()
# draw parallels and meridians.
m.drawparallels(arange(-90.,120.,30.))
m.drawmeridians(arange(0.,420.,60.))
title('Orthographic Map Centered on Lon=%s, Lat=%s' % (lon_0,lat_0))
show()
| from matplotlib import rcParams, use
rcParams['numerix'] = 'Numeric' # make sure Numeric is used (to read pickle)
from matplotlib.toolkits.basemap import Basemap
import cPickle
from pylab import *
# read in topo data from pickle (on a regular lat/lon grid)
topodict = cPickle.load(open('etopo20.pickle','rb'))
etopo = topodict['data']; lons = topodict['lons']; lats = topodict['lats']
# create Basemap instance for Orthographic (satellite view) projection.
lon_0 = float(raw_input('enter reference longitude (lon_0):'))
lat_0 = float(raw_input('enter reference latitude (lat_0):'))
fillcont = int(raw_input('fill continents? (1 for yes, 0 for no):'))
m = Basemap(projection='ortho',lon_0=lon_0,lat_0=lat_0)
# compute native map projection coordinates for lat/lon grid.
lons, lats = meshgrid(lons,lats)
x,y = m(lons,lats)
# create figure with same aspect ratio as map.
fig=m.createfigure().add_axes([0.05,0.05,0.9,0.9])
# make filled contour plot.
cs = m.contourf(x,y,etopo,30,cmap=cm.jet)
# draw coastlines.
m.drawcoastlines()
# draw a line around the map region.
m.drawmapboundary()
if fillcont:
m.fillcontinents()
# draw parallels and meridians.
m.drawparallels(arange(-90.,120.,30.))
m.drawmeridians(arange(0.,420.,60.))
title('Orthographic Map Centered on Lon=%s, Lat=%s' % (lon_0,lat_0))
show()
| Python | 0.000094 |
35d0ce026741c65cdb834f5828ef4000f6d06150 | fix for runtest path handling from Marek | tests/runtest.py | tests/runtest.py | #! /usr/bin/env python
"""
Test runner for main pygr tests.
Collects all files ending in _test.py and executes them with
unittest.TextTestRunner.
"""
import os, sys, re, unittest, shutil, re, shutil
from testlib import testutil, testoptions
from pygr import logger
def all_tests():
"Returns all file names that end in _test.py"
patt = re.compile("_test.py$")
mods = os.listdir(os.path.normpath(os.path.dirname(__file__)))
mods = filter(patt.search, mods)
mods = [ m.rstrip(".py") for m in mods ]
# some predictable order...
mods.sort()
return mods
def run(targets, options):
"Imports and runs the modules names that are contained in the 'targets'"
success = errors = 0
# run the tests by importing the module and getting its test suite
for name in targets:
try:
testutil.info( 'running tests for module %s' % name )
mod = __import__( name )
suite = mod.get_suite()
runner = unittest.TextTestRunner(verbosity=options.verbosity,
descriptions=0)
results = runner.run( suite )
# count tests and errors
success += results.testsRun - \
len(results.errors) - len(results.failures)
errors += len(results.errors) + len(results.failures)
# if we're in strict mode stop on errors
if options.strict and errors:
testutil.error( "strict mode stops on errors" )
break
except ImportError:
testutil.error( "unable to import module '%s'" % name )
# each skipped testsuite generates a message
skipped = len(testutil.SKIP_MESSAGES)
# generate warnings on skipped tests
for message in testutil.SKIP_MESSAGES:
testutil.warn(message)
# summarize the run
testutil.info('=' * 59)
testutil.info('''\
%s tests passed, %s tests failed, %s suites skipped; %d total''' % \
(success, errors, skipped, success + errors + skipped))
if __name__ == '__main__':
# gets the prebuild option parser
parser = testoptions.option_parser()
# parse the options
options, args = parser.parse_args()
# modules: from command line args or all modules
targets = args or all_tests()
# get rid of the .py ending in case full module names were passed in
# the command line
targets = [ t.rstrip(".py") for t in targets ]
# exclusion mode
if options.exclude:
targets = [ name for name in all_tests() if name not in targets ]
# disables debug messages at < 2 verbosity
if options.verbosity != 2:
logger.disable('DEBUG')
# run all the tests
if options.coverage:
testutil.generate_coverage(run, 'coverage', targets=targets,
options=options)
else:
run(targets=targets, options=options)
| #! /usr/bin/env python
"""
Test runner for main pygr tests.
Collects all files ending in _test.py and executes them with
unittest.TextTestRunner.
"""
import os, sys, re, unittest, shutil, re, shutil
from testlib import testutil, testoptions
from pygr import logger
def all_tests():
"Returns all file names that end in _test.py"
patt = re.compile("_test.py$")
mods = os.listdir(os.getcwd())
mods = filter(patt.search, mods)
mods = [ m.rstrip(".py") for m in mods ]
# some predictable order...
mods.sort()
return mods
def run(targets, options):
"Imports and runs the modules names that are contained in the 'targets'"
success = errors = 0
# run the tests by importing the module and getting its test suite
for name in targets:
try:
testutil.info( 'running tests for module %s' % name )
mod = __import__( name )
suite = mod.get_suite()
runner = unittest.TextTestRunner(verbosity=options.verbosity,
descriptions=0)
results = runner.run( suite )
# count tests and errors
success += results.testsRun - \
len(results.errors) - len(results.failures)
errors += len(results.errors) + len(results.failures)
# if we're in strict mode stop on errors
if options.strict and errors:
testutil.error( "strict mode stops on errors" )
break
except ImportError:
testutil.error( "unable to import module '%s'" % name )
# each skipped testsuite generates a message
skipped = len(testutil.SKIP_MESSAGES)
# generate warnings on skipped tests
for message in testutil.SKIP_MESSAGES:
testutil.warn(message)
# summarize the run
testutil.info('=' * 59)
testutil.info('''\
%s tests passed, %s tests failed, %s suites skipped; %d total''' % \
(success, errors, skipped, success + errors + skipped))
if __name__ == '__main__':
# gets the prebuild option parser
parser = testoptions.option_parser()
# parse the options
options, args = parser.parse_args()
# modules: from command line args or all modules
targets = args or all_tests()
# get rid of the .py ending in case full module names were passed in
# the command line
targets = [ t.rstrip(".py") for t in targets ]
# exclusion mode
if options.exclude:
targets = [ name for name in all_tests() if name not in targets ]
# disables debug messages at < 2 verbosity
if options.verbosity != 2:
logger.disable('DEBUG')
# run all the tests
if options.coverage:
testutil.generate_coverage(run, 'coverage', targets=targets,
options=options)
else:
run(targets=targets, options=options)
| Python | 0 |
6d4c5618db43725c0af2b37661911a960bfa0aa2 | Allow an already deleted watch to not fail the stack.delete(). | heat/engine/cloud_watch.py | heat/engine/cloud_watch.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import logging
import json
import os
from heat.common import exception
from heat.db import api as db_api
from heat.engine.resources import Resource
logger = logging.getLogger('heat.engine.cloud_watch')
class CloudWatchAlarm(Resource):
properties_schema = {'ComparisonOperator': {'Type': 'String',
'AllowedValues': ['GreaterThanOrEqualToThreshold',
'GreaterThanThreshold', 'LessThanThreshold',
'LessThanOrEqualToThreshold']},
'AlarmDescription': {'Type': 'String'},
'EvaluationPeriods': {'Type': 'String'},
'MetricName': {'Type': 'String'},
'Namespace': {'Type': 'String'},
'Period': {'Type': 'String'},
'Statistic': {'Type': 'String',
'AllowedValues': ['SampleCount', 'Average', 'Sum',
'Minimum', 'Maximum']},
'AlarmActions': {'Type': 'List'},
'OKActions': {'Type': 'List'},
'InsufficientDataActions': {'Type': 'List'},
'Threshold': {'Type': 'String'},
'Units': {'Type': 'String',
'AllowedValues': ['Seconds', 'Microseconds', 'Milliseconds',
'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes',
'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits',
'Terabits', 'Percent', 'Count', 'Bytes/Second',
'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second',
'Terabytes/Second', 'Bits/Second', 'Kilobits/Second',
'Megabits/Second', 'Gigabits/Second', 'Terabits/Second',
'Count/Second', None]}}
def __init__(self, name, json_snippet, stack):
super(CloudWatchAlarm, self).__init__(name, json_snippet, stack)
self.instance_id = ''
def validate(self):
'''
Validate the Properties
'''
return Resource.validate(self)
def create(self):
if self.state in [self.CREATE_IN_PROGRESS, self.CREATE_COMPLETE]:
return
self.state_set(self.CREATE_IN_PROGRESS)
Resource.create(self)
wr_values = {
'name': self.name,
'rule': self.parsed_template()['Properties'],
'state': 'NORMAL',
'stack_name': self.stack.name
}
wr = db_api.watch_rule_create(self.stack.context, wr_values)
self.instance_id = wr.id
self.state_set(self.CREATE_COMPLETE)
def delete(self):
if self.state in [self.DELETE_IN_PROGRESS, self.DELETE_COMPLETE]:
return
self.state_set(self.DELETE_IN_PROGRESS)
Resource.delete(self)
try:
db_api.watch_rule_delete(self.stack.context, self.name)
except Exception as ex:
pass
self.state_set(self.DELETE_COMPLETE)
def FnGetRefId(self):
return unicode(self.name)
def strict_dependency(self):
return False
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import logging
import json
import os
from heat.common import exception
from heat.db import api as db_api
from heat.engine.resources import Resource
logger = logging.getLogger('heat.engine.cloud_watch')
class CloudWatchAlarm(Resource):
properties_schema = {'ComparisonOperator': {'Type': 'String',
'AllowedValues': ['GreaterThanOrEqualToThreshold',
'GreaterThanThreshold', 'LessThanThreshold',
'LessThanOrEqualToThreshold']},
'AlarmDescription': {'Type': 'String'},
'EvaluationPeriods': {'Type': 'String'},
'MetricName': {'Type': 'String'},
'Namespace': {'Type': 'String'},
'Period': {'Type': 'String'},
'Statistic': {'Type': 'String',
'AllowedValues': ['SampleCount', 'Average', 'Sum',
'Minimum', 'Maximum']},
'AlarmActions': {'Type': 'List'},
'OKActions': {'Type': 'List'},
'InsufficientDataActions': {'Type': 'List'},
'Threshold': {'Type': 'String'},
'Units': {'Type': 'String',
'AllowedValues': ['Seconds', 'Microseconds', 'Milliseconds',
'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes',
'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits',
'Terabits', 'Percent', 'Count', 'Bytes/Second',
'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second',
'Terabytes/Second', 'Bits/Second', 'Kilobits/Second',
'Megabits/Second', 'Gigabits/Second', 'Terabits/Second',
'Count/Second', None]}}
def __init__(self, name, json_snippet, stack):
super(CloudWatchAlarm, self).__init__(name, json_snippet, stack)
self.instance_id = ''
def validate(self):
'''
Validate the Properties
'''
return Resource.validate(self)
def create(self):
if self.state in [self.CREATE_IN_PROGRESS, self.CREATE_COMPLETE]:
return
self.state_set(self.CREATE_IN_PROGRESS)
Resource.create(self)
wr_values = {
'name': self.name,
'rule': self.parsed_template()['Properties'],
'state': 'NORMAL',
'stack_name': self.stack.name
}
wr = db_api.watch_rule_create(self.stack.context, wr_values)
self.instance_id = wr.id
self.state_set(self.CREATE_COMPLETE)
def delete(self):
if self.state in [self.DELETE_IN_PROGRESS, self.DELETE_COMPLETE]:
return
self.state_set(self.DELETE_IN_PROGRESS)
Resource.delete(self)
db_api.watch_rule_delete(self.stack.context, self.name)
self.state_set(self.DELETE_COMPLETE)
def FnGetRefId(self):
return unicode(self.name)
def strict_dependency(self):
return False
| Python | 0 |
2ab2927b2ee4f821fd75050da19a7f1f81aaeca8 | FIX divide mnist features by 255 in mlp example (#11961) | examples/neural_networks/plot_mnist_filters.py | examples/neural_networks/plot_mnist_filters.py | """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix have the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.neural_network import MLPClassifier
print(__doc__)
# Load data from https://www.openml.org/d/554
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
X = X / 255.
# rescale the data, use the traditional train/test split
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# solver='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
solver='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix have the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.neural_network import MLPClassifier
print(__doc__)
# Load data from https://www.openml.org/d/554
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
# rescale the data, use the traditional train/test split
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# solver='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
solver='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| Python | 0 |
45c67e0b9bc168549fdd1eb2cde3599aae921567 | Update base.py | webhook/base.py | webhook/base.py | """
Base webhook implementation
"""
import json
from django.http import HttpResponse
from django.views.generic import View
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
class WebhookBase(View):
"""
Simple Webhook base class to handle the most standard case.
"""
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(WebhookBase, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
data = json.loads(request.body.decode('utf-8'))
self.process_webhook(data)
return HttpResponse(status=200)
def process_webhook(self, data):
"""
Unimplemented method
"""
raise NotImplementedError
| """
Base webhook implementation
"""
import json
from django.http import HttpResponse
from django.views.generic import View
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
class WebhookBase(View):
"""
Simple Webhook base class to handle the most standard case.
"""
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(WebhookBase, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
data = json.loads(request.body.decode('utf-8'))
self.process_webhook(data)
return HttpResponse(status=200)
def process_webhook(self, data=None):
"""
Unimplemented method
"""
raise NotImplementedError
| Python | 0.000001 |
1abd5833ef8936185f4c8870d300b3793da4ce00 | Fix regex for parsing Solr dates. The solr documentation suggests it will always use 4-digit years. In practice, though, it returns < 4 digits for years before 1000 AD. This fixes the date-parsing regex to account for the discrepancy. | sunburnt/dates.py | sunburnt/dates.py | from __future__ import absolute_import
import datetime
import re
import warnings
try:
import mx.DateTime
except ImportError:
warnings.warn(
"mx.DateTime not found, retricted to Python datetime objects",
ImportWarning)
mx = None
year = r'[+/-]?\d+'
tzd = r'Z|((?P<tzd_sign>[-+])(?P<tzd_hour>\d\d):(?P<tzd_minute>\d\d))'
extended_iso_template = r'(?P<year>'+year+r""")
(-(?P<month>\d\d)
(-(?P<day>\d\d)
([T%s](?P<hour>\d\d)
:(?P<minute>\d\d)
(:(?P<second>\d\d)
(.(?P<fraction>\d+))?)?
("""+tzd+""")?)?
)?)?"""
extended_iso = extended_iso_template % " "
extended_iso_re = re.compile('^'+extended_iso+'$', re.X)
def datetime_from_w3_datestring(s):
""" We need to extend ISO syntax (as permitted by the standard) to allow
for dates before 0AD and after 9999AD. This is how to parse such a string"""
m = extended_iso_re.match(s)
if not m:
raise ValueError
d = m.groupdict()
d['year'] = int(d['year'])
d['month'] = int(d['month'] or 1)
d['day'] = int(d['day'] or 1)
d['hour'] = int(d['hour'] or 0)
d['minute'] = int(d['minute'] or 0)
d['fraction'] = d['fraction'] or '0'
d['second'] = float("%s.%s" % ((d['second'] or '0'), d['fraction']))
del d['fraction']
if d['tzd_sign']:
if d['tzd_sign'] == '+':
tzd_sign = 1
elif d['tzd_sign'] == '-':
tzd_sign = -1
try:
tz_delta = datetime_delta_factory(tzd_sign*int(d['tzd_hour']),
tzd_sign*int(d['tzd_minute']))
except DateTimeRangeError:
raise ValueError(e.args[0])
else:
tz_delta = datetime_delta_factory(0, 0)
del d['tzd_sign']
del d['tzd_hour']
del d['tzd_minute']
try:
dt = datetime_factory(**d) + tz_delta
except DateTimeRangeError:
raise ValueError(e.args[0])
return dt
class DateTimeRangeError(ValueError):
pass
if mx:
def datetime_factory(**kwargs):
try:
return mx.DateTime.DateTimeFrom(**kwargs)
except mx.DateTime.RangeError:
raise DateTimeRangeError(e.args[0])
else:
def datetime_factory(**kwargs):
try:
return datetime.datetime(**kwargs)
except ValueError, e:
raise DateTimeRangeError(e.args[0])
if mx:
def datetime_delta_factory(hours, minutes):
return mx.DateTime.DateTimeDelta(0, hours, minutes)
else:
def datetime_delta_factory(hours, minutes):
return datetime.timedelta(hours=hours, minutes=minutes)
| from __future__ import absolute_import
import datetime
import re
import warnings
try:
import mx.DateTime
except ImportError:
warnings.warn(
"mx.DateTime not found, retricted to Python datetime objects",
ImportWarning)
mx = None
year = r'[+/-]?\d*\d\d\d\d'
tzd = r'Z|((?P<tzd_sign>[-+])(?P<tzd_hour>\d\d):(?P<tzd_minute>\d\d))'
extended_iso_template = r'(?P<year>'+year+r""")
(-(?P<month>\d\d)
(-(?P<day>\d\d)
([T%s](?P<hour>\d\d)
:(?P<minute>\d\d)
(:(?P<second>\d\d)
(.(?P<fraction>\d+))?)?
("""+tzd+""")?)?
)?)?"""
extended_iso = extended_iso_template % " "
extended_iso_re = re.compile('^'+extended_iso+'$', re.X)
def datetime_from_w3_datestring(s):
""" We need to extend ISO syntax (as permitted by the standard) to allow
for dates before 0AD and after 9999AD. This is how to parse such a string"""
m = extended_iso_re.match(s)
if not m:
raise ValueError
d = m.groupdict()
d['year'] = int(d['year'])
d['month'] = int(d['month'] or 1)
d['day'] = int(d['day'] or 1)
d['hour'] = int(d['hour'] or 0)
d['minute'] = int(d['minute'] or 0)
d['fraction'] = d['fraction'] or '0'
d['second'] = float("%s.%s" % ((d['second'] or '0'), d['fraction']))
del d['fraction']
if d['tzd_sign']:
if d['tzd_sign'] == '+':
tzd_sign = 1
elif d['tzd_sign'] == '-':
tzd_sign = -1
try:
tz_delta = datetime_delta_factory(tzd_sign*int(d['tzd_hour']),
tzd_sign*int(d['tzd_minute']))
except DateTimeRangeError:
raise ValueError(e.args[0])
else:
tz_delta = datetime_delta_factory(0, 0)
del d['tzd_sign']
del d['tzd_hour']
del d['tzd_minute']
try:
dt = datetime_factory(**d) + tz_delta
except DateTimeRangeError:
raise ValueError(e.args[0])
return dt
class DateTimeRangeError(ValueError):
pass
if mx:
def datetime_factory(**kwargs):
try:
return mx.DateTime.DateTimeFrom(**kwargs)
except mx.DateTime.RangeError:
raise DateTimeRangeError(e.args[0])
else:
def datetime_factory(**kwargs):
try:
return datetime.datetime(**kwargs)
except ValueError, e:
raise DateTimeRangeError(e.args[0])
if mx:
def datetime_delta_factory(hours, minutes):
return mx.DateTime.DateTimeDelta(0, hours, minutes)
else:
def datetime_delta_factory(hours, minutes):
return datetime.timedelta(hours=hours, minutes=minutes)
| Python | 0 |
a751e7f51412581e14cc822f1e443ed97746055a | Update structures example | examples/structures.py | examples/structures.py | from numba import struct, jit, double
import numpy as np
record_type = struct([('x', double), ('y', double)])
record_dtype = record_type.get_dtype()
a = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=record_dtype)
@jit(argtypes=[record_type[:]])
def hypot(data):
# return types of numpy functions are inferred
result = np.empty_like(data, dtype=np.float64)
# notice access to structure elements 'x' and 'y' via attribute access
# You can also index by field name or field index:
# data[i].x == data[i]['x'] == data[i][0]
for i in range(data.shape[0]):
result[i] = np.sqrt(data[i].x * data[i].x + data[i].y * data[i].y)
return result
print hypot(a)
# Notice inferred return type
print hypot.signature
# Notice native sqrt calls and for.body direct access to memory...
print hypot.lfunc
| from numba import struct, jit, double
import numpy as np
record_type = struct([('x', double), ('y', double)])
record_dtype = record_type.get_dtype()
a = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=record_dtype)
@jit(argtypes=[record_type[:]])
def hypot(data):
# return types of numpy functions are inferred
result = np.empty_like(data, dtype=np.float64)
# notice access to structure elements 'x' and 'y' via attribute access
for i in range(data.shape[0]):
result[i] = np.sqrt(data[i].x * data[i].x + data[i].y * data[i].y)
return result
print hypot(a)
# Notice inferred return type
print hypot.signature
# Notice native sqrt calls and for.body direct access to memory...
print hypot.lfunc
| Python | 0 |
f6045517b27bf6f878ab2906aa6b793cfd640786 | upgrade anymail | toucan_conf/settings/prod/__init__.py | toucan_conf/settings/prod/__init__.py | import os
from .. import *
try:
from ..secrets import ALLOWED_HOSTS
except ImportError:
raise ImportError('Please set ALLOWED_HOSTS in the secrets file when using production config.')
try:
from ..secrets import ANYMAIL
except ImportError:
raise ImportError('Please set ANYMAIL settings in the secrets file when using production config.')
INSTALLED_APPS += [
'anymail'
]
DEBUG = False
DEFAULT_FROM_EMAIL = 'toucan@brickwall.at'
STATIC_ROOT = os.path.join(BASE_DIR, '_static')
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
# install raven handler if configured
try:
import raven
from ..secrets import RAVEN_DSN
except ImportError:
pass
else:
RAVEN_CONFIG = {
'dsn': RAVEN_DSN,
# If you are using git, you can also automatically configure the
# release based on the git info.
'release': raven.fetch_git_sha(BASE_DIR),
}
| import os
from .. import *
try:
from ..secrets import ALLOWED_HOSTS
except ImportError:
raise ImportError('Please set ALLOWED_HOSTS in the secrets file when using production config.')
try:
from ..secrets import ANYMAIL
except ImportError:
raise ImportError('Please set ANYMAIL settings in the secrets file when using production config.')
INSTALLED_APPS += [
'anymail'
]
DEBUG = False
DEFAULT_FROM_EMAIL = 'toucan@brickwall.at'
STATIC_ROOT = os.path.join(BASE_DIR, '_static')
EMAIL_BACKEND = "anymail.backends.mailgun.MailgunBackend"
# install raven handler if configured
try:
import raven
from ..secrets import RAVEN_DSN
except ImportError:
pass
else:
RAVEN_CONFIG = {
'dsn': RAVEN_DSN,
# If you are using git, you can also automatically configure the
# release based on the git info.
'release': raven.fetch_git_sha(BASE_DIR),
}
| Python | 0 |
594c68fb47ee37ff13b02637462d2bd79beb6f43 | update config tests for schema v3 | assigner/tests/config_test.py | assigner/tests/config_test.py | from assigner.tests.utils import AssignerTestCase
from assigner.config.versions import (
validate,
get_version,
upgrade,
ValidationError,
VersionError,
)
from assigner.config.upgrades import UPGRADES
from assigner.config.schemas import SCHEMAS
CONFIGS = [
{ # Version 0
"token": "xxx gitlab token xxx",
"gitlab-host": "https://git.gitlab.com",
"namespace": "assigner-testing",
"semester": "2016-SP",
"roster": [],
},
{ # Version 1
"gitlab-token": "xxx gitlab token xxx",
"gitlab-host": "https://git.gitlab.com",
"namespace": "assigner-testing",
"semester": "2016-SP",
"roster": [],
},
{ # Version 2
"version": 2,
"backend": {
"name": "gitlab",
"token": "xxx gitlab token xxx",
"host": "https://git.gitlab.com",
},
"namespace": "assigner-testing",
"semester": "2016-SP",
"roster": [],
},
{ # Version 3
"version": 3,
"backend": {
"name": "gitlab",
"token": "xxx gitlab token xxx",
"host": "https://git.gitlab.com",
},
"namespace": "assigner-testing",
"semester": "2016-SP",
"roster": [],
"canvas-courses": [],
},
]
EMPTY_CONFIGS = [
{},
{},
{"version": 2, "backend": {"name": "gitlab",},},
{"version": 3, "backend": {"name": "gitlab",}, "canvas-courses": []},
]
TOO_NEW_CONFIG = {"version": len(SCHEMAS)}
class UpgradeTester(AssignerTestCase):
def test_that_we_are_testing_all_schemas_and_upgrades(self):
self.assertEqual(len(CONFIGS), len(SCHEMAS))
self.assertEqual(len(CONFIGS), len(UPGRADES) + 1)
self.assertEqual(len(CONFIGS), len(EMPTY_CONFIGS))
def test_get_version(self):
for version, config in enumerate(CONFIGS):
self.assertEqual(version, get_version(config))
def test_validate(self):
for version, config in enumerate(CONFIGS):
try:
validate(config, version)
except ValidationError as e:
self.fail(
"Config version {} does not validate:\n\n{}".format(
version, e.message
)
)
def test_UPGRADES(self):
for version, config in enumerate(CONFIGS[:-1]):
config = dict(config)
config = UPGRADES[version](config)
self.assertEqual(config, CONFIGS[version + 1])
try:
validate(config, version + 1)
except ValidationError as e:
self.fail(
"UPGRADEing from version {} to version {} results in an invalid config:\n\n{}".format(
version, version + 1, e.message
)
)
def test_upgrade(self):
for version, config in enumerate(CONFIGS):
config = dict(config)
try:
config = upgrade(config)
validate(config)
except ValidationError as e:
self.fail(
"Upgrading from version {} to version {} results in an invalid config:\n\n{}".format(
version, len(CONFIGS) - 1, e.message
)
)
self.assertEqual(config, CONFIGS[-1])
def test_empty_config_upgrade(self):
for config in EMPTY_CONFIGS:
config = upgrade(config)
self.assertEqual(config, EMPTY_CONFIGS[-1])
def test_too_new_config(self):
with self.assertRaises(VersionError):
validate(TOO_NEW_CONFIG)
| from assigner.tests.utils import AssignerTestCase
from assigner.config.versions import validate, get_version, upgrade, ValidationError, VersionError
from assigner.config.upgrades import UPGRADES
from assigner.config.schemas import SCHEMAS
CONFIGS = [
{ # Version 0
"token": "xxx gitlab token xxx",
"gitlab-host": "https://git.gitlab.com",
"namespace": "assigner-testing",
"semester": "2016-SP",
"roster": [],
},
{ # Version 1
"gitlab-token": "xxx gitlab token xxx",
"gitlab-host": "https://git.gitlab.com",
"namespace": "assigner-testing",
"semester": "2016-SP",
"roster": [],
},
{ # Version 2
"version": 2,
"backend": {
"name": "gitlab",
"token": "xxx gitlab token xxx",
"host": "https://git.gitlab.com",
},
"namespace": "assigner-testing",
"semester": "2016-SP",
"roster": [],
},
]
EMPTY_CONFIGS = [
{},
{},
{
"version": 2,
"backend": {
"name": "gitlab",
},
},
]
TOO_NEW_CONFIG = {"version": len(SCHEMAS)}
class UpgradeTester(AssignerTestCase):
def test_that_we_are_testing_all_schemas_and_upgrades(self):
self.assertEqual(len(CONFIGS), len(SCHEMAS))
self.assertEqual(len(CONFIGS), len(UPGRADES) + 1)
self.assertEqual(len(CONFIGS), len(EMPTY_CONFIGS))
def test_get_version(self):
for version, config in enumerate(CONFIGS):
self.assertEqual(version, get_version(config))
def test_validate(self):
for version, config in enumerate(CONFIGS):
try:
validate(config, version)
except ValidationError as e:
self.fail("Config version {} does not validate:\n\n{}".format(version, e.message))
def test_UPGRADES(self):
for version, config in enumerate(CONFIGS[:-1]):
config = dict(config)
config = UPGRADES[version](config)
self.assertEqual(config, CONFIGS[version + 1])
try:
validate(config, version + 1)
except ValidationError as e:
self.fail("UPGRADEing from version {} to version {} results in an invalid config:\n\n{}".format(version, version + 1, e.message))
def test_upgrade(self):
for version, config in enumerate(CONFIGS):
config = dict(config)
try:
config = upgrade(config)
validate(config)
except ValidationError as e:
self.fail("Upgrading from version {} to version {} results in an invalid config:\n\n{}".format(version, len(CONFIGS) - 1, e.message))
self.assertEqual(config, CONFIGS[-1])
def test_empty_config_upgrade(self):
for config in EMPTY_CONFIGS:
config = upgrade(config)
self.assertEqual(config, EMPTY_CONFIGS[-1])
def test_too_new_config(self):
with self.assertRaises(VersionError):
validate(TOO_NEW_CONFIG)
| Python | 0 |
193d911536799751c9ec29571cb8091bcd187087 | fix uraseuranta py | pdi_integrations/arvo/python_scripts/get_arvo_uraseuranta.py | pdi_integrations/arvo/python_scripts/get_arvo_uraseuranta.py | #import json
import requests
#import os
from pandas.io.json import json_normalize
#import datetime
import base64
import os
try:
api_key = os.environ['AUTH_API_KEY']
except KeyError:
print("API-key is missing")
try:
api_user = os.environ['AUTH_API_USER']
except KeyError:
print("API-user is missing")
result = []
good_result=[]
filtered_result=[]
urls = []
url = 'https://arvo.csc.fi/api/vipunen/uraseuranta'
reqheaders = {'Content-Type': 'application/json'}
reqheaders['Accept'] = 'application/json'
### encode API user and API key tothe request headers
tmp = "%s:%s" % (api_user, api_key)
reqheaders['Authorization'] = "Basic %s" % base64.b64encode(tmp.encode('utf-8')).decode('utf-8')
#response = requests.get(url, headers=reqheaders).json()
## Not checking the status just downloading
## GET STATUS
##
while url != None: ## The url is not null
response = requests.get(url, headers=reqheaders).json()
for uraseuranta in response['data']:
result.append(uraseuranta)
# taustatiedot.append(uraseuranta['taustatiedot'])
url = response['pagination']['next_url']
urls.append(url)
## split result into two sets (with&without taustatiedot)
## test first 301 result
## for item in result[0:300]:
for item in result:
if item.get('taustatiedot') == None:
filtered_result.append(item)
else:
good_result.append(item)
## normalize data from result sets
### if you want to check column names use row below
### data.dtypes.index
data = json_normalize(good_result)
filtered_data = json_normalize(filtered_result)
# print(data[12])
# data['vastaajaid'].head(10)
## data.dtypes
## Export data to csv's
print("Exporting data to csv file")
filtered_data.to_csv(path_or_buf='D:/pdi_integrations/data/arvo/uraseuranta_vajaadata.csv', sep='|', na_rep='',
header=True, index=False, mode='w', encoding='utf-8-sig', quoting=2,
quotechar='"', line_terminator='\n', escapechar='$')
data.to_csv(path_or_buf='D:/pdi_integrations/data/arvo/uraseuranta.csv', sep='|', na_rep='',
header=True, index=False, mode='w', encoding='utf-8-sig', quoting=2,
quotechar='"', line_terminator='\n' , escapechar='$')
#now = datetime.datetime.now()
#print
#print("Current date and time using str method of datetime object:")
#print(str(now))
## data.vastaajaid.nunique()
| #import json
import requests
#import os
from pandas.io.json import json_normalize
#import datetime
import os
try:
api_key = os.environ['AUTH_API_KEY']
except KeyError:
print("API-key missing")
result = []
good_result=[]
filtered_result=[]
urls = []
url = 'https://arvo.csc.fi/api/vipunen/uraseuranta'
reqheaders = {'Content-Type': 'application/json'}
reqheaders['Authorization'] = api_key
#response = requests.get(url, headers=reqheaders).json()
## Not checking the status just downloading
## GET STATUS
##
while url != 'null': ## The url is not null
response = requests.get(url, headers=reqheaders).json()
for uraseuranta in response['data']:
result.append(uraseuranta)
# taustatiedot.append(uraseuranta['taustatiedot'])
url = response['pagination']['next_url']
urls.append(url)
## split result into two sets (with&without taustatiedot)
## test first 301 result
## for item in result[0:300]:
for item in result:
if item.get('taustatiedot') == None:
filtered_result.append(item)
else:
good_result.append(item)
## normalize data from result sets
### if you want to check column names use row below
### data.dtypes.index
data = json_normalize(good_result)
filtered_data = json_normalize(filtered_result)
# print(data[12])
# data['vastaajaid'].head(10)
## data.dtypes
## Export data to csv's
filtered_data.to_csv(path_or_buf='D:/pdi_integrations/data/arvo/uraseuranta_vajaadata.csv', sep='|', na_rep='',
header=True, index=False, mode='w', encoding='utf-8-sig', quoting=2,
quotechar='"', line_terminator='\n', escapechar='$')
data.to_csv(path_or_buf='D:/pdi_integrations/data/arvo/uraseuranta.csv', sep='|', na_rep='',
header=True, index=False, mode='w', encoding='utf-8-sig', quoting=2,
quotechar='"', line_terminator='\n' , escapechar='$')
#now = datetime.datetime.now()
#print
#print("Current date and time using str method of datetime object:")
#print(str(now))
## data.vastaajaid.nunique()
| Python | 0.000001 |
a0f7ca32edd5c924366738e0e6d6b8ab4e483cc8 | Undo last commit. | foliant/utils.py | foliant/utils.py | '''Various utilities used here and there in the Foliant code.'''
from contextlib import contextmanager
from pkgutil import iter_modules
from importlib import import_module
from shutil import rmtree
from pathlib import Path
from logging import Logger
from typing import Dict, Tuple, Type, Set
from halo import Halo
def get_available_tags() -> Set[str]:
'''Extract ``tags`` attribute values from installed
``foliant.preprocessors.*.Preprocessor`` classes.
:returns: Set of tags
'''
preprocessors_module = import_module('foliant.preprocessors')
result = set()
for importer, modname, _ in iter_modules(preprocessors_module.__path__):
if modname == 'base':
continue
result.update(importer.find_module(modname).load_module(modname).Preprocessor.tags)
return result
def get_available_config_parsers() -> Dict[str, Type]:
'''Get the names of the installed ``foliant.config`` submodules and the corresponding
``Parser`` classes.
Used for construction of the Foliant config parser, which is a class that inherits
from all ``foliant.config.*.Parser`` classes.
:returns: Dictionary with submodule names as keys as classes as values
'''
config_module = import_module('foliant.config')
result = {}
for importer, modname, _ in iter_modules(config_module.__path__):
if modname == 'base':
continue
result[modname] = importer.find_module(modname).load_module(modname).Parser
return result
def get_available_clis() -> Dict[str, Type]:
'''Get the names of the installed ``foliant.cli`` submodules and the corresponding
``Cli`` classes.
Used for construction of the Foliant CLI, which is a class that inherits
from all ``foliant.cli.*.Cli`` classes.
:returns: Dictionary with submodule names as keys as classes as values
'''
cli_module = import_module('foliant.cli')
result = {}
for importer, modname, _ in iter_modules(cli_module.__path__):
result[modname] = importer.find_module(modname).load_module(modname).Cli
return result
def get_available_backends() -> Dict[str, Tuple[str]]:
'''Get the names of the installed ``foliant.backends`` submodules and the corresponding
``Backend.targets`` tuples.
Used in the interactive backend selection prompt to list the available backends
and to check if the selected target can be made with the selected backend.
:returns: Dictionary of submodule names as keys and target tuples as values
'''
backends_module = import_module('foliant.backends')
result = {}
for importer, modname, _ in iter_modules(backends_module.__path__):
if modname == 'base':
continue
result[modname] = importer.find_module(modname).load_module(modname).Backend.targets
return result
@contextmanager
def spinner(text: str, logger: Logger, quiet=False):
'''Spinner decoration for long running processes.
:param text: The spinner's caption
:param logger: Logger to capture the error if it occurs
:param quiet: If ``True``, the spinner is hidden
'''
halo = Halo(text, enabled=not quiet)
halo.start()
try:
logger.info(text)
yield
if not quiet:
halo.succeed()
else:
halo.stop()
except Exception as exception:
logger.error(str(exception))
if not quiet:
halo.fail(str(exception))
else:
halo.stop()
@contextmanager
def tmp(tmp_path: Path, keep_tmp=False):
'''Clean up tmp directory before and after running a code block.
:param tmp_path: Path to the tmp directory
:param keep_tmp: If ``True``, skip the cleanup
'''
rmtree(tmp_path, ignore_errors=True)
yield
if not keep_tmp:
rmtree(tmp_path, ignore_errors=True)
| '''Various utilities used here and there in the Foliant code.'''
from contextlib import contextmanager
from pkgutil import iter_modules
from importlib import import_module
from shutil import rmtree
from pathlib import Path
from logging import Logger
from typing import Dict, Tuple, Type, Set
from halo import Halo
def get_available_tags() -> Set[str]:
'''Extract ``tags`` attribute values from installed
``foliant.preprocessors.*.Preprocessor`` classes.
:returns: Set of tags
'''
preprocessors_module = import_module('foliant.preprocessors')
result = set()
for importer, modname, _ in iter_modules(preprocessors_module.__path__):
if modname == 'base':
continue
result.update(importer.find_module(modname).load_module(modname).Preprocessor.tags)
return result
def get_available_config_parsers() -> Dict[str, Type]:
'''Get the names of the installed ``foliant.config`` submodules and the corresponding
``Parser`` classes.
Used for construction of the Foliant config parser, which is a class that inherits
from all ``foliant.config.*.Parser`` classes.
:returns: Dictionary with submodule names as keys as classes as values
'''
config_module = import_module('foliant.config')
result = {}
for importer, modname, _ in iter_modules(config_module.__path__):
if modname == 'base':
continue
result[modname] = importer.find_module(modname).load_module(modname).Parser
return result
def get_available_clis() -> Dict[str, Type]:
'''Get the names of the installed ``foliant.cli`` submodules and the corresponding
``Cli`` classes.
Used for construction of the Foliant CLI, which is a class that inherits
from all ``foliant.cli.*.Cli`` classes.
:returns: Dictionary with submodule names as keys as classes as values
'''
cli_module = import_module('foliant.cli')
result = {}
for importer, modname, _ in iter_modules(cli_module.__path__):
if modname == 'base':
continue
result[modname] = importer.find_module(modname).load_module(modname).Cli
return result
def get_available_backends() -> Dict[str, Tuple[str]]:
'''Get the names of the installed ``foliant.backends`` submodules and the corresponding
``Backend.targets`` tuples.
Used in the interactive backend selection prompt to list the available backends
and to check if the selected target can be made with the selected backend.
:returns: Dictionary of submodule names as keys and target tuples as values
'''
backends_module = import_module('foliant.backends')
result = {}
for importer, modname, _ in iter_modules(backends_module.__path__):
if modname == 'base':
continue
result[modname] = importer.find_module(modname).load_module(modname).Backend.targets
return result
@contextmanager
def spinner(text: str, logger: Logger, quiet=False):
'''Spinner decoration for long running processes.
:param text: The spinner's caption
:param logger: Logger to capture the error if it occurs
:param quiet: If ``True``, the spinner is hidden
'''
halo = Halo(text, enabled=not quiet)
halo.start()
try:
logger.info(text)
yield
if not quiet:
halo.succeed()
else:
halo.stop()
except Exception as exception:
logger.error(str(exception))
if not quiet:
halo.fail(str(exception))
else:
halo.stop()
@contextmanager
def tmp(tmp_path: Path, keep_tmp=False):
'''Clean up tmp directory before and after running a code block.
:param tmp_path: Path to the tmp directory
:param keep_tmp: If ``True``, skip the cleanup
'''
rmtree(tmp_path, ignore_errors=True)
yield
if not keep_tmp:
rmtree(tmp_path, ignore_errors=True)
| Python | 0 |
77859dbc019a19222ada36ebccc849ba77649a86 | add to unicode functions to all forum models | forums/models.py | forums/models.py | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
class Category(models.Model):
name = models.CharField(_("Name"), max_length=255, unique=True)
position = models.IntegerField(_("Position"), default=0)
class Meta:
ordering = ['position']
def __unicode__(self):
return self.name
class Forum(models.Model):
category = models.ForeignKey(Category, related_name='forums')
name = models.CharField(_("Name"), max_length=255)
position = models.IntegerField(_("Position"), default=0)
description = models.TextField(_("Description"), blank=True)
class Meta:
ordering = ['position']
def __unicode__(self):
return self.name
class Topic(models.Model):
forum = models.ForeignKey(Forum, related_name='topics')
name = models.CharField(_("Name"), max_length=255)
last_post = models.ForeignKey('Post', verbose_name=_("Last post"), related_name='forum_last_post', blank=True, null=True)
class Meta:
ordering = ['-last_post__created']
def __unicode__(self):
return self.name
class Post(models.Model):
topic = models.ForeignKey(Topic, related_name='posts')
user = models.ForeignKey(User, related_name='forum_posts')
created = models.DateTimeField(_("Created"), auto_now_add=True)
updated = models.DateTimeField(_("Updated"),auto_now=True)
body = models.TextField(_("Body"))
class Meta:
ordering = ['created']
def __unicode__(self):
return self.body
| from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
class Category(models.Model):
name = models.CharField(_("Name"), max_length=255, unique=True)
position = models.IntegerField(_("Position"), default=0)
class Meta:
ordering = ['position']
class Forum(models.Model):
category = models.ForeignKey(Category, related_name='forums')
name = models.CharField(_("Name"), max_length=255)
position = models.IntegerField(_("Position"), default=0)
description = models.TextField(_("Description"), blank=True)
class Meta:
ordering = ['position']
class Topic(models.Model):
forum = models.ForeignKey(Forum, related_name='topics')
name = models.CharField(_("Name"), max_length=255)
last_post = models.ForeignKey('Post', verbose_name=_("Last post"), related_name='forum_last_post', blank=True, null=True)
class Meta:
ordering = ['-last_post__created']
class Post(models.Model):
topic = models.ForeignKey(Topic, related_name='posts')
user = models.ForeignKey(User, related_name='forum_posts')
created = models.DateTimeField(_("Created"), auto_now_add=True)
updated = models.DateTimeField(_("Updated"),auto_now=True)
body = models.TextField(_("Body"))
class Meta:
ordering = ['created']
| Python | 0 |
f22ad01d72b8ab2a12bf68a23b79c9a1b2e6f237 | Standardizing all to uppercase for compare | tms/tms_utils.py | tms/tms_utils.py |
# Local modules
from common import telegram_utils
from tms import tms_data
class Verse():
def __init__(self, ref, title, pack, pos):
self.reference = ref
self.title = title
self.pack = pack
self.position = pos
def get_reference(self):
return self.reference
def get_title(self):
return self.title
def get_pack(self):
return self.pack
def get_position(self):
return self.position
def get_pack(pack):
select_pack = tms_data.get_tms().get(pack)
if select_pack is not None:
return select_pack
return None
def query_pack_pos(query):
query = query.upper().strip().split()
query = ''.join(query)
for pack_key in get_all_pack_keys():
pack = get_pack(pack_key)
size = len(pack)
for i in range(0, size):
try_packpos = pack_key + str(i + 1)
if try_packpos == query:
return pack[i]
return None
def query_verse_by_reference(ref):
ref = ref.upper().strip().split()
ref = ''.join(ref)
for pack_key in get_all_pack_keys():
pack = get_pack(pack_key)
size = len(pack)
for i in range(0, size):
select_verse = pack[i]
try_ref = select_verse[1].upper().strip().split()
try_ref = ''.join(try_ref)
if try_ref == ref:
return Verse(select_verse[1], select_verse[0], pack_key, i + 1)
return None
def get_all_pack_keys():
return tms_data.get_tms().keys()
def get_verse_by_pack(pack, pos):
select_pack = get_pack(pack)
if select_pack is not None:
select_verse = select_pack[pos - 1]
if select_verse is not None:
return Verse(select_verse[1], select_verse[0], pack, pos)
def get_verse_by_title(title, pos):
verses = get_verses_by_title(title)
if len(verses) > pos:
return verses[pos - 1]
return None
def get_verses_by_title(title):
verses = []
for pack_key in get_all_pack_keys():
pack = get_pack(pack_key)
size = len(pack)
for i in range(0, size):
select_verse = pack[i]
if title == select_verse[0]:
verses.append(Verse(select_verse[1], select_verse[0], pack_key, i + 1))
return verses
def get_start_verse():
start_key = 'BWC'
select_pack = get_pack(start_key)
select_verse = select_pack[0]
return Verse(select_verse[1], select_verse[0], start_key, 1)
def format_verse(verse, text):
verse_prep = []
verse_prep.append(verse.get_pack() + ' ' + str(verse.get_position()))
verse_prep.append(text)
verse_prep.append(telegram_utils.bold(verse.reference))
return telegram_utils.join(verse_prep, '\n\n')
|
# Local modules
from common import telegram_utils
from tms import tms_data
class Verse():
def __init__(self, ref, title, pack, pos):
self.reference = ref
self.title = title
self.pack = pack
self.position = pos
def get_reference(self):
return self.reference
def get_title(self):
return self.title
def get_pack(self):
return self.pack
def get_position(self):
return self.position
def get_pack(pack):
select_pack = tms_data.get_tms().get(pack)
if select_pack is not None:
return select_pack
return None
def find_pack_pos(query):
query = query.strip().split()
query = ''.join(query)
for pack_key in get_all_pack_keys():
pack = get_pack(pack_key)
size = len(pack)
for i in range(0, size):
try_packpos = pack_key + str(i + 1)
if try_packpos == query:
return pack[i]
return None
def get_all_pack_keys():
return tms_data.get_tms().keys()
def get_verse_by_pack(pack, pos):
select_pack = get_pack(pack)
if select_pack is not None:
select_verse = select_pack[pos - 1]
if select_verse is not None:
return Verse(select_verse[1], select_verse[0], pack, pos)
def get_verse_by_title(title, pos):
verses = get_verses_by_title(title)
if len(verses) > pos:
return verses[pos - 1]
return None
def get_verse_by_reference(ref):
ref = ref.strip().split()
ref = ''.join(ref)
for pack_key in get_all_pack_keys():
pack = get_pack(pack_key)
size = len(pack)
for i in range(0, size):
select_verse = pack[i]
try_ref = select_verse[1]
try_ref = ''.join(try_ref)
if try_ref == ref:
return Verse(select_verse[1], select_verse[0], pack_key, i + 1)
return None
def get_verses_by_title(title):
verses = []
for pack_key in get_all_pack_keys():
pack = get_pack(pack_key)
size = len(pack)
for i in range(0, size):
select_verse = pack[i]
if title == select_verse[0]:
verses.append(Verse(select_verse[1], select_verse[0], pack_key, i + 1))
return verses
def get_start_verse():
start_key = 'BWC'
select_pack = get_pack(start_key)
select_verse = select_pack[0]
return Verse(select_verse[1], select_verse[0], start_key, 1)
def format_verse(verse, text):
verse_prep = []
verse_prep.append(verse.get_pack() + ' ' + str(verse.get_position()))
verse_prep.append(text)
verse_prep.append(telegram_utils.bold(verse.reference))
return telegram_utils.join(verse_prep, '\n\n')
| Python | 0.99991 |
694df5ba69e4e7123009605e59c2b5417a3b52c5 | Remove print statement about number of bins | tools/fitsevt.py | tools/fitsevt.py | #! /usr/bin/python3
import sys
import os
import math
from astropy.io import fits
inputFolder = sys.argv[1]
outputFolder = sys.argv[2]
eLo = int(sys.argv[3])
eHi = int(sys.argv[4])
binSize = int(sys.argv[5])
fnames = os.listdir(inputFolder)
for fname in fnames:
print(fname)
hdulist = fits.open(inputFolder+"/"+fname)
for i in range(1,5):
timeRange = hdulist[i].header["TSTOP"] - hdulist[i].header["TSTART"]
nBins = math.ceil(timeRange/binSize)
count = [0]*nBins
for event in hdulist[i].data:
if(event["ENERGY"]>=eLo or event["ENERGY"]<=eHi):
index = math.floor( nBins*(event["Time"] - hdulist[i].header["TSTART"])/timeRange )
count[index] += 1
sigClass = 1
with open(outputFolder+"/{0}_{1}".format(fname,i),'w') as f:
f.write("{0} {1}\n".format(nBins,sigClass))
for j in range(nBins):
f.write("{0}\n".format(count[j]))
| #! /usr/bin/python3
import sys
import os
import math
from astropy.io import fits
inputFolder = sys.argv[1]
outputFolder = sys.argv[2]
eLo = int(sys.argv[3])
eHi = int(sys.argv[4])
binSize = int(sys.argv[5])
fnames = os.listdir(inputFolder)
for fname in fnames:
print(fname)
hdulist = fits.open(inputFolder+"/"+fname)
for i in range(1,5):
timeRange = hdulist[i].header["TSTOP"] - hdulist[i].header["TSTART"]
nBins = math.ceil(timeRange/binSize)
count = [0]*nBins
print(nBins)
for event in hdulist[i].data:
if(event["ENERGY"]>=eLo or event["ENERGY"]<=eHi):
index = math.floor( nBins*(event["Time"] - hdulist[i].header["TSTART"])/timeRange )
count[index] += 1
sigClass = 1
with open(outputFolder+"/{0}_{1}".format(fname,i),'w') as f:
f.write("{0} {1}\n".format(nBins,sigClass))
for j in range(nBins):
f.write("{0}\n".format(count[j]))
| Python | 0.003864 |
22a4644bd510a8b786d181c01c20f3dc522dac8d | Update corehq/apps/auditcare/migrations/0004_add_couch_id.py | corehq/apps/auditcare/migrations/0004_add_couch_id.py | corehq/apps/auditcare/migrations/0004_add_couch_id.py | # Generated by Django 2.2.20 on 2021-05-21 17:32
from django.db import migrations, models
ACCESS_INDEX = "audit_access_couch_10d1b_idx"
ACCESS_TABLE = "auditcare_accessaudit"
NAVIGATION_EVENT_INDEX = "audit_nav_couch_875bc_idx"
NAVIGATION_EVENT_TABLE = "auditcare_navigationeventaudit"
def _create_index_sql(table_name, index_name):
return """
CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS {} ON {} (couch_id)
WHERE couch_id IS NOT NULL
""".format(index_name, table_name)
def _drop_index_sql(index_name):
return "DROP INDEX CONCURRENTLY IF EXISTS {}".format(index_name)
class Migration(migrations.Migration):
atomic = False
dependencies = [
('auditcare', '0003_truncatechars'),
]
operations = [
migrations.AddField(
model_name='accessaudit',
name='couch_id',
field=models.CharField(max_length=126, null=True),
),
migrations.RunSQL(
sql=_create_index_sql(ACCESS_TABLE, ACCESS_INDEX),
reverse_sql=_drop_index_sql(ACCESS_INDEX),
state_operations=[
migrations.AddIndex(
model_name='accessaudit',
index=models.UniqueConstraint(fields=['couch_id'], condition=models.Q(couch_id__isnull=False), name=ACCESS_INDEX),
),
]
),
migrations.AddField(
model_name='navigationeventaudit',
name='couch_id',
field=models.CharField(max_length=126, null=True),
),
migrations.RunSQL(
sql=_create_index_sql(NAVIGATION_EVENT_TABLE, NAVIGATION_EVENT_INDEX),
reverse_sql=_drop_index_sql(NAVIGATION_EVENT_INDEX),
state_operations=[
migrations.AddIndex(
model_name='navigationeventaudit',
index=models.UniqueConstraint(fields=['couch_id'], condition=models.Q(couch_id__isnull=False), name=NAVIGATION_EVENT_INDEX),
),
]
),
]
| # Generated by Django 2.2.20 on 2021-05-21 17:32
from django.db import migrations, models
ACCESS_INDEX = "audit_access_couch_10d1b_idx"
ACCESS_TABLE = "auditcare_accessaudit"
NAVIGATION_EVENT_INDEX = "audit_nav_couch_875bc_idx"
NAVIGATION_EVENT_TABLE = "auditcare_navigationeventaudit"
def _create_index_sql(table_name, index_name):
return """
CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS {} ON {} (couch_id)
WHERE couch_id IS NOT NULL
""".format(index_name, table_name)
def _drop_index_sql(index_name):
return "DROP INDEX CONCURRENTLY IF EXISTS {}".format(index_name)
class Migration(migrations.Migration):
atomic = False
dependencies = [
('auditcare', '0003_truncatechars'),
]
operations = [
migrations.AddField(
model_name='accessaudit',
name='couch_id',
field=models.CharField(max_length=126, null=True),
),
migrations.RunSQL(
sql=_create_index_sql(ACCESS_TABLE, ACCESS_INDEX),
reverse_sql=_drop_index_sql(ACCESS_INDEX),
state_operations=[
migrations.AddIndex(
model_name='accessaudit',
index=models.Index(fields=['couch_id'], name=ACCESS_INDEX),
),
]
),
migrations.AddField(
model_name='navigationeventaudit',
name='couch_id',
field=models.CharField(max_length=126, null=True),
),
migrations.RunSQL(
sql=_create_index_sql(NAVIGATION_EVENT_TABLE, NAVIGATION_EVENT_INDEX),
reverse_sql=_drop_index_sql(NAVIGATION_EVENT_INDEX),
state_operations=[
migrations.AddIndex(
model_name='navigationeventaudit',
index=models.UniqueConstraint(fields=['couch_id'], condition=models.Q(couch_id__isnull=False), name=NAVIGATION_EVENT_INDEX),
),
]
),
]
| Python | 0 |
0c816aaa82ee9fee1ee244c6b96c1a2718ec836e | use default python command from the environment | testrunner.py | testrunner.py | #!/usr/bin/env python
import os
import sys
import unittest
USAGE = """%prog SDK_PATH TEST_PATH
Run unit tests for App Engine apps."""
SDK_PATH_manual = '/usr/local/google_appengine'
TEST_PATH_manual = '../unittests'
def main(sdk_path, test_path):
os.chdir('backend')
sys.path.extend([sdk_path, '.', '../lib', '../testlib'])
import dev_appserver
dev_appserver.fix_sys_path()
suite = unittest.loader.TestLoader().discover(test_path)
if not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful():
sys.exit(-1)
if __name__ == '__main__':
SDK_PATH = SDK_PATH_manual
TEST_PATH = TEST_PATH_manual
if len(sys.argv)==2:
SDK_PATH = sys.argv[1]
main(SDK_PATH, TEST_PATH)
| #!/usr/bin/python
import os
import sys
import unittest
USAGE = """%prog SDK_PATH TEST_PATH
Run unit tests for App Engine apps."""
SDK_PATH_manual = '/usr/local/google_appengine'
TEST_PATH_manual = '../unittests'
def main(sdk_path, test_path):
os.chdir('backend')
sys.path.extend([sdk_path, '.', '../lib', '../testlib'])
import dev_appserver
dev_appserver.fix_sys_path()
suite = unittest.loader.TestLoader().discover(test_path)
if not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful():
sys.exit(-1)
if __name__ == '__main__':
SDK_PATH = SDK_PATH_manual
TEST_PATH = TEST_PATH_manual
if len(sys.argv)==2:
SDK_PATH = sys.argv[1]
main(SDK_PATH, TEST_PATH)
| Python | 0.000001 |
ec2aaf86f2002b060f6e5b4d040961a37f89d06a | Update rearrange-string-k-distance-apart.py | Python/rearrange-string-k-distance-apart.py | Python/rearrange-string-k-distance-apart.py | # Time: O(n)
# Space: O(n)
class Solution(object):
def rearrangeString(self, str, k):
"""
:type str: str
:type k: int
:rtype: str
"""
cnts = [0] * 26;
for c in str:
cnts[ord(c) - ord('a')] += 1
sorted_cnts = []
for i in xrange(26):
sorted_cnts.append((cnts[i], chr(i + ord('a'))))
sorted_cnts.sort(reverse=True)
max_cnt = sorted_cnts[0][0]
blocks = [[] for _ in xrange(max_cnt)]
i = 0
for cnt in sorted_cnts:
for _ in xrange(cnt[0]):
blocks[i].append(cnt[1])
i = (i + 1) % max(cnt[0], max_cnt - 1)
for i in xrange(max_cnt-1):
if len(blocks[i]) < k:
return ""
return "".join(map(lambda x : "".join(x), blocks))
# Time: O(nlogc), c is the count of unique characters.
# Space: O(c)
from collections import defaultdict
from heapq import heappush, heappop
class Solution2(object):
def rearrangeString(self, str, k):
"""
:type str: str
:type k: int
:rtype: str
"""
if k == 0:
return str
cnts = defaultdict(int)
for c in str:
cnts[c] += 1
heap = []
for c, cnt in cnts.iteritems():
heappush(heap, [-cnt, c])
result = []
while heap:
used_cnt_chars = []
for _ in xrange(min(k, len(str) - len(result))):
if not heap:
return ""
cnt_char = heappop(heap)
result.append(cnt_char[1])
cnt_char[0] += 1
if cnt_char[0] < 0:
used_cnt_chars.append(cnt_char)
for cnt_char in used_cnt_chars:
heappush(heap, cnt_char)
return "".join(result)
| # Time: O(nlogc), c is the count of unique characters.
# Space: O(c)
from collections import defaultdict
from heapq import heappush, heappop
class Solution(object):
def rearrangeString(self, str, k):
"""
:type str: str
:type k: int
:rtype: str
"""
if k == 0:
return str
cnts = defaultdict(int)
for c in str:
cnts[c] += 1
heap = []
for c, cnt in cnts.iteritems():
heappush(heap, [-cnt, c])
result = []
while heap:
used_cnt_chars = []
for _ in xrange(min(k, len(str) - len(result))):
if not heap:
return ""
cnt_char = heappop(heap)
result.append(cnt_char[1])
cnt_char[0] += 1
if cnt_char[0] < 0:
used_cnt_chars.append(cnt_char)
for cnt_char in used_cnt_chars:
heappush(heap, cnt_char)
return "".join(result)
| Python | 0.000046 |
392cf8f05b6c23600e7a61a51494771ab08f2274 | add exceptions to should_curry | toolz/curried.py | toolz/curried.py | """
Alternate namespece for toolz such that all functions are curried
Currying provides implicit partial evaluation of all functions
Example:
Get usually requires two arguments, an index and a collection
>>> from toolz.curried import get
>>> get(0, ('a', 'b'))
'a'
When we use it in higher order functions we often want to pass a partially
evaluated form
>>> data = [(1, 2), (11, 22), (111, 222)])
>>> map(lambda seq: get(0, seq), data)
[1, 11, 111]
The curried version allows simple expression of partial evaluation
>>> map(get(0), data)
[1, 11, 111]
See Also:
toolz.functoolz.curry
"""
import toolz
from .functoolz import curry
import inspect
def nargs(f):
try:
return len(inspect.getargspec(f).args)
except TypeError:
return None
exceptions = set((toolz.map, toolz.filter))
def should_curry(f):
return (callable(f) and nargs(f) and nargs(f) > 1
or f in exceptions)
d = dict((name, curry(f) if '__' not in name and should_curry(f) else f)
for name, f in toolz.__dict__.items())
locals().update(d)
| """
Alternate namespece for toolz such that all functions are curried
Currying provides implicit partial evaluation of all functions
Example:
Get usually requires two arguments, an index and a collection
>>> from toolz.curried import get
>>> get(0, ('a', 'b'))
'a'
When we use it in higher order functions we often want to pass a partially
evaluated form
>>> data = [(1, 2), (11, 22), (111, 222)])
>>> map(lambda seq: get(0, seq), data)
[1, 11, 111]
The curried version allows simple expression of partial evaluation
>>> map(get(0), data)
[1, 11, 111]
See Also:
toolz.functoolz.curry
"""
import toolz
from .functoolz import curry
import inspect
def nargs(f):
try:
return len(inspect.getargspec(f).args)
except TypeError:
return None
def should_curry(f):
return callable(f) and nargs(f) and nargs(f) > 1
d = dict((name, curry(f) if '__' not in name and should_curry(f) else f)
for name, f in toolz.__dict__.items())
locals().update(d)
| Python | 0.000011 |
778bab1b4f57eb03137c00203d7b5f32c018ca83 | fix error | ImagePaste.py | ImagePaste.py | # import sublime
import sublime_plugin
import os
import sys
package_file = os.path.normpath(os.path.abspath(__file__))
package_path = os.path.dirname(package_file)
lib_path = os.path.join(package_path, "lib")
if lib_path not in sys.path:
sys.path.append(lib_path)
print(sys.path)
from PIL import ImageGrab
from PIL import ImageFile
class ImagePasteCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
rel_fn = self.paste()
if not rel_fn:
view.run_command("paste")
return
for pos in view.sel():
# print("scope name: %r" % (view.scope_name(pos.begin())))
if 'text.html.markdown' in view.scope_name(pos.begin()):
view.insert(edit, pos.begin(), "" % rel_fn)
else:
view.insert(edit, pos.begin(), "%s" % rel_fn)
# only the first cursor add the path
break
def paste(self):
ImageFile.LOAD_TRUNCATED_IMAGES = True
im = ImageGrab.grabclipboard()
if im:
abs_fn, rel_fn = self.get_filename()
im.save(abs_fn,'PNG')
return rel_fn
else:
print('clipboard buffer is not image!')
return None
def get_filename(self):
view = self.view
filename = view.file_name()
# create dir in current path with the name of current filename
dirname, _ = os.path.splitext(filename)
# create new image file under currentdir/filename_without_ext/filename_without_ext%d.png
fn_without_ext = os.path.basename(dirname)
if not os.path.lexists(dirname):
os.mkdir(dirname)
i = 0
while True:
# relative file path
rel_filename = os.path.join("%s/%s%d.png" % (fn_without_ext, fn_without_ext, i))
# absolute file path
abs_filename = os.path.join(dirname, "%s%d.png" % ( fn_without_ext, i))
if not os.path.exists(abs_filename):
break
i += 1
print("save file: " + abs_filename + "\nrel " + rel_filename)
return abs_filename, rel_filename
| # import sublime
import sublime_plugin
import os
package_file = os.path.normpath(os.path.abspath(__file__))
package_path = os.path.dirname(package_file)
lib_path = os.path.join(package_path, "lib")
if lib_path not in sys.path:
sys.path.append(lib_path)
print(sys.path)
from PIL import ImageGrab
from PIL import ImageFile
class ImagePasteCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
rel_fn = self.paste()
if not rel_fn:
view.run_command("paste")
return
for pos in view.sel():
# print("scope name: %r" % (view.scope_name(pos.begin())))
if 'text.html.markdown' in view.scope_name(pos.begin()):
view.insert(edit, pos.begin(), "" % rel_fn)
else:
view.insert(edit, pos.begin(), "%s" % rel_fn)
# only the first cursor add the path
break
def paste(self):
ImageFile.LOAD_TRUNCATED_IMAGES = True
im = ImageGrab.grabclipboard()
if im:
abs_fn, rel_fn = self.get_filename()
im.save(abs_fn,'PNG')
return rel_fn
else:
print('clipboard buffer is not image!')
return None
def get_filename(self):
view = self.view
filename = view.file_name()
# create dir in current path with the name of current filename
dirname, _ = os.path.splitext(filename)
# create new image file under currentdir/filename_without_ext/filename_without_ext%d.png
fn_without_ext = os.path.basename(dirname)
if not os.path.lexists(dirname):
os.mkdir(dirname)
i = 0
while True:
# relative file path
rel_filename = os.path.join("%s/%s%d.png" % (fn_without_ext, fn_without_ext, i))
# absolute file path
abs_filename = os.path.join(dirname, "%s%d.png" % ( fn_without_ext, i))
if not os.path.exists(abs_filename):
break
i += 1
print("save file: " + abs_filename + "\nrel " + rel_filename)
return abs_filename, rel_filename
| Python | 0.000002 |
734457ed995a3dfcacf8556ed4e98e7536e63a66 | Fix typos | nodeconductor/openstack/management/commands/initsecuritygroups.py | nodeconductor/openstack/management/commands/initsecuritygroups.py | from __future__ import unicode_literals
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from nodeconductor.openstack import models, executors, handlers
class Command(BaseCommand):
help_text = "Add default security groups with given names to all tenants."
def add_arguments(self, parser):
parser.add_argument('names', nargs='+', type=str)
def handle(self, *args, **options):
names = options['names']
default_security_groups = getattr(settings, 'NODECONDUCTOR', {}).get('DEFAULT_SECURITY_GROUPS')
security_groups = []
for name in names:
try:
group = next(sg for sg in default_security_groups if sg['name'] == name)
except StopIteration:
raise CommandError('There is no default security group with name %s' % name)
else:
security_groups.append(group)
for spl in models.OpenStackServiceProjectLink.objects.all():
if not spl.tenant:
continue
for group in security_groups:
if spl.security_groups.filter(name=group['name']).exists():
self.stdout.write('Tenant %s already has security group %s' % (spl.tenant, group['name']))
continue
spl.security_groups.create(name=group['name'], description=group['description'])
try:
db_security_group = handlers.create_security_group(spl, group)
except handlers.SecurityGroupCreateException as e:
self.stdout.write(
'Failed to add security_group %s to tenant %s. Error: %s' % (group['name'], spl.teannt, e))
else:
executors.SecurityGroupCreateExecutor.execute(db_security_group, async=False)
self.stdout.write(
'Security group %s has been successfully added to tenant %s' % (group['name'], spl.tenant))
| from __future__ import unicode_literals
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from nodeconductor.openstack import models, executors, handlers
class Command(BaseCommand):
help_text = "Add default security groups with given names to all tenants to tenants."
def add_arguments(self, parser):
parser.add_argument('names', nargs='+', type=str)
def handle(self, *args, **options):
names = options['names']
default_security_groups = getattr(settings, 'NODECONDUCTOR', {}).get('DEFAULT_SECURITY_GROUPS')
security_groups = []
for name in names:
try:
group = next(sg for sg in default_security_groups if sg['name'] == name)
except StopIteration:
raise CommandError('There is no default security group with name %s' % name)
else:
security_groups.append(group)
for spl in models.OpenStackServiceProjectLink.objects.all():
if not spl.tenant:
continue
for group in security_groups:
if spl.security_groups.filter(name=group['name']).exists():
self.stdout.write('Tenant %s already have security group %s' % (spl.tenant, group['name']))
continue
spl.security_groups.create(name=group['name'], description=group['description'])
try:
db_security_group = handlers.create_security_group(spl, group)
except handlers.SecurityGroupCreateException as e:
self.stdout.write(
'Failed to add security_group %s to tenant %s. Error: %s' % (group['name'], spl.teannt, e))
else:
executors.SecurityGroupCreateExecutor.execute(db_security_group, async=False)
self.stdout.write(
'Security group %s has been successfully added to tenant %s' % (group['name'], spl.tenant))
| Python | 0.999537 |
b5b38d5ba76e61bc14e25a45394424436e323c5d | fix reduction2 index dict | utils/speedyvec/vectorizers/subject_verb_agreement.py | utils/speedyvec/vectorizers/subject_verb_agreement.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""Generate a model capable of detecting subject-verb agreement errors"""
from pattern.en import lexeme, tenses
from pattern.en import pluralize, singularize
from textstat.textstat import textstat
from time import sleep
import hashlib
import os
import pika
import psycopg2
import random
import re
import sqlite3
import textacy
from sva_reducer import get_reduction
print("You just imported get_reduction from sva_reducer. This reduction"
"algorithm should be the same as the one used to create your previous"
"reducutions.")
RABBIT = os.environ.get('RABBITMQ_LOCATION', 'localhost')
DB_PASSWORD = os.environ.get('SVA_PASSWORD', '')
DB_NAME = os.environ.get('SVA_DB', 'sva')
DB_USER = os.environ.get('SVA_USER', DB_NAME)
# Indexing the sentence keys ################################################
print("Indexing sentence keys...")
# Connect to postgres
conn = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASSWORD)
cur = conn.cursor()
# Select unique reductions in order of regularity, must occur at least thrice
cur.execute('SELECT reduction, count(*) from reductions group by'
' reduction having count(*) > 2 order by count(*) desc;')
# with ~2 million total sentences the number of unique reductions was a little
# over 12k. ~5k had more than 2 occurrences
reduction2idx = {n[0]: i for i, n in enumerate(cur)}
num_reductions = len(reduction2idx)
# close connections to database
cur.close()
conn.close()
# Vectorizing sentence keys ################################################
print('Vectorizing sentence keys...')
# vectors must be convertable to a numpy array.
# NOTE: storing the number of reductions on each object is not necessary and is
# increasing db size. The advantage is that each row can compute its numpy
# vector with no database calls which is why we choose it. We might undecide
# this at some point.
# Ex:
# {indices={5:1, 6:2, 500:1, 6003:2} num_reductions=5000}
# {indicies={index:count, index:count, ...} reductions=num_reductions}
def get_vector(string):
result = {'indices':{}, 'reductions':num_reductions}
for reduction in get_reduction(string):
index = reduction2idx.get(reduction)
if index:
result['indices'][index] = x['indices'].get(index, 0) + 1
result = repr(result) # transform to a string
return result
def handle_message(ch, method, properties, body):
labeled_sent_dict = dict(body.decode("utf-8"))
sent_str = labeled_sent_dict['sent_str']
label = labeled_sent_dict['label']
for vector in get_vector(sent_str):
labeled_vector = repr({'vector':vector, 'label':label})
channel.basic_publish(exchange='', routing_key='vectors',
body=labeled_vector)
ch.basic_ack(delivery_tag=method.delivery_tag)
if __name__ == '__main__':
connection = pika.BlockingConnection(pika.ConnectionParameters(RABBIT))
channel = connection.channel()
channel.queue_declare(queue='fstrings') # create queue if doesn't exist
channel.queue_declare(queue='reductions')
# NOTE: if the prefetch count is too high, some workers could starve. If it
# is too low, we make an unneccessary amount of requests to rabbitmq server
channel.basic_qos(prefetch_count=10) # limit num of unackd msgs on channel
channel.basic_consume(handle_message, queue='fstrings', no_ack=False)
channel.start_consuming()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""Generate a model capable of detecting subject-verb agreement errors"""
from pattern.en import lexeme, tenses
from pattern.en import pluralize, singularize
from textstat.textstat import textstat
from time import sleep
import hashlib
import os
import pika
import psycopg2
import random
import re
import sqlite3
import textacy
from sva_reducer import get_reduction
print("You just imported get_reduction from sva_reducer. This reduction"
"algorithm should be the same as the one used to create your previous"
"reducutions.")
RABBIT = os.environ.get('RABBITMQ_LOCATION', 'localhost')
DB_PASSWORD = os.environ.get('SVA_PASSWORD', '')
DB_NAME = os.environ.get('SVA_DB', 'sva')
DB_USER = os.environ.get('SVA_USER', DB_NAME)
# Indexing the sentence keys ################################################
print("Indexing sentence keys...")
# Connect to postgres
conn = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASSWORD)
cur = conn.cursor()
# Select unique reductions in order of regularity, must occur at least thrice
reductions = cur.execute('SELECT reduction, count(*) from reductions group by'
' reduction having count(*) > 2 order by count(*) desc;')
# with ~2 million total sentences the number of unique reductions was a little
# over 12k. ~5k had more than 2 occurrences
reduction2idx = {n: i for i, n[1] in enumerate(reductions)}
num_reductions = len(reduction2idx)
# close connections to database
cur.close()
conn.close()
# Vectorizing sentence keys ################################################
print('Vectorizing sentence keys...')
# vectors must be convertable to a numpy array.
# NOTE: storing the number of reductions on each object is not necessary and is
# increasing db size. The advantage is that each row can compute its numpy
# vector with no database calls which is why we choose it. We might undecide
# this at some point.
# Ex:
# {indices={5:1, 6:2, 500:1, 6003:2} num_reductions=5000}
# {indicies={index:count, index:count, ...} reductions=num_reductions}
def get_vector(string):
result = {'indices':{}, 'reductions':num_reductions}
for reduction in get_reduction(string):
index = reduction2idx.get(reduction)
if index:
result['indices'][index] = x['indices'].get(index, 0) + 1
result = repr(result) # transform to a string
return result
def handle_message(ch, method, properties, body):
labeled_sent_dict = dict(body.decode("utf-8"))
sent_str = labeled_sent_dict['sent_str']
label = labeled_sent_dict['label']
for vector in get_vector(sent_str):
labeled_vector = repr({'vector':vector, 'label':label})
channel.basic_publish(exchange='', routing_key='vectors',
body=labeled_vector)
ch.basic_ack(delivery_tag=method.delivery_tag)
if __name__ == '__main__':
connection = pika.BlockingConnection(pika.ConnectionParameters(RABBIT))
channel = connection.channel()
channel.queue_declare(queue='fstrings') # create queue if doesn't exist
channel.queue_declare(queue='reductions')
# NOTE: if the prefetch count is too high, some workers could starve. If it
# is too low, we make an unneccessary amount of requests to rabbitmq server
channel.basic_qos(prefetch_count=10) # limit num of unackd msgs on channel
channel.basic_consume(handle_message, queue='fstrings', no_ack=False)
channel.start_consuming()
| Python | 0 |
8463c22898210990e911580d217559efdbbfe5d7 | Make disk space test optional | earth_enterprise/src/fusion/portableglobe/cutter/cgi-bin/geecheck_tests/user_tests/disk_space_test.py | earth_enterprise/src/fusion/portableglobe/cutter/cgi-bin/geecheck_tests/user_tests/disk_space_test.py | #!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import xml.etree.ElementTree as ET
from geecheck_tests import common
# Need to use unittest2 for Python 2.6.
try:
import unittest2 as unittest
except ImportError:
import unittest
def getDiskInfo():
"""Returns disk usage represented as percent of total available."""
tree = ET.parse('/etc/opt/google/systemrc')
root = tree.getroot()
sys_rc = {}
for child in root:
sys_rc[child.tag] = child.text
asset_root = sys_rc["assetroot"];
mount_point = getMountPoint(asset_root)
available_space, size = getFsFreespace(mount_point)
percentage_used = (size - available_space) * 100 / size
return percentage_used
def getMountPoint(pathname):
"""Get the mount point of the filesystem containing pathname."""
pathname = os.path.normcase(os.path.realpath(pathname))
parent_device = path_device = os.stat(pathname).st_dev
while parent_device == path_device:
mount_point = pathname
pathname = os.path.dirname(pathname)
if pathname == mount_point:
break
return mount_point
def getFsFreespace(pathname):
"""Get the free space of the filesystem containing pathname."""
statvfs = os.statvfs(pathname)
# Size of filesystem in bytes
size = statvfs.f_frsize * statvfs.f_blocks
# Number of free bytes that ordinary users are allowed to use.
avail = statvfs.f_frsize * statvfs.f_bavail
return avail, size
class TestDiskSpace(unittest.TestCase):
@unittest.skipUnless(common.IsFusionInstalled(), 'Fusion is not installed')
def testAdequateDiskSpace(self):
"""Check that the remaining disk space is at least 20%."""
self.assertLessEqual(20, getDiskInfo())
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import xml.etree.ElementTree as ET
# Need to use unittest2 for Python 2.6.
try:
import unittest2 as unittest
except ImportError:
import unittest
def getDiskInfo():
"""Returns disk usage represented as percent of total available."""
tree = ET.parse('/etc/opt/google/systemrc')
root = tree.getroot()
sys_rc = {}
for child in root:
sys_rc[child.tag] = child.text
asset_root = sys_rc["assetroot"];
mount_point = getMountPoint(asset_root)
available_space, size = getFsFreespace(mount_point)
percentage_used = (size - available_space) * 100 / size
return percentage_used
def getMountPoint(pathname):
"""Get the mount point of the filesystem containing pathname."""
pathname = os.path.normcase(os.path.realpath(pathname))
parent_device = path_device = os.stat(pathname).st_dev
while parent_device == path_device:
mount_point = pathname
pathname = os.path.dirname(pathname)
if pathname == mount_point:
break
return mount_point
def getFsFreespace(pathname):
"""Get the free space of the filesystem containing pathname."""
statvfs = os.statvfs(pathname)
# Size of filesystem in bytes
size = statvfs.f_frsize * statvfs.f_blocks
# Number of free bytes that ordinary users are allowed to use.
avail = statvfs.f_frsize * statvfs.f_bavail
return avail, size
class TestDiskSpace(unittest.TestCase):
def testAdequateDiskSpace(self):
"""Check that the remaining disk space is at least 20%."""
self.assertLessEqual(20, getDiskInfo())
if __name__ == '__main__':
unittest.main()
| Python | 0.000007 |
88db3ab0e09639d07a0374f9e1877ae3a3669fd4 | Use more unittest.TestCase.assertIn instead of *.assertTrue(foo in bar). | utils/swift_build_support/tests/products/test_llvm.py | utils/swift_build_support/tests/products/test_llvm.py | # tests/products/test_llvm.py -----------------------------------*- python -*-
#
# This source file is part of the LLVM.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the LLVM project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of LLVM project authors
# ----------------------------------------------------------------------------
import argparse
import os
import shutil
import sys
import tempfile
import unittest
try:
# py2
from StringIO import StringIO
except ImportError:
# py3
from io import StringIO
from swift_build_support import shell
from swift_build_support.products import LLVM
from swift_build_support.toolchain import host_toolchain
from swift_build_support.workspace import Workspace
class LLVMTestCase(unittest.TestCase):
def setUp(self):
# Setup workspace
tmpdir1 = os.path.realpath(tempfile.mkdtemp())
tmpdir2 = os.path.realpath(tempfile.mkdtemp())
os.makedirs(os.path.join(tmpdir1, 'llvm'))
self.workspace = Workspace(source_root=tmpdir1,
build_root=tmpdir2)
# Setup toolchain
self.toolchain = host_toolchain()
self.toolchain.cc = '/path/to/cc'
self.toolchain.cxx = '/path/to/cxx'
# Setup args
self.args = argparse.Namespace(
llvm_targets_to_build='X86;ARM;AArch64;PowerPC;SystemZ',
llvm_assertions='true',
darwin_deployment_version_osx='10.9')
# Setup shell
shell.dry_run = True
self._orig_stdout = sys.stdout
self._orig_stderr = sys.stderr
self.stdout = StringIO()
self.stderr = StringIO()
sys.stdout = self.stdout
sys.stderr = self.stderr
def tearDown(self):
shutil.rmtree(self.workspace.build_root)
shutil.rmtree(self.workspace.source_root)
sys.stdout = self._orig_stdout
sys.stderr = self._orig_stderr
shell.dry_run = False
self.workspace = None
self.toolchain = None
self.args = None
def test_llvm_targets_to_build(self):
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
expected_targets = 'X86;ARM;AArch64;PowerPC;SystemZ'
expected_arg = '-DLLVM_TARGETS_TO_BUILD=%s' % expected_targets
self.assertIn(expected_arg, llvm.cmake_options)
def test_llvm_enable_assertions(self):
self.args.llvm_assertions = True
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
self.assertIn('-DLLVM_ENABLE_ASSERTIONS=TRUE', llvm.cmake_options)
self.args.llvm_assertions = False
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
self.assertIn('-DLLVM_ENABLE_ASSERTIONS=FALSE', llvm.cmake_options)
| # tests/products/test_llvm.py -----------------------------------*- python -*-
#
# This source file is part of the LLVM.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the LLVM project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of LLVM project authors
# ----------------------------------------------------------------------------
import argparse
import os
import shutil
import sys
import tempfile
import unittest
try:
# py2
from StringIO import StringIO
except ImportError:
# py3
from io import StringIO
from swift_build_support import shell
from swift_build_support.products import LLVM
from swift_build_support.toolchain import host_toolchain
from swift_build_support.workspace import Workspace
class LLVMTestCase(unittest.TestCase):
def setUp(self):
# Setup workspace
tmpdir1 = os.path.realpath(tempfile.mkdtemp())
tmpdir2 = os.path.realpath(tempfile.mkdtemp())
os.makedirs(os.path.join(tmpdir1, 'llvm'))
self.workspace = Workspace(source_root=tmpdir1,
build_root=tmpdir2)
# Setup toolchain
self.toolchain = host_toolchain()
self.toolchain.cc = '/path/to/cc'
self.toolchain.cxx = '/path/to/cxx'
# Setup args
self.args = argparse.Namespace(
llvm_targets_to_build='X86;ARM;AArch64;PowerPC;SystemZ',
llvm_assertions='true',
darwin_deployment_version_osx='10.9')
# Setup shell
shell.dry_run = True
self._orig_stdout = sys.stdout
self._orig_stderr = sys.stderr
self.stdout = StringIO()
self.stderr = StringIO()
sys.stdout = self.stdout
sys.stderr = self.stderr
def tearDown(self):
shutil.rmtree(self.workspace.build_root)
shutil.rmtree(self.workspace.source_root)
sys.stdout = self._orig_stdout
sys.stderr = self._orig_stderr
shell.dry_run = False
self.workspace = None
self.toolchain = None
self.args = None
def test_llvm_targets_to_build(self):
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
expected_targets = 'X86;ARM;AArch64;PowerPC;SystemZ'
expected_arg = '-DLLVM_TARGETS_TO_BUILD=%s' % expected_targets
self.assertTrue(expected_arg in llvm.cmake_options)
def test_llvm_enable_assertions(self):
self.args.llvm_assertions = True
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
self.assertTrue('-DLLVM_ENABLE_ASSERTIONS=TRUE' in
llvm.cmake_options)
self.args.llvm_assertions = False
llvm = LLVM(
args=self.args,
toolchain=self.toolchain,
source_dir='/path/to/src',
build_dir='/path/to/build')
self.assertTrue('-DLLVM_ENABLE_ASSERTIONS=FALSE' in
llvm.cmake_options)
| Python | 0 |
a602ed873d71253723f07dfa043d959cd247d734 | Add latest version of py-typing (#13287) | var/spack/repos/builtin/packages/py-typing/package.py | var/spack/repos/builtin/packages/py-typing/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTyping(PythonPackage):
"""This is a backport of the standard library typing module to Python
versions older than 3.6."""
homepage = "https://docs.python.org/3/library/typing.html"
url = "https://pypi.io/packages/source/t/typing/typing-3.7.4.1.tar.gz"
import_modules = ['typing']
version('3.7.4.1', sha256='91dfe6f3f706ee8cc32d38edbbf304e9b7583fb37108fef38229617f8b3eba23')
version('3.6.4', sha256='d400a9344254803a2368533e4533a4200d21eb7b6b729c173bc38201a74db3f2')
version('3.6.1', sha256='c36dec260238e7464213dcd50d4b5ef63a507972f5780652e835d0228d0edace')
depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTyping(PythonPackage):
"""This is a backport of the standard library typing module to Python
versions older than 3.6."""
homepage = "https://docs.python.org/3/library/typing.html"
url = "https://pypi.io/packages/source/t/typing/typing-3.6.1.tar.gz"
import_modules = ['typing']
version('3.6.4', sha256='d400a9344254803a2368533e4533a4200d21eb7b6b729c173bc38201a74db3f2')
version('3.6.1', sha256='c36dec260238e7464213dcd50d4b5ef63a507972f5780652e835d0228d0edace')
# You need Python 2.7 or 3.3+ to install the typing package
depends_on('python@2.7:2.8,3.3:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
| Python | 0 |
41a22d7bdf4fb7c9a50a6d51eab67e1459af5456 | Update wifi migration script to handle blocklist merges. | ichnaea/scripts/migrate.py | ichnaea/scripts/migrate.py | """
Manual migration script to move networks from old single wifi table
to new sharded wifi table structure.
"""
from collections import defaultdict
import sys
import time
from ichnaea.config import read_config
from ichnaea.db import (
configure_db,
db_worker_session,
)
from ichnaea.models.wifi import (
Wifi,
WifiShard,
)
def migrate(db, batch=1000, order='asc'):
added = 0
blocked = 0
deleted = 0
updated = 0
with db_worker_session(db, commit=True) as session:
order_func = getattr(Wifi.id, order)
old_wifis = (session.query(Wifi)
.order_by(order_func())
.limit(batch)).all()
sharded = defaultdict(list)
for old_wifi in old_wifis:
shard = WifiShard.shard_model(old_wifi.key)
sharded[shard].append(shard(
mac=old_wifi.key,
created=old_wifi.created,
modified=old_wifi.modified,
lat=old_wifi.lat,
lon=old_wifi.lon,
max_lat=old_wifi.max_lat,
min_lat=old_wifi.min_lat,
max_lon=old_wifi.max_lon,
min_lon=old_wifi.min_lon,
radius=old_wifi.range,
samples=old_wifi.total_measures,
))
moved_wifis = set()
for shard, wifis in sharded.items():
shard_macs = [wifi.mac for wifi in wifis]
existing = (session.query(shard)
.filter(shard.mac.in_(shard_macs))).all()
existing = dict([(e.mac, e) for e in existing])
for wifi in wifis:
if wifi.mac not in existing:
moved_wifis.add(wifi.mac)
session.add(wifi)
added += 1
else:
shard_wifi = existing.get(wifi.mac)
if shard_wifi.blocked():
moved_wifis.add(wifi.mac)
blocked += 1
else:
shard_wifi.created = min(
shard_wifi.created, wifi.created)
shard_wifi.modified = max(
shard_wifi.modified, wifi.modified)
shard_wifi.lat = wifi.lat
shard_wifi.lon = wifi.lon
shard_wifi.max_lat = wifi.max_lat
shard_wifi.min_lat = wifi.min_lat
shard_wifi.max_lon = wifi.max_lon
shard_wifi.min_lon = wifi.min_lon
shard_wifi.radius = wifi.radius
shard_wifi.samples = wifi.samples
moved_wifis.add(wifi.mac)
updated += 1
if moved_wifis:
query = (session.query(Wifi)
.filter(Wifi.key.in_(list(moved_wifis))))
deleted = query.delete(synchronize_session=False)
else:
deleted = 0
return (added, deleted, updated, blocked)
def main(db, batch=1000, repeat=1, wait=0.3, order='asc'):
for i in range(repeat):
start = time.time()
print('Start: %s - %s' % (
i, time.strftime('%H:%m:%S', time.gmtime(start))))
added, deleted, updated, blocked = migrate(
db, batch=batch, order=order)
end = int((time.time() - start) * 1000)
print('Added: %s, Deleted: %s, Updated: %s, Blocked: %s' % (
added, deleted, updated, blocked))
print('Took: %s ms\n' % end)
sys.stdout.flush()
time.sleep(wait)
print('End')
if __name__ == '__main__':
argv = sys.argv
batch = 1000
repeat = 1
wait = 0.3
order = 'asc'
if len(argv) > 1:
batch = int(argv[-1])
if len(argv) > 2:
repeat = int(argv[-2])
if len(argv) > 3:
wait = float(argv[-3])
if len(argv) > 4:
order = str(argv[-4]).strip()
app_config = read_config()
db = configure_db(app_config.get('database', 'rw_url'))
main(db, batch=batch, repeat=repeat, wait=wait, order=order)
| """
Manual migration script to move networks from old single wifi table
to new sharded wifi table structure.
"""
from collections import defaultdict
import sys
import time
from ichnaea.config import read_config
from ichnaea.db import (
configure_db,
db_worker_session,
)
from ichnaea.models.wifi import (
Wifi,
WifiShard,
)
def migrate(db, batch=1000):
added = 0
deleted = 0
skipped = 0
with db_worker_session(db, commit=True) as session:
old_wifis = (session.query(Wifi)
.order_by(Wifi.id.desc())
.limit(batch)).all()
sharded = defaultdict(list)
for old_wifi in old_wifis:
shard = WifiShard.shard_model(old_wifi.key)
sharded[shard].append(shard(
mac=old_wifi.key,
created=old_wifi.created,
modified=old_wifi.modified,
lat=old_wifi.lat,
lon=old_wifi.lon,
max_lat=old_wifi.max_lat,
min_lat=old_wifi.min_lat,
max_lon=old_wifi.max_lon,
min_lon=old_wifi.min_lon,
radius=old_wifi.range,
samples=old_wifi.total_measures,
))
moved_wifis = set()
for shard, wifis in sharded.items():
shard_macs = set([wifi.mac for wifi in wifis])
existing = (session.query(shard.mac)
.filter(shard.mac.in_(list(shard_macs)))).all()
existing = set([e.mac for e in existing])
for wifi in wifis:
if wifi.mac not in existing:
moved_wifis.add(wifi.mac)
session.add(wifi)
added += 1
else:
skipped += 1
if moved_wifis:
query = (session.query(Wifi)
.filter(Wifi.key.in_(list(moved_wifis))))
deleted = query.delete(synchronize_session=False)
else:
deleted = 0
return (added, deleted, skipped)
def main(db, repeat=1, batch=1000):
for i in range(repeat):
start = time.time()
print('Start: %s' % time.strftime('%H:%m', time.gmtime(start)))
added, deleted, skipped = migrate(db, batch=batch)
end = int((time.time() - start) * 1000)
print('Added: %s, Deleted: %s, Skipped: %s' % (
added, deleted, skipped))
print('Took: %s ms\n' % end)
print('End')
if __name__ == '__main__':
argv = sys.argv
batch = 1000
repeat = 1
if len(argv) > 1:
batch = int(argv[-1])
if len(argv) > 2:
repeat = int(argv[-2])
app_config = read_config()
db = configure_db(app_config.get('database', 'rw_url'))
main(db, repeat=repeat, batch=batch)
| Python | 0 |
d4adaaf52a81c0d471657672fee5b5ed2ad4e306 | update export agency stats | export_agency_stats.py | export_agency_stats.py | #!/usr/bin/env python2
from time import sleep
import requests
import unicodecsv
from utils import get_api_key
token = get_api_key()
url = 'https://www.muckrock.com/api_v1/'
headers = {'Authorization': 'Token %s' % token, 'content-type': 'application/json'}
next_ = url + 'agency'
fields = (
"id",
"name",
"slug",
"status",
"twitter",
"twitter_handles",
"parent",
"appeal_agency",
"url",
"foia_logs",
"foia_guide",
"public_notes",
"absolute_url",
"average_response_time",
"fee_rate",
"success_rate",
"has_portal",
"has_email",
"has_fax",
"has_address",
"number_requests",
"number_requests_completed",
"number_requests_rejected",
"number_requests_no_docs",
"number_requests_ack",
"number_requests_resp",
"number_requests_fix",
"number_requests_appeal",
"number_requests_pay",
"number_requests_partial",
"number_requests_lawsuit",
"number_requests_withdrawn"
)
jurisdiction_fields = (
'name',
'parent',
'level',
)
page = 1
# make this true while exporting data to not crash on errors
SUPRESS_ERRORS = False
# This allows you to cach jurisdiction look ups
jurisdictions = {}
def get_jurisdiction(jurisdiction_id):
global jurisdictions
if jurisdiction_id in jurisdictions:
return jurisdictions[jurisdiction_id]
else:
# print 'getting jurisdiction', jurisdiction_id
sleep(1) # rate limit
r = requests.get(url + 'jurisdiction/' + str(jurisdiction_id), headers=headers)
jurisdiction_json = r.json()
if jurisdiction_json['parent']: # USA has no paremt
parent = get_jurisdiction(jurisdiction_json['parent'])
jurisdiction_json['parent'] = parent['name'] # replace parent id with parent name in jurisdiction json
jurisdictions[jurisdiction_id] = jurisdiction_json
return jurisdiction_json
csv_file = open('agency_stats.csv', 'w')
csv_writer = unicodecsv.writer(csv_file)
jurisdiction_field_names = tuple('jurisdiction {}'.format(f) for f in jurisdiction_fields)
csv_writer.writerow(fields + jurisdiction_field_names)
while next_ is not None:
r = requests.get(next_, headers=headers)
try:
json = r.json()
next_ = json['next']
for datum in json['results']:
agency_values = [datum[field] for field in fields]
jurisdiction = get_jurisdiction(datum['jurisdiction'])
jurisdiction_values = [jurisdiction[field] for field in jurisdiction_fields]
csv_writer.writerow(agency_values + jurisdiction_values)
print 'Page %d of %d' % (page, json['count'] / 20 + 1)
page += 1
except Exception as e:
print 'Error', e
if not SUPRESS_ERRORS:
raise
| #!/usr/bin/env python2
import requests
import unicodecsv
from utils import get_api_key
token = get_api_key()
url = 'https://www.muckrock.com/api_v1/'
headers = {'Authorization': 'Token %s' % token, 'content-type': 'application/json'}
next_ = url + 'agency'
fields = (
"id",
"name",
"slug",
"status",
"twitter",
"twitter_handles",
"parent",
"appeal_agency",
"url",
"foia_logs",
"foia_guide",
"public_notes",
"absolute_url",
"average_response_time",
"fee_rate",
"success_rate",
"has_portal",
"has_email",
"has_fax",
"has_address",
"number_requests",
"number_requests_completed",
"number_requests_rejected",
"number_requests_no_docs",
"number_requests_ack",
"number_requests_resp",
"number_requests_fix",
"number_requests_appeal",
"number_requests_pay",
"number_requests_partial",
"number_requests_lawsuit",
"number_requests_withdrawn"
)
jurisdiction_fields = (
'name',
'parent',
'level',
)
page = 1
# This allows you to cach jurisdiction look ups
jurisdictions = {}
def get_jurisdiction(jurisdiction_id):
global jurisdictions
if jurisdiction_id in jurisdictions:
return jurisdictions[jurisdiction_id]
else:
# print 'getting jurisdiction', jurisdiction_id
r = requests.get(url + 'jurisdiction/' + str(jurisdiction_id), headers=headers)
jurisdiction_json = r.json()
if jurisdiction_json['parent']: # USA has no paremt
parent = get_jurisdiction(jurisdiction_json['parent'])
jurisdiction_json['parent'] = parent['name'] # replace parent id with parent name in jurisdiction json
jurisdictions[jurisdiction_id] = jurisdiction_json
return jurisdiction_json
csv_file = open('agency_stats.csv', 'w')
csv_writer = unicodecsv.writer(csv_file)
jurisdiction_field_names = tuple('jurisdiction {}'.format(f) for f in jurisdiction_fields)
csv_writer.writerow(fields + jurisdiction_field_names)
while next_ is not None:
r = requests.get(next_, headers=headers)
try:
json = r.json()
next_ = json['next']
for datum in json['results']:
agency_values = [datum[field] for field in fields]
jurisdiction = get_jurisdiction(datum['jurisdiction'])
jurisdiction_values = [jurisdiction[field] for field in jurisdiction_fields]
csv_writer.writerow(agency_values + jurisdiction_values)
print 'Page %d of %d' % (page, json['count'] / 20 + 1)
break
page += 1
except Exception as e:
print 'Error', e
| Python | 0 |
24f485f256279e2fc86c12cdf383c93850f7f328 | Add utility method to check if message sender is admin | IrcMessage.py | IrcMessage.py | import time
import Constants
class IrcMessage(object):
"""Parses incoming messages into usable parts like the command trigger"""
def __init__(self, messageType, bot, user=None, source=None, rawText=""):
self.createdAt = time.time()
#MessageType is what kind of message it is. A 'say', 'action' or 'quit', for instance
self.messageType = messageType
self.bot = bot
#Info about the user that sent the message
self.user = user
if self.user and '!' in self.user:
self.userNickname, self.userAddress = self.user.split("!", 1)
else:
self.userNickname = None
self.userAddress = None
#Info about the source the message came from, either a channel, or a PM from a user
#If there is no source provided, or the source isn't a channel, assume it's a PM
if not source or source[0] not in Constants.CHANNEL_PREFIXES:
self.source = self.userNickname
self.isPrivateMessage = True
else:
self.source = source
self.isPrivateMessage = False
#Handle the text component, including seeing if it starts with the bot's command character
self.rawText = rawText.strip()
#There isn't always text
if not self.rawText:
self.trigger = None
self.message = ""
self.messageParts = []
self.messagePartsLength = 0
else:
#Collect information about the possible command in this message
if self.rawText.startswith(bot.commandPrefix):
#Get the part from the end of the command prefix to the first space (the 'help' part of '!help say')
self.trigger = self.rawText[bot.commandPrefixLength:].split(" ", 1)[0].lower()
self.message = self.rawText[bot.commandPrefixLength + len(self.trigger):].lstrip()
#Check if the text starts with the nick of the bot, 'DideRobot: help'
elif bot.nickname and self.rawText.startswith(bot.nickname + ": ") and len(self.rawText) > len(bot.nickname) + 2:
self.trigger = self.rawText.split(" ", 2)[1].strip().lower()
self.message = self.rawText[len(bot.nickname) + len(self.trigger) + 3:].lstrip() #+3 because of the colon and space
#In private messages we should respond too if there's no command character, because there's no other reason to PM a bot
elif self.isPrivateMessage:
self.trigger = self.rawText.split(" ", 1)[0].lower()
self.message = self.rawText[len(self.trigger)+1:]
else:
self.trigger = None
self.message = self.rawText
if self.message:
self.messageParts = self.message.split(" ")
self.messagePartsLength = len(self.messageParts)
else:
self.messageParts = []
self.messagePartsLength = 0
def reply(self, replytext, messagetype=None):
if not messagetype:
#Reply with a notice to a user's notice (not a channel one!), and with a 'say' to anything else
messagetype = 'notice' if self.messageType == 'notice' and self.isPrivateMessage else 'say'
self.bot.sendMessage(self.source, replytext, messagetype)
def isSenderAdmin(self):
"""
:return: True if the person that sent this message is a bot admin, False otherwise
"""
return self.bot.isUserAdmin(self.user, self.userNickname, self.userAddress)
| import time
import Constants
class IrcMessage(object):
"""Parses incoming messages into usable parts like the command trigger"""
def __init__(self, messageType, bot, user=None, source=None, rawText=""):
self.createdAt = time.time()
#MessageType is what kind of message it is. A 'say', 'action' or 'quit', for instance
self.messageType = messageType
self.bot = bot
#Info about the user that sent the message
self.user = user
if self.user and '!' in self.user:
self.userNickname, self.userAddress = self.user.split("!", 1)
else:
self.userNickname = None
self.userAddress = None
#Info about the source the message came from, either a channel, or a PM from a user
#If there is no source provided, or the source isn't a channel, assume it's a PM
if not source or source[0] not in Constants.CHANNEL_PREFIXES:
self.source = self.userNickname
self.isPrivateMessage = True
else:
self.source = source
self.isPrivateMessage = False
#Handle the text component, including seeing if it starts with the bot's command character
self.rawText = rawText.strip()
#There isn't always text
if not self.rawText:
self.trigger = None
self.message = ""
self.messageParts = []
self.messagePartsLength = 0
else:
#Collect information about the possible command in this message
if self.rawText.startswith(bot.commandPrefix):
#Get the part from the end of the command prefix to the first space (the 'help' part of '!help say')
self.trigger = self.rawText[bot.commandPrefixLength:].split(" ", 1)[0].lower()
self.message = self.rawText[bot.commandPrefixLength + len(self.trigger):].lstrip()
#Check if the text starts with the nick of the bot, 'DideRobot: help'
elif bot.nickname and self.rawText.startswith(bot.nickname + ": ") and len(self.rawText) > len(bot.nickname) + 2:
self.trigger = self.rawText.split(" ", 2)[1].strip().lower()
self.message = self.rawText[len(bot.nickname) + len(self.trigger) + 3:].lstrip() #+3 because of the colon and space
#In private messages we should respond too if there's no command character, because there's no other reason to PM a bot
elif self.isPrivateMessage:
self.trigger = self.rawText.split(" ", 1)[0].lower()
self.message = self.rawText[len(self.trigger)+1:]
else:
self.trigger = None
self.message = self.rawText
if self.message:
self.messageParts = self.message.split(" ")
self.messagePartsLength = len(self.messageParts)
else:
self.messageParts = []
self.messagePartsLength = 0
def reply(self, replytext, messagetype=None):
if not messagetype:
#Reply with a notice to a user's notice (not a channel one!), and with a 'say' to anything else
messagetype = 'notice' if self.messageType == 'notice' and self.isPrivateMessage else 'say'
self.bot.sendMessage(self.source, replytext, messagetype)
| Python | 0.000005 |
6641fd1275c27dfb27787ed25b80af3b6ba14b9f | debug by further reduction | apdflash/scarecrowDreams.py | apdflash/scarecrowDreams.py | print "hellow world"
| import sys,os
sys.path.insert(0, '../helpers')
from mpi4py import MPI
| Python | 0 |
14b1f9bde45b66f8752778469f1daae77b49f4e0 | Add comment | bluebottle/bb_orders/signals.py | bluebottle/bb_orders/signals.py | from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.dispatch.dispatcher import Signal
from django_fsm.signals import post_transition
from bluebottle.donations.models import Donation
from bluebottle.payments.models import OrderPayment
from bluebottle.payments.services import PaymentService
from bluebottle.utils.utils import StatusDefinition
order_requested = Signal(providing_args=["order"])
@receiver(post_save, weak=False, sender=Donation,
dispatch_uid='donation_model')
def update_order_amount_post_save(sender, instance, **kwargs):
instance.order.update_total()
@receiver(post_delete, weak=False, sender=Donation,
dispatch_uid='donation_model')
def update_order_amount(sender, instance, **kwargs):
# If we're deleting order and donations do nothing.
# If we're just deleting a donation then we should update the order total.
# Import it here to avoid circular imports
from bluebottle.orders.models import Order
try:
instance.order.update_total()
except Order.DoesNotExist:
pass
@receiver(post_transition, sender=OrderPayment)
def _order_payment_status_changed(sender, instance, **kwargs):
"""
TODO: Here we need to get the status from the Order Payment and update the
associated Order.
"""
# Get the Order from the OrderPayment
order = instance.order
# Get the mapped status OrderPayment to Order
new_order_status = order.get_status_mapping(kwargs['target'])
order.transition_to(new_order_status)
@receiver(order_requested)
def _order_requested(sender, order, **kwargs):
# Check the status at PSP if status is still locked
if order.status == StatusDefinition.LOCKED:
order_payment = OrderPayment.get_latest_by_order(order)
service = PaymentService(order_payment)
service.check_payment_status()
| from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.dispatch.dispatcher import Signal
from django_fsm.signals import post_transition
from bluebottle.donations.models import Donation
from bluebottle.payments.models import OrderPayment
from bluebottle.payments.services import PaymentService
from bluebottle.utils.utils import StatusDefinition
order_requested = Signal(providing_args=["order"])
@receiver(post_save, weak=False, sender=Donation,
dispatch_uid='donation_model')
def update_order_amount_post_save(sender, instance, **kwargs):
instance.order.update_total()
@receiver(post_delete, weak=False, sender=Donation,
dispatch_uid='donation_model')
def update_order_amount(sender, instance, **kwargs):
# If we're deleting order and donations do nothing.
# If we're just deleting a donation then we should update the order total.
from bluebottle.orders.models import Order
try:
instance.order.update_total()
except Order.DoesNotExist:
pass
@receiver(post_transition, sender=OrderPayment)
def _order_payment_status_changed(sender, instance, **kwargs):
"""
TODO: Here we need to get the status from the Order Payment and update the
associated Order.
"""
# Get the Order from the OrderPayment
order = instance.order
# Get the mapped status OrderPayment to Order
new_order_status = order.get_status_mapping(kwargs['target'])
order.transition_to(new_order_status)
@receiver(order_requested)
def _order_requested(sender, order, **kwargs):
# Check the status at PSP if status is still locked
if order.status == StatusDefinition.LOCKED:
order_payment = OrderPayment.get_latest_by_order(order)
service = PaymentService(order_payment)
service.check_payment_status()
| Python | 0 |
5e371a6aea1c3c0eb126849ef4c5855202b05cfa | Use standard name for output files | packages/cardpay-reward-programs/cardpay_reward_programs/utils.py | packages/cardpay-reward-programs/cardpay_reward_programs/utils.py | import tempfile
from pathlib import PosixPath
import pyarrow.parquet as pq
import yaml
from cloudpathlib import AnyPath, CloudPath
from cachetools import cached, TTLCache
def get_local_file(file_location):
if isinstance(file_location, PosixPath):
return file_location.as_posix()
elif isinstance(file_location, CloudPath):
if file_location._local.exists():
# Our files are immutable so if the local cache exists
# we can just return that
return file_location._local.as_posix()
else:
# Otherwise this downloads the file and returns the local path
return file_location.fspath
else:
raise Exception("Unsupported path type")
@cached(TTLCache(maxsize=1000, ttl=60))
def get_latest_details(config_location):
with open(config_location / "latest.yaml", "r") as stream:
return yaml.safe_load(stream)
def get_partition_iterator(min_partition, max_partition, partition_sizes):
for partition_size in sorted(partition_sizes, reverse=True):
start_partition_allowed = (min_partition // partition_size) * partition_size
end_partition_allowed = (max_partition // partition_size) * partition_size
last_max_partition = None
for start_partition in range(
start_partition_allowed, end_partition_allowed, partition_size
):
last_max_partition = start_partition + partition_size
yield partition_size, start_partition, start_partition + partition_size
if last_max_partition is not None:
min_partition = last_max_partition
def get_partition_files(config_location, table, min_partition, max_partition):
# Get config
with open(get_local_file(config_location / "config.yaml"), "r") as stream:
config = yaml.safe_load(stream)
latest = get_latest_details(config_location)
latest_block = latest.get("latest_block")
# Get table
table_config = config["tables"][table]
partition_sizes = sorted(table_config["partition_sizes"], reverse=True)
table_dir = config_location.joinpath(
"data", f"subgraph={latest['subgraph_deployment']}", f"table={table}"
)
files = []
for partition_size, start_partition, end_partition in get_partition_iterator(
min_partition, latest_block, partition_sizes):
if start_partition < max_partition:
files.append(table_dir.joinpath(
f"partition_size={partition_size}",
f"start_partition={start_partition}",
f"end_partition={end_partition}",
"data.parquet",
))
return files
def get_files(config_location, table, min_partition, max_partition):
file_list = get_partition_files(AnyPath(config_location), table, min_partition, max_partition)
return list(map(get_local_file, file_list))
def get_parameters(parameters):
"""
TODO: take hex blob as input instead of parameters
"""
core_parameters = parameters.get("core")
user_defined_parameters = parameters.get("user_defined")
return core_parameters, user_defined_parameters
def get_payment_cycle(start_block, end_block, payment_cycle_length):
"""
by default, the payment cycle is the tail of the compute range
"""
return max(end_block, start_block + payment_cycle_length)
def write_parquet_file(file_location, table):
# Pyarrow can't take a file object so we have to write to a temp file
# and upload directly
if isinstance(file_location, CloudPath):
with tempfile.TemporaryDirectory() as temp_dir:
pq_file_location = AnyPath(temp_dir) / "results.parquet"
pq.write_table(table, pq_file_location)
file_location.joinpath("results.parquet").upload_from(pq_file_location)
else:
pq.write_table(table, file_location / "results.parquet")
| import tempfile
from pathlib import PosixPath
import pyarrow.parquet as pq
import yaml
from cloudpathlib import AnyPath, CloudPath
from cachetools import cached, TTLCache
def get_local_file(file_location):
if isinstance(file_location, PosixPath):
return file_location.as_posix()
elif isinstance(file_location, CloudPath):
if file_location._local.exists():
# Our files are immutable so if the local cache exists
# we can just return that
return file_location._local.as_posix()
else:
# Otherwise this downloads the file and returns the local path
return file_location.fspath
else:
raise Exception("Unsupported path type")
@cached(TTLCache(maxsize=1000, ttl=60))
def get_latest_details(config_location):
with open(config_location / "latest.yaml", "r") as stream:
return yaml.safe_load(stream)
def get_partition_iterator(min_partition, max_partition, partition_sizes):
for partition_size in sorted(partition_sizes, reverse=True):
start_partition_allowed = (min_partition // partition_size) * partition_size
end_partition_allowed = (max_partition // partition_size) * partition_size
last_max_partition = None
for start_partition in range(
start_partition_allowed, end_partition_allowed, partition_size
):
last_max_partition = start_partition + partition_size
yield partition_size, start_partition, start_partition + partition_size
if last_max_partition is not None:
min_partition = last_max_partition
def get_partition_files(config_location, table, min_partition, max_partition):
# Get config
with open(get_local_file(config_location / "config.yaml"), "r") as stream:
config = yaml.safe_load(stream)
latest = get_latest_details(config_location)
latest_block = latest.get("latest_block")
# Get table
table_config = config["tables"][table]
partition_sizes = sorted(table_config["partition_sizes"], reverse=True)
table_dir = config_location.joinpath(
"data", f"subgraph={latest['subgraph_deployment']}", f"table={table}"
)
files = []
for partition_size, start_partition, end_partition in get_partition_iterator(
min_partition, latest_block, partition_sizes):
if start_partition < max_partition:
files.append(table_dir.joinpath(
f"partition_size={partition_size}",
f"start_partition={start_partition}",
f"end_partition={end_partition}",
"data.parquet",
))
return files
def get_files(config_location, table, min_partition, max_partition):
file_list = get_partition_files(AnyPath(config_location), table, min_partition, max_partition)
return list(map(get_local_file, file_list))
def get_parameters(parameters):
"""
TODO: take hex blob as input instead of parameters
"""
core_parameters = parameters.get("core")
user_defined_parameters = parameters.get("user_defined")
return core_parameters, user_defined_parameters
def get_payment_cycle(start_block, end_block, payment_cycle_length):
"""
by default, the payment cycle is the tail of the compute range
"""
return max(end_block, start_block + payment_cycle_length)
def write_parquet_file(file_location, table):
# Pyarrow can't take a file object so we have to write to a temp file
# and upload directly
if isinstance(file_location, CloudPath):
with tempfile.TemporaryDirectory() as temp_dir:
pq_file_location = AnyPath(temp_dir).joinpath("data.parquet")
pq.write_table(table, pq_file_location)
file_location.joinpath("data.parquet").upload_from(pq_file_location)
else:
pq.write_table(table, file_location / "results.parquet")
| Python | 0.000009 |
b52c8ab9c87c2de5feec91164d54b4f51ad5b759 | Add queryEnvKey | packaging/setup/ovirt_engine_setup/dialog.py | packaging/setup/ovirt_engine_setup/dialog.py | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Dialog."""
import gettext
from otopi import util
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
def queryBoolean(
dialog,
name,
note,
prompt,
true=_('Yes'),
false=_('No'),
default=False,
):
return dialog.queryString(
name=name,
note=note,
prompt=prompt,
validValues=(true, false),
caseSensitive=False,
default=true if default else false,
) != false.lower()
@util.export
def queryEnvKey(
dialog,
logger,
env,
key,
note,
tests=None,
validValues=None,
caseSensitive=True,
hidden=False,
prompt=False,
default=None,
):
"""Query string and validate it.
Params:
dialog - plugin.dialog
logger - plugin.logger
env - a dict to store result into, usually plugin.environment
key - key in env to store result into
if env[key] is set, do not query
note - prompt to be displayed to the user
tests - tests to run on the input value
tests is a list of dicts, each having:
'test' -- Accepts a value and returns an error message if bad
'is_error' -- True (default) if a failure is an error, else a warning
If True and a test failed, ask user again. Otherwise prompt user
to accept value anyway.
'warn_note' -- Message displayed if warning, defaults to 'Accept?'
'interactive_only' -- Do not run test if env[key] is already set
"""
interactive = key not in env or env[key] is None
valid = False
while not valid:
if interactive:
value = dialog.queryString(
name='queryEnvKey_input:{key}'.format(key=key),
note=note,
validValues=validValues,
caseSensitive=caseSensitive,
hidden=hidden,
prompt=prompt,
default=default,
)
else:
value = env[key]
valid = True
for test in tests if tests else ():
if not interactive and test.get('interactive_only', False):
continue
msg = test['test'](value)
if msg:
if interactive:
if test.get('is_error', True):
logger.error(msg)
valid = False
break
else:
logger.warning(msg)
if not queryBoolean(
dialog=dialog,
name='queryEnvKey_warnverify:{key}'.format(
key=key
),
note='{msg} (@VALUES@) [@DEFAULT@]: '.format(
msg=test.get('warn_note', _('OK? ')),
),
prompt=True,
default=False,
):
valid = False
break
else: # Not interactive
if test.get('is_error', True):
logger.error(msg)
raise RuntimeError(msg)
else:
logger.warning(msg)
env[key] = value
return value
@util.export
def queryPassword(
dialog,
logger,
env,
key,
note,
verify_same=True,
note_verify_same=None,
error_msg_not_same=None,
verify_hard=True,
warn_not_hard=None,
tests=None,
):
"""Get a password from the user.
Params:
dialog - plugin.dialog
logger - plugin.logger
env - a dict to store result into, usually plugin.environment
key - key in env to store result into
if env[key] is set, do not query, but do run check and
warn if verify_hard is True and password not hard enough
note - prompt to be displayed to the user
verify_same - if true, query user to input password again
and verify that they are the same
note_verify_same - prompt to be displayed when querying again
error_msg_not_same - error message to be displayed if not same
verify_hard - optionally check that it is hard enough,
if cracklib is installed
warn_not_hard - warning to be displayed if not hard enough
if string includes '{error}', it will be replaced by
actual error returned from cracklib
tests - extra tests to run, in the format of queryEnvKey
"""
def password_hard_enough(password):
res = ''
try:
import cracklib
cracklib.FascistCheck(password)
except ImportError:
logger.debug(
'cannot import cracklib',
exc_info=True,
)
except ValueError as error:
res = warn_not_hard.format(error=error)
return res
if not note_verify_same:
note_verify_same = _('Please confirm password: ')
if not error_msg_not_same:
error_msg_not_same = _('Passwords do not match')
if not warn_not_hard:
warn_not_hard = _('Password is weak: {error}')
return queryEnvKey(
dialog=dialog,
logger=logger,
env=env,
key=key,
note=note,
prompt=True,
hidden=True,
tests=(
{
'test': lambda(value): (
'' if value == queryEnvKey(
dialog=dialog,
logger=logger,
env={},
key='second_password',
note=note_verify_same,
prompt=True,
hidden=True,
) else error_msg_not_same
),
'interactive_only': True,
},
{
'test': password_hard_enough,
'is_error': False,
'warn_note': 'Use weak password? ',
},
),
)
# vim: expandtab tabstop=4 shiftwidth=4
| #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Dialog."""
import gettext
from otopi import util
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
def queryBoolean(
dialog,
name,
note,
prompt,
true=_('Yes'),
false=_('No'),
default=False,
):
return dialog.queryString(
name=name,
note=note,
prompt=prompt,
validValues=(true, false),
caseSensitive=False,
default=true if default else false,
) != false.lower()
# vim: expandtab tabstop=4 shiftwidth=4
| Python | 0.000001 |
cd0123b2cce81c063f42ff5a9f80665b602bdefd | use the wright product | addons/hr_timesheet_project/wizard/timesheet_hour_encode.py | addons/hr_timesheet_project/wizard/timesheet_hour_encode.py | ##############################################################################
#
# Copyright (c) 2004 TINY SPRL. (http://tiny.be) All Rights Reserved.
# Fabien Pinckaers <fp@tiny.Be>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import wizard
import netsvc
import time
import pooler
from osv import osv
def _action_line_create(self, cr, uid, data, context):
tw = pooler.get_pool(cr.dbname).get('project.task.work')
ids = tw.search(cr, uid, [('user_id','=',uid), ('date','>=',time.strftime('%Y-%m-%d 00:00:00')), ('date','<=',time.strftime('%Y-%m-%d 23:59:59'))])
ts = pooler.get_pool(cr.dbname).get('hr.analytic.timesheet')
for work in tw.browse(cr, uid, ids, context):
if work.task_id.project_id.category_id:
unit_id = ts._getEmployeeUnit(cr, uid, context)
product_id = ts._getEmployeeProduct(cr, uid, context)
res = {
'name': work.name,
'date': time.strftime('%Y-%m-%d'),
'unit_amount': work.hours,
'product_uom_id': unit_id,
'product_id': product_id,
'amount': work.hours or 0.0,
'account_id': work.task_id.project_id.category_id.id
}
res2 = ts.on_change_unit_amount(cr, uid, False, product_id, work.hours or 0.0,unit_id, context)
if res2:
res.update(res2['value'])
id = ts.create(cr, uid, res, context)
else:
print 'not found', work.task_id.project_id.name
value = {
'domain': "[('user_id','=',%d),('date','>=','%s'), ('date','<=','%s')]" % (uid, time.strftime('%Y-%m-%d 00:00:00'), time.strftime('%Y-%m-%d 23:59:59')),
'name': 'Create Analytic Line',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'hr.analytic.timesheet',
'view_id': False,
'type': 'ir.actions.act_window'
}
return value
class wiz_hr_timesheet_project(wizard.interface):
states = {
'init': {
'actions': [],
'result': {'type': 'action', 'action': _action_line_create, 'state':'end'}
}
}
wiz_hr_timesheet_project('hr_timesheet_project.encode.hour')
| ##############################################################################
#
# Copyright (c) 2004 TINY SPRL. (http://tiny.be) All Rights Reserved.
# Fabien Pinckaers <fp@tiny.Be>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import wizard
import netsvc
import time
import pooler
from osv import osv
def _action_line_create(self, cr, uid, data, context):
tw = pooler.get_pool(cr.dbname).get('project.task.work')
ids = tw.search(cr, uid, [('user_id','=',uid), ('date','>=',time.strftime('%Y-%m-%d 00:00:00')), ('date','<=',time.strftime('%Y-%m-%d 23:59:59'))])
ts = pooler.get_pool(cr.dbname).get('hr.analytic.timesheet')
for work in tw.browse(cr, uid, ids, context):
if work.task_id.project_id.category_id:
unit_id = ts._getEmployeeUnit(cr, uid, context)
product_id = ts._getEmployeeUnit(cr, uid, context)
res = {
'name': work.name,
'date': time.strftime('%Y-%m-%d'),
'unit_amount': work.hours,
'product_uom_id': unit_id,
'product_id': product_id,
'amount': work.hours or 0.0,
'account_id': work.task_id.project_id.category_id.id
}
res2 = ts.on_change_unit_amount(cr, uid, False, product_id, work.hours or 0.0,unit_id, context)
if res2:
res.update(res2['value'])
id = ts.create(cr, uid, res, context)
else:
print 'not found', work.task_id.project_id.name
value = {
'domain': "[('user_id','=',%d),('date','>=','%s'), ('date','<=','%s')]" % (uid, time.strftime('%Y-%m-%d 00:00:00'), time.strftime('%Y-%m-%d 23:59:59')),
'name': 'Create Analytic Line',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'hr.analytic.timesheet',
'view_id': False,
'type': 'ir.actions.act_window'
}
return value
class wiz_hr_timesheet_project(wizard.interface):
states = {
'init': {
'actions': [],
'result': {'type': 'action', 'action': _action_line_create, 'state':'end'}
}
}
wiz_hr_timesheet_project('hr_timesheet_project.encode.hour')
| Python | 0.000214 |
5a9358147c9930faf5b6d153344fb012c4a8f304 | Add MSISDN in the phone_number provider | faker/providers/phone_number/pt_BR/__init__.py | faker/providers/phone_number/pt_BR/__init__.py | from __future__ import unicode_literals
from .. import Provider as PhoneNumberProvider
class Provider(PhoneNumberProvider):
formats = (
'+55 (011) #### ####',
'+55 (021) #### ####',
'+55 (031) #### ####',
'+55 (041) #### ####',
'+55 (051) #### ####',
'+55 (061) #### ####',
'+55 (071) #### ####',
'+55 (081) #### ####',
'+55 11 #### ####',
'+55 21 #### ####',
'+55 31 #### ####',
'+55 41 #### ####',
'+55 51 ### ####',
'+55 61 #### ####',
'+55 71 #### ####',
'+55 81 #### ####',
'+55 (011) ####-####',
'+55 (021) ####-####',
'+55 (031) ####-####',
'+55 (041) ####-####',
'+55 (051) ####-####',
'+55 (061) ####-####',
'+55 (071) ####-####',
'+55 (081) ####-####',
'+55 11 ####-####',
'+55 21 ####-####',
'+55 31 ####-####',
'+55 41 ####-####',
'+55 51 ### ####',
'+55 61 ####-####',
'+55 71 ####-####',
'+55 81 ####-####',
'(011) #### ####',
'(021) #### ####',
'(031) #### ####',
'(041) #### ####',
'(051) #### ####',
'(061) #### ####',
'(071) #### ####',
'(081) #### ####',
'11 #### ####',
'21 #### ####',
'31 #### ####',
'41 #### ####',
'51 ### ####',
'61 #### ####',
'71 #### ####',
'81 #### ####',
'(011) ####-####',
'(021) ####-####',
'(031) ####-####',
'(041) ####-####',
'(051) ####-####',
'(061) ####-####',
'(071) ####-####',
'(081) ####-####',
'11 ####-####',
'21 ####-####',
'31 ####-####',
'41 ####-####',
'51 ### ####',
'61 ####-####',
'71 ####-####',
'81 ####-####',
'#### ####',
'####-####',
)
msisdn_formats = (
'5511#########',
'5521#########',
'5531#########',
'5541#########',
'5551#########',
'5561#########',
'5571#########',
'5581#########',
)
@classmethod
def msisdn(cls):
""" https://en.wikipedia.org/wiki/MSISDN """
return cls.numerify(cls.random_element(cls.msisdn_formats))
| from __future__ import unicode_literals
from .. import Provider as PhoneNumberProvider
class Provider(PhoneNumberProvider):
formats = (
'+55 (011) #### ####',
'+55 (021) #### ####',
'+55 (031) #### ####',
'+55 (041) #### ####',
'+55 (051) #### ####',
'+55 (061) #### ####',
'+55 (071) #### ####',
'+55 (081) #### ####',
'+55 11 #### ####',
'+55 21 #### ####',
'+55 31 #### ####',
'+55 41 #### ####',
'+55 51 ### ####',
'+55 61 #### ####',
'+55 71 #### ####',
'+55 81 #### ####',
'+55 (011) ####-####',
'+55 (021) ####-####',
'+55 (031) ####-####',
'+55 (041) ####-####',
'+55 (051) ####-####',
'+55 (061) ####-####',
'+55 (071) ####-####',
'+55 (081) ####-####',
'+55 11 ####-####',
'+55 21 ####-####',
'+55 31 ####-####',
'+55 41 ####-####',
'+55 51 ### ####',
'+55 61 ####-####',
'+55 71 ####-####',
'+55 81 ####-####',
'(011) #### ####',
'(021) #### ####',
'(031) #### ####',
'(041) #### ####',
'(051) #### ####',
'(061) #### ####',
'(071) #### ####',
'(081) #### ####',
'11 #### ####',
'21 #### ####',
'31 #### ####',
'41 #### ####',
'51 ### ####',
'61 #### ####',
'71 #### ####',
'81 #### ####',
'(011) ####-####',
'(021) ####-####',
'(031) ####-####',
'(041) ####-####',
'(051) ####-####',
'(061) ####-####',
'(071) ####-####',
'(081) ####-####',
'11 ####-####',
'21 ####-####',
'31 ####-####',
'41 ####-####',
'51 ### ####',
'61 ####-####',
'71 ####-####',
'81 ####-####',
'#### ####',
'####-####',
) | Python | 0.000001 |
374ef731e658097f9b2d2d7593ed1126ec52d282 | Fix issues with `skip_unknown` | ycml/transformers/sequences.py | ycml/transformers/sequences.py | import logging
from .base import PureTransformer
from .text import ListCountVectorizer
__all__ = ['TokensToIndexTransformer']
logger = logging.getLogger(__name__)
class TokensToIndexTransformer(PureTransformer):
def __init__(self, skip_unknown=False, pad_sequences=None, count_vectorizer_args={}, pad_sequences_args={}, **kwargs):
super(TokensToIndexTransformer, self).__init__(**kwargs)
self.skip_unknown = skip_unknown
self.pad_sequences = pad_sequences
self.count_vectorizer_args = count_vectorizer_args
self.pad_sequences_args = pad_sequences_args
#end def
def fit(self, X, **kwargs):
self.count_vectorizer_ = ListCountVectorizer(**self.count_vectorizer_args).fit(X)
logger.debug('TokensToIndexTransformer vocabulary fitted with size {}.'.format(len(self.vocabulary_)))
return self
#end def
def _transform(self, X, y=None):
if 'maxlen' in self.pad_sequences_args:
raise ValueError('The `maxlen` argument should not be set in `pad_sequences_args`. Set it in `pad_sequences` instead.')
analyzer = self.count_vectorizer_.build_analyzer()
V = self.vocabulary_
X_transformed = []
for seq in X:
indexes = []
for j, tok in enumerate(analyzer(seq)):
index = V.get(tok)
if not self.skip_unknown: indexes.append(0 if index is None else (index + 1))
elif index is not None: indexes.append(index)
#end for
X_transformed.append(indexes)
#end for
if self.pad_sequences is not None:
from keras.preprocessing.sequence import pad_sequences as keras_pad_sequences
maxlen = getattr(self, 'pad_sequences_maxlen_', None if self.pad_sequences is True else self.pad_sequences)
X_transformed = keras_pad_sequences(X_transformed, maxlen=maxlen, **self.pad_sequences_args)
if self.pad_sequences is True or maxlen is not None:
logger.debug('TokensToIndexTransformer transformed sequences has max length {}.'.format(X_transformed.shape[1]))
self.pad_sequences_maxlen_ = X_transformed.shape[1]
#end if
return X_transformed
#end def
@property
def vocabulary_(self): return self.count_vectorizer_.vocabulary_
@property
def stop_words_(self): return self.count_vectorizer_.stop_words_
def __repr__(self):
count_vectorizer_repr = '{}(vocabulary_={}, stop_words_={})'.format(self.count_vectorizer_.__class__.__name__, len(getattr(self.count_vectorizer_, 'vocabulary_', [])), len(getattr(self.count_vectorizer_, 'stop_words_', []))) if hasattr(self, 'count_vectorizer_') else None
return '{}(skip_unknown={}, pad_sequences={}, count_vectorizer_args={}, pad_sequences_args={}, count_vectorizer_={})'.format(self.__class__.__name__, self.skip_unknown, self.pad_sequences, self.count_vectorizer_args, self.pad_sequences_args, count_vectorizer_repr)
#end def
#end class
| import logging
from .base import PureTransformer
from .text import ListCountVectorizer
__all__ = ['TokensToIndexTransformer']
logger = logging.getLogger(__name__)
class TokensToIndexTransformer(PureTransformer):
def __init__(self, ignore_unknown=False, pad_sequences=None, count_vectorizer_args={}, pad_sequences_args={}, **kwargs):
super(TokensToIndexTransformer, self).__init__(**kwargs)
self.ignore_unknown = ignore_unknown
self.pad_sequences = pad_sequences
self.count_vectorizer_args = count_vectorizer_args
self.pad_sequences_args = pad_sequences_args
#end def
def fit(self, X, **kwargs):
self.count_vectorizer_ = ListCountVectorizer(**self.count_vectorizer_args).fit(X)
logger.debug('TokensToIndexTransformer vocabulary fitted with size {}.'.format(len(self.vocabulary_)))
return self
#end def
def _transform(self, X, y=None):
if 'maxlen' in self.pad_sequences_args:
raise ValueError('The `maxlen` argument should not be set in `pad_sequences_args`. Set it in `pad_sequences` instead.')
analyzer = self.count_vectorizer_.build_analyzer()
V = self.vocabulary_
unknown_index = 1 if self.ignore_unknown else len(V)
X_transformed = []
for seq in X:
indexes = []
for j, tok in enumerate(analyzer(seq)):
index = V.get(tok, unknown_index)
if index >= 0:
indexes.append(index)
#end for
X_transformed.append(indexes)
#end for
if self.pad_sequences is not None:
from keras.preprocessing.sequence import pad_sequences as keras_pad_sequences
maxlen = getattr(self, 'pad_sequences_maxlen_', None if self.pad_sequences is True else self.pad_sequences)
X_transformed = keras_pad_sequences(X_transformed, maxlen=maxlen, **self.pad_sequences_args)
if self.pad_sequences is True or maxlen is not None:
logger.debug('TokensToIndexTransformer transformed sequences has max length {}.'.format(X_transformed.shape[1]))
self.pad_sequences_maxlen_ = X_transformed.shape[1]
#end if
return X_transformed
#end def
@property
def vocabulary_(self): return self.count_vectorizer_.vocabulary_
@property
def stop_words_(self): return self.count_vectorizer_.stop_words_
def __repr__(self):
count_vectorizer_repr = '{}(vocabulary_={}, stop_words_={})'.format(self.count_vectorizer_.__class__.__name__, len(getattr(self.count_vectorizer_, 'vocabulary_', [])), len(getattr(self.count_vectorizer_, 'stop_words_', []))) if hasattr(self, 'count_vectorizer_') else None
return '{}(ignore_unknown={}, pad_sequences={}, count_vectorizer_args={}, pad_sequences_args={}, count_vectorizer_={})'.format(self.__class__.__name__, self.ignore_unknown, self.pad_sequences, self.count_vectorizer_args, self.pad_sequences_args, count_vectorizer_repr)
#end def
#end class
| Python | 0 |
de6babf92252ea5828a9c17d76766357cff3e440 | Extend _VALID_URL (Closes #10812) | youtube_dl/extractor/tvland.py | youtube_dl/extractor/tvland.py | # coding: utf-8
from __future__ import unicode_literals
from .mtv import MTVServicesInfoExtractor
class TVLandIE(MTVServicesInfoExtractor):
IE_NAME = 'tvland.com'
_VALID_URL = r'https?://(?:www\.)?tvland\.com/(?:video-clips|(?:full-)?episodes)/(?P<id>[^/?#.]+)'
_FEED_URL = 'http://www.tvland.com/feeds/mrss/'
_TESTS = [{
# Geo-restricted. Without a proxy metadata are still there. With a
# proxy it redirects to http://m.tvland.com/app/
'url': 'http://www.tvland.com/episodes/hqhps2/everybody-loves-raymond-the-invasion-ep-048',
'info_dict': {
'description': 'md5:80973e81b916a324e05c14a3fb506d29',
'title': 'The Invasion',
},
'playlist': [],
}, {
'url': 'http://www.tvland.com/video-clips/zea2ev/younger-younger--hilary-duff---little-lies',
'md5': 'e2c6389401cf485df26c79c247b08713',
'info_dict': {
'id': 'b8697515-4bbe-4e01-83d5-fa705ce5fa88',
'ext': 'mp4',
'title': 'Younger|December 28, 2015|2|NO-EPISODE#|Younger: Hilary Duff - Little Lies',
'description': 'md5:7d192f56ca8d958645c83f0de8ef0269',
'upload_date': '20151228',
'timestamp': 1451289600,
},
}, {
'url': 'http://www.tvland.com/full-episodes/iu0hz6/younger-a-kiss-is-just-a-kiss-season-3-ep-301',
'only_matching': True,
}]
| # coding: utf-8
from __future__ import unicode_literals
from .mtv import MTVServicesInfoExtractor
class TVLandIE(MTVServicesInfoExtractor):
IE_NAME = 'tvland.com'
_VALID_URL = r'https?://(?:www\.)?tvland\.com/(?:video-clips|episodes)/(?P<id>[^/?#.]+)'
_FEED_URL = 'http://www.tvland.com/feeds/mrss/'
_TESTS = [{
# Geo-restricted. Without a proxy metadata are still there. With a
# proxy it redirects to http://m.tvland.com/app/
'url': 'http://www.tvland.com/episodes/hqhps2/everybody-loves-raymond-the-invasion-ep-048',
'info_dict': {
'description': 'md5:80973e81b916a324e05c14a3fb506d29',
'title': 'The Invasion',
},
'playlist': [],
}, {
'url': 'http://www.tvland.com/video-clips/zea2ev/younger-younger--hilary-duff---little-lies',
'md5': 'e2c6389401cf485df26c79c247b08713',
'info_dict': {
'id': 'b8697515-4bbe-4e01-83d5-fa705ce5fa88',
'ext': 'mp4',
'title': 'Younger|December 28, 2015|2|NO-EPISODE#|Younger: Hilary Duff - Little Lies',
'description': 'md5:7d192f56ca8d958645c83f0de8ef0269',
'upload_date': '20151228',
'timestamp': 1451289600,
},
}]
| Python | 0 |
59d4678e8319320b9b5ccd304b1034188c02ae61 | insert pth eggs at index of site-packages they come from | pkgs/development/python-modules/site/site.py | pkgs/development/python-modules/site/site.py | def __boot():
import sys, imp, os, os.path
PYTHONPATH = os.environ.get('PYTHONPATH')
if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH):
PYTHONPATH = []
else:
PYTHONPATH = PYTHONPATH.split(os.pathsep)
pic = getattr(sys,'path_importer_cache',{})
stdpath = sys.path[len(PYTHONPATH):]
mydir = os.path.dirname(__file__)
#print "searching",stdpath,sys.path
for item in stdpath:
if item==mydir or not item:
continue # skip if current dir. on Windows, or my own directory
importer = pic.get(item)
if importer is not None:
loader = importer.find_module('site')
if loader is not None:
# This should actually reload the current module
loader.load_module('site')
break
else:
try:
stream, path, descr = imp.find_module('site',[item])
except ImportError:
continue
if stream is None:
continue
try:
# This should actually reload the current module
imp.load_module('site',stream,path,descr)
finally:
stream.close()
break
else:
raise ImportError("Couldn't find the real 'site' module")
#print "loaded", __file__
known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp
oldpos = getattr(sys,'__egginsert',0) # save old insertion position
sys.__egginsert = 0 # and reset the current one
for item in PYTHONPATH:
sys.__egginsert = sys.path.index(os.path.abspath(item))
addsitedir(item)
sys.__egginsert += oldpos # restore effective old position
d,nd = makepath(stdpath[0])
insert_at = None
new_path = []
for item in sys.path:
p,np = makepath(item)
if np==nd and insert_at is None:
# We've hit the first 'system' path entry, so added entries go here
insert_at = len(new_path)
if np in known_paths or insert_at is None:
new_path.append(item)
else:
# new path after the insert point, back-insert it
new_path.insert(insert_at, item)
insert_at += 1
sys.path[:] = new_path
if __name__=='site':
__boot()
del __boot
| def __boot():
import sys, imp, os, os.path
PYTHONPATH = os.environ.get('PYTHONPATH')
if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH):
PYTHONPATH = []
else:
PYTHONPATH = PYTHONPATH.split(os.pathsep)
pic = getattr(sys,'path_importer_cache',{})
stdpath = sys.path[len(PYTHONPATH):]
mydir = os.path.dirname(__file__)
#print "searching",stdpath,sys.path
for item in stdpath:
if item==mydir or not item:
continue # skip if current dir. on Windows, or my own directory
importer = pic.get(item)
if importer is not None:
loader = importer.find_module('site')
if loader is not None:
# This should actually reload the current module
loader.load_module('site')
break
else:
try:
stream, path, descr = imp.find_module('site',[item])
except ImportError:
continue
if stream is None:
continue
try:
# This should actually reload the current module
imp.load_module('site',stream,path,descr)
finally:
stream.close()
break
else:
raise ImportError("Couldn't find the real 'site' module")
#print "loaded", __file__
known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp
oldpos = getattr(sys,'__egginsert',0) # save old insertion position
sys.__egginsert = 0 # and reset the current one
for item in PYTHONPATH:
addsitedir(item)
sys.__egginsert += oldpos # restore effective old position
d,nd = makepath(stdpath[0])
insert_at = None
new_path = []
for item in sys.path:
p,np = makepath(item)
if np==nd and insert_at is None:
# We've hit the first 'system' path entry, so added entries go here
insert_at = len(new_path)
if np in known_paths or insert_at is None:
new_path.append(item)
else:
# new path after the insert point, back-insert it
new_path.insert(insert_at, item)
insert_at += 1
sys.path[:] = new_path
if __name__=='site':
__boot()
del __boot
| Python | 0 |
a34d08cec2cdcf259070ca51c69dcd425a04c5be | move use_container into execkwargs | tests/util.py | tests/util.py | from __future__ import absolute_import
import os
import functools
from pkg_resources import (Requirement, ResolutionError, # type: ignore
resource_filename)
import distutils.spawn
import pytest
from cwltool.utils import onWindows, windows_default_container_id
from cwltool.factory import Factory
def get_windows_safe_factory(**execkwargs):
if onWindows():
makekwargs = {'find_default_container': functools.partial(
force_default_container, windows_default_container_id),
'use_container': True}
execkwargs['default_container': windows_default_container_id]
else:
opts = {}
return Factory(makekwargs=makekwargs, **execkwargs)
def force_default_container(default_container_id, builder):
return default_container_id
def get_data(filename):
filename = os.path.normpath(
filename) # normalizing path depending on OS or else it will cause problem when joining path
filepath = None
try:
filepath = resource_filename(
Requirement.parse("cwltool"), filename)
except ResolutionError:
pass
if not filepath or not os.path.isfile(filepath):
filepath = os.path.join(os.path.dirname(__file__), os.pardir, filename)
# warning, __file__ is all lowercase on Windows systems, this can
# sometimes conflict with docker toolkit. Workaround: pip install .
# and run the tests elsewhere via python -m pytest --pyarg cwltool
return filepath
needs_docker = pytest.mark.skipif(not bool(distutils.spawn.find_executable('docker')),
reason="Requires the docker executable on the "
"system path.")
| from __future__ import absolute_import
import os
import functools
from pkg_resources import (Requirement, ResolutionError, # type: ignore
resource_filename)
import distutils.spawn
import pytest
from cwltool.utils import onWindows, windows_default_container_id
from cwltool.factory import Factory
def get_windows_safe_factory(**execkwargs):
if onWindows():
opts = {'find_default_container': functools.partial(
force_default_container, windows_default_container_id),
'use_container': True,
'default_container': windows_default_container_id}
else:
opts = {}
return Factory(makekwargs=opts, **execkwargs)
def force_default_container(default_container_id, builder):
return default_container_id
def get_data(filename):
filename = os.path.normpath(
filename) # normalizing path depending on OS or else it will cause problem when joining path
filepath = None
try:
filepath = resource_filename(
Requirement.parse("cwltool"), filename)
except ResolutionError:
pass
if not filepath or not os.path.isfile(filepath):
filepath = os.path.join(os.path.dirname(__file__), os.pardir, filename)
# warning, __file__ is all lowercase on Windows systems, this can
# sometimes conflict with docker toolkit. Workaround: pip install .
# and run the tests elsewhere via python -m pytest --pyarg cwltool
return filepath
needs_docker = pytest.mark.skipif(not bool(distutils.spawn.find_executable('docker')),
reason="Requires the docker executable on the "
"system path.")
| Python | 0.000009 |
b06863b3bd9b12c47380362b3d4182167a6d2eaa | Update openssl.py | wigs/openssl.py | wigs/openssl.py | class openssl(Wig):
tarball_uri = 'https://github.com/openssl/openssl/archive/OpenSSL_$RELEASE_VERSION$.tar.gz'
git_uri = 'https://github.com/openssl/openssl'
last_release_version = 'v1_1_0e'
def setup(self):
self.configure_flags += [S.FPIC_FLAG]
def gen_configure_snippet(self):
return './config %s' % ' '.join(self.configure_flags)
| class openssl(Wig):
tarball_uri = 'https://github.com/openssl/openssl/archive/OpenSSL_$RELEASE_VERSION$.tar.gz'
git_uri = 'https://github.com/openssl/openssl'
last_release_version = 'v1_0_2d'
def setup(self):
self.configure_flags += [S.FPIC_FLAG]
def gen_configure_snippet(self):
return './config %s' % ' '.join(self.configure_flags)
| Python | 0.000002 |
66c4b93ae78c98928946f0ceeee3a2c16be7655d | Add coding line | app/tests/integration/test_database.py | app/tests/integration/test_database.py | # -*- coding: utf-8 -*-
"""
Database tests.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from unittest import TestCase
from lib import database
from models.trends import Trend
class TestDatabaseSetup(TestCase):
"""
Test the database library module.
"""
def tearDown(self):
database._dropTables(verbose=False)
def test_drop(self):
database._dropTables()
def test_create(self):
database._createTables()
def test_baseLabels(self):
database._createTables(verbose=False)
database._baseLabels()
def test_populate(self):
database._createTables(verbose=False)
limit = 1
database._populate(limit)
class TestModel(TestCase):
"""
Test ORM operations on the SQL database.
In particular, edgecases such as unicode character handling.
"""
def tearDown(self):
database._dropTables(verbose=False)
def test_insert(self):
database._dropTables(verbose=False)
database._createTables(verbose=False)
database._baseLabels()
t = Trend(topic="abc", volume=1)
self.assertEqual(t.topic, "abc")
self.assertEqual(t.volume, 1)
t = Trend(topic="a b Ç 😊", volume=1000)
self.assertEqual(t.topic, "a b Ç 😊")
database._dropTables(verbose=False)
| """
Database tests.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from unittest import TestCase
from lib import database
from models.trends import Trend
class TestDatabaseSetup(TestCase):
"""
Test the database library module.
"""
def tearDown(self):
database._dropTables(verbose=False)
def test_drop(self):
database._dropTables()
def test_create(self):
database._createTables()
def test_baseLabels(self):
database._createTables(verbose=False)
database._baseLabels()
def test_populate(self):
database._createTables(verbose=False)
limit = 1
database._populate(limit)
class TestModel(TestCase):
"""
Test ORM operations on the SQL database.
In particular, edgecases such as unicode character handling.
"""
def tearDown(self):
database._dropTables(verbose=False)
def test_insert(self):
database._dropTables(verbose=False)
database._createTables(verbose=False)
database._baseLabels()
t = Trend(topic="abc", volume=1)
self.assertEqual(t.topic, "abc")
self.assertEqual(t.volume, 1)
t = Trend(topic="a b Ç 😊", volume=1000)
self.assertEqual(t.topic, "a b Ç 😊")
database._dropTables(verbose=False)
| Python | 0.000878 |
005eea5e467c3a0aa6b942ce377a5c72b9177e21 | Fix build_lines() - s/bw/image | textinator.py | textinator.py | import click
from PIL import Image
@click.command()
@click.argument('image', type=click.File('rb'))
@click.argument('out', type=click.File('wt'), default='-',
required=False)
@click.option('-p', '--palette', default='█▓▒░ ',
help="A custom palette for rendering images. Goes from dark to bright.")
@click.option('-w', '--width', type=click.INT,
help="Width of output. If height is not given, the image will be proportionally scaled.")
@click.option('-h', '--height', type=click.INT,
help="Height of output. If width is not given, the image will be proportionally scaled.")
@click.option('--correct/--no-correct', default=True,
help="Wether to account for the proportions of monospaced characters. On by default.")
@click.option('--resample', default='nearest',
type=click.Choice(['nearest', 'bilinear', 'bicubic', 'antialias']),
help="Filter to use for resampling. Default is nearest.")
@click.option('--newlines/--no-newlines', default=False,
help="Wether to add a newline after each row.")
def convert(image, out, width, height,
palette, resample, correct, newlines):
"""
Converts an input image to a text representation.
Writes to stdout by default. Optionally takes another file as a second output.
Supports most filetypes, except JPEG.
For that you need to install libjpeg.
For more info see:\n
http://pillow.readthedocs.org/installation.html#external-libraries
"""
if not width or height:
width, height = 80, 24
if width and not height:
height = width
if height and not width:
width = height
original = Image.open(image)
resized = original.copy()
resized.thumbnail((height, width))
bw = resized.convert(mode="L")
for line in build_lines(bw, newlines):
click.echo(line)
def build_lines(image, newlines=True):
width, height = image.size
for y in range(height):
line = ''
for x in range(width):
pixel = image.getpixel((x, y))
line += value_to_char(pixel, palette)
if newlines:
line += '\n'
yield line
def value_to_char(value, palette, value_range=(0, 256)):
palette_range = (0, len(palette))
mapped = int(scale(value, value_range, palette_range))
return palette[mapped]
def scale(val, src, dst):
"""
Scale the given value from the scale of src to the scale of dst.
"""
return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]
| import click
from PIL import Image
@click.command()
@click.argument('image', type=click.File('rb'))
@click.argument('out', type=click.File('wt'), default='-',
required=False)
@click.option('-p', '--palette', default='█▓▒░ ',
help="A custom palette for rendering images. Goes from dark to bright.")
@click.option('-w', '--width', type=click.INT,
help="Width of output. If height is not given, the image will be proportionally scaled.")
@click.option('-h', '--height', type=click.INT,
help="Height of output. If width is not given, the image will be proportionally scaled.")
@click.option('--correct/--no-correct', default=True,
help="Wether to account for the proportions of monospaced characters. On by default.")
@click.option('--resample', default='nearest',
type=click.Choice(['nearest', 'bilinear', 'bicubic', 'antialias']),
help="Filter to use for resampling. Default is nearest.")
@click.option('--newlines/--no-newlines', default=False,
help="Wether to add a newline after each row.")
def convert(image, out, width, height,
palette, resample, correct, newlines):
"""
Converts an input image to a text representation.
Writes to stdout by default. Optionally takes another file as a second output.
Supports most filetypes, except JPEG.
For that you need to install libjpeg.
For more info see:\n
http://pillow.readthedocs.org/installation.html#external-libraries
"""
if not width or height:
width, height = 80, 24
if width and not height:
height = width
if height and not width:
width = height
original = Image.open(image)
resized = original.copy()
resized.thumbnail((height, width))
bw = resized.convert(mode="L")
for line in build_lines(bw, newlines):
click.echo(line)
def build_lines(image, newlines=True):
width, height = image.size
for y in range(height):
line = ''
for x in range(width):
pixel = bw.getpixel((x, y))
line += value_to_char(pixel, palette)
if newlines:
line += '\n'
yield line
def value_to_char(value, palette, value_range=(0, 256)):
palette_range = (0, len(palette))
mapped = int(scale(value, value_range, palette_range))
return palette[mapped]
def scale(val, src, dst):
"""
Scale the given value from the scale of src to the scale of dst.
"""
return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]
| Python | 0 |
1f130a8577f16809008bd301ab8c47aab4677750 | Add build_lines function, move image generation there. | textinator.py | textinator.py | import click
from PIL import Image
def scale(val, src, dst):
"""
Scale the given value from the scale of src to the scale of dst.
"""
return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]
def value_to_char(value, palette, value_range=(0, 256)):
palette_range = (0, len(palette))
mapped = int(scale(value, value_range, palette_range))
return palette[mapped]
@click.command()
@click.argument('image', type=click.File('rb'))
@click.argument('out', type=click.File('wt'), default='-',
required=False, writable=True)
@click.option('-p', '--palette', default='█▓▒░ ',
help="A custom palette for rendering images. Goes from dark to bright.")
@click.option('-w', '--width', type=click.INT,
help="Width of output. If height is not given, the image will be proportionally scaled.")
@click.option('-h', '--height', type=click.INT,
help="Height of output. If width is not given, the image will be proportionally scaled.")
@click.option('--correct/--no-correct', default=True,
help="Wether to account for the proportions of monospaced characters. On by default.")
@click.option('--resample', default='nearest',
type=click.Choice(['nearest', 'bilinear', 'bicubic', 'antialias']),
help="Filter to use for resampling. Default is nearest.")
@click.option('--newlines/--no-newlines', default=False,
help="Wether to add a newline after each row.")
def convert(image, out, width, height,
palette, resample, correct, newlines):
"""
Converts an input image to a text representation.
Writes to stdout by default. Optionally takes another file as a second output.
Supports most filetypes, except JPEG.
For that you need to install libjpeg.
For more info see:\n
http://pillow.readthedocs.org/installation.html#external-libraries
"""
if not width or height:
width, height = 80, 24
if width and not height:
height = width
if height and not width:
width = height
original = Image.open(image)
resized = original.copy()
resized.thumbnail((height, width))
bw = resized.convert(mode="L")
for line in build_lines(bw, newlines):
click.echo(line)
def build_lines(image, newlines=True):
width, height = image.size
for y in range(height):
line = ''
for x in range(width):
pixel = bw.getpixel((x, y))
line += value_to_char(pixel, palette)
if newlines:
line += '\n'
yield line
| import click
from PIL import Image
def scale(val, src, dst):
"""
Scale the given value from the scale of src to the scale of dst.
"""
return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]
def value_to_char(value, palette, value_range=(0, 256)):
palette_range = (0, len(palette))
mapped = int(scale(value, value_range, palette_range))
return palette[mapped]
@click.command()
@click.argument('image', type=click.File('rb'))
@click.argument('out', type=click.File('wt'), default='-',
required=False, writable=True)
@click.option('-p', '--palette', default='█▓▒░ ',
help="A custom palette for rendering images. Goes from dark to bright.")
@click.option('-w', '--width', type=click.INT,
help="Width of output. If height is not given, the image will be proportionally scaled.")
@click.option('-h', '--height', type=click.INT,
help="Height of output. If width is not given, the image will be proportionally scaled.")
@click.option('--correct/--no-correct', default=True,
help="Wether to account for the proportions of monospaced characters. On by default.")
@click.option('--resample', default='nearest',
type=click.Choice(['nearest', 'bilinear', 'bicubic', 'antialias']),
help="Filter to use for resampling. Default is nearest.")
@click.option('--newlines/--no-newlines', default=False,
help="Wether to add a newline after each row.")
def convert(image, out, width, height,
palette, resample, correct, newlines):
"""
Converts an input image to a text representation.
Writes to stdout by default. Optionally takes another file as a second output.
Supports most filetypes, except JPEG.
For that you need to install libjpeg.
For more info see:\n
http://pillow.readthedocs.org/installation.html#external-libraries
"""
if not width or height:
width, height = 80, 24
if width and not height:
height = width
if height and not width:
width = height
original = Image.open(image)
resized = original.copy()
resized.thumbnail((height, width))
bw = resized.convert(mode="L")
o_width, o_height = bw.size
for y in range(o_height):
line = ''
for x in range(o_width):
pixel = bw.getpixel((x, y))
line += value_to_char(pixel, palette)
click.echo(line)
| Python | 0 |
bf7fd4e606901fae6a434e4a375ac72bcbc66e00 | Fix plugin | tgp/plugin.py | tgp/plugin.py | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import traceback
import sys
from os.path import exists, join, dirname, abspath
from os import makedirs, environ
from future import standard_library
from qiita_client import QiitaClient
from tgp.split_libraries import split_libraries, split_libraries_fastq
from tgp.pick_otus import pick_closed_reference_otus
with standard_library.hooks():
from configparser import ConfigParser
TASK_DICT = {
'Split libraries FASTQ': split_libraries_fastq,
'Split libraries': split_libraries,
'Pick closed-reference OTUs': pick_closed_reference_otus
}
def execute_job(server_url, job_id, output_dir):
"""Starts the plugin and executes the assigned task
Parameters
----------
server_url : str
The url of the server
job_id : str
The job id
Raises
------
RuntimeError
If there is a problem gathering the job information
"""
# Set up the Qiita Client
dflt_conf_fp = join(dirname(abspath(__file__)), 'support_files',
'config_file.cfg')
conf_fp = environ.get('QP_TARGET_GENE_CONFIG_FP', dflt_conf_fp)
config = ConfigParser()
with open(conf_fp, 'U') as conf_file:
config.readfp(conf_file)
qclient = QiitaClient(server_url, config.get('main', 'CLIENT_ID'),
config.get('main', 'CLIENT_SECRET'),
server_cert=config.get('main', 'SERVER_CERT'))
# Request job information. If there is a problem retrieving the job
# information, the QiitaClient already raises an error
job_info = qclient.get_job_info(job_id)
# Starting the heartbeat
qclient.start_heartbeat(job_id)
# Execute the given task
task_name = job_info['command']
task = TASK_DICT[task_name]
if not exists(output_dir):
makedirs(output_dir)
try:
success, artifacts_info, error_msg = task(
qclient, job_id, job_info['parameters'], output_dir)
except Exception:
exc_str = repr(traceback.format_exception(*sys.exc_info()))
error_msg = ("Error executing %s:\n%s" % (task_name, exc_str))
success = False
# The job completed
qclient.complete_job(job_id, success, error_msg=error_msg,
artifacts_info=artifacts_info)
| # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import traceback
import sys
from os.path import exists, join, dirname, abspath
from os import makedirs, environ
from future import standard_library
from qiita_client import QiitaClient, format_payload
from tgp.split_libraries import split_libraries, split_libraries_fastq
from tgp.pick_otus import pick_closed_reference_otus
with standard_library.hooks():
from configparser import ConfigParser
TASK_DICT = {
'Split libraries FASTQ': split_libraries_fastq,
'Split libraries': split_libraries,
'Pick closed-reference OTUs': pick_closed_reference_otus
}
def execute_job(server_url, job_id, output_dir):
"""Starts the plugin and executes the assigned task
Parameters
----------
server_url : str
The url of the server
job_id : str
The job id
Raises
------
RuntimeError
If there is a problem gathering the job information
"""
# Set up the Qiita Client
try:
conf_fp = environ['QP_TARGET_GENE_CONFIG_FP']
except KeyError:
conf_fp = join(dirname(abspath(__file__)), 'support_files',
'config_file.cfg')
config = ConfigParser()
with open(conf_fp, 'U') as conf_file:
config.readfp(conf_file)
qclient = QiitaClient(server_url, config.get('main', 'CLIENT_ID'),
config.get('main', 'CLIENT_SECRET'),
server_cert=config.get('main', 'SERVER_CERT'))
# Request job information
job_info = qclient.get_job_info(job_id)
# Check if we have received the job information so we can start it
if job_info and job_info['success']:
# Starting the heartbeat
qclient.start_heartbeat(job_id)
# Execute the given task
task_name = job_info['command']
task = TASK_DICT[task_name]
if not exists(output_dir):
makedirs(output_dir)
try:
payload = task(qclient, job_id, job_info['parameters'],
output_dir)
except Exception:
exc_str = repr(traceback.format_exception(*sys.exc_info()))
error_msg = ("Error executing %s:\n%s" % (task_name, exc_str))
payload = format_payload(False, error_msg=error_msg)
# The job completed
qclient.complete_job(job_id, payload)
else:
raise RuntimeError("Can't get job (%s) information" % job_id)
| Python | 0.000001 |
fe81916434e6aa04d9672589cb75fde3c676e19f | Fix revision chain | src/ggrc/migrations/versions/20151216132037_5410607088f9_delete_background_tasks.py | src/ggrc/migrations/versions/20151216132037_5410607088f9_delete_background_tasks.py | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Delete background tasks
Revision ID: 5410607088f9
Revises: 1ef8f4f504ae
Create Date: 2015-12-16 13:20:37.341342
"""
# pylint: disable=C0103,E1101
from alembic import op
# revision identifiers, used by Alembic.
revision = '5410607088f9'
down_revision = '1ef8f4f504ae'
def upgrade():
"""Remove all entries from background_tasks"""
op.execute("truncate background_tasks")
def downgrade():
"""Remove all entries from background_tasks"""
op.execute("truncate background_tasks")
| # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Delete background tasks
Revision ID: 5410607088f9
Revises: 504f541411a5
Create Date: 2015-12-16 13:20:37.341342
"""
# pylint: disable=C0103,E1101
from alembic import op
# revision identifiers, used by Alembic.
revision = '5410607088f9'
down_revision = '504f541411a5'
def upgrade():
"""Remove all entries from background_tasks"""
op.execute("truncate background_tasks")
def downgrade():
"""Remove all entries from background_tasks"""
op.execute("truncate background_tasks")
| Python | 0.000003 |
0ce840cf43b06fc810bbe45d2aed5fcc591be87c | Add ShortenedUrl class | url_shortener.py | url_shortener.py | # -*- coding: utf-8 -*-
import os
from bisect import bisect_left
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import types
class SlugValueError(ValueError):
'''The value of slug is incorrect '''
class Slug(object):
''' An identifier for shortened url
In has two values used as its representations: a string value
and an integer value, used in short urls and in database,
respectively.
:var CHARS: string containing characters allowed to be used
in a slug. The characters are used as digits of a numerical system
used to convert between the string and integer representations.
:var BASE: a base of numeral system used to convert between
the string and integer representations.
'''
CHARS = '0123456789abcdefghijkmnopqrstuvwxyz'
BASE = len(CHARS)
def __init__(self, integer=None, string=None):
''' Initialize new instance
:param integer: a value representing the slug as an integer.
It can not be None while string is None. If it is None, a
corresponding property of the object will be based on
the string parameter
:param string: a value representing the slug as a string.
It can not be None while integer is None, and it has to consist
only of characters specified by the CHARS class property.
If it is None, a value of corresponding property of the object
will be based on the integer parameter
:raises SlugValueError: if the slug contains characters that are not
in self.CHARS property, or if both string and integer params
are None
'''
if string is not None:
forbidden = [d for d in string if d not in self.CHARS]
if forbidden:
msg_tpl = "The slug '{}' contains forbidden characters: '{}'"
raise SlugValueError(msg_tpl.format(string, forbidden))
elif integer is None:
raise SlugValueError(
'The string and integer arguments cannot both be None'
)
self._string = string
self.integer = integer
if integer is None:
value = 0
for exponent, char in enumerate(reversed(string)):
digit_value = bisect_left(self.CHARS, char)
value += digit_value*self.BASE**exponent
self.integer = value
def __str__(self):
''' Get string representation of the slug
:returns: a string representing value of the slug as a numeral
of base specified for the class. If the object has been
initialized with integer as its only representation,
the numeral will be derived from it using the base.
'''
if self._string is None:
value = ''
integer = self.integer
while True:
integer, remainder = divmod(integer, self.BASE)
value = self.CHARS[remainder] + value
if integer == 0:
break
self._string = value
return self._string
class IntegerSlug(types.TypeDecorator):
''' Converts between database integers and
instances of Slug
'''
impl = types.Integer
def process_bind_param(self, value, dialect):
return value.integer
process_literal_param = process_bind_param
def process_result_value(self, value, dialect):
return Slug(integer=value)
app = Flask(__name__)
DATABASE_URI_NAME = 'URL_SHORTENER_DATABASE_URI'
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ[DATABASE_URI_NAME]
db = SQLAlchemy(app)
class ShortenedUrl(db.Model):
''' Represents a url for which a short alias has been created
:var slug: a value representing a registered url in short urls and
in database
'''
slug = db.Column(IntegerSlug, primary_key=True)
target = db.Column(db.String(2083), unique=True)
redirect = db.Column(db.Boolean(), default=True)
def __init__(self, target, redirect=True):
''' Constructor
:param target: url represented by the instance
:param redirect: True if automatic redirection should be
performed when handling http requests for this url
'''
self.target = target
self.redirect = redirect
def __str__(self):
return self.target
if __name__ == '__main__':
app.run()
| # -*- coding: utf-8 -*-
import os
from bisect import bisect_left
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import types
class SlugValueError(ValueError):
'''The value of slug is incorrect '''
class Slug(object):
''' An identifier for shortened url
In has two values used as its representations: a string value
and an integer value, used in short urls and in database,
respectively.
:var CHARS: string containing characters allowed to be used
in a slug. The characters are used as digits of a numerical system
used to convert between the string and integer representations.
:var BASE: a base of numeral system used to convert between
the string and integer representations.
'''
CHARS = '0123456789abcdefghijkmnopqrstuvwxyz'
BASE = len(CHARS)
def __init__(self, integer=None, string=None):
''' Initialize new instance
:param integer: a value representing the slug as an integer.
It can not be None while string is None. If it is None, a
corresponding property of the object will be based on
the string parameter
:param string: a value representing the slug as a string.
It can not be None while integer is None, and it has to consist
only of characters specified by the CHARS class property.
If it is None, a value of corresponding property of the object
will be based on the integer parameter
:raises SlugValueError: if the slug contains characters that are not
in self.CHARS property, or if both string and integer params
are None
'''
if string is not None:
forbidden = [d for d in string if d not in self.CHARS]
if forbidden:
msg_tpl = "The slug '{}' contains forbidden characters: '{}'"
raise SlugValueError(msg_tpl.format(string, forbidden))
elif integer is None:
raise SlugValueError(
'The string and integer arguments cannot both be None'
)
self._string = string
self.integer = integer
if integer is None:
value = 0
for exponent, char in enumerate(reversed(string)):
digit_value = bisect_left(self.CHARS, char)
value += digit_value*self.BASE**exponent
self.integer = value
def __str__(self):
''' Get string representation of the slug
:returns: a string representing value of the slug as a numeral
of base specified for the class. If the object has been
initialized with integer as its only representation,
the numeral will be derived from it using the base.
'''
if self._string is None:
value = ''
integer = self.integer
while True:
integer, remainder = divmod(integer, self.BASE)
value = self.CHARS[remainder] + value
if integer == 0:
break
self._string = value
return self._string
class IntegerSlug(types.TypeDecorator):
''' Converts between database integers and
instances of Slug
'''
impl = types.Integer
def process_bind_param(self, value, dialect):
return value.integer
process_literal_param = process_bind_param
def process_result_value(self, value, dialect):
return Slug(integer=value)
app = Flask(__name__)
DATABASE_URI_NAME = 'URL_SHORTENER_DATABASE_URI'
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ[DATABASE_URI_NAME]
db = SQLAlchemy(app)
if __name__ == '__main__':
app.run()
| Python | 0 |
07bda8adbeb798dfd100b63a784e14a00cf33927 | add new views to urls | urlsaver/urls.py | urlsaver/urls.py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.main_view, name='main'),
url(r'^register/', views.register_view, name='register'),
url(r'^login/', views.login_view, name='login'),
url(r'^logout/', views.logout_view, name='logout'),
]
| from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.main_view, name='main'),
]
| Python | 0 |
38d47061b6c1ea3250b99f7376d7479e970974a5 | define cheakInradius | MonteCarlo.py | MonteCarlo.py | from math import *
def checkInradius(x, y):
z = x**2 + y**2
z = sqrt(z)
if z < 1.0:
return True
else:
return False
N = int(raw_input('Insert your N (random) :: '))
| from math import *
N = int(raw_input('Insert your N (random) :: '))
print N
| Python | 0.99981 |
2ff82cd1e34472173cd8631b8e353515d2c38a41 | Rename get_update_db() into get_wrapupdater() | wrapweb/hook.py | wrapweb/hook.py | # Copyright 2015 The Meson development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flask
# GitHub secret key support
import hashlib
import hmac
from mesonwrap import wrapupdater
from wrapweb.app import APP
RESTRICTED_PROJECTS = [
'mesonbuild/meson',
'mesonbuild/wrapweb',
'mesonbuild/meson-ci',
]
def get_wrapupdater():
db = getattr(flask.g, "_wrapupdater", None)
if db is None:
dbdir = APP.config['DB_DIRECTORY']
db = flask.g._wrapupdater = wrapupdater.WrapUpdater(dbdir)
return db
def json_ok():
jsonout = flask.jsonify({'output': 'ok'})
jsonout.status_code = 200
return jsonout
def json_error(code, message):
jsonout = flask.jsonify({'output': 'notok', 'error': message})
jsonout.status_code = code
return jsonout
@APP.route('/github-hook', methods=['POST'])
def github_hook():
headers = flask.request.headers
if not headers.get('User-Agent').startswith('GitHub-Hookshot/'):
return json_error(401, 'Not a GitHub hook')
signature = ('sha1=%s' %
hmac.new(APP.config['SECRET_KEY'].encode('utf-8'),
flask.request.data, hashlib.sha1).hexdigest())
if headers.get('X-Hub-Signature') != signature:
return json_error(401, 'Not a valid secret key')
if headers.get('X-Github-Event') != 'pull_request':
return json_error(405, 'Not a Pull Request hook')
d = flask.request.get_json()
base = d['pull_request']['base']
if not base['repo']['full_name'].startswith('mesonbuild/'):
return json_error(406, 'Not a mesonbuild project')
if base['repo']['full_name'] in RESTRICTED_PROJECTS:
return json_error(406, "We don't run hook for "
"restricted project names")
if d['action'] == 'closed' and d['pull_request']['merged']:
project = base['repo']['name']
branch = base['ref']
repo_url = base['repo']['clone_url']
if branch == 'master':
return json_error(406, 'No bananas for you')
db_updater = get_wrapupdater()
# FIXME, should launch in the background instead. This will now block
# until branching is finished.
try:
db_updater.update_db(project, repo_url, branch)
return json_ok()
except Exception as e:
return json_error(500, 'Wrap generation failed. %s' % e)
else:
APP.logger.warning(flask.request.data)
return json_error(417, 'We got hook which is not merged pull request')
| # Copyright 2015 The Meson development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flask
# GitHub secret key support
import hashlib
import hmac
from mesonwrap import wrapupdater
from wrapweb.app import APP
RESTRICTED_PROJECTS = [
'mesonbuild/meson',
'mesonbuild/wrapweb',
'mesonbuild/meson-ci',
]
def get_update_db():
db = getattr(flask.g, "_update_database", None)
if db is None:
dbdir = APP.config['DB_DIRECTORY']
db = flask.g._update_database = wrapupdater.WrapUpdater(dbdir)
return db
def json_ok():
jsonout = flask.jsonify({'output': 'ok'})
jsonout.status_code = 200
return jsonout
def json_error(code, message):
jsonout = flask.jsonify({'output': 'notok', 'error': message})
jsonout.status_code = code
return jsonout
@APP.route('/github-hook', methods=['POST'])
def github_hook():
headers = flask.request.headers
if not headers.get('User-Agent').startswith('GitHub-Hookshot/'):
return json_error(401, 'Not a GitHub hook')
signature = ('sha1=%s' %
hmac.new(APP.config['SECRET_KEY'].encode('utf-8'),
flask.request.data, hashlib.sha1).hexdigest())
if headers.get('X-Hub-Signature') != signature:
return json_error(401, 'Not a valid secret key')
if headers.get('X-Github-Event') != 'pull_request':
return json_error(405, 'Not a Pull Request hook')
d = flask.request.get_json()
base = d['pull_request']['base']
if not base['repo']['full_name'].startswith('mesonbuild/'):
return json_error(406, 'Not a mesonbuild project')
if base['repo']['full_name'] in RESTRICTED_PROJECTS:
return json_error(406, "We don't run hook for "
"restricted project names")
if d['action'] == 'closed' and d['pull_request']['merged']:
project = base['repo']['name']
branch = base['ref']
repo_url = base['repo']['clone_url']
if branch == 'master':
return json_error(406, 'No bananas for you')
db_updater = get_update_db()
# FIXME, should launch in the background instead. This will now block
# until branching is finished.
try:
db_updater.update_db(project, repo_url, branch)
return json_ok()
except Exception as e:
return json_error(500, 'Wrap generation failed. %s' % e)
else:
APP.logger.warning(flask.request.data)
return json_error(417, 'We got hook which is not merged pull request')
| Python | 0.004751 |
e97755dffbc834853f3f46a8233a295671b53f5d | Disable pylint broad-except in wrapweb.hook | wrapweb/hook.py | wrapweb/hook.py | # Copyright 2015 The Meson development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib # GitHub secret key support
import hmac
import flask
from mesonwrap import inventory
from mesonwrap import wrapupdater
from wrapweb import flaskutil
from wrapweb import jsonstatus
BP = flask.Blueprint('hook', __name__)
@flaskutil.appcontext_var(BP)
def _wrapupdater():
dbdir = flask.current_app.config['DB_DIRECTORY']
return wrapupdater.WrapUpdater(dbdir)
@_wrapupdater.teardown
def _close_connection(db):
db.close()
def update_project(project, repo_url, branch):
if branch == 'master':
return jsonstatus.error(406, 'Will not update master branch')
# FIXME, should launch in the background instead. This will now block
# until branching is finished.
try:
_wrapupdater().update_db(project, repo_url, branch)
return jsonstatus.ok()
except Exception as e: # pylint: disable=broad-except
return jsonstatus.error(500, 'Wrap generation failed. %s' % e)
def check_allowed_project(full_repo_name):
if not inventory.is_wrap_full_project_name(full_repo_name):
raise jsonstatus.WrapWebError(406, 'Not a mesonwrap project')
def github_pull_request():
d = flask.request.get_json()
base = d['pull_request']['base']
check_allowed_project(base['repo']['full_name'])
if d['action'] != 'closed' or not d['pull_request']['merged']:
flask.current_app.logger.warning(flask.request.data)
return jsonstatus.error(
417, 'We got hook which is not merged pull request')
return update_project(project=base['repo']['name'],
repo_url=base['repo']['clone_url'],
branch=base['ref'])
@BP.route('/github-hook', methods=['POST'])
def github_hook():
headers = flask.request.headers
if not headers.get('User-Agent').startswith('GitHub-Hookshot/'):
return jsonstatus.error(401, 'Not a GitHub hook')
secret_key = flask.current_app.config['SECRET_KEY'].encode('utf-8')
digest = hmac.new(secret_key, flask.request.data, hashlib.sha1).hexdigest()
signature = 'sha1=%s' % digest
if headers.get('X-Hub-Signature') != signature:
return jsonstatus.error(401, 'Not a valid secret key')
if headers.get('X-Github-Event') != 'pull_request':
return jsonstatus.error(405, 'Not a Pull Request hook')
return github_pull_request()
| # Copyright 2015 The Meson development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib # GitHub secret key support
import hmac
import flask
from mesonwrap import inventory
from mesonwrap import wrapupdater
from wrapweb import flaskutil
from wrapweb import jsonstatus
BP = flask.Blueprint('hook', __name__)
@flaskutil.appcontext_var(BP)
def _wrapupdater():
dbdir = flask.current_app.config['DB_DIRECTORY']
return wrapupdater.WrapUpdater(dbdir)
@_wrapupdater.teardown
def _close_connection(db):
db.close()
def update_project(project, repo_url, branch):
if branch == 'master':
return jsonstatus.error(406, 'Will not update master branch')
# FIXME, should launch in the background instead. This will now block
# until branching is finished.
try:
_wrapupdater().update_db(project, repo_url, branch)
return jsonstatus.ok()
except Exception as e:
return jsonstatus.error(500, 'Wrap generation failed. %s' % e)
def check_allowed_project(full_repo_name):
if not inventory.is_wrap_full_project_name(full_repo_name):
raise jsonstatus.WrapWebError(406, 'Not a mesonwrap project')
def github_pull_request():
d = flask.request.get_json()
base = d['pull_request']['base']
check_allowed_project(base['repo']['full_name'])
if d['action'] != 'closed' or not d['pull_request']['merged']:
flask.current_app.logger.warning(flask.request.data)
return jsonstatus.error(
417, 'We got hook which is not merged pull request')
return update_project(project=base['repo']['name'],
repo_url=base['repo']['clone_url'],
branch=base['ref'])
@BP.route('/github-hook', methods=['POST'])
def github_hook():
headers = flask.request.headers
if not headers.get('User-Agent').startswith('GitHub-Hookshot/'):
return jsonstatus.error(401, 'Not a GitHub hook')
secret_key = flask.current_app.config['SECRET_KEY'].encode('utf-8')
digest = hmac.new(secret_key, flask.request.data, hashlib.sha1).hexdigest()
signature = 'sha1=%s' % digest
if headers.get('X-Hub-Signature') != signature:
return jsonstatus.error(401, 'Not a valid secret key')
if headers.get('X-Github-Event') != 'pull_request':
return jsonstatus.error(405, 'Not a Pull Request hook')
return github_pull_request()
| Python | 0 |
7b717c778f02e642c564e9afaf406b0cc4f399ac | Move EE Initialize | geebam/geebam.py | geebam/geebam.py | #! /usr/bin/env python
import argparse
import json
import logging
import logging.config
import os
import sys
import ee
import batch_uploader
def setup_logging(path):
with open(path, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
def delete_collection(id):
logging.info('Attempting to delete collection %s', id)
if 'users' not in id:
root_path_in_gee = ee.data.getAssetRoots()[0]['id']
id = root_path_in_gee + '/' + id
params = {'id': id}
items_in_collection = ee.data.getList(params)
for item in items_in_collection:
ee.data.deleteAsset(item['id'])
ee.data.deleteAsset(id)
logging.info('Collection %s removed', id)
def cancel_all_running_tasks():
logging.info('Attempting to cancel all running tasks')
running_tasks = [task for task in ee.data.getTaskList() if task['state'] == 'RUNNING']
for task in running_tasks:
ee.data.cancelTask(task['id'])
logging.info('Cancel all request completed')
def get_filename_from_path(path):
return os.path.splitext(os.path.basename(os.path.normpath(path)))[0]
def cancel_all_running_tasks_from_parser(args):
cancel_all_running_tasks()
def delete_collection_from_parser(args):
delete_collection(args.id)
def upload_from_parser(args):
batch_uploader.upload(user=args.user,
path_for_upload=args.directory,
metadata_path=args.metadata,
collection_name=args.collection or get_filename_from_path(args.directory))
def main(args=None):
setup_logging(path=os.path.join(os.path.dirname(__file__), 'logconfig.json'))
parser = argparse.ArgumentParser(description='Google Earth Engine Batch Asset Manager')
subparsers = parser.add_subparsers()
parser_delete = subparsers.add_parser('delete', help='Deletes collection and all items inside.')
parser_delete.add_argument('id', help='ID of the collection, either fully qualified or abbreviated (no need to pass users/username).')
parser_delete.set_defaults(func=delete_collection_from_parser)
parser_upload = subparsers.add_parser('upload', help='Batch Asset Uploader.')
required_named = parser_upload.add_argument_group('Required named arguments.')
required_named.add_argument('-u', '--user', help='Google account name (gmail address).', required=True)
required_named.add_argument('-d', '--directory', help='Path to the directory with images.', required=True)
optional_named = parser_upload.add_argument_group('Optional named arguments')
optional_named.add_argument('-m', '--metadata', help='Path to CSV with metadata.')
optional_named.add_argument('-c', '--collection', help='Name of the collection to create. If not provided, '
'directory name will be used.')
parser_upload.set_defaults(func=upload_from_parser)
parser_cancel = subparsers.add_parser('cancel', help='Cancel all running tasks')
parser_cancel.set_defaults(func=cancel_all_running_tasks_from_parser)
args = parser.parse_args()
ee.Initialize()
args.func(args)
if __name__ == '__main__':
main() | #! /usr/bin/env python
import argparse
import json
import logging
import logging.config
import os
import sys
import ee
import batch_uploader
def setup_logging(path):
with open(path, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
def delete_collection(id):
logging.info('Attempting to delete collection %s', id)
if 'users' not in id:
root_path_in_gee = ee.data.getAssetRoots()[0]['id']
id = root_path_in_gee + '/' + id
params = {'id': id}
items_in_collection = ee.data.getList(params)
for item in items_in_collection:
ee.data.deleteAsset(item['id'])
ee.data.deleteAsset(id)
logging.info('Collection %s removed', id)
def cancel_all_running_tasks():
logging.info('Attempting to cancel all running tasks')
running_tasks = [task for task in ee.data.getTaskList() if task['state'] == 'RUNNING']
for task in running_tasks:
ee.data.cancelTask(task['id'])
logging.info('Cancel all request completed')
def get_filename_from_path(path):
return os.path.splitext(os.path.basename(os.path.normpath(path)))[0]
def cancel_all_running_tasks_from_parser(args):
cancel_all_running_tasks()
def delete_collection_from_parser(args):
delete_collection(args.id)
def upload_from_parser(args):
batch_uploader.upload(user=args.user,
path_for_upload=args.directory,
metadata_path=args.metadata,
collection_name=args.collection or get_filename_from_path(args.directory))
def main(args=None):
setup_logging(path=os.path.join(os.path.dirname(__file__), 'logconfig.json'))
ee.Initialize()
parser = argparse.ArgumentParser(description='Google Earth Engine Batch Asset Manager', prog='GEE asset manager')
subparsers = parser.add_subparsers()
parser_delete = subparsers.add_parser('delete', help='Deletes collection and all items inside.')
parser_delete.add_argument('id', help='ID of the collection, either fully qualified or abbreviated (no need to pass users/username).')
parser_delete.set_defaults(func=delete_collection_from_parser)
parser_upload = subparsers.add_parser('upload', help='Batch Asset Uploader.')
required_named = parser_upload.add_argument_group('Required named arguments.')
required_named.add_argument('-u', '--user', help='Google account name (gmail address).', required=True)
required_named.add_argument('-d', '--directory', help='Path to the directory with images.', required=True)
optional_named = parser_upload.add_argument_group('Optional named arguments')
optional_named.add_argument('-m', '--metadata', help='Path to CSV with metadata.')
optional_named.add_argument('-c', '--collection', help='Name of the collection to create. If not provided, '
'directory name will be used.')
parser_upload.set_defaults(func=upload_from_parser)
parser_cancel = subparsers.add_parser('cancel', help='Cancel all running tasks')
parser_cancel.set_defaults(func=cancel_all_running_tasks_from_parser)
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main() | Python | 0.000001 |
33c1db03e6b52d73ee6571f3f645f1b8d01e9a25 | Comment to clarify the use of a custom field source | snippets/serializers.py | snippets/serializers.py | from django.forms import widgets
from rest_framework import serializers
from snippets.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
# Add a filed to diplsay a list of related snippets.
snippets = serializers.PrimaryKeyRelatedField(many=True)
class Meta:
model = User
fields = ('id', 'username', 'snippets')
#class SnippetSerializer(serializers.Serializer):
# pk = serializers.Field() # Note: `Field` is an untyped read-only field.
# title = serializers.CharField(required=False,
# max_length=100)
# code = serializers.CharField(widget=widgets.Textarea,
# max_length=100000)
# linenos = serializers.BooleanField(required=False)
# language = serializers.ChoiceField(choices=LANGUAGE_CHOICES,
# default='python')
# style = serializers.ChoiceField(choices=STYLE_CHOICES,
# default='friendly')
#
# def restore_object(self, attrs, instance=None):
# """
# Create or update a new snippet instance, given a dictionary
# of deserialized field values.
#
# Note that if we don't define this method, then deserializing
# data will simply return a dictionary of items.
# """
# if instance:
# # Update existing instance
# instance.title = attrs.get('title', instance.title)
# instance.code = attrs.get('code', instance.code)
# instance.linenos = attrs.get('linenos', instance.linenos)
# instance.language = attrs.get('language', instance.language)
# instance.style = attrs.get('style', instance.style)
# return instance
#
# # Create new instance
# return Snippet(**attrs)
class SnippetSerializer(serializers.ModelSerializer):
# To make it more user-friendly, let's use the username instead of the default pk. This is
# optional, obviously.
owner = serializers.Field(source='owner.username')
class Meta:
model = Snippet
fields = ('id', 'title', 'code', 'linenos', 'language', 'style', 'owner') | from django.forms import widgets
from rest_framework import serializers
from snippets.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
# Add a filed to diplsay a list of related snippets.
snippets = serializers.PrimaryKeyRelatedField(many=True)
class Meta:
model = User
fields = ('id', 'username', 'snippets')
#class SnippetSerializer(serializers.Serializer):
# pk = serializers.Field() # Note: `Field` is an untyped read-only field.
# title = serializers.CharField(required=False,
# max_length=100)
# code = serializers.CharField(widget=widgets.Textarea,
# max_length=100000)
# linenos = serializers.BooleanField(required=False)
# language = serializers.ChoiceField(choices=LANGUAGE_CHOICES,
# default='python')
# style = serializers.ChoiceField(choices=STYLE_CHOICES,
# default='friendly')
#
# def restore_object(self, attrs, instance=None):
# """
# Create or update a new snippet instance, given a dictionary
# of deserialized field values.
#
# Note that if we don't define this method, then deserializing
# data will simply return a dictionary of items.
# """
# if instance:
# # Update existing instance
# instance.title = attrs.get('title', instance.title)
# instance.code = attrs.get('code', instance.code)
# instance.linenos = attrs.get('linenos', instance.linenos)
# instance.language = attrs.get('language', instance.language)
# instance.style = attrs.get('style', instance.style)
# return instance
#
# # Create new instance
# return Snippet(**attrs)
class SnippetSerializer(serializers.ModelSerializer):
owner = serializers.Field(source='owner.username')
class Meta:
model = Snippet
fields = ('id', 'title', 'code', 'linenos', 'language', 'style', 'owner') | Python | 0 |
dd9e53cbe02c0652cca35cde6d859512de4f9e44 | fix user_detail pipeline issue | social/pipeline/user.py | social/pipeline/user.py | from uuid import uuid4
from social.utils import slugify, module_member
USER_FIELDS = ['username', 'email']
def get_username(strategy, details, user=None, *args, **kwargs):
if 'username' not in strategy.setting('USER_FIELDS', USER_FIELDS):
return
storage = strategy.storage
if not user:
email_as_username = strategy.setting('USERNAME_IS_FULL_EMAIL', False)
uuid_length = strategy.setting('UUID_LENGTH', 16)
max_length = storage.user.username_max_length()
do_slugify = strategy.setting('SLUGIFY_USERNAMES', False)
do_clean = strategy.setting('CLEAN_USERNAMES', True)
if do_clean:
clean_func = storage.user.clean_username
else:
clean_func = lambda val: val
if do_slugify:
override_slug = strategy.setting('SLUGIFY_FUNCTION')
if override_slug:
slug_func = module_member(override_slug)
else:
slug_func = slugify
else:
slug_func = lambda val: val
if email_as_username and details.get('email'):
username = details['email']
elif details.get('username'):
username = details['username']
else:
username = uuid4().hex
short_username = username[:max_length - uuid_length]
final_username = slug_func(clean_func(username[:max_length]))
# Generate a unique username for current user using username
# as base but adding a unique hash at the end. Original
# username is cut to avoid any field max_length.
# The final_username may be empty and will skip the loop.
while not final_username or \
storage.user.user_exists(username=final_username):
username = short_username + uuid4().hex[:uuid_length]
final_username = slug_func(clean_func(username[:max_length]))
else:
final_username = storage.user.get_username(user)
return {'username': final_username}
def create_user(strategy, details, user=None, *args, **kwargs):
if user:
return {'is_new': False}
fields = dict((name, kwargs.get(name) or details.get(name))
for name in strategy.setting('USER_FIELDS',
USER_FIELDS))
if not fields:
return
return {
'is_new': True,
'user': strategy.create_user(**fields)
}
def user_details(strategy, details, user=None, *args, **kwargs):
"""Update user details using data from provider."""
if user:
changed = False # flag to track changes
protected = ('username', 'id', 'pk', 'email') + \
tuple(strategy.setting('PROTECTED_USER_FIELDS', []))
# Update user model attributes with the new data sent by the current
# provider. Update on some attributes is disabled by default, for
# example username and id fields. It's also possible to disable update
# on fields defined in SOCIAL_AUTH_PROTECTED_FIELDS.
for name, value in details.items():
if value and hasattr(user, name):
current_value = getattr(user, name, None)
if not current_value or name not in protected:
changed |= current_value != value
setattr(user, name, value)
if changed:
strategy.storage.user.changed(user)
| from uuid import uuid4
from social.utils import slugify, module_member
USER_FIELDS = ['username', 'email']
def get_username(strategy, details, user=None, *args, **kwargs):
if 'username' not in strategy.setting('USER_FIELDS', USER_FIELDS):
return
storage = strategy.storage
if not user:
email_as_username = strategy.setting('USERNAME_IS_FULL_EMAIL', False)
uuid_length = strategy.setting('UUID_LENGTH', 16)
max_length = storage.user.username_max_length()
do_slugify = strategy.setting('SLUGIFY_USERNAMES', False)
do_clean = strategy.setting('CLEAN_USERNAMES', True)
if do_clean:
clean_func = storage.user.clean_username
else:
clean_func = lambda val: val
if do_slugify:
override_slug = strategy.setting('SLUGIFY_FUNCTION')
if override_slug:
slug_func = module_member(override_slug)
else:
slug_func = slugify
else:
slug_func = lambda val: val
if email_as_username and details.get('email'):
username = details['email']
elif details.get('username'):
username = details['username']
else:
username = uuid4().hex
short_username = username[:max_length - uuid_length]
final_username = slug_func(clean_func(username[:max_length]))
# Generate a unique username for current user using username
# as base but adding a unique hash at the end. Original
# username is cut to avoid any field max_length.
# The final_username may be empty and will skip the loop.
while not final_username or \
storage.user.user_exists(username=final_username):
username = short_username + uuid4().hex[:uuid_length]
final_username = slug_func(clean_func(username[:max_length]))
else:
final_username = storage.user.get_username(user)
return {'username': final_username}
def create_user(strategy, details, user=None, *args, **kwargs):
if user:
return {'is_new': False}
fields = dict((name, kwargs.get(name) or details.get(name))
for name in strategy.setting('USER_FIELDS',
USER_FIELDS))
if not fields:
return
return {
'is_new': True,
'user': strategy.create_user(**fields)
}
def user_details(strategy, details, user=None, *args, **kwargs):
"""Update user details using data from provider."""
if user:
changed = False # flag to track changes
protected = ('username', 'id', 'pk', 'email') + \
tuple(strategy.setting('PROTECTED_USER_FIELDS', []))
# Update user model attributes with the new data sent by the current
# provider. Update on some attributes is disabled by default, for
# example username and id fields. It's also possible to disable update
# on fields defined in SOCIAL_AUTH_PROTECTED_FIELDS.
for name, value in details.items():
if value and hasattr(user, name):
current_value = getattr(user, name, None)
if current_value is None or name not in protected:
changed |= current_value != value
setattr(user, name, value)
if changed:
strategy.storage.user.changed(user)
| Python | 0 |
2b553e23791adaa9e333d6f8feded8e95fd348c9 | Bump version to 0.2.0a0 | cachy/version.py | cachy/version.py | # -*- coding: utf-8 -*-
VERSION = '0.2.0a0'
| # -*- coding: utf-8 -*-
VERSION = '0.1.1'
| Python | 0.000001 |
a9240cd8bcfced47b402fdbff0162ad939eaa631 | Fix typo | Yank/multistate/__init__.py | Yank/multistate/__init__.py | #!/usr/local/bin/env python
# ==============================================================================
# MODULE DOCSTRING
# ==============================================================================
"""
MultiState
==========
Multistate Sampling simulation algorithms, specific variants, and analyzers
This module provides a general facility for running multiple thermodynamic state multistate simulations, both general
as well as derived classes for special cases such as parallel tempering (in which
the states differ only in temperature).
The classes also provide
Provided classes include:
- :class:`yank.multistate.MultiStateSampler`
Base class for general, multi-thermodynamic state parallel multistate
- :class:`yank.multistate.ReplicaExchangeSampler`
Derived class from MultiStateSampler which allows sampled thermodynamic states
to swap based on Hamiltonian Replica Exchange
- :class:`yank.multistate.ParallelTemperingSampler`
Convenience subclass of ReplicaExchange for parallel tempering simulations
(one System object, many temperatures).
- :class:`yank.multistate.SAMSSampler`
Single-replica sampler which samples through multiple thermodynamic states
on the fly.
- :class:`yank.multistate.MultiStateReporter`
Replica Exchange reporter class to store all variables and data
Analyzers
---------
The MultiState module also provides analysis modules to analyze simulations and compute observables from data generated
under any of the MultiStateSampler's
Extending and Subclassing
-------------------------
Subclassing a sampler and analyzer is done by importing and extending any of the following:
* The base ``MultiStateSampler`` from ``multistatesampler``
* The base ``MultiStateReporter`` from ``multistatereporter``
* The base ``MultiStateAnalyzer`` or ``PhaseAnalyzer`` and base `ObservablesRegistry`` from ``multistateanalyzer``
COPYRIGHT
---------
Current version by Andrea Rizzi <andrea.rizzi@choderalab.org>, Levi N. Naden <levi.naden@choderalab.org> and
John D. Chodera <john.chodera@choderalab.org> while at Memorial Sloan Kettering Cancer Center.
Original version by John D. Chodera <jchodera@gmail.com> while at the University of
California Berkeley.
LICENSE
-------
This code is licensed under the latest available version of the MIT License.
"""
import warnings
warnings.warn("The yank.multistate package is deprecated and it will be "
"available as openmmtools.multistate with openmmtools >= 0.18",
DeprecationWarning, stacklevel=2)
from .multistatesampler import MultiStateSampler
from .multistatereporter import MultiStateReporter
from .replicaexchange import ReplicaExchangeSampler, ReplicaExchangeAnalyzer
from .paralleltempering import ParallelTemperingSampler, ParallelTemperingAnalyzer
from .sams import SAMSSampler, SAMSAnalyzer
from .multistateanalyzer import *
from .utils import *
| #!/usr/local/bin/env python
# ==============================================================================
# MODULE DOCSTRING
# ==============================================================================
"""
MultiState
==========
Multistate Sampling simulation algorithms, specific variants, and analyzers
This module provides a general facility for running multiple thermodynamic state multistate simulations, both general
as well as derived classes for special cases such as parallel tempering (in which
the states differ only in temperature).
The classes also provide
Provided classes include:
- :class:`yank.multistate.MultiStateSampler`
Base class for general, multi-thermodynamic state parallel multistate
- :class:`yank.multistate.ReplicaExchangeSampler`
Derived class from MultiStateSampler which allows sampled thermodynamic states
to swap based on Hamiltonian Replica Exchange
- :class:`yank.multistate.ParallelTemperingSampler`
Convenience subclass of ReplicaExchange for parallel tempering simulations
(one System object, many temperatures).
- :class:`yank.multistate.SAMSSampler`
Single-replica sampler which samples through multiple thermodynamic states
on the fly.
- :class:`yank.multistate.MultiStateReporter`
Replica Exchange reporter class to store all variables and data
Analyzers
---------
The MultiState module also provides analysis modules to analyze simulations and compute observables from data generated
under any of the MultiStateSampler's
Extending and Subclassing
-------------------------
Subclassing a sampler and analyzer is done by importing and extending any of the following:
* The base ``MultiStateSampler`` from ``multistatesampler``
* The base ``MultiStateReporter`` from ``multistatereporter``
* The base ``MultiStateAnalyzer`` or ``PhaseAnalyzer`` and base `ObservablesRegistry`` from ``multistateanalyzer``
COPYRIGHT
---------
Current version by Andrea Rizzi <andrea.rizzi@choderalab.org>, Levi N. Naden <levi.naden@choderalab.org> and
John D. Chodera <john.chodera@choderalab.org> while at Memorial Sloan Kettering Cancer Center.
Original version by John D. Chodera <jchodera@gmail.com> while at the University of
California Berkeley.
LICENSE
-------
This code is licensed under the latest available version of the MIT License.
"""
import warning
warnings.warn("The yank.multistate package is deprecated and it will be "
"available as openmmtools.multistate with openmmtools >= 0.18",
DeprecationWarning, stacklevel=2)
from .multistatesampler import MultiStateSampler
from .multistatereporter import MultiStateReporter
from .replicaexchange import ReplicaExchangeSampler, ReplicaExchangeAnalyzer
from .paralleltempering import ParallelTemperingSampler, ParallelTemperingAnalyzer
from .sams import SAMSSampler, SAMSAnalyzer
from .multistateanalyzer import *
from .utils import *
| Python | 0.999999 |
4a18649367e6593724cdde6cf821eced595bb3cf | use list comprehension for auto-parse | genson/genson.py | genson/genson.py | import argparse
import sys
import re
import json
from .generator import SchemaNode
DESCRIPTION = """
Generate one, unified JSON Schema from one or more
JSON objects and/or JSON Schemas.
(uses Draft 4 - http://json-schema.org/draft-04/schema)
"""
def main():
args = parse_args()
s = SchemaNode()
for schema_file in args.schema:
add_json_from_file(s, schema_file, args.delimiter, schema=True)
for object_file in args.object:
add_json_from_file(s, object_file, args.delimiter)
print(s.to_json(indent=args.indent))
def parse_args():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('-d', '--delimiter', metavar='DELIM',
help='''set a delimiter - Use this option if the
input files contain multiple JSON objects/schemas.
You can pass any string. A few cases ('newline', 'tab',
'space') will get converted to a whitespace character,
and if empty string ('') is passed, the parser will
try to auto-detect where the boundary is.''')
parser.add_argument('-i', '--indent', type=int, metavar='SPACES',
help='''pretty-print the output, indenting SPACES
spaces''')
parser.add_argument('-s', '--schema', action='append', default=[],
type=argparse.FileType('r'),
help='''file containing a JSON Schema (can be
specified multiple times to merge schemas)''')
parser.add_argument('object', nargs=argparse.REMAINDER,
type=argparse.FileType('r'), help='''files containing
JSON objects (defaults to stdin if no arguments
are passed and the -s option is not present)''')
args = parser.parse_args()
args.delimiter = get_delim(args.delimiter)
# default to stdin if no objects or schemas
if not args.object and not args.schema:
args.object.append(get_stdin())
return args
def get_delim(delim):
"""
manage special conversions for difficult bash characters
"""
if delim == 'newline':
delim = '\n'
elif delim == 'tab':
delim = '\t'
elif delim == 'space':
delim = ' '
return delim
def get_stdin():
"""
Grab stdin, printing simple instructions if it's interactive.
"""
if sys.stdin.isatty():
print('Enter a JSON object, then press ctrl-D')
return sys.stdin
def add_json_from_file(s, fp, delimiter, schema=False):
method = getattr(s, 'add_schema' if schema else 'add_object')
raw_text = fp.read().strip()
fp.close()
for json_string in get_json_strings(raw_text, delimiter):
method(json.loads(json_string))
def get_json_strings(raw_text, delim):
if delim is None:
json_strings = [raw_text]
elif delim == '':
json_strings = detect_json_strings(raw_text)
else:
json_strings = raw_text.split(delim)
# sanitize data before returning
return [string.strip() for string in json_strings if string.strip()]
def detect_json_strings(raw_text):
"""
Use regex with lookaround to spot the boundaries between JSON objects.
Unfortunately, it has to match *something*, so at least one character
must be removed and replaced.
"""
strings = re.split('}\s*(?={)', raw_text)
# put back the stripped character
json_strings = [string + '}' for string in strings[:-1]]
# the last one doesn't need to be modified
json_strings.append(strings[-1])
return json_strings
| import argparse
import sys
import re
import json
from .generator import SchemaNode
DESCRIPTION = """
Generate one, unified JSON Schema from one or more
JSON objects and/or JSON Schemas.
(uses Draft 4 - http://json-schema.org/draft-04/schema)
"""
def main():
args = parse_args()
s = SchemaNode()
for schema_file in args.schema:
add_json_from_file(s, schema_file, args.delimiter, schema=True)
for object_file in args.object:
add_json_from_file(s, object_file, args.delimiter)
print(s.to_json(indent=args.indent))
def parse_args():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('-d', '--delimiter', metavar='DELIM',
help='''set a delimiter - Use this option if the
input files contain multiple JSON objects/schemas.
You can pass any string. A few cases ('newline', 'tab',
'space') will get converted to a whitespace character,
and if empty string ('') is passed, the parser will
try to auto-detect where the boundary is.''')
parser.add_argument('-i', '--indent', type=int, metavar='SPACES',
help='''pretty-print the output, indenting SPACES
spaces''')
parser.add_argument('-s', '--schema', action='append', default=[],
type=argparse.FileType('r'),
help='''file containing a JSON Schema (can be
specified multiple times to merge schemas)''')
parser.add_argument('object', nargs=argparse.REMAINDER,
type=argparse.FileType('r'), help='''files containing
JSON objects (defaults to stdin if no arguments
are passed and the -s option is not present)''')
args = parser.parse_args()
args.delimiter = get_delim(args.delimiter)
# default to stdin if no objects or schemas
if not args.object and not args.schema:
args.object.append(get_stdin())
return args
def get_delim(delim):
"""
manage special conversions for difficult bash characters
"""
if delim == 'newline':
delim = '\n'
elif delim == 'tab':
delim = '\t'
elif delim == 'space':
delim = ' '
return delim
def get_stdin():
"""
Grab stdin, printing simple instructions if it's interactive.
"""
if sys.stdin.isatty():
print('Enter a JSON object, then press ctrl-D')
return sys.stdin
def add_json_from_file(s, fp, delimiter, schema=False):
method = getattr(s, 'add_schema' if schema else 'add_object')
raw_text = fp.read().strip()
fp.close()
for json_string in get_json_strings(raw_text, delimiter):
method(json.loads(json_string))
def get_json_strings(raw_text, delim):
if delim is None:
json_strings = [raw_text]
elif delim == '':
json_strings = detect_json_strings(raw_text)
else:
json_strings = raw_text.split(delim)
# sanitize data before returning
return [string.strip() for string in json_strings if string.strip()]
def detect_json_strings(raw_text):
"""
Use regex with lookaround to spot the boundaries between JSON objects.
Unfortunately, it has to match *something*, so at least one character
must be removed and replaced.
"""
strings = re.split('}\s*(?={)', raw_text)
json_strings = []
for string in strings:
# put back the stripped character
json_strings.append(string + '}')
# the last one doesn't need to be modified
json_strings[-1] = strings[-1]
return json_strings
| Python | 0.000002 |
c3d454d5d7272620ab83b9e02cc8063d0e16da0f | Add an embedded favicon for error pages | edgedb/lang/common/markup/renderers/dhtml/__init__.py | edgedb/lang/common/markup/renderers/dhtml/__init__.py | ##
# Copyright (c) 2011 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import os
from semantix.rendering.css import dumps as scss_dumps, reload as scss_reload
from .. import json
from ... import serialize
__all__ = 'render',
_FAVICON = ('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAAGXRFWHRTb'
'2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAABhQTFRF6+np2VZWmVVVqxUV1qioxhMTuzw8////'
'fKlS/gAAAAh0Uk5T/////////wDeg71ZAAAAbElEQVR42lyOCwrAMAhD47f3v/ES3aAsBY0PteL8hAl'
'p3Zb4QLZJRAveWigFMB6TmqUa+IDuGcIhp4CQjIBVReSCFjC5C7gaPvksrargDiUtRcsCDDXfbkuRxh'
'5G4jHI93QA6aOkXXDpPAIMAD0IA95480JWAAAAAElFTkSuQmCC')
_HTML_TPL_START = '''<!DOCTYPE html>
<!--
Copyright (c) 2011 Sprymix Inc.
All rights reserved.
-->
<html>
<head>
<link rel="shortcut icon" href="''' + _FAVICON + '''" >
<style type="text/css">
{styles}
</style>
<script type="text/javascript">
{scripts}
(function() {{
var exc_info = ''';
_HTML_END = ''';
sx.dom.on(window, 'load', function(exc_info) {
var spec = sx.Markup.Renderer.unpack_markup(exc_info);
var renderer = new sx.Markup.Renderer(spec);
renderer.render('body');
if (renderer.top_exc_title) {
document.title = renderer.top_exc_title;
}
}, this, exc_info);
})();
</script>
</head>
<body>
<div id="body">
</div>
</body>
</html>
'''
class Renderer:
TPL_START = None
@classmethod
def _init(cls):
from semantix.utils.lang import javascript
with open(os.path.join(os.path.dirname(javascript.__file__), 'sx.js')) as f:
scripts = f.read()
with open(os.path.join(os.path.dirname(__file__), 'render.js')) as f:
scripts += ';\n' + f.read()
from . import styles
scss_reload(styles)
rendered_styles = scss_dumps(styles)
cls.TPL_START = _HTML_TPL_START.format(styles=rendered_styles, scripts=scripts)
@classmethod
def render(cls, markup, reload=False):
if reload:
cls._init()
exc_info = json.render(markup)
return ''.join((cls.TPL_START, exc_info, _HTML_END))
Renderer._init()
render = Renderer.render
| ##
# Copyright (c) 2011 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import os
from semantix.rendering.css import dumps as scss_dumps, reload as scss_reload
from .. import json
from ... import serialize
__all__ = 'render',
_HTML_TPL_START = '''<!DOCTYPE html>
<!--
Copyright (c) 2011 Sprymix Inc.
All rights reserved.
-->
<html>
<head>
<style type="text/css">
{styles}
</style>
<script type="text/javascript">
{scripts}
(function() {{
var exc_info = ''';
_HTML_END = ''';
sx.dom.on(window, 'load', function(exc_info) {
var spec = sx.Markup.Renderer.unpack_markup(exc_info);
var renderer = new sx.Markup.Renderer(spec);
renderer.render('body');
if (renderer.top_exc_title) {
document.title = renderer.top_exc_title;
}
}, this, exc_info);
})();
</script>
</head>
<body>
<div id="body">
</div>
</body>
</html>
'''
class Renderer:
TPL_START = None
@classmethod
def _init(cls):
from semantix.utils.lang import javascript
with open(os.path.join(os.path.dirname(javascript.__file__), 'sx.js')) as f:
scripts = f.read()
with open(os.path.join(os.path.dirname(__file__), 'render.js')) as f:
scripts += ';\n' + f.read()
from . import styles
scss_reload(styles)
rendered_styles = scss_dumps(styles)
cls.TPL_START = _HTML_TPL_START.format(styles=rendered_styles, scripts=scripts)
@classmethod
def render(cls, markup, reload=False):
if reload:
cls._init()
exc_info = json.render(markup)
return ''.join((cls.TPL_START, exc_info, _HTML_END))
Renderer._init()
render = Renderer.render
| Python | 0 |
8f1d2f0e821724f010291d340f30d5842ad32c76 | add word2vec yahoo for shoes | extractVecMat_shoes.py | extractVecMat_shoes.py | #/datastore/zhenyang/bin/python
import sys
import os
import gensim, logging
import numpy as np
import scipy.io as sio
def main():
##############
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
#pretrained_model = './vectors.bin'
#pretrained_model = '../freebase-vectors-skipgram1000-en.bin'
#pretrained_model = '../GoogleNews-vectors-negative300.bin'
#model = gensim.models.Word2Vec.load_word2vec_format(pretrained_model, binary=True)
#pretrained_model = './vectors.output'
pretrained_model = '../yahoo_100m_words_30d.output'
model = gensim.models.Word2Vec.load_word2vec_format(pretrained_model, binary=False)
##############
classnames = open('shoes_gclasses_vps.txt', 'r').read().splitlines()
cc = 0
clsid = 0
vec_size = 30
word2vec_mat = np.zeros((len(classnames), vec_size))
for classname in classnames:
idc = 1
for cls in classname.split(';'):
wordvec = np.zeros(1, vec_size))
for cls_word in cls.split(' '):
try:
wordvec = np.add(wordvec, model[cls_word])
idc = 0
except:
print cls_word
idc = 1
break
if idc == 0:
break
word2vec_mat[clsid, :] = wordvec
clsid = clsid + 1
cc = cc + idc
#np.savetxt('attr_word2vec_GoogleNews.txt', word2vec_mat)
#sio.savemat('attr_word2vec_GoogleNews.mat', {'word2vec':word2vec_mat})
np.savetxt('shoes_word2vec_yahoo_30d.txt', word2vec_mat)
sio.savemat('shoes_word2vec_yahoo_30d.mat', {'word2vec':word2vec_mat})
print cc
if __name__ == "__main__":
main() | #/datastore/zhenyang/bin/python
import sys
import os
import gensim, logging
import numpy as np
import scipy.io as sio
def main():
##############
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
#pretrained_model = './vectors.bin'
#pretrained_model = '../freebase-vectors-skipgram1000-en.bin'
#pretrained_model = '../GoogleNews-vectors-negative300.bin'
#model = gensim.models.Word2Vec.load_word2vec_format(pretrained_model, binary=True)
#pretrained_model = './vectors.output'
pretrained_model = '../yahoo_100m_words_30d.output'
model = gensim.models.Word2Vec.load_word2vec_format(pretrained_model, binary=False)
##############
classnames = open('shoes_gclasses_vps.txt', 'r').read().splitlines()
cc = 0
clsid = 0
vec_size = 30
word2vec_mat = np.zeros((len(classnames), vec_size))
for classname in classnames:
idc = 1
for cls in classname.split(';'):
wordvec = np.zeros(1, vec_size))
for cls_word in cls.split(' '):
try:
np.add(wordvec, model[cls_word])
idc = 0
except:
print cls_word
idc = 1
break
if idc == 0:
break
word2vec_mat[clsid, :] = wordvec
clsid = clsid + 1
cc = cc + idc
#np.savetxt('attr_word2vec_GoogleNews.txt', word2vec_mat)
#sio.savemat('attr_word2vec_GoogleNews.mat', {'word2vec':word2vec_mat})
np.savetxt('shoes_word2vec_yahoo_30d.txt', word2vec_mat)
sio.savemat('shoes_word2vec_yahoo_30d.mat', {'word2vec':word2vec_mat})
print cc
if __name__ == "__main__":
main() | Python | 0.000238 |
24f5be6e6a409b7447ccd6fede81b8c55662def4 | add return data | util/strategy.py | util/strategy.py |
import numpy as np
from trendy import segtrends
import pandas as pd
import tradingWithPython as twp
from filter import movingaverage
def orders_from_trends(x, segments=2, charts=True, window=7, momentum=False):
''' generate orders from segtrends '''
x_maxima, maxima, x_minima, minima = segtrends(x, segments, charts, window)
n = len(x)
y = np.array(x)
movy = movingaverage(y, window)
# generate order strategy
orders = np.zeros(n)
last_buy = y[0]
last_sale = y[0]
for i in range(1,n):
# get 2 latest support point y values prior to x
pmin = list(minima[np.where(x_minima<=i)][-2:])
pmax = list(maxima[np.where(x_maxima<=i)][-2:])
# sell if support slop is negative
min_sell = True if ((len(pmin)==2) and (pmin[1]-pmin[0])<0) else False
max_sell = True if ((len(pmax)==2) and (pmax[1]-pmax[0])<0) else False
# if support down, sell
buy = -1 if (min_sell and max_sell) else 0
# buy only if lower the moving average else sale
buy = 1 if ((buy == 0) and (y[i]<movy[i])) else -1
# sell only if ...
buy= -1 if ((buy == -1) and y[i]>last_buy) else 1
buy_price_dec = y[i]<last_buy
sale_price_dec = y[i]<last_sale
orders[i] = buy
last_buy = y[i] if (buy==1) else last_buy
last_sale = y[i] if (buy==-1) else last_sale
import math
if momentum:
# add momentum for buy
if (buy==1) and (orders[i-1]>=1):
#if buy_price_dec:
orders[i]=round(math.log(2*orders[i-1])+1)
#else:
# orders[i]=max(1, round(orders[i-1]/2))
# add momentum for sale
elif (buy==-1) and (orders[i-1]<=-1):
#if sale_price_dec:
orders[i]*=round(math.log(abs(orders[i-1]*2))+1)
#else:
# orders[i]=max(1, round(orders[i-1]/2))
# OUTPUT
return orders
def orders2strategy(orders, price, min_stocks=1):
strategy = pd.Series(index=price.index)
orders=[el*min_stocks for el in orders]
# create a stratgy from order
for i, idx in enumerate(price.index):
if orders[i]!=0:
strategy[idx] = orders[i]
return strategy
def eval(stockname='TSLA', field='open', months=12,
initialCash=20000, min_stocks=30, charts=True):
import tradingWithPython.lib.yahooFinance as yahoo
from pylab import title, figure
n = (5*4)*months
price = yahoo.getHistoricData(stockname)[field][-n:]
if charts:
title('automatic strategy')
orders = orders_from_trends(price, segments=n/5, charts=charts,
momentum=True);
strategy = orders2strategy(orders, price, min_stocks)
# do the backtest
bt = twp.Backtest(price, strategy, initialCash=initialCash, signalType='shares')
if charts:
bt.plotTrades()
figure()
bt.pnl.plot()
title('pnl')
bt.data.plot()
title('all strategy data')
return bt.data
|
import numpy as np
from trendy import segtrends
import pandas as pd
import tradingWithPython as twp
from filter import movingaverage
def orders_from_trends(x, segments=2, charts=True, window=7, momentum=False):
''' generate orders from segtrends '''
x_maxima, maxima, x_minima, minima = segtrends(x, segments, charts, window)
n = len(x)
y = np.array(x)
movy = movingaverage(y, window)
# generate order strategy
orders = np.zeros(n)
last_buy = y[0]
last_sale = y[0]
for i in range(1,n):
# get 2 latest support point y values prior to x
pmin = list(minima[np.where(x_minima<=i)][-2:])
pmax = list(maxima[np.where(x_maxima<=i)][-2:])
# sell if support slop is negative
min_sell = True if ((len(pmin)==2) and (pmin[1]-pmin[0])<0) else False
max_sell = True if ((len(pmax)==2) and (pmax[1]-pmax[0])<0) else False
# if support down, sell
buy = -1 if (min_sell and max_sell) else 0
# buy only if lower the moving average else sale
buy = 1 if ((buy == 0) and (y[i]<movy[i])) else -1
# sell only if ...
buy= -1 if ((buy == -1) and y[i]>last_buy) else 1
buy_price_dec = y[i]<last_buy
sale_price_dec = y[i]<last_sale
orders[i] = buy
last_buy = y[i] if (buy==1) else last_buy
last_sale = y[i] if (buy==-1) else last_sale
import math
if momentum:
# add momentum for buy
if (buy==1) and (orders[i-1]>=1):
#if buy_price_dec:
orders[i]=round(math.log(2*orders[i-1])+1)
#else:
# orders[i]=max(1, round(orders[i-1]/2))
# add momentum for sale
elif (buy==-1) and (orders[i-1]<=-1):
#if sale_price_dec:
orders[i]*=round(math.log(abs(orders[i-1]*2))+1)
#else:
# orders[i]=max(1, round(orders[i-1]/2))
# OUTPUT
return orders
def orders2strategy(orders, price, min_stocks=1):
strategy = pd.Series(index=price.index)
orders=[el*min_stocks for el in orders]
# create a stratgy from order
for i, idx in enumerate(price.index):
if orders[i]!=0:
strategy[idx] = orders[i]
return strategy
def eval(stockname='TSLA', field='open', months=12,
initialCash=20000, min_stocks=30, charts=True):
import tradingWithPython.lib.yahooFinance as yahoo
from pylab import title, figure
n = (5*4)*months
price = yahoo.getHistoricData(stockname)[field][-n:]
if charts:
title('automatic strategy')
orders = orders_from_trends(price, segments=n/5, charts=charts,
momentum=True);
strategy = orders2strategy(orders, price, min_stocks)
# do the backtest
bt = twp.Backtest(price, strategy, initialCash=initialCash, signalType='shares')
if charts:
bt.plotTrades()
figure()
bt.pnl.plot()
title('pnl')
bt.data.plot()
title('all strategy data')
| Python | 0.025135 |
efc26d1e3065f18a80b601e8416abe0a19c83103 | Simplify a test case, NFC | packages/Python/lldbsuite/test/lang/swift/variables/bridged_string/TestSwiftBridgedStringVariables.py | packages/Python/lldbsuite/test/lang/swift/variables/bridged_string/TestSwiftBridgedStringVariables.py | # coding=utf-8
# TestSwiftBridgedStringVariables.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Tests simple swift expressions
"""
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.decorators as decorators
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftBridgedStringVariables(TestBase):
mydir = TestBase.compute_mydir(__file__)
@decorators.skipUnlessDarwin
@decorators.swiftTest
def test_swift_bridged_string_variables(self):
"""Test that Swift.String formats properly"""
self.build()
self.do_test()
def setUp(self):
TestBase.setUp(self)
self.main_source = "main.swift"
self.main_source_spec = lldb.SBFileSpec(self.main_source)
def do_test(self):
"""Test that Swift.String formats properly"""
(_, _, thread, _) = lldbutil.run_to_source_breakpoint(self,
"Set breakpoint here", self.main_source_spec)
self.frame = thread.frames[0]
self.assertTrue(self.frame, "Frame 0 is valid.")
s1 = self.frame.FindVariable("s1")
s2 = self.frame.FindVariable("s2")
s3 = self.frame.FindVariable("s3")
s4 = self.frame.FindVariable("s4")
s5 = self.frame.FindVariable("s5")
s6 = self.frame.FindVariable("s6")
lldbutil.check_variable(self, s1, summary='"Hello world"')
lldbutil.check_variable(self, s2, summary='"ΞΕΛΛΘ"')
lldbutil.check_variable(self, s3, summary='"Hello world"')
lldbutil.check_variable(self, s4, summary='"ΞΕΛΛΘ"')
lldbutil.check_variable(self, s5, use_dynamic=True, summary='"abc"')
lldbutil.check_variable(self, s6, summary='"abc"')
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lldb.SBDebugger.Terminate)
unittest2.main()
| # coding=utf-8
# TestSwiftBridgedStringVariables.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Tests simple swift expressions
"""
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.decorators as decorators
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftBridgedStringVariables(TestBase):
mydir = TestBase.compute_mydir(__file__)
@decorators.skipUnlessDarwin
@decorators.swiftTest
def test_swift_bridged_string_variables(self):
"""Test that Swift.String formats properly"""
self.build()
self.do_test()
def setUp(self):
TestBase.setUp(self)
self.main_source = "main.swift"
self.main_source_spec = lldb.SBFileSpec(self.main_source)
def do_test(self):
"""Test that Swift.String formats properly"""
exe_name = "a.out"
exe = self.getBuildArtifact(exe_name)
# Create the target
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Set the breakpoints
breakpoint = target.BreakpointCreateBySourceRegex(
'Set breakpoint here', self.main_source_spec)
self.assertTrue(breakpoint.GetNumLocations() > 0, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(None, None, os.getcwd())
self.assertTrue(process, PROCESS_IS_VALID)
# Frame #0 should be at our breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, breakpoint)
self.assertTrue(len(threads) == 1)
self.thread = threads[0]
self.frame = self.thread.frames[0]
self.assertTrue(self.frame, "Frame 0 is valid.")
s1 = self.frame.FindVariable("s1")
s2 = self.frame.FindVariable("s2")
s3 = self.frame.FindVariable("s3")
s4 = self.frame.FindVariable("s4")
s5 = self.frame.FindVariable("s5")
s6 = self.frame.FindVariable("s6")
lldbutil.check_variable(self, s1, summary='"Hello world"')
lldbutil.check_variable(self, s2, summary='"ΞΕΛΛΘ"')
lldbutil.check_variable(self, s3, summary='"Hello world"')
lldbutil.check_variable(self, s4, summary='"ΞΕΛΛΘ"')
lldbutil.check_variable(self, s5, use_dynamic=True, summary='"abc"')
lldbutil.check_variable(self, s6, summary='"abc"')
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lldb.SBDebugger.Terminate)
unittest2.main()
| Python | 0.000002 |
b45e9be5338baa652055f52c494a4febefe75c2d | Fix dealing with numpy arrays. | pydevd_plugins/extensions/types/pydevd_plugin_numpy_types.py | pydevd_plugins/extensions/types/pydevd_plugin_numpy_types.py | from _pydevd_bundle.pydevd_extension_api import TypeResolveProvider
from _pydevd_bundle.pydevd_resolver import defaultResolver, MAX_ITEMS_TO_HANDLE, TOO_LARGE_ATTR, TOO_LARGE_MSG
from .pydevd_helpers import find_mod_attr
# =======================================================================================================================
# NdArrayResolver
# =======================================================================================================================
class NdArrayResolver: pass
class NdArrayItemsContainer: pass
class NDArrayTypeResolveProvider(object):
def can_provide(self, type_object, type_name):
nd_array = find_mod_attr('numpy', 'ndarray')
return nd_array is not None and issubclass(type_object, nd_array)
'''
This resolves a numpy ndarray returning some metadata about the NDArray
'''
def is_numeric(self, obj):
if not hasattr(obj, 'dtype'):
return False
return obj.dtype.kind in 'biufc'
def resolve(self, obj, attribute):
if attribute == '__internals__':
return defaultResolver.get_dictionary(obj)
if attribute == 'min':
if self.is_numeric(obj) and obj.size > 0:
return obj.min()
else:
return None
if attribute == 'max':
if self.is_numeric(obj) and obj.size > 0:
return obj.max()
else:
return None
if attribute == 'shape':
return obj.shape
if attribute == 'dtype':
return obj.dtype
if attribute == 'size':
return obj.size
if attribute.startswith('['):
container = NdArrayItemsContainer()
i = 0
format_str = '%0' + str(int(len(str(len(obj))))) + 'd'
for item in obj:
setattr(container, format_str % i, item)
i += 1
if i > MAX_ITEMS_TO_HANDLE:
setattr(container, TOO_LARGE_ATTR, TOO_LARGE_MSG)
break
return container
return None
def get_dictionary(self, obj):
ret = dict()
ret['__internals__'] = defaultResolver.get_dictionary(obj)
if obj.size > 1024 * 1024:
ret['min'] = 'ndarray too big, calculating min would slow down debugging'
ret['max'] = 'ndarray too big, calculating max would slow down debugging'
elif obj.size == 0:
ret['min'] = 'array is empty'
ret['max'] = 'array is empty'
else:
if self.is_numeric(obj):
ret['min'] = obj.min()
ret['max'] = obj.max()
else:
ret['min'] = 'not a numeric object'
ret['max'] = 'not a numeric object'
ret['shape'] = obj.shape
ret['dtype'] = obj.dtype
ret['size'] = obj.size
ret['[0:%s] ' % (len(obj))] = list(obj[0:MAX_ITEMS_TO_HANDLE])
return ret
import sys
if not sys.platform.startswith("java"):
TypeResolveProvider.register(NDArrayTypeResolveProvider)
| from _pydevd_bundle.pydevd_extension_api import TypeResolveProvider
from _pydevd_bundle.pydevd_resolver import defaultResolver, MAX_ITEMS_TO_HANDLE, TOO_LARGE_ATTR, TOO_LARGE_MSG
from .pydevd_helpers import find_mod_attr
# =======================================================================================================================
# NdArrayResolver
# =======================================================================================================================
class NdArrayResolver: pass
class NdArrayItemsContainer: pass
class NDArrayTypeResolveProvider(object):
def can_provide(self, type_object, type_name):
nd_array = find_mod_attr('numpy', 'ndarray')
return nd_array is not None and issubclass(type_object, nd_array)
'''
This resolves a numpy ndarray returning some metadata about the NDArray
'''
def is_numeric(self, obj):
if not hasattr(obj, 'dtype'):
return False
return obj.dtype.kind in 'biufc'
def resolve(self, obj, attribute):
if attribute == '__internals__':
return defaultResolver.get_dictionary(obj)
if attribute == 'min':
if self.is_numeric(obj):
return obj.min()
else:
return None
if attribute == 'max':
if self.is_numeric(obj):
return obj.max()
else:
return None
if attribute == 'shape':
return obj.shape
if attribute == 'dtype':
return obj.dtype
if attribute == 'size':
return obj.size
if attribute.startswith('['):
container = NdArrayItemsContainer()
i = 0
format_str = '%0' + str(int(len(str(len(obj))))) + 'd'
for item in obj:
setattr(container, format_str % i, item)
i += 1
if i > MAX_ITEMS_TO_HANDLE:
setattr(container, TOO_LARGE_ATTR, TOO_LARGE_MSG)
break
return container
return None
def get_dictionary(self, obj):
ret = dict()
ret['__internals__'] = defaultResolver.get_dictionary(obj)
if obj.size > 1024 * 1024:
ret['min'] = 'ndarray too big, calculating min would slow down debugging'
ret['max'] = 'ndarray too big, calculating max would slow down debugging'
else:
if self.is_numeric(obj):
ret['min'] = obj.min()
ret['max'] = obj.max()
else:
ret['min'] = 'not a numeric object'
ret['max'] = 'not a numeric object'
ret['shape'] = obj.shape
ret['dtype'] = obj.dtype
ret['size'] = obj.size
ret['[0:%s] ' % (len(obj))] = list(obj[0:MAX_ITEMS_TO_HANDLE])
return ret
import sys
if not sys.platform.startswith("java"):
TypeResolveProvider.register(NDArrayTypeResolveProvider)
| Python | 0 |
d90d7c35df1f815f31de2cad9fe2dde43f9f561a | Print generation date. | git_changelog.py | git_changelog.py | from __future__ import print_function
from collections import defaultdict
import datetime
import glob
import json
import os
import re
import subprocess
from urllib2 import urlopen
DEBUG = False
GIT_EXEC = "/usr/bin/git"
REPOSITORIES = glob.glob("/ssd/swinbank/src/*") # Everything in w_2017_8
JIRA_API_URL = "https://jira.lsstcorp.org/rest/api/2"
class Repository(object):
def __init__(self, path):
self.path = path
def __call_git(self, *args):
to_exec = [GIT_EXEC] + list(args)
if DEBUG:
print(to_exec)
return subprocess.check_output(to_exec, cwd=self.path)
def commits(self, reachable_from=None, merges_only=False):
args = ["log", "--pretty=format:%H"]
if reachable_from:
args.append(reachable_from)
if merges_only:
args.append("--merges")
return self.__call_git(*args).split()
def message(self, commit_hash):
return self.__call_git("show", commit_hash, "--pretty=format:%s")
def tags(self, pattern=r".*"):
return [tag for tag in self.__call_git("tag").split()
if re.search(pattern, tag)]
def update(self):
return self.__call_git("pull")
@staticmethod
def ticket(message):
try:
return re.search(r"(DM-\d+)", message, re.IGNORECASE).group(1)
except AttributeError:
if DEBUG:
print(message)
def get_ticket_summary(ticket):
url = JIRA_API_URL + "/issue/" + ticket + "?fields=summary"
if DEBUG:
print(url)
j = json.load(urlopen(url))
return j['fields']['summary']
def print_tag(tagname, tickets):
print("<h2>New in {}</h2>".format(tagname))
print("<ul>")
for ticket in sorted(tickets):
summary = get_ticket_summary(ticket)
pkgs = ", ".join(sorted(tickets[ticket]))
link_text = (u"<li><a href=https://jira.lsstcorp.org/browse/"
u"{ticket}>{ticket}</a>: {summary} [{pkgs}]</li>")
print(link_text.format(ticket=ticket, summary=summary, pkgs=pkgs)
.encode("utf-8"))
print("</ul>")
def format_output(changelog):
# Ew, needs a proper templating engine
print("<html>")
print("<body>")
print("<h1>LSST DM Weekly Changelog</h1>")
# Always do master first
print_tag("master", changelog.pop("master"))
# Then the other tags in order
for tag in sorted(changelog, reverse=True):
print_tag(tag, changelog[tag])
gen_date = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M +00:00")
print("<p>Generated {}.</p>".format(gen_date))
print("</body>")
print("</html>")
def generate_changelog(repositories):
# Dict of tag -> ticket -> affected packages
changelog = defaultdict(lambda: defaultdict(set))
for repository in repositories:
if DEBUG:
print(repository)
r = Repository(repository)
r.update()
# Extract all tags which look like weeklies
tags = sorted(r.tags("w\.\d{4}"), reverse=True)
# Also include tickets which aren't yet in a weekly
tags.insert(0, "master")
for newtag, oldtag in zip(tags, tags[1:]):
merges = (set(r.commits(newtag, merges_only=True)) -
set(r.commits(oldtag, merges_only=True)))
for sha in merges:
ticket = r.ticket(r.message(sha))
if ticket:
changelog[newtag][ticket].add(os.path.basename(repository))
return changelog
if __name__ == "__main__":
changelog = generate_changelog(REPOSITORIES)
format_output(changelog)
| from __future__ import print_function
from collections import defaultdict
import glob
import json
import os
import re
import subprocess
from urllib2 import urlopen
DEBUG = False
GIT_EXEC = "/usr/bin/git"
REPOSITORIES = glob.glob("/ssd/swinbank/src/*") # Everything in w_2017_8
JIRA_API_URL = "https://jira.lsstcorp.org/rest/api/2"
class Repository(object):
def __init__(self, path):
self.path = path
def __call_git(self, *args):
to_exec = [GIT_EXEC] + list(args)
if DEBUG:
print(to_exec)
return subprocess.check_output(to_exec, cwd=self.path)
def commits(self, reachable_from=None, merges_only=False):
args = ["log", "--pretty=format:%H"]
if reachable_from:
args.append(reachable_from)
if merges_only:
args.append("--merges")
return self.__call_git(*args).split()
def message(self, commit_hash):
return self.__call_git("show", commit_hash, "--pretty=format:%s")
def tags(self, pattern=r".*"):
return [tag for tag in self.__call_git("tag").split()
if re.search(pattern, tag)]
def update(self):
return self.__call_git("pull")
@staticmethod
def ticket(message):
try:
return re.search(r"(DM-\d+)", message, re.IGNORECASE).group(1)
except AttributeError:
if DEBUG:
print(message)
def get_ticket_summary(ticket):
url = JIRA_API_URL + "/issue/" + ticket + "?fields=summary"
if DEBUG:
print(url)
j = json.load(urlopen(url))
return j['fields']['summary']
def print_tag(tagname, tickets):
print("<h2>New in {}</h2>".format(tagname))
print("<ul>")
for ticket in sorted(tickets):
summary = get_ticket_summary(ticket)
pkgs = ", ".join(sorted(tickets[ticket]))
link_text = (u"<li><a href=https://jira.lsstcorp.org/browse/"
u"{ticket}>{ticket}</a>: {summary} [{pkgs}]</li>")
print(link_text.format(ticket=ticket, summary=summary, pkgs=pkgs)
.encode("utf-8"))
print("</ul>")
def format_output(changelog):
# Ew, needs a proper templating engine
print("<html>")
print("<body>")
print("<h1>LSST DM Weekly Changelog</h1>")
# Always do master first
print_tag("master", changelog.pop("master"))
# Then the other tags in order
for tag in sorted(changelog, reverse=True):
print_tag(tag, changelog[tag])
print("</body>")
print("</html>")
def generate_changelog(repositories):
# Dict of tag -> ticket -> affected packages
changelog = defaultdict(lambda: defaultdict(set))
for repository in repositories:
if DEBUG:
print(repository)
r = Repository(repository)
r.update()
# Extract all tags which look like weeklies
tags = sorted(r.tags("w\.\d{4}"), reverse=True)
# Also include tickets which aren't yet in a weekly
tags.insert(0, "master")
for newtag, oldtag in zip(tags, tags[1:]):
merges = (set(r.commits(newtag, merges_only=True)) -
set(r.commits(oldtag, merges_only=True)))
for sha in merges:
ticket = r.ticket(r.message(sha))
if ticket:
changelog[newtag][ticket].add(os.path.basename(repository))
return changelog
if __name__ == "__main__":
changelog = generate_changelog(REPOSITORIES)
format_output(changelog)
| Python | 0.000001 |
d7e9264418cbe5574d7475094e2c06a878897c34 | fix ALDC scraper | every_election/apps/election_snooper/snoopers/aldc.py | every_election/apps/election_snooper/snoopers/aldc.py | from datetime import datetime
from .base import BaseSnooper
from election_snooper.models import SnoopedElection
class ALDCScraper(BaseSnooper):
snooper_name = "ALDC"
base_url = "https://www.aldc.org/"
def get_all(self):
url = "{}category/forthcoming-by-elections/".format(self.base_url)
print(url)
soup = self.get_soup(url)
for tile in soup.find_all('article'):
title = tile.find('h2').a.text.strip()
detail_url = tile.find('h2').a['href'].strip()
date = tile.find('date').text.strip()
content = tile.find('div', {'class': 'c-editor'}).find_all('p')
if 'cause' in content[0].text.lower():
seat_control, cause = content[0].text.lower().split('cause')
cause = cause.split('\n')[0].strip(": .")
else:
cause = "unknown"
data = {
'title': title,
'source': url,
'cause': cause,
'detail': "\n".join([x.text for x in content]),
'snooper_name': self.snooper_name,
}
try:
data['date'] = datetime.strptime(date, "%B %d, %Y")
except ValueError:
pass
item, created = SnoopedElection.objects.update_or_create(
snooper_name=self.snooper_name,
detail_url=detail_url,
defaults=data
)
if created:
self.post_to_slack(item)
| from datetime import datetime
from .base import BaseSnooper
from election_snooper.models import SnoopedElection
class ALDCScraper(BaseSnooper):
snooper_name = "ALDC"
base_url = "https://www.aldc.org/"
def get_all(self):
url = "{}category/forthcoming-by-elections/".format(self.base_url)
print(url)
soup = self.get_soup(url)
wrapper = soup.find('section', {'class': 'mod-tile-wrap'})
for tile in wrapper.find_all('div', {'class': 'tile'}):
title = tile.find(
'div', {'class': 'election-heading'}).text.strip()
detail_url = tile.find(
'div', {'class': 'election-heading'}).a['href'].strip()
content = tile.find(
'div', {'class': 'election-content'}).find_all('p')
if 'cause' in content[1].text.lower():
seat_control, cause = content[1].text.lower().split('cause')
cause = cause.split('\n')[0].strip(": .")
else:
cause = "unknown"
data = {
'title': title,
'source': url,
'cause': cause,
'detail': "\n".join([x.text for x in content]),
'snooper_name': self.snooper_name,
}
try:
data['date'] = datetime.strptime(content[0].strong.text, "%B %d, %Y")
except ValueError:
pass
item, created = SnoopedElection.objects.update_or_create(
snooper_name=self.snooper_name,
detail_url=detail_url,
defaults=data
)
if created:
self.post_to_slack(item)
| Python | 0 |
021ca057be4333d209454b043c79f9d6d327c3e0 | Return the response for the main page without jinja rendering as AngularJS is doing the rendering | webapp/keepupwithscience/frontend/main.py | webapp/keepupwithscience/frontend/main.py | from flask import Blueprint, render_template, make_response
bp = Blueprint('main', __name__)
@bp.route('/')
def index():
"""Returns the main interface."""
return make_response(open('keepupwithscience/frontend/templates/main.html').read())
# return render_template('main.html') | from flask import Blueprint, render_template
bp = Blueprint('main', __name__)
@bp.route('/')
def index():
"""Returns the main interface."""
return render_template('main.html') | Python | 0.000021 |
392e34a70bd2bccba268ec9de1752afc50cd1b35 | Add the httlib dir to the build | packaging/datadog-agent-lib/setup.py | packaging/datadog-agent-lib/setup.py | #!/usr/bin/env python
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
import os, sys
from distutils.command.install import INSTALL_SCHEMES
def getVersion():
try:
from config import get_version
except ImportError:
import sys
sys.path.append("../..")
from config import get_version
return get_version()
def printVersion():
print getVersion()
def getDataFiles():
''' Load the data files from checks.d '''
import glob
curpath = os.path.dirname(os.path.join(os.path.realpath(__file__)))
checksd_path = os.path.join(curpath, 'checks.d')
checksd_glob = os.path.join(checksd_path, '*.py')
# Find all py files in the checks.d directory
checks = []
for check in glob.glob(checksd_glob):
check = os.path.basename(check)
checks.append(check)
return [('share/datadog/agent/checks.d', ['checks.d/%s' % c for c in checks])]
if __name__ == "__main__":
setup(name='datadog-agent-lib',
version=getVersion(),
description='Datatadog monitoring agent check library',
author='Datadog',
author_email='info@datadoghq.com',
url='http://datadoghq.com/',
packages=['checks', 'checks/db', 'checks/system', 'dogstream','pup', 'yaml', 'checks/libs/httplib2'],
package_data={'checks': ['libs/*'], 'pup' : ['static/*', 'pup.html']},
data_files=getDataFiles()
)
| #!/usr/bin/env python
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
import os, sys
from distutils.command.install import INSTALL_SCHEMES
def getVersion():
try:
from config import get_version
except ImportError:
import sys
sys.path.append("../..")
from config import get_version
return get_version()
def printVersion():
print getVersion()
def getDataFiles():
''' Load the data files from checks.d '''
import glob
curpath = os.path.dirname(os.path.join(os.path.realpath(__file__)))
checksd_path = os.path.join(curpath, 'checks.d')
checksd_glob = os.path.join(checksd_path, '*.py')
# Find all py files in the checks.d directory
checks = []
for check in glob.glob(checksd_glob):
check = os.path.basename(check)
checks.append(check)
return [('share/datadog/agent/checks.d', ['checks.d/%s' % c for c in checks])]
if __name__ == "__main__":
setup(name='datadog-agent-lib',
version=getVersion(),
description='Datatadog monitoring agent check library',
author='Datadog',
author_email='info@datadoghq.com',
url='http://datadoghq.com/',
packages=['checks', 'checks/db', 'checks/system', 'dogstream','pup', 'yaml'],
package_data={'checks': ['libs/*', 'libs/httplib2/*'], 'pup' : ['static/*', 'pup.html']},
data_files=getDataFiles()
)
| Python | 0 |
a9ff99f94938c5e50038b9d98200c5247e651c35 | Fix AttributeError: module 'config' has no attribute 'expires' | utils/ignores.py | utils/ignores.py | import random
import time
import config
import log as logging
def check_ignored(host, channel):
ignores = config.expires['global']
if channel in config.ignores['channel'].keys():
ignores.extend(config.expires['channel'][channel])
for i in ignores:
for (uhost, expires) in i:
# if duration is not None, check if it's in the past, else say True
is_past = time.time() > expires if expires is not None else True
if host == uhost and is_past:
return True
elif host == uhost and not is_past:
del config.ignores['channel'][channel][host]
break
return False
def add_ignore(irc, event, args):
host = args[0]
base_message = "Ignoring %s for %s seconds"
indefinite = "Ignoring %s indefinately"
if len(args) > 1:
if args[1] == 'random':
duration = random.randrange(100, 10000)
expires = duration + int(time.time())
else:
duration = int(args[1])
expires = duration + int(time.time())
else:
expires = None
channel = args[2] if len(args) > 2 else None
if channel is not None:
try:
i = config.ignores['channels'][channel]
except KeyError:
i = config.ignores['channels'][channel] = []
i.append([host, expires])
else:
i = config.ignores['global']
i.append([host, expires])
if expires is not None:
if channel is not None:
logging.info(base_message + " in %s", host, duration, channel)
else:
logging.info(base_message, host, duration)
else:
if channel is not None:
logging.info(indefinite + " in %s", host, channel)
else:
logging.info(indefinite, host)
| import random
import time
import config
import log as logging
def check_ignored(host, channel):
ignores = config.expires['global']
if channel in config.expires['channel'].keys():
ignores.extend(config.expires['channel'][channel])
for i in ignores:
for (uhost, expires) in i:
# if duration is not None, check if it's in the past, else say True
is_past = time.time() > expires if expires is not None else True
if host == uhost and is_past:
return True
elif host == uhost and not is_past:
del config.ignores['channel'][channel][host]
break
return False
def add_ignore(irc, event, args):
host = args[0]
base_message = "Ignoring %s for %s seconds"
indefinite = "Ignoring %s indefinately"
if len(args) > 1:
if args[1] == 'random':
duration = random.randrange(100, 10000)
expires = duration + int(time.time())
else:
duration = int(args[1])
expires = duration + int(time.time())
else:
expires = None
channel = args[2] if len(args) > 2 else None
if channel is not None:
try:
i = config.ignores['channels'][channel]
except KeyError:
i = config.ignores['channels'][channel] = []
i.append([host, expires])
else:
i = config.ignores['global']
i.append([host, expires])
if expires is not None:
if channel is not None:
logging.info(base_message + " in %s", host, duration, channel)
else:
logging.info(base_message, host, duration)
else:
if channel is not None:
logging.info(indefinite + " in %s", host, channel)
else:
logging.info(indefinite, host)
| Python | 0.014043 |
7de5d99866164c0f17aa85f8cdd910132ac35667 | use re.split instead of string.split | topiary/rna/common.py | topiary/rna/common.py | # Copyright (c) 2015. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
def infer_delimiter(filename, comment_char="#", n_lines=3):
"""
Given a file which contains data separated by one of the following:
- commas
- tabs
- spaces
Return the most likely separator by sniffing the first few lines
of the file's contents.
"""
lines = []
with open(filename, "r") as f:
for line in f:
if line.startswith(comment_char):
continue
if len(lines) < n_lines:
lines.append(line)
else:
break
if len(lines) < n_lines:
raise ValueError(
"Not enough lines in %s to infer delimiter" % filename)
candidate_delimiters = ["\t", ",", "\s+"]
for candidate_delimiter in candidate_delimiters:
counts = [len(re.split(candidate_delimiter, line)) for line in lines]
first_line_count = counts[0]
if all(c == first_line_count for c in counts) and first_line_count > 1:
return candidate_delimiter
raise ValueError("Could not determine delimiter for %s" % filename)
def check_required_columns(df, filename, required_columns):
"""
Ensure that all required columns are present in the given dataframe,
otherwise raise an exception.
"""
available_columns = set(df.columns)
for column_name in required_columns:
if column_name not in available_columns:
raise ValueError("FPKM tracking file %s missing column '%s'" % (
filename,
column_name))
| # Copyright (c) 2015. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def infer_delimiter(filename, comment_char="#", n_lines=3):
"""
Given a file which contains data separated by one of the following:
- commas
- tabs
- spaces
Return the most likely separator by sniffing the first few lines
of the file's contents.
"""
lines = []
with open(filename, "r") as f:
for line in f:
if line.startswith(comment_char):
continue
if len(lines) < n_lines:
lines.append(line)
else:
break
if len(lines) < n_lines:
raise ValueError(
"Not enough lines in %s to infer delimiter" % filename)
# the split function defaults to splitting on multiple spaces,
# which here corresponds to a candidate value of None
candidate_delimiters = ["\t", ",", None]
for candidate_delimiter in candidate_delimiters:
counts = [len(line.split(candidate_delimiter)) for line in lines]
first_line_count = counts[0]
if all(c == first_line_count for c in counts) and first_line_count > 1:
if candidate_delimiter is None:
return "\s+"
else:
return candidate_delimiter
raise ValueError("Could not determine delimiter for %s" % filename)
def check_required_columns(df, filename, required_columns):
"""
Ensure that all required columns are present in the given dataframe,
otherwise raise an exception.
"""
available_columns = set(df.columns)
for column_name in required_columns:
if column_name not in available_columns:
raise ValueError("FPKM tracking file %s missing column '%s'" % (
filename,
column_name))
| Python | 0.000032 |
9c90c539f83551de2645522c22ccbd0c75d34be3 | Fix Mapbox routing fixture shape | server/lib/python/cartodb_services/test/test_mapboxrouting.py | server/lib/python/cartodb_services/test/test_mapboxrouting.py | import unittest
from mock import Mock
from cartodb_services.mapbox import MapboxRouting
from cartodb_services.mapbox.routing import DEFAULT_PROFILE
from cartodb_services.tools.exceptions import ServiceException
from cartodb_services.tools import Coordinate
from credentials import mapbox_api_key
INVALID_TOKEN = 'invalid_token'
VALID_WAYPOINTS = [Coordinate(-73.989, 40.733), Coordinate(-74, 40.733)]
NUM_WAYPOINTS_MAX = 25
INVALID_WAYPOINTS_EMPTY = []
INVALID_WAYPOINTS_MIN = [Coordinate(-73.989, 40.733)]
INVALID_WAYPOINTS_MAX = [Coordinate(-73.989, 40.733)
for x in range(0, NUM_WAYPOINTS_MAX + 2)]
VALID_PROFILE = DEFAULT_PROFILE
INVALID_PROFILE = 'invalid_profile'
WELL_KNOWN_SHAPE = [(40.73312, -73.98891), (40.73353, -73.98987),
(40.73398, -73.99095), (40.73453, -73.99227),
(40.73531, -73.99412), (40.73467, -73.99459),
(40.73442, -73.99477), (40.73435, -73.99482),
(40.73403, -73.99505), (40.73344, -73.99549),
(40.73286, -73.9959), (40.73226, -73.99635),
(40.73186, -73.99664), (40.73147, -73.99693),
(40.73141, -73.99698), (40.73147, -73.99707),
(40.73219, -73.99856), (40.73222, -73.99861),
(40.73225, -73.99868), (40.73293, -74.00007),
(40.733, -74.00001)]
WELL_KNOWN_LENGTH = 1317.9
class MapboxRoutingTestCase(unittest.TestCase):
def setUp(self):
self.routing = MapboxRouting(token=mapbox_api_key(), logger=Mock())
def test_invalid_profile(self):
with self.assertRaises(ValueError):
self.routing.directions(VALID_WAYPOINTS, INVALID_PROFILE)
def test_invalid_waypoints_empty(self):
with self.assertRaises(ValueError):
self.routing.directions(INVALID_WAYPOINTS_EMPTY, VALID_PROFILE)
def test_invalid_waypoints_min(self):
with self.assertRaises(ValueError):
self.routing.directions(INVALID_WAYPOINTS_MIN, VALID_PROFILE)
def test_invalid_waypoints_max(self):
with self.assertRaises(ValueError):
self.routing.directions(INVALID_WAYPOINTS_MAX, VALID_PROFILE)
def test_invalid_token(self):
invalid_routing = MapboxRouting(token=INVALID_TOKEN, logger=Mock())
with self.assertRaises(ServiceException):
invalid_routing.directions(VALID_WAYPOINTS,
VALID_PROFILE)
def test_valid_request(self):
route = self.routing.directions(VALID_WAYPOINTS, VALID_PROFILE)
self.assertEqual(route.shape, WELL_KNOWN_SHAPE)
self.assertEqual(route.length, WELL_KNOWN_LENGTH)
assert route.duration # The duration may change between executions
| import unittest
from mock import Mock
from cartodb_services.mapbox import MapboxRouting
from cartodb_services.mapbox.routing import DEFAULT_PROFILE
from cartodb_services.tools.exceptions import ServiceException
from cartodb_services.tools import Coordinate
from credentials import mapbox_api_key
INVALID_TOKEN = 'invalid_token'
VALID_WAYPOINTS = [Coordinate(-73.989, 40.733), Coordinate(-74, 40.733)]
NUM_WAYPOINTS_MAX = 25
INVALID_WAYPOINTS_EMPTY = []
INVALID_WAYPOINTS_MIN = [Coordinate(-73.989, 40.733)]
INVALID_WAYPOINTS_MAX = [Coordinate(-73.989, 40.733)
for x in range(0, NUM_WAYPOINTS_MAX + 2)]
VALID_PROFILE = DEFAULT_PROFILE
INVALID_PROFILE = 'invalid_profile'
WELL_KNOWN_SHAPE = [(40.73312, -73.98891), (40.73353, -73.98987),
(40.73398, -73.99095), (40.73453, -73.99227),
(40.73531, -73.99412), (40.73467, -73.99459),
(40.73442, -73.99477), (40.73435, -73.99482),
(40.73403, -73.99505), (40.73344, -73.99549),
(40.73286, -73.9959), (40.73226, -73.99635),
(40.73186, -73.99664), (40.73147, -73.99693),
(40.73141, -73.99698), (40.73147, -73.99707),
(40.73219, -73.99856), (40.73222, -73.99861),
(40.73293, -74.00007), (40.733, -74.00001)]
WELL_KNOWN_LENGTH = 1317.9
class MapboxRoutingTestCase(unittest.TestCase):
def setUp(self):
self.routing = MapboxRouting(token=mapbox_api_key(), logger=Mock())
def test_invalid_profile(self):
with self.assertRaises(ValueError):
self.routing.directions(VALID_WAYPOINTS, INVALID_PROFILE)
def test_invalid_waypoints_empty(self):
with self.assertRaises(ValueError):
self.routing.directions(INVALID_WAYPOINTS_EMPTY, VALID_PROFILE)
def test_invalid_waypoints_min(self):
with self.assertRaises(ValueError):
self.routing.directions(INVALID_WAYPOINTS_MIN, VALID_PROFILE)
def test_invalid_waypoints_max(self):
with self.assertRaises(ValueError):
self.routing.directions(INVALID_WAYPOINTS_MAX, VALID_PROFILE)
def test_invalid_token(self):
invalid_routing = MapboxRouting(token=INVALID_TOKEN, logger=Mock())
with self.assertRaises(ServiceException):
invalid_routing.directions(VALID_WAYPOINTS,
VALID_PROFILE)
def test_valid_request(self):
route = self.routing.directions(VALID_WAYPOINTS, VALID_PROFILE)
self.assertEqual(route.shape, WELL_KNOWN_SHAPE)
self.assertEqual(route.length, WELL_KNOWN_LENGTH)
assert route.duration # The duration may change between executions
| Python | 0 |
ab505466859a5d2e5b397d1fb1fc3271977a2024 | modify register validation | app/user/forms.py | app/user/forms.py | from flask_wtf import Form
from wtforms import StringField, PasswordField, TextAreaField, SelectField, validators
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from .models import User
from app.post.models import Post_type
from flask.ext.bcrypt import check_password_hash
class LoginForm(Form):
username = StringField('Username', validators=[validators.Required('Username tidak boleh kosong')])
password = PasswordField('Password', validators=[validators.Required('Username tidak boleh kosong')])
#Add a validation when Logged In
def validate(self):
rv = Form.validate(self)
if not rv:
return False
user = User.query.filter_by(username=self.username.data).first()
if user is None or not user:
self.username.errors.append('Unknown username')
return False
if not check_password_hash(user.password, self.password.data):
self.password.errors.append('Invalid password')
return False
self.user = user
return True
class RegisterForm(Form):
full_name = StringField('Full Name', validators=[validators.Required('Nama tidak boleh kosong')])
username = StringField('Username', validators=[validators.Required('Username tidak boleh kosong')])
email = StringField('Email', validators=[validators.Required('Email tidak boleh kosong')])
password = PasswordField('Password', validators=[validators.Required('Password Tidak boleh kosong'),
validators.EqualTo('confirm', message='Password harus sama')])
confirm = PasswordField('Ulangi Password')
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if User.query.filter_by(username=self.username.data).first():
self.username.errors.append("Username Telah digunakan")
return False
if User.query.filter_by(email=self.email.data).first():
self.email.errors.append("Email yang anda masukkan telah terdaftar")
return False
return True
class CreatePost(Form):
title = StringField('title', validators=[validators.Required('Judul tidak boleh kosong')])
content = TextAreaField('Content', validators=[validators.Required('Konten tidak boleh kosong'),
validators.Length(max=100, message="Konten maksimal 100 karakter")])
post_type = SelectField('Type', coerce=int)
| from flask_wtf import Form
from wtforms import StringField, PasswordField, TextAreaField, SelectField, validators
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from .models import User
from app.post.models import Post_type
from flask.ext.bcrypt import check_password_hash
class LoginForm(Form):
username = StringField('Username', validators=[validators.Required('Username tidak boleh kosong')])
password = PasswordField('Password', validators=[validators.Required('Username tidak boleh kosong')])
#Add a validation when Logged In
def validate(self):
rv = Form.validate(self)
if not rv:
return False
user = User.query.filter_by(username=self.username.data).first()
if user is None or not user:
self.username.errors.append('Unknown username')
return False
if not check_password_hash(user.password, self.password.data):
self.password.errors.append('Invalid password')
return False
self.user = user
return True
class RegisterForm(Form):
full_name = StringField('Full Name', validators=[validators.Required('Nama tidak boleh kosong')])
username = StringField('Username', validators=[validators.Required('Username tidak boleh kosong')])
email = StringField('Email', validators=[validators.Required('Email tidak boleh kosong')])
password = PasswordField('Password', validators=[validators.Required('Password Tidak boleh kosong'),
validators.EqualTo('confirm', message='Password harus sama')])
confirm = PasswordField('Ulangi Password')
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if User.query.filter_by(username=self.username.data).first():
self.username.errors.append("Username Telah digunakan")
return False
if User.query.filte_by(email=self.email.data).first():
self.email.errors.append("Email yang anda masukkan telah terdaftar")
return False
return True
class CreatePost(Form):
title = StringField('title', validators=[validators.Required('Judul tidak boleh kosong')])
content = TextAreaField('Content', validators=[validators.Required('Konten tidak boleh kosong'),
validators.Length(max=100, message="Konten maksimal 100 karakter")])
post_type = SelectField('Type', coerce=int)
| Python | 0.000001 |
8bc108c5a8b4ce3fa5192363576eef7f67f4d82e | Update tracking params | app/utils/meta.py | app/utils/meta.py | from urllib.parse import unquote, urlparse
import aiohttp
from sanic.log import logger
from .. import settings
def get_watermark(request, watermark: str) -> tuple[str, bool]:
api_key = _get_api_key(request)
if api_key:
api_mask = api_key[:2] + "***" + api_key[-2:]
logger.info(f"Authenticated with {api_mask}")
if api_key in settings.API_KEYS:
return "", False
if watermark == settings.DISABLED_WATERMARK:
referer = _get_referer(request)
logger.info(f"Watermark removal referer: {referer}")
if referer:
domain = urlparse(referer).netloc
if domain in settings.ALLOWED_WATERMARKS:
return "", False
return settings.DEFAULT_WATERMARK, True
if watermark:
if watermark == settings.DEFAULT_WATERMARK:
logger.warning(f"Redundant watermark: {watermark}")
return watermark, True
if watermark not in settings.ALLOWED_WATERMARKS:
logger.warning(f"Unknown watermark: {watermark}")
return settings.DEFAULT_WATERMARK, True
return watermark, False
return settings.DEFAULT_WATERMARK, False
async def track(request, lines: list[str]):
text = " ".join(lines).strip()
trackable = not any(
name in request.args for name in ["height", "width", "watermark"]
)
if text and trackable and settings.REMOTE_TRACKING_URL:
async with aiohttp.ClientSession() as session:
params = dict(
text=text,
client=_get_referer(request) or "https://memegen.link",
result=unquote(request.url),
)
logger.info(f"Tracking request: {params}")
headers = {"X-API-KEY": _get_api_key(request) or ""}
response = await session.get(
settings.REMOTE_TRACKING_URL, params=params, headers=headers
)
if response.status != 200:
try:
message = await response.json()
except aiohttp.client_exceptions.ContentTypeError:
message = response.text
logger.error(f"Tracker response: {message}")
def _get_referer(request):
return request.headers.get("referer") or request.args.get("referer")
def _get_api_key(request):
return request.headers.get("x-api-key")
| from urllib.parse import unquote, urlparse
import aiohttp
from sanic.log import logger
from .. import settings
def get_watermark(request, watermark: str) -> tuple[str, bool]:
api_key = _get_api_key(request)
if api_key:
api_mask = api_key[:2] + "***" + api_key[-2:]
logger.info(f"Authenticated with {api_mask}")
if api_key in settings.API_KEYS:
return "", False
if watermark == settings.DISABLED_WATERMARK:
referer = _get_referer(request)
logger.info(f"Watermark removal referer: {referer}")
if referer:
domain = urlparse(referer).netloc
if domain in settings.ALLOWED_WATERMARKS:
return "", False
return settings.DEFAULT_WATERMARK, True
if watermark:
if watermark == settings.DEFAULT_WATERMARK:
logger.warning(f"Redundant watermark: {watermark}")
return watermark, True
if watermark not in settings.ALLOWED_WATERMARKS:
logger.warning(f"Unknown watermark: {watermark}")
return settings.DEFAULT_WATERMARK, True
return watermark, False
return settings.DEFAULT_WATERMARK, False
async def track(request, lines: list[str]):
text = " ".join(lines).strip()
trackable = not any(
name in request.args for name in ["height", "width", "watermark"]
)
referer = _get_referer(request)
if referer:
source = urlparse(referer).netloc
else:
source = "memegen.link"
if text and trackable and settings.REMOTE_TRACKING_URL:
async with aiohttp.ClientSession() as session:
params = dict(text=text, source=source, context=unquote(request.url))
logger.info(f"Tracking request: {params}")
headers = {"X-API-KEY": _get_api_key(request) or ""}
response = await session.get(
settings.REMOTE_TRACKING_URL, params=params, headers=headers
)
if response.status != 200:
try:
message = await response.json()
except aiohttp.client_exceptions.ContentTypeError:
message = response.text
logger.error(f"Tracker response: {message}")
def _get_referer(request):
return request.headers.get("referer") or request.args.get("referer")
def _get_api_key(request):
return request.headers.get("x-api-key")
| Python | 0.000001 |
29e56ec30c13c5fbb562e77cdb2c660d5fc52842 | remove debugging print | freppledb/common/management/commands/generatetoken.py | freppledb/common/management/commands/generatetoken.py | #
# Copyright (C) 2021 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from ...auth import getWebserviceAuthorization
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from freppledb import __version__
class Command(BaseCommand):
help = """
This command generates an API authentication token for a user.
"""
requires_system_checks = False
def get_version(self):
return __version__
def add_arguments(self, parser):
parser.add_argument("user", help="User running the command")
parser.add_argument(
"--expiry", help="Validity in days of the token", type=int, default=5
)
parser.add_argument(
"--database",
action="store",
dest="database",
default=DEFAULT_DB_ALIAS,
help="Specifies the database to use",
),
def handle(self, **options):
token = getWebserviceAuthorization(
database=options["database"],
secret=None,
user=options["user"],
exp=options["expiry"] * 86400,
)
if options["verbosity"]:
print(
"Access token for %s, valid for %s days:"
% (options["user"], options["expiry"])
)
return token
| #
# Copyright (C) 2021 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from ...auth import getWebserviceAuthorization
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from freppledb import __version__
class Command(BaseCommand):
help = """
This command generates an API authentication token for a user.
"""
requires_system_checks = False
def get_version(self):
return __version__
def add_arguments(self, parser):
parser.add_argument("user", help="User running the command")
parser.add_argument(
"--expiry", help="Validity in days of the token", type=int, default=5
)
parser.add_argument(
"--database",
action="store",
dest="database",
default=DEFAULT_DB_ALIAS,
help="Specifies the database to use",
),
def handle(self, **options):
token = getWebserviceAuthorization(
database=options["database"],
secret="perepe", #None,
user=options["user"],
exp=options["expiry"] * 86400,
)
if options["verbosity"]:
print(
"Access token for %s, valid for %s days:"
% (options["user"], options["expiry"])
)
return token
| Python | 0.000081 |
9235d1aa35e6a597be3c497577de528425d6e046 | comment cleanup | training/parse_osm.py | training/parse_osm.py | from lxml import etree
import ast
import re
# parse xml data, return a list of dicts representing addresses
def xmlToAddrList(xml_file):
tree = etree.parse(xml_file)
root = tree.getroot()
addr_list=[]
for element in root:
if element.tag == 'node' or element.tag =='way':
address={}
for x in element.iter('tag'):
addr = ast.literal_eval(str(x.attrib))
address[addr['k']]=addr['v']
addr_list.append(address)
return addr_list
# transform osm data into tagged training data
def osmToTraining(address_list):
train_data=[]
addr_index = 0
token_index = 0
# only the osm tags below will end up in training data; others will be ignored
osm_tags_to_addr_tags = {
"addr:house:number":"AddressNumber",
"addr:street:prefix":"StreetNamePreDirectional",
"addr:street:name":"StreetName",
"addr:street:type":"StreetNamePostType",
"addr:city":"PlaceName",
"addr:state":"StateName",
"addr:postcode":"ZipCode"}
for address in address_list:
addr_train = []
for key, value in address.items():
if key in osm_tags_to_addr_tags.keys():
addr_train.append([value ,osm_tags_to_addr_tags[key]])
train_data.append(addr_train)
return train_data
| from lxml import etree
import ast
import re
# parse xml data, return a list of dicts representing addresses
def xmlToAddrList(xml_file):
tree = etree.parse(xml_file)
root = tree.getroot()
addr_list=[]
for element in root:
if element.tag == 'node' or element.tag =='way':
address={}
for x in element.iter('tag'):
addr = ast.literal_eval(str(x.attrib))
address[addr['k']]=addr['v']
addr_list.append(address)
return addr_list
# transform osm data into tagged training data
def osmToTraining(address_list):
train_data=[]
addr_index = 0
token_index = 0
osm_tags_to_addr_tags = {
"addr:house:number":"AddressNumber",
"addr:street:prefix":"StreetNamePreDirectional",
"addr:street:name":"StreetName",
"addr:street:type":"StreetNamePostType",
"addr:city":"PlaceName",
"addr:state":"StateName",
"addr:postcode":"ZipCode"}
for address in address_list:
addr_train = []
for key, value in address.items(): #iterate through dict ****
if key in osm_tags_to_addr_tags.keys(): #if the key is one of the defined osm tags
addr_train.append([value ,osm_tags_to_addr_tags[key]]) #add (token, tokentag)
train_data.append(addr_train)
return train_data
| Python | 0 |
d7bea2995fc54c15404b4b47cefae5fc7b0201de | FIX partner internal code compatibility with sign up | partner_internal_code/res_partner.py | partner_internal_code/res_partner.py | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api
class partner(models.Model):
""""""
_inherit = 'res.partner'
internal_code = fields.Char(
'Internal Code',
copy=False,
)
# we let this to base nane search improoved
# def name_search(self, cr, uid, name, args=None,
# operator='ilike', context=None, limit=100):
# args = args or []
# res = []
# if name:
# recs = self.search(
# cr, uid, [('internal_code', operator, name)] + args,
# limit=limit, context=context)
# res = self.name_get(cr, uid, recs)
# res += super(partner, self).name_search(
# cr, uid,
# name=name, args=args, operator=operator, limit=limit)
# return res
@api.model
def create(self, vals):
if not vals.get('internal_code', False):
vals['internal_code'] = self.env[
'ir.sequence'].next_by_code('partner.internal.code') or '/'
return super(partner, self).create(vals)
_sql_constraints = {
('internal_code_uniq', 'unique(internal_code)',
'Internal Code mast be unique!')
}
| # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api
class partner(models.Model):
""""""
_inherit = 'res.partner'
internal_code = fields.Char(
'Internal Code')
# we let this to base nane search improoved
# def name_search(self, cr, uid, name, args=None,
# operator='ilike', context=None, limit=100):
# args = args or []
# res = []
# if name:
# recs = self.search(
# cr, uid, [('internal_code', operator, name)] + args,
# limit=limit, context=context)
# res = self.name_get(cr, uid, recs)
# res += super(partner, self).name_search(
# cr, uid,
# name=name, args=args, operator=operator, limit=limit)
# return res
@api.model
def create(self, vals):
if not vals.get('internal_code', False):
vals['internal_code'] = self.env[
'ir.sequence'].next_by_code('partner.internal.code') or '/'
return super(partner, self).create(vals)
_sql_constraints = {
('internal_code_uniq', 'unique(internal_code)',
'Internal Code mast be unique!')
}
| Python | 0 |
7428f7d87d33ab1531f94753516ad4a56780a612 | Add helper to predefine remove recursive flag. Add copy_to and copy_from helpers which aid the copying of single files | virtualbox/library_ext/guest_session.py | virtualbox/library_ext/guest_session.py | import time
from virtualbox import library
"""
Add helper code to the default IGuestSession class.
"""
# Add context management to IGuestSession
class IGuestSession(library.IGuestSession):
__doc__ = library.IGuestSession.__doc__
def __enter__(self):
return self
def __exit__(self, exception_type, exception_val, trace):
self.close()
def execute(self, command, arguments=[], stdin="", environment=[],
flags=[library.ProcessCreateFlag.wait_for_std_err,
library.ProcessCreateFlag.wait_for_std_out,
library.ProcessCreateFlag.ignore_orphaned_processes],
priority=library.ProcessPriority.default,
affinity=[], timeout_ms=0):
"""Execute a command in the Guest
Arguments:
command - Command to execute.
arguments - List of arguments for the command
stdin - A buffer to write to the stdin of the command.
environment - See IGuestSession.create_process?
flags - List of ProcessCreateFlag objects.
Default value set to [wait_for_std_err,
wait_for_stdout,
ignore_orphaned_processes]
timeout_ms - ms to wait for the process to complete.
If 0, wait for ever...
priority - Set the ProcessPriority priority to be used for
execution.
affinity - Process affinity to use for execution.
Return IProcess, stdout, stderr
"""
def read_out(process, flags, stdout, stderr):
if library.ProcessCreateFlag.wait_for_std_err in flags:
e = str(process.read(2, 65000, 0))
stderr.append(e)
if library.ProcessCreateFlag.wait_for_std_out in flags:
o = str(process.read(1, 65000, 0))
stdout.append(o)
process = self.process_create_ex(command, arguments, environment,
flags, timeout_ms, priority, affinity)
process.wait_for(int(library.ProcessWaitResult.start), 0)
# write stdin to the process
if stdin:
index = 0
while index < len(stdin):
index += process.write(0, [library.ProcessInputFlag.none],
stdin[index:], 0)
process.write(0, [library.ProcessInputFlag.end_of_file], 0)
# read the process output and wait for
stdout = []
stderr = []
while process.status == library.ProcessStatus.started:
read_out(process, flags, stdout, stderr)
time.sleep(0.2)
# make sure we have read the remainder of the out
read_out(process, flags, stdout, stderr)
return process, "".join(stdout), "".join(stderr)
def makedirs(self, path, mode=0x777):
"""Super-mkdir: create a leaf directory and all intermediate ones."""
self.directory_create(path, mode, [library.DirectoryCreateFlag.parents])
# Simplify calling directory_remove_recursive. Set default flags to
# content_and_dir if they have not yet been set.
def directory_remove_recursive(self, path, flags=None):
if flags is None:
flags = [library.DirectoryRemoveRecFlag.content_and_dir]
super(IGuestSession, self).directory_remove_recursive(path, flags)
directory_remove_recursive.__doc__ = \
library.IGuestSession.directory_remove_recursive.__doc__
def copy_to(self, host_path, guest_path):
"Copy a single file to the vm. Wraps copy_to_vm."
if not os.path.exists(host_path):
raise OSError("Failed to find %s on host" % host_path)
p = self.copy_to_vm(host_path, guest_path, [])
p.wait_for_completion()
return p
def copy_from(self, guest_path, host_path):
"Copy a single file from the vm. Wraps copy_from_vm."
# Dodgy exists check...
for x in range(10):
try:
self.file_exists(guest_path)
break
except:
time.sleep(0.1)
else:
raise OSError("Failed to find %s on guest" % guest_path)
p = self.copy_from(guest_path, host_path, [])
p.wait_for_completion()
return p
| import time
from virtualbox import library
"""
Add helper code to the default IGuestSession class.
"""
# Add context management to IGuestSession
class IGuestSession(library.IGuestSession):
__doc__ = library.IGuestSession.__doc__
def __enter__(self):
return self
def __exit__(self, exception_type, exception_val, trace):
self.close()
def execute(self, command, arguments=[], stdin="", environment=[],
flags=[library.ProcessCreateFlag.wait_for_std_err,
library.ProcessCreateFlag.wait_for_std_out,
library.ProcessCreateFlag.ignore_orphaned_processes],
priority=library.ProcessPriority.default,
affinity=[], timeout_ms=0):
"""Execute a command in the Guest
Arguments:
command - Command to execute.
arguments - List of arguments for the command
stdin - A buffer to write to the stdin of the command.
environment - See IGuestSession.create_process?
flags - List of ProcessCreateFlag objects.
Default value set to [wait_for_std_err,
wait_for_stdout,
ignore_orphaned_processes]
timeout_ms - ms to wait for the process to complete.
If 0, wait for ever...
priority - Set the ProcessPriority priority to be used for
execution.
affinity - Process affinity to use for execution.
Return IProcess, stdout, stderr
"""
def read_out(process, flags, stdout, stderr):
if library.ProcessCreateFlag.wait_for_std_err in flags:
e = str(process.read(2, 65000, 0))
stderr.append(e)
if library.ProcessCreateFlag.wait_for_std_out in flags:
o = str(process.read(1, 65000, 0))
stdout.append(o)
process = self.process_create_ex(command, arguments, environment,
flags, timeout_ms, priority, affinity)
process.wait_for(int(library.ProcessWaitResult.start), 0)
# write stdin to the process
if stdin:
index = 0
while index < len(stdin):
index += process.write(0, [library.ProcessInputFlag.none],
stdin[index:], 0)
process.write(0, [library.ProcessInputFlag.end_of_file], 0)
# read the process output and wait for
stdout = []
stderr = []
while process.status == library.ProcessStatus.started:
read_out(process, flags, stdout, stderr)
time.sleep(0.2)
# make sure we have read the remainder of the out
read_out(process, flags, stdout, stderr)
return process, "".join(stdout), "".join(stderr)
def makedirs(self, path, mode=0x777):
"""Super-mkdir: create a leaf directory and all intermediate ones."""
self.directory_create(path, mode, [library.DirectoryCreateFlag.parents])
| Python | 0 |
da05fe2d41a077276946c5d6c86995c60315e093 | Make sure we load pyvisa-py when enumerating instruments. | src/auspex/instruments/__init__.py | src/auspex/instruments/__init__.py | import pkgutil
import importlib
import pyvisa
instrument_map = {}
for loader, name, is_pkg in pkgutil.iter_modules(__path__):
module = importlib.import_module('auspex.instruments.' + name)
if hasattr(module, "__all__"):
globals().update((name, getattr(module, name)) for name in module.__all__)
for name in module.__all__:
instrument_map.update({name:getattr(module,name)})
def enumerate_visa_instruments():
rm = pyvisa.ResourceManager("@py")
print(rm.list_resources())
def probe_instrument_ids():
rm = pyvisa.ResourceManager("@py")
for instr_label in rm.list_resources():
instr = rm.open_resource(instr_label)
try:
print(instr_label, instr.query('*IDN?'))
except:
print(instr_label, "Did not respond")
instr.close()
| import pkgutil
import importlib
import pyvisa
instrument_map = {}
for loader, name, is_pkg in pkgutil.iter_modules(__path__):
module = importlib.import_module('auspex.instruments.' + name)
if hasattr(module, "__all__"):
globals().update((name, getattr(module, name)) for name in module.__all__)
for name in module.__all__:
instrument_map.update({name:getattr(module,name)})
def enumerate_visa_instruments():
rm = pyvisa.ResourceManager()
print(rm.list_resources())
def probe_instrument_ids():
rm = pyvisa.ResourceManager()
for instr_label in rm.list_resources():
instr = rm.open_resource(instr_label)
try:
print(instr_label, instr.query('*IDN?'))
except:
print(instr_label, "Did not respond")
instr.close()
| Python | 0 |
23d4e48155e8906510d09a5eaf9fafafa7280d63 | Fix a few typos in the test. | test/functionalities/data-formatter/data-formatter-stl/libcxx/unordered/TestDataFormatterUnordered.py | test/functionalities/data-formatter/data-formatter-stl/libcxx/unordered/TestDataFormatterUnordered.py | """
Test lldb data formatter subsystem.
"""
import os, time
import unittest2
import lldb
from lldbtest import *
import lldbutil
class LibcxxUnorderedDataFormatterTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
@dsym_test
def test_with_dsym_and_run_command(self):
"""Test data formatter commands."""
self.buildDsym()
self.data_formatter_commands()
@dwarf_test
@skipIfGcc
def test_with_dwarf_and_run_command(self):
"""Test data formatter commands."""
self.buildDwarf()
self.data_formatter_commands()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
def look_for_content_and_continue(self,var_name,substrs):
self.expect( ("frame variable %s" % var_name), substrs=substrs)
self.runCmd("continue")
def data_formatter_commands(self):
"""Test that that file and class static variables display correctly."""
self.runCmd("file a.out", CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_source_regexp (self, "Set break point at this line.")
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs = ['stopped',
'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type filter clear', check=False)
self.runCmd('type synth clear', check=False)
self.runCmd("settings set target.max-children-count 256", check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
self.expect('image list', substrs = self.getLibcPlusPlusLibs())
self.look_for_content_and_continue("map",['size=5 {', 'hello','world','this','is','me'])
self.look_for_content_and_continue("mmap",['size=6 {','first = 3','second = "this"','first = 2','second = "hello"'])
self.look_for_content_and_continue("iset",['size=5 {','[0] = 5','[2] = 3','[3] = 2'])
self.look_for_content_and_continue("sset",['size=5 {','[0] = "is"','[1] = "world"','[4] = "hello"'])
self.look_for_content_and_continue("imset",['size=6 {','[0] = 3','[1] = 3','[2] = 3','[4] = 2','[5] = 1'])
self.look_for_content_and_continue("smset",['size=5 {','[0] = "is"','[1] = "is"','[2] = "world"','[3] = "world"'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
| """
Test lldb data formatter subsystem.
"""
import os, time
import unittest2
import lldb
from lldbtest import *
import lldbutil
class LibcxxUnorderedDataFormatterTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
@dsym_test
def test_with_dsym_and_run_command(self):
"""Test data formatter commands."""
self.buildDsym()
self.data_formatter_commands()
@dwarf_test
@skipIfGcc
def test_with_dwarf_and_run_command(self):
"""Test data formatter commands."""
self.buildDwarf()
self.data_formatter_commands()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
def look_for_content_and_continue(self,var_name,substrs):
self.expect( ("frame variable %s" % var_name), substrs )
self.runCmd("continue")
def data_formatter_commands(self):
"""Test that that file and class static variables display correctly."""
self.runCmd("file a.out", CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_source_regexp (self, "Set break point at this line.")
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs = ['stopped',
'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type filter clear', check=False)
self.runCmd('type synth clear', check=False)
self.runCmd("settings set target.max-children-count 256", check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
self.expect('image list', substrs = self.getLibcPlusPlusLibs())
self.look_for_content_and_continue("map",['size=5 {,''hello','world','this','is','me'])
self.look_for_content_and_continue("mmap",['size=6 {','first = 3','second = "this"','first = 2','second = "hello"'])
self.look_for_content_and_continue("iset",['size=5 {','[0] = 5','[2] = 3','[3] = 2'])
self.look_for_content_and_continue("sset",['size=5 {','[0] = "is"','[1] = "world"','[4] = "hello"'])
self.look_for_content_and_continue("imset",['size=6 {','[0] = 3','[1] = 3','[2] = 3','[4] = 2','[5] = 1'])
self.look_for_content_and_continue("smset",['size=5 {','[0] = "is"','[1] = "is"','[2] = "world"','[3] = "world"'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
| Python | 0.999998 |
abc74f521f1b52fe2b17046cc81705a691314832 | Give an error only if the report object is not None | ReadConfig.py | ReadConfig.py | #
# ReadConfig
#
# Ron Lockwood
# University of Washington, SIL International
# 12/4/14
#
# Version 1.1 - 3/7/18 - Ron Lockwood
# Give an error only if the report object is not None
#
# Functions for reading a configuration file
import re
CONFIG_FILE = 'FlexTrans.config'
def readConfig(report):
try:
f_handle = open(CONFIG_FILE)
except:
if report is not None:
report.Error('Error reading the file: "' + CONFIG_FILE + '". Check that it is in the FlexTools folder.')
return None
my_map = {}
for line in f_handle:
if len(line) < 2:
if report is not None:
report.Error('Error reading the file: "' + CONFIG_FILE + '". No blank lines allowed.')
return
# Skip commented lines
if line[0] == '#':
continue
# We expect lines in the form -- property=value
if not re.search('=',line):
if report is not None:
report.Error('Error reading the file: "' + CONFIG_FILE + '". A line without "=" was found.')
return
(prop, value) = line.split('=')
value = value.rstrip()
# if the value has commas, save it as a list
if re.search(',',value):
my_list = value.split(',')
my_map[prop] = my_list
else:
my_map[prop] = value
return my_map
def getConfigVal(my_map, key, report):
if key not in my_map:
if report is not None:
report.Error('Error in the file: "' + CONFIG_FILE + '". A value for "'+key+'" was not found.')
return None
else:
return my_map[key]
def configValIsList(my_map, key, report):
if type(my_map[key]) is not list:
if report is not None:
report.Error('Error in the file: "' + CONFIG_FILE + '". The value for "'+key+'" is supposed to be a comma separated list. For a single value, end it with a comma.')
return False
else:
return True
| #
# ReadConfig
#
# Ron Lockwood
# University of Washington, SIL International
# 12/4/14
#
# Functions for reading a configuration file
import re
CONFIG_FILE = 'FlexTrans.config'
def readConfig(report):
try:
f_handle = open(CONFIG_FILE)
except:
report.Error('Error reading the file: "' + CONFIG_FILE + '". Check that it is in the FlexTools folder.')
return None
my_map = {}
for line in f_handle:
if len(line) < 2:
report.Error('Error reading the file: "' + CONFIG_FILE + '". No blank lines allowed.')
return
# Skip commented lines
if line[0] == '#':
continue
# We expect lines in the form -- property=value
if not re.search('=',line):
report.Error('Error reading the file: "' + CONFIG_FILE + '". A line without "=" was found.')
return
(prop, value) = line.split('=')
value = value.rstrip()
# if the value has commas, save it as a list
if re.search(',',value):
my_list = value.split(',')
my_map[prop] = my_list
else:
my_map[prop] = value
return my_map
def getConfigVal(my_map, key, report):
if key not in my_map:
report.Error('Error in the file: "' + CONFIG_FILE + '". A value for "'+key+'" was not found.')
return None
else:
return my_map[key]
def configValIsList(my_map, key, report):
if type(my_map[key]) is not list:
report.Error('Error in the file: "' + CONFIG_FILE + '". The value for "'+key+'" is supposed to be a comma separated list. For a single value, end it with a comma.')
return False
else:
return True
| Python | 0.999999 |
4eb4a2eaa42cd71bf4427bdaaa1e853975432691 | Allow keyword arguments in GeneralStoreManager.create_item method | graphene/storage/intermediate/general_store_manager.py | graphene/storage/intermediate/general_store_manager.py | from graphene.storage.id_store import *
class GeneralStoreManager:
"""
Handles the creation/deletion of nodes to the NodeStore with ID recycling
"""
def __init__(self, store):
"""
Creates an instance of the GeneralStoreManager
:param store: Store to manage
:return: General store manager to handle index recycling
:rtype: GeneralStoreManager
"""
self.store = store
self.idStore = IdStore(store.FILE_NAME + ".id")
def create_item(self, **kwargs):
"""
Creates an item with the type of the store being managed
:return: New item with type STORE_TYPE
"""
# Check for an available ID from the IdStore
available_id = self.idStore.get_id()
# If no ID is available, get the last index of the file
if available_id == IdStore.NO_ID:
available_id = self.store.get_last_file_index()
# Create a type based on the type our store stores
return self.store.STORAGE_TYPE(available_id, **kwargs)
def delete_item(self, item):
"""
Deletes the given item from the store and adds the index to its IdStore
to be recycled
:return: Nothing
:rtype: None
"""
# Get index of item to be deleted
deleted_index = item.index
# Delete the item from the store
self.store.delete_item(item)
# Add the index to the IdStore, so it can be recycled
self.idStore.store_id(deleted_index)
| from graphene.storage.id_store import *
class GeneralStoreManager:
"""
Handles the creation/deletion of nodes to the NodeStore with ID recycling
"""
def __init__(self, store):
"""
Creates an instance of the GeneralStoreManager
:param store: Store to manage
:return: General store manager to handle index recycling
:rtype: GeneralStoreManager
"""
self.store = store
self.idStore = IdStore(store.FILE_NAME + ".id")
def create_item(self):
"""
Creates an item with the type of the store being managed
:return: New item with type STORE_TYPE
"""
# Check for an available ID from the IdStore
available_id = self.idStore.get_id()
# If no ID is available, get the last index of the file
if available_id == IdStore.NO_ID:
available_id = self.store.get_last_file_index()
# Create a type based on the type our store stores
return self.store.STORAGE_TYPE(available_id)
def delete_item(self, item):
"""
Deletes the given item from the store and adds the index to its IdStore
to be recycled
:return: Nothing
:rtype: None
"""
# Get index of item to be deleted
deleted_index = item.index
# Delete the item from the store
self.store.delete_item(item)
# Add the index to the IdStore, so it can be recycled
self.idStore.store_id(deleted_index)
| Python | 0.000001 |
ad47fb85e5c2deb47cbe3fc3478e1ae2da93adfe | Update h-index.py | Python/h-index.py | Python/h-index.py | # Time: O(n)
# Space: O(n)
# Given an array of citations (each citation is a non-negative integer)
# of a researcher, write a function to compute the researcher's h-index.
#
# According to the definition of h-index on Wikipedia:
# "A scientist has index h if h of his/her N papers have
# at least h citations each, and the other N − h papers have
# no more than h citations each."
#
# For example, given citations = [3, 0, 6, 1, 5],
# which means the researcher has 5 papers in total
# and each of them had received 3, 0, 6, 1, 5 citations respectively.
# Since the researcher has 3 papers with at least 3 citations each and
# the remaining two with no more than 3 citations each, his h-index is 3.
#
# Note: If there are several possible values for h, the maximum one is taken as the h-index.
#
class Solution(object):
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
n = len(citations);
count = [0] * (n + 1)
for x in citations:
if x >= n:
count[n] += 1
else:
count[x] += 1
h = 0
for i in reversed(xrange(0, n + 1)):
h += count[i]
if h >= i:
return i
return h
# Time: O(nlogn)
# Space: O(1)
class Solution2(object):
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
citations.sort(reverse=True)
h = 0
for x in citations:
if x >= h + 1:
h += 1
else:
break
return h
# Time: O(nlogn)
# Space: O(n)
class Solution2(object):
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
return sum(x >= i + 1 for i, x in enumerate(sorted(citations, reverse=True)))
| # Time: O(nlogn)
# Space: O(1)
# Given an array of citations (each citation is a non-negative integer)
# of a researcher, write a function to compute the researcher's h-index.
#
# According to the definition of h-index on Wikipedia:
# "A scientist has index h if h of his/her N papers have
# at least h citations each, and the other N − h papers have
# no more than h citations each."
#
# For example, given citations = [3, 0, 6, 1, 5],
# which means the researcher has 5 papers in total
# and each of them had received 3, 0, 6, 1, 5 citations respectively.
# Since the researcher has 3 papers with at least 3 citations each and
# the remaining two with no more than 3 citations each, his h-index is 3.
#
# Note: If there are several possible values for h, the maximum one is taken as the h-index.
#
class Solution(object):
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
citations.sort(reverse=True)
h = 0
for x in citations:
if x >= h + 1:
h += 1
else:
break
return h
# Time: O(nlogn)
# Space: O(n)
class Solution2(object):
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
return sum(x >= i + 1 for i, x in enumerate(sorted(citations, reverse=True)))
| Python | 0.000002 |
56adaeecb5ed868ca057a4985a9305770d551b61 | Add version | sqlalchemy_seed/__init__.py | sqlalchemy_seed/__init__.py | # -*- coding: utf-8 -*-
"""
sqlalchemy_seed
~~~~~~~~~~~~~~~
`sqlalchemy_seed` is a seed library which provides initial data to
database using SQLAlchemy.
:copyright: (c) 2017 Shinya Ohyanagi, All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import os
import importlib
import json
import yaml
__version__ = '0.1.0'
def create_table(base, session=None):
"""Create table.
:param base: `sqlalchemy.ext.declarative`
:param session: `sqlalchemy.orm`
"""
if session:
base.metadata.create_all(bind=session.bind)
else:
base.metadata.create_all()
def drop_table(base, session):
"""Drop table.
:param base: `sqlalchemy.ext.declarative`
:param session: `sqlalchemy.orm`
"""
session.expunge_all()
session.remove()
base.metadata.drop_all()
def load_fixture_files(paths, files):
"""Load fixture files.
:param path: Path to fixtures
:param files: Fixture file names
"""
fixtures = []
if not isinstance(paths, list):
paths = [paths]
for path in paths:
for file in files:
fixture_path = os.path.join(path, file)
if not os.path.exists(fixture_path):
continue
with open(fixture_path, 'r') as f:
if file.endswith('.yaml') or file.endswith('.yml'):
data = yaml.load(f)
elif file.endswith('.json'):
data = json.loads(f)
else:
continue
fixtures.append(data)
return fixtures
def _create_model_instance(fixture):
"""Create model instance.
:param fixture: Fixtures
"""
instances = []
for data in fixture:
if 'model' in data:
module_name, class_name = data['model'].rsplit('.', 1)
module = importlib.import_module(module_name)
model = getattr(module, class_name)
instance = model(**data['fields'])
instances.append(instance)
return instances
def load_fixtures(session, fixtures):
"""Load fixture.
:param base: `sqlalchemy.ext.declarative`
:param fixtures: Fixture files
"""
instances = []
for fixture in fixtures:
_instances = _create_model_instance(fixture)
for instance in _instances:
instances.append(instance)
try:
session.add_all(instances)
session.flush()
session.commit()
except:
session.rollback()
raise
| # -*- coding: utf-8 -*-
"""
sqlalchemy_seed
~~~~~~~~~~~~~~~
Seed
:copyright: (c) 2017 Shinya Ohyanagi, All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import os
import importlib
import json
import yaml
def create_table(base, session=None):
"""Create table.
:param base: `sqlalchemy.ext.declarative`
:param session: `sqlalchemy.orm`
"""
if session:
base.metadata.create_all(bind=session.bind)
else:
base.metadata.create_all()
def drop_table(base, session):
"""Drop table.
:param base: `sqlalchemy.ext.declarative`
:param session: `sqlalchemy.orm`
"""
session.expunge_all()
session.remove()
base.metadata.drop_all()
def load_fixture_files(paths, files):
"""Load fixture files.
:param path: Path to fixtures
:param files: Fixture file names
"""
fixtures = []
if not isinstance(paths, list):
paths = [paths]
for path in paths:
for file in files:
fixture_path = os.path.join(path, file)
if not os.path.exists(fixture_path):
continue
with open(fixture_path, 'r') as f:
if file.endswith('.yaml') or file.endswith('.yml'):
data = yaml.load(f)
elif file.endswith('.json'):
data = json.loads(f)
else:
continue
fixtures.append(data)
return fixtures
def _create_model_instance(fixture):
"""Create model instance.
:param fixture: Fixtures
"""
instances = []
for data in fixture:
if 'model' in data:
module_name, class_name = data['model'].rsplit('.', 1)
module = importlib.import_module(module_name)
model = getattr(module, class_name)
instance = model(**data['fields'])
instances.append(instance)
return instances
def load_fixtures(session, fixtures):
"""Load fixture.
:param base: `sqlalchemy.ext.declarative`
:param fixtures: Fixture files
"""
instances = []
for fixture in fixtures:
_instances = _create_model_instance(fixture)
for instance in _instances:
instances.append(instance)
try:
session.add_all(instances)
session.flush()
session.commit()
except:
session.rollback()
raise
| Python | 0 |
4fd6d20be257cca38f98d20df78b35d7c7bc3911 | Fix factory_jst | feder/teryt/factory.py | feder/teryt/factory.py | from random import randint
from .models import JednostkaAdministracyjna as JST
from .models import Category
def factory_jst():
category = Category.objects.create(name="X", level=1)
return JST.objects.create(name="X", id=randint(0, 1000),
category=category,
updated_on='2015-05-12',
active=True)
| from autofixture import AutoFixture
from .models import JednostkaAdministracyjna
def factory_jst():
jst = AutoFixture(JednostkaAdministracyjna,
field_values={'updated_on': '2015-02-12'},
generate_fk=True).create_one(commit=False)
jst.rght = 0
jst.save()
return jst
| Python | 0.000002 |
2eefaca1d7d27ebe2e9a489ab2c1dc2927e49b55 | Bump version | sqliteschema/__version__.py | sqliteschema/__version__.py | __author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2016, {}".format(__author__)
__license__ = "MIT License"
__version__ = "1.0.2"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
| __author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2016, {}".format(__author__)
__license__ = "MIT License"
__version__ = "1.0.1"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
| Python | 0 |
bc904f3ab7cc9d697dc56058ac9cb578055c401f | raise exception rather than logging and returning | checks.d/hdfs.py | checks.d/hdfs.py | from checks import AgentCheck
class HDFSCheck(AgentCheck):
"""Report on free space and space used in HDFS.
"""
def check(self, instance):
try:
import snakebite.client
except ImportError:
raise ImportError('HDFSCheck requires the snakebite module')
if 'namenode' not in instance:
raise ValueError('Missing key \'namenode\' in HDFSCheck config')
hostport = instance['namenode']
if ':' in hostport:
host, _, port = hostport.partition(':')
port = int(port)
else:
host = hostport
port = 8020
hdfs = snakebite.client.Client(host, port)
stats = hdfs.df()
# {'used': 2190859321781L,
# 'capacity': 76890897326080L,
# 'under_replicated': 0L,
# 'missing_blocks': 0L,
# 'filesystem': 'hdfs://hostname:port',
# 'remaining': 71186818453504L,
# 'corrupt_blocks': 0L}
self.gauge('hdfs.used', stats['used'])
self.gauge('hdfs.free', stats['remaining'])
self.gauge('hdfs.capacity', stats['capacity'])
self.gauge('hdfs.in_use', float(stats['used']) / float(stats['capacity']))
self.gauge('hdfs.under_replicated', stats['under_replicated'])
self.gauge('hdfs.missing_blocks', stats['missing_blocks'])
self.gauge('hdfs.corrupt_blocks', stats['corrupt_blocks'])
if __name__ == '__main__':
check, instances = HDFSCheck.from_yaml('./hdfs.yaml')
for instance in instances:
check.check(instance)
print "Events: %r" % check.get_events()
print "Metrics: %r" % check.get_metrics()
| from checks import AgentCheck
class HDFSCheck(AgentCheck):
"""Report on free space and space used in HDFS.
"""
def check(self, instance):
try:
import snakebite.client
except ImportError:
raise ImportError('HDFSCheck requires the snakebite module')
if 'namenode' not in instance:
self.log.info('Missing key \'namenode\' in HDFSCheck config')
return
hostport = instance['namenode']
if ':' in hostport:
host, _, port = hostport.partition(':')
port = int(port)
else:
host = hostport
port = 8020
hdfs = snakebite.client.Client(host, port)
stats = hdfs.df()
# {'used': 2190859321781L,
# 'capacity': 76890897326080L,
# 'under_replicated': 0L,
# 'missing_blocks': 0L,
# 'filesystem': 'hdfs://hostname:port',
# 'remaining': 71186818453504L,
# 'corrupt_blocks': 0L}
self.gauge('hdfs.used', stats['used'])
self.gauge('hdfs.free', stats['remaining'])
self.gauge('hdfs.capacity', stats['capacity'])
self.gauge('hdfs.in_use', float(stats['used']) / float(stats['capacity']))
self.gauge('hdfs.under_replicated', stats['under_replicated'])
self.gauge('hdfs.missing_blocks', stats['missing_blocks'])
self.gauge('hdfs.corrupt_blocks', stats['corrupt_blocks'])
if __name__ == '__main__':
check, instances = HDFSCheck.from_yaml('./hdfs.yaml')
for instance in instances:
check.check(instance)
print "Events: %r" % check.get_events()
print "Metrics: %r" % check.get_metrics()
| Python | 0 |
082562d4fc3567f956e95d71807c65281a69b3ff | change get_many to expect ids | feedly/storage/base.py | feedly/storage/base.py | from feedly.serializers.base import BaseSerializer
class BaseActivityStorage(object):
'''
The storage class for activities data
'''
serializer = BaseSerializer
def __init__(self, **options):
self.options = options
self.serializer = self.serializer()
def add_to_storage(self, key, activities, *args, **kwargs):
'''
activities should be a dict with activity_id as keys and
the serialized data as value
'''
raise NotImplementedError()
def get_from_storage(self, key, activity_ids, *args, **kwargs):
raise NotImplementedError()
def remove_from_storage(self, key, activity_ids, *args, **kwargs):
raise NotImplementedError()
def get_many(self, key, activity_ids, *args, **kwargs):
activities_data = self.get_from_storage(key, activity_ids, *args, **kwargs)
return self.deserialize_activities(activities_data)
def get(self, key, activity, *args, **kwargs):
return self.get_many(key, [activity], *args, **kwargs)[0]
def add(self, key, activity, *args, **kwargs):
return self.add_many(key, [activity], *args, **kwargs)
def add_many(self, key, activities, *args, **kwargs):
serialized_activities = self.serialize_activities(activities)
return self.add_to_storage(key, serialized_activities, *args, **kwargs)
def remove(self, key, activity, *args, **kwargs):
return self.remove_many(key, [activity], *args, **kwargs)
def remove_many(self, key, activities, *args, **kwargs):
activity_ids = self.serialize_activities(activities).keys()
return self.remove_from_storage(key, activity_ids, *args, **kwargs)
def flush(self):
pass
def serialize_activity(self, activity):
activity_id, activity_data = self.serializer.dumps(activity)
serialized_activity = dict(((activity_id, activity_data),))
return serialized_activity
def serialize_activities(self, activities):
serialized_activities = {}
for activity in activities:
serialized_activities.update(self.serialize_activity(activity))
return serialized_activities
def deserialize_activities(self, data):
return self.serializer.loads(data)
class BaseTimelineStorage(object):
'''
The storage class for the feeds
'''
def __init__(self, **options):
self.options = options
def get_many(self, key, start, stop):
raise NotImplementedError()
def add_many(self, key, activities, *args, **kwargs):
raise NotImplementedError()
def remove_many(self, key, activities, *args, **kwargs):
raise NotImplementedError()
def trim(self, key, length):
raise NotImplementedError()
def count(self, key, *args, **kwargs):
raise NotImplementedError()
def delete(self, key, *args, **kwargs):
raise NotImplementedError()
| from feedly.serializers.base import BaseSerializer
class BaseActivityStorage(object):
'''
The storage class for activities data
'''
serializer = BaseSerializer
def __init__(self, **options):
self.options = options
self.serializer = self.serializer()
def add_to_storage(self, key, activities, *args, **kwargs):
'''
activities should be a dict with activity_id as keys and
the serialized data as value
'''
raise NotImplementedError()
def get_from_storage(self, key, activity_ids, *args, **kwargs):
raise NotImplementedError()
def remove_from_storage(self, key, activity_ids, *args, **kwargs):
raise NotImplementedError()
def get_many(self, key, activities, *args, **kwargs):
activity_ids = self.serialize_activities(activities).keys()
activities_data = self.get_from_storage(key, activity_ids, *args, **kwargs)
return self.deserialize_activities(activities_data)
def get(self, key, activity, *args, **kwargs):
return self.get_many(key, [activity], *args, **kwargs)[0]
def add(self, key, activity, *args, **kwargs):
return self.add_many(key, [activity], *args, **kwargs)
def add_many(self, key, activities, *args, **kwargs):
serialized_activities = self.serialize_activities(activities)
return self.add_to_storage(key, serialized_activities, *args, **kwargs)
def remove(self, key, activity, *args, **kwargs):
return self.remove_many(key, [activity], *args, **kwargs)
def remove_many(self, key, activities, *args, **kwargs):
activity_ids = self.serialize_activities(activities).keys()
return self.remove_from_storage(key, activity_ids, *args, **kwargs)
def flush(self):
pass
def serialize_activity(self, activity):
activity_id, activity_data = self.serializer.dumps(activity)
serialized_activity = dict(((activity_id, activity_data),))
return serialized_activity
def serialize_activities(self, activities):
serialized_activities = {}
for activity in activities:
serialized_activities.update(self.serialize_activity(activity))
return serialized_activities
def deserialize_activities(self, data):
return self.serializer.loads(data)
class BaseTimelineStorage(object):
'''
The storage class for the feeds
'''
def __init__(self, **options):
self.options = options
def get_many(self, key, start, stop):
raise NotImplementedError()
def add_many(self, key, activities, *args, **kwargs):
raise NotImplementedError()
def remove_many(self, key, activities, *args, **kwargs):
raise NotImplementedError()
def trim(self, key, length):
raise NotImplementedError()
def count(self, key, *args, **kwargs):
raise NotImplementedError()
def delete(self, key, *args, **kwargs):
raise NotImplementedError()
| Python | 0 |
6a8c8bc0e407327e5c0e4cae3d4d6ace179a6940 | Add team eligibility to API | webserver/codemanagement/serializers.py | webserver/codemanagement/serializers.py | from rest_framework import serializers
from greta.models import Repository
from competition.models import Team
from .models import TeamClient, TeamSubmission
class TeamSerializer(serializers.ModelSerializer):
class Meta:
model = Team
fields = ('id', 'name', 'slug', 'eligible_to_win')
class RepoSerializer(serializers.ModelSerializer):
class Meta:
model = Repository
fields = ('name', 'description', 'forked_from',
'path', 'is_ready')
forked_from = serializers.RelatedField()
path = serializers.SerializerMethodField('get_path')
is_ready = serializers.SerializerMethodField('get_is_ready')
def get_path(self, repo):
return repo.path
def get_is_ready(self, repo):
return repo.is_ready()
class TeamSubmissionSerializer(serializers.ModelSerializer):
class Meta:
model = TeamSubmission
fields = ('name', 'commit')
class TeamClientSerializer(serializers.ModelSerializer):
class Meta:
model = TeamClient
fields = ('team', 'repository', 'tag', 'language')
team = TeamSerializer()
repository = RepoSerializer()
tag = serializers.SerializerMethodField('get_tag')
language = serializers.SerializerMethodField('get_language')
def get_tag(self, teamclient):
try:
latest_sub= teamclient.submissions.latest()
return TeamSubmissionSerializer(latest_sub).data
except TeamSubmission.DoesNotExist:
return None
def get_language(self, teamclient):
return teamclient.base.language
| from rest_framework import serializers
from greta.models import Repository
from competition.models import Team
from .models import TeamClient, TeamSubmission
class TeamSerializer(serializers.ModelSerializer):
class Meta:
model = Team
fields = ('id', 'name', 'slug')
class RepoSerializer(serializers.ModelSerializer):
class Meta:
model = Repository
fields = ('name', 'description', 'forked_from',
'path', 'is_ready')
forked_from = serializers.RelatedField()
path = serializers.SerializerMethodField('get_path')
is_ready = serializers.SerializerMethodField('get_is_ready')
def get_path(self, repo):
return repo.path
def get_is_ready(self, repo):
return repo.is_ready()
class TeamSubmissionSerializer(serializers.ModelSerializer):
class Meta:
model = TeamSubmission
fields = ('name', 'commit')
class TeamClientSerializer(serializers.ModelSerializer):
class Meta:
model = TeamClient
fields = ('team', 'repository', 'tag', 'language')
team = TeamSerializer()
repository = RepoSerializer()
tag = serializers.SerializerMethodField('get_tag')
language = serializers.SerializerMethodField('get_language')
def get_tag(self, teamclient):
try:
latest_sub= teamclient.submissions.latest()
return TeamSubmissionSerializer(latest_sub).data
except TeamSubmission.DoesNotExist:
return None
def get_language(self, teamclient):
return teamclient.base.language
| Python | 0 |
d50daddde2186d54659a4f8dbf63622311ed6d22 | remove service class | glim/services.py | glim/services.py | from glim.core import Service
class Config(Service):
pass
class Session(Service):
pass
class Router(Service):
pass | # metaclass for Service class
class DeflectToInstance(type):
def __getattr__(selfcls, a): # selfcls in order to make clear it is a class object (as we are a metaclass)
try:
# first, inquiry the class itself
return super(DeflectToInstance, selfcls).__getattr__(a)
except AttributeError:
# Not found, so try to inquiry the instance attribute:
return getattr(selfcls.instance, a)
# facade that is used for saving complex
class Service:
__metaclass__ = DeflectToInstance
instance = None
@classmethod
def boot(cls, object, configuration = {}):
if cls.instance is None:
cls.instance = object(configuration)
class Config(Service):
pass
class Session(Service):
pass
class Router(Service):
pass | Python | 0.000003 |
72902ebcada7bdc7a889f8766b63afff82110182 | Comment about recursion limit in categories. | webshop/extensions/category/__init__.py | webshop/extensions/category/__init__.py | # Copyright (C) 2010-2011 Mathijs de Bruin <mathijs@mathijsfietst.nl>
#
# This file is part of django-webshop.
#
# django-webshop is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Django-webshop, by default, contains base classes for two kinds of categories:
* Simple categories, which define a base class for products that belong to
exactly one category.
* Advanced categories, that belong to zero or more categories.
Furthermore, generic abstract base models are defined for 'normal' categories
and for nested categories, allowing for the hierarchical categorization of
products.
TODO: We want a setting allowing us to limit the nestedness of categories.
For 'navigational' reasons, a number of 3 should be a reasonable default.
""" | # Copyright (C) 2010-2011 Mathijs de Bruin <mathijs@mathijsfietst.nl>
#
# This file is part of django-webshop.
#
# django-webshop is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Django-webshop, by default, contains base classes for two kinds of categories:
* Simple categories, which define a base class for products that belong to
exactly one category.
* Advanced categories, that belong to zero or more categories.
Furthermore, generic abstract base models are defined for 'normal' categories
and for nested categories, allowing for the hierarchical categorization of
products.
""" | Python | 0 |
f5fad49e0b20e54e01fe4d9ae69be0694d7878f9 | add docstring to test setup, and move to the top | sale_exception_nostock/tests/test_dropshipping_skip_check.py | sale_exception_nostock/tests/test_dropshipping_skip_check.py | # Author: Leonardo Pistone
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp.tests.common import TransactionCase
class TestDropshippingSkipCheck(TransactionCase):
def setUp(self):
"""Set up an dropshipping sale order line.
To do that, mock the computed source location to be a supplier.
"""
super(TestDropshippingSkipCheck, self).setUp()
source_loc = self.env['stock.location'].new({'usage': 'supplier'})
self.order_line = self.env['sale.order.line'].new()
self.order_line._get_line_location = lambda: source_loc
def test_dropshipping_sale_can_always_be_delivered(self):
self.assertIs(True, self.order_line.can_command_at_delivery_date())
def test_dropshipping_sale_does_not_affect_future_orders(self):
self.assertIs(False, self.order_line.future_orders_are_affected())
| # Author: Leonardo Pistone
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp.tests.common import TransactionCase
class TestDropshippingSkipCheck(TransactionCase):
def test_dropshipping_sale_can_always_be_delivered(self):
self.assertIs(True, self.order_line.can_command_at_delivery_date())
def test_dropshipping_sale_does_not_affect_future_orders(self):
self.assertIs(False, self.order_line.future_orders_are_affected())
def setUp(self):
super(TestDropshippingSkipCheck, self).setUp()
source_loc = self.env['stock.location'].new({'usage': 'supplier'})
self.order_line = self.env['sale.order.line'].new()
self.order_line._get_line_location = lambda: source_loc
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.