commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
fa82a17c61698847904fa7dea14414b30b80bdfc | Update InferenceContext attribute documentation | astroid/context.py | astroid/context.py | # Copyright (c) 2015-2016 Cara Vinson <ceridwenv@gmail.com>
# Copyright (c) 2015-2016 Claudiu Popa <pcmanticore@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""Various context related utilities, including inference and call contexts."""
import contextlib
import copy
import pprint
class InferenceContext(object):
"""Provide context for inference
Store already inferred nodes to save time
Account for already visited nodes to infinite stop infinite recursion
"""
__slots__ = ('path', 'lookupname', 'callcontext', 'boundnode', 'inferred')
def __init__(self, path=None, inferred=None):
self.path = path or set()
"""
:type: set(tuple(NodeNG, optional(str)))
Path of visited nodes and their lookupname
Currently this key is ``(node, context.lookupname)``
"""
self.lookupname = None
"""
:type: optional[str]
The original name of the node
e.g.
foo = 1
The inference of 'foo' is nodes.Const(1) but the lookup name is 'foo'
"""
self.callcontext = None
"""
:type: optional[CallContext]
The call arguments and keywords for the given context
"""
self.boundnode = None
"""
:type: optional[NodeNG]
The bound node of the given context
e.g. the bound node of object.__new__(cls) is the object node
"""
self.inferred = inferred or {}
"""
:type: dict(seq, seq)
Inferred node contexts to their mapped results
Currently the key is ``(node, lookupname, callcontext, boundnode)``
and the value is tuple of the inferred results
"""
def push(self, node):
"""Push node into inference path
:return: True if node is already in context path else False
:rtype: bool
Allows one to see if the given node has already
been looked at for this inference context"""
name = self.lookupname
if (node, name) in self.path:
return True
self.path.add((node, name))
return False
def clone(self):
"""Clone inference path
For example, each side of a binary operation (BinOp)
starts with the same context but diverge as each side is inferred
so the InferenceContext will need be cloned"""
# XXX copy lookupname/callcontext ?
clone = InferenceContext(copy.copy(self.path), inferred=self.inferred)
clone.callcontext = self.callcontext
clone.boundnode = self.boundnode
return clone
def cache_generator(self, key, generator):
"""Cache result of generator into dictionary
Used to cache inference results"""
results = []
for result in generator:
results.append(result)
yield result
self.inferred[key] = tuple(results)
return
@contextlib.contextmanager
def restore_path(self):
path = set(self.path)
yield
self.path = path
def __str__(self):
state = ('%s=%s' % (field, pprint.pformat(getattr(self, field),
width=80 - len(field)))
for field in self.__slots__)
return '%s(%s)' % (type(self).__name__, ',\n '.join(state))
class CallContext(object):
"""Holds information for a call site."""
__slots__ = ('args', 'keywords')
def __init__(self, args, keywords=None):
"""
:param List[NodeNG] args: Call positional arguments
:param Union[List[nodes.Keyword], None] keywords: Call keywords
"""
self.args = args
if keywords:
keywords = [(arg.arg, arg.value) for arg in keywords]
else:
keywords = []
self.keywords = keywords
def copy_context(context):
if context is not None:
return context.clone()
return InferenceContext()
| # Copyright (c) 2015-2016 Cara Vinson <ceridwenv@gmail.com>
# Copyright (c) 2015-2016 Claudiu Popa <pcmanticore@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""Various context related utilities, including inference and call contexts."""
import contextlib
import copy
import pprint
class InferenceContext(object):
"""Provide context for inference
Store already inferred nodes to save time
Account for already visited nodes to infinite stop infinite recursion
"""
__slots__ = ('path', 'lookupname', 'callcontext', 'boundnode', 'inferred')
def __init__(self, path=None, inferred=None):
self.path = path or set()
"""Path of visited nodes and their lookupname
:type: set(tuple(NodeNG, optional(str)))"""
self.lookupname = None
self.callcontext = None
self.boundnode = None
self.inferred = inferred or {}
"""
:type: dict(seq, seq)
Inferred node contexts to their mapped results
Currently the key is (node, lookupname, callcontext, boundnode)
and the value is tuple of the inferred results
"""
def push(self, node):
"""Push node into inference path
:return: True if node is already in context path else False
:rtype: bool
Allows one to see if the given node has already
been looked at for this inference context"""
name = self.lookupname
if (node, name) in self.path:
return True
self.path.add((node, name))
return False
def clone(self):
"""Clone inference path
For example, each side of a binary operation (BinOp)
starts with the same context but diverge as each side is inferred
so the InferenceContext will need be cloned"""
# XXX copy lookupname/callcontext ?
clone = InferenceContext(copy.copy(self.path), inferred=self.inferred)
clone.callcontext = self.callcontext
clone.boundnode = self.boundnode
return clone
def cache_generator(self, key, generator):
"""Cache result of generator into dictionary
Used to cache inference results"""
results = []
for result in generator:
results.append(result)
yield result
self.inferred[key] = tuple(results)
return
@contextlib.contextmanager
def restore_path(self):
path = set(self.path)
yield
self.path = path
def __str__(self):
state = ('%s=%s' % (field, pprint.pformat(getattr(self, field),
width=80 - len(field)))
for field in self.__slots__)
return '%s(%s)' % (type(self).__name__, ',\n '.join(state))
class CallContext(object):
"""Holds information for a call site."""
__slots__ = ('args', 'keywords')
def __init__(self, args, keywords=None):
self.args = args
if keywords:
keywords = [(arg.arg, arg.value) for arg in keywords]
else:
keywords = []
self.keywords = keywords
def copy_context(context):
if context is not None:
return context.clone()
return InferenceContext()
| Python | 0 |
cc963ca1a169506ee46c926fd7e7bc41f0b46780 | fix import complete_social_login | rest_auth/registration/serializers.py | rest_auth/registration/serializers.py | from django.http import HttpRequest
from django.conf import settings
from rest_framework import serializers
from requests.exceptions import HTTPError
# Import is needed only if we are using social login, in which
# case the allauth.socialaccount will be declared
if 'allauth.socialaccount' in settings.INSTALLED_APPS:
try:
from allauth.socialaccount.helpers import complete_social_login
except ImportError:
pass
class SocialLoginSerializer(serializers.Serializer):
access_token = serializers.CharField(required=False, allow_blank=True)
code = serializers.CharField(required=False, allow_blank=True)
def _get_request(self):
request = self.context.get('request')
if not isinstance(request, HttpRequest):
request = request._request
return request
def get_social_login(self, adapter, app, token, response):
"""
:param adapter: allauth.socialaccount Adapter subclass. Usually OAuthAdapter or Auth2Adapter
:param app: `allauth.socialaccount.SocialApp` instance
:param token: `allauth.socialaccount.SocialToken` instance
:param response: Provider's response for OAuth1. Not used in the
:return: :return: A populated instance of the `allauth.socialaccount.SocialLoginView` instance
"""
request = self._get_request()
social_login = adapter.complete_login(request, app, token, response=response)
social_login.token = token
return social_login
def validate(self, attrs):
view = self.context.get('view')
request = self._get_request()
if not view:
raise serializers.ValidationError(
'View is not defined, pass it as a context variable'
)
adapter_class = getattr(view, 'adapter_class', None)
if not adapter_class:
raise serializers.ValidationError('Define adapter_class in view')
adapter = adapter_class()
app = adapter.get_provider().get_app(request)
# More info on code vs access_token
# http://stackoverflow.com/questions/8666316/facebook-oauth-2-0-code-and-token
# Case 1: We received the access_token
if('access_token' in attrs):
access_token = attrs.get('access_token')
# Case 2: We received the authorization code
elif('code' in attrs):
self.callback_url = getattr(view, 'callback_url', None)
self.client_class = getattr(view, 'client_class', None)
if not self.callback_url:
raise serializers.ValidationError(
'Define callback_url in view'
)
if not self.client_class:
raise serializers.ValidationError(
'Define client_class in view'
)
code = attrs.get('code')
provider = adapter.get_provider()
scope = provider.get_scope(request)
client = self.client_class(
request,
app.client_id,
app.secret,
adapter.access_token_method,
adapter.access_token_url,
self.callback_url,
scope
)
token = client.get_access_token(code)
access_token = token['access_token']
else:
raise serializers.ValidationError('Incorrect input. access_token or code is required.')
token = adapter.parse_token({'access_token': access_token})
token.app = app
try:
login = self.get_social_login(adapter, app, token, access_token)
complete_social_login(request, login)
except HTTPError:
raise serializers.ValidationError('Incorrect value')
if not login.is_existing:
login.lookup()
login.save(request, connect=True)
attrs['user'] = login.account.user
return attrs
| from django.http import HttpRequest
from django.conf import settings
from rest_framework import serializers
from requests.exceptions import HTTPError
# Import is needed only if we are using social login, in which
# case the allauth.socialaccount will be declared
try:
from allauth.socialaccount.helpers import complete_social_login
except ImportError:
raise ImportError('allauth.socialaccount needs to be installed.')
if 'allauth.socialaccount' not in settings.INSTALLED_APPS:
raise ImportError('allauth.socialaccount needs to be added to INSTALLED_APPS.')
class SocialLoginSerializer(serializers.Serializer):
access_token = serializers.CharField(required=False, allow_blank=True)
code = serializers.CharField(required=False, allow_blank=True)
def _get_request(self):
request = self.context.get('request')
if not isinstance(request, HttpRequest):
request = request._request
return request
def get_social_login(self, adapter, app, token, response):
"""
:param adapter: allauth.socialaccount Adapter subclass. Usually OAuthAdapter or Auth2Adapter
:param app: `allauth.socialaccount.SocialApp` instance
:param token: `allauth.socialaccount.SocialToken` instance
:param response: Provider's response for OAuth1. Not used in the
:return: :return: A populated instance of the `allauth.socialaccount.SocialLoginView` instance
"""
request = self._get_request()
social_login = adapter.complete_login(request, app, token, response=response)
social_login.token = token
return social_login
def validate(self, attrs):
view = self.context.get('view')
request = self._get_request()
if not view:
raise serializers.ValidationError(
'View is not defined, pass it as a context variable'
)
adapter_class = getattr(view, 'adapter_class', None)
if not adapter_class:
raise serializers.ValidationError('Define adapter_class in view')
adapter = adapter_class()
app = adapter.get_provider().get_app(request)
# More info on code vs access_token
# http://stackoverflow.com/questions/8666316/facebook-oauth-2-0-code-and-token
# Case 1: We received the access_token
if('access_token' in attrs):
access_token = attrs.get('access_token')
# Case 2: We received the authorization code
elif('code' in attrs):
self.callback_url = getattr(view, 'callback_url', None)
self.client_class = getattr(view, 'client_class', None)
if not self.callback_url:
raise serializers.ValidationError(
'Define callback_url in view'
)
if not self.client_class:
raise serializers.ValidationError(
'Define client_class in view'
)
code = attrs.get('code')
provider = adapter.get_provider()
scope = provider.get_scope(request)
client = self.client_class(
request,
app.client_id,
app.secret,
adapter.access_token_method,
adapter.access_token_url,
self.callback_url,
scope
)
token = client.get_access_token(code)
access_token = token['access_token']
else:
raise serializers.ValidationError('Incorrect input. access_token or code is required.')
token = adapter.parse_token({'access_token': access_token})
token.app = app
try:
login = self.get_social_login(adapter, app, token, access_token)
complete_social_login(request, login)
except HTTPError:
raise serializers.ValidationError('Incorrect value')
if not login.is_existing:
login.lookup()
login.save(request, connect=True)
attrs['user'] = login.account.user
return attrs
| Python | 0.000005 |
1a9581a33efab4bcf7f1b7a6e555fa373d6f0739 | Fix repo URL in staging report | scripts/GenerateStagingReport.py | scripts/GenerateStagingReport.py | #coding=UTF-8
from BuildArchetypes import archetypes, getDeploymentContext
import argparse, cgi
parser = argparse.ArgumentParser(description="Build report generator")
parser.add_argument("version", type=str, help="Vaadin version that was just built")
parser.add_argument("deployUrl", type=str, help="Base url of the deployment server")
parser.add_argument("buildResultUrl", type=str, help="URL for the build result page")
parser.add_argument("stagingRepo", type=str, help="URL for the staging repository")
args = parser.parse_args()
content = """<html>
<head></head>
<body>
<table>
"""
content += "<tr><td>Try archetype demos<ul>"
for archetype in archetypes:
content += "<li><a href='{url}/{context}'>{demo}</a></li>\n".format(url=args.deployUrl, demo=archetype, context=getDeploymentContext(archetype, args.version))
content += """</ul></td></tr>
<tr><td><a href="{repoUrl}">Staging repository</a></td></tr>
<tr><td>Eclipse Ivy Settings:<br><pre>""".format(repoUrl=args.stagingRepo)
content += cgi.escape(""" <ibiblio name="vaadin-staging" usepoms="true" m2compatible="true"
root="{repoUrl}" />""".format(repoUrl=args.stagingRepo))
content += """</pre>
</td></tr>
<tr><td><a href="https://dev.vaadin.com/milestone/Vaadin {version}">Trac Milestone</a></td></tr>
<tr><td><a href="https://dev.vaadin.com/admin/ticket/versions">Add version {version} to Trac</td></tr>
<tr><td><a href="{url}">Staging result page (See test results, pin and tag build and dependencies)</a></td></tr>
</table>
</body>
</html>""".format(url=args.buildResultUrl, repoUrl=args.stagingRepo, version=args.version)
f = open("result/report.html", 'w')
f.write(content)
| #coding=UTF-8
from BuildArchetypes import archetypes, getDeploymentContext
import argparse, cgi
parser = argparse.ArgumentParser(description="Build report generator")
parser.add_argument("version", type=str, help="Vaadin version that was just built")
parser.add_argument("deployUrl", type=str, help="Base url of the deployment server")
parser.add_argument("buildResultUrl", type=str, help="URL for the build result page")
parser.add_argument("stagingRepo", type=str, help="URL for the staging repository")
args = parser.parse_args()
content = """<html>
<head></head>
<body>
<table>
"""
content += "<tr><td>Try archetype demos<ul>"
for archetype in archetypes:
content += "<li><a href='{url}/{context}'>{demo}</a></li>\n".format(url=args.deployUrl, demo=archetype, context=getDeploymentContext(archetype, args.version))
content += """</ul></td></tr>
<tr><td><a href="{repoUrl}">Staging repository</a></td></tr>
<tr><td>Eclipse Ivy Settings:<br><pre>"""
content += cgi.escape(""" <ibiblio name="vaadin-staging" usepoms="true" m2compatible="true"
root="{repoUrl}" />""".format(repoUrl=args.stagingRepo))
content += """</pre>
</td></tr>
<tr><td><a href="https://dev.vaadin.com/milestone/Vaadin {version}">Trac Milestone</a></td></tr>
<tr><td><a href="https://dev.vaadin.com/admin/ticket/versions">Add version {version} to Trac</td></tr>
<tr><td><a href="{url}">Staging result page (See test results, pin and tag build and dependencies)</a></td></tr>
</table>
</body>
</html>""".format(url=args.buildResultUrl, repoUrl=args.stagingRepo, version=args.version)
f = open("result/report.html", 'w')
f.write(content)
| Python | 0.000032 |
ceb5f223f2f38969157372b608d03771a9179858 | Make threading tests work in environment with restricted maxprocs | rootpy/logger/tests/test_threading.py | rootpy/logger/tests/test_threading.py | from __future__ import division
import itertools
import os
import resource
import thread
import threading
import time
from math import ceil
from random import random
import ROOT
import rootpy; log = rootpy.log["rootpy.logger.test.threading"]
rootpy.logger.magic.DANGER.enabled = True
from .logcheck import EnsureLogContains
def optional_fatal(abort=True):
msg = "[rootpy.ALWAYSABORT]" if abort else "[rootpy.NEVERABORT]"
ROOT.Error("rootpy.logger.test", msg)
f = optional_fatal
optional_fatal._bytecode = lambda: map(ord, f.func_code.co_code)
optional_fatal._ORIG_BYTECODE = optional_fatal._bytecode()
optional_fatal._unmodified = lambda: f._bytecode() == f._ORIG_BYTECODE
def optional_fatal_bytecode_check():
assert optional_fatal._unmodified(), (
"Detected modified bytecode. This should never happen.")
number_of_fatals = itertools.count()
total = itertools.count()
def maybe_fatal():
try:
# Throw exceptions 80% of the time
optional_fatal(random() < 0.8)
except rootpy.ROOTError:
number_of_fatals.next()
finally:
total.next()
optional_fatal_bytecode_check()
def randomfatal(should_exit):
while not should_exit.is_set():
maybe_fatal()
def spareprocs():
"""
Compute the maximum number of threads we can start up according to ulimit
"""
nmax, _ = resource.getrlimit(resource.RLIMIT_NPROC)
me = os.geteuid()
return nmax - sum(1 for p in os.listdir("/proc")
if p.isdigit() and os.stat("/proc/" + p).st_uid == me)
def test_multithread_exceptions():
should_exit = threading.Event()
sup_logger = log["/ROOT.rootpy.logger.test"]
old_level = sup_logger.level
# Suppress test warnings
sup_logger.setLevel(log.CRITICAL)
# Run for 1/4 second or 10s if LONG_TESTS is in the environment
length = float(os.environ.get("TEST_TIME", 0.25))
try:
threads = []
for i in range(min(100, int(ceil(spareprocs()*0.8)))):
t = threading.Thread(target=randomfatal, args=(should_exit,))
try:
t.start()
threads.append(t)
except thread.error:
log.warning("Unable to start thread")
break
assert threads, "Didn't manage to start any threads!"
time.sleep(length)
should_exit.set()
for t in threads:
t.join()
finally:
sup_logger.setLevel(old_level)
tot = total.next()-1
fatals = number_of_fatals.next()-1
fmt = "Success raising exceptions in {0} threads: total: {1} (fatals {2:%})"
log.debug(fmt.format(len(threads), tot, fatals / tot))
| from __future__ import division
import itertools
import os
import threading
import time
from random import random
import rootpy; log = rootpy.log["rootpy.logger.test.threading"]
rootpy.logger.magic.DANGER.enabled = True
import ROOT
from .logcheck import EnsureLogContains
def optional_fatal(abort=True):
msg = "[rootpy.ALWAYSABORT]" if abort else "[rootpy.NEVERABORT]"
ROOT.Error("rootpy.logger.test", msg)
f = optional_fatal
optional_fatal._bytecode = lambda: map(ord, f.func_code.co_code)
optional_fatal._ORIG_BYTECODE = optional_fatal._bytecode()
optional_fatal._unmodified = lambda: f._bytecode() == f._ORIG_BYTECODE
def optional_fatal_bytecode_check():
assert optional_fatal._unmodified(), (
"Detected modified bytecode. This should never happen.")
number_of_fatals = itertools.count()
total = itertools.count()
def maybe_fatal():
try:
# Throw exceptions 80% of the time
optional_fatal(random() < 0.8)
except rootpy.ROOTError:
number_of_fatals.next()
finally:
total.next()
optional_fatal_bytecode_check()
def randomfatal(should_exit):
while not should_exit.is_set():
maybe_fatal()
#@EnsureLogContains("ERROR", "ALWAYSABORT")
def test_multithread_exceptions():
should_exit = threading.Event()
sup_logger = log["/ROOT.rootpy.logger.test"]
old_level = sup_logger.level
# Suppress test warnings
sup_logger.setLevel(log.CRITICAL)
# Run for 1/4 second or 10s if LONG_TESTS is in the environment
length = float(os.environ.get("TEST_TIME", 0.25))
try:
threads = []
for i in range(100):
t = threading.Thread(target=randomfatal, args=(should_exit,))
t.start()
threads.append(t)
time.sleep(length)
should_exit.set()
for t in threads:
t.join()
finally:
#sup_logger.setLevel(old_level)
pass
tot = total.next()-1
fatals = number_of_fatals.next()-1
log.debug("Success raising exceptions: total: {0} (fatals {1:%})".format(tot, fatals / tot)) | Python | 0 |
4bef39d1344a832d9a6acfb52173ac493e238139 | bump version | publicprize/config.py | publicprize/config.py | # -*- coding: utf-8 -*-
""" Flask configuration.
:copyright: Copyright (c) 2014 Bivio Software, Inc. All Rights Reserved.
:license: Apache, see LICENSE for more details.
"""
import os
def _config_from_environ(cfg, prefix):
for k in cfg.keys():
ek = prefix + '_' + k.upper()
if isinstance(cfg[k], dict):
_config_from_environ(cfg[k], ek)
elif ek in os.environ:
t = type(cfg[k])
v = os.environ[ek]
if issubclass(t, (int, bool)):
v = t(v)
cfg[k] = v
def _read_json(filename):
"""Read filename for json"""
with open(filename) as f:
import json
return json.load(f)
class Config(object):
"""Configuration driven off environment variables"""
# DO NOT set cfg['SERVER_NAME'] it breaks uwsgi so nothing is found
# see code in manager.send_event_vote_invites so it can use url_for.
import locale
locale.setlocale(locale.LC_ALL, '')
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
PUBLICPRIZE = _read_json(os.environ.get('PUBLICPRIZE_JSON', 'config.json'))
_config_from_environ(PUBLICPRIZE, 'PUBLICPRIZE')
for k in ['DEBUG', 'ALL_PUBLIC_CONTESTANTS', 'TEST_USER', 'MAIL_DEBUG', 'MAIL_SUPPRESS_SEND']:
if PUBLICPRIZE.get(k, None) is None:
PUBLICPRIZE[k] = PUBLICPRIZE['TEST_MODE']
MAIL_SUPPRESS_SEND = PUBLICPRIZE['MAIL_SUPPRESS_SEND']
import paypalrestsdk
paypalrestsdk.configure(PUBLICPRIZE['PAYPAL'])
SECRET_KEY = PUBLICPRIZE['SECRET_KEY']
DEBUG = PUBLICPRIZE['TEST_MODE']
# Avoid message: "adds significant overhead..."
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = \
'postgresql://{user}:{password}@/{name}'.format(**PUBLICPRIZE['DATABASE'])
if PUBLICPRIZE.get('SQLALCHEMY_ECHO') is not None:
SQLALCHEMY_ECHO = PUBLICPRIZE['SQLALCHEMY_ECHO']
if PUBLICPRIZE.get('WTF_CSRF_TIME_LIMIT') is not None:
WTF_CSRF_TIME_LIMIT = PUBLICPRIZE['WTF_CSRF_TIME_LIMIT']
if PUBLICPRIZE.get('WTF_CSRF_ENABLED') is not None:
WTF_CSRF_ENABLED = PUBLICPRIZE['WTF_CSRF_ENABLED']
MAIL_DEFAULT_SENDER = PUBLICPRIZE['SUPPORT_EMAIL']
MAIL_DEBUG = PUBLICPRIZE['MAIL_DEBUG']
PROPAGATE_EXCEPTIONS = True
PUBLICPRIZE['APP_VERSION'] = '20170914.000700'
if PUBLICPRIZE['TEST_MODE']:
import datetime
PUBLICPRIZE['APP_VERSION'] = datetime.datetime.utcnow().strftime(
'%Y%m%d.%H%M%S')
| # -*- coding: utf-8 -*-
""" Flask configuration.
:copyright: Copyright (c) 2014 Bivio Software, Inc. All Rights Reserved.
:license: Apache, see LICENSE for more details.
"""
import os
def _config_from_environ(cfg, prefix):
for k in cfg.keys():
ek = prefix + '_' + k.upper()
if isinstance(cfg[k], dict):
_config_from_environ(cfg[k], ek)
elif ek in os.environ:
t = type(cfg[k])
v = os.environ[ek]
if issubclass(t, (int, bool)):
v = t(v)
cfg[k] = v
def _read_json(filename):
"""Read filename for json"""
with open(filename) as f:
import json
return json.load(f)
class Config(object):
"""Configuration driven off environment variables"""
# DO NOT set cfg['SERVER_NAME'] it breaks uwsgi so nothing is found
# see code in manager.send_event_vote_invites so it can use url_for.
import locale
locale.setlocale(locale.LC_ALL, '')
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
PUBLICPRIZE = _read_json(os.environ.get('PUBLICPRIZE_JSON', 'config.json'))
_config_from_environ(PUBLICPRIZE, 'PUBLICPRIZE')
for k in ['DEBUG', 'ALL_PUBLIC_CONTESTANTS', 'TEST_USER', 'MAIL_DEBUG', 'MAIL_SUPPRESS_SEND']:
if PUBLICPRIZE.get(k, None) is None:
PUBLICPRIZE[k] = PUBLICPRIZE['TEST_MODE']
MAIL_SUPPRESS_SEND = PUBLICPRIZE['MAIL_SUPPRESS_SEND']
import paypalrestsdk
paypalrestsdk.configure(PUBLICPRIZE['PAYPAL'])
SECRET_KEY = PUBLICPRIZE['SECRET_KEY']
DEBUG = PUBLICPRIZE['TEST_MODE']
# Avoid message: "adds significant overhead..."
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = \
'postgresql://{user}:{password}@/{name}'.format(**PUBLICPRIZE['DATABASE'])
if PUBLICPRIZE.get('SQLALCHEMY_ECHO') is not None:
SQLALCHEMY_ECHO = PUBLICPRIZE['SQLALCHEMY_ECHO']
if PUBLICPRIZE.get('WTF_CSRF_TIME_LIMIT') is not None:
WTF_CSRF_TIME_LIMIT = PUBLICPRIZE['WTF_CSRF_TIME_LIMIT']
if PUBLICPRIZE.get('WTF_CSRF_ENABLED') is not None:
WTF_CSRF_ENABLED = PUBLICPRIZE['WTF_CSRF_ENABLED']
MAIL_DEFAULT_SENDER = PUBLICPRIZE['SUPPORT_EMAIL']
MAIL_DEBUG = PUBLICPRIZE['MAIL_DEBUG']
PROPAGATE_EXCEPTIONS = True
PUBLICPRIZE['APP_VERSION'] = '20170913.214500'
if PUBLICPRIZE['TEST_MODE']:
import datetime
PUBLICPRIZE['APP_VERSION'] = datetime.datetime.utcnow().strftime(
'%Y%m%d.%H%M%S')
| Python | 0 |
65ecd399ea82abdafd0a2471193a9c850b50db87 | Debug level of logging | playoff.py | playoff.py | import traceback
from jfr_playoff.filemanager import PlayoffFileManager
from jfr_playoff.generator import PlayoffGenerator
from jfr_playoff.settings import PlayoffSettings
def main():
interactive = False
try:
import argparse
arg_parser = argparse.ArgumentParser(
description='Generate play-off HTML for JFR Teamy tournaments')
output_args = arg_parser.add_mutually_exclusive_group()
output_args.add_argument('-v', '--verbose', action='store_true',
help='display info on STDERR')
output_args.add_argument('-vv', '--debug', action='store_true',
help='display debug info on STDERR')
output_args.add_argument('-q', '--quiet', action='store_true',
help='suppress warnings on STDERR')
arg_parser.add_argument('config_file', metavar='JSON_FILE',
help='path to config JSON file',
type=str, nargs='?', default=None)
arguments = arg_parser.parse_args()
settings = PlayoffSettings(arguments.config_file)
interactive = settings.interactive
generator = PlayoffGenerator(settings)
content = generator.generate_content()
file_manager = PlayoffFileManager(settings)
file_manager.write_content(content)
file_manager.copy_scripts()
file_manager.send_files()
except SystemExit:
interactive = False
raise
except:
print traceback.format_exc()
finally:
if interactive:
raw_input('Press any key to continue...')
if __name__ == '__main__':
main()
| import traceback
from jfr_playoff.filemanager import PlayoffFileManager
from jfr_playoff.generator import PlayoffGenerator
from jfr_playoff.settings import PlayoffSettings
def main():
interactive = False
try:
import argparse
arg_parser = argparse.ArgumentParser(
description='Generate play-off HTML for JFR Teamy tournaments')
output_args = arg_parser.add_mutually_exclusive_group()
output_args.add_argument('-v', '--verbose', action='store_true',
help='display debug info on STDERR')
output_args.add_argument('-q', '--quiet', action='store_true',
help='suppress warnings on STDERR')
arg_parser.add_argument('config_file', metavar='JSON_FILE',
help='path to config JSON file',
type=str, nargs='?', default=None)
arguments = arg_parser.parse_args()
settings = PlayoffSettings(arguments.config_file)
interactive = settings.interactive
generator = PlayoffGenerator(settings)
content = generator.generate_content()
file_manager = PlayoffFileManager(settings)
file_manager.write_content(content)
file_manager.copy_scripts()
file_manager.send_files()
except SystemExit:
interactive = False
raise
except:
print traceback.format_exc()
finally:
if interactive:
raw_input('Press any key to continue...')
if __name__ == '__main__':
main()
| Python | 0 |
3f80c759c55552dce7d45cf5f84e953ac7863974 | add placeholder for more examples | octopus/modules/examples/examples.py | octopus/modules/examples/examples.py | from octopus.core import app
from flask import Blueprint, render_template
blueprint = Blueprint('examples', __name__)
#@blueprint.route("/")
#def list_examples():
# return render_template("examples/list.html")
@blueprint.route("/ac")
def autocomplete():
return render_template("examples/es/autocomplete.html")
@blueprint.route("/fact")
def fact():
return render_template("examples/sherpafact/proxy.html")
@blueprint.route("/clientjs")
def clientjs():
pass
@blueprint.route("/epmc")
def epmc():
pass
@blueprint.route("/romeo")
def romeo():
# at the moment the romeo endpoint only deals with downloads, which is not very demoable
pass
| from octopus.core import app
from flask import Blueprint, render_template
blueprint = Blueprint('examples', __name__)
#@blueprint.route("/")
#def list_examples():
# return render_template("examples/list.html")
@blueprint.route("/ac")
def autocomplete():
return render_template("examples/es/autocomplete.html")
@blueprint.route("/fact")
def fact():
return render_template("examples/sherpafact/proxy.html")
| Python | 0 |
2668829d114031ba6fa641bb989988368371917b | add program lookup to choice group admin hotfix | open_programs/apps/programs/admin.py | open_programs/apps/programs/admin.py | from django.contrib import admin
from reversion.admin import VersionAdmin
from ajax_select.admin import AjaxSelectAdmin
from ajax_select import make_ajax_form
from .models import Program, TrainingTarget, ProgramCompetence, ProgramModules, TargetModules, ChoiceGroup, ChoiceGroupType, LearningPlan
@admin.register(Program)
class ProgramAdmin(VersionAdmin):
list_display = (
'title',
"training_direction",
'chief',
"level",
'created',
'updated',
'archived',
'status',
)
list_filter = ("level", 'created', 'updated', 'status', 'archived',)
filter_horizontal = ("learning_plans", )
@admin.register(TrainingTarget)
class TrainingTargetAdmin(VersionAdmin):
list_display = (
"title",
"number"
) # TODO: "program"
list_filter = (
"program",
"number"
)
@admin.register(ProgramCompetence)
class ProgramCompetenceAdmin(VersionAdmin):
list_display = ("title", "number", "program")
list_filter = ("title", "number")
search_fields = ("title", )
@admin.register(ProgramModules)
class ProgramModulesAdmin(VersionAdmin):
list_display = ("id", "semester", "module", "program", "choice_group", "competence")
list_filter = ("program", "semester",)
raw_id_fields = ("module", )
@admin.register(TargetModules)
class TargetModulesAdmin(VersionAdmin):
list_display = ("id", ) # TODO: "choice_group", "program_module", "target"
@admin.register(ChoiceGroup)
class ChoiceGroupAdmin(VersionAdmin, AjaxSelectAdmin):
list_display = ("id", "program", "title", "labor", "choice_group_type", "number")
form = make_ajax_form(ChoiceGroup, {'program': 'program'})
@admin.register(ChoiceGroupType)
class ChoiceGroupTypeAdmin(VersionAdmin):
list_display = ("title", )
@admin.register(LearningPlan)
class LearningPlanAdmin(VersionAdmin):
list_display = ('uni_displayableTitle', 'uni_number', 'uni_title', 'uni_stage', 'uni_loadTimeType')
| from django.contrib import admin
from reversion.admin import VersionAdmin
from ajax_select.admin import AjaxSelectAdmin
from ajax_select import make_ajax_form
from .models import Program, TrainingTarget, ProgramCompetence, ProgramModules, TargetModules, ChoiceGroup, ChoiceGroupType, LearningPlan
@admin.register(Program)
class ProgramAdmin(VersionAdmin):
list_display = (
'title',
"training_direction",
'chief',
"level",
'created',
'updated',
'archived',
'status',
)
list_filter = ("level", 'created', 'updated', 'status', 'archived',)
filter_horizontal = ("learning_plans", )
@admin.register(TrainingTarget)
class TrainingTargetAdmin(VersionAdmin):
list_display = (
"title",
"number"
) # TODO: "program"
list_filter = (
"program",
"number"
)
@admin.register(ProgramCompetence)
class ProgramCompetenceAdmin(VersionAdmin):
list_display = ("title", "number", "program")
list_filter = ("title", "number")
search_fields = ("title", )
@admin.register(ProgramModules)
class ProgramModulesAdmin(VersionAdmin):
list_display = ("id", "semester", "module", "program", "choice_group", "competence")
list_filter = ("program", "semester",)
raw_id_fields = ("module", )
@admin.register(TargetModules)
class TargetModulesAdmin(VersionAdmin):
list_display = ("id", ) # TODO: "choice_group", "program_module", "target"
@admin.register(ChoiceGroup)
class ChoiceGroupAdmin(VersionAdmin, AjaxSelectAdmin):
list_display = ("id", "program", "title", "labor", "choice_group_type", "number")
form = make_ajax_form(Program, {'program': 'program'})
@admin.register(ChoiceGroupType)
class ChoiceGroupTypeAdmin(VersionAdmin):
list_display = ("title", )
@admin.register(LearningPlan)
class LearningPlanAdmin(VersionAdmin):
list_display = ('uni_displayableTitle', 'uni_number', 'uni_title', 'uni_stage', 'uni_loadTimeType')
| Python | 0 |
01c8f0ebc4669d88576d2e66c57ac51863fd31fe | Fix ignore pattern | autotweet/learn.py | autotweet/learn.py | """:mod:`autotweet.learn` --- Learning your tweets
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module learns your tweets and store it to database.
"""
import logging
import re
import sqlalchemy
import time
import tweepy
from .database import add_document, get_session
from .twitter import CONSUMER_KEY, CONSUMER_SECRET, OAuthToken, strip_tweet
MY_CLIENT_NAME = 'learn your tweet'
IGNORE_PATTERN = re.compile(r'(@\w+\s+)*@\w+\s{2,}')
logger = logging.getLogger('collector')
def check_ignore(status):
if hasattr(status, 'retweeted_status'):
return True
if status.source == MY_CLIENT_NAME:
return True
if IGNORE_PATTERN.match(status.text):
return True
return False
class MyMentionListener(tweepy.streaming.StreamListener):
def __init__(self, api, db_url):
super(MyMentionListener, self).__init__()
self.api = api
self.db_url = db_url
self.db_session = get_session(db_url)
self.me = api.me()
def on_status(self, status):
if check_ignore(status):
return True
if status.user.id == self.me.id and status.in_reply_to_status_id:
original_status = self.api.get_status(status.in_reply_to_status_id)
question = strip_tweet(original_status.text)
answer = strip_tweet(status.text, remove_url=False)
if question and answer:
try:
add_document(self.db_session, question, answer)
except sqlalchemy.exc.OperationalError:
self.db_session = get_session(self.db_url)
add_document(self.db_session, question, answer)
return True
def polling_timeline(api, db_url):
db_session = get_session(db_url)
me = api.me()
last_id = me.status.id
logger.debug('tracking from status id: {0}'.format(last_id))
while 1:
time.sleep(60)
logger.debug('polling from status id: {0}'.format(last_id))
statuses = me.timeline(since_id=last_id)
if statuses:
statuses.reverse()
last_id = statuses[-1].id
else:
continue
for status in statuses:
if check_ignore(status):
continue
if not status.in_reply_to_status_id:
continue
original_status = api.get_status(status.in_reply_to_status_id)
question = strip_tweet(original_status.text)
answer = strip_tweet(status.text, remove_url=False)
if question and answer:
try:
add_document(db_session, question, answer)
except sqlalchemy.exc.OperationalError:
db_session = get_session(db_url)
add_document(db_session, question, answer)
def learning_daemon(token, db_url, streaming=False):
if not isinstance(token, OAuthToken):
token = OAuthToken.from_string(token)
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(token.key, token.secret)
api = tweepy.API(auth)
if streaming:
listener = MyMentionListener(api, db_url)
stream = tweepy.Stream(auth, listener)
stream.userstream()
else:
polling_timeline(api, db_url)
| """:mod:`autotweet.learn` --- Learning your tweets
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module learns your tweets and store it to database.
"""
import logging
import re
import sqlalchemy
import time
import tweepy
from .database import add_document, get_session
from .twitter import CONSUMER_KEY, CONSUMER_SECRET, OAuthToken, strip_tweet
MY_CLIENT_NAME = 'learn your tweet'
IGNORE_PATTERN = re.compile(r'@\w+\s{2,}')
logger = logging.getLogger('collector')
def check_ignore(status):
if hasattr(status, 'retweeted_status'):
return True
if status.source == MY_CLIENT_NAME:
return True
if IGNORE_PATTERN.match(status.text):
return True
return False
class MyMentionListener(tweepy.streaming.StreamListener):
def __init__(self, api, db_url):
super(MyMentionListener, self).__init__()
self.api = api
self.db_url = db_url
self.db_session = get_session(db_url)
self.me = api.me()
def on_status(self, status):
if check_ignore(status):
return True
if status.user.id == self.me.id and status.in_reply_to_status_id:
original_status = self.api.get_status(status.in_reply_to_status_id)
question = strip_tweet(original_status.text)
answer = strip_tweet(status.text, remove_url=False)
if question and answer:
try:
add_document(self.db_session, question, answer)
except sqlalchemy.exc.OperationalError:
self.db_session = get_session(self.db_url)
add_document(self.db_session, question, answer)
return True
def polling_timeline(api, db_url):
db_session = get_session(db_url)
me = api.me()
last_id = me.status.id
logger.debug('tracking from status id: {0}'.format(last_id))
while 1:
time.sleep(60)
logger.debug('polling from status id: {0}'.format(last_id))
statuses = me.timeline(since_id=last_id)
if statuses:
statuses.reverse()
last_id = statuses[-1].id
else:
continue
for status in statuses:
if check_ignore(status):
continue
if not status.in_reply_to_status_id:
continue
original_status = api.get_status(status.in_reply_to_status_id)
question = strip_tweet(original_status.text)
answer = strip_tweet(status.text, remove_url=False)
if question and answer:
try:
add_document(db_session, question, answer)
except sqlalchemy.exc.OperationalError:
db_session = get_session(db_url)
add_document(db_session, question, answer)
def learning_daemon(token, db_url, streaming=False):
if not isinstance(token, OAuthToken):
token = OAuthToken.from_string(token)
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(token.key, token.secret)
api = tweepy.API(auth)
if streaming:
listener = MyMentionListener(api, db_url)
stream = tweepy.Stream(auth, listener)
stream.userstream()
else:
polling_timeline(api, db_url)
| Python | 0.000003 |
d632b78c4fe41fff6511a4bcbb8ec9c13a34c066 | Add test for LogEntry | tests/core/tests/admin_integration_tests.py | tests/core/tests/admin_integration_tests.py | from __future__ import unicode_literals
import os.path
from django.test.testcases import TestCase
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.contrib.admin.models import LogEntry
from core.admin import BookAdmin
class ImportExportAdminIntegrationTest(TestCase):
def setUp(self):
user = User.objects.create_user('admin', 'admin@example.com',
'password')
user.is_staff = True
user.is_superuser = True
user.save()
self.client.login(username='admin', password='password')
def test_import_export_template(self):
response = self.client.get('/admin/core/book/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response,
'admin/import_export/change_list_import_export.html')
self.assertContains(response, _('Import'))
self.assertContains(response, _('Export'))
def test_import(self):
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
self.assertIn('result', response.context)
self.assertFalse(response.context['result'].has_errors())
self.assertIn('confirm_form', response.context)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, _('Import finished'))
def test_export(self):
response = self.client.get('/admin/core/book/export/')
self.assertEqual(response.status_code, 200)
data = {
'file_format': '0',
}
response = self.client.post('/admin/core/book/export/', data)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.has_header("Content-Disposition"))
def test_import_export_buttons_visible_without_add_permission(self):
# issue 38 - Export button not visible when no add permission
original = BookAdmin.has_add_permission
BookAdmin.has_add_permission = lambda self, request: False
response = self.client.get('/admin/core/book/')
BookAdmin.has_add_permission = original
self.assertContains(response, _('Export'))
self.assertContains(response, _('Import'))
def test_import_file_name_in_tempdir(self):
# 65 - import_file_name form field can be use to access the filesystem
import_file_name = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
data = {
'input_format': "0",
'import_file_name': import_file_name,
}
with self.assertRaises(IOError):
self.client.post('/admin/core/book/process_import/', data)
def test_import_log_entry(self):
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
book = LogEntry.objects.latest('id')
self.assertEqual(book.object_repr, "Some book")
self.assertEqual(book.object_id, str(1))
| from __future__ import unicode_literals
import os.path
from django.test.testcases import TestCase
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from core.admin import BookAdmin
class ImportExportAdminIntegrationTest(TestCase):
def setUp(self):
user = User.objects.create_user('admin', 'admin@example.com',
'password')
user.is_staff = True
user.is_superuser = True
user.save()
self.client.login(username='admin', password='password')
def test_import_export_template(self):
response = self.client.get('/admin/core/book/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response,
'admin/import_export/change_list_import_export.html')
self.assertContains(response, _('Import'))
self.assertContains(response, _('Export'))
def test_import(self):
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
self.assertIn('result', response.context)
self.assertFalse(response.context['result'].has_errors())
self.assertIn('confirm_form', response.context)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, _('Import finished'))
def test_export(self):
response = self.client.get('/admin/core/book/export/')
self.assertEqual(response.status_code, 200)
data = {
'file_format': '0',
}
response = self.client.post('/admin/core/book/export/', data)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.has_header("Content-Disposition"))
def test_import_export_buttons_visible_without_add_permission(self):
# issue 38 - Export button not visible when no add permission
original = BookAdmin.has_add_permission
BookAdmin.has_add_permission = lambda self, request: False
response = self.client.get('/admin/core/book/')
BookAdmin.has_add_permission = original
self.assertContains(response, _('Export'))
self.assertContains(response, _('Import'))
def test_import_file_name_in_tempdir(self):
# 65 - import_file_name form field can be use to access the filesystem
import_file_name = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
data = {
'input_format': "0",
'import_file_name': import_file_name,
}
with self.assertRaises(IOError):
self.client.post('/admin/core/book/process_import/', data)
| Python | 0 |
71b6f040e161c9a169e86d7a87fdd8038cf5961e | Add get_all_instance_types to AZURE | apps/domain/src/main/core/infrastructure/providers/azure/utils.py | apps/domain/src/main/core/infrastructure/providers/azure/utils.py | import subprocess
import click
import json
from PyInquirer import prompt
from ...utils import Config, styles
class AZ:
def locations_list(self):
proc = subprocess.Popen(
"az account list-locations --query '[].{DisplayName:displayName}' --output table",
shell=True,
stdout=subprocess.PIPE,
universal_newlines=True,
)
locations = proc.stdout.read()
return locations.split("\n")[2:]
def get_all_instance_types(location=None):
proc = subprocess.Popen(
f"az vm list-sizes --location {location}",
shell=True,
stdout=subprocess.PIPE,
universal_newlines=True,
)
machines = json.loads(proc.stdout.read())
all_instances = {"all_instances": [machine["name"] for machine in machines]}
return all_instances
def get_azure_config() -> Config:
"""Getting the configration required for deployment on AZURE.
Returns:
Config: Simple Config with the user inputs
"""
az = AZ()
subscription_id = prompt(
[
{
"type": "input",
"name": "subscription_id",
"message": "Please provide your subscription_id",
"default": "00000000-0000-0000-0000-000000000000",
}
],
style=styles.second,
)["subscription_id"]
client_id = prompt(
[
{
"type": "input",
"name": "client_id",
"message": "Please provide your client_id",
"default": "00000000-0000-0000-0000-000000000000",
}
],
style=styles.second,
)["client_id"]
client_secret = prompt(
[
{
"type": "input",
"name": "client_secret",
"message": "Please provide your client_secret",
"default": "XXXX-XXXX-XXX-XXX-XXX",
}
],
style=styles.second,
)["client_secret"]
tenant_id = prompt(
[
{
"type": "input",
"name": "tenant_id",
"message": "Please provide your tenant_id",
"default": "00000000-0000-0000-0000-000000000000",
}
],
style=styles.second,
)["tenant_id"]
location = prompt(
[
{
"type": "list",
"name": "location",
"message": "Please select your desired location",
"choices": az.locations_list(),
}
],
style=styles.second,
)["location"]
return Config(
location=location,
subscription_id=subscription_id,
client_id=client_id,
client_secret=client_secret,
tenant_id=tenant_id,
)
| import subprocess
import click
from PyInquirer import prompt
from ...utils import Config, styles
class AZ:
def locations_list(self):
proc = subprocess.Popen(
"az account list-locations --query '[].{DisplayName:displayName}' --output table",
shell=True,
stdout=subprocess.PIPE,
universal_newlines=True,
)
locations = proc.stdout.read()
return locations.split("\n")[2:]
def get_azure_config() -> Config:
"""Getting the configration required for deployment on AZURE.
Returns:
Config: Simple Config with the user inputs
"""
az = AZ()
subscription_id = prompt(
[
{
"type": "input",
"name": "subscription_id",
"message": "Please provide your subscription_id",
"default": "00000000-0000-0000-0000-000000000000",
}
],
style=styles.second,
)["subscription_id"]
client_id = prompt(
[
{
"type": "input",
"name": "client_id",
"message": "Please provide your client_id",
"default": "00000000-0000-0000-0000-000000000000",
}
],
style=styles.second,
)["client_id"]
client_secret = prompt(
[
{
"type": "input",
"name": "client_secret",
"message": "Please provide your client_secret",
"default": "XXXX-XXXX-XXX-XXX-XXX",
}
],
style=styles.second,
)["client_secret"]
tenant_id = prompt(
[
{
"type": "input",
"name": "tenant_id",
"message": "Please provide your tenant_id",
"default": "00000000-0000-0000-0000-000000000000",
}
],
style=styles.second,
)["tenant_id"]
location = prompt(
[
{
"type": "list",
"name": "location",
"message": "Please select your desired location",
"choices": az.locations_list(),
}
],
style=styles.second,
)["location"]
return Config(
location=location,
subscription_id=subscription_id,
client_id=client_id,
client_secret=client_secret,
tenant_id=tenant_id,
)
| Python | 0.000003 |
ec5cb4e878dae00bb6b23965c6c466ee29727583 | Update HashFilter | pybloom/hashfilter.py | pybloom/hashfilter.py | import time
class HashFilter(object):
'''
Plain Temporal Hash Filter for testing purposes
'''
def __init__(self, expiration):
self.expiration = expiration
self.unique_items = {}
def add(self, key, timestamp = None):
timestamp = int(timestamp)
if key in self.unique_items:
if timestamp < self.unique_items[key]:
self.unique_items[key] = timestamp + self.expiration
return True
else:
self.unique_items[key] = timestamp + self.expiration
return False
else:
self.unique_items[key] = timestamp + self.expiration
return False
def contains(self, key, timestamp):
timestamp = int(timestamp)
if key in self.unique_items:
if timestamp < self.unique_items[key]:
return True
else:
del self.unique_items[key]
return False
| import time
class HashFilter(object):
'''
Plain Temporal Hash Filter for testing purposes
'''
def __init__(self, expiration):
self.expiration = expiration
self.unique_items = {}
def add(self, key, timestamp = None):
if key in self.unique_items:
if not timestamp:
timestamp = time.time()
self.unique_items[key] = int(timestamp) + self.expiration
return True
else:
if not timestamp:
timestamp = time.time()
self.unique_items[key] = int(timestamp) + self.expiration
return False
def contains(self, key, timestamp):
timestamp = int(timestamp)
if key in self.unique_items:
if timestamp < self.unique_items[key]:
return True
else:
del self.unique_items[key]
return False
def __contains__(self, key):
timestamp = time.time()
if key in self.unique_items:
if timestamp < self.unique_items[key]:
return True
else:
del self.unique_items[key]
return False
| Python | 0 |
ab13290364a40c0592ed347bf7b91110afaa7115 | Fix test_json | openfisca_france/tests/test_jsons.py | openfisca_france/tests/test_jsons.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
########### DESCRIPTION ############
## Ce programme teste tous les fichiers .json créés par un script et renvoie les erreurs d'OpenFisca
import json
import os
import sys
from biryani1.baseconv import check
import numpy as np
import openfisca_france
from openfisca_france.scripts.compare_openfisca_impots import compare_variable
TaxBenefitSystem = openfisca_france.init_country()
tax_benefit_system = TaxBenefitSystem()
def test():
path = os.path.join(os.path.dirname(__file__), 'json')
err = 1
for fichier in os.listdir(path):
with open(os.path.join(path, fichier)) as officiel:
try:
content = json.load(officiel)
except:
print fichier
official_result = content['resultat_officiel']
json_scenario = content['scenario']
scenario = check(tax_benefit_system.Scenario.make_json_to_instance(
tax_benefit_system = tax_benefit_system))(json_scenario)
year = json_scenario['year']
print scenario
# print scenario.test_case.keys()
totpac = scenario['test_case']['foyers_fiscaux'].values()[0].get('personnes_a_charge')
simulation = scenario.new_simulation()
for code, field in official_result.iteritems():
if compare_variable(code, field, simulation, totpac, fichier, year):
err = 0
assert err, "Erreur"
if __name__ == "__main__":
sys.exit(test())
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
########### DESCRIPTION ############
## Ce programme teste tous les fichiers .json créés par un script et renvoie les erreurs d'OpenFisca
import json
import os
import sys
from biryani1.baseconv import check
import numpy as np
import openfisca_france
from openfisca_france.scripts.compare_openfisca_impots import compare_variable
TaxBenefitSystem = openfisca_france.init_country()
tax_benefit_system = TaxBenefitSystem()
def test():
path = os.path.join(os.path.dirname(__file__), 'json')
err = 1
for fichier in os.listdir(path):
with open(os.path.join(path, fichier)) as officiel:
try:
content = json.load(officiel)
except:
print fichier
official_result = content['resultat_officiel']
json_scenario = content['scenario']
scenario = check(tax_benefit_system.Scenario.make_json_to_instance(
tax_benefit_system = tax_benefit_system))(json_scenario)
year = json_scenario['year']
totpac = scenario.test_case['foyers_fiscaux'].values()[0].get('personnes_a_charge')
simulation = scenario.new_simulation()
for code, field in official_result.iteritems():
if compare_variable(code, field, simulation, totpac, fichier, year):
err = 0
assert err, "Erreur"
if __name__ == "__main__":
sys.exit(test())
| Python | 0.998619 |
72ec0d82bfa59d14dbd9e8ffd89ddcfc990fc4fe | Fix #14 | pygraphml/__init__.py | pygraphml/__init__.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from .attribute import Attribute
from .item import Item
from .point import Point
from .node import Node
from .edge import Edge
from .graph import Graph
from .graphml_parser import GraphMLParser
__version__ = '2.1.4'
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from .attribute import Attribute
from .item import Item
from .point import Point
from .node import Node
from .edge import Edge
from .graph import Graph
from .graphml_parser import GraphMLParser
__version__ = '2.1.3'
| Python | 0.000001 |
145dc7d22301cffdf43498924f1a908f0d160512 | Allow deep recursion | pylint_runner/main.py | pylint_runner/main.py | #!/usr/bin/env python
# pylint: disable=no-member
"""
Runs pylint on all contained python files in this directory, printint out
nice colorized warnings/errors without all the other report fluff
"""
from __future__ import print_function
import os
import sys
import colorama
import pylint
import pylint.lint
__author__ = "Matthew 'MasterOdin' Peveler"
__license__ = "The MIT License (MIT)"
IGNORE_FOLDERS = [".git", ".idea", "__pycache__"]
ARGS = ["--reports=n", "--output-format=colorized", "--disable=locally-disabled"]
colorama.init(autoreset=True)
def runner(output=None, error=None):
"""
Runs pylint on all python files in the current directory
"""
pylint_output = output if output is not None else sys.stdout
pylint_error = error if error is not None else sys.stderr
savedout, savederr = sys.__stdout__, sys.__stderr__
sys.stdout = pylint_output
sys.stderr = pylint_error
pylint_files = get_files_from_dir(os.curdir)
version = '.'.join([str(x) for x in sys.version_info[0:3]])
print("Using pylint " + colorama.Fore.RED + pylint.__version__ +
colorama.Fore.RESET + " for python " + colorama.Fore.RED +
version + colorama.Fore.RESET)
print("pylint running on the following files:")
for pylint_file in pylint_files:
split_file = pylint_file.split("/")
split_file[-1] = colorama.Fore.CYAN + split_file[-1] + colorama.Fore.RESET
pylint_file = '/'.join(split_file)
print("- " + pylint_file)
print("----")
run = pylint.lint.Run(ARGS + pylint_files, exit=False)
sys.stdout = savedout
sys.stderr = savederr
sys.exit(run.linter.msg_status)
def get_files_from_dir(current_dir):
"""
Recursively walk through a directory and get all python files and then walk
through any potential directories that are found off current directory,
so long as not within IGNORE_FOLDERS
:return: all python files that were found off current_dir
"""
if current_dir[-1] != "/" and current_dir != ".":
current_dir += "/"
files = []
for dir_file in os.listdir(current_dir):
if current_dir != ".":
file_path = current_dir + dir_file
else:
file_path = dir_file
if os.path.isfile(file_path):
file_split = os.path.splitext(dir_file)
if len(file_split) == 2 and file_split[0] != "" \
and file_split[1] == '.py':
files.append(file_path)
elif (os.path.isdir(dir_file) or os.path.isdir(file_path)) and dir_file not in IGNORE_FOLDERS:
path = dir_file + "/"
if current_dir != "" and current_dir != ".":
path = current_dir.rstrip("/") + "/" + path
files += get_files_from_dir(path)
return files
| #!/usr/bin/env python
# pylint: disable=no-member
"""
Runs pylint on all contained python files in this directory, printint out
nice colorized warnings/errors without all the other report fluff
"""
from __future__ import print_function
import os
import sys
import colorama
import pylint
import pylint.lint
__author__ = "Matthew 'MasterOdin' Peveler"
__license__ = "The MIT License (MIT)"
IGNORE_FOLDERS = [".git", ".idea", "__pycache__"]
ARGS = ["--reports=n", "--output-format=colorized", "--disable=locally-disabled"]
colorama.init(autoreset=True)
def runner(output=None, error=None):
"""
Runs pylint on all python files in the current directory
"""
pylint_output = output if output is not None else sys.stdout
pylint_error = error if error is not None else sys.stderr
savedout, savederr = sys.__stdout__, sys.__stderr__
sys.stdout = pylint_output
sys.stderr = pylint_error
pylint_files = get_files_from_dir(os.curdir)
version = '.'.join([str(x) for x in sys.version_info[0:3]])
print("Using pylint " + colorama.Fore.RED + pylint.__version__ +
colorama.Fore.RESET + " for python " + colorama.Fore.RED +
version + colorama.Fore.RESET)
print("pylint running on the following files:")
for pylint_file in pylint_files:
split_file = pylint_file.split("/")
split_file[-1] = colorama.Fore.CYAN + split_file[-1] + colorama.Fore.RESET
pylint_file = '/'.join(split_file)
print("- " + pylint_file)
print("----")
run = pylint.lint.Run(ARGS + pylint_files, exit=False)
sys.stdout = savedout
sys.stderr = savederr
sys.exit(run.linter.msg_status)
def get_files_from_dir(current_dir):
"""
Recursively walk through a directory and get all python files and then walk
through any potential directories that are found off current directory,
so long as not within IGNORE_FOLDERS
:return: all python files that were found off current_dir
"""
if current_dir[-1] != "/" and current_dir != ".":
current_dir += "/"
files = []
for dir_file in os.listdir(current_dir):
if current_dir != ".":
file_path = current_dir + dir_file
else:
file_path = dir_file
if os.path.isfile(file_path):
file_split = os.path.splitext(dir_file)
if len(file_split) == 2 and file_split[0] != "" \
and file_split[1] == '.py':
files.append(file_path)
elif os.path.isdir(dir_file) and dir_file not in IGNORE_FOLDERS:
path = dir_file+"/"
if current_dir != "" and current_dir != ".":
path = current_dir.rstrip("/")+"/"+path
files += get_files_from_dir(path)
return files
| Python | 0.999987 |
3905327d8cb02c6c7929f6b3bd12658c6bc1b6ab | bump to 1.73 | pyperform/__init__.py | pyperform/__init__.py | from __future__ import print_function
__version__ = '1.73'
from pyperform.benchmark import Benchmark
from .comparisonbenchmark import ComparisonBenchmark
from .benchmarkedclass import BenchmarkedClass
from .benchmarkedfunction import BenchmarkedFunction
from .timer import timer
from .exceptions import ValidationError
def enable():
"""
Enable all benchmarking.
"""
Benchmark.enable = True
ComparisonBenchmark.enable = True
BenchmarkedFunction.enable = True
BenchmarkedClass.enable = True
def disable():
"""
Disable all benchmarking.
"""
Benchmark.enable = False
ComparisonBenchmark.enable = False
BenchmarkedFunction.enable = False
BenchmarkedClass.enable = False
| from __future__ import print_function
__version__ = '1.72'
from pyperform.benchmark import Benchmark
from .comparisonbenchmark import ComparisonBenchmark
from .benchmarkedclass import BenchmarkedClass
from .benchmarkedfunction import BenchmarkedFunction
from .timer import timer
from .exceptions import ValidationError
def enable():
"""
Enable all benchmarking.
"""
Benchmark.enable = True
ComparisonBenchmark.enable = True
BenchmarkedFunction.enable = True
BenchmarkedClass.enable = True
def disable():
"""
Disable all benchmarking.
"""
Benchmark.enable = False
ComparisonBenchmark.enable = False
BenchmarkedFunction.enable = False
BenchmarkedClass.enable = False
| Python | 0.000001 |
813fac88b392f81825d60f3862a09718f12bf424 | add ccsd | pyquante2/__init__.py | pyquante2/__init__.py | from pyquante2.basis.basisset import basisset
from pyquante2.basis.cgbf import cgbf,sto
from pyquante2.basis.pgbf import pgbf
from pyquante2.geo.molecule import molecule
from pyquante2.geo.samples import *
from pyquante2.graphics.vtkplot import vtk_orbs
from pyquante2.grid.grid import grid
from pyquante2.ints.one import S,T,V
from pyquante2.pt.mp2 import mp2
from pyquante2.cc.ccsd import ccsd
from pyquante2.scf.hamiltonians import rhf,uhf
try:
import matplotlib
from pyquante2.graphics.lineplot import lineplot_orbs,line
from pyquante2.graphics.contourplot import contourplot
except:
pass
| from pyquante2.basis.basisset import basisset
from pyquante2.basis.cgbf import cgbf,sto
from pyquante2.basis.pgbf import pgbf
from pyquante2.geo.molecule import molecule
from pyquante2.geo.samples import *
from pyquante2.graphics.vtkplot import vtk_orbs
from pyquante2.grid.grid import grid
from pyquante2.ints.one import S,T,V
from pyquante2.pt.mp2 import mp2
from pyquante2.scf.hamiltonians import rhf,uhf
try:
import matplotlib
from pyquante2.graphics.lineplot import lineplot_orbs,line
from pyquante2.graphics.contourplot import contourplot
except:
pass
| Python | 0.000001 |
89ed1ea77e2e92ae9a953404552a229854ce0f9c | Add option to don't add route | pyramid_auth/views.py | pyramid_auth/views.py | from pyramid.view import view_config
from pyramid.httpexceptions import (
HTTPFound,
HTTPForbidden,
)
from pyramid.security import (
unauthenticated_userid,
remember,
forget,
)
from urllib import urlencode
import tw2.core as twc
from . import forms
class BaseView(object):
def __init__(self, context, request):
self.context = context
self.request = request
def forbidden(self):
return {}
class BaseLoginView(BaseView):
def get_validate_func(self):
return self.request.registry.settings[
'authentication.validate_function']
def _get_next_location(self):
login_url = self.request.route_url('login')
referrer = self.request.url
if referrer == login_url:
referrer = '/'
return self.request.params.get('next', referrer)
def login(self):
LoginForm = forms.create_login_form(self.request,
self.get_validate_func())
widget = LoginForm().req()
if self.request.method == 'POST':
try:
data = widget.validate(self.request.POST)
headers = remember(self.request, data['login'])
return HTTPFound(location=self._get_next_location(),
headers=headers)
except twc.ValidationError, e:
widget = e.widget
return dict(widget=widget)
def logout(self):
headers = forget(self.request)
location = self.request.params.get('next', self.request.application_url)
return HTTPFound(location=location, headers=headers)
def forbidden_redirect(self):
if unauthenticated_userid(self.request):
# The user is logged but doesn't have the right permission
location = self.request.route_url('forbidden')
else:
login_url = self.request.route_url('login')
location = '%s?%s' % (login_url, urlencode({'next': self.request.url}))
return HTTPFound(location=location)
def base_includeme(config):
if config.registry.settings.get('authentication.no_routes'):
return
config.add_view(
BaseView,
attr='forbidden',
context=HTTPForbidden,
renderer='auth/forbidden.mak')
def login_includeme(config):
if config.registry.settings.get('authentication.no_routes'):
return
ViewClass = BaseLoginView
config.add_view(
ViewClass,
attr='forbidden_redirect',
context=HTTPForbidden)
config.add_route(
'forbidden',
'/forbidden',
)
config.add_view(
ViewClass,
attr='forbidden',
route_name='forbidden',
renderer='auth/forbidden.mak')
config.add_route(
'login',
'/login',
)
config.add_view(
ViewClass,
attr='login',
route_name='login',
renderer='auth/login.mak')
config.add_route(
'logout',
'/logout',
)
config.add_view(
ViewClass,
attr='logout',
route_name='logout')
| from pyramid.view import view_config
from pyramid.httpexceptions import (
HTTPFound,
HTTPForbidden,
)
from pyramid.security import (
unauthenticated_userid,
remember,
forget,
)
from urllib import urlencode
import tw2.core as twc
from . import forms
class BaseView(object):
def __init__(self, context, request):
self.context = context
self.request = request
def forbidden(self):
return {}
class BaseLoginView(BaseView):
def get_validate_func(self):
return self.request.registry.settings[
'authentication.validate_function']
def _get_next_location(self):
login_url = self.request.route_url('login')
referrer = self.request.url
if referrer == login_url:
referrer = '/'
return self.request.params.get('next', referrer)
def login(self):
LoginForm = forms.create_login_form(self.request,
self.get_validate_func())
widget = LoginForm().req()
if self.request.method == 'POST':
try:
data = widget.validate(self.request.POST)
headers = remember(self.request, data['login'])
return HTTPFound(location=self._get_next_location(),
headers=headers)
except twc.ValidationError, e:
widget = e.widget
return dict(widget=widget)
def logout(self):
headers = forget(self.request)
location = self.request.params.get('next', self.request.application_url)
return HTTPFound(location=location, headers=headers)
def forbidden_redirect(self):
if unauthenticated_userid(self.request):
# The user is logged but doesn't have the right permission
location = self.request.route_url('forbidden')
else:
login_url = self.request.route_url('login')
location = '%s?%s' % (login_url, urlencode({'next': self.request.url}))
return HTTPFound(location=location)
def base_includeme(config):
config.add_view(
BaseView,
attr='forbidden',
context=HTTPForbidden,
renderer='auth/forbidden.mak')
def login_includeme(config):
ViewClass = BaseLoginView
config.add_view(
ViewClass,
attr='forbidden_redirect',
context=HTTPForbidden)
config.add_route(
'forbidden',
'/forbidden',
)
config.add_view(
ViewClass,
attr='forbidden',
route_name='forbidden',
renderer='auth/forbidden.mak')
config.add_route(
'login',
'/login',
)
config.add_view(
ViewClass,
attr='login',
route_name='login',
renderer='auth/login.mak')
config.add_route(
'logout',
'/logout',
)
config.add_view(
ViewClass,
attr='logout',
route_name='logout')
| Python | 0.000001 |
f808b67c9a067d9addd75f09e10853c3812d6101 | Refactor code | transfers/examples/pre-transfer/00_unbag.py | transfers/examples/pre-transfer/00_unbag.py | #!/usr/bin/env python
# Script to re-package unzipped bags as standard transfers, utilizing checksums from bag manifest.
# Assumes bags are structured as either bag/data/(content) or bag/data/objects/(content).
# Enables use of scripts to add metadata to SIP without failing transfer at bag validation.
from __future__ import print_function, unicode_literals
import os
import shutil
import sys
def main(transfer_path):
transfer_path = os.path.abspath(transfer_path)
# check if transfer is an unzipped bag
if not os.path.isfile(os.path.join(transfer_path, 'bag-info.txt')):
return 1
# move files in data up one level if 'objects' folder already exists
data_path = os.path.join(transfer_path, 'data')
if os.path.isdir(os.path.join(data_path, 'objects')):
data_contents = os.listdir(data_path)
data_contents = [os.path.join(data_path, filename) for filename in data_contents]
for f in data_contents:
shutil.move(f, transfer_path)
# otherwise, rename data to objects
else:
os.rename(data_path, os.path.join(transfer_path, 'objects'))
# create metadata and subdoc folders if don't already exist
metadata_dir = os.path.join(transfer_path, 'metadata')
subdoc_dir = os.path.join(metadata_dir, 'submissionDocumentation')
if not os.path.isdir(metadata_dir):
os.mkdir(metadata_dir)
if not os.path.isdir(subdoc_dir):
os.mkdir(subdoc_dir)
# write manifest checksums to checksum file
with open(os.path.join(transfer_path, 'manifest-md5.txt'), 'r') as old_file:
with open (os.path.join(metadata_dir, 'checksum.md5'), 'w') as new_file:
for line in old_file:
if "data/objects/" in line:
new_line = line.replace("data/objects/", "../objects/")
else:
new_line = line.replace("data/", "../objects/")
new_file.write(new_line)
# move bag files to submissionDocumentation
for bagfile in 'bag-info.txt', 'bagit.txt', 'manifest-md5.txt', 'tagmanifest-md5.txt':
shutil.copy2(os.path.join(transfer_path, bagfile), os.path.join(subdoc_dir, bagfile))
os.remove(os.path.join(transfer_path, bagfile))
return 0
if __name__ == '__main__':
transfer_path = sys.argv[1]
main(transfer_path)
| #!/usr/bin/env python
# Script to re-package unzipped bags as standard transfers, utilizing checksums from bag manifest.
# Assumes bags are structured as either bag/data/(content) or bag/data/objects/(content).
# Enables use of scripts to add metadata to SIP without failing transfer at bag validation.
from __future__ import print_function, unicode_literals
import os
import shutil
import sys
def main(transfer_path):
transfer_path = os.path.abspath(transfer_path)
# check if transfer is an unzipped bag
if not os.path.isfile(os.path.join(transfer_path, 'bag-info.txt')):
return 1
# move files in data up one level if 'objects' folder already exists
data_path = os.path.join(transfer_path, 'data')
if os.path.isdir(os.path.join(data_path, 'objects')):
data_contents = os.listdir(data_path)
data_contents = [os.path.abspath(data_path) + '/' + filename for filename in data_contents]
for f in data_contents:
shutil.move(f, transfer_path)
# otherwise, rename data to objects
else:
os.rename(data_path, os.path.join(transfer_path, 'objects'))
# create metadata and subdoc folders if don't already exist
metadata_dir = os.path.join(transfer_path, 'metadata')
subdoc_dir = os.path.join(metadata_dir, 'submissionDocumentation')
if not os.path.isdir(metadata_dir):
os.mkdir(metadata_dir)
if not os.path.isdir(subdoc_dir):
os.mkdir(subdoc_dir)
# write manifest checksums to checksum file
with open(os.path.join(transfer_path, 'manifest-md5.txt'), 'r') as old_file:
with open (os.path.join(metadata_dir, 'checksum.md5'), 'w') as new_file:
manifest_content = old_file.readlines()
for line in manifest_content:
if "data/objects/" in line:
new_line = line.replace("data/objects/", "../objects/")
else:
new_line = line.replace("data/", "../objects/")
new_file.write(new_line)
# move bag files to submissionDocumentation
for bagfile in 'bag-info.txt', 'bagit.txt', 'manifest-md5.txt', 'tagmanifest-md5.txt':
shutil.copy2(os.path.join(transfer_path, bagfile), os.path.join(subdoc_dir, bagfile))
os.remove(os.path.join(transfer_path, bagfile))
return 0
if __name__ == '__main__':
transfer_path = sys.argv[1]
main(transfer_path)
| Python | 0.000002 |
1d5d767433b611d3c5f4627cf73dea7c9f86c748 | Reformat and update copyright. | spotseeker_server/test/schema.py | spotseeker_server/test/schema.py | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from spotseeker_server.models import *
from django.test.client import Client
from django.test import TestCase
import simplejson as json
from mock import patch
from spotseeker_server import models
from django.test.utils import override_settings
@override_settings(SPOTSEEKER_AUTH_MODULE="spotseeker_server.auth.all_ok")
@override_settings(
SPOTSEEKER_SPOT_FORM="spotseeker_server.default_forms.spot."
"DefaultSpotForm"
)
@override_settings(
SPOTSEEKER_SPOTEXTENDEDINFO_FORM="spotseeker_server.default_forms.spot."
"DefaultSpotExtendedInfoForm"
)
class SpotSchemaTest(TestCase):
def test_content_type(self):
c = Client()
url = "/api/v1/schema"
response = c.get(url)
self.assertEqual(response["Content-Type"], "application/json")
def test_regular_spot_info(self):
c = Client()
response = c.get("/api/v1/schema")
schema = json.loads(response.content)
self.assertEqual(schema["manager"], "unicode")
self.assertEqual(schema["capacity"], "int")
self.assertEqual(schema["last_modified"], "auto")
self.assertEqual(schema["uri"], "auto")
def test_location_spot_info(self):
c = Client()
response = c.get("/api/v1/schema")
schema = json.loads(response.content)
schema_location = schema["location"]
self.assertEqual(schema_location["latitude"], "decimal")
self.assertEqual(schema_location["room_number"], "unicode")
self.assertEqual(schema_location["floor"], "unicode")
def test_spot_image_info(self):
c = Client()
response = c.get("/api/v1/schema")
schema = json.loads(response.content)
schema_image = schema["images"][0]
self.assertEqual(schema_image["description"], "unicode")
self.assertEqual(schema_image["modification_date"], "auto")
self.assertEqual(schema_image["width"], "int")
def test_spot_types(self):
SpotType.objects.create(name="Jedi")
SpotType.objects.create(name="Sith")
c = Client()
response = c.get("/api/v1/schema")
schema = json.loads(response.content)
schema_types = schema["type"]
self.assertEqual(len(schema_types), 2)
SpotType.objects.create(name="Ewok")
response = c.get("/api/v1/schema")
schema = json.loads(response.content)
schema_types = schema["type"]
self.assertEqual(len(schema_types), 3)
def test_extended_info(self):
test_spot = Spot.objects.create(id=1, name="Test")
SpotExtendedInfo.objects.create(
spot=test_spot,
key="noise_level",
value=["silent", "quiet", "moderate", "loud", "variable"],
)
SpotExtendedInfo.objects.create(
spot=test_spot, key="has_computers", value=["true"]
)
SpotExtendedInfo.objects.create(
spot=test_spot, key="orientation", value="unicode"
)
SpotExtendedInfo.objects.create(
spot=test_spot, key="num_computers", value="int"
)
c = Client()
response = c.get("/api/v1/schema")
schema = json.loads(response.content)
extended_info = schema["extended_info"]
self.assertEqual(extended_info["noise_level"], "unicode")
self.assertEqual(extended_info["has_computers"], "unicode")
self.assertEqual(extended_info["orientation"], "unicode")
self.assertEqual(extended_info["num_computers"], "unicode")
| # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
""" Copyright 2012, 2013 UW Information Technology, University of Washington
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from spotseeker_server.models import *
from django.test.client import Client
from django.test import TestCase
import simplejson as json
from mock import patch
from spotseeker_server import models
from django.test.utils import override_settings
@override_settings(
SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok')
@override_settings(
SPOTSEEKER_SPOT_FORM='spotseeker_server.default_forms.spot.'
'DefaultSpotForm')
@override_settings(
SPOTSEEKER_SPOTEXTENDEDINFO_FORM='spotseeker_server.default_forms.spot.'
'DefaultSpotExtendedInfoForm')
class SpotSchemaTest(TestCase):
def test_content_type(self):
c = Client()
url = "/api/v1/schema"
response = c.get(url)
self.assertEqual(response["Content-Type"], "application/json")
def test_regular_spot_info(self):
c = Client()
response = c.get("/api/v1/schema")
schema = json.loads(response.content)
self.assertEqual(schema["manager"], "unicode")
self.assertEqual(schema["capacity"], "int")
self.assertEqual(schema["last_modified"], "auto")
self.assertEqual(schema["uri"], "auto")
def test_location_spot_info(self):
c = Client()
response = c.get("/api/v1/schema")
schema = json.loads(response.content)
schema_location = schema["location"]
self.assertEqual(schema_location["latitude"], "decimal")
self.assertEqual(schema_location["room_number"], "unicode")
self.assertEqual(schema_location["floor"], "unicode")
def test_spot_image_info(self):
c = Client()
response = c.get("/api/v1/schema")
schema = json.loads(response.content)
schema_image = schema["images"][0]
self.assertEqual(schema_image["description"], "unicode")
self.assertEqual(schema_image["modification_date"], "auto")
self.assertEqual(schema_image["width"], "int")
def test_spot_types(self):
SpotType.objects.create(name="Jedi")
SpotType.objects.create(name="Sith")
c = Client()
response = c.get("/api/v1/schema")
schema = json.loads(response.content)
schema_types = schema["type"]
self.assertEqual(len(schema_types), 2)
SpotType.objects.create(name="Ewok")
response = c.get("/api/v1/schema")
schema = json.loads(response.content)
schema_types = schema["type"]
self.assertEqual(len(schema_types), 3)
def test_extended_info(self):
test_spot = Spot.objects.create(id=1, name="Test")
SpotExtendedInfo.objects.create(spot=test_spot,
key="noise_level",
value=["silent",
"quiet",
"moderate",
"loud",
"variable"])
SpotExtendedInfo.objects.create(spot=test_spot,
key="has_computers",
value=["true"])
SpotExtendedInfo.objects.create(spot=test_spot,
key="orientation",
value="unicode")
SpotExtendedInfo.objects.create(spot=test_spot,
key="num_computers",
value="int")
c = Client()
response = c.get("/api/v1/schema")
schema = json.loads(response.content)
extended_info = schema["extended_info"]
self.assertEqual(extended_info["noise_level"], "unicode")
self.assertEqual(extended_info["has_computers"], "unicode")
self.assertEqual(extended_info["orientation"], "unicode")
self.assertEqual(extended_info["num_computers"], "unicode")
| Python | 0 |
2fbd5ceead47ea980e5dfa7b2bc29eafbbab2d72 | remove unneeded import in views | blog/views.py | blog/views.py | from django.http import Http404
from django.shortcuts import render, get_object_or_404, get_list_or_404
from django.utils import timezone
from . import models as blog
def home(request):
NUM_LAST_ARTICLES = 5
articles = blog.Article.objects.filter(date__lte=timezone.now()).order_by('-date')[:NUM_LAST_ARTICLES]
return render(request, 'blog/article.html', {'isroot': True, 'articles': articles})
def article(request, slug=None):
if slug is None:
articles = get_list_or_404(blog.Article)
else:
articles = get_list_or_404(blog.Article, slug=slug)
return render(request, 'blog/article.html', {
'isroot': bool(slug is None),
'articles': articles
})
def category(request, slug=None):
if slug is None:
categories = get_list_or_404(blog.Category)
else:
categories = get_list_or_404(blog.Category, slug=slug)
return render(request, 'blog/category.html', {
'isroot': bool(slug is None),
'categories': categories,
})
| from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from django.shortcuts import render, get_object_or_404, get_list_or_404
from django.utils import timezone
from . import models as blog
def home(request):
NUM_LAST_ARTICLES = 5
articles = blog.Article.objects.filter(date__lte=timezone.now()).order_by('-date')[:NUM_LAST_ARTICLES]
return render(request, 'blog/article.html', {'isroot': True, 'articles': articles})
def article(request, slug=None):
if slug is None:
articles = get_list_or_404(blog.Article)
else:
articles = get_list_or_404(blog.Article, slug=slug)
return render(request, 'blog/article.html', {
'isroot': bool(slug is None),
'articles': articles
})
def category(request, slug=None):
if slug is None:
categories = get_list_or_404(blog.Category)
else:
categories = get_list_or_404(blog.Category, slug=slug)
return render(request, 'blog/category.html', {
'isroot': bool(slug is None),
'categories': categories,
})
| Python | 0 |
62c76a953ea5a1c753f9c7447bab5800bb25c2b1 | add life expantency context bulk down for ihme | ddf_utils/factory/igme.py | ddf_utils/factory/igme.py | # -*- coding: utf-8 -*-
"""download sources from CME info portal"""
__doc__ = """T.B.D"""
import os.path as osp
import re
import requests
import pandas as pd
from lxml import html
from urllib.parse import urlsplit, urljoin
url = 'http://www.childmortality.org/'
metadata = None
def load_metadata():
r = requests.get(url)
h = html.fromstring(r.content)
flist = []
for l in h.xpath('//a/@href'):
if l.endswith('xlsx'):
#print(urljoin(url, l))
flist.append(urljoin(url, l))
md = pd.DataFrame(flist, columns=['link'])
md['name'] = md['link'].map(lambda x: osp.basename(x)[:-5])
global metadata
metadata = md[['name', 'link']].copy()
def has_newer_source(v):
"""accepts a int and return true if version inferred from metadata is bigger."""
if metadata is None:
load_metadata()
link = metadata.loc[0, 'link']
ver = re.match('.*files_v(\d+).*', link).groups()[0]
if int(ver) > v:
return True
return False
def bulk_download(out_dir, name=None):
if metadata is None:
load_metadata()
if name:
names = [name]
else:
names = metadata['name'].values
for n in names:
if n not in metadata['name'].values:
raise KeyError("{} not found in page.".format(n))
link = metadata.loc[metadata['name'] == n, 'link'].values[0]
res = requests.get(link)
out_path = osp.join(out_dir, osp.basename(link))
with open(osp.expanduser(out_path), 'wb') as f:
f.write(res.content)
f.close()
| # -*- coding: utf-8 -*-
"""download sources from CME info portal"""
__doc__ = """T.B.D"""
import os.path as osp
import re
import requests
import pandas as pd
from io import BytesIO
from lxml import html
from urllib.parse import urlsplit, urljoin
url = 'http://www.childmortality.org/'
metadata = None
def load_metadata():
r = requests.get(url)
h = html.fromstring(r.content)
flist = []
for l in h.xpath('//a/@href'):
if l.endswith('xlsx'):
#print(urljoin(url, l))
flist.append(urljoin(url, l))
md = pd.DataFrame(flist, columns=['link'])
md['name'] = md['link'].map(lambda x: osp.basename(x)[:-5])
global metadata
metadata = md[['name', 'link']].copy()
def has_newer_source(v):
"""accepts a int and return true if version inferred from metadata is bigger."""
if metadata is None:
load_metadata()
link = metadata.loc[0, 'link']
ver = re.match('.*files_v(\d+).*', link).groups()[0]
if int(ver) > v:
return True
return False
def bulk_download(out_dir, name=None):
if metadata is None:
load_metadata()
if name:
names = [name]
else:
names = metadata['name'].values
for n in names:
if n not in metadata['name'].values:
raise KeyError("{} not found in page.".format(n))
link = metadata.loc[metadata['name'] == n, 'link'].values[0]
res = requests.get(link)
out_path = osp.join(out_dir, osp.basename(link))
with open(osp.expanduser(out_path), 'wb') as f:
b = BytesIO(res.content)
f.write(b.read())
f.close()
| Python | 0 |
5dce1ee6c54d8686cee42651528c087e9939368b | Bump version, 0.9.4.21 | dp_tornado/version.py | dp_tornado/version.py | __version_info__ = (0, 9, 4, 22)
__version__ = '.'.join(map(str, __version_info__))
| __version_info__ = (0, 9, 4, 21)
__version__ = '.'.join(map(str, __version_info__))
| Python | 0 |
7eca9eb4d5c7134b84c3462ac01cf1679557819f | Update example | example/app/tables.py | example/app/tables.py | #!/usr/bin/env python
# coding: utf-8
from table.columns import Column, LinkColumn, DatetimeColumn, Link
from table.utils import A
from table import Table
from models import Person
class PersonTable(Table):
id = Column(field='id', header=u'#', header_attrs={'width': '5%'})
name = Column(field='name', header=u'NAME')
action = LinkColumn(header=u'ACTION', links=[Link(text=u'EDIT', viewname='app.views.edit', args=(A('id'),))])
class Meta:
model = Person
ext_button_template = "button.html"
# disable_search = True
# disable_info = True
# disable_length_menu = True
# disable_pagination = True
| #!/usr/bin/env python
# coding: utf-8
from table.columns import Column, LinkColumn, DatetimeColumn, Link
from table.utils import A
from table import Table
from models import Person
class PersonTable(Table):
id = Column(field='id', header=u'#', header_attrs={'width': '5%'})
name = Column(field='name', header=u'姓名')
action = LinkColumn(header=u'操作', links=[Link(text=u'编辑', viewname='app.views.edit', args=(A('id'),))])
class Meta:
model = Person
ext_button_link = "http://www.baidu.com"
ext_button_text = "Add +"
| Python | 0.000001 |
3a156fb107db25c8171adcc1346fd17f36222092 | Fix refresh count | galaxy/main/management/commands/refresh_role_counts.py | galaxy/main/management/commands/refresh_role_counts.py | import time
from math import ceil, floor
from github import Github
from django.conf import settings
from django.db.models import Max, Q
from django.core.management.base import BaseCommand, CommandError
from galaxy.main.models import Role, RefreshRoleCount
from galaxy.main.celerytasks.tasks import refresh_role_counts
class Command(BaseCommand):
help = 'Update each role with GitHub counts'
def handle(self, *args, **options):
agg = Role.objects.filter(is_valid=True,active=True).aggregate(Max('id'))
max_id = agg['id__max']
size = ceil(max_id / float(len(settings.GITHUB_TASK_USERS)))
in_list = []
print 'Refresh Role Counts'
for i in range(len(settings.GITHUB_TASK_USERS)):
start = size * i
end = size * (i + 1)
print 'User: %s' % settings.GITHUB_TASK_USERS[i]['username']
print 'Range: %d - %d' % (start, end)
r = RefreshRoleCount.objects.create(
state='PENDING',
description='User: %s Range: %s-%s' % (settings.GITHUB_TASK_USERS[i]['username'], start, end)
)
in_list.append(r.id)
gh_api = Github(settings.GITHUB_TASK_USERS[i]['username'],settings.GITHUB_TASK_USERS[i]['password'])
refresh_role_counts.delay(start, end, gh_api, r)
print "Request submitted to Celery."
finished = False
started = time.time()
while not finished:
finished = True
for obj in RefreshRoleCount.objects.filter(pk__in=in_list,~Q(state='COMPLETED')):
if not obj.state == 'FINISHED':
finished = False
else:
print '%s Total: %s Passed: %s Failed: %s' % (obj.description, obj.failed + obj.passed, obj.passed, obj.failed)
obj.state = 'COMPLETED'
obj.save()
time.sleep(60)
elapsed = time.time() - started
hours = floor(elapsed / 3600) if elapsed > 3600 else 0
minutes = floor((elapsed - (hours * 3600)) / 60) if (elapsed - (hours * 3600)) > 60 else 0
seconds = elapsed - (hours * 3600) - (minutes * 60)
print 'Elapsed time %02d.%02d.%02d' % (hours, minutes, seconds)
| import time
from math import ceil, floor
from github import Github
from django.conf import settings
from django.db.models import Max
from django.core.management.base import BaseCommand, CommandError
from galaxy.main.models import Role, RefreshRoleCount
from galaxy.main.celerytasks.tasks import refresh_role_counts
class Command(BaseCommand):
help = 'Update each role with GitHub counts'
def handle(self, *args, **options):
agg = Role.objects.filter(is_valid=True,active=True).aggregate(Max('id'))
max_id = agg['id__max']
size = ceil(max_id / float(len(settings.GITHUB_TASK_USERS)))
in_list = []
print 'Refresh Role Counts'
# for i in range(len(settings.GITHUB_TASK_USERS)):
i = 1
start = size * i
end = size * (i + 1)
print 'User: %s' % settings.GITHUB_TASK_USERS[i]['username']
print 'Range: %d - %d' % (start, end)
r = RefreshRoleCount.objects.create(
state='PENDING',
description='User: %s Range: %s-%s' % (settings.GITHUB_TASK_USERS[i]['username'], start, end)
)
in_list.append(r.id)
gh_api = Github(settings.GITHUB_TASK_USERS[i]['username'],settings.GITHUB_TASK_USERS[i]['password'])
refresh_role_counts.delay(start, end, gh_api, r)
print "Request submitted to Celery."
finished = False
started = time.time()
while not finished:
finished = True
for obj in RefreshRoleCount.objects.filter(pk__in=in_list,state__not='COMPLETED'):
if not obj.state == 'FINISHED':
finished = False
else:
print '%s Total: %s Passed: %s Failed: %s' % (obj.description, obj.failed + obj.passed, obj.passed, obj.failed)
obj.state = 'COMPLETED'
obj.save()
time.sleep(60)
elapsed = time.time() - started
hours = floor(elapsed / 3600) if elapsed > 3600 else 0
minutes = floor((elapsed - (hours * 3600)) / 60) if (elapsed - (hours * 3600)) > 60 else 0
seconds = elapsed - (hours * 3600) - (minutes * 60)
print 'Elapsed time %02d.%02d.%02d' % (hours, minutes, seconds)
| Python | 0.000001 |
cd9e9efd8587b5be9e3d9a4e7efeaf26b048b0d2 | fix attribute error on handlers loading | lib/rapidsms/contrib/handlers/settings.py | lib/rapidsms/contrib/handlers/settings.py | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
INSTALLED_HANDLERS = None
EXCLUDED_HANDLERS = []
RAPIDSMS_HANDLERS_EXCLUDE_APPS = [] | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
INSTALLED_HANDLERS = None
EXCLUDED_HANDLERS = []
| Python | 0.000001 |
5223846786b70dd9c198f98f7a620e70b40fab3d | update k84 | chap09/k84.py | chap09/k84.py | #
# usage: python k84.py {N}
#
import sys
import plyvel
import struct
from math import log
def create_matrix(n):
co_db = plyvel.DB('./co.ldb', create_if_missing=True)
word_db = plyvel.DB('./word.ldb', create_if_missing=True)
context_db = plyvel.DB('./context.ldb', create_if_missing=True)
matrix_db = plyvel.DB('./matrix.ldb', create_if_missing=True)
for k, v in co_db:
tmp = k.decode('utf-8').strip().split('\t')
if len(tmp) != 2:
continue
x = 0
f_tc = int.from_bytes(v, 'big')
if f_tc >= 10:
f_t = int.from_bytes(word_db.get(tmp[0].encode('utf-8')), 'big')
f_c = int.from_bytes(context_db.get(tmp[1].encode('utf-8')), 'big')
x = max(log(2, n * f_tc / (f_t * f_c)), 0)
if x != 0:
matrix_db.put(k, struct.pack('>d', x))
co_db.close()
word_db.close()
context_db.close()
matrix_db.close()
def get_matrix(t, c):
matrix_db = plyvel.DB('./matrix.ldb', create_if_missing=True)
t_key = '\t'.join((t, c)).encode('utf-8')
v = float(struct.unpack('>d', matrix_db.get(t_key))[0])
matrix_db.close()
print('X("{}", "{}") = {}'.format(t, c, v))
if __name__ == '__main__':
N = int(sys.argv[1])
create_matrix(N)
get_matrix('of', 'a')
| #
# usage: python k84.py {N}
#
import sys
import plyvel
from math import log
def wc_matrix(n, ofn):
co_db = plyvel.DB('./co.ldb', create_if_missing=True)
word_db = plyvel.DB('./word.ldb', create_if_missing=True)
context_db = plyvel.DB('./context.ldb', create_if_missing=True)
x = 0
ZERO = x.to_bytes((x.bit_length() + 7) // 8, 'big')
for k, v in co_db:
tmp = k.decode('utf-8').strip().split('\t')
if len(tmp) != 2:
continue
x = 0
f_tc = int.from_bytes(v, 'big')
if f_tc >= 10:
f_t = int.from_bytes(word_db.get(tmp[0].encode('utf-8'), ZERO), 'big')
f_c = int.from_bytes(context_db.get(tmp[1].encode('utf-8'), ZERO), 'big')
x = max(log(2, n * f_tc / (f_t * f_c)), 0)
if x != 0:
with open(ofn, 'a') as f:
f.write('{}\t{}\t{}\n'.format(tmp[0], tmp[1], x))
co_db.close()
word_db.close()
context_db.close()
if __name__ == '__main__':
N = int(sys.argv[1])
ofn = 'wc-matrix.txt'
wc_matrix(N, ofn)
| Python | 0 |
241897e2f4596dfee6eae87a6467254e135ac61b | Upgrade ready to fly quads to parts v2. | rcbi/rcbi/spiders/ReadyToFlyQuadsSpider.py | rcbi/rcbi/spiders/ReadyToFlyQuadsSpider.py | import scrapy
from scrapy import log
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from rcbi.items import Part
MANUFACTURERS = ["Tiger", "RTF ", "HQ Prop", "Lemon"]
CORRECT = {"Tiger": "T-Motor", "RTF ": "ReadyToFlyQuads", "HQ Prop": "HQProp", "Lemon": "Lemon Rx"}
STOCK_STATE_MAP = {"http://schema.org/InStock": "in_stock",
"http://schema.org/OutOfStock": "out_of_stock"}
class ReadyToFlyQuadsSpider(CrawlSpider):
name = "readytoflyquads"
allowed_domains = ["readytoflyquads.com"]
start_urls = ["http://www.readytoflyquads.com/catalog/seo_sitemap/product/"]
rules = (
# Extract links matching 'category.php' (but not matching 'subsection.php')
# and follow links from them (since no callback means follow=True by default).
Rule(LinkExtractor(allow=('seo_sitemap/product/', ))),
# Extract links matching 'item.php' and parse them with the spider's method parse_item
Rule(LinkExtractor(allow=('/.*', )), callback='parse_item'),
)
def parse_item(self, response):
headers = response.css("#product-attribute-specs-table th")
data = response.css("#product-attribute-specs-table td")
manufacturer = None
for i, header in enumerate(headers):
header = header.xpath("text()").extract()[0]
if header == "Manufacturer":
manufacturer = data[i].xpath("text()").extract()[0]
item = Part()
if manufacturer and manufacturer != "No":
item["manufacturer"] = manufacturer
item["site"] = self.name
product_name = response.css("div.product-name")
if not product_name:
return
item["name"] = product_name[0].xpath("//h1/text()").extract()[0].strip()
for m in MANUFACTURERS:
if item["name"].startswith(m):
item["name"] = item["name"][len(m):].strip()
if m in CORRECT:
m = CORRECT[m]
item["manufacturer"] = m
break
variant = {}
variant["timestamp"] = response.headers["Date"]
if "Last-Modified" in response.headers:
variant["timestamp"] = response.headers["Last-Modified"]
item["variants"] = [variant]
variant["url"] = response.url
price = response.css("[itemprop=\"price\"]::text")
variant["price"] = price.extract()[0]
availability = response.css("[itemprop=\"availability\"]::attr(href)").extract()
availability = availability[0]
variant["stock_state"] = STOCK_STATE_MAP[availability]
return item
| import scrapy
from scrapy import log
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from rcbi.items import Part
MANUFACTURERS = ["Tiger", "RTF ", "HQ Prop", "Lemon"]
CORRECT = {"Tiger": "T-Motor", "RTF ": "ReadyToFlyQuads", "HQ Prop": "HQProp", "Lemon": "Lemon Rx"}
class ReadyToFlyQuadsSpider(CrawlSpider):
name = "readytoflyquads"
allowed_domains = ["readytoflyquads.com"]
start_urls = ["http://www.readytoflyquads.com/catalog/seo_sitemap/product/"]
rules = (
# Extract links matching 'category.php' (but not matching 'subsection.php')
# and follow links from them (since no callback means follow=True by default).
Rule(LinkExtractor(allow=('seo_sitemap/product/', ))),
# Extract links matching 'item.php' and parse them with the spider's method parse_item
Rule(LinkExtractor(allow=('/.*', )), callback='parse_item'),
)
def parse_item(self, response):
headers = response.css("#product-attribute-specs-table th")
data = response.css("#product-attribute-specs-table td")
manufacturer = None
for i, header in enumerate(headers):
header = header.xpath("text()").extract()[0]
if header == "Manufacturer":
manufacturer = data[i].xpath("text()").extract()[0]
item = Part()
if manufacturer and manufacturer != "No":
item["manufacturer"] = manufacturer
item["site"] = self.name
item["url"] = response.url
product_name = response.css("div.product-name")
if not product_name:
return
item["name"] = product_name[0].xpath("//h1/text()").extract()[0].strip()
for m in MANUFACTURERS:
if item["name"].startswith(m):
item["name"] = item["name"][len(m):].strip()
if m in CORRECT:
m = CORRECT[m]
item["manufacturer"] = m
break
return item
| Python | 0 |
5f96021cf88201447a48a05dab0ff5a1f131b6bc | Fix problem_id command with anonymize argument | edx_data_research/reporting/problem_ids/problem_id.py | edx_data_research/reporting/problem_ids/problem_id.py | '''
In this module, we will generate a csv report for a given problem id, which
will include information about how students fared with a given problem id
'''
from itertools import groupby
from edx_data_research.reporting.edx_base import EdX
class ProblemId(EdX):
def __init__(self, args):
super(self.__class__, self).__init__(args)
self.problem_id = args.problem_id
self.final_attempt = args.final_attempt
def _generate_name_from_problem_id(problem_id, display_name, final_attempt):
'''Generate name of csv output file from problem id'''
attempts_name = '_FinalAttempts' if final_attempt else '_AllAttempts'
return ('_'.join(problem_id.split('/')[3:]) + '_' +
''.join(e for e in display_name if e.isalnum()) + attempts_name +
'.csv')
def problem_id(edx_obj):
edx_obj.collections = ['problem_ids']
cursor = edx_obj.collections['problem_ids'].find({'event.problem_id' :
edx_obj.problem_id})
display_name = cursor[0]['module']['display_name']
one_record = cursor[0]['event']
problem_ids_keys = sorted(one_record['correct_map'].keys(),
key=lambda x : int(x.split('_')[-2]))
problem_ids = []
for key in problem_ids_keys:
try:
item = one_record['submission'][key]
value = item['question']
problem_ids.append('{0} : {1}'.format(key, value))
except UnicodeEncodeError:
value = value.encode("utf-8")
problem_ids.append('{0} : {1}'.format(key, value))
except KeyError:
problem_ids.append('{0}'.format(key))
result = []
for document in cursor:
answers = []
for key in sorted(document['event']['correct_map'].keys(),
key=lambda x : int(x.split('_')[-2])):
try:
answers.append(document['event']['submission'][key]['answer'])
except KeyError:
answers.append('')
row = ([document['hash_id']] if edx_obj.anonymize else
[document['hash_id'], document['user_id'], document['username']])
row.extend([document['event']['attempts'],
document['module']['display_name'], document['time'],
document['event']['success'], document['event']['grade'],
document['event']['max_grade']] + answers)
result.append(row)
if edx_obj.final_attempt:
result = [max(items, key=lambda x : x[1]) for key, items in
groupby(sorted(result, key=lambda x : x[0]), lambda x : x[0])]
csv_report_name = _generate_name_from_problem_id(edx_obj.problem_id,
display_name,
edx_obj.final_attempt)
headers = (['Hash ID'] if edx_obj.anonymize else
['Hash ID', 'User ID', 'Username'])
headers.extend(['Attempt Number', 'Module', 'Time', 'Success',
'Grade Achieved', 'Max Grade'])
edx_obj.generate_csv(result, headers, csv_report_name)
| '''
In this module, we will generate a csv report for a given problem id, which
will include information about how students fared with a given problem id
'''
from itertools import groupby
from edx_data_research.reporting.edx_base import EdX
class ProblemId(EdX):
def __init__(self, args):
super(self.__class__, self).__init__(args)
self.problem_id = args.problem_id
self.final_attempt = args.final_attempt
def _generate_name_from_problem_id(problem_id, display_name, final_attempt):
'''Generate name of csv output file from problem id'''
attempts_name = '_FinalAttempts' if final_attempt else '_AllAttempts'
return ('_'.join(problem_id.split('/')[3:]) + '_' +
''.join(e for e in display_name if e.isalnum()) + attempts_name +
'.csv')
def problem_id(edx_obj):
edx_obj.collections = ['problem_ids']
cursor = edx_obj.collections['problem_ids'].find({'event.problem_id' :
edx_obj.problem_id})
display_name = cursor[0]['module']['display_name']
one_record = cursor[0]['event']
problem_ids_keys = sorted(one_record['correct_map'].keys(),
key=lambda x : int(x.split('_')[-2]))
problem_ids = []
for key in problem_ids_keys:
try:
item = one_record['submission'][key]
value = item['question']
problem_ids.append('{0} : {1}'.format(key, value))
except UnicodeEncodeError:
value = value.encode("utf-8")
problem_ids.append('{0} : {1}'.format(key, value))
except KeyError:
problem_ids.append('{0}'.format(key))
result = []
for document in cursor:
answers = []
for key in sorted(document['event']['correct_map'].keys(),
key=lambda x : int(x.split('_')[-2])):
try:
answers.append(document['event']['submission'][key]['answer'])
except KeyError:
answers.append('')
result.append([document['hash_id'], document['username'],
document['event']['attempts'],
document['module']['display_name'], document['time'],
document['event']['success'], document['event']['grade'],
document['event']['max_grade']] + answers)
if edx_obj.final_attempt:
result = [max(items, key=lambda x : x[1]) for key, items in
groupby(sorted(result, key=lambda x : x[0]), lambda x : x[0])]
csv_report_name = _generate_name_from_problem_id(edx_obj.problem_id,
display_name,
edx_obj.final_attempt)
headers = (['Hash ID'] if edx_obj.anonymize else
['Hash ID', 'User ID', 'Username'])
headers.extend(['Attempt Number', 'Module', 'Time', 'Success',
'Grade Achieved', 'Max Grade'])
edx_obj.generate_csv(result, headers, csv_report_name)
| Python | 0.00177 |
01003d7b64220b794d8e10e78dd26badef4dfcc5 | Fix tests | base/auth/tests.py | base/auth/tests.py | from flask_testing import TestCase
from ..app import create_app
from ..config import test
from ..ext import db
class BaseCoreTest(TestCase):
def create_app(self):
return create_app(test)
def setUp(self):
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_users(self):
from base.auth.models import User
response = self.client.post('/users/login/', data=dict())
self.assertRedirects(response, '/')
user = User(username='test', pw_hash='test', email='test@test.com')
db.session.add(user)
db.session.commit()
self.assertTrue(user.updated_at)
response = self.client.post('/users/login/', data=dict(
email='test@test.com',
action_save=True,
password='test'))
self.assertRedirects(response, '/users/profile/')
response = self.client.get('/users/logout/')
self.assertRedirects(response, '/')
response = self.client.post('/users/register/', data=dict(
username='test2',
email='test2@test.com',
action_save=True,
password='test',
password_confirm='test',
))
self.assertRedirects(response, '/users/profile/')
user = User.query.filter(User.username == 'test2').first()
self.assertEqual(user.email, 'test2@test.com')
def test_manager(self):
from base.auth.models import Role, User
from manage import manager
manager.app = self.app
manager.handle('manage', 'create_role', ['test'])
role = Role.query.filter(Role.name == 'test').first()
self.assertEqual(role.name, 'test')
manager.handle('manage', 'create_user', 'test test@test.com -p 12345'.split())
user = User.query.filter(User.username == 'test').first()
manager.handle('manage', 'add_role', 'test test'.split())
self.assertTrue(role in user.roles)
def test_oauth(self):
from flask import url_for
self.assertTrue(url_for('login_twitter'))
| from flask_testing import TestCase
from ..app import create_app
from ..config import test
from ..ext import db
class BaseCoreTest(TestCase):
def create_app(self):
return create_app(test)
def setUp(self):
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_users(self):
from base.auth.models import User
response = self.client.post('/users/login/', data=dict())
self.assertRedirects(response, '/')
user = User(username='test', pw_hash='test', email='test@test.com')
db.session.add(user)
db.session.commit()
self.assertTrue(user.updated)
response = self.client.post('/users/login/', data=dict(
email='test@test.com',
action_save=True,
password='test'))
self.assertRedirects(response, '/users/profile/')
response = self.client.get('/users/logout/')
self.assertRedirects(response, '/')
response = self.client.post('/users/register/', data=dict(
username='test2',
email='test2@test.com',
action_save=True,
password='test',
password_confirm='test',
))
self.assertRedirects(response, '/users/profile/')
user = User.query.filter(User.username == 'test2').first()
self.assertEqual(user.email, 'test2@test.com')
def test_manager(self):
from base.auth.models import Role, User
from manage import manager
manager.app = self.app
manager.handle('manage', 'create_role', ['test'])
role = Role.query.filter(Role.name == 'test').first()
self.assertEqual(role.name, 'test')
manager.handle('manage', 'create_user', 'test test@test.com -p 12345'.split())
user = User.query.filter(User.username == 'test').first()
manager.handle('manage', 'add_role', 'test test'.split())
self.assertTrue(role in user.roles)
def test_oauth(self):
from flask import url_for
self.assertTrue(url_for('login_twitter'))
| Python | 0.000003 |
69e760e4a571d16e75f30f1e97ea1a917445f333 | Switch to recipe engine "url" module. | recipes/recipe_modules/gitiles/__init__.py | recipes/recipe_modules/gitiles/__init__.py | DEPS = [
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/python',
'recipe_engine/raw_io',
'recipe_engine/url',
]
| DEPS = [
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/python',
'recipe_engine/raw_io',
'url',
]
| Python | 0.000023 |
6ced33f201e8a4e389a077a91ba9ed8bf5c19fa0 | fix issue with number for samples | wqflask/wqflask/correlation/pre_computes.py | wqflask/wqflask/correlation/pre_computes.py | """module contains the code to do the
precomputations of sample data between
two entire datasets"""
import json
from typing import List
from base import data_set
from gn3.computations.correlations import compute_all_sample_correlation
from gn3.computations.correlations import fast_compute_all_sample_correlation
from gn3.computations.correlations import map_shared_keys_to_values
def get_dataset_dict_data(dataset_obj):
"""function to get the dataset data mapped to key"""
dataset_obj.get_trait_data(dataset_obj.group.all_samples_ordered())
return map_shared_keys_to_values(dataset_obj.samplelist,
dataset_obj.trait_data)
def fetch_datasets(base_dataset_name: str, target_dataset_name: str) ->List:
"""query to fetch create datasets and fetch traits
all traits of a dataset"""
# doesnt work for temp
base_dataset = data_set.create_dataset(dataset_name=base_dataset_name)
target_dataset = data_set.create_dataset(dataset_name=target_dataset_name)
# replace with map
return (map(get_dataset_dict_data,
[base_dataset, target_dataset]))
# in the base dataset we just need the traits
def pre_compute_sample_correlation(base_dataset: List,
target_dataset: List) -> List:
"""function compute the correlation between the
a whole dataset against a target
input: target&base_dataset(contains traits and sample results)
output: list containing the computed results
precaution:function is expensive;targets only Exon and
"""
results = []
for trait_info in base_dataset:
result = fast_compute_all_sample_correlation(corr_method="pearson",
this_trait=trait_info,
target_dataset=target_dataset)
# results.append(fast_compute_all_sample_correlation(corr_method="pearson",
# this_trait=trait_info,
# target_dataset=target_dataset))
print("finished")
print(result)
return results
def cache_to_file(base_dataset_name: str, target_dataset_name: str):
"""function to cache the results to file"""
# validate the datasets expiry first
base_dataset_data, target_dataset_data = [list(dataset) for dataset in list(
fetch_datasets(base_dataset_name, target_dataset_name))]
# print(target_dataset_data)
try:
# with open("unique_file_name.json", "w") as file_handler:
# file_handler.write()
dataset_correlation_results = pre_compute_sample_correlation(
base_dataset_data, target_dataset_data)
print(dataset_correlation_results)
# json.dump(dataset_correlation_results, file_handler)
except Exception as error:
raise error
def check_cached_files_validity():
"""function to check the validity of cached files"""
pass
| """module contains the code to do the
precomputations of sample data between
two entire datasets"""
import json
from typing import List
from base import data_set
from gn3.computations.correlations import fast_compute_all_sample_correlation
from gn3.computations.correlations import map_shared_keys_to_values
def get_dataset_dict_data(dataset_obj):
"""function to get the dataset data mapped to key"""
dataset_obj.get_trait_data()
return map_shared_keys_to_values(dataset_obj.samplelist,
dataset_obj.trait_data)
def fetch_datasets(base_dataset_name: str, target_dataset_name: str) ->List:
"""query to fetch create datasets and fetch traits
all traits of a dataset"""
# doesnt work for temp
base_dataset = data_set.create_dataset(dataset_name=base_dataset_name)
target_dataset = data_set.create_dataset(dataset_name=target_dataset_name)
# replace with map
return (map(get_dataset_dict_data,
[base_dataset, target_dataset]))
# in the base dataset we just need the traits
def pre_compute_sample_correlation(base_dataset: List,
target_dataset: List) -> List:
"""function compute the correlation between the
a whole dataset against a target
input: target&base_dataset(contains traits and sample results)
output: list containing the computed results
precaution:function is expensive;targets only Exon and
"""
for trait_info in base_dataset:
yield fast_compute_all_sample_correlation(corr_method="pearson",
this_trait=trait_info,
target_dataset=target_dataset)
def cache_to_file(base_dataset_name: str, target_dataset_name: str):
"""function to cache the results to file"""
# validate the datasets expiry first
base_dataset_data, target_dataset_data = [list(dataset) for dataset in list(
fetch_datasets(base_dataset_name, target_dataset_name))]
try:
with open("unique_file_name.json", "w") as file_handler:
file_handler.write()
dataset_correlation_results = list(pre_compute_sample_correlation(
base_dataset_data, target_dataset_data))
print(dataset_correlation_results)
json.dump(dataset_correlation_results, file_handler)
except Exception as error:
raise error
| Python | 0 |
bb366439065924732b9b1559a0dc776c586fa07c | fix url | regulations/tests/selenium/example_test.py | regulations/tests/selenium/example_test.py | import os
import unittest
import base64
import json
import httplib
import sys
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
class ExampleTest(unittest.TestCase):
def setUp(self):
self.capabilities = webdriver.DesiredCapabilities.CHROME
self.capabilities['tunnel-identifier'] = os.environ['TRAVIS_JOB_NUMBER']
self.capabilities['build'] = os.environ['TRAVIS_BUILD_NUMBER']
self.capabilities['platform'] = 'LINUX'
self.capabilities['version'] = ''
self.capabilities['name'] = 'Example test'
self.username = os.environ['SAUCE_USERNAME']
self.key = os.environ['SAUCE_ACCESS_KEY']
hub_url = "%s:%s" % (self.username, self.key)
self.driver = webdriver.Remote(desired_capabilities=self.capabilities,
command_executor = ("http://%s@ondemand.saucelabs.com:80/wd/hub" % hub_url))
self.jobid = self.driver.session_id
print("Sauce Labs job: https://saucelabs.com/jobs/%s" % self.jobid)
self.driver.implicitly_wait(30)
def test_sauce(self):
self.driver.get('http://localhost:8000/1005')
toc_link_1005_1 = self.driver.find_element_by_xpath('//*[@id="toc"]/ol/li[1]/a')
self.assertEquals(toc_link_1005_1.get_attribute('data-section-id'), '1005-1')
def tearDown(self):
print("https://saucelabs.com/jobs/%s" % self.driver.session_id)
self.driver.quit()
if __name__ == '__main__':
unittest.main()
| import os
import unittest
import base64
import json
import httplib
import sys
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
class ExampleTest(unittest.TestCase):
def setUp(self):
self.capabilities = webdriver.DesiredCapabilities.CHROME
self.capabilities['tunnel-identifier'] = os.environ['TRAVIS_JOB_NUMBER']
self.capabilities['build'] = os.environ['TRAVIS_BUILD_NUMBER']
self.capabilities['platform'] = 'LINUX'
self.capabilities['version'] = ''
self.capabilities['name'] = 'Example test'
self.username = os.environ['SAUCE_USERNAME']
self.key = os.environ['SAUCE_ACCESS_KEY']
hub_url = "%s:%s" % (self.username, self.key)
self.driver = webdriver.Remote(desired_capabilities=self.capabilities,
command_executor = ("http://%s@ondemand.saucelabs.com:80/wd/hub" % hub_url))
self.jobid = self.driver.session_id
print("Sauce Labs job: https://saucelabs.com/jobs/%s" % self.jobid)
self.driver.implicitly_wait(30)
def test_sauce(self):
self.driver.get('http://localhost:8000')
toc_link_1005_1 = self.driver.find_element_by_xpath('//*[@id="toc"]/ol/li[1]/a')
self.assertEquals(toc_link_1005_1.get_attribute('data-section-id'), '1005-1')
def tearDown(self):
print("https://saucelabs.com/jobs/%s" % self.driver.session_id)
self.driver.quit()
if __name__ == '__main__':
unittest.main()
| Python | 0.86565 |
bbee1e9b8563d56c0d0acbfc6ae61334f8251159 | Reset default value in test so that it doesn't produce error | enthought/traits/tests/undefined_test_case.py | enthought/traits/tests/undefined_test_case.py | import unittest
from enthought.traits.api import HasTraits, Str, Undefined, ReadOnly, Float
class Foo(HasTraits):
name = Str()
original_name = ReadOnly
bar = Str
baz = Float
def _name_changed(self):
if self.original_name is Undefined:
self.original_name = self.name
class Bar(HasTraits):
name = Str(Undefined)
class UndefinedTestCase(unittest.TestCase):
def test_initial_value(self):
b = Bar()
self.failUnlessEqual( b.name, Undefined )
return
def test_name_change(self):
b = Bar()
b.name = 'first'
self.failUnlessEqual( b.name, 'first' )
return
def test_read_only_write_once(self):
f = Foo()
self.failUnlessEqual(f.name, '')
self.failUnless(f.original_name is Undefined)
f.name = 'first'
self.failUnlessEqual(f.name, 'first')
self.failUnlessEqual(f.original_name, 'first')
f.name = 'second'
self.failUnlessEqual(f.name, 'second')
self.failUnlessEqual(f.original_name, 'first')
return
def test_read_only_write_once_from_constructor(self):
f = Foo(name='first')
f.name = 'first'
self.failUnlessEqual(f.name, 'first')
self.failUnlessEqual(f.original_name, 'first')
f.name = 'second'
self.failUnlessEqual(f.name, 'second')
self.failUnlessEqual(f.original_name, 'first')
return
### EOF ####################################################################### | import unittest
from enthought.traits.api import HasTraits, Str, Undefined, ReadOnly, Float
class Foo(HasTraits):
name = Str()
original_name = ReadOnly
bar = Str
baz = Float
def _name_changed(self):
if self.original_name is Undefined:
self.original_name = self.name
class Bar(HasTraits):
name = Str(Undefined())
class UndefinedTestCase(unittest.TestCase):
def test_initial_value(self):
b = Bar()
self.failUnlessEqual( b.name, Undefined )
return
def test_name_change(self):
b = Bar()
b.name = 'first'
self.failUnlessEqual( b.name, 'first' )
return
def test_read_only_write_once(self):
f = Foo()
self.failUnlessEqual(f.name, '')
self.failUnless(f.original_name is Undefined)
f.name = 'first'
self.failUnlessEqual(f.name, 'first')
self.failUnlessEqual(f.original_name, 'first')
f.name = 'second'
self.failUnlessEqual(f.name, 'second')
self.failUnlessEqual(f.original_name, 'first')
return
def test_read_only_write_once_from_constructor(self):
f = Foo(name='first')
f.name = 'first'
self.failUnlessEqual(f.name, 'first')
self.failUnlessEqual(f.original_name, 'first')
f.name = 'second'
self.failUnlessEqual(f.name, 'second')
self.failUnlessEqual(f.original_name, 'first')
return
### EOF ####################################################################### | Python | 0.000001 |
85f14fffc01002e5a1c0a7a3644a81a4ade61745 | Bump dsub version to 0.2.1 | dsub/_dsub_version.py | dsub/_dsub_version.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Single source of truth for dsub's version.
This must remain small and dependency-free so that any dsub module may
import it without creating circular dependencies. Note that this module
is parsed as a text file by setup.py and changes to the format of this
file could break setup.py.
The version should follow formatting requirements specified in PEP-440.
- https://www.python.org/dev/peps/pep-0440
A typical release sequence will be versioned as:
0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ...
"""
DSUB_VERSION = '0.2.1'
| # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Single source of truth for dsub's version.
This must remain small and dependency-free so that any dsub module may
import it without creating circular dependencies. Note that this module
is parsed as a text file by setup.py and changes to the format of this
file could break setup.py.
The version should follow formatting requirements specified in PEP-440.
- https://www.python.org/dev/peps/pep-0440
A typical release sequence will be versioned as:
0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ...
"""
DSUB_VERSION = '0.2.1.dev0'
| Python | 0 |
edb6f738979e213cca3fd03991caebdf209b09b9 | Fix permissions script | static/extension/dynamic_scope/dynamic_permission.py | static/extension/dynamic_scope/dynamic_permission.py | # oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2016, Gluu
#
# Author: Yuriy Movchan
#
from org.xdi.model.custom.script.type.scope import DynamicScopeType
from org.xdi.service.cdi.util import CdiUtil
from org.xdi.oxauth.service import UserService
from org.xdi.util import StringHelper, ArrayHelper
from java.util import Arrays, ArrayList
import java
class DynamicScope(DynamicScopeType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, configurationAttributes):
print "Permission dynamic scope. Initialization"
print "Permission dynamic scope. Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "Permission dynamic scope. Destroy"
print "Permission dynamic scope. Destroyed successfully"
return True
# Update Json Web token before signing/encrypring it
# dynamicScopeContext is org.xdi.oxauth.service.external.context.DynamicScopeExternalContext
# configurationAttributes is java.util.Map<String, SimpleCustomProperty>
def update(self, dynamicScopeContext, configurationAttributes):
print "Permission dynamic scope scope. Update method"
authorizationGrant = dynamicScopeContext.getAuthorizationGrant()
user = dynamicScopeContext.getUser()
jsonWebResponse = dynamicScopeContext.getJsonWebResponse()
claims = jsonWebResponse.getClaims()
userService = CdiUtil.bean(UserService)
roles = userService.getCustomAttribute(user, "role")
if roles != None:
claims.setClaim("role", roles.getValues())
return True
def logout(self, configurationAttributes, requestParameters):
return True
def getApiVersion(self):
return 1
| # oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2016, Gluu
#
# Author: Yuriy Movchan
#
from org.xdi.model.custom.script.type.scope import DynamicScopeType
from org.xdi.oxauth.service import UserService
from org.xdi.util import StringHelper, ArrayHelper
from java.util import Arrays, ArrayList
import java
class DynamicScope(DynamicScopeType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, configurationAttributes):
print "Permission dynamic scope. Initialization"
print "Permission dynamic scope. Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "Permission dynamic scope. Destroy"
print "Permission dynamic scope. Destroyed successfully"
return True
# Update Json Web token before signing/encrypring it
# dynamicScopeContext is org.xdi.oxauth.service.external.context.DynamicScopeExternalContext
# configurationAttributes is java.util.Map<String, SimpleCustomProperty>
def update(self, dynamicScopeContext, configurationAttributes):
print "Permission dynamic scope scope. Update method"
authorizationGrant = dynamicScopeContext.getAuthorizationGrant()
user = dynamicScopeContext.getUser()
jsonWebResponse = dynamicScopeContext.getJsonWebResponse()
claims = jsonWebResponse.getClaims()
userService = UserService.instance()
roles = userService.getCustomAttribute(user, "role")
if roles != None:
claims.setClaim("role", roles.getValues())
return True
def logout(self, configurationAttributes, requestParameters):
return True
def getApiVersion(self):
return 1
| Python | 0.000001 |
c1923339d7d64b9e85e3a2a1522ff0442e18a798 | Update common version (#6060) | sdk/core/azure-common/azure/common/_version.py | sdk/core/azure-common/azure/common/_version.py | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
VERSION = "1.1.23"
| #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
VERSION = "1.1.22"
| Python | 0 |
9711a2208525b200c98997248c358432a26bf7a3 | improve cleanup_ctx_databases | dvhb_hybrid/config.py | dvhb_hybrid/config.py | import functools
import json
import os
def absdir(directory, base_dir):
if not directory.startswith('/'):
directory = os.path.join(base_dir, directory)
return os.path.normpath(directory)
def dirs(list_dir, base_dir):
result = []
for i in list_dir:
result.append(absdir(i, base_dir))
return result
def convert_to_djangodb(d, name, base_dir='/tmp'):
if d.get('database'):
db = {
k.upper(): v
for k, v in d.items()
if v}
if db.pop('GIS', None):
db['ENGINE'] = 'django.contrib.gis.db.backends.postgis'
else:
db['ENGINE'] = 'django.db.backends.postgresql_psycopg2'
db['NAME'] = db.pop('DATABASE')
# Use same db name for test. Use custom config for tests to separate test and dev dbs.
db['TEST'] = {'NAME': db['NAME']}
else:
return {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(base_dir, name + '.sqlite3'),
}
return db
def db_to_settings(db_dict, base_dir):
return {
n: convert_to_djangodb(v, n, base_dir=base_dir)
for n, v in db_dict.items()
}
def convert_to_django_redis(config):
return {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'redis://{host}:{port}/{db}'.format(**config),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
}
}
def redis_to_settings(redis_dict):
return {name: convert_to_django_redis(value) for name, value in redis_dict.items()}
async def cleanup_ctx_redis(app, cfg_key='default', app_key='redis'):
import aioredis
cfg = app.context.config.redis[cfg_key].connection
pool = await aioredis.create_pool(
(cfg.host, cfg.port),
db=cfg.db,
minsize=cfg.minsize,
maxsize=cfg.maxsize,
loop=app.loop)
app[app_key] = pool
yield
pool.close()
await pool.wait_closed()
cleanup_ctx_redis_sessions = functools.partial(
cleanup_ctx_redis, app_key='sessions', cfg_key='sessions')
async def cleanup_ctx_aiopg(app, cfg_key='default', app_key='db'):
import aiopg.sa
from dvhb_hybrid.amodels import AppModels
dbparams = app.context.config.databases.get(cfg_key)
app.models = app.m = AppModels(app)
async with aiopg.sa.create_engine(dbparams.uri) as pool:
app[app_key] = pool
yield
async def cleanup_ctx_databases(app, cfg_key='default', app_key='db'):
import asyncpgsa
from dvhb_hybrid.amodels import AppModels
app.models = app.m = AppModels(app)
async def init(connection):
for t in ['json', 'jsonb']:
await connection.set_type_codec(
t,
encoder=lambda x: x,
decoder=json.loads,
schema='pg_catalog',
)
dbparams = app.context.config.databases.get(cfg_key)
if 'uri' in dbparams:
dbargs, dbkwargs = (dbparams.uri,), {}
else:
dbargs, dbkwargs = (), dbparams
async with asyncpgsa.create_pool(*dbargs, init=init, **dbkwargs) as pool:
app[app_key] = pool
yield
| import functools
import os
def absdir(directory, base_dir):
if not directory.startswith('/'):
directory = os.path.join(base_dir, directory)
return os.path.normpath(directory)
def dirs(list_dir, base_dir):
result = []
for i in list_dir:
result.append(absdir(i, base_dir))
return result
def convert_to_djangodb(d, name, base_dir='/tmp'):
if d.get('database'):
db = {
k.upper(): v
for k, v in d.items()
if v}
if db.pop('GIS', None):
db['ENGINE'] = 'django.contrib.gis.db.backends.postgis'
else:
db['ENGINE'] = 'django.db.backends.postgresql_psycopg2'
db['NAME'] = db.pop('DATABASE')
# Use same db name for test. Use custom config for tests to separate test and dev dbs.
db['TEST'] = {'NAME': db['NAME']}
else:
return {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(base_dir, name + '.sqlite3'),
}
return db
def db_to_settings(db_dict, base_dir):
return {
n: convert_to_djangodb(v, n, base_dir=base_dir)
for n, v in db_dict.items()
}
def convert_to_django_redis(config):
return {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'redis://{host}:{port}/{db}'.format(**config),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
}
}
def redis_to_settings(redis_dict):
return {name: convert_to_django_redis(value) for name, value in redis_dict.items()}
async def cleanup_ctx_redis(app, cfg_key='default', app_key='redis'):
import aioredis
cfg = app.context.config.redis[cfg_key].connection
pool = await aioredis.create_pool(
(cfg.host, cfg.port),
db=cfg.db,
minsize=cfg.minsize,
maxsize=cfg.maxsize,
loop=app.loop)
app[app_key] = pool
yield
pool.close()
await pool.wait_closed()
cleanup_ctx_redis_sessions = functools.partial(
cleanup_ctx_redis, app_key='sessions', cfg_key='sessions')
async def cleanup_ctx_aiopg(app, cfg_key='default', app_key='db'):
import aiopg.sa
from dvhb_hybrid.amodels import AppModels
dbparams = app.context.config.databases.get(cfg_key)
app.models = app.m = AppModels(app)
async with aiopg.sa.create_engine(dbparams.uri) as pool:
app[app_key] = pool
yield
async def cleanup_ctx_databases(app, cfg_key='default', app_key='db'):
import asyncpgsa
from dvhb_hybrid.amodels import AppModels
dbparams = app.context.config.databases.get(cfg_key)
app.models = app.m = AppModels(app)
async with asyncpgsa.create_pool(**dbparams) as pool:
app[app_key] = pool
yield
| Python | 0.000001 |
3ff373ed0d5349087a77b2a96af41e0e5cc9c15d | add UI for boardd loopback test | selfdrive/boardd/tests/test_boardd_loopback.py | selfdrive/boardd/tests/test_boardd_loopback.py | #!/usr/bin/env python3
import os
import random
import time
from collections import defaultdict
from functools import wraps
import cereal.messaging as messaging
from cereal import car
from common.basedir import PARAMS
from common.params import Params
from common.spinner import Spinner
from panda import Panda
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car import make_can_msg
from selfdrive.test.helpers import with_processes
def reset_panda(fn):
@wraps(fn)
def wrapper():
p = Panda()
for i in [0, 1, 2, 0xFFFF]:
p.can_clear(i)
p.reset()
p.close()
fn()
return wrapper
os.environ['STARTED'] = '1'
os.environ['BOARDD_LOOPBACK'] = '1'
os.environ['PARAMS_PATH'] = PARAMS
@reset_panda
@with_processes(['boardd'])
def test_boardd_loopback():
# wait for boardd to init
spinner = Spinner()
time.sleep(2)
# boardd blocks on CarVin and CarParams
cp = car.CarParams.new_message()
cp.safetyModel = car.CarParams.SafetyModel.allOutput
Params().put("CarVin", b"0"*17)
Params().put("CarParams", cp.to_bytes())
sendcan = messaging.pub_sock('sendcan')
can = messaging.sub_sock('can', conflate=False, timeout=100)
time.sleep(1)
n = 1000
for i in range(n):
spinner.update(f"boardd loopback {i}/{n}")
sent_msgs = defaultdict(set)
for _ in range(random.randrange(10)):
to_send = []
for __ in range(random.randrange(100)):
bus = random.randrange(3)
addr = random.randrange(1, 1<<29)
dat = bytes([random.getrandbits(8) for _ in range(random.randrange(1, 9))])
sent_msgs[bus].add((addr, dat))
to_send.append(make_can_msg(addr, dat, bus))
sendcan.send(can_list_to_can_capnp(to_send, msgtype='sendcan'))
max_recv = 10
while max_recv > 0 and any(len(sent_msgs[bus]) for bus in range(3)):
recvd = messaging.drain_sock(can, wait_for_one=True)
for msg in recvd:
for m in msg.can:
if m.src >= 128:
k = (m.address, m.dat)
assert k in sent_msgs[m.src-128]
sent_msgs[m.src-128].discard(k)
max_recv -= 1
# if a set isn't empty, messages got dropped
for bus in range(3):
assert not len(sent_msgs[bus]), f"loop {i}: bus {bus} missing {len(sent_msgs[bus])} messages"
spinner.close()
| #!/usr/bin/env python3
import os
import random
import time
from collections import defaultdict
from functools import wraps
import cereal.messaging as messaging
from cereal import car
from common.basedir import PARAMS
from common.params import Params
from panda import Panda
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car import make_can_msg
from selfdrive.test.helpers import with_processes
def reset_panda(fn):
@wraps(fn)
def wrapper():
p = Panda()
for i in [0, 1, 2, 0xFFFF]:
p.can_clear(i)
p.reset()
p.close()
fn()
return wrapper
os.environ['STARTED'] = '1'
os.environ['BOARDD_LOOPBACK'] = '1'
os.environ['PARAMS_PATH'] = PARAMS
@reset_panda
@with_processes(['boardd'])
def test_boardd_loopback():
# wait for boardd to init
time.sleep(2)
# boardd blocks on CarVin and CarParams
cp = car.CarParams.new_message()
cp.safetyModel = car.CarParams.SafetyModel.allOutput
Params().put("CarVin", b"0"*17)
Params().put("CarParams", cp.to_bytes())
sendcan = messaging.pub_sock('sendcan')
can = messaging.sub_sock('can', conflate=False, timeout=100)
time.sleep(1)
for i in range(1000):
sent_msgs = defaultdict(set)
for _ in range(random.randrange(10)):
to_send = []
for __ in range(random.randrange(100)):
bus = random.randrange(3)
addr = random.randrange(1, 1<<29)
dat = bytes([random.getrandbits(8) for _ in range(random.randrange(1, 9))])
sent_msgs[bus].add((addr, dat))
to_send.append(make_can_msg(addr, dat, bus))
sendcan.send(can_list_to_can_capnp(to_send, msgtype='sendcan'))
max_recv = 10
while max_recv > 0 and any(len(sent_msgs[bus]) for bus in range(3)):
recvd = messaging.drain_sock(can, wait_for_one=True)
for msg in recvd:
for m in msg.can:
if m.src >= 128:
k = (m.address, m.dat)
assert k in sent_msgs[m.src-128]
sent_msgs[m.src-128].discard(k)
max_recv -= 1
# if a set isn't empty, messages got dropped
for bus in range(3):
assert not len(sent_msgs[bus]), f"loop {i}: bus {bus} missing {len(sent_msgs[bus])} messages"
| Python | 0 |
4019372609565b074a5c3ba946245b61c8479ada | update dev version after 2.1.0 tag [skip ci] | py/fiberassign/_version.py | py/fiberassign/_version.py | __version__ = '2.1.0.dev2650'
| __version__ = '2.1.0'
| Python | 0 |
9602574af41a9c09edbc84bf77bde3a285d71741 | use datastore client in example | examples/google/radiology/upload_storage_radiology.py | examples/google/radiology/upload_storage_radiology.py | #!/usr/bin/env python
# RADIOLOGY ---------------------------------------------------
# This is an example script to upload data (images, text, metadata) to
# google cloud storage and datastore. Data MUST be de-identified
import os
# Start google storage client for pmc-stanford
from som.api.google.datastore import DataStoreClient as Client
client = Client(bucket_name='radiology')
# big_query not developed yet
collection = client.create_collection(uid='IRB41449')
# Let's load some dummy data from deid
from deid.data import get_dataset
from deid.dicom import get_files
dicom_files = get_files(get_dataset('dicom-cookies'))
# Now de-identify to get clean files
from deid.dicom import get_identifiers, replace_identifiers
ids=get_identifiers(dicom_files)
updated_files = replace_identifiers(dicom_files=dicom_files,
ids=ids)
# Define some metadata for the entity
metadata = { "source_id" : "cookieTumorDatabase",
"id":"cookie-47",
"Modality": "cookie"}
# Upload the dataset
client.upload_dataset(images=updated_files,
collection=collection,
uid=metadata['id'],
entity_metadata=metadata)
# Now try with adding metadata for an image
images_metadata = {
updated_files[0]:
{
"Modality":"cookie",
"Type": "chocolate-chip",
"Width": 350,
"Height": 350
}
}
# And again do the call
client.upload_dataset(images=updated_files,
collection=collection,
uid="cookie-47",
images_metadata=images_metadata)
| #!/usr/bin/env python
# RADIOLOGY ---------------------------------------------------
# This is an example script to upload data (images, text, metadata) to
# google cloud storage and datastore. Data MUST be de-identified
import os
# Start google storage client for pmc-stanford
from som.api.google import Client
client = Client(use_bigquery=False, bucket_name='radiology')
collection = client.create_collection(uid='IRB41449')
# Let's load some dummy data from deid
from deid.data import get_dataset
from deid.dicom import get_files
dicom_files = get_files(get_dataset('dicom-cookies'))
# Now de-identify to get clean files
from deid.dicom import get_identifiers, replace_identifiers
ids=get_identifiers(dicom_files)
updated_files = replace_identifiers(dicom_files=dicom_files,
ids=ids)
# Define some metadata for the entity
metadata = { "source_id" : "cookieTumorDatabase",
"id":"cookie-47",
"Modality": "cookie"}
# Upload the dataset
client.upload_dataset(images=updated_files,
collection=collection,
uid=metadata['id'],
entity_metadata=metadata)
# Now try with adding metadata for an image
images_metadata = {
updated_files[0]:
{
"Modality":"cookie",
"Type": "chocolate-chip",
"Width": 350,
"Height": 350
}
}
# And again do the call
client.upload_dataset(images=updated_files,
collection=collection,
uid="cookie-47",
images_metadata=images_metadata)
| Python | 0 |
951b6b9cc14e323dc97aa6e67dee17ef110e673f | check for exclusive try/else and if/else | pychecker2/utest/scopes.py | pychecker2/utest/scopes.py | from pychecker2.TestSupport import WarningTester
from pychecker2 import ScopeChecks
class RedefinedTestCase(WarningTester):
def testScopes(self):
w = ScopeChecks.RedefineCheck.redefinedScope
self.warning('def f(): pass\n'
'def f(): pass\n',
1, w, 'f', 2)
self.warning('class C:\n'
' def g(self): pass\n'
' def g(self): pass\n',
2, w, 'g', 3)
self.silent('def s(): pass\n'
'def f(): pass\n')
self.silent('import sys\n'
'if sys.argv:\n'
' def f(): return 1\n'
'else:\n'
' def f(): return 0\n')
self.warning('import sys\n'
'if sys.argv:\n'
' def f(): return 1\n'
' def f(): return 0\n',
3, w, 'f', 4)
self.warning('try:\n'
' def f(): return 1\n'
'except Exception:\n'
' pass\n'
'else:\n'
' def f(): return 0\n',
2, w, 'f', 6)
| from pychecker2.TestSupport import WarningTester
from pychecker2 import ScopeChecks
class RedefinedTestCase(WarningTester):
def testScopes(self):
self.warning('def f(): pass\n'
'def f(): pass\n',
1, ScopeChecks.RedefineCheck.redefinedScope, 'f', 2)
self.warning('class C:\n'
' def g(self): pass\n'
' def g(self): pass\n',
2, ScopeChecks.RedefineCheck.redefinedScope, 'g', 3)
self.silent('def s(): pass\n'
'def f(): pass\n')
| Python | 0 |
a434050e0f1c9f3e162898a3687cd7de8b77980c | Update load.py | simphony_mayavi/load.py | simphony_mayavi/load.py | from mayavi.core.api import registry
from simphony_mayavi.adapt2cuds import adapt2cuds
def load(filename, name=None, kind=None, rename_arrays=None):
""" Load the file data into a CUDS container.
"""
data_set = _read(filename)
return adapt2cuds(
data_set, name, kind, rename_arrays)
def _read(filename):
""" Find a suitable reader and read in the tvtk.Dataset.
"""
metasource = registry.get_file_reader(filename)
if metasource is None:
message = 'No suitable reader found for file: {}'
raise RuntimeError(message.format(filename))
if metasource.factory is None:
source = metasource.get_callable()()
source.initialize(filename)
source.update()
reader = source.reader
else:
message = 'Mayavi reader that requires a scene is not supported : {}'
raise NotImplementedError(message.format(filename))
if len(source.outputs) != 1:
message = 'Only one output is expected from the reader'
raise RuntimeError(message)
return reader.output
| from mayavi.core.api import registry
from simphony_mayavi.adapt2cuds import adapt2cuds
def load(filename, name=None, kind=None, rename_arrays=None):
""" Load the file data into a CUDS container.
"""
data_set = _read(filename)
return adapt2cuds(
data_set, name, kind, rename_arrays)
def _read(filename):
""" Find a suitable reader and read in the tvtk.Dataset.
"""
metasource = registry.get_file_reader(filename)
if metasource is None:
message = 'No suitable reader found for file: {}'
raise RuntimeError(message.format(filename))
if metasource.factory is None:
source = metasource.get_callable()()
source.initialize(filename)
source.update()
reader = source.reader
else:
message = 'Mayavi reader that requires a scene is not supported : {}'
raise NotImplementedError(message.format(filename))
if len(source.outputs) != 1:
message = 'Only one output is expected from the reader'
raise RuntimeError(message)
return reader.output
| Python | 0.000001 |
2bf0c9e0d8bbce50f06ca08c79f97ecf5b76e21b | Fix logging | simplesqlite/_logger.py | simplesqlite/_logger.py | # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import absolute_import, unicode_literals
import logbook
import sqliteschema
import tabledata
logger = logbook.Logger("SimpleSQLie")
logger.disable()
def set_logger(is_enable):
if is_enable != logger.disabled:
return
if is_enable:
logger.enable()
else:
logger.disable()
tabledata.set_logger(is_enable)
sqliteschema.set_logger(is_enable)
try:
import pytablereader
pytablereader.set_logger(is_enable)
except ImportError:
pass
def set_log_level(log_level):
"""
Set logging level of this module. Using
`logbook <http://logbook.readthedocs.io/en/stable/>`__ module for logging.
:param int log_level:
One of the log level of
`logbook <http://logbook.readthedocs.io/en/stable/api/base.html>`__.
Disabled logging if ``log_level`` is ``logbook.NOTSET``.
:raises LookupError: If ``log_level`` is an invalid value.
"""
# validate log level
logbook.get_level_name(log_level)
if log_level == logger.level:
return
if log_level == logbook.NOTSET:
set_logger(is_enable=False)
else:
set_logger(is_enable=True)
logger.level = log_level
tabledata.set_log_level(log_level)
sqliteschema.set_log_level(log_level)
try:
import pytablereader
pytablereader.set_log_level(log_level)
except ImportError:
pass
| # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import absolute_import, unicode_literals
import logbook
import tabledata
logger = logbook.Logger("SimpleSQLie")
logger.disable()
def set_logger(is_enable):
if is_enable != logger.disabled:
return
if is_enable:
logger.enable()
else:
logger.disable()
tabledata.set_logger(is_enable)
try:
import pytablereader
pytablereader.set_logger(is_enable)
except ImportError:
pass
def set_log_level(log_level):
"""
Set logging level of this module. Using
`logbook <http://logbook.readthedocs.io/en/stable/>`__ module for logging.
:param int log_level:
One of the log level of
`logbook <http://logbook.readthedocs.io/en/stable/api/base.html>`__.
Disabled logging if ``log_level`` is ``logbook.NOTSET``.
:raises LookupError: If ``log_level`` is an invalid value.
"""
# validate log level
logbook.get_level_name(log_level)
if log_level == logger.level:
return
if log_level == logbook.NOTSET:
set_logger(is_enable=False)
else:
set_logger(is_enable=True)
logger.level = log_level
tabledata.set_log_level(log_level)
try:
import pytablereader
pytablereader.set_log_level(log_level)
except ImportError:
pass
| Python | 0.000007 |
930508e5ec00d9f174409097ba54e70c7c6b2b3c | Fix #421: RPN_DEFNS needs to passed to Pelegant via env | sirepo/pkcli/elegant.py | sirepo/pkcli/elegant.py | # -*- coding: utf-8 -*-
"""Wrapper to run elegant from the command line.
:copyright: Copyright (c) 2015 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkio
from pykern import pkresource
from pykern import pksubprocess
from pykern.pkdebug import pkdp, pkdc
from sirepo import mpi
from sirepo import simulation_db
from sirepo.template import template_common
from sirepo.template.elegant import extract_report_data, ELEGANT_LOG_FILE
import copy
import os
import re
import subprocess
_ELEGANT_STDERR_FILE = 'elegant.stderr'
def run(cfg_dir):
"""Run elegant in ``cfg_dir``
The files in ``cfg_dir`` must be configured properly.
Args:
cfg_dir (str): directory to run elegant in
"""
with pkio.save_chdir(cfg_dir):
_run_elegant(bunch_report=True)
_extract_bunch_report()
def run_background(cfg_dir):
"""Run elegant as a background task
Args:
cfg_dir (str): directory to run elegant in
"""
with pkio.save_chdir(cfg_dir):
_run_elegant(with_mpi=True);
simulation_db.write_result({})
def _run_elegant(bunch_report=False, with_mpi=False):
exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
if bunch_report and re.search('\&sdds_beam\s', elegant_file):
return
pkio.write_text('elegant.lte', lattice_file)
ele = 'elegant.ele'
pkio.write_text(ele, elegant_file)
# TODO(robnagler) Need to handle this specially, b/c different binary
env = copy.deepcopy(os.environ)
env['RPN_DEFNS'] = pkresource.filename('defns.rpn')
if with_mpi and mpi.cfg.cores > 1:
return mpi.run_program(['Pelegant', ele], output=ELEGANT_LOG_FILE, env=env)
pksubprocess.check_call_with_signals(
['elegant', ele],
output=ELEGANT_LOG_FILE,
env=env,
msg=pkdp,
)
def _extract_bunch_report():
data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
if data['models']['bunchSource']['inputSource'] == 'sdds_beam':
file = 'bunchFile-sourceFile.{}'.format(data['models']['bunchFile']['sourceFile'])
else:
file = 'elegant.bun'
info = extract_report_data(file, data['models'][data['report']], data['models']['bunch']['p_central_mev'], 0)
simulation_db.write_result(info)
| # -*- coding: utf-8 -*-
"""Wrapper to run elegant from the command line.
:copyright: Copyright (c) 2015 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkio
from pykern import pkresource
from pykern import pksubprocess
from pykern.pkdebug import pkdp, pkdc
from sirepo import mpi
from sirepo import simulation_db
from sirepo.template import template_common
from sirepo.template.elegant import extract_report_data, ELEGANT_LOG_FILE
import copy
import os
import re
import subprocess
_ELEGANT_STDERR_FILE = 'elegant.stderr'
def run(cfg_dir):
"""Run elegant in ``cfg_dir``
The files in ``cfg_dir`` must be configured properly.
Args:
cfg_dir (str): directory to run elegant in
"""
with pkio.save_chdir(cfg_dir):
_run_elegant(bunch_report=True)
_extract_bunch_report()
def run_background(cfg_dir):
"""Run elegant as a background task
Args:
cfg_dir (str): directory to run elegant in
"""
with pkio.save_chdir(cfg_dir):
_run_elegant(with_mpi=True);
simulation_db.write_result({})
def _run_elegant(bunch_report=False, with_mpi=False):
exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
if bunch_report and re.search('\&sdds_beam\s', elegant_file):
return
pkio.write_text('elegant.lte', lattice_file)
ele = 'elegant.ele'
pkio.write_text(ele, elegant_file)
# TODO(robnagler) Need to handle this specially, b/c different binary
if with_mpi and mpi.cfg.cores > 1:
return mpi.run_program(['Pelegant', ele], output=ELEGANT_LOG_FILE)
env = copy.deepcopy(os.environ)
env['RPN_DEFNS'] = pkresource.filename('defns.rpn')
pksubprocess.check_call_with_signals(
['elegant', ele],
output=ELEGANT_LOG_FILE,
env=env,
msg=pkdp,
)
def _extract_bunch_report():
data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
if data['models']['bunchSource']['inputSource'] == 'sdds_beam':
file = 'bunchFile-sourceFile.{}'.format(data['models']['bunchFile']['sourceFile'])
else:
file = 'elegant.bun'
info = extract_report_data(file, data['models'][data['report']], data['models']['bunch']['p_central_mev'], 0)
simulation_db.write_result(info)
| Python | 0 |
5c8a6072309989ac97eefc2a6f63a6082a2c5ff0 | Update matching_specific_string.py | hacker_rank/contests/regular_expresso/matching_specific_string.py | hacker_rank/contests/regular_expresso/matching_specific_string.py | Regex_Pattern = r'hackerrank' # Do not delete 'r'.
| Python | 0.000002 | |
05b15f2db049e8b722f17867f5163c0b6e3a3108 | Allow POST to /git-update url. | pthemes.py | pthemes.py | import os
import logging
from flask import Flask, render_template, redirect, url_for, flash
from pq import PQ
from api import APIGrabber
from db import PonyDB
logging.basicConfig()
# Config
# ---------------
# App config
app = Flask(__name__)
app.config.from_object(os.environ.get('APP_SETTINGS', None))
db = PonyDB(app)
pq = PQ(db.get_connection()) # Postgres work queue
if db.table_exists('queue') is False:
pq.create()
queue = pq['themes']
# Routes
# ---------------
@app.route('/')
def show_entries():
"""
List out all the themes.
"""
image_themes = db.get_image_themes()
no_image_themes = db.get_no_image_themes()
sha = db.get_sha()
counts = {}
counts['image_themes'] = len(image_themes)
counts['no_image_themes'] = len(no_image_themes)
counts['total'] = counts['image_themes'] + counts['no_image_themes']
for t in image_themes:
if t['image_urls'] is not None:
t['image_urls'] = t['image_urls'].split(',')
return render_template('list.html',
image_themes=image_themes,
no_image_themes=no_image_themes,
counts=counts,
sha=sha)
@app.route('/git-update', methods=['GET', 'POST'])
def refresh_themes():
"""
Adds a job to the job queue. The job is to refresh the theme list. As
all jobs are identical, the job will only be added if there are no
existing jobs.
"""
if len(queue) < 1:
queue.put('Refresh themes')
flash('Added theme refresh job to queue.')
else:
flash('A theme refresh job has already been scheduled.')
return redirect(url_for('show_entries'))
# App decorators
# ---------------
# @app.cli.command('initdb')
# def initdb_command():
# """Creates the database tables."""
# db.init_db()
# @app.cli.command('populatedb')
# def populatedb_command():
# db.populate_db()
@app.cli.command('worker')
def queue_worker():
"""
Process queue tasks and then exit
"""
for task in queue:
if task is None:
break
a = APIGrabber(app.config['GITHUB_API_KEY'])
sha, data = a.process()
db.populate_db(sha, data)
if __name__ == "__main__":
app.run()
| import os
import logging
from flask import Flask, render_template, redirect, url_for, flash
from pq import PQ
from api import APIGrabber
from db import PonyDB
logging.basicConfig()
# Config
# ---------------
# App config
app = Flask(__name__)
app.config.from_object(os.environ.get('APP_SETTINGS', None))
db = PonyDB(app)
pq = PQ(db.get_connection()) # Postgres work queue
if db.table_exists('queue') is False:
pq.create()
queue = pq['themes']
# Routes
# ---------------
@app.route('/')
def show_entries():
"""
List out all the themes.
"""
image_themes = db.get_image_themes()
no_image_themes = db.get_no_image_themes()
sha = db.get_sha()
counts = {}
counts['image_themes'] = len(image_themes)
counts['no_image_themes'] = len(no_image_themes)
counts['total'] = counts['image_themes'] + counts['no_image_themes']
for t in image_themes:
if t['image_urls'] is not None:
t['image_urls'] = t['image_urls'].split(',')
return render_template('list.html',
image_themes=image_themes,
no_image_themes=no_image_themes,
counts=counts,
sha=sha)
@app.route('/git-update', methods=['GET'])
def refresh_themes():
"""
Adds a job to the job queue. The job is to refresh the theme list. As
all jobs are identical, the job will only be added if there are no
existing jobs.
"""
if len(queue) < 1:
queue.put('Refresh themes')
flash('Added theme refresh job to queue.')
else:
flash('A theme refresh job has already been scheduled.')
return redirect(url_for('show_entries'))
# App decorators
# ---------------
# @app.cli.command('initdb')
# def initdb_command():
# """Creates the database tables."""
# db.init_db()
# @app.cli.command('populatedb')
# def populatedb_command():
# db.populate_db()
@app.cli.command('worker')
def queue_worker():
"""
Process queue tasks and then exit
"""
for task in queue:
if task is None:
break
a = APIGrabber(app.config['GITHUB_API_KEY'])
sha, data = a.process()
db.populate_db(sha, data)
if __name__ == "__main__":
app.run()
| Python | 0 |
66370cecd7bdf9d0b5ecd358aa58b4f567d45c95 | add a new keyword to the pypy lexer | pygments/lexers/pypylog.py | pygments/lexers/pypylog.py | # -*- coding: utf-8 -*-
"""
pygments.lexers.pypylog
~~~~~~~~~~~~~~~~~~~~~~~
Lexer for pypy log files.
:copyright: Copyright 2006-2011 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include
from pygments.token import Text, Keyword, Number, Comment, Punctuation, Name, \
String
__all__ = ["PyPyLogLexer"]
class PyPyLogLexer(RegexLexer):
"""
Lexer for PyPy log files.
*New in Pygments 1.5.*
"""
name = "PyPy Log"
aliases = ["pypylog", "pypy"]
filenames = ["*.pypylog"]
mimetypes = ['application/x-pypylog']
tokens = {
"root": [
(r"\[\w+\] {jit-log-.*?$", Keyword, "jit-log"),
(r"\[\w+\] {jit-backend-counts$", Keyword, "jit-backend-counts"),
include("extra-stuff"),
],
"jit-log": [
(r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"),
(r"[ifp]\d+", Name),
(r"ptr\d+", Name),
(r"(\()([\w_]+(?:\.[\w_]+)?)(\))",
bygroups(Punctuation, Name.Builtin, Punctuation)),
(r"[\[\]=,()]", Punctuation),
(r"(\d+\.\d+|inf|-inf)", Number.Float),
(r"-?\d+", Number.Integer),
(r"'.*'", String),
(r"(None|descr|ConstClass|ConstPtr)", Name),
(r"<.*?>", Name.Builtin),
(r"(debug_merge_point|jump|finish)", Name.Class),
(r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|"
r"int_floordiv|int_mod|int_lshift|int_rshift|int_and|int_or|"
r"int_xor|int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|"
r"int_is_true|"
r"uint_floordiv|uint_ge|uint_lt|"
r"float_add|float_sub|float_mul|float_truediv|"
r"float_eq|float_ne|float_ge|float_gt|float_le|float_lt|float_abs|"
r"ptr_eq|"
r"cast_int_to_float|cast_float_to_int|cast_opaque_ptr|"
r"force_token|quasiimmut_field|same_as|virtual_ref_finish|virtual_ref|"
r"call_may_force|call_assembler|call_loopinvariant|call_release_gil|call_pure|call|"
r"new_with_vtable|new_array|newstr|newunicode|new|"
r"arraylen_gc|"
r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|"
r"getarrayitem_raw|setarrayitem_raw|getfield_gc_pure|getfield_gc|getinteriorfield_gc|"
r"getfield_raw|setfield_gc|setfield_raw|"
r"strgetitem|strsetitem|strlen|copystrcontent|"
r"unicodegetitem|unicodesetitem|unicodelen|"
r"guard_true|guard_false|guard_value|guard_isnull|"
r"guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|"
r"guard_not_forced|guard_no_exception|guard_not_invalidated)",
Name.Builtin),
include("extra-stuff"),
],
"jit-backend-counts": [
(r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"),
(r"[:]", Punctuation),
(r"\d+", Number),
include("extra-stuff"),
],
"extra-stuff": [
(r"[\n\s]+", Text),
(r"#.*?$", Comment),
],
}
| # -*- coding: utf-8 -*-
"""
pygments.lexers.pypylog
~~~~~~~~~~~~~~~~~~~~~~~
Lexer for pypy log files.
:copyright: Copyright 2006-2011 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include
from pygments.token import Text, Keyword, Number, Comment, Punctuation, Name, \
String
__all__ = ["PyPyLogLexer"]
class PyPyLogLexer(RegexLexer):
"""
Lexer for PyPy log files.
*New in Pygments 1.5.*
"""
name = "PyPy Log"
aliases = ["pypylog", "pypy"]
filenames = ["*.pypylog"]
mimetypes = ['application/x-pypylog']
tokens = {
"root": [
(r"\[\w+\] {jit-log-.*?$", Keyword, "jit-log"),
(r"\[\w+\] {jit-backend-counts$", Keyword, "jit-backend-counts"),
include("extra-stuff"),
],
"jit-log": [
(r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"),
(r"[ifp]\d+", Name),
(r"ptr\d+", Name),
(r"(\()([\w_]+(?:\.[\w_]+)?)(\))",
bygroups(Punctuation, Name.Builtin, Punctuation)),
(r"[\[\]=,()]", Punctuation),
(r"(\d+\.\d+|inf|-inf)", Number.Float),
(r"-?\d+", Number.Integer),
(r"'.*'", String),
(r"(None|descr|ConstClass|ConstPtr)", Name),
(r"<.*?>", Name.Builtin),
(r"(debug_merge_point|jump|finish)", Name.Class),
(r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|"
r"int_floordiv|int_mod|int_lshift|int_rshift|int_and|int_or|"
r"int_xor|int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|"
r"int_is_true|"
r"uint_floordiv|uint_ge|uint_lt|"
r"float_add|float_sub|float_mul|float_truediv|"
r"float_eq|float_ne|float_ge|float_gt|float_le|float_lt|float_abs|"
r"ptr_eq|"
r"cast_int_to_float|cast_float_to_int|cast_opaque_ptr|"
r"force_token|quasiimmut_field|same_as|virtual_ref_finish|virtual_ref|"
r"call_may_force|call_assembler|call_loopinvariant|call_release_gil|call_pure|call|"
r"new_with_vtable|new_array|newstr|newunicode|new|"
r"arraylen_gc|"
r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|"
r"getarrayitem_raw|setarrayitem_raw|getfield_gc_pure|getfield_gc|"
r"getfield_raw|setfield_gc|setfield_raw|"
r"strgetitem|strsetitem|strlen|copystrcontent|"
r"unicodegetitem|unicodesetitem|unicodelen|"
r"guard_true|guard_false|guard_value|guard_isnull|"
r"guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|"
r"guard_not_forced|guard_no_exception|guard_not_invalidated)",
Name.Builtin),
include("extra-stuff"),
],
"jit-backend-counts": [
(r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"),
(r"[:]", Punctuation),
(r"\d+", Number),
include("extra-stuff"),
],
"extra-stuff": [
(r"[\n\s]+", Text),
(r"#.*?$", Comment),
],
}
| Python | 0 |
ebd152ca9b4126776e0f035477791be587907a8b | Fix coding style and add a file header. | pygments/lexers/pypylog.py | pygments/lexers/pypylog.py | # -*- coding: utf-8 -*-
"""
pygments.lexers.pypylog
~~~~~~~~~~~~~~~~~~~~~~~
Lexer for pypy log files.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include
from pygments.token import Text, Keyword, Number, Comment, Punctuation, Name, \
String
__all__ = ["PyPyLogLexer"]
class PyPyLogLexer(RegexLexer):
"""
Lexer for PyPy log files.
*New in Pygments 1.5.*
"""
name = "PyPy Log"
aliases = ["pypylog", "pypy"]
filenames = ["*.pypylog"]
mimetypes = ['application/x-pypylog']
tokens = {
"root": [
(r"\[\w+\] {jit-log-.*?$", Keyword, "jit-log"),
(r"\[\w+\] {jit-backend-counts$", Keyword, "jit-backend-counts"),
include("extra-stuff"),
],
"jit-log": [
(r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"),
(r"[ifp]\d+", Name),
(r"ptr\d+", Name),
(r"(\()([\w_]+(?:\.[\w_]+)?)(\))",
bygroups(Punctuation, Name.Builtin, Punctuation)),
(r"[\[\]=,()]", Punctuation),
(r"(\d+\.\d+|inf|-inf)", Number.Float),
(r"-?\d+", Number.Integer),
(r"'.*'", String),
(r"(None|descr|ConstClass|ConstPtr)", Name),
(r"<.*?>", Name.Builtin),
(r"(debug_merge_point|jump|finish)", Name.Class),
(r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|"
r"int_rshift|int_and|int_or|int_xor|int_eq|int_ne|int_ge|int_gt|"
r"int_le|int_lt|int_is_zero|int_is_true|"
r"uint_floordiv|uint_ge|uint_lt|"
r"float_add|float_sub|float_mul|float_truediv|"
r"float_eq|float_ne|float_gt|"
r"ptr_eq|"
r"force_token|"
r"call_may_force|call_assembler|call|"
r"new_with_vtable|new_array|newstr|newunicode|new|"
r"arraylen_gc|"
r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|"
r"getarrayitem_raw|setarrayitem_raw|getfield_gc_pure|getfield_gc|"
r"getfield_raw|setfield_gc|setfield_raw|"
r"strgetitem|strsetitem|strlen|copystrcontent|"
r"unicodegetitem|unicodesetitem|unicodelen|"
r"guard_true|guard_false|guard_value|guard_isnull|"
r"guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|"
r"guard_not_forced|guard_no_exception|guard_not_invalidated)",
Name.Builtin),
include("extra-stuff"),
],
"jit-backend-counts": [
(r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"),
(r"[:]", Punctuation),
(r"\d+", Number),
include("extra-stuff"),
],
"extra-stuff": [
(r"[\n\s]+", Text),
(r"#.*?$", Comment),
],
}
| from pygments.lexer import RegexLexer, bygroups, include
from pygments.token import (Text, Keyword, Number, Comment, Punctuation, Name,
String, Literal)
__all__ = [
"PyPyLogLexer",
]
class PyPyLogLexer(RegexLexer):
"""
Lexer for PyPy log files.
*New in Pygments 1.5.*
"""
name = "PyPy Log"
aliases = ["pypylog", "pypy"]
filenames = ["*.pypylog"]
mimetypes = ['application/x-pypylog']
tokens = {
"root": [
(r"\[\w+\] {jit-log-.*?$", Keyword, "jit-log"),
(r"\[\w+\] {jit-backend-counts$", Keyword, "jit-backend-counts"),
include("extra-stuff"),
],
"jit-log": [
(r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"),
(r"[ifp]\d+", Name),
(r"ptr\d+", Name),
(r"(\()([\w_]+(?:\.[\w_]+)?)(\))", bygroups(Punctuation, Name.Builtin, Punctuation)),
(r"[\[\]=,()]", Punctuation),
(r"(\d+\.\d+|inf|-inf)", Number.Float),
(r"-?\d+", Number.Integer),
(r"'.*'", String),
(r"(None|descr|ConstClass|ConstPtr)", Name),
(r"<.*?>", Name.Builtin),
(r"(debug_merge_point|jump|finish)", Name.Class),
(r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|int_rshift|int_and|int_or|int_xor|"
r"int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|int_is_true|"
r"uint_floordiv|"
r"uint_ge|uint_lt|"
r"float_add|float_sub|float_mul|float_truediv|"
r"float_eq|float_ne|float_gt|"
r"ptr_eq|"
r"force_token|"
r"call_may_force|call_assembler|call|"
r"new_with_vtable|new_array|newstr|newunicode|new|"
r"arraylen_gc|"
r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|getarrayitem_raw|setarrayitem_raw|"
r"getfield_gc_pure|getfield_gc|getfield_raw|setfield_gc|setfield_raw|"
r"strgetitem|strsetitem|strlen|copystrcontent|"
r"unicodegetitem|unicodesetitem|unicodelen|"
r"guard_true|guard_false|guard_value|guard_isnull|guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|guard_not_forced|guard_no_exception|guard_not_invalidated)", Name.Builtin),
include("extra-stuff"),
],
"jit-backend-counts": [
(r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"),
(r"[:]", Punctuation),
(r"\d+", Number),
include("extra-stuff"),
],
"extra-stuff": [
(r"[\n\s]+", Text),
(r"#.*?$", Comment),
],
}
| Python | 0 |
0b89e64128fbc2970027fde31758160bdce6d30d | Use unix line ending | pytable.py | pytable.py | # -*- coding: utf-8 -*-
from __future__ import print_function
class BaseRenderer(object):
def __init__(self, table):
self.table = table
self.output = []
def p(self, val):
self.output.append(val)
def __str__(self):
return ''.join(self.output)
class FancyRenderer(BaseRenderer):
def render(self, data):
self.widths = [col.max_width(data) + 1 for col in self.table.columns]
self._print_divider()
self.print_row([c.header for c in self.table.columns])
self._print_divider()
for row in data:
self.print_row([c.cell(row) for c in self.table.columns])
self._print_divider()
def print_row(self, values):
map(self.print_cell, values, self.widths)
self.print_delimiter()
def print_cell(self, value, width):
self.p('| {:<{w}}'.format(value, w=width))
def print_delimiter(self):
self.p('|\n')
def _print_divider(self):
for w in self.widths:
self.p('+' + '-' * (w + 1))
self.p('+\n')
class HtmlRenderer(BaseRenderer):
def tag(self, elem):
class TagWrapper(object):
def __enter__(_):
self.p('<{}>'.format(elem))
def __exit__(_, type, value, traceback):
self.p('</{}>'.format(elem))
return TagWrapper()
def render(self, data):
with self.tag('table'):
with self.tag('thead'):
self.print_row([c.header for c in self.table.columns],
tag='th')
with self.tag('tbody'):
for row in data:
self.print_row([c.cell(row) for c in self.table.columns])
def print_row(self, values, tag='td'):
with self.tag('tr'):
map(lambda v: self.print_cell(v, tag=tag), values)
def print_cell(self, value, tag):
with self.tag(tag):
self.p(value)
class Table(object):
def __init__(self, columns=()):
self.columns = columns
def __add__(self, other):
return Table(self.columns[:] + other.columns[:])
def render(self, data, renderer=FancyRenderer):
r = renderer(self)
r.render(data)
print(r)
class Column(object):
def __init__(self, header, cell):
self.header = header
self.cell = cell
def max_width(self, data):
data_max = 0
if data:
data_max = max([len(self.cell(item)) for item in data])
return max(data_max, len(self.header))
def singleton(header, getter):
return Table(columns=(Column(header, getter), ))
def typed_column(header, getter, type):
def better_getter(x):
value = getter(x)
if value is None:
return ''
if not isinstance(value, type):
raise TypeError(
'Column {col}: {data} is not {type}'.format(
col=header, data=value, type=str(type)))
return str(value)
return singleton(header, better_getter)
def integer(header, getter):
return typed_column(header, getter, int)
def string(header, getter):
return typed_column(header, getter, basestring)
def boolean(header, getter):
return typed_column(header, getter, bool)
if __name__ == '__main__':
from operator import itemgetter
table = (integer('X', itemgetter('x')) +
integer('Y', itemgetter('y')) +
string('Name', itemgetter('name')))
data = [
{'x': 0, 'y': 0, 'name': 'Origin'},
{'x': 5, 'y': 5, 'name': 'Diagonal'},
{'x': 2, 'y': 8, 'name': 'Up'},
]
table.render(data, renderer=FancyRenderer)
| # -*- coding: utf-8 -*-
from __future__ import print_function
class BaseRenderer(object):
def __init__(self, table):
self.table = table
self.output = []
def p(self, val):
self.output.append(val)
def __str__(self):
return ''.join(self.output)
class FancyRenderer(BaseRenderer):
def render(self, data):
self.widths = [col.max_width(data) + 1 for col in self.table.columns]
self._print_divider()
self.print_row([c.header for c in self.table.columns])
self._print_divider()
for row in data:
self.print_row([c.cell(row) for c in self.table.columns])
self._print_divider()
def print_row(self, values):
map(self.print_cell, values, self.widths)
self.print_delimiter()
def print_cell(self, value, width):
self.p('| {:<{w}}'.format(value, w=width))
def print_delimiter(self):
self.p('|\n')
def _print_divider(self):
for w in self.widths:
self.p('+' + '-' * (w + 1))
self.p('+\n')
class HtmlRenderer(BaseRenderer):
def tag(self, elem):
class TagWrapper(object):
def __enter__(_):
self.p('<{}>'.format(elem))
def __exit__(_, type, value, traceback):
self.p('</{}>'.format(elem))
return TagWrapper()
def render(self, data):
with self.tag('table'):
with self.tag('thead'):
self.print_row([c.header for c in self.table.columns],
tag='th')
with self.tag('tbody'):
for row in data:
self.print_row([c.cell(row) for c in self.table.columns])
def print_row(self, values, tag='td'):
with self.tag('tr'):
map(lambda v: self.print_cell(v, tag=tag), values)
def print_cell(self, value, tag):
with self.tag(tag):
self.p(value)
class Table(object):
def __init__(self, columns=()):
self.columns = columns
def __add__(self, other):
return Table(self.columns[:] + other.columns[:])
def render(self, data, renderer=FancyRenderer):
r = renderer(self)
r.render(data)
print(r)
class Column(object):
def __init__(self, header, cell):
self.header = header
self.cell = cell
def max_width(self, data):
data_max = 0
if data:
data_max = max([len(self.cell(item)) for item in data])
return max(data_max, len(self.header))
def singleton(header, getter):
return Table(columns=(Column(header, getter), ))
def typed_column(header, getter, type):
def better_getter(x):
value = getter(x)
if value is None:
return ''
if not isinstance(value, type):
raise TypeError(
'Column {col}: {data} is not {type}'.format(
col=header, data=value, type=str(type)))
return str(value)
return singleton(header, better_getter)
def integer(header, getter):
return typed_column(header, getter, int)
def string(header, getter):
return typed_column(header, getter, basestring)
def boolean(header, getter):
return typed_column(header, getter, bool)
if __name__ == '__main__':
from operator import itemgetter
table = (integer('X', itemgetter('x')) +
integer('Y', itemgetter('y')) +
string('Name', itemgetter('name')))
data = [
{'x': 0, 'y': 0, 'name': 'Origin'},
{'x': 5, 'y': 5, 'name': 'Diagonal'},
{'x': 2, 'y': 8, 'name': 'Up'},
]
table.render(data, renderer=FancyRenderer)
| Python | 0.000052 |
38a4b41d942f40dd16e1a1c88ab68c0b9169ff0c | update tts | slackbot/plugins/tts.py | slackbot/plugins/tts.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# tts.py ---
#
# Filename: tts.py
# Description:
# Author: Werther Zhang
# Maintainer:
# Created: Sun Sep 10 16:24:08 2017 (+0800)
#
# Change Log:
#
#
import os
import sys
import baidutts
import hashlib
import commands
from slackbot.bot import plugin_init
from slackbot.bot import respond_to
try:
from pydub import AudioSegment
except Exception as e:
print 'Missing module pydub, please install it'
class TTS(object):
def __init__(self, config, method):
self.__ttsdriver = None
if method == 'baidu':
self.__ttsdriver = baidutts.BaiduTTS(config.get('apikey', ""),
config.get('secretkey', ""),
config.get('speed', 5),
config.get('pitch', 9),
config.get('volume', 9),
config.get('person', 3))
def __insert_silent(self, media_file, ftype):
try:
silent = AudioSegment.silent(duration=1000)
sound1 = AudioSegment.from_file(media_file, ftype)
combined = silent + sound1
combined.export(media_file, format=ftype)
except Exception as e:
print("{}".format(e))
def __text2tts(self, message):
return self.__ttsdriver.get_tts_audio(message, 'zh')
def __md5sum(contents):
hash = hashlib.md5()
hash.update(contents)
return hash.hexdigest()
def __mplayer(f):
st, output = commands.getstatusoutput('mplayer -really-quiet -noconsolecontrols -volume 85 -speed 0.8 {}'.format(f))
if st != 0:
print('mplayer output:\n {}'.format(output))
def text2play(self, message):
t, d = self.__text2tts(message)
basename = self.__md5sum(d)
basename = os.path.join('/tmp/' + basename + '.' + t)
with open(basename, 'w') as f:
f.write(d)
self.__mplayer(basename)
os.remove(basename)
tts_obj = None
@plugin_init
def init_tts(config):
global tts_obj
enable = config.get('enable', False)
driver = config.get('driver', 'baidu')
if enable:
tts_obj = TTS(config, driver)
@respond_to(r'tts (.*)')
def tts_command(message, rest):
global tts_obj
tts_obj.text2play(rest)
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# tts.py ---
#
# Filename: tts.py
# Description:
# Author: Werther Zhang
# Maintainer:
# Created: Sun Sep 10 16:24:08 2017 (+0800)
#
# Change Log:
#
#
import os
import sys
import baidutts
import hashlib
import commands
from slackbot.bot import plugin_init
from slackbot.bot import respond_to
class TTS(object):
def __init__(self, config, method):
self.__ttsdriver = None
if method == 'baidu':
self.__ttsdriver = baidutts.BaiduTTS(config.get('apikey', ""),
config.get('secretkey', ""),
config.get('speed', 5),
config.get('pitch', 9),
config.get('volume', 9),
config.get('person', 3))
def __text2tts(self, message):
return self.__ttsdriver.get_tts_audio(message, 'zh')
def __md5sum(contents):
hash = hashlib.md5()
hash.update(contents)
return hash.hexdigest()
def __mplayer(f):
st, output = commands.getstatusoutput('mplayer -really-quiet -noconsolecontrols -volume 82 {}'.format(f))
def text2play(self, message):
t, d = self.__text2tts(message)
basename = self.__md5sum(d)
basename = os.path.join('/tmp/' + basename + '.' + t)
with open(basename, 'w') as f:
f.write(d)
self.__mplayer(basename)
os.remove(basename)
tts_obj = None
@plugin_init
def init_tts(config):
global tts_obj
enable = config.get('enable', False)
driver = config.get('driver', 'baidu')
if enable:
tts_obj = TTS(config, driver)
@respond_to(r'tts (.*)')
def tts_command(message, rest):
global tts_obj
tts_obj.text2play(rest)
| Python | 0.000001 |
8f4f902042b848a6a212ae966aaf6435ae8d5c77 | set background as a widget | sheldonchess/interface/web/sheldonchess.py | sheldonchess/interface/web/sheldonchess.py | from rajesh import Application, run, expr
from rajesh.element import Img, Div
from screens import MainMenu, NormalGameLobby
class Player(object):
def __init__(self, app):
self.app = app
self.name = ""
class SheldonChess(Application):
def begin(self):
self.player = Player(self)
self.title = "Sheldon Chess"
self.background = "images/sheldonchess_background.png"
info_box = Div(id="info_box")
self.put(info_box, ("50%", 0))
main_menu = MainMenu(self)
self.put(main_menu, ("50%", "50%"))
def connectionLost(self, reason):
for player in NormalGameLobby.players:
if player == self.player:
NormalGameLobby.players.remove(player)
NormalGameLobby.update_players()
if __name__ == "__main__":
run()
| from rajesh import Application, run, expr
from rajesh.element import Img, Div
from screens import MainMenu, NormalGameLobby
class Player(object):
def __init__(self, app):
self.app = app
self.name = ""
class SheldonChess(Application):
def begin(self):
self.player = Player(self)
self.title = "Sheldon Chess"
background = Img(id="background", src="images/sheldonchess_background.png", width="100%", height="100%")
self.put(background, (0, 0))
info_box = Div(id="info_box")
self.put(info_box, ("50%", 0))
main_menu = MainMenu(self)
self.put(main_menu, ("50%", "50%"))
def connectionLost(self, reason):
for player in NormalGameLobby.players:
if player == self.player:
NormalGameLobby.players.remove(player)
NormalGameLobby.update_players()
if __name__ == "__main__":
run()
| Python | 0.000001 |
fac2c5752c23d2fd415caafd2654f696c4842806 | Bump version. | pyramid_addons/__init__.py | pyramid_addons/__init__.py | __version__ = '0.21'
| __version__ = '0.20'
| Python | 0 |
1159cda1437085218b79345244897f2be8990ca9 | fix tell delivery and possibly db lock | pyscp_bot/modules/notes.py | pyscp_bot/modules/notes.py | #!/usr/bin/env python3
###############################################################################
# Module Imports
###############################################################################
import arrow
import peewee
import sopel
import re
import pyscp_bot.jarvis as vocab
###############################################################################
db = peewee.SqliteDatabase('jarvis.db')
class BaseModel(peewee.Model):
class Meta:
database = db
class Tell(BaseModel):
sender = peewee.CharField()
recipient = peewee.CharField()
message = peewee.TextField()
time = peewee.DateTimeField()
class Seen(BaseModel):
pass
class Message(BaseModel):
user = peewee.CharField()
channel = peewee.CharField()
time = peewee.CharField()
text = peewee.TextField()
###############################################################################
def setup(bot):
db.connect()
Tell.create_table(True)
Seen.drop_table(True)
Message.create_table(True)
sopel.bot.Sopel._say = sopel.bot.Sopel.say
sopel.bot.Sopel.say = log_and_say
@sopel.module.commands('tell')
def tell(bot, trigger):
name, text = trigger.group(2).split(maxsplit=1)
name = name.strip().lower()
now = arrow.utcnow().timestamp
Tell.create(
sender=str(trigger.nick), recipient=name, message=text, time=now)
bot.say(vocab.tell_stored(trigger.nick))
@sopel.module.thread(False)
@sopel.module.rule('.*')
@sopel.module.priority('low')
def chat_activity(bot, trigger):
user = trigger.nick.strip()
channel = trigger.sender
time = arrow.utcnow().timestamp
message = trigger.group(0)
Message.create(user=user, channel=channel, time=time, text=message)
if not re.match(r'[!\.](st|showt|showtells)$', trigger.group(0)):
deliver_tells(bot, trigger.nick)
def log_and_say(bot, text, recipient, max_messages=1):
if recipient != 'NickServ':
time = arrow.utcnow().timestamp
Message.create(
user=bot.config.core.nick, channel=recipient, time=time, text=text)
bot._say(text, recipient, max_messages)
@sopel.module.commands('showtells', 'showt', 'st')
def showtells(bot, trigger):
if Tell.select().where(Tell.recipient == trigger.nick.lower()).exists():
deliver_tells(bot, trigger.nick)
else:
bot.notice(vocab.no_tells(trigger.nick), trigger.nick)
@sopel.module.commands('seen')
def seen(bot, trigger):
name = trigger.group(2).strip().lower()
channel = trigger.sender
try:
message = (
Message.select()
.where(
peewee.fn.Lower(Message.user) == name,
Message.channel == channel)
.limit(1).order_by(Message.time.desc()).get())
time = arrow.get(message.time).humanize()
bot.say('{}: I saw {} {} saying "{}"'.format(
trigger.nick, message.user, time, message.text))
except Message.DoesNotExist:
bot.say(vocab.user_never_seen(trigger.nick))
def deliver_tells(bot, name):
query = Tell.select().where(Tell.recipient == name.lower())
if not query.exists():
return
bot.notice(
'{}: you have {} new messages.'.format(name, query.count()), name)
for tell in query:
time_passed = arrow.get(tell.time).humanize()
msg = '{} said {}: {}'.format(tell.sender, time_passed, tell.message)
bot.say(msg, name)
Tell.delete().where(Tell.recipient == name.lower()).execute()
| #!/usr/bin/env python3
###############################################################################
# Module Imports
###############################################################################
import arrow
import peewee
import sopel
import re
import pyscp_bot.jarvis as vocab
###############################################################################
db = peewee.SqliteDatabase('jarvis.db')
class BaseModel(peewee.Model):
class Meta:
database = db
class Tell(BaseModel):
sender = peewee.CharField()
recipient = peewee.CharField()
message = peewee.TextField()
time = peewee.DateTimeField()
class Seen(BaseModel):
pass
class Message(BaseModel):
user = peewee.CharField()
channel = peewee.CharField()
time = peewee.CharField()
text = peewee.TextField()
###############################################################################
def setup(bot):
db.connect()
Tell.create_table(True)
Seen.drop_table(True)
Message.create_table(True)
sopel.bot.Sopel._say = sopel.bot.Sopel.say
sopel.bot.Sopel.say = log_and_say
@sopel.module.commands('tell')
def tell(bot, trigger):
name, text = trigger.group(2).split(maxsplit=1)
name = name.strip().lower()
now = arrow.utcnow().timestamp
Tell.create(
sender=str(trigger.nick), recipient=name, message=text, time=now)
bot.say(vocab.tell_stored(trigger.nick))
@sopel.module.rule('.*')
def chat_activity(bot, trigger):
user = trigger.nick.strip()
channel = trigger.sender
time = arrow.utcnow().timestamp
message = trigger.group(0)
Message.create(user=user, channel=channel, time=time, text=message)
if re.match(r'[!\.](st|showt|showtells)$', trigger.group(0)):
deliver_tells(bot, trigger.nick)
def log_and_say(bot, text, recipient, max_messages=1):
if recipient != 'NickServ':
time = arrow.utcnow().timestamp
Message.create(
user=bot.config.core.nick, channel=recipient, time=time, text=text)
bot._say(text, recipient, max_messages)
@sopel.module.commands('showtells', 'showt', 'st')
def showtells(bot, trigger):
if Tell.select().where(Tell.recipient == trigger.nick.lower()).exists():
deliver_tells(bot, trigger.nick)
else:
bot.notice(vocab.no_tells(trigger.nick), trigger.nick)
@sopel.module.commands('seen')
def seen(bot, trigger):
name = trigger.group(2).strip().lower()
channel = trigger.sender
try:
message = (
Message.select()
.where(
peewee.fn.Lower(Message.user) == name,
Message.channel == channel)
.limit(1).order_by(Message.time.desc()).get())
time = arrow.get(message.time).humanize()
bot.say('{}: I saw {} {} saying "{}"'.format(
trigger.nick, message.user, time, message.text))
except Message.DoesNotExist:
bot.say(vocab.user_never_seen(trigger.nick))
def deliver_tells(bot, name):
query = Tell.select().where(Tell.recipient == name.lower())
if not query.exists():
return
bot.notice(
'{}: you have {} new messages.'.format(name, query.count()), name)
for tell in query:
time_passed = arrow.get(tell.time).humanize()
msg = '{} said {}: {}'.format(tell.sender, time_passed, tell.message)
bot.say(msg, name)
Tell.delete().where(Tell.recipient == name.lower()).execute()
| Python | 0 |
e913bbffde84403018e741a62318df029a641950 | Delete more not needed stuff | archive/archive_api/src/conftest.py | archive/archive_api/src/conftest.py | # -*- encoding: utf-8
import os
import uuid
import betamax
import pytest
import requests
@pytest.fixture(scope="session")
def recorded_sess(pytestconfig):
with betamax.Betamax.configure() as config:
config.cassette_library_dir = str(
pytestconfig.rootdir.join("src", "tests", "cassettes")
)
session = requests.Session()
with betamax.Betamax(session) as vcr:
vcr.use_cassette("test_archive_api")
yield session
@pytest.fixture
def client(
sns_client,
topic_arn,
recorded_sess,
):
# This only has to work when populating the betamax recording file;
# although we run on Linux in Travis CI, this will still fine because
# we use the cached recordings.
os.environ.update(
{"PROGRESS_MANAGER_ENDPOINT": "http://docker.for.mac.localhost:6000"}
)
os.environ.update(
{"BAGS_MANAGER_ENDPOINT": "http://host.docker.internal:6001"}
)
from archive_api import app
app.config["SNS_CLIENT"] = sns_client
app.config["SNS_TOPIC_ARN"] = topic_arn
app.config["PROGRESS_MANAGER"].sess = recorded_sess
app.config["BAGS_MANAGER"].sess = recorded_sess
yield app.test_client()
@pytest.fixture
def guid():
return str(uuid.uuid4())
@pytest.fixture
def external_identifier():
return "b22454408"
@pytest.fixture
def space_name():
return "digitised"
@pytest.fixture
def bag_id(external_identifier, space_name):
return f"{space_name}/{external_identifier}"
| # -*- encoding: utf-8
import os
import random
import uuid
import betamax
import pytest
import requests
import json
@pytest.fixture(scope="session")
def recorded_sess(pytestconfig):
with betamax.Betamax.configure() as config:
config.cassette_library_dir = str(
pytestconfig.rootdir.join("src", "tests", "cassettes")
)
session = requests.Session()
with betamax.Betamax(session) as vcr:
vcr.use_cassette("test_archive_api")
yield session
@pytest.fixture
def client(
dynamodb_resource,
s3_client,
sns_client,
topic_arn,
table_name_bag,
bucket_bag,
recorded_sess,
):
# This only has to work when populating the betamax recording file;
# although we run on Linux in Travis CI, this will still fine because
# we use the cached recordings.
os.environ.update(
{"PROGRESS_MANAGER_ENDPOINT": "http://docker.for.mac.localhost:6000"}
)
os.environ.update(
{"BAGS_MANAGER_ENDPOINT": "http://host.docker.internal:6001"}
)
from archive_api import app
app.config["DYNAMODB_RESOURCE"] = dynamodb_resource
app.config["SNS_CLIENT"] = sns_client
app.config["SNS_TOPIC_ARN"] = topic_arn
app.config["S3_CLIENT"] = s3_client
app.config["BAG_VHS_TABLE_NAME"] = table_name_bag
app.config["BAG_VHS_BUCKET_NAME"] = bucket_bag
app.config["PROGRESS_MANAGER"].sess = recorded_sess
app.config["BAGS_MANAGER"].sess = recorded_sess
yield app.test_client()
@pytest.fixture
def guid():
return str(uuid.uuid4())
@pytest.fixture
def external_identifier():
return "b22454408"
@pytest.fixture
def space_name():
return "digitised"
@pytest.fixture
def bag_id(external_identifier, space_name):
return f"{space_name}/{external_identifier}"
@pytest.fixture()
def table_name_bag(dynamodb_client):
dynamodb_table_name = "bag--table-%d" % random.randint(0, 10000)
os.environ.update({"BAG_VHS_TABLE_NAME": dynamodb_table_name})
create_table(dynamodb_client, dynamodb_table_name)
yield dynamodb_table_name
dynamodb_client.delete_table(TableName=dynamodb_table_name)
try:
del os.environ["BAG_VHS_TABLE_NAME"]
except KeyError:
pass
def create_table(dynamodb_client, table_name):
try:
dynamodb_client.create_table(
TableName=table_name,
KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
)
dynamodb_client.get_waiter("table_exists").wait(TableName=table_name)
except dynamodb_client.exceptions.ResourceInUseException:
pass
@pytest.fixture
def bucket_bag(s3_client):
bucket_name = "test-python-bag-bucket-%d" % random.randint(0, 10000)
os.environ.update({"BAG_VHS_BUCKET_NAME": bucket_name})
s3_client.create_bucket(Bucket=bucket_name)
yield bucket_name
try:
del os.environ["BAG_VHS_BUCKET_NAME"]
except KeyError:
pass
@pytest.fixture
def s3_bag(bag_id):
file_location = "src/tests/resources/vhs_bag.json"
with open(file_location, "r") as io:
json_bag = json.load(io)
json_bag["id"]["value"] = bag_id
return json_bag
| Python | 0 |
8e4a5ef25c87879fb01aa79f88c6c6a833820f8b | bump version | python/baseline/version.py | python/baseline/version.py | __version__ = "1.1.3"
| __version__ = "1.1.2"
| Python | 0 |
76ffb1b8891e2ad349140044d78e766e02ebf87a | Remove import taichi from expr.py (#3871) | python/taichi/lang/expr.py | python/taichi/lang/expr.py | import numpy as np
from taichi._lib import core as _ti_core
from taichi.lang import impl
from taichi.lang.common_ops import TaichiOperations
from taichi.lang.matrix import Matrix
from taichi.lang.util import is_taichi_class
# Scalar, basic data type
class Expr(TaichiOperations):
"""A Python-side Expr wrapper, whose member variable `ptr` is an instance of C++ Expr class. A C++ Expr object contains member variable `expr` which holds an instance of C++ Expression class."""
def __init__(self, *args, tb=None):
_taichi_skip_traceback = 1
self.tb = tb
if len(args) == 1:
if isinstance(args[0], _ti_core.Expr):
self.ptr = args[0]
elif isinstance(args[0], Expr):
self.ptr = args[0].ptr
self.tb = args[0].tb
elif is_taichi_class(args[0]):
raise ValueError('cannot initialize scalar expression from '
f'taichi class: {type(args[0])}')
else:
# assume to be constant
arg = args[0]
try:
if isinstance(arg, np.ndarray):
arg = arg.dtype(arg)
except:
pass
self.ptr = impl.make_constant_expr(arg).ptr
else:
assert False
if self.tb:
self.ptr.set_tb(self.tb)
self.ptr.type_check()
def __hash__(self):
return self.ptr.get_raw_address()
def __str__(self):
return '<ti.Expr>'
def __repr__(self):
return '<ti.Expr>'
def make_var_list(size):
exprs = []
for _ in range(size):
exprs.append(_ti_core.make_id_expr(''))
return exprs
def make_expr_group(*exprs):
if len(exprs) == 1:
if isinstance(exprs[0], (list, tuple)):
exprs = exprs[0]
elif isinstance(exprs[0], Matrix):
mat = exprs[0]
assert mat.m == 1
exprs = mat.entries
expr_group = _ti_core.ExprGroup()
for i in exprs:
if isinstance(i, Matrix):
assert i.local_tensor_proxy is not None
expr_group.push_back(i.local_tensor_proxy)
else:
expr_group.push_back(Expr(i).ptr)
return expr_group
| import numpy as np
from taichi._lib import core as _ti_core
from taichi.lang import impl
from taichi.lang.common_ops import TaichiOperations
from taichi.lang.util import is_taichi_class
import taichi as ti
# Scalar, basic data type
class Expr(TaichiOperations):
"""A Python-side Expr wrapper, whose member variable `ptr` is an instance of C++ Expr class. A C++ Expr object contains member variable `expr` which holds an instance of C++ Expression class."""
def __init__(self, *args, tb=None):
_taichi_skip_traceback = 1
self.tb = tb
if len(args) == 1:
if isinstance(args[0], _ti_core.Expr):
self.ptr = args[0]
elif isinstance(args[0], Expr):
self.ptr = args[0].ptr
self.tb = args[0].tb
elif is_taichi_class(args[0]):
raise ValueError('cannot initialize scalar expression from '
f'taichi class: {type(args[0])}')
else:
# assume to be constant
arg = args[0]
try:
if isinstance(arg, np.ndarray):
arg = arg.dtype(arg)
except:
pass
self.ptr = impl.make_constant_expr(arg).ptr
else:
assert False
if self.tb:
self.ptr.set_tb(self.tb)
self.ptr.type_check()
def __hash__(self):
return self.ptr.get_raw_address()
def __str__(self):
return '<ti.Expr>'
def __repr__(self):
return '<ti.Expr>'
def make_var_list(size):
exprs = []
for _ in range(size):
exprs.append(_ti_core.make_id_expr(''))
return exprs
def make_expr_group(*exprs):
if len(exprs) == 1:
if isinstance(exprs[0], (list, tuple)):
exprs = exprs[0]
elif isinstance(exprs[0], ti.Matrix):
mat = exprs[0]
assert mat.m == 1
exprs = mat.entries
expr_group = _ti_core.ExprGroup()
for i in exprs:
if isinstance(i, ti.Matrix):
assert i.local_tensor_proxy is not None
expr_group.push_back(i.local_tensor_proxy)
else:
expr_group.push_back(Expr(i).ptr)
return expr_group
| Python | 0 |
49f365ecfc18e32e5664ffc53c163320ed0af6ac | Add the missing rows() function | pywind/bmreports/prices.py | pywind/bmreports/prices.py | # coding=utf-8
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
""" BMReports make the system electricity prices available. This module contains
classes to access those reports.
"""
import os
from datetime import date, datetime
from pywind.utils import get_or_post_a_url, parse_response_as_xml
class SystemPrices(object):
""" Class to get the electricity prices from BMreports. """
URL = 'http://www.bmreports.com/bsp/additional/soapfunctions.php'
def __init__(self, dtt=None):
self.dtt = dtt or date.today()
self.xml = None
self.prices = []
def get_data(self):
""" Get the data from the remote server. """
data = {'element': 'SYSPRICE',
'dT': self.dtt.strftime("%Y-%m-%d")}
resp = get_or_post_a_url(self.URL, params=data)
self.xml = parse_response_as_xml(resp)
if self.xml is None:
return False
for elm in self.xml.xpath('.//ELEMENT'):
data = {}
for elm2 in elm.getchildren():
if elm2.tag == 'SP':
data['period'] = int(elm2.text)
elif elm2.tag == 'SD':
data['date'] = datetime.strptime(elm2.text, "%Y-%m-%d")
else:
data[elm2.tag.lower()] = elm2.text
self.prices.append(data)
return len(self.prices) > 0
def rows(self):
"""Generator to return rows for export.
:returns: Dict containing information for a single price period.
:rtype: dict
"""
for per in self.prices:
yield {'PricePeriod': {'@{}'.format(key):per[key] for key in per}}
def save_original(self, filename):
""" Save the downloaded certificate data into the filename provided.
:param filename: Filename to save the file to.
:returns: True or False
:rtype: bool
"""
if self.xml is None:
return False
name, ext = os.path.splitext(filename)
if ext is '':
filename += '.xml'
self.xml.write(filename)
return True
def as_dict(self):
""" Return the data as a dict. """
return {'date': self.dtt, 'data': self.prices}
| # coding=utf-8
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
""" BMReports make the system electricity prices available. This module contains
classes to access those reports.
"""
import os
from datetime import date, datetime
from pywind.utils import get_or_post_a_url, parse_response_as_xml
class SystemPrices(object):
""" Class to get the electricity prices from BMreports. """
URL = 'http://www.bmreports.com/bsp/additional/soapfunctions.php'
def __init__(self, dtt=None):
self.dtt = dtt or date.today()
self.xml = None
self.prices = []
def get_data(self):
""" Get the data from the remote server. """
data = {'element': 'SYSPRICE',
'dT': self.dtt.strftime("%Y-%m-%d")}
resp = get_or_post_a_url(self.URL, params=data)
self.xml = parse_response_as_xml(resp)
if self.xml is None:
return False
for elm in self.xml.xpath('.//ELEMENT'):
data = {}
for elm2 in elm.getchildren():
if elm2.tag == 'SP':
data['period'] = int(elm2.text)
elif elm2.tag == 'SD':
data['date'] = datetime.strptime(elm2.text, "%Y-%m-%d")
else:
data[elm2.tag.lower()] = elm2.text
self.prices.append(data)
return len(self.prices) > 0
def save_original(self, filename):
""" Save the downloaded certificate data into the filename provided.
:param filename: Filename to save the file to.
:returns: True or False
:rtype: bool
"""
if self.xml is None:
return False
name, ext = os.path.splitext(filename)
if ext is '':
filename += '.xml'
self.xml.write(filename)
return True
def as_dict(self):
""" Return the data as a dict. """
return {'date': self.dtt, 'data': self.prices}
| Python | 0.999729 |
86768d83067745def16761db24ce496e3125399e | test script works | mtgreatest/process_results/test_script.py | mtgreatest/process_results/test_script.py | #!/usr/bin/env python
import scrape_results
from mtgreatest.rdb import Cursor
cursor = Cursor()
event_info = cursor.execute('select event_id, event_link from event_table where results_loaded = 1')
event_info = [dict(zip(('event_id','event_link'), item)) for item in event_info] #this should be a method (or default return structure) in rdb
failed = []
for row in event_info:
soup = scrape_results.event_soup(row['event_link'])
print 'scraping standings for event {}'.format(row['event_id'])
try:
scrape_results.scrape_standings(soup, row['event_id'])
except:
failed.append(row['event_id'])
| #!/usr/bin/env python
import scrape_results
from mtgreatest.rdb import Cursor
cursor = Cursor()
event_info = cursor.execute('select event_id, event_link from event_table where results_loaded = 1')
event_info = [dict(zip(('event_id','event_link'), item)) for item in event_info] #this should be a method (or default return structure) in rdb
soups = [scrape_results.event_soup(row['event_link']) for row in event_info]
failed = []
for i in range(len(soups)):
try:
scrape_results.scrape_standings(soups[i], event_info[i]['event_id'])
except:
failed.append(event_info[i]['event_id'])
| Python | 0.000001 |
e164a3ccb7625d4f36c83628aa6f7f030f38d6cf | remove normalized test | networkx/algorithms/tests/test_smetric.py | networkx/algorithms/tests/test_smetric.py |
from nose.tools import assert_equal
import networkx as nx
def test_smetric():
g = nx.Graph()
g.add_edge(1,2)
g.add_edge(2,3)
g.add_edge(2,4)
g.add_edge(1,4)
sm = nx.s_metric(g,normalized=False)
assert_equal(sm, 19.0)
# smNorm = nx.s_metric(g,normalized=True)
# assert_equal(smNorm, 0.95)
|
from nose.tools import assert_equal
import networkx as nx
def test_smetric():
g = nx.Graph()
g.add_edge(1,2)
g.add_edge(2,3)
g.add_edge(2,4)
g.add_edge(1,4)
sm = nx.s_metric(g,normalized=False)
assert_equal(sm, 19.0)
smNorm = nx.s_metric(g,normalized=True)
# assert_equal(smNorm, 0.95)
| Python | 0.000051 |
a15bda8b4a4d32d25f1e47f69b20f7df0ff45691 | Remove debugging code | ether/ethcontract.py | ether/ethcontract.py | from ether import asm, util
import re
import persistent
from ethereum import utils
class ETHContract(persistent.Persistent):
def __init__(self, code = ""):
self.code = code
def get_xrefs(self):
disassembly = asm.disassemble(util.safe_decode(self.code))
xrefs = []
for instruction in disassembly:
if instruction['opcode'] == "PUSH20":
if instruction['argument']:
xref = instruction['argument'].decode("utf-8")
if xref not in xrefs:
xrefs.append(xref)
return xrefs
def get_disassembly(self):
return asm.disassemble(util.safe_decode(self.code))
def get_easm(self):
return asm.disassembly_to_easm(asm.disassemble(util.safe_decode(self.code)))
def matches_expression(self, expression):
easm_code = self.get_easm()
str_eval = ''
matches = re.findall(r'func:([a-zA-Z0-9\s,(\[\]]+?\))', expression)
for m in matches:
# Calculate function signature hashes
sign_hash = utils.sha3(m)[:4].hex()
expression = expression.replace(m, sign_hash)
tokens = re.split("( and | or )", expression, re.IGNORECASE)
for token in tokens:
if token == " and " or token == " or ":
str_eval += token
continue
m = re.match(r'^code:([a-zA-Z0-9\s,\[\]]+)', token)
if (m):
code = m.group(1).replace(",", "\\n")
str_eval += "\"" + code + "\" in easm_code"
continue
m = re.match(r'^func:([a-zA-Z0-9\s,()\[\]]+)$', token)
if (m):
str_eval += "\"" + m.group(1) + "\" in easm_code"
continue
return eval(str_eval)
class InstanceList(persistent.Persistent):
def __init__(self):
self.addresses = []
self.balances = []
pass
def add(self, address, balance = 0):
self.addresses.append(address)
self.balances.append(balance)
self._p_changed = True
| from ether import asm, util
import re
import persistent
from ethereum import utils
class ETHContract(persistent.Persistent):
def __init__(self, code = ""):
self.code = code
def get_xrefs(self):
disassembly = asm.disassemble(util.safe_decode(self.code))
xrefs = []
for instruction in disassembly:
if instruction['opcode'] == "PUSH20":
if instruction['argument']:
xref = instruction['argument'].decode("utf-8")
if xref not in xrefs:
xrefs.append(xref)
return xrefs
def get_disassembly(self):
return asm.disassemble(util.safe_decode(self.code))
def get_easm(self):
return asm.disassembly_to_easm(asm.disassemble(util.safe_decode(self.code)))
def matches_expression(self, expression):
easm_code = self.get_easm()
str_eval = ''
matches = re.findall(r'func:([a-zA-Z0-9\s,(\[\]]+?\))', expression)
for m in matches:
# Calculate function signature hashes
sign_hash = utils.sha3(m)[:4].hex()
expression = expression.replace(m, sign_hash)
tokens = re.split("( and | or )", expression, re.IGNORECASE)
for token in tokens:
if token == " and " or token == " or ":
str_eval += token
continue
m = re.match(r'^code:([a-zA-Z0-9\s,\[\]]+)', token)
if (m):
code = m.group(1).replace(",", "\\n")
str_eval += "\"" + code + "\" in easm_code"
continue
m = re.match(r'^func:([a-zA-Z0-9\s,()\[\]]+)$', token)
if (m):
str_eval += "\"" + m.group(1) + "\" in easm_code"
print(str_eval)
continue
return eval(str_eval)
class InstanceList(persistent.Persistent):
def __init__(self):
self.addresses = []
self.balances = []
pass
def add(self, address, balance = 0):
self.addresses.append(address)
self.balances.append(balance)
self._p_changed = True
| Python | 0.000739 |
d1490510cd5f66a5da699ffea39b6a8b37c88f01 | add test for composite potential | gary/potential/tests/test_io.py | gary/potential/tests/test_io.py | # coding: utf-8
""" test reading/writing potentials to files """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os
# Third-party
import numpy as np
# Project
from ..io import read, write
from ..builtin import IsochronePotential
from ..custom import PW14Potential
from ...units import galactic
# TODO: config item to specify path to test data?
test_data_path = os.path.abspath(os.path.join(os.path.split(__file__)[0],
"../../../test-data/"))
def test_read():
f1 = os.path.join(test_data_path, 'potential', 'isochrone.yml')
potential = read(f1)
assert np.allclose(potential.parameters['m'], 1E11)
assert np.allclose(potential.parameters['b'], 0.76)
f2 = os.path.join(test_data_path, 'potential', 'pw14.yml')
potential = read(f2)
f3 = os.path.join(test_data_path, 'potential', 'pw14_2.yml')
potential = read(f3)
def test_write():
tmp_filename = "/tmp/potential.yml"
# try a simple potential
potential = IsochronePotential(m=1E11, b=0.76, units=galactic)
with open(tmp_filename,'w') as f:
write(potential, f)
write(potential, tmp_filename)
# more complex
potential = PW14Potential()
with open(tmp_filename,'w') as f:
write(potential, f)
write(potential, tmp_filename)
| # coding: utf-8
""" test reading/writing potentials to files """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os
# Third-party
import numpy as np
# Project
from ..io import read, write
from ..builtin import IsochronePotential
from ...units import galactic
# TODO: config item to specify path to test data?
test_data_path = os.path.abspath(os.path.join(os.path.split(__file__)[0],
"../../../test-data/"))
def test_read():
f1 = os.path.join(test_data_path, 'potential', 'isochrone.yml')
potential = read(f1)
assert np.allclose(potential.parameters['m'], 1E11)
assert np.allclose(potential.parameters['b'], 0.76)
f2 = os.path.join(test_data_path, 'potential', 'pw14.yml')
potential = read(f2)
def test_write():
tmp_filename = "/tmp/potential.yml"
# try a simple potential
potential = IsochronePotential(m=1E11, b=0.76, units=galactic)
with open(tmp_filename,'w') as f:
write(potential, f)
write(potential, tmp_filename)
| Python | 0 |
11499596e27e8f4c792cf5e36ea5a1c8f6d6053f | Improve logging (learning) | evilminions/hydra.py | evilminions/hydra.py | '''Replicates the behavior of a minion many times'''
import logging
import tornado.gen
import zmq
import salt.config
import salt.loader
import salt.payload
from evilminions.hydrahead import HydraHead
from evilminions.utils import fun_call_id
# HACK: turn the trace function into a no-op
# this almost doubles evil-minion's performance
salt.log.mixins.LoggingTraceMixIn.trace = lambda self, msg, *args, **kwargs: None
class Hydra(object):
'''Spawns HydraHeads, listens for messages coming from the Vampire.'''
def __init__(self, hydra_number):
self.hydra_number = hydra_number
self.current_reactions = []
self.reactions = {}
self.last_time = None
self.serial = salt.payload.Serial({})
self.log = None
def start(self, hydra_count, chunk, prefix, offset,
ramp_up_delay, slowdown_factor, keysize, semaphore):
'''Per-process entry point (one per Hydra)'''
# set up logging
self.log = logging.getLogger(__name__)
self.log.debug("Starting Hydra on: %s", chunk)
# set up the IO loop
zmq.eventloop.ioloop.install()
io_loop = zmq.eventloop.ioloop.ZMQIOLoop.current()
# set up ZeroMQ connection to the Proxy
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect('ipc:///tmp/evil-minions-pub.ipc')
socket.setsockopt(zmq.SUBSCRIBE, "")
stream = zmq.eventloop.zmqstream.ZMQStream(socket, io_loop)
stream.on_recv(self.update_reactions)
# Load original settings and grains
opts = salt.config.minion_config('/etc/salt/minion')
grains = salt.loader.grains(opts)
# set up heads!
first_head_number = chunk[0] if chunk else 0
delays = [ramp_up_delay * ((head_number - first_head_number) * hydra_count + self.hydra_number)
for head_number in chunk]
offset_head_numbers = [head_number + offset for head_number in chunk]
heads = [HydraHead('{}-{}'.format(prefix, offset_head_numbers[i]), io_loop, keysize, opts, grains,
delays[i], slowdown_factor, self.reactions) for i in range(len(chunk))]
# start heads!
for head in heads:
io_loop.spawn_callback(head.start)
semaphore.release()
io_loop.start()
@tornado.gen.coroutine
def update_reactions(self, packed_events):
'''Called whenever a message from Vampire is received.
Updates the internal self.reactions hash to contain reactions that will be mimicked'''
for packed_event in packed_events:
event = self.serial.loads(packed_event)
load = event['load']
socket = event['header']['socket']
current_time = event['header']['time']
self.last_time = self.last_time or current_time
if socket == 'PUB' and self.reactions == {}:
self.reactions[fun_call_id(None, None)] = [self.current_reactions]
self.current_reactions = []
self.last_time = current_time
if socket == 'REQ':
if load['cmd'] == '_auth':
continue
self.current_reactions.append(event)
if load['cmd'] == '_return':
call_id = fun_call_id(load['fun'], load['fun_args'])
self.reactions[call_id] = (self.reactions.get(call_id) or []) + [self.current_reactions]
self.log.debug("Hydra #{} learned reaction list #{} ({} reactions) for call: {}".format(
self.hydra_number,
len(self.reactions[call_id]),
len(self.current_reactions),
call_id))
for reaction in self.current_reactions:
load = reaction['load']
cmd = load['cmd']
path = "path={}".format(load['path']) if 'path' in load else ''
self.log.debug(" - {}({})".format(cmd, path))
self.current_reactions = []
event['header']['duration'] = current_time - self.last_time
self.last_time = current_time
| '''Replicates the behavior of a minion many times'''
import logging
import tornado.gen
import zmq
import salt.config
import salt.loader
import salt.payload
from evilminions.hydrahead import HydraHead
from evilminions.utils import fun_call_id
# HACK: turn the trace function into a no-op
# this almost doubles evil-minion's performance
salt.log.mixins.LoggingTraceMixIn.trace = lambda self, msg, *args, **kwargs: None
class Hydra(object):
'''Spawns HydraHeads, listens for messages coming from the Vampire.'''
def __init__(self, hydra_number):
self.hydra_number = hydra_number
self.current_reactions = []
self.reactions = {}
self.last_time = None
self.serial = salt.payload.Serial({})
self.log = None
def start(self, hydra_count, chunk, prefix, offset,
ramp_up_delay, slowdown_factor, keysize, semaphore):
'''Per-process entry point (one per Hydra)'''
# set up logging
self.log = logging.getLogger(__name__)
self.log.debug("Starting Hydra on: %s", chunk)
# set up the IO loop
zmq.eventloop.ioloop.install()
io_loop = zmq.eventloop.ioloop.ZMQIOLoop.current()
# set up ZeroMQ connection to the Proxy
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect('ipc:///tmp/evil-minions-pub.ipc')
socket.setsockopt(zmq.SUBSCRIBE, "")
stream = zmq.eventloop.zmqstream.ZMQStream(socket, io_loop)
stream.on_recv(self.update_reactions)
# Load original settings and grains
opts = salt.config.minion_config('/etc/salt/minion')
grains = salt.loader.grains(opts)
# set up heads!
first_head_number = chunk[0] if chunk else 0
delays = [ramp_up_delay * ((head_number - first_head_number) * hydra_count + self.hydra_number)
for head_number in chunk]
offset_head_numbers = [head_number + offset for head_number in chunk]
heads = [HydraHead('{}-{}'.format(prefix, offset_head_numbers[i]), io_loop, keysize, opts, grains,
delays[i], slowdown_factor, self.reactions) for i in range(len(chunk))]
# start heads!
for head in heads:
io_loop.spawn_callback(head.start)
semaphore.release()
io_loop.start()
@tornado.gen.coroutine
def update_reactions(self, packed_events):
'''Called whenever a message from Vampire is received.
Updates the internal self.reactions hash to contain reactions that will be mimicked'''
for packed_event in packed_events:
event = self.serial.loads(packed_event)
load = event['load']
socket = event['header']['socket']
current_time = event['header']['time']
self.last_time = self.last_time or current_time
if socket == 'PUB' and self.reactions == {}:
self.reactions[fun_call_id(None, None)] = [self.current_reactions]
self.current_reactions = []
self.last_time = current_time
if socket == 'REQ':
if load['cmd'] == '_auth':
continue
self.current_reactions.append(event)
if load['cmd'] == '_return':
call_id = fun_call_id(load['fun'], load['fun_args'])
self.reactions[call_id] = (self.reactions.get(call_id) or []) + [self.current_reactions]
self.log.debug("Hydra #{} learned reaction #{} for call: {}".format(self.hydra_number,
len(self.reactions[call_id]),
call_id))
self.current_reactions = []
event['header']['duration'] = current_time - self.last_time
self.last_time = current_time
| Python | 0 |
42b0c14d1e34dd88a92f22eda9d87dd104ea61f0 | tweak demo | examples/meanfield.py | examples/meanfield.py | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from pyhsmm.basic.distributions import Regression, Gaussian, PoissonDuration
from autoregressive.distributions import AutoRegression
from pyhsmm.util.text import progprint_xrange
from pyslds.models import HMMSLDS
np.random.seed(0)
###################
# generate data #
###################
import autoregressive
As = [np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
for alpha, theta in ((0.95,0.1), (0.95,-0.1), (1., 0.))]
truemodel = autoregressive.models.ARHSMM(
alpha=4.,init_state_concentration=4.,
obs_distns=[AutoRegression(A=A,sigma=0.05*np.eye(2)) for A in As],
dur_distns=[PoissonDuration(alpha_0=3*50,beta_0=3) for _ in As])
truemodel.prefix = np.array([[0.,3.]])
data, labels = truemodel.generate(1000)
data = data[truemodel.nlags:]
plt.figure()
plt.plot(data[:,0],data[:,1],'bx-')
#################
# build model #
#################
Nmax = 10
P = 2
D = data.shape[1]
dynamics_distns = [
AutoRegression(
A=np.eye(P),sigma=np.eye(P),
nu_0=3,S_0=3.*np.eye(P),M_0=np.eye(P),K_0=10.*np.eye(P))
for _ in xrange(Nmax)]
emission_distns = [
Regression(
A=np.eye(D),sigma=0.05*np.eye(D),
nu_0=5.,S_0=np.eye(P),M_0=np.eye(P),K_0=10.*np.eye(P))
for _ in xrange(Nmax)]
init_dynamics_distns = [
Gaussian(nu_0=4,sigma_0=4.*np.eye(P),mu_0=np.zeros(P),kappa_0=0.1)
for _ in xrange(Nmax)]
model = HMMSLDS(
dynamics_distns=dynamics_distns,
emission_distns=emission_distns,
init_dynamics_distns=init_dynamics_distns,
alpha=3.,init_state_distn='uniform')
model.add_data(data)
model.resample_states()
for _ in progprint_xrange(10):
model.resample_model()
model.states_list[0]._init_mf_from_gibbs()
####################
# run mean field #
####################
# plt.figure()
# plt.plot([model.meanfield_coordinate_descent_step() for _ in progprint_xrange(50)])
for _ in progprint_xrange(50):
model.meanfield_coordinate_descent_step(compute_vlb=False)
import matplotlib.gridspec as gridspec
fig = plt.figure(figsize=(9,3))
gs = gridspec.GridSpec(7,1)
ax1 = fig.add_subplot(gs[:-2])
ax2 = fig.add_subplot(gs[-2], sharex=ax1)
ax3 = fig.add_subplot(gs[-1], sharex=ax1)
im = ax1.matshow(model.states_list[0].expected_states.T, aspect='auto')
ax1.set_xticks([])
ax1.set_yticks([])
ax2.matshow(model.states_list[0].expected_states.argmax(1)[None,:], aspect='auto')
ax2.set_xticks([])
ax2.set_yticks([])
ax3.matshow(labels[None,:], aspect='auto')
ax3.set_xticks([])
ax3.set_yticks([])
plt.show()
| from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from pyhsmm.basic.distributions import Regression, Gaussian, PoissonDuration
from autoregressive.distributions import AutoRegression
from pyhsmm.util.text import progprint_xrange
from pyslds.models import HMMSLDS
np.random.seed(0)
###################
# generate data #
###################
import autoregressive
As = [np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
for alpha, theta in ((0.95,0.1), (0.95,-0.1), (1., 0.))]
truemodel = autoregressive.models.ARHSMM(
alpha=4.,init_state_concentration=4.,
obs_distns=[AutoRegression(A=A,sigma=0.05*np.eye(2)) for A in As],
dur_distns=[PoissonDuration(alpha_0=5*50,beta_0=5) for _ in As])
truemodel.prefix = np.array([[0.,3.]])
data, labels = truemodel.generate(1000)
data = data[truemodel.nlags:]
plt.figure()
plt.plot(data[:,0],data[:,1],'bx-')
#################
# build model #
#################
Nmax = 10
P = 2
D = data.shape[1]
dynamics_distns = [
AutoRegression(
A=np.eye(P),sigma=np.eye(P),
nu_0=3,S_0=3.*np.eye(P),M_0=np.eye(P),K_0=10.*np.eye(P))
for _ in xrange(Nmax)]
emission_distns = [
Regression(
A=np.eye(D),sigma=0.05*np.eye(D),
nu_0=5.,S_0=np.eye(P),M_0=np.eye(P),K_0=10.*np.eye(P))
for _ in xrange(Nmax)]
init_dynamics_distns = [
Gaussian(nu_0=4,sigma_0=4.*np.eye(P),mu_0=np.zeros(P),kappa_0=0.1)
for _ in xrange(Nmax)]
model = HMMSLDS(
dynamics_distns=dynamics_distns,
emission_distns=emission_distns,
init_dynamics_distns=init_dynamics_distns,
alpha=3.,init_state_distn='uniform')
model.add_data(data)
model.resample_states()
for _ in progprint_xrange(10):
model.resample_model()
model.states_list[0]._init_mf_from_gibbs()
####################
# run mean field #
####################
# plt.figure()
# vlbs = [model.meanfield_coordinate_descent_step() for _ in progprint_xrange(50)]
# plt.plot(vlbs)
for _ in progprint_xrange(50):
model.meanfield_coordinate_descent_step(compute_vlb=False)
import matplotlib.gridspec as gridspec
fig = plt.figure(figsize=(9,3))
gs = gridspec.GridSpec(7,1)
ax1 = fig.add_subplot(gs[:-2])
ax2 = fig.add_subplot(gs[-2], sharex=ax1)
ax3 = fig.add_subplot(gs[-1], sharex=ax1)
im = ax1.matshow(model.states_list[0].expected_states.T, aspect='auto')
ax1.set_xticks([])
ax1.set_yticks([])
ax2.matshow(model.states_list[0].expected_states.argmax(1)[None,:], aspect='auto')
ax2.set_xticks([])
ax2.set_yticks([])
ax3.matshow(labels[None,:], aspect='auto')
ax3.set_xticks([])
ax3.set_yticks([])
plt.show()
| Python | 0.000001 |
db69d08b61f83703fc40ef7273cba1b2e0b825a3 | stop checking if an entry already exists when polling | feeds/tasks.py | feeds/tasks.py | import time
import feedparser
from celery import task
from feeds.models import Feed, Entry
from django.core.exceptions import ObjectDoesNotExist
from profiles.models import UserProfile, UserEntryDetail
def poll_feed(feed):
parser = feedparser.parse(feed.link)
# Add entries from feed
entries = parser.entries
for entry in entries:
published = time.strftime('%Y-%m-%d %H:%M', entry.published_parsed)
entry_obj, _ = Entry.objects.get_or_create(feed=feed,
title=entry.title,
link=entry.link,
published=published)
subscribers = UserProfile.objects.filter(feeds=feed)
for profile in subscribers:
if not UserEntryDetail.objects.filter(entry=entry_obj,
profile=profile)\
.exists():
UserEntryDetail(entry=entry_obj,
profile=profile,
read=False).save()
@task
def poll_all_feeds():
feeds = Feed.objects.all()
for feed in feeds:
poll_feed(feed)
| import time
import feedparser
from celery import task
from feeds.models import Feed, Entry
from django.core.exceptions import ObjectDoesNotExist
from profiles.models import UserProfile, UserEntryDetail
def poll_feed(feed):
parser = feedparser.parse(feed.link)
# Add entries from feed
entries = parser.entries
for entry in entries:
try:
Entry.objects.get(link=entry.link)
except ObjectDoesNotExist:
pass
else:
continue
published = time.strftime('%Y-%m-%d %H:%M', entry.published_parsed)
entry_obj, _ = Entry.objects.get_or_create(feed=feed,
title=entry.title,
link=entry.link,
published=published)
subscribers = UserProfile.objects.filter(feeds=feed)
for profile in subscribers:
if not UserEntryDetail.objects.filter(entry=entry_obj,
profile=profile)\
.exists():
UserEntryDetail(entry=entry_obj,
profile=profile,
read=False).save()
@task
def poll_all_feeds():
feeds = Feed.objects.all()
for feed in feeds:
poll_feed(feed)
| Python | 0 |
81806fe89b9c4a364d373020bd56ee1a396a5858 | Add automatic escaping of the settings docstring for Windows' sake in py3k | evennia/game_template/server/conf/settings.py | evennia/game_template/server/conf/settings.py | r"""
Evennia settings file.
The available options are found in the default settings file found
here:
{settings_default}
Remember:
Don't copy more from the default file than you actually intend to
change; this will make sure that you don't overload upstream updates
unnecessarily.
When changing a setting requiring a file system path (like
path/to/actual/file.py), use GAME_DIR and EVENNIA_DIR to reference
your game folder and the Evennia library folders respectively. Python
paths (path.to.module) should be given relative to the game's root
folder (typeclasses.foo) whereas paths within the Evennia library
needs to be given explicitly (evennia.foo).
If you want to share your game dir, including its settings, you can
put secret game- or server-specific settings in secret_settings.py.
"""
# Use the defaults from Evennia unless explicitly overridden
from evennia.settings_default import *
######################################################################
# Evennia base server config
######################################################################
# This is the name of your game. Make it catchy!
SERVERNAME = {servername}
# Server ports. If enabled and marked as "visible", the port
# should be visible to the outside world on a production server.
# Note that there are many more options available beyond these.
# Telnet ports. Visible.
TELNET_ENABLED = True
TELNET_PORTS = [4000]
# (proxy, internal). Only proxy should be visible.
WEBSERVER_ENABLED = True
WEBSERVER_PORTS = [(4001, 4002)]
# Telnet+SSL ports, for supporting clients. Visible.
SSL_ENABLED = False
SSL_PORTS = [4003]
# SSH client ports. Requires crypto lib. Visible.
SSH_ENABLED = False
SSH_PORTS = [4004]
# Websocket-client port. Visible.
WEBSOCKET_CLIENT_ENABLED = True
WEBSOCKET_CLIENT_PORT = 4005
# Internal Server-Portal port. Not visible.
AMP_PORT = 4006
######################################################################
# Settings given in secret_settings.py override those in this file.
######################################################################
try:
from server.conf.secret_settings import *
except ImportError:
print("secret_settings.py file not found or failed to import.")
| """
Evennia settings file.
The available options are found in the default settings file found
here:
{settings_default}
Remember:
Don't copy more from the default file than you actually intend to
change; this will make sure that you don't overload upstream updates
unnecessarily.
When changing a setting requiring a file system path (like
path/to/actual/file.py), use GAME_DIR and EVENNIA_DIR to reference
your game folder and the Evennia library folders respectively. Python
paths (path.to.module) should be given relative to the game's root
folder (typeclasses.foo) whereas paths within the Evennia library
needs to be given explicitly (evennia.foo).
If you want to share your game dir, including its settings, you can
put secret game- or server-specific settings in secret_settings.py.
"""
# Use the defaults from Evennia unless explicitly overridden
from evennia.settings_default import *
######################################################################
# Evennia base server config
######################################################################
# This is the name of your game. Make it catchy!
SERVERNAME = {servername}
# Server ports. If enabled and marked as "visible", the port
# should be visible to the outside world on a production server.
# Note that there are many more options available beyond these.
# Telnet ports. Visible.
TELNET_ENABLED = True
TELNET_PORTS = [4000]
# (proxy, internal). Only proxy should be visible.
WEBSERVER_ENABLED = True
WEBSERVER_PORTS = [(4001, 4002)]
# Telnet+SSL ports, for supporting clients. Visible.
SSL_ENABLED = False
SSL_PORTS = [4003]
# SSH client ports. Requires crypto lib. Visible.
SSH_ENABLED = False
SSH_PORTS = [4004]
# Websocket-client port. Visible.
WEBSOCKET_CLIENT_ENABLED = True
WEBSOCKET_CLIENT_PORT = 4005
# Internal Server-Portal port. Not visible.
AMP_PORT = 4006
######################################################################
# Settings given in secret_settings.py override those in this file.
######################################################################
try:
from server.conf.secret_settings import *
except ImportError:
print("secret_settings.py file not found or failed to import.")
| Python | 0 |
a85f6f86522dbb984818defc0d5c3cee049f1341 | add simple async post support | eventbus/eventbus.py | eventbus/eventbus.py | __author__ = 'Xsank'
import inspect
from multiprocessing.pool import ThreadPool
from listener import Listener
from exception import RegisterError
from exception import UnregisterError
class EventBus(object):
def __init__(self,pool_size=4):
self.listeners=dict()
self.pool=ThreadPool(pool_size)
def register(self,listener):
if not isinstance(listener,Listener):
raise RegisterError
self.listeners[listener.__class__.__name__]=listener
def unregister(self,listener):
try:
self.listeners.pop(listener.__class__.__name__)
except Exception:
raise UnregisterError
def post(self,event):
for listener in self.listeners.values():
for name,func in inspect.getmembers(listener,predicate=inspect.ismethod):
func(event)
def async_post(self,event):
self.pool.map(self.post,(event,))
def destroy(self):
self.listeners.clear()
self.pool.close() | __author__ = 'Xsank'
import inspect
from listener import Listener
from exception import RegisterError
from exception import UnregisterError
class EventBus(object):
def __init__(self):
self.listeners=dict()
def register(self,listener):
if not isinstance(listener,Listener):
raise RegisterError
self.listeners[listener.__class__.__name__]=listener
def unregister(self,listener):
try:
self.listeners.pop(listener.__class__.__name__)
except Exception:
raise UnregisterError
def post(self,event):
for listener in self.listeners.values():
for name,func in inspect.getmembers(listener,predicate=inspect.ismethod):
func(event)
def destroy(self):
self.listeners.clear() | Python | 0 |
30e00089247b314e82bc7792ac6f9641cd632bbd | Bump to dev. | eventlet/__init__.py | eventlet/__init__.py | version_info = (0, 9, 16, "dev")
__version__ = ".".join(map(str, version_info))
try:
from eventlet import greenthread
from eventlet import greenpool
from eventlet import queue
from eventlet import timeout
from eventlet import patcher
from eventlet import convenience
import greenlet
sleep = greenthread.sleep
spawn = greenthread.spawn
spawn_n = greenthread.spawn_n
spawn_after = greenthread.spawn_after
kill = greenthread.kill
Timeout = timeout.Timeout
with_timeout = timeout.with_timeout
GreenPool = greenpool.GreenPool
GreenPile = greenpool.GreenPile
Queue = queue.Queue
import_patched = patcher.import_patched
monkey_patch = patcher.monkey_patch
connect = convenience.connect
listen = convenience.listen
serve = convenience.serve
StopServe = convenience.StopServe
wrap_ssl = convenience.wrap_ssl
getcurrent = greenlet.greenlet.getcurrent
# deprecated
TimeoutError = timeout.Timeout
exc_after = greenthread.exc_after
call_after_global = greenthread.call_after_global
except ImportError, e:
# This is to make Debian packaging easier, it ignores import
# errors of greenlet so that the packager can still at least
# access the version. Also this makes easy_install a little quieter
if 'greenlet' not in str(e):
# any other exception should be printed
import traceback
traceback.print_exc()
| version_info = (0, 9, 15)
__version__ = ".".join(map(str, version_info))
try:
from eventlet import greenthread
from eventlet import greenpool
from eventlet import queue
from eventlet import timeout
from eventlet import patcher
from eventlet import convenience
import greenlet
sleep = greenthread.sleep
spawn = greenthread.spawn
spawn_n = greenthread.spawn_n
spawn_after = greenthread.spawn_after
kill = greenthread.kill
Timeout = timeout.Timeout
with_timeout = timeout.with_timeout
GreenPool = greenpool.GreenPool
GreenPile = greenpool.GreenPile
Queue = queue.Queue
import_patched = patcher.import_patched
monkey_patch = patcher.monkey_patch
connect = convenience.connect
listen = convenience.listen
serve = convenience.serve
StopServe = convenience.StopServe
wrap_ssl = convenience.wrap_ssl
getcurrent = greenlet.greenlet.getcurrent
# deprecated
TimeoutError = timeout.Timeout
exc_after = greenthread.exc_after
call_after_global = greenthread.call_after_global
except ImportError, e:
# This is to make Debian packaging easier, it ignores import
# errors of greenlet so that the packager can still at least
# access the version. Also this makes easy_install a little quieter
if 'greenlet' not in str(e):
# any other exception should be printed
import traceback
traceback.print_exc()
| Python | 0 |
22f373c0e6ef3a2c7bfb128ad014af245f113ea9 | Add default serial baudrate. | robus/io/serial_io.py | robus/io/serial_io.py | import serial as _serial
from serial.tools.list_ports import comports
from . import IOHandler
class Serial(IOHandler):
@classmethod
def is_host_compatible(cls, host):
available_host = (p.device for p in comports())
return host in available_host
def __init__(self, host, baudrate=57600):
self._serial = _serial.Serial(host, baudrate)
def recv(self):
return self._serial.readline()
def write(self, data):
self._serial.write(data + '\r'.encode())
| import serial as _serial
from serial.tools.list_ports import comports
from . import IOHandler
class Serial(IOHandler):
@classmethod
def is_host_compatible(cls, host):
available_host = (p.device for p in comports())
return host in available_host
def __init__(self, host, baudrate):
self._serial = _serial.Serial(host, baudrate)
def recv(self):
return self._serial.readline()
def write(self, data):
self._serial.write(data + '\r'.encode())
| Python | 0 |
e0024790be3a85e529c93397500e4f736c82a5ef | Fix the runner for Python 2. | src/calmjs/parse/tests/test_testing.py | src/calmjs/parse/tests/test_testing.py | # -*- coding: utf-8 -*-
import unittest
from calmjs.parse.testing.util import build_equality_testcase
from calmjs.parse.testing.util import build_exception_testcase
def run(self):
"""
A dummy run method.
"""
class BuilderEqualityTestCase(unittest.TestCase):
def test_build_equality_testcase(self):
DummyTestCase = build_equality_testcase('DummyTestCase', int, [
('str_to_int_pass', '1', 1),
('str_to_int_fail', '2', 1),
('str_to_int_exception', 'z', 1),
])
DummyTestCase.runTest = run
testcase = DummyTestCase()
testcase.test_str_to_int_pass()
with self.assertRaises(AssertionError):
testcase.test_str_to_int_fail()
with self.assertRaises(ValueError):
testcase.test_str_to_int_exception()
def test_build_equality_testcase_flag_dupe_labels(self):
with self.assertRaises(ValueError):
build_equality_testcase('DummyTestCase', int, [
('str_to_int_dupe', '1', 1),
('str_to_int_dupe', '2', 2),
])
class BuilderExceptionTestCase(unittest.TestCase):
def test_build_exception_testcase(self):
FailTestCase = build_exception_testcase(
'FailTestCase', int, [
('str_to_int_fail1', 'hello'),
('str_to_int_fail2', 'goodbye'),
('str_to_int_fail3', '1'),
],
ValueError,
)
FailTestCase.runTest = run
testcase = FailTestCase()
# ValueError should have been caught.
testcase.test_str_to_int_fail1()
testcase.test_str_to_int_fail2()
# Naturally, the final test will not raise it.
with self.assertRaises(AssertionError):
testcase.test_str_to_int_fail3()
| # -*- coding: utf-8 -*-
import unittest
from calmjs.parse.testing.util import build_equality_testcase
from calmjs.parse.testing.util import build_exception_testcase
class BuilderEqualityTestCase(unittest.TestCase):
def test_build_equality_testcase(self):
DummyTestCase = build_equality_testcase('DummyTestCase', int, [
('str_to_int_pass', '1', 1),
('str_to_int_fail', '2', 1),
('str_to_int_exception', 'z', 1),
])
testcase = DummyTestCase()
testcase.test_str_to_int_pass()
with self.assertRaises(AssertionError):
testcase.test_str_to_int_fail()
with self.assertRaises(ValueError):
testcase.test_str_to_int_exception()
def test_build_equality_testcase_flag_dupe_labels(self):
with self.assertRaises(ValueError):
build_equality_testcase('DummyTestCase', int, [
('str_to_int_dupe', '1', 1),
('str_to_int_dupe', '2', 2),
])
class BuilderExceptionTestCase(unittest.TestCase):
def test_build_exception_testcase(self):
FailTestCase = build_exception_testcase(
'FailTestCase', int, [
('str_to_int_fail1', 'hello'),
('str_to_int_fail2', 'goodbye'),
('str_to_int_fail3', '1'),
],
ValueError,
)
testcase = FailTestCase()
# ValueError should have been caught.
testcase.test_str_to_int_fail1()
testcase.test_str_to_int_fail2()
# Naturally, the final test will not raise it.
with self.assertRaises(AssertionError):
testcase.test_str_to_int_fail3()
| Python | 0.001551 |
d926c984e895b68ad0cc0383926451c0d7249512 | Fix use of deprecated find_module | astropy/tests/tests/test_imports.py | astropy/tests/tests/test_imports.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pkgutil
def test_imports():
"""
This just imports all modules in astropy, making sure they don't have any
dependencies that sneak through
"""
def onerror(name):
# We should raise any legitimate error that occurred, but not
# any warnings which happen to be caught because of our pytest
# settings (e.g., DeprecationWarning).
try:
raise
except Warning:
pass
for imper, nm, ispkg in pkgutil.walk_packages(['astropy'], 'astropy.',
onerror=onerror):
imper.find_spec(nm)
def test_toplevel_namespace():
import astropy
d = dir(astropy)
assert 'os' not in d
assert 'log' in d
assert 'test' in d
assert 'sys' not in d
| # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pkgutil
def test_imports():
"""
This just imports all modules in astropy, making sure they don't have any
dependencies that sneak through
"""
def onerror(name):
# We should raise any legitimate error that occurred, but not
# any warnings which happen to be caught because of our pytest
# settings (e.g., DeprecationWarning).
try:
raise
except Warning:
pass
for imper, nm, ispkg in pkgutil.walk_packages(['astropy'], 'astropy.',
onerror=onerror):
imper.find_module(nm)
def test_toplevel_namespace():
import astropy
d = dir(astropy)
assert 'os' not in d
assert 'log' in d
assert 'test' in d
assert 'sys' not in d
| Python | 0.000002 |
8864d0f7ea909bebda68b28ad53a312c10af581e | Update ClientCredentials tutorial to use Python3 | oauth-client-credentials/python/script.py | oauth-client-credentials/python/script.py | import base64, requests, sys
print("-----------------------------------------------")
print("- PureCloud Python Client Credentials Example -")
print("-----------------------------------------------")
client_id = "CLIENT_ID"
client_secret = "CLIENT_SECRET"
# Base64 encode the client ID and client secret
authorization = base64.b64encode(bytes(client_id + ":" + client_secret, "ISO-8859-1")).decode("ascii")
# Prepare for POST /oauth/token request
request_headers = {
"Authorization": f"Basic {authorization}",
"Content-Type": "application/x-www-form-urlencoded"
}
request_body = {
"grant_type": "client_credentials"
}
# Get token
response = requests.post("https://login.mypurecloud.com/oauth/token", data=request_body, headers=request_headers)
# Check response
if response.status_code == 200:
print("Got token")
else:
print(f"Failure: { str(response.status_code) } - { response.reason }")
sys.exit(response.status_code)
# Get JSON response body
response_json = response.json()
# Prepare for GET /api/v2/authorization/roles request
requestHeaders = {
"Authorization": f"{ response_json['token_type'] } { response_json['access_token']}"
}
# Get roles
response = requests.get("https://api.mypurecloud.com/api/v2/authorization/roles", headers=requestHeaders)
# Check response
if response.status_code == 200:
print("Got roles")
else:
print(f"Failure: { str(response.status_code) } - { response.reason }")
sys.exit(response.status_code)
# Print roles
print("\nRoles:")
for entity in response.json()["entities"]:
print(f" { entity['name'] }")
print("\nDone")
| import base64, requests, sys
print '-----------------------------------------------'
print '- PureCloud Python Client Credentials Example -'
print '-----------------------------------------------'
clientId = '7de3af06-c0b3-4f9b-af45-72f4a1403797'
clientSecret = '1duphi_YtswNjN2GXOg_APY-KKTmnYXvfNj7N8GUhnM'
# Base64 encode the client ID and client secret
authorization = base64.b64encode(clientId + ':' + clientSecret)
# Prepare for POST /oauth/token request
requestHeaders = {
'Authorization': 'Basic ' + authorization,
'Content-Type': 'application/x-www-form-urlencoded'
}
requestBody = {
'grant_type': 'client_credentials'
}
# Get token
response = requests.post('https://login.mypurecloud.com/oauth/token', data=requestBody, headers=requestHeaders)
# Check response
if response.status_code == 200:
print 'Got token'
else:
print 'Failure: ' + str(response.status_code) + ' - ' + response.reason
sys.exit(response.status_code)
# Get JSON response body
responseJson = response.json()
# Prepare for GET /api/v2/authorization/roles request
requestHeaders = {
'Authorization': responseJson['token_type'] + ' ' + responseJson['access_token']
}
# Get roles
response = requests.get('https://api.mypurecloud.com/api/v2/authorization/roles', headers=requestHeaders)
# Check response
if response.status_code == 200:
print 'Got roles'
else:
print 'Failure: ' + str(response.status_code) + ' - ' + response.reason
sys.exit(response.status_code)
# Print roles
print '\nRoles:'
for entity in response.json()['entities']:
print ' ' + entity['name']
print '\nDone' | Python | 0 |
c479c360d979d22182e787f74f5a74473fc41002 | Save sales ranges only when the forms data is changed | shoop/campaigns/admin_module/form_parts.py | shoop/campaigns/admin_module/form_parts.py | # This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from shoop.admin.form_part import FormPart, TemplatedFormDef
from shoop.campaigns.models import ContactGroupSalesRange
from shoop.core.models import Shop, ShopStatus
from shoop.core.models._contacts import PROTECTED_CONTACT_GROUP_IDENTIFIERS
class SalesRangesForm(forms.ModelForm):
class Meta:
model = ContactGroupSalesRange
fields = ["min_value", "max_value"]
labels = {
"min_value": _("Minimum value"),
"max_value": _("Maximum value")
}
help_texts = {
"max_value": _("Leave empty for no maximum")
}
def __init__(self, **kwargs):
super(SalesRangesForm, self).__init__(**kwargs)
class SalesRangesFormPart(FormPart):
priority = 3
name = "contact_group_sales_ranges"
form = SalesRangesForm
def __init__(self, request, object=None):
super(SalesRangesFormPart, self).__init__(request, object)
self.shops = Shop.objects.filter(status=ShopStatus.ENABLED)
def _get_form_name(self, shop):
return "%d-%s" % (shop.pk, self.name)
def get_form_defs(self):
if not self.object.pk or self.object.identifier in PROTECTED_CONTACT_GROUP_IDENTIFIERS:
return
for shop in self.shops:
instance, _ = ContactGroupSalesRange.objects.get_or_create(group=self.object, shop=shop)
yield TemplatedFormDef(
name=self._get_form_name(shop),
form_class=self.form,
template_name="shoop/campaigns/admin/sales_ranges_form_part.jinja",
required=False,
kwargs={"instance": instance}
)
def form_valid(self, form):
form_names = [self._get_form_name(shop) for shop in self.shops]
forms = [form.forms[name] for name in form_names if name in form.forms]
for form in forms:
if form.changed_data:
form.save()
| # This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from shoop.admin.form_part import FormPart, TemplatedFormDef
from shoop.campaigns.models import ContactGroupSalesRange
from shoop.core.models import Shop, ShopStatus
from shoop.core.models._contacts import PROTECTED_CONTACT_GROUP_IDENTIFIERS
class SalesRangesForm(forms.ModelForm):
class Meta:
model = ContactGroupSalesRange
fields = ["min_value", "max_value"]
labels = {
"min_value": _("Minimum value"),
"max_value": _("Maximum value")
}
help_texts = {
"max_value": _("Leave empty for no maximum")
}
def __init__(self, **kwargs):
super(SalesRangesForm, self).__init__(**kwargs)
class SalesRangesFormPart(FormPart):
priority = 3
name = "contact_group_sales_ranges"
form = SalesRangesForm
def __init__(self, request, object=None):
super(SalesRangesFormPart, self).__init__(request, object)
self.shops = Shop.objects.filter(status=ShopStatus.ENABLED)
def _get_form_name(self, shop):
return "%d-%s" % (shop.pk, self.name)
def get_form_defs(self):
if not self.object.pk or self.object.identifier in PROTECTED_CONTACT_GROUP_IDENTIFIERS:
return
for shop in self.shops:
instance, _ = ContactGroupSalesRange.objects.get_or_create(group=self.object, shop=shop)
yield TemplatedFormDef(
name=self._get_form_name(shop),
form_class=self.form,
template_name="shoop/campaigns/admin/sales_ranges_form_part.jinja",
required=False,
kwargs={"instance": instance}
)
def form_valid(self, form):
for shop in self.shops:
name = self._get_form_name(shop)
if name in form.forms:
form.forms[name].save()
| Python | 0 |
8b17411d16c22c7a28017f67c827c81e28535f41 | Fix rate_watcher locking error | sacad/rate_watcher.py | sacad/rate_watcher.py | """ This module provides a class with a context manager to help avoid overloading web servers. """
import asyncio
import logging
import os
import random
import sqlite3
import time
import urllib.parse
class AccessRateWatcher:
""" Access rate limiter, supporting concurrent access by threads and/or processes. """
def __init__(self, db_filepath, url, min_delay_between_accesses, *, jitter_range_ms=None, logger=logging.getLogger()):
self.domain = urllib.parse.urlsplit(url).netloc
self.min_delay_between_accesses = min_delay_between_accesses
self.jitter_range_ms = jitter_range_ms
self.logger = logger
os.makedirs(os.path.dirname(db_filepath), exist_ok=True)
self.connection = sqlite3.connect(db_filepath)
with self.connection:
self.connection.executescript("""CREATE TABLE IF NOT EXISTS access_timestamp (domain TEXT PRIMARY KEY,
timestamp FLOAT NOT NULL);""")
self.lock = None
async def waitAccessAsync(self):
""" Wait the needed time before sending a request to honor rate limit. """
if self.lock is None:
self.lock = asyncio.Lock()
async with self.lock:
while True:
last_access_ts = self.__getLastAccess()
if last_access_ts is not None:
now = time.time()
last_access_ts = last_access_ts[0]
time_since_last_access = now - last_access_ts
if time_since_last_access < self.min_delay_between_accesses:
time_to_wait = self.min_delay_between_accesses - time_since_last_access
if self.jitter_range_ms is not None:
time_to_wait += random.randint(*self.jitter_range_ms) / 1000
self.logger.debug("Sleeping for %.2fms because of rate limit for domain %s" % (time_to_wait * 1000,
self.domain))
await asyncio.sleep(time_to_wait)
access_time = time.time()
self.__access(access_time)
# now we should be good... except if another process did the same query at the same time
# the database serves as an atomic lock, query again to be sure the last row is the one
# we just inserted
last_access_ts = self.__getLastAccess()
if last_access_ts[0] == access_time:
break
def __getLastAccess(self):
with self.connection:
return self.connection.execute("""SELECT timestamp
FROM access_timestamp
WHERE domain = ?;""",
(self.domain,)).fetchone()
def __access(self, ts):
""" Record an API access. """
with self.connection:
self.connection.execute("INSERT OR REPLACE INTO access_timestamp (timestamp, domain) VALUES (?, ?)",
(ts, self.domain))
| """ This module provides a class with a context manager to help avoid overloading web servers. """
import asyncio
import logging
import os
import random
import sqlite3
import time
import urllib.parse
class AccessRateWatcher:
""" Access rate limiter, supporting concurrent access by threads and/or processes. """
def __init__(self, db_filepath, url, min_delay_between_accesses, *, jitter_range_ms=None, logger=logging.getLogger()):
self.domain = urllib.parse.urlsplit(url).netloc
self.min_delay_between_accesses = min_delay_between_accesses
self.jitter_range_ms = jitter_range_ms
self.logger = logger
os.makedirs(os.path.dirname(db_filepath), exist_ok=True)
self.connection = sqlite3.connect(db_filepath)
with self.connection:
self.connection.executescript("""CREATE TABLE IF NOT EXISTS access_timestamp (domain TEXT PRIMARY KEY,
timestamp FLOAT NOT NULL);""")
self.lock = asyncio.Lock()
async def waitAccessAsync(self):
""" Wait the needed time before sending a request to honor rate limit. """
async with self.lock:
while True:
last_access_ts = self.__getLastAccess()
if last_access_ts is not None:
now = time.time()
last_access_ts = last_access_ts[0]
time_since_last_access = now - last_access_ts
if time_since_last_access < self.min_delay_between_accesses:
time_to_wait = self.min_delay_between_accesses - time_since_last_access
if self.jitter_range_ms is not None:
time_to_wait += random.randint(*self.jitter_range_ms) / 1000
self.logger.debug("Sleeping for %.2fms because of rate limit for domain %s" % (time_to_wait * 1000,
self.domain))
await asyncio.sleep(time_to_wait)
access_time = time.time()
self.__access(access_time)
# now we should be good... except if another process did the same query at the same time
# the database serves as an atomic lock, query again to be sure the last row is the one
# we just inserted
last_access_ts = self.__getLastAccess()
if last_access_ts[0] == access_time:
break
def __getLastAccess(self):
with self.connection:
return self.connection.execute("""SELECT timestamp
FROM access_timestamp
WHERE domain = ?;""",
(self.domain,)).fetchone()
def __access(self, ts):
""" Record an API access. """
with self.connection:
self.connection.execute("INSERT OR REPLACE INTO access_timestamp (timestamp, domain) VALUES (?, ?)",
(ts, self.domain))
| Python | 0.000001 |
029e39edfb733a524d7ea4fc7f64fa93e81b9f53 | Add SACREBLEU_DIR and smart_open to imports (#73) | sacrebleu/__init__.py | sacrebleu/__init__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
__version__ = '1.4.5'
__description__ = 'Hassle-free computation of shareable, comparable, and reproducible BLEU scores'
from .sacrebleu import smart_open, corpus_bleu, corpus_chrf, sentence_bleu, sentence_chrf, compute_bleu,\
raw_corpus_bleu, BLEU, CHRF, DATASETS, TOKENIZERS, SACREBLEU_DIR
# more imports for backward compatibility
from .sacrebleu import ref_stats, bleu_signature, extract_ngrams, extract_char_ngrams, \
get_corpus_statistics, display_metric, get_sentence_statistics, download_test_set
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
__version__ = '1.4.5'
__description__ = 'Hassle-free computation of shareable, comparable, and reproducible BLEU scores'
from .sacrebleu import corpus_bleu, corpus_chrf, sentence_bleu, sentence_chrf, compute_bleu,\
raw_corpus_bleu, BLEU, CHRF, DATASETS, TOKENIZERS
# more imports for backward compatibility
from .sacrebleu import ref_stats, bleu_signature, extract_ngrams, extract_char_ngrams, \
get_corpus_statistics, display_metric, get_sentence_statistics, download_test_set
| Python | 0 |
09faf99eb775a36a70d03958a58bf1df8bbb1b93 | update for structure changes | salt/auth/__init__.py | salt/auth/__init__.py | '''
Salt's pluggable authentication system
This sysetm allows for authentication to be managed in a module pluggable way
so that any external authentication system can be used inside of Salt
'''
# 1. Create auth loader instance
# 2. Accept arguments as a dict
# 3. Verify with function introspection
# 4. Execute auth function
# 5. Cache auth token with relative data opts['token_dir']
# 6. Interface to verify tokens
# Import Python libs
import time
import logging
import random
#
# Import Salt libs
import salt.loader
import salt.utils
import salt.payload
log = logging.getLogger(__name__)
class LoadAuth(object):
'''
Wrap the authentication system to handle periphrial components
'''
def __init__(self, opts):
self.opts = opts
self.max_fail = 1.0
self.serial = salt.payload.Serial(opts)
self.auth = salt.loader.auth(opts)
def load_name(self, load):
'''
Return the primary name associate with the load, if an empty string
is returned then the load does not match the function
'''
if not 'eauth' in load:
return ''
fstr = '{0}.auth'.format(load['eauth'])
if not fstr in self.auth:
return ''
fcall = salt.utils.format_call(self.auth[fstr], load)
try:
return fcall['args'][0]
except IndexError:
return ''
def auth_call(self, load):
'''
Return the token and set the cache data for use
'''
if not 'fun' in load:
return False
fstr = '{0}.auth'.format(load['fun'])
if not fstr in self.auth:
return False
fcall = salt.utils.format_call(self.auth[fstr], load)
try:
if 'kwargs' in fcall:
return self.auth[fstr](*fcall['args'], **fcall['kwargs'])
else:
return self.auth[fstr](*fcall['args'])
except Exception as exc:
err = 'Authentication module threw an exception: {0}'.format(exc)
log.critical(err)
return False
return False
def time_auth(self, load):
'''
Make sure that all failures happen in the same amount of time
'''
start = time.time()
ret = self.auth_call(load)
if ret:
return ret
f_time = time.time() - start
if f_time > self.max_fail:
self.max_fail = f_time
deviation = self.max_time / 4
r_time = random.uniform(
self.max_time - deviation,
self.max_time + deviation
)
while start + r_time > time.time():
time.sleep(0.001)
return False
def mk_token(self, load):
'''
Run time_auth and create a token. Return False or the token
'''
ret = time_auth(load)
if ret is False:
return ret
tok = hashlib.md5(os.urandom(512)).hexdigest()
t_path = os.path.join(opts['token_dir'], tok)
while os.path.isfile(t_path):
tok = hashlib.md5(os.urandom(512)).hexdigest()
t_path = os.path.join(opts['token_dir'], tok)
fcall = salt.utils.format_call(self.auth[fstr], load)
tdata = {'start': time.time(),
'expire': time.time() + self.opts['token_expire'],
'name': fcall['args'][0],}
with open(t_path, 'w+') as fp_:
fp_.write(self.serial.dumps(tdata))
return tok
def get_tok(self, tok):
'''
Return the name associate with the token, or False ifthe token is not valid
'''
t_path = os.path.join(opts['token_dir'], tok)
if not os.path.isfile:
return False
with open(t_path, 'r') as fp_:
return self.serial.loads(fp_.read())
return False
| '''
Salt's pluggable authentication system
This sysetm allows for authentication to be managed in a module pluggable way
so that any external authentication system can be used inside of Salt
'''
# 1. Create auth loader instance
# 2. Accept arguments as a dict
# 3. Verify with function introspection
# 4. Execute auth function
# 5. Cache auth token with relative data opts['token_dir']
# 6. Interface to verify tokens
# Import Python libs
import time
import logging
import random
#
# Import Salt libs
import salt.loader
import salt.utils
import salt.payload
log = logging.getLogger(__name__)
class LoadAuth(object):
'''
Wrap the authentication system to handle periphrial components
'''
def __init__(self, opts):
self.opts = opts
self.max_fail = 1.0
self.serial = salt.payload.Serial(opts)
self.auth = salt.loader.auth(opts)
def load_name(self, load):
'''
Return the primary name associate with the load, if an empty string
is returned then the load does not match the function
'''
if not 'fun' in load:
return ''
fstr = '{0}.auth'.format(load['fun'])
if not fstr in self.auth:
return ''
fcall = salt.utils.format_call(self.auth[fstr], load)
try:
return fcall['args'][0]
except IndexError:
return ''
def auth_call(self, load):
'''
Return the token and set the cache data for use
'''
if not 'fun' in load:
return False
fstr = '{0}.auth'.format(load['fun'])
if not fstr in self.auth:
return False
fcall = salt.utils.format_call(self.auth[fstr], load)
try:
if 'kwargs' in fcall:
return self.auth[fstr](*fcall['args'], **fcall['kwargs'])
else:
return self.auth[fstr](*fcall['args'])
except Exception as exc:
err = 'Authentication module threw an exception: {0}'.format(exc)
log.critical(err)
return False
return False
def time_auth(self, load):
'''
Make sure that all failures happen in the same amount of time
'''
start = time.time()
ret = self.auth_call(load)
if ret:
return ret
f_time = time.time() - start
if f_time > self.max_fail:
self.max_fail = f_time
deviation = self.max_time / 4
r_time = random.uniform(
self.max_time - deviation,
self.max_time + deviation
)
while start + r_time > time.time():
time.sleep(0.001)
return False
def mk_token(self, load):
'''
Run time_auth and create a token. Return False or the token
'''
ret = time_auth(load)
if ret is False:
return ret
tok = hashlib.md5(os.urandom(512)).hexdigest()
t_path = os.path.join(opts['token_dir'], tok)
while os.path.isfile(t_path):
tok = hashlib.md5(os.urandom(512)).hexdigest()
t_path = os.path.join(opts['token_dir'], tok)
fcall = salt.utils.format_call(self.auth[fstr], load)
tdata = {'start': time.time(),
'expire': time.time() + self.opts['token_expire'],
'name': fcall['args'][0],}
with open(t_path, 'w+') as fp_:
fp_.write(self.serial.dumps(tdata))
return tok
def get_tok(self, tok):
'''
Return the name associate with the token, or False ifthe token is not valid
'''
t_path = os.path.join(opts['token_dir'], tok)
if not os.path.isfile:
return False
with open(t_path, 'r') as fp_:
return self.serial.loads(fp_.read())
return False
| Python | 0 |
cad9160baa6d3841910ad68b48a212c4475a62e1 | Fix contact list type filter | shuup/admin/modules/contacts/views/list.py | shuup/admin/modules/contacts/views/list.py | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db.models import Count, Q
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from shuup.admin.toolbar import NewActionButton, SettingsActionButton, Toolbar
from shuup.admin.utils.picotable import (
ChoicesFilter, Column, RangeFilter, TextFilter
)
from shuup.admin.utils.views import PicotableListView
from shuup.core.models import (
CompanyContact, Contact, ContactGroup, PersonContact
)
class ContactTypeFilter(ChoicesFilter):
def __init__(self):
super(ContactTypeFilter, self).__init__(choices=[("person", _("Person")), ("company", _("Company"))])
def filter_queryset(self, queryset, column, value, context):
if value == "_all":
return queryset
model_class = PersonContact
if value == "company":
model_class = CompanyContact
return queryset.instance_of(model_class)
class ContactListView(PicotableListView):
model = Contact
default_columns = [
Column("name", _(u"Name"), linked=True, filter_config=TextFilter()),
Column("type", _(u"Type"), display="get_type_display", sortable=False, filter_config=ContactTypeFilter()),
Column("email", _(u"Email"), filter_config=TextFilter()),
Column("phone", _(u"Phone"), filter_config=TextFilter()),
Column(
"is_active",
_(u"Active"),
filter_config=ChoicesFilter([(False, _("no")), (True, _("yes"))], default=True)
),
Column("n_orders", _(u"# Orders"), class_name="text-right", filter_config=RangeFilter(step=1)),
Column("groups", _("Groups"), filter_config=ChoicesFilter(ContactGroup.objects.all(), "groups"))
]
mass_actions = [
"shuup.admin.modules.contacts.mass_actions:EditContactsAction",
"shuup.admin.modules.contacts.mass_actions:EditContactGroupsAction",
]
def get_toolbar(self):
return Toolbar([
NewActionButton.for_model(
PersonContact, url=reverse("shuup_admin:contact.new") + "?type=person"),
NewActionButton.for_model(
CompanyContact, extra_css_class="btn-info", url=reverse("shuup_admin:contact.new") + "?type=company"),
SettingsActionButton.for_model(Contact, return_url="contact")
])
def get_queryset(self):
groups = self.get_filter().get("groups")
query = Q(groups__in=groups) if groups else Q()
return (
super(ContactListView, self).get_queryset()
.filter(query)
.annotate(n_orders=Count("customer_orders"))
.order_by("-created_on"))
def get_type_display(self, instance):
if isinstance(instance, PersonContact):
return _(u"Person")
elif isinstance(instance, CompanyContact):
return _(u"Company")
else:
return _(u"Contact")
def get_object_abstract(self, instance, item):
"""
:type instance: shuup.core.models.Contact
"""
bits = filter(None, [
item.get("type"),
_("Active") if instance.is_active else _("Inactive"),
_("Email: %s") % (instance.email or "\u2014"),
_("Phone: %s") % (instance.phone or "\u2014"),
_("%d orders") % instance.n_orders,
])
return [
{"text": instance.name or _("Contact"), "class": "header"},
{"text": ", ".join([force_text(bit) for bit in bits])}
]
| # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db.models import Count, Q
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from shuup.admin.toolbar import NewActionButton, SettingsActionButton, Toolbar
from shuup.admin.utils.picotable import (
ChoicesFilter, Column, RangeFilter, TextFilter
)
from shuup.admin.utils.views import PicotableListView
from shuup.core.models import (
CompanyContact, Contact, ContactGroup, PersonContact
)
class ContactTypeFilter(ChoicesFilter):
def __init__(self):
super(ContactTypeFilter, self).__init__(choices=[("person", _("Person")), ("company", _("Company"))])
def filter_queryset(self, queryset, column, value):
if value == "_all":
return queryset
model_class = PersonContact
if value == "company":
model_class = CompanyContact
return queryset.instance_of(model_class)
class ContactListView(PicotableListView):
model = Contact
default_columns = [
Column("name", _(u"Name"), linked=True, filter_config=TextFilter()),
Column("type", _(u"Type"), display="get_type_display", sortable=False, filter_config=ContactTypeFilter()),
Column("email", _(u"Email"), filter_config=TextFilter()),
Column("phone", _(u"Phone"), filter_config=TextFilter()),
Column(
"is_active",
_(u"Active"),
filter_config=ChoicesFilter([(False, _("no")), (True, _("yes"))], default=True)
),
Column("n_orders", _(u"# Orders"), class_name="text-right", filter_config=RangeFilter(step=1)),
Column("groups", _("Groups"), filter_config=ChoicesFilter(ContactGroup.objects.all(), "groups"))
]
mass_actions = [
"shuup.admin.modules.contacts.mass_actions:EditContactsAction",
"shuup.admin.modules.contacts.mass_actions:EditContactGroupsAction",
]
def get_toolbar(self):
return Toolbar([
NewActionButton.for_model(
PersonContact, url=reverse("shuup_admin:contact.new") + "?type=person"),
NewActionButton.for_model(
CompanyContact, extra_css_class="btn-info", url=reverse("shuup_admin:contact.new") + "?type=company"),
SettingsActionButton.for_model(Contact, return_url="contact")
])
def get_queryset(self):
groups = self.get_filter().get("groups")
query = Q(groups__in=groups) if groups else Q()
return (
super(ContactListView, self).get_queryset()
.filter(query)
.annotate(n_orders=Count("customer_orders"))
.order_by("-created_on"))
def get_type_display(self, instance):
if isinstance(instance, PersonContact):
return _(u"Person")
elif isinstance(instance, CompanyContact):
return _(u"Company")
else:
return _(u"Contact")
def get_object_abstract(self, instance, item):
"""
:type instance: shuup.core.models.Contact
"""
bits = filter(None, [
item.get("type"),
_("Active") if instance.is_active else _("Inactive"),
_("Email: %s") % (instance.email or "\u2014"),
_("Phone: %s") % (instance.phone or "\u2014"),
_("%d orders") % instance.n_orders,
])
return [
{"text": instance.name or _("Contact"), "class": "header"},
{"text": ", ".join([force_text(bit) for bit in bits])}
]
| Python | 0 |
9d9737f765416305dd2adbd816b447de5c5eae7c | add version 8.0.6.0 (#14003) | var/spack/repos/builtin/packages/ibm-java/package.py | var/spack/repos/builtin/packages/ibm-java/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import platform
import os
class IbmJava(Package):
"""Binary distribution of the IBM Java Software Development Kit
for big and little-endian powerpc (power7, 8 and 9)."""
homepage = "https://developer.ibm.com/javasdk/"
# Note: IBM is fairly aggressive about taking down old versions,
# so we may need to update this frequently. Also, old revs may
# not be available for download.
version_list = [
('8.0.6.0', 'ppc64', 'e142746a83e47ab91d71839d5776f112ed154ae180d0628e3f10886151dad710'),
('8.0.6.0', 'ppc64le', '18c2eccf99225e6e7643141d8da4110cacc39f2fa00149fc26341d2272cc0102'),
('8.0.5.30', 'ppc64', 'd39ce321bdadd2b2b829637cacf9c1c0d90235a83ff6e7dcfa7078faca2f212f'),
('8.0.5.30', 'ppc64le', 'dec6434d926861366c135aac6234fc28b3e7685917015aa3a3089c06c3b3d8f0'),
]
# There are separate tar files for big and little-endian machine
# types. And no, this won't work cross platform.
for (ver, mach, sha) in version_list:
if mach == platform.machine():
version(ver, sha256=sha, expand=False)
provides('java@8')
conflicts('target=x86_64:', msg='ibm-java is only available for ppc64 and ppc64le')
# This assumes version numbers are 4-tuples: 8.0.5.30
def url_for_version(self, version):
# Convert 8.0.5.30 to 8.0-5.30 for the file name.
dash = '{0}.{1}-{2}.{3}'.format(*(str(version).split('.')))
url = ('http://public.dhe.ibm.com/ibmdl/export/pub/systems/cloud'
'/runtimes/java/{0}/linux/{1}/ibm-java-sdk-{2}-{1}'
'-archive.bin').format(version, platform.machine(), dash)
return url
@property
def home(self):
return self.prefix
@property
def libs(self):
return find_libraries(['libjvm'], root=self.home, recursive=True)
def setup_run_environment(self, env):
env.set('JAVA_HOME', self.home)
def setup_dependent_build_environment(self, env, dependent_spec):
env.set('JAVA_HOME', self.home)
def setup_dependent_package(self, module, dependent_spec):
self.spec.home = self.home
def install(self, spec, prefix):
archive = os.path.basename(self.stage.archive_file)
# The archive.bin file is quite fussy and doesn't work as a
# symlink.
if os.path.islink(archive):
targ = os.readlink(archive)
os.unlink(archive)
copy(targ, archive)
# The properties file is how we avoid an interactive install.
prop = 'properties'
with open(prop, 'w') as file:
file.write('INSTALLER_UI=silent\n')
file.write('USER_INSTALL_DIR=%s\n' % prefix)
file.write('LICENSE_ACCEPTED=TRUE\n')
# Running the archive file installs everything.
set_executable(archive)
inst = Executable(join_path('.', archive))
inst('-f', prop)
return
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import platform
import os
class IbmJava(Package):
"""Binary distribution of the IBM Java Software Development Kit
for big and little-endian powerpc (power7, 8 and 9)."""
homepage = "https://developer.ibm.com/javasdk/"
# There are separate tar files for big and little-endian machine
# types. When we add more versions, then turn this into a mapping
# from version and machine type to sha256sum.
mach = platform.machine() if platform.machine() == 'ppc64' else 'ppc64le'
if mach == 'ppc64le':
sha = 'dec6434d926861366c135aac6234fc28b3e7685917015aa3a3089c06c3b3d8f0'
else:
sha = 'd39ce321bdadd2b2b829637cacf9c1c0d90235a83ff6e7dcfa7078faca2f212f'
version('8.0.5.30', sha256=sha, expand=False)
provides('java@8')
conflicts('target=x86_64:', msg='ibm-java is only available for ppc64 and ppc64le')
# This assumes version numbers are 4-tuples: 8.0.5.30
def url_for_version(self, version):
# Convert 8.0.5.30 to 8.0-5.30 for the file name.
dash = '{0}.{1}-{2}.{3}'.format(*(str(version).split('.')))
url = ('http://public.dhe.ibm.com/ibmdl/export/pub/systems/cloud'
'/runtimes/java/{0}/linux/{1}/ibm-java-sdk-{2}-{1}'
'-archive.bin').format(version, self.mach, dash)
return url
@property
def home(self):
return self.prefix
@property
def libs(self):
return find_libraries(['libjvm'], root=self.home, recursive=True)
def setup_run_environment(self, env):
env.set('JAVA_HOME', self.home)
def setup_dependent_build_environment(self, env, dependent_spec):
env.set('JAVA_HOME', self.home)
def setup_dependent_package(self, module, dependent_spec):
self.spec.home = self.home
def install(self, spec, prefix):
archive = os.path.basename(self.stage.archive_file)
# The archive.bin file is quite fussy and doesn't work as a
# symlink.
if os.path.islink(archive):
targ = os.readlink(archive)
os.unlink(archive)
copy(targ, archive)
# The properties file is how we avoid an interactive install.
prop = 'properties'
with open(prop, 'w') as file:
file.write('INSTALLER_UI=silent\n')
file.write('USER_INSTALL_DIR=%s\n' % prefix)
file.write('LICENSE_ACCEPTED=TRUE\n')
# Running the archive file installs everything.
set_executable(archive)
inst = Executable(join_path('.', archive))
inst('-f', prop)
return
| Python | 0 |
9fdcc147a754e3b4f85decb858066610652bd713 | update version. (#5439) | var/spack/repos/builtin/packages/r-tibble/package.py | var/spack/repos/builtin/packages/r-tibble/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RTibble(RPackage):
"""Provides a 'tbl_df' class that offers better checking and printing
capabilities than traditional data frames."""
homepage = "https://github.com/tidyverse/tibble"
url = "https://cran.rstudio.com/src/contrib/tibble_1.3.4.tar.gz"
list_url = homepage
version('1.3.4', '298e81546f999fb0968625698511b8d3')
version('1.2', 'bdbc3d67aa16860741add6d6ec20ea13')
version('1.1', '2fe9f806109d0b7fadafb1ffafea4cb8')
depends_on('r@3.1.2:')
depends_on('r-assertthat', type=('build', 'run'))
depends_on('r-lazyeval@0.1.10:', type=('build', 'run'))
depends_on('r-rcpp', type=('build', 'run'))
| ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RTibble(RPackage):
"""Provides a 'tbl_df' class that offers better checking and printing
capabilities than traditional data frames."""
homepage = "https://github.com/hadley/tibble"
url = "https://cran.r-project.org/src/contrib/tibble_1.2.tar.gz"
version('1.2', 'bdbc3d67aa16860741add6d6ec20ea13')
version('1.1', '2fe9f806109d0b7fadafb1ffafea4cb8')
depends_on('r@3.1.2:')
depends_on('r-assertthat', type=('build', 'run'))
depends_on('r-lazyeval@0.1.10:', type=('build', 'run'))
depends_on('r-rcpp', type=('build', 'run'))
| Python | 0 |
802926f151d9a1337c2a07adbc485b6193e91733 | Add template string calling to the state module | salt/modules/state.py | salt/modules/state.py | '''
Control the state system on the minion
'''
# Import Python modules
import os
# Import salt modules
import salt.state
def low(data):
'''
Execute a single low data call
'''
st_ = salt.state.State(__opts__)
err = st_.verify_data(data)
if err:
return err
return st_.call(data)
def high(data):
'''
Execute the compound calls stored in a single set of high data
'''
st_ = salt.state.State(__opts__)
return st_.call_high(data)
def template(tem):
'''
Execute the information stored in a template file on the minion
'''
st_ = salt.state.State(__opts__)
return st_.call_template(tem)
def template_str(tem):
'''
Execute the information stored in a template file on the minion
'''
st_ = salt.state.State(__opts__)
return st_.call_template_str(tem)
| '''
Control the state system on the minion
'''
# Import Python modules
import os
# Import salt modules
import salt.state
def low(data):
'''
Execute a single low data call
'''
st_ = salt.state.State(__opts__)
err = st_.verify_data(data)
if err:
return err
return st_.call(data)
def high(data):
'''
Execute the compound calls stored in a single set of high data
'''
st_ = salt.state.State(__opts__)
return st_.call_high(data)
def template(tem):
'''
Execute the information stored in a template file on the minion
'''
st_ = salt.state.State(__opts__)
return st_.call_template(tem)
| Python | 0.000001 |
83fea631f1765d4641cde8af2c5c931b22e4ee33 | extend trilinos | var/spack/repos/builtin/packages/trilinos/package.py | var/spack/repos/builtin/packages/trilinos/package.py | from spack import *
import os
class Trilinos(Package):
"""
The Trilinos Project is an effort to develop algorithms and enabling technologies within an object-oriented
software framework for the solution of large-scale, complex multi-physics engineering and scientific problems.
A unique design feature of Trilinos is its focus on packages.
"""
homepage = "https://trilinos.org/"
url = "http://trilinos.csbsju.edu/download/files/trilinos-12.2.1-Source.tar.gz"
version('12.6.1', 'adcf2d3aab74cdda98f88fee19cd1442604199b0515ee3da4d80cbe8f37d00e4')
version('12.4.2', '7c830f7f0f68b8ad324690603baf404e')
version('12.2.1', '6161926ea247863c690e927687f83be9')
version('12.0.1', 'bd99741d047471e127b8296b2ec08017')
version('11.14.3', '2f4f83f8333e4233c57d0f01c4b57426')
version('11.14.2', 'a43590cf896c677890d75bfe75bc6254')
version('11.14.1', '40febc57f76668be8b6a77b7607bb67f')
variant('shared', default=True, description='Enables the build of shared libraries')
variant('debug', default=False, description='Builds a debug version of the libraries')
# Everything should be compiled with -fpic
depends_on('blas')
depends_on('lapack')
depends_on('boost')
depends_on('matio')
depends_on('glm')
depends_on('swig')
# MPI related dependencies
depends_on('mpi')
depends_on('netcdf+mpi')
depends_on('python') # Needs py-numpy activated
def install(self, spec, prefix):
options = []
options.extend(std_cmake_args)
options.extend(['-DTrilinos_ENABLE_ALL_PACKAGES:BOOL=ON',
'-DTrilinos_ENABLE_ALL_OPTIONAL_PACKAGES:BOOL=ON',
'-DTrilinos_ENABLE_TESTS:BOOL=OFF',
'-DTrilinos_ENABLE_EXAMPLES:BOOL=OFF',
'-DCMAKE_BUILD_TYPE:STRING=%s' % ('Debug' if '+debug' in spec else 'Release'),
'-DBUILD_SHARED_LIBS:BOOL=%s' % ('ON' if '+shared' in spec else 'OFF'),
'-DTPL_ENABLE_MPI:BOOL=ON',
'-DMPI_BASE_DIR:PATH=%s' % spec['mpi'].prefix,
'-DTPL_ENABLE_BLAS=ON',
'-DBLAS_LIBRARY_NAMES=blas',
'-DBLAS_LIBRARY_DIRS=/usr/lib', # % spec['blas'].prefix,
'-DTPL_ENABLE_LAPACK=ON',
'-DLAPACK_LIBRARY_NAMES=lapack',
'-DLAPACK_LIBRARY_DIRS=/usr/lib', # % spec['lapack'].prefix,
'-DTPL_ENABLE_Boost:BOOL=ON',
'-DBOOST_BASE_DIR:PATH=%s' % spec['boost'].prefix,
'-DTrilinos_ENABLE_Fortran=OFF',
'-DTrilinos_ENABLE_EXPLICIT_INSTANTIATION:BOOL=ON',
'-DTrilinos_ENABLE_CXX11:BOOL=ON',
'-DTrilinos_CXX11_FLAGS=-std=c++11'
])
# disable due to compiler / config errors:
options.extend(['-DTrilinos_ENABLE_SEACAS=OFF',
'-DTrilinos_ENABLE_Pike=OFF',
'-DTrilinos_ENABLE_STK=OFF'
])
if self.compiler.name == "clang":
os.environ['CPPFLAGS']="-Qunused-arguments"
#os.environ['LDFLAGS']="lgfortran"
with working_dir('spack-build', create=True):
cmake('..', *options)
make()
make('install')
| from spack import *
class Trilinos(Package):
"""
The Trilinos Project is an effort to develop algorithms and enabling technologies within an object-oriented
software framework for the solution of large-scale, complex multi-physics engineering and scientific problems.
A unique design feature of Trilinos is its focus on packages.
"""
homepage = "https://trilinos.org/"
url = "http://trilinos.csbsju.edu/download/files/trilinos-12.2.1-Source.tar.gz"
version('12.4.2', '7c830f7f0f68b8ad324690603baf404e')
version('12.2.1', '6161926ea247863c690e927687f83be9')
version('12.0.1', 'bd99741d047471e127b8296b2ec08017')
version('11.14.3', '2f4f83f8333e4233c57d0f01c4b57426')
version('11.14.2', 'a43590cf896c677890d75bfe75bc6254')
version('11.14.1', '40febc57f76668be8b6a77b7607bb67f')
variant('shared', default=True, description='Enables the build of shared libraries')
variant('debug', default=False, description='Builds a debug version of the libraries')
# Everything should be compiled with -fpic
depends_on('blas')
depends_on('lapack')
depends_on('boost')
depends_on('matio')
depends_on('glm')
depends_on('swig')
# MPI related dependencies
depends_on('mpi')
depends_on('netcdf+mpi')
depends_on('python') # Needs py-numpy activated
def install(self, spec, prefix):
options = []
options.extend(std_cmake_args)
options.extend(['-DTrilinos_ENABLE_ALL_PACKAGES:BOOL=ON',
'-DTrilinos_ENABLE_TESTS:BOOL=OFF',
'-DTrilinos_ENABLE_EXAMPLES:BOOL=OFF',
'-DCMAKE_BUILD_TYPE:STRING=%s' % ('Debug' if '+debug' in spec else 'Release'),
'-DBUILD_SHARED_LIBS:BOOL=%s' % ('ON' if '+shared' in spec else 'OFF'),
'-DTPL_ENABLE_MPI:STRING=ON',
'-DBLAS_LIBRARY_DIRS:PATH=%s' % spec['blas'].prefix,
'-DLAPACK_LIBRARY_DIRS:PATH=%s' % spec['lapack'].prefix
])
with working_dir('spack-build', create=True):
cmake('..', *options)
make()
make('install')
| Python | 0.000001 |
ac8d29c5855ea05bd42766cd142808704aded867 | Add space to trigger travis | web/impact/impact/permissions/graphql_permissions.py | web/impact/impact/permissions/graphql_permissions.py | from accelerator.models import (
UserRole,
)
from accelerator_abstract.models.base_user_utils import is_employee
from accelerator.models import ACTIVE_PROGRAM_STATUS
BASIC_ALLOWED_USER_ROLES = [
UserRole.FINALIST,
UserRole.AIR,
UserRole.MENTOR,
UserRole.PARTNER,
UserRole.ALUM
]
BASIC_VISIBLE_USER_ROLES = [UserRole.FINALIST, UserRole.STAFF, UserRole.ALUM]
def check_for_no_user_role(logged_in_user_roles):
count = len(logged_in_user_roles) == 1
return not logged_in_user_roles or count and not logged_in_user_roles[0]
def check_for_basic_user_roles(logged_in_user_roles):
return any(
[role in BASIC_ALLOWED_USER_ROLES for role in logged_in_user_roles]
)
def visible_roles(current_user):
current_logged_in_user_roles = list(
current_user.programrolegrant_set.filter(
program_role__program__program_status=ACTIVE_PROGRAM_STATUS
).values_list('program_role__user_role__name', flat=True).distinct())
if check_for_no_user_role(current_logged_in_user_roles):
return [UserRole.STAFF]
if check_for_basic_user_roles(current_logged_in_user_roles):
return BASIC_VISIBLE_USER_ROLES + [UserRole.MENTOR]
if UserRole.JUDGE in current_logged_in_user_roles:
return BASIC_VISIBLE_USER_ROLES
def can_view_profile(profile_user, roles):
return profile_user.programrolegrant_set.filter(
program_role__user_role__name__in=roles
).exists()
def can_view_entrepreneur_profile(current_user, profile_user):
if not is_employee(current_user):
roles = visible_roles(current_user)
return can_view_profile(profile_user, roles)
return True
| from accelerator.models import (
UserRole,
)
from accelerator_abstract.models.base_user_utils import is_employee
from accelerator.models import ACTIVE_PROGRAM_STATUS
BASIC_ALLOWED_USER_ROLES = [
UserRole.FINALIST,
UserRole.AIR,
UserRole.MENTOR,
UserRole.PARTNER,
UserRole.ALUM
]
BASIC_VISIBLE_USER_ROLES = [UserRole.FINALIST, UserRole.STAFF, UserRole.ALUM]
def check_for_no_user_role(logged_in_user_roles):
count = len(logged_in_user_roles) == 1
return not logged_in_user_roles or count and not logged_in_user_roles[0]
def check_for_basic_user_roles(logged_in_user_roles):
return any(
[role in BASIC_ALLOWED_USER_ROLES for role in logged_in_user_roles]
)
def visible_roles(current_user):
current_logged_in_user_roles = list(
current_user.programrolegrant_set.filter(
program_role__program__program_status=ACTIVE_PROGRAM_STATUS
).values_list('program_role__user_role__name', flat=True).distinct())
if check_for_no_user_role(current_logged_in_user_roles):
return [UserRole.STAFF]
if check_for_basic_user_roles(current_logged_in_user_roles):
return BASIC_VISIBLE_USER_ROLES + [UserRole.MENTOR]
if UserRole.JUDGE in current_logged_in_user_roles:
return BASIC_VISIBLE_USER_ROLES
def can_view_profile(profile_user, roles):
return profile_user.programrolegrant_set.filter(
program_role__user_role__name__in=roles
).exists()
def can_view_entrepreneur_profile(current_user, profile_user):
if not is_employee(current_user):
roles = visible_roles(current_user)
return can_view_profile(profile_user, roles)
return True
| Python | 0 |
7ad6da17a72010967ccd82d3393a86762cf2a786 | Mark import-std-module/empty-module as libc++ test | packages/Python/lldbsuite/test/commands/expression/import-std-module/empty-module/TestEmptyStdModule.py | packages/Python/lldbsuite/test/commands/expression/import-std-module/empty-module/TestEmptyStdModule.py | """
Test that LLDB doesn't crash if the std module we load is empty.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import os
class ImportStdModule(TestBase):
mydir = TestBase.compute_mydir(__file__)
# We only emulate a fake libc++ in this test and don't use the real libc++,
# but we still add the libc++ category so that this test is only run in
# test configurations where libc++ is actually supposed to be tested.
@add_test_categories(["libc++"])
@skipIf(compiler=no_match("clang"))
def test(self):
self.build()
sysroot = os.path.join(os.getcwd(), "root")
# Set the sysroot.
self.runCmd("platform select --sysroot '" + sysroot + "' host", CURRENT_EXECUTABLE_SET)
lldbutil.run_to_source_breakpoint(self,
"// Set break point at this line.", lldb.SBFileSpec("main.cpp"))
self.runCmd("settings set target.import-std-module true")
self.runCmd("log enable lldb expr")
# Use the typedef that is only defined in our 'empty' module. If this fails, then LLDB
# somehow figured out the correct define for the header and compiled the right
# standard module that actually contains the std::vector template.
self.expect("expr MissingContent var = 3; var", substrs=['$0 = 3'])
# Try to access our mock std::vector. This should fail but not crash LLDB as the
# std::vector template should be missing from the std module.
self.expect("expr (size_t)v.size()", substrs=["Couldn't lookup symbols"], error=True)
| """
Test that LLDB doesn't crash if the std module we load is empty.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import os
class ImportStdModule(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipIf(compiler=no_match("clang"))
def test(self):
self.build()
sysroot = os.path.join(os.getcwd(), "root")
# Set the sysroot.
self.runCmd("platform select --sysroot '" + sysroot + "' host", CURRENT_EXECUTABLE_SET)
lldbutil.run_to_source_breakpoint(self,
"// Set break point at this line.", lldb.SBFileSpec("main.cpp"))
self.runCmd("settings set target.import-std-module true")
self.runCmd("log enable lldb expr")
# Use the typedef that is only defined in our 'empty' module. If this fails, then LLDB
# somehow figured out the correct define for the header and compiled the right
# standard module that actually contains the std::vector template.
self.expect("expr MissingContent var = 3; var", substrs=['$0 = 3'])
# Try to access our mock std::vector. This should fail but not crash LLDB as the
# std::vector template should be missing from the std module.
self.expect("expr (size_t)v.size()", substrs=["Couldn't lookup symbols"], error=True)
| Python | 0.000037 |
973a7754623c330f0352979bf9e0f2a6020acf62 | reformat >80 char import line | tendrl/commons/tests/objects/cluster/atoms/check_cluster_available/test_check_cluster_available_init.py | tendrl/commons/tests/objects/cluster/atoms/check_cluster_available/test_check_cluster_available_init.py | import etcd
import maps
import pytest
from tendrl.commons.objects.cluster.atoms.check_cluster_available import \
CheckClusterAvailable
from tendrl.commons.objects import AtomExecutionFailedError
class MockCluster(object):
def __init__(self, integration_id = 0):
self.is_managed = True
def load(self):
return self
def exists(self):
return self
def test_check_cluster_available():
NS.publisher_id = 0
NS._int = maps.NamedDict()
NS.tendrl = maps.NamedDict()
NS.tendrl.objects = maps.NamedDict()
NS.tendrl.objects.Cluster = MockCluster
test = CheckClusterAvailable()
test.parameters = maps.NamedDict()
test.parameters['TendrlContext.integration_id'] = \
"7a3f2238-ef79-4943-9edf-762a80cf22a0"
test.parameters['job_id'] = 0
test.parameters['flow_id'] = 0
NS.tendrl_context = maps.NamedDict(integration_id="")
NS._int.client = etcd.Client()
with pytest.raises(AtomExecutionFailedError):
test.run()
| import etcd
import maps
import pytest
from tendrl.commons.objects.cluster.atoms.check_cluster_available import CheckClusterAvailable # noqa
from tendrl.commons.objects import AtomExecutionFailedError
class MockCluster(object):
def __init__(self, integration_id = 0):
self.is_managed = True
def load(self):
return self
def exists(self):
return self
def test_check_cluster_available():
NS.publisher_id = 0
NS._int = maps.NamedDict()
NS.tendrl = maps.NamedDict()
NS.tendrl.objects = maps.NamedDict()
NS.tendrl.objects.Cluster = MockCluster
test = CheckClusterAvailable()
test.parameters = maps.NamedDict()
test.parameters['TendrlContext.integration_id'] = \
"7a3f2238-ef79-4943-9edf-762a80cf22a0"
test.parameters['job_id'] = 0
test.parameters['flow_id'] = 0
NS.tendrl_context = maps.NamedDict(integration_id="")
NS._int.client = etcd.Client()
with pytest.raises(AtomExecutionFailedError):
test.run()
| Python | 0 |
eea0c4fd610882ee748410063a62c30ce95da0ee | Fix the snapshot creation script for the new command line syntax. Review URL: http://codereview.chromium.org//8414015 | runtime/tools/create_snapshot_file.py | runtime/tools/create_snapshot_file.py | #!/usr/bin/env python
#
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# Script to create snapshot files.
import getopt
import optparse
import string
import subprocess
import sys
import utils
HOST_OS = utils.GuessOS()
HOST_CPUS = utils.GuessCpus()
def BuildOptions():
result = optparse.OptionParser()
result.add_option("--executable",
action="store", type="string",
help="path to executable")
result.add_option("--output_bin",
action="store", type="string",
help="binary snapshot output file name")
result.add_option("--input_cc",
action="store", type="string",
help="input template file name")
result.add_option("--output",
action="store", type="string",
help="generated snapshot output file name")
result.add_option("--scripts",
action="store", type="string",
help="list of scripts to include in snapshot")
result.add_option("-v", "--verbose",
help='Verbose output.',
default=False, action="store_true")
return result
def ProcessOptions(options):
if not options.executable:
sys.stderr.write('--executable not specified\n')
return False
if not options.output_bin:
sys.stderr.write('--output_bin not specified\n')
return False
if not options.input_cc:
sys.stderr.write('--input_cc not specified\n')
return False
if not options.output:
sys.stderr.write('--output not specified\n')
return False
return True
def makeString(input_file):
result = ' '
fileHandle = open(input_file, 'rb')
lineCounter = 0
for byte in fileHandle.read():
result += ' %d,' % ord(byte)
lineCounter += 1
if lineCounter == 10:
result += '\n '
lineCounter = 0
if lineCounter != 0:
result += '\n '
return result
def makeFile(output_file, input_cc_file, input_file):
snapshot_cc_text = open(input_cc_file).read()
snapshot_cc_text = snapshot_cc_text % makeString(input_file)
open(output_file, 'w').write(snapshot_cc_text)
return True
def Main():
# Parse the options.
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
# Construct the path to the dart binary.
snapshot_argument = ''.join([ "--snapshot=", options.output_bin ])
if not options.scripts:
command = [ options.executable, snapshot_argument ]
else:
scripts = string.split(options.scripts)
command = [ options.executable, snapshot_argument ] + scripts
if options.verbose:
print ' '.join(command)
subprocess.call(command)
if not makeFile(options.output, options.input_cc, options.output_bin):
return -1
return 0
if __name__ == '__main__':
sys.exit(Main())
| #!/usr/bin/env python
#
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# Script to create snapshot files.
import getopt
import optparse
import string
import subprocess
import sys
import utils
HOST_OS = utils.GuessOS()
HOST_CPUS = utils.GuessCpus()
def BuildOptions():
result = optparse.OptionParser()
result.add_option("--executable",
action="store", type="string",
help="path to executable")
result.add_option("--output_bin",
action="store", type="string",
help="binary snapshot output file name")
result.add_option("--input_cc",
action="store", type="string",
help="input template file name")
result.add_option("--output",
action="store", type="string",
help="generated snapshot output file name")
result.add_option("--scripts",
action="store", type="string",
help="list of scripts to include in snapshot")
result.add_option("-v", "--verbose",
help='Verbose output.',
default=False, action="store_true")
return result
def ProcessOptions(options):
if not options.executable:
sys.stderr.write('--executable not specified\n')
return False
if not options.output_bin:
sys.stderr.write('--output_bin not specified\n')
return False
if not options.input_cc:
sys.stderr.write('--input_cc not specified\n')
return False
if not options.output:
sys.stderr.write('--output not specified\n')
return False
return True
def makeString(input_file):
result = ' '
fileHandle = open(input_file, 'rb')
lineCounter = 0
for byte in fileHandle.read():
result += ' %d,' % ord(byte)
lineCounter += 1
if lineCounter == 10:
result += '\n '
lineCounter = 0
if lineCounter != 0:
result += '\n '
return result
def makeFile(output_file, input_cc_file, input_file):
snapshot_cc_text = open(input_cc_file).read()
snapshot_cc_text = snapshot_cc_text % makeString(input_file)
open(output_file, 'w').write(snapshot_cc_text)
return True
def Main():
# Parse the options.
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
# Construct the path to the dart binary.
snapshot_argument = ''.join([ "--snapshot=", options.output_bin ])
if not options.scripts:
command = [ options.executable, snapshot_argument ]
else:
scripts = string.split(options.scripts)
command = [ options.executable, snapshot_argument, "--" ] + scripts + [ "--" ]
if options.verbose:
print ' '.join(command)
subprocess.call(command)
if not makeFile(options.output, options.input_cc, options.output_bin):
return -1
return 0
if __name__ == '__main__':
sys.exit(Main())
| Python | 0.999999 |
788073cdf2a5e2ee142cbcf1263accad7baac153 | Move chmod | genes/gnu_coreutils/commands.py | genes/gnu_coreutils/commands.py | #!/usr/bin/env python
from genes.posix.traits import only_posix
from genes.process.commands import run
@only_posix()
def chgrp(path, group):
run(['chgrp', group, path])
@only_posix()
def chown(path, user):
run(['chown', user, path])
@only_posix()
def chmod(*args):
# FIXME: this is ugly, name the args
run(['chmod'] + list(args))
@only_posix()
def groupadd(*args):
run(['groupadd'] + list(args))
@only_posix()
def ln(*args):
run(['ln'] + list(args))
@only_posix()
def mkdir(path, mode=None):
if mode:
run(['mkdir', '-m', mode, path])
else:
run(['mkdir', path])
@only_posix()
def useradd(*args):
# FIXME: this is a bad way to do things
# FIXME: sigh. this is going to be a pain to make it idempotent
run(['useradd'] + list(args))
@only_posix()
def usermod(*args):
# FIXME: this is a bad way to do things
run(['usermod'] + list(args))
| #!/usr/bin/env python
from genes.posix.traits import only_posix
from genes.process.commands import run
@only_posix()
def chgrp(path, group):
run(['chgrp', group, path])
@only_posix()
def chown(path, user):
run(['chown', user, path])
@only_posix()
def groupadd(*args):
run(['groupadd'] + list(args))
@only_posix()
def ln(*args):
run(['ln'] + list(args))
@only_posix()
def mkdir(path, mode=None):
if mode:
run(['mkdir', '-m', mode, path])
else:
run(['mkdir', path])
@only_posix()
def useradd(*args):
# FIXME: this is a bad way to do things
# FIXME: sigh. this is going to be a pain to make it idempotent
run(['useradd'] + list(args))
@only_posix()
def usermod(*args):
# FIXME: this is a bad way to do things
run(['usermod'] + list(args))
| Python | 0.000001 |
14ba4d581428ff38190153955315dc13483623e1 | simplify (at least for me) | find_groups.py | find_groups.py | import random
from collections import namedtuple, defaultdict, deque
Point = namedtuple('Point', ['x', 'y'])
Size = namedtuple('Size', ['w', 'h'])
EMPTY = 0
BLACK = 1
WHITE = 2
BOARD_LETTERS = 'ABCDEFGHJKLMNOPQRST'
class Group:
def __init__(self, color):
self.color = color
self.points = set()
self.liberties = set()
def get_num_liberties(self):
return len(self.liberties)
def __len__(self):
return len(self.points)
def __repr__(self):
return '<group color={} {} points {} liberties>'.format(
self.color, len(self.points), len(self.liberties))
class Board:
def __init__(self, size):
self.size = size
self.stones = {}
def random_fill(self, seed=None):
rand = random.Random(seed)
for point in self.iter_points():
color = rand.choice([EMPTY, BLACK, WHITE])
if color != EMPTY:
self.stones[point] = color
def is_inside(self, point):
return 0 <= point.x < self.size.w and 0 <= point.y < self.size.h
def get_color(self, point):
return self.stones.get(point, 0)
def get_neighbours(self, point):
x, y = point
_points = [Point(x-1, y), Point(x+1, y), Point(x, y-1), Point(x, y+1)]
points = filter(lambda p: self.is_inside(p), _points)
return points
def iter_points(self):
for x in range(self.size.w):
for y in range(self.size.h):
yield Point(x, y)
def find_groups(self):
groups = []
grouped_points = set()
for point, color in self.stones.items():
assert color != EMPTY
if point in grouped_points:
continue
group = Group(color)
todo = [point]
while todo:
point = todo.pop()
if point not in grouped_points:
color = self.stones.get(point, EMPTY)
if color == EMPTY:
group.liberties.add(point)
elif color == group.color:
group.points.add(point)
grouped_points.add(point)
todo.extend(self.get_neighbours(point))
groups.append(group)
return groups
def print_board(board):
color_chars = {
# Characters that are easy to tell apart at a glance.
EMPTY: '.',
BLACK: '#',
WHITE: 'o',
}
print()
print(' ', ' '.join(BOARD_LETTERS[:board.size.w]))
print()
for y in range(board.size.h):
line = []
for x in reversed(range(board.size.w)):
line.append(color_chars[board.get_color(Point(x, y))])
rownum = board.size.h - y
print(' {:2} '.format(rownum), ' '.join(line))
print()
def print_captured_groups(groups, board_size):
board = Board(board_size)
for group in groups:
if group.get_num_liberties() == 0:
for point in group.points:
board.stones[point] = group.color
print_board(board)
board = Board(Size(9, 9))
board.random_fill(seed=13)
print('Board:')
print_board(board)
groups = board.find_groups()
print('Captured groups:')
print_captured_groups(groups, board.size)
| import random
from collections import namedtuple, defaultdict, deque
Point = namedtuple('Point', ['x', 'y'])
Size = namedtuple('Size', ['w', 'h'])
EMPTY = 0
BLACK = 1
WHITE = 2
BOARD_LETTERS = 'ABCDEFGHJKLMNOPQRST'
class Group:
def __init__(self, color):
self.color = color
self.points = set()
self.liberties = set()
def get_num_liberties(self):
return len(self.liberties)
def __len__(self):
return len(self.points)
def __repr__(self):
return '<group color={} {} points {} liberties>'.format(
self.color, len(self.points), len(self.liberties))
class Board:
def __init__(self, size):
self.size = size
self.stones = {}
def random_fill(self, seed=None):
rand = random.Random(seed)
for point in self.iter_points():
color = rand.choice([EMPTY, BLACK, WHITE])
if color != EMPTY:
self.stones[point] = color
def is_inside(self, point):
return 0 <= point.x < self.size.w and 0 <= point.y < self.size.h
def get_color(self, point):
return self.stones.get(point, 0)
def get_neighbours(self, point):
x, y = point
_points = [Point(x-1, y), Point(x+1, y), Point(x, y-1), Point(x, y+1)]
points = filter(lambda p: self.is_inside(p), _points)
return points
def iter_points(self):
for x in range(self.size.w):
for y in range(self.size.h):
yield Point(x, y)
def find_groups(self):
groups = []
grouped_points = set()
for point, color in self.stones.items():
assert color != EMPTY
if point in grouped_points:
continue
group = Group(color)
todo = [point]
while todo:
point = todo.pop()
color = self.stones.get(point, EMPTY)
if point in grouped_points:
continue
elif color == EMPTY:
group.liberties.add(point)
elif color == group.color:
group.points.add(point)
grouped_points.add(point)
todo.extend(self.get_neighbours(point))
groups.append(group)
return groups
def print_board(board):
color_chars = {
# Characters that are easy to tell apart at a glance.
EMPTY: '.',
BLACK: '#',
WHITE: 'o',
}
print()
print(' ', ' '.join(BOARD_LETTERS[:board.size.w]))
print()
for y in range(board.size.h):
line = []
for x in reversed(range(board.size.w)):
line.append(color_chars[board.get_color(Point(x, y))])
rownum = board.size.h - y
print(' {:2} '.format(rownum), ' '.join(line))
print()
def print_captured_groups(groups, board_size):
board = Board(board_size)
for group in groups:
if group.get_num_liberties() == 0:
for point in group.points:
board.stones[point] = group.color
print_board(board)
board = Board(Size(9, 9))
board.random_fill(seed=13)
print('Board:')
print_board(board)
groups = board.find_groups()
print('Captured groups:')
print_captured_groups(groups, board.size)
| Python | 0.000002 |
0ab67a50c711e3a15974f3bb4fe9df84fac6608a | use new template | gimmemotifs/commands/cluster.py | gimmemotifs/commands/cluster.py | #!/usr/bin/env python
# Copyright (c) 2009-2016 Simon van Heeringen <s.vanheeringen@science.ru.nl>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
from distutils import sysconfig
from gimmemotifs.motif import pwmfile_to_motifs
from gimmemotifs.comparison import MotifComparer
from gimmemotifs.cluster import cluster_motifs
from gimmemotifs.config import MotifConfig
import sys
import os
import jinja2
def cluster(args):
revcomp = not args.single
outdir = os.path.abspath(args.outdir)
if not os.path.exists(outdir):
os.mkdir(outdir)
trim_ic = 0.2
clusters = []
motifs = pwmfile_to_motifs(args.inputfile)
if len(motifs) == 1:
clusters = [[motifs[0], motifs]]
else:
tree = cluster_motifs(args.inputfile, "total", "wic", "mean", True, threshold=args.threshold, include_bg=True)
clusters = tree.getResult()
ids = []
mc = MotifComparer()
sys.stderr.write("Creating images\n")
for cluster,members in clusters:
cluster.trim(trim_ic)
cluster.to_img(os.path.join(outdir,"%s.png" % cluster.id), format="PNG")
ids.append([cluster.id, {"src":"%s.png" % cluster.id},[]])
if len(members) > 1:
scores = {}
for motif in members:
scores[motif] = mc.compare_motifs(cluster, motif, "total", "wic", "mean", pval=True)
add_pos = sorted(scores.values(),cmp=lambda x,y: cmp(x[1], y[1]))[0][1]
for motif in members:
score, pos, strand = scores[motif]
add = pos - add_pos
if strand in [1,"+"]:
pass
else:
#print "RC %s" % motif.id
rc = motif.rc()
rc.id = motif.id
motif = rc
#print "%s\t%s" % (motif.id, add)
motif.to_img(os.path.join(outdir, "%s.png" % motif.id.replace(" ", "_")), format="PNG", add_left=add)
ids[-1][2] = [dict([("src", "%s.png" % motif.id.replace(" ", "_")), ("alt", motif.id.replace(" ", "_"))]) for motif in members]
config = MotifConfig()
env = jinja2.Environment(loader=jinja2.FileSystemLoader([config.get_template_dir()]))
template = env.get_template("cluster_template.jinja.html")
result = template.render(motifs=ids)
with open(os.path.join(outdir, "cluster_report.html"), "w") as f:
f.write(result.encode('utf-8'))
f = open(os.path.join(outdir, "cluster_key.txt"), "w")
for id in ids:
f.write("%s\t%s\n" % (id[0], ",".join([x["alt"] for x in id[2]])))
f.close()
f = open(os.path.join(outdir, "clustered_motifs.pwm"), "w")
if len(clusters) == 1 and len(clusters[0][1]) == 1:
f.write("%s\n" % clusters[0][0].to_pwm())
else:
for motif in tree.get_clustered_motifs():
f.write("%s\n" % motif.to_pwm())
f.close()
| #!/usr/bin/env python
# Copyright (c) 2009-2016 Simon van Heeringen <s.vanheeringen@science.ru.nl>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
from distutils import sysconfig
from gimmemotifs.motif import pwmfile_to_motifs
from gimmemotifs.comparison import MotifComparer
from gimmemotifs.cluster import cluster_motifs
import sys
import os
import kid
def cluster(args):
revcomp = not args.single
outdir = os.path.abspath(args.outdir)
if not os.path.exists(outdir):
os.mkdir(outdir)
trim_ic = 0.2
clusters = []
motifs = pwmfile_to_motifs(args.inputfile)
if len(motifs) == 1:
clusters = [[motifs[0], motifs]]
else:
tree = cluster_motifs(args.inputfile, "total", "wic", "mean", True, threshold=args.threshold, include_bg=True)
clusters = tree.getResult()
ids = []
mc = MotifComparer()
sys.stderr.write("Creating images\n")
for cluster,members in clusters:
cluster.trim(trim_ic)
cluster.to_img(os.path.join(outdir,"%s.png" % cluster.id), format="PNG")
ids.append([cluster.id, {"src":"%s.png" % cluster.id},[]])
if len(members) > 1:
scores = {}
for motif in members:
scores[motif] = mc.compare_motifs(cluster, motif, "total", "wic", "mean", pval=True)
add_pos = sorted(scores.values(),cmp=lambda x,y: cmp(x[1], y[1]))[0][1]
for motif in members:
score, pos, strand = scores[motif]
add = pos - add_pos
if strand in [1,"+"]:
pass
else:
#print "RC %s" % motif.id
rc = motif.rc()
rc.id = motif.id
motif = rc
#print "%s\t%s" % (motif.id, add)
motif.to_img(os.path.join(outdir, "%s.png" % motif.id.replace(" ", "_")), format="PNG", add_left=add)
ids[-1][2] = [dict([("src", "%s.png" % motif.id.replace(" ", "_")), ("alt", motif.id.replace(" ", "_"))]) for motif in members]
kid.enable_import()
prefix = sysconfig.get_config_var("prefix")
template_file = os.path.join(prefix, "share/gimmemotifs/templates/cluster_template.kid")
template = kid.Template(file=template_file, motifs=ids)
f = open(os.path.join(outdir, "cluster_report.html"), "w")
f.write(template.serialize())
f.close()
f = open(os.path.join(outdir, "cluster_key.txt"), "w")
for id in ids:
f.write("%s\t%s\n" % (id[0], ",".join([x["alt"] for x in id[2]])))
f.close()
f = open(os.path.join(outdir, "clustered_motifs.pwm"), "w")
if len(clusters) == 1 and len(clusters[0][1]) == 1:
f.write("%s\n" % clusters[0][0].to_pwm())
else:
for motif in tree.get_clustered_motifs():
f.write("%s\n" % motif.to_pwm())
f.close()
| Python | 0 |
394954fc80230e01112166db4fe133c107febead | Allow more than one GitHub repo from the same user | gitautodeploy/parsers/common.py | gitautodeploy/parsers/common.py |
class WebhookRequestParser(object):
"""Abstract parent class for git service parsers. Contains helper
methods."""
def __init__(self, config):
self._config = config
def get_matching_repo_configs(self, urls):
"""Iterates over the various repo URLs provided as argument (git://,
ssh:// and https:// for the repo) and compare them to any repo URL
specified in the config"""
configs = []
for url in urls:
for repo_config in self._config['repositories']:
if repo_config in configs:
continue
if repo_config.get('repo', repo_config.get('url')) == url:
configs.append(repo_config)
elif 'url_without_usernme' in repo_config and repo_config['url_without_usernme'] == url:
configs.append(repo_config)
return configs
|
class WebhookRequestParser(object):
"""Abstract parent class for git service parsers. Contains helper
methods."""
def __init__(self, config):
self._config = config
def get_matching_repo_configs(self, urls):
"""Iterates over the various repo URLs provided as argument (git://,
ssh:// and https:// for the repo) and compare them to any repo URL
specified in the config"""
configs = []
for url in urls:
for repo_config in self._config['repositories']:
if repo_config in configs:
continue
if repo_config['url'] == url:
configs.append(repo_config)
elif 'url_without_usernme' in repo_config and repo_config['url_without_usernme'] == url:
configs.append(repo_config)
return configs | Python | 0.998892 |
d4fed426153105a9f8cab595848d5303003449b8 | revert last commit, import properly | cogs/games.py | cogs/games.py | import discord
import json
from discord.ext import commands
from datetime import datetime
from utils import aiohttp_wrap as aw
class Game:
""" Cog which allows fetching of video game information """
IG_URL = 'https://api-2445582011268.apicast.io/{}/'
with open('data/apikeys.json') as f:
KEY = json.load(f)['pgdb']
def __init__(self, bot):
self.bot = bot
self.session = bot.aio_session
@commands.comand(aliases=['games'])
async def game(self, ctx, *, query: str):
""" Search for some information about a game """
url = self.IG_URL.format('games')
headers = {'user-key': self.KEY}
params = {'search': query,
'fields': 'name,summary,first_release_date,aggregated_rating,cover'}
resp = await aw.session_get(self.session, url, headers=headers, params=params).json()
await ctx.send(f'{resp}'[:500])
def setup(bot):
bot.add_cog(Game(bot))
| import discord
from discord.ext import commands
from datetime import datetime
from utils import aiohttp_wrap as aw
class Game:
""" Cog which allows fetching of video game information """
IG_URL = 'https://api-2445582011268.apicast.io/{}/'
with open('data/apikeys.json') as f:
KEY = json.load(f)['pgdb']
def __init__(self, bot):
self.bot = bot
self.session = bot.aio_session
@commands.comand(aliases=['games'])
async def game(self, ctx, *, query: str):
""" Search for some information about a game """
url = self.IG_URL.format('games')
headers = {'user-key': self.KEY}
params = {'search': query,
'fields': 'name,summary,first_release_date,aggregated_rating,cover'}
resp = await aw.aio_get_json(self.session, url, headers=headers, params=params)
await ctx.send(f'{resp}'[:500])
def setup(bot):
bot.add_cog(Game(bot))
| Python | 0 |
7adeb5e668a132ab540fa45c8e6c62cb8481930d | fix infinite recursion | fluff/sync_couchdb.py | fluff/sync_couchdb.py | from django.db.models import signals
import os
from couchdbkit.ext.django.loading import get_db
from pillowtop.utils import import_pillows
from dimagi.utils.couch import sync_docs
FLUFF = 'fluff'
def sync_design_docs(temp=None):
dir = os.path.abspath(os.path.dirname(__file__))
for pillow in import_pillows(instantiate=False):
if hasattr(pillow, 'indicator_class'):
app_label = pillow.indicator_class._meta.app_label
db = get_db(app_label)
sync_docs.sync_design_docs(db, os.path.join(dir, "_design"), FLUFF, temp=temp)
def catch_signal(app, **kwargs):
"""Function used by syncdb signal"""
app_name = app.__name__.rsplit('.', 1)[0]
app_label = app_name.split('.')[-1]
if app_label == FLUFF:
sync_design_docs()
def copy_designs(temp='tmp', delete=True):
for pillow in import_pillows(instantiate=False):
if hasattr(pillow, 'indicator_class'):
app_label = pillow.indicator_class._meta.app_label
db = get_db(app_label)
sync_docs.copy_designs(db, FLUFF)
signals.post_syncdb.connect(catch_signal)
| from django.db.models import signals
import os
from couchdbkit.ext.django.loading import get_db
from pillowtop.utils import import_pillows
from dimagi.utils.couch.sync_docs import sync_design_docs as sync_docs
FLUFF = 'fluff'
def sync_design_docs(temp=None):
dir = os.path.abspath(os.path.dirname(__file__))
for pillow in import_pillows(instantiate=False):
if hasattr(pillow, 'indicator_class'):
app_label = pillow.indicator_class._meta.app_label
print 'fluff sync: %s' % app_label
db = get_db(app_label)
sync_docs(db, os.path.join(dir, "_design"), FLUFF, temp=temp)
def catch_signal(app, **kwargs):
"""Function used by syncdb signal"""
app_name = app.__name__.rsplit('.', 1)[0]
app_label = app_name.split('.')[-1]
if app_label == FLUFF:
sync_design_docs()
def copy_designs(temp='tmp', delete=True):
for pillow in import_pillows(instantiate=False):
if hasattr(pillow, 'indicator_class'):
app_label = pillow.indicator_class._meta.app_label
db = get_db(app_label)
copy_designs(db, FLUFF)
signals.post_syncdb.connect(catch_signal)
| Python | 0.00056 |
9cc7218f2eef7135e5402a47c2783def31add9f3 | save screenshot in 800x480 too | screenshots.py | screenshots.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from PIL import Image, ImageFile
from shovel import task
from meta.utils import path_meta, path_generated, depends
ImageFile.MAXBLOCK = 2**20
def save(image, filename):
image.save(filename, "JPEG", quality=98, optimize=True, progressive=True)
@task
def retina_resize():
for filename in path_meta().files("screen-*.png"):
image = Image.open(filename)
if image.size != (2048, 1580):
continue
resized = image.resize((1024, 790), Image.ANTIALIAS)
resized.save(filename, filename.ext[1:].upper())
@task
def export():
depends("meta.pxm.export")
depends("meta.screenshots.retina_resize")
for filename in path_meta().files("screen-*.png"):
image = Image.open(filename)
# crop
width, height = image.size
box = (0, height - 768, width, height)
cropped = image.crop(box)
# overlay
name = "".join(filename.namebase.split("-")[1:])
overlayfile = path_meta() / "overlay-" + name + ".png"
if overlayfile.exists():
overlay = Image.open(overlayfile)
cropped.paste(overlay, None, overlay)
# save
for x, y in ((1024, 768), (800, 480), (960, 640), (1136, 640), (1280, 720)):
resized = cropped.resize((x, y), Image.ANTIALIAS)
savename = "screen-" + name + "-" + str(x) + "x" + str(y) + ".jpg"
save(resized, path_generated() / savename)
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from PIL import Image, ImageFile
from shovel import task
from meta.utils import path_meta, path_generated, depends
ImageFile.MAXBLOCK = 2**20
def save(image, filename):
image.save(filename, "JPEG", quality=98, optimize=True, progressive=True)
@task
def retina_resize():
for filename in path_meta().files("screen-*.png"):
image = Image.open(filename)
if image.size != (2048, 1580):
continue
resized = image.resize((1024, 790), Image.ANTIALIAS)
resized.save(filename, filename.ext[1:].upper())
@task
def export():
depends("meta.pxm.export")
depends("meta.screenshots.retina_resize")
for filename in path_meta().files("screen-*.png"):
image = Image.open(filename)
# crop
width, height = image.size
box = (0, height - 768, width, height)
cropped = image.crop(box)
# overlay
name = "".join(filename.namebase.split("-")[1:])
overlayfile = path_meta() / "overlay-" + name + ".png"
if overlayfile.exists():
overlay = Image.open(overlayfile)
cropped.paste(overlay, None, overlay)
# save
for x, y in ((1024, 768), (960, 640), (1136, 640), (1280, 720)):
resized = cropped.resize((x, y), Image.ANTIALIAS)
savename = "screen-" + name + "-" + str(x) + "x" + str(y) + ".jpg"
save(resized, path_generated() / savename)
| Python | 0 |
bcb1c8d48532159f76708bdfd0e6868dbda92343 | make sure command processes run in test database when needed | freppledb/__init__.py | freppledb/__init__.py | r'''
A Django project implementing a web-based user interface for frePPLe.
'''
VERSION = '4.5.0'
def runCommand(taskname, *args, **kwargs):
'''
Auxilary method to run a django command. It is intended to be used
as a target for the multiprocessing module.
The code is put here, such that a child process loads only
a minimum of other python modules.
'''
# Initialize django
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "freppledb.settings")
import django
django.setup()
# Be sure to use the correct database
from django.db import DEFAULT_DB_ALIAS, connections
from freppledb.common.middleware import _thread_locals
database = kwargs.get("database", DEFAULT_DB_ALIAS)
setattr(_thread_locals, 'database', database)
if 'FREPPLE_TEST' in os.environ:
from django.conf import settings
connections[database].close()
settings.DATABASES[database]['NAME'] = settings.DATABASES[database]['TEST']['NAME']
# Run the command
try:
from django.core import management
management.call_command(taskname, *args, **kwargs)
except Exception as e:
taskid = kwargs.get("task", None)
if taskid:
from datetime import datetime
from freppledb.execute.models import Task
task = Task.objects.all().using(database).get(pk=taskid)
task.status = 'Failed'
now = datetime.now()
if not task.started:
task.started = now
task.finished = now
task.message = str(e)
task.processid = None
task.save(using=database)
| r'''
A Django project implementing a web-based user interface for frePPLe.
'''
VERSION = '4.5.0'
def runCommand(taskname, *args, **kwargs):
'''
Auxilary method to run a django command. It is intended to be used
as a target for the multiprocessing module.
The code is put here, such that a child process loads only
a minimum of other python modules.
'''
# Initialize django
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "freppledb.settings")
import django
django.setup()
# Be sure to use the correct default database
from django.db import DEFAULT_DB_ALIAS
from freppledb.common.middleware import _thread_locals
database = kwargs.get("database", DEFAULT_DB_ALIAS)
setattr(_thread_locals, 'database', database)
# Run the command
try:
from django.core import management
management.call_command(taskname, *args, **kwargs)
except Exception as e:
taskid = kwargs.get("task", None)
if taskid:
from datetime import datetime
from freppledb.execute.models import Task
task = Task.objects.all().using(database).get(pk=taskid)
task.status = 'Failed'
now = datetime.now()
if not task.started:
task.started = now
task.finished = now
task.message = str(e)
task.processid = None
task.save(using=database)
| Python | 0 |
b56145ad1aebd931ca0e741ea7d4315520e6ed40 | Tweak version incrementer | release.py | release.py | from os import fdopen, remove
from shutil import move
import subprocess
import sys
from tempfile import mkstemp
from geomeppy import __version__
def replace(file_path, pattern, subst):
# Create temp file
fh, abs_path = mkstemp()
with fdopen(fh, 'w') as new_file:
with open(file_path) as old_file:
for line in old_file:
new_file.write(line.replace(pattern, subst))
# Remove original file
remove(file_path)
# Move new file
move(abs_path, file_path)
def main(increment):
# check we're on master
assert b'* develop' in subprocess.check_output(['git', 'branch']), 'Not on develop branch'
# check we're up-to-date
status = subprocess.check_output(['git', 'status'])
assert b'modified' not in status, 'Repository contains modified files'
assert b'Untracked' not in status, 'Repository contains untracked files'
# increment version
version = __version__
new_version = [int(i) for i in version.split('.')]
for i in range(len(new_version)):
if i == increment:
new_version[i] += 1
if i > increment:
new_version[i] = 1
new_version = '.'.join(str(v) for v in new_version)
replace('geomeppy/__init__.py', version, new_version)
replace('setup.py', "version='%s'" % version, "version='%s'" % new_version)
replace('setup.py', "tarball/v%s" % version, "tarball/v%s" % new_version)
try:
# add and commit changes
print(subprocess.check_output(['git', 'add', 'geomeppy/__init__.py']))
print(subprocess.check_output(['git', 'add', 'setup.py']))
print(subprocess.check_output(['git', 'add', 'README.rst']))
print(subprocess.check_output(['git', 'commit', '-m', 'release/%s' % new_version]))
except Exception as e:
# rollback
print('rolling back')
print(e)
replace('geomeppy/__init__.py', new_version, version)
replace('setup.py', new_version, version)
exit()
try:
# push the changes
print(subprocess.check_output(['git', 'push', 'origin', 'develop', '-f']))
# create a tagged release
print(subprocess.check_output(['git', 'tag', 'release/%s' % new_version, '-m', 'v%s' % new_version]))
# push to github
print(subprocess.check_output(['git', 'push', 'origin', 'release/%s' % new_version, '-f']))
except Exception as e:
# rollback
print('rolling back tag')
print(e)
# delete the tagged release
print(subprocess.check_output(['git', 'tag', '-d', 'release/%s' % new_version, 'v%s' % new_version]))
# push to github
print(subprocess.check_output(
['git', 'push', 'origin', ':refs/tags/release/%s' % new_version, 'v%s' % new_version])
)
# from here, the Travis CI magic begins
if __name__ == '__main__':
args = sys.argv[1:]
VERSION = ['major', 'minor', 'patch']
try:
increment = VERSION.index(sys.argv[1])
except ValueError:
print('%s is not a valid semantic version level (use major, minor, or patch)' % sys.argv[1])
except IndexError:
# default
increment = VERSION.index('patch')
main(increment)
| from os import fdopen, remove
from shutil import move
import subprocess
import sys
from tempfile import mkstemp
from geomeppy import __version__
def replace(file_path, pattern, subst):
# Create temp file
fh, abs_path = mkstemp()
with fdopen(fh, 'w') as new_file:
with open(file_path) as old_file:
for line in old_file:
new_file.write(line.replace(pattern, subst))
# Remove original file
remove(file_path)
# Move new file
move(abs_path, file_path)
def main(increment):
# check we're on master
assert b'* develop' in subprocess.check_output(['git', 'branch']), 'Not on develop branch'
# check we're up-to-date
status = subprocess.check_output(['git', 'status'])
assert b'modified' not in status, 'Repository contains modified files'
assert b'Untracked' not in status, 'Repository contains untracked files'
# increment version
version = __version__
major, minor, patch = version.split('.')
version = [int(i) for i in version.split('.')]
version[increment] += 1
for i in range(len(version)):
if i == increment:
version[i] += 1
if i > increment:
version[i] = 1
new_version = '%d.%d.%d' % (major, minor, int(patch) + 1)
replace('geomeppy/__init__.py', version, new_version)
replace('setup.py', "version='%s'" % version, "version='%s'" % new_version)
replace('setup.py', "tarball/v%s" % version, "tarball/v%s" % new_version)
try:
# add and commit changes
print(subprocess.check_output(['git', 'add', 'geomeppy/__init__.py']))
print(subprocess.check_output(['git', 'add', 'setup.py']))
print(subprocess.check_output(['git', 'add', 'README.rst']))
print(subprocess.check_output(['git', 'commit', '-m', 'release/%s' % new_version]))
except Exception as e:
# rollback
print('rolling back')
print(e)
replace('geomeppy/__init__.py', new_version, version)
replace('setup.py', new_version, version)
exit()
try:
# push the changes
print(subprocess.check_output(['git', 'push', 'origin', 'develop', '-f']))
# create a tagged release
print(subprocess.check_output(['git', 'tag', 'release/%s' % new_version, '-m', 'v%s' % new_version]))
# push to github
print(subprocess.check_output(['git', 'push', 'origin', 'release/%s' % new_version, '-f']))
except Exception as e:
# rollback
print('rolling back tag')
print(e)
# delete the tagged release
print(subprocess.check_output(['git', 'tag', '-d', 'release/%s' % new_version, 'v%s' % new_version]))
# push to github
print(subprocess.check_output(
['git', 'push', 'origin', ':refs/tags/release/%s' % new_version, 'v%s' % new_version])
)
# from here, the Travis CI magic begins
if __name__ == '__main__':
args = sys.argv[1:]
VERSION = ['major', 'minor', 'patch']
try:
increment = VERSION.index(sys.argv[1])
except ValueError:
print('%s is not a valid semantic version level (use major, minor, or patch)' % sys.argv[1])
except IndexError:
# default
increment = VERSION.index('patch')
main(increment)
| Python | 0 |
720c841d0930f73d1efe90518b0a2d9dcbd6425d | Document context | funktional/context.py | funktional/context.py | import sys
from contextlib import contextmanager
# Are we training (or testing)
training = False
@contextmanager
def context(**kwargs):
"""Temporarily change the values of context variables passed.
Enables the `with` syntax:
>>> with context(training=True):
...
"""
current = dict((k, getattr(sys.modules[__name__], k)) for k in kwargs)
for k,v in kwargs.items():
setattr(sys.modules[__name__], k, v)
yield
for k,v in current.items():
setattr(sys.modules[__name__], k, v)
| import sys
from contextlib import contextmanager
training = False
@contextmanager
def context(**kwargs):
current = dict((k, getattr(sys.modules[__name__], k)) for k in kwargs)
for k,v in kwargs.items():
setattr(sys.modules[__name__], k, v)
yield
for k,v in current.items():
setattr(sys.modules[__name__], k, v)
| Python | 0.000004 |
55987e48997f7f5a94adc3c53fcb8ae58e672c3c | increase version number | gdc_client/version.py | gdc_client/version.py | __version__ = 'v1.4.0'
| __version__ = 'v1.3.0'
| Python | 0.00036 |
ed7f0e555b438b611f4a9b0fdf6de1fca6ec2914 | fix incorrect use of str replace | genSongbook.py | genSongbook.py | #!/usr/bin/python
import sys, os
def query(question, default):
sys.stdout.write(question + " [" + default + "] ? ")
choice = raw_input()
if choice == '':
return default
return choice
if __name__ == '__main__':
print("----------------------")
print("Welcome to genSongbook")
print("----------------------")
# Query song directory path string
songDirectory = query("Please specify the path of the input song directory","/opt/Dropbox/lyrics/english")
print("Will use song directory: " + songDirectory)
# Query template file path string
templateFile = query("Please specify the path of the template file","template/english.tex")
print("Will use template file: " + templateFile)
print("----------------------")
templateFileFd = open(templateFile, 'r')
s = templateFileFd.read()
#sys.stdout.write(s) #-- Screen output for debugging.
rep = ""
for dirname, dirnames, filenames in os.walk( songDirectory ):
for filename in sorted(filenames):
rep += "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"
name, extension = os.path.splitext(filename)
rep += "\\chapter{" + name + "}\n" #-- Note that we use \\ instead of \.
rep += "\\begin{verbatim}\n"
song = open( os.path.join(dirname, filename) )
rep += song.read()
rep += "\\end{verbatim}\n"
rep += "\n"
#sys.stdout.write(rep) #-- Screen output for debugging.
s = s.replace("genSongbook",rep)
outFd = open("out.tex", 'w')
outFd.write(s)
| #!/usr/bin/python
import sys, os
def query(question, default):
sys.stdout.write(question + " [" + default + "] ? ")
choice = raw_input()
if choice == '':
return default
return choice
if __name__ == '__main__':
print("----------------------")
print("Welcome to genSongbook")
print("----------------------")
# Query song directory path string
songDirectory = query("Please specify the path of the input song directory","/opt/Dropbox/lyrics/english")
print("Will use song directory: " + songDirectory)
# Query template file path string
templateFile = query("Please specify the path of the template file","template/english.tex")
print("Will use template file: " + templateFile)
print("----------------------")
templateFileFd = open(templateFile, 'r')
s = templateFileFd.read()
#sys.stdout.write(s) #-- Screen output for debugging.
rep = ""
for dirname, dirnames, filenames in os.walk( songDirectory ):
for filename in sorted(filenames):
rep += "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"
name, extension = os.path.splitext(filename)
rep += "\\chapter{" + name + "}\n" #-- Note that we use \\ instead of \.
rep += "\\begin{verbatim}\n"
song = open( os.path.join(dirname, filename) )
rep += song.read()
rep += "\\end{verbatim}\n"
rep += "\n"
s.replace("genSongbook",rep)
outFd = open("out.tex", 'w')
outFd.write(s)
| Python | 0.000014 |
c7067fce8723f810ed48de6513c6f756d499d807 | add whitelist tags. | dp_tornado/helper/html.py | dp_tornado/helper/html.py | # -*- coding: utf-8 -*-
from dp_tornado.engine.helper import Helper as dpHelper
try:
# py 2.x
import HTMLParser
html_parser = HTMLParser.HTMLParser()
except:
# py 3.4-
try:
import html.parser
html_parser = html.parser.HTMLParser()
except:
# py 3.4+
import html as html_parser
try:
import htmltag
except:
htmltag = None
import re
class HtmlHelper(dpHelper):
def strip_xss(self, html, whitelist=None, replacement='entities'):
if not htmltag:
raise Exception('htmltag library required.')
if whitelist is None:
whitelist = (
'a', 'abbr', 'aside', 'audio', 'bdi', 'bdo', 'blockquote', 'canvas',
'caption', 'code', 'col', 'colgroup', 'data', 'dd', 'del',
'details', 'div', 'dl', 'dt', 'em', 'figcaption', 'figure', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins', 'kbd', 'li',
'mark', 'ol', 'p', 'pre', 'q', 'rp', 'rt', 'ruby', 's', 'samp',
'small', 'source', 'span', 'strong', 'sub', 'summary', 'sup',
'table', 'td', 'th', 'time', 'tr', 'track', 'u', 'ul', 'var',
'video', 'wbr', 'b', 'br', 'site', 'font')
return htmltag.strip_xss(html, whitelist, replacement)
def strip_tags(self, text):
return re.sub('<[^<]+?>', '', text)
def entity_decode(self, text):
return html_parser.unescape(text)
| # -*- coding: utf-8 -*-
from dp_tornado.engine.helper import Helper as dpHelper
try:
# py 2.x
import HTMLParser
html_parser = HTMLParser.HTMLParser()
except:
# py 3.4-
try:
import html.parser
html_parser = html.parser.HTMLParser()
except:
# py 3.4+
import html as html_parser
try:
import htmltag
except:
htmltag = None
import re
class HtmlHelper(dpHelper):
def strip_xss(self, html, whitelist=None, replacement='entities'):
if not htmltag:
raise Exception('htmltag library required.')
if whitelist is None:
whitelist = (
'a', 'abbr', 'aside', 'audio', 'bdi', 'bdo', 'blockquote', 'canvas',
'caption', 'code', 'col', 'colgroup', 'data', 'dd', 'del',
'details', 'div', 'dl', 'dt', 'em', 'figcaption', 'figure', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins', 'kbd', 'li',
'mark', 'ol', 'p', 'pre', 'q', 'rp', 'rt', 'ruby', 's', 'samp',
'small', 'source', 'span', 'strong', 'sub', 'summary', 'sup',
'table', 'td', 'th', 'time', 'tr', 'track', 'u', 'ul', 'var',
'video', 'wbr', 'b')
return htmltag.strip_xss(html, whitelist, replacement)
def strip_tags(self, text):
return re.sub('<[^<]+?>', '', text)
def entity_decode(self, text):
return html_parser.unescape(text)
| Python | 0 |
ab6d09c93a9d43ffbf442880633170f5fc678edd | add verbose mode to print processing module | get_modules.py | get_modules.py | #!/usr/bin/env python3
import os
import sys
import requests
import yaml
import git
import svn.remote
import zipfile
import argparse
def get_modules(yml_file, dest, verbose):
f = open(yml_file)
for data in yaml.load(f):
if (not dest.endswith('/')):
dest = dest + '/'
if not 'version' in data:
version = None
else:
version = data['version']
download_module(data['url'], dest, data['name'], data['type'], version, verbose)
f.close()
def download_module(src, dest, name, type, version, verbose):
if os.path.exists(dest + name):
if verbose: print(name + ' already exist')
return
if verbose and version is not None:
print('download ' + name + ':' + version + ' (' + type + ')')
elif verbose:
print('download ' + name + ' (' + type + ')')
if type == 'git':
download_git(src, dest + name, version)
elif type == 'svn':
download_svn(src, dest + name, version)
elif type == 'zip':
download_zip(src, dest, name)
def download_git(src, dest, version):
if version is None:
git.Repo.clone_from(src, dest)
else:
git.Repo.clone_from(src, dest, branch=version)
def download_svn(src, dest, version):
r = svn.remote.RemoteClient(src)
r.checkout(dest)
def download_zip(src, dest, name):
filename = download_file(src, dest)
zfile = zipfile.ZipFile(filename, "r")
zfile.extractall(dest)
os.rename(dest+zfile.namelist()[0].split("/")[0], dest+name)
os.remove(filename)
def download_file(url, destdir):
filename = destdir + url.split('/')[-1]
r = requests.get(url, stream=True)
with open(filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
return filename
def create_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('modules',
help='list of modules to download')
parser.add_argument('dest_dir',
help='dest directory to save modules')
parser.add_argument('-V', '--verbose',
action='store_true',
help='show verbose message')
return parser
if __name__ == '__main__':
args = create_argparser().parse_args()
get_modules(args.modules, args.dest_dir, args.verbose)
| #!/usr/bin/env python3
import os
import sys
import requests
import yaml
import git
import svn.remote
import zipfile
import argparse
def get_modules(yml_file, dest):
f = open(yml_file)
for data in yaml.load(f):
if (not dest.endswith('/')):
dest = dest + '/'
if not 'version' in data:
version = None
else:
version = data['version']
download_module(data['url'], dest, data['name'], data['type'], version)
f.close()
def download_module(src, dest, name, type, version):
if os.path.exists(dest + name):
return
if type == 'git':
download_git(src, dest + name, version)
elif type == 'svn':
download_svn(src, dest + name, version)
elif type == 'zip':
download_zip(src, dest, name)
def download_git(src, dest, version):
if version is None:
git.Repo.clone_from(src, dest)
else:
git.Repo.clone_from(src, dest, branch=version)
def download_svn(src, dest, version):
r = svn.remote.RemoteClient(src)
r.checkout(dest)
def download_zip(src, dest, name):
filename = download_file(src, dest)
zfile = zipfile.ZipFile(filename, "r")
zfile.extractall(dest)
os.rename(dest+zfile.namelist()[0].split("/")[0], dest+name)
os.remove(filename)
def download_file(url, destdir):
filename = destdir + url.split('/')[-1]
r = requests.get(url, stream=True)
with open(filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
return filename
def create_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('modules',
help='list of modules to download')
parser.add_argument('dest_dir',
help='dest directory to save modules')
return parser
if __name__ == '__main__':
args = create_argparser().parse_args()
get_modules(args.modules, args.dest_dir)
| Python | 0.000001 |
ad70f37b4a02ba117a91dcbdf5387ade2cbdfcf5 | Change petset to stateful set | e2e/tests/test_volumes.py | e2e/tests/test_volumes.py | from clickclick import fatal_error
from .helpers import PETSET_PATH, SECRET_PATH, create_resource, wait_for_pod
def test_volumes(run_id, url, token):
secret_manifest = '''
apiVersion: v1
kind: Secret
metadata:
name: &cluster_name spilodemo
labels:
application: spilo
spilo-cluster: *cluster_name
type: Opaque
data:
superuser-password: emFsYW5kbw==
replication-password: cmVwLXBhc3M=
admin-password: YWRtaW4=
'''
create_resource(secret_manifest, url + SECRET_PATH, token)
manifest = '''
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: &cluster_name spilodemo
labels:
application: spilo
spilo-cluster: *cluster_name
spec:
replicas: 3
serviceName: *cluster_name
template:
metadata:
labels:
application: spilo
spilo-cluster: *cluster_name
spec:
containers:
- name: *cluster_name
image: registry.opensource.zalan.do/acid/spilotest-9.6:1.1-p10 # put the spilo image here
imagePullPolicy: Always
ports:
- containerPort: 8008
protocol: TCP
- containerPort: 5432
protocol: TCP
volumeMounts:
- mountPath: /home/postgres/pgdata
name: pgdata
env:
- name: ETCD_HOST
value: 'etcd.default.svc.cluster.local:2379' # where is your etcd?
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: PGPASSWORD_SUPERUSER
valueFrom:
secretKeyRef:
name: *cluster_name
key: superuser-password
- name: PGPASSWORD_ADMIN
valueFrom:
secretKeyRef:
name: *cluster_name
key: admin-password
- name: PGPASSWORD_STANDBY
valueFrom:
secretKeyRef:
name: *cluster_name
key: replication-password
- name: SCOPE
value: *cluster_name
- name: PGROOT
value: /home/postgres/pgdata/pgroot
terminationGracePeriodSeconds: 0
volumes:
- name: pgdata
emptyDir: {}
volumeClaimTemplates:
- metadata:
labels:
application: spilo
spilo-cluster: *cluster_name
annotations:
volume.beta.kubernetes.io/storage-class: standard
name: pgdata
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
'''
create_resource(manifest, url + PETSET_PATH, token)
for i in range(3):
available = wait_for_pod('spilodemo-{}'.format(i), url, token)
if not available:
fatal_error('e2e test for volumes failed')
| from clickclick import fatal_error
from .helpers import PETSET_PATH, SECRET_PATH, create_resource, wait_for_pod
def test_volumes(run_id, url, token):
secret_manifest = '''
apiVersion: v1
kind: Secret
metadata:
name: &cluster_name spilodemo
labels:
application: spilo
spilo-cluster: *cluster_name
type: Opaque
data:
superuser-password: emFsYW5kbw==
replication-password: cmVwLXBhc3M=
admin-password: YWRtaW4=
'''
create_resource(secret_manifest, url + SECRET_PATH, token)
manifest = '''
apiVersion: apps/v1alpha1
kind: PetSet
metadata:
name: &cluster_name spilodemo
labels:
application: spilo
spilo-cluster: *cluster_name
spec:
replicas: 3
serviceName: *cluster_name
template:
metadata:
labels:
application: spilo
spilo-cluster: *cluster_name
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
containers:
- name: *cluster_name
image: registry.opensource.zalan.do/acid/spilotest-9.6:1.1-p10 # put the spilo image here
imagePullPolicy: Always
ports:
- containerPort: 8008
protocol: TCP
- containerPort: 5432
protocol: TCP
volumeMounts:
- mountPath: /home/postgres/pgdata
name: pgdata
env:
- name: ETCD_HOST
value: 'etcd.default.svc.cluster.local:2379' # where is your etcd?
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: PGPASSWORD_SUPERUSER
valueFrom:
secretKeyRef:
name: *cluster_name
key: superuser-password
- name: PGPASSWORD_ADMIN
valueFrom:
secretKeyRef:
name: *cluster_name
key: admin-password
- name: PGPASSWORD_STANDBY
valueFrom:
secretKeyRef:
name: *cluster_name
key: replication-password
- name: SCOPE
value: *cluster_name
- name: PGROOT
value: /home/postgres/pgdata/pgroot
terminationGracePeriodSeconds: 0
volumes:
- name: pgdata
emptyDir: {}
volumeClaimTemplates:
- metadata:
labels:
application: spilo
spilo-cluster: *cluster_name
annotations:
volume.beta.kubernetes.io/storage-class: standard
name: pgdata
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
'''
create_resource(manifest, url + PETSET_PATH, token)
for i in range(3):
available = wait_for_pod('spilodemo-{}'.format(i), url, token)
if not available:
fatal_error('e2e test for volumes failed')
| Python | 0.000002 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.