commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
57d9d4fe1b46d9dd45629dc5fc461c0b8c51c5ec | Fix music helper | src/helpers/musicHelper.py | src/helpers/musicHelper.py | import pygame
import os
import sys
sys.path.append(os.path.dirname(__file__) + "/../audios/letters")
pygame.mixer.init()
def play_word(word):
for letter in word:
_play_letter(letter)
def _play_letter(letter):
pygame.mixer.music.load("audios/letters/" + letter.lower() + ".mp3")
pygame.mixer.music.play()
while pygame.mixer.music.get_busy() == True:
continue
| #import pygame
import os
import sys
import pyglet
sys.path.append(os.path.dirname(__file__) + "/../audios/letters")
pyglet.options['audio'] = ('openal', 'pulse', 'silent')
player = pyglet.media.Player()
#pygame.mixer.init()
def play_file(file_path):
pass
#pygame.mixer.music.load(file_path)
# playAudioLoaded()
def play_word(word):
#CHANNEL.stop()
# pygame.mixer.music.load(os.path.dirname(__file__) + "/../audios/letters/a.mp3")
# pygame.mixer.music.play()
# pygame.mixer.music.queue(os.path.dirname(__file__) + "/../audios/letters/e.mp3")
# pygame.mixer.music.stop()
first = True
for letter in word:
path = str(os.path.dirname(__file__) + "/../audios/letters/" + letter.lower() + ".mp3")
src = pyglet.media.load(path, streaming=False)
player.queue(src)
# if first:
# first = False
# pygame.mixer.music.load(os.path.dirname(__file__) + "/../audios/letters/" + letter.lower() + ".mp3")
#pygame.mixer.music.play()
# else:
# pygame.mixer.music.queue(os.path.dirname(__file__) + "/../audios/letters/" + letter.lower() + ".mp3")
#_play_letter(letter)
# pygame.mixer.music.play()
player.play()
def _play_letter(letter):
pass
#pygame.mixer.music.load("audios/letters/" + letter.lower() + ".mp3")
#pygame.mixer.music.play()
#while pygame.mixer.music.get_busy() == True:
# continue
#def playAudioLoaded():
| Python | 0.000003 |
38d96e4ddbe44af8f028dfb29eca17dc8ecd478d | test case for clean module | src/html2latex/__init__.py | src/html2latex/__init__.py | from .html2latex import html2latex
html2latex
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
| from .html2latex import html2latex
html2latex
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
| Python | 0 |
46f15a00d2324da4b9f12c9168ddda8dddb1b607 | use notebook-style for plot_logistic_path.py (#22536) | examples/linear_model/plot_logistic_path.py | examples/linear_model/plot_logistic_path.py | """
==============================================
Regularization path of L1- Logistic Regression
==============================================
Train l1-penalized logistic regression models on a binary classification
problem derived from the Iris dataset.
The models are ordered from strongest regularized to least regularized. The 4
coefficients of the models are collected and plotted as a "regularization
path": on the left-hand side of the figure (strong regularizers), all the
coefficients are exactly 0. When regularization gets progressively looser,
coefficients can get non-zero values one after the other.
Here we choose the liblinear solver because it can efficiently optimize for the
Logistic Regression loss with a non-smooth, sparsity inducing l1 penalty.
Also note that we set a low value for the tolerance to make sure that the model
has converged before collecting the coefficients.
We also use warm_start=True which means that the coefficients of the models are
reused to initialize the next model fit to speed-up the computation of the
full-path.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
# %%
# Load data
# ---------
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X /= X.max() # Normalize X to speed-up convergence
# %%
# Compute regularization path
# ---------------------------
import numpy as np
from sklearn import linear_model
from sklearn.svm import l1_min_c
cs = l1_min_c(X, y, loss="log") * np.logspace(0, 7, 16)
clf = linear_model.LogisticRegression(
penalty="l1",
solver="liblinear",
tol=1e-6,
max_iter=int(1e6),
warm_start=True,
intercept_scaling=10000.0,
)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
coefs_ = np.array(coefs_)
# %%
# Plot regularization path
# ------------------------
import matplotlib.pyplot as plt
plt.plot(np.log10(cs), coefs_, marker="o")
ymin, ymax = plt.ylim()
plt.xlabel("log(C)")
plt.ylabel("Coefficients")
plt.title("Logistic Regression Path")
plt.axis("tight")
plt.show()
| """
==============================================
Regularization path of L1- Logistic Regression
==============================================
Train l1-penalized logistic regression models on a binary classification
problem derived from the Iris dataset.
The models are ordered from strongest regularized to least regularized. The 4
coefficients of the models are collected and plotted as a "regularization
path": on the left-hand side of the figure (strong regularizers), all the
coefficients are exactly 0. When regularization gets progressively looser,
coefficients can get non-zero values one after the other.
Here we choose the liblinear solver because it can efficiently optimize for the
Logistic Regression loss with a non-smooth, sparsity inducing l1 penalty.
Also note that we set a low value for the tolerance to make sure that the model
has converged before collecting the coefficients.
We also use warm_start=True which means that the coefficients of the models are
reused to initialize the next model fit to speed-up the computation of the
full-path.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X /= X.max() # Normalize X to speed-up convergence
# #############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss="log") * np.logspace(0, 7, 16)
print("Computing regularization path ...")
start = time()
clf = linear_model.LogisticRegression(
penalty="l1",
solver="liblinear",
tol=1e-6,
max_iter=int(1e6),
warm_start=True,
intercept_scaling=10000.0,
)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took %0.3fs" % (time() - start))
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_, marker="o")
ymin, ymax = plt.ylim()
plt.xlabel("log(C)")
plt.ylabel("Coefficients")
plt.title("Logistic Regression Path")
plt.axis("tight")
plt.show()
| Python | 0 |
fd6899578bc8e6149921998d42be383f21adbe9a | add plots | examples/tf/bench_run_times/plot_results.py | examples/tf/bench_run_times/plot_results.py | import numpy as np
import matplotlib.pyplot as plt
# factor for 90% coverage with 90% confidence using Normal distribution
# with 10 samples from table XII in [1]
# [1] Montgomery, D. C., & Runger, G. C. (2014). Applied statistics and
# probability for engineers. Sixth edition. John Wiley & Sons.
k = 2.535
amd_fx_run_times = np.load('amd_fx_8350_titanXP/6_break_times.npy')
n = np.load('amd_fx_8350_titanXP/n.npy')
amd_fx_run_times_means = amd_fx_run_times.mean(axis=2)
amd_fx_run_times_stds = amd_fx_run_times.std(axis=2, ddof=1)
plt.figure()
plt.grid()
plt.errorbar(n, amd_fx_run_times_means[0], yerr=k*amd_fx_run_times_stds[0], capsize=2.0, label='Numpy')
plt.errorbar(n, amd_fx_run_times_means[1], yerr=k*amd_fx_run_times_stds[1], capsize=2.0, label='TF GPU')
plt.xlabel('Number of data points')
plt.ylabel('Run time (seconds, Lower is better)')
plt.semilogx()
plt.figure()
plt.grid()
plt.errorbar(n[1:], amd_fx_run_times_means[0,1:] - amd_fx_run_times_means[0,1:], yerr=(k*amd_fx_run_times_stds[0,1:]), capsize=2.0, label='Numpy')
plt.errorbar(n[1:], amd_fx_run_times_means[1,1:] - amd_fx_run_times_means[0,1:], yerr=(k*amd_fx_run_times_stds[1,1:]), capsize=2.0, label='TF GPU')
plt.xlabel('Number of data points')
plt.ylabel('Run time difference (Lower is better)')
plt.semilogx()
plt.figure()
plt.grid()
plt.errorbar(n[1:], amd_fx_run_times_means[0,1:]/amd_fx_run_times_means[0,1:], yerr=(k*amd_fx_run_times_stds[0,1:])/amd_fx_run_times_means[0,1:], capsize=2.0, label='Numpy')
plt.errorbar(n[1:], amd_fx_run_times_means[1,1:]/amd_fx_run_times_means[0,1:], yerr=(k*amd_fx_run_times_stds[1,1:])/amd_fx_run_times_means[0,1:], capsize=2.0, label='TF GPU')
plt.xlabel('Number of data points')
plt.ylabel('Run time relative to Numpy (Lower is better)')
plt.semilogx()
plt.figure()
plt.grid()
plt.errorbar(n[1:], amd_fx_run_times_means[0,1:]/amd_fx_run_times_means[1,1:], yerr=(k*amd_fx_run_times_stds[0,1:])/amd_fx_run_times_means[1,1:], capsize=2.0, label='Numpy')
plt.errorbar(n[1:], amd_fx_run_times_means[1,1:]/amd_fx_run_times_means[1,1:], yerr=(k*amd_fx_run_times_stds[1,1:])/amd_fx_run_times_means[1,1:], capsize=2.0, label='TF GPU')
plt.xlabel('Number of data points')
plt.ylabel('Run time relative to TF GPU (Lower is better)')
plt.semilogx()
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
amd_fx_run_times = np.load('amd_fx_8350_titanXP/6_break_times.npy')
n = np.load('amd_fx_8350_titanXP/n.npy') | Python | 0.000001 |
d1a69904ba1d8072988aeb330157dbff20d0c5de | Remove unneeded self.client. | cred/test/util.py | cred/test/util.py | import os
import tempfile
import json
from functools import wraps
import flask.ext.testing
import flask.ext.sqlalchemy
import cred.database
from cred.app import app, api
from cred.routes import create_api_resources
# Constants used throughout the test suites
DEVICE = 'Thermostat'
LOCATION = 'Living Room'
EVENTS = ['Temperature']
SUBSCRIBE = {
'Light': {'location': 'Living Room'},
'Alarm': {}
}
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
cred.database.db = flask.ext.sqlalchemy.SQLAlchemy(app)
create_api_resources(api)
def assertEqual(test_object, assertables):
"""Convenience method for asserting multiple items."""
for value, expected_value in assertables.items():
test_object.assertEqual(value, expected_value)
def authenticate(permission, alt_dev=None):
"""Decorator for authenticating a client with permissions."""
def authenticate_decorator(fun):
@wraps(fun)
def wrapped(self, *args, **kwargs):
self.authenticate_with_server(permission, alternate_device=alt_dev)
fun(self, *args, **kwargs)
return wrapped
return authenticate_decorator
class BaseTestCase(flask.ext.testing.TestCase):
SQLALCHEMY_DATABASE_URI = "sqlite://"
TESTING = True
def create_app(self):
return app
def setUp(self):
"""Create a SQLite database for quick testing."""
cred.database.init_db(cred.database.db)
self.session_key = None
def tearDown(self):
"""Close the database file and unlink it."""
cred.database.db.session.remove()
cred.database.db.drop_all()
def authenticate_with_server(self, permission, alternate_device=None):
"""Authenticate with the server."""
from cred.models.apikey import APIKey as APIKeyModel
from cred.resources.apikeys import generate_apikey
device = DEVICE
if alternate_device is not None:
device = alternate_device
apikey = APIKeyModel(generate_apikey(), permission)
cred.database.db.session.add(apikey)
cred.database.db.session.commit()
req = json.dumps({
'apikey': apikey.apikey,
'device': device,
'location': LOCATION,
'subscribe': SUBSCRIBE
})
response = self.client.post(
'/auth',
data=req,
content_type='application/json'
)
resp = json.loads(response.data.decode('utf-8'))
self.session_key = resp['sessionKey']
self.client_id = resp['id']
return response
| import os
import tempfile
import json
from functools import wraps
import flask.ext.testing
import flask.ext.sqlalchemy
import cred.database
from cred.app import app, api
from cred.routes import create_api_resources
# Constants used throughout the test suites
DEVICE = 'Thermostat'
LOCATION = 'Living Room'
EVENTS = ['Temperature']
SUBSCRIBE = {
'Light': {'location': 'Living Room'},
'Alarm': {}
}
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
cred.database.db = flask.ext.sqlalchemy.SQLAlchemy(app)
create_api_resources(api)
def assertEqual(test_object, assertables):
"""Convenience method for asserting multiple items."""
for value, expected_value in assertables.items():
test_object.assertEqual(value, expected_value)
def authenticate(permission, alt_dev=None):
"""Decorator for authenticating a client with permissions."""
def authenticate_decorator(fun):
@wraps(fun)
def wrapped(self, *args, **kwargs):
self.authenticate_with_server(permission, alternate_device=alt_dev)
fun(self, *args, **kwargs)
return wrapped
return authenticate_decorator
class BaseTestCase(flask.ext.testing.TestCase):
SQLALCHEMY_DATABASE_URI = "sqlite://"
TESTING = True
def create_app(self):
self.client = app.test_client()
return app
def setUp(self):
"""Create a SQLite database for quick testing."""
cred.database.init_db(cred.database.db)
self.session_key = None
def tearDown(self):
"""Close the database file and unlink it."""
cred.database.db.session.remove()
cred.database.db.drop_all()
def authenticate_with_server(self, permission, alternate_device=None):
"""Authenticate with the server."""
from cred.models.apikey import APIKey as APIKeyModel
from cred.resources.apikeys import generate_apikey
device = DEVICE
if alternate_device is not None:
device = alternate_device
apikey = APIKeyModel(generate_apikey(), permission)
cred.database.db.session.add(apikey)
cred.database.db.session.commit()
req = json.dumps({
'apikey': apikey.apikey,
'device': device,
'location': LOCATION,
'subscribe': SUBSCRIBE
})
response = self.client.post(
'/auth',
data=req,
content_type='application/json'
)
resp = json.loads(response.data.decode('utf-8'))
self.session_key = resp['sessionKey']
self.client_id = resp['id']
return response
| Python | 0 |
064802e0354cd9d27a7ea0d1c69a45baf0587c63 | add pool example | redisext/pool.py | redisext/pool.py | '''
Pool
----
.. autoclass:: Pool
:members:
The simpliest example of pool usage is token pool::
class TokenPool(Connection, redisext.pool.Pool):
SERIALIZER = redisext.serializer.String
and this pool could be used like::
>>> facebook = TokenPool('facebook')
>>> facebook.push('fb1')
True
>>> facebook.push('fb1')
False
>>> facebook.push('fb2')
True
>>> facebook.pop()
u'fb1'
>>> facebook.pop()
u'fb2'
>>> facebook.pop()
>>>
SortedSet
---------
For your spectial needs check :class:`redisext.pool.SortedSet`.
'''
from __future__ import absolute_import
import redisext.models.abc
class Pool(redisext.models.abc.Model):
def pop(self):
'''
Pop item from pool.
:returns: obviously item
:rtype: how knows(serializer knows)
'''
item = self.connect_to_master().spop(self.key)
return self.decode(item)
def push(self, item):
'''
Place item into pool.
:param item: whatever you need to place into pool
:rtype: bool
'''
item = self.encode(item)
return bool(self.connect_to_master().sadd(self.key, item))
class SortedSet(redisext.models.abc.Model):
def add(self, element, score):
element = self.encode(element)
return bool(self.connect_to_master().zadd(self.key, score, element))
def length(self, start_score, end_score):
return int(self.connect_to_slave().zcount(self.key, start_score, end_score))
def members(self):
elements = self.connect_to_slave().zrevrange(self.key, 0, -1)
if not elements:
return elements
return [self.decode(e) for e in elements]
def contains(self, element):
element = self.encode(element)
return self.connect_to_slave().zscore(self.key, element) is not None
def truncate(self, size):
return int(self.connect_to_master().zremrangebyrank(self.key, 0, -1 * size - 1))
def clean(self):
return bool(self.connect_to_master().delete(self.key))
| '''
Pool
^^^^
.. autoclass:: Pool
:members:
SortedSet
^^^^^^^^^
.. autoclass:: SortedSet
:members:
'''
from __future__ import absolute_import
import redisext.models.abc
class Pool(redisext.models.abc.Model):
def pop(self):
item = self.connect_to_master().spop(self.key)
return self.decode(item)
def push(self, item):
item = self.encode(item)
return bool(self.connect_to_master().sadd(self.key, item))
class SortedSet(redisext.models.abc.Model):
def add(self, element, score):
element = self.encode(element)
return bool(self.connect_to_master().zadd(self.key, score, element))
def length(self, start_score, end_score):
return int(self.connect_to_slave().zcount(self.key, start_score, end_score))
def members(self):
elements = self.connect_to_slave().zrevrange(self.key, 0, -1)
if not elements:
return elements
return [self.decode(e) for e in elements]
def contains(self, element):
element = self.encode(element)
return self.connect_to_slave().zscore(self.key, element) is not None
def truncate(self, size):
return int(self.connect_to_master().zremrangebyrank(self.key, 0, -1 * size - 1))
def clean(self):
return bool(self.connect_to_master().delete(self.key))
| Python | 0 |
e5ed6ef0c201d9a29c5934e3687abec7e13ae551 | update models to use a hashids for naming files | api/models.py | api/models.py | """
This file represents the models for the api app.
"""
from django.db import models
from .utils import get_file_upload_path, generate_uid
class DateMixin(models.Model):
"""A model mixin for date creation."""
created = models.DateField(auto_now_add=True)
class File(DateMixin):
"""This class represents the file model."""
file_id = models.CharField(default=generate_uid, max_length=50)
_file = models.FileField(upload_to=get_file_upload_path)
def __str__(self):
"""Return a string representation of the model instance."""
return "{}".format(self.name)
| """
This file represents the models for the api app.
"""
from django.db import models
class DateMixin(models.Model):
"""A model mixin for date creation."""
created = models.DateField(auto_now_add=True)
class File(DateMixin):
"""This class represents the file model."""
name = models.CharField(max_length=100, unique=True)
file = models.FileField(allow_files=True)
def __str__(self):
"""Return a string representation of the model instance."""
return "{}".format(self.name)
| Python | 0 |
9e82515ca1eeb6376947ae653ee375146c95016c | Fix EE API interface | dcos_test_utils/enterprise.py | dcos_test_utils/enterprise.py | import logging
import os
from dcos_test_utils import dcos_api_session, helpers, iam
log = logging.getLogger(__name__)
class MesosNodeClientMixin:
""" This Mixin allows any request to be made against a master or agent
mesos HTTP port by providing the keyword 'mesos_node'. Thus, the user
does not have to specify the master/agent port or which arbitrary host
in the cluster meeting that role
"""
def api_request(self, method, path_extension, *, scheme=None, host=None, query=None,
fragment=None, port=None, mesos_node=None, **kwargs):
if mesos_node is not None:
assert port is None, 'Usage error: mesos_node keyword will set port'
assert host is None, 'Usage error: mesos_node keyword will set host'
if mesos_node == 'master':
port = 5050
host = self.masters[0]
elif mesos_node == 'agent':
port = 5051
host = self.slaves[0]
else:
raise AssertionError('Mesos node type not recognized: {}'.format(mesos_node))
return super().api_request(method, path_extension, scheme=scheme, host=host, query=query,
fragment=fragment, port=port, **kwargs)
class EnterpriseUser(dcos_api_session.DcosUser):
def __init__(self, uid: str, password: str):
self.uid = uid
self.password = password
super().__init__(self.auth_json)
@property
def auth_json(self):
return {'uid': self.uid, 'password': self.password}
class EnterpriseApiSession(MesosNodeClientMixin, dcos_api_session.DcosApiSession):
@property
def iam(self):
return iam.Iam(self.default_url.copy(path='acs/api/v1'), session=self.copy().session)
@property
def secrets(self):
new = self.copy()
new.default_url = self.default_url.copy(path='secrets/v1')
return new
@property
def ca(self):
new = self.copy()
new.default_url = self.default_url.copy(path='ca/api/v2')
return new
@staticmethod
def get_args_from_env():
assert 'DCOS_LOGIN_UNAME' in os.environ, 'DCOS_LOGIN_UNAME must be set to login!'
assert 'DCOS_LOGIN_PW' in os.environ, 'DCOS_LOGIN_PW must be set!'
uid = os.environ['DCOS_LOGIN_UNAME']
password = os.environ['DCOS_LOGIN_PW']
args = dcos_api_session.DcosApiSession.get_args_from_env()
args['auth_user'] = EnterpriseUser(uid, password)
return args
def set_ca_cert(self):
log.info('Attempt to get CA bundle via CA HTTP API')
r = self.post('ca/api/v2/info', json={'profile': ''}, verify=False)
r.raise_for_status()
crt = r.json()['result']['certificate']
self.session.verify = helpers.session_tempfile(crt.encode())
def set_initial_resource_ids(self):
self.initial_resource_ids = []
r = self.iam.get('/acls')
for o in r.json()['array']:
self.initial_resource_ids.append(o['rid'])
| import logging
import os
from dcos_test_utils import dcos_api_session, helpers, iam
log = logging.getLogger(__name__)
class MesosNodeClientMixin:
""" This Mixin allows any request to be made against a master or agent
mesos HTTP port by providing the keyword 'mesos_node'. Thus, the user
does not have to specify the master/agent port or which arbitrary host
in the cluster meeting that role
"""
def api_request(self, method, path_extension, *, scheme=None, host=None, query=None,
fragment=None, port=None, mesos_node=None, **kwargs):
if mesos_node is not None:
assert port is None, 'Usage error: mesos_node keyword will set port'
assert host is None, 'Usage error: mesos_node keyword will set host'
if mesos_node == 'master':
port = 5050
host = self.masters[0]
elif mesos_node == 'agent':
port = 5051
host = self.slaves[0]
else:
raise AssertionError('Mesos node type not recognized: {}'.format(mesos_node))
return super().api_request(method, path_extension, scheme=scheme, host=host, query=query,
fragment=fragment, port=port, **kwargs)
class EnterpriseUser(dcos_api_session.DcosUser):
def __init__(self, uid: str, password: str):
self.uid = uid
self.password = password
super().__init__(self.auth_json)
@property
def auth_json(self):
return {'uid': self.uid, 'password': self.password}
class EnterpriseApiSession(MesosNodeClientMixin, dcos_api_session.DcosApiSession):
@property
def iam(self):
return iam.Iam(self.default_url.copy(path='acs/api/v1'), session=self.copy().session)
@property
def secrets(self):
new = self.copy()
new.default_url = self.default_url.copy(path='secrets/v1')
return new
@property
def ca(self):
new = self.copy()
new.default_url = self.default_url.copy(path='ca/api/v2')
return new
@staticmethod
def get_args_from_env():
assert 'DCOS_LOGIN_UNAME' in os.environ, 'DCOS_LOGIN_UNAME must be set to login!'
assert 'DCOS_LOGIN_PW' in os.environ, 'DCOS_LOGIN_PW must be set!'
uid = os.environ['DCOS_LOGIN_UNAME']
password = os.environ['DCOS_LOGIN_PW']
args = super(dcos_api_session.DcosApiSession).get_args_from_env()
args['auth_user'] = EnterpriseUser(uid, password)
return args
def set_ca_cert(self):
log.info('Attempt to get CA bundle via CA HTTP API')
r = self.post('ca/api/v2/info', json={'profile': ''}, verify=False)
r.raise_for_status()
crt = r.json()['result']['certificate']
self.session.verify = helpers.session_tempfile(crt.encode())
def set_initial_resrouce_ids(self):
self.initial_resource_ids = []
r = self.iam.get('/acls')
for o in r.json()['array']:
self.initial_resource_ids.append(o['rid'])
| Python | 0.000143 |
6e2515f4db3b6b9913e252cd52237574002637f2 | Add missing user_id in revoke_certs_by_user_and_project() | nova/cert/manager.py | nova/cert/manager.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cert manager manages x509 certificates.
**Related Flags**
:cert_topic: What :mod:`rpc` topic to listen to (default: `cert`).
:cert_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`nova.cert.manager.Manager`).
"""
import base64
from nova import crypto
from nova import flags
from nova import manager
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class CertManager(manager.Manager):
RPC_API_VERSION = '1.0'
def init_host(self):
crypto.ensure_ca_filesystem()
def revoke_certs_by_user(self, context, user_id):
"""Revoke all user certs."""
return crypto.revoke_certs_by_user(user_id)
def revoke_certs_by_project(self, context, project_id):
"""Revoke all project certs."""
return crypto.revoke_certs_by_project(project_id)
def revoke_certs_by_user_and_project(self, context, user_id, project_id):
"""Revoke certs for user in project."""
return crypto.revoke_certs_by_user_and_project(user_id, project_id)
def generate_x509_cert(self, context, user_id, project_id):
"""Generate and sign a cert for user in project"""
return crypto.generate_x509_cert(user_id, project_id)
def fetch_ca(self, context, project_id):
"""Get root ca for a project"""
return crypto.fetch_ca(project_id)
def fetch_crl(self, context, project_id):
"""Get crl for a project"""
return crypto.fetch_crl(project_id)
def decrypt_text(self, context, project_id, text):
"""Decrypt base64 encoded text using the projects private key."""
return crypto.decrypt_text(project_id, base64.b64decode(text))
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cert manager manages x509 certificates.
**Related Flags**
:cert_topic: What :mod:`rpc` topic to listen to (default: `cert`).
:cert_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`nova.cert.manager.Manager`).
"""
import base64
from nova import crypto
from nova import flags
from nova import manager
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class CertManager(manager.Manager):
RPC_API_VERSION = '1.0'
def init_host(self):
crypto.ensure_ca_filesystem()
def revoke_certs_by_user(self, context, user_id):
"""Revoke all user certs."""
return crypto.revoke_certs_by_user(user_id)
def revoke_certs_by_project(self, context, project_id):
"""Revoke all project certs."""
return crypto.revoke_certs_by_project(project_id)
def revoke_certs_by_user_and_project(self, context, user_id, project_id):
"""Revoke certs for user in project."""
return crypto.revoke_certs_by_user_and_project(project_id)
def generate_x509_cert(self, context, user_id, project_id):
"""Generate and sign a cert for user in project"""
return crypto.generate_x509_cert(user_id, project_id)
def fetch_ca(self, context, project_id):
"""Get root ca for a project"""
return crypto.fetch_ca(project_id)
def fetch_crl(self, context, project_id):
"""Get crl for a project"""
return crypto.fetch_crl(project_id)
def decrypt_text(self, context, project_id, text):
"""Decrypt base64 encoded text using the projects private key."""
return crypto.decrypt_text(project_id, base64.b64decode(text))
| Python | 0.999994 |
b45ce22e0d688e5c2a9a56f5eb87744cea87a263 | Fix scimath.power for negative integer input. | numpy/lib/scimath.py | numpy/lib/scimath.py | """
Wrapper functions to more user-friendly calling of certain math functions
whose output data-type is different than the input data-type in certain
domains of the input.
"""
__all__ = ['sqrt', 'log', 'log2', 'logn','log10', 'power', 'arccos',
'arcsin', 'arctanh']
import numpy.core.numeric as nx
import numpy.core.numerictypes as nt
from numpy.core.numeric import asarray, any
from numpy.lib.type_check import isreal
#__all__.extend([key for key in dir(nx.umath)
# if key[0] != '_' and key not in __all__])
_ln2 = nx.log(2.0)
def _tocomplex(arr):
if isinstance(arr.dtype, (nt.single, nt.byte, nt.short, nt.ubyte,
nt.ushort)):
return arr.astype(nt.csingle)
else:
return arr.astype(nt.cdouble)
def _fix_real_lt_zero(x):
x = asarray(x)
if any(isreal(x) & (x<0)):
x = _tocomplex(x)
return x
def _fix_int_lt_zero(x):
x = asarray(x)
if any(isreal(x) & (x < 0)):
x = x * 1.0
return x
def _fix_real_abs_gt_1(x):
x = asarray(x)
if any(isreal(x) & (abs(x)>1)):
x = _tocomplex(x)
return x
def sqrt(x):
x = _fix_real_lt_zero(x)
return nx.sqrt(x)
def log(x):
x = _fix_real_lt_zero(x)
return nx.log(x)
def log10(x):
x = _fix_real_lt_zero(x)
return nx.log10(x)
def logn(n, x):
""" Take log base n of x.
"""
x = _fix_real_lt_zero(x)
n = _fix_real_lt_zero(n)
return nx.log(x)/nx.log(n)
def log2(x):
""" Take log base 2 of x.
"""
x = _fix_real_lt_zero(x)
return nx.log(x)/_ln2
def power(x, p):
x = _fix_real_lt_zero(x)
p = _fix_int_lt_zero(p)
return nx.power(x, p)
def arccos(x):
x = _fix_real_abs_gt_1(x)
return nx.arccos(x)
def arcsin(x):
x = _fix_real_abs_gt_1(x)
return nx.arcsin(x)
def arctanh(x):
x = _fix_real_abs_gt_1(x)
return nx.arctanh(x)
| """
Wrapper functions to more user-friendly calling of certain math functions
whose output data-type is different than the input data-type in certain
domains of the input.
"""
__all__ = ['sqrt', 'log', 'log2', 'logn','log10', 'power', 'arccos',
'arcsin', 'arctanh']
import numpy.core.numeric as nx
import numpy.core.numerictypes as nt
from numpy.core.numeric import asarray, any
from numpy.lib.type_check import isreal
#__all__.extend([key for key in dir(nx.umath)
# if key[0] != '_' and key not in __all__])
_ln2 = nx.log(2.0)
def _tocomplex(arr):
if isinstance(arr.dtype, (nt.single, nt.byte, nt.short, nt.ubyte,
nt.ushort)):
return arr.astype(nt.csingle)
else:
return arr.astype(nt.cdouble)
def _fix_real_lt_zero(x):
x = asarray(x)
if any(isreal(x) & (x<0)):
x = _tocomplex(x)
return x
def _fix_real_abs_gt_1(x):
x = asarray(x)
if any(isreal(x) & (abs(x)>1)):
x = _tocomplex(x)
return x
def sqrt(x):
x = _fix_real_lt_zero(x)
return nx.sqrt(x)
def log(x):
x = _fix_real_lt_zero(x)
return nx.log(x)
def log10(x):
x = _fix_real_lt_zero(x)
return nx.log10(x)
def logn(n, x):
""" Take log base n of x.
"""
x = _fix_real_lt_zero(x)
n = _fix_real_lt_zero(n)
return nx.log(x)/nx.log(n)
def log2(x):
""" Take log base 2 of x.
"""
x = _fix_real_lt_zero(x)
return nx.log(x)/_ln2
def power(x, p):
x = _fix_real_lt_zero(x)
return nx.power(x, p)
def arccos(x):
x = _fix_real_abs_gt_1(x)
return nx.arccos(x)
def arcsin(x):
x = _fix_real_abs_gt_1(x)
return nx.arcsin(x)
def arctanh(x):
x = _fix_real_abs_gt_1(x)
return nx.arctanh(x)
| Python | 0.999999 |
3c299bf2682a9b8d5be2c9c8f308720182935d12 | Add missing username to log statement | accounts/tasks.py | accounts/tasks.py | import logging
from celery import task
from django.db import IntegrityError
from django.utils.text import slugify
import games.models
from accounts.models import User
from emails.messages import send_daily_mod_mail
from games.util.steam import create_game
LOGGER = logging.getLogger()
@task
def sync_steam_library(user_id):
user = User.objects.get(pk=user_id)
steamid = user.steamid
library = games.models.GameLibrary.objects.get(user=user)
steam_games = games.util.steam.steam_sync(steamid)
if not steam_games:
LOGGER.info("Steam user %s has no steam games", user.username)
return
for game in steam_games:
LOGGER.info("Adding %s to %s's library", game['name'], user.username)
if not game['img_icon_url']:
LOGGER.info("Game %s has no icon", game['name'])
continue
try:
steam_game = games.models.Game.objects.get(steamid=game['appid'])
except games.models.Game.MultipleObjectsReturned:
LOGGER.error("Multiple games with appid '%s'", game['appid'])
continue
except games.models.Game.DoesNotExist:
LOGGER.info("No game with steam id %s", game['appid'])
try:
steam_game = games.models.Game.objects.get(
slug=slugify(game['name'])[:50]
)
if not steam_game.steamid:
steam_game.steamid = game['appid']
steam_game.save()
except games.models.Game.DoesNotExist:
steam_game = create_game(game)
LOGGER.info("Creating game %s", steam_game.slug)
try:
library.games.add(steam_game)
except IntegrityError:
# Game somehow already added.
pass
@task
def daily_mod_mail():
send_daily_mod_mail()
| import logging
from celery import task
from django.db import IntegrityError
from django.utils.text import slugify
import games.models
from accounts.models import User
from emails.messages import send_daily_mod_mail
from games.util.steam import create_game
LOGGER = logging.getLogger()
@task
def sync_steam_library(user_id):
user = User.objects.get(pk=user_id)
steamid = user.steamid
library = games.models.GameLibrary.objects.get(user=user)
steam_games = games.util.steam.steam_sync(steamid)
if not steam_games:
LOGGER.info("Steam user %s has no steam games")
return
for game in steam_games:
LOGGER.info("Adding %s to %s's library", game['name'], user.username)
if not game['img_icon_url']:
LOGGER.info("Game %s has no icon", game['name'])
continue
try:
steam_game = games.models.Game.objects.get(steamid=game['appid'])
except games.models.Game.MultipleObjectsReturned:
LOGGER.error("Multiple games with appid '%s'", game['appid'])
continue
except games.models.Game.DoesNotExist:
LOGGER.info("No game with steam id %s", game['appid'])
try:
steam_game = games.models.Game.objects.get(
slug=slugify(game['name'])[:50]
)
if not steam_game.steamid:
steam_game.steamid = game['appid']
steam_game.save()
except games.models.Game.DoesNotExist:
steam_game = create_game(game)
LOGGER.info("Creating game %s", steam_game.slug)
try:
library.games.add(steam_game)
except IntegrityError:
# Game somehow already added.
pass
@task
def daily_mod_mail():
send_daily_mod_mail()
| Python | 0.000006 |
3b6d5fd80eb4d95679b969e8809b154d6254de8d | Replace get_user_profile_by_email with get_user. | zerver/management/commands/bankrupt_users.py | zerver/management/commands/bankrupt_users.py | from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import CommandError
from zerver.lib.actions import do_update_message_flags
from zerver.lib.management import ZulipBaseCommand
from zerver.models import Message
class Command(ZulipBaseCommand):
help = """Bankrupt one or many users."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('emails', metavar='<email>', type=str, nargs='+',
help='email address to bankrupt')
self.add_realm_args(parser, True)
def handle(self, *args, **options):
# type: (*Any, **str) -> None
realm = self.get_realm(options)
for email in options['emails']:
try:
user_profile = self.get_user(email, realm)
except CommandError:
print("e-mail %s doesn't exist in the realm %s, skipping" % (email, realm))
continue
do_update_message_flags(user_profile, "add", "read", None, True, None, None)
messages = Message.objects.filter(
usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
old_pointer = user_profile.pointer
new_pointer = messages[0].id
user_profile.pointer = new_pointer
user_profile.save(update_fields=["pointer"])
print("%s: %d => %d" % (email, old_pointer, new_pointer))
else:
print("%s has no messages, can't bankrupt!" % (email,))
| from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_update_message_flags
from zerver.models import UserProfile, Message, get_user_profile_by_email
class Command(BaseCommand):
help = """Bankrupt one or many users."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('emails', metavar='<email>', type=str, nargs='+',
help='email address to bankrupt')
def handle(self, *args, **options):
# type: (*Any, **str) -> None
for email in options['emails']:
try:
user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
print("e-mail %s doesn't exist in the system, skipping" % (email,))
continue
do_update_message_flags(user_profile, "add", "read", None, True, None, None)
messages = Message.objects.filter(
usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
old_pointer = user_profile.pointer
new_pointer = messages[0].id
user_profile.pointer = new_pointer
user_profile.save(update_fields=["pointer"])
print("%s: %d => %d" % (email, old_pointer, new_pointer))
else:
print("%s has no messages, can't bankrupt!" % (email,))
| Python | 0.000007 |
3135bda8970a2fdefa92b932c15cf5c559392c9c | allow to specify db session callable directly | ziggurat_foundations/ext/pyramid/get_user.py | ziggurat_foundations/ext/pyramid/get_user.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import importlib
import logging
from ziggurat_foundations.models.base import get_db_session
from ziggurat_foundations.models.services.user import UserService
CONFIG_KEY = "ziggurat_foundations"
log = logging.getLogger(__name__)
def includeme(config):
settings = config.registry.settings
session_provider_callable_config = settings.get(
"%s.session_provider_callable" % CONFIG_KEY
)
if not session_provider_callable_config:
def session_provider_callable(request):
return get_db_session()
test_session_callable = None
else:
if callable(session_provider_callable_config):
session_provider_callable = session_provider_callable_config
else:
parts = session_provider_callable_config.split(":")
_tmp = importlib.import_module(parts[0])
session_provider_callable = getattr(_tmp, parts[1])
test_session_callable = "session exists"
# This function is bundled into the request, so for each request you can
# do request.user
def get_user(request):
userid = request.unauthenticated_userid
if test_session_callable is None:
# set db_session to none to pass to the UserModel.by_id
db_session = None
else:
# Else assign the request.session
db_session = session_provider_callable(request)
if userid is not None:
return UserService.by_id(userid, db_session=db_session)
# add in request.user function
config.add_request_method(get_user, "user", reify=True, property=True)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import importlib
import logging
from ziggurat_foundations.models.base import get_db_session
from ziggurat_foundations.models.services.user import UserService
CONFIG_KEY = "ziggurat_foundations"
log = logging.getLogger(__name__)
def includeme(config):
settings = config.registry.settings
session_provider_callable_config = settings.get(
"%s.session_provider_callable" % CONFIG_KEY
)
if not session_provider_callable_config:
def session_provider_callable(request):
return get_db_session()
test_session_callable = None
else:
parts = session_provider_callable_config.split(":")
_tmp = importlib.import_module(parts[0])
session_provider_callable = getattr(_tmp, parts[1])
test_session_callable = "session exists"
# This function is bundled into the request, so for each request you can
# do request.user
def get_user(request):
userid = request.unauthenticated_userid
if test_session_callable is None:
# set db_session to none to pass to the UserModel.by_id
db_session = None
else:
# Else assign the request.session
db_session = session_provider_callable(request)
if userid is not None:
return UserService.by_id(userid, db_session=db_session)
# add in request.user function
config.add_request_method(get_user, "user", reify=True, property=True)
| Python | 0 |
07def114287bc3488e76e2516ca7682954ba4a09 | Use default alphabet | APITaxi/extensions.py | APITaxi/extensions.py | #coding: utf-8
from flask_sqlalchemy import SQLAlchemy as BaseSQLAlchemy
from sqlalchemy.pool import QueuePool as BaseQueuePool
class SQLAlchemy(BaseSQLAlchemy):
def apply_driver_hacks(self, app, info, options):
BaseSQLAlchemy.apply_driver_hacks(self, app, info, options)
class QueuePool(BaseQueuePool):
def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30, **kw):
kw['use_threadlocal'] = True
BaseQueuePool.__init__(self, creator, pool_size, max_overflow, timeout, **kw)
options.setdefault('poolclass', QueuePool)
db = SQLAlchemy(session_options={"autoflush":False})
from .utils.redis_geo import GeoRedis
from flask.ext.redis import FlaskRedis
redis_store = FlaskRedis.from_custom_provider(GeoRedis)
from flask.ext.celery import Celery
celery = Celery()
from dogpile.cache import make_region
region_taxi = make_region('taxis')
region_hails = make_region('hails')
region_zupc = make_region('zupc')
def user_key_generator(namespace, fn, **kw):
def generate_key(*args, **kwargs):
return fn.__name__ +\
"_".join(str(s) for s in args) +\
"_".join(k+"_"+str(v) for k,v in kwargs.iteritems())
return generate_key
region_users = make_region('users', function_key_generator=user_key_generator)
from flask.ext.uploads import (UploadSet, configure_uploads,
DOCUMENTS, DATA, ARCHIVES, IMAGES)
documents = UploadSet('documents', DOCUMENTS + DATA + ARCHIVES)
images = UploadSet('images', IMAGES)
from .index_zupc import IndexZUPC
index_zupc = IndexZUPC()
from .utils.cache_user_datastore import CacheUserDatastore
from .models import security
user_datastore = CacheUserDatastore(db, security.User,
security.Role)
import shortuuid
suid = shortuuid.ShortUUID()
def get_short_uuid():
return suid.uuid()[:7]
| #coding: utf-8
from flask_sqlalchemy import SQLAlchemy as BaseSQLAlchemy
from sqlalchemy.pool import QueuePool as BaseQueuePool
class SQLAlchemy(BaseSQLAlchemy):
def apply_driver_hacks(self, app, info, options):
BaseSQLAlchemy.apply_driver_hacks(self, app, info, options)
class QueuePool(BaseQueuePool):
def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30, **kw):
kw['use_threadlocal'] = True
BaseQueuePool.__init__(self, creator, pool_size, max_overflow, timeout, **kw)
options.setdefault('poolclass', QueuePool)
db = SQLAlchemy(session_options={"autoflush":False})
from .utils.redis_geo import GeoRedis
from flask.ext.redis import FlaskRedis
redis_store = FlaskRedis.from_custom_provider(GeoRedis)
from flask.ext.celery import Celery
celery = Celery()
from dogpile.cache import make_region
region_taxi = make_region('taxis')
region_hails = make_region('hails')
region_zupc = make_region('zupc')
def user_key_generator(namespace, fn, **kw):
def generate_key(*args, **kwargs):
return fn.__name__ +\
"_".join(str(s) for s in args) +\
"_".join(k+"_"+str(v) for k,v in kwargs.iteritems())
return generate_key
region_users = make_region('users', function_key_generator=user_key_generator)
from flask.ext.uploads import (UploadSet, configure_uploads,
DOCUMENTS, DATA, ARCHIVES, IMAGES)
documents = UploadSet('documents', DOCUMENTS + DATA + ARCHIVES)
images = UploadSet('images', IMAGES)
from .index_zupc import IndexZUPC
index_zupc = IndexZUPC()
from .utils.cache_user_datastore import CacheUserDatastore
from .models import security
user_datastore = CacheUserDatastore(db, security.User,
security.Role)
import shortuuid
suid = shortuuid.ShortUUID(alphabet=
'0123456789abcdefghijklmnopqrstuvwxyzABDEFGHIJKLOMNOPQRSTUVWXYZ')
def get_short_uuid():
return suid.uuid()[:7]
| Python | 0.000652 |
9f500668555292add5d87c942e0cd804aefa6df2 | Replace cat usage for fgrep | fuel_health/tests/cloudvalidation/test_disk_space_db.py | fuel_health/tests/cloudvalidation/test_disk_space_db.py | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuel_health import cloudvalidation
class DBSpaceTest(cloudvalidation.CloudValidationTest):
"""Cloud Validation Test class for free space for DB."""
def _check_db_disk_expectation_warning(self, host):
"""Checks whether DB expects less free space than actually
is presented on the controller node
"""
scheduler_log = 'nova-scheduler.log'
if self.config.compute.deployment_os.lower() == 'centos':
scheduler_log = 'scheduler.log'
err_msg = "Cannot check {scheduler_log} at {host}".format(
host=host, scheduler_log=scheduler_log)
warning_msg = "Host has more disk space than database expected"
cmd = "fgrep '{msg}' -q /var/log/nova/{scheduler_log}".format(
msg=warning_msg, scheduler_log=scheduler_log)
out, err = self.verify(5, self._run_ssh_cmd, 1, err_msg,
'check nova-scheduler.log', host, cmd)
self.verify_response_true(not err, err_msg, 1)
return out
def test_db_expectation_free_space(self):
"""Check disk space allocation for databases on controller nodes
Target component: Nova
Scenario:
1. Check disk space allocation for databases on controller nodes
Duration: 20 s.
Deployment tags: disabled
Available since release: 2014.2-6.1
"""
hosts = filter(self._check_db_disk_expectation_warning,
self.controllers)
self.verify_response_true(not hosts,
("Free disk space cannot be used "
"by database on node(s): {hosts}"
).format(hosts=hosts),
1)
| # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuel_health import cloudvalidation
class DBSpaceTest(cloudvalidation.CloudValidationTest):
"""Cloud Validation Test class for free space for DB."""
def _check_db_disk_expectation_warning(self, host):
"""Checks whether DB expects less free space than actually
is presented on the controller node
"""
scheduler_log = 'nova-scheduler.log'
if self.config.compute.deployment_os.lower() == 'centos':
scheduler_log = 'scheduler.log'
err_msg = "Cannot check {scheduler_log} at {host}".format(
host=host, scheduler_log=scheduler_log)
warning_msg = "Host has more disk space than database expected"
cmd = ("cat /var/log/nova/{scheduler_log} "
"| grep '{msg}' | tail -1").format(
msg=warning_msg, scheduler_log=scheduler_log)
out, err = self.verify(5, self._run_ssh_cmd, 1, err_msg,
'check nova-scheduler.log', host, cmd)
self.verify_response_true(not err, err_msg, 1)
return out
def test_db_expectation_free_space(self):
"""Check disk space allocation for databases on controller nodes
Target component: Nova
Scenario:
1. Check disk space allocation for databases on controller nodes
Duration: 20 s.
Deployment tags: disabled
Available since release: 2014.2-6.1
"""
hosts = filter(self._check_db_disk_expectation_warning,
self.controllers)
self.verify_response_true(not hosts,
("Free disk space cannot be used "
"by database on node(s): {hosts}"
).format(hosts=hosts),
1)
| Python | 0.000003 |
2284f9f944ef72c7e2f6c9a4e93e395b09196719 | modify initial config | golive/management/commands/create_config.py | golive/management/commands/create_config.py | from django.core.management import BaseCommand
from fabric.state import output
import sys
from golive.stacks.stack import StackFactory, Stack
import yaml
class Command(BaseCommand):
help = 'Creates a basic exampe configuration file'
output['stdout'] = False
example = """CONFIG:
PLATFORM: DEDICATED
STACK: CLASSIC
ENVIRONMENTS:
DEFAULTS:
INIT_USER: root
PROJECT_NAME: djangoproject
PUBKEY: $HOME/.ssh/id_dsa.pub
TESTING:
SERVERNAME: testserver
ROLES:
APP_HOST:
- testserver
DB_HOST:
- testserver
WEB_HOST:
- testserver"""
def handle(self, *args, **options):
example_file = open(Stack.CONFIG, 'w')
example_file.write(Command.example)
example_file.close()
def end(self):
self.stdout.write('Done\n')
| from django.core.management import BaseCommand
from fabric.state import output
import sys
from golive.stacks.stack import StackFactory, Stack
import yaml
class Command(BaseCommand):
help = 'Creates a basic exampe configuration file'
output['stdout'] = False
example = """CONFIG:
PLATFORM: DEDICATED
STACK: CLASSIC
ENVIRONMENTS:
DEFAULTS:
INIT_USER: fatrix
PROJECT_NAME: django_example
PUBKEY: $HOME/user.pub
TESTING:
SERVERNAME: golive-sandbox1
ROLES:
APP_HOST:
- testbox1
DB_HOST:
- testbox1
WEB_HOST:
- testbox1"""
def handle(self, *args, **options):
example_file = open(Stack.CONFIG, 'w')
example_file.write(Command.example)
example_file.close()
def end(self):
self.stdout.write('Done\n')
| Python | 0.000002 |
bc85dffa594c292094d2aa1f5a456e0a0690ea79 | Remove debug code | grumpy-tools-src/tests/test_grumpy_tools.py | grumpy-tools-src/tests/test_grumpy_tools.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `grumpy_tools` package."""
import tempfile
import unittest
import pytest
from click.testing import CliRunner
from grumpy_tools import cli
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
@pytest.mark.xfail
def test_command_line_interface(capfd):
"""Test the CLI."""
runner = CliRunner()
out, err = capfd.readouterr()
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert '>>> ' in out, (result.output, out, err)
def test_run_input_inline(capfd):
runner = CliRunner()
result = runner.invoke(cli.main, ['run', '-c', "print('Hello World')",])
out, err = capfd.readouterr()
assert out == 'Hello World\n', (err, result.output)
assert result.exit_code == 0
def test_run_input_stdin(capfd):
runner = CliRunner()
result = runner.invoke(cli.main, ['run'], input="print('Hello World')")
out, err = capfd.readouterr()
assert out == 'Hello World\n', (err, result.output)
assert result.exit_code == 0
def test_run_input_file(capfd):
runner = CliRunner()
with tempfile.NamedTemporaryFile() as script_file:
script_file.write("print('Hello World')")
script_file.flush()
result = runner.invoke(cli.main, ['run', script_file.name])
out, err = capfd.readouterr()
assert out == 'Hello World\n', (err, result.output)
assert result.exit_code == 0
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `grumpy_tools` package."""
import tempfile
import unittest
import pytest
from click.testing import CliRunner
from grumpy_tools import cli
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
@pytest.mark.xfail
def test_command_line_interface(capfd):
"""Test the CLI."""
runner = CliRunner()
out, err = capfd.readouterr()
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert '>>> ' in out, (result.output, out, err)
def test_run_input_inline(capfd):
runner = CliRunner()
result = runner.invoke(cli.main, ['run', '-c', "print('Hello World')",])
# import wdb; wdb.set_trace()
out, err = capfd.readouterr()
assert out == 'Hello World\n', (err, result.output)
assert result.exit_code == 0
def test_run_input_stdin(capfd):
runner = CliRunner()
result = runner.invoke(cli.main, ['run'], input="print('Hello World')")
out, err = capfd.readouterr()
assert out == 'Hello World\n', (err, result.output)
assert result.exit_code == 0
def test_run_input_file(capfd):
runner = CliRunner()
with tempfile.NamedTemporaryFile() as script_file:
script_file.write("print('Hello World')")
script_file.flush()
result = runner.invoke(cli.main, ['run', script_file.name])
out, err = capfd.readouterr()
assert out == 'Hello World\n', (err, result.output)
assert result.exit_code == 0
| Python | 0.000299 |
dc9c5021c022108fd9ca2c87e9064b385abd26cf | Fix style | didyoumean/readme_examples.py | didyoumean/readme_examples.py | # -*- coding: utf-8
"""Code to generate examples in README.md."""
from didyoumean import add_suggestions_to_exception
import sys
def get_exception(code):
"""Helper function to run code and get what it throws."""
try:
exec(code)
except:
return sys.exc_info()
assert False
def main():
"""Main."""
# Different examples :
# Code examples are groupes by error type then by suggestion type
# Numbers have been added in dict keys just to be able to iterate
# over them and have the result in the wanted order.
examples = {
(1, NameError): {
(1, "Fuzzy matches on existing names "
"(local, builtin, keywords, modules, etc)"): [
"def my_func(foo, bar):\n\treturn foob\nmy_func(1, 2)",
"def my_func(lst):\n\treturn leng(foo)\nmy_func([0])",
"import math\nmaths.pi",
"def my_func():\n\tpasss\nmy_func()",
"def my_func():\n\tfoo = 1\n\tfoob +=1\nmy_func()"
],
(2, "Checking if name is the attribute of a defined object"): [
"class Duck():\n\tdef __init__(self):\n\t\tquack()"
"\n\tdef quack(self):\n\t\tpass\nd = Duck()",
"import math\npi",
],
(3, "Looking for missing imports"): [
"functools.wraps()",
],
},
(2, AttributeError): {
(1, "Fuzzy matches on existing attributes"): [
"lst = [1, 2, 3]\nlst.appendh(4)",
"import math\nmath.pie",
],
(2, "Detection of mis-used builtins"): [
"lst = [1, 2, 3]\nlst.max()",
],
(3, "Trying to find method with similar meaning (hardcoded)"): [
"lst = [1, 2, 3]\nlst.add(4)",
],
},
(3, ImportError): {
(1, "Fuzzy matches on existing modules"): [
"from maths import pi",
],
(2, "Fuzzy matches on elements of the module"): [
"from math import pie",
],
(3, "Looking for import from wrong module"): [
"from itertools import pi",
],
},
(4, TypeError): {
(1, "Fuzzy matches on keyword arguments"): [
"def my_func(abcde):\n\tpass\nmy_func(abcdf=1)",
],
},
(5, SyntaxError): {
(1, "Fuzzy matches when importing from __future__"): [
"from __future__ import divisio",
],
(2, "Various"): [
"return",
],
},
}
str_func = repr # could be str or repr
for (_, exc_type), exc_examples in sorted(examples.items()):
print("### %s\n" % exc_type.__name__)
for (_, desc), codes in sorted(exc_examples.items()):
print("##### %s\n" % desc)
for code in codes:
type_, value, traceback = get_exception(code)
assert issubclass(type_, exc_type)
before = str_func(value)
add_suggestions_to_exception(type_, value, traceback)
after = str_func(value)
assert before != after
print("""```python
%s
#>>> Before: %s
#>>> After: %s
```""" % (code, before, after))
if __name__ == '__main__':
main()
| # -*- coding: utf-8
"""Code to generate examples in README.md."""
from didyoumean import add_suggestions_to_exception
import sys
def get_exception(code):
"""Helper function to run code and get what it throws."""
try:
exec(code)
except:
return sys.exc_info()
assert False
def main():
"""Main."""
# Different examples :
# Code examples are groupes by error type then by suggestion type
# Numbers have been added in dict keys just to be able to iterate
# over them and have the result in the wanted order.
examples = {
(1, NameError): {
(1, "Fuzzy matches on existing names "
"(local, builtin, keywords, modules, etc)"): [
"def my_func(foo, bar):\n\treturn foob\nmy_func(1, 2)",
"def my_func(lst):\n\treturn leng(foo)\nmy_func([0])",
"import math\nmaths.pi",
"def my_func():\n\tpasss\nmy_func()",
"def my_func():\n\tfoo = 1\n\tfoob +=1\nmy_func()"
],
(2, "Checking if name is the attribute of a defined object"): [
"class Duck():\n\tdef __init__(self):\n\t\tquack()"
"\n\tdef quack(self):\n\t\tpass\nd = Duck()",
"import math\npi",
],
(3, "Looking for missing imports"): [
"functools.wraps()",
],
},
(2, AttributeError): {
(1, "Fuzzy matches on existing attributes"): [
"lst = [1, 2, 3]\nlst.appendh(4)",
"import math\nmath.pie",
],
(2, "Detection of mis-used builtins"): [
"lst = [1, 2, 3]\nlst.max()",
],
(3, "Trying to find method with similar meaning (hardcoded)"): [
"lst = [1, 2, 3]\nlst.add(4)",
],
},
(3, ImportError): {
(1, "Fuzzy matches on existing modules"): [
"from maths import pi",
],
(2, "Fuzzy matches on elements of the module"): [
"from math import pie",
],
(3, "Looking for import from wrong module"): [
"from itertools import pi",
],
},
(4, TypeError): {
(1, "Fuzzy matches on keyword arguments"): [
"def my_func(abcde):\n\tpass\nmy_func(abcdf=1)",
],
},
(5, SyntaxError): {
(1, "Fuzzy matches when importing from __future__"): [
"from __future__ import divisio",
],
(2, "Various"): [
"return",
],
},
}
str_func = repr # could be str or repr
for (_, exc_type), exc_examples in sorted(examples.items()):
print("### %s\n" % exc_type.__name__)
for (_, desc), codes in sorted(exc_examples.items()):
print("##### %s\n" % desc)
for code in codes:
type_, value, traceback = get_exception(code)
assert issubclass(type_, exc_type)
before = str_func(value)
add_suggestions_to_exception(type_, value, traceback)
after = str_func(value)
assert before != after
print("""```python
%s
#>>> Before: %s
#>>> After: %s
```""" % (code, before, after))
if __name__ == '__main__':
main()
| Python | 0.000001 |
f42fdde5404c3025236ad7dcade4b08529e7ce36 | repair Noneuser_bug | app/delete.py | app/delete.py | from .models import User
from . import db
def deletenone():
noneuser=User.query.filter_by(username=None).all()
for user in noneuser:
db.session.delete(user)
db.session.commit()
| from .models import User
from . import db
def deletenone():
noneuser=User.query.filter_by(username=None).all()
for user in noneuser:
db.session.delete(user)
db.session.commit()
| Python | 0.000003 |
9ac03fa54f0134905033f615f6e02804f704b1a0 | Add User and Items | app/models.py | app/models.py | from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from app import db
class User(UserMixin, db.Model):
"""This class represents the user table."""
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(255), nullable=False, unique=True)
email = db.Column(db.String(256), nullable=False, unique=True)
user_password = db.Column(db.String(255), nullable=False)
bucketlists = db.relationship('BucketList', order_by="BucketList.id",
cascade="all,delete-orphan")
def __init__(self, username, password, email):
self.username = username
self.password = password
self.email = email
@property
def password(self):
raise AttributeError('You cannot access password')
@password.setter
def password(self):
self.user_password = generate_password_hash(self.password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
class Bucketlist(db.Model):
"""This class represents the bucketlist table."""
__tablename__ = 'bucketlists'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(
db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
def __init__(self, name):
"""initialize with name."""
self.name = name
def save(self):
db.session.add(self)
db.session.commit()
@staticmethod
def get_all():
return Bucketlist.query.all()
def delete(self):
db.session.delete(self)
db.session.commit()
def __repr__(self):
return "<Bucketlist: {}>".format(self.name)
class BucketListItem(db.Model):
"""This class represents the bucketlist_item table"""
__tablename__ = 'bucketlistitems'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
done = db.Column(db.Boolean, default=False)
bucketlist_id = db.Column(db.Integer, db.ForeignKey(Bucketlist.id))
def __init__(self, name, bucketlist_id):
"""Initialize with name and bucketlist_id"""
self.name = name
self.bucketlist_id = bucketlist_id
def save(self):
db.session.add(self)
db.session.commit()
@staticmethod
def get_all_items():
return BucketListItem.query.filter_by(bucketlist_id=Bucketlist.id)
def delete(self):
db.session.delete(self)
db.session.commit()
| from app import db
class Bucketlist(db.Model):
"""This class represents the bucketlist table."""
__tablename__ = 'bucketlists'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(
db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
def __init__(self, name):
"""initialize with name."""
self.name = name
def save(self):
db.session.add(self)
db.session.commit()
@staticmethod
def get_all():
return Bucketlist.query.all()
def delete(self):
db.session.delete(self)
db.session.commit()
def __repr__(self):
return "<Bucketlist: {}>".format(self.name)
| Python | 0 |
c12df9f8f0c73577c122fc65bd11314b7231179c | Add type | test_runner/environments.py | test_runner/environments.py | import logging
import re
import sys
from glanceclient.v1.client import Client as glance_client
from keystoneclient.v2_0.client import Client as keystone_client
from neutronclient.v2_0.client import Client as neutron_client
from novaclient.v1_1 import client as nova_client
from .utils import rand_name
LOG = logging.getLogger(__name__)
CIRROS_URL='http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img'
class Environment(object):
def __init__(self, username, password, auth_url):
self.keystone = keystone_client(
username=username,
password=password,
tenant_name=username,
auth_url=auth_url)
self.endpoints = self.keystone.service_catalog.get_endpoints()
self.token = self.keystone.auth_ref['token']['id']
self.glance = glance_client(
endpoint=self.endpoints['image'][0]['internalURL'],
token=self.token)
self.neutron = neutron_client(
username=username,
password=password,
tenant_name=username,
auth_url=auth_url,
endpoint_type='internalURL')
self.nova = nova_client.Client(
username=username,
api_key=password,
project_id=username,
auth_url=auth_url,
endpoint_type='internalURL')
self.images = []
self.network = {}
self.router = {}
self.users = {}
self.admin = {'username': username, 'password': password}
def build(self):
self.create_guests()
self.get_images()
self.get_network()
self.get_router()
def destroy(self):
LOG.info('Destroying environment')
if self.guests: map(self.keystone.users.delete, self.guests)
if self.tenant: self.keystone.tenants.delete(self.tenant)
if self.role: self.keystone.roles.delete(self.role)
if self.images: self.glance.images.delete(self.images[0])
def create_guests(self, password='secrete'):
LOG.info('Creating guest users')
self.tenant = self.keystone.tenants.create(rand_name('guest'))
try:
roles = self.keystone.roles.list()
self.role = self._find_resource(roles, 'Member')
except:
self.role = self.keystone.roles.create('Member')
self.guests = []
for _ in range(2):
user = self.keystone.users.create(name=rand_name('guest'),
password=password,
tenant_id=self.tenant.id)
user.password = password
user.tenant_name = self.tenant.name
self.guests.append(user)
def get_images(self):
LOG.info('Fetching image metadata')
try:
filters = {'name': 'cirros'}
image = next(self.glance.images.list(filters=filters))
self.images = [image, image]
except StopIteration:
image = self.glance.images.create(
name='cirros',
disk_format='qcow2',
container_format='bare',
location=CIRROS_URL,
is_public='True')
self.images = [image, image]
@staticmethod
def _find_resource(resources, name):
if type(resources) is dict or list:
return next(resource for resource in resources
if name in resource['name'])
else:
return next(resource for resource in resources
if name in resource.name)
def get_network(self):
LOG.info('Fetching networks')
networks = self.neutron.list_networks()['networks']
self.network = self._find_resource(networks, 'private')
def get_router(self):
LOG.info('Fetching routers')
routers = self.neutron.list_routers()['routers']
self.router = self._find_resource(routers, 'public-private')
| import logging
import re
import sys
from glanceclient.v1.client import Client as glance_client
from keystoneclient.v2_0.client import Client as keystone_client
from neutronclient.v2_0.client import Client as neutron_client
from novaclient.v1_1 import client as nova_client
from .utils import rand_name
LOG = logging.getLogger(__name__)
CIRROS_URL='http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img'
class Environment(object):
def __init__(self, username, password, auth_url):
self.keystone = keystone_client(
username=username,
password=password,
tenant_name=username,
auth_url=auth_url)
self.endpoints = self.keystone.service_catalog.get_endpoints()
self.token = self.keystone.auth_ref['token']['id']
self.glance = glance_client(
endpoint=self.endpoints['image'][0]['internalURL'],
token=self.token)
self.neutron = neutron_client(
username=username,
password=password,
tenant_name=username,
auth_url=auth_url,
endpoint_type='internalURL')
self.nova = nova_client.Client(
username=username,
api_key=password,
project_id=username,
auth_url=auth_url,
endpoint_type='internalURL')
self.images = []
self.network = {}
self.router = {}
self.users = {}
self.admin = {'username': username, 'password': password}
def build(self):
self.create_guests()
self.get_images()
self.get_network()
self.get_router()
def destroy(self):
LOG.info('Destroying environment')
if self.guests: map(self.keystone.users.delete, self.guests)
if self.tenant: self.keystone.tenants.delete(self.tenant)
if self.role: self.keystone.roles.delete(self.role)
if self.images: self.glance.images.delete(self.images[0])
def create_guests(self, password='secrete'):
LOG.info('Creating guest users')
self.tenant = self.keystone.tenants.create(rand_name('guest'))
try:
roles = self.keystone.roles.list()
self.role = self._find_resource(roles, 'Member')
except:
self.role = self.keystone.roles.create('Member')
self.guests = []
for _ in range(2):
user = self.keystone.users.create(name=rand_name('guest'),
password=password,
tenant_id=self.tenant.id)
user.password = password
user.tenant_name = self.tenant.name
self.guests.append(user)
def get_images(self):
LOG.info('Fetching image metadata')
try:
filters = {'name': 'cirros'}
image = next(self.glance.images.list(filters=filters))
self.images = [image, image]
except StopIteration:
image = self.glance.images.create(
name='cirros',
disk_format='qcow2',
container_format='bare',
location=CIRROS_URL,
is_public='True')
self.images = [image, image]
@staticmethod
def _find_resource(resources, name):
if type(resources) is dict:
return next(resource for resource in resources
if name in resource['name'])
else:
return next(resource for resource in resources
if name in resource.name)
def get_network(self):
LOG.info('Fetching networks')
networks = self.neutron.list_networks()['networks']
self.network = self._find_resource(networks, 'private')
def get_router(self):
LOG.info('Fetching routers')
routers = self.neutron.list_routers()['routers']
self.router = self._find_resource(routers, 'public-private')
| Python | 0.000003 |
06d71ede1c1feaa597b442f4ead63d2b2e31e715 | fix `trigger` -> `__call__` | chainer/training/triggers/once_trigger.py | chainer/training/triggers/once_trigger.py | class OnceTrigger(object):
"""Trigger based on the starting point of the iteration.
This trigger accepts only once at starting point of the iteration. There
are two ways to specify the starting point: only starting point in whole
iteration or called again when training resumed.
Args:
call_on_resume (bool): Whether the extension is called again or not
when restored from a snapshot. It is set to ``False`` by default.
"""
def __init__(self, call_on_resume=False):
self._flag_first = True
self._flag_resumed = call_on_resume
def __call__(self, trainer):
flag = self._flag_first or self._flag_resumed
self._flag_resumed = False
self._flag_first = False
return flag
def serialize(self, serializer):
self._flag_first = serializer('_flag_first', self._flag_first)
| class OnceTrigger(object):
"""Trigger based on the starting point of the iteration.
This trigger accepts only once at starting point of the iteration. There
are two ways to specify the starting point: only starting point in whole
iteration or called again when training resumed.
Args:
call_on_resume (bool): Whether the extension is called again or not
when restored from a snapshot. It is set to ``False`` by default.
"""
def __init__(self, call_on_resume=False):
self._flag_first = True
self._flag_resumed = call_on_resume
def trigger(self, trainer):
flag = self._flag_first or self._flag_resumed
self._flag_resumed = False
self._flag_first = False
return flag
def serialize(self, serializer):
self._flag_first = serializer('_flag_first', self._flag_first)
| Python | 0.000003 |
ddec6067054cc4408ac174e3ea4ffeca2a962201 | Remove unnecessary assert from view for Notice home. | regulations/views/notice_home.py | regulations/views/notice_home.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from operator import itemgetter
import logging
from django.http import Http404
from django.template.response import TemplateResponse
from django.views.generic.base import View
from regulations.generator.api_reader import ApiReader
from regulations.views.preamble import (
notice_data, CommentState)
logger = logging.getLogger(__name__)
class NoticeHomeView(View):
"""
Basic view that provides a list of regulations and notices to the context.
"""
template_name = None # We should probably have a default notice template.
def get(self, request, *args, **kwargs):
notices = ApiReader().notices().get("results", [])
context = {}
notices_meta = []
for notice in notices:
try:
if notice.get("document_number"):
_, meta, _ = notice_data(notice["document_number"])
notices_meta.append(meta)
except Http404:
pass
notices_meta = sorted(notices_meta, key=itemgetter("publication_date"),
reverse=True)
context["notices"] = notices_meta
# Django templates won't show contents of CommentState as an Enum, so:
context["comment_state"] = {state.name: state.value for state in
CommentState}
template = self.template_name
return TemplateResponse(request=request, template=template,
context=context)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from operator import itemgetter
import logging
from django.http import Http404
from django.template.response import TemplateResponse
from django.views.generic.base import View
from regulations.generator.api_reader import ApiReader
from regulations.views.preamble import (
notice_data, CommentState)
logger = logging.getLogger(__name__)
class NoticeHomeView(View):
"""
Basic view that provides a list of regulations and notices to the context.
"""
template_name = None # We should probably have a default notice template.
def get(self, request, *args, **kwargs):
notices = ApiReader().notices().get("results", [])
context = {}
notices_meta = []
for notice in notices:
try:
if notice.get("document_number"):
_, meta, _ = notice_data(notice["document_number"])
notices_meta.append(meta)
except Http404:
pass
notices_meta = sorted(notices_meta, key=itemgetter("publication_date"),
reverse=True)
context["notices"] = notices_meta
# Django templates won't show contents of CommentState as an Enum, so:
context["comment_state"] = {state.name: state.value for state in
CommentState}
assert self.template_name
template = self.template_name
return TemplateResponse(request=request, template=template,
context=context)
| Python | 0 |
91946410f14b21e510a104b105a6f5036cc8944f | build updated | python/common/core/globalVariables.py | python/common/core/globalVariables.py | '''
Author: Jason Parks
Created: Apr 22, 2012
Module: common.core.globalVariables
Purpose: to import globalVariables
'''
# Location of Toolset
toolsLocation = 'C:/Users/jason/git/PipelineConstructionSet'
# NOTE!: It is necessary to manually add the above location's
# python directory, i.e-
#
# PYTHONPATH = 'C:/Users/jason/git/PipelineConstructionSet/python'
#
# to the PYTHONPATH environment variable on all user's
# machines whom want to use Pipeline Construction set
# Location of setup schema data file
schemaLocation = 'C:/Users/jason/remotePCS'
# "schemaLocation" should probably be in a shared folder on the network
# so all users can get updates the T.A. makes to the file called
# pcsSchema.xml in this location. You can find a sample version of
# this file in ./PipelineConstructionSet/schemas/pcsSchemaSample.xml
# Name your games here:
teamA = 'GreatGameA'
teamB = 'GreatGameB'
teamC = 'GreatGameC'
teamD = 'GreatGameD'
# You need to change the name of the file
# ./PipelineConstructionSet/schemas/GreatGameA.xml
# and the xml header info in the file as well
# If you are making tools for more than one team,
# you'll need to make more GreatGame*.xml files
# manually update to date/time
build = '050612-21.01'
# This will show up in the PythonEditor or ScriptEditor
# when our DCC app first launches the toolMenu.
print "common.core.globalVariables imported" | '''
Author: Jason Parks
Created: Apr 22, 2012
Module: common.core.globalVariables
Purpose: to import globalVariables
'''
# Location of Toolset
toolsLocation = 'C:/Users/jason/git/PipelineConstructionSet'
# NOTE!: It is necessary to manually add the above location's
# python directory, i.e-
#
# PYTHONPATH = 'C:/Users/jason/git/PipelineConstructionSet/python'
#
# to the PYTHONPATH environment variable on all user's
# machines whom want to use Pipeline Construction set
# Location of setup schema data file
schemaLocation = 'C:/Users/jason/remotePCS'
# "schemaLocation" should probably be in a shared folder on the network
# so all users can get updates the T.A. makes to the file called
# pcsSchema.xml in this location. You can find a sample version of
# this file in ./PipelineConstructionSet/schemas/pcsSchemaSample.xml
# Name your games here:
teamA = 'GreatGameA'
teamB = 'GreatGameB'
teamC = 'GreatGameC'
teamD = 'GreatGameD'
# You need to change the name of the file
# ./PipelineConstructionSet/schemas/GreatGameA.xml
# and the xml header info in the file as well
# If you are making tools for more than one team,
# you'll need to make more GreatGame*.xml files
# manually update to date/time
build = '042212-20.27'
# This will show up in the PythonEditor or ScriptEditor
# when our DCC app first launches the toolMenu.
print "common.core.globalVariables imported" | Python | 0 |
b0e91b820913c7b46d04f946267903d9785fc2ca | Fix test | experiments/tests/test_counter.py | experiments/tests/test_counter.py | from __future__ import absolute_import
from unittest import TestCase
from experiments import counters
from mock import patch
TEST_KEY = 'CounterTestCase'
class CounterTestCase(TestCase):
def setUp(self):
self.counters = counters.Counters()
self.counters.reset(TEST_KEY)
self.assertEqual(self.counters.get(TEST_KEY), 0)
def tearDown(self):
self.counters.reset(TEST_KEY)
def test_add_item(self):
self.counters.increment(TEST_KEY, 'fred')
self.assertEqual(self.counters.get(TEST_KEY), 1)
def test_add_multiple_items(self):
self.counters.increment(TEST_KEY, 'fred')
self.counters.increment(TEST_KEY, 'barney')
self.counters.increment(TEST_KEY, 'george')
self.counters.increment(TEST_KEY, 'george')
self.assertEqual(self.counters.get(TEST_KEY), 3)
def test_add_duplicate_item(self):
self.counters.increment(TEST_KEY, 'fred')
self.counters.increment(TEST_KEY, 'fred')
self.counters.increment(TEST_KEY, 'fred')
self.assertEqual(self.counters.get(TEST_KEY), 1)
def test_get_frequencies(self):
self.counters.increment(TEST_KEY, 'fred')
self.counters.increment(TEST_KEY, 'barney')
self.counters.increment(TEST_KEY, 'george')
self.counters.increment(TEST_KEY, 'roger')
self.counters.increment(TEST_KEY, 'roger')
self.counters.increment(TEST_KEY, 'roger')
self.counters.increment(TEST_KEY, 'roger')
self.assertEqual(self.counters.get_frequencies(TEST_KEY), {1: 3, 4: 1})
def test_delete_key(self):
self.counters.increment(TEST_KEY, 'fred')
self.counters.reset(TEST_KEY)
self.assertEqual(self.counters.get(TEST_KEY), 0)
def test_clear_value(self):
self.counters.increment(TEST_KEY, 'fred')
self.counters.increment(TEST_KEY, 'fred')
self.counters.increment(TEST_KEY, 'fred')
self.counters.increment(TEST_KEY, 'barney')
self.counters.increment(TEST_KEY, 'barney')
self.counters.clear(TEST_KEY, 'fred')
self.assertEqual(self.counters.get(TEST_KEY), 1)
self.assertEqual(self.counters.get_frequencies(TEST_KEY), {2: 1})
@patch('experiments.counters.Counters._redis')
def test_should_return_tuple_if_failing(self, patched__redis):
patched__redis.side_effect = Exception
self.assertEqual(self.counters.get_frequencies(TEST_KEY), dict())
| from __future__ import absolute_import
from unittest import TestCase
from experiments import counters
from mock import patch
TEST_KEY = 'CounterTestCase'
class CounterTestCase(TestCase):
def setUp(self):
self.counters = counters.Counters()
self.counters.reset(TEST_KEY)
self.assertEqual(self.counters.get(TEST_KEY), 0)
def tearDown(self):
self.counters.reset(TEST_KEY)
def test_add_item(self):
self.counters.increment(TEST_KEY, 'fred')
self.assertEqual(self.counters.get(TEST_KEY), 1)
def test_add_multiple_items(self):
self.counters.increment(TEST_KEY, 'fred')
self.counters.increment(TEST_KEY, 'barney')
self.counters.increment(TEST_KEY, 'george')
self.counters.increment(TEST_KEY, 'george')
self.assertEqual(self.counters.get(TEST_KEY), 3)
def test_add_duplicate_item(self):
self.counters.increment(TEST_KEY, 'fred')
self.counters.increment(TEST_KEY, 'fred')
self.counters.increment(TEST_KEY, 'fred')
self.assertEqual(self.counters.get(TEST_KEY), 1)
def test_get_frequencies(self):
self.counters.increment(TEST_KEY, 'fred')
self.counters.increment(TEST_KEY, 'barney')
self.counters.increment(TEST_KEY, 'george')
self.counters.increment(TEST_KEY, 'roger')
self.counters.increment(TEST_KEY, 'roger')
self.counters.increment(TEST_KEY, 'roger')
self.counters.increment(TEST_KEY, 'roger')
self.assertEqual(self.counters.get_frequencies(TEST_KEY), {1: 3, 4: 1})
def test_delete_key(self):
self.counters.increment(TEST_KEY, 'fred')
self.counters.reset(TEST_KEY)
self.assertEqual(self.counters.get(TEST_KEY), 0)
def test_clear_value(self):
self.counters.increment(TEST_KEY, 'fred')
self.counters.increment(TEST_KEY, 'fred')
self.counters.increment(TEST_KEY, 'fred')
self.counters.increment(TEST_KEY, 'barney')
self.counters.increment(TEST_KEY, 'barney')
self.counters.clear(TEST_KEY, 'fred')
self.assertEqual(self.counters.get(TEST_KEY), 1)
self.assertEqual(self.counters.get_frequencies(TEST_KEY), {2: 1})
@patch('experiments.counters.Counter._redis')
def test_should_return_tuple_if_failing(self, patched__redis):
patched__redis.side_effect = Exception
self.assertEqual(self.counters.get_frequencies(TEST_KEY), dict())
| Python | 0.000004 |
c573263511bcbf0ffe37f538142aedd9064f8ae0 | Remove copying devdata.env as it's only used for the Google API key we've removed | bin/devdata.py | bin/devdata.py | """Download .devdata from github.com:hypothesis/devdata.git."""
import os
from pathlib import Path
from shutil import copyfile
from subprocess import check_call
from tempfile import TemporaryDirectory
def _get_devdata():
# The directory that we'll clone the devdata git repo into.
with TemporaryDirectory() as tmp_dir_name:
git_dir = os.path.join(tmp_dir_name, "devdata")
check_call(["git", "clone", "git@github.com:hypothesis/devdata.git", git_dir])
# Copy devdata env file into place.
for source, target in (
(
"via/devdata/google_drive_credentials.json",
".devdata/google_drive_credentials.json",
),
(
"via/devdata/google_drive_resource_keys.json",
".devdata/google_drive_resource_keys.json",
),
):
copyfile(
os.path.join(git_dir, source),
os.path.join(Path(__file__).parent.parent, target),
)
if __name__ == "__main__":
_get_devdata()
| """Download .devdata.env from github.com:hypothesis/devdata.git."""
import os
from pathlib import Path
from shutil import copyfile
from subprocess import check_call
from tempfile import TemporaryDirectory
def _get_devdata():
# The directory that we'll clone the devdata git repo into.
with TemporaryDirectory() as tmp_dir_name:
git_dir = os.path.join(tmp_dir_name, "devdata")
check_call(["git", "clone", "git@github.com:hypothesis/devdata.git", git_dir])
# Copy devdata env file into place.
for source, target in (
("via/devdata.env", ".devdata.env"),
(
"via/devdata/google_drive_credentials.json",
".devdata/google_drive_credentials.json",
),
(
"via/devdata/google_drive_resource_keys.json",
".devdata/google_drive_resource_keys.json",
),
):
copyfile(
os.path.join(git_dir, source),
os.path.join(Path(__file__).parent.parent, target),
)
if __name__ == "__main__":
_get_devdata()
| Python | 0 |
81b5961cdf4b9ca7e20920eda3c7f76f96a35a9b | Bump version | filer/__init__.py | filer/__init__.py | #-*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '0.9pbs.105' # pragma: nocover
default_app_config = 'filer.apps.FilerConfig'
| #-*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '0.9pbs.105.dev1' # pragma: nocover
default_app_config = 'filer.apps.FilerConfig'
| Python | 0 |
eef9d75a7d019a397d2026612ece76d217747e5b | mark oddity | src/zeit/edit/browser/tests/test_form.py | src/zeit/edit/browser/tests/test_form.py | # Copyright (c) 2012 gocept gmbh & co. kg
# See also LICENSE.txt
from mock import Mock
import zeit.cms.testing
import zeit.edit.browser.form
import zope.formlib.form
import zope.interface
import zope.publisher.browser
import zope.schema
class IExample(zope.interface.Interface):
foo = zope.schema.TextLine(title=u'foo')
class InlineForm(zeit.cms.testing.FunctionalTestCase):
# XXX This test should be moved to zeit.cms.browser, but it seems nearly
# impossible to instantiate an EditForm, so we punt on this for now;
# InlineForms are friendlier (since they don't pull in the
# main_template.pt)
def render_form(self, form_class):
ANY_CONTEXT = Mock()
zope.interface.alsoProvides(ANY_CONTEXT, IExample)
request = zope.publisher.browser.TestRequest()
form = form_class(ANY_CONTEXT, request)
return form()
def test_css_class_on_widget_is_rendered_to_html(self):
class ExampleForm(zeit.edit.browser.form.InlineForm):
form_fields = zope.formlib.form.FormFields(IExample)
legend = 'Legend'
def setUpWidgets(self):
super(ExampleForm, self).setUpWidgets()
self.widgets['foo'].vivi_css_class = 'barbaz qux'
self.assertEllipsis("""\
...<div class="field fieldname-foo required barbaz qux">
<div class="label">...""", self.render_form(ExampleForm))
def test_widget_without_css_class_does_not_break(self):
class ExampleForm(zeit.edit.browser.form.InlineForm):
form_fields = zope.formlib.form.FormFields(IExample)
legend = 'Legend'
self.assertEllipsis("""\
...<div class="field fieldname-foo required">
<div class="label">...""", self.render_form(ExampleForm))
| # Copyright (c) 2012 gocept gmbh & co. kg
# See also LICENSE.txt
from mock import Mock
import zeit.cms.testing
import zeit.edit.browser.form
import zope.formlib.form
import zope.interface
import zope.publisher.browser
import zope.schema
class IExample(zope.interface.Interface):
foo = zope.schema.TextLine(title=u'foo')
class InlineForm(zeit.cms.testing.FunctionalTestCase):
def render_form(self, form_class):
ANY_CONTEXT = Mock()
zope.interface.alsoProvides(ANY_CONTEXT, IExample)
request = zope.publisher.browser.TestRequest()
form = form_class(ANY_CONTEXT, request)
return form()
def test_css_class_on_widget_is_rendered_to_html(self):
class ExampleForm(zeit.edit.browser.form.InlineForm):
form_fields = zope.formlib.form.FormFields(IExample)
legend = 'Legend'
def setUpWidgets(self):
super(ExampleForm, self).setUpWidgets()
self.widgets['foo'].vivi_css_class = 'barbaz qux'
self.assertEllipsis("""\
...<div class="field fieldname-foo required barbaz qux">
<div class="label">...""", self.render_form(ExampleForm))
def test_widget_without_css_class_does_not_break(self):
class ExampleForm(zeit.edit.browser.form.InlineForm):
form_fields = zope.formlib.form.FormFields(IExample)
legend = 'Legend'
self.assertEllipsis("""\
...<div class="field fieldname-foo required">
<div class="label">...""", self.render_form(ExampleForm))
| Python | 0.00151 |
1b94e5564b7940139e56310f18c58999f0c598b2 | validate by casting | filestore/core.py | filestore/core.py | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from document import Document
from jsonschema import validate as js_validate
from bson import ObjectId
class DatumNotFound(Exception):
pass
def get_datum(col, eid, _DATUM_CACHE, get_spec_handler, logger):
try:
datum = _DATUM_CACHE[eid]
except KeyError:
keys = ['datum_kwargs', 'resource']
# find the current document
edoc = col.find_one({'datum_id': eid})
if edoc is None:
raise DatumNotFound(
"No datum found with datum_id {!r}".format(eid))
# save it for later
datum = {k: edoc[k] for k in keys}
res = edoc['resource']
count = 0
for dd in col.find({'resource': res}):
count += 1
d_id = dd['datum_id']
if d_id not in _DATUM_CACHE:
_DATUM_CACHE[d_id] = {k: dd[k] for k in keys}
if count > _DATUM_CACHE.max_size:
logger.warn("More datum in a resource than your "
"datum cache can hold.")
handler = get_spec_handler(datum['resource'])
return handler(**datum['datum_kwargs'])
def bulk_insert_datum(col, resource, datum_ids, datum_kwarg_list):
resource_id = ObjectId(resource['id'])
def datum_factory():
for d_id, d_kwargs in zip(datum_ids, datum_kwarg_list):
datum = dict(resource=resource_id,
datum_id=str(d_id),
datum_kwargs=dict(d_kwargs))
yield datum
bulk = col.initialize_ordered_bulk_op()
for dm in datum_factory():
bulk.insert(dm)
return bulk.execute()
def insert_datum(col, resource, datum_id, datum_kwargs, known_spec):
spec = resource['spec']
if spec in known_spec:
js_validate(datum_kwargs, known_spec[spec]['datum'])
datum = dict(resource=ObjectId(resource['id']),
datum_id=str(datum_id),
datum_kwargs=dict(datum_kwargs))
col.insert_one(datum)
# do not leak mongo objectID
datum.pop('_id', None)
return Document('datum', datum)
def insert_resource(col, spec, resource_path, resource_kwargs,
known_spec):
resource_kwargs = dict(resource_kwargs)
if spec in known_spec:
js_validate(resource_kwargs, known_spec[spec]['resource'])
resource_object = dict(spec=str(spec),
resource_path=str(resource_path),
resource_kwargs=resource_kwargs)
col.insert_one(resource_object)
# rename to play nice with ME
resource_object['id'] = resource_object.pop('_id')
return resource_object
| from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from document import Document
from jsonschema import validate as js_validate
class DatumNotFound(Exception):
pass
def get_datum(col, eid, _DATUM_CACHE, get_spec_handler, logger):
try:
datum = _DATUM_CACHE[eid]
except KeyError:
keys = ['datum_kwargs', 'resource']
# find the current document
edoc = col.find_one({'datum_id': eid})
if edoc is None:
raise DatumNotFound(
"No datum found with datum_id {!r}".format(eid))
# save it for later
datum = {k: edoc[k] for k in keys}
res = edoc['resource']
count = 0
for dd in col.find({'resource': res}):
count += 1
d_id = dd['datum_id']
if d_id not in _DATUM_CACHE:
_DATUM_CACHE[d_id] = {k: dd[k] for k in keys}
if count > _DATUM_CACHE.max_size:
logger.warn("More datum in a resource than your "
"datum cache can hold.")
handler = get_spec_handler(datum['resource'])
return handler(**datum['datum_kwargs'])
def bulk_insert_datum(col, resource, datum_ids, datum_kwarg_list):
resource_id = resource['id']
def datum_factory():
for d_id, d_kwargs in zip(datum_ids, datum_kwarg_list):
datum = dict(resource=resource_id,
datum_id=d_id,
datum_kwargs=d_kwargs)
yield datum
bulk = col.initialize_ordered_bulk_op()
for dm in datum_factory():
bulk.insert(dm)
return bulk.execute()
def insert_datum(col, resource, datum_id, datum_kwargs, known_spec):
spec = resource['spec']
if spec in known_spec:
js_validate(datum_kwargs, known_spec[spec]['datum'])
datum = dict(resource=resource['id'], datum_id=datum_id,
datum_kwargs=datum_kwargs)
col.insert_one(datum)
# do not leak mongo objectID
datum.pop('_id', None)
return Document('datum', datum)
def insert_resource(col, spec, resource_path, resource_kwargs,
known_spec):
if spec in known_spec:
js_validate(resource_kwargs, known_spec[spec]['resource'])
resource_object = dict(spec=spec, resource_path=resource_path,
resource_kwargs=resource_kwargs)
col.insert_one(resource_object)
# rename to play nice with ME
resource_object['id'] = resource_object.pop('_id')
return resource_object
| Python | 0.000001 |
0f41838d07c15bb22861e884949306a8498ead58 | Support move | archive_images.py | archive_images.py | #!/usr/bin/env python
"""
Sorts image files by time - copies them into folders by year and month.
Written by Friedrich C. Kischkel.
"""
import os
import re
import shutil
import time
import argparse
IMAGE_FILE = re.compile(r"""\.(jpe?g)|(png)|(tiff?)$""", re.IGNORECASE)
EXIF_TIME_FORMAT = "%Y:%m:%d %H:%M:%S"
def time_taken(path):
"""Get time a picture was taken or at least file c/mtime."""
times = [
time.localtime(os.path.getctime(path)),
time.localtime(os.path.getmtime(path)),
time.localtime() # now
]
import exifread
with open(path, 'rb') as imagefile:
tags = exifread.process_file(imagefile, details=False)
for tag in ['Image DateTime', 'EXIF DateTimeOriginal', 'EXIF DateTimeDigitized']:
try:
times.append(time.strptime(str(tags[tag]), EXIF_TIME_FORMAT))
except KeyError, err:
print \
"WARNING: tag %(tag)s could not be retrieved from %(file)s" % \
{"tag": err, "file": path}
times.sort()
return times[0]
def archive_image(srcpath, filename, dstpath, overwrite=False, file_function=shutil.copy2):
"""Copy image "filename" in "path" into a subfolder "dstpath"."""
if re.search(IMAGE_FILE, filename):
srcpath = os.path.join(srcpath, filename)
ctime = time_taken(srcpath)
dst = os.path.join(
dstpath,
time.strftime("%Y", ctime),
time.strftime("%m", ctime))
try:
os.makedirs(dst)
except OSError:
pass
if not overwrite and os.path.exists(os.path.join(dst, filename)):
raise IOError('"%(path)s" already exists' % \
{'path': os.path.join(dst, filename)})
file_function(srcpath, dst)
def archive_all(srcpath, dstpath, overwrite=False, max_depth=None, file_function=shutil.copy2):
"""Copy files by creation time into sub-folders"""
iteration = 0
for current, _, files in os.walk(srcpath):
for filename in files:
try:
archive_image(current, filename, dstpath, overwrite, file_function)
except IOError, err:
print "ERROR: copying image: %(msg)s" % {'msg': str(err)}
iteration += 1
if max_depth != None and iteration > max_depth:
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""\
Copy images into year/month sub-folders by time they were taken.
Useful to get some chronological orientations when copying a bulk of
images from a camera's memory card to a local pictures folder.""")
parser.add_argument('SOURCE', nargs='+', help='source path(s)')
parser.add_argument('DESTINATION', nargs=1, help='destination path')
parser.add_argument('-m', '--move', action='store_true', default=False, help='move file instead of copying it (default: copy)')
parser.add_argument('-f', '--force', action='store_true', default=False, help='force overwriting of existing files (default: do not overwrite)')
parser.add_argument('-d', '--depth', type=int, help='descend this deep into SOURCE directories')
#parser.add_argument('--exec', help='execute command with args SRC DST')
ARGS = parser.parse_args()
fct = shutil.copy2
if ARGS.move:
fct = shutil.move
for source in ARGS.SOURCE:
archive_all(\
source,\
ARGS.DESTINATION[0],\
overwrite=ARGS.force,\
max_depth=ARGS.depth,\
file_function=fct)
| #!/usr/bin/env python
"""
Sorts image files by time - copies them into folders by year and month.
Written by Friedrich C. Kischkel.
"""
import os
import re
import shutil
import time
import argparse
IMAGE_FILE = re.compile(r"""\.(jpe?g)|(png)|(tiff?)$""", re.IGNORECASE)
EXIF_TIME_FORMAT = "%Y:%m:%d %H:%M:%S"
def time_taken(path):
"""Get time a picture was taken or at least file c/mtime."""
times = [
time.localtime(os.path.getctime(path)),
time.localtime(os.path.getmtime(path)),
time.localtime() # now
]
import exifread
with open(path, 'rb') as imagefile:
tags = exifread.process_file(imagefile, details=False)
for tag in ['Image DateTime', 'EXIF DateTimeOriginal', 'EXIF DateTimeDigitized']:
try:
times.append(time.strptime(str(tags[tag]), EXIF_TIME_FORMAT))
except KeyError, err:
print \
"WARNING: tag %(tag)s could not be retrieved from %(file)s" % \
{"tag": err, "file": path}
times.sort()
return times[0]
def archive_image(srcpath, filename, dstpath, overwrite=False):
"""Copy image "filename" in "path" into a subfolder "dstpath"."""
if re.search(IMAGE_FILE, filename):
srcpath = os.path.join(srcpath, filename)
ctime = time_taken(srcpath)
dst = os.path.join(
dstpath,
time.strftime("%Y", ctime),
time.strftime("%m", ctime))
try:
os.makedirs(dst)
except OSError:
pass
if not overwrite and os.path.exists(os.path.join(dst, filename)):
raise IOError('"%(path)s" already exists' % \
{'path': os.path.join(dst, filename)})
shutil.copy2(srcpath, dst)
def archive_all(srcpath, dstpath, overwrite=False, max_depth=None):
"""Copy files by creation time into sub-folders"""
iteration = 0
for current, _, files in os.walk(srcpath):
for filename in files:
try:
archive_image(current, filename, dstpath, overwrite)
except IOError, err:
print "ERROR: copying image: %(msg)s" % {'msg': str(err)}
iteration += 1
if max_depth != None and iteration > max_depth:
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""\
Copy images into year/month sub-folders by time they were taken.
Useful to get some chronological orientations when copying a bulk of
images from a camera's memory card to a local pictures folder.""")
parser.add_argument('SOURCE', nargs='+', help='source path(s)')
parser.add_argument('DESTINATION', nargs=1, help='destination path')
parser.add_argument('-f', '--force', action='store_true', default=False, help='force overwriting of existing files (default: do not overwrite)')
parser.add_argument('-d', '--depth', type=int, help='descend this deep into SOURCE directories')
#parser.add_argument('--exec', help='execute command with args SRC DST')
ARGS = parser.parse_args()
for source in ARGS.SOURCE:
archive_all(\
source,\
ARGS.DESTINATION[0],\
overwrite=ARGS.force,\
max_depth=ARGS.depth)
| Python | 0 |
22c727e0e38953f3647a8a825b01fcf142c06c64 | Bump version. | armet/_version.py | armet/_version.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division
__version_info__ = (0, 4, 17)
__version__ = '.'.join(map(str, __version_info__))
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division
__version_info__ = (0, 4, 16)
__version__ = '.'.join(map(str, __version_info__))
| Python | 0 |
7a1ddf38db725f0696482a271c32fa297d629316 | Set the version to the next patch release number (in dev mode) | backlog/__init__.py | backlog/__init__.py | __version__ = (0, 2, 2, 'dev', 0)
def get_version():
version = '%d.%d.%d' % __version__[0:3]
if __version__[3]:
version = '%s-%s%s' % (version, __version__[3],
(__version__[4] and str(__version__[4])) or '')
return version
| __version__ = (0, 2, 1, '', 0)
def get_version():
version = '%d.%d.%d' % __version__[0:3]
if __version__[3]:
version = '%s-%s%s' % (version, __version__[3],
(__version__[4] and str(__version__[4])) or '')
return version
| Python | 0 |
b808784711242099d8fbf9f0f1c7d13ca5a5a1d7 | Bump the version to 0.3.2 | backlog/__init__.py | backlog/__init__.py | """A Simple Note Manager"""
from __future__ import absolute_import
from backlog.backlog import Backlog
__version__ = '0.3.2'
| """A Simple Note Manager"""
from __future__ import absolute_import
from backlog.backlog import Backlog
__version__ = '0.3.1'
| Python | 0.999999 |
d0c6ae0dbb68fad31c5f3e51d934b8c7f5e8534f | Add ability to override issue JQL in runner | jzb/runner.py | jzb/runner.py | from argparse import ArgumentParser
import logging
import sys
import jira
from redis import StrictRedis
import yaml
import zendesk
from jzb import LOG
from jzb.bridge import Bridge
from jzb.util import objectize
def configure_logger(level):
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
LOG.addHandler(handler)
LOG.setLevel(level)
def main():
parser = ArgumentParser()
parser.add_argument('-c', '--config-file', default='config.yml')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-Q', '--query')
args = parser.parse_args()
if args.verbose:
configure_logger(logging.DEBUG)
else:
configure_logger(logging.INFO)
with open(args.config_file) as fp:
config = objectize(yaml.load(fp))
redis = StrictRedis(host=config.redis_host, port=config.redis_port)
jira_client = jira.JIRA(server=config.jira_url,
basic_auth=(config.jira_username, config.jira_password))
zd_client = zendesk.Client(url=config.zd_url,
username=config.zd_username,
password=config.zd_password)
bridge = Bridge(jira_client=jira_client,
zd_client=zd_client,
redis=redis,
config=config)
if args.query:
bridge.jira_issue_jql = args.query
bridge.sync()
if __name__ == '__main__':
main()
| from argparse import ArgumentParser
import logging
import sys
import jira
from redis import StrictRedis
import yaml
import zendesk
from jzb import LOG
from jzb.bridge import Bridge
from jzb.util import objectize
def configure_logger(level):
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
LOG.addHandler(handler)
LOG.setLevel(level)
def main():
parser = ArgumentParser()
parser.add_argument('-c', '--config-file', default='config.yml')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
if args.verbose:
configure_logger(logging.DEBUG)
else:
configure_logger(logging.INFO)
with open(args.config_file) as fp:
config = objectize(yaml.load(fp))
redis = StrictRedis(host=config.redis_host, port=config.redis_port)
jira_client = jira.JIRA(server=config.jira_url,
basic_auth=(config.jira_username, config.jira_password))
zd_client = zendesk.Client(url=config.zd_url,
username=config.zd_username,
password=config.zd_password)
bridge = Bridge(jira_client=jira_client,
zd_client=zd_client,
redis=redis,
config=config)
bridge.sync()
if __name__ == '__main__':
main()
| Python | 0 |
122b0982d1e10aada383bbd373518d049e54b906 | Prepare for release 0.9pbs.107 | filer/__init__.py | filer/__init__.py | #-*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '0.9pbs.107' # pragma: nocover
default_app_config = 'filer.apps.FilerConfig'
| #-*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '0.9pbs.107.dev1' # pragma: nocover
default_app_config = 'filer.apps.FilerConfig'
| Python | 0 |
fc6694686b5b928580c3e8d682b3b6496b12d006 | Refactor pop method | binary_heap.py | binary_heap.py | from __future__ import unicode_literals
class BinaryHeap(object):
"""A class for a binary heap."""
def __init__(self, iterable=()):
self.tree = []
for val in iterable:
self.push(val)
def __repr__(self):
return repr(self.tree)
def __len__(self):
return len(self.tree)
def __iter__(self):
return iter(self.tree)
def pop(self):
"""Pop the head from the heap and return."""
if len(self.tree) <= 1:
to_return = self.tree.pop()
else:
self.tree[0], self.tree[len(self.tree) - 1] = self.tree[len(self.tree) - 1], self.tree[0]
to_return = self.tree.pop()
self._bubbledown(0)
return to_return
def push(self, value):
"""Push a value onto a stack.
args:
value: the value to add
"""
self.tree.append(value) # Add protecion for different types case
if len(self.tree) > 1:
self._bubbleup(len(self.tree)-1)
def _bubbleup(self, pos):
"""Perform one step of heap sort up the tree.
args:
pos: the index position to inspect
"""
parent = self._find_parent(pos)
if pos == 0: # find_parent will return -1 at end of list
return
elif self.tree[pos] < self.tree[parent]:
self.tree[pos], self.tree[parent] = self.tree[parent], self.tree[pos]
self._bubbleup(parent)
def _bubbledown(self, pos):
"""Perform one step of heap sort down the tree.
args:
pos: the index position to inspect
"""
lchild = self._find_lchild(pos)
rchild = lchild + 1
try: # Evaluating whether lchild exists; may refactor
lval = self.tree[lchild]
try:
rval = self.tree[rchild]
except IndexError: # Case of left_child only
if lval < self.tree[pos]:
self.tree[lchild], self.tree[pos] = self.tree[pos], self.tree[lchild]
else: # Case of left_child and right_child
if lval < rval:
target = lchild
else:
target = rchild
if self.tree[target] < self.tree[pos]:
self.tree[target], self.tree[pos] = self.tree[pos], self.tree[target]
self._bubbledown(target)
except IndexError: # Case of no lchild
return
def _find_parent(self, pos):
"""Returns the parent index of given position.
args:
pos: the index position to inspect
Returns: index of the parent
"""
parent = (pos - 1) // 2
return parent
def _find_lchild(self, pos):
"""Returns the left child index of given position.
args:
pos: the index position to inspect
Returns: index of the left child
"""
lchild = (pos * 2) + 1
return lchild
def compare_values(self, parent_value=None, child_value=None, minheap=True):
"""Compares the values of child and parent according to heap type.
For a minheap, checks if child value is greater than parent value.
For a maxheap, checks if child value is less than parent value.
args:
child_pos: the pos of the child
parent: the pos of the parent
min: heap type comparison, defaults to minheap
Returns: True if heap type comparison matches
"""
if minheap is True:
return child_value > parent_value
else:
return child_value < parent_value
| from __future__ import unicode_literals
class BinaryHeap(object):
"""A class for a binary heap."""
def __init__(self, iterable=()):
self.tree = []
for val in iterable:
self.push(val)
def __repr__(self):
return repr(self.tree)
def __len__(self):
return len(self.tree)
def __iter__(self):
return iter(self.tree)
def pop(self):
"""Pop the head from the heap and return."""
if len(self.tree) == 1:
to_return = self.tree.pop()
else:
self.tree[0], self.tree[len(self.tree) - 1] = self.tree[len(self.tree) - 1], self.tree[0]
to_return = self.tree.pop() # Should raise error on empty
self._bubbledown(0)
return to_return
def push(self, value):
"""Push a value onto a stack.
args:
value: the value to add
"""
self.tree.append(value) # Add protecion for different types case
if len(self.tree) > 1:
self._bubbleup(len(self.tree)-1)
def _bubbleup(self, pos):
"""Perform one step of heap sort up the tree.
args:
pos: the index position to inspect
"""
parent = self._find_parent(pos)
if pos == 0: # find_parent will return -1 at end of list
return
elif self.tree[pos] < self.tree[parent]:
self.tree[pos], self.tree[parent] = self.tree[parent], self.tree[pos]
self._bubbleup(parent)
def _bubbledown(self, pos):
"""Perform one step of heap sort down the tree.
args:
pos: the index position to inspect
"""
lchild = self._find_lchild(pos)
rchild = lchild + 1
try: # Evaluating whether lchild exists; may refactor
lval = self.tree[lchild]
try:
rval = self.tree[rchild]
except IndexError: # Case of left_child only
if lval < self.tree[pos]:
self.tree[lchild], self.tree[pos] = self.tree[pos], self.tree[lchild]
else: # Case of left_child and right_child
if lval < rval:
target = lchild
else:
target = rchild
if self.tree[target] < self.tree[pos]:
self.tree[target], self.tree[pos] = self.tree[pos], self.tree[target]
self._bubbledown(target)
except IndexError: # Case of no lchild
return
def _find_parent(self, pos):
"""Returns the parent index of given position.
args:
pos: the index position to inspect
Returns: index of the parent
"""
parent = (pos - 1) // 2
return parent
def _find_lchild(self, pos):
"""Returns the left child index of given position.
args:
pos: the index position to inspect
Returns: index of the left child
"""
lchild = (pos * 2) + 1
return lchild
def compare_values(self, parent_value=None, child_value=None, minheap=True):
"""Compares the values of child and parent according to heap type.
For a minheap, checks if child value is greater than parent value.
For a maxheap, checks if child value is less than parent value.
args:
child_pos: the pos of the child
parent: the pos of the parent
min: heap type comparison, defaults to minheap
Returns: True if heap type comparison matches
"""
if minheap is True:
return child_value > parent_value
else:
return child_value < parent_value
| Python | 0.000001 |
40fc5d12d93d9c258e615b6001070b4fbd04f119 | Add sharding checks | cogs/utils/checks.py | cogs/utils/checks.py | import discord
from discord.ext import commands
# noinspection PyUnresolvedReferences
import __main__
def owner_check(ctx):
return str(ctx.message.author.id) in __main__.liara.owners
def is_owner():
return commands.check(owner_check)
def is_bot_account():
def predicate(ctx):
return ctx.bot.user.bot
return commands.check(predicate)
def is_not_bot_account():
def predicate(ctx):
return not ctx.bot.user.bot
return commands.check(predicate)
def is_selfbot():
def predicate(ctx):
return ctx.bot.self_bot
return commands.check(predicate)
def is_not_selfbot():
def predicate(ctx):
return not ctx.bot.self_bot
return commands.check(predicate)
def is_main_shard():
def predicate(ctx):
if ctx.bot.shard_id is None:
return True
elif ctx.bot.shard_id == 0:
return True
else:
return False
return commands.check(predicate)
def is_not_main_shard():
def predicate(ctx):
if ctx.bot.shard_id is None:
return False
elif ctx.bot.shard_id == 0:
return False
else:
return True
return commands.check(predicate)
def mod_or_permissions(**permissions):
def predicate(ctx):
if owner_check(ctx):
return True
if not isinstance(ctx.message.author, discord.Member):
return False
if ctx.message.author == ctx.message.guild.owner:
return True
# let's get the roles and compare them to
# what we have on file (if we do)
roles = [x.name.lower() for x in ctx.message.author.roles]
try:
if __main__.liara.settings['roles'][str(ctx.message.guild.id)]['mod_role'].lower() in roles:
return True
except KeyError:
pass
try:
if __main__.liara.settings['roles'][str(ctx.message.guild.id)]['admin_role'].lower() in roles:
return True
except KeyError:
pass
user_permissions = dict(ctx.message.author.permissions_in(ctx.message.channel))
for permission in permissions:
if permissions[permission]:
allowed = user_permissions.get(permission, False)
if allowed:
return True
return False
return commands.check(predicate)
def admin_or_permissions(**permissions):
def predicate(ctx):
if owner_check(ctx):
return True
if not isinstance(ctx.message.author, discord.Member):
return False
if ctx.message.author == ctx.message.guild.owner:
return True
try:
roles = [x.name.lower() for x in ctx.message.author.roles]
if __main__.liara.settings['roles'][str(ctx.message.guild.id)]['admin_role'].lower() in roles:
return True
except KeyError:
pass
user_permissions = dict(ctx.message.author.permissions_in(ctx.message.channel))
for permission in permissions:
if permissions[permission]:
allowed = user_permissions.get(permission, False)
if allowed:
return True
return False
return commands.check(predicate)
def serverowner_or_permissions(**permissions):
def predicate(ctx):
if owner_check(ctx):
return True
if not isinstance(ctx.message.author, discord.Member):
return False
if ctx.message.author == ctx.message.guild.owner:
return True
user_permissions = dict(ctx.message.author.permissions_in(ctx.message.channel))
for permission in permissions:
allowed = user_permissions.get(permission, False)
if allowed:
return True
return False
return commands.check(predicate)
# deal with more of Red's nonsense
serverowner = serverowner_or_permissions
admin = admin_or_permissions
mod = mod_or_permissions
| import discord
from discord.ext import commands
# noinspection PyUnresolvedReferences
import __main__
def owner_check(ctx):
return str(ctx.message.author.id) in __main__.liara.owners
def is_owner():
return commands.check(owner_check)
def is_bot_account():
def predicate(ctx):
return ctx.bot.user.bot
return commands.check(predicate)
def is_not_bot_account():
def predicate(ctx):
return not ctx.bot.user.bot
return commands.check(predicate)
def is_selfbot():
def predicate(ctx):
return ctx.bot.self_bot
return commands.check(predicate)
def is_not_selfbot():
def predicate(ctx):
return not ctx.bot.self_bot
return commands.check(predicate)
def mod_or_permissions(**permissions):
def predicate(ctx):
if owner_check(ctx):
return True
if not isinstance(ctx.message.author, discord.Member):
return False
if ctx.message.author == ctx.message.guild.owner:
return True
# let's get the roles and compare them to
# what we have on file (if we do)
roles = [x.name.lower() for x in ctx.message.author.roles]
try:
if __main__.liara.settings['roles'][str(ctx.message.guild.id)]['mod_role'].lower() in roles:
return True
except KeyError:
pass
try:
if __main__.liara.settings['roles'][str(ctx.message.guild.id)]['admin_role'].lower() in roles:
return True
except KeyError:
pass
user_permissions = dict(ctx.message.author.permissions_in(ctx.message.channel))
for permission in permissions:
if permissions[permission]:
allowed = user_permissions.get(permission, False)
if allowed:
return True
return False
return commands.check(predicate)
def admin_or_permissions(**permissions):
def predicate(ctx):
if owner_check(ctx):
return True
if not isinstance(ctx.message.author, discord.Member):
return False
if ctx.message.author == ctx.message.guild.owner:
return True
try:
roles = [x.name.lower() for x in ctx.message.author.roles]
if __main__.liara.settings['roles'][str(ctx.message.guild.id)]['admin_role'].lower() in roles:
return True
except KeyError:
pass
user_permissions = dict(ctx.message.author.permissions_in(ctx.message.channel))
for permission in permissions:
if permissions[permission]:
allowed = user_permissions.get(permission, False)
if allowed:
return True
return False
return commands.check(predicate)
def serverowner_or_permissions(**permissions):
def predicate(ctx):
if owner_check(ctx):
return True
if not isinstance(ctx.message.author, discord.Member):
return False
if ctx.message.author == ctx.message.guild.owner:
return True
user_permissions = dict(ctx.message.author.permissions_in(ctx.message.channel))
for permission in permissions:
allowed = user_permissions.get(permission, False)
if allowed:
return True
return False
return commands.check(predicate)
# deal with more of Red's nonsense
serverowner = serverowner_or_permissions
admin = admin_or_permissions
mod = mod_or_permissions
| Python | 0 |
4dedbc15c835d02ccde99fb9fad00ed9a590c69e | Add private field to posts | blog/models.py | blog/models.py | import hashlib, random
from datetime import datetime
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import UserMixin, AnonymousUserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from geoalchemy2 import Geometry
db = SQLAlchemy()
class User(db.Model, UserMixin):
__tablename__ = "user"
id = db.Column(db.Integer(), primary_key=True)
username = db.Column(db.String(32))
password = db.Column(db.String())
email = db.Column(db.String())
api_key = db.Column(db.String(64))
submitted = db.relationship('Post', backref='author', lazy='dynamic')
pings = db.relationship('Ping', backref='author', lazy='dynamic')
def __init__(self, username, password, email):
self.username = username
self.email = email
self.set_password(password)
self.new_api_key()
def set_password(self, password):
self.password = generate_password_hash(password)
def check_password(self, value):
return check_password_hash(self.password, value)
def new_api_key(self):
self.api_key = hashlib.sha224(str(random.getrandbits(256)).encode('utf-8')).hexdigest()
def is_authenticated(self):
if isinstance(self, AnonymousUserMixin):
return False
else:
return True
def is_active(self):
return True
def is_anonymous(self):
if isinstance(self, AnonymousUserMixin):
return True
else:
return False
def get_id(self):
return self.id
def __repr__(self):
return '<User %r>' % self.username
class Post(db.Model):
""" A post containing location data """
__tablename__ = "post"
id = db.Column(db.Integer, primary_key=True)
post_type = db.Column(db.String(32), nullable=False)
title = db.Column(db.String(256), nullable=False)
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow(), nullable=False)
loc = db.Column(Geometry('POINT'), nullable=False)
latitude = db.Column(db.Float, default=43.165556, nullable=False)
longitude = db.Column(db.Float, default=-77.611389, nullable=False)
private = db.Column(db.Boolean, default=False, nullable=False)
author_id = db.Column(db.Integer, db.ForeignKey('user.id'), index=True, nullable=False)
__mapper_args__ = {'polymorphic_on': post_type }
def get_id(self):
return self.id
def get_location(self):
return self.loc
def __repr__(self):
return '<Post {0}>'.format(self.title)
class TextPost(Post):
""" A blog post """
__mapper_args__ = {'polymorphic_identity': 'text'}
text = db.Column(db.Text)
class ImagePost(Post):
""" An image post """
__mapper_args__ = {'polymorphic_identity': 'image'}
image_path = db.Column(db.Text)
caption = db.Column(db.String(512))
class Ping(db.Model):
__tablename__ = "ping"
id = db.Column(db.Integer, primary_key=True)
author_id = db.Column(db.Integer, db.ForeignKey('user.id'), index=True)
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow())
loc = db.Column(Geometry('POINT'))
| import hashlib, random
from datetime import datetime
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import UserMixin, AnonymousUserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from geoalchemy2 import Geometry
db = SQLAlchemy()
class User(db.Model, UserMixin):
__tablename__ = "user"
id = db.Column(db.Integer(), primary_key=True)
username = db.Column(db.String(32))
password = db.Column(db.String())
email = db.Column(db.String())
api_key = db.Column(db.String(64))
submitted = db.relationship('Post', backref='author', lazy='dynamic')
pings = db.relationship('Ping', backref='author', lazy='dynamic')
def __init__(self, username, password, email):
self.username = username
self.email = email
self.set_password(password)
self.new_api_key()
def set_password(self, password):
self.password = generate_password_hash(password)
def check_password(self, value):
return check_password_hash(self.password, value)
def new_api_key(self):
self.api_key = hashlib.sha224(str(random.getrandbits(256)).encode('utf-8')).hexdigest()
def is_authenticated(self):
if isinstance(self, AnonymousUserMixin):
return False
else:
return True
def is_active(self):
return True
def is_anonymous(self):
if isinstance(self, AnonymousUserMixin):
return True
else:
return False
def get_id(self):
return self.id
def __repr__(self):
return '<User %r>' % self.username
class Post(db.Model):
""" A post containing location data """
__tablename__ = "post"
id = db.Column(db.Integer, primary_key=True)
post_type = db.Column(db.String(32), nullable=False)
title = db.Column(db.String(256), nullable=False)
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow(), nullable=False)
loc = db.Column(Geometry('POINT'), nullable=False)
latitude = db.Column(db.Float, default=43.165556, nullable=False)
longitude = db.Column(db.Float, default=-77.611389, nullable=False)
author_id = db.Column(db.Integer, db.ForeignKey('user.id'), index=True, nullable=False)
__mapper_args__ = {'polymorphic_on': post_type }
def get_id(self):
return self.id
def get_location(self):
return self.loc
def __repr__(self):
return '<Post {0}>'.format(self.title)
class TextPost(Post):
""" A blog post """
__mapper_args__ = {'polymorphic_identity': 'text'}
text = db.Column(db.Text)
class ImagePost(Post):
""" An image post """
__mapper_args__ = {'polymorphic_identity': 'image'}
image_path = db.Column(db.Text)
caption = db.Column(db.String(512))
class Ping(db.Model):
__tablename__ = "ping"
id = db.Column(db.Integer, primary_key=True)
author_id = db.Column(db.Integer, db.ForeignKey('user.id'), index=True)
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow())
loc = db.Column(Geometry('POINT'))
| Python | 0 |
0b452dca8c517b180df037fafc52f6e2b09811c1 | fix class name | books/forms.py | books/forms.py | from django.forms import ModelForm
from .models import BookReader, User
class UserForm(ModelForm):
class Meta:
model = User
class BookReaderForm(ModelForm):
class Meta:
model = BookReader
excluse = ['user']
| from django.forms import ModelForm
from .models import BookReader, User
class UserForm(ModelForm):
class Meta:
model = User
class BookReader(ModelForm):
class Meta:
model = BookReader
excluse = ['user']
| Python | 0.999994 |
7f4cd4f88656863f0c6976911407b0d8ae9f0a3b | complete explain html file | src/handle_html_file.py | src/handle_html_file.py | #-*_coding:utf8-*-
import inspect
import os
import shutil
from bs4 import BeautifulSoup
#Get test file path
def get_file_path():
test_file_path = ''
# the test dirs
test_dir = os.path.join(os.getcwd(), 'test_file')
# traverse the test dirs
list_dirs = os.walk(test_dir)
is_find_file = False
for root, dirs, files in list_dirs:
if is_find_file:
break
for f in files:
if not f.startswith('.'): # ignore the hide files
file_path = os.path.join(root, f) # get a full path
print(os.path.join(root, f))
if os.path.isfile(file_path):
test_file_path = file_path
is_find_file = True
break
print('the file path :' + test_file_path)
return test_file_path
def get_app_name_from_div_tag(div):
# Default value
info_app_name = 'NULL_APP_NAME'
if div is None:
return info_app_name
# find 'span' name tag from the info div tag
tag_td_div_span_app_name = div.find('span', attrs={'class': 'oneline-info title-info'})
if tag_td_div_span_app_name is None:
return info_app_name
# may be without 'a' tag
if tag_td_div_span_app_name.a is None:
return info_app_name
name = tag_td_div_span_app_name.a.string
# get the info app name
if name is not None:
info_app_name = name
print('find the app name :' + info_app_name)
return info_app_name
def get_app_publisher_from_div_tag(div):
# default value
info_app_publisher = 'NULL_APP_PUBLISHER'
if div == None:
return info_app_publisher
# find the 'span' publisher tag from the info div tag
tag_td_div_span_app_publisher = div.find('span', attrs={'class': 'oneline-info add-info'})
if type(tag_td_div_span_app_publisher) == type(None):
return info_app_publisher
# may be without 'a' tag
if tag_td_div_span_app_publisher.a is None:
return info_app_publisher
publisher = tag_td_div_span_app_publisher.a.string
if publisher is not None:
# get the info app publisher
info_app_publisher = publisher
print('find the app publisher :' + info_app_publisher)
return info_app_publisher
def get_app_package_from_div_tag(div):
# Default value
info_app_package = 'NULL_APP_PACKAGE'
if div is None:
return info_app_package
# find the 'span' publisher tag from the info div tag
tag_td_div_span_app_package = div.find('span', attrs={'class': 'product-code'})
if tag_td_div_span_app_package is None:
return info_app_package
# get the info app publisher
package = tag_td_div_span_app_package.string
if package is not None:
info_app_package = package
print('find the app package :' + info_app_package)
return info_app_package
# for tag_div in tag_td.find_all('div'):
# print('find a tag = div')
# use method 'join' to delete the '[]' for the reason that using the 'find_all' method get string will get the last eletment call the str() and append the string '[]'
# print('find a tag class is ', ''.join(tag_div['class']))
# print(tag_div.attrs)
# print(tag_div.find('span', attrs={'class': 'title-info-wrapper'}))
# class_value = ''.join(tag_div['class'])
# if class_value == 'main-info':
# print('find a main info')
def explain_html(path):
soup = BeautifulSoup(open(path, encoding='utf-8'), 'html.parser')
# print(soup.prettify(encoding='utf-8'))
# find TAG = td
count = 0
for tag_td in soup.find_all('td'):
print('find a tag = td')
# find the main info div tag
tag_td_div_main_info = tag_td.find('div', attrs={'class': 'main-info'})
# print('find a div main info :', tag_td_div_main_info)
app_name = get_app_name_from_div_tag(tag_td_div_main_info)
app_package = get_app_package_from_div_tag(tag_td_div_main_info)
app_publisher = get_app_publisher_from_div_tag(tag_td_div_main_info)
count = count + 1
sep = '##'
print('----#' + str(count) + sep + app_name + sep + app_package + sep + app_publisher)
# print(soup.td)
# print(soup.td.contents)
explain_html(get_file_path()) | #-*_coding:utf8-*-
import inspect
import os
import shutil
from bs4 import BeautifulSoup
#Get test file path
def get_file_path():
test_file_path = ''
# the test dirs
test_dir = os.path.join(os.getcwd(), 'test_file')
# traverse the test dirs
list_dirs = os.walk(test_dir)
is_find_file = False
for root, dirs, files in list_dirs:
if is_find_file:
break
for f in files:
file_path = os.path.join(root, f)
print(os.path.join(root, f))
if os.path.isfile(file_path):
test_file_path = file_path
is_find_file = True
break
print('the file path :' + test_file_path)
return test_file_path
def explain_html(path):
soup = BeautifulSoup(open(path, encoding='utf-8'), 'html.parser')
print(soup.prettify(encoding='utf-8'))
# print(soup.td)
print(soup.td.contents)
explain_html(get_file_path()) | Python | 0.000003 |
77d26064694e89d30ea4d62a7a9de9fb7d4038a0 | Fix typo secounds => seconds (#743) | common/util/debug.py | common/util/debug.py | import functools
import json
import pprint as _pprint
import sublime
from contextlib import contextmanager
_log = []
enabled = False
ENCODING_NOT_UTF8 = "{} was sent as binaries and we dont know the encoding, not utf-8"
def start_logging():
global _log
global enabled
_log = []
enabled = True
def stop_logging():
global enabled
enabled = False
@contextmanager
def disable_logging():
global enabled
enabled = False
try:
yield
finally:
enabled = True
def get_log():
return json.dumps(_log, indent=2)
def add_to_log(obj):
if enabled:
_log.append(obj)
def log_git(command, stdin, stdout, stderr, seconds):
message = {
"type": "git",
"command": command,
"stdin": stdin,
"stdout": stdout,
"stderr": stderr,
"seconds": seconds
}
if stdin.__class__ == bytes:
message["stdin"] = try_to_decode(stdin, "stdin")
if stdout.__class__ == bytes:
message["stdout"] = try_to_decode(stdout, "stdout")
if stderr.__class__ == bytes:
message["stderr"] = try_to_decode(stderr, "stderr")
add_to_log(message)
def try_to_decode(message, name):
try:
return message.decode(),
except UnicodeDecodeError:
return ENCODING_NOT_UTF8.format(name)
def log_error(err):
add_to_log({
"type": "error",
"error": repr(err)
})
def log_on_exception(fn):
def wrapped_fn(*args, **kwargs):
try:
fn(*args, **kwargs)
except Exception as e:
add_to_log({
"type": "exception",
"exception": repr(e)
})
raise e
def dump_var(name, value, width=79, end='\n', **kwargs):
is_str = isinstance(value, str)
prefix = "{}{}".format(name, ': ' if is_str else '=')
line_prefix = end + ' '*len(prefix)
if not is_str:
value = _pprint.pformat(value, width=max(49, width-len(prefix)))
print(prefix + line_prefix.join(value.splitlines()), end=end, **kwargs)
def dump(*args, **kwargs):
for i, arg in enumerate(args):
dump_var("_arg{}".format(i), arg)
for name, arg in sorted(kwargs.items()):
dump_var(name, arg)
# backward-compatibility
def pprint(*args, **kwargs):
"""
Pretty print since we can not use debugger
"""
dump(*args, **kwargs)
def get_trace_tags():
savvy_settings = sublime.load_settings("GitSavvy.sublime-settings")
if savvy_settings.get("dev_mode"):
return savvy_settings.get("dev_trace", [])
else:
return []
def trace(*args, tag="debug", fill=None, fill_width=60, **kwargs):
"""
Lightweight logging facility. Provides simple print-like interface with
filtering by tags and pretty-printed captions for delimiting output
sections.
See the "dev_trace" setting for possible values of the "tag" keyword.
"""
if tag not in get_trace_tags():
return
if fill is not None:
sep = str(kwargs.get('sep', ' '))
caption = sep.join(args)
args = "{0:{fill}<{width}}".format(caption and caption + sep,
fill=fill, width=fill_width),
print("GS [{}]".format(tag), *args, **kwargs)
def trace_for_tag(tag):
return functools.partial(trace, tag=tag)
trace.for_tag = trace_for_tag
class StackMeter:
"""Reentrant context manager counting the reentrancy depth."""
def __init__(self, depth=0):
super().__init__()
self.depth = depth
def __enter__(self):
depth = self.depth
self.depth += 1
return depth
def __exit__(self, *exc_info):
self.depth -= 1
| import functools
import json
import pprint as _pprint
import sublime
from contextlib import contextmanager
_log = []
enabled = False
ENCODING_NOT_UTF8 = "{} was sent as binaries and we dont know the encoding, not utf-8"
def start_logging():
global _log
global enabled
_log = []
enabled = True
def stop_logging():
global enabled
enabled = False
@contextmanager
def disable_logging():
global enabled
enabled = False
try:
yield
finally:
enabled = True
def get_log():
return json.dumps(_log, indent=2)
def add_to_log(obj):
if enabled:
_log.append(obj)
def log_git(command, stdin, stdout, stderr, secounds):
message = {
"type": "git",
"command": command,
"stdin": stdin,
"stdout": stdout,
"stderr": stderr,
"secounds": secounds
}
if stdin.__class__ == bytes:
message["stdin"] = try_to_decode(stdin, "stdin")
if stdout.__class__ == bytes:
message["stdout"] = try_to_decode(stdout, "stdout")
if stderr.__class__ == bytes:
message["stderr"] = try_to_decode(stderr, "stderr")
add_to_log(message)
def try_to_decode(message, name):
try:
return message.decode(),
except UnicodeDecodeError:
return ENCODING_NOT_UTF8.format(name)
def log_error(err):
add_to_log({
"type": "error",
"error": repr(err)
})
def log_on_exception(fn):
def wrapped_fn(*args, **kwargs):
try:
fn(*args, **kwargs)
except Exception as e:
add_to_log({
"type": "exception",
"exception": repr(e)
})
raise e
def dump_var(name, value, width=79, end='\n', **kwargs):
is_str = isinstance(value, str)
prefix = "{}{}".format(name, ': ' if is_str else '=')
line_prefix = end + ' '*len(prefix)
if not is_str:
value = _pprint.pformat(value, width=max(49, width-len(prefix)))
print(prefix + line_prefix.join(value.splitlines()), end=end, **kwargs)
def dump(*args, **kwargs):
for i, arg in enumerate(args):
dump_var("_arg{}".format(i), arg)
for name, arg in sorted(kwargs.items()):
dump_var(name, arg)
# backward-compatibility
def pprint(*args, **kwargs):
"""
Pretty print since we can not use debugger
"""
dump(*args, **kwargs)
def get_trace_tags():
savvy_settings = sublime.load_settings("GitSavvy.sublime-settings")
if savvy_settings.get("dev_mode"):
return savvy_settings.get("dev_trace", [])
else:
return []
def trace(*args, tag="debug", fill=None, fill_width=60, **kwargs):
"""
Lightweight logging facility. Provides simple print-like interface with
filtering by tags and pretty-printed captions for delimiting output
sections.
See the "dev_trace" setting for possible values of the "tag" keyword.
"""
if tag not in get_trace_tags():
return
if fill is not None:
sep = str(kwargs.get('sep', ' '))
caption = sep.join(args)
args = "{0:{fill}<{width}}".format(caption and caption + sep,
fill=fill, width=fill_width),
print("GS [{}]".format(tag), *args, **kwargs)
def trace_for_tag(tag):
return functools.partial(trace, tag=tag)
trace.for_tag = trace_for_tag
class StackMeter:
"""Reentrant context manager counting the reentrancy depth."""
def __init__(self, depth=0):
super().__init__()
self.depth = depth
def __enter__(self):
depth = self.depth
self.depth += 1
return depth
def __exit__(self, *exc_info):
self.depth -= 1
| Python | 0.000001 |
396a217ad725e25c8761edf3678dea349d06e023 | Reorganize imports | setuptools/__init__.py | setuptools/__init__.py | """Extensions to the 'distutils' for large or complex distributions"""
import os
import sys
import distutils.core
from distutils.core import Command as _Command
from distutils.util import convert_path
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature, _get_unpatched
from setuptools.depends import Require
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages'
]
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
def find_packages(where='.', exclude=()):
"""Return a list all Python packages found within directory 'where'
'where' should be supplied as a "cross-platform" (i.e. URL-style) path; it
will be converted to the appropriate local path syntax. 'exclude' is a
sequence of package names to exclude; '*' can be used as a wildcard in the
names, such that 'foo.*' will exclude all subpackages of 'foo' (but not
'foo' itself).
"""
out = []
stack=[(convert_path(where), '')]
while stack:
where,prefix = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where,name)
if ('.' not in name and os.path.isdir(fn) and
os.path.isfile(os.path.join(fn,'__init__.py'))
):
out.append(prefix+name); stack.append((fn,prefix+name+'.'))
for pat in list(exclude)+['ez_setup']:
from fnmatch import fnmatchcase
out = [item for item in out if not fnmatchcase(item,pat)]
return out
setup = distutils.core.setup
_Command = _get_unpatched(_Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
# Add support for keyword arguments
_Command.__init__(self,dist)
for k,v in kw.items():
setattr(self,k,v)
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
for k,v in kw.items():
setattr(cmd,k,v) # update command with keywords
return cmd
import distutils.core
distutils.core.Command = Command # we can't patch distutils.cmd, alas
def findall(dir = os.curdir):
"""Find all files under 'dir' and return the list of full filenames
(relative to 'dir').
"""
all_files = []
for base, dirs, files in os.walk(dir):
if base==os.curdir or base.startswith(os.curdir+os.sep):
base = base[2:]
if base:
files = [os.path.join(base, f) for f in files]
all_files.extend(filter(os.path.isfile, files))
return all_files
import distutils.filelist
distutils.filelist.findall = findall # fix findall bug in distutils.
# sys.dont_write_bytecode was introduced in Python 2.6.
if ((hasattr(sys, "dont_write_bytecode") and sys.dont_write_bytecode) or
(not hasattr(sys, "dont_write_bytecode") and os.environ.get("PYTHONDONTWRITEBYTECODE"))):
_dont_write_bytecode = True
else:
_dont_write_bytecode = False
| """Extensions to the 'distutils' for large or complex distributions"""
from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature, _get_unpatched
import distutils.core
from setuptools.depends import Require
from distutils.core import Command as _Command
from distutils.util import convert_path
import os
import sys
from setuptools.version import __version__
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages'
]
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
def find_packages(where='.', exclude=()):
"""Return a list all Python packages found within directory 'where'
'where' should be supplied as a "cross-platform" (i.e. URL-style) path; it
will be converted to the appropriate local path syntax. 'exclude' is a
sequence of package names to exclude; '*' can be used as a wildcard in the
names, such that 'foo.*' will exclude all subpackages of 'foo' (but not
'foo' itself).
"""
out = []
stack=[(convert_path(where), '')]
while stack:
where,prefix = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where,name)
if ('.' not in name and os.path.isdir(fn) and
os.path.isfile(os.path.join(fn,'__init__.py'))
):
out.append(prefix+name); stack.append((fn,prefix+name+'.'))
for pat in list(exclude)+['ez_setup']:
from fnmatch import fnmatchcase
out = [item for item in out if not fnmatchcase(item,pat)]
return out
setup = distutils.core.setup
_Command = _get_unpatched(_Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
# Add support for keyword arguments
_Command.__init__(self,dist)
for k,v in kw.items():
setattr(self,k,v)
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
for k,v in kw.items():
setattr(cmd,k,v) # update command with keywords
return cmd
import distutils.core
distutils.core.Command = Command # we can't patch distutils.cmd, alas
def findall(dir = os.curdir):
"""Find all files under 'dir' and return the list of full filenames
(relative to 'dir').
"""
all_files = []
for base, dirs, files in os.walk(dir):
if base==os.curdir or base.startswith(os.curdir+os.sep):
base = base[2:]
if base:
files = [os.path.join(base, f) for f in files]
all_files.extend(filter(os.path.isfile, files))
return all_files
import distutils.filelist
distutils.filelist.findall = findall # fix findall bug in distutils.
# sys.dont_write_bytecode was introduced in Python 2.6.
if ((hasattr(sys, "dont_write_bytecode") and sys.dont_write_bytecode) or
(not hasattr(sys, "dont_write_bytecode") and os.environ.get("PYTHONDONTWRITEBYTECODE"))):
_dont_write_bytecode = True
else:
_dont_write_bytecode = False
| Python | 0.000001 |
390bcb4be27012794ceb927e3ab2e384c2909daf | Add retries | conf/celeryconfig.py | conf/celeryconfig.py | from datetime import timedelta
import os
from ast import literal_eval
from celery.schedules import crontab
from kombu import Queue
CLUSTER_NAME = os.getenv('CLUSTER_NAME', 'local')
MESSAGES_TTL = 7200
# Broker and Queue Settings
BROKER_URL = os.getenv('BROKER_URL',
'amqp://guest:guest@localhost:5672')
BROKER_CONNECTION_TIMEOUT = int(os.getenv('BROKER_CONNECTION_TIMEOUT', '20'))
BROKER_HEARTBEAT = int(os.getenv('BROKER_HEARTBEAT', '20'))
BROKER_CONNECTION_RETRY = True
BROKER_CONNECTION_MAX_RETRIES = 100
CELERY_DEFAULT_QUEUE = 'cluster-deployer-%s-default' % CLUSTER_NAME
CELERY_PREFORK_QUEUE = 'cluster-deployer-%s-prefork' % CLUSTER_NAME
CELERY_QUEUES = (
Queue(CELERY_DEFAULT_QUEUE, routing_key='default',
queue_arguments={'x-message-ttl': MESSAGES_TTL}),
Queue(CELERY_PREFORK_QUEUE, routing_key='prefork',
queue_arguments={'x-message-ttl': MESSAGES_TTL}),
)
CELERY_DEFAULT_EXCHANGE = 'cluster-deployer-%s' % (CLUSTER_NAME)
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_DEFAULT_ROUTING_KEY = 'default'
CELERY_ROUTES = {
'deployer.tasks.deployment._fleet_deploy': {
'routing_key': 'prefork',
},
'deployer.tasks.deployment._fleet_undeploy': {
'routing_key': 'prefork',
},
'deployer.tasks.deployment._wait_for_undeploy': {
'routing_key': 'prefork',
},
'deployer.tasks.deployment._wait_for_undeploy': {
'routing_key': 'prefork',
},
'deployer.tasks.deployment._fleet_check_running': {
'routing_key': 'prefork',
}
}
CELERY_RESULT_BACKEND = 'amqp'
CELERY_RESULT_EXCHANGE = 'cluster-deployer-%s-results' % CLUSTER_NAME
CELERY_IMPORTS = ('deployer.tasks', 'deployer.tasks.deployment',
'deployer.tasks.common', 'deployer.tasks.proxy',
'celery.task')
CELERY_ACCEPT_CONTENT = ['json', 'pickle']
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_RESULT_SERIALIZER = 'pickle'
CELERY_ALWAYS_EAGER = literal_eval(os.getenv('CELERY_ALWAYS_EAGER', 'False'))
CELERY_CHORD_PROPAGATES = True
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
CELERY_IGNORE_RESULT = False
CELERYD_TASK_SOFT_TIME_LIMIT = 300
CELERYD_TASK_TIME_LIMIT = 330
CELERY_SEND_TASK_SENT_EVENT = True
CELERY_TASK_RESULT_EXPIRES = timedelta(hours=6)
CELERY_RESULT_PERSISTENT = True
# Remote Management
CELERYD_POOL_RESTARTS = True
# Queue Settings
CELERY_QUEUE_HA_POLICY = 'all'
# GLobal Settings
CELERY_TIMEZONE = 'UTC'
# Task releated settings
CELERY_ACKS_LATE = True
CELERY_TASK_PUBLISH_RETRY_POLICY = {
'max_retries': 30,
'interval_step': 1,
'interval_max': 10
}
# Celery Beat settings
CELERYBEAT_SCHEDULE = {
'celery.task.backend_cleanup': {
'task': 'deployer.tasks.backend_cleanup',
'schedule': crontab(hour="*/2", minute=0),
'args': (),
}
}
| from datetime import timedelta
import os
from ast import literal_eval
from celery.schedules import crontab
from kombu import Queue
CLUSTER_NAME = os.getenv('CLUSTER_NAME', 'local')
MESSAGES_TTL = 7200
# Broker and Queue Settings
BROKER_URL = os.getenv('BROKER_URL',
'amqp://guest:guest@localhost:5672')
BROKER_HEARTBEAT = int(os.getenv('BROKER_HEARTBEAT', '20'))
CELERY_DEFAULT_QUEUE = 'cluster-deployer-%s-default' % CLUSTER_NAME
CELERY_PREFORK_QUEUE = 'cluster-deployer-%s-prefork' % CLUSTER_NAME
CELERY_QUEUES = (
Queue(CELERY_DEFAULT_QUEUE, routing_key='default',
queue_arguments={'x-message-ttl': MESSAGES_TTL}),
Queue(CELERY_PREFORK_QUEUE, routing_key='prefork',
queue_arguments={'x-message-ttl': MESSAGES_TTL}),
)
CELERY_DEFAULT_EXCHANGE = 'cluster-deployer-%s' % (CLUSTER_NAME)
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_DEFAULT_ROUTING_KEY = 'default'
CELERY_ROUTES = {
'deployer.tasks.deployment._fleet_deploy': {
'routing_key': 'prefork',
},
'deployer.tasks.deployment._fleet_undeploy': {
'routing_key': 'prefork',
},
'deployer.tasks.deployment._wait_for_undeploy': {
'routing_key': 'prefork',
},
'deployer.tasks.deployment._wait_for_undeploy': {
'routing_key': 'prefork',
},
'deployer.tasks.deployment._fleet_check_running': {
'routing_key': 'prefork',
}
}
CELERY_RESULT_BACKEND = 'amqp'
CELERY_RESULT_EXCHANGE = 'cluster-deployer-%s-results' % CLUSTER_NAME
CELERY_IMPORTS = ('deployer.tasks', 'deployer.tasks.deployment',
'deployer.tasks.common', 'deployer.tasks.proxy',
'celery.task')
CELERY_ACCEPT_CONTENT = ['json', 'pickle']
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_RESULT_SERIALIZER = 'pickle'
CELERY_ALWAYS_EAGER = literal_eval(os.getenv('CELERY_ALWAYS_EAGER', 'False'))
CELERY_CHORD_PROPAGATES = True
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
CELERY_IGNORE_RESULT = False
CELERYD_TASK_SOFT_TIME_LIMIT = 300
CELERYD_TASK_TIME_LIMIT = 330
CELERY_SEND_TASK_SENT_EVENT = True
CELERY_TASK_RESULT_EXPIRES = timedelta(hours=6)
CELERY_RESULT_PERSISTENT = True
# Remote Management
CELERYD_POOL_RESTARTS = True
# Queue Settings
CELERY_QUEUE_HA_POLICY = 'all'
# GLobal Settings
CELERY_TIMEZONE = 'UTC'
# Task releated settings
CELERY_ACKS_LATE = True
CELERY_TASK_PUBLISH_RETRY_POLICY = {
'max_retries': 30,
'interval_step': 1,
'interval_max': 10
}
# Celery Beat settings
CELERYBEAT_SCHEDULE = {
'celery.task.backend_cleanup': {
'task': 'deployer.tasks.backend_cleanup',
'schedule': crontab(hour="*/2", minute=0),
'args': (),
}
}
| Python | 0.000539 |
f78ef9ff6094b23316a170cf8ae33056ba358aae | Remove a TODO | feedreader/handlers.py | feedreader/handlers.py | """APIRequestHandler subclasses for API endpoints."""
from tornado.web import HTTPError
from feedreader.api_request_handler import APIRequestHandler
class MainHandler(APIRequestHandler):
def get(self):
username = self.require_auth()
self.write({"message": "Hello world!"})
class UsersHandler(APIRequestHandler):
def post(self):
"""Create a new user."""
body = self.require_body_schema({
"type": "object",
"properties": {
"username": {"type": "string"},
"password": {"type": "string"},
},
"required": ["username", "password"],
})
try:
self.auth_provider.register(body["username"], body["password"])
except ValueError as e:
raise HTTPError(400, reason=e.message)
self.set_status(201)
| """APIRequestHandler subclasses for API endpoints."""
from tornado.web import HTTPError
from feedreader.api_request_handler import APIRequestHandler
class MainHandler(APIRequestHandler):
def get(self):
username = self.require_auth()
self.write({"message": "Hello world!"})
class UsersHandler(APIRequestHandler):
def post(self):
"""Create a new user."""
body = self.require_body_schema({
"type": "object",
"properties": {
"username": {"type": "string"},
"password": {"type": "string"},
},
"required": ["username", "password"],
})
# TODO: handle username already being taken, empty password
try:
self.auth_provider.register(body["username"], body["password"])
except ValueError as e:
raise HTTPError(400, reason=e.message)
self.set_status(201)
| Python | 0.998852 |
3595db808230f579a2410bf57eac054d779aaf4a | printmetadata STABLE | src/amiens/printmetadata.py | src/amiens/printmetadata.py | #!/usr/bin/python3
# Copyright 2015 Nathan Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from amiens.core.subcmd import Subcmd
import defusedxml.ElementTree as etree
class PrintMetadata(Subcmd):
@staticmethod
def add_if_present(lines, m_etree, tag, arg_displayname=None):
displayname=tag.lower()
if arg_displayname != None:
displayname = arg_displayname
spacelen=max(0, 10-len(displayname))
spaces=' '.join('' for x in range(0, spacelen))
el=m_etree.find(tag)
if el != None and el.text != None:
text=el.text
if type(el.text) != str:
text = repr(el.text)
lines.append(displayname+': '+spaces+ text)
@staticmethod
def cmd(args):
item=args['item']
t='\t'
lines=['' '--------------------------------------', '']
lines.append('Ident: '+item.data['ident'])
lines.append('Rating: '+repr(item.data['rating']))
lines.append('Comment: '+repr(item.data['comment']))
lines.append(' ')
m_etree = etree.fromstring(item.data['metadata'])
prioritized_tags = ({'tag':'title', 'display':'Title'},
{'tag':'subject','display':'Tags'},
{'tag':'description','display':'Descript.'})
low_priority_tags=({'tag':'addeddate', 'display': 'Added on'},
{'tag':'publicdate', 'display': 'Published'},
{'tag': 'uploader', 'display': None},
{'tag': 'mediatype', 'display': None})
skipped_tags=['identifier', 'curation']
for keyword in prioritized_tags:
PrintMetadata.add_if_present(lines, m_etree, keyword['tag'],
keyword['display'])
lines.append('')
lines.append('--other tags--')
standout_tags = [ x['tag'] for x in prioritized_tags ]
standout_tags.extend([ x['tag'] for x in low_priority_tags ])
standout_tags.extend(skipped_tags)
for m_keyval in m_etree:
if m_keyval.tag in standout_tags:
continue
if m_keyval.text == None or m_keyval.text == '':
continue
lines.append(m_keyval.tag + ' : ' + repr(m_keyval.text))
lines.append(' ')
for keyword in low_priority_tags:
PrintMetadata.add_if_present(lines, m_etree, keyword['tag'],
keyword['display'])
lines.append(' ')
print('\n '.join(lines))
| #!/usr/bin/python3
# Copyright 2015 Nathan Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from amiens.core.subcmd import Subcmd
class PrintMetadata(Subcmd):
@staticmethod
def cmd(args):
item=args['item']
t='\t'
lines=['']
lines.append('Ident:\t'+item.ident)
lines.append('Rating:\t'+str(item.rating))
lines.append('Comment:\t'+item.comment)
m_etree = etree.fromstring(item.data.metadata)
for keyword in ({tag:'title', display:'Title'},
{tag:'subject',display:'Tags'},
{tag:'description',display:'Description'}):
el=m_etree.find(keyword['tag'])
if el:
lines.append(keyword['display']+': '+ el.text)
print('\n'.join(lines))
| Python | 0.999738 |
cd37746924a6b6b94afd044688c4a2554d0f50d1 | fix variable name for id | import.py | import.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from app import app, db, Request
from glob import glob
from sqlalchemy.exc import IntegrityError
from OpenSSL import crypto
from datetime import datetime
for path in glob("{}/freifunk_*.crt".format(app.config['DIRECTORY'])):
with open(path) as certfile:
print("Importing {} ...".format(path))
certificate = crypto.load_certificate(
crypto.FILETYPE_PEM,
certfile.read()
)
# extract email and id from subject components
components = dict(certificate.get_subject().get_components())
email_address = components[b'emailAddress']
# remove 'freifunk_' prefix from id
cert_id = components[b'CN'].decode('utf-8').replace('freifunk_', '')
# extract creation date from certificate
generation_date = datetime.strptime(
certificate.get_notBefore().decode('utf-8'),
'%Y%m%d%H%M%SZ'
)
request = Request(cert_id, email_address, generation_date)
try:
db.session.add(request)
db.session.commit()
print("Improted {}.".format(cert_id))
except IntegrityError:
print("{} already exists.".format(cert_id))
db.session.rollback()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from app import app, db, Request
from glob import glob
from sqlalchemy.exc import IntegrityError
from OpenSSL import crypto
from datetime import datetime
for path in glob("{}/freifunk_*.crt".format(app.config['DIRECTORY'])):
with open(path) as certfile:
print("Importing {} ...".format(path))
certificate = crypto.load_certificate(
crypto.FILETYPE_PEM,
certfile.read()
)
# extract email and id from subject components
components = dict(certificate.get_subject().get_components())
email_address = components[b'emailAddress']
# remove 'freifunk_' prefix from id
cert_id = components[b'CN'].decode('utf-8').replace('freifunk_', '')
# extract creation date from certificate
generation_date = datetime.strptime(
certificate.get_notBefore().decode('utf-8'),
'%Y%m%d%H%M%SZ'
)
request = Request(cert_id, email_address, generation_date)
try:
db.session.add(request)
db.session.commit()
print("Improted {}.".format(id))
except IntegrityError:
print("{} already exists.".format(id))
db.session.rollback()
| Python | 0.999789 |
d4da07688c0b1244bad24c26483a0f1b94a8fab0 | remove that filtering option | src/apps/calendar/schema.py | src/apps/calendar/schema.py | from graphene import relay, AbstractType, String
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from .models import Calendar, Day
class CalendarNode(DjangoObjectType):
"""
how does this work?
"""
class Meta:
model = Calendar
filter_fields = {
'uuid': ['exact', ]
}
interfaces = (relay.Node, )
class CalendarQuery(AbstractType):
"""
how does this work?
"""
calendar = relay.Node.Field(CalendarNode)
calendars = DjangoFilterConnectionField(CalendarNode)
class DayNode(DjangoObjectType):
"""
how does this work?
"""
class Meta:
model = Day
interfaces = (relay.Node, )
exclude_fields = ('image', 'image_small', 'image_large')
image_large_url = String()
image_small_url = String()
def resolve_image_large_url(self, args, context, info):
"""
self is the Day instance
"""
return DayNode.get_absolute_image_url(
context, self.get_image_large_url()
)
def resolve_image_small_url(self, args, context, info):
"""
self is the Day instance
"""
return DayNode.get_absolute_image_url(
context, self.get_image_small_url()
)
def get_absolute_image_url(context, relative_url):
return context.scheme + '://' + context.get_host() + relative_url
class DayQuery(AbstractType):
"""
how does this work?
"""
day = relay.Node.Field(DayNode)
days = DjangoFilterConnectionField(DayNode)
| from graphene import relay, AbstractType, String
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from .models import Calendar, Day
class CalendarNode(DjangoObjectType):
"""
how does this work?
"""
class Meta:
model = Calendar
filter_fields = {
'uuid': ['exact', ]
}
filter_order_by = ['uuid']
interfaces = (relay.Node, )
class CalendarQuery(AbstractType):
"""
how does this work?
"""
calendar = relay.Node.Field(CalendarNode)
calendars = DjangoFilterConnectionField(CalendarNode)
class DayNode(DjangoObjectType):
"""
how does this work?
"""
class Meta:
model = Day
interfaces = (relay.Node, )
exclude_fields = ('image', 'image_small', 'image_large')
image_large_url = String()
image_small_url = String()
def resolve_image_large_url(self, args, context, info):
"""
self is the Day instance
"""
return DayNode.get_absolute_image_url(
context, self.get_image_large_url()
)
def resolve_image_small_url(self, args, context, info):
"""
self is the Day instance
"""
return DayNode.get_absolute_image_url(
context, self.get_image_small_url()
)
def get_absolute_image_url(context, relative_url):
return context.scheme + '://' + context.get_host() + relative_url
class DayQuery(AbstractType):
"""
how does this work?
"""
day = relay.Node.Field(DayNode)
days = DjangoFilterConnectionField(DayNode)
| Python | 0.000045 |
4a30762680fd3ee9b95795f39e10e15faf4279e8 | remove language intolerance | src/boarbot/modules/echo.py | src/boarbot/modules/echo.py | import discord
from boarbot.common.botmodule import BotModule
from boarbot.common.events import EventType
class EchoModule(BotModule):
async def handle_event(self, event_type, args):
if event_type == EventType.MESSAGE:
await self.echo(args[0])
async def echo(self, message: discord.Message):
if not self.client.user.mentioned_in(message):
return # Gotta mention me
if '!echo' not in message.clean_content:
return # Need !echo
echo = message.clean_content.split('!echo', 1)[1]
await self.client.send_message(message.channel, echo)
| import discord
from boarbot.common.botmodule import BotModule
from boarbot.common.events import EventType
class EchoModule(BotModule):
async def handle_event(self, event_type, args):
if event_type == EventType.MESSAGE:
await self.echo(args[0])
async def echo(self, message: discord.Message):
if not self.client.user.mentioned_in(message):
return # Gotta mention me
if '!echo' not in message.clean_content:
return # Need !echo
echo = message.clean_content.split('!echo', 1)[1]
if 'shit' in echo:
raise ValueError('Your language is bad and you should feel bad')
await self.client.send_message(message.channel, echo)
| Python | 0.999939 |
e8d0c7f678689c15049186360c08922be493587a | Remove non-existant flask.current_app.debug doc ref. | flask_nav/renderers.py | flask_nav/renderers.py | from flask import current_app
from dominate import tags
from visitor import Visitor
class Renderer(Visitor):
"""Base interface for navigation renderers.
Visiting a node should return a string or an object that converts to a
string containing HTML."""
def visit_object(self, node):
"""Fallback rendering for objects.
If the current application is in debug-mode
(``flask.current_app.debug`` is ``True``), an ``<!-- HTML comment
-->`` will be rendered, indicating which class is missing a visitation
function.
Outside of debug-mode, returns an empty string.
"""
if current_app.debug:
return tags.comment(
'no implementation in {} to render {}'.format(
self.__class__.__name__, node.__class__.__name__,
))
return ''
class SimpleRenderer(Renderer):
"""A very basic HTML5 renderer.
Renders a navigational structure using ``<nav>`` and ``<ul>`` tags that
can be styled using modern CSS.
:param kwargs: Additional attributes to pass on to the root ``<nav>``-tag.
"""
def __init__(self, **kwargs):
self.kwargs = kwargs
def visit_Link(self, node):
return tags.a(node.title, **node.attribs)
def visit_Navbar(self, node):
kwargs = {'_class': 'navbar'}
kwargs.update(self.kwargs)
cont = tags.nav(**kwargs)
ul = cont.add(tags.ul())
for item in node.items:
ul.add(tags.li(self.visit(item)))
return cont
def visit_View(self, node):
kwargs = {}
if node.active:
kwargs['_class'] = 'active'
return tags.a(node.title,
href=node.get_url(),
title=node.title,
**kwargs)
def visit_Subgroup(self, node):
group = tags.ul(_class='subgroup')
title = tags.span(node.title)
if node.active:
title.attributes['class'] = 'active'
for item in node.items:
group.add(tags.li(self.visit(item)))
return tags.div(title, group)
def visit_Separator(self, node):
return tags.hr(_class='separator')
def visit_Label(self, node):
return tags.span(node.title, _class='nav-label')
| from flask import current_app
from dominate import tags
from visitor import Visitor
class Renderer(Visitor):
"""Base interface for navigation renderers.
Visiting a node should return a string or an object that converts to a
string containing HTML."""
def visit_object(self, node):
"""Fallback rendering for objects.
If the current application is in debug-mode
(:attr:`flask.current_app.debug` is ``True``), an ``<!-- HTML comment
-->`` will be rendered, indicating which class is missing a visitation
function.
Outside of debug-mode, returns an empty string.
"""
if current_app.debug:
return tags.comment(
'no implementation in {} to render {}'.format(
self.__class__.__name__, node.__class__.__name__,
))
return ''
class SimpleRenderer(Renderer):
"""A very basic HTML5 renderer.
Renders a navigational structure using ``<nav>`` and ``<ul>`` tags that
can be styled using modern CSS.
:param kwargs: Additional attributes to pass on to the root ``<nav>``-tag.
"""
def __init__(self, **kwargs):
self.kwargs = kwargs
def visit_Link(self, node):
return tags.a(node.title, **node.attribs)
def visit_Navbar(self, node):
kwargs = {'_class': 'navbar'}
kwargs.update(self.kwargs)
cont = tags.nav(**kwargs)
ul = cont.add(tags.ul())
for item in node.items:
ul.add(tags.li(self.visit(item)))
return cont
def visit_View(self, node):
kwargs = {}
if node.active:
kwargs['_class'] = 'active'
return tags.a(node.title,
href=node.get_url(),
title=node.title,
**kwargs)
def visit_Subgroup(self, node):
group = tags.ul(_class='subgroup')
title = tags.span(node.title)
if node.active:
title.attributes['class'] = 'active'
for item in node.items:
group.add(tags.li(self.visit(item)))
return tags.div(title, group)
def visit_Separator(self, node):
return tags.hr(_class='separator')
def visit_Label(self, node):
return tags.span(node.title, _class='nav-label')
| Python | 0 |
f6df8c05d247650f4899d1101c553230a60ccc70 | Improve registration response messages | fogeybot/cogs/users.py | fogeybot/cogs/users.py | from discord.ext.commands import command
class UserCommands(object):
def __init__(self, bot, api, db):
self.bot = bot
self.api = api
self.db = db
@command(description="Registers/updates your battle tag", pass_context=True)
async def register(self, ctx, battletag: str):
if '#' not in battletag:
await self.bot.reply("bad battle tag format, it should look like this: `MrCool#123`")
return
try:
info = await self.api.get_mmr(battletag)
if info.present:
msg = "Registration successful\n"
msg += "**Note**: MMR lookup requires that your HotsLog profile remains public"
else:
msg = "Unable to find `{}` via HotsLogs; either your profile is private, or you made a typo\n".format(battletag)
msg += "If you made a typo: simply type `!register battletag#123` again\n"
msg += "If your profile is private: you will need to specify your MMR each time you `!joinpickup`, or make it public"
except APIError:
msg = "Registration succeeded, but I was unable to verify your battle tag with HotsLogs\n"
msg += "**Note**: MMR lookup requires that your HotsLog profile remains public"
await self.db.register_battle_tag(ctx.message.author.id, battletag)
await self.bot.reply(msg)
@command(description="Shows your registered battle tags, if any", pass_context=True)
async def registrationstatus(self, ctx):
battle_tag = await self.db.lookup_battle_tag(ctx.message.author.id)
if battle_tag:
await self.bot.reply("Registered battle tag: `{}`".format(battle_tag))
else:
await self.bot.reply("Battle tag not found")
@command(description="Unregisters your battle tag", pass_context=True)
async def unregister(self, ctx):
await self.db.unregister_battle_tag(ctx.message.author.id)
await self.bot.reply("Registration removed")
| from discord.ext.commands import command
class UserCommands(object):
def __init__(self, bot, api, db):
self.bot = bot
self.api = api
self.db = db
@command(description="Registers/updates your battle tag", pass_context=True)
async def register(self, ctx, battletag: str):
if '#' not in battletag:
await self.bot.reply("bad battle tag format, it should look like this: `MrCool#123`")
return
# TODO verify with hotslogs (account for private profiles)
await self.db.register_battle_tag(ctx.message.author.id, battletag)
await self.bot.reply("Registration successful")
@command(description="Shows your registered battle tags, if any", pass_context=True)
async def registrationstatus(self, ctx):
battle_tag = await self.db.lookup_battle_tag(ctx.message.author.id)
if battle_tag:
await self.bot.reply("Registered battle tag: `{}`".format(battle_tag))
else:
await self.bot.reply("Battle tag not found")
@command(description="Unregisters your battle tag", pass_context=True)
async def unregister(self, ctx):
await self.db.unregister_battle_tag(ctx.message.author.id)
await self.bot.reply("Registration removed")
| Python | 0.000002 |
db711fe24ffff78d21db3af8e437dc2f2f1b48a7 | Add space at top of class bruteforce_ssh_pyes | alerts/bruteforce_ssh_pyes.py | alerts/bruteforce_ssh_pyes.py | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Anthony Verez averez@mozilla.com
# Jeff Bryner jbryner@mozilla.com
from lib.alerttask import AlertTask
from query_models import SearchQuery, TermMatch, PhraseMatch, TermsMatch
class AlertBruteforceSshES(AlertTask):
def main(self):
search_query = SearchQuery(minutes=2)
search_query.add_must([
TermMatch('_type', 'event'),
PhraseMatch('summary', 'failed'),
TermMatch('program', 'sshd'),
TermsMatch('summary', ['login', 'invalid', 'ldap_count_entries']),
])
search_query.add_must_not([
PhraseMatch('summary', '10.22.75.203'),
PhraseMatch('summary', '10.8.75.144'),
])
self.filtersManual(search_query)
# Search aggregations on field 'sourceipaddress', keep X samples of
# events at most
self.searchEventsAggregated('details.sourceipaddress', samplesLimit=10)
# alert when >= X matching events in an aggregation
self.walkAggregations(threshold=10)
# Set alert properties
def onAggregation(self, aggreg):
# aggreg['count']: number of items in the aggregation, ex: number of failed login attempts
# aggreg['value']: value of the aggregation field, ex: toto@example.com
# aggreg['events']: list of events in the aggregation
category = 'bruteforce'
tags = ['ssh']
severity = 'NOTICE'
summary = ('{0} ssh bruteforce attempts by {1}'.format(
aggreg['count'], aggreg['value']))
hosts = self.mostCommon(
aggreg['allevents'], '_source.details.hostname')
for i in hosts[:5]:
summary += ' {0} ({1} hits)'.format(i[0], i[1])
# Create the alert object based on these properties
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)
| #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Anthony Verez averez@mozilla.com
# Jeff Bryner jbryner@mozilla.com
from lib.alerttask import AlertTask
from query_models import SearchQuery, TermMatch, PhraseMatch, TermsMatch
class AlertBruteforceSshES(AlertTask):
def main(self):
search_query = SearchQuery(minutes=2)
search_query.add_must([
TermMatch('_type', 'event'),
PhraseMatch('summary', 'failed'),
TermMatch('program', 'sshd'),
TermsMatch('summary', ['login', 'invalid', 'ldap_count_entries']),
])
search_query.add_must_not([
PhraseMatch('summary', '10.22.75.203'),
PhraseMatch('summary', '10.8.75.144'),
])
self.filtersManual(search_query)
# Search aggregations on field 'sourceipaddress', keep X samples of
# events at most
self.searchEventsAggregated('details.sourceipaddress', samplesLimit=10)
# alert when >= X matching events in an aggregation
self.walkAggregations(threshold=10)
# Set alert properties
def onAggregation(self, aggreg):
# aggreg['count']: number of items in the aggregation, ex: number of failed login attempts
# aggreg['value']: value of the aggregation field, ex: toto@example.com
# aggreg['events']: list of events in the aggregation
category = 'bruteforce'
tags = ['ssh']
severity = 'NOTICE'
summary = ('{0} ssh bruteforce attempts by {1}'.format(
aggreg['count'], aggreg['value']))
hosts = self.mostCommon(
aggreg['allevents'], '_source.details.hostname')
for i in hosts[:5]:
summary += ' {0} ({1} hits)'.format(i[0], i[1])
# Create the alert object based on these properties
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)
| Python | 0.000003 |
dfe531a481e2e753e755f8877bc747147deb7840 | Set optional port as proper Channel attribute. (#1163) | parsl/channels/oauth_ssh/oauth_ssh.py | parsl/channels/oauth_ssh/oauth_ssh.py | import logging
import paramiko
import socket
from parsl.errors import OptionalModuleMissing
from parsl.channels.ssh.ssh import SSHChannel
try:
from oauth_ssh.ssh_service import SSHService
from oauth_ssh.oauth_ssh_token import find_access_token
_oauth_ssh_enabled = True
except (ImportError, NameError):
_oauth_ssh_enabled = False
logger = logging.getLogger(__name__)
class OAuthSSHChannel(SSHChannel):
"""SSH persistent channel. This enables remote execution on sites
accessible via ssh. This channel uses Globus based OAuth tokens for authentication.
"""
def __init__(self, hostname, username=None, script_dir=None, envs=None, port=22):
''' Initialize a persistent connection to the remote system.
We should know at this point whether ssh connectivity is possible
Args:
- hostname (String) : Hostname
KWargs:
- username (string) : Username on remote system
- script_dir (string) : Full path to a script dir where
generated scripts could be sent to.
- envs (dict) : A dictionary of env variables to be set when executing commands
- port (int) : Port at which the SSHService is running
Raises:
'''
if not _oauth_ssh_enabled:
raise OptionalModuleMissing(['oauth_ssh'],
"OauthSSHChannel requires oauth_ssh module and config.")
self.hostname = hostname
self.username = username
self.script_dir = script_dir
self.port = port
self.envs = {}
if envs is not None:
self.envs = envs
try:
access_token = find_access_token(hostname)
except Exception:
logger.exception("Failed to find the access token for {}".format(hostname))
raise
try:
self.service = SSHService(hostname, port)
self.transport = self.service.login(access_token, username)
except Exception:
logger.exception("Caught an exception in the OAuth authentication step with {}".format(hostname))
raise
self.sftp_client = paramiko.SFTPClient.from_transport(self.transport)
def execute_wait(self, cmd, walltime=60, envs={}):
''' Synchronously execute a commandline string on the shell.
This command does *NOT* honor walltime currently.
Args:
- cmd (string) : Commandline string to execute
Kwargs:
- walltime (int) : walltime in seconds
- envs (dict) : Dictionary of env variables
Returns:
- retcode : Return code from the execution, -1 on fail
- stdout : stdout string
- stderr : stderr string
Raises:
None.
'''
session = self.transport.open_session()
session.setblocking(0)
nbytes = 1024
session.exec_command(self.prepend_envs(cmd, envs))
session.settimeout(walltime)
try:
# Wait until command is executed
exit_status = session.recv_exit_status()
stdout = session.recv(nbytes).decode('utf-8')
stderr = session.recv_stderr(nbytes).decode('utf-8')
except socket.timeout:
logger.exception("Command failed to execute without timeout limit on {}".format(self))
raise
return exit_status, stdout, stderr
def execute_no_wait(self, cmd, walltime=60, envs={}):
''' Execute asynchronousely without waiting for exitcode
Args:
- cmd (string): Commandline string to be executed on the remote side
KWargs:
- walltime (int): timeout to exec_command
- envs (dict): A dictionary of env variables
Returns:
- None, stdout (readable stream), stderr (readable stream)
Raises:
- ChannelExecFailed (reason)
'''
session = self.transport.open_session()
session.setblocking(0)
nbytes = 10240
session.exec_command(self.prepend_envs(cmd, envs))
stdout = session.recv(nbytes).decode('utf-8')
stderr = session.recv_stderr(nbytes).decode('utf-8')
return None, stdout, stderr
def close(self):
return self.transport.close()
| import logging
import paramiko
import socket
from parsl.errors import OptionalModuleMissing
from parsl.channels.ssh.ssh import SSHChannel
try:
from oauth_ssh.ssh_service import SSHService
from oauth_ssh.oauth_ssh_token import find_access_token
_oauth_ssh_enabled = True
except (ImportError, NameError):
_oauth_ssh_enabled = False
logger = logging.getLogger(__name__)
class OAuthSSHChannel(SSHChannel):
"""SSH persistent channel. This enables remote execution on sites
accessible via ssh. This channel uses Globus based OAuth tokens for authentication.
"""
def __init__(self, hostname, username=None, script_dir=None, envs=None, port=22):
''' Initialize a persistent connection to the remote system.
We should know at this point whether ssh connectivity is possible
Args:
- hostname (String) : Hostname
KWargs:
- username (string) : Username on remote system
- script_dir (string) : Full path to a script dir where
generated scripts could be sent to.
- envs (dict) : A dictionary of env variables to be set when executing commands
- port (int) : Port at which the SSHService is running
Raises:
'''
if not _oauth_ssh_enabled:
raise OptionalModuleMissing(['oauth_ssh'],
"OauthSSHChannel requires oauth_ssh module and config.")
self.hostname = hostname
self.username = username
self.script_dir = script_dir
self.envs = {}
if envs is not None:
self.envs = envs
try:
access_token = find_access_token(hostname)
except Exception:
logger.exception("Failed to find the access token for {}".format(hostname))
raise
try:
self.service = SSHService(hostname, port)
self.transport = self.service.login(access_token, username)
except Exception:
logger.exception("Caught an exception in the OAuth authentication step with {}".format(hostname))
raise
self.sftp_client = paramiko.SFTPClient.from_transport(self.transport)
def execute_wait(self, cmd, walltime=60, envs={}):
''' Synchronously execute a commandline string on the shell.
This command does *NOT* honor walltime currently.
Args:
- cmd (string) : Commandline string to execute
Kwargs:
- walltime (int) : walltime in seconds
- envs (dict) : Dictionary of env variables
Returns:
- retcode : Return code from the execution, -1 on fail
- stdout : stdout string
- stderr : stderr string
Raises:
None.
'''
session = self.transport.open_session()
session.setblocking(0)
nbytes = 1024
session.exec_command(self.prepend_envs(cmd, envs))
session.settimeout(walltime)
try:
# Wait until command is executed
exit_status = session.recv_exit_status()
stdout = session.recv(nbytes).decode('utf-8')
stderr = session.recv_stderr(nbytes).decode('utf-8')
except socket.timeout:
logger.exception("Command failed to execute without timeout limit on {}".format(self))
raise
return exit_status, stdout, stderr
def execute_no_wait(self, cmd, walltime=60, envs={}):
''' Execute asynchronousely without waiting for exitcode
Args:
- cmd (string): Commandline string to be executed on the remote side
KWargs:
- walltime (int): timeout to exec_command
- envs (dict): A dictionary of env variables
Returns:
- None, stdout (readable stream), stderr (readable stream)
Raises:
- ChannelExecFailed (reason)
'''
session = self.transport.open_session()
session.setblocking(0)
nbytes = 10240
session.exec_command(self.prepend_envs(cmd, envs))
stdout = session.recv(nbytes).decode('utf-8')
stderr = session.recv_stderr(nbytes).decode('utf-8')
return None, stdout, stderr
def close(self):
return self.transport.close()
| Python | 0 |
b1b33a778d7abca2aa29e9612b6a75ff4aa7d64f | add UnboundError to actionAngle | galpy/actionAngle_src/actionAngle.py | galpy/actionAngle_src/actionAngle.py | import math as m
class actionAngle:
"""Top-level class for actionAngle classes"""
def __init__(self,*args,**kwargs):
"""
NAME:
__init__
PURPOSE:
initialize an actionAngle object
INPUT:
OUTPUT:
HISTORY:
2010-07-11 - Written - Bovy (NYU)
"""
if len(args) == 3: #R,vR.vT
R,vR,vT= args
self._R= R
self._vR= vR
self._vT= vT
elif len(args) == 5: #R,vR.vT, z, vz
R,vR,vT, z, vz= args
self._R= R
self._vR= vR
self._vT= vT
self._z= z
self._vz= vz
elif len(args) == 6: #R,vR.vT, z, vz, phi
R,vR,vT, z, vz, phi= args
self._R= R
self._vR= vR
self._vT= vT
self._z= z
self._vz= vz
self._phi= phi
else:
if len(args) == 2:
vxvv= args[0](args[1]).vxvv
else:
vxvv= args[0].vxvv
self._R= vxvv[0]
self._vR= vxvv[1]
self._vT= vxvv[2]
if len(vxvv) > 3:
self._z= vxvv[3]
self.vz= vxvv[4]
self._phi= vxvv[5]
if hasattr(self,'_z'): #calculate the polar angle
if self._z == 0.: self._theta= m.pi/2.
else: self._theta= m.atan(self._R/self._z)
return None
class UnboundError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| import math as m
class actionAngle:
"""Top-level class for actionAngle classes"""
def __init__(self,*args,**kwargs):
"""
NAME:
__init__
PURPOSE:
initialize an actionAngle object
INPUT:
OUTPUT:
HISTORY:
2010-07-11 - Written - Bovy (NYU)
"""
if len(args) == 3: #R,vR.vT
R,vR,vT= args
self._R= R
self._vR= vR
self._vT= vT
elif len(args) == 5: #R,vR.vT, z, vz
R,vR,vT, z, vz= args
self._R= R
self._vR= vR
self._vT= vT
self._z= z
self._vz= vz
elif len(args) == 6: #R,vR.vT, z, vz, phi
R,vR,vT, z, vz, phi= args
self._R= R
self._vR= vR
self._vT= vT
self._z= z
self._vz= vz
self._phi= phi
else:
if len(args) == 2:
vxvv= args[0](args[1]).vxvv
else:
vxvv= args[0].vxvv
self._R= vxvv[0]
self._vR= vxvv[1]
self._vT= vxvv[2]
if len(vxvv) > 3:
self._z= vxvv[3]
self.vz= vxvv[4]
self._phi= vxvv[5]
if hasattr(self,'_z'): #calculate the polar angle
if self._z == 0.: self._theta= m.pi/2.
else: self._theta= m.atan(self._R/self._z)
return None
| Python | 0.000001 |
400788fac8b91206521feaea800e37dd183c2f4f | Make sure coverage is at 100% for ref_validator_test.py | tests/swagger20_validator/ref_validator_test.py | tests/swagger20_validator/ref_validator_test.py | import pytest
from jsonschema.validators import Draft4Validator
from jsonschema.validators import RefResolver
from mock import Mock, MagicMock
from bravado_core.swagger20_validator import ref_validator
@pytest.fixture
def address_target():
return {
'type': 'object',
'properties': {
'street': {
'type': 'string',
},
'city': {
'type': 'string',
},
'state': {
'type': 'string',
},
},
'required': ['street', 'city', 'state'],
}
@pytest.fixture
def address_ref():
return '#/definitions/Address'
@pytest.fixture
def address_schema(address_ref, annotated_scope):
return {
'$ref': address_ref,
'x-scope': annotated_scope,
}
@pytest.fixture
def address():
return {
'street': '1000 Main St',
'city': 'Austin',
'state': 'TX',
}
@pytest.fixture
def original_scope():
return ['file:///tmp/swagger.json']
@pytest.fixture
def annotated_scope():
return [
'file:///tmp/swagger.json',
'file:///tmp/models.json',
]
@pytest.fixture
def mock_validator(original_scope):
validator = Mock(spec=Draft4Validator)
validator.resolver = Mock(spec=RefResolver)
validator.resolver._scopes_stack = original_scope
# Make descend() return an empty list to StopIteration.
validator.descend.return_value = [Mock()]
return validator
def test_when_resolve_is_not_None(address_target, address, original_scope,
annotated_scope, address_ref,
address_schema, mock_validator):
# Verify RefResolver._scopes_stack is replaced by the x-scope
# annotation's scope stack during the call to RefResolver.resolve(...)
def assert_correct_scope_and_resolve(*args, **kwargs):
assert mock_validator.resolver._scopes_stack == annotated_scope
return 'file:///tmp/swagger.json', address_target
mock_validator.resolver.resolve = Mock(
side_effect=assert_correct_scope_and_resolve)
# Force iteration over generator function
list(ref_validator(mock_validator, ref=address_ref, instance=address,
schema=address_schema))
assert mock_validator.resolver.resolve.call_count == 1
assert mock_validator.resolver._scopes_stack == original_scope
def test_when_resolve_is_None(address_target, address, original_scope,
annotated_scope, address_ref, address_schema,
mock_validator):
# Verify RefResolver._scopes_stack is replaced by the x-scope
# annotation's scope stack during the call to RefResolver.resolving(...)
def assert_correct_scope_and_resolve(*args, **kwargs):
assert mock_validator.resolver._scopes_stack == annotated_scope
return 'file:///tmp/swagger.json', address_target
mock_validator.resolver.resolve = None
mock_validator.resolver.resolving.return_value = MagicMock(
side_effect=assert_correct_scope_and_resolve)
# Force iteration over generator function
list(ref_validator(mock_validator, ref=address_ref, instance=address,
schema=address_schema))
assert mock_validator.resolver.resolving.call_count == 1
assert mock_validator.resolver._scopes_stack == original_scope
| import pytest
from jsonschema.validators import Draft4Validator
from jsonschema.validators import RefResolver
from mock import Mock, MagicMock
from bravado_core.swagger20_validator import ref_validator
@pytest.fixture
def address_target():
return {
'type': 'object',
'properties': {
'street': {
'type': 'string',
},
'city': {
'type': 'string',
},
'state': {
'type': 'string',
},
},
'required': ['street', 'city', 'state'],
}
@pytest.fixture
def address_ref():
return '#/definitions/Address'
@pytest.fixture
def address_schema(address_ref, annotated_scope):
return {
'$ref': address_ref,
'x-scope': annotated_scope,
}
@pytest.fixture
def address():
return {
'street': '1000 Main St',
'city': 'Austin',
'state': 'TX',
}
@pytest.fixture
def original_scope():
return ['file:///tmp/swagger.json']
@pytest.fixture
def annotated_scope():
return [
'file:///tmp/swagger.json',
'file:///tmp/models.json',
]
@pytest.fixture
def mock_validator(original_scope):
validator = Mock(spec=Draft4Validator)
validator.resolver = Mock(spec=RefResolver)
validator.resolver._scopes_stack = original_scope
# Make descend() return an empty list to StopIteration.
validator.descend.return_value = []
return validator
def test_when_resolve_is_not_None(address_target, address, original_scope,
annotated_scope, address_ref,
address_schema, mock_validator):
# Verify RefResolver._scopes_stack is replaced by the x-scope
# annotation's scope stack during the call to RefResolver.resolve(...)
def assert_correct_scope_and_resolve(*args, **kwargs):
assert mock_validator.resolver._scopes_stack == annotated_scope
return 'file:///tmp/swagger.json', address_target
mock_validator.resolver.resolve = Mock(
side_effect=assert_correct_scope_and_resolve)
# Force iteration over generator function
list(ref_validator(mock_validator, ref=address_ref, instance=address,
schema=address_schema))
assert mock_validator.resolver.resolve.call_count == 1
assert mock_validator.resolver._scopes_stack == original_scope
def test_when_resolve_is_None(address_target, address, original_scope,
annotated_scope, address_ref, address_schema,
mock_validator):
# Verify RefResolver._scopes_stack is replaced by the x-scope
# annotation's scope stack during the call to RefResolver.resolving(...)
def assert_correct_scope_and_resolve(*args, **kwargs):
assert mock_validator.resolver._scopes_stack == annotated_scope
return 'file:///tmp/swagger.json', address_target
mock_validator.resolver.resolve = None
mock_validator.resolver.resolving.return_value = MagicMock(
side_effect=assert_correct_scope_and_resolve)
# Force iteration over generator function
list(ref_validator(mock_validator, ref=address_ref, instance=address,
schema=address_schema))
assert mock_validator.resolver.resolving.call_count == 1
assert mock_validator.resolver._scopes_stack == original_scope
| Python | 0 |
0662ab1773b835b447dd71ad53fa595f490cbcc8 | Add proper encoding support to ftp_list | flexget/plugins/input/ftp_list.py | flexget/plugins/input/ftp_list.py | import logging
import ftplib
import os
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
log = logging.getLogger('ftp_list')
class InputFtpList(object):
"""
Generate entries from a ftp listing
Configuration:
ftp_list:
config:
use-ssl: no
name: <ftp name>
username: <username>
password: <password>
host: <host to connect>
port: <port>
dirs:
- <directory 1>
- <directory 2>
- ....
"""
def validator(self):
from flexget import validator
root = validator.factory('dict')
root.accept('list', key='dirs').accept('text')
config = root.accept('dict', key='config', required=True)
config.accept('text', key='name', required=True)
config.accept('text', key='username', required=True)
config.accept('text', key='password', required=True)
config.accept('text', key='host', required=True)
config.accept('integer', key='port', required=True)
config.accept('text', key='encoding')
config.accept('boolean', key='use-ssl')
return root
def prepare_config(self, config):
config['config'].setdefault('use-ssl', False)
config['config'].setdefault('encoding', 'auto')
return config
def on_task_input(self, task, config):
config = self.prepare_config(config)
connection_config = config['config']
if connection_config['use-ssl']:
ftp = ftplib.FTP_TLS()
else:
ftp = ftplib.FTP()
# ftp.set_debuglevel(2)
log.debug('Trying connecting to: %s', (connection_config['host']))
try:
ftp.connect(connection_config['host'], connection_config['port'])
ftp.login(connection_config['username'], connection_config['password'])
except ftplib.all_errors as e:
raise plugin.PluginError(e)
log.debug('Connected.')
encoding = 'ascii'
if connection_config['encoding'] == 'auto':
feat_response = ftp.sendcmd('FEAT')
if 'UTF8' in [feat_item.strip().upper() for feat_item in feat_response.splitlines()]:
encoding = 'utf8'
elif connection_config['encoding']:
encoding = connection_config['encoding']
ftp.sendcmd('TYPE I')
ftp.set_pasv(True)
entries = []
for path in config['dirs']:
baseurl = "ftp://%s:%s@%s:%s/" % (connection_config['username'], connection_config['password'],
connection_config['host'], connection_config['port'])
try:
dirs = ftp.nlst(path)
except ftplib.error_perm as e:
raise plugin.PluginWarning(str(e))
if not dirs:
log.verbose('Directory %s is empty', path)
for p in dirs:
p = p.decode(encoding)
url = baseurl + p
title = os.path.basename(p)
log.info('Accepting entry %s ' % title)
entries.append(Entry(title, url))
return entries
@event('plugin.register')
def register_plugin():
plugin.register(InputFtpList, 'ftp_list', api_ver=2)
| import logging
import ftplib
import os
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
log = logging.getLogger('ftp_list')
class InputFtpList(object):
"""
Generate entries from a ftp listing
Configuration:
ftp_list:
config:
use-ssl: no
name: <ftp name>
username: <username>
password: <password>
host: <host to connect>
port: <port>
dirs:
- <directory 1>
- <directory 2>
- ....
"""
def validator(self):
from flexget import validator
root = validator.factory('dict')
root.accept('list', key='dirs').accept('text')
config = root.accept('dict', key='config', required=True)
config.accept('text', key='name', required=True)
config.accept('text', key='username', required=True)
config.accept('text', key='password', required=True)
config.accept('text', key='host', required=True)
config.accept('integer', key='port', required=True)
config.accept('boolean', key='use-ssl')
return root
def prepare_config(self, config):
config.setdefault('use-ssl', False)
return config
def on_task_input(self, task, config):
config = self.prepare_config(config)
connection_config = config['config']
if connection_config['use-ssl']:
ftp = ftplib.FTP_TLS()
else:
ftp = ftplib.FTP()
# ftp.set_debuglevel(2)
log.debug('Trying connecting to: %s', (connection_config['host']))
try:
ftp.connect(connection_config['host'], connection_config['port'])
ftp.login(connection_config['username'], connection_config['password'])
except ftplib.all_errors as e:
raise plugin.PluginError(e)
log.debug('Connected.')
ftp.sendcmd('TYPE I')
ftp.set_pasv(True)
entries = []
for path in config['dirs']:
baseurl = "ftp://%s:%s@%s:%s/" % (connection_config['username'], connection_config['password'],
connection_config['host'], connection_config['port'])
try:
dirs = ftp.nlst(path)
except ftplib.error_perm as e:
raise plugin.PluginWarning(str(e))
if not dirs:
log.verbose('Directory %s is empty', path)
for p in dirs:
url = baseurl + p
title = os.path.basename(p)
log.info('Accepting entry %s ' % title)
entries.append(Entry(title, url))
return entries
@event('plugin.register')
def register_plugin():
plugin.register(InputFtpList, 'ftp_list', api_ver=2)
| Python | 0.000001 |
a0af5dc1478fe8b639cc5a37898ad180f1f20a89 | Add --midi option to CLI | src/twelve_tone/cli.py | src/twelve_tone/cli.py | """
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -mtwelve_tone` python will execute
``__main__.py`` as a script. That means there won't be any
``twelve_tone.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``twelve_tone.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
import click
from twelve_tone.composer import Composer
@click.command()
@click.option('--midi', '-m', help='MIDI output file')
def main(midi):
c = Composer()
c.compose()
click.echo(c.get_melody())
if midi is not None:
c.save_to_midi(filename=midi)
| """
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -mtwelve_tone` python will execute
``__main__.py`` as a script. That means there won't be any
``twelve_tone.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``twelve_tone.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
import click
from twelve_tone.composer import Composer
@click.command()
def main():
c = Composer()
c.compose()
click.echo(c.get_melody())
| Python | 0 |
910b1cc171de18cc844abe912130541234b23c7f | Add auth support. | flamyngo/views.py | flamyngo/views.py | import json
import re
import os
from pymongo import MongoClient
from monty.serialization import loadfn
from monty.json import jsanitize
from flask import render_template, request, make_response, Response
from flamyngo import app
from functools import wraps
from flask import request, Response
module_path = os.path.dirname(os.path.abspath(__file__))
SETTINGS = loadfn(os.environ["FLAMYNGO"])
CONN = MongoClient(SETTINGS["db"]["host"], SETTINGS["db"]["port"])
DB = CONN[SETTINGS["db"]["database"]]
if "username" in SETTINGS["db"]:
DB.authenticate(SETTINGS["db"]["username"], SETTINGS["db"]["password"])
CNAMES = [d["name"] for d in SETTINGS["collections"]]
CSETTINGS = {d["name"]: d for d in SETTINGS["collections"]}
AUTH_USER = SETTINGS.get("AUTH_USER", None)
AUTH_PASSWD = SETTINGS.get("AUTH_PASSWD", None)
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
if AUTH_USER is None:
return True
return username == AUTH_USER and password == AUTH_PASSWD
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if (AUTH_USER is not None) and (not auth or not check_auth(
auth.username, auth.password)):
return authenticate()
return f(*args, **kwargs)
return decorated
@app.route('/', methods=['GET'])
@requires_auth
def index():
return make_response(render_template('index.html', collections=CNAMES))
@app.route('/query', methods=['GET'])
@requires_auth
def query():
cname = request.args.get("collection")
search_string = request.args.get("search_string")
settings = CSETTINGS[cname]
criteria = {}
for regex in settings["query"]:
if re.match(r'%s' % regex[1], search_string):
criteria[regex[0]] = parse_criteria(search_string, regex[2])
break
if not criteria:
criteria = json.loads(search_string)
results = []
for r in DB[cname].find(criteria, projection=settings["summary"]):
processed = {}
for k in settings["summary"]:
toks = k.split(".")
try:
val = r[toks[0]]
for t in toks[1:]:
try:
val = val[t]
except KeyError:
# Handle integer indices
val = val[int(t)]
except:
# Return the base value if we can descend into the data.
val = None
processed[k] = val
results.append(processed)
return make_response(render_template(
'index.html', collection_name=cname,
results=results, fields=settings["summary"],
unique_key=settings["unique_key"],
active_collection=cname,
collections=CNAMES)
)
@app.route('/<string:collection_name>/doc/<string:uid>')
@requires_auth
def get_doc(collection_name, uid):
settings = CSETTINGS[collection_name]
criteria = {
settings["unique_key"]: parse_criteria(uid, settings["unique_key_type"])}
doc = DB[collection_name].find_one(criteria)
return make_response(render_template(
'doc.html', doc=json.dumps(jsanitize(doc)),
collection_name=collection_name, doc_id=uid)
)
def parse_criteria(val, vtype):
toks = vtype.rsplit(".", 1)
if len(toks) == 1:
func = getattr(__import__("__builtin__"), toks[0])
else:
mod = __import__(toks[0], globals(), locals(), [toks[1]], 0)
func = getattr(mod, toks[1])
return func(val)
if __name__ == "__main__":
app.run(debug=True)
| import json
import re
import os
from pymongo import MongoClient
from monty.serialization import loadfn
from monty.json import jsanitize
from flask import render_template, request, make_response
from flamyngo import app
module_path = os.path.dirname(os.path.abspath(__file__))
SETTINGS = loadfn(os.environ["FLAMYNGO"])
CONN = MongoClient(SETTINGS["db"]["host"], SETTINGS["db"]["port"])
DB = CONN[SETTINGS["db"]["database"]]
if "username" in SETTINGS["db"]:
DB.authenticate(SETTINGS["db"]["username"], SETTINGS["db"]["password"])
CNAMES = [d["name"] for d in SETTINGS["collections"]]
CSETTINGS = {d["name"]: d for d in SETTINGS["collections"]}
@app.route('/', methods=['GET'])
def index():
return make_response(render_template('index.html', collections=CNAMES))
@app.route('/query', methods=['GET'])
def query():
cname = request.args.get("collection")
search_string = request.args.get("search_string")
settings = CSETTINGS[cname]
criteria = {}
for regex in settings["query"]:
if re.match(r'%s' % regex[1], search_string):
criteria[regex[0]] = parse_criteria(search_string, regex[2])
break
if not criteria:
criteria = json.loads(search_string)
results = []
for r in DB[cname].find(criteria, projection=settings["summary"]):
processed = {}
for k in settings["summary"]:
toks = k.split(".")
try:
val = r[toks[0]]
for t in toks[1:]:
try:
val = val[t]
except KeyError:
# Handle integer indices
val = val[int(t)]
except:
# Return the base value if we can descend into the data.
val = None
processed[k] = val
results.append(processed)
return make_response(render_template(
'index.html', collection_name=cname,
results=results, fields=settings["summary"],
unique_key=settings["unique_key"],
active_collection=cname,
collections=CNAMES)
)
@app.route('/<string:collection_name>/doc/<string:uid>')
def get_doc(collection_name, uid):
settings = CSETTINGS[collection_name]
criteria = {
settings["unique_key"]: parse_criteria(uid, settings["unique_key_type"])}
doc = DB[collection_name].find_one(criteria)
return make_response(render_template(
'doc.html', doc=json.dumps(jsanitize(doc)),
collection_name=collection_name, doc_id=uid)
)
def parse_criteria(val, vtype):
toks = vtype.rsplit(".", 1)
if len(toks) == 1:
func = getattr(__import__("__builtin__"), toks[0])
else:
mod = __import__(toks[0], globals(), locals(), [toks[1]], 0)
func = getattr(mod, toks[1])
return func(val)
if __name__ == "__main__":
app.run(debug=True)
| Python | 0 |
81612e20e327b4b4eabb4c77201dd6b8d2d21e93 | Add get_default getter to config. | law/config.py | law/config.py | # -*- coding: utf-8 -*-
"""
law Config interface.
"""
__all__ = ["Config"]
import os
import tempfile
import six
from six.moves.configparser import ConfigParser
class Config(ConfigParser):
_instance = None
_default_config = {
"core": {
"db_file": os.environ.get("LAW_DB_FILE", os.path.expandvars("$HOME/.law/db")),
"target_tmp_dir": tempfile.gettempdir(),
},
"paths": {},
}
_config_files = ("$LAW_CONFIG_FILE", "$HOME/.law/config", "etc/law/config")
@classmethod
def instance(cls, config_file=""):
if cls._instance is None:
cls._instance = cls(config_file=config_file)
return cls._instance
def __init__(self, config_file="", skip_fallbacks=False):
ConfigParser.__init__(self, allow_no_value=True) # old-style
files = (config_file,)
if not skip_fallbacks:
files += self._config_files
# read from files
self.config_file = None
for f in files:
f = os.path.expandvars(os.path.expanduser(f))
if os.path.isfile(f):
self.read(f)
self.config_file = f
# maybe inherit
if self.has_section("core") and self.has_option("core", "inherit_config"):
self.inherit(self.get("core", "inherit_config"))
# update by defaults
self.update(self._default_config, overwrite=False)
def optionxform(self, option):
return option
def get_default(self, section, option, default=None):
if self.has_option(section, option):
return self.get(section, option)
else:
return default
def update(self, data, overwrite=True):
for section, _data in data.items():
if not self.has_section(section):
self.add_section(section)
for option, value in _data.items():
if overwrite or not self.has_option(section, option):
self.set(section, option, value)
def inherit(self, filename):
p = self.__class__(filename, skip_fallbacks=True)
self.update(p._sections, overwrite=False)
def keys(self, section):
return [key for key, _ in self.items(section)]
| # -*- coding: utf-8 -*-
"""
law Config interface.
"""
__all__ = ["Config"]
import os
import tempfile
import six
from six.moves.configparser import ConfigParser
class Config(ConfigParser):
_instance = None
_default_config = {
"core": {
"db_file": os.environ.get("LAW_DB_FILE", os.path.expandvars("$HOME/.law/db")),
"target_tmp_dir": tempfile.gettempdir(),
},
"paths": {},
}
_config_files = ("$LAW_CONFIG_FILE", "$HOME/.law/config", "etc/law/config")
@classmethod
def instance(cls, config_file=""):
if cls._instance is None:
cls._instance = cls(config_file=config_file)
return cls._instance
def __init__(self, config_file="", skip_fallbacks=False):
ConfigParser.__init__(self, allow_no_value=True) # old-style
files = (config_file,)
if not skip_fallbacks:
files += self._config_files
# read from files
self.config_file = None
for f in files:
f = os.path.expandvars(os.path.expanduser(f))
if os.path.isfile(f):
self.read(f)
self.config_file = f
# maybe inherit
if self.has_section("core") and self.has_option("core", "inherit_config"):
self.inherit(self.get("core", "inherit_config"))
# update by defaults
self.update(self._default_config, overwrite=False)
def optionxform(self, option):
return option
def update(self, data, overwrite=True):
for section, _data in data.items():
if not self.has_section(section):
self.add_section(section)
for option, value in _data.items():
if overwrite or not self.has_option(section, option):
self.set(section, option, value)
def inherit(self, filename):
p = self.__class__(filename, skip_fallbacks=True)
self.update(p._sections, overwrite=False)
def keys(self, section):
return [key for key, _ in self.items(section)]
| Python | 0 |
67444868b1c7c50da6d490893d72991b65b2aa7b | Add superlance supervisord plugin | frontend/setup.py | frontend/setup.py | import sys
from setuptools import setup, find_packages
requires = (
'flask',
'Flask-Script',
'flask_sockets',
'gunicorn',
'cassandra-driver',
'google-api-python-client',
'ecdsa',
'daemonize',
'websocket-client',
'pyzmq',
'fabric',
'pyyaml',
'supervisor',
'pexpect',
'blist',
'superlance'
)
setup(
name = 'cstar_perf.frontend',
version = '1.0',
description = 'A web frontend for cstar_perf, the Cassandra performance testing platform',
author = 'The DataStax Cassandra Test Engineering Team',
author_email = 'ryan@datastax.com',
url = 'https://github.com/datastax/cstar_perf',
install_requires = requires,
namespace_packages = ['cstar_perf'],
packages=find_packages(),
zip_safe=False,
include_package_data=True,
entry_points = {'console_scripts':
['cstar_perf_client = cstar_perf.frontend.client.client:main',
'cstar_perf_server = cstar_perf.frontend.lib.server:main',
'cstar_perf_notifications = cstar_perf.frontend.server.notifications:main']},
)
from cstar_perf.frontend.lib.crypto import generate_server_keys
generate_server_keys()
| import sys
from setuptools import setup, find_packages
requires = (
'flask',
'Flask-Script',
'flask_sockets',
'gunicorn',
'cassandra-driver',
'google-api-python-client',
'ecdsa',
'daemonize',
'websocket-client',
'pyzmq',
'fabric',
'pyyaml',
'supervisor',
'pexpect',
'blist'
)
setup(
name = 'cstar_perf.frontend',
version = '1.0',
description = 'A web frontend for cstar_perf, the Cassandra performance testing platform',
author = 'The DataStax Cassandra Test Engineering Team',
author_email = 'ryan@datastax.com',
url = 'https://github.com/datastax/cstar_perf',
install_requires = requires,
namespace_packages = ['cstar_perf'],
packages=find_packages(),
zip_safe=False,
include_package_data=True,
entry_points = {'console_scripts':
['cstar_perf_client = cstar_perf.frontend.client.client:main',
'cstar_perf_server = cstar_perf.frontend.lib.server:main',
'cstar_perf_notifications = cstar_perf.frontend.server.notifications:main']},
)
from cstar_perf.frontend.lib.crypto import generate_server_keys
generate_server_keys()
| Python | 0 |
a16cda69c2ec0e96bf5b5a558e288d22b353f28f | change work folder before build | build/build.py | build/build.py | #!/usr/bin/env python2.7
# coding=utf-8
import subprocess
import platform
import os
import sys
def build(platform):
print("[Start Build] Target Platform: " + platform)
build_folder = os.path.split(os.path.realpath(__file__))[0]
#change folder
os.chdir(build_folder)
build_script = ""
if platform == "windows":
build_script = "make_win_with_2015_static.bat"
elif platform == "android":
build_script = "make_android_static.sh"
elif platform == "ios":
build_script = "make_ios.sh"
elif platform == "osx":
build_script = "make_osx_static.sh"
subprocess.check_call(build_script,shell=True)
if __name__ == '__main__':
length = len(sys.argv)
if length < 2:
sys.exit("please select target platform !")
platform = sys.argv[1]
build(platform)
# print(platform)
| #!/usr/bin/env python2.7
# coding=utf-8
import subprocess
import platform
import os
import sys
def build(platform):
print("[Start Build] Target Platform: " + platform)
build_script = ""
if platform == "windows":
build_script = "make_win_with_2015_static.bat"
subprocess.Popen(["cmd.exe","/C",build_script],shell=True)
elif platform == "android":
build_script = "make_android_static.sh"
elif platform == "ios":
build_script = "make_ios.sh"
elif platform == "osx":
build_script = "make_osx_static.sh"
if __name__ == '__main__':
length = len(sys.argv)
if length < 2:
sys.exit("please select target platform !")
platform = sys.argv[1]
build(platform)
# print(platform)
| Python | 0 |
e4b9463dcbe5700c5a9089188e1f3caca5a206ab | Add hierarchy walker | avalon/tools/cbsceneinventory/lib.py | avalon/tools/cbsceneinventory/lib.py | from avalon import io, api
def switch_item(container,
asset_name=None,
subset_name=None,
representation_name=None):
"""Switch container asset, subset or representation of a container by name.
It'll always switch to the latest version - of course a different
approach could be implemented.
Args:
container (dict): data of the item to switch with
asset_name (str): name of the asset
subset_name (str): name of the subset
representation_name (str): name of the representation
Returns:
dict
"""
if all(not x for x in [asset_name, subset_name, representation_name]):
raise ValueError(
"Must have at least one change provided to switch.")
# Collect any of current asset, subset and representation if not provided
# so we can use the original name from those.
if any(not x for x in [asset_name, subset_name, representation_name]):
_id = io.ObjectId(container["representation"])
representation = io.find_one({"type": "representation", "_id": _id})
version, subset, asset, project = io.parenthood(representation)
if asset_name is None:
asset_name = asset["name"]
if subset_name is None:
subset_name = subset["name"]
if representation_name is None:
representation_name = representation["name"]
# Find the new one
asset = io.find_one({"name": asset_name, "type": "asset"})
assert asset, ("Could not find asset in the database with the name "
"'%s'" % asset_name)
subset = io.find_one({"name": subset_name,
"type": "subset",
"parent": asset["_id"]})
assert subset, ("Could not find subset in the database with the name "
"'%s'" % subset_name)
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[('name', -1)])
assert version, "Could not find a version for {}.{}".format(
asset_name, subset_name
)
representation = io.find_one({"name": representation_name,
"type": "representation",
"parent": version["_id"]})
assert representation, (
"Could not find representation in the database with"
" the name '%s'" % representation_name)
api.switch(container, representation)
return representation
def walk_hierarchy(node):
"""Recursively yield group node
"""
for child in node.children():
if child.get("isGroupNode"):
yield child
for _child in walk_hierarchy(child):
yield _child
| from avalon import io, api
def switch_item(container,
asset_name=None,
subset_name=None,
representation_name=None):
"""Switch container asset, subset or representation of a container by name.
It'll always switch to the latest version - of course a different
approach could be implemented.
Args:
container (dict): data of the item to switch with
asset_name (str): name of the asset
subset_name (str): name of the subset
representation_name (str): name of the representation
Returns:
dict
"""
if all(not x for x in [asset_name, subset_name, representation_name]):
raise ValueError(
"Must have at least one change provided to switch.")
# Collect any of current asset, subset and representation if not provided
# so we can use the original name from those.
if any(not x for x in [asset_name, subset_name, representation_name]):
_id = io.ObjectId(container["representation"])
representation = io.find_one({"type": "representation", "_id": _id})
version, subset, asset, project = io.parenthood(representation)
if asset_name is None:
asset_name = asset["name"]
if subset_name is None:
subset_name = subset["name"]
if representation_name is None:
representation_name = representation["name"]
# Find the new one
asset = io.find_one({"name": asset_name, "type": "asset"})
assert asset, ("Could not find asset in the database with the name "
"'%s'" % asset_name)
subset = io.find_one({"name": subset_name,
"type": "subset",
"parent": asset["_id"]})
assert subset, ("Could not find subset in the database with the name "
"'%s'" % subset_name)
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[('name', -1)])
assert version, "Could not find a version for {}.{}".format(
asset_name, subset_name
)
representation = io.find_one({"name": representation_name,
"type": "representation",
"parent": version["_id"]})
assert representation, (
"Could not find representation in the database with"
" the name '%s'" % representation_name)
api.switch(container, representation)
return representation
| Python | 0.000008 |
6c004827c642c3aee4166dd8689dc40104be6346 | Stop hard-coding satellites, and make the tester easy to run on any notebook path. Allow specifying output and error files as args rather than just on command line. | src/verify_notebook.py | src/verify_notebook.py | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import json
import subprocess
import sys
def get_out_and_err(notebook_path=None, outfile=None, errfile=None):
output = None
error = None
if notebook_path is not None:
cmd = 'runipy --matplotlib --stdout "%s.ipynb"' % (notebook_path,)
p = subprocess.Popen(cmd, shell=True, stdin=None,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
close_fds=True)
output = p.stdout.read()
error = p.stderr.read()
elif outfile is not None and errfile is not None:
with open(outfile, 'r') as out:
output = out.read()
with open(errfile, 'r') as err:
error = err.read()
elif len(sys.argv) == 3:
return get_out_and_err(sys.argv[1], sys.argv[2])
else:
raise ValueError("Specify a notebook path or out and err files to verify.")
return (output, error)
def check_results(results, warnings_are_errors=False, content_tester=None):
(output, error) = results
notebook = json.loads(output)
cells = notebook['worksheets'][0]['cells']
for cell in cells:
if cell['cell_type'] in ('markdown', 'heading'):
pass
elif cell['cell_type'] == 'code':
for output in cell['outputs']:
if output['output_type'] == 'pyerr':
raise ValueError(str(output))
elif output['output_type'] == 'stream':
for msg in output['text']:
if re.search(r'/(.*):\d+:.*(warn(ings?)?)\W', msg, re.I):
if warnings_are_errors:
raise ValueError(msg)
else:
print "WARNING: ", msg
elif output['output_type'] == 'pyout':
if content_tester:
content_tester(cell) # Content-tester should raise on error.
else:
pass
elif output['output_type'] == 'display_data':
pass # Assume they're good.
else:
raise ValueError(str(output))
else:
raise ValueError(str(output))
if 'exception' in error or 'nonzero exit status' in error:
raise ValueError(error)
def run_and_verify_notebook(notebook_path, **kwargs):
'''runipy it and verify it.'''
check_results(get_out_and_err(notebook_path), **kwargs)
| # -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import json
import subprocess
import sys
def get_out_and_err():
output = None
if len(sys.argv) == 3:
with open(sys.argv[1], 'r') as out:
output = out.read()
with open(sys.argv[2], 'r') as err:
error = err.read()
else:
cmd = 'runipy --matplotlib --stdout Satellites.ipynb'
p = subprocess.Popen(cmd, shell=True, stdin=None,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
close_fds=True)
output = p.stdout.read()
error = p.stderr.read()
return (output, error)
def check_results(results, warnings_are_errors=False, content_tester=None):
(output, error) = results
notebook = json.loads(output)
cells = notebook['worksheets'][0]['cells']
for cell in cells:
if cell['cell_type'] in ('markdown', 'heading'):
pass
elif cell['cell_type'] == 'code':
for output in cell['outputs']:
if output['output_type'] == 'pyerr':
raise ValueError(str(output))
elif output['output_type'] == 'stream':
for msg in output['text']:
if re.search(r'/(.*):\d+:.*(warn(ings?)?)\W', msg, re.I):
if warnings_are_errors:
raise ValueError(msg)
else:
print "WARNING: ", msg
elif output['output_type'] == 'pyout':
if content_tester:
content_tester(cell)
else:
pass
elif output['output_type'] == 'display_data':
pass # Assume they're good.
else:
raise ValueError(str(output))
else:
raise ValueError(str(output))
if 'exception' in error or 'nonzero exit status' in error:
raise ValueError(error)
def main():
check_results(get_out_and_err())
if __name__ == "__main__":
main()
| Python | 0 |
a9f6432288f74b9f590a91649de1e475ed523806 | Correct data format | restclients/test/library/mylibinfo.py | restclients/test/library/mylibinfo.py | from datetime import date
from django.test import TestCase
from django.conf import settings
from restclients.library.mylibinfo import get_account, get_account_html
from restclients.exceptions import DataFailureException
class MyLibInfoTest(TestCase):
def test_get_account(self):
with self.settings(
RESTCLIENTS_LIBRARIES_DAO_CLASS =
'restclients.dao_implementation.libraries.File'):
account = get_account("javerage")
self.assertEquals(account.next_due, date(2014, 5, 27))
self.assertEquals(account.holds_ready, 1)
self.assertEquals(account.fines, 5.35)
self.assertEquals(account.items_loaned, 3)
self.assertEquals(account.get_next_due_date_str(True), "Tue, May 27")
self.assertEquals(account.get_next_due_date_str(False), "2014-05-27")
account = get_account("jnewstudent")
self.assertIsNone(account.next_due)
self.assertEquals(account.holds_ready, 0)
self.assertEquals(account.fines, 0.0)
self.assertEquals(account.items_loaned, 0)
def test_html_response(self):
with self.settings(
RESTCLIENTS_LIBRARIES_DAO_CLASS =
'restclients.dao_implementation.libraries.File'):
response = get_account_html("javerage")
self.assertEquals(response, '<p>You have 7 items checked out.<br>\nYou have items due back on 2014-04-29.<br>\nYou don\'t owe any fines.</p>\n<a href="http://alliance-primo.hosted.exlibrisgroup.com/primo_library/libweb/action/dlBasketGet.do?vid=UW&redirectTo=myAccount">Go to your account</a>')
def test_invalid_user(self):
with self.settings(
RESTCLIENTS_LIBRARIES_DAO_CLASS =
'restclients.dao_implementation.libraries.File'):
#Testing error message in a 200 response
self.assertRaises(DataFailureException, get_account, "invalidnetid")
#Testing non-200 response
self.assertRaises(DataFailureException, get_account, "invalidnetid123")
try:
get_account("invalidnetid")
except DataFailureException as ex:
self.assertEquals(ex.msg, "[Alma] User not found/401651")
def test_with_timestamp(self):
with self.settings(
RESTCLIENTS_LIBRARIES_DAO_CLASS=
'restclients.dao_implementation.libraries.File'):
response = get_account_html('javerage', timestamp=1391122522900)
self.assertEquals(response, '<p>You have 7 items checked out.<br>\n You have items due back on 2014-04-29.<br>\n You don\'t owe any fines.</p>\n <a href="http://alliance-primo.hosted.exlibrisgroup.com/primo_library/libweb/action/dlBasketGet.do?vid=UW&redirectTo=myAccount">Go to your account</a>')
| from datetime import date
from django.test import TestCase
from django.conf import settings
from restclients.library.mylibinfo import get_account, get_account_html
from restclients.exceptions import DataFailureException
class MyLibInfoTest(TestCase):
def test_get_account(self):
with self.settings(
RESTCLIENTS_LIBRARIES_DAO_CLASS =
'restclients.dao_implementation.libraries.File'):
account = get_account("javerage")
self.assertEquals(account.next_due, date(2014, 5, 27))
self.assertEquals(account.holds_ready, 1)
self.assertEquals(account.fines, 5.35)
self.assertEquals(account.items_loaned, 3)
self.assertEquals(account.get_next_due_date_str(True), "May 27, 2014")
self.assertEquals(account.get_next_due_date_str(False), "2014-05-27")
account = get_account("jnewstudent")
self.assertIsNone(account.next_due)
self.assertEquals(account.holds_ready, 0)
self.assertEquals(account.fines, 0.0)
self.assertEquals(account.items_loaned, 0)
def test_html_response(self):
with self.settings(
RESTCLIENTS_LIBRARIES_DAO_CLASS =
'restclients.dao_implementation.libraries.File'):
response = get_account_html("javerage")
self.assertEquals(response, '<p>You have 7 items checked out.<br>\nYou have items due back on 2014-04-29.<br>\nYou don\'t owe any fines.</p>\n<a href="http://alliance-primo.hosted.exlibrisgroup.com/primo_library/libweb/action/dlBasketGet.do?vid=UW&redirectTo=myAccount">Go to your account</a>')
def test_invalid_user(self):
with self.settings(
RESTCLIENTS_LIBRARIES_DAO_CLASS =
'restclients.dao_implementation.libraries.File'):
#Testing error message in a 200 response
self.assertRaises(DataFailureException, get_account, "invalidnetid")
#Testing non-200 response
self.assertRaises(DataFailureException, get_account, "invalidnetid123")
try:
get_account("invalidnetid")
except DataFailureException as ex:
self.assertEquals(ex.msg, "[Alma] User not found/401651")
def test_with_timestamp(self):
with self.settings(
RESTCLIENTS_LIBRARIES_DAO_CLASS=
'restclients.dao_implementation.libraries.File'):
response = get_account_html('javerage', timestamp=1391122522900)
self.assertEquals(response, '<p>You have 7 items checked out.<br>\n You have items due back on 2014-04-29.<br>\n You don\'t owe any fines.</p>\n <a href="http://alliance-primo.hosted.exlibrisgroup.com/primo_library/libweb/action/dlBasketGet.do?vid=UW&redirectTo=myAccount">Go to your account</a>')
| Python | 0.999995 |
b70d3c2c75befe747079697a66b1bb417749e786 | Update Workflow: add abstract method .on_failure() | simpleflow/workflow.py | simpleflow/workflow.py | from __future__ import absolute_import
class Workflow(object):
"""
Main interface to define a workflow by submitting tasks for asynchronous
execution.
The actual behavior depends on the executor backend.
"""
def __init__(self, executor):
self._executor = executor
def submit(self, func, *args, **kwargs):
"""
Submit a function for asynchronous execution.
:param func: callable registered as an task.
:type func: task.ActivityTask | task.WorkflowTask.
:param *args: arguments passed to the task.
:type *args: Sequence.
:param **kwargs: keyword-arguments passed to the task.
:type **kwargs: Mapping (dict).
:returns:
:rtype: Future.
"""
return self._executor.submit(func, *args, **kwargs)
def map(self, func, iterable):
"""
Submit a function for asynchronous execution for each value of
*iterable*.
:param func: callable registered as an task.
:type func: task.ActivityTask | task.WorkflowTask.
:param iterable: collections of arguments passed to the task.
:type iterable: Iterable.
"""
return self._executor.map(func, iterable)
def starmap(self, func, iterable):
"""
Submit a function for asynchronous execution for each value of
*iterable*.
:param func: callable registered as an task.
:type func: task.ActivityTask | task.WorkflowTask.
:param iterable: collections of multiple-arguments passed to the task
as positional arguments. They are destructured using
the ``*`` operator.
:type iterable: Iterable.
"""
return self._executor.starmap(func, iterable)
def fail(self, reason, details=None):
self._executor.fail(reason, details)
def run(self, *args, **kwargs):
raise NotImplementedError
def on_failure(self, history, reason, details=None):
"""
The executor calls this method when the workflow fails.
"""
raise NotImplementedError
| from __future__ import absolute_import
class Workflow(object):
"""
Main interface to define a workflow by submitting tasks for asynchronous
execution.
The actual behavior depends on the executor backend.
"""
def __init__(self, executor):
self._executor = executor
def submit(self, func, *args, **kwargs):
"""
Submit a function for asynchronous execution.
:param func: callable registered as an task.
:type func: task.ActivityTask | task.WorkflowTask.
:param *args: arguments passed to the task.
:type *args: Sequence.
:param **kwargs: keyword-arguments passed to the task.
:type **kwargs: Mapping (dict).
:returns:
:rtype: Future.
"""
return self._executor.submit(func, *args, **kwargs)
def map(self, func, iterable):
"""
Submit a function for asynchronous execution for each value of
*iterable*.
:param func: callable registered as an task.
:type func: task.ActivityTask | task.WorkflowTask.
:param iterable: collections of arguments passed to the task.
:type iterable: Iterable.
"""
return self._executor.map(func, iterable)
def starmap(self, func, iterable):
"""
Submit a function for asynchronous execution for each value of
*iterable*.
:param func: callable registered as an task.
:type func: task.ActivityTask | task.WorkflowTask.
:param iterable: collections of multiple-arguments passed to the task
as positional arguments. They are destructured using
the ``*`` operator.
:type iterable: Iterable.
"""
return self._executor.starmap(func, iterable)
def fail(self, reason, details=None):
self._executor.fail(reason, details)
def run(self, *args, **kwargs):
raise NotImplementedError
| Python | 0.000002 |
6a67c22a9843517ece1ee5e890ea38873b44648b | Teste html_to_latex. | libretto/templatetags/extras.py | libretto/templatetags/extras.py | # coding: utf-8
from __future__ import unicode_literals
import re
from bs4 import BeautifulSoup, Comment
from django.template import Library
from django.utils.encoding import smart_text
from ..utils import abbreviate as abbreviate_func
register = Library()
@register.filter
def stripchars(text):
return smart_text(BeautifulSoup(text, 'html.parser'))
@register.filter
def striptags_n_chars(text):
return smart_text(BeautifulSoup(text, 'html.parser').get_text())
compact_paragraph_re = re.compile(r'(?<![\n\s ])\n+[\s\n ]*\n+(?![\n\s ])')
@register.filter
def compact_paragraph(text):
return compact_paragraph_re.sub(r' / ', text.strip('\n'))
escaped_chars_re = re.compile(r'([#$%&_])')
@register.filter
def escape_latex(text):
return escaped_chars_re.sub(r'\\\1', text)
html_latex_bindings = (
(dict(name='h1'), r'\part*{', r'}'),
(dict(name='h2'), r'\chapter*{', r'}'),
(dict(name='h3'), r'\section*{', r'}'),
(dict(name='p'), '\n\n', '\n\n'),
(dict(name='cite'), r'\textit{', r'}'),
(dict(name='em'), r'\textit{', r'}'),
(dict(name='i'), r'\textit{', r'}'),
(dict(name='strong'), r'\textbf{', r'}'),
(dict(name='b'), r'\textbf{', r'}'),
(dict(name='small'), r'\small{', r'}'),
(dict(class_='sc'), r'\textsc{', r'}'),
(dict(style=re.compile(r'.*font-variant:\s*'
r'small-caps;.*')), r'\textsc{', r'}'),
)
@register.filter
def html_to_latex(text):
r"""
Permet de convertir du HTML en syntaxe LaTeX.
Attention, ce convertisseur est parfaitement incomplet et ne devrait pas
être utilisé en dehors d'un contexte très précis.
>>> print(html_to_latex('<h1>Bonjour à tous</h1>'))
\part*{Bonjour à tous}
>>> print(html_to_latex('<span style="font-series: bold; font-variant: small-caps;">Écriture romaine</span>'))
\textsc{Écriture romaine}
>>> print(html_to_latex('Vive les <!-- cons -->poilus !'))
Vive les poilus !
"""
soup = BeautifulSoup(text)
for html_selectors, latex_open_tag, latex_close_tag in html_latex_bindings:
for tag in soup.find_all(**html_selectors):
tag.insert(0, latex_open_tag)
tag.append(latex_close_tag)
for comment in soup.find_all(text=lambda text: isinstance(text, Comment)):
comment.extract()
return smart_text(soup.get_text())
@register.filter
def abbreviate(string, min_vowels=0, min_len=1, tags=True, enabled=True):
return abbreviate_func(string, min_vowels=min_vowels, min_len=min_len,
tags=tags, enabled=enabled)
| # coding: utf-8
from __future__ import unicode_literals
import re
from bs4 import BeautifulSoup, Comment
from django.template import Library
from django.utils.encoding import smart_text
from ..utils import abbreviate as abbreviate_func
register = Library()
@register.filter
def stripchars(text):
return smart_text(BeautifulSoup(text, 'html.parser'))
@register.filter
def striptags_n_chars(text):
return smart_text(BeautifulSoup(text, 'html.parser').get_text())
compact_paragraph_re = re.compile(r'(?<![\n\s ])\n+[\s\n ]*\n+(?![\n\s ])')
@register.filter
def compact_paragraph(text):
return compact_paragraph_re.sub(r' / ', text.strip('\n'))
escaped_chars_re = re.compile(r'([#$%&_])')
@register.filter
def escape_latex(text):
return escaped_chars_re.sub(r'\\\1', text)
html_latex_bindings = (
(dict(name='h1'), r'\part*{', r'}'),
(dict(name='h2'), r'\chapter*{', r'}'),
(dict(name='h3'), r'\section*{', r'}'),
(dict(name='p'), '\n\n', '\n\n'),
(dict(name='cite'), r'\textit{', r'}'),
(dict(name='em'), r'\textit{', r'}'),
(dict(name='i'), r'\textit{', r'}'),
(dict(name='strong'), r'\textbf{', r'}'),
(dict(name='b'), r'\textbf{', r'}'),
(dict(name='small'), r'\small{', r'}'),
(dict(class_='sc'), r'\textsc{', r'}'),
(dict(style=re.compile(r'.*font-variant:\s*'
r'small-caps;.*')), r'\textsc{', r'}'),
)
@register.filter
def html_to_latex(text):
"""
Permet de convertir du HTML en syntaxe LaTeX.
Attention, ce convertisseur est parfaitement incomplet et ne devrait pas
être utilisé en dehors d'un contexte très précis.
"""
soup = BeautifulSoup(text)
for html_selectors, latex_open_tag, latex_close_tag in html_latex_bindings:
for tag in soup.find_all(**html_selectors):
tag.insert(0, latex_open_tag)
tag.append(latex_close_tag)
for comment in soup.find_all(text=lambda text: isinstance(text, Comment)):
comment.extract()
return smart_text(soup.get_text())
@register.filter
def abbreviate(string, min_vowels=0, min_len=1, tags=True, enabled=True):
return abbreviate_func(string, min_vowels=min_vowels, min_len=min_len,
tags=tags, enabled=enabled)
| Python | 0 |
268c577acd07bce4eb7e63bab6a38a7b436bc2e5 | Include request ip in monitored data | frappe/monitor.py | frappe/monitor.py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from datetime import datetime
import json
import traceback
import frappe
import os
import uuid
MONITOR_REDIS_KEY = "monitor-transactions"
def start(transaction_type="request", method=None, kwargs=None):
if frappe.conf.monitor:
frappe.local.monitor = Monitor(
transaction_type=transaction_type, method=method, kwargs=kwargs
)
def stop():
if frappe.conf.monitor and hasattr(frappe.local, "monitor"):
frappe.local.monitor.dump()
def log_file():
return os.path.join(frappe.utils.get_bench_path(), "logs", "monitor.json.log")
class Monitor:
def __init__(self, transaction_type=None, method=None, kwargs=None):
try:
self.site = frappe.local.site
self.timestamp = datetime.utcnow()
self.transaction_type = transaction_type
self.uuid = uuid.uuid4()
if self.transaction_type == "request":
self.data = frappe.form_dict
self.headers = dict(frappe.request.headers)
self.ip = frappe.local.request_ip
self.method = frappe.request.method
self.path = frappe.request.path
else:
self.kwargs = kwargs
self.method = method
except Exception:
traceback.print_exc()
def dump(self):
try:
timediff = datetime.utcnow() - self.timestamp
# Obtain duration in microseconds
self.duration = int(timediff.total_seconds() * 1000000)
data = {
"uuid": self.uuid,
"duration": self.duration,
"site": self.site,
"timestamp": self.timestamp.isoformat(sep=" "),
"transaction_type": self.transaction_type,
}
if self.transaction_type == "request":
update = {
"data": self.data,
"headers": self.headers,
"ip": self.ip,
"method": self.method,
"path": self.path,
}
else:
update = {
"kwargs": self.kwargs,
"method": self.method,
}
data.update(update)
json_data = json.dumps(data, sort_keys=True, default=str)
store(json_data)
except Exception:
traceback.print_exc()
def store(json_data):
MAX_LOGS = 1000000
if frappe.cache().llen(MONITOR_REDIS_KEY) > MAX_LOGS:
frappe.cache().ltrim(MONITOR_REDIS_KEY, 1, -1)
frappe.cache().rpush(MONITOR_REDIS_KEY, json_data)
def flush():
try:
# Fetch all the logs without removing from cache
logs = frappe.cache().lrange(MONITOR_REDIS_KEY, 0, -1)
logs = list(map(frappe.safe_decode, logs))
with open(log_file(), "a", os.O_NONBLOCK) as f:
f.write("\n".join(logs))
f.write("\n")
# Remove fetched entries from cache
frappe.cache().ltrim(MONITOR_REDIS_KEY, len(logs) - 1, -1)
except Exception:
traceback.print_exc()
| # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from datetime import datetime
import json
import traceback
import frappe
import os
import uuid
MONITOR_REDIS_KEY = "monitor-transactions"
def start(transaction_type="request", method=None, kwargs=None):
if frappe.conf.monitor:
frappe.local.monitor = Monitor(
transaction_type=transaction_type, method=method, kwargs=kwargs
)
def stop():
if frappe.conf.monitor and hasattr(frappe.local, "monitor"):
frappe.local.monitor.dump()
def log_file():
return os.path.join(frappe.utils.get_bench_path(), "logs", "monitor.json.log")
class Monitor:
def __init__(self, transaction_type=None, method=None, kwargs=None):
try:
self.site = frappe.local.site
self.timestamp = datetime.utcnow()
self.transaction_type = transaction_type
self.uuid = uuid.uuid4()
if self.transaction_type == "request":
self.data = frappe.form_dict
self.headers = dict(frappe.request.headers)
self.method = frappe.request.method
self.path = frappe.request.path
else:
self.kwargs = kwargs
self.method = method
except Exception:
traceback.print_exc()
def dump(self):
try:
timediff = datetime.utcnow() - self.timestamp
# Obtain duration in microseconds
self.duration = int(timediff.total_seconds() * 1000000)
data = {
"uuid": self.uuid,
"duration": self.duration,
"site": self.site,
"timestamp": self.timestamp.isoformat(sep=" "),
"transaction_type": self.transaction_type,
}
if self.transaction_type == "request":
update = {
"data": self.data,
"headers": self.headers,
"method": self.method,
"path": self.path,
}
else:
update = {
"kwargs": self.kwargs,
"method": self.method,
}
data.update(update)
json_data = json.dumps(data, sort_keys=True, default=str)
store(json_data)
except Exception:
traceback.print_exc()
def store(json_data):
MAX_LOGS = 1000000
if frappe.cache().llen(MONITOR_REDIS_KEY) > MAX_LOGS:
frappe.cache().ltrim(MONITOR_REDIS_KEY, 1, -1)
frappe.cache().rpush(MONITOR_REDIS_KEY, json_data)
def flush():
try:
# Fetch all the logs without removing from cache
logs = frappe.cache().lrange(MONITOR_REDIS_KEY, 0, -1)
logs = list(map(frappe.safe_decode, logs))
with open(log_file(), "a", os.O_NONBLOCK) as f:
f.write("\n".join(logs))
f.write("\n")
# Remove fetched entries from cache
frappe.cache().ltrim(MONITOR_REDIS_KEY, len(logs) - 1, -1)
except Exception:
traceback.print_exc()
| Python | 0 |
7a5cb953f64dce841d88b9c8b45be7719c617ba2 | Fix games init file | games/__init__.py | games/__init__.py | import Game
import Mancala
import Player
import TicTacToe | __all__ = ['Game', 'Mancala', 'Player', 'TicTacToe'] | Python | 0.000001 |
147c85aff3e93ebb39d984a05cec970b3dc7edc0 | Add expires_at field to jwt that was removed accidentally (#242) | frontstage/jwt.py | frontstage/jwt.py | """
Module to create jwt token.
"""
from datetime import datetime, timedelta
from jose import jwt
from frontstage import app
def timestamp_token(token):
"""Time stamp the expires_in argument of the OAuth2 token. And replace with an expires_in UTC timestamp"""
current_time = datetime.now()
expires_in = current_time + timedelta(seconds=token['expires_in'])
data_dict_for_jwt_token = {
"refresh_token": token['refresh_token'],
"access_token": token['access_token'],
"expires_at": expires_in.timestamp(),
"role": "respondent",
"party_id": token['party_id']
}
return data_dict_for_jwt_token
def encode(data):
"""Encode data in jwt token."""
return jwt.encode(data, app.config['JWT_SECRET'], algorithm=app.config['JWT_ALGORITHM'])
def decode(token):
"""Decode data in jwt token."""
return jwt.decode(token, app.config['JWT_SECRET'], algorithms=[app.config['JWT_ALGORITHM']])
| """
Module to create jwt token.
"""
from datetime import datetime, timedelta
from jose import jwt
from frontstage import app
def timestamp_token(token):
"""Time stamp the expires_in argument of the OAuth2 token. And replace with an expires_in UTC timestamp"""
current_time = datetime.now()
expires_in = current_time + timedelta(seconds=token['expires_in'])
data_dict_for_jwt_token = {
"refresh_token": token['refresh_token'],
"access_token": token['access_token'],
"role": "respondent",
"party_id": token['party_id']
}
return data_dict_for_jwt_token
def encode(data):
"""Encode data in jwt token."""
return jwt.encode(data, app.config['JWT_SECRET'], algorithm=app.config['JWT_ALGORITHM'])
def decode(token):
"""Decode data in jwt token."""
return jwt.decode(token, app.config['JWT_SECRET'], algorithms=[app.config['JWT_ALGORITHM']])
| Python | 0 |
a23e385d5de4ae3c36eb7e5e37b7bfcc6ed5d129 | Add bat file suffix for invoking dart2js. | site/try/build_try.gyp | site/try/build_try.gyp | # Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE
{
'variables' : {
'script_suffix%': '',
},
'conditions' : [
['OS=="win"', {
'variables' : {
'script_suffix': '.bat',
},
}],
],
'targets': [
{
'target_name': 'try_site',
'type': 'none',
'dependencies': [
'../../runtime/dart-runtime.gyp:dart',
'../../create_sdk.gyp:create_sdk_internal',
],
'variables': {
'try_dart_static_files': [
'index.html',
'dartlang-style.css',
'iframe.html',
'iframe.js',
'dart-icon.png', # iOS icon.
'dart-iphone5.png', # iPhone 5 splash screen.
'dart-icon-196px.png', # Android icon.
'try-dart-screenshot.png', # Google+ screen shot.
'../../third_party/font-awesome/font-awesome-4.0.3/'
'fonts/fontawesome-webfont.woff',
'favicon.ico',
'<(SHARED_INTERMEDIATE_DIR)/leap.dart.js',
'<(SHARED_INTERMEDIATE_DIR)/sdk.json',
],
},
'actions': [
{
'action_name': 'sdk_json',
'message': 'Creating sdk.json',
'inputs': [
# Depending on this file ensures that the SDK is built before this
# action is executed.
'<(PRODUCT_DIR)/dart-sdk/README',
# This dependency is redundant for now, as this directory is
# implicitly part of the dependencies for dart-sdk/README.
'<!@(["python", "../../tools/list_files.py", "\\.dart$", '
'"../../sdk/lib/_internal/compiler/samples/jsonify"])',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/sdk.json',
],
'action': [
'<(PRODUCT_DIR)/dart-sdk/bin/'
'<(EXECUTABLE_PREFIX)dart<(EXECUTABLE_SUFFIX)',
'-Dlist_all_libraries=true',
'-DoutputJson=true',
'../../sdk/lib/_internal/compiler/samples/jsonify/jsonify.dart',
'<(SHARED_INTERMEDIATE_DIR)/sdk.json',
],
},
{
'action_name': 'compile',
'message': 'Creating leap.dart.js',
'inputs': [
# Depending on this file ensures that the SDK is built before this
# action is executed.
'<(PRODUCT_DIR)/dart-sdk/README',
'<!@(["python", "../../tools/list_files.py", "\\.dart$", "src"])',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/leap.dart.js',
],
'action': [
'<(PRODUCT_DIR)/dart-sdk/bin/dart2js<(script_suffix)',
'-p../../sdk/lib/_internal/',
'-Denable_ir=false',
'src/leap.dart',
'-o<(SHARED_INTERMEDIATE_DIR)/leap.dart.js',
],
},
{
'action_name': 'nossl_appcache',
'message': 'Creating nossl.appcache',
'inputs': [
'add_time_stamp.py',
'nossl.appcache',
'<@(try_dart_static_files)',
'build_try.gyp', # If the list of files changed.
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/nossl.appcache',
],
# Try Dart! uses AppCache. Cached files are only validated when the
# manifest changes (not its timestamp, but its actual contents).
'action': [
'python',
'add_time_stamp.py',
'nossl.appcache',
'<(SHARED_INTERMEDIATE_DIR)/nossl.appcache',
],
},
],
'copies': [
{
# Destination directory.
'destination': '<(PRODUCT_DIR)/try_dartlang_org/',
# List of files to be copied (creates implicit build dependencies).
'files': [
'app.yaml',
'<@(try_dart_static_files)',
'<(SHARED_INTERMEDIATE_DIR)/nossl.appcache',
],
},
],
},
],
}
| # Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE
{
'targets': [
{
'target_name': 'try_site',
'type': 'none',
'dependencies': [
'../../runtime/dart-runtime.gyp:dart',
'../../create_sdk.gyp:create_sdk_internal',
],
'variables': {
'try_dart_static_files': [
'index.html',
'dartlang-style.css',
'iframe.html',
'iframe.js',
'dart-icon.png', # iOS icon.
'dart-iphone5.png', # iPhone 5 splash screen.
'dart-icon-196px.png', # Android icon.
'try-dart-screenshot.png', # Google+ screen shot.
'../../third_party/font-awesome/font-awesome-4.0.3/'
'fonts/fontawesome-webfont.woff',
'favicon.ico',
'<(SHARED_INTERMEDIATE_DIR)/leap.dart.js',
'<(SHARED_INTERMEDIATE_DIR)/sdk.json',
],
},
'actions': [
{
'action_name': 'sdk_json',
'message': 'Creating sdk.json',
'inputs': [
# Depending on this file ensures that the SDK is built before this
# action is executed.
'<(PRODUCT_DIR)/dart-sdk/README',
# This dependency is redundant for now, as this directory is
# implicitly part of the dependencies for dart-sdk/README.
'<!@(["python", "../../tools/list_files.py", "\\.dart$", '
'"../../sdk/lib/_internal/compiler/samples/jsonify"])',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/sdk.json',
],
'action': [
'<(PRODUCT_DIR)/dart-sdk/bin/'
'<(EXECUTABLE_PREFIX)dart<(EXECUTABLE_SUFFIX)',
'-Dlist_all_libraries=true',
'-DoutputJson=true',
'../../sdk/lib/_internal/compiler/samples/jsonify/jsonify.dart',
'<(SHARED_INTERMEDIATE_DIR)/sdk.json',
],
},
{
'action_name': 'compile',
'message': 'Creating leap.dart.js',
'inputs': [
# Depending on this file ensures that the SDK is built before this
# action is executed.
'<(PRODUCT_DIR)/dart-sdk/README',
'<!@(["python", "../../tools/list_files.py", "\\.dart$", "src"])',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/leap.dart.js',
],
'action': [
'<(PRODUCT_DIR)/dart-sdk/bin/dart2js',
'-p../../sdk/lib/_internal/',
'-Denable_ir=false',
'src/leap.dart',
'-o<(SHARED_INTERMEDIATE_DIR)/leap.dart.js',
],
},
{
'action_name': 'nossl_appcache',
'message': 'Creating nossl.appcache',
'inputs': [
'add_time_stamp.py',
'nossl.appcache',
'<@(try_dart_static_files)',
'build_try.gyp', # If the list of files changed.
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/nossl.appcache',
],
# Try Dart! uses AppCache. Cached files are only validated when the
# manifest changes (not its timestamp, but its actual contents).
'action': [
'python',
'add_time_stamp.py',
'nossl.appcache',
'<(SHARED_INTERMEDIATE_DIR)/nossl.appcache',
],
},
],
'copies': [
{
# Destination directory.
'destination': '<(PRODUCT_DIR)/try_dartlang_org/',
# List of files to be copied (creates implicit build dependencies).
'files': [
'app.yaml',
'<@(try_dart_static_files)',
'<(SHARED_INTERMEDIATE_DIR)/nossl.appcache',
],
},
],
},
],
}
| Python | 0.005184 |
2c870ee5b3d4df5a2f628350b7d4897f301c34de | Delete commented out urlparams code | badger/helpers.py | badger/helpers.py | import hashlib
import urllib
import urlparse
from django.conf import settings
from django.contrib.auth.models import SiteProfileNotAvailable
from django.core.exceptions import ObjectDoesNotExist
from django.utils.html import conditional_escape
try:
from commons.urlresolvers import reverse
except ImportError, e:
from django.core.urlresolvers import reverse
try:
from tower import ugettext_lazy as _
except ImportError, e:
from django.utils.translation import ugettext_lazy as _
import jingo
import jinja2
from jinja2 import evalcontextfilter, Markup, escape
from jingo import register, env
from .models import (Badge, Award, Nomination, Progress,
BadgeAwardNotAllowedException)
@register.function
def user_avatar(user, secure=False, size=256, rating='pg', default=''):
try:
profile = user.get_profile()
if profile.avatar:
return profile.avatar.url
except AttributeError:
pass
except SiteProfileNotAvailable:
pass
except ObjectDoesNotExist:
pass
base_url = (secure and 'https://secure.gravatar.com' or
'http://www.gravatar.com')
m = hashlib.md5(user.email)
return '%(base_url)s/avatar/%(hash)s?%(params)s' % dict(
base_url=base_url, hash=m.hexdigest(),
params=urllib.urlencode(dict(
s=size, d=default, r=rating
))
)
@register.function
def user_awards(user):
return Award.objects.filter(user=user)
@register.function
def user_badges(user):
return Badge.objects.filter(creator=user)
@register.function
def badger_allows_add_by(user):
return Badge.objects.allows_add_by(user)
@register.function
def qr_code_image(value, alt=None, size=150):
# TODO: Bake our own QR codes, someday soon!
url = conditional_escape("http://chart.apis.google.com/chart?%s" % \
urllib.urlencode({'chs': '%sx%s' % (size, size), 'cht': 'qr', 'chl': value, 'choe': 'UTF-8'}))
alt = conditional_escape(alt or value)
return Markup(u"""<img class="qrcode" src="%s" width="%s" height="%s" alt="%s" />""" %
(url, size, size, alt))
@register.function
def nominations_pending_approval(user):
return Nomination.objects.filter(badge__creator=user,
approver__isnull=True)
@register.function
def nominations_pending_acceptance(user):
return Nomination.objects.filter(nominee=user,
approver__isnull=False,
accepted=False)
| import hashlib
import urllib
import urlparse
from django.conf import settings
from django.contrib.auth.models import SiteProfileNotAvailable
from django.core.exceptions import ObjectDoesNotExist
from django.utils.html import conditional_escape
try:
from commons.urlresolvers import reverse
except ImportError, e:
from django.core.urlresolvers import reverse
try:
from tower import ugettext_lazy as _
except ImportError, e:
from django.utils.translation import ugettext_lazy as _
import jingo
import jinja2
from jinja2 import evalcontextfilter, Markup, escape
from jingo import register, env
from .models import (Badge, Award, Nomination, Progress,
BadgeAwardNotAllowedException)
@register.function
def user_avatar(user, secure=False, size=256, rating='pg', default=''):
try:
profile = user.get_profile()
if profile.avatar:
return profile.avatar.url
except AttributeError:
pass
except SiteProfileNotAvailable:
pass
except ObjectDoesNotExist:
pass
base_url = (secure and 'https://secure.gravatar.com' or
'http://www.gravatar.com')
m = hashlib.md5(user.email)
return '%(base_url)s/avatar/%(hash)s?%(params)s' % dict(
base_url=base_url, hash=m.hexdigest(),
params=urllib.urlencode(dict(
s=size, d=default, r=rating
))
)
@register.function
def user_awards(user):
return Award.objects.filter(user=user)
@register.function
def user_badges(user):
return Badge.objects.filter(creator=user)
@register.function
def badger_allows_add_by(user):
return Badge.objects.allows_add_by(user)
@register.function
def qr_code_image(value, alt=None, size=150):
# TODO: Bake our own QR codes, someday soon!
url = conditional_escape("http://chart.apis.google.com/chart?%s" % \
urllib.urlencode({'chs': '%sx%s' % (size, size), 'cht': 'qr', 'chl': value, 'choe': 'UTF-8'}))
alt = conditional_escape(alt or value)
return Markup(u"""<img class="qrcode" src="%s" width="%s" height="%s" alt="%s" />""" %
(url, size, size, alt))
@register.function
def nominations_pending_approval(user):
return Nomination.objects.filter(badge__creator=user,
approver__isnull=True)
@register.function
def nominations_pending_acceptance(user):
return Nomination.objects.filter(nominee=user,
approver__isnull=False,
accepted=False)
# FIXME - This code is broken because smart_str doesn't exist in the namespace
# Since it's not used anywhere in django-badger and I'm not sure whether
# deleting it is ok or not, I'm commenting it out.
#
# @register.filter
# def urlparams(url_, hash=None, **query):
# """Add a fragment and/or query paramaters to a URL.
#
# New query params will be appended to exising parameters, except duplicate
# names, which will be replaced.
# """
# url = urlparse.urlparse(url_)
# fragment = hash if hash is not None else url.fragment
#
# # Use dict(parse_qsl) so we don't get lists of values.
# q = url.query
# query_dict = dict(urlparse.parse_qsl(smart_str(q))) if q else {}
# query_dict.update((k, v) for k, v in query.items())
#
# query_string = _urlencode([(k, v) for k, v in query_dict.items()
# if v is not None])
# new = urlparse.ParseResult(url.scheme, url.netloc, url.path, url.params,
# query_string, fragment)
# return new.geturl()
#
#
# def _urlencode(items):
# """A Unicode-safe URLencoder."""
# try:
# return urllib.urlencode(items)
# except UnicodeEncodeError:
# return urllib.urlencode([(k, smart_str(v)) for k, v in items])
| Python | 0 |
e8b49384d3e9e23485199ef131f0cb8f818a2a02 | edit default port | get-image-part.py | get-image-part.py | import tornado.ioloop
import tornado.web
import tornado.wsgi
import io
import time
import random
import os
from PIL import Image
N = 20
class MainHandler(tornado.web.RequestHandler):
def get(self):
n = int(random.uniform(0,N))
img = int(self.get_argument("img"))
fn = os.path.join(os.path.dirname(__file__), "images/"+str(img)+".jpg")
im = Image.open(fn)
dim = im.size
c = im.crop((int(n*dim[0]/N), 0, int((n+1)*dim[0]/N), dim[1]))
c = c.convert("RGBA")
bio = io.BytesIO()
c.save(bio, 'PNG')
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Content-Type', 'image/jpeg')
self.set_header('X-ECE459-Fragment', str(n))
time.sleep(abs(random.gauss(0.2, 0.2)))
self.write(bio.getvalue())
application = tornado.wsgi.WSGIApplication([
(r"/image", MainHandler),
])
if __name__ == "__main__":
import logging
import wsgiref.simple_server
logger = logging.getLogger('tornado.application')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
server = wsgiref.simple_server.make_server('', 4590, application)
server.serve_forever()
| import tornado.ioloop
import tornado.web
import tornado.wsgi
import io
import time
import random
import os
from PIL import Image
N = 20
class MainHandler(tornado.web.RequestHandler):
def get(self):
n = int(random.uniform(0,N))
img = int(self.get_argument("img"))
fn = os.path.join(os.path.dirname(__file__), "images/"+str(img)+".jpg")
im = Image.open(fn)
dim = im.size
c = im.crop((int(n*dim[0]/N), 0, int((n+1)*dim[0]/N), dim[1]))
c = c.convert("RGBA")
bio = io.BytesIO()
c.save(bio, 'PNG')
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Content-Type', 'image/jpeg')
self.set_header('X-ECE459-Fragment', str(n))
time.sleep(abs(random.gauss(0.2, 0.2)))
self.write(bio.getvalue())
application = tornado.wsgi.WSGIApplication([
(r"/image", MainHandler),
])
if __name__ == "__main__":
import logging
import wsgiref.simple_server
logger = logging.getLogger('tornado.application')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
server = wsgiref.simple_server.make_server('', 8000, application)
server.serve_forever()
| Python | 0.000001 |
37e1eb093eb29044930cb90a049f471aa2caad8b | Update publicip.py | apps/tinyosGW/publicip.py | apps/tinyosGW/publicip.py | #-*- coding: utf-8 -*-
#!/usr/bin/python
# Author : jeonghoonkang, https://github.com/jeonghoonkang
from __future__ import print_function
from subprocess import *
from types import *
import platform
import sys
import os
def run_cmd(cmd):
p = Popen(cmd, shell=True, stdout=PIPE)
output = p.communicate()[0]
return output
def hostname():
cmd = "hostname"
ret = run_cmd(cmd)
return ret
def getip():
cmd = "curl http://checkip.amazonaws.com"
ip = run_cmd(cmd)
print (ip)
return ip
def getiip():
cmd="/sbin/ifconfig"
_os_type = platform.system()
print (_os_type)
if _os_type.find('Cygwin') > 0:
cmd = "ipconfig"
iip = run_cmd(cmd)
return iip, _os_type
def checkifexist(fname):
cmd='ls ' + fname
print (run_cmd(cmd))
def writefile(_in, fn="ip.txt"):
f = open(fn, 'w')
f.write(_in)
f.flush()
f.close()
return
def args_proc():
msg = "usage : python %s {server_IP_ADD} {server_PORT} {server_id} {passwd_for_server}" %__file__
msg += " => user should input arguments {} "
print (msg)
if len(sys.argv) < 2:
exit("[bye] you need to input args, ip / port / id")
arg1 = sys.argv[1]
arg2 = sys.argv[2]
arg3 = sys.argv[3]
arg4 = sys.argv[4]
ip = arg1
port = arg2
id = arg3
passwd = arg4
print ("... start running, inputs are ", ip, port, id, passwd)
return ip, port, id, passwd
if __name__ == '__main__':
ip, port, id, passwd = args_proc()
p_ip = getip()
i_ip, os_type = getiip()
info = i_ip + p_ip
hostn = hostname()
name = os.getlogin()
if os_type == "Linux":
fname = '/home/%s/' %name
elif os_type == "Darwin":
fname = '/Users/%s/' %name
fname += 'devel/BerePi/apps/tinyosGW/out/%s.txt' %(hostn[:-1])
writefile (info, fname)
checkifexist(fname)
cmd = 'sshpass -p' + passwd + ' ' + 'scp' + ' -o' + ' StrictHostKeyChecking=no'
cmd += " %s " %fname + '%s@%s:' %(id,ip) + '/var/www/html/server/'
# cmd = 'scp'
# cmd += " %s " %fname + '%s@%s:' %(id,ip) + '/var/www/html/server/'
ret = run_cmd(cmd)
print (cmd)
print (" ")
print (ret)
# ssh-keygen
# cat ~/.ssh/id_rsa.pub | ssh -p xxxx pi@xxx.xxx.xxx 'cat >>
# .ssh/authorized_keys'
|
#!/usr/bin/python
# Author : jeonghoonkang, https://github.com/jeonghoonkang
#-*- coding: utf-8 -*-
from __future__ import print_function
from subprocess import *
from types import *
import platform
import sys
import os
def run_cmd(cmd):
p = Popen(cmd, shell=True, stdout=PIPE)
output = p.communicate()[0]
return output
def hostname():
cmd = "hostname"
ret = run_cmd(cmd)
return ret
def getip():
cmd = "curl http://checkip.amazonaws.com"
ip = run_cmd(cmd)
print (ip)
return ip
def getiip():
cmd="/sbin/ifconfig"
_os_type = platform.system()
print (_os_type)
if _os_type.find('Cygwin') > 0:
cmd = "ipconfig"
iip = run_cmd(cmd)
return iip, _os_type
def checkifexist(fname):
cmd='ls ' + fname
print (run_cmd(cmd))
def writefile(_in, fn="ip.txt"):
f = open(fn, 'w')
f.write(_in)
f.flush()
f.close()
return
def args_proc():
msg = "usage : python %s {server_IP_ADD} {server_PORT} {server_id} {passwd_for_server}" %__file__
msg += " => user should input arguments {} "
print (msg)
if len(sys.argv) < 2:
exit("[bye] you need to input args, ip / port / id")
arg1 = sys.argv[1]
arg2 = sys.argv[2]
arg3 = sys.argv[3]
arg4 = sys.argv[4]
ip = arg1
port = arg2
id = arg3
passwd = arg4
print ("... start running, inputs are ", ip, port, id, passwd)
return ip, port, id, passwd
if __name__ == '__main__':
ip, port, id, passwd = args_proc()
p_ip = getip()
i_ip, os_type = getiip()
info = i_ip + p_ip
hostn = hostname()
name = os.getlogin()
if os_type == "Linux":
fname = '/home/%s/' %name
elif os_type == "Darwin":
fname = '/Users/%s/' %name
fname += 'devel/BerePi/apps/tinyosGW/out/%s.txt' %(hostn[:-1])
writefile (info, fname)
checkifexist(fname)
cmd = 'sshpass -p' + passwd + ' ' + 'scp' + ' -o' + ' StrictHostKeyChecking=no'
cmd += " %s " %fname + '%s@%s:' %(id,ip) + '/var/www/html/server/'
# cmd = 'scp'
# cmd += " %s " %fname + '%s@%s:' %(id,ip) + '/var/www/html/server/'
ret = run_cmd(cmd)
print (cmd)
print (" ")
print (ret)
# ssh-keygen
# cat ~/.ssh/id_rsa.pub | ssh -p xxxx pi@xxx.xxx.xxx 'cat >>
# .ssh/authorized_keys'
| Python | 0.000001 |
4b46b9b92ec3d2ed0016d9994708bfaa2a90bca3 | fix BramPortAgent timing | hwt/interfaces/agents/bramPort.py | hwt/interfaces/agents/bramPort.py | from hwt.hdlObjects.constants import READ, WRITE, NOP
from hwt.simulator.agentBase import SyncAgentBase
from hwt.simulator.shortcuts import oscilate
class BramPort_withoutClkAgent(SyncAgentBase):
"""
:ivar requests: list of tuples (request type, address, [write data]) - used for driver
:ivar data: list of data in memory, used for monitor
:ivar mem: if agent is in monitor mode (= is slave) all reads and writes are performed on
mem object
"""
def __init__(self, intf, clk=None, rstn=None):
super().__init__(intf, clk=clk, rstn=rstn, allowNoReset=True)
self.requests = []
self.readPending = False
self.readed = []
self.mem = {}
self.requireInit = True
def doReq(self, s, req):
rw = req[0]
addr = req[1]
if rw == READ:
rw = 0
wdata = None
self.readPending = True
if self._debugOutput is not None:
self._debugOutput.write("%s, after %r read_req: %d\n" % (
self.intf._getFullName(), s.now, addr))
elif rw == WRITE:
wdata = req[2]
rw = 1
if self._debugOutput is not None:
self._debugOutput.write("%s, after %r write: %d:%d\n" % (
self.intf._getFullName(), s.now, addr, wdata))
else:
raise NotImplementedError(rw)
intf = self.intf
s.w(rw, intf.we)
s.w(addr, intf.addr)
s.w(wdata, intf.din)
def onReadReq(self, s, addr):
"""
on readReqRecieved in monitor mode
"""
self.requests.append((READ, addr))
def onWriteReq(self, s, addr, data):
"""
on writeReqRecieved in monitor mode
"""
self.requests.append((WRITE, addr, data))
def monitor(self, s):
intf = self.intf
yield s.updateComplete
# now we are after clk edge
if self.enable:
en = s.read(intf.en)
assert en.vldMask
if en.val:
we = s.read(intf.we)
assert we.vldMask
addr = s.read(intf.addr)
if we.val:
data = s.read(intf.din)
self.onWriteReq(s, addr, data)
else:
self.onReadReq(s, addr)
if self.requests:
req = self.requests.pop(0)
t = req[0]
addr = req[1]
assert addr._isFullVld(), s.now
if t == READ:
s.write(self.mem[addr.val], intf.dout)
else:
assert t == WRITE
s.write(None, intf.dout)
self.mem[addr.val] = req[2]
def driver(self, s):
intf = self.intf
if self.requireInit:
s.w(0, intf.en)
s.w(0, intf.we)
self.requireInit = False
readPending = self.readPending
yield s.updateComplete
# now we are after clk edge
if self.requests and self.enable:
req = self.requests.pop(0)
if req is NOP:
s.w(0, intf.en)
s.w(0, intf.we)
self.readPending = False
else:
self.doReq(s, req)
s.w(1, intf.en)
else:
s.w(0, intf.en)
s.w(0, intf.we)
self.readPending = False
if readPending:
yield s.updateComplete
d = s.r(intf.dout)
self.readed.append(d)
if self._debugOutput is not None:
self._debugOutput.write("%s, on %r read_data: %d\n" % (
self.intf._getFullName(), s.now, d.val))
class BramPortAgent(BramPort_withoutClkAgent):
def __init__(self, intf, clk=None, rstn=None):
if clk is None:
clk = intf.clk
super().__init__(intf, clk=clk, rstn=rstn)
def getDrivers(self):
drivers = super(BramPortAgent, self).getDrivers()
drivers.append(oscilate(self.intf.clk))
return drivers
| from hwt.hdlObjects.constants import READ, WRITE, NOP
from hwt.simulator.agentBase import SyncAgentBase
from hwt.simulator.shortcuts import oscilate
class BramPort_withoutClkAgent(SyncAgentBase):
"""
:ivar requests: list of tuples (request type, address, [write data]) - used for driver
:ivar data: list of data in memory, used for monitor
:ivar mem: if agent is in monitor mode (= is slave) all reads and writes are performed on
mem object
"""
def __init__(self, intf, clk=None, rstn=None):
super().__init__(intf, clk=clk, rstn=rstn, allowNoReset=True)
self.requests = []
self.readPending = False
self.readed = []
self.mem = {}
def doReq(self, s, req):
rw = req[0]
addr = req[1]
if rw == READ:
rw = 0
wdata = None
self.readPending = True
elif rw == WRITE:
wdata = req[2]
rw = 1
else:
raise NotImplementedError(rw)
intf = self.intf
s.w(rw, intf.we)
s.w(addr, intf.addr)
s.w(wdata, intf.din)
def onReadReq(self, s, addr):
"""
on readReqRecieved in monitor mode
"""
self.requests.append((READ, addr))
def onWriteReq(self, s, addr, data):
"""
on writeReqRecieved in monitor mode
"""
self.requests.append((WRITE, addr, data))
def monitor(self, s):
intf = self.intf
yield s.updateComplete
if self.requests:
req = self.requests.pop(0)
t = req[0]
addr = req[1]
assert addr._isFullVld(), s.now
if t == READ:
s.write(self.mem[addr.val], intf.dout)
else:
assert t == WRITE
s.write(None, intf.dout)
self.mem[addr.val] = req[2]
if self.enable:
en = s.read(intf.en)
assert en.vldMask
if en.val:
we = s.read(intf.we)
assert we.vldMask
addr = s.read(intf.addr)
if we.val:
data = s.read(intf.din)
self.onWriteReq(s, addr, data)
else:
self.onReadReq(s, addr)
def driver(self, s):
intf = self.intf
readPending = self.readPending
if self.requests and self.enable:
req = self.requests.pop(0)
if req is NOP:
s.w(0, intf.en)
s.w(0, intf.we)
self.readPending = False
else:
self.doReq(s, req)
s.w(1, intf.en)
else:
s.w(0, intf.en)
s.w(0, intf.we)
self.readPending = False
if readPending:
yield s.updateComplete
d = s.r(intf.dout)
self.readed.append(d)
class BramPortAgent(BramPort_withoutClkAgent):
def __init__(self, intf, clk=None, rstn=None):
if clk is None:
clk = intf.clk
super().__init__(intf, clk=clk, rstn=rstn)
def getDrivers(self):
drivers = super(BramPortAgent, self).getDrivers()
drivers.append(oscilate(self.intf.clk))
return drivers
| Python | 0.000001 |
3e33a94580d386be71298bcc7fb2d4a4bc19dd34 | apply only the unified diff instead of the whole file | gitmagic/fixup.py | gitmagic/fixup.py | import gitmagic
from git.cmd import Git
from io import import StringIO
def fixup(repo, destination_picker, change_finder, args={}):
repo.index.reset()
for change in change_finder(repo):
_apply_change(repo, change)
destination_commits = destination_picker.pick(change)
if not destination_commits:
repo.index.commit( message = "WARNING: no destination commit")
continue
destination = destination_commits[0]
gitmagic.checkpoint("Should I create fixup commit for {} -> {}:{}\n{}".format(
change.a_file_name,
destination.hexsha[:7],
destination.summary,
change.unified_diff().read()), args)
repo.index.commit( message = "fixup! {}".format(destination.message))
def _apply_change(repo, change):
git = Git(repo.working_dir)
git.execute(['git', 'apply', '-'], istream=StringIO(change.unified_diff()))
| import gitmagic
def fixup(repo, destination_picker, change_finder, args={}):
repo.index.reset()
for change in change_finder(repo):
_apply_change(repo, change)
destination_commits = destination_picker.pick(change)
if not destination_commits:
repo.index.commit( message = "WARNING: no destination commit")
continue
destination = destination_commits[0]
gitmagic.checkpoint("Should I create fixup commit for {} -> {}:{}\n{}".format(
change.a_file_name,
destination.hexsha[:7],
destination.summary,
change.unified_diff().read()), args)
repo.index.commit( message = "fixup! {}".format(destination.message))
def _apply_change(repo, change):
#todo: apply unified diff only
repo.index.add([change.a_file_name])
| Python | 0.000001 |
eb642e63cd32b972ccdec4f487b9a7e2e7cb17b5 | make product_change_type template filter work with hidden plans | billing/templatetags/billing_tags.py | billing/templatetags/billing_tags.py | from django import template
import billing.loading
from pricing.products import Product
register = template.Library()
@register.filter
def product_change_type(product, user):
upc = user.billing_account.get_current_product_class()
if isinstance(product, Product):
product = type(product)
if upc:
products = billing.loading.get_products(hidden=True)
upc_index = products.index(upc)
p_index = products.index(product)
if upc_index < p_index:
return 'upgrade'
elif upc_index == p_index:
return None
else:
return 'downgrade'
else:
return 'upgrade'
| from django import template
import billing.loading
from pricing.products import Product
register = template.Library()
@register.filter
def product_change_type(product, user):
upc = user.billing_account.get_current_product_class()
if isinstance(product, Product):
product = type(product)
if upc:
products = billing.loading.get_products()
upc_index = products.index(upc)
p_index = products.index(product)
if upc_index < p_index:
return 'upgrade'
elif upc_index == p_index:
return None
else:
return 'downgrade'
else:
return 'upgrade'
| Python | 0 |
58c685aa03c51a96a25af9dd7d6792035b1f167e | fix update_firebase_installation | plugins/tff_backend/bizz/dashboard.py | plugins/tff_backend/bizz/dashboard.py | # -*- coding: utf-8 -*-
# Copyright 2018 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.4@@
import time
from collections import defaultdict
from datetime import datetime
from dateutil.relativedelta import relativedelta
from mcfw.rpc import arguments, returns
from plugins.rogerthat_api.to.installation import InstallationLogTO, InstallationTO
from plugins.tff_backend.bizz.flow_statistics import get_flow_run_ticker_entry
from plugins.tff_backend.bizz.installations import list_installations, get_ticker_entry_for_installation
from plugins.tff_backend.firebase import put_firebase_data, remove_firebase_data
from plugins.tff_backend.models.statistics import FlowRun
from plugins.tff_backend.to.dashboard import TickerEntryTO
@returns([TickerEntryTO])
def rebuild_flow_stats(start_date):
# type: (datetime) -> list[TickerEntryTO]
stats_per_flow_name = defaultdict(dict)
ticker_entries = []
for flow_run in FlowRun.list_by_start_date(start_date): # type: FlowRun
stats_per_flow_name[flow_run.flow_name][flow_run.id] = flow_run.status
ticker_entries.append(get_flow_run_ticker_entry(flow_run))
put_firebase_data('/dashboard/flows.json', stats_per_flow_name)
return ticker_entries
def rebuild_installation_stats(date):
cursor = None
max_timestamp = time.mktime(date.timetuple())
has_more = True
# keys = possible values of InstallationTO.status
firebase_data = {
'started': {},
'in_progress': {},
'finished': {}
}
ticker_entries = []
while has_more:
installation_list = list_installations(page_size=1000, cursor=cursor, detailed=True)
cursor = installation_list.cursor
if not installation_list.more:
has_more = False
for installation in installation_list.results:
if installation.timestamp <= max_timestamp:
has_more = False
else:
firebase_data[installation.id] = installation.status
# timestamp might not be the most accurate but good enough
ticker_entries.append(get_ticker_entry_for_installation(installation, []))
put_firebase_data('/dashboard/installations.json', firebase_data)
return ticker_entries
@arguments(installation=InstallationTO, logs=[InstallationLogTO])
def update_firebase_installation(installation, logs):
# type: (InstallationTO, list[InstallationLogTO]) -> None
ticker_entry = get_ticker_entry_for_installation(installation, logs)
put_firebase_data('/dashboard/installations.json', {installation.id, installation.status})
put_firebase_data('/dashboard/ticker/%s.json' % ticker_entry.id, ticker_entry.to_dict())
def rebuild_firebase_data():
# Removes all /dashboard data from firebase and rebuilds it
# Shouldn't be ran more than once a month if all goes well
ticker_entries = []
remove_firebase_data('dashboard.json')
date = datetime.now() - relativedelta(days=7)
ticker_entries.extend(rebuild_installation_stats(date))
ticker_entries.extend(rebuild_flow_stats(date))
put_firebase_data('/dashboard/ticker.json', {entry.id: entry.to_dict() for entry in ticker_entries})
| # -*- coding: utf-8 -*-
# Copyright 2018 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.4@@
import time
from collections import defaultdict
from datetime import datetime
from dateutil.relativedelta import relativedelta
from mcfw.rpc import arguments, returns
from plugins.rogerthat_api.to.installation import InstallationLogTO, InstallationTO
from plugins.tff_backend.bizz.flow_statistics import get_flow_run_ticker_entry
from plugins.tff_backend.bizz.installations import list_installations, get_ticker_entry_for_installation
from plugins.tff_backend.firebase import put_firebase_data, remove_firebase_data
from plugins.tff_backend.models.statistics import FlowRun
from plugins.tff_backend.to.dashboard import TickerEntryTO
@returns([TickerEntryTO])
def rebuild_flow_stats(start_date):
# type: (datetime) -> list[TickerEntryTO]
stats_per_flow_name = defaultdict(dict)
ticker_entries = []
for flow_run in FlowRun.list_by_start_date(start_date): # type: FlowRun
stats_per_flow_name[flow_run.flow_name][flow_run.id] = flow_run.status
ticker_entries.append(get_flow_run_ticker_entry(flow_run))
put_firebase_data('/dashboard/flows.json', stats_per_flow_name)
return ticker_entries
def rebuild_installation_stats(date):
cursor = None
max_timestamp = time.mktime(date.timetuple())
has_more = True
# keys = possible values of InstallationTO.status
firebase_data = {
'started': {},
'in_progress': {},
'finished': {}
}
ticker_entries = []
while has_more:
installation_list = list_installations(page_size=1000, cursor=cursor, detailed=True)
cursor = installation_list.cursor
if not installation_list.more:
has_more = False
for installation in installation_list.results:
if installation.timestamp <= max_timestamp:
has_more = False
else:
firebase_data[installation.id] = installation.status
# timestamp might not be the most accurate but good enough
ticker_entries.append(get_ticker_entry_for_installation(installation, []))
put_firebase_data('/dashboard/installations.json', firebase_data)
return ticker_entries
@arguments(installation=InstallationTO, logs=[InstallationLogTO])
def update_firebase_installation(installation, logs):
# type: (InstallationTO, list[InstallationLogTO]) -> None
ticker_entry = get_ticker_entry_for_installation(installation, logs)
put_firebase_data('/dashboard/installations.json' % {installation.id, installation.status})
put_firebase_data('/dashboard/ticker/%s.json' % ticker_entry.id, ticker_entry.to_dict())
def rebuild_firebase_data():
# Removes all /dashboard data from firebase and rebuilds it
# Shouldn't be ran more than once a month if all goes well
ticker_entries = []
remove_firebase_data('dashboard.json')
date = datetime.now() - relativedelta(days=7)
ticker_entries.extend(rebuild_installation_stats(date))
ticker_entries.extend(rebuild_flow_stats(date))
put_firebase_data('/dashboard/ticker.json', {entry.id: entry.to_dict() for entry in ticker_entries})
| Python | 0.000001 |
dca5bd7d16866c0badf6c9d4ae69335aacef9f6d | Add sleep-and-try-again when getting MB info | audio_pipeline/util/MBInfo.py | audio_pipeline/util/MBInfo.py | __author__ = 'cephalopodblue'
import musicbrainzngs as ngs
from . import Util
import time
class MBInfo():
default_server = ngs.hostname
def __init__(self, server=None, backup_server=None, useragent=("hidat_audio_pipeline", "0.1")):
if server is not None and server != self.default_server:
ngs.set_hostname(server)
self.backup_server = backup_server
ngs.set_useragent(useragent[0], useragent[1])
#####
# == Get Release
# Retrieves a raw release from MusicBrainz using their API
#####
def get_release(self, release_id):
if Util.is_mbid(release_id):
include=["artist-credits", "recordings", "isrcs", "media", "release-groups", "labels", "artists"]
try:
mb_release = ngs.get_release_by_id(release_id, includes=include)['release']
except ngs.ResponseError as e:
# probably a bad request / mbid
# propagate up
raise e
except ngs.NetworkError as e:
# can't reach the musicbrainz server - wait 10 seconds and try again
time.sleep(10)
try:
mb_release = ngs.get_release_by_id(release_id, includes=include)['release']
except ngs.NetworkError as e:
# if we still can't reach it, propagate up the error
mb_release = None
# propagate error up
raise e
return mb_release
#####
# == Get artist
# Retrieves raw artist metadata from MusicBrainz using their API
#####
def get_artist(self, artist_id):
if Util.is_mbid(artist_id):
include=["aliases", "url-rels", "annotation", "artist-rels"]
try:
mb_artist = ngs.get_artist_by_id(artist_id, includes=include)['artist']
return mb_artist
except ngs.ResponseError as e:
# probably a bad request / mbid
# propagate up
raise e
except ngs.NetworkError as e:
# can't reach the musicbrainz server - if we have a local, try hitting it?
mb_artist = None
# propagate error up
raise e
| __author__ = 'cephalopodblue'
import musicbrainzngs as ngs
from . import Util
class MBInfo():
default_server = ngs.hostname
def __init__(self, server=None, backup_server=None, useragent=("hidat_audio_pipeline", "0.1")):
if server is not None and server != self.default_server:
ngs.set_hostname(server)
self.backup_server = backup_server
ngs.set_useragent(useragent[0], useragent[1])
#####
# == Get Release
# Retrieves a raw release from MusicBrainz using their API
#####
def get_release(self, release_id):
if Util.is_mbid(release_id):
include=["artist-credits", "recordings", "isrcs", "media", "release-groups", "labels", "artists"]
try:
mb_release = ngs.get_release_by_id(release_id, includes=include)['release']
except ngs.ResponseError as e:
# probably a bad request / mbid
# propagate up
raise e
except ngs.NetworkError as e:
# can't reach the musicbrainz server - if we have a local, try hitting it?
mb_release = None
# propagate error up
raise e
return mb_release
#####
# == Get artist
# Retrieves raw artist metadata from MusicBrainz using their API
#####
def get_artist(self, artist_id):
if Util.is_mbid(artist_id):
include=["aliases", "url-rels", "annotation", "artist-rels"]
try:
mb_artist = ngs.get_artist_by_id(artist_id, includes=include)['artist']
return mb_artist
except ngs.ResponseError as e:
# probably a bad request / mbid
# propagate up
raise e
except ngs.NetworkError as e:
# can't reach the musicbrainz server - if we have a local, try hitting it?
mb_artist = None
# propagate error up
raise e
| Python | 0 |
bf188dfae49ab23c8f5dd7eeb105951f6c068b7f | Add filtering by is_circle to Role admin. | backend/feedbag/role/admin.py | backend/feedbag/role/admin.py | from django.contrib import admin
from django.utils.translation import ugettext as _
from .models import Role
class IsCircleListFilter(admin.SimpleListFilter):
# Human-readable title which will be displayed in the
# right admin sidebar just above the filter options.
title = _('Is Circle')
# Parameter for the filter that will be used in the URL query.
parameter_name = 'is_circle'
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
return (('is_circle', _('Is circle')), ('is_not_circle', _('Is not circle')),)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
if self.value() == 'is_circle':
return queryset.filter(children__isnull=False)
elif self.value() == 'is_not_circle':
return queryset.filter(children__isnull=True)
@admin.register(Role)
class RoleAdmin(admin.ModelAdmin):
list_display = ('name', 'is_circle', 'parent', 'purpose', 'archived',)
list_filter = ('archived', IsCircleListFilter)
search_fields = ('name',)
actions = ['archive_role']
def archive_role(self, request, queryset):
for role in queryset:
role.archive()
self.message_user(request, "Roles were successfully archived.")
archive_role.short_description = 'Archive selected roles'
| from django.contrib import admin
from .models import Role
@admin.register(Role)
class RoleAdmin(admin.ModelAdmin):
list_display = ('name',
'is_circle',
'parent',
'purpose',
'archived',
)
list_filter = ('archived',)
actions = ['archive_role']
def archive_role(self, request, queryset):
for role in queryset:
role.archive()
self.message_user(request, "Roles were successfully archived.")
archive_role.short_description = 'Archive selected roles'
| Python | 0 |
0fac23c22307ca598e0cc6712280903c2a7d559d | Improve auto battle module | gbf_bot/auto_battle.py | gbf_bot/auto_battle.py | import logging
import random
import time
import pyautogui
from . import top_left, window_size
from . import auto_battle_config as config
from . import utility
from .components import Button
logger = logging.getLogger(__name__)
attack = Button('attack.png', config['attack'])
auto = Button('auto.png', config['auto'])
def activate(battle_time):
pyautogui.PAUSE = 1.3
# wait before battle start
w, h = window_size
start_pt = (top_left[0] + w//2, top_left[1] + h*1//3)
region = start_pt + (w//2, h*2//3)
while True:
time.sleep(0.5)
found = utility.locate(attack.path, region)
if found is not None:
break
logger.info('click attack')
attack.double_click()
time.sleep(random.random() * 0.35)
logger.info('click auto')
auto.click()
# battle result
time.sleep(battle_time)
| import logging
import random
import time
import pyautogui
from . import auto_battle_config as config
from .components import Button
logger = logging.getLogger(__name__)
attack = Button('attack.png', config['attack'])
auto = Button('auto.png', config['auto'])
def activate(battle_time):
pyautogui.PAUSE = 1.3
time.sleep(5 + random.random() * 0.25)
logger.info('click attack')
attack.double_click()
time.sleep(random.random() * 0.35)
logger.info('click auto')
auto.click()
# battle result
time.sleep(battle_time + random.random() * 3)
| Python | 0.000001 |
d37c2328a8ed58778f4c39091add317878831b4e | increment version | grizli/version.py | grizli/version.py | # git describe --tags
__version__ = "0.7.0-47-g6450ea1"
| # git describe --tags
__version__ = "0.7.0-41-g39ad8ff"
| Python | 0.000004 |
d30d10a477f0b46fa73da76cb1b010e1376c3ff2 | Update version for a new PYPI package. | gtable/version.py | gtable/version.py | __version__ = '0.7'
| __version__ = '0.6.2'
| Python | 0 |
311be8c11b513fd2b3d2bb4427b5bc0b43c2539c | Move reverse along with geometry | caminae/core/forms.py | caminae/core/forms.py | from math import isnan
from django.forms import ModelForm
from django.contrib.gis.geos import LineString
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
import floppyforms as forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Field, Submit, Div
from crispy_forms.bootstrap import FormActions
from .models import Path
from .widgets import LineStringWidget
class PathForm(ModelForm):
geom = forms.gis.LineStringField(widget=LineStringWidget)
reverse_geom = forms.BooleanField(
required=False,
label = _("Reverse path"),
help_text = _("The path will be reversed once saved"),
)
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.layout = Layout(
Div('name',
'structure',
'stake',
'trail',
Field('comments', css_class='input-xlarge'),
'datasource',
'networks',
'usages',
'valid',
css_class="span4",
),
Div('geom',
'reverse_geom',
css_class="span7",),
FormActions(
Submit('cancel', 'Cancel'),
Submit('save_changes', _('Save changes'), css_class="btn-primary offset1"),
css_class="form-actions span11",
)
)
def __init__(self, *args, **kwargs):
super(PathForm, self).__init__(*args, **kwargs)
if self.instance.pk:
self.helper.form_action = self.instance.get_update_url()
else:
self.helper.form_action = reverse("core:path_add")
def save(self, commit=True):
path = super(PathForm, self).save(commit=False)
if self.cleaned_data.get('reverse_geom'):
# path.geom.reverse() won't work for 3D coords
reversed_coord = path.geom.coords[-1::-1]
# FIXME: why do we have to filter nan variable ?! Why are they here in the first place ?
valid_coords = [ (x, y, 0.0 if isnan(z) else z) for x, y, z in reversed_coord ]
path.geom = LineString(valid_coords)
if commit:
path.save()
return path
class Meta:
model = Path
exclude = ('geom_cadastre',)
| from math import isnan
from django.forms import ModelForm
from django.contrib.gis.geos import LineString
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
import floppyforms as forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Field, Submit, Div
from crispy_forms.bootstrap import FormActions
from .models import Path
from .widgets import LineStringWidget
class PathForm(ModelForm):
geom = forms.gis.LineStringField(widget=LineStringWidget)
reverse_geom = forms.BooleanField(
required=False,
label = _("Reverse geometry"),
help_text = _("The geometry will be reversed once saved"),
)
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.layout = Layout(
Div('name',
'structure',
'stake',
'trail',
Field('comments', css_class='input-xlarge'),
'datasource',
'networks',
'usages',
'valid',
'reverse_geom',
css_class="span4",
),
Div('geom',
css_class="span7",),
FormActions(
Submit('cancel', 'Cancel'),
Submit('save_changes', _('Save changes'), css_class="btn-primary offset1"),
css_class="form-actions span11",
)
)
def __init__(self, *args, **kwargs):
super(PathForm, self).__init__(*args, **kwargs)
if self.instance.pk:
self.helper.form_action = self.instance.get_update_url()
else:
self.helper.form_action = reverse("core:path_add")
def save(self, commit=True):
path = super(PathForm, self).save(commit=False)
if self.cleaned_data.get('reverse_geom'):
# path.geom.reverse() won't work for 3D coords
reversed_coord = path.geom.coords[-1::-1]
# FIXME: why do we have to filter nan variable ?! Why are they here in the first place ?
valid_coords = [ (x, y, 0.0 if isnan(z) else z) for x, y, z in reversed_coord ]
path.geom = LineString(valid_coords)
if commit:
path.save()
return path
class Meta:
model = Path
exclude = ('geom_cadastre',)
| Python | 0.000001 |
f9dadd363d9f370d884c0b127e1f63df8916f3c8 | Rename some functions | calculation.py | calculation.py | """Calculation functions."""
from __future__ import division
import numpy as np
from . import plotting
import matplotlib.pyplot as plt
from scipy.signal import butter, freqz
def deconv_process(excitation, system_response, fs):
"""Deconvolution.
It is a necessity to zeropadd the excitation signal
to avoid zircular artifacts, if the system response is longer
than the excitation signal.
"""
NFFT = _pow2(len(excitation) + len(system_response) - 1)
excitation_f = np.fft.rfft(excitation, NFFT)
excitation_f_inv = 1 / excitation_f
# butter_w, butter_h = butter_bandpass(10, 22049, fs, NFFT//2+1, order=2)
return np.fft.irfft(np.fft.rfft(system_response, NFFT) * excitation_f_inv)
def snr_db(signal, noise):
"""Calculating Signal-to-noise ratio.
Parameters
----------
signal : array_like
Signal vector
noise : array_like
Noise vector
Returns
-------
Return SNR in dB
"""
return 10 * np.log10(_mean_power(signal) / _mean_power(noise))
def _mean_power(signal):
return np.mean(np.square(signal))
def _pow2(n):
i = 1
while i < n:
i *= 2
return i
def butter_bandpass(lower_bound, higher_bound, fs, NFFT, order):
wl = lower_bound / (fs / 2)
wh = higher_bound / (fs / 2)
b, a = butter(order, [wl, wh], btype='band')
butter_w, butter_h = freqz(b, a, worN=NFFT, whole=True)
return butter_w, butter_h
#~ def limiter(signal_f_inv, threshold_dB):
#~ signal_f_inv_abs = np.abs(signal_f_inv)
#~ signal_f_inv_phase = np.angle(signal_f_inv)
#~ signal_f_inv_abs_dB = plotting._dB_calculation(signal_f_inv_abs)
#~ array_positions = np.where(signal_f_inv_abs_dB > signal_f_inv_abs_dB.max() + threshold_dB)
#~ threshold = 10**((signal_f_inv_abs_dB.max()+threshold_dB)/20)
#~ signal_f_inv_abs[array_positions] = threshold
#~ signal_f_inv = signal_f_inv_abs * np.exp(1j*signal_f_inv_phase)
#~ return signal_f_inv
def awgn_noise(level, size=None, seed=1):
scale = 10 ** (level / 20.)
np.random.seed(seed)
return np.random.normal(scale=scale, size=size)
#~ def coherency(excitation, system_response):
#~ Rxx = np.correlate(excitation, excitation, 'full')
#~ Ryy = np.correlate(system_response, system_response, 'full')
#~ Ryx = np.correlate(system_response, excitation, 'full')
#~ return np.abs(Ryx) ** 2 / (Rxx * Ryy)
| """Calculation functions."""
from __future__ import division
import numpy as np
from scipy.signal import butter, freqz
def deconv_process(excitation, system_response, fs):
"""Deconvolution.
It is a necessity to zeropadd the excitation signal
to avoid zircular artifacts, if the system response is longer
than the excitation signal. Therfore, the excitation signal has
been extended for freely chosen 5 seconds as default. If you want
to simulate the 'Cologne Cathedral', feel free to zeropadd
more seconds.
"""
NFFT = _pow2(len(excitation) + len(system_response) - 1)
excitation_f = np.fft.fft(excitation, NFFT)
excitation_f_inv = 1 / excitation_f
# butter_w, butter_h = butter_bandpass(20, 20000, fs, NFFT, order=2)
return np.fft.ifft(np.fft.fft(system_response, NFFT) * excitation_f_inv).real
def snr_db(signal, noise):
"""Calculating Signal-to-noise ratio.
Parameters
----------
signal : array_like
Signal vector
noise : array_like
Noise vector
Returns
-------
Return SNR in dB
"""
return 10 * np.log10(_mean_power(signal) / _mean_power(noise))
def _mean_power(signal):
return np.mean(np.abs(signal ** 2))
def _pow2(n):
i = 1
while i < n:
i *= 2
return i
def coherency(excitation, system_response):
Rxx = np.correlate(excitation, excitation, 'full')
Ryy = np.correlate(system_response, system_response, 'full')
Ryx = np.correlate(system_response, excitation, 'full')
return np.abs(Ryx) ** 2 / (Rxx * Ryy)
def butter_bandpass(lower_bound, higher_bound, fs, NFFT, order):
wl = lower_bound / (fs / 2)
wh = higher_bound / (fs / 2)
b, a = butter(order, [wl, wh], btype='band')
butter_w, butter_h = freqz(b, a, worN=NFFT, whole=True)
return butter_w, butter_h
def limiter(signal, threshold_dB):
array_positions = np.where(signal < threshold_dB)
signal[array_positions] = threshold_dB
return signal
def noise_db(level, size=None, seed=1):
scale = 10 ** (level / 20.)
np.random.seed(seed)
return np.random.normal(scale=scale, size=size)
| Python | 0.00292 |
88f36912de48a84e4e3778889948f85655ba9064 | Remove token logging | canis/oauth.py | canis/oauth.py | from os import environ
from urllib import urlencode
from datetime import datetime, timedelta
from flask import Flask, request, redirect
import requests
app = Flask(__name__)
SPOTIFY_CLIENT_ID = environ['CANIS_SPOTIFY_API_CLIENT_ID']
SPOTIFY_SECRET = environ['CANIS_SPOTIFY_API_SECRET']
SPOTIFY_CALLBACK = environ.get('CANIS_SPOTIFY_API_CALLBACK', 'http://127.0.0.1:5000/callback/')
access_token = None
refresh_token = None
expiration = None
@app.route('/login')
def login():
args = {
'client_id': SPOTIFY_CLIENT_ID,
'response_type': 'code',
'redirect_uri': SPOTIFY_CALLBACK,
'scope': 'playlist-read-private playlist-modify-private playlist-modify-public',
}
arg_str = urlencode(args)
url = 'https://accounts.spotify.com/authorize?{}'.format(arg_str)
return redirect(url)
@app.route('/callback/')
def callback():
args = {
'code': request.args['code'],
'grant_type': 'authorization_code',
'redirect_uri': SPOTIFY_CALLBACK,
'client_id': SPOTIFY_CLIENT_ID,
'client_secret': SPOTIFY_SECRET
}
r = requests.post('https://accounts.spotify.com/api/token', data=args)
resp = r.json()
store_token_response(resp)
shutdown_server()
return "You're good to go"
def refresh():
args = {
'refresh_token': refresh_token,
'grant_type': 'refresh_token',
'client_id': SPOTIFY_CLIENT_ID,
'client_secret': SPOTIFY_SECRET
}
r = requests.post('https://accounts.spotify.com/api/token', data=args)
resp = r.json()
store_token_response(resp)
def store_token_response(resp):
global access_token
global refresh_token
global expiration
access_token = resp['access_token']
if resp.get('refresh_token'):
refresh_token = resp['refresh_token']
expiration = datetime.utcnow() + timedelta(seconds=int(resp['expires_in']))
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
| from os import environ
from urllib import urlencode
from datetime import datetime, timedelta
from flask import Flask, request, redirect
import requests
app = Flask(__name__)
SPOTIFY_CLIENT_ID = environ['CANIS_SPOTIFY_API_CLIENT_ID']
SPOTIFY_SECRET = environ['CANIS_SPOTIFY_API_SECRET']
SPOTIFY_CALLBACK = environ.get('CANIS_SPOTIFY_API_CALLBACK', 'http://127.0.0.1:5000/callback/')
access_token = None
refresh_token = None
expiration = None
@app.route('/login')
def login():
args = {
'client_id': SPOTIFY_CLIENT_ID,
'response_type': 'code',
'redirect_uri': SPOTIFY_CALLBACK,
'scope': 'playlist-read-private playlist-modify-private playlist-modify-public',
}
arg_str = urlencode(args)
url = 'https://accounts.spotify.com/authorize?{}'.format(arg_str)
return redirect(url)
@app.route('/callback/')
def callback():
args = {
'code': request.args['code'],
'grant_type': 'authorization_code',
'redirect_uri': SPOTIFY_CALLBACK,
'client_id': SPOTIFY_CLIENT_ID,
'client_secret': SPOTIFY_SECRET
}
r = requests.post('https://accounts.spotify.com/api/token', data=args)
resp = r.json()
store_token_response(resp)
shutdown_server()
return "You're good to go"
def refresh():
args = {
'refresh_token': refresh_token,
'grant_type': 'refresh_token',
'client_id': SPOTIFY_CLIENT_ID,
'client_secret': SPOTIFY_SECRET
}
r = requests.post('https://accounts.spotify.com/api/token', data=args)
resp = r.json()
store_token_response(resp)
def store_token_response(resp):
global access_token
global refresh_token
global expiration
access_token = resp['access_token']
if resp.get('refresh_token'):
refresh_token = resp['refresh_token']
expiration = datetime.utcnow() + timedelta(seconds=int(resp['expires_in']))
print (access_token, refresh_token, expiration)
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
| Python | 0.000001 |
4556add4d9c3645559e51005129dcc65bd0b00ca | __VERSION__ changed | stop_words/__init__.py | stop_words/__init__.py | import json
import os
__VERSION__ = (2015, 2, 23)
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
STOP_WORDS_DIR = os.path.join(CURRENT_DIR, 'stop-words')
STOP_WORDS_CACHE = {}
with open(os.path.join(STOP_WORDS_DIR, 'languages.json'), 'rb') as map_file:
buffer = map_file.read()
buffer = buffer.decode('ascii')
LANGUAGE_MAPPING = json.loads(buffer)
AVAILABLE_LANGUAGES = list(LANGUAGE_MAPPING.values())
def get_version():
"""
:rtype: basestring
"""
return ".".join(str(v) for v in __VERSION__)
class StopWordError(Exception):
pass
def get_stop_words(language, cache=True):
"""
:type language: basestring
:rtype: list
"""
try:
language = LANGUAGE_MAPPING[language]
except KeyError:
if language not in AVAILABLE_LANGUAGES:
raise StopWordError('{0}" language is unavailable.'.format(
language
))
if cache and language in STOP_WORDS_CACHE:
return STOP_WORDS_CACHE[language]
language_filename = os.path.join(STOP_WORDS_DIR, language + '.txt')
try:
with open(language_filename, 'rb') as language_file:
stop_words = [line.decode('utf-8').strip()
for line in language_file.readlines()]
stop_words = apply_filters(stop_words, language)
except IOError:
raise StopWordError(
'{0}" file is unreadable, check your installation.'.format(
language_filename
)
)
if cache:
STOP_WORDS_CACHE[language] = stop_words
return stop_words
_filters = {None: []}
def apply_filters(stopwords, language):
"""
Apply registered filters to stopwords
:param stopwords: list
:param language: string
:return: filtered stopwords
"""
if language in _filters:
for func in _filters[language]:
stopwords = func(stopwords)
for func in _filters[None]:
stopwords = func(stopwords, language)
return stopwords
def add_filter(func, language=None):
"""
Register filters for specific language.
If language == None the filter applies for all languages.
Filter will not apply for stop words in cache.
:param func: callable
:param language: string|None
:return:
"""
if not language in _filters:
_filters[language] = []
_filters[language].append(func)
def remove_filter(func, language=None):
"""
:param func:
:param language:
:return:
"""
if not (language in _filters and func in _filters[language]):
return False
_filters[language].remove(func)
return True
def safe_get_stop_words(language):
"""
:type language: basestring
:rtype: list
"""
try:
return get_stop_words(language)
except StopWordError:
return []
| import json
import os
__VERSION__ = (2015, 2, 21)
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
STOP_WORDS_DIR = os.path.join(CURRENT_DIR, 'stop-words')
STOP_WORDS_CACHE = {}
with open(os.path.join(STOP_WORDS_DIR, 'languages.json'), 'rb') as map_file:
buffer = map_file.read()
buffer = buffer.decode('ascii')
LANGUAGE_MAPPING = json.loads(buffer)
AVAILABLE_LANGUAGES = list(LANGUAGE_MAPPING.values())
def get_version():
"""
:rtype: basestring
"""
return ".".join(str(v) for v in __VERSION__)
class StopWordError(Exception):
pass
def get_stop_words(language, cache=True):
"""
:type language: basestring
:rtype: list
"""
try:
language = LANGUAGE_MAPPING[language]
except KeyError:
if language not in AVAILABLE_LANGUAGES:
raise StopWordError('{0}" language is unavailable.'.format(
language
))
if cache and language in STOP_WORDS_CACHE:
return STOP_WORDS_CACHE[language]
language_filename = os.path.join(STOP_WORDS_DIR, language + '.txt')
try:
with open(language_filename, 'rb') as language_file:
stop_words = [line.decode('utf-8').strip()
for line in language_file.readlines()]
stop_words = apply_filters(stop_words, language)
except IOError:
raise StopWordError(
'{0}" file is unreadable, check your installation.'.format(
language_filename
)
)
if cache:
STOP_WORDS_CACHE[language] = stop_words
return stop_words
_filters = {None: []}
def apply_filters(stopwords, language):
"""
Apply registered filters to stopwords
:param stopwords: list
:param language: string
:return: filtered stopwords
"""
if language in _filters:
for func in _filters[language]:
stopwords = func(stopwords)
for func in _filters[None]:
stopwords = func(stopwords, language)
return stopwords
def add_filter(func, language=None):
"""
Register filters for specific language.
If language == None the filter applies for all languages.
Filter will not apply for stop words in cache.
:param func: callable
:param language: string|None
:return:
"""
if not language in _filters:
_filters[language] = []
_filters[language].append(func)
def remove_filter(func, language=None):
"""
:param func:
:param language:
:return:
"""
if not (language in _filters and func in _filters[language]):
return False
_filters[language].remove(func)
return True
def safe_get_stop_words(language):
"""
:type language: basestring
:rtype: list
"""
try:
return get_stop_words(language)
except StopWordError:
return []
| Python | 0.999994 |
1391dd0b084f2c26fd5d0afceb81ffb5daab5dcf | Remove path and user filter from admin | rest_framework_tracking/admin.py | rest_framework_tracking/admin.py | from django.contrib import admin
from .models import APIRequestLog
class APIRequestLogAdmin(admin.ModelAdmin):
date_hierarchy = 'requested_at'
list_display = ('id', 'requested_at', 'response_ms', 'status_code',
'user', 'method',
'path', 'remote_addr', 'host',
'query_params')
list_filter = ('method', 'status_code')
admin.site.register(APIRequestLog, APIRequestLogAdmin)
| from django.contrib import admin
from .models import APIRequestLog
class APIRequestLogAdmin(admin.ModelAdmin):
date_hierarchy = 'requested_at'
list_display = ('id', 'requested_at', 'response_ms', 'status_code',
'user', 'method',
'path', 'remote_addr', 'host',
'query_params')
list_filter = ('user', 'path', 'method', 'status_code')
admin.site.register(APIRequestLog, APIRequestLogAdmin)
| Python | 0 |
6051ef3a68db15b220e939240f7bfcb34db1c7c8 | Check if cache value is not None, not truthy | tunigo/api.py | tunigo/api.py | from __future__ import unicode_literals
import time
import requests
from tunigo.cache import Cache
from tunigo.genre import Genre, SubGenre
from tunigo.playlist import Playlist
from tunigo.release import Release
BASE_URL = 'https://api.tunigo.com/v3/space'
BASE_QUERY = 'locale=en&product=premium&version=6.38.31&platform=web'
class Tunigo(object):
def __init__(self, region='all', max_results=1000, cache_time=3600):
self._region = region
self._max_results = max_results
self._cache = Cache(cache_time)
def __repr__(self):
return "Tunigo(region='{}', max_results={}, cache_time={})".format(
self._region,
self._max_results,
self._cache._cache_time)
def _get(self, key, options=''):
uri = ('{}/{}?region={}&per_page={}&{}'
.format(BASE_URL, key, self._region,
self._max_results, BASE_QUERY))
if options:
uri = '{}&{}'.format(uri, options)
result = requests.get(uri)
if (result.status_code != 200 or
'application/json' not in result.headers['content-type']):
return []
return result.json()['items']
def get_playlists(self, key, options='', cache_key=''):
if not cache_key:
cache_key = 'playlists-{}-{}'.format(key, options)
cache_value = self._cache.get(cache_key)
if cache_value is not None:
return cache_value
else:
playlists = []
for item in self._get(key, options):
playlists.append(Playlist(item_array=item['playlist']))
self._cache.insert(cache_key, playlists)
return playlists
def get_featured_playlists(self):
return self.get_playlists('featured-playlists',
'dt={}'.format(time.strftime('%FT%H:01:00')),
'featured-playlists')
def get_top_lists(self):
return self.get_playlists('toplists')
def get_genres(self):
cache_key = 'genres'
cache_value = self._cache.get(cache_key)
if cache_value is not None:
return cache_value
else:
genres = []
for item in self._get('genres'):
if item['genre']['templateName'] != 'toplists':
genres.append(Genre(item_array=item['genre']))
self._cache.insert(cache_key, genres)
return genres
def get_genre_playlists(self, genre=None, sub_genre=None):
if type(genre) == Genre:
genre_key = genre.key
else:
genre_key = genre
if type(sub_genre) == SubGenre:
sub_genre_key = sub_genre.key
if not genre_key:
genre_key = sub_genre.main_genre.key
else:
sub_genre_key = sub_genre
if sub_genre_key and sub_genre_key != 'all':
options = 'filter={}'.format(sub_genre_key)
else:
options = ''
return self.get_playlists(genre_key, options)
def get_new_releases(self):
cache_key = 'releases'
cache_value = self._cache.get(cache_key)
if cache_value is not None:
return cache_value
else:
releases = []
for item in self._get('new-releases'):
releases.append(Release(item_array=item['release']))
self._cache.insert(cache_key, releases)
return releases
| from __future__ import unicode_literals
import time
import requests
from tunigo.cache import Cache
from tunigo.genre import Genre, SubGenre
from tunigo.playlist import Playlist
from tunigo.release import Release
BASE_URL = 'https://api.tunigo.com/v3/space'
BASE_QUERY = 'locale=en&product=premium&version=6.38.31&platform=web'
class Tunigo(object):
def __init__(self, region='all', max_results=1000, cache_time=3600):
self._region = region
self._max_results = max_results
self._cache = Cache(cache_time)
def __repr__(self):
return "Tunigo(region='{}', max_results={}, cache_time={})".format(
self._region,
self._max_results,
self._cache._cache_time)
def _get(self, key, options=''):
uri = ('{}/{}?region={}&per_page={}&{}'
.format(BASE_URL, key, self._region,
self._max_results, BASE_QUERY))
if options:
uri = '{}&{}'.format(uri, options)
result = requests.get(uri)
if (result.status_code != 200 or
'application/json' not in result.headers['content-type']):
return []
return result.json()['items']
def get_playlists(self, key, options='', cache_key=''):
if not cache_key:
cache_key = 'playlists-{}-{}'.format(key, options)
cache_value = self._cache.get(cache_key)
if cache_value:
return cache_value
else:
playlists = []
for item in self._get(key, options):
playlists.append(Playlist(item_array=item['playlist']))
self._cache.insert(cache_key, playlists)
return playlists
def get_featured_playlists(self):
return self.get_playlists('featured-playlists',
'dt={}'.format(time.strftime('%FT%H:01:00')),
'featured-playlists')
def get_top_lists(self):
return self.get_playlists('toplists')
def get_genres(self):
cache_key = 'genres'
cache_value = self._cache.get(cache_key)
if cache_value:
return cache_value
else:
genres = []
for item in self._get('genres'):
if item['genre']['templateName'] != 'toplists':
genres.append(Genre(item_array=item['genre']))
self._cache.insert(cache_key, genres)
return genres
def get_genre_playlists(self, genre=None, sub_genre=None):
if type(genre) == Genre:
genre_key = genre.key
else:
genre_key = genre
if type(sub_genre) == SubGenre:
sub_genre_key = sub_genre.key
if not genre_key:
genre_key = sub_genre.main_genre.key
else:
sub_genre_key = sub_genre
if sub_genre_key and sub_genre_key != 'all':
options = 'filter={}'.format(sub_genre_key)
else:
options = ''
return self.get_playlists(genre_key, options)
def get_new_releases(self):
cache_key = 'releases'
cache_value = self._cache.get(cache_key)
if cache_value:
return cache_value
else:
releases = []
for item in self._get('new-releases'):
releases.append(Release(item_array=item['release']))
self._cache.insert(cache_key, releases)
return releases
| Python | 0 |
a4cb47b928f6cd7f62780eb620e66a9494b17302 | Generate a full graph and a graph without age restricted content | generate_graph.py | generate_graph.py | from graphviz import Digraph
import math
import pymongo
def main():
client = pymongo.MongoClient()
db = client.reddit
related_subs = {}
subscribers = {}
adult = {}
subreddits = db.subreddits.find({'type': 'subreddit'})
if subreddits:
for subreddit in subreddits:
title = subreddit['_id']
links = subreddit['linked']
if 'subscribers' in subreddit:
subscribers[title] = subreddit['subscribers']
if 'adult' in subreddit:
adult[title] = True
related_subs[title] = links
generate_full_graph(related_subs, subscribers, adult, min_subscribers=0)
generate_censored_graph(related_subs, subscribers, adult, min_subscribers=100)
generate_adult_graph(related_subs, subscribers, adult, min_subscribers=100)
def generate_full_graph(related_subs, subscribers, adult, min_subscribers):
g = Digraph('G', filename='full.gv')
edges_added = 0
for key in related_subs:
for sub in related_subs[key]:
if not sub:
continue
# Filter: only include edge if sub has # subscribers
if sub in subscribers:
subscriber_cnt = subscribers[sub]
if subscriber_cnt >= min_subscribers:
log_cnt = math.log2(subscriber_cnt)
g.edge(key, sub, weight=str(log_cnt))
print("Edge count: " + str(edges_added))
edges_added += 1
g.save()
def generate_censored_graph(related_subs, subscribers, adult, min_subscribers):
g = Digraph('G', filename='censored.gv')
edges_added = 0
for key in related_subs:
for sub in related_subs[key]:
if not sub:
continue
# Filter: only include edge if sub has # subscribers
if sub in subscribers and not sub in adult:
subscriber_cnt = subscribers[sub]
if subscriber_cnt >= min_subscribers:
log_cnt = math.log2(subscriber_cnt)
g.edge(key, sub, weight=str(log_cnt))
print("Edge count: " + str(edges_added))
edges_added += 1
g.save()
def generate_adult_graph(related_subs, subscribers, adult, min_subscribers):
g = Digraph('G', filename='adult.gv')
edges_added = 0
for key in related_subs:
for sub in related_subs[key]:
if not sub:
continue
# Filter: only include edge if sub has # subscribers
if sub in subscribers and sub in adult:
subscriber_cnt = subscribers[sub]
if subscriber_cnt >= min_subscribers:
log_cnt = math.log2(subscriber_cnt)
g.edge(key, sub, weight=str(log_cnt))
print("Edge count: " + str(edges_added))
edges_added += 1
g.save()
if __name__ == '__main__':
main()
| from graphviz import Digraph
import math
import pymongo
def main():
client = pymongo.MongoClient()
db = client.reddit
related_subs = {}
subscribers = {}
adult = {}
subreddits = db.subreddits.find({'type': 'subreddit'})
if subreddits:
for subreddit in subreddits:
title = subreddit['_id']
links = subreddit['linked']
if 'subscribers' in subreddit:
subscribers[title] = subreddit['subscribers']
if 'adult' in subreddit:
adult[title] = True
related_subs[title] = links
generate_adult_graph(related_subs, subscribers, adult, min_subscribers=100)
def generate_adult_graph(related_subs, subscribers, adult, min_subscribers):
g = Digraph('G', filename='adult.gv')
edges_added = 0
for key in related_subs:
for sub in related_subs[key]:
if not sub:
continue
# Filter: only include edge if sub has # subscribers
if sub in subscribers and sub in adult:
subscriber_cnt = subscribers[sub]
if subscriber_cnt >= min_subscribers:
log_cnt = math.log2(subscriber_cnt)
g.edge(key, sub, weight=str(log_cnt))
print("Edge count: " + str(edges_added))
edges_added += 1
g.save()
if __name__ == '__main__':
main()
| Python | 0.998926 |
ad4b5ccf7c89fa67e69d065c47edaa9e18c009ee | add docstrings add if __name__ == '__main__': to make pydoc work | src/hal/user_comps/pyvcp.py | src/hal/user_comps/pyvcp.py | #!/usr/bin/env python
# This is a component of emc
# Copyright 2007 Anders Wallin <anders.wallin@helsinki.fi>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Python Virtual Control Panel for EMC
A virtual control panel (VCP) is used to display and control
HAL pins, which are either BIT or FLOAT valued.
Usage: pyvcp -c compname myfile.xml
compname is the name of the HAL component to be created.
The name of the HAL pins associated with the VCP will begin with 'compname.'
myfile.xml is an XML file which specifies the layout of the VCP.
Valid XML tags are described in the documentation for pyvcp_widgets.py
"""
import sys, os
BASE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
sys.path.insert(0, os.path.join(BASE, "lib", "python"))
import vcpparse
import hal
from Tkinter import Tk
import getopt
def usage():
""" prints the usage message """
print "Usage: pyvcp -c hal_component_name myfile.xml"
def main():
""" creates a HAL component.
calls vcpparse with the specified XML file.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "c:")
except getopt.GetoptError, detail:
print detail
usage()
sys.exit(1)
component_name = None
for o, a in opts:
if o == "-c":
component_name = a
if component_name is None:
usage()
sys.exit(1)
try:
filename=args[0]
except:
usage()
sys.exit(1)
pyvcp0 = Tk()
pyvcp0.title(component_name)
vcpparse.filename=filename
pycomp=vcpparse.create_vcp(compname=component_name, master=pyvcp0)
pycomp.ready()
try:
pyvcp0.mainloop()
except KeyboardInterrupt:
sys.exit(0)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# This is a component of emc
# Copyright 2007 Anders Wallin <anders.wallin@helsinki.fi>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, os
BASE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
sys.path.insert(0, os.path.join(BASE, "lib", "python"))
import vcpparse
import hal
from Tkinter import Tk
import getopt
def usage():
print "Usage: pyvcp -c hal_component_name myfile.xml"
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "c:")
except getopt.GetoptError, detail:
print detail
usage()
sys.exit(1)
#try:
# opts, args = getopt.getopt(sys.argv[1:], "c:")
#except getopt.GetoptError:
# print "usage: pyvcp -c hal_component_name myfile.xml"
# sys.exit(0)
#print opts
#print args
component_name = None
for o, a in opts:
if o == "-c":
component_name = a
if component_name is None:
usage()
sys.exit(1)
try:
filename=args[0]
except:
usage()
sys.exit(1)
#try:
# filename=sys.argv[1]
#except:
# print "Error: No XML file specified!"
# sys.exit()
pyvcp0 = Tk()
pyvcp0.title(component_name)
vcpparse.filename=filename
pycomp=vcpparse.create_vcp(compname=component_name, master=pyvcp0)
pycomp.ready()
try:
pyvcp0.mainloop()
except KeyboardInterrupt:
sys.exit(0)
main()
| Python | 0.000254 |
214b1882a0eaf00bdd5dedbb02a28bba7f8d247b | update version to 1.1.7 | cartoview/__init__.py | cartoview/__init__.py | __version__ = (1, 1, 7, 'alpha', 0)
| __version__ = (1, 1, 5, 'alpha', 0)
| Python | 0 |
74a25caa15d0ab32d83355bc90cc415f5ff8cd1b | Remove unused imports. | exp/viroscopy/model/HIVEpidemicModelABC.py | exp/viroscopy/model/HIVEpidemicModelABC.py | """
A script to estimate the HIV epidemic model parameters using ABC.
"""
from apgl.util import *
from exp.viroscopy.model.HIVGraph import HIVGraph
from exp.viroscopy.model.HIVABCParameters import HIVABCParameters
from exp.viroscopy.model.HIVEpidemicModel import HIVEpidemicModel
from exp.viroscopy.model.HIVRates import HIVRates
from exp.viroscopy.model.HIVModelUtils import HIVModelUtils
from exp.viroscopy.model.HIVGraphMetrics import HIVGraphMetrics2
from apgl.predictors.ABCSMC import ABCSMC
import logging
import sys
import numpy
import multiprocessing
FORMAT = "%(levelname)s:root:%(process)d:%(message)s"
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=FORMAT)
numpy.set_printoptions(suppress=True, precision=4, linewidth=100)
numpy.seterr(invalid='raise')
#First try the experiment on some toy data
resultsDir = PathDefaults.getOutputDir() + "viroscopy/toy/"
graphFile = resultsDir + "ToyEpidemicGraph0"
targetGraph = HIVGraph.load(graphFile)
numTimeSteps = 10
T, recordStep, printStep, M = HIVModelUtils.defaultSimulationParams()
times = numpy.linspace(0, T, numTimeSteps)
graphMetrics = HIVGraphMetrics2(times)
realSummary = graphMetrics.summary(targetGraph)
epsilonArray = numpy.array([0.8, 0.6, 0.5])*numTimeSteps
def breakFunc(graph, currentTime):
return graphMetrics.shouldBreak(realSummary, graph, epsilonArray[0], currentTime)
def createModel(t):
"""
The parameter t is the particle index.
"""
undirected = True
T, recordStep, printStep, M = HIVModelUtils.defaultSimulationParams()
graph = HIVGraph(M, undirected)
alpha = 2
zeroVal = 0.9
p = Util.powerLawProbs(alpha, zeroVal)
hiddenDegSeq = Util.randomChoice(p, graph.getNumVertices())
rates = HIVRates(graph, hiddenDegSeq)
model = HIVEpidemicModel(graph, rates, T)
model.setRecordStep(recordStep)
model.setPrintStep(printStep)
model.setBreakFunction(breakFunc)
return model
if len(sys.argv) > 1:
numProcesses = int(sys.argv[1])
else:
numProcesses = multiprocessing.cpu_count()
posteriorSampleSize = 20
thetaLen = 10
logging.debug("Posterior sample size " + str(posteriorSampleSize))
meanTheta = HIVModelUtils.defaultTheta()
abcParams = HIVABCParameters(meanTheta, 0.5, 0.2)
abcSMC = ABCSMC(epsilonArray, realSummary, createModel, abcParams, graphMetrics)
abcSMC.setPosteriorSampleSize(posteriorSampleSize)
thetasArray = abcSMC.run()
meanTheta = numpy.mean(thetasArray, 0)
stdTheta = numpy.std(thetasArray, 0)
logging.debug(thetasArray)
logging.debug("meanTheta=" + str(meanTheta))
logging.debug("stdTheta=" + str(stdTheta))
logging.debug("realTheta=" + str(HIVModelUtils.defaultTheta()))
thetaFileName = resultsDir + "ThetaDistSimulated.pkl"
Util.savePickle(thetasArray, thetaFileName)
| """
A script to estimate the HIV epidemic model parameters using ABC.
"""
from apgl.graph.SparseGraph import SparseGraph
from apgl.graph.GraphStatistics import GraphStatistics
from apgl.util import *
from exp.viroscopy.model.HIVGraph import HIVGraph
from exp.viroscopy.model.HIVABCParameters import HIVABCParameters
from exp.viroscopy.model.HIVEpidemicModel import HIVEpidemicModel
from exp.viroscopy.model.HIVRates import HIVRates
from exp.viroscopy.model.HIVModelUtils import HIVModelUtils
from exp.viroscopy.model.HIVGraphMetrics import HIVGraphMetrics2
from apgl.predictors.ABCSMC import ABCSMC
from apgl.util.ProfileUtils import ProfileUtils
import logging
import sys
import numpy
import multiprocessing
import scipy.stats
FORMAT = "%(levelname)s:root:%(process)d:%(message)s"
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=FORMAT)
numpy.set_printoptions(suppress=True, precision=4, linewidth=100)
numpy.seterr(invalid='raise')
#First try the experiment on some toy data
resultsDir = PathDefaults.getOutputDir() + "viroscopy/toy/"
graphFile = resultsDir + "ToyEpidemicGraph0"
targetGraph = HIVGraph.load(graphFile)
numTimeSteps = 10
T, recordStep, printStep, M = HIVModelUtils.defaultSimulationParams()
times = numpy.linspace(0, T, numTimeSteps)
graphMetrics = HIVGraphMetrics2(times)
realSummary = graphMetrics.summary(targetGraph)
epsilonArray = numpy.array([0.8, 0.6, 0.5])*numTimeSteps
def breakFunc(graph, currentTime):
return graphMetrics.shouldBreak(realSummary, graph, epsilonArray[0], currentTime)
def createModel(t):
"""
The parameter t is the particle index.
"""
undirected = True
T, recordStep, printStep, M = HIVModelUtils.defaultSimulationParams()
graph = HIVGraph(M, undirected)
alpha = 2
zeroVal = 0.9
p = Util.powerLawProbs(alpha, zeroVal)
hiddenDegSeq = Util.randomChoice(p, graph.getNumVertices())
rates = HIVRates(graph, hiddenDegSeq)
model = HIVEpidemicModel(graph, rates, T)
model.setRecordStep(recordStep)
model.setPrintStep(printStep)
model.setBreakFunction(breakFunc)
return model
if len(sys.argv) > 1:
numProcesses = int(sys.argv[1])
else:
numProcesses = multiprocessing.cpu_count()
posteriorSampleSize = 20
thetaLen = 10
logging.debug("Posterior sample size " + str(posteriorSampleSize))
meanTheta = HIVModelUtils.defaultTheta()
abcParams = HIVABCParameters(meanTheta, 0.5, 0.2)
abcSMC = ABCSMC(epsilonArray, realSummary, createModel, abcParams, graphMetrics)
abcSMC.setPosteriorSampleSize(posteriorSampleSize)
thetasArray = abcSMC.run()
meanTheta = numpy.mean(thetasArray, 0)
stdTheta = numpy.std(thetasArray, 0)
logging.debug(thetasArray)
logging.debug("meanTheta=" + str(meanTheta))
logging.debug("stdTheta=" + str(stdTheta))
logging.debug("realTheta=" + str(HIVModelUtils.defaultTheta()))
thetaFileName = resultsDir + "ThetaDistSimulated.pkl"
Util.savePickle(thetasArray, thetaFileName)
| Python | 0 |
eb5ab4abdc18f56ac21524225d5c1168ece35def | allow for category based link | generic/models.py | generic/models.py | from django.core.urlresolvers import reverse, Resolver404
from django.db import models
from preferences.models import Preferences
from snippetscream import resolve_to_name
class Link(models.Model):
title = models.CharField(
max_length=256,
help_text='A short descriptive title.',
)
view_name = models.CharField(
max_length=256,
help_text="View name to which this link will redirect. This takes \
precedence over Category and URL fields below.",
blank=True,
null=True,
)
category = models.ForeignKey(
'category.Category',
help_text="Category to which this link will redirect. This takes \
precedence over URL field below.",
blank=True,
null=True,
)
url = models.CharField(
max_length=256,
help_text='URL to which this menu link will redirect.',
blank=True,
null=True,
)
def get_absolute_url(self):
"""
Returns URL to which link should redirect based on a reversed view name
category or explicitly provided URL in that order of precedence.
"""
if self.view_name:
return reverse(self.view_name)
elif self.category:
return self.category.get_absolute_url()
else:
return self.url
def is_active(self, request):
"""
Determines whether or not the link can be consider active based on the
request path. True if the request path can be resolved to the same view
name as is contained in view_name field. Otherwise True if request path
starts with URL as resolved for category contained in category field.
Otherwise True if request path starts with URL as contained in url
field (needs some work).
"""
try:
pattern_name = resolve_to_name(request.path_info)
except Resolver404:
pattern_name = None
active = False
if pattern_name:
active = pattern_name == self.view_name
if not active and self.category:
active = request.path_info.startswith(self.category.get_absolute_url())
if not active and self.url:
active = request.path_info.startswith(self.url)
return active
def __unicode__(self):
return self.title
class MenuPreferences(Preferences):
__module__ = 'preferences.models'
links = models.ManyToManyField(Link, through='generic.MenuLinkPosition')
class NavbarPreferences(Preferences):
__module__ = 'preferences.models'
links = models.ManyToManyField(Link, through='generic.NavbarLinkPosition')
class LinkPosition(models.Model):
link = models.ForeignKey(Link)
position = models.IntegerField()
class Meta():
abstract = True
ordering = ('position',)
def __unicode__(self):
return "Link titled %s in position %s." % (self.link.title, \
self.position)
class MenuLinkPosition(LinkPosition):
preferences = models.ForeignKey(MenuPreferences)
class NavbarLinkPosition(LinkPosition):
preferences = models.ForeignKey(NavbarPreferences)
| from django.core.urlresolvers import reverse, Resolver404
from django.db import models
from preferences.models import Preferences
from snippetscream import resolve_to_name
class Link(models.Model):
title = models.CharField(
max_length=256,
help_text='A short descriptive title.',
)
view_name = models.CharField(
max_length=256,
help_text="View name to which this link will redirect. This takes \
precedence over url field below.",
blank=True,
null=True,
)
url = models.CharField(
max_length=256,
help_text='URL to which this menu link will redirect.',
blank=True,
null=True,
)
def get_absolute_url(self):
"""
Returns url to which link should redirect based on a reversed view name
or otherwise explicitly provided url.
"""
if self.view_name:
return reverse(self.view_name)
else:
return self.url
def is_active(self, request):
"""
Determines whether or not the link can be consider active based on the
request path. True if the request path can be resolved to the same view
name as is contained in view_name field. Otherwise True if request path
starts with url as contained in url field (needs some work).
"""
try:
pattern_name = resolve_to_name(request.path_info)
except Resolver404:
pattern_name = None
active = False
if pattern_name:
active = pattern_name == self.view_name
if not active and self.url:
active = request.path_info.startswith(self.url)
return active
def __unicode__(self):
return self.title
class MenuPreferences(Preferences):
__module__ = 'preferences.models'
links = models.ManyToManyField(Link, through='generic.MenuLinkPosition')
class NavbarPreferences(Preferences):
__module__ = 'preferences.models'
links = models.ManyToManyField(Link, through='generic.NavbarLinkPosition')
class LinkPosition(models.Model):
link = models.ForeignKey(Link)
position = models.IntegerField()
class Meta():
abstract = True
ordering = ('position',)
def __unicode__(self):
return "Link titled %s in position %s." % (self.link.title, \
self.position)
class MenuLinkPosition(LinkPosition):
preferences = models.ForeignKey(MenuPreferences)
class NavbarLinkPosition(LinkPosition):
preferences = models.ForeignKey(NavbarPreferences)
| Python | 0 |
455e1fe93b612c7049059cf217652862c995fe97 | Replace dict(<list_comprehension>) pattern with dict comprehension | import_export/instance_loaders.py | import_export/instance_loaders.py | from __future__ import unicode_literals
class BaseInstanceLoader(object):
"""
Base abstract implementation of instance loader.
"""
def __init__(self, resource, dataset=None):
self.resource = resource
self.dataset = dataset
def get_instance(self, row):
raise NotImplementedError
class ModelInstanceLoader(BaseInstanceLoader):
"""
Instance loader for Django model.
Lookup for model instance by ``import_id_fields``.
"""
def get_queryset(self):
return self.resource._meta.model.objects.all()
def get_instance(self, row):
try:
params = {}
for key in self.resource.get_import_id_fields():
field = self.resource.fields[key]
params[field.attribute] = field.clean(row)
return self.get_queryset().get(**params)
except self.resource._meta.model.DoesNotExist:
return None
class CachedInstanceLoader(ModelInstanceLoader):
"""
Loads all possible model instances in dataset avoid hitting database for
every ``get_instance`` call.
This instance loader work only when there is one ``import_id_fields``
field.
"""
def __init__(self, *args, **kwargs):
super(CachedInstanceLoader, self).__init__(*args, **kwargs)
pk_field_name = self.resource.get_import_id_fields()[0]
self.pk_field = self.resource.fields[pk_field_name]
ids = [self.pk_field.clean(row) for row in self.dataset.dict]
qs = self.get_queryset().filter(**{
"%s__in" % self.pk_field.attribute: ids
})
self.all_instances = {
self.pk_field.get_value(instance): instance
for instance in qs
}
def get_instance(self, row):
return self.all_instances.get(self.pk_field.clean(row))
| from __future__ import unicode_literals
class BaseInstanceLoader(object):
"""
Base abstract implementation of instance loader.
"""
def __init__(self, resource, dataset=None):
self.resource = resource
self.dataset = dataset
def get_instance(self, row):
raise NotImplementedError
class ModelInstanceLoader(BaseInstanceLoader):
"""
Instance loader for Django model.
Lookup for model instance by ``import_id_fields``.
"""
def get_queryset(self):
return self.resource._meta.model.objects.all()
def get_instance(self, row):
try:
params = {}
for key in self.resource.get_import_id_fields():
field = self.resource.fields[key]
params[field.attribute] = field.clean(row)
return self.get_queryset().get(**params)
except self.resource._meta.model.DoesNotExist:
return None
class CachedInstanceLoader(ModelInstanceLoader):
"""
Loads all possible model instances in dataset avoid hitting database for
every ``get_instance`` call.
This instance loader work only when there is one ``import_id_fields``
field.
"""
def __init__(self, *args, **kwargs):
super(CachedInstanceLoader, self).__init__(*args, **kwargs)
pk_field_name = self.resource.get_import_id_fields()[0]
self.pk_field = self.resource.fields[pk_field_name]
ids = [self.pk_field.clean(row) for row in self.dataset.dict]
qs = self.get_queryset().filter(**{
"%s__in" % self.pk_field.attribute: ids
})
self.all_instances = dict([
(self.pk_field.get_value(instance), instance)
for instance in qs])
def get_instance(self, row):
return self.all_instances.get(self.pk_field.clean(row))
| Python | 0.000003 |
ac8c1b6849c490c776636e3771e80344e6b0fb2e | Update github3.search.user for consistency | github3/search/user.py | github3/search/user.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .. import users
from ..models import GitHubCore
class UserSearchResult(GitHubCore):
"""Representation of a search result for a user.
This object has the following attributes:
.. attribute:: score
The confidence score of this result.
.. attribute:: text_matches
If present, a list of text strings that match the search string.
.. attribute:: user
A :class:`~github3.users.ShortUser` representing the user found
in this search result.
"""
def _update_attributes(self, data):
result = data.copy()
self.score = result.pop('score')
self.text_matches = result.pop('text_matches', [])
self.user = users.ShortUser(result, self)
def _repr(self):
return '<UserSearchResult [{0}]>'.format(self.user)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .. import users
from ..models import GitHubCore
class UserSearchResult(GitHubCore):
def _update_attributes(self, data):
result = data.copy()
#: Score of the result
self.score = self._get_attribute(result, 'score')
if 'score' in result:
del result['score']
#: Text matches
self.text_matches = self._get_attribute(result, 'text_matches', [])
if 'text_matches' in result:
del result['text_matches']
#: User object matching the search
self.user = users.ShortUser(result, self)
def _repr(self):
return '<UserSearchResult [{0}]>'.format(self.user)
| Python | 0 |
b15f401fe270b69e46fb3009c4d55c917736fb27 | Bump version | guild/__init__.py | guild/__init__.py | # Copyright 2017 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import os
import subprocess
__version__ = "0.3.0.dev8"
__requires__ = [
# (<required module>, <distutils package req>)
("pip", "pip"),
("yaml", "PyYAML"),
("setuptools", "setuptools"),
("tabview", "tabview"),
("twine", "twine"),
("werkzeug", "Werkzeug"),
("whoosh", "Whoosh"),
]
__pkgdir__ = os.path.dirname(os.path.dirname(__file__))
def _try_init_git_attrs():
try:
_init_git_commit()
except (OSError, subprocess.CalledProcessError):
pass
else:
try:
_init_git_status()
except (OSError, subprocess.CalledProcessError):
pass
def _init_git_commit():
commit = _git_cmd("git -C \"%(repo)s\" log -1 --oneline | cut -d' ' -f1")
globals()["__git_commit__"] = commit
def _init_git_status():
raw = _git_cmd("git -C \"%(repo)s\" status -s")
globals()["__git_status__"] = raw.split("\n") if raw else []
def _git_cmd(cmd, **kw):
repo = os.path.dirname(__file__)
cmd = cmd % dict(repo=repo, **kw)
null = open(os.devnull, "w")
out = subprocess.check_output(cmd, stderr=null, shell=True)
return out.decode("utf-8").strip()
def version():
git_commit = globals().get("__git_commit__")
if git_commit:
git_status = globals().get("__git_status__", [])
workspace_changed_marker = "*" if git_status else ""
return "%s (dev %s%s)" % (__version__, git_commit,
workspace_changed_marker)
else:
return __version__
_try_init_git_attrs()
| # Copyright 2017 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import os
import subprocess
__version__ = "0.3.0.dev7"
__requires__ = [
# (<required module>, <distutils package req>)
("pip", "pip"),
("yaml", "PyYAML"),
("setuptools", "setuptools"),
("tabview", "tabview"),
("twine", "twine"),
("werkzeug", "Werkzeug"),
("whoosh", "Whoosh"),
]
__pkgdir__ = os.path.dirname(os.path.dirname(__file__))
def _try_init_git_attrs():
try:
_init_git_commit()
except (OSError, subprocess.CalledProcessError):
pass
else:
try:
_init_git_status()
except (OSError, subprocess.CalledProcessError):
pass
def _init_git_commit():
commit = _git_cmd("git -C \"%(repo)s\" log -1 --oneline | cut -d' ' -f1")
globals()["__git_commit__"] = commit
def _init_git_status():
raw = _git_cmd("git -C \"%(repo)s\" status -s")
globals()["__git_status__"] = raw.split("\n") if raw else []
def _git_cmd(cmd, **kw):
repo = os.path.dirname(__file__)
cmd = cmd % dict(repo=repo, **kw)
null = open(os.devnull, "w")
out = subprocess.check_output(cmd, stderr=null, shell=True)
return out.decode("utf-8").strip()
def version():
git_commit = globals().get("__git_commit__")
if git_commit:
git_status = globals().get("__git_status__", [])
workspace_changed_marker = "*" if git_status else ""
return "%s (dev %s%s)" % (__version__, git_commit,
workspace_changed_marker)
else:
return __version__
_try_init_git_attrs()
| Python | 0 |
156817ee4e11c6a363511d915a9ea5cf96e41fb5 | add statistics tracking | benchbuild/experiments/mse.py | benchbuild/experiments/mse.py | """
Test Maximal Static Expansion.
This tests the maximal static expansion implementation by
Nicholas Bonfante (implemented in LLVM/Polly).
"""
from benchbuild.extensions import ExtractCompileStats
from benchbuild.experiment import RuntimeExperiment
from benchbuild.extensions import RunWithTime, RuntimeExtension
from benchbuild.settings import CFG
class PollyMSE(RuntimeExperiment):
"""The polly experiment."""
NAME = "polly-mse"
def actions_for_project(self, project):
"""Compile & Run the experiment with -O3 enabled."""
project.cflags = [
"-O3",
"-fno-omit-frame-pointer",
"-mllvm", "-polly",
"-mllvm", "-polly-enable-mse",
"-mllvm", "-polly-process-unprofitable",
"-mllvm", "-polly-optree-analyze-known=0",
"-mllvm", "-polly-enable-delicm=0",
]
project.compiler_extension = ExtractCompileStats(project, self)
project.runtime_extension = \
RunWithTime(
RuntimeExtension(project, self,
{'jobs': int(CFG["jobs"].value())}))
return self.default_runtime_actions(project)
| """
Test Maximal Static Expansion.
This tests the maximal static expansion implementation by
Nicholas Bonfante (implemented in LLVM/Polly).
"""
from benchbuild.experiment import RuntimeExperiment
from benchbuild.extensions import RunWithTime, RuntimeExtension
from benchbuild.settings import CFG
class PollyMSE(RuntimeExperiment):
"""The polly experiment."""
NAME = "polly-mse"
def actions_for_project(self, project):
"""Compile & Run the experiment with -O3 enabled."""
project.cflags = [
"-O3",
"-fno-omit-frame-pointer",
"-mllvm", "-stats",
"-mllvm", "-polly",
"-mllvm", "-polly-enable-mse",
"-mllvm", "-polly-process-unprofitable",
"-mllvm", "-polly-optree-analyze-known=0",
"-mllvm", "-polly-enable-delicm=0",
]
project.runtime_extension = \
RunWithTime(
RuntimeExtension(project, self,
{'jobs': int(CFG["jobs"].value())}))
return self.default_runtime_actions(project)
| Python | 0.000005 |
8a178f1249b968e315b8492ed15c033aca119033 | Reset closed site property | bluebottle/clients/tests/test_api.py | bluebottle/clients/tests/test_api.py | from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from rest_framework import status
from bluebottle.clients import properties
from bluebottle.test.utils import BluebottleTestCase
class ClientSettingsTestCase(BluebottleTestCase):
def setUp(self):
super(ClientSettingsTestCase, self).setUp()
self.settings_url = reverse('settings')
@override_settings(CLOSED_SITE=False, TOP_SECRET="*****",EXPOSED_TENANT_PROPERTIES=['closed_site'])
def test_settings_show(self):
# Check that exposed property is in settings api, and other settings are not shown
response = self.client.get(self.settings_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['closedSite'], False)
self.assertNotIn('topSecret', response.data)
# Check that exposed setting gets overwritten by client property
setattr(properties, 'CLOSED_SITE', True)
response = self.client.get(self.settings_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['closedSite'], True)
# Check that previously hidden setting can be exposed
setattr(properties, 'EXPOSED_TENANT_PROPERTIES', ['closed_site', 'top_secret'])
response = self.client.get(self.settings_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('topSecret', response.data)
setattr(properties, 'CLOSED_SITE', False)
| from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from rest_framework import status
from bluebottle.clients import properties
from bluebottle.test.utils import BluebottleTestCase
class ClientSettingsTestCase(BluebottleTestCase):
def setUp(self):
super(ClientSettingsTestCase, self).setUp()
self.settings_url = reverse('settings')
@override_settings(CLOSED_SITE=False, TOP_SECRET="*****",EXPOSED_TENANT_PROPERTIES=['closed_site'])
def test_settings_show(self):
# Check that exposed property is in settings api, and other settings are not shown
response = self.client.get(self.settings_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['closedSite'], False)
self.assertNotIn('topSecret', response.data)
# Check that exposed setting gets overwritten by client property
setattr(properties, 'CLOSED_SITE', True)
response = self.client.get(self.settings_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['closedSite'], True)
# Check that previously hidden setting can be exposed
setattr(properties, 'EXPOSED_TENANT_PROPERTIES', ['closed_site', 'top_secret'])
response = self.client.get(self.settings_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('topSecret', response.data)
| Python | 0 |
e469163c5b103483b294f9c1f37c918177e7dbce | Make prsync.py really useful | admin/prsync.py | admin/prsync.py | #!/usr/bin/env python
import os
import sys
import subprocess
import urlparse
import pygithub3
try:
import angr
angr_dir = os.path.realpath(os.path.join(os.path.dirname(angr.__file__), '../..'))
except ImportError:
print 'Please run this script in the angr virtualenv!'
sys.exit(1)
def main(branch_name=None, do_push=False):
print 'Enter the urls of the pull requests, separated by newlines. EOF to finish:'
urls = sys.stdin.read().strip().split('\n')
if len(urls) == 0:
sys.exit(0)
prs = []
gh = pygithub3.Github()
for url in urls:
try:
path = urlparse.urlparse(url).path
pathkeys = path.split('/')
prs.append(gh.pull_requests.get(int(pathkeys[4]), pathkeys[1], pathkeys[2]))
assert pathkeys[3] == 'pull'
except Exception: # pylint: disable=broad-except
print url, 'is not a github pull request url'
import ipdb; ipdb.set_trace()
sys.exit(1)
if branch_name is None:
branch_name = 'pr/%s-%d' % (prs[0].head['label'].replace(':','/'), prs[0].number)
for pr in prs:
repo_path = os.path.join(angr_dir, pr.base['repo']['name'])
print '\x1b[32;1m$', 'git', 'checkout', '-B', branch_name, 'master', '\x1b[0m'
subprocess.call(['git', 'checkout', '-B', branch_name, 'master'], cwd=repo_path)
print '\x1b[32;1m$', 'git', 'pull', pr.head['repo']['git_url'], pr.head['ref'], '\x1b[0m'
subprocess.call(['git', 'pull', pr.head['repo']['git_url'], pr.head['ref']], cwd=repo_path)
if do_push:
print '\x1b[32;1m$', 'git', 'push', '-f', '-u', 'origin', branch_name, '\x1b[0m'
subprocess.call(['git', 'push', '-f', '-u', 'origin', branch_name], cwd=repo_path)
repolist = ' '.join(pr.base['repo']['name'] for pr in prs)
print
print '\x1b[33;1mTo merge this pull request, run the following commands:\x1b[0m'
print 'REPOS=%s ./git_all.sh checkout master' % repolist
print 'REPOS=%s ./git_all.sh merge %s' % (repolist, branch_name)
print 'REPOS=%s ./git_all.sh push' % repolist
print 'REPOS=%s ./git_all.sh branch -D %s' % (repolist, branch_name)
if __name__ == '__main__':
s_do_push = False
if '-p' in sys.argv:
s_do_push = True
sys.argv.remove('-n')
if len(sys.argv) > 1:
main(sys.argv[1], do_push=s_do_push)
else:
main(do_push=s_do_push)
| #!/usr/bin/env python
import os
import sys
import subprocess
import urlparse
import pygithub3
try:
import angr
angr_dir = os.path.realpath(os.path.join(os.path.dirname(angr.__file__), '../..'))
except ImportError:
print 'Please run this script in the angr virtualenv!'
sys.exit(1)
def main(branch_name=None):
print 'Enter the urls of the pull requests, separated by newlines. EOF to finish:'
urls = sys.stdin.read().strip().split('\n')
if len(urls) == 0:
sys.exit(0)
prs = []
gh = pygithub3.Github()
for url in urls:
try:
path = urlparse.urlparse(url).path
pathkeys = path.split('/')
prs.append(gh.pull_requests.get(int(pathkeys[4]), pathkeys[1], pathkeys[2]))
assert pathkeys[3] == 'pull'
except Exception: # pylint: disable=broad-except
print url, 'is not a github pull request url'
import ipdb; ipdb.set_trace()
sys.exit(1)
if branch_name is None:
branch_name = 'pr/' + prs[0].head['label'].replace(':','/')
for pr in prs:
repo_path = os.path.join(angr_dir, pr.base['repo']['name'])
print '\x1b[32;1m$', 'git', 'checkout', '-B', branch_name, 'master', '\x1b[0m'
subprocess.call(['git', 'checkout', '-B', branch_name, 'master'], cwd=repo_path)
print '\x1b[32;1m$', 'git', 'pull', pr.head['repo']['git_url'], pr.head['ref'], '\x1b[0m'
subprocess.call(['git', 'pull', pr.head['repo']['git_url'], pr.head['ref']], cwd=repo_path)
print '\x1b[32;1m$', 'git', 'push', '-f', '-u', 'origin', branch_name, '\x1b[0m'
subprocess.call(['git', 'push', '-f', '-u', 'origin', branch_name], cwd=repo_path)
if __name__ == '__main__':
if len(sys.argv) > 1:
main(sys.argv[1])
else:
main()
| Python | 0.000006 |
9f32fee9da5ccffec9a86f62ab9a55625eb65ff7 | Fix menu user input | admin/wakeup.py | admin/wakeup.py | #!/bin/env python3
from pathlib import Path
import subprocess
import logging
import sys
logger = logging.getLogger(__name__)
wol_path = "~/.config/wol.cfg"
# TODO query list from database when available
def main():
global wol_path
wol_path = Path(wol_path).expanduser()
# iterate wake on lan list, wollist
menu = generate_menulist(wol_path)
while True:
user_choice = ""
display_menu(menu)
try:
choice = input("Your choice: ")
user_choice = int(choice) - 1
if check_in_bounds(user_choice, menu):
break
else:
logger.info("Choose a number from the menu.")
except (KeyboardInterrupt, EOFError):
logger.error("\nbye")
sys.exit(0)
except (ValueError, TypeError):
logger.error("Input is not a number.")
hostname, hwadress = menu[user_choice]
subprocess.run(["wol", hwadress])
def display_menu(menu):
for i, item in enumerate(menu):
print("{} - {}".format((i+1),item))
def check_in_bounds(choice, l):
length = len(l)
if choice < length and choice >= 0:
return True
else:
return False
def generate_menulist(path):
menu = list()
with path.open() as wollist:
for record in wollist:
menu.append(tuple(record.strip().split(" ")))
return menu
def usage():
pass
if __name__ == "__main__":
main()
| #!/bin/env python3
from pathlib import Path
import subprocess
def main():
main.path = Path(".config/wol.cfg")
main.path = main.path.home().joinpath(main.path)
# iterate wake on lan list, wollist
menu = generate_menulist(main.path)
if display_menu(menu):
hostname, hwadress = menu[main.user_choice]
subprocess.run(["wol", hwadress])
def display_menu(menu):
for i, item in enumerate(menu):
print("{} - {}".format((i+1),item))
try:
choice = input("Your choice: ")
main.user_choice = int(choice) - 1
except KeyboardInterrupt:
print()
return False
if check_in_bounds(main.user_choice, menu):
return True
else:
print("{:-^80}".format("Invalid choice"))
display_menu(menu)
def check_in_bounds(choice, l):
length = len(l)
if choice < length and choice >= 0:
return True
else:
return False
def generate_menulist(path):
menu = list()
with path.open() as wollist:
for record in wollist:
menu.append(tuple(record.strip().split(" ")))
return menu
def usage():
pass
if __name__ == "__main__":
main()
| Python | 0.002013 |
3a3a8217bd5ff63c77eb4d386bda042cfd7a1196 | delete the depency to sale_order_extend | sale_order_report/__openerp__.py | sale_order_report/__openerp__.py | # -*- coding: utf-8 -*-
# © 2016 ClearCorp
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Sale Order Report',
'summary': 'Sale order report in Qweb',
'version': '8.0.1.0',
'category': 'Sales',
'website': 'http://clearcorp.cr',
'author': 'ClearCorp',
'license': 'AGPL-3',
'sequence': 10,
'application': False,
'installable': True,
'auto_install': False,
"depends": [
'sale_order_discount',
'base_reporting',
],
"data": [
'data/report.paperformat.xml',
'data/sale_report.xml',
'views/report_sale_order.xml',
'views/report_sale_order_layout.xml',
'views/report_sale_order_layout_header.xml',
'views/report_sale_order_layout_footer.xml',
],
}
| # -*- coding: utf-8 -*-
# © 2016 ClearCorp
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Sale Order Report',
'summary': 'Sale order report in Qweb',
'version': '8.0.1.0',
'category': 'Sales',
'website': 'http://clearcorp.cr',
'author': 'ClearCorp',
'license': 'AGPL-3',
'sequence': 10,
'application': False,
'installable': True,
'auto_install': False,
"depends": [
'sale_order_discount',
'sale_order_extended',
'base_reporting',
],
"data": [
'data/report.paperformat.xml',
'data/sale_report.xml',
'views/report_sale_order.xml',
'views/report_sale_order_layout.xml',
'views/report_sale_order_layout_header.xml',
'views/report_sale_order_layout_footer.xml',
],
}
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.