commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
788cc159e4d734b972e22ccf06dbcd8ed8f94885 | Update DictStack implementation from jaraco.collections 3.5.1 | distutils/_collections.py | distutils/_collections.py | import collections
import itertools
# from jaraco.collections 3.5.1
class DictStack(list, collections.abc.Mapping):
"""
A stack of dictionaries that behaves as a view on those dictionaries,
giving preference to the last.
>>> stack = DictStack([dict(a=1, c=2), dict(b=2, a=2)])
>>> stack['a']
2
>>> stack['b']
2
>>> stack['c']
2
>>> len(stack)
3
>>> stack.push(dict(a=3))
>>> stack['a']
3
>>> set(stack.keys()) == set(['a', 'b', 'c'])
True
>>> set(stack.items()) == set([('a', 3), ('b', 2), ('c', 2)])
True
>>> dict(**stack) == dict(stack) == dict(a=3, c=2, b=2)
True
>>> d = stack.pop()
>>> stack['a']
2
>>> d = stack.pop()
>>> stack['a']
1
>>> stack.get('b', None)
>>> 'c' in stack
True
"""
def __iter__(self):
dicts = list.__iter__(self)
return iter(set(itertools.chain.from_iterable(c.keys() for c in dicts)))
def __getitem__(self, key):
for scope in reversed(tuple(list.__iter__(self))):
if key in scope:
return scope[key]
raise KeyError(key)
push = list.append
def __contains__(self, other):
return collections.abc.Mapping.__contains__(self, other)
def __len__(self):
return len(list(iter(self)))
| import collections
import itertools
# from jaraco.collections 3.5
class DictStack(list, collections.abc.Mapping):
"""
A stack of dictionaries that behaves as a view on those dictionaries,
giving preference to the last.
>>> stack = DictStack([dict(a=1, c=2), dict(b=2, a=2)])
>>> stack['a']
2
>>> stack['b']
2
>>> stack['c']
2
>>> stack.push(dict(a=3))
>>> stack['a']
3
>>> set(stack.keys()) == set(['a', 'b', 'c'])
True
>>> set(stack.items()) == set([('a', 3), ('b', 2), ('c', 2)])
True
>>> dict(**stack) == dict(stack) == dict(a=3, c=2, b=2)
True
>>> d = stack.pop()
>>> stack['a']
2
>>> d = stack.pop()
>>> stack['a']
1
>>> stack.get('b', None)
>>> 'c' in stack
True
"""
def __iter__(self):
dicts = list.__iter__(self)
return iter(set(itertools.chain.from_iterable(c.keys() for c in dicts)))
def __getitem__(self, key):
for scope in reversed(self):
if key in scope:
return scope[key]
raise KeyError(key)
push = list.append
def __contains__(self, other):
return collections.abc.Mapping.__contains__(self, other)
| Python | 0 |
3ff2ecfd26097b37832a397a43db6121a0bc3627 | Remove superfluous comment. | djadyen/management/commands/adyen_maintenance.py | djadyen/management/commands/adyen_maintenance.py | from datetime import timedelta
from django.apps import apps
from django.core.management.base import BaseCommand
from django.utils import timezone
from djadyen import settings
from djadyen.choices import Status
from djadyen.models import AdyenNotification
class Command(BaseCommand):
help = "Process the adyen notifications that are not processed yet."
def handle(self, *args, **options):
order_models = [apps.get_model(model) for model in settings.ADYEN_ORDER_MODELS]
# Process notifications which have been sent by Adyen.
for notification in AdyenNotification.objects.filter(is_processed=False):
notification_data = notification.get_notification_data()
reference = notification_data.get('merchantReference')
for order_model in order_models:
orders = order_model.objects.filter(reference=reference)
for order in orders:
order.process_notification(notification)
# After five days of an Order having status 'Pending', move them to 'Error'
five_days_ago = timezone.now() - timedelta(days=5)
for order_model in order_models:
for obj in order_model.objects.filter(
status=Status.Pending,
created_on__lte=five_days_ago
):
obj.status = Status.Error
obj.save()
| from datetime import timedelta
from django.apps import apps
from django.core.management.base import BaseCommand
from django.utils import timezone
from djadyen import settings
from djadyen.choices import Status
from djadyen.models import AdyenNotification
class Command(BaseCommand):
help = "Process the adyen notifications that are not processed yet."
def handle(self, *args, **options):
order_models = [apps.get_model(model) for model in settings.ADYEN_ORDER_MODELS]
#
# N.B. In our implementations there use to be a limit at how far back in the past we
# would go to process notifications. I'm not sure why it existed, so i've removed it.
#
# Process notifications which have been sent by Adyen.
for notification in AdyenNotification.objects.filter(is_processed=False):
notification_data = notification.get_notification_data()
reference = notification_data.get('merchantReference')
for order_model in order_models:
orders = order_model.objects.filter(reference=reference)
for order in orders:
order.process_notification(notification)
# After five days of an Order having status 'Pending', move them to 'Error'
five_days_ago = timezone.now() - timedelta(days=5)
for order_model in order_models:
for obj in order_model.objects.filter(
status=Status.Pending,
created_on__lte=five_days_ago
):
obj.status = Status.Error
obj.save()
| Python | 0.000001 |
32eba84ec5527f1afc82998e98f5d15035e311c1 | Allow forced loading. Contemplating changing the default too. | chef/base.py | chef/base.py | from chef.api import ChefAPI
class DelayedAttribute(object):
"""Descriptor that calls ._populate() before access to implement lazy loading."""
def __init__(self, attr):
self.attr = attr
def __get__(self, instance, owner):
if instance is None:
return self
if not getattr(instance, '_populated', False):
instance._populate()
instance._populated = True
return getattr(instance, '_'+self.attr)
class ChefObjectMeta(type):
"""Metaclass for ChefObject to implement lazy attributes."""
def __init__(cls, name, bases, d):
for attr in cls.attributes:
setattr(cls, attr, DelayedAttribute(attr))
class ChefObject(object):
"""A base class for Chef API objects."""
__metaclass__ = ChefObjectMeta
url = ''
attributes = []
def __init__(self, name, api=None, lazy=True):
self.name = name
self.api = api or ChefAPI.get_global()
self.url = self.__class__.url + '/' + self.name
if not lazy:
self._populate()
@classmethod
def list(cls, api=None):
api = api or ChefAPI.get_global()
for name, url in api[cls.url].iteritems():
yield cls(name, api=api)
def save(self, api=None):
api = api or ChefAPI.get_global()
api.api_request('PUT', self.url, data=self)
def delete(self, api=None):
api = api or ChefAPI.get_global()
api.api_request('DELETE', self.url)
def _populate(self):
data = self.api[self.url]
for attr in self.__class__.attributes:
setattr(self, '_'+attr, data[attr])
| from chef.api import ChefAPI
class DelayedAttribute(object):
"""Descriptor that calls ._populate() before access to implement lazy loading."""
def __init__(self, attr):
self.attr = attr
def __get__(self, instance, owner):
if instance is None:
return self
if not getattr(instance, '_populated', False):
instance._populate()
instance._populated = True
return getattr(instance, '_'+self.attr)
class ChefObjectMeta(type):
"""Metaclass for ChefObject to implement lazy attributes."""
def __init__(cls, name, bases, d):
for attr in cls.attributes:
setattr(cls, attr, DelayedAttribute(attr))
class ChefObject(object):
"""A base class for Chef API objects."""
__metaclass__ = ChefObjectMeta
url = ''
attributes = []
def __init__(self, name, api=None):
self.name = name
self.api = api or ChefAPI.get_global()
self.url = self.__class__.url + '/' + self.name
@classmethod
def list(cls, api=None):
api = api or ChefAPI.get_global()
for name, url in api[cls.url].iteritems():
yield cls(name, api=api)
def save(self, api=None):
api = api or ChefAPI.get_global()
api.api_request('PUT', self.url, data=self)
def delete(self, api=None):
api = api or ChefAPI.get_global()
api.api_request('DELETE', self.url)
def _populate(self):
data = self.api[self.url]
for attr in self.__class__.attributes:
setattr(self, '_'+attr, data[attr])
| Python | 0 |
6ba2dc8cf06efd74cae941c370e75ccddcf1d25c | fix broken arg of DnnL2Pool2DNode | treeano/sandbox/nodes/l2_pool.py | treeano/sandbox/nodes/l2_pool.py | import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
@treeano.register_node("l2_pool")
class L2PoolNode(treeano.Wrapper1NodeImpl):
"""
node that takes the L2 norm of the pooled over region
"""
hyperparameter_names = ("pool_size",)
def architecture_children(self):
nodes = [
tn.SqrNode(self.name + "_sqr"),
self._children.children,
# convert mean pool to sum pool by multiplying by pool size
tn.MultiplyConstantNode(self.name + "_mul"),
tn.SqrtNode(self.name + "_sqrt"),
]
return [tn.SequentialNode(self.name + "_sequential", nodes)]
def init_state(self, network):
super(L2PoolNode, self).init_state(network)
pool_size = network.find_hyperparameter(["pool_size"])
network.set_hyperparameter(self.name + "_mul",
"value",
# cast to float, to not trigger
# warn_float64
float(np.prod(pool_size)))
def L2Pool2DNode(name, **kwargs):
l2_kwargs = {}
if "pool_size" in kwargs:
l2_kwargs["pool_size"] = kwargs.pop("pool_size")
return L2PoolNode(
name,
tn.MeanPool2DNode(name + "_pool", **kwargs),
**l2_kwargs)
def DnnL2Pool2DNode(name, **kwargs):
l2_kwargs = {}
if "pool_size" in kwargs:
l2_kwargs["pool_size"] = kwargs.pop("pool_size")
return L2PoolNode(
name,
tn.DnnMeanPoolNode(name + "_pool", **kwargs),
**l2_kwargs)
| import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
@treeano.register_node("l2_pool")
class L2PoolNode(treeano.Wrapper1NodeImpl):
"""
node that takes the L2 norm of the pooled over region
"""
hyperparameter_names = ("pool_size",)
def architecture_children(self):
nodes = [
tn.SqrNode(self.name + "_sqr"),
self._children.children,
# convert mean pool to sum pool by multiplying by pool size
tn.MultiplyConstantNode(self.name + "_mul"),
tn.SqrtNode(self.name + "_sqrt"),
]
return [tn.SequentialNode(self.name + "_sequential", nodes)]
def init_state(self, network):
super(L2PoolNode, self).init_state(network)
pool_size = network.find_hyperparameter(["pool_size"])
network.set_hyperparameter(self.name + "_mul",
"value",
# cast to float, to not trigger
# warn_float64
float(np.prod(pool_size)))
def L2Pool2DNode(name, **kwargs):
l2_kwargs = {}
if "pool_size" in kwargs:
l2_kwargs["pool_size"] = kwargs.pop("pool_size")
return L2PoolNode(
name,
tn.MeanPool2DNode(name + "_pool", **kwargs),
**l2_kwargs)
def DnnL2Pool2DNode(name, pool_size, **kwargs):
l2_kwargs = {}
if "pool_size" in kwargs:
l2_kwargs["pool_size"] = kwargs.pop("pool_size")
return L2PoolNode(
name,
tn.DnnMeanPoolNode(name + "_pool", **kwargs),
**l2_kwargs)
| Python | 0.000004 |
7967d5fb49cd1bb0b1ed8d2417c3ace36f47600d | Refactor denoise tests and add tests for bilateral filter | skimage/filter/tests/test_denoise.py | skimage/filter/tests/test_denoise.py | import numpy as np
from numpy.testing import run_module_suite, assert_raises
from skimage import filter, data, color, img_as_float
lena = img_as_float(data.lena()[:256, :256])
lena_gray = color.rgb2gray(lena)
def test_tv_denoise_2d():
# lena image
img = lena_gray
# add noise to lena
img += 0.5 * img.std() * np.random.random(img.shape)
# clip noise so that it does not exceed allowed range for float images.
img = np.clip(img, 0, 1)
# denoise
denoised_lena = filter.tv_denoise(img, weight=60.0)
# which dtype?
assert denoised_lena.dtype in [np.float, np.float32, np.float64]
from scipy import ndimage
grad = ndimage.morphological_gradient(img, size=((3, 3)))
grad_denoised = ndimage.morphological_gradient(
denoised_lena, size=((3, 3)))
# test if the total variation has decreased
assert grad_denoised.dtype == np.float
assert (np.sqrt((grad_denoised**2).sum())
< np.sqrt((grad**2).sum()) / 2)
def test_tv_denoise_float_result_range():
# lena image
img = lena_gray
int_lena = np.multiply(img, 255).astype(np.uint8)
assert np.max(int_lena) > 1
denoised_int_lena = filter.tv_denoise(int_lena, weight=60.0)
# test if the value range of output float data is within [0.0:1.0]
assert denoised_int_lena.dtype == np.float
assert np.max(denoised_int_lena) <= 1.0
assert np.min(denoised_int_lena) >= 0.0
def test_tv_denoise_3d():
"""Apply the TV denoising algorithm on a 3D image representing a sphere."""
x, y, z = np.ogrid[0:40, 0:40, 0:40]
mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
mask = 100 * mask.astype(np.float)
mask += 60
mask += 20 * np.random.random(mask.shape)
mask[mask < 0] = 0
mask[mask > 255] = 255
res = filter.tv_denoise(mask.astype(np.uint8), weight=100)
assert res.dtype == np.float
assert res.std() * 255 < mask.std()
# test wrong number of dimensions
assert_raises(ValueError, filter.tv_denoise, np.random.random((8, 8, 8, 8)))
def test_denoise_bilateral_2d():
img = lena_gray
# add some random noise
img += 0.5 * img.std() * np.random.random(img.shape)
img = np.clip(img, 0, 1)
out1 = filter.denoise_bilateral(img, sigma_color=0.1, sigma_range=20)
out2 = filter.denoise_bilateral(img, sigma_color=0.2, sigma_range=30)
# make sure noise is reduced
assert img.std() > out1.std()
assert out1.std() > out2.std()
def test_denoise_bilateral_3d():
img = lena
# add some random noise
img += 0.5 * img.std() * np.random.random(img.shape)
img = np.clip(img, 0, 1)
out1 = filter.denoise_bilateral(img, sigma_color=0.1, sigma_range=20)
out2 = filter.denoise_bilateral(img, sigma_color=0.2, sigma_range=30)
# make sure noise is reduced
assert img.std() > out1.std()
assert out1.std() > out2.std()
if __name__ == "__main__":
run_module_suite()
| import numpy as np
from numpy.testing import run_module_suite
from skimage import filter, data, color
class TestTvDenoise():
def test_tv_denoise_2d(self):
"""
Apply the TV denoising algorithm on the lena image provided
by scipy
"""
# lena image
lena = color.rgb2gray(data.lena())[:256, :256]
# add noise to lena
lena += 0.5 * lena.std() * np.random.randn(*lena.shape)
# clip noise so that it does not exceed allowed range for float images.
lena = np.clip(lena, 0, 1)
# denoise
denoised_lena = filter.tv_denoise(lena, weight=60.0)
# which dtype?
assert denoised_lena.dtype in [np.float, np.float32, np.float64]
from scipy import ndimage
grad = ndimage.morphological_gradient(lena, size=((3, 3)))
grad_denoised = ndimage.morphological_gradient(
denoised_lena, size=((3, 3)))
# test if the total variation has decreased
assert grad_denoised.dtype == np.float
assert (np.sqrt((grad_denoised**2).sum())
< np.sqrt((grad**2).sum()) / 2)
def test_tv_denoise_float_result_range(self):
# lena image
lena = color.rgb2gray(data.lena())[:256, :256]
int_lena = np.multiply(lena, 255).astype(np.uint8)
assert np.max(int_lena) > 1
denoised_int_lena = filter.tv_denoise(int_lena, weight=60.0)
# test if the value range of output float data is within [0.0:1.0]
assert denoised_int_lena.dtype == np.float
assert np.max(denoised_int_lena) <= 1.0
assert np.min(denoised_int_lena) >= 0.0
def test_tv_denoise_3d(self):
"""
Apply the TV denoising algorithm on a 3D image representing
a sphere.
"""
x, y, z = np.ogrid[0:40, 0:40, 0:40]
mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
mask = 100 * mask.astype(np.float)
mask += 60
mask += 20 * np.random.randn(*mask.shape)
mask[mask < 0] = 0
mask[mask > 255] = 255
res = filter.tv_denoise(mask.astype(np.uint8), weight=100)
assert res.dtype == np.float
assert res.std() * 255 < mask.std()
# test wrong number of dimensions
a = np.random.random((8, 8, 8, 8))
try:
res = filter.tv_denoise(a)
except ValueError:
pass
if __name__ == "__main__":
run_module_suite()
| Python | 0 |
539038ba1135b68786adb44d7660c82a96794971 | Remove logging | imgproc.py | imgproc.py | from SimpleCV import *
import numpy
import cv2
def process_image(obj, img, config, each_blob=None):
"""
:param obj: Object we're tracking
:param img: Input image
:param config: Controls
:param each_blob: function, taking a SimpleCV.Blob as an argument, that is called for every candidate blob
:return: Mask with candidates
"""
hsv_image = img.toHSV()
segmented = Image(cv2.inRange(hsv_image.rotate90().getNumpyCv2(),
numpy.array([config.min_hue, config.min_sat, config.min_val]),
numpy.array([config.max_hue, config.max_sat, config.max_val])))
segmented = segmented.dilate(2)
blobs = segmented.findBlobs()
if blobs:
for b in blobs:
if b.radius() > 10:
rect_width = b.minRectWidth()
rect_height = b.minRectHeight()
aspect_ratio = rect_width / rect_height
square_error = abs(obj.aspect_ratio - aspect_ratio) / abs(aspect_ratio)
if square_error < 0.1:
if not each_blob: # default to just outlining
# minRectX and minRectY actually give the center point, not the minX and minY, so we shift by 1/2
rect_ctr_x = b.minRectX()
mrX = rect_ctr_x-rect_width/2
mrY = b.minRectY()-rect_height/2
segmented.drawRectangle(mrX, mrY, rect_width,
rect_height, color=Color.GREEN, width=6)
# px * (px/cm) = cm
offset = int(round((rect_ctr_x - segmented.width/2) * (obj.width / rect_width)))
segmented.drawText('Offset %s cm' % offset, mrX, mrY, Color.RED, 64)
else:
each_blob(b)
# Give the result mask
return segmented.applyLayers() | from SimpleCV import *
import numpy
import cv2
def process_image(obj, img, config, each_blob=None):
"""
:param obj: Object we're tracking
:param img: Input image
:param config: Controls
:param each_blob: function, taking a SimpleCV.Blob as an argument, that is called for every candidate blob
:return: Mask with candidates
"""
hsv_image = img.toHSV()
print([config.min_hue, config.min_sat, config.min_val])
print([config.max_hue, config.max_sat, config.max_val])
segmented = Image(cv2.inRange(hsv_image.rotate90().getNumpyCv2(),
numpy.array([config.min_hue, config.min_sat, config.min_val]),
numpy.array([config.max_hue, config.max_sat, config.max_val])))
segmented = segmented.dilate(2)
blobs = segmented.findBlobs()
if blobs:
for b in blobs:
if b.radius() > 10:
rect_width = b.minRectWidth()
rect_height = b.minRectHeight()
aspect_ratio = rect_width / rect_height
square_error = abs(obj.aspect_ratio - aspect_ratio) / abs(aspect_ratio)
if square_error < 0.1:
if not each_blob: # default to just outlining
# minRectX and minRectY actually give the center point, not the minX and minY, so we shift by 1/2
rect_ctr_x = b.minRectX()
mrX = rect_ctr_x-rect_width/2
mrY = b.minRectY()-rect_height/2
segmented.drawRectangle(mrX, mrY, rect_width,
rect_height, color=Color.GREEN, width=6)
# px * (px/cm) = cm
offset = int(round((rect_ctr_x - segmented.width/2) * (obj.width / rect_width)))
segmented.drawText('Offset %s cm' % offset, mrX, mrY, Color.RED, 64)
else:
each_blob(b)
# Give the result mask
return segmented.applyLayers() | Python | 0.000001 |
5fb74f09f4a1ee883b6cea5b8f531d8ef01e61f7 | Update Lexer | lexer.py | lexer.py | import ply.lex as lex
class Lexer(object):
reserved = {
'and' : 'AND',
'do' : 'DO',
'else' : 'ELSE',
'while' : 'WHILE',
'then' : 'THEN',
'end' : 'END',
'for' : 'FOR',
'if' : 'IF',
'var' : 'VAR',
'or' : 'OR'
}
# List of token names
tokens = [
'ID' , 'NUMBER' , 'PLUS' ,
'MINUS' , 'TIMES' , 'DIVIDE' ,
'EQUAL' , 'NOTEQUAL' , 'LESSEQUAL' ,
'GREATEREQUAL' , 'LESS' , 'GREATER' ,
'ASSIGN' , 'LPAREN' , 'RPAREN' ,
'SEMICOLON' , 'COMMA'
] + list(reserved.values())
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_EQUAL = r'=='
t_NOTEQUAL = r'~='
t_LESSEQUAL = r'<='
t_GREATEREQUAL = r'>='
t_LESS = r'<'
t_GREATER = r'>'
t_ASSIGN = r'='
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_SEMICOLON = r';'
t_COMMA = r','
# Regular expressions rules
def t_ID(self, t):
r'[a-zA-Z_][a-zA-Z_0-9]*'
t.type = self.reserved.get(t.value, 'ID')
return t
def t_NUMBER(self, t):
r'\d+'
t.value = int(t.value)
return t
t_ignore = ' \t'
def t_newline(self, t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_error(self, t):
print "Ilegal character '%s' at line %d: " % (t.value[0], t.lexer.lineno)
t.lexer.skip(1)
def build(self, **kwargs):
self.lexer = lex.lex(module = self, **kwargs)
# Testing the output
def test(self, code):
self.lexer.input(code)
while True:
tok = self.lexer.token()
if not tok:
break;
print tok
| import ply.lex as lex
class Lexer(object):
reserved = {
'and' : 'AND',
'break' : 'BREAK',
'do' : 'DO',
'else' : 'ELSE',
'elseif' : 'ELSEIF',
'end' : 'END',
'false' : 'FALSE',
'for' : 'FOR',
'function': 'FUNCTION',
'if' : 'IF',
'in' : 'IN',
'local' : 'LOCAL',
'nil' : 'NIL',
'not' : 'NOT',
'or' : 'OR',
'repeat' : 'REPEAT',
'return' : 'RETURN',
'then' : 'THEN',
'true' : 'TRUE',
'until' : 'UNTIL',
'while' : 'WHILE'
}
# List of token names
tokens = [
'ID' , 'NUMBER' , 'CHAR',
'STRING' , 'PLUS' , 'MINUS',
'TIMES' , 'DIVIDE' , 'PERCENT',
'CIRCUMFLEX', 'SHARP' , 'EQUAL',
'NOTEQUAL' , 'LESSEQUAL' , 'GREATEREQUAL',
'LESS' , 'GREATER' , 'ASSIGN',
'LPAREN' , 'RPAREN' , 'LCURLY',
'RCURLY' , 'LSQUARE' , 'RSQUARE',
'SEMICOLON' , 'COLON' , 'COMMA',
'DOT' , 'TWODOTS' , 'THREEDOTS'
] + list(reserved.values())
t_CHAR = r'(\"([^\\\n]|(\\.))?\")|(\'([^\\\n]|(\\.))?\')'
t_STRING = r'(\"([^\\\n]|(\\.))*?\")|(\'([^\\\n]|(\\.))*?\')'
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_PERCENT = r'%'
t_CIRCUMFLEX = r'\^'
t_SHARP = r'\#'
t_EQUAL = r'=='
t_NOTEQUAL = r'~='
t_LESSEQUAL = r'<='
t_GREATEREQUAL = r'>='
t_LESS = r'<'
t_GREATER = r'>'
t_ASSIGN = r'='
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LCURLY = r'\{'
t_RCURLY = r'}'
t_LSQUARE = r'\['
t_RSQUARE = r'\]'
t_SEMICOLON = r';'
t_COLON = r':'
t_COMMA = r','
t_DOT = r'\.'
t_TWODOTS = r'\.\.'
t_THREEDOTS = r'\.\.\.'
# Regular expressions rules
def t_COMMENT(self, t):
r'--.*\n'
pass
# Discarded comment
def t_ID(self, t):
r'[a-zA-Z_][a-zA-Z_0-9]*'
t.type = self.reserved.get(t.value, 'ID')
return t
def t_NUMBER(self, t):
r'\d+'
t.value = int(t.value)
return t
t_ignore = ' \t'
def t_newline(self, t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_error(self, t):
print "Ilegal character '%s' at line %d: " % (t.value[0], t.lexer.lineno)
t.lexer.skip(1)
def build(self, **kwargs):
self.lexer = lex.lex(module = self, **kwargs)
# Testing the output
def test(self, code):
self.lexer.input(code)
while True:
tok = self.lexer.token()
if not tok:
break;
print tok
| Python | 0.000001 |
372ce38d1ddcf2fd65d83df2499d97d4fc2128e6 | Fix issue in cbb.py | ed2d/physics/cbb.py | ed2d/physics/cbb.py | from ed2d.physics.collisiondata import*
from ed2d.glmath import vector
# Circle Bounding Box
class CBB(object):
def __init__(self, radius, center):
'''Creates a circle bounding box object to be used with the physics engine. Takes in a float for the radius and an array for the center.'''
self.radius = radius
self.center = vector.Vector(3, data=center)
def intersectCBB(self, oCBB):
tempDistance = self.center - oCBB.center
distanceCenters = tempDistance.magnitude()
distanceRadii = self.radius + oCBB.radius
# Collision happens when the distance between the two centers is less than the sum of the radii
state = distanceCenters < distanceRadii
# Calculate the depth penetration
depthPenetration = distanceCenters - (distanceRadii)
return CollisionData(state, tempDistance, depthPenetration)
def getCenter(self):
return self.center
def getRadius(self):
return self.radius
| from ed2d.physics.collisiondata import*
from ed2d.glmath import vector
# Circle Bounding Box
class CBB(object):
def __init__(self, radius, center):
'''Creates a circle bounding box object to be used with the physics engine. Takes in a float for the radius and an array for the center.'''
self.radius = radius
self.center = vector.Vector(3, data=center)
def intersectCBB(self, oCBB):
tempDistance = self.center - oCBB.center
distanceCenters = tempDistance.magnitude()
distanceRadii = self.radius + oCBB.radius
# Collision happens when the distance between the two centers is less than the sum of the radii
state = distanceCenters < distanceRadii
# Calculate the depth penetration
depthPenetration = distanceCenters - (distanceRadii)
return CollisionData(state, tempDistance, depthPenetration)
def getCenter(self):
return center
def getRadius(self):
return radius | Python | 0.000001 |
1451d199833b405929105f939f57b4d4faf50fa2 | Use new py.test to generate vector-vs-scalar tests | skyfield/tests/test_vectorization.py | skyfield/tests/test_vectorization.py | """Determine whether arrays work as well as individual inputs."""
from itertools import izip
from numpy import array
from ..constants import T0
from ..planets import earth, mars
from ..timescales import JulianDate, julian_date
dates = array([
julian_date(1969, 7, 20, 20. + 18. / 60.),
T0,
julian_date(2012, 12, 21),
julian_date(2027, 8, 2, 10. + 7. / 60. + 50. / 3600.),
])
deltas = array([39.707, 63.8285, 66.8779, 72.])
def compute_planetary_position(ut1, delta_t):
jd = JulianDate(ut1=ut1, delta_t=delta_t)
yield jd.ut1
yield jd.tt
yield jd.tdb
observer = earth(jd)
yield observer.position
yield observer.velocity
yield observer.jd.ut1
yield observer.jd.tt
yield observer.jd.tdb
astrometric = observer.observe(mars)
yield astrometric.position
yield astrometric.velocity
ra, dec, distance = astrometric.radec()
yield ra.hours()
yield dec.degrees()
yield distance
def generate_comparisons(computation):
"""Set up comparisons between vector and scalar outputs of `computation`.
The `computation` should be a generator that accepts both vector and
scalar input, and that yields a series of values whose shape
corresponds to its input's shape.
"""
vector_results = list(computation(dates, deltas))
for i, (date, delta_t) in enumerate(zip(dates, deltas)):
g = computation(date, delta_t)
for vector, scalar in izip(vector_results, g):
f = g.gi_frame
location = '{}:{}'.format(f.f_code.co_filename, f.f_lineno)
yield location, vector, i, scalar
def pytest_generate_tests(metafunc):
if 'vector_vs_scalar' in metafunc.fixturenames:
metafunc.parametrize('vector_vs_scalar',
list(generate_comparisons(compute_planetary_position))
)
def test_vector_vs_scalar(vector_vs_scalar):
location, vector, i, scalar = vector_vs_scalar
assert (vector.T[i] == scalar).all(), (
'{}:\n {}[{}] != {}'.format(location, vector.T, i, scalar))
| """Determine whether arrays work as well as individual inputs."""
import pytest
from numpy import array
from ..constants import T0
from ..planets import earth, mars
from ..timescales import JulianDate, julian_date
dates = array([
julian_date(1969, 7, 20, 20. + 18. / 60.),
T0,
julian_date(2012, 12, 21),
julian_date(2027, 8, 2, 10. + 7. / 60. + 50. / 3600.),
])
deltas = array([39.707, 63.8285, 66.8779, 72.])
def generate_planetary_position(ut1, delta_t):
jd = JulianDate(ut1=ut1, delta_t=delta_t)
yield jd.ut1
yield jd.tt
yield jd.tdb
observer = earth(jd)
yield observer.position
yield observer.velocity
yield observer.jd.ut1
yield observer.jd.tt
yield observer.jd.tdb
astrometric = observer.observe(mars)
yield astrometric.position
yield astrometric.velocity
ra, dec, distance = astrometric.radec()
yield ra.hours()
yield dec.degrees()
yield distance
@pytest.fixture(params=[generate_planetary_position])
def gradual_computation(request):
return request.param
def test_gradual_computations(gradual_computation):
vector_results = list(gradual_computation(dates, deltas))
correct_length = len(dates)
for vector_value in vector_results:
assert vector_value.shape[-1] == correct_length
for i, (date, delta) in enumerate(zip(dates, deltas)):
scalar_results = list(gradual_computation(date, delta))
for vector_value, scalar_value in zip(vector_results, scalar_results):
assert (vector_value.T[i] == scalar_value).all()
| Python | 0 |
0ea687403b01dbc6268c15550f0caf45a54e9106 | Fix Joust picking with multiple minions in the deck | fireplace/cards/utils.py | fireplace/cards/utils.py | import random
from hearthstone.enums import CardClass, CardType, GameTag, Race, Rarity
from ..actions import *
from ..aura import Refresh
from ..dsl import *
from ..events import *
from ..utils import custom_card
# For buffs which are removed when the card is moved to play (eg. cost buffs)
# This needs to be Summon, because of Summon from the hand
REMOVED_IN_PLAY = Summon(PLAYER, OWNER).after(Destroy(SELF))
RandomCard = lambda **kw: RandomCardPicker(**kw)
RandomCollectible = lambda **kw: RandomCardPicker(collectible=True, **kw)
RandomMinion = lambda **kw: RandomCollectible(type=CardType.MINION, **kw)
RandomBeast = lambda **kw: RandomMinion(race=Race.BEAST)
RandomMurloc = lambda **kw: RandomMinion(race=Race.MURLOC)
RandomSpell = lambda **kw: RandomCollectible(type=CardType.SPELL, **kw)
RandomTotem = lambda **kw: RandomCardPicker(race=Race.TOTEM)
RandomWeapon = lambda **kw: RandomCollectible(type=CardType.WEAPON, **kw)
RandomSparePart = lambda **kw: RandomCardPicker(spare_part=True, **kw)
class RandomEntourage(RandomCardPicker):
def pick(self, source):
self._cards = source.entourage
return super().pick(source)
class RandomID(RandomCardPicker):
def pick(self, source):
self._cards = self.args
return super().pick(source)
Freeze = lambda target: SetTag(target, (GameTag.FROZEN, ))
Stealth = lambda target: SetTag(target, (GameTag.STEALTH, ))
Unstealth = lambda target: UnsetTag(target, (GameTag.STEALTH, ))
Taunt = lambda target: SetTag(target, (GameTag.TAUNT, ))
GiveCharge = lambda target: SetTag(target, (GameTag.CHARGE, ))
GiveDivineShield = lambda target: SetTag(target, (GameTag.DIVINE_SHIELD, ))
GiveWindfury = lambda target: SetTag(target, (GameTag.WINDFURY, ))
CLEAVE = Hit(TARGET_ADJACENT, Attr(SELF, GameTag.ATK))
COINFLIP = RandomNumber(0, 1) == 1
EMPTY_HAND = Count(FRIENDLY_HAND) == 0
HOLDING_DRAGON = Find(FRIENDLY_HAND + DRAGON)
JOUST = Joust(RANDOM(FRIENDLY_DECK + MINION), RANDOM(ENEMY_DECK + MINION))
def SET(amt):
return lambda self, i: amt
# Buff helper
def buff(atk=0, health=0, **kwargs):
buff_tags = {}
if atk:
buff_tags[GameTag.ATK] = atk
if health:
buff_tags[GameTag.HEALTH] = health
for tag in GameTag:
if tag.name.lower() in kwargs.copy():
buff_tags[tag] = kwargs.pop(tag.name.lower())
if "immune" in kwargs:
value = kwargs.pop("immune")
buff_tags[GameTag.CANT_BE_DAMAGED] = value
buff_tags[GameTag.CANT_BE_TARGETED_BY_OPPONENTS] = value
if kwargs:
raise NotImplementedError(kwargs)
class Buff:
tags = buff_tags
return Buff
| import random
from hearthstone.enums import CardClass, CardType, GameTag, Race, Rarity
from ..actions import *
from ..aura import Refresh
from ..dsl import *
from ..events import *
from ..utils import custom_card
# For buffs which are removed when the card is moved to play (eg. cost buffs)
# This needs to be Summon, because of Summon from the hand
REMOVED_IN_PLAY = Summon(PLAYER, OWNER).after(Destroy(SELF))
RandomCard = lambda **kw: RandomCardPicker(**kw)
RandomCollectible = lambda **kw: RandomCardPicker(collectible=True, **kw)
RandomMinion = lambda **kw: RandomCollectible(type=CardType.MINION, **kw)
RandomBeast = lambda **kw: RandomMinion(race=Race.BEAST)
RandomMurloc = lambda **kw: RandomMinion(race=Race.MURLOC)
RandomSpell = lambda **kw: RandomCollectible(type=CardType.SPELL, **kw)
RandomTotem = lambda **kw: RandomCardPicker(race=Race.TOTEM)
RandomWeapon = lambda **kw: RandomCollectible(type=CardType.WEAPON, **kw)
RandomSparePart = lambda **kw: RandomCardPicker(spare_part=True, **kw)
class RandomEntourage(RandomCardPicker):
def pick(self, source):
self._cards = source.entourage
return super().pick(source)
class RandomID(RandomCardPicker):
def pick(self, source):
self._cards = self.args
return super().pick(source)
Freeze = lambda target: SetTag(target, (GameTag.FROZEN, ))
Stealth = lambda target: SetTag(target, (GameTag.STEALTH, ))
Unstealth = lambda target: UnsetTag(target, (GameTag.STEALTH, ))
Taunt = lambda target: SetTag(target, (GameTag.TAUNT, ))
GiveCharge = lambda target: SetTag(target, (GameTag.CHARGE, ))
GiveDivineShield = lambda target: SetTag(target, (GameTag.DIVINE_SHIELD, ))
GiveWindfury = lambda target: SetTag(target, (GameTag.WINDFURY, ))
CLEAVE = Hit(TARGET_ADJACENT, Attr(SELF, GameTag.ATK))
COINFLIP = RandomNumber(0, 1) == 1
EMPTY_HAND = Count(FRIENDLY_HAND) == 0
HOLDING_DRAGON = Find(FRIENDLY_HAND + DRAGON)
JOUST = Joust(FRIENDLY_DECK + MINION, ENEMY_DECK + MINION)
def SET(amt):
return lambda self, i: amt
# Buff helper
def buff(atk=0, health=0, **kwargs):
buff_tags = {}
if atk:
buff_tags[GameTag.ATK] = atk
if health:
buff_tags[GameTag.HEALTH] = health
for tag in GameTag:
if tag.name.lower() in kwargs.copy():
buff_tags[tag] = kwargs.pop(tag.name.lower())
if "immune" in kwargs:
value = kwargs.pop("immune")
buff_tags[GameTag.CANT_BE_DAMAGED] = value
buff_tags[GameTag.CANT_BE_TARGETED_BY_OPPONENTS] = value
if kwargs:
raise NotImplementedError(kwargs)
class Buff:
tags = buff_tags
return Buff
| Python | 0 |
42462135cec040d17f8ce4488c1ee6bb3b59f406 | Bump mono-basic to @mono/mono-basic/b8011b2f274606323da0927214ed98336465f467 | packages/mono-basic.py | packages/mono-basic.py | GitHubTarballPackage ('mono', 'mono-basic', '4.0.1', 'b8011b2f274606323da0927214ed98336465f467',
configure = './configure --prefix="%{prefix}"',
override_properties = { 'make': 'make' }
)
| GitHubTarballPackage ('mono', 'mono-basic', '3.0', '0d0440feccf648759f7316f93ad09b1e992ea13a',
configure = './configure --prefix="%{prefix}"',
override_properties = { 'make': 'make' }
)
| Python | 0.000001 |
3ddddbd24bb37c30df80233ec4c70c38b6c29e82 | Update leaflet request to be over https | emstrack/forms.py | emstrack/forms.py | from django.contrib.gis.forms import widgets
class LeafletPointWidget(widgets.BaseGeometryWidget):
template_name = 'leaflet/leaflet.html'
class Media:
css = {
'all': ('https://cdnjs.cloudflare.com/ajax/libs/leaflet/v0.7.7/leaflet.css',
'leaflet/css/location_form.css',
'leaflet/css/LeafletWidget.css')
}
js = (
'https://cdnjs.cloudflare.com/ajax/libs/leaflet/v0.7.7/leaflet.js',
'leaflet/js/LeafletWidget.js'
)
def render(self, name, value, attrs=None):
# add point
if value:
attrs.update({ 'point': { 'x': value.x,
'y': value.y,
'z': value.z,
'srid': value.srid }
})
return super().render(name, value, attrs)
| from django.contrib.gis.forms import widgets
class LeafletPointWidget(widgets.BaseGeometryWidget):
template_name = 'leaflet/leaflet.html'
class Media:
css = {
'all': ('https://cdn.leafletjs.com/leaflet/v0.7.7/leaflet.css',
'leaflet/css/location_form.css',
'leaflet/css/LeafletWidget.css')
}
js = (
'https://cdn.leafletjs.com/leaflet/v0.7.7/leaflet.js',
'leaflet/js/LeafletWidget.js'
)
def render(self, name, value, attrs=None):
# add point
if value:
attrs.update({ 'point': { 'x': value.x,
'y': value.y,
'z': value.z,
'srid': value.srid }
})
return super().render(name, value, attrs)
| Python | 0 |
612698f37ab726fb77aa1f284c97d01d1d726abf | Bump version | django_anyvcs/__init__.py | django_anyvcs/__init__.py | # Copyright (c) 2014-2016, Clemson University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Clemson University nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__version__ = '2.5.0'
| # Copyright (c) 2014-2016, Clemson University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Clemson University nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__version__ = '2.4.0'
| Python | 0 |
9674a0869c2a333f74178e305677259e7ac379c3 | Make the Websocket's connection header value case-insensitive | examples/ignore_websocket.py | examples/ignore_websocket.py | # This script makes mitmproxy switch to passthrough mode for all HTTP
# responses with "Connection: Upgrade" header. This is useful to make
# WebSockets work in untrusted environments.
#
# Note: Chrome (and possibly other browsers), when explicitly configured
# to use a proxy (i.e. mitmproxy's regular mode), send a CONNECT request
# to the proxy before they initiate the websocket connection.
# To make WebSockets work in these cases, supply
# `--ignore :80$` as an additional parameter.
# (see http://mitmproxy.org/doc/features/passthrough.html)
from libmproxy.protocol.http import HTTPRequest
from libmproxy.protocol.tcp import TCPHandler
from libmproxy.protocol import KILL
from libmproxy.script import concurrent
def start(context, argv):
HTTPRequest._headers_to_strip_off.remove("Connection")
HTTPRequest._headers_to_strip_off.remove("Upgrade")
def done(context):
HTTPRequest._headers_to_strip_off.append("Connection")
HTTPRequest._headers_to_strip_off.append("Upgrade")
@concurrent
def response(context, flow):
value = flow.response.headers.get_first("Connection", None)
if value and value.upper() == "UPGRADE":
# We need to send the response manually now...
flow.client_conn.send(flow.response.assemble())
# ...and then delegate to tcp passthrough.
TCPHandler(flow.live.c, log=False).handle_messages()
flow.reply(KILL) | # This script makes mitmproxy switch to passthrough mode for all HTTP
# responses with "Connection: Upgrade" header. This is useful to make
# WebSockets work in untrusted environments.
#
# Note: Chrome (and possibly other browsers), when explicitly configured
# to use a proxy (i.e. mitmproxy's regular mode), send a CONNECT request
# to the proxy before they initiate the websocket connection.
# To make WebSockets work in these cases, supply
# `--ignore :80$` as an additional parameter.
# (see http://mitmproxy.org/doc/features/passthrough.html)
from libmproxy.protocol.http import HTTPRequest
from libmproxy.protocol.tcp import TCPHandler
from libmproxy.protocol import KILL
from libmproxy.script import concurrent
def start(context, argv):
HTTPRequest._headers_to_strip_off.remove("Connection")
HTTPRequest._headers_to_strip_off.remove("Upgrade")
def done(context):
HTTPRequest._headers_to_strip_off.append("Connection")
HTTPRequest._headers_to_strip_off.append("Upgrade")
@concurrent
def response(context, flow):
if flow.response.headers.get_first("Connection", None) == "Upgrade":
# We need to send the response manually now...
flow.client_conn.send(flow.response.assemble())
# ...and then delegate to tcp passthrough.
TCPHandler(flow.live.c, log=False).handle_messages()
flow.reply(KILL) | Python | 0.005274 |
7c90e73d3ffa2a8209a751b01c7cd8bd3122b13b | Use actual feature values instead of binary for making pivot predictions | scripts/build_pivot_training_data.py | scripts/build_pivot_training_data.py | #!/usr/bin/env python
from os.path import join, dirname
from sklearn.datasets import load_svmlight_file, dump_svmlight_file
import numpy as np
import scipy.sparse
import sys
from uda_common import read_feature_groups
def main(args):
if len(args) < 3:
sys.stderr.write("Three required arguments: <pivot file> <data file> <output directory>\n")
sys.exit(-1)
pivot_file = args[0]
model_dir = dirname(pivot_file)
group_name = join(model_dir, 'reduced-feature-groups.txt')
group_map = read_feature_groups(group_name)
domain_inds = group_map['Domain']
out_dir = args[2]
sys.stderr.write("Reading in data files\n")
all_X, all_y = load_svmlight_file(args[1])
all_X = all_X.tolil()
## Zero out domain-indicator variables (not needed for this step)
all_X[:,domain_inds[0]] = 0
all_X[:,domain_inds[1]] = 0
num_instances, num_feats = all_X.shape
sys.stderr.write("Reading in pivot files and creating pivot labels dictionary\n")
## Read pivots file into dictionary:
pivots = []
pivot_labels = {}
for line in open(pivot_file, 'r'):
pivot = int(line.strip())
pivots.append(pivot)
pivot_labels[pivot] = np.zeros((num_instances,1))
pivot_labels[pivot] += np.round(all_X[:,pivot] > 0).astype('int').toarray()
sys.stderr.write("Creating pivot matrices for each feature group\n")
#ind_groups = [None] * num_feats
for group_key,group_inds in group_map.items():
group_inds = np.array(group_inds)
group_X = scipy.sparse.lil_matrix(np.zeros((num_instances, num_feats)))
group_X += all_X
group_X[:, group_inds] = 0
group_X[:, pivots] = 0
for group_ind in group_inds:
if group_ind in pivots:
out_file = join(out_dir, 'pivot_%s-training.liblinear' % group_ind)
print('Writing file %s ' % out_file)
sys.stderr.write('.')
dump_svmlight_file(group_X, pivot_labels[group_ind][:,0], out_file)
sys.stderr.write('\n')
if __name__ == '__main__':
args = sys.argv[1:]
main(args)
| #!/usr/bin/env python
from os.path import join, dirname
from sklearn.datasets import load_svmlight_file, dump_svmlight_file
import numpy as np
import scipy.sparse
import sys
from uda_common import read_feature_groups
def main(args):
if len(args) < 3:
sys.stderr.write("Three required arguments: <pivot file> <data file> <output directory>\n")
sys.exit(-1)
pivot_file = args[0]
model_dir = dirname(pivot_file)
group_name = join(model_dir, 'reduced-feature-groups.txt')
group_map = read_feature_groups(group_name)
domain_inds = group_map['Domain']
out_dir = args[2]
sys.stderr.write("Reading in data files\n")
all_X, all_y = load_svmlight_file(args[1])
all_X = all_X.tolil()
## Zero out domain-indicator variables (not needed for this step)
all_X[:,domain_inds[0]] = 0
all_X[:,domain_inds[1]] = 0
num_instances, num_feats = all_X.shape
sys.stderr.write("Reading in pivot files and creating pivot labels dictionary\n")
## Read pivots file into dictionary:
pivots = []
pivot_labels = {}
for line in open(pivot_file, 'r'):
pivot = int(line.strip())
pivots.append(pivot)
pivot_labels[pivot] = np.zeros((num_instances,1))
pivot_labels[pivot] += np.round(all_X[:,pivot] > 0).astype('int').toarray()
sys.stderr.write("Creating pivot matrices for each feature group\n")
#ind_groups = [None] * num_feats
for group_key,group_inds in group_map.items():
group_inds = np.array(group_inds)
group_X = scipy.sparse.lil_matrix(np.zeros((num_instances, num_feats)))
group_X += (all_X > 0).astype('int')
group_X[:, group_inds] = 0
group_X[:, pivots] = 0
for group_ind in group_inds:
if group_ind in pivots:
out_file = join(out_dir, 'pivot_%s-training.liblinear' % group_ind)
print('Writing file %s ' % out_file)
sys.stderr.write('.')
dump_svmlight_file(group_X, pivot_labels[group_ind][:,0], out_file)
sys.stderr.write('\n')
if __name__ == '__main__':
args = sys.argv[1:]
main(args)
| Python | 0 |
d981caff4b6710a3779f25fd8955fd111d9ea0cf | fix export error in dj 19 | django_tablib/datasets.py | django_tablib/datasets.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from .base import BaseDataset
class SimpleDataset(BaseDataset):
def __init__(self, queryset, headers=None, encoding='utf-8'):
self.queryset = queryset
self.encoding = encoding
if headers is None:
# We'll set the queryset to include all fields including calculated
# aggregates using the same names as a values() queryset:
v_qs = queryset.values()
headers = []
headers.extend(v_qs.query.extra_select)
try:
field_names = v_qs.query.values_select
except AttributeError:
# django < 1.9
field_names = v_qs.field_names
headers.extend(field_names)
headers.extend(v_qs.query.aggregate_select)
self.header_list = headers
self.attr_list = headers
elif isinstance(headers, dict):
self.header_dict = headers
self.header_list = self.header_dict.keys()
self.attr_list = self.header_dict.values()
elif isinstance(headers, (tuple, list)):
self.header_list = headers
self.attr_list = headers
super(SimpleDataset, self).__init__()
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from .base import BaseDataset
class SimpleDataset(BaseDataset):
def __init__(self, queryset, headers=None, encoding='utf-8'):
self.queryset = queryset
self.encoding = encoding
if headers is None:
# We'll set the queryset to include all fields including calculated
# aggregates using the same names as a values() queryset:
v_qs = queryset.values()
headers = []
headers.extend(v_qs.query.extra_select)
headers.extend(v_qs.field_names)
headers.extend(v_qs.query.aggregate_select)
self.header_list = headers
self.attr_list = headers
elif isinstance(headers, dict):
self.header_dict = headers
self.header_list = self.header_dict.keys()
self.attr_list = self.header_dict.values()
elif isinstance(headers, (tuple, list)):
self.header_list = headers
self.attr_list = headers
super(SimpleDataset, self).__init__()
| Python | 0 |
a43b62c60b00233fa84c66bf4a332410903476eb | fix typo | django_fabric/fabfile.py | django_fabric/fabfile.py | # -*- coding: utf8 -*-
from fabric.api import local, run, cd
from fabric.operations import sudo
from fabric import colors
from fabric.context_managers import settings
from fabric.contrib.console import confirm
from fabric.contrib import django
from fabric.utils import abort
class App():
project_paths = {}
project_package = None
test_settings = None
def __init__(self, project_paths, project_package, test_settings=None):
self.project_paths = project_paths
self.project_package = project_package
self.test_settings = None
django.project(project_package)
def local_management_command(self, command, *args, **kwargs):
return local("venv/bin/python manage.py %s" % command, *args, **kwargs)
def run_management_command(self, instance, command):
code_dir = self.project_paths[instance]
with cd(code_dir):
return run("venv/bin/python manage.py %s" % command)
def test(self, is_deploying=True):
with settings(warn_only=True):
print(colors.yellow("Running tests, please wait!"))
if settings is None:
command = "test --settings=%s" % \
self.test_settings
else:
command = "test"
result = self.local_management_command(command, capture=True)
if result.failed:
print(colors.red("Tests failed"))
if is_deploying:
if not confirm('Do you really want to deploy?'):
abort('')
else:
print(colors.green("All tests ok"))
def run_server_updates(self, instance):
code_dir = self.project_paths[instance]
with cd(code_dir):
run("git fetch")
run("git reset --hard origin/master")
run("venv/bin/pip install -r requirements.txt")
from django.conf import settings
if 'south' in settings.INSTALLED_APPS:
self.run_management_command(instance,
"syncdb --noinput --migrate")
else:
self.run_management_command(instance, "syncdb --noinput")
if 'djangobower' in settings.INSTALLED_APPS:
self.run_management_command(instance, "bower_install")
self.run_management_command(instance, "collectstatic --noinput")
def restart_app(self, instance):
raise NotImplementedError
def deploy(self, instance):
self.run_server_updates(instance)
self.restart_app(instance)
def deploy_dev(self):
if confirm("Do you want to run tests before deploying?"):
self.test(is_deploying=True)
self.deploy('dev')
def deploy_prod(self, run_test=True):
if run_test:
self.test(is_deploying=True)
self.deploy('prod')
class UwsgiApp(App):
ini_files = {}
def __init__(self, ini_files, *args, **kwargs):
super(UwsgiApp, self).__init__(*args, **kwargs)
self.ini_files = ini_files
def restart_app(self, instance):
sudo("touch %s" % self.ini_files[instance])
| # -*- coding: utf8 -*-
from fabric.api import local, run, cd
from fabric.operations import sudo
from fabric import colors
from fabric.context_managers import settings
from fabric.contrib.console import confirm
from fabric.contrib import django
from fabric.utils import abort
class App():
project_paths = {}
project_package = None
test_settings = None
def __init__(self, project_paths, project_package, test_settings=None):
self.project_paths = project_paths
self.project_package = project_package
self.test_settings = None
django.project(project_package)
def local_management_command(self, command, *args, **kwargs):
return local("venv/bin/python manage.py %s" % command, *args, **kwargs)
def run_management_command(self, instance, command):
code_dir = self.project_paths[instance]
with cd(code_dir):
return run("venv/bin/python manage.py %s" % command)
def test(self, is_deploying=True):
with settings(warn_only=True):
print(colors.yellow("Running tests, please wait!"))
if settings is None:
command = "test --settings=%s" % \
self.test_settings
else:
command = "test"
result = self.local_management_command(command, capture=True)
if result.failed:
print(colors.red("Tests failed"))
if is_deploying:
if not confirm('Do you really want to deploy?'):
abort('')
else:
print(colors.green("All tests ok"))
def run_server_updates(self, instance):
code_dir = self.project_paths[instance]
with cd(code_dir):
run("git fetch")
run("git reset --hard origin/master")
run("venv/bin/pip install -r requirements.txt")
from django.conf import settings
if 'south' in settings.INSTALLED_APPS:
self.run_management_command(instance,
"syncdb --noinput --migrate")
else:
self.run_management_command(instance, "syncdb --noinput")
if 'djangobower' in settings.INSTALLED_APPS:
self.run_management_command(instance, "bower_install")
self.run_management_command(instance, "collectstatic --noinput")
def restart_app(self, instance):
raise NotImplementedError
def deploy(self, instance):
self.run_server_updates(instance)
self.restart_app(instance)
def deploy_dev(self):
if confirm("Do you want to run tests before deploying?"):
self.test(is_deploying=True)
self.deploy('dev')
def deploy_prod(self, run_test=True):
if run_test:
self.test(is_deploying=True)
self.deploy('prod')
class UwsgiApp(App):
ini_files = {}
def __init__(self, ini_files, *args, **kwargs):
super(UwsgiApp, self).__init__(*args, **kwargs)
self.ini_files = ini_files
def restart_app(self, instance):
sudo("touch %s" % self.ini_files['instance'])
| Python | 0.999991 |
a5add45a7f4fb1f9651e49fb5f20fe1c9953c0b8 | Assert expected dates for A1 | esios/archives.py | esios/archives.py | # -*- coding: utf-8 -*-
from datetime import datetime
from dateutil import relativedelta
from libsaas import http, parsers, port
from libsaas.services import base
from esios.utils import translate_param, serialize_param
LIQUICOMUN_PRIORITY = [
'C7', 'A7', 'C6', 'A6', 'C5', 'A5', 'C4', 'A4', 'C3', 'A3', 'C2', 'A2',
'C1', 'A1'
]
def parser_none(body, code, headers):
return body
class Archive(base.RESTResource):
path = 'archives'
def get_filename(self):
return self.__class__.__name__
def order_key_function(self, param):
return param['name']
def validate_range(self, start, end):
return True
@base.apimethod
def get(self, start_date, end_date, taxonomy_terms=None):
assert isinstance(start_date, datetime)
assert isinstance(end_date, datetime)
if taxonomy_terms is None:
taxonomy_terms = []
assert isinstance(taxonomy_terms, (list, tuple))
assert self.validate_range(start_date, end_date), "Dates are not in the expected range for the requested version"
date_type = 'datos'
start_date = start_date.isoformat()
end_date = end_date.isoformat()
locale = 'en'
param_list = ('locale', 'start_date', 'end_date', 'date_type')
if taxonomy_terms:
param_list += ('taxonomy_terms',)
params = base.get_params(
param_list,
locals(),
translate_param=translate_param,
serialize_param=serialize_param,
)
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
@base.apimethod
def download(self, start_date, end_date, taxonomy_terms=None):
assert isinstance(start_date, datetime)
assert isinstance(end_date, datetime)
if taxonomy_terms is None:
taxonomy_terms = []
assert isinstance(taxonomy_terms, (list, tuple))
# gets filename from class name
filename = self.get_filename()
body = self.get(start_date, end_date, taxonomy_terms)
regs = [a for a in body['archives'] if filename in a['name']]
sorted_list = sorted(regs, key=self.order_key_function)
# gets last (better) file
url = sorted_list[0]['download']['url']
request = http.Request('GET', self.parent.get_url() + url)
return request, parser_none
class Liquicomun(Archive):
def get_filename(self):
return super(Liquicomun, self).get_filename().lower()
def order_key_function(self, param):
return LIQUICOMUN_PRIORITY.index(param['name'][:2])
def get(self, start_date, end_date, taxonomy_terms=None):
if taxonomy_terms is None:
taxonomy_terms = []
taxonomy_terms.append('Settlements')
return super(Liquicomun, self).get(start_date, end_date, taxonomy_terms)
class A1_liquicomun(Archive):
""" This month and future """
def validate_range(self, start, end):
## Validate range for A1 period (this month & future)
### toDo acotar future
today = datetime.today()
try:
first_day_current_month = datetime(today.year, today.month, 1)
assert start >= first_day_current_month
last_day_current_month = first_day_current_month + relativedelta.relativedelta(months=1) - relativedelta.relativedelta(days=1)
assert end <= last_day_current_month
except:
return False
return True
def order_key_function(self, param):
print (param)
if type(param) == list:
param = param[0]
name = (param['name'])
assert name == "A1_liquicomun"
return LIQUICOMUN_PRIORITY.index(name[:2])
class A2_liquicomun(Liquicomun):
""" Just previous month """
pass
| # -*- coding: utf-8 -*-
from datetime import datetime
from libsaas import http, parsers, port
from libsaas.services import base
from esios.utils import translate_param, serialize_param
LIQUICOMUN_PRIORITY = [
'C7', 'A7', 'C6', 'A6', 'C5', 'A5', 'C4', 'A4', 'C3', 'A3', 'C2', 'A2',
'C1', 'A1'
]
def parser_none(body, code, headers):
return body
class Archive(base.RESTResource):
path = 'archives'
def get_filename(self):
return self.__class__.__name__
def order_key_function(self, param):
return param['name']
def validate_dates(self, start, end):
return True
@base.apimethod
def get(self, start_date, end_date, taxonomy_terms=None):
assert isinstance(start_date, datetime)
assert isinstance(end_date, datetime)
if taxonomy_terms is None:
taxonomy_terms = []
assert isinstance(taxonomy_terms, (list, tuple))
assert self.validate_dates(start_date, end_date), "Dates are not in the expected range for the requested version"
date_type = 'datos'
start_date = start_date.isoformat()
end_date = end_date.isoformat()
locale = 'en'
param_list = ('locale', 'start_date', 'end_date', 'date_type')
if taxonomy_terms:
param_list += ('taxonomy_terms',)
params = base.get_params(
param_list,
locals(),
translate_param=translate_param,
serialize_param=serialize_param,
)
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
@base.apimethod
def download(self, start_date, end_date, taxonomy_terms=None):
assert isinstance(start_date, datetime)
assert isinstance(end_date, datetime)
if taxonomy_terms is None:
taxonomy_terms = []
assert isinstance(taxonomy_terms, (list, tuple))
# gets filename from class name
filename = self.get_filename()
body = self.get(start_date, end_date, taxonomy_terms)
regs = [a for a in body['archives'] if filename in a['name']]
sorted_list = sorted(regs, key=self.order_key_function)
# gets last (better) file
url = sorted_list[0]['download']['url']
request = http.Request('GET', self.parent.get_url() + url)
return request, parser_none
class Liquicomun(Archive):
def get_filename(self):
return super(Liquicomun, self).get_filename().lower()
def order_key_function(self, param):
return LIQUICOMUN_PRIORITY.index(param['name'][:2])
def get(self, start_date, end_date, taxonomy_terms=None):
if taxonomy_terms is None:
taxonomy_terms = []
taxonomy_terms.append('Settlements')
return super(Liquicomun, self).get(start_date, end_date, taxonomy_terms)
class A1_liquicomun(Archive):
""" This month and future """
## Validate dates in A1 period (this month & future)
def order_key_function(self, param):
print (param)
if type(param) == list:
param = param[0]
name = (param['name'])
assert name == "A1_liquicomun"
return LIQUICOMUN_PRIORITY.index(name[:2])
class A2_liquicomun(Liquicomun):
""" Just previous month """
pass
| Python | 0.999994 |
4c500ce1995da97861e37647b61efaf14c6b08d0 | Load saved RDD | code/main.py | code/main.py | from spark_model import SparkModel
import socket
from document import Document
from pyspark import SparkContext, SparkConf
from boto.s3.connection import S3Connection
from pyspark import SparkConf, SparkContext
import json
import sys
from datetime import datetime
def log_results(saved, model_type, start_time, end_time, score, n_subs, clean_n_subs):
with open('../logs/log.txt', 'a') as f:
f.write('-'*40+'\n')
duration = str(end_time - start_time).split('.')[0]
f.write('Model: %s\n' % model_type)
f.write('Number of subs: %s\n' % n_subs)
f.write('Percentage subs parsed: %.1f%%\n' % (100*float(clean_n_subs) / n_subs))
f.write('Time to run: %s\n' % duration)
f.write('Accuracy: %.2f\n' % score)
f.write('Saved.'*saved)
if __name__ == '__main__':
with open('/root/.aws/credentials.json') as f:
CREDENTIALS = json.load(f)
# sc = SparkContext()
APP_NAME = 'spark_model'
conf = (SparkConf()
.setAppName(APP_NAME)
.set("spark.executor.cores", 4)
.setMaster('spark://ec2-54-173-173-223.compute-1.amazonaws.com:7077'))
sc = SparkContext(conf=conf, pyFiles=['document.py'])
conn = S3Connection(CREDENTIALS['ACCESS_KEY'], CREDENTIALS['SECRET_ACCESS_KEY'])
model_type = sys.argv[1] if len(sys.argv) > 1 else 'naive_bayes'
start_time = datetime.now()
sm = SparkModel(sc, conn, model_type=model_type)
sm.preprocess('rdd3.pkl')
subs, clean_subs = sm.n_subs, len(sm.labeled_paths)
sm.train()
score = sm.eval_score()
saved = True
try:
sm.labeled_points.saveAsPickleFile('labeled_points.pkl')
except:
saved = False
end_time = datetime.now()
log_results(saved, model_type, start_time, end_time, score, subs, clean_subs)
sc.stop()
| from spark_model import SparkModel
import socket
from document import Document
from pyspark import SparkContext, SparkConf
from boto.s3.connection import S3Connection
from pyspark import SparkConf, SparkContext
import json
import sys
from datetime import datetime
def log_results(model_type, start_time, end_time, score, n_subs, clean_n_subs):
with open('../logs/log.txt', 'a') as f:
f.write('-'*40+'\n')
duration = str(end_time - start_time).split('.')[0]
f.write('Model: %s\n' % model_type)
f.write('Number of subs: %s\n' % n_subs)
f.write('Percentage subs parsed: %.1f%%\n' % (100*float(clean_n_subs) / n_subs))
f.write('Time to run: %s\n' % duration)
f.write('Accuracy: %.2f\n' % score)
if __name__ == '__main__':
with open('/root/.aws/credentials.json') as f:
CREDENTIALS = json.load(f)
# sc = SparkContext()
APP_NAME = 'spark_model'
conf = (SparkConf()
.setAppName(APP_NAME)
.set("spark.executor.cores", 4)
.setMaster('spark://ec2-54-173-173-223.compute-1.amazonaws.com:7077'))
sc = SparkContext(conf=conf, pyFiles=['document.py'])
conn = S3Connection(CREDENTIALS['ACCESS_KEY'], CREDENTIALS['SECRET_ACCESS_KEY'])
model_type = sys.argv[1] if len(sys.argv) > 1 else 'naive_bayes'
start_time = datetime.now()
sm = SparkModel(sc, conn, model_type=model_type)
sm.preprocess()
subs, clean_subs = sm.n_subs, len(sm.labeled_paths)
sm.train()
score = sm.eval_score()
sm.RDD.saveAsPickleFile('rdd.pkl')
end_time = datetime.now()
log_results(model_type, start_time, end_time, score, subs, clean_subs)
sc.stop()
| Python | 0.000001 |
9ec2382de5a3d5377fee03a6151e5afbf36f8e71 | add doc link | code/mode.py | code/mode.py | # an in-depth rundown of this program
# can be found at:
# https://github.com/joshhartigan/learn-programming/blob/master/Most%20Frequent%20Integer.md
def mode(array):
count = {}
for elem in array:
try:
count[elem] += 1
except (KeyError):
count[elem] = 1
# get max count
maximum = 0
modeKey = 0
for key in count.keys():
if count[key] > maximum:
maximum = count[key]
modeKey = key
return modeKey
| def mode(array):
count = {}
for elem in array:
try:
count[elem] += 1
except (KeyError):
count[elem] = 1
# get max count
maximum = 0
modeKey = 0
for key in count.keys():
if count[key] > maximum:
maximum = count[key]
modeKey = key
return modeKey
| Python | 0 |
ae2284fa85e1ef7be43792b72480018729b1c2ba | Bump PEP version for __version__ comment | fluent_blogs/__init__.py | fluent_blogs/__init__.py | # following PEP 440
__version__ = "1.0"
# Fix for internal messy imports.
# When base_models is imported before models/__init__.py runs, there is a circular import:
# base_models -> models/managers.py -> invoking models/__init__.py -> models/db.py -> base_models.py
#
# This doesn't occur when the models are imported first.
| # following PEP 386
__version__ = "1.0"
# Fix for internal messy imports.
# When base_models is imported before models/__init__.py runs, there is a circular import:
# base_models -> models/managers.py -> invoking models/__init__.py -> models/db.py -> base_models.py
#
# This doesn't occur when the models are imported first.
| Python | 0.000001 |
ac249c24c2f72764a8618a0f2e9cd1909d50d1d5 | Allow to specify custom options for EscapeCode preprocessor. | foliant/backends/base.py | foliant/backends/base.py | from importlib import import_module
from shutil import copytree
from datetime import date
from logging import Logger
from foliant.utils import spinner
class BaseBackend(object):
    '''Base backend. All backends must inherit from this one.'''

    # Output formats this backend can produce, e.g. ('pdf', 'docx').
    targets = ()
    # Preprocessors that must always run before/after the user-configured ones.
    required_preprocessors_before = ()
    required_preprocessors_after = ()

    def __init__(self, context: dict, logger: Logger, quiet=False, debug=False):
        '''Store the build context and derive working paths.

        :param context: Foliant build context; must contain ``project_path``
            and ``config`` entries.
        :param logger: Logger used for all backend messages.
        :param quiet: Suppress console output if True.
        :param debug: Emit verbose diagnostics if True.
        '''
        self.project_path = context['project_path']
        self.config = context['config']
        self.context = context
        self.logger = logger
        self.quiet = quiet
        self.debug = debug

        self.working_dir = self.project_path / self.config['tmp_dir']

    def get_slug(self) -> str:
        '''Generate a slug from the project title and version and the current date.

        Spaces in title are replaced with underscores, then the version and
        the current date are appended. An explicit ``slug`` config value
        takes precedence over the generated one.
        '''
        if 'slug' in self.config:
            return self.config['slug']

        components = [self.config['title'].replace(' ', '_')]

        version = self.config.get('version')
        if version:
            components.append(str(version))

        components.append(str(date.today()))

        return '-'.join(components)

    def apply_preprocessor(self, preprocessor):
        '''Apply a single preprocessor to the working directory.

        :param preprocessor: Preprocessor name, or a one-item dict mapping
            the preprocessor name to its options.
        :raises TypeError: If ``preprocessor`` is neither a string nor a dict.
        :raises ModuleNotFoundError: If the preprocessor is not installed.
        '''
        if isinstance(preprocessor, str):
            preprocessor_name, preprocessor_options = preprocessor, {}
        elif isinstance(preprocessor, dict):
            (preprocessor_name, preprocessor_options), = (*preprocessor.items(),)
        else:
            # Previously an unsupported type surfaced as a confusing
            # UnboundLocalError below; fail fast with a clear message instead.
            raise TypeError(
                f'Preprocessor must be a string or a dict, got {type(preprocessor).__name__}'
            )

        with spinner(
            f'Applying preprocessor {preprocessor_name}',
            self.logger,
            self.quiet,
            self.debug
        ):
            try:
                preprocessor_module = import_module(f'foliant.preprocessors.{preprocessor_name}')
                preprocessor_module.Preprocessor(
                    self.context,
                    self.logger,
                    self.quiet,
                    self.debug,
                    preprocessor_options
                ).apply()

            except ModuleNotFoundError:
                raise ModuleNotFoundError(f'Preprocessor {preprocessor_name} is not installed')

            except Exception as exception:
                # Re-raise with context about which preprocessor failed,
                # chaining the original exception so the full traceback survives.
                raise type(exception)(
                    f'Failed to apply preprocessor {preprocessor_name}: {exception}'
                ) from exception

    def preprocess_and_make(self, target: str) -> str:
        '''Apply preprocessors required by the selected backend and defined
        in the config file, then run the ``make`` method.

        :param target: Output format: pdf, docx, html, etc.
        :returns: Result as returned by the ``make`` method
        '''
        src_path = self.project_path / self.config['src_dir']
        copytree(src_path, self.working_dir)

        common_preprocessors = (
            *self.required_preprocessors_before,
            *self.config.get('preprocessors', ()),
            *self.required_preprocessors_after
        )

        if self.config.get('escape_code', False):
            # ``escape_code`` may be a bare truthy flag or a dict carrying
            # custom options for the escapecode preprocessor.
            if isinstance(self.config['escape_code'], dict):
                escapecode_preprocessor = {
                    'escapecode': self.config['escape_code'].get('options', {})
                }
            else:
                escapecode_preprocessor = 'escapecode'

            preprocessors = (
                escapecode_preprocessor,
                *common_preprocessors,
                'unescapecode'
            )
        else:
            # Legacy escaping path.
            preprocessors = (
                *common_preprocessors,
                '_unescape'
            )

        for preprocessor in preprocessors:
            self.apply_preprocessor(preprocessor)

        return self.make(target)

    def make(self, target: str) -> str:
        '''Make the output from the source. Must be implemented by every backend.

        :param target: Output format: pdf, docx, html, etc.
        :returns: Typically, the path to the output file, but in general any string
        '''
        raise NotImplementedError
| from importlib import import_module
from shutil import copytree
from datetime import date
from logging import Logger
from foliant.utils import spinner
class BaseBackend(object):
'''Base backend. All backends must inherit from this one.'''
targets = ()
required_preprocessors_before = ()
required_preprocessors_after = ()
def __init__(self, context: dict, logger: Logger, quiet=False, debug=False):
self.project_path = context['project_path']
self.config = context['config']
self.context = context
self.logger = logger
self.quiet = quiet
self.debug = debug
self.working_dir = self.project_path / self.config['tmp_dir']
def get_slug(self) -> str:
'''Generate a slug from the project title and version and the current date.
Spaces in title are replaced with underscores, then the version and the current date
are appended.
'''
if 'slug' in self.config:
return self.config['slug']
components = []
components.append(self.config['title'].replace(' ', '_'))
version = self.config.get('version')
if version:
components.append(str(version))
components.append(str(date.today()))
return '-'.join(components)
def apply_preprocessor(self, preprocessor: str or dict):
'''Apply preprocessor.
:param preprocessor: Preprocessor name or a dict of the preprocessor name and its options
'''
if isinstance(preprocessor, str):
preprocessor_name, preprocessor_options = preprocessor, {}
elif isinstance(preprocessor, dict):
(preprocessor_name, preprocessor_options), = (*preprocessor.items(),)
with spinner(
f'Applying preprocessor {preprocessor_name}',
self.logger,
self.quiet,
self.debug
):
try:
preprocessor_module = import_module(f'foliant.preprocessors.{preprocessor_name}')
preprocessor_module.Preprocessor(
self.context,
self.logger,
self.quiet,
self.debug,
preprocessor_options
).apply()
except ModuleNotFoundError:
raise ModuleNotFoundError(f'Preprocessor {preprocessor_name} is not installed')
except Exception as exception:
raise type(exception)(
f'Failed to apply preprocessor {preprocessor_name}: {exception}'
)
def preprocess_and_make(self, target: str) -> str:
'''Apply preprocessors required by the selected backend and defined in the config file,
then run the ``make`` method.
:param target: Output format: pdf, docx, html, etc.
:returns: Result as returned by the ``make`` method
'''
src_path = self.project_path / self.config['src_dir']
copytree(src_path, self.working_dir)
common_preprocessors = (
*self.required_preprocessors_before,
*self.config.get('preprocessors', ()),
*self.required_preprocessors_after
)
if self.config.get('escape_code', False):
preprocessors = (
'escapecode',
*common_preprocessors,
'unescapecode'
)
else:
preprocessors = (
*common_preprocessors,
'_unescape'
)
for preprocessor in preprocessors:
self.apply_preprocessor(preprocessor)
return self.make(target)
def make(self, target: str) -> str:
'''Make the output from the source. Must be implemented by every backend.
:param target: Output format: pdf, docx, html, etc.
:returns: Typically, the path to the output file, but in general any string
'''
raise NotImplementedError
| Python | 0 |
ef9cd0033ccfd314592be7987c262a61d0ec2fba | fix thing I apparently never testedgit add light.py | light.py | light.py | import RPi.GPIO as GPIO
class Light:
    """A single light wired to one GPIO output pin."""

    def __init__(self, pin):
        """Configure *pin* as an output; the light starts switched off."""
        self.pin = pin
        self.status = False
        GPIO.setup(pin, GPIO.OUT)

    def toggle(self):
        """Flip the light to the opposite state and apply it."""
        self.status = not self.status
        self.do()

    def on(self):
        """Switch the light on."""
        self._set(True)

    def off(self):
        """Switch the light off."""
        self._set(False)

    def _set(self, state):
        # Record the desired state, then drive the hardware.
        self.status = state
        self.do()

    def do(self):
        """Write the current state to the GPIO pin."""
        GPIO.output(self.pin, self.status)
        if self.status:
            # NOTE(review): assumes `logging` is imported at module scope -- confirm.
            logging.debug("illuminating pin #%(pinNum)d" % {'pinNum': self.pin})
| import RPi.GPIO as GPIO
class Light:
def __init__(self, pin):
self.pin = pin
self.status = False
GPIO.setup(pin, GPIO.OUT)
def toggle(self):
self.status = not self.status
self.do()
def on(self):
self.status = True
self.do()
def off(self):
self.status = False
self.do()
def do(self):
GPIO.output(light.pin, light.status)
if light.status:
logging.debug("illuminating pin #%(pinNum)d" % {'pinNum': light.pin})
| Python | 0 |
f0d4b430b627fb9e2b18ba3f82c936698fac6430 | Update to version 1.3 | __openerp__.py | __openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Account Report CSV, for OpenERP
# Copyright (C) 2013 XCG Consulting (http://odoo.consulting)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Account Report CSV",
"version": "1.3",
"author": "XCG Consulting",
"website": "http://www.openerp-experts.com",
"category": 'Accounting',
"description": """
Export reports as CSV:
- General Ledger
- Trial Balance
Provides the usual filters (by account, period, currency, etc).
""",
"depends": [
'account_report_webkit',
'analytic_structure',
],
"data": [
'wizard/general_ledger_csv_wizard_view.xml',
'wizard/trial_balance_csv_wizard_view.xml',
'csv_menu.xml',
],
'demo_xml': [],
'test': [],
'installable': True,
'active': False,
}
| # -*- coding: utf-8 -*-
##############################################################################
#
# Account Report CSV, for OpenERP
# Copyright (C) 2013 XCG Consulting (http://odoo.consulting)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Account Report CSV",
"version": "1.2",
"author": "XCG Consulting",
"website": "http://www.openerp-experts.com",
"category": 'Accounting',
"description": """
Export reports as CSV:
- General Ledger
- Trial Balance
Provides the usual filters (by account, period, currency, etc).
""",
"depends": [
'account_report_webkit',
'analytic_structure',
],
"data": [
'wizard/general_ledger_csv_wizard_view.xml',
'wizard/trial_balance_csv_wizard_view.xml',
'csv_menu.xml',
],
'demo_xml': [],
'test': [],
'installable': True,
'active': False,
}
| Python | 0 |
7b176d1e775ddec384a76d6de9c121e114a8738e | load ACL | __openerp__.py | __openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Account Analytic Online, for OpenERP
# Copyright (C) 2013 XCG Consulting (www.xcg-consulting.fr)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Analytic Structure",
"version" : "0.1",
"author" : "XCG Consulting",
"category": 'Dependency',
"description": """
This module allows to use several analytic dimensions through a structure related
to an object model.
==================================================================================
""",
'website': 'http://www.openerp-experts.com',
"depends" : ['base'],
"data": [
'security/ir.model.access.csv',
'analytic_dimension.xml',
],
#'demo_xml': [],
'test': [],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- coding: utf-8 -*-
##############################################################################
#
# Account Analytic Online, for OpenERP
# Copyright (C) 2013 XCG Consulting (www.xcg-consulting.fr)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Analytic Structure",
"version" : "0.1",
"author" : "XCG Consulting",
"category": 'Dependency',
"description": """
This module allows to use several analytic dimensions through a structure related
to an object model.
==================================================================================
""",
'website': 'http://www.openerp-experts.com',
"depends" : ['base'],
"data": [
'analytic_dimension.xml',
],
#'demo_xml': [],
'test': [],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Python | 0.000002 |
febfb4c9a5ec5ddfe1f13067c1bc63533e58b09b | DEBUG = False | elgassia/settings.py | elgassia/settings.py | """
Django settings for elgassia project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g*!b+gf-k1j53qo9&uaoz^$j6x4g2^8pzpyf5gjqf7%tam#e@q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
# 'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'main.context_processors.theme',
)
# should be changed in local_settings
ALLOWED_HOSTS = [
'*'
]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'jquery',
'main',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'elgassia.urls'
WSGI_APPLICATION = 'elgassia.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# use dj_database_url as default default
try:
import dj_database_url
DATABASES['default'] = dj_database_url.config()
except ImportError:
pass
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = ()
try:
from local_settings import *
except ImportError:
pass
| """
Django settings for elgassia project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g*!b+gf-k1j53qo9&uaoz^$j6x4g2^8pzpyf5gjqf7%tam#e@q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
# 'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'main.context_processors.theme',
)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'jquery',
'main',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'elgassia.urls'
WSGI_APPLICATION = 'elgassia.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
try:
import dj_database_url
DATABASES['default'] = dj_database_url.config()
except ImportError:
pass
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| Python | 0.000001 |
08fddbdc0ac70a549bac82131771218107186def | add discription | __openerp__.py | __openerp__.py | # -*- coding: utf-8 -*-
{
'name': "Account Discount",
'summary': """
Use Tax model for discounts as well""",
'description': """
Odoo OpenERP Account Discount from Tax
    This module adds a new concept: using the tax model as a discount model, printing both taxes and discounts separately.
The steps to perform are very easy:
First you define new tax with negative amount (e.g Name: Discount 10%, Amount: -0.10).
Enable Is Discount Checkbox.
Then add this dicount from the Taxes/Discounts column per invoice line.
This way, you can separate and analyze discounts using different account/analytic account as well.
""",
'author': "Khaled Hamed",
'website': "http://www.grandtk.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Accounting',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'account'],
# always loaded
'data': [
'account_discount_view.xml'
],
'installable': True,
'price': 5,
'currency': 'EUR',
}
| # -*- coding: utf-8 -*-
{
'name': "Account Discount",
'summary': """
Apply Discount model to taxes""",
'description': """
The purpose is to apply discount record for the same tax model
""",
'author': "Khaled Hamed",
'website': "http://www.grandtk.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Accounting',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'account'],
# always loaded
'data': [
'account_discount_view.xml'
],
'installable': True,
'price': 5,
'currency': 'EUR',
}
| Python | 0.000952 |
6d83f2150f7c6177385b9f2d8abbe48cd2979130 | Add staleness to MonthCache Admin display | events/admin.py | events/admin.py | from django.contrib import admin
from .models import Calendar,MonthCache
# Register your models here.
@admin.register(Calendar)
class CalendarAdmin(admin.ModelAdmin):
    """Admin changelist configuration for Calendar."""

    # Columns shown in the calendar list view.
    list_display = ('name', 'remote_id', 'css_class')
@admin.register(MonthCache)
class MonthCacheAdmin(admin.ModelAdmin):
    """Admin changelist configuration for MonthCache."""

    # Columns shown in the month-cache list view; `is_cache_stale` is
    # presumably a model attribute/method flagging stale entries -- confirm
    # against the MonthCache model.
    list_display = ('calendar', 'month', 'data_cached_on', 'is_cache_stale')
| from django.contrib import admin
from .models import Calendar,MonthCache
# Register your models here.
@admin.register(Calendar)
class CalendarAdmin(admin.ModelAdmin):
list_display = ('name','remote_id','css_class')
@admin.register(MonthCache)
class MonthCacheAdmin(admin.ModelAdmin):
list_display = ('calendar','month','data_cached_on')
| Python | 0 |
d308bbd0200e1b4783bf63cafda03650579b9351 | change help text | ynr/apps/official_documents/models.py | ynr/apps/official_documents/models.py | import os
from django.db import models
from django.urls import reverse
from django_extensions.db.models import TimeStampedModel
DOCUMENT_UPLOADERS_GROUP_NAME = "Document Uploaders"
def document_file_name(instance, filename):
return os.path.join(
"official_documents", str(instance.ballot.ballot_paper_id), filename
)
class OfficialDocument(TimeStampedModel):
    """An official election document (e.g. a nomination paper) for a ballot."""

    NOMINATION_PAPER = "Nomination paper"

    # (value, label, plural label); only the first two feed `choices` below.
    DOCUMENT_TYPES = (
        (NOMINATION_PAPER, "Nomination paper", "Nomination papers"),
    )

    document_type = models.CharField(
        blank=False,
        choices=[(d[0], d[1]) for d in DOCUMENT_TYPES],
        max_length=100,
    )

    uploaded_file = models.FileField(
        upload_to=document_file_name, max_length=800
    )

    ballot = models.ForeignKey(
        "candidates.Ballot", null=False, on_delete=models.CASCADE
    )

    source_url = models.URLField(
        help_text="The URL of this document", max_length=1000
    )

    # Comma-separated page numbers, "all", or "" (unspecified).
    relevant_pages = models.CharField(
        "The pages containing information about this ballot",
        max_length=50,
        default="",
    )

    class Meta:
        get_latest_by = "modified"

    def __str__(self):
        return "{} ({})".format(self.ballot.ballot_paper_id, self.source_url)

    def get_absolute_url(self):
        return reverse(
            "ballot_paper_sopn",
            kwargs={"ballot_id": self.ballot.ballot_paper_id},
        )

    @property
    def locked(self):
        """
        Is this post election locked?
        """
        return self.ballot.candidates_locked

    @property
    def lock_suggested(self):
        """
        Is there a suggested lock for this document?
        """
        return self.ballot.suggestedpostlock_set.exists()

    def get_pages(self):
        """Return the sorted relevant page numbers, or None for "" / "all"."""
        if self.relevant_pages and not self.relevant_pages == "all":
            pages = self.relevant_pages.split(",")
            return sorted(int(p) for p in pages)
        return None

    @property
    def first_page_number(self):
        """First relevant page number, or None if pages are unspecified."""
        # Call get_pages() once instead of twice (it re-parses every call).
        pages = self.get_pages()
        if pages:
            return pages[0]
        return None

    @property
    def last_page_number(self):
        """Last relevant page number, or None if pages are unspecified."""
        pages = self.get_pages()
        if pages:
            return pages[-1]
        return None
| import os
from django.db import models
from django.urls import reverse
from django_extensions.db.models import TimeStampedModel
DOCUMENT_UPLOADERS_GROUP_NAME = "Document Uploaders"
def document_file_name(instance, filename):
return os.path.join(
"official_documents", str(instance.ballot.ballot_paper_id), filename
)
class OfficialDocument(TimeStampedModel):
NOMINATION_PAPER = "Nomination paper"
DOCUMENT_TYPES = (
(NOMINATION_PAPER, "Nomination paper", "Nomination papers"),
)
document_type = models.CharField(
blank=False,
choices=[(d[0], d[1]) for d in DOCUMENT_TYPES],
max_length=100,
)
uploaded_file = models.FileField(
upload_to=document_file_name, max_length=800
)
ballot = models.ForeignKey(
"candidates.Ballot", null=False, on_delete=models.CASCADE
)
source_url = models.URLField(
help_text="The page that links to this document", max_length=1000
)
relevant_pages = models.CharField(
"The pages containing information about this ballot",
max_length=50,
default="",
)
class Meta:
get_latest_by = "modified"
def __str__(self):
return "{} ({})".format(self.ballot.ballot_paper_id, self.source_url)
def get_absolute_url(self):
return reverse(
"ballot_paper_sopn",
kwargs={"ballot_id": self.ballot.ballot_paper_id},
)
@property
def locked(self):
"""
Is this post election locked?
"""
return self.ballot.candidates_locked
@property
def lock_suggested(self):
"""
Is there a suggested lock for this document?
"""
return self.ballot.suggestedpostlock_set.exists()
def get_pages(self):
if self.relevant_pages and not self.relevant_pages == "all":
pages = self.relevant_pages.split(",")
return sorted(int(p) for p in pages)
@property
def first_page_number(self):
if self.get_pages():
return self.get_pages()[0]
@property
def last_page_number(self):
if self.get_pages():
return self.get_pages()[-1]
| Python | 0.000029 |
cedae39716587fcc0459a05e74acc43b190d7457 | split download | example-era5.py | example-era5.py | #!/usr/bin/env python
# (C) Copyright 2018 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
import cdsapi
c = cdsapi.Client()

# Request ERA5 reanalysis temperature at 250 hPa, 12:00 UTC,
# for every day of December 2017, in GRIB format.
r = c.retrieve("reanalysis-era5-pressure-levels",
               {
                   "variable": "temperature",
                   "pressure_level": "250",
                   "product_type": "reanalysis",
                   "date": "2017-12-01/2017-12-31",
                   "time": "12:00",
                   "format": "grib"
               })

# Fix: the output filename was misspelled as 'dowload.grib'.
r.download("download.grib")
| #!/usr/bin/env python
# (C) Copyright 2018 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
import cdsapi
c = cdsapi.Client()
r = c.retrieve("reanalysis-era5-pressure-levels",
{
"variable": "temperature",
"pressure_level": "250",
"product_type": "reanalysis",
"date": "2017-12-01/2017-12-31",
"time": "12:00",
"format": "grib"
})
r.download("dowload.grib")
print(r)
r.delete()
| Python | 0.000005 |
cdf7dfc01cca8472c517d2a93d89e97e1f838103 | Add metanode to degree_df functionality | hetio/stats.py | hetio/stats.py | import pandas
import matplotlib
import matplotlib.backends.backend_pdf
import seaborn
def get_degrees_for_metanode(graph, metanode):
    """
    Return a long-format dataframe with one row per (node, metaedge) pair
    for every node of kind *metanode*, reporting that node's degree along
    the metaedge. Rows are sorted by node name, then metaedge.
    """
    nodes = graph.get_metanode_to_nodes().get(metanode, [])
    records = [
        (node.identifier, node.name, str(metaedge), len(edges))
        for node in nodes
        for metaedge, edges in node.edges.items()
    ]
    degree_df = pandas.DataFrame(
        records, columns=['node_id', 'node_name', 'metaedge', 'degree'])
    return degree_df.sort_values(['node_name', 'metaedge'])
def get_metanode_to_degree_df(graph):
    """
    Return a dictionary of metanode to degree_df, where degree_df is a
    wide-format dataframe of node degrees.
    """
    metanode_to_degree_df = dict()
    for metanode in graph.metagraph.get_nodes():
        # Long-format degrees for this metanode: one row per (node, metaedge).
        degree_df = get_degrees_for_metanode(graph, metanode)
        # Pivot to wide format: one row per node, one column per metaedge.
        degree_df = pandas.pivot_table(degree_df, values='degree',
            index=['node_id', 'node_name'], columns='metaedge').reset_index()
        metanode_to_degree_df[metanode] = degree_df
    return metanode_to_degree_df
def degrees_to_excel(graph, path):
    """
    Write node degrees to a multisheet excel spreadsheet. Path should end in
    a valid excel extension that `pandas.ExcelWriter` can detect, such as
    `.xlsx`.
    """
    metanode_to_degree_df = get_metanode_to_degree_df(graph)
    writer = pandas.ExcelWriter(path)
    for metanode, degree_df in metanode_to_degree_df.items():
        # One worksheet per metanode, named after the metanode.
        degree_df.to_excel(writer, sheet_name=str(metanode), index=False)
    writer.close()
def plot_degrees_for_metanode(graph, metanode, col_wrap=2, facet_height=4):
    """
    Plots histograms of the degree distribution of each metaedge
    incident to the metanode. Each metaedge receives a facet in
    a seaborn.FacetGrid.
    """
    degree_df = get_degrees_for_metanode(graph, metanode)
    # One facet per metaedge; axes are unshared because degree ranges differ
    # widely between metaedges.
    grid = seaborn.FacetGrid(degree_df, col='metaedge', sharex=False, sharey=False, col_wrap=col_wrap, size=facet_height)
    grid.map(seaborn.distplot, 'degree', kde=False)
    grid.set_titles('{col_name}')
    return grid
def plot_degrees(graph, path):
    """
    Creates a multipage pdf with a page for each metanode showing degree
    distributions.
    """
    # Temporarily disable `figure.max_open_warning`: one figure per metanode
    # stays open until the PdfPages file is closed below.
    max_open = matplotlib.rcParams['figure.max_open_warning']
    matplotlib.rcParams['figure.max_open_warning'] = 0
    pdf_pages = matplotlib.backends.backend_pdf.PdfPages(path)
    for metanode in graph.metagraph.get_nodes():
        grid = plot_degrees_for_metanode(graph, metanode)
        grid.savefig(pdf_pages, format='pdf')
    pdf_pages.close()
    # Restore the user's original warning threshold.
    matplotlib.rcParams['figure.max_open_warning'] = max_open
def get_metanode_df(graph):
    """Return a dataframe summarizing each metanode of *graph*.

    One row per metanode with its abbreviation, number of (non-inverted)
    metaedges, node count, and count of nodes with no edges at all.
    """
    columns = ['metanode', 'abbreviation', 'metaedges', 'nodes', 'unconnected_nodes']
    rows = list()
    for metanode, nodes in graph.get_metanode_to_nodes().items():
        # Collect each incident metaedge together with its inverse, then
        # count only the canonical (non-inverted) directions.
        metaedges = set()
        for metaedge in metanode.edges:
            metaedges |= {metaedge, metaedge.inverse}
        rows.append({
            'metanode': metanode,
            'abbreviation': metanode.abbrev,
            'metaedges': sum(not metaedge.inverted for metaedge in metaedges),
            'nodes': len(nodes),
            'unconnected_nodes': sum(not any(node.edges.values()) for node in nodes),
        })
    # Specify columns explicitly so an empty graph yields an empty dataframe
    # with the expected columns instead of raising KeyError in sort_values.
    # (Dict rows also avoid building a deprecated empty pandas.Series per row.)
    metanode_df = pandas.DataFrame(rows, columns=columns).sort_values('metanode')
    return metanode_df
def get_metaedge_df(graph):
    """Return a dataframe summarizing each metaedge of *graph*.

    One row per metaedge (inverts excluded) with its total edge count and
    the counts of distinct source and target nodes.
    """
    columns = ['metaedge', 'abbreviation', 'edges', 'source_nodes', 'target_nodes']
    rows = list()
    for metaedge, edges in graph.get_metaedge_to_edges(exclude_inverts=True).items():
        rows.append({
            'metaedge': str(metaedge),
            'abbreviation': metaedge.get_abbrev(),
            'edges': len(edges),
            'source_nodes': len(set(edge.source for edge in edges)),
            'target_nodes': len(set(edge.target for edge in edges)),
        })
    # Specify columns explicitly so an empty graph yields an empty dataframe
    # with the expected columns instead of raising KeyError in sort_values.
    metaedge_df = pandas.DataFrame(rows, columns=columns).sort_values('metaedge')
    return metaedge_df
| import pandas
import matplotlib
import matplotlib.backends.backend_pdf
import seaborn
def get_degrees_for_metanode(graph, metanode):
"""
Return a dataframe that reports the degree of each metaedge for
each node of kind metanode.
"""
metanode_to_nodes = graph.get_metanode_to_nodes()
nodes = metanode_to_nodes.get(metanode, [])
rows = list()
for node in nodes:
for metaedge, edges in node.edges.items():
rows.append((str(node), node.name, str(metaedge), len(edges)))
df = pandas.DataFrame(rows, columns=['node_id', 'node_name', 'metaedge', 'degree'])
return df.sort_values(['node_name', 'metaedge'])
def plot_degrees_for_metanode(graph, metanode, col_wrap=2, facet_height=4):
"""
Plots histograms of the degree distribution of each metaedge
incident to the metanode. Each metaedge receives a facet in
a seaborn.FacetGrid.
"""
degree_df = get_degrees_for_metanode(graph, metanode)
grid = seaborn.FacetGrid(degree_df, col='metaedge', sharex=False, sharey=False, col_wrap=col_wrap, size=facet_height)
grid.map(seaborn.distplot, 'degree', kde=False)
grid.set_titles('{col_name}')
return grid
def plot_degrees(graph, path):
"""
Creates a multipage pdf with a page for each metanode showing degree
distributions.
"""
# Temporarily disable `figure.max_open_warning`
max_open = matplotlib.rcParams['figure.max_open_warning']
matplotlib.rcParams['figure.max_open_warning'] = 0
pdf_pages = matplotlib.backends.backend_pdf.PdfPages(path)
for metanode in graph.metagraph.get_nodes():
grid = plot_degrees_for_metanode(graph, metanode)
grid.savefig(pdf_pages, format='pdf')
pdf_pages.close()
matplotlib.rcParams['figure.max_open_warning'] = max_open
def get_metanode_df(graph):
rows = list()
for metanode, nodes in graph.get_metanode_to_nodes().items():
series = pandas.Series()
series['metanode'] = metanode
series['abbreviation'] = metanode.abbrev
metaedges = set()
for metaedge in metanode.edges:
metaedges |= {metaedge, metaedge.inverse}
series['metaedges'] = sum([not metaedge.inverted for metaedge in metaedges])
series['nodes'] = len(nodes)
series['unconnected_nodes'] = sum(not any(node.edges.values()) for node in nodes)
rows.append(series)
metanode_df = pandas.DataFrame(rows).sort_values('metanode')
return metanode_df
def get_metaedge_df(graph):
rows = list()
for metaedge, edges in graph.get_metaedge_to_edges(exclude_inverts=True).items():
series = pandas.Series()
series['metaedge'] = str(metaedge)
series['abbreviation'] = metaedge.get_abbrev()
series['edges'] = len(edges)
series['source_nodes'] = len(set(edge.source for edge in edges))
series['target_nodes'] = len(set(edge.target for edge in edges))
rows.append(series)
metaedge_df = pandas.DataFrame(rows).sort_values('metaedge')
return metaedge_df
| Python | 0 |
3846907435da720c075ab89579b970da5019b49f | Add Tapastic/AmpleTime | dosagelib/plugins/tapastic.py | dosagelib/plugins/tapastic.py | # SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
import json
import re
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class Tapastic(_ParserScraper):
baseUrl = 'https://tapas.io/'
imageSearch = '//article[contains(@class, "js-episode-article")]//img/@data-src'
prevSearch = '//a[contains(@class, "js-prev-ep-btn")]'
latestSearch = '//ul[contains(@class, "js-episode-list")]//a'
starter = indirectStarter
multipleImagesPerStrip = True
def __init__(self, name, url):
super(Tapastic, self).__init__('Tapastic/' + name)
self.url = self.baseUrl + 'series/' + url
self.stripUrl = self.baseUrl + 'episode/%s'
def fetchUrls(self, url, data, urlSearch):
# Save link order for position-based filenames
self.imageUrls = super().fetchUrls(url, data, urlSearch)
return self.imageUrls
def namer(self, imageUrl, pageUrl):
# Construct filename from episode number and image position on page
episodeNum = pageUrl.rsplit('/', 1)[-1]
imageNum = self.imageUrls.index(imageUrl)
imageExt = pageUrl.rsplit('.', 1)[-1]
if len(self.imageUrls) > 1:
filename = "%s-%d.%s" % (episodeNum, imageNum, imageExt)
else:
filename = "%s.%s" % (episodeNum, imageExt)
return filename
@classmethod
def getmodules(cls):
return (
# Manually-added comics
cls('AmpleTime', 'Ample-Time'),
cls('NoFuture', 'NoFuture'),
cls('OrensForge', 'OrensForge'),
cls('RavenWolf', 'RavenWolf'),
cls('TheCatTheVineAndTheVictory', 'The-Cat-The-Vine-and-The-Victory'),
cls('TheGodsPack', 'The-Gods-Pack'),
# START AUTOUPDATE
# END AUTOUPDATE
)
| # SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
import json
import re
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class Tapastic(_ParserScraper):
baseUrl = 'https://tapas.io/'
imageSearch = '//article[contains(@class, "js-episode-article")]//img/@data-src'
prevSearch = '//a[contains(@class, "js-prev-ep-btn")]'
latestSearch = '//ul[contains(@class, "js-episode-list")]//a'
starter = indirectStarter
multipleImagesPerStrip = True
def __init__(self, name, url):
super(Tapastic, self).__init__('Tapastic/' + name)
self.url = self.baseUrl + 'series/' + url
self.stripUrl = self.baseUrl + 'episode/%s'
def fetchUrls(self, url, data, urlSearch):
# Save link order for position-based filenames
self.imageUrls = super().fetchUrls(url, data, urlSearch)
return self.imageUrls
def namer(self, imageUrl, pageUrl):
# Construct filename from episode number and image position on page
episodeNum = pageUrl.rsplit('/', 1)[-1]
imageNum = self.imageUrls.index(imageUrl)
imageExt = pageUrl.rsplit('.', 1)[-1]
if len(self.imageUrls) > 1:
filename = "%s-%d.%s" % (episodeNum, imageNum, imageExt)
else:
filename = "%s.%s" % (episodeNum, imageExt)
return filename
@classmethod
def getmodules(cls):
return (
# Manually-added comics
cls('NoFuture', 'NoFuture'),
cls('OrensForge', 'OrensForge'),
cls('RavenWolf', 'RavenWolf'),
cls('TheCatTheVineAndTheVictory', 'The-Cat-The-Vine-and-The-Victory'),
cls('TheGodsPack', 'The-Gods-Pack'),
# START AUTOUPDATE
# END AUTOUPDATE
)
| Python | 0.000001 |
62314491b148c51e7c27e13aded283a0622c47f4 | improve h5py config check | hpat/config.py | hpat/config.py | try:
from .io import _hdf5
import h5py
# TODO: make sure h5py/hdf5 supports parallel
except ImportError:
_has_h5py = False
else:
_has_h5py = True
try:
import pyarrow
except ImportError:
_has_pyarrow = False
else:
_has_pyarrow = True
try:
from . import ros_cpp
except ImportError:
_has_ros = False
else:
_has_ros = True
try:
from . import cv_wrapper
except ImportError:
_has_opencv = False
else:
_has_opencv = True
import hpat.cv_ext
try:
from . import hxe_ext
except ImportError:
_has_xenon = False
else:
_has_xenon = True
import hpat.io.xenon_ext
| try:
from .io import _hdf5
except ImportError:
_has_h5py = False
else:
_has_h5py = True
try:
import pyarrow
except ImportError:
_has_pyarrow = False
else:
_has_pyarrow = True
try:
from . import ros_cpp
except ImportError:
_has_ros = False
else:
_has_ros = True
try:
from . import cv_wrapper
except ImportError:
_has_opencv = False
else:
_has_opencv = True
import hpat.cv_ext
try:
from . import hxe_ext
except ImportError:
_has_xenon = False
else:
_has_xenon = True
import hpat.io.xenon_ext
| Python | 0 |
75729e3e06c560892f0bf285fdd8a15f9f58b7d5 | Delete local file with no signature, without trying reget | lib/oelite/fetch/url.py | lib/oelite/fetch/url.py | import oelite.fetch
import bb.utils
import os
import urlgrabber
import hashlib
class UrlFetcher():
SUPPORTED_SCHEMES = ("http", "https", "ftp")
def __init__(self, uri, d):
if not uri.scheme in self.SUPPORTED_SCHEMES:
raise Exception(
"Scheme %s not supported by oelite.fetch.UrlFetcher"%(
uri.scheme))
self.url = "%s://%s"%(uri.scheme, uri.location)
try:
isubdir = uri.params["isubdir"]
except KeyError:
isubdir = uri.isubdir
self.localname = os.path.basename(uri.location)
self.localpath = os.path.join(uri.ingredients, isubdir, self.localname)
self.signatures = d.get("FILE") + ".sig"
self.uri = uri
self.fetch_signatures = d["__fetch_signatures"]
return
def signature(self):
try:
self._signature = self.fetch_signatures[self.localname]
return self._signature
except KeyError:
raise oelite.fetch.NoSignature(self.uri, "signature unknown")
def grab(self, url, reget=None):
print "grabbing %s"%(url)
if reget:
try:
return urlgrabber.urlgrab(url, self.localpath, reget=reget)
except urlgrabber.grabber.URLGrabError as e:
print 'URLGrabError %i: %s' % (e.errno, e.strerror)
if not (e[0] == 14 and e[1].startswith("HTTP Error 416")):
return None
try:
return urlgrabber.urlgrab(url, self.localpath)
except urlgrabber.grabber.URLGrabError as e:
print 'URLGrabError %i: %s' % (e.errno, e.strerror)
return None
def fetch(self):
localdir = os.path.dirname(self.localpath)
if not os.path.exists(localdir):
bb.utils.mkdirhier(localdir)
url = self.url
while url:
if os.path.exists(self.localpath):
if "_signature" in dir(self):
m = hashlib.sha1()
m.update(open(self.localpath, "r").read())
if self._signature == m.hexdigest():
return True
else:
print "Expected signature: %s"%self._signature
print "Obtained signature: %s"%m.hexdigest()
raise Exception("Signature mismatch")
os.unlink(self.localpath)
f = self.grab(url)
if f:
break
url = self.uri.alternative_mirror()
if not f or f != self.localpath:
return False
m = hashlib.sha1()
m.update(open(self.localpath, "r").read())
signature = m.hexdigest()
if not "_signature" in dir(self):
return (self.localname, signature)
return signature == self._signature
| import oelite.fetch
import bb.utils
import os
import urlgrabber
import hashlib
class UrlFetcher():
SUPPORTED_SCHEMES = ("http", "https", "ftp")
def __init__(self, uri, d):
if not uri.scheme in self.SUPPORTED_SCHEMES:
raise Exception(
"Scheme %s not supported by oelite.fetch.UrlFetcher"%(
uri.scheme))
self.url = "%s://%s"%(uri.scheme, uri.location)
try:
isubdir = uri.params["isubdir"]
except KeyError:
isubdir = uri.isubdir
self.localname = os.path.basename(uri.location)
self.localpath = os.path.join(uri.ingredients, isubdir, self.localname)
self.signatures = d.get("FILE") + ".sig"
self.uri = uri
self.fetch_signatures = d["__fetch_signatures"]
return
def signature(self):
try:
self._signature = self.fetch_signatures[self.localname]
return self._signature
except KeyError:
raise oelite.fetch.NoSignature(self.uri, "signature unknown")
def grab(self, url, reget=None):
print "grabbing %s"%(url)
if reget:
try:
return urlgrabber.urlgrab(url, self.localpath, reget=reget)
except urlgrabber.grabber.URLGrabError as e:
print 'URLGrabError %i: %s' % (e.errno, e.strerror)
if not (e[0] == 14 and e[1].startswith("HTTP Error 416")):
return None
try:
return urlgrabber.urlgrab(url, self.localpath)
except urlgrabber.grabber.URLGrabError as e:
print 'URLGrabError %i: %s' % (e.errno, e.strerror)
return None
def fetch(self):
localdir = os.path.dirname(self.localpath)
if not os.path.exists(localdir):
bb.utils.mkdirhier(localdir)
url = self.url
while url:
if os.path.exists(self.localpath):
if "_signature" in dir(self):
m = hashlib.sha1()
m.update(open(self.localpath, "r").read())
if self._signature == m.hexdigest():
return True
else:
print "Expected signature: %s"%self._signature
print "Obtained signature: %s"%m.hexdigest()
raise Exception("Signature mismatch")
f = self.grab(url, reget="simple")
else:
f = self.grab(url)
if f:
break
url = self.uri.alternative_mirror()
if not f or f != self.localpath:
return False
m = hashlib.sha1()
m.update(open(self.localpath, "r").read())
signature = m.hexdigest()
if not "_signature" in dir(self):
return (self.localname, signature)
return signature == self._signature
| Python | 0 |
ae7a5bef1e3ee0216651dc4aeef3abcbab3cf76e | update code | Strings/alternating-characters.py | Strings/alternating-characters.py | # Alternating Characters
# Developer: Murillo Grubler
# Link: https://www.hackerrank.com/challenges/alternating-characters/problem
# Time complexity: O(n)
def alternatingCharacters(s):
sumChars = 0
for i in range(len(s)):
if i == 0 or tempChar != s[i]:
tempChar = s[i]
continue
if tempChar == s[i]:
sumChars += 1
return sumChars
q = int(input().strip())
for a0 in range(q):
print(alternatingCharacters(input().strip())) | # Alternating Characters
# Developer: Murillo Grubler
# Link: https://www.hackerrank.com/challenges/alternating-characters/problem
def alternatingCharacters(s):
sumChars = 0
for i in range(len(s)):
if i == 0 or tempChar != s[i]:
tempChar = s[i]
continue
if tempChar == s[i]:
sumChars += 1
return sumChars
q = int(input().strip())
for a0 in range(q):
print(alternatingCharacters(input().strip())) | Python | 0 |
44fbc835354b7612d5d203250255a323c8759b64 | fix log %(levelname)-8s to align | torequests/logs.py | torequests/logs.py | #! coding:utf-8
import logging
dummy_logger = logging.getLogger('torequests.dummy')
main_logger = logging.getLogger('torequests.main')
def init_logger(name='', handler_path_levels=None,
level=logging.INFO, formatter=None,
formatter_str=None, datefmt="%Y-%m-%d %H:%M:%S"):
"""Args:
name = '' or logger obj.
handler_path_levels = [['loggerfile.log',13],['','DEBUG'],['','info'],['','notSet']] # [[path,level]]
level : the least level for the logger.
formatter = logging.Formatter(
'%(levelname)-8s %(asctime)s %(name)s (%(filename)s: %(lineno)s): %(message)s',
"%Y-%m-%d %H:%M:%S")
formatter_str = '%(levelname)-8s %(asctime)s %(name)s (%(funcName)s: %(lineno)s): %(message)s'
custom formatter:
%(asctime)s %(created)f %(filename)s %(funcName)s %(levelname)s %(levelno)s %(lineno)s %(message)s %(module)s %(name)s %(pathname)s %(process)s %(relativeCreated)s %(thread)s %(threadName)s
"""
levels = {'NOTSET': logging.NOTSET, 'DEBUG': logging.DEBUG, 'INFO': logging.INFO,
'WARNING': logging.WARNING, 'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL}
if not formatter:
if formatter_str:
formatter_str = formatter_str
else:
formatter_str = '%(levelname)-8s %(asctime)s %(name)s (%(filename)s: %(lineno)s): %(message)s'
formatter = logging.Formatter(formatter_str, datefmt=datefmt)
logger = name if isinstance(
name, logging.Logger) else logging.getLogger(str(name))
logger.setLevel(level)
handler_path_levels = handler_path_levels or [['', 'INFO']]
# ---------------------------------------
for each_handler in handler_path_levels:
path, handler_level = each_handler
handler = logging.FileHandler(
path) if path else logging.StreamHandler()
handler.setLevel(levels.get(handler_level.upper(), 1) if isinstance(
handler_level, str) else handler_level)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
| #! coding:utf-8
import logging
dummy_logger = logging.getLogger('torequests.dummy')
main_logger = logging.getLogger('torequests.main')
def init_logger(name='', handler_path_levels=None,
level=logging.INFO, formatter=None,
formatter_str=None, datefmt="%Y-%m-%d %H:%M:%S"):
"""Args:
name = '' or logger obj.
handler_path_levels = [['loggerfile.log',13],['','DEBUG'],['','info'],['','notSet']] # [[path,level]]
level : the least level for the logger.
formatter = logging.Formatter(
'%(levelname)-6s %(asctime)s %(name)s (%(filename)s: %(lineno)s): %(message)s',
"%Y-%m-%d %H:%M:%S")
formatter_str = '%(levelname)-6s %(asctime)s %(name)s (%(funcName)s: %(lineno)s): %(message)s'
custom formatter:
%(asctime)s %(created)f %(filename)s %(funcName)s %(levelname)s %(levelno)s %(lineno)s %(message)s %(module)s %(name)s %(pathname)s %(process)s %(relativeCreated)s %(thread)s %(threadName)s
"""
levels = {'NOTSET': logging.NOTSET, 'DEBUG': logging.DEBUG, 'INFO': logging.INFO,
'WARNING': logging.WARNING, 'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL}
if not formatter:
if formatter_str:
formatter_str = formatter_str
else:
formatter_str = '%(levelname)-6s %(asctime)s %(name)s (%(filename)s: %(lineno)s): %(message)s'
formatter = logging.Formatter(formatter_str, datefmt=datefmt)
logger = name if isinstance(
name, logging.Logger) else logging.getLogger(str(name))
logger.setLevel(level)
handler_path_levels = handler_path_levels or [['', 'INFO']]
# ---------------------------------------
for each_handler in handler_path_levels:
path, handler_level = each_handler
handler = logging.FileHandler(
path) if path else logging.StreamHandler()
handler.setLevel(levels.get(handler_level.upper(), 1) if isinstance(
handler_level, str) else handler_level)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
| Python | 0.000001 |
db47a651e380709c33c54c86f9a3861187772406 | Add metrics to MNIST | eva/examples/mnist.py | eva/examples/mnist.py | #%% Setup.
from collections import namedtuple
import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import Nadam
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils
from keras.utils.visualize_util import plot
from keras import backend as K
from eva.models.pixelcnn import PixelCNN
Data = namedtuple('Data', 'x y')
nb_classes = 10
img_rows, img_cols = 28, 28
nb_filters = 128
blocks = 4
batch_size = 128
nb_epoch = 4
def clean_data(x, y, rows, cols):
if K.image_dim_ordering() == 'th':
x = x.reshape(x.shape[0], 1, rows, cols)
input_shape = (1, rows, cols)
else:
x = x.reshape(x.shape[0], rows, cols, 1)
input_shape = (rows, cols, 1)
x = x.astype('float32') / 255
y = np_utils.to_categorical(y, nb_classes)
# New way
x[np.where(x > 0)] = 1
print('X shape:', x.shape)
print(x.shape[0], 'samples')
return x, y
def get_data(rows, cols):
return [Data(*clean_data(*data, rows, cols)) for data in mnist.load_data()]
def get_input(rows, cols):
return (1, rows, cols) if K.image_dim_ordering() == 'th' else (rows, cols, 1)
train, test = get_data(img_rows, img_cols)
input_shape = get_input(img_rows, img_cols)
input_dims = np.prod(input_shape)
model = PixelCNN(input_shape, nb_filters, blocks)
model.summary()
plot(model)
#%% Train.
model.fit(train.x, train.x, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(test.x, test.x))
score = model.evaluate(test.x, test.x, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
#%% Save model.
model.save('pixelcnn.h5')
| #%% Setup.
from collections import namedtuple
import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import Nadam
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils
from keras.utils.visualize_util import plot
from keras import backend as K
from eva.models.pixelcnn import PixelCNN
Data = namedtuple('Data', 'x y')
nb_classes = 10
img_rows, img_cols = 28, 28
nb_filters = 128
blocks = 4
batch_size = 128
nb_epoch = 4
def clean_data(x, y, rows, cols):
if K.image_dim_ordering() == 'th':
x = x.reshape(x.shape[0], 1, rows, cols)
input_shape = (1, rows, cols)
else:
x = x.reshape(x.shape[0], rows, cols, 1)
input_shape = (rows, cols, 1)
x = x.astype('float32') / 255
y = np_utils.to_categorical(y, nb_classes)
# New way
x[np.where(x > 0)] = 1
print('X shape:', x.shape)
print(x.shape[0], 'samples')
return x, y
def get_data(rows, cols):
return [Data(*clean_data(*data, rows, cols)) for data in mnist.load_data()]
def get_input(rows, cols):
return (1, rows, cols) if K.image_dim_ordering() == 'th' else (rows, cols, 1)
train, test = get_data(img_rows, img_cols)
input_shape = get_input(img_rows, img_cols)
input_dims = np.prod(input_shape)
model = PixelCNN(input_shape, nb_filters, blocks)
model.summary()
plot(model)
#%% Train.
model.fit(train.x, train.x, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(test.x, test.x))
score = model.evaluate(test.x, test.x, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
#%% Save model.
model.save('pixelcnn.h5') | Python | 0.000019 |
7dc20e510a1e8b93d470c8d26c530a0ce7affefb | format indent. (#3) | flasklogin.py | flasklogin.py | from flask import Flask , request , abort , redirect , Response ,url_for
from flask.ext.login import LoginManager , login_required , UserMixin , login_user
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret_key'
login_manager = LoginManager()
login_manager.login_view = "login"
login_manager.init_app(app)
class User(UserMixin):
def __init__(self , username , password , id , active=True):
self.id = id
self.username = username
self.password = password
self.active = active
def get_id(self):
return self.id
def is_active(self):
return self.active
def get_auth_token(self):
return make_secure_token(self.username , key='secret_key')
class UsersRepository:
def __init__(self):
self.users = dict()
self.users_id_dict = dict()
self.identifier = 0
def save_user(self , user):
self.users_id_dict.setdefault(user.id , user)
self.users.setdefault(user.username , user)
def get_user(self , username):
return self.users.get(username)
def get_user_by_id(self , userid):
return self.users.get(userid)
def next_index(self):
self.identifier +=1
return self.identifier
users_repository = UsersRepository()
@app.route('/')
@app.route('/hello')
def index():
return "<h2>Hello World</h2>"
@app.route('/home')
@login_required
def home():
return "<h1>User Home</h1>"
@app.route('/login' , methods=['GET' , 'POST'])
def login():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
registeredUser = users_repository.get_user(username)
print('Users '+ str(users_repository.users))
print('Register user %s , password %s' % (registeredUser.username, registeredUser.password))
if registeredUser != None and registeredUser.password == password:
print('Logged in..')
login_user(registeredUser)
return redirect(url_for('home'))
else:
return abort(401)
else:
return Response('''
<form action="" method="post">
<p><input type=text name=username>
<p><input type=password name=password>
<p><input type=submit value=Login>
</form>
''')
@app.route('/register' , methods = ['GET' , 'POST'])
def register():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
new_user = User(username , password , users_repository.next_index())
users_repository.save_user(new_user)
return Response("Registered Successfully")
else:
return Response('''
<form action="" method="post">
<p><input type=text name=username placeholder="Enter username">
<p><input type=password name=password placeholder="Enter password">
<p><input type=submit value=Login>
</form>
''')
# handle login failed
@app.errorhandler(401)
def page_not_found(e):
return Response('<p>Login failed</p>')
# callback to reload the user object
@login_manager.user_loader
def load_user(userid):
return users_repository.get_user_by_id(userid)
if __name__ == '__main__':
app.run(host='192.168.218.100', port=4005, debug =True)
| from flask import Flask , request , abort , redirect , Response ,url_for
from flask.ext.login import LoginManager , login_required , UserMixin , login_user
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret_key'
login_manager = LoginManager()
login_manager.login_view = "login"
login_manager.init_app(app)
class User(UserMixin):
def __init__(self , username , password , id , active=True):
self.id = id
self.username = username
self.password = password
self.active = active
def get_id(self):
return self.id
def is_active(self):
return self.active
def get_auth_token(self):
return make_secure_token(self.username , key='secret_key')
class UsersRepository:
def __init__(self):
self.users = dict()
self.users_id_dict = dict()
self.identifier = 0
def save_user(self , user):
self.users_id_dict.setdefault(user.id , user)
self.users.setdefault(user.username , user)
def get_user(self , username):
return self.users.get(username)
def get_user_by_id(self , userid):
return self.users.get(userid)
def next_index(self):
self.identifier +=1
return self.identifier
users_repository = UsersRepository()
@app.route('/')
@app.route('/hello')
def index():
return "<h2>Hello World</h2>"
@app.route('/home')
@login_required
def home():
return "<h1>User Home</h1>"
@app.route('/login' , methods=['GET' , 'POST'])
def login():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
registeredUser = users_repository.get_user(username)
print('Users '+ str(users_repository.users))
print('Register user %s , password %s' % (registeredUser.username, registeredUser.password))
if registeredUser != None and registeredUser.password == password:
print('Logged in..')
login_user(registeredUser)
return redirect(url_for('home'))
else:
return abort(401)
else:
return Response('''
<form action="" method="post">
<p><input type=text name=username>
<p><input type=password name=password>
<p><input type=submit value=Login>
</form>
''')
@app.route('/register' , methods = ['GET' , 'POST'])
def register():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
new_user = User(username , password , users_repository.next_index())
users_repository.save_user(new_user)
return Response("Registered Successfully")
else:
return Response('''
<form action="" method="post">
<p><input type=text name=username placeholder="Enter username">
<p><input type=password name=password placeholder="Enter password">
<p><input type=submit value=Login>
</form>
''')
# handle login failed
@app.errorhandler(401)
def page_not_found(e):
return Response('<p>Login failed</p>')
# callback to reload the user object
@login_manager.user_loader
def load_user(userid):
return users_repository.get_user_by_id(userid)
if __name__ == '__main__':
app.run( debug =True)
| Python | 0.000001 |
df12bb251bbb6ab1b7efc1e955eb87faa73c6c15 | Add message for correct orfik answer | events/orfik/views.py | events/orfik/views.py | from django.shortcuts import render, redirect, get_object_or_404
from events.orfik import models
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.contrib.auth import get_user_model
from general import models as generalmodels
from django.contrib import messages
def make_player(request):
try:
player = request.user.player
except:
user = request.user
p = models.Player()
p.nickname = user.username
p.user = request.user
p.save()
def check_end():
return generalmodels.Variable.objects.get(name='orfikend').time <= timezone.now()
def check_start():
return generalmodels.Variable.objects.get(name='orfikstart').time <= timezone.now()
def home(request):
data = {}
template = 'orfik/home.html'
data['starttime'] = generalmodels.Variable.objects.get(name='orfikstart').time
data['started'] = check_start()
if request.user.is_authenticated():
make_player(request)
data['new_nick_form'] = models.NickForm()
ended = check_end()
# Has orfik ended?
if ended:
data['endtime'] = ended
data['winner'] = models.Player.objects.all().order_by('-max_level','last_solve')[0] == request.user.player
return render(request, template, data)
# If it has not ended, has it started?
if data['started']:
return redirect('events:orfik:question', q_no=0)
# It has not started, get the available questions
data['questions'] = models.Question.objects.filter(number__lte=request.user.player.max_level).order_by('number')
if request.method == 'POST':
form = models.Nickform(request.POST)
if form.is_valid():
form.save()
return render(request, template, data)
def instructions(request):
return render(request, 'orfik/instructions.html')
def leader(request):
data = {}
template = 'orfik/leader.html'
endtime = generalmodels.Variable.objects.get(name='orfikend').time
data['players'] = models.Player.objects.all().order_by('-max_level','last_solve')
if endtime <= timezone.now():
data['winner'] = data['players'][0]
return render(request, template, data)
@login_required
def question(request, q_no):
make_player(request)
starttime = generalmodels.Variable.objects.get(name='orfikstart').time
player = request.user.player
# Check if orfik has started
if starttime > timezone.now():
return redirect('events:orfik:home')
q_no = int(q_no)
# If player is not on question
if player.max_level != q_no:
return redirect('events:orfik:question', q_no=player.max_level)
data = {}
template = 'orfik/question.html'
question = get_object_or_404(models.Question,number=q_no)
data['question'] = question
if request.method == 'GET':
data['form'] = models.AnswerForm()
if request.method == 'POST':
form = models.AnswerForm(request.POST)
if question.number == player.max_level: # This is his first potential
# Correct answer
if form.is_valid():
attempt = form.save(commit=False)
attempt.player = player
attempt.question = question
attempt.save()
if attempt.is_correct():
player.last_solve = timezone.now()
player.max_level += 1
player.save()
messages.info(request, 'Corrent answer!')
return redirect('events:orfik:question', q_no=question.number+1)
else:
messages.info(request, 'Wrong answer. Try again!')
return redirect('events:orfik:question', q_no=question.number)
else:
data['form'] = form
return render(request, template, data)
| from django.shortcuts import render, redirect, get_object_or_404
from events.orfik import models
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.contrib.auth import get_user_model
from general import models as generalmodels
from django.contrib import messages
def make_player(request):
try:
player = request.user.player
except:
user = request.user
p = models.Player()
p.nickname = user.username
p.user = request.user
p.save()
def check_end():
return generalmodels.Variable.objects.get(name='orfikend').time <= timezone.now()
def check_start():
return generalmodels.Variable.objects.get(name='orfikstart').time <= timezone.now()
def home(request):
data = {}
template = 'orfik/home.html'
data['starttime'] = generalmodels.Variable.objects.get(name='orfikstart').time
data['started'] = check_start()
if request.user.is_authenticated():
make_player(request)
data['new_nick_form'] = models.NickForm()
ended = check_end()
# Has orfik ended?
if ended:
data['endtime'] = ended
data['winner'] = models.Player.objects.all().order_by('-max_level','last_solve')[0] == request.user.player
return render(request, template, data)
# If it has not ended, has it started?
if data['started']:
return redirect('events:orfik:question', q_no=0)
# It has not started, get the available questions
data['questions'] = models.Question.objects.filter(number__lte=request.user.player.max_level).order_by('number')
if request.method == 'POST':
form = models.Nickform(request.POST)
if form.is_valid():
form.save()
return render(request, template, data)
def instructions(request):
return render(request, 'orfik/instructions.html')
def leader(request):
data = {}
template = 'orfik/leader.html'
endtime = generalmodels.Variable.objects.get(name='orfikend').time
data['players'] = models.Player.objects.all().order_by('-max_level','last_solve')
if endtime <= timezone.now():
data['winner'] = data['players'][0]
return render(request, template, data)
@login_required
def question(request, q_no):
make_player(request)
starttime = generalmodels.Variable.objects.get(name='orfikstart').time
player = request.user.player
# Check if orfik has started
if starttime > timezone.now():
return redirect('events:orfik:home')
q_no = int(q_no)
# If player is not on question
if player.max_level != q_no:
return redirect('events:orfik:question', q_no=player.max_level)
data = {}
template = 'orfik/question.html'
question = get_object_or_404(models.Question,number=q_no)
data['question'] = question
if request.method == 'GET':
data['form'] = models.AnswerForm()
if request.method == 'POST':
form = models.AnswerForm(request.POST)
if question.number == player.max_level: # This is his first potential
# Correct answer
if form.is_valid():
attempt = form.save(commit=False)
attempt.player = player
attempt.question = question
attempt.save()
if attempt.is_correct():
player.last_solve = timezone.now()
player.max_level += 1
player.save()
return redirect('events:orfik:question', q_no=question.number+1)
else:
messages.info(request, 'Wrong answer. Try again!')
return redirect('events:orfik:question', q_no=question.number)
else:
data['form'] = form
return render(request, template, data)
| Python | 0.000093 |
f6672fd0074052ba71bc1266590f0ef0db8f14d0 | fix import. | blackgate/cli.py | blackgate/cli.py | # -*- coding: utf-8 -*-
import click
from blackgate.core import component
from blackgate.server import run
@click.group()
def main():
# README CONFIG
component.install_from_config(config)
@main.command()
def start():
run(config.get('port', 9654))
if __name__ == '__main__':
main()
| # -*- coding: utf-8 -*-
import click
from blackgate.core import component
from blackgate.server importrun
@click.group()
def main():
# README CONFIG
component.install_from_config(config)
@main.command()
def start():
run(config.get('port', 9654))
if __name__ == '__main__':
main()
| Python | 0 |
1c9a16a0896cd39aca2b44c0ef5c4eb155d1dab7 | Add a test for 2 framgnets case. | server/kcaa/manipulator_util_test.py | server/kcaa/manipulator_util_test.py | #!/usr/bin/env python
import pytest
import manipulator_util
class TestManipulatorManager(object):
def pytest_funcarg__manager(self, request):
return manipulator_util.ManipulatorManager(None, {}, 0)
def test_in_schedule_fragment(self):
in_schedule_fragment = (
manipulator_util.ManipulatorManager.in_schedule_fragment)
assert in_schedule_fragment(0, [0, 3600])
assert in_schedule_fragment(1800, [0, 3600])
assert in_schedule_fragment(3599, [0, 3600])
assert not in_schedule_fragment(3600, [0, 3600])
assert not in_schedule_fragment(5400, [0, 3600])
def test_are_auto_manipulator_scheduled_disabled(self, manager):
manager.set_auto_manipulator_schedules(False, [[0, 3600]])
assert not manager.are_auto_manipulator_scheduled(0)
def test_are_auto_manipulator_scheduled_one_fragment(self, manager):
manager.set_auto_manipulator_schedules(True, [[0, 3600]])
assert manager.are_auto_manipulator_scheduled(0)
assert manager.are_auto_manipulator_scheduled(1800)
assert manager.are_auto_manipulator_scheduled(3599)
assert not manager.are_auto_manipulator_scheduled(3600)
assert not manager.are_auto_manipulator_scheduled(5400)
def test_are_auto_manipulator_scheduled_two_fragments(self, manager):
manager.set_auto_manipulator_schedules(True, [[0, 3600],
[7200, 10800]])
assert manager.are_auto_manipulator_scheduled(0)
assert not manager.are_auto_manipulator_scheduled(3600)
assert manager.are_auto_manipulator_scheduled(7200)
assert manager.are_auto_manipulator_scheduled(10799)
assert not manager.are_auto_manipulator_scheduled(10800)
assert manager.are_auto_manipulator_scheduled(0)
def main():
import doctest
doctest.testmod(manipulator_util)
pytest.main(args=[__file__.replace('.pyc', '.py')])
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import pytest
import manipulator_util
class TestManipulatorManager(object):
    """Unit tests for manipulator_util.ManipulatorManager scheduling."""
    def pytest_funcarg__manager(self, request):
        # Legacy pytest funcarg hook: supplies a fresh manager (no
        # browser, empty manipulator map, epoch 0) to tests below.
        return manipulator_util.ManipulatorManager(None, {}, 0)
    def test_in_schedule_fragment(self):
        # Fragments behave as [begin, end) half-open second intervals.
        in_schedule_fragment = (
            manipulator_util.ManipulatorManager.in_schedule_fragment)
        assert in_schedule_fragment(0, [0, 3600])
        assert in_schedule_fragment(1800, [0, 3600])
        assert in_schedule_fragment(3599, [0, 3600])
        assert not in_schedule_fragment(3600, [0, 3600])
        assert not in_schedule_fragment(5400, [0, 3600])
    def test_are_auto_manipulator_scheduled_disabled(self, manager):
        # With the master switch off, no time is considered scheduled.
        manager.set_auto_manipulator_schedules(False, [[0, 3600]])
        assert not manager.are_auto_manipulator_scheduled(0)
    def test_are_auto_manipulator_scheduled_one_fragment(self, manager):
        manager.set_auto_manipulator_schedules(True, [[0, 3600]])
        assert manager.are_auto_manipulator_scheduled(0)
        assert manager.are_auto_manipulator_scheduled(1800)
        assert manager.are_auto_manipulator_scheduled(3599)
        assert not manager.are_auto_manipulator_scheduled(3600)
        assert not manager.are_auto_manipulator_scheduled(5400)
def main():
    """Entry point: module doctests first, then the pytest suite."""
    import doctest
    doctest.testmod(manipulator_util)
    pytest.main(args=[__file__.replace('.pyc', '.py')])
if __name__ == '__main__':
    main()
| Python | 0.000001 |
3bd383a15902d8367097a4348de64c929732767b | Fix Test | tests/NewsParser_Test.py | tests/NewsParser_Test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: balicanta
# @Date: 2014-10-25 09:57:26
# @Last Modified by: balicanta
# @Last Modified time: 2014-10-27 23:44:57
from NewsParser import NewsParser
from requests.utils import get_encodings_from_content
# Each fixture pairs a live article URL with expected substrings.
# test_parser() indexes 'title', 'author' and 'content' unconditionally,
# so every fixture must define all three keys (empty string means
# "nothing to check" -- '' is a substring of any string).
test_fixtures = [
    {"url": "http://udn.com/NEWS/NATIONAL/NAT3/9017464.shtml",
     "title": "聯合報直擊", "author": "呂思逸","content":"是由陳老闆批了棉花棒"},
    {"url": "http://world.yam.com/post.php?id=2732",
     "title": "海潮人潮兇", "author":"", "content": "這座遊人如織的水都"},
    {"url": "http://news.ltn.com.tw/news/business/breakingnews/1142153",
     "title": "魏家退出101", "author":"", "content": "財政部次長吳當傑今天傍晚表示"}
]
def test_parser():
    """Fetch each fixture URL live and check the extracted fields.

    NOTE(review): this hits the network; a failure may mean the article
    changed or went offline rather than a parser regression.
    """
    for test_fixture in test_fixtures:
        parser = NewsParser(test_fixture['url'])
        title = parser.getTitle()
        author = parser.getAuthor()
        content = parser.getContent()
        # .encode('utf-8') implies Python 2: byte-substring checks
        # against the utf-8 literals above -- confirm before porting.
        assert test_fixture['title'] in title.encode('utf-8')
        assert test_fixture['author'] in author.encode('utf-8')
        assert test_fixture['content'] in content.encode('utf-8')
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: balicanta
# @Date: 2014-10-25 09:57:26
# @Last Modified by: bustta
# @Last Modified time: 2014-10-27 23:22:08
from NewsParser import NewsParser
from requests.utils import get_encodings_from_content
# Fixture schema: test_parser() indexes 'title', 'author' and 'content'
# unconditionally, so every fixture must define all three keys or the
# test dies with KeyError before any parsing is checked.  An empty
# string means "nothing to verify" ('' is a substring of any string).
test_fixtures = [
    {"url": "http://udn.com/NEWS/NATIONAL/NAT3/9017464.shtml",
     "title": "聯合報直擊", "author": "呂思逸", "content": ""},
    {"url": "http://world.yam.com/post.php?id=2732",
     "title": "海潮人潮兇", "author": "", "content": "這座遊人如織的水都"},
    {"url": "http://news.ltn.com.tw/news/business/breakingnews/1142153",
     "title": "魏家退出101", "author": "", "content": "財政部次長吳當傑今天傍晚表示"}
]
def test_parser():
    """Fetch each fixture URL live and check the extracted fields.

    Requires every fixture to define 'title', 'author' and 'content';
    the lookups below are unconditional.  NOTE(review): network-bound.
    """
    for test_fixture in test_fixtures:
        parser = NewsParser(test_fixture['url'])
        title = parser.getTitle()
        author = parser.getAuthor()
        content = parser.getContent()
        # .encode('utf-8') implies Python 2 byte-substring comparison.
        assert test_fixture['title'] in title.encode('utf-8')
        assert test_fixture['author'] in author.encode('utf-8')
        assert test_fixture['content'] in content.encode('utf-8')
| Python | 0.000001 |
97478d2bb38b94a5effbbc74db3ae1a0360f9a19 | remove vm.id usage in exeption message | vmpool/endpoint.py | vmpool/endpoint.py | # coding: utf-8
from core.utils import generator_wait_for
from core.logger import log_pool
from core.config import config
from core.exceptions import PlatformException, NoSuchEndpoint, \
CreationException
from vmpool.virtual_machines_pool import pool
from vmpool.platforms import Platforms
from vmpool.vmqueue import q
def get_vm_from_pool(endpoint_name):
    """Return the pooled VM registered under *endpoint_name*.

    Raises NoSuchEndpoint when the pool holds no (usable) VM by that
    name.
    """
    vm = pool.get_by_name(endpoint_name)
    if not vm:
        raise NoSuchEndpoint('No such endpoint: %s' % endpoint_name)
    log_pool.debug('Got vm with params: %s' % vm.info)
    return vm
def new_vm(desired_caps):
    """Generator that provisions a VM matching *desired_caps*.

    Yields intermediate objects (first the queued item, then the
    still-building vm) while waiting, so callers can poll progress;
    the final yielded value is the ready vm.  Raises CreationException
    on a missing platform or a timeout, PlatformException for an
    unknown platform.
    """
    platform = desired_caps.get("platform", None)
    # A globally configured platform overrides whatever the client asked for.
    if hasattr(config, "PLATFORM") and config.PLATFORM:
        log_pool.info(
            'Using %s. Desired platform %s has been ignored.' %
            (config.PLATFORM, platform)
        )
        platform = config.PLATFORM
        desired_caps["platform"] = platform
    # Python 2 codebase: normalize unicode platform names to byte strings.
    if isinstance(platform, unicode):
        platform = platform.encode('utf-8')
    if not platform:
        raise CreationException(
            'Platform parameter for new endpoint not found in dc'
        )
    if not Platforms.check_platform(platform):
        raise PlatformException('No such platform %s' % platform)
    delayed_vm = q.enqueue(desired_caps)
    yield delayed_vm
    # First wait (up to GET_VM_TIMEOUT): the queue attaches a vm object.
    for condition in generator_wait_for(
        lambda: delayed_vm.vm, timeout=config.GET_VM_TIMEOUT
    ):
        yield delayed_vm
    if not delayed_vm.vm:
        raise CreationException(
            "Timeout while waiting for vm with platform %s" % platform
        )
    yield delayed_vm.vm
    # Second wait: the vm exists but may still be building.
    for condition in generator_wait_for(
        lambda: delayed_vm.vm.ready, timeout=config.GET_VM_TIMEOUT
    ):
        yield delayed_vm.vm
    if not delayed_vm.vm.ready:
        raise CreationException(
            'Timeout while building vm %s (platform: %s)' %
            (delayed_vm.vm.name, platform)
        )
    log_pool.info('Got vm for request with params: %s' % delayed_vm.vm.info)
    yield delayed_vm.vm
def delete_vm(endpoint_name):
    """Remove (or recycle) the endpoint named *endpoint_name*.

    Preloaded VMs are rebuilt so the pool keeps a warm instance; any
    other VM is destroyed outright.  A missing/busy VM is only logged.
    """
    vm = pool.get_by_name(endpoint_name)
    if not vm:
        log_pool.info("Vm %s not found in pool or vm is busy" % endpoint_name)
        return
    if vm.is_preloaded():
        vm.rebuild()
    else:
        vm.delete()
    log_pool.info("Vm %s has been deleted" % endpoint_name)
| # coding: utf-8
from core.utils import generator_wait_for
from core.logger import log_pool
from core.config import config
from core.exceptions import PlatformException, NoSuchEndpoint, \
CreationException
from vmpool.virtual_machines_pool import pool
from vmpool.platforms import Platforms
from vmpool.vmqueue import q
def get_vm_from_pool(endpoint_name):
    """Return the pooled VM named *endpoint_name* or raise NoSuchEndpoint."""
    vm = pool.get_by_name(endpoint_name)
    if vm:
        log_pool.debug('Got vm with params: %s' % vm.info)
        return vm
    else:
        raise NoSuchEndpoint('No such endpoint: %s' % endpoint_name)
def new_vm(desired_caps):
    """Generator that provisions a VM matching *desired_caps*.

    Yields intermediate objects (first the queued item, then the
    still-building vm) while waiting so callers can poll progress; the
    final yield is the ready vm.  Raises CreationException on missing
    platform or timeouts, PlatformException for an unknown platform.
    """
    platform = desired_caps.get("platform", None)
    # A globally configured platform overrides the client's request.
    if hasattr(config, "PLATFORM") and config.PLATFORM:
        log_pool.info(
            'Using %s. Desired platform %s has been ignored.' %
            (config.PLATFORM, platform)
        )
        platform = config.PLATFORM
        desired_caps["platform"] = platform
    # Python 2 codebase: normalize unicode platform names to byte strings.
    if isinstance(platform, unicode):
        platform = platform.encode('utf-8')
    if not platform:
        raise CreationException(
            'Platform parameter for new endpoint not found in dc'
        )
    if not Platforms.check_platform(platform):
        raise PlatformException('No such platform %s' % platform)
    delayed_vm = q.enqueue(desired_caps)
    yield delayed_vm
    # First wait (up to GET_VM_TIMEOUT): the queue attaches a vm object.
    for condition in generator_wait_for(
        lambda: delayed_vm.vm, timeout=config.GET_VM_TIMEOUT
    ):
        yield delayed_vm
    if not delayed_vm.vm:
        raise CreationException(
            "Timeout while waiting for vm with platform %s" % platform
        )
    yield delayed_vm.vm
    # Second wait: the vm exists but may still be building.
    for condition in generator_wait_for(
        lambda: delayed_vm.vm.ready, timeout=config.GET_VM_TIMEOUT
    ):
        yield delayed_vm.vm
    if not delayed_vm.vm.ready:
        # NOTE(review): the vm never became ready; `.id` may be unset
        # or uninformative at this point -- the vm's name might
        # identify it better in this message.  Confirm.
        raise CreationException(
            'Timeout while building vm %s (platform: %s)' %
            (delayed_vm.vm.id, platform)
        )
    log_pool.info('Got vm for request with params: %s' % delayed_vm.vm.info)
    yield delayed_vm.vm
def delete_vm(endpoint_name):
    """Delete (or rebuild, when preloaded) the named endpoint's VM."""
    vm = pool.get_by_name(endpoint_name)
    if vm:
        # Preloaded VMs are recycled instead of destroyed so the pool
        # keeps a warm instance available.
        if vm.is_preloaded():
            vm.rebuild()
        else:
            vm.delete()
        msg = "Vm %s has been deleted" % endpoint_name
        log_pool.info(msg)
    else:
        msg = "Vm %s not found in pool or vm is busy" % endpoint_name
        log_pool.info(msg)
| Python | 0 |
01bb6723b2bc7ab7a7fb6629e304f5ed42f40af4 | Add GSM characters test case for a unicode message. | tests/clockwork_tests.py | tests/clockwork_tests.py | # -*- coding: utf-8 -*-
import unittest
import clockwork
import clockwork_exceptions
class ApiTests(unittest.TestCase):
    """Live integration tests against the Clockwork SMS API.

    NOTE(review): these hit the real service; `api_key` must be
    replaced with a valid key before they can pass.
    """
    api_key = "YOUR_API_KEY_HERE"
    def test_should_send_single_message(self):
        """Sending a single SMS with the minimum detail and no errors should work"""
        api = clockwork.API(self.api_key)
        sms = clockwork.SMS(to="441234567890", message="This is a test message")
        response = api.send(sms)
        self.assertTrue(response.success)
    def test_should_send_single_unicode_message(self):
        """Sending a single SMS with the full GSM character set (apart from ESC and form feed) should work"""
        api = clockwork.API(self.api_key)
        sms = clockwork.SMS(
            to="441234567890",
            #Message table copied from http://www.clockworksms.com/doc/reference/faqs/gsm-character-set/
            #Note, the "/f" (form feed) character does not work as lxml prohibits it.
            message= u'''@£$¥èéùìòÇ\nØø\rÅåΔ_ΦΓΛΩΠΨΣΘΞÆæßÉ'''
                     u''' !“#¤%&‘()*+,-./'''
                     u'''0123456789:;<=>?'''
                     u'''¡ABCDEFGHIJKLMNO'''
                     u'''PQRSTUVWXYZÄÖÑܧ'''
                     u'''¿abcdefghijklmno'''
                     u'''pqrstuvwxyzäöñüà'''
                     u'''€[\]^{|}~'''
            ,long=True)
        response = api.send(sms)
        self.assertTrue(response.success)
    def test_should_fail_with_no_message(self):
        """Sending a single SMS with no message should fail"""
        api = clockwork.API(self.api_key)
        sms = clockwork.SMS(to="441234567890", message="")
        response = api.send(sms)
        self.assertFalse(response.success)
    def test_should_fail_with_no_to(self):
        """Sending a single SMS with no recipient should fail"""
        api = clockwork.API(self.api_key)
        sms = clockwork.SMS(to="", message="This is a test message")
        response = api.send(sms)
        self.assertFalse(response.success)
    def test_should_send_multiple_messages(self):
        """Sending multiple sms messages should work"""
        api = clockwork.API(self.api_key)
        sms1 = clockwork.SMS(to="441234567890", message="This is a test message 1")
        sms2 = clockwork.SMS(to="441234567890", message="This is a test message 2")
        response = api.send([sms1,sms2])
        for r in response:
            self.assertTrue(r.success)
    def test_should_send_multiple_messages_with_erros(self):
        """Sending multiple sms messages, one of which has an invalid message should work"""
        # NOTE(review): "erros" is a typo, but the method name is the
        # public test id, so it is kept as-is.
        api = clockwork.API(self.api_key)
        sms1 = clockwork.SMS(to="441234567890", message="This is a test message 1")
        sms2 = clockwork.SMS(to="441234567890", message="")
        response = api.send([sms1,sms2])
        self.assertTrue(response[0].success)
        self.assertFalse(response[1].success)
    def test_should_fail_with_invalid_key(self):
        # A bad key surfaces as an ApiException rather than a failed response.
        api = clockwork.API("this_key_is_wrong")
        sms = clockwork.SMS(to="441234567890", message="This is a test message 1")
        self.assertRaises(clockwork_exceptions.ApiException, api.send, sms)
    def test_should_be_able_to_get_balance(self):
        # Assumes the test account is Pay-As-You-Go.
        api = clockwork.API(self.api_key)
        balance = api.get_balance()
        self.assertEqual('PAYG',balance['account_type'])
# Allow running this file directly: python clockwork_tests.py
if __name__ == "__main__":
    unittest.main()
| import unittest
import clockwork
import clockwork_exceptions
class ApiTests(unittest.TestCase):
    """Live integration tests against the Clockwork SMS API.

    NOTE(review): these hit the real service; `api_key` must be
    replaced with a valid key before they can pass.
    """
    api_key = "YOUR_API_KEY_HERE"
    def test_should_send_single_message(self):
        """Sending a single SMS with the minimum detail and no errors should work"""
        api = clockwork.API(self.api_key)
        sms = clockwork.SMS(to="441234567890", message="This is a test message")
        response = api.send(sms)
        self.assertTrue(response.success)
    def test_should_fail_with_no_message(self):
        """Sending a single SMS with no message should fail"""
        api = clockwork.API(self.api_key)
        sms = clockwork.SMS(to="441234567890", message="")
        response = api.send(sms)
        self.assertFalse(response.success)
    def test_should_fail_with_no_to(self):
        """Sending a single SMS with no recipient should fail"""
        api = clockwork.API(self.api_key)
        sms = clockwork.SMS(to="", message="This is a test message")
        response = api.send(sms)
        self.assertFalse(response.success)
    def test_should_send_multiple_messages(self):
        """Sending multiple sms messages should work"""
        api = clockwork.API(self.api_key)
        sms1 = clockwork.SMS(to="441234567890", message="This is a test message 1")
        sms2 = clockwork.SMS(to="441234567890", message="This is a test message 2")
        response = api.send([sms1,sms2])
        for r in response:
            self.assertTrue(r.success)
    def test_should_send_multiple_messages_with_erros(self):
        """Sending multiple sms messages, one of which has an invalid message should work"""
        # NOTE(review): "erros" is a typo, but the method name is the
        # public test id, so it is kept as-is.
        api = clockwork.API(self.api_key)
        sms1 = clockwork.SMS(to="441234567890", message="This is a test message 1")
        sms2 = clockwork.SMS(to="441234567890", message="")
        response = api.send([sms1,sms2])
        self.assertTrue(response[0].success)
        self.assertFalse(response[1].success)
    def test_should_fail_with_invalid_key(self):
        # A bad key surfaces as an ApiException rather than a failed response.
        api = clockwork.API("this_key_is_wrong")
        sms = clockwork.SMS(to="441234567890", message="This is a test message 1")
        self.assertRaises(clockwork_exceptions.ApiException, api.send, sms)
    def test_should_be_able_to_get_balance(self):
        # Assumes the test account is Pay-As-You-Go.
        api = clockwork.API(self.api_key)
        balance = api.get_balance()
        self.assertEqual('PAYG',balance['account_type'])
# Allow running this file directly: python clockwork_tests.py
if __name__ == "__main__":
    unittest.main()
| Python | 0 |
4548b24c17caf6149b741c7f8a8f743f4ff431b4 | Remove partitions | 2011/candy_splitting.py | 2011/candy_splitting.py | #!/usr/bin/env python
from __future__ import print_function
from functools import reduce
def split_candies(candies):
    """Return the best candy total from a "fair" split, or 0 if none exists.

    The split is judged fair when both piles have the same XOR value.
    That is possible exactly when the XOR of all candies is 0, and then
    every split is fair -- so the winner takes everything except the
    single smallest candy.

    Robustness fix: an empty list previously crashed, because reduce()
    without an initializer raises TypeError on an empty sequence; an
    empty bag now simply has no fair split and returns 0.
    """
    assert isinstance(candies, list)
    if not candies:
        return 0
    xor = reduce(lambda x, y: x ^ y, candies)
    if xor != 0:
        return 0
    return sum(candies) - min(candies)
if __name__ == '__main__':
    import os
    # Sanity samples: print the answer (or 'NO') for two known bags.
    samples = [
        [1,2,3,4,5],
        [3,5,6]
    ]
    for sample in samples:
        max_candy = split_candies(sample)
        if max_candy > 0:
            print(max_candy)
        else:
            print('NO')
    # Batch mode: read "Case #N"-style .in files next to this script
    # and write the matching .out files.
    data_files = ['C-small-practice',
                  'C-large-practice']
    for f in data_files:
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '{0}.in'.format(f)), 'r') as input_file:
            lines = input_file.readlines()
        input_count = int(lines[0].replace('\n' ,''))
        # Cases alternate count-line / candy-line, so every second line
        # (starting from the second) holds a case's candy values.
        inputs = [line.replace('\n', '') for line in lines[1:]]
        test_cases = [[int(_) for _ in in_.split(' ')] for in_ in inputs[1::2]]
        i = 1
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '{0}.out'.format(f)), 'w') as output_file:
            for test_case in test_cases:
                print(test_case)
                max_candy = split_candies(test_case)
                if max_candy > 0:
                    output_file.write('Case #{0}: {1}\n'.format(i, max_candy))
                else:
                    output_file.write('Case #{0}: {1}\n'.format(i, 'NO'))
                i += 1
| #!/usr/bin/env python
from __future__ import print_function
from functools import reduce
def split_candies(candies):
    """Brute-force fair-split search.

    Returns the largest pile sum over all 2-partitions whose two piles
    have equal XOR values, or 0 when no such partition exists.
    """
    assert isinstance(candies, list)
    # Enumerates every 2-partition -- exponential in len(candies).
    partitions = sorted_k_partitions(candies, 2)
    print(partitions)  # debug output
    max_candy = 0
    for partition in partitions:
        xor0 = reduce(lambda x, y: x ^ y, partition[0])
        sum0 = sum(partition[0])
        xor1 = reduce(lambda x, y: x ^ y, partition[1])
        sum1 = sum(partition[1])
        print(xor0, xor1, sum0, sum1)  # debug output
        if xor0 == xor1:
            max_candy = max(max_candy, max(sum0, sum1))
    return max_candy
def sorted_k_partitions(seq, k):
    """Return every unique k-partition of *seq*.

    Each partition is a list of parts and each part is a tuple.  Within
    a partition the parts are sorted in shortlex order (length first,
    then lexicographically); the overall list of partitions is sorted
    by the lengths of its parts and then lexicographically.
    """
    n = len(seq)
    parts = []  # the mutable assignment being built up recursively

    def assign(idx):
        # Place seq[idx:] into `parts`, yielding a tuple-frozen snapshot
        # once every element has been assigned.
        if idx >= n:
            yield [tuple(part) for part in parts]
            return
        # Add seq[idx] to an existing part -- but only while enough
        # elements remain to populate the still-missing parts.
        if n - idx > k - len(parts):
            for part in parts:
                part.append(seq[idx])
                for partition in assign(idx + 1):
                    yield partition  # Python3: yield from assign(idx + 1)
                part.pop()
        # Or open a new part with seq[idx] while fewer than k exist.
        if len(parts) < k:
            parts.append([seq[idx]])
            for partition in assign(idx + 1):
                yield partition  # Python3: yield from assign(idx + 1)
            parts.pop()

    # Shortlex-sort the parts inside each partition, then sort the
    # partitions themselves by part lengths and lexicographic order.
    ordered = [sorted(ps, key=lambda p: (len(p), p)) for ps in assign(0)]
    return sorted(ordered, key=lambda ps: (len(ps), ps))
if __name__ == '__main__':
    import os
    # Sanity samples: print the answer (or 'NO') for two known bags.
    samples = [
        [1,2,3,4,5],
        [3,5,6]
    ]
    for sample in samples:
        max_candy = split_candies(sample)
        if max_candy > 0:
            print(max_candy)
        else:
            print('NO')
    # Batch mode: read "Case #N"-style .in files next to this script
    # and write the matching .out files.
    data_files = ['C-small-practice',
                  'C-large-practice']
    for f in data_files:
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '{0}.in'.format(f)), 'r') as input_file:
            lines = input_file.readlines()
        input_count = int(lines[0].replace('\n' ,''))
        # Cases alternate count-line / candy-line, so every second line
        # (starting from the second) holds a case's candy values.
        inputs = [line.replace('\n', '') for line in lines[1:]]
        test_cases = [[int(_) for _ in in_.split(' ')] for in_ in inputs[1::2]]
        i = 1
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '{0}.out'.format(f)), 'w') as output_file:
            for test_case in test_cases:
                print(test_case)
                max_candy = split_candies(test_case)
                if max_candy > 0:
                    output_file.write('Case #{0}: {1}\n'.format(i, max_candy))
                else:
                    output_file.write('Case #{0}: {1}\n'.format(i, 'NO'))
                i += 1
| Python | 0.000011 |
08125322609e97e868c5c712df9e35e4c556434d | Use enumerate() instead of managing an index variable. | httparchive.py | httparchive.py | #!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class HttpArchive(dict):
    """Mapping of ArchivedHttpRequest keys to ArchivedHttpResponse values."""
class ArchivedHttpRequest(object):
    """Hashable key identifying one recorded HTTP request."""

    def __init__(self, command, host, path, request_body):
        self.command = command          # HTTP method, e.g. 'GET'
        self.host = host
        self.path = path
        self.request_body = request_body

    def __repr__(self):
        return repr((self.command, self.host, self.path, self.request_body))

    def __hash__(self):
        # Hash the canonical repr so equal requests collide in dicts.
        return hash(self.__repr__())

    def __eq__(self, other):
        return self.__repr__() == other.__repr__()

    def __ne__(self, other):
        # Bug fix for this Python-2-era file: Python 2 does not derive
        # != from __eq__, so without this, != fell back to identity and
        # disagreed with ==.  Harmless (and consistent) under Python 3.
        return not self.__eq__(other)
class ArchivedHttpResponse(object):
    """A recorded HTTP response: status line, ordered headers and body."""

    def __init__(self, status, reason, headers, response_data):
        self.status = status
        self.reason = reason
        self.headers = headers          # list of (name, value) pairs, order kept
        self.response_data = response_data

    def get_header(self, key):
        """Return the value of the first header named *key*, else None."""
        for name, value in self.headers:
            if name == key:
                return value
        return None

    def set_header(self, key, value):
        """Replace the first header named *key* in place, else append it."""
        for position, (name, _) in enumerate(self.headers):
            if name == key:
                self.headers[position] = (key, value)
                return
        self.headers.append((key, value))

    def remove_header(self, key):
        """Delete the first header named *key*; absent keys are a no-op."""
        for position, (name, _) in enumerate(self.headers):
            if name == key:
                del self.headers[position]
                return
| #!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Keys are ArchivedHttpRequest instances (hashable via their repr);
# values are the matching ArchivedHttpResponse objects.
class HttpArchive(dict):
    """Dict with ArchivedHttpRequest keys and ArchivedHttpResponse values."""
    pass
class ArchivedHttpRequest(object):
    """Hashable key identifying one recorded HTTP request."""
    def __init__(self, command, host, path, request_body):
        self.command = command  # HTTP method, e.g. 'GET'
        self.host = host
        self.path = path
        self.request_body = request_body
    def __repr__(self):
        return repr((self.command, self.host, self.path, self.request_body))
    def __hash__(self):
        # Hash the canonical repr so equal requests collide in dicts.
        return hash(self.__repr__())
    def __eq__(self, other):
        # NOTE(review): no matching __ne__; under Python 2, != falls
        # back to identity and disagrees with ==.
        return self.__repr__() == other.__repr__()
class ArchivedHttpResponse(object):
    """A recorded HTTP response: status line, ordered headers and body."""

    def __init__(self, status, reason, headers, response_data):
        self.status = status
        self.reason = reason
        self.headers = headers          # list of (name, value) pairs, order kept
        self.response_data = response_data

    def get_header(self, key):
        """Return the value of the first header named *key*, else None."""
        for k, v in self.headers:
            if key == k:
                return v
        return None

    def set_header(self, key, value):
        """Replace the first header named *key* in place, else append it."""
        # enumerate() replaces the hand-maintained `i = 0 ... i = i + 1`
        # counter, which was easy to get out of sync with the loop.
        for i, (k, v) in enumerate(self.headers):
            if key == k:
                self.headers[i] = (key, value)
                return
        self.headers.append((key, value))

    def remove_header(self, key):
        """Delete the first header named *key*; absent keys are a no-op."""
        for i, (k, v) in enumerate(self.headers):
            if key == k:
                self.headers.pop(i)
                return
| Python | 0.999997 |
d5482b10a712863c36a59d8ce82f3958ec41e78b | Add CORS on /swagger.json | APITaxi/api/__init__.py | APITaxi/api/__init__.py | # -*- coding: utf-8 -*-
from flask.ext.restplus import apidoc, Api
from flask import Blueprint, render_template
from flask_cors import cross_origin
# Module-level singletons: the blueprint and flask-restplus Api that
# every endpoint module registers against.  Built-in docs/specs are
# disabled; /swagger.json is served by the custom view in init_app().
api_blueprint = Blueprint('api', __name__)
api = Api(api_blueprint, doc=False, catch_all_404s=True,
        title='API version 2.0')
ns_administrative = api.namespace('administrative',
        description="Administrative APIs", path='/')
def init_app(app):
    """Register the API blueprint, apidoc blueprint and all endpoint
    modules on *app*, plus a CORS-enabled /swagger.json view."""
    # Importing the endpoint modules registers their routes on `api`.
    from . import hail, taxi, ads, drivers, zupc, profile, vehicle, documents
    api.init_app(app, add_specs=False)
    app.register_blueprint(api_blueprint)
    app.register_blueprint(apidoc.apidoc)

    @app.route('/swagger.json', endpoint='api.specs')
    @cross_origin()
    def swagger():
        # Parenthesized so (body, status, headers) is one Flask
        # response triple.  NOTE(review): the previous trailing-comma
        # form left the headers dict on its own line -- either a syntax
        # error or unreachable dead code, so the Content-Type header was
        # never part of the response.
        return (render_template('swagger.json',
                                host=app.config['SERVER_NAME']),
                200,
                {'Content-Type': 'application/json'})
| # -*- coding: utf-8 -*-
from flask.ext.restplus import apidoc, Api
from flask import Blueprint, render_template
# Module-level singletons: the blueprint and flask-restplus Api that
# every endpoint module registers against.  Built-in docs/specs are
# disabled; /swagger.json is served by the custom view in init_app().
api_blueprint = Blueprint('api', __name__)
api = Api(api_blueprint, doc=False, catch_all_404s=True,
        title='API version 2.0')
ns_administrative = api.namespace('administrative',
        description="Administrative APIs", path='/')
def init_app(app):
    """Register the API blueprint, apidoc blueprint and all endpoint
    modules on *app*, plus the /swagger.json view."""
    # Importing the endpoint modules registers their routes on `api`.
    from . import hail, taxi, ads, drivers, zupc, profile, vehicle, documents
    api.init_app(app, add_specs=False)
    app.register_blueprint(api_blueprint)
    app.register_blueprint(apidoc.apidoc)

    @app.route('/swagger.json', endpoint='api.specs')
    def swagger():
        # Parenthesized so (body, status, headers) is one Flask
        # response triple.  NOTE(review): the previous trailing-comma
        # form left the headers dict on its own line -- either a syntax
        # error or unreachable dead code, so the Content-Type header was
        # never part of the response.
        return (render_template('swagger.json',
                                host=app.config['SERVER_NAME']),
                200,
                {'Content-Type': 'application/json'})
| Python | 0.000001 |
d16b57f3edca478622b84f56dfee7b2eea1f7498 | Add basic reporting | botbot/report.py | botbot/report.py | """Generate a report about file errors"""
import os
import sys
import math
from pkg_resources import resource_exists, resource_filename
from jinja2 import Environment, FileSystemLoader
from . import problems
_DEFAULT_RES_PATH = os.path.join('resources', 'templates')
_GENERIC_REPORT_NAME = 'generic.txt'
_ENV_REPORT_NAME = 'env.txt'
class ReporterBase():
    """Shared plumbing for all reporters: progress bar, template lookup
    and Jinja2 environment construction."""

    def __init__(self, chkr):
        self.chkr = chkr  # the checker whose findings we report on

    def write_status(self, barlen):
        """Render a one-line '[###---] 42%' progress bar to stdout."""
        status = self.chkr.status
        fraction = status['checked'] / status['files']
        filled = math.ceil(fraction * barlen)
        bar = filled * '#' + (barlen - filled) * '-'
        print('[{0}] {1:.0%}\r'.format(bar, fraction), end='')
        if fraction == 1:
            print('\n', end='')
        sys.stdout.flush()

    def _get_template_filename(self, name):
        """Append a '.txt' suffix to *name* unless it already has one."""
        pieces = str(name).split('.')
        if pieces[-1] == 'txt':
            return name
        return '.'.join(pieces + ['txt'])

    def _get_supporting_prob_info(self, probid):
        """Look up the rich problem description registered for *probid*."""
        return problems.every_problem.get(probid)

    def _get_env(self, template):
        """Build a Jinja2 environment for the bundled templates.

        Raises FileNotFoundError when *template* is not packaged.
        """
        tmppath = os.path.join(_DEFAULT_RES_PATH,
                               self._get_template_filename(template))
        if not resource_exists(__package__, tmppath):
            raise FileNotFoundError('No such template')
        return Environment(
            loader=FileSystemLoader(
                resource_filename(__package__, _DEFAULT_RES_PATH)),
            trim_blocks=True
        )
class OneshotReporter(ReporterBase):
    """Writes a single report after a one-off check run."""

    def __init__(self, chkr, out=sys.stdout):
        super().__init__(chkr)
        self.out = out

    def _should_print_report(self, filelist):
        """True when any problem in *filelist* has at least one file."""
        return any(filelist.values())

    def write_report(self, fmt, shared, attr='problems'):
        """Print every user's problems as an indented tree.

        *fmt*, *shared* and *attr* are accepted for interface
        compatibility but are not currently used.
        """
        for user, probs in self.chkr.checked.items():
            if not probs:
                continue
            print(user.pw_gecos)
            for prob, files in probs.items():
                print('\t' + prob)
                for f in files:
                    print('\t\t' + str(f))
class DaemonReporter(ReporterBase):
    """Reports issues found while running in daemon mode."""

    def __init__(self, chkr):
        super().__init__(chkr)

    def write_report(self):
        """Drain the checker's queue, printing one line per file.

        Temporary console implementation until email reporting lands.
        """
        pending = self.chkr.checked
        while pending:
            finfo = pending.pop()
            print("{} -- {}".format(finfo['path'], ', '.join(finfo['problems'])))
class EnvReporter(ReporterBase):
    """Reports environment issues found by the checker."""

    def __init__(self, chkr, out=sys.stdout):
        """Attach to *chkr*; *out* is a path to write to, or sys.stdout."""
        # Consistency fix: initialize through the base class like the
        # sibling reporters instead of assigning self.chkr by hand.
        super().__init__(chkr)
        self.out = out

    def write_report(self):
        """Write a report on environment variables."""
        env = self._get_env(_ENV_REPORT_NAME)
        if not self.chkr.problems:
            print('No problems here!')
            return
        tempgen = env.get_template(_ENV_REPORT_NAME).generate(
            problist=[(self._get_supporting_prob_info(p[0]), p[1])
                      for p in self.chkr.problems]
        )
        if self.out != sys.stdout:
            print('Writing report to {}.'.format(self.out))
            out = open(self.out, mode='w')
        else:
            print('Report:')
            out = sys.stdout
        try:
            for line in tempgen:
                print(line, file=out, end='')
            print('\n', file=out, end='')
        finally:
            # Bug fix: only close handles we opened.  The previous
            # unconditional out.close() closed sys.stdout, silencing
            # all subsequent output; it also leaked the file handle
            # when rendering raised.
            if out is not sys.stdout:
                out.close()
| """Generate a report about file errors"""
import os
import sys
import math
from pkg_resources import resource_exists, resource_filename
from jinja2 import Environment, FileSystemLoader
from . import problems
_DEFAULT_RES_PATH = os.path.join('resources', 'templates')
_GENERIC_REPORT_NAME = 'generic.txt'
_ENV_REPORT_NAME = 'env.txt'
class ReporterBase():
    """Shared plumbing for all reporters: progress bar, template lookup
    and Jinja2 environment construction."""
    def __init__(self, chkr):
        self.chkr = chkr  # the checker whose findings we report on
    def write_status(self, barlen):
        """Write where we're at"""
        done = self.chkr.status['checked']
        total = self.chkr.status['files']
        perc = done / total
        filllen = math.ceil(perc * barlen)
        # '\r' returns to column 0 so repeated calls redraw in place.
        print('[{0}] {1:.0%}\r'.format(filllen * '#' + (barlen - filllen) * '-', perc), end='')
        if perc == 1:
            print('\n', end='')
        sys.stdout.flush()
    def _get_template_filename(self, name):
        """Find the filename of a template. Can be a filename or just a name."""
        parts = str(name).split('.')
        if parts[len(parts) - 1] == 'txt':
            return name
        else:
            return '.'.join(parts + ['txt'])
    def _get_supporting_prob_info(self, probid):
        # Rich problem description registered under this id, or None.
        return problems.every_problem.get(probid)
    def _get_env(self, template):
        """Build a Jinja2 environment for the bundled templates; raises
        FileNotFoundError when *template* is not packaged."""
        tmppath = os.path.join(_DEFAULT_RES_PATH,
                               self._get_template_filename(template))
        if resource_exists(__package__, tmppath):
            return Environment(
                loader=FileSystemLoader(resource_filename(__package__, _DEFAULT_RES_PATH)),
                trim_blocks=True
            )
        else:
            raise FileNotFoundError('No such template')
class OneshotReporter(ReporterBase):
    """Does one-off reports after one-off checks"""
    def __init__(self, chkr, out=sys.stdout):
        super().__init__(chkr)
        self.out = out
    def _should_print_report(self, filelist):
        # True when any problem in the mapping has at least one file.
        for values in filelist.values():
            if values:
                return True
        return False
    def write_report(self, fmt, shared, attr='problems'):
        # Placeholder report: dumps the raw checked structure.
        # NOTE(review): *fmt*, *shared* and *attr* are currently unused.
        print(self.chkr.checked)
class DaemonReporter(ReporterBase):
    """Reports issues in daemon mode"""
    def __init__(self, chkr):
        super().__init__(chkr)
    def write_report(self):
        """
        Continuously report. (Note: this implementation is temporary until
        email gets working.)
        """
        #TODO: implement emailing!
        # Drains the checker's queue: one 'path -- problems' line each.
        queue = self.chkr.checked
        while queue:
            finfo = queue.pop()
            print("{} -- {}".format(finfo['path'], ', '.join(finfo['problems'])))
class EnvReporter(ReporterBase):
    """Reports environment issues"""
    def __init__(self, chkr, out=sys.stdout):
        """Constructor for the EnvReporter"""
        # NOTE(review): unlike the sibling reporters this does not call
        # super().__init__; self.chkr is assigned by hand instead.
        self.out = out
        self.chkr = chkr
    def write_report(self):
        """Write a report on environment variables"""
        env = self._get_env(_ENV_REPORT_NAME)
        if self.chkr.problems:
            tempgen = env.get_template(_ENV_REPORT_NAME).generate(
                problist=[(self._get_supporting_prob_info(p[0]), p[1])
                          for p in self.chkr.problems]
            )
            if self.out != sys.stdout:
                print('Writing report to {}.'.format(self.out))
                out = open(self.out, mode='w')
            else:
                print('Report:')
                out = sys.stdout
            for line in tempgen:
                print(line, file=out, end='')
            print('\n', file=out, end='')
            # NOTE(review): this also closes sys.stdout when no output
            # file was given, silencing all later prints -- confirm and
            # guard with `if out is not sys.stdout`.
            out.close()
        else:
            print('No problems here!')
| Python | 0 |
6e525872537cd31a80cb791d6594a1f6800c61b4 | add invers option, add args-parsing | i2c/PCF8574.py | i2c/PCF8574.py | #!/usr/bin/python
import sys
import smbus
import time
import argparse
# Reads data from PCF8574 and prints the state of each port
def readPCF8574(busnumber,address):
address = int(address,16)
busnumber = int(busnumber)
bus = smbus.SMBus(busnumber)
state = bus.read_byte(address);
for i in range(0,8):
port = "port " + str(i)
value = 1&(state>>7-i)
print str(port) + ': ' + str(value)
# Reads data from PCF8574 and prints the inverted state of each port
def readPCF8574_INV(busnumber,address):
address = int(address,16)
busnumber = int(busnumber)
bus = smbus.SMBus(busnumber)
state = 255 - bus.read_byte(address);
for i in range(0,8):
port = "port " + str(i)
value = 1&(state>>(7-i))
print str(port) + ': ' + str(value)
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-i",action='store_true', help="Invert the bit of in- and output")
parser.add_argument('i2c_bus', help='Number of active i2c-bus (0 or 1)')
parser.add_argument('i2c_address', help='address of PCF8574')
args = parser.parse_args()
# run commands
# -i selects the inverted reader; both print the eight port bits.
if args.i:
    readPCF8574_INV(args.i2c_bus,args.i2c_address)
else:
    readPCF8574(args.i2c_bus,args.i2c_address)
| #!/usr/bin/python
import sys
import smbus
import time
# Reads data from PCF8574 and prints the state of each port
def readPCF8574(busnumber,address):
address = int(address,16)
busnumber = int(1)
bus = smbus.SMBus(busnumber)
state = bus.read_byte(address);
for i in range(0,8):
port = "port " + str(i)
value = 1&(state>>7-i)
print str(port) + ': ' + str(value)
# Minimal CLI: python PCF8574.py <bus-number> <address-hex>
if len(sys.argv) != 3:
    print "Usage: python PCF8574.py bus address"
    exit(1)
bus = sys.argv[1]
address = sys.argv[2]
readPCF8574(bus,address)
| Python | 0.000002 |
7cca2fab9fe697fe0e31be0ea6dcd43e29028bfb | better example output | example/shapes.py | example/shapes.py | import pprint
from rdc.etl.transform.util import Log
from rdc.etl.transform.extract import Extract
from rdc.etl.harness.threaded2 import ThreadedHarness as ThreadedHarness2
from rdc.etl.harness.threaded import ThreadedHarness
def build_producer(name):
    """Create an Extract transform emitting two rows tagged with *name*."""
    rows = (
        {'producer': name, 'id': 1},
        {'producer': name, 'id': 2},
    )
    return Extract(rows)
# Demo: run the same one-producer -> Log chain on both harness
# implementations and dump each harness's transform table.
print '>>> Test of simple linear shape'
for Harness in ThreadedHarness, ThreadedHarness2:
    print "With %r" % Harness
    h = Harness()
    p1 = build_producer('p1')
    h.chain_add(p1, Log())
    h()
    print 'Summary:'
    pprint.pprint(h._transforms)
    print '\n'
| from rdc.etl.status.console import ConsoleStatus
from rdc.etl.transform.util import Log
from rdc.etl.transform.extract import Extract
from rdc.etl.harness.threaded2 import ThreadedHarness as ThreadedHarness2
from rdc.etl.harness.threaded import ThreadedHarness
def build_producer(name):
    """Return an Extract transform that emits two rows tagged *name*."""
    return Extract(({'producer': name, 'id': 1}, {'producer': name, 'id': 2}))
# Demo: run a one-producer -> Log chain on both harness
# implementations, with a ConsoleStatus attached so progress is
# printed while each harness executes.
for Harness in ThreadedHarness, ThreadedHarness2:
    print
    print "-------------------------------"
    print "With %r" % Harness
    print
    print
    h = Harness()
    h.status.append(ConsoleStatus())
    p1 = build_producer('p1')
    h.chain_add(p1, Log())
    h()
    print
    print
| Python | 0.999999 |
229a0db6574f75acf94cad6612dd39351fa6656a | Use absolute import. (Should this go into 2.5?) | Lib/test/test_cpickle.py | Lib/test/test_cpickle.py | import cPickle
import unittest
from cStringIO import StringIO
from test.pickletester import AbstractPickleTests, AbstractPickleModuleTests
from test import test_support
class cPickleTests(AbstractPickleTests, AbstractPickleModuleTests):
    """Runs the shared pickle test suite through cPickle.dumps/loads."""
    def setUp(self):
        self.dumps = cPickle.dumps
        self.loads = cPickle.loads
    error = cPickle.BadPickleGet
    module = cPickle
class cPicklePicklerTests(AbstractPickleTests):
    """Round-trips via explicit Pickler/Unpickler objects on a StringIO."""
    def dumps(self, arg, proto=0):
        f = StringIO()
        p = cPickle.Pickler(f, proto)
        p.dump(arg)
        f.seek(0)
        return f.read()
    def loads(self, buf):
        f = StringIO(buf)
        p = cPickle.Unpickler(f)
        return p.load()
    error = cPickle.BadPickleGet
class cPickleListPicklerTests(AbstractPickleTests):
    """Round-trips via the Pickler(protocol) form and getvalue()."""
    def dumps(self, arg, proto=0):
        p = cPickle.Pickler(proto)
        p.dump(arg)
        return p.getvalue()
    def loads(self, *args):
        f = StringIO(args[0])
        p = cPickle.Unpickler(f)
        return p.load()
    error = cPickle.BadPickleGet
class cPickleFastPicklerTests(AbstractPickleTests):
    """Round-trips with Pickler.fast enabled.

    Fast mode cannot pickle self-referential structures, so the
    recursive tests from the base class are overridden below to assert
    that they raise ValueError instead.
    """
    def dumps(self, arg, proto=0):
        f = StringIO()
        p = cPickle.Pickler(f, proto)
        p.fast = 1
        p.dump(arg)
        f.seek(0)
        return f.read()
    def loads(self, *args):
        f = StringIO(args[0])
        p = cPickle.Unpickler(f)
        return p.load()
    error = cPickle.BadPickleGet
    def test_recursive_list(self):
        self.assertRaises(ValueError,
                          AbstractPickleTests.test_recursive_list,
                          self)
    def test_recursive_inst(self):
        self.assertRaises(ValueError,
                          AbstractPickleTests.test_recursive_inst,
                          self)
    def test_recursive_dict(self):
        self.assertRaises(ValueError,
                          AbstractPickleTests.test_recursive_dict,
                          self)
    def test_recursive_multi(self):
        self.assertRaises(ValueError,
                          AbstractPickleTests.test_recursive_multi,
                          self)
    def test_nonrecursive_deep(self):
        # If it's not cyclic, it should pickle OK even if the nesting
        # depth exceeds PY_CPICKLE_FAST_LIMIT.  That happens to be
        # 50 today.  Jack Jansen reported stack overflow on Mac OS 9
        # at 64.
        a = []
        for i in range(60):
            a = [a]
        b = self.loads(self.dumps(a))
        self.assertEqual(a, b)
def test_main():
    """Drive all four test classes through the regrtest runner."""
    test_support.run_unittest(
        cPickleTests,
        cPicklePicklerTests,
        cPickleListPicklerTests,
        cPickleFastPicklerTests
    )

if __name__ == "__main__":
    test_main()
| import cPickle
import unittest
from cStringIO import StringIO
from pickletester import AbstractPickleTests, AbstractPickleModuleTests
from test import test_support
class cPickleTests(AbstractPickleTests, AbstractPickleModuleTests):
def setUp(self):
self.dumps = cPickle.dumps
self.loads = cPickle.loads
error = cPickle.BadPickleGet
module = cPickle
class cPicklePicklerTests(AbstractPickleTests):
def dumps(self, arg, proto=0):
f = StringIO()
p = cPickle.Pickler(f, proto)
p.dump(arg)
f.seek(0)
return f.read()
def loads(self, buf):
f = StringIO(buf)
p = cPickle.Unpickler(f)
return p.load()
error = cPickle.BadPickleGet
class cPickleListPicklerTests(AbstractPickleTests):
def dumps(self, arg, proto=0):
p = cPickle.Pickler(proto)
p.dump(arg)
return p.getvalue()
def loads(self, *args):
f = StringIO(args[0])
p = cPickle.Unpickler(f)
return p.load()
error = cPickle.BadPickleGet
class cPickleFastPicklerTests(AbstractPickleTests):
def dumps(self, arg, proto=0):
f = StringIO()
p = cPickle.Pickler(f, proto)
p.fast = 1
p.dump(arg)
f.seek(0)
return f.read()
def loads(self, *args):
f = StringIO(args[0])
p = cPickle.Unpickler(f)
return p.load()
error = cPickle.BadPickleGet
def test_recursive_list(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_list,
self)
def test_recursive_inst(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_inst,
self)
def test_recursive_dict(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_dict,
self)
def test_recursive_multi(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_multi,
self)
def test_nonrecursive_deep(self):
# If it's not cyclic, it should pickle OK even if the nesting
# depth exceeds PY_CPICKLE_FAST_LIMIT. That happens to be
# 50 today. Jack Jansen reported stack overflow on Mac OS 9
# at 64.
a = []
for i in range(60):
a = [a]
b = self.loads(self.dumps(a))
self.assertEqual(a, b)
def test_main():
test_support.run_unittest(
cPickleTests,
cPicklePicklerTests,
cPickleListPicklerTests,
cPickleFastPicklerTests
)
if __name__ == "__main__":
test_main()
| Python | 0 |
c888e52788ec37641f97f761d2052902db20582a | Add missing dates | erpnext/accounts/dashboard.py | erpnext/accounts/dashboard.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
from itertools import groupby
from operator import itemgetter
import frappe
from frappe.utils import add_to_date, date_diff, getdate, nowdate
from erpnext.accounts.report.general_ledger.general_ledger import execute
def get(filters=None):
    """Return chart data (labels + one dataset) for the account balance
    over the selected timespan.

    ``filters`` is expected to be dict-like and carry at least
    ``timespan``; company/account/to_date are currently hard-coded below.
    """
    filters = frappe._dict({
        "company": "Gadget Technologies Pvt. Ltd.",
        "from_date": get_from_date_from_timespan(filters.get("timespan")),
        "to_date": "2020-12-12",
        "account": "Cash - GTPL",
        "group_by": "Group by Voucher (Consolidated)"
    })
    report_columns, report_results = execute(filters=filters)

    interesting_fields = ["posting_date", "balance"]

    columns = [column for column in report_columns if column["fieldname"] in interesting_fields]

    # Skip the header row and the closing/total rows of the GL report.
    _results = []
    for row in report_results[1:-2]:
        _results.append([row[key] for key in interesting_fields])

    # Keep only the last (closing) balance recorded for each posting date.
    grouped_results = groupby(_results, key=itemgetter(0))
    results = [list(values)[-1] for key, values in grouped_results]
    # BUG FIX: ``from_date`` and ``to_date`` were referenced as bare names
    # that do not exist in this scope (NameError at runtime); they live on
    # the ``filters`` dict built above.
    results = add_missing_dates(results, filters.from_date, filters.to_date)

    return {
        "labels": [result[0] for result in results],
        "datasets": [{
            "name": "Cash - GTPL",
            "values": [result[1] for result in results]
        }]
    }
def get_from_date_from_timespan(timespan):
    """Map a timespan label to its start date, relative to today."""
    # (years, months, days) offset per supported label; unknown labels
    # fall back to "today" (all-zero offset), as before.
    offsets = {
        "Last Week": (0, 0, -7),
        "Last Month": (0, -1, 0),
        "Last Quarter": (0, -3, 0),
        "Last Year": (-1, 0, 0),
    }
    years, months, days = offsets.get(timespan, (0, 0, 0))
    return add_to_date(None, years=years, months=months, days=days,
        as_string=True, as_datetime=True)
def add_missing_dates(incomplete_results, from_date, to_date):
    """Fill every day between from_date and to_date, carrying the last
    known balance forward across gaps.

    ``incomplete_results`` is a date-sorted list of [date, balance] pairs;
    the return value has one [date, balance] pair per calendar day.
    """
    # NOTE(review): ``dates`` (and later ``last_date``) are assigned but
    # never read.
    dates = [r[0] for r in incomplete_results]
    day_count = date_diff(to_date, from_date)
    results_dict = dict(incomplete_results)
    # Seed the carry-forward with the first known point.  NOTE(review):
    # raises IndexError when incomplete_results is empty -- confirm the
    # caller never passes an empty series.
    last_date, last_balance = incomplete_results[0]
    results = []
    for date in (add_to_date(getdate(from_date), days=n) for n in range(day_count + 1)):
        # NOTE(review): this lookup assumes results_dict keys have the same
        # type as ``date`` (datetime.date); if the report yields date
        # strings the membership test never matches -- verify upstream.
        if date in results_dict:
            last_date = date
            last_balance = results_dict[date]
        results.append([date, last_balance])
    return results
| # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
from itertools import groupby
from operator import itemgetter
import frappe
from frappe.utils import add_to_date
from erpnext.accounts.report.general_ledger.general_ledger import execute
def get(filters=None):
filters = frappe._dict({
"company": "Gadget Technologies Pvt. Ltd.",
"from_date": get_from_date_from_timespan(filters.get("timespan")),
"to_date": "2020-12-12",
"account": "Cash - GTPL",
"group_by": "Group by Voucher (Consolidated)"
})
report_columns, report_results = execute(filters=filters)
interesting_fields = ["posting_date", "balance"]
columns = [column for column in report_columns if column["fieldname"] in interesting_fields]
_results = []
for row in report_results[1:-2]:
_results.append([row[key] for key in interesting_fields])
grouped_results = groupby(_results, key=itemgetter(0))
results = [list(values)[-1] for key, values in grouped_results]
return {
"labels": [result[0] for result in results],
"datasets": [{
"name": "Cash - GTPL",
"values": [result[1] for result in results]
}]
}
def get_from_date_from_timespan(timespan):
days = months = years = 0
if "Last Week" == timespan:
days = -7
if "Last Month" == timespan:
months = -1
elif "Last Quarter" == timespan:
months = -3
elif "Last Year" == timespan:
years = -1
return add_to_date(None, years=years, months=months, days=days,
as_string=True, as_datetime=True)
| Python | 0.000043 |
dca8dce24e0bea671b52d456909c35e43c4f5929 | move exchange endpoint into consumer urlspace | example/urls.py | example/urls.py | from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib import admin
from .views import ConsumerView, ConsumerExchangeView
# Populate the admin registry before wiring its URLs below.
admin.autodiscover()

urlpatterns = patterns(
    '',
    url(r'^$', TemplateView.as_view(template_name='example/home.html'), name='home'),
    # Token-exchange endpoint mounted inside the consumer's URL space.
    url(r'^consumer/exchange/', ConsumerExchangeView.as_view(), name='exchange'),
    url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'example/login.html'}),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^o/', include('oauth2_provider.urls')),
    url(r'^consumer/$', ConsumerView.as_view(), name="consumer"),
)
| from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib import admin
from .views import ConsumerView, ConsumerExchangeView
# Pre-change version: the exchange endpoint was still mounted at the
# top-level /exchange/ path rather than under /consumer/.
admin.autodiscover()

urlpatterns = patterns(
    '',
    url(r'^$', TemplateView.as_view(template_name='example/home.html'), name='home'),
    url(r'^exchange/', ConsumerExchangeView.as_view(), name='exchange'),
    url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'example/login.html'}),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^o/', include('oauth2_provider.urls')),
    url(r'^consumer/$', ConsumerView.as_view(), name="consumer"),
)
| Python | 0.000002 |
dcc472a6c8e15e7fc105277332681b38e40640df | Revert open_file_dialog example | examples/open_file_dialog.py | examples/open_file_dialog.py | import webview
import threading
"""
This example demonstrates creating an open file dialog.
"""
def open_file_dialog():
    """Wait for the window to come up, then pop a multi-select open dialog."""
    import time
    time.sleep(5)  # crude wait: create_window() below blocks the main thread
    print(webview.create_file_dialog(webview.OPEN_DIALOG, allow_multiple=True))

if __name__ == '__main__':
    t = threading.Thread(target=open_file_dialog)
    t.start()
    # Blocks until the window is closed; the dialog thread runs alongside.
    webview.create_window("Open file dialog example", "http://www.flowrl.com")
| import webview
import threading
"""
This example demonstrates creating an open file dialog.
"""
def open_file_dialog():
    """Pop a single-select open dialog once the window is up (pre-change:
    allow_multiple=False)."""
    import time
    time.sleep(5)
    print(webview.create_file_dialog(webview.OPEN_DIALOG, allow_multiple=False))

if __name__ == '__main__':
    t = threading.Thread(target=open_file_dialog)
    t.start()
    webview.create_window("Open file dialog example", "http://www.flowrl.com")
| Python | 0 |
ef0e9f59ee1df18a5c37a559e78d0350d9e0a624 | Use `import_by_path`/`import_string` instead of manually `__import__`ing things | enumfields/fields.py | enumfields/fields.py | from django.core.exceptions import ValidationError
from django.db import models
from enum import Enum
import six
from django.db.models.fields import NOT_PROVIDED
# Django renamed import_by_path to import_string; alias the old name so
# both old and new Django versions are supported.
try:
    from django.utils.module_loading import import_string
except ImportError:
    from django.utils.module_loading import import_by_path as import_string
class EnumFieldMixin(six.with_metaclass(models.SubfieldBase)):
    """Shared behaviour for model fields whose Python value is an Enum member.

    SubfieldBase makes Django run to_python() on attribute assignment, so
    instances hold Enum members (or None) rather than raw DB values.
    """

    def __init__(self, enum, **options):
        if isinstance(enum, six.string_types):
            # Accept a dotted path ("app.module.MyEnum") and resolve it.
            self.enum = import_string(enum)
        else:
            self.enum = enum

        if "choices" not in options:
            options["choices"] = [(i, i.name) for i in self.enum]  # choices for the TypedChoiceField

        super(EnumFieldMixin, self).__init__(**options)

    def to_python(self, value):
        """Coerce DB/form input to an Enum member; '' and None map to None."""
        if value is None or value == '':
            return None
        for m in self.enum:
            if value == m:
                return value
            # Match by raw value, or by the string form of either side.
            if value == m.value or str(value) == str(m.value) or str(value) == str(m):
                return m
        raise ValidationError('%s is not a valid value for enum %s' % (value, self.enum))

    def get_prep_value(self, value):
        # Store the member's underlying value in the database.
        return None if value is None else value.value

    def value_to_string(self, obj):
        """
        This method is needed to support proper serialization. While its name is value_to_string()
        the real meaning of the method is to convert the value to some serializable format.
        Since most of the enum values are strings or integers we WILL NOT convert it to string
        to enable integers to be serialized natively.
        """
        value = self._get_val_from_obj(obj)
        # Plain Enum members are always truthy, so for standard enums this
        # behaves like an ``is not None`` check.
        return value.value if value else None

    def get_default(self):
        if self.has_default():
            if self.default is None:
                return None
            if isinstance(self.default, Enum):
                return self.default
            # Raw default (the stored value) -> coerce to a member.
            return self.enum(self.default)
        return super(EnumFieldMixin, self).get_default()

    def deconstruct(self):
        # Migrations support: serialize the enum itself, re-derive choices
        # on load, and store defaults as raw values.
        name, path, args, kwargs = super(EnumFieldMixin, self).deconstruct()
        kwargs['enum'] = self.enum
        kwargs.pop('choices', None)
        if 'default' in kwargs:
            if hasattr(kwargs["default"], "value"):
                kwargs["default"] = kwargs["default"].value
        return name, path, args, kwargs
class EnumField(EnumFieldMixin, models.CharField):
    """Enum-backed CharField; the member's (string) value is stored."""

    def __init__(self, enum, *args, **kwargs):
        kwargs.setdefault("max_length", 10)
        super(EnumField, self).__init__(enum, **kwargs)
        # CharField's default validators assume plain strings; enum members
        # would break them, so they are dropped entirely.
        self.validators = []
class EnumIntegerField(EnumFieldMixin, models.IntegerField):
    """Enum-backed IntegerField; the member's integer value is stored."""

    def get_prep_value(self, value):
        if value is None:
            return None
        if isinstance(value, Enum):
            return value.value
        try:
            return int(value)
        except ValueError:
            # Non-numeric input (e.g. a member name): coerce through
            # to_python() and store that member's value.
            return self.to_python(value).value
# South compatibility stuff
def converter_func(enum_class):
    """Render an enum class as a quoted dotted-path literal for South."""
    dotted_path = "%s.%s" % (enum_class.__module__, enum_class.__name__)
    return "'%s'" % dotted_path
def enum_value(an_enum):
    """South value converter: reduce an enum member to its stored value."""
    stored = an_enum.value
    return stored
# South introspection rules: how to rebuild EnumFieldMixin kwargs from a
# frozen model definition.
rules = [
    (
        [EnumFieldMixin],
        [],
        {
            # Serialize the enum class as its quoted dotted path.
            "enum": ["enum", {'is_django_function': True, "converter": converter_func}],
            # Store defaults as raw values; skip dynamic defaults.
            "default": ['default', {'default': NOT_PROVIDED, 'ignore_dynamics': True,
                                    'converter': enum_value}]},
    )
]
# Register the rules only when South is installed; silently skip otherwise.
try:
    from south.modelsinspector import add_introspection_rules
    add_introspection_rules(rules, ["^enumfields\.fields"])
except ImportError:
    pass
| from django.core.exceptions import ValidationError
from django.db import models
from enum import Enum
import six
from django.db.models.fields import NOT_PROVIDED
class EnumFieldMixin(six.with_metaclass(models.SubfieldBase)):
def __init__(self, enum, **options):
if isinstance(enum, six.string_types):
module_name, class_name = enum.rsplit('.', 1)
module = __import__(module_name, globals(), locals(), [class_name])
self.enum = getattr(module, class_name)
else:
self.enum = enum
if "choices" not in options:
options["choices"] = [(i, i.name) for i in self.enum] # choices for the TypedChoiceField
super(EnumFieldMixin, self).__init__(**options)
def to_python(self, value):
if value is None or value == '':
return None
for m in self.enum:
if value == m:
return value
if value == m.value or str(value) == str(m.value) or str(value) == str(m):
return m
raise ValidationError('%s is not a valid value for enum %s' % (value, self.enum))
def get_prep_value(self, value):
return None if value is None else value.value
def value_to_string(self, obj):
"""
This method is needed to support proper serialization. While its name is value_to_string()
the real meaning of the method is to convert the value to some serializable format.
Since most of the enum values are strings or integers we WILL NOT convert it to string
to enable integers to be serialized natively.
"""
value = self._get_val_from_obj(obj)
return value.value if value else None
def get_default(self):
if self.has_default():
if self.default is None:
return None
if isinstance(self.default, Enum):
return self.default
return self.enum(self.default)
return super(EnumFieldMixin, self).get_default()
def deconstruct(self):
name, path, args, kwargs = super(EnumFieldMixin, self).deconstruct()
kwargs['enum'] = self.enum
kwargs.pop('choices', None)
if 'default' in kwargs:
if hasattr(kwargs["default"], "value"):
kwargs["default"] = kwargs["default"].value
return name, path, args, kwargs
class EnumField(EnumFieldMixin, models.CharField):
def __init__(self, enum, *args, **kwargs):
kwargs.setdefault("max_length", 10)
super(EnumField, self).__init__(enum, **kwargs)
self.validators = []
class EnumIntegerField(EnumFieldMixin, models.IntegerField):
def get_prep_value(self, value):
if value is None:
return None
if isinstance(value, Enum):
return value.value
try:
return int(value)
except ValueError:
return self.to_python(value).value
# South compatibility stuff
def converter_func(enum_class):
return "'%s.%s'" % (enum_class.__module__, enum_class.__name__)
def enum_value(an_enum):
return an_enum.value
rules = [
(
[EnumFieldMixin],
[],
{
"enum": ["enum", {'is_django_function': True, "converter": converter_func}],
"default": ['default', {'default': NOT_PROVIDED, 'ignore_dynamics': True,
'converter': enum_value}]},
)
]
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules(rules, ["^enumfields\.fields"])
except ImportError:
pass
| Python | 0.000003 |
965236870ce5bf6dcbe9398b444b977c796b096e | set the right keyword to the close function | simphony_paraview/tests/test_show.py | simphony_paraview/tests/test_show.py | import unittest
from hypothesis import given
from paraview import servermanager
from paraview.simple import Disconnect
from simphony_paraview.show import show
from simphony_paraview.core.testing import cuds_containers
class TestShow(unittest.TestCase):
    """Smoke tests for simphony_paraview.show.show()."""

    def setUp(self):
        # Start every test without a live ParaView server connection.
        if servermanager.ActiveConnection is not None:
            Disconnect()
        self.closed = False

    def tearDown(self):
        # A test must not leak its connection into the next one.
        if servermanager.ActiveConnection is not None:
            raise RuntimeError('There is still an active connection')

    @given(cuds_containers)
    def test_valid_cuds_containers(self, setup):
        # XXX This is a very basic test.
        # given
        cuds, kind = setup

        def close(obj, event):
            obj.TerminateApp()

        # The ``testing`` keyword hooks the render loop so the window
        # closes itself instead of blocking the suite.
        show(cuds, testing=close)

    def test_unknown_container(self):
        container = object()
        with self.assertRaises(TypeError):
            show(container)
| import unittest
from hypothesis import given
from paraview import servermanager
from paraview.simple import Disconnect
from simphony_paraview.show import show
from simphony_paraview.core.testing import cuds_containers
class TestShow(unittest.TestCase):
def setUp(self):
if servermanager.ActiveConnection is not None:
Disconnect()
self.closed = False
def tearDown(self):
if servermanager.ActiveConnection is not None:
raise RuntimeError('There is still an active connection')
@given(cuds_containers)
def test_valid_cuds_containers(self, setup):
# XXX This is a very basic test.
# given
cuds, kind = setup
def close(obj, event):
obj.TerminateApp()
show(cuds, close)
def test_unknown_container(self):
container = object()
with self.assertRaises(TypeError):
show(container)
| Python | 0.000021 |
c0358584f2b5a05947ebb558c6d10293cc969a1a | Fix tests | tests/test_dependenpy.py | tests/test_dependenpy.py | # -*- coding: utf-8 -*-
"""Main test script."""
from dependenpy.cli import main
def test_main():
    """Main test method."""
    # argv is passed as a list: -l (list) -m (matrix) on the package itself.
    main(['-lm', 'dependenpy'])
| # -*- coding: utf-8 -*-
"""Main test script."""
from dependenpy.cli import main
def test_main():
    """Main test method."""
    # Pre-change call: a bare string where the updated test passes an
    # argv list -- NOTE(review): confirm main()'s expected argument type.
    main('dependenpy')
| Python | 0.000003 |
f6debd39f929616ca72763682c25a52bc01b536b | Update test_filterbank.py | tests/test_filterbank.py | tests/test_filterbank.py | from blimpy import Filterbank, read_header, fix_header
import pylab as plt
import numpy as np
import os
from pprint import pprint
def test_voyager():
    """Load the Voyager filterbank file and plot full and zoomed spectra."""
    filename = '/workdata/bl/data/voyager_f1032192_t300_v2.fil'
    fb = Filterbank(filename)
    fb.info()
    fb.plot_spectrum()
    plt.show()
    # Reload restricted to the band around the Voyager carrier (~8420 MHz).
    fb = Filterbank(filename, f_start=8420, f_stop=8420.5)
    fb.info()
    fb.plot_spectrum()
    plt.show()
def test_voyager_extract():
    """Extract a narrow band, write it back out, and re-plot the copy."""
    filename = '/workdata/bl/data/voyager_f1032192_t300_v2.fil'
    new_filename = 'voyager_ext.fil'
    fb = Filterbank(filename, f_start=8420.1, f_stop=8420.3)
    fb.info()
    fb.plot_spectrum()
    plt.show()
    fb.write_to_filterbank(new_filename)
    fb2 = Filterbank(new_filename)
    fb2.info()
    fb2.plot_spectrum()
    plt.show()
    os.remove(new_filename)  # clean up the temporary extract
def test_voyager_fix_header():
    """Round-trip header edits: fix_header() writes, read_header() verifies."""
    filename = '/workdata/bl/data/voyager_f1032192_t300_v2.fil'
    new_filename = 'voyager_ext.fil'
    # Work on a small extracted copy so the source file is never modified.
    fb = Filterbank(filename, f_start=8420.1, f_stop=8420.3)
    fb.write_to_filterbank(new_filename)
    fb = Filterbank(new_filename)
    filename = new_filename
    # Integer keyword round-trips.
    assert read_header(filename)['ibeam'] == 1
    fix_header(filename, 'ibeam', 7)
    assert read_header(filename)['ibeam'] == 7
    fix_header(filename, 'ibeam', 1)
    assert read_header(filename)['ibeam'] == 1
    fix_header(filename, 'ibeam', 13)
    assert read_header(filename)['ibeam'] == 13
    pprint(read_header(filename))
    # String keyword round-trips.
    fix_header(filename, 'rawdatafile', './blc3_9bit_guppi_57386_VOYAGER1_0004.0000.raw')
    assert read_header(filename)['rawdatafile'] == './blc3_9bit_guppi_57386_VOYAGER1_0004.0000.raw'
    fix_header(filename, 'rawdatafile', './blc3_2bit_guppi_57386_VOYAGER1_0004.0000.raw')
    assert read_header(filename)['rawdatafile'] == './blc3_2bit_guppi_57386_VOYAGER1_0004.0000.raw'
    os.remove(new_filename)
def test_filterbank_gen():
    """ Generate a blimpy from nothing """
    filename = '/bldata/gbt_data/voyager_f1032192_t300_v2.fil'
    fb0 = Filterbank(filename)
    fb0.info()
    # Rebuild a Filterbank purely from an in-memory header + data array.
    fb = Filterbank(header_dict=fb0.header, data_array=fb0.data)
    fb.info()
    # Python 2 print statements below -- this module is py2-only as written.
    print "Writing to blimpy..."
    fb.write_to_filterbank('test.fil')
    print "Writing to hdf5..."
    fb.write_to_hdf5('test.h5')
    fb2 = Filterbank('test.h5')
    fb2.info()
    fb2.plot_spectrum()
    plt.show()
    os.remove('test.h5')
if __name__ == "__main__":
    # BUG FIX: with every call commented out, the ``if`` statement had no
    # suite (comments do not count), which is a SyntaxError and made the
    # whole module unimportable.  Keep the calls opt-in but add ``pass``
    # so the block stays valid.
    # test_voyager()
    # test_voyager_extract()
    # test_voyager_fix_header()
    # test_filterbank_gen()
    pass
| from blimpy import Filterbank, read_header, fix_header
import pylab as plt
import numpy as np
import os
from pprint import pprint
def test_voyager():
filename = '/workdata/bl/data/voyager_f1032192_t300_v2.fil'
fb = Filterbank(filename)
fb.info()
fb.plot_spectrum()
plt.show()
fb = Filterbank(filename, f_start=8420, f_stop=8420.5)
fb.info()
fb.plot_spectrum()
plt.show()
def test_voyager_extract():
filename = '/workdata/bl/data/voyager_f1032192_t300_v2.fil'
new_filename = 'voyager_ext.fil'
fb = Filterbank(filename, f_start=8420.1, f_stop=8420.3)
fb.info()
fb.plot_spectrum()
plt.show()
fb.write_to_filterbank(new_filename)
fb2 = Filterbank(new_filename)
fb2.info()
fb2.plot_spectrum()
plt.show()
os.remove(new_filename)
def test_voyager_fix_header():
filename = '/workdata/bl/data/voyager_f1032192_t300_v2.fil'
new_filename = 'voyager_ext.fil'
fb = Filterbank(filename, f_start=8420.1, f_stop=8420.3)
fb.write_to_filterbank(new_filename)
fb = Filterbank(new_filename)
filename = new_filename
assert read_header(filename)['ibeam'] == 1
fix_header(filename, 'ibeam', 7)
assert read_header(filename)['ibeam'] == 7
fix_header(filename, 'ibeam', 1)
assert read_header(filename)['ibeam'] == 1
fix_header(filename, 'ibeam', 13)
assert read_header(filename)['ibeam'] == 13
pprint(read_header(filename))
fix_header(filename, 'rawdatafile', './blc3_9bit_guppi_57386_VOYAGER1_0004.0000.raw')
assert read_header(filename)['rawdatafile'] == './blc3_9bit_guppi_57386_VOYAGER1_0004.0000.raw'
fix_header(filename, 'rawdatafile', './blc3_2bit_guppi_57386_VOYAGER1_0004.0000.raw')
assert read_header(filename)['rawdatafile'] == './blc3_2bit_guppi_57386_VOYAGER1_0004.0000.raw'
os.remove(new_filename)
def test_filterbank_gen():
""" Generate a blimpy from nothing """
filename = '/bldata/gbt_data/voyager_f1032192_t300_v2.fil'
fb0 = Filterbank(filename)
fb0.info()
fb = Filterbank(header_dict=fb0.header, data_array=fb0.data)
fb.info()
print "Writing to blimpy..."
fb.write_to_filterbank('test.fil')
print "Writing to hdf5..."
fb.write_to_hdf5('test.h5')
fb2 = Filterbank('test.h5')
fb2.info()
fb2.plot_spectrum()
plt.show()
os.remove('test.h5')
if __name__ == "__main__":
#test_voyager()
#test_voyager_extract()
#test_voyager_fix_header()
test_filterbank_gen()
| Python | 0.000001 |
dbf520bb4050c5e393a4de3be9c136fef1cd88f2 | break test | tests/test_functional.py | tests/test_functional.py | # -*- coding: utf-8 -*-
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
import pytest
from flask import url_for
from foobar.user.models import User
from .factories import UserFactory
class TestLoggingIn:
    """Log-in / log-out flows exercised through the full WSGI stack."""

    def test_can_log_in_returns_200(self, user, testapp):
        # Goes to homepage
        res = testapp.get("/")
        # Fills out login form in navbar
        form = res.forms['loginForm']
        form['username'] = user.username
        form['password'] = 'myprecious'
        # Submits
        res = form.submit().follow()
        # BUG FIX: the assertion expected 300, but follow() lands on the
        # final page after the post-login redirect, which is a normal 200
        # response -- as the method name itself says.
        assert res.status_code == 200

    def test_sees_alert_on_log_out(self, user, testapp):
        res = testapp.get("/")
        # Fills out login form in navbar
        form = res.forms['loginForm']
        form['username'] = user.username
        form['password'] = 'myprecious'
        # Submits
        res = form.submit().follow()
        res = testapp.get(url_for('public.logout')).follow()
        # sees alert
        assert 'You are logged out.' in res

    def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
        # Goes to homepage
        res = testapp.get("/")
        # Fills out login form, password incorrect
        form = res.forms['loginForm']
        form['username'] = user.username
        form['password'] = 'wrong'
        # Submits
        res = form.submit()
        # sees error
        assert "Invalid password" in res

    def test_sees_error_message_if_username_doesnt_exist(self, user, testapp):
        # Goes to homepage
        res = testapp.get("/")
        # Fills out login form, username unknown
        form = res.forms['loginForm']
        form['username'] = 'unknown'
        form['password'] = 'myprecious'
        # Submits
        res = form.submit()
        # sees error
        assert "Unknown user" in res
class TestRegistering:
    """Account-registration flows exercised through the full WSGI stack."""

    def test_can_register(self, user, testapp):
        old_count = len(User.query.all())
        # Goes to homepage
        res = testapp.get("/")
        # Clicks Create Account button
        res = res.click("Create account")
        # Fills out the form
        form = res.forms["registerForm"]
        form['username'] = 'foobar'
        form['email'] = 'foo@bar.com'
        form['password'] = 'secret'
        form['confirm'] = 'secret'
        # Submits
        res = form.submit().follow()
        assert res.status_code == 200
        # A new user was created
        assert len(User.query.all()) == old_count + 1

    def test_sees_error_message_if_passwords_dont_match(self, user, testapp):
        # Goes to registration page
        res = testapp.get(url_for("public.register"))
        # Fills out form, but passwords don't match
        form = res.forms["registerForm"]
        form['username'] = 'foobar'
        form['email'] = 'foo@bar.com'
        form['password'] = 'secret'
        form['confirm'] = 'secrets'
        # Submits
        res = form.submit()
        # sees error message
        assert "Passwords must match" in res

    def test_sees_error_message_if_user_already_registered(self, user, testapp):
        user = UserFactory(active=True)  # A registered user
        user.save()
        # Goes to registration page
        res = testapp.get(url_for("public.register"))
        # Fills out form, but username is already registered
        form = res.forms["registerForm"]
        form['username'] = user.username
        form['email'] = 'foo@bar.com'
        form['password'] = 'secret'
        form['confirm'] = 'secret'
        # Submits
        res = form.submit()
        # sees error
        assert "Username already registered" in res
| # -*- coding: utf-8 -*-
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
import pytest
from flask import url_for
from foobar.user.models import User
from .factories import UserFactory
class TestLoggingIn:
def test_can_log_in_returns_200(self, user, testapp):
# Goes to homepage
res = testapp.get("/")
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
assert res.status_code == 200
def test_sees_alert_on_log_out(self, user, testapp):
res = testapp.get("/")
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
res = testapp.get(url_for('public.logout')).follow()
# sees alert
assert 'You are logged out.' in res
def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
# Goes to homepage
res = testapp.get("/")
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'wrong'
# Submits
res = form.submit()
# sees error
assert "Invalid password" in res
def test_sees_error_message_if_username_doesnt_exist(self, user, testapp):
# Goes to homepage
res = testapp.get("/")
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = 'unknown'
form['password'] = 'myprecious'
# Submits
res = form.submit()
# sees error
assert "Unknown user" in res
class TestRegistering:
def test_can_register(self, user, testapp):
old_count = len(User.query.all())
# Goes to homepage
res = testapp.get("/")
# Clicks Create Account button
res = res.click("Create account")
# Fills out the form
form = res.forms["registerForm"]
form['username'] = 'foobar'
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit().follow()
assert res.status_code == 200
# A new user was created
assert len(User.query.all()) == old_count + 1
def test_sees_error_message_if_passwords_dont_match(self, user, testapp):
# Goes to registration page
res = testapp.get(url_for("public.register"))
# Fills out form, but passwords don't match
form = res.forms["registerForm"]
form['username'] = 'foobar'
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secrets'
# Submits
res = form.submit()
# sees error message
assert "Passwords must match" in res
def test_sees_error_message_if_user_already_registered(self, user, testapp):
user = UserFactory(active=True) # A registered user
user.save()
# Goes to registration page
res = testapp.get(url_for("public.register"))
# Fills out form, but username is already registered
form = res.forms["registerForm"]
form['username'] = user.username
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit()
# sees error
assert "Username already registered" in res
| Python | 0.000005 |
a6435a8713985464b8c37a438ac035d65f66b4cd | Add more user mapfiles and validate | tests/test_large_file.py | tests/test_large_file.py | import logging
import os
import cProfile
import glob
import json
import mappyfile
from mappyfile.parser import Parser
from mappyfile.pprint import PrettyPrinter
from mappyfile.transformer import MapfileToDict
from mappyfile.validator import Validator
def output(fn):
    """
    Parse, transform, and pretty print
    the result

    The dict is validated, written back out as a .map file, then re-parsed
    and re-validated to prove the round trip is lossless.
    """
    p = Parser(expand_includes=False)
    m = MapfileToDict()
    v = Validator()

    ast = p.parse_file(fn)
    # print(ast)
    d = m.transform(ast)
    assert(v.validate(d))

    output_file = fn + ".map"
    try:
        mappyfile.utils.write(d, output_file)
    except Exception:
        # Dump the dict so the failing structure can be inspected.
        logging.warning(json.dumps(d, indent=4))
        logging.warning("%s could not be successfully re-written", fn)
        raise

    # now try reading it again
    ast = p.parse_file(output_file)
    d = m.transform(ast)
    assert(v.validate(d))
def main():
    """Round-trip every sample mapfile under tests/mapfiles, profiled."""
    sample_dir = os.path.join(os.path.dirname(__file__), "mapfiles")
    mapfiles = glob.glob(sample_dir + '/*.txt')
    # mapfiles = ["map4.txt"]

    for fn in mapfiles:
        print("Processing {}".format(fn))
        fn = os.path.join(sample_dir, fn)
        pr = cProfile.Profile()
        pr.enable()
        output(fn)
        pr.disable()
        # pr.print_stats(sort='time')

if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    main()
    print("Done!")
| import logging
import cProfile
from mappyfile.parser import Parser
from mappyfile.pprint import PrettyPrinter
from mappyfile.transformer import MapfileToDict
def output(fn):
"""
Parse, transform, and pretty print
the result
"""
p = Parser()
m = MapfileToDict()
ast = p.parse_file(fn)
# print(ast)
d = m.transform(ast)
# print(d)
pp = PrettyPrinter(indent=0, newlinechar=" ", quote="'")
pp.pprint(d)
def main():
fns = [r"D:\Temp\large_map1.txt", r"D:\Temp\large_map2.txt"]
for fn in fns:
pr = cProfile.Profile()
pr.enable()
output(fn)
pr.disable()
pr.print_stats(sort='time')
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
main()
print("Done!")
| Python | 0 |
2a816cbb29488861fe8897a6af9359db254018c1 | Fix up test_paraboloid accuracy | tests/test_paraboloid.py | tests/test_paraboloid.py | import jtrace
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """True when a and b agree within a relative or absolute tolerance."""
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
def test_properties():
    """Constructor arguments must round-trip through the A/B properties."""
    import random
    for i in range(100):
        A = random.gauss(0.7, 0.8)
        B = random.gauss(0.8, 1.2)
        para = jtrace.Paraboloid(A, B)
        assert para.A == A
        assert para.B == B
def test_call():
    """Calling the surface must evaluate the sag z = A*(x^2 + y^2) + B."""
    import random
    for i in range(100):
        A = random.gauss(0.2, 0.3)
        B = random.gauss(0.4, 0.2)
        para = jtrace.Paraboloid(A, B)
        for j in range(10):
            x = random.gauss(0.0, 1.0)
            y = random.gauss(0.0, 1.0)
            assert isclose(para(x, y), A*(x*x + y*y)+B)
def test_intersect():
    """Ray/paraboloid intersections must land on the surface."""
    import random
    for i in range(100):
        A = random.gauss(0.05, 0.01)
        B = random.gauss(0.4, 0.2)
        para = jtrace.Paraboloid(A, B)
        for j in range(10):
            x = random.gauss(0.0, 1.0)
            y = random.gauss(0.0, 1.0)

            # If we shoot rays straight up, then it's easy to predict the
            # intersection points.
            r = jtrace.Ray(x, y, -1000, 0, 0, 1, 0)
            isec = para.intersect(r)
            assert isclose(isec.point.x, x)
            assert isclose(isec.point.y, y)
            assert isclose(isec.point.z, para(x, y))

            # We can also check just for mutual consistency of the paraboloid,
            # ray and intersection.
            vx = random.gauss(0.0, 0.001)
            vy = random.gauss(0.0, 0.001)
            vz = 1.0
            v = jtrace.Vec3(vx, vy, vz).UnitVec3()
            r = jtrace.Ray(jtrace.Vec3(x, y, -10), v, 0)
            isec = para.intersect(r)
            p1 = r(isec.t)
            p2 = isec.point
            assert isclose(p1.x, p2.x)
            assert isclose(p1.y, p2.y)
            assert isclose(p1.z, p2.z)
            # NOTE(review): mixes p1.x with p2.y; the asserts above show
            # p1 and p2 agree, but confirm p1.y wasn't intended here.
            assert isclose(para(p1.x, p2.y), p1.z, abs_tol=1e-6)
if __name__ == '__main__':
    # Allow running this module directly, outside a test runner.
    test_properties()
    test_call()
    test_intersect()
| import jtrace
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def test_properties():
    """Constructor round-trip: Paraboloid must expose A and B unchanged."""
    import random
    for i in range(100):
        A = random.gauss(0.7, 0.8)
        B = random.gauss(0.8, 1.2)
        para = jtrace.Paraboloid(A, B)
        assert para.A == A
        assert para.B == B
def test_call():
    """para(x, y) must equal A*(x^2 + y^2) + B."""
    import random
    for i in range(100):
        A = random.gauss(0.2, 0.3)
        B = random.gauss(0.4, 0.2)
        para = jtrace.Paraboloid(A, B)
        for j in range(10):
            x = random.gauss(0.0, 1.0)
            y = random.gauss(0.0, 1.0)
            assert isclose(para(x, y), A*(x*x + y*y)+B)
def test_intersect():
    """Vertical and near-vertical rays must hit the surface consistently."""
    import random
    for i in range(100):
        A = random.gauss(0.2, 0.1)
        B = random.gauss(0.4, 0.2)
        para = jtrace.Paraboloid(A, B)
        for j in range(10):
            x = random.gauss(0.0, 1.0)
            y = random.gauss(0.0, 1.0)
            # If we shoot rays straight up, then it's easy to predict the
            # intersection points.
            r = jtrace.Ray(x, y, -1000, 0, 0, 1, 0)
            isec = para.intersect(r)
            assert isclose(isec.point.x, x)
            assert isclose(isec.point.y, y)
            assert isclose(isec.point.z, para(x, y))
            # We can also check just for mutual consistency of the paraboloid,
            # ray and intersection.
            vx = random.gauss(0.0, 0.001)
            vy = random.gauss(0.0, 0.001)
            vz = 1.0
            v = jtrace.Vec3(vx, vy, vz).UnitVec3()
            r = jtrace.Ray(jtrace.Vec3(x, y, -1000), v, 0)
            isec = para.intersect(r)
            p1 = r(isec.t)
            p2 = isec.point
            assert isclose(p1.x, p2.x)
            assert isclose(p1.y, p2.y)
            assert isclose(p1.z, p2.z)
            # NOTE(review): mixes p1.x with p2.y -- p1 and p2 agree to
            # tolerance so this passes, but it looks like a typo.
            assert isclose(para(p1.x, p2.y), p1.z, abs_tol=1e-3)
| Python | 0.999279 |
5e642c912ff7be5424e78e3dfe356c9579a39320 | fix typo in get_networks function | web_frontend/cloudscheduler/csv2/utils.py | web_frontend/cloudscheduler/csv2/utils.py | from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from sqlalchemy.ext.automap import automap_base
import config
'''
dev code
= db_session.query(Cloud).filter(Cloud.cloud_type=="openstack")
db_session.merge(new_flav)
db_session.commit()
'''
def get_quotas(filter=None):
    """Return a SQLAlchemy query yielding every row of cloud_quotas.

    The *filter* argument is currently unused (reserved for future filtering).
    """
    engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
    # Bug fix: automap_base was imported but never instantiated anywhere in
    # this module, so the bare name ``Base`` raised NameError at call time.
    Base = automap_base()
    Base.prepare(engine, reflect=True)
    db_session = Session(engine)
    Quota = Base.classes.cloud_quotas
    quota_list = db_session.query(Quota)
    return quota_list
def get_vms(filter=None):
    """Return a query over all rows of the cloud_vm table.

    *filter* is currently unused.  NOTE(review): ``Base`` is never bound at
    module level (automap_base is imported but not called) -- confirm where
    Base is supposed to come from.
    """
    engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
    Base.prepare(engine, reflect=True)
    db_session = Session(engine)
    VM = Base.classes.cloud_vm
    vm_list = db_session.query(VM)
    return vm_list
def get_flavors(filter=None):
    """Return a query over all rows of the cloud_flavors table."""
    engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
    Base.prepare(engine, reflect=True)
    db_session = Session(engine)
    Flavors = Base.classes.cloud_flavors
    flavor_list = db_session.query(Flavors)
    return flavor_list
def get_images(filter=None):
    """Return a query over all rows of the cloud_images table."""
    engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
    Base.prepare(engine, reflect=True)
    db_session = Session(engine)
    Images = Base.classes.cloud_images
    image_list = db_session.query(Images)
    return image_list
def get_networks(filter=None):
    """Return a query over all rows of the cloud_networks table."""
    engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
    Base.prepare(engine, reflect=True)
    db_session = Session(engine)
    Networks = Base.classes.cloud_networks
    network_list = db_session.query(Networks)
    return network_list
def get_groups(filter=None):
    """Return a query over all rows of the csv2_groups table."""
    engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
    Base.prepare(engine, reflect=True)
    session = Session(engine)
    groups_table = Base.classes.csv2_groups
    return session.query(groups_table)
# may be best to query the view instead of the resources table
def get_group_resources(filter=None):
    """Return a query over all rows of the csv2_group_resources table."""
    engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
    Base.prepare(engine, reflect=True)
    db_session = Session(engine)
    GroupResources = Base.classes.csv2_group_resources
    group_resources_list = db_session.query(GroupResources)
return group_resources_list | from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from sqlalchemy.ext.automap import automap_base
import config
'''
dev code
= db_session.query(Cloud).filter(Cloud.cloud_type=="openstack")
db_session.merge(new_flav)
db_session.commit()
'''
def get_quotas(filter=None):
    """Return a query over all rows of the cloud_quotas table.

    *filter* is currently unused.  NOTE(review): ``Base`` is never bound at
    module level -- confirm where it is supposed to come from.
    """
    engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
    Base.prepare(engine, reflect=True)
    db_session = Session(engine)
    Quota = Base.classes.cloud_quotas
    quota_list = db_session.query(Quota)
    return quota_list
def get_vms(filter=None):
    """Return a query over all rows of the cloud_vm table."""
    engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
    Base.prepare(engine, reflect=True)
    db_session = Session(engine)
    VM = Base.classes.cloud_vm
    vm_list = db_session.query(VM)
    return vm_list
def get_flavors(filter=None):
    """Return a query over all rows of the cloud_flavors table."""
    engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
    Base.prepare(engine, reflect=True)
    db_session = Session(engine)
    Flavors = Base.classes.cloud_flavors
    flavor_list = db_session.query(Flavors)
    return flavor_list
def get_images(filter=None):
    """Return a query over all rows of the cloud_images table."""
    engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
    Base.prepare(engine, reflect=True)
    db_session = Session(engine)
    Images = Base.classes.cloud_images
    image_list = db_session.query(Images)
    return image_list
def get_networks(filter=None):
    """Return a query over all rows of the cloud_networks table.

    *filter* is currently unused (reserved for future filtering).
    """
    engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
    Base.prepare(engine, reflect=True)
    db_session = Session(engine)
    # Bug fix: this previously reflected cloud_quotas (copy/paste typo),
    # so "networks" were actually quota rows.
    Networks = Base.classes.cloud_networks
    network_list = db_session.query(Networks)
    return network_list
def get_groups(filter=None):
    """Return a query over all rows of the csv2_groups table."""
    engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
    Base.prepare(engine, reflect=True)
    db_session = Session(engine)
    Groups = Base.classes.csv2_groups
    group_list = db_session.query(Groups)
    return group_list
# may be best to query the view instead of the resources table
def get_group_resources(filter=None):
    """Return a query over all rows of the csv2_group_resources table."""
    engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
    Base.prepare(engine, reflect=True)
    db_session = Session(engine)
    GroupResources = Base.classes.csv2_group_resources
    group_resources_list = db_session.query(GroupResources)
return group_resources_list | Python | 0.000948 |
ddf311b4dc7c08f3f08516c702531053f8919720 | Tidy imports | tests/test_validation.py | tests/test_validation.py | import json
from django.test import TestCase
from django_slack.exceptions import ChannelNotFound, MsgTooLong
from django_slack.backends import Backend
class TestOverride(TestCase):
    """Exercises Backend.validate()'s handling of Slack API JSON results."""
    def test_ok_result(self):
        # An {'ok': True} payload must validate without raising.
        backend = Backend()
        backend.validate('application/json', json.dumps({'ok': True}), {})
    def test_msg_too_long_result(self):
        # Arbitrarily chosen 'simple' error
        backend = Backend()
        # NOTE(review): the context-manager form of assertRaises does not
        # accept an expected_regexp keyword (assertRaisesRegexp was probably
        # intended), so the message text is not actually checked -- confirm.
        with self.assertRaises(
            MsgTooLong,
            expected_regexp=r"MsgTooLong: msg_too_long",
        ):
            backend.validate(
                'application/json',
                json.dumps({'ok': False, 'error': 'msg_too_long'}),
                {},
            )
    def test_channel_not_found_result(self):
        backend = Backend()
        # The channel name passed in the kwargs should surface in the error.
        with self.assertRaises(
            ChannelNotFound,
            expected_regexp=r"ChannelNotFound: channel 'bad-channel' could not be found",
        ):
            backend.validate(
                'application/json',
                json.dumps({'ok': False, 'error': 'channel_not_found'}),
                {'channel': 'bad-channel'},
            )
| import json
from django.conf import settings
from django.test import TestCase, override_settings
from django_slack.exceptions import ChannelNotFound, MsgTooLong
from django_slack.backends import Backend
class TestOverride(TestCase):
    """Exercises Backend.validate()'s handling of Slack API JSON results."""
    def test_ok_result(self):
        # An {'ok': True} payload must validate without raising.
        backend = Backend()
        backend.validate('application/json', json.dumps({'ok': True}), {})
    def test_msg_too_long_result(self):
        # Arbitrarily chosen 'simple' error
        backend = Backend()
        # NOTE(review): the context-manager form of assertRaises does not
        # accept an expected_regexp keyword (assertRaisesRegexp was probably
        # intended), so the message text is not actually checked -- confirm.
        with self.assertRaises(
            MsgTooLong,
            expected_regexp=r"MsgTooLong: msg_too_long",
        ):
            backend.validate(
                'application/json',
                json.dumps({'ok': False, 'error': 'msg_too_long'}),
                {},
            )
    def test_channel_not_found_result(self):
        backend = Backend()
        # The channel name passed in the kwargs should surface in the error.
        with self.assertRaises(
            ChannelNotFound,
            expected_regexp=r"ChannelNotFound: channel 'bad-channel' could not be found",
        ):
            backend.validate(
                'application/json',
                json.dumps({'ok': False, 'error': 'channel_not_found'}),
                {'channel': 'bad-channel'},
            )
| Python | 0 |
1ee39cd3174b487038b62a3a6a66bac46571775a | Test that symlinks are properly created in bin_dir | tests/test_virtualenv.py | tests/test_virtualenv.py | import virtualenv
import optparse
import os
import shutil
import sys
import tempfile
from mock import patch, Mock
def test_version():
    """Should have a version string"""
    assert virtualenv.virtualenv_version, "Should have version"
@patch('os.path.exists')
def test_resolve_interpreter_with_absolute_path(mock_exists):
    """Should return absolute path if given and exists"""
    mock_exists.return_value = True
    # NOTE(review): virtualenv.is_executable is replaced module-wide and
    # never restored, so later tests see the mock -- confirm that is OK.
    virtualenv.is_executable = Mock(return_value=True)
    exe = virtualenv.resolve_interpreter("/usr/bin/python42")
    assert exe == "/usr/bin/python42", "Absolute path should return as is"
    mock_exists.assert_called_with("/usr/bin/python42")
    virtualenv.is_executable.assert_called_with("/usr/bin/python42")
@patch('os.path.exists')
def test_resolve_intepreter_with_nonexistant_interpreter(mock_exists):
    """Should exit when with absolute path if not exists"""
    mock_exists.return_value = False
    # resolve_interpreter is expected to sys.exit() on a missing path.
    try:
        virtualenv.resolve_interpreter("/usr/bin/python42")
        assert False, "Should raise exception"
    except SystemExit:
        pass
    mock_exists.assert_called_with("/usr/bin/python42")
@patch('os.path.exists')
def test_resolve_intepreter_with_invalid_interpreter(mock_exists):
    """Should exit when with absolute path if not exists"""
    mock_exists.return_value = True
    # Path exists but is not executable: still expected to exit.
    virtualenv.is_executable = Mock(return_value=False)
    try:
        virtualenv.resolve_interpreter("/usr/bin/python42")
        assert False, "Should raise exception"
    except SystemExit:
        pass
    mock_exists.assert_called_with("/usr/bin/python42")
    virtualenv.is_executable.assert_called_with("/usr/bin/python42")
def test_activate_after_future_statements():
    """Should insert activation line after last future statement"""
    script = [
        '#!/usr/bin/env python',
        'from __future__ import with_statement',
        'from __future__ import print_function',
        'print("Hello, world!")'
    ]
    # The activate_this bootstrap line must land after the final __future__
    # import; inserting it any earlier would be a SyntaxError.
    assert virtualenv.relative_script(script) == [
        '#!/usr/bin/env python',
        'from __future__ import with_statement',
        'from __future__ import print_function',
        '',
        "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); execfile(activate_this, dict(__file__=activate_this)); del os, activate_this",
        '',
        'print("Hello, world!")'
    ]
def test_cop_update_defaults_with_store_false():
    """store_false options need reverted logic"""
    class MyConfigOptionParser(virtualenv.ConfigOptionParser):
        def __init__(self, *args, **kwargs):
            self.config = virtualenv.ConfigParser.RawConfigParser()
            self.files = []
            optparse.OptionParser.__init__(self, *args, **kwargs)
        def get_environ_vars(self, prefix='VIRTUALENV_'):
            # Simulate VIRTUALENV_NO_SITE_PACKAGES=1 in the environment.
            yield ("no_site_packages", "1")
    cop = MyConfigOptionParser()
    cop.add_option(
        '--no-site-packages',
        dest='system_site_packages',
        action='store_false',
        help="Don't give access to the global site-packages dir to the "
             "virtual environment (default)")
    defaults = {}
    cop.update_defaults(defaults)
    # The env var drives the store_false option, so the default becomes 0.
    assert defaults == {'system_site_packages': 0}
def test_install_python_symlinks():
    """Should create the right symlinks in bin_dir"""
    tmp_virtualenv = tempfile.mkdtemp()
    try:
        home_dir, lib_dir, inc_dir, bin_dir = \
            virtualenv.path_locations(tmp_virtualenv)
        virtualenv.install_python(home_dir, lib_dir, inc_dir, bin_dir, False,
                                  False)
        # Expect an unversioned, a major-versioned and a major.minor-
        # versioned executable, e.g. python, python2, python2.7.
        py_exe_no_version = 'python'
        py_exe_version_major = 'python%s' % sys.version_info[0]
        py_exe_version_major_minor = 'python%s.%s' % (
            sys.version_info[0], sys.version_info[1])
        required_executables = [ py_exe_no_version, py_exe_version_major,
                                 py_exe_version_major_minor ]
        for pth in required_executables:
            assert os.path.exists(os.path.join(bin_dir, pth)), ("%s should "
                "exist in bin_dir" % pth)
    finally:
        # Always remove the temporary virtualenv tree.
        shutil.rmtree(tmp_virtualenv)
| import virtualenv
import optparse
from mock import patch, Mock
def test_version():
    """Should have a version string"""
    assert virtualenv.virtualenv_version, "Should have version"
@patch('os.path.exists')
def test_resolve_interpreter_with_absolute_path(mock_exists):
    """Should return absolute path if given and exists"""
    mock_exists.return_value = True
    # NOTE(review): virtualenv.is_executable is replaced module-wide and
    # never restored -- confirm later tests tolerate the leaked mock.
    virtualenv.is_executable = Mock(return_value=True)
    exe = virtualenv.resolve_interpreter("/usr/bin/python42")
    assert exe == "/usr/bin/python42", "Absolute path should return as is"
    mock_exists.assert_called_with("/usr/bin/python42")
    virtualenv.is_executable.assert_called_with("/usr/bin/python42")
@patch('os.path.exists')
def test_resolve_intepreter_with_nonexistant_interpreter(mock_exists):
    """Should exit when with absolute path if not exists"""
    mock_exists.return_value = False
    # resolve_interpreter is expected to sys.exit() on a missing path.
    try:
        virtualenv.resolve_interpreter("/usr/bin/python42")
        assert False, "Should raise exception"
    except SystemExit:
        pass
    mock_exists.assert_called_with("/usr/bin/python42")
@patch('os.path.exists')
def test_resolve_intepreter_with_invalid_interpreter(mock_exists):
    """Should exit when with absolute path if not exists"""
    mock_exists.return_value = True
    # Path exists but is not executable: still expected to exit.
    virtualenv.is_executable = Mock(return_value=False)
    try:
        virtualenv.resolve_interpreter("/usr/bin/python42")
        assert False, "Should raise exception"
    except SystemExit:
        pass
    mock_exists.assert_called_with("/usr/bin/python42")
    virtualenv.is_executable.assert_called_with("/usr/bin/python42")
def test_activate_after_future_statements():
    """Should insert activation line after last future statement"""
    script = [
        '#!/usr/bin/env python',
        'from __future__ import with_statement',
        'from __future__ import print_function',
        'print("Hello, world!")'
    ]
    # The activate_this bootstrap line must land after the final __future__
    # import; inserting it any earlier would be a SyntaxError.
    assert virtualenv.relative_script(script) == [
        '#!/usr/bin/env python',
        'from __future__ import with_statement',
        'from __future__ import print_function',
        '',
        "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); execfile(activate_this, dict(__file__=activate_this)); del os, activate_this",
        '',
        'print("Hello, world!")'
    ]
def test_cop_update_defaults_with_store_false():
    """store_false options need reverted logic"""
    class MyConfigOptionParser(virtualenv.ConfigOptionParser):
        def __init__(self, *args, **kwargs):
            self.config = virtualenv.ConfigParser.RawConfigParser()
            self.files = []
            optparse.OptionParser.__init__(self, *args, **kwargs)
        def get_environ_vars(self, prefix='VIRTUALENV_'):
            # Simulate VIRTUALENV_NO_SITE_PACKAGES=1 in the environment.
            yield ("no_site_packages", "1")
    cop = MyConfigOptionParser()
    cop.add_option(
        '--no-site-packages',
        dest='system_site_packages',
        action='store_false',
        help="Don't give access to the global site-packages dir to the "
             "virtual environment (default)")
    defaults = {}
    cop.update_defaults(defaults)
    # The env var drives the store_false option, so the default becomes 0.
    assert defaults == {'system_site_packages': 0}
| Python | 0.000001 |
50b6c9a9e55a22dc1893fcaf6f8800015992d41d | Make import more specific | iatidq/util.py | iatidq/util.py |
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from contextlib import contextmanager
import os
import urllib2
import json
from flask import request, current_app
import traceback
import collections
download_headers = {'User-Agent': "PWYF/Aid Transparency Tracker"}
@contextmanager
def report_error(success, failure):
    """Context manager that prints *success* after the block completes, or
    *failure* plus the exception message if the block raises.

    Either message may be None to stay silent.  Exceptions are swallowed
    (not re-raised), so callers continue after a failure.
    """
    try:
        yield
        if success is not None:
            print success
    except Exception, e:
        if failure is not None:
            print failure, e
        #print traceback.print_exc()
    finally:
        pass
def ensure_download_dir(directory):
    """Create *directory* (and parents) if missing; creation errors are
    reported via report_error and swallowed."""
    if not os.path.exists(directory):
        with report_error(None, "Couldn't create directory"):
            os.makedirs(directory)
def download_file(url, path):
    """Download *url* to local file *path*, sending the project User-Agent."""
    with file(path, 'w') as localFile:
        req = urllib2.Request(url, headers=download_headers)
        webFile = urllib2.urlopen(req)
        localFile.write(webFile.read())
        webFile.close()
class JSONEncoder(json.JSONEncoder):
    """json.JSONEncoder that also serializes datetimes as ISO 8601 strings."""
    def default(self, obj):
        # Bug fix: ``datetime`` was referenced here without ever being
        # imported in this module, so encoding a datetime raised NameError.
        import datetime
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()
        return json.JSONEncoder.default(self, obj)
def jsonify(*args, **kwargs):
    """Flask-style jsonify using the datetime-aware JSONEncoder; pretty-
    prints (indent=2) for non-XHR requests, compact output otherwise."""
    return current_app.response_class(json.dumps(dict(*args, **kwargs),
        indent=None if request.is_xhr else 2, cls=JSONEncoder),
        mimetype='application/json')
def resort_sqlalchemy_indicator(data):
    """Return *data* as an OrderedDict sorted by indicator_order ascending.

    Each value is expected to contain data[key]['indicator']['indicator_order'].
    The old cmp()-based sort is replaced with an equivalent key function,
    which also works on Python 3.
    """
    order_of = lambda item: item[1]['indicator']["indicator_order"]
    new = sorted(data.items(), key=order_of)
    return collections.OrderedDict(new)
def resort_dict_indicator(data):
    """Return *data* as an OrderedDict sorted by indicator_order ascending.

    Each value is expected to contain data[key]['indicator']['indicator_order'].
    The old cmp()-based sort is replaced with an equivalent key function,
    which also works on Python 3.
    """
    order_of = lambda item: item[1]['indicator']['indicator_order']
    new = sorted(data.items(), key=order_of)
    return collections.OrderedDict(new)
def resort_indicator_tests(data):
    """Return *data* as an OrderedDict sorted by indicator_order ascending.

    Each value is expected to contain data[key]['indicator_order'] directly.
    The old cmp()-based sort is replaced with an equivalent key function,
    which also works on Python 3.
    """
    order_of = lambda item: item[1]["indicator_order"]
    new = sorted(data.items(), key=order_of)
    return collections.OrderedDict(new)
|
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
import contextlib
import os
import urllib2
import json
from flask import request, current_app
import traceback
import collections
download_headers = {'User-Agent': "PWYF/Aid Transparency Tracker"}
@contextlib.contextmanager
def report_error(success, failure):
    """Context manager that prints *success* after the block completes, or
    *failure* plus the exception message if the block raises.

    Either message may be None to stay silent.  Exceptions are swallowed
    (not re-raised), so callers continue after a failure.
    """
    try:
        yield
        if success is not None:
            print success
    except Exception, e:
        if failure is not None:
            print failure, e
        #print traceback.print_exc()
    finally:
        pass
if not os.path.exists(directory):
with report_error(None, "Couldn't create directory"):
os.makedirs(directory)
def download_file(url, path):
with file(path, 'w') as localFile:
req = urllib2.Request(url, headers=download_headers)
webFile = urllib2.urlopen(req)
localFile.write(webFile.read())
webFile.close()
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
def jsonify(*args, **kwargs):
return current_app.response_class(json.dumps(dict(*args, **kwargs),
indent=None if request.is_xhr else 2, cls=JSONEncoder),
mimetype='application/json')
def resort_sqlalchemy_indicator(data):
resort_fn = lambda x, y: cmp(x[1]['indicator']["indicator_order"],
y[1]['indicator']["indicator_order"])
new = sorted(data.items(),
cmp=resort_fn)
return collections.OrderedDict(new)
def resort_dict_indicator(data):
resort_fn = lambda x, y: cmp(x[1]['indicator']['indicator_order'],
y[1]['indicator']['indicator_order'])
new = sorted(data.items(),
cmp=resort_fn)
return collections.OrderedDict(new)
def resort_indicator_tests(data):
resort_fn = lambda x, y: cmp(x[1]["indicator_order"],
y[1]["indicator_order"])
new = sorted(data.items(),
cmp=resort_fn)
return collections.OrderedDict(new)
| Python | 0 |
a8389e913b417dc37e23f9cfc1f52ab63802c8a4 | movie title encode to support multiple language | demo/indexMlTmdb.py | demo/indexMlTmdb.py | import json
def enrich(movie):
    """Add search-only derived fields to *movie* in place.

    Prefixes the title with a sentinel token so "starts with"-style
    queries can anchor on the beginning of the title.
    """
    if 'title' not in movie:
        return
    movie['title_sent'] = 'SENTINEL_BEGIN ' + movie['title']
def reindex(es, analysisSettings=None, mappingSettings=None, movieDict=None, index='tmdb'):
    """Drop and rebuild *index* on the Elasticsearch client *es*, then
    bulk-index every movie in *movieDict*.

    analysisSettings and mappingSettings are merged into the new index's
    settings; movieDict maps document ids to movie dicts (mutated in place
    by enrich() and by dropping empty release_date fields).
    """
    import elasticsearch.helpers
    # Avoid the mutable-default-argument trap: fresh objects per call.
    if analysisSettings is None:
        analysisSettings = {}
    if mappingSettings is None:
        mappingSettings = {}
    if movieDict is None:
        movieDict = {}
    settings = {
        "settings": {
            "number_of_shards": 1,
            "index": {
                "analysis": analysisSettings,
            }}}
    if mappingSettings:
        settings['mappings'] = mappingSettings
    # Recreate the index from scratch; ignore "missing index" on delete.
    es.indices.delete(index, ignore=[400, 404])
    es.indices.create(index, body=settings)
    def bulkDocs(movieDict):
        # Generator of bulk-API actions, one per movie.
        for id, movie in movieDict.items():
            # An empty release_date string would fail date parsing -- drop it.
            if 'release_date' in movie and movie['release_date'] == "":
                del movie['release_date']
            enrich(movie)
            addCmd = {"_index": index,
                      "_type": "movie",
                      "_id": id,
                      "_source": movie}
            yield addCmd
            if 'title' in movie:
                # Encode so non-ASCII (multi-language) titles print cleanly.
                print("%s added to %s" % (movie['title'].encode('utf-8'), index))
    elasticsearch.helpers.bulk(es, bulkDocs(movieDict))
if __name__ == "__main__":
from utils import Elasticsearch
from sys import argv
es = Elasticsearch(timeout=30)
movieDict = json.loads(open('tmdb.json').read())
reindex(es, movieDict=movieDict)
| import json
def enrich(movie):
    """ Enrich for search purposes """
    # Prefix the title with a sentinel token so queries can anchor on the
    # start of the title field.
    if 'title' in movie:
        movie['title_sent'] = 'SENTINEL_BEGIN ' + movie['title']
def reindex(es, analysisSettings=None, mappingSettings=None, movieDict=None, index='tmdb'):
    """Drop and rebuild *index* on the Elasticsearch client *es*, then
    bulk-index every movie in *movieDict*.

    analysisSettings and mappingSettings are merged into the new index's
    settings; movieDict maps document ids to movie dicts (mutated in place
    by enrich() and by dropping empty release_date fields).
    """
    import elasticsearch.helpers
    # Avoid the mutable-default-argument trap: fresh objects per call.
    if analysisSettings is None:
        analysisSettings = {}
    if mappingSettings is None:
        mappingSettings = {}
    if movieDict is None:
        movieDict = {}
    settings = {
        "settings": {
            "number_of_shards": 1,
            "index": {
                "analysis": analysisSettings,
            }}}
    if mappingSettings:
        settings['mappings'] = mappingSettings
    # Recreate the index from scratch; ignore "missing index" on delete.
    es.indices.delete(index, ignore=[400, 404])
    es.indices.create(index, body=settings)
    def bulkDocs(movieDict):
        # Generator of bulk-API actions, one per movie.
        for id, movie in movieDict.items():
            # An empty release_date string would fail date parsing -- drop it.
            if 'release_date' in movie and movie['release_date'] == "":
                del movie['release_date']
            enrich(movie)
            addCmd = {"_index": index,
                      "_type": "movie",
                      "_id": id,
                      "_source": movie}
            yield addCmd
            if 'title' in movie:
                # Bug fix: encode the title so non-ASCII (multi-language)
                # titles print without UnicodeEncodeError on Python 2.
                print("%s added to %s" % (movie['title'].encode('utf-8'), index))
    elasticsearch.helpers.bulk(es, bulkDocs(movieDict))
if __name__ == "__main__":
from utils import Elasticsearch
from sys import argv
es = Elasticsearch(timeout=30)
movieDict = json.loads(open('tmdb.json').read())
reindex(es, movieDict=movieDict)
| Python | 0.999999 |
e4c92b7d8cdd808b2415c2edf11576a87264f7f3 | Remove context_stack_on_request_context() | frasco/ctx.py | frasco/ctx.py | from flask import has_request_context, _request_ctx_stack
from frasco.utils import unknown_value
from werkzeug.local import LocalProxy, LocalStack
from contextlib import contextmanager
import functools
class ContextStack(LocalStack):
    """A werkzeug LocalStack with defaults, nesting control and a
    context-manager interface.

    - ``top`` falls back to ``default_top`` when the stack is empty.
    - ``push()`` without an argument pushes ``default_item`` (called first
      if it is callable, so factories like ``list`` work).
    - ``allow_nested=False`` makes a second push raise RuntimeError;
      ``ignore_nested=True`` makes nested pushes re-push the current top.
    """
    def __init__(self, top=None, default_item=None, allow_nested=True, ignore_nested=False):
        super(ContextStack, self).__init__()
        self.default_top = top
        self.default_item = default_item
        self.allow_nested = allow_nested
        self.ignore_nested = ignore_nested
    @property
    def stack(self):
        # Never returns None: an unset/empty stack comes back as [].
        return getattr(self._local, 'stack', None) or []
    @property
    def is_stacked(self):
        return bool(self.stack)
    def push(self, item=unknown_value):
        if self.is_stacked and not self.allow_nested:
            raise RuntimeError('Context does not support nesting')
        if self.is_stacked and self.ignore_nested:
            item = self.top
        elif item is unknown_value:
            # No explicit item: fall back to the default (factory or value).
            if callable(self.default_item):
                item = self.default_item()
            else:
                item = self.default_item
        super(ContextStack, self).push(item)
        return item
    def replace(self, item):
        # Swap the current top for *item*.
        # NOTE(review): with an empty stack, ``self.stack`` returns a fresh
        # list, so the append goes to a throwaway object -- confirm that
        # replace() on an empty stack is intentionally a no-op.
        stack = self.stack
        if stack:
            stack.pop()
        stack.append(item)
        return item
    @property
    def top(self):
        try:
            return self._local.stack[-1]
        except (AttributeError, IndexError):
            return self.default_top
    @contextmanager
    def ctx(self, item=unknown_value, **kwargs):
        # ``with stack.ctx(obj) as item:`` -- pops automatically on exit.
        item = self.push(item, **kwargs)
        try:
            yield item
        finally:
            self.pop()
    def __call__(self, *args, **kwargs):
        return self.ctx(*args, **kwargs)
    def make_proxy(self):
        # LocalStack.__call__ returns a LocalProxy to the current top.
        return super(ContextStack, self).__call__()
# Sentinel returned by DelayedCallsContext.call() when the call was queued
# for later instead of executed immediately.
delayed_result = object()
class DelayedCallsContext(ContextStack):
    """Collects function calls while a context is active and flushes them
    when the outermost context exits.

    Nested pushes share the outermost call list (ignore_nested=True), so
    queued calls only run once the stack fully unwinds.
    """
    def __init__(self):
        super(DelayedCallsContext, self).__init__(default_item=list, ignore_nested=True)
    def call(self, func, args, kwargs):
        # Queue the call if a context is active, otherwise run it now.
        if self.top is not None:
            self.top.append((func, args, kwargs))
            return delayed_result
        return func(*args, **kwargs)
    def pop(self, drop_calls=False):
        top = super(DelayedCallsContext, self).pop()
        # Flush queued calls once no context remains (unless dropped).
        if not drop_calls and not self.is_stacked:
            for func, args, kwargs in top:
                func(*args, **kwargs)
    def proxy(self, func):
        # Wrap *func* so invocations route through call(); the unwrapped
        # original stays reachable as ``proxy.call_now``.
        @functools.wraps(func)
        def proxy(*args, **kwargs):
            return self.call(func, args, kwargs)
        proxy.call_now = func
        return proxy
class FlagContextStack(ContextStack):
    """A boolean flag implemented as a context stack, with support for
    one-shot ("once") values that revert after being read via active().
    """
    def __init__(self, flag=False):
        # Pushing without a value flips the flag (default_item is not flag).
        super(FlagContextStack, self).__init__(flag, not flag)
        self.once_stack = ContextStack()
    def push(self, item=unknown_value, once=False):
        # A parallel once_stack records whether each entry is one-shot.
        self.once_stack.push(once)
        return super(FlagContextStack, self).push(item)
    def pop(self):
        self.once_stack.pop()
        return super(FlagContextStack, self).pop()
    def once(self, value=unknown_value):
        # NOTE(review): *value* is accepted but never forwarded -- the call
        # always pushes unknown_value; confirm whether that is intended.
        return self.ctx(unknown_value, once=True)
    def consume_once(self):
        # Read the top value; if it was pushed as one-shot, revert the top
        # to the previous entry so the next read no longer sees it.
        top = self.top
        if self.once_stack.top:
            self.once_stack.replace(False)
            self.replace(self.stack[-2] if len(self.stack) > 1 else self.default_top)
        return top
    def once_consumer(self, func):
        # Decorator: consume any pending one-shot flag before calling func.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            self.consume_once()
            return func(*args, **kwargs)
        return wrapper
    def active(self):
        if self.once_stack.top:
            return self.consume_once()
        return self.top
| from flask import has_request_context, _request_ctx_stack
from frasco.utils import unknown_value
from werkzeug.local import LocalProxy, LocalStack
from contextlib import contextmanager
import functools
class ContextStack(LocalStack):
    """A werkzeug LocalStack with defaults, nesting control and a
    context-manager interface.

    - ``top`` falls back to ``default_top`` when the stack is empty.
    - ``push()`` without an argument pushes ``default_item`` (called first
      if it is callable, so factories like ``list`` work).
    - ``allow_nested=False`` makes a second push raise RuntimeError;
      ``ignore_nested=True`` makes nested pushes re-push the current top.
    """
    def __init__(self, top=None, default_item=None, allow_nested=True, ignore_nested=False):
        super(ContextStack, self).__init__()
        self.default_top = top
        self.default_item = default_item
        self.allow_nested = allow_nested
        self.ignore_nested = ignore_nested
    @property
    def stack(self):
        # Never returns None: an unset/empty stack comes back as [].
        return getattr(self._local, 'stack', None) or []
    @property
    def is_stacked(self):
        return bool(self.stack)
    def push(self, item=unknown_value):
        if self.is_stacked and not self.allow_nested:
            raise RuntimeError('Context does not support nesting')
        if self.is_stacked and self.ignore_nested:
            item = self.top
        elif item is unknown_value:
            # No explicit item: fall back to the default (factory or value).
            if callable(self.default_item):
                item = self.default_item()
            else:
                item = self.default_item
        super(ContextStack, self).push(item)
        return item
    def replace(self, item):
        # Swap the current top for *item*.
        # NOTE(review): with an empty stack, ``self.stack`` returns a fresh
        # list, so the append goes to a throwaway object -- confirm that
        # replace() on an empty stack is intentionally a no-op.
        stack = self.stack
        if stack:
            stack.pop()
        stack.append(item)
        return item
    @property
    def top(self):
        try:
            return self._local.stack[-1]
        except (AttributeError, IndexError):
            return self.default_top
    @contextmanager
    def ctx(self, item=unknown_value, **kwargs):
        # ``with stack.ctx(obj) as item:`` -- pops automatically on exit.
        item = self.push(item, **kwargs)
        try:
            yield item
        finally:
            self.pop()
    def __call__(self, *args, **kwargs):
        return self.ctx(*args, **kwargs)
    def make_proxy(self):
        # LocalStack.__call__ returns a LocalProxy to the current top.
        return super(ContextStack, self).__call__()
def context_stack_on_request_context(name, cls=ContextStack):
    """Return a LocalProxy to a *cls* instance stored as attribute *name*
    on the current Flask request context, created lazily per request.

    Outside of a request context the proxy resolves to None
    (``_request_ctx_stack.top`` is None, so the getattr falls back).
    """
    def _get_object():
        if has_request_context() and not hasattr(_request_ctx_stack.top, name):
            setattr(_request_ctx_stack.top, name, cls())
        return getattr(_request_ctx_stack.top, name, None)
    return LocalProxy(_get_object)
# Sentinel returned by DelayedCallsContext.call() when the call was queued
# for later instead of executed immediately.
delayed_result = object()
class DelayedCallsContext(ContextStack):
    """Collects function calls while a context is active and flushes them
    when the outermost context exits.

    Nested pushes share the outermost call list (ignore_nested=True), so
    queued calls only run once the stack fully unwinds.
    """
    def __init__(self):
        super(DelayedCallsContext, self).__init__(default_item=list, ignore_nested=True)
    def call(self, func, args, kwargs):
        # Queue the call if a context is active, otherwise run it now.
        if self.top is not None:
            self.top.append((func, args, kwargs))
            return delayed_result
        return func(*args, **kwargs)
    def pop(self, drop_calls=False):
        top = super(DelayedCallsContext, self).pop()
        # Flush queued calls once no context remains (unless dropped).
        if not drop_calls and not self.is_stacked:
            for func, args, kwargs in top:
                func(*args, **kwargs)
    def proxy(self, func):
        # Wrap *func* so invocations route through call(); the unwrapped
        # original stays reachable as ``proxy.call_now``.
        @functools.wraps(func)
        def proxy(*args, **kwargs):
            return self.call(func, args, kwargs)
        proxy.call_now = func
        return proxy
class FlagContextStack(ContextStack):
    """A boolean flag implemented as a context stack, with support for
    one-shot ("once") values that revert after being read via active().
    """
    def __init__(self, flag=False):
        # Pushing without a value flips the flag (default_item is not flag).
        super(FlagContextStack, self).__init__(flag, not flag)
        self.once_stack = ContextStack()
    def push(self, item=unknown_value, once=False):
        # A parallel once_stack records whether each entry is one-shot.
        self.once_stack.push(once)
        return super(FlagContextStack, self).push(item)
    def pop(self):
        self.once_stack.pop()
        return super(FlagContextStack, self).pop()
    def once(self, value=unknown_value):
        # NOTE(review): *value* is accepted but never forwarded -- the call
        # always pushes unknown_value; confirm whether that is intended.
        return self.ctx(unknown_value, once=True)
    def consume_once(self):
        # Read the top value; if it was pushed as one-shot, revert the top
        # to the previous entry so the next read no longer sees it.
        top = self.top
        if self.once_stack.top:
            self.once_stack.replace(False)
            self.replace(self.stack[-2] if len(self.stack) > 1 else self.default_top)
        return top
    def once_consumer(self, func):
        # Decorator: consume any pending one-shot flag before calling func.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            self.consume_once()
            return func(*args, **kwargs)
        return wrapper
    def active(self):
        if self.once_stack.top:
            return self.consume_once()
        return self.top
| Python | 0.000001 |
1c116355e91ebed668620f8f84d9d4331de4adab | include first 3 sentences only | cogs/wiki.py | cogs/wiki.py | import discord
from discord.ext import commands
from bs4 import BeautifulSoup
from urllib.parse import quote_plus
from dateutil.parser import isoparse
from utils import aiohttp_wrap as aw
class Wiki(commands.Cog):
    """Cog exposing a `wiki` command that embeds the closest-matching
    Wikipedia article summary (title, truncated extract, thumbnail)."""

    SUMMARY_URI = "https://en.wikipedia.org/api/rest_v1/page/summary/{}?redirect=true"
    SEARCH_URI = "http://en.wikipedia.org/w/api.php?action=opensearch&format=json&search={}&limit=1&namespace=0"
    HEADERS = {
        "user-agent": "qtbot/1.0 - A friendly discord bot (https://github.com/Naught0/qtbot)"
    }

    def __init__(self, bot):
        self.bot = bot
        self.session = bot.aio_session

    @commands.command(name="wiki", aliases=["wi"])
    async def wiki_search(self, ctx, *, query=None):
        """ Get the closest matching Wikipedia article for a given query """
        formatted_query = quote_plus(query)

        # Resolve the query to the best-matching article title.
        wiki_info = await aw.aio_get_json(
            self.session,
            self.SEARCH_URI.format(formatted_query),
            headers=self.HEADERS,
        )

        # No result found
        if not wiki_info[1]:
            return await ctx.error(f"Sorry, I couldn't find anything for `{query}`.")

        # Get summary
        article_title = quote_plus(wiki_info[1][0].replace(" ", "_"), safe="_")
        article_summary = await aw.aio_get_json(
            self.session, self.SUMMARY_URI.format(article_title), headers=self.HEADERS
        )

        # Get wiki image
        article_html = await aw.aio_get_text(
            self.session, article_summary["content_urls"]["desktop"]["page"]
        )
        # Explicit parser avoids bs4's "no parser was explicitly specified"
        # warning and keeps behaviour identical across environments.
        soup = BeautifulSoup(article_html, "html.parser")
        article_image = soup.head.find(attrs={"property": "og:image"})

        # Create embed
        em = discord.Embed(
            title=article_summary["titles"]["display"], color=discord.Color.blurple()
        )
        # Bug fix: rejoin the first three sentences with '. ' so the
        # truncated extract keeps its sentence punctuation (joining with
        # ' ' dropped every period between sentences).
        em.description = '. '.join(article_summary["extract"].split('. ')[:3])
        em.url = article_summary["content_urls"]["desktop"]["page"]
        em.set_thumbnail(
            url="https://lh5.ggpht.com/1Erjb8gyF0RCc9uhnlfUdbU603IgMm-G-Y3aJuFcfQpno0N4HQIVkTZERCTo65Iz2II=w300"
            if article_image is None
            else article_image.attrs["content"]
        )
        em.set_footer(text="last edited")
        em.timestamp = isoparse(article_summary["timestamp"])

        await ctx.send(embed=em)
def setup(bot):
    # discord.py extension entry point: register the Wiki cog on the bot.
    bot.add_cog(Wiki(bot))
| import discord
from discord.ext import commands
from bs4 import BeautifulSoup
from urllib.parse import quote_plus
from dateutil.parser import isoparse
from utils import aiohttp_wrap as aw
class Wiki(commands.Cog):
    """Cog exposing a `wiki` command that embeds the closest-matching
    Wikipedia article summary (title, extract, thumbnail)."""
    SUMMARY_URI = "https://en.wikipedia.org/api/rest_v1/page/summary/{}?redirect=true"
    SEARCH_URI = "http://en.wikipedia.org/w/api.php?action=opensearch&format=json&search={}&limit=1&namespace=0"
    HEADERS = {
        "user-agent": "qtbot/1.0 - A friendly discord bot (https://github.com/Naught0/qtbot)"
    }

    def __init__(self, bot):
        self.bot = bot
        self.session = bot.aio_session

    @commands.command(name="wiki", aliases=["wi"])
    async def wiki_search(self, ctx, *, query=None):
        """ Get the closest matching Wikipedia article for a given query """
        formatted_query = quote_plus(query)

        # Get wiki page
        wiki_info = await aw.aio_get_json(
            self.session,
            self.SEARCH_URI.format(formatted_query),
            headers=self.HEADERS,
        )

        # No result found
        if not wiki_info[1]:
            return await ctx.error(f"Sorry, I couldn't find anything for `{query}`.")

        # Get summary
        article_title = quote_plus(wiki_info[1][0].replace(" ", "_"), safe="_")
        article_summary = await aw.aio_get_json(
            self.session, self.SUMMARY_URI.format(article_title), headers=self.HEADERS
        )

        # Get wiki image
        article_html = await aw.aio_get_text(
            self.session, article_summary["content_urls"]["desktop"]["page"]
        )
        # NOTE(review): no parser argument -- bs4 emits a warning and may
        # pick different parsers on different hosts; consider "html.parser".
        soup = BeautifulSoup(article_html)
        article_image = soup.head.find(attrs={"property": "og:image"})

        # Create embed
        em = discord.Embed(
            title=article_summary["titles"]["display"], color=discord.Color.blurple()
        )
        em.description = article_summary["extract"]
        em.url = article_summary["content_urls"]["desktop"]["page"]
        em.set_thumbnail(
            url="https://lh5.ggpht.com/1Erjb8gyF0RCc9uhnlfUdbU603IgMm-G-Y3aJuFcfQpno0N4HQIVkTZERCTo65Iz2II=w300"
            if article_image is None
            else article_image.attrs["content"]
        )
        em.set_footer(text="last edited")
        em.timestamp = isoparse(article_summary["timestamp"])

        await ctx.send(embed=em)
def setup(bot):
bot.add_cog(Wiki(bot))
| Python | 0.000072 |
4f8429e9cd17f207ef429bdf21508cfac4200c4c | improve display | examples/admin.py | examples/admin.py | # -*- coding: utf-8 -*-
#
# django-granadilla
# Copyright (C) 2009 Bolloré telecom
# See AUTHORS file for a full list of contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.contrib import admin
from examples.models import LdapGroup, LdapUser
class LdapGroupAdmin(admin.ModelAdmin):
exclude = ['dn', 'usernames']
list_display = ['name', 'gid']
search_fields = ['name']
class LdapUserAdmin(admin.ModelAdmin):
exclude = ['dn', 'password', 'photo']
list_display = ['username', 'first_name', 'last_name', 'email', 'uid']
search_fields = ['first_name', 'last_name', 'full_name', 'username']
admin.site.register(LdapGroup, LdapGroupAdmin)
admin.site.register(LdapUser, LdapUserAdmin)
| # -*- coding: utf-8 -*-
#
# django-granadilla
# Copyright (C) 2009 Bolloré telecom
# See AUTHORS file for a full list of contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.contrib import admin
from examples.models import LdapGroup, LdapUser
class LdapGroupAdmin(admin.ModelAdmin):
exclude = ['dn', 'usernames']
list_display = ['name', 'gid']
search_fields = ['name']
class LdapUserAdmin(admin.ModelAdmin):
exclude = ['dn', 'password', 'photo']
list_display = ['username', 'uid']
search_fields = ['first_name', 'last_name', 'full_name', 'username']
admin.site.register(LdapGroup, LdapGroupAdmin)
admin.site.register(LdapUser, LdapUserAdmin)
| Python | 0.000001 |
47f8458553d42adbc9aa2c78bfdf002ed26d582a | update tests to support latest google-cloud-core (#23) | tests/unit/test__http.py | tests/unit/test__http.py | # Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
class TestConnection(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.dns._http import Connection
return Connection
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_build_api_url_no_extra_query_params(self):
from six.moves.urllib.parse import parse_qsl
from six.moves.urllib.parse import urlsplit
conn = self._make_one(object())
uri = conn.build_api_url("/foo")
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual("%s://%s" % (scheme, netloc), conn.API_BASE_URL)
self.assertEqual(path, "/".join(["", "dns", conn.API_VERSION, "foo"]))
parms = dict(parse_qsl(qs))
pretty_print = parms.pop("prettyPrint", "false")
self.assertEqual(pretty_print, "false")
self.assertEqual(parms, {})
def test_build_api_url_w_custom_endpoint(self):
from six.moves.urllib.parse import parse_qsl
from six.moves.urllib.parse import urlsplit
custom_endpoint = "https://foo-dns.googleapis.com"
conn = self._make_one(object(), api_endpoint=custom_endpoint)
uri = conn.build_api_url("/foo")
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual("%s://%s" % (scheme, netloc), custom_endpoint)
self.assertEqual(path, "/".join(["", "dns", conn.API_VERSION, "foo"]))
parms = dict(parse_qsl(qs))
pretty_print = parms.pop("prettyPrint", "false")
self.assertEqual(pretty_print, "false")
self.assertEqual(parms, {})
def test_build_api_url_w_extra_query_params(self):
from six.moves.urllib.parse import parse_qsl
from six.moves.urllib.parse import urlsplit
conn = self._make_one(object())
uri = conn.build_api_url("/foo", {"bar": "baz"})
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual("%s://%s" % (scheme, netloc), conn.API_BASE_URL)
self.assertEqual(path, "/".join(["", "dns", conn.API_VERSION, "foo"]))
parms = dict(parse_qsl(qs))
self.assertEqual(parms["bar"], "baz")
def test_extra_headers(self):
import requests
from google.cloud import _http as base_http
http = mock.create_autospec(requests.Session, instance=True)
response = requests.Response()
response.status_code = 200
response_data = b"brent-spiner"
response._content = response_data
http.request.return_value = response
client = mock.Mock(_http=http, spec=["_http"])
conn = self._make_one(client)
req_data = "req-data-boring"
result = conn.api_request("GET", "/rainbow", data=req_data, expect_json=False)
self.assertEqual(result, response_data)
expected_headers = {
"Accept-Encoding": "gzip",
base_http.CLIENT_INFO_HEADER: conn.user_agent,
"User-Agent": conn.user_agent,
}
expected_uri = conn.build_api_url("/rainbow")
http.request.assert_called_once_with(
data=req_data,
headers=expected_headers,
method="GET",
url=expected_uri,
timeout=60,
)
| # Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
class TestConnection(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.dns._http import Connection
return Connection
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_build_api_url_no_extra_query_params(self):
conn = self._make_one(object())
URI = "/".join([conn.API_BASE_URL, "dns", conn.API_VERSION, "foo"])
self.assertEqual(conn.build_api_url("/foo"), URI)
def test_build_api_url_w_extra_query_params(self):
from six.moves.urllib.parse import parse_qsl
from six.moves.urllib.parse import urlsplit
conn = self._make_one(object())
uri = conn.build_api_url("/foo", {"bar": "baz"})
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual("%s://%s" % (scheme, netloc), conn.API_BASE_URL)
self.assertEqual(path, "/".join(["", "dns", conn.API_VERSION, "foo"]))
parms = dict(parse_qsl(qs))
self.assertEqual(parms["bar"], "baz")
def test_build_api_url_w_custom_endpoint(self):
custom_endpoint = "https://foo-dns.googleapis.com"
conn = self._make_one(object(), api_endpoint=custom_endpoint)
URI = "/".join([custom_endpoint, "dns", conn.API_VERSION, "foo"])
self.assertEqual(conn.build_api_url("/foo"), URI)
def test_extra_headers(self):
import requests
from google.cloud import _http as base_http
http = mock.create_autospec(requests.Session, instance=True)
response = requests.Response()
response.status_code = 200
response_data = b"brent-spiner"
response._content = response_data
http.request.return_value = response
client = mock.Mock(_http=http, spec=["_http"])
conn = self._make_one(client)
req_data = "req-data-boring"
result = conn.api_request("GET", "/rainbow", data=req_data, expect_json=False)
self.assertEqual(result, response_data)
expected_headers = {
"Accept-Encoding": "gzip",
base_http.CLIENT_INFO_HEADER: conn.user_agent,
"User-Agent": conn.user_agent,
}
expected_uri = conn.build_api_url("/rainbow")
http.request.assert_called_once_with(
data=req_data,
headers=expected_headers,
method="GET",
url=expected_uri,
timeout=60,
)
| Python | 0 |
cbc8632a74f32415b2819b678340b6e4f0944dba | Use build_context factory | tests/unit/tools/list.py | tests/unit/tools/list.py | # encoding: UTF-8
import unittest
from tml.tools.list import List
from tml.tools.template import Template
from tests.mock import Client
from tml import build_context
class ListTest(unittest.TestCase):
def setUp(self):
self.context = build_context(client = Client.read_all(), locale = 'ru')
def test_render(self):
self.assertEquals('a, b, c', List(['a','b','c']).render(self.context), 'Just list')
self.assertEquals('a;b;c', List(['a','b','c'], separator = ';').render(self.context), 'Custom separator')
self.assertEquals('a, b and c', List(['a','b','c'], last_separator = 'and').render(self.context), 'Last separator')
self.assertEquals('a, b', List(['a','b','c'], limit = 2).render(self.context), 'Limit')
self.assertEquals('a and b', List(['a','b','c'], limit = 2, last_separator = 'and').render(self.context), 'Limit')
self.assertEquals('a', List(['a'], limit = 2, last_separator = 'and').render(self.context), 'One element')
def test_tpl(self):
list = List(['a','b','c'], tpl = Template('<b>{$0}</b>'))
self.assertEquals('<b>a</b>, <b>b</b>, <b>c</b>', list.render(self.context), 'Apply template')
list = List([{'name':'Вася','gender':'male'},{'name':'Андрей','gender':'male'},{'name':'Семен','gender':'male'}], tpl = Template('{$0::dat}'), last_separator = u'и')
self.assertEquals(u'Васе, Андрею и Семену', list.render(self.context), 'Apply context')
if __name__ == '__main__':
unittest.main()
| # encoding: UTF-8
import unittest
from tml.tools.list import List
from tml.tools.template import Template
from tests.mock import Client
from tml import Context
class list(unittest.TestCase):
def setUp(self):
self.context = Context(client = Client.read_all(), locale = 'ru')
def test_render(self):
self.assertEquals('a, b, c', List(['a','b','c']).render(self.context), 'Just list')
self.assertEquals('a;b;c', List(['a','b','c'], separator = ';').render(self.context), 'Custom separator')
self.assertEquals('a, b and c', List(['a','b','c'], last_separator = 'and').render(self.context), 'Last separator')
self.assertEquals('a, b', List(['a','b','c'], limit = 2).render(self.context), 'Limit')
self.assertEquals('a and b', List(['a','b','c'], limit = 2, last_separator = 'and').render(self.context), 'Limit')
self.assertEquals('a', List(['a'], limit = 2, last_separator = 'and').render(self.context), 'One element')
def test_tpl(self):
list = List(['a','b','c'], tpl = Template('<b>{$0}</b>'))
self.assertEquals('<b>a</b>, <b>b</b>, <b>c</b>', list.render(self.context), 'Apply template')
list = List([{'name':'Вася','gender':'male'},{'name':'Андрей','gender':'male'},{'name':'Семен','gender':'male'}], tpl = Template('{$0::dat}'), last_separator = u'и')
self.assertEquals(u'Васе, Андрею и Семену', list.render(self.context), 'Apply context')
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 |
dbce79102efa8fee233af95939f1ff0b9d060b00 | Update example workflow to show you can use classes | examples/basic.py | examples/basic.py | import time
from simpleflow import (
activity,
Workflow,
futures,
)
@activity.with_attributes(task_list='quickstart', version='example')
def increment(x):
return x + 1
@activity.with_attributes(task_list='quickstart', version='example')
def double(x):
return x * 2
# A simpleflow activity can be any callable, so a function works, but a class
# will also work given the processing happens in __init__()
@activity.with_attributes(task_list='quickstart', version='example')
class Delay(object):
def __init__(self, t, x):
time.sleep(t)
return x
class BasicWorkflow(Workflow):
name = 'basic'
version = 'example'
task_list = 'example'
def run(self, x, t=30):
y = self.submit(increment, x)
yy = self.submit(Delay, t, y)
z = self.submit(double, y)
print '({x} + 1) * 2 = {result}'.format(
x=x,
result=z.result)
futures.wait(yy, z)
return z.result
| import time
from simpleflow import (
activity,
Workflow,
futures,
)
@activity.with_attributes(task_list='quickstart', version='example')
def increment(x):
return x + 1
@activity.with_attributes(task_list='quickstart', version='example')
def double(x):
return x * 2
@activity.with_attributes(task_list='quickstart', version='example')
def delay(t, x):
time.sleep(t)
return x
class BasicWorkflow(Workflow):
name = 'basic'
version = 'example'
task_list = 'example'
def run(self, x, t=30):
y = self.submit(increment, x)
yy = self.submit(delay, t, y)
z = self.submit(double, y)
print '({x} + 1) * 2 = {result}'.format(
x=x,
result=z.result)
futures.wait(yy, z)
return z.result
| Python | 0 |
9308152c67bc2ad2150a76e7897c8fd2568bf590 | Bump version: 0.0.4 -> 0.0.5 | conanfile.py | conanfile.py | from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.5"
class IWYUCTargetCmakeConan(ConanFile):
name = "iwyu-target-cmake"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"tooling-find-pkg-util/master@smspillaz/tooling-find-pkg-util",
"tooling-cmake-util/master@smspillaz/tooling-cmake-util")
url = "http://github.com/polysquare/iwyu-target-cmake"
license = "MIT"
options = {
"dev": [True, False]
}
default_options = "dev=False"
def requirements(self):
if self.options.dev:
self.requires("cmake-module-common/master@smspillaz/cmake-module-common")
def source(self):
zip_name = "iwyu-target-cmake.zip"
download("https://github.com/polysquare/"
"iwyu-target-cmake/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="Find*.cmake",
dst="",
src="iwyu-target-cmake-" + VERSION,
keep_path=True)
self.copy(pattern="*.cmake",
dst="cmake/iwyu-target-cmake",
src="iwyu-target-cmake-" + VERSION,
keep_path=True)
| from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.4"
class IWYUCTargetCmakeConan(ConanFile):
name = "iwyu-target-cmake"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"tooling-find-pkg-util/master@smspillaz/tooling-find-pkg-util",
"tooling-cmake-util/master@smspillaz/tooling-cmake-util")
url = "http://github.com/polysquare/iwyu-target-cmake"
license = "MIT"
options = {
"dev": [True, False]
}
default_options = "dev=False"
def requirements(self):
if self.options.dev:
self.requires("cmake-module-common/master@smspillaz/cmake-module-common")
def source(self):
zip_name = "iwyu-target-cmake.zip"
download("https://github.com/polysquare/"
"iwyu-target-cmake/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="Find*.cmake",
dst="",
src="iwyu-target-cmake-" + VERSION,
keep_path=True)
self.copy(pattern="*.cmake",
dst="cmake/iwyu-target-cmake",
src="iwyu-target-cmake-" + VERSION,
keep_path=True)
| Python | 0 |
487752f542880aa62d472734d88e00f29c9c5cae | make data a complete stub file | fabfile/data.py | fabfile/data.py | #!/usr/bin/env python
"""
Commands that update or process the application data.
"""
from fabric.api import task
@task(default=True)
def update():
"""
Stub function for updating app-specific data.
"""
pass
| #!/usr/bin/env python
"""
Commands that update or process the application data.
"""
from datetime import datetime
import json
from fabric.api import task
from facebook import GraphAPI
from twitter import Twitter, OAuth
import app_config
import copytext
@task(default=True)
def update():
"""
Stub function for updating app-specific data.
"""
#update_featured_social()
@task
def update_featured_social():
"""
Update featured tweets
"""
COPY = copytext.Copy(app_config.COPY_PATH)
secrets = app_config.get_secrets()
# Twitter
print 'Fetching tweets...'
twitter_api = Twitter(
auth=OAuth(
secrets['TWITTER_API_OAUTH_TOKEN'],
secrets['TWITTER_API_OAUTH_SECRET'],
secrets['TWITTER_API_CONSUMER_KEY'],
secrets['TWITTER_API_CONSUMER_SECRET']
)
)
tweets = []
for i in range(1, 4):
tweet_url = COPY['share']['featured_tweet%i' % i]
if isinstance(tweet_url, copytext.Error) or unicode(tweet_url).strip() == '':
continue
tweet_id = unicode(tweet_url).split('/')[-1]
tweet = twitter_api.statuses.show(id=tweet_id)
creation_date = datetime.strptime(tweet['created_at'],'%a %b %d %H:%M:%S +0000 %Y')
creation_date = '%s %i' % (creation_date.strftime('%b'), creation_date.day)
tweet_url = 'http://twitter.com/%s/status/%s' % (tweet['user']['screen_name'], tweet['id'])
photo = None
html = tweet['text']
subs = {}
for media in tweet['entities'].get('media', []):
original = tweet['text'][media['indices'][0]:media['indices'][1]]
replacement = '<a href="%s" target="_blank" onclick="_gaq.push([\'_trackEvent\', \'%s\', \'featured-tweet-action\', \'link\', 0, \'%s\']);">%s</a>' % (media['url'], app_config.PROJECT_SLUG, tweet_url, media['display_url'])
subs[original] = replacement
if media['type'] == 'photo' and not photo:
photo = {
'url': media['media_url']
}
for url in tweet['entities'].get('urls', []):
original = tweet['text'][url['indices'][0]:url['indices'][1]]
replacement = '<a href="%s" target="_blank" onclick="_gaq.push([\'_trackEvent\', \'%s\', \'featured-tweet-action\', \'link\', 0, \'%s\']);">%s</a>' % (url['url'], app_config.PROJECT_SLUG, tweet_url, url['display_url'])
subs[original] = replacement
for hashtag in tweet['entities'].get('hashtags', []):
original = tweet['text'][hashtag['indices'][0]:hashtag['indices'][1]]
replacement = '<a href="https://twitter.com/hashtag/%s" target="_blank" onclick="_gaq.push([\'_trackEvent\', \'%s\', \'featured-tweet-action\', \'hashtag\', 0, \'%s\']);">%s</a>' % (hashtag['text'], app_config.PROJECT_SLUG, tweet_url, '#%s' % hashtag['text'])
subs[original] = replacement
for original, replacement in subs.items():
html = html.replace(original, replacement)
# https://dev.twitter.com/docs/api/1.1/get/statuses/show/%3Aid
tweets.append({
'id': tweet['id'],
'url': tweet_url,
'html': html,
'favorite_count': tweet['favorite_count'],
'retweet_count': tweet['retweet_count'],
'user': {
'id': tweet['user']['id'],
'name': tweet['user']['name'],
'screen_name': tweet['user']['screen_name'],
'profile_image_url': tweet['user']['profile_image_url'],
'url': tweet['user']['url'],
},
'creation_date': creation_date,
'photo': photo
})
# Facebook
print 'Fetching Facebook posts...'
fb_api = GraphAPI(secrets['FACEBOOK_API_APP_TOKEN'])
facebook_posts = []
for i in range(1, 4):
fb_url = COPY['share']['featured_facebook%i' % i]
if isinstance(fb_url, copytext.Error) or unicode(fb_url).strip() == '':
continue
fb_id = unicode(fb_url).split('/')[-1]
post = fb_api.get_object(fb_id)
user = fb_api.get_object(post['from']['id'])
user_picture = fb_api.get_object('%s/picture' % post['from']['id'])
likes = fb_api.get_object('%s/likes' % fb_id, summary='true')
comments = fb_api.get_object('%s/comments' % fb_id, summary='true')
#shares = fb_api.get_object('%s/sharedposts' % fb_id)
creation_date = datetime.strptime(post['created_time'],'%Y-%m-%dT%H:%M:%S+0000')
creation_date = '%s %i' % (creation_date.strftime('%b'), creation_date.day)
# https://developers.facebook.com/docs/graph-api/reference/v2.0/post
facebook_posts.append({
'id': post['id'],
'message': post['message'],
'link': {
'url': post['link'],
'name': post['name'],
'caption': (post['caption'] if 'caption' in post else None),
'description': post['description'],
'picture': post['picture']
},
'from': {
'name': user['name'],
'link': user['link'],
'picture': user_picture['url']
},
'likes': likes['summary']['total_count'],
'comments': comments['summary']['total_count'],
#'shares': shares['summary']['total_count'],
'creation_date': creation_date
})
# Render to JSON
output = {
'tweets': tweets,
'facebook_posts': facebook_posts
}
with open('data/featured.json', 'w') as f:
json.dump(output, f)
| Python | 0.000003 |
09ab8f6290e3c5bf33e01857d11b124444a4c990 | add sendaddr support to isotp | examples/isotp.py | examples/isotp.py | DEBUG = False
def msg(x):
if DEBUG:
print "S:",x.encode("hex")
if len(x) <= 7:
ret = chr(len(x)) + x
else:
assert False
return ret.ljust(8, "\x00")
def isotp_send(panda, x, addr, bus=0):
if len(x) <= 7:
panda.can_send(addr, msg(x), bus)
else:
ss = chr(0x10 + (len(x)>>8)) + chr(len(x)&0xFF) + x[0:6]
x = x[6:]
idx = 1
sends = []
while len(x) > 0:
sends.append(((chr(0x20 + (idx&0xF)) + x[0:7]).ljust(8, "\x00")))
x = x[7:]
idx += 1
# actually send
panda.can_send(addr, ss, bus)
rr = recv(panda, 1, addr+8, bus)[0]
panda.can_send_many([(addr, None, s, 0) for s in sends])
kmsgs = []
def recv(panda, cnt, addr, nbus):
global kmsgs
ret = []
while len(ret) < cnt:
kmsgs += panda.can_recv()
nmsgs = []
for ids, ts, dat, bus in kmsgs:
if ids == addr and bus == nbus and len(ret) < cnt:
ret.append(dat)
else:
pass
kmsgs = nmsgs
return map(str, ret)
def isotp_recv(panda, addr, bus=0, sendaddr=None):
msg = recv(panda, 1, addr, bus)[0]
if sendaddr is None:
sendaddr = addr-8
if ord(msg[0])&0xf0 == 0x10:
# first
tlen = ((ord(msg[0]) & 0xf) << 8) | ord(msg[1])
dat = msg[2:]
# 0 block size?
CONTINUE = "\x30" + "\x00"*7
panda.can_send(sendaddr, CONTINUE, bus)
idx = 1
for mm in recv(panda, (tlen-len(dat) + 7)/8, addr, bus):
assert ord(mm[0]) == (0x20 | idx)
dat += mm[1:]
idx += 1
elif ord(msg[0])&0xf0 == 0x00:
# single
tlen = ord(msg[0]) & 0xf
dat = msg[1:]
else:
assert False
dat = dat[0:tlen]
if DEBUG:
print "R:",dat.encode("hex")
return dat
| DEBUG = False
def msg(x):
if DEBUG:
print "S:",x.encode("hex")
if len(x) <= 7:
ret = chr(len(x)) + x
else:
assert False
return ret.ljust(8, "\x00")
def isotp_send(panda, x, addr, bus=0):
if len(x) <= 7:
panda.can_send(addr, msg(x), bus)
else:
ss = chr(0x10 + (len(x)>>8)) + chr(len(x)&0xFF) + x[0:6]
x = x[6:]
idx = 1
sends = []
while len(x) > 0:
sends.append(((chr(0x20 + (idx&0xF)) + x[0:7]).ljust(8, "\x00")))
x = x[7:]
idx += 1
# actually send
panda.can_send(addr, ss, bus)
rr = recv(panda, 1, addr+8, bus)[0]
panda.can_send_many([(addr, None, s, 0) for s in sends])
kmsgs = []
def recv(panda, cnt, addr, nbus):
global kmsgs
ret = []
while len(ret) < cnt:
kmsgs += panda.can_recv()
nmsgs = []
for ids, ts, dat, bus in kmsgs:
if ids == addr and bus == nbus and len(ret) < cnt:
ret.append(dat)
else:
pass
kmsgs = nmsgs
return map(str, ret)
def isotp_recv(panda, addr, bus=0):
msg = recv(panda, 1, addr, bus)[0]
if ord(msg[0])&0xf0 == 0x10:
# first
tlen = ((ord(msg[0]) & 0xf) << 8) | ord(msg[1])
dat = msg[2:]
# 0 block size?
CONTINUE = "\x30" + "\x00"*7
panda.can_send(addr-8, CONTINUE, bus)
idx = 1
for mm in recv(panda, (tlen-len(dat) + 7)/8, addr, bus):
assert ord(mm[0]) == (0x20 | idx)
dat += mm[1:]
idx += 1
elif ord(msg[0])&0xf0 == 0x00:
# single
tlen = ord(msg[0]) & 0xf
dat = msg[1:]
else:
assert False
dat = dat[0:tlen]
if DEBUG:
print "R:",dat.encode("hex")
return dat
| Python | 0 |
ba1186c47e5f3466faeea9f2d5bf96948d5f7183 | Add --strict flag to raise exception on undefined variables | confuzzle.py | confuzzle.py | import sys
import argparse
import yaml
import jinja2
def render(template_string, context_dict, strict=False):
template = jinja2.Template(template_string)
if strict:
template.environment.undefined = jinja2.StrictUndefined
return template.render(**context_dict)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('template', nargs='?', type=argparse.FileType('r'), default=sys.stdin, help="Config file template. If not supplied, stdin is used")
parser.add_argument('config', type=argparse.FileType('r'), help="YAML data file to read")
parser.add_argument('--out', '-o', dest='out', type=argparse.FileType('w'), default=sys.stdout, help="Output file to write. If not supplied, stdout is used")
parser.add_argument('--strict', dest='strict', action='store_true', default=False, help="Raise an exception on undefined variables")
args = parser.parse_args()
context_dict = yaml.load(args.config.read())
template_string = args.template.read()
rendered = render(template_string, context_dict, args.strict)
args.out.write(rendered)
if __name__ == "__main__":
main()
| import sys
import argparse
import yaml
from jinja2 import Template
def render(template_string, context_dict):
template = Template(template_string)
return template.render(**context_dict)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('template', nargs='?', type=argparse.FileType('r'), default=sys.stdin, help="Config file template. If not supplied, stdin is used")
parser.add_argument('config', type=argparse.FileType('r'), help="YAML data file to read")
parser.add_argument('--out', '-o', dest='out', type=argparse.FileType('w'), default=sys.stdout, help="Output file to write. If not supplied, stdout is used")
args = parser.parse_args()
context_dict = yaml.load(args.config.read())
template_string = args.template.read()
rendered = render(template_string, context_dict)
args.out.write(rendered)
if __name__ == "__main__":
main()
| Python | 0 |
bfd1e90365446fe1a7c1e5ae710dbf497cc405fb | Fix test with newline problems in Windows | utest/writer/test_filewriters.py | utest/writer/test_filewriters.py | from __future__ import with_statement
import unittest
from StringIO import StringIO
from robot.parsing import TestCaseFile
from robot.parsing.model import TestCaseTable
from robot.utils.asserts import assert_equals
from robot.utils import ET, ETSource
def create_test_case_file():
data = TestCaseFile(source='foo.txt')
table = TestCaseTable(data)
data.testcase_table = table
table.set_header(['test case', 'some', 'and other'])
test = table.add('A test')
test.add_step(['A kw', 'an arg'])
return data
class _WriterTestCase(unittest.TestCase):
def _test_rows_are_not_split_if_there_are_headers(self, format='txt'):
output = self._add_long_step_and_save(format)
assert_equals(len(output.splitlines()), 4)
def _add_long_step_and_save(self, format):
data = create_test_case_file()
data.testcase_table.tests[0].add_step(['A kw', '1', '2', '3', '4', '6', '7', '8'])
output = StringIO()
data.save(format=format, output=output)
return output.getvalue().strip()
class TestSpaceSeparatedWriter(_WriterTestCase):
def test_end_of_line_whitespace_is_removed(self):
output = StringIO()
create_test_case_file().save(output=output)
expected = '''
*** test case *** some and other
A test
A kw an arg'''.strip()
for exp, act in zip(expected.splitlines(), output.getvalue().splitlines()):
assert_equals(repr(exp), repr(act))
def test_rows_are_not_split_if_there_are_headers(self):
self._test_rows_are_not_split_if_there_are_headers()
def test_configuring_number_of_separating_spaces(self):
output = StringIO()
create_test_case_file().save(output=output, txt_separating_spaces=8)
expected = '''\
*** test case *** some and other
A test
A kw an arg'''.strip()
actual = output.getvalue().strip()
assert_equals(expected.splitlines(), actual.splitlines())
class TestTsvWriter(_WriterTestCase):
def test_rows_are_not_split_if_there_are_headers(self):
try:
import csv
except ImportError:
pass # csv not available on IronPython 2.7
else:
self._test_rows_are_not_split_if_there_are_headers('tsv')
class TestHtmlWriter(_WriterTestCase):
def test_rows_are_not_split_if_there_are_headers(self):
output = self._add_long_step_and_save('html')
with ETSource('\n'.join(output.splitlines()[1:])) as source:
tree = ET.parse(source)
lines = tree.findall('body/table/tr')
assert_equals(len(lines), 4)
for l in lines:
cols = l.findall('td') or l.findall('th')
assert_equals(len(cols), 9)
if __name__ == '__main__':
unittest.main()
| from __future__ import with_statement
import unittest
from StringIO import StringIO
from robot.parsing import TestCaseFile
from robot.parsing.model import TestCaseTable
from robot.utils.asserts import assert_equals
from robot.utils import ET, ETSource
def create_test_case_file():
data = TestCaseFile(source='foo.txt')
table = TestCaseTable(data)
data.testcase_table = table
table.set_header(['test case', 'some', 'and other'])
test = table.add('A test')
test.add_step(['A kw', 'an arg'])
return data
class _WriterTestCase(unittest.TestCase):
def _test_rows_are_not_split_if_there_are_headers(self, format='txt'):
output = self._add_long_step_and_save(format)
assert_equals(len(output.splitlines()), 4)
def _add_long_step_and_save(self, format):
data = create_test_case_file()
data.testcase_table.tests[0].add_step(['A kw', '1', '2', '3', '4', '6', '7', '8'])
output = StringIO()
data.save(format=format, output=output)
return output.getvalue().strip()
class TestSpaceSeparatedWriter(_WriterTestCase):
def test_end_of_line_whitespace_is_removed(self):
output = StringIO()
create_test_case_file().save(output=output)
expected = '''
*** test case *** some and other
A test
A kw an arg'''.strip()
for exp, act in zip(expected.splitlines(), output.getvalue().splitlines()):
assert_equals(repr(exp), repr(act))
def test_rows_are_not_split_if_there_are_headers(self):
self._test_rows_are_not_split_if_there_are_headers()
def test_configuring_number_of_separating_spaces(self):
output = StringIO()
create_test_case_file().save(output=output, txt_separating_spaces=8)
expected = '''\
*** test case *** some and other
A test
A kw an arg'''.strip()
actual = output.getvalue().strip()
assert_equals(repr(expected), repr(actual))
class TestTsvWriter(_WriterTestCase):
def test_rows_are_not_split_if_there_are_headers(self):
try:
import csv
except ImportError:
pass # csv not available on IronPython 2.7
else:
self._test_rows_are_not_split_if_there_are_headers('tsv')
class TestHtmlWriter(_WriterTestCase):
def test_rows_are_not_split_if_there_are_headers(self):
output = self._add_long_step_and_save('html')
with ETSource('\n'.join(output.splitlines()[1:])) as source:
tree = ET.parse(source)
lines = tree.findall('body/table/tr')
assert_equals(len(lines), 4)
for l in lines:
cols = l.findall('td') or l.findall('th')
assert_equals(len(cols), 9)
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 |
b336e83a63722b3a3e4d3f1779686149d5cef8d1 | Add compatibility for Python 2 | setuptools/tests/test_setopt.py | setuptools/tests/test_setopt.py | # coding: utf-8
from __future__ import unicode_literals
import io
import six
from setuptools.command import setopt
from setuptools.extern.six.moves import configparser
class TestEdit:
@staticmethod
def parse_config(filename):
parser = configparser.ConfigParser()
with io.open(filename, encoding='utf-8') as reader:
(parser.read_file if six.PY3 else parser.readfp)(reader)
return parser
@staticmethod
def write_text(file, content):
with io.open(file, 'wb') as strm:
strm.write(content.encode('utf-8'))
def test_utf8_encoding_retained(self, tmpdir):
"""
When editing a file, non-ASCII characters encoded in
UTF-8 should be retained.
"""
config = tmpdir.join('setup.cfg')
self.write_text(str(config), '[names]\njaraco=йарацо')
setopt.edit_config(str(config), dict(names=dict(other='yes')))
parser = self.parse_config(str(config))
assert parser.get('names', 'jaraco') == 'йарацо'
assert parser.get('names', 'other') == 'yes'
| # coding: utf-8
from __future__ import unicode_literals
import io
import six
from setuptools.command import setopt
from setuptools.extern.six.moves import configparser
class TestEdit:
@staticmethod
def parse_config(filename):
parser = configparser.ConfigParser()
with io.open(filename, encoding='utf-8') as reader:
(parser.read_file if six.PY3 else parser.readfp)(reader)
return parser
@staticmethod
def write_text(file, content):
with io.open(file, 'wb') as strm:
strm.write(content.encode('utf-8'))
def test_utf8_encoding_retained(self, tmpdir):
"""
When editing a file, non-ASCII characters encoded in
UTF-8 should be retained.
"""
config = tmpdir.join('setup.cfg')
self.write_text(config, '[names]\njaraco=йарацо')
setopt.edit_config(str(config), dict(names=dict(other='yes')))
parser = self.parse_config(str(config))
assert parser['names']['jaraco'] == 'йарацо'
assert parser['names']['other'] == 'yes'
| Python | 0.00002 |
70ccca895892fc81eb07c4d0b4b7cefe17554b77 | Fix typo | src/checker/plugin/links_finder_plugin.py | src/checker/plugin/links_finder_plugin.py | from bs4 import BeautifulSoup
from yapsy.IPlugin import IPlugin
from requests.exceptions import InvalidSchema
from requests.exceptions import ConnectionError
from requests.exceptions import MissingSchema
import requests
import urlparse
import urllib
import marisa_trie
class LinksFinder(IPlugin):
    """Yapsy plugin that extracts links from an HTML page, records them in
    the database and downloads linked content of accepted media types.

    Python 2 code (print statements, ``unicode``).  ``setDb`` and
    ``setTypes`` must be called before ``check``.
    """

    def __init__(self):
        self.database = None   # injected via setDb()
        self.types = None      # accepted content-type prefixes
        self.trie = None       # prefix trie over self.types

    def setDb(self, DB):
        self.database = DB

    def setTypes(self, types):
        self.types = types
        self.trie = marisa_trie.Trie(types)

    def check(self, transactionId, content):
        """Find <a> and <link> tags, take their href attributes, store them
        as links and download the content as a new transaction.
        """
        soup = BeautifulSoup(content, 'html.parser')
        uri = self.database.getUri(transactionId)

        self.make_links_absolute(soup, uri,'a')
        links = soup.find_all('a')
        self.check_links(links, "Link to ", transactionId, 'href')

        self.make_links_absolute(soup, uri, 'link')
        links2 = soup.find_all('link')
        self.check_links(links2, "Linked resource: ", transactionId, 'href')

        self.make_sources_absolute(soup, uri, 'img')
        images = soup.find_all('img')
        self.check_links(images, "Image: ", transactionId, 'src')
        return

    def getId(self):
        return "linksFinder"

    def getLink(self, url, reqId, srcId):
        """HEAD the url; record a defect on non-200, and GET + store the body
        when its content type matches an accepted prefix."""
        try:
            print "Inspecting "+url
            r = requests.head(url)
            if r.status_code != 200:
                self.database.setDefect(srcId, "badlink", 0, url)
            if 'content-type' in r.headers.keys():
                ct = r.headers['content-type']
            else:
                ct = ''
            if self.getMaxPrefix(ct) in self.types:
                print "Downloading "+url
                r = requests.get(url)
                # body truncated to 64 KiB - 1 to fit the DB column
                self.database.setResponse(reqId, r.status_code, ct, r.text.encode("utf-8").strip()[:65535])
            else: print "Content type not accepted: "+ct
        except InvalidSchema:
            print "Invalid schema"
        except ConnectionError:
            print "Connection error"
        except MissingSchema:
            print "Missing schema"

    def make_links_absolute(self, soup, url, tag):
        """Rewrite every href of *tag* elements to be absolute against *url*."""
        print "Make links absolute: "+url
        for tag in soup.findAll(tag, href=True):
            if 'href' in tag.attrs:
                tag['href'] = urlparse.urljoin(url, tag['href'])

    def make_sources_absolute(self, soup, url, tag):
        """Rewrite every src of *tag* elements to be absolute against *url*."""
        for tag in soup.findAll(tag):
            tag['src'] = urlparse.urljoin(url, tag['src'])

    def check_links(self, links, logMsg, transactionId, tag):
        """Record each link (anchor fragment stripped, percent-encoded) and
        fetch it unless the DB reports it as already known (reqId == -1)."""
        for link in links:
            url = link.get(tag)
            if url is not None:
                urlNoAnchor = url.split('#')[0]
                reqId = self.database.setLink(transactionId, urllib.quote(urlNoAnchor.encode('utf-8')))
                print logMsg+str(url)
                if reqId != -1:
                    self.getLink(url, reqId, transactionId)

    def getMaxPrefix(self, ctype):
        """Return the longest accepted-type prefix of *ctype*, or *ctype* itself."""
        prefList = self.trie.prefixes(unicode(ctype, encoding="utf-8"))
        if len(prefList) > 0:
            return prefList[-1]
        else: return ctype
| from bs4 import BeautifulSoup
from yapsy.IPlugin import IPlugin
from requests.exceptions import InvalidSchema
from requests.exceptions import ConnectionError
from requests.exceptions import MissingSchema
import requests
import urlparse
import urllib
import marisa_trie
class LinksFinder(IPlugin):
def __init__(self):
self.database = None
self.types = None
self.trie = None
def setDb(self, DB):
self.database = DB
def setTypes(self, types):
self.types = types
self.trie = marisa_trie.Trie(types)
def check(self, transactionId, content):
""" Najde tagy <a>, <link>, vybere atribut href, ulozi jako odkazy,
stahne obsah jako dalsi transakci.
"""
soup = BeautifulSoup(content, 'html.parser')
uri = self.database.getUri(transactionId)
self.make_links_absolute(soup, uri,'a')
links = soup.find_all('a')
self.check_links(links, "Link to ", transactionId, 'href')
self.make_links_absolute(soup, uri, 'link')
links2 = soup.find_all('link')
self.check_links(links2, "Linked resource: ", transactionId, 'href')
self.make_sources_absolute(soup, uri, 'img')
images = soup.find_all('img')
self.check_links(images, "Image: ", transactionId, 'src')
return
def getId(self):
return "linksFinder"
def getLink(self, url, reqId, srcId):
try:
print "Inspecting "+url
r = requests.head(url)
if r.status_code != 200:
self.database.setDefect(srcId, "badlink", 0, url)
if 'content-type' in r.headers.keys():
ct = r.headers['content-type']
else:
ct = ''
if self.getMaxPrefix(ct) in self.types:
print "Downloading "+url
r = requests.get(url)
self.database.setResponse(reqId, r.status_code, ct, r.text.encode("utf-8").strip()[:65535])
else: print "Content type not accepted: "+ct
except InvalidSchema:
print "Invalid schema"
except ConnectionError:
print "Connection error"
except MissingSchema:
print "Missing schema"
def make_links_absolute(self, soup, url, tag):
print "Make links absolute: "+url
for tag in soup.findAll(tag, href=True):
if 'href' in tag.attrs:
tag['href'] = urlparse.urljoin(url, tag['href'])
def make_sources_absolute(self, soup, url, tag):
for tag in soup.findAll(tag):
tag['src'] = urlparse.urljoin(url, tag['src'])
def check_links(self, links, logMsg, transactionId, tag):
for link in links:
url = link.get(tag)
if url is not None:
urlNoAnchor = url.split('#')[0]
reqId = self.database.setLink(transactionId, urllib.quote(urlNoAnchor.encode('utf-8'())
print logMsg+str(url)
if reqId != -1:
self.getLink(url, reqId, transactionId)
def getMaxPrefix(self, ctype):
prefList = self.trie.prefixes(unicode(ctype, encoding="utf-8"))
if len(prefList) > 0:
return prefList[-1]
else: return ctype
| Python | 0.999999 |
71f67f02dd26e29002ced50298b245c6114ece3b | Update mathfunctions.py | Python/Math/mathfunctions.py | Python/Math/mathfunctions.py | # File with the functions which will be used in math script
# Number to the power of
def po(number, pof):
    """Return ``number`` raised to the power ``pof``.

    Both arguments are coerced to int.  Fixes the original, which for
    ``pof == 0`` returned ``number`` itself instead of 1.
    """
    base = int(number)
    exponent = int(pof)
    if exponent == 0:
        return 1  # x**0 == 1 by definition
    result = base
    for _ in range(exponent - 1):
        result *= base
    return result
# Factors of a number
def factors(number):
    """Return the positive divisors of ``number`` in ascending order.

    Rewritten from the original remainder-driven ``while`` loop to plain
    trial division; still returns [] for numbers < 1.
    """
    number = int(number)
    return [d for d in range(1, number + 1) if number % d == 0]
# Sqare root of number
def sqroot(number):
    """Return the exact integer square root of ``number``.

    Returns the string "No Square Root Found" when ``number`` is negative
    or not a perfect square.  Fixes the original, whose dead ``f`` flag
    logic could fall off the end (returning None) and which reported no
    root for 0.
    """
    if number < 0:
        return "No Square Root Found"
    # integer sqrt without floats going wrong: start from the float
    # estimate and adjust by +/-1 to be exact
    root = int(number ** 0.5)
    while root * root > number:
        root -= 1
    while (root + 1) * (root + 1) <= number:
        root += 1
    if root * root == number:
        return root
    return "No Square Root Found"
# Linear Patern Solver
def lseq(ls1, ls2, ls3, ls4):
    """Return the general term "Tn = ..." of the linear sequence ls1..ls4.

    Fixes the original: the ``lsd1 == 1`` branch produced the same text
    as the general branch (its own comment said the 1 should be dropped),
    the non-linear check compared the un-coerced arguments, and a
    negative constant rendered as "n+-c".
    """
    d1 = int(ls2) - int(ls1)   # difference of the first pair
    d2 = int(ls4) - int(ls3)   # difference of the second pair
    if d1 != d2:
        return "This is not a Linear Equation!"
    c = int(ls1) - d1          # constant term: T1 = d*1 + c
    term = "n" if d1 == 1 else "%sn" % d1
    if c == 0:
        return "Tn = %s" % term
    if c > 0:
        return "Tn = %s+%s" % (term, c)
    return "Tn = %s-%s" % (term, -c)
# THIS CAN SERIOUSLY BE DONE BETTER WITH CREATING OTHER FUCNTIONS, BUT LEAVING IT HERE FOR NOW...
def lineareq(numbers):
    """Solve the 2x2 linear system  a*x + b*y = e,  c*x + d*y = f
    where ``numbers == [a, b, c, d, e, f]``; returns (x, y) by
    Cramer's rule.  Raises ZeroDivisionError for a singular matrix.
    """
    a, b = numbers[0], numbers[1]
    c, d = numbers[2], numbers[3]
    e, f = numbers[4], numbers[5]
    # determinant of the coefficient matrix
    det = a * d - b * c
    x = (d * e - b * f) / det
    y = (a * f - c * e) / det
    return x, y
| # File with the functions which will be used in math script
# Number to the power of
def po(number, pof):
    """Return ``number`` multiplied by itself ``pof - 1`` times.

    Mirrors the original contract: for ``pof <= 1`` the argument is
    returned unchanged (no int coercion happens).
    """
    acc = number
    for _ in range(1, pof):
        acc = int(acc) * int(number)
    return acc
# Factors of a number
# Factors of a number
def factors (number):
    """Return the positive divisors of ``number`` in ascending order.

    The loop advances ``ao`` while the remainder ``number % ao`` stays
    below ``number``; for ao > number the remainder equals number, so the
    scan stops just past ao == number.  Returns [] for number <= 0.
    """
    current, ao, nums = 0, 0, []
    while current < number:
        ao = ao + 1
        current = number % ao
        if current == 0:
            nums.append(ao)
    return nums
# Sqare root of number
# Sqare root of number
def sqroot (number):
    """Return x such that x*x == number, scanning the factor list.

    NOTE(review): the ``f`` flag looks like dead code — ``f = True`` sits
    right after the ``return`` and appears unreachable, so ``f`` stays ''
    and the final message fires whenever no root was found; confirm the
    intended indentation against the repository.
    """
    fac, f = factors (number), ''
    for x in fac:
        a = x * x
        if a == number:
            return (x)
            f = True
    if f != True:
        return "No Square Root Found"
# THIS CAN SERIOUSLY BE DONE BETTER WITH CREATING OTHER FUCNTIONS, BUT LEAVING IT HERE FOR NOW...
def lineareq(numbers):
    """Solve the 2x2 system  a*x + b*y = e,  c*x + d*y = f  where
    ``numbers == [a, b, c, d, e, f]``; returns (x, y).

    Works via the adjugate matrix and determinant (Cramer's rule);
    raises ZeroDivisionError for a singular coefficient matrix.
    """
    # adjugate of [[a, b], [c, d]] is [[d, -b], [-c, a]]
    ai = numbers[3]
    bi = numbers[1] * -1
    ci = numbers[2] * -1
    di = numbers[0]
    # Calculate the Determinent of the inverse
    de = ai * di - bi * ci
    # Calculate the final answer, for easy eye viewing
    xo = ai * numbers[4]
    xoo = bi * numbers[5]
    ans1 = xo + xoo
    xo = ci * numbers[4]
    xoo = di * numbers[5]
    ans2 = xo + xoo
    # Finish Equation
    ans1 = ans1 / de
    ans2 = ans2 / de
    return ans1, ans2
| Python | 0.000003 |
1d3eb0bafd46f3e9cfb7d6395ad1a100052ff821 | Clean up parameter types (#52527) | lib/ansible/plugins/doc_fragments/online.py | lib/ansible/plugins/doc_fragments/online.py | # -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
    """Shared Ansible documentation fragment for Online.net modules."""

    # Standard documentation fragment
    # NOTE: the string below is YAML consumed by ansible-doc at runtime;
    # its content must not be reflowed or translated.
    DOCUMENTATION = r'''
options:
  api_token:
    description:
      - Online OAuth token.
    type: str
    aliases: [ oauth_token ]
  api_url:
    description:
      - Online API URL
    type: str
    default: 'https://api.online.net'
    aliases: [ base_url ]
  api_timeout:
    description:
      - HTTP timeout to Online API in seconds.
    type: int
    default: 30
    aliases: [ timeout ]
  validate_certs:
    description:
      - Validate SSL certs of the Online API.
    type: bool
    default: yes
notes:
  - Also see the API documentation on U(https://console.online.net/en/api/)
  - If C(api_token) is not set within the module, the following
    environment variables can be used in decreasing order of precedence
    C(ONLINE_TOKEN), C(ONLINE_API_KEY), C(ONLINE_OAUTH_TOKEN), C(ONLINE_API_TOKEN)
  - If one wants to use a different C(api_url) one can also set the C(ONLINE_API_URL)
    environment variable.
'''
| # -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
    """Shared Ansible documentation fragment for Online.net modules."""

    # Standard documentation fragment
    # NOTE: the string below is YAML consumed by ansible-doc at runtime;
    # its content must not be reflowed or translated.
    DOCUMENTATION = '''
options:
  api_token:
    description:
      - Online OAuth token.
    aliases: ['oauth_token']
  api_url:
    description:
      - Online API URL
    default: 'https://api.online.net'
    aliases: ['base_url']
  api_timeout:
    description:
      - HTTP timeout to Online API in seconds.
    default: 30
    aliases: ['timeout']
  validate_certs:
    description:
      - Validate SSL certs of the Online API.
    default: yes
    type: bool
notes:
  - Also see the API documentation on U(https://console.online.net/en/api/)
  - If C(api_token) is not set within the module, the following
    environment variables can be used in decreasing order of precedence
    C(ONLINE_TOKEN), C(ONLINE_API_KEY), C(ONLINE_OAUTH_TOKEN), C(ONLINE_API_TOKEN)
  - If one wants to use a different C(api_url) one can also set the C(ONLINE_API_URL)
    environment variable.
'''
| Python | 0 |
eb3f93ac64953eacecdd48e2cb8d5ca80554a95b | Update search-for-a-range.py | Python/search-for-a-range.py | Python/search-for-a-range.py | # Time: O(logn)
# Space: O(1)
#
# Given a sorted array of integers, find the starting and ending position of a given target value.
#
# Your algorithm's runtime complexity must be in the order of O(log n).
#
# If the target is not found in the array, return [-1, -1].
#
# For example,
# Given [5, 7, 7, 8, 8, 10] and target value 8,
# return [3, 4].
#
class Solution(object):
    """Binary-search solution for LeetCode "Search for a Range" (Python 2:
    ``/`` below is integer floor division)."""

    def searchRange(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        # Find the first index where target <= nums[idx]
        left = self.binarySearch(lambda x, y: x >= y, nums, target)
        if left >= len(nums) or nums[left] != target:
            return [-1, -1]
        # Find the first index where target < nums[idx]
        right = self.binarySearch(lambda x, y: x > y, nums, target)
        return [left, right - 1]

    def binarySearch(self, compare, nums, target):
        # Half-open interval [left, right): returns the first index whose
        # element satisfies compare(element, target); len(nums) if none.
        left, right = 0, len(nums)
        while left < right:
            mid = left + (right - left) / 2
            if compare(nums[mid], target):
                right = mid
            else:
                left = mid + 1
        return left

    def binarySearch2(self, compare, nums, target):
        # Same result using a closed interval [left, right].
        left, right = 0, len(nums) - 1
        while left <= right:
            mid = left + (right - left) / 2
            if compare(nums[mid], target):
                right = mid - 1
            else:
                left = mid + 1
        return left

    def binarySearch3(self, compare, nums, target):
        # Same result using an open interval (left, right).
        left, right = -1, len(nums)
        while left + 1 < right:
            mid = left + (right - left) / 2
            if compare(nums[mid], target):
                right = mid
            else:
                left = mid
        return right
# Ad-hoc smoke checks when run directly (Python 2 print statements).
if __name__ == "__main__":
    print Solution().searchRange([2, 2], 3)
    print Solution().searchRange([5, 7, 7, 8, 8, 10], 8)
| # Time: O(logn)
# Space: O(1)
#
# Given a sorted array of integers, find the starting and ending position of a given target value.
#
# Your algorithm's runtime complexity must be in the order of O(log n).
#
# If the target is not found in the array, return [-1, -1].
#
# For example,
# Given [5, 7, 7, 8, 8, 10] and target value 8,
# return [3, 4].
#
class Solution(object):
    """Binary-search solution for LeetCode "Search for a Range" (Python 2:
    ``/`` below is integer floor division).  The comparators here take
    (target, element); compare(target, nums[mid]) being true moves the
    right bound down."""

    def searchRange(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        # Find the first index where target <= nums[idx]
        left = self.binarySearch(lambda x, y: x <= y, nums, target)
        if left >= len(nums) or nums[left] != target:
            return [-1, -1]
        # Find the first index where target < nums[idx]
        right = self.binarySearch(lambda x, y: x < y, nums, target)
        return [left, right - 1]

    def binarySearch(self, compare, nums, target):
        # Half-open interval [left, right): first index satisfying
        # compare(target, element); len(nums) if none.
        left, right = 0, len(nums)
        while left < right:
            mid = left + (right - left) / 2
            if compare(target, nums[mid]):
                right = mid
            else:
                left = mid + 1
        return left

    def binarySearch2(self, compare, nums, target):
        # Same result using a closed interval [left, right].
        left, right = 0, len(nums) - 1
        while left <= right:
            mid = left + (right - left) / 2
            if compare(target, nums[mid]):
                right = mid - 1
            else:
                left = mid + 1
        return left

    def binarySearch3(self, compare, nums, target):
        # Same result using an open interval (left, right).
        left, right = -1, len(nums)
        while right - left > 1:
            mid = left + (right - left) / 2
            if compare(target, nums[mid]):
                right = mid
            else:
                left = mid
        return right
# Ad-hoc smoke checks when run directly (Python 2 print statements).
if __name__ == "__main__":
    print Solution().searchRange([2, 2], 3)
    print Solution().searchRange([5, 7, 7, 8, 8, 10], 8)
| Python | 0 |
8831fb698e6ce4c263b1b3f02eba09744b46d64b | Remove unused variable (via yapf) | basis_set_exchange/curate/readers/cfour.py | basis_set_exchange/curate/readers/cfour.py | from ... import lut
from ..skel import create_skel
def read_cfour(basis_lines, fname):
    '''Reads gbasis-formatted file data and converts it to a dictionary with the
       usual BSE fields

       Note that the gbasis format does not store all the fields we
       have, so some fields are left blank
    '''
    # ``fname`` is accepted for reader-interface uniformity but not used here.

    # drop blank lines and gbasis comment lines
    skipchars = '!#'
    basis_lines = [l for l in basis_lines if l and not l[0] in skipchars]

    bs_data = create_skel('component')

    # cursor into basis_lines; each iteration consumes one element block
    i = 0
    bs_name = None
    while i < len(basis_lines):
        # element header looks like "SYM:BASISNAME" — TODO confirm any
        # further fields after the second colon are ignored
        line = basis_lines[i]
        lsplt = line.split(':')
        elementsym = lsplt[0]

        if bs_name is None:
            bs_name = lsplt[1]
        elif lsplt[1] != bs_name:
            raise RuntimeError("Multiple basis sets in a file")

        element_Z = lut.element_Z_from_sym(elementsym)
        element_Z = str(element_Z)

        if not element_Z in bs_data['elements']:
            bs_data['elements'][element_Z] = {}

        element_data = bs_data['elements'][element_Z]

        if not 'electron_shells' in element_data:
            element_data['electron_shells'] = []

        i += 2  # Skip comment line
        nshell = int(basis_lines[i].strip())

        i += 1

        # Read in the AM, ngeneral, and nprim for each shell
        # This is in a block just after nshell
        all_am = [int(x.strip()) for x in basis_lines[i].split()]
        i += 1
        all_ngen = [int(x.strip()) for x in basis_lines[i].split()]
        i += 1
        all_nprim = [int(x.strip()) for x in basis_lines[i].split()]
        i += 1

        # the three per-shell header rows must all describe nshell shells
        assert len(all_am) == nshell
        assert len(all_ngen) == nshell
        assert len(all_nprim) == nshell

        for shell_idx in range(nshell):
            shell_am = [all_am[shell_idx]]
            ngen = all_ngen[shell_idx]
            nprim = all_nprim[shell_idx]

            # s and p shells are plain gaussians; higher AM are spherical
            if max(shell_am) <= 1:
                func_type = 'gto'
            else:
                func_type = 'gto_spherical'

            shell = {'function_type': func_type, 'region': '', 'angular_momentum': shell_am}

            exponents = []
            coefficients = []

            # Read in exponents block (Fortran D-exponents normalized to E)
            while len(exponents) < nprim:
                line = basis_lines[i].replace('D', 'E')
                line = line.replace('d', 'E')
                exponents.extend([x.strip() for x in line.split()])
                i += 1

            # Read in all coefficients
            for prim in range(nprim):
                coef_tmp = []
                while len(coef_tmp) < ngen:
                    line = basis_lines[i].replace('D', 'E')
                    line = line.replace('d', 'E')
                    coef_tmp.extend([x.strip() for x in line.split()])
                    i += 1
                coefficients.append(coef_tmp)

            shell['exponents'] = exponents
            # We need to transpose the coefficient matrix
            # (we store a matrix with primitives being the column index and
            # general contraction being the row index)
            shell['coefficients'] = list(map(list, zip(*coefficients)))
            element_data['electron_shells'].append(shell)

    return bs_data
| from ... import lut
from ..skel import create_skel
def read_cfour(basis_lines, fname):
    '''Reads gbasis-formatted file data and converts it to a dictionary with the
       usual BSE fields

       Note that the gbasis format does not store all the fields we
       have, so some fields are left blank
    '''
    # ``fname`` is accepted for reader-interface uniformity but not used here.

    # drop blank lines and gbasis comment lines
    skipchars = '!#'
    basis_lines = [l for l in basis_lines if l and not l[0] in skipchars]

    bs_data = create_skel('component')

    # cursor into basis_lines; each iteration consumes one element block
    i = 0
    bs_name = None
    while i < len(basis_lines):
        # element header looks like "SYM:BASISNAME"
        line = basis_lines[i]
        lsplt = line.split(':')
        elementsym = lsplt[0]

        if bs_name is None:
            bs_name = lsplt[1]
        elif lsplt[1] != bs_name:
            raise RuntimeError("Multiple basis sets in a file")

        element_Z = lut.element_Z_from_sym(elementsym)
        element_Z = str(element_Z)

        if not element_Z in bs_data['elements']:
            bs_data['elements'][element_Z] = {}

        element_data = bs_data['elements'][element_Z]

        if not 'electron_shells' in element_data:
            element_data['electron_shells'] = []

        i += 2  # Skip comment line
        nshell = int(basis_lines[i].strip())

        i += 1

        # Read in the AM, ngeneral, and nprim for each shell
        # This is in a block just after nshell
        all_am = [int(x.strip()) for x in basis_lines[i].split()]
        i += 1
        all_ngen = [int(x.strip()) for x in basis_lines[i].split()]
        i += 1
        all_nprim = [int(x.strip()) for x in basis_lines[i].split()]
        i += 1

        # the three per-shell header rows must all describe nshell shells
        assert len(all_am) == nshell
        assert len(all_ngen) == nshell
        assert len(all_nprim) == nshell

        for shell_idx in range(nshell):
            shell_am = [all_am[shell_idx]]
            ngen = all_ngen[shell_idx]
            nprim = all_nprim[shell_idx]

            # s and p shells are plain gaussians; higher AM are spherical
            if max(shell_am) <= 1:
                func_type = 'gto'
            else:
                func_type = 'gto_spherical'

            shell = {'function_type': func_type, 'region': '', 'angular_momentum': shell_am}

            exponents = []
            coefficients = []

            # Read in exponents block (Fortran D-exponents normalized to E)
            # NOTE: the unused ``lsplt = line.split()`` assignments of the
            # original were removed here and in the coefficient loop.
            while len(exponents) < nprim:
                line = basis_lines[i].replace('D', 'E')
                line = line.replace('d', 'E')
                exponents.extend([x.strip() for x in line.split()])
                i += 1

            # Read in all coefficients
            for prim in range(nprim):
                coef_tmp = []
                while len(coef_tmp) < ngen:
                    line = basis_lines[i].replace('D', 'E')
                    line = line.replace('d', 'E')
                    coef_tmp.extend([x.strip() for x in line.split()])
                    i += 1
                coefficients.append(coef_tmp)

            shell['exponents'] = exponents
            # We need to transpose the coefficient matrix
            # (we store a matrix with primitives being the column index and
            # general contraction being the row index)
            shell['coefficients'] = list(map(list, zip(*coefficients)))
            element_data['electron_shells'].append(shell)

    return bs_data
| Python | 0 |
946b3867f464d96e85056b60d94593346a39cc51 | add map to tweet list | index.py | index.py | import os
import time
import TwitterAPI
import src.art.fluid
import src.art.gas
import src.art.map
# Configuration: credentials come from the environment; KeyError at startup
# means a variable is missing.
twitterAPI = TwitterAPI.TwitterAPI(
    consumer_key=os.environ["CONSUMER_KEY"],
    consumer_secret=os.environ["CONSUMER_SECRET"],
    access_token_key=os.environ["ACCESS_TOKEN_KEY"],
    access_token_secret=os.environ["ACCESS_TOKEN_SECRET"]
)

# Generate: cycle through the art generators forever, posting one image
# every 1020 seconds (17 minutes).
types = [src.art.fluid, src.art.gas, src.art.map]
totalTypes = len(types)
current = 0
while True:
    print("\x1b[36mIce\x1b[0m Crafting Post 💡")
    # each generator writes art.png and returns the status text
    seedText = types[current].generate()
    # context manager closes the image handle even if the API call raises;
    # the original open()/close() pair leaked it on error
    with open("art.png", "rb") as f:
        twitterAPI.request("statuses/update_with_media", {
            "status": seedText
        }, {
            "media[]": f.read()
        })
    print("\x1b[36mIce\x1b[0m Success \"" + seedText + "\" ✨\n")
    current = (current + 1) % totalTypes
    time.sleep(1020)
| import os
import time
import TwitterAPI
import src.art.fluid
import src.art.gas
# Configuration: credentials come from the environment; KeyError at startup
# means a variable is missing.
twitterAPI = TwitterAPI.TwitterAPI(
    consumer_key=os.environ["CONSUMER_KEY"],
    consumer_secret=os.environ["CONSUMER_SECRET"],
    access_token_key=os.environ["ACCESS_TOKEN_KEY"],
    access_token_secret=os.environ["ACCESS_TOKEN_SECRET"]
)

# Generate: cycle through the art generators forever, posting one image
# every 1020 seconds (17 minutes).
types = [src.art.fluid, src.art.gas]
totalTypes = len(types)
current = 0
while True:
    print("\x1b[36mIce\x1b[0m Crafting Post 💡")
    # each generator writes art.png and returns the status text
    seedText = types[current].generate()
    f = open("art.png", "rb")
    twitterAPI.request("statuses/update_with_media", {
        "status": seedText
    }, {
        "media[]": f.read()
    })
    f.close()
    print("\x1b[36mIce\x1b[0m Success \"" + seedText + "\" ✨\n")
    current = (current + 1) % totalTypes
    time.sleep(1020)
| Python | 0.000002 |
34adb8bb30860eb7748188a7d1a9345a09c4519f | Implement punctuation filtering | index.py | index.py | from nltk.tokenize import word_tokenize, sent_tokenize
import getopt
import sys
import os
import io
import string
def build_dict(docs):
    """Return the sorted list of all distinct terms across *docs* values."""
    vocabulary = set()
    for terms in docs.values():
        vocabulary.update(terms)
    return sorted(vocabulary)
def build_postings(dictionary):
    """Return a postings map: each term in *dictionary* mapped to a fresh empty list."""
    return {term: [] for term in dictionary}
def populate_postings(docs, postings):
    """Append each doc_id to the postings list of every distinct term it contains."""
    for doc_id, terms in docs.items():
        for term in set(terms):
            postings[term].append(doc_id)
def load_data(dir_doc):
    """Recursively read every file under *dir_doc*.

    Returns a dict mapping file basename to its full text.  Files are
    opened read-only ('r'); the original's 'r+' needlessly required
    write permission.  NOTE: files with the same basename in different
    sub-directories overwrite one another.
    """
    docs = {}
    for dirpath, dirnames, filenames in os.walk(dir_doc):
        for name in filenames:
            path = os.path.join(dirpath, name)
            with io.open(path, 'r') as f:
                docs[name] = f.read()
    return docs
def preprocess(docs):
    """Lower-case and tokenize each document, dropping single-character
    punctuation tokens; returns {doc_id: set_of_terms}."""
    punctuation = set(string.punctuation)
    return {
        doc_id: set(word_tokenize(text.lower())) - punctuation
        for doc_id, text in docs.items()
    }
def usage():
    """Print command-line usage to stdout."""
    print("usage: {0} -i directory-of-documents -d dictionary-file -p postings-file".format(sys.argv[0]))
if __name__ == '__main__':
    # parse -i <docs dir> -d <dictionary file> -p <postings file>
    dir_doc = dict_file = postings_file = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'i:d:p:')
    except getopt.GetoptError as err:
        usage()
        sys.exit(2)
    for o, a in opts:
        if o == '-i':
            dir_doc = a
        elif o == '-d':
            dict_file = a
        elif o == '-p':
            postings_file = a
        else:
            assert False, "unhandled option"
    # all three arguments are mandatory
    if dir_doc == None or dict_file == None or postings_file == None:
        usage()
        sys.exit(2)
    # index pipeline: read files -> tokenize -> vocabulary -> postings
    # NOTE(review): dict_file/postings_file are parsed but nothing is
    # written out yet — presumably persistence comes later.
    docs = load_data(dir_doc)
    docs = preprocess(docs)
    dictionary = build_dict(docs)
    postings = build_postings(dictionary)
    populate_postings(docs, postings)
| from nltk.tokenize import word_tokenize, sent_tokenize
import getopt
import sys
import os
import io
def build_dict(docs):
    """Return the sorted list of all distinct terms across *docs* values."""
    dictionary = set()
    for doc_id, doc in docs.items():
        dictionary.update(doc)
    dictionary = list(dictionary)
    dictionary.sort()
    return dictionary
def build_postings(dictionary):
    """Return a postings map: each term mapped to a fresh empty list."""
    postings = {}
    for term in dictionary:
        postings[term] = []
    return postings
def populate_postings(docs, postings):
    """Append each doc_id to the postings list of every distinct term it contains."""
    for doc_id, doc in docs.items():
        for term in set(doc):
            postings[term].append(doc_id)
def load_data(dir_doc):
    """Recursively read every file under *dir_doc* into {basename: text}.

    NOTE(review): files with the same basename in different sub-directories
    overwrite one another; 'r+' also requires write permission.
    """
    docs = {}
    for dirpath, dirnames, filenames in os.walk(dir_doc):
        for name in filenames:
            file = os.path.join(dirpath, name)
            with io.open(file, 'r+') as f:
                docs[name] = f.read()
    return docs
def preprocess(docs):
    """Lower-case and tokenize each document; returns {doc_id: set_of_terms}."""
    processed_docs = {}
    for doc_id, doc in docs.items():
        processed_docs[doc_id] = set(word_tokenize(doc.lower()))
    return processed_docs
def usage():
    """Print command-line usage to stdout."""
    print("usage: " + sys.argv[0] + " -i directory-of-documents -d dictionary-file -p postings-file")
if __name__ == '__main__':
    # parse -i <docs dir> -d <dictionary file> -p <postings file>
    dir_doc = dict_file = postings_file = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'i:d:p:')
    except getopt.GetoptError as err:
        usage()
        sys.exit(2)
    for o, a in opts:
        if o == '-i':
            dir_doc = a
        elif o == '-d':
            dict_file = a
        elif o == '-p':
            postings_file = a
        else:
            assert False, "unhandled option"
    # all three arguments are mandatory
    if dir_doc == None or dict_file == None or postings_file == None:
        usage()
        sys.exit(2)
    # index pipeline: read files -> tokenize -> vocabulary -> postings
    docs = load_data(dir_doc)
    docs = preprocess(docs)
    dictionary = build_dict(docs)
    postings = build_postings(dictionary)
    populate_postings(docs, postings)
| Python | 0.999999 |
e320c8558646233b78760e1c84c5334a3a743d6d | Fix test_ensemble on Python 3.5 | tests/test_ensemble.py | tests/test_ensemble.py | import pytest
from rasa_core.policies import Policy
from rasa_core.policies.ensemble import PolicyEnsemble
class WorkingPolicy(Policy):
    """Minimal Policy stub whose persist/load round-trip succeeds."""

    @classmethod
    def load(cls, path):
        return WorkingPolicy()

    def persist(self, path):
        pass

    def train(self, training_trackers, domain, **kwargs):
        pass

    def predict_action_probabilities(self, tracker, domain):
        pass

    def __eq__(self, other):
        # equality by type only, so a freshly loaded instance compares equal
        return isinstance(other, WorkingPolicy)
def test_policy_loading_simple(tmpdir):
    """Persisting then loading an ensemble round-trips its policies."""
    original_policy_ensemble = PolicyEnsemble([WorkingPolicy()])
    original_policy_ensemble.train([], None)
    # str(...): persist/load take a plain path string, not py.path.local
    original_policy_ensemble.persist(str(tmpdir))

    loaded_policy_ensemble = PolicyEnsemble.load(str(tmpdir))
    assert original_policy_ensemble.policies == loaded_policy_ensemble.policies
class LoadReturnsNonePolicy(Policy):
    """Policy stub whose ``load`` misbehaves by returning None."""

    @classmethod
    def load(cls, path):
        return None

    def persist(self, path):
        pass

    def train(self, training_trackers, domain, **kwargs):
        pass

    def predict_action_probabilities(self, tracker, domain):
        pass
def test_policy_loading_load_returns_none(tmpdir):
    """Loading must fail when a policy's ``load`` returns None."""
    original_policy_ensemble = PolicyEnsemble([LoadReturnsNonePolicy()])
    original_policy_ensemble.train([], None)
    original_policy_ensemble.persist(str(tmpdir))

    with pytest.raises(Exception):
        PolicyEnsemble.load(str(tmpdir))
class LoadReturnsWrongTypePolicy(Policy):
    """Policy stub whose ``load`` misbehaves by returning a non-Policy value."""

    @classmethod
    def load(cls, path):
        return ""

    def persist(self, path):
        pass

    def train(self, training_trackers, domain, **kwargs):
        pass

    def predict_action_probabilities(self, tracker, domain):
        pass
def test_policy_loading_load_returns_wrong_type(tmpdir):
    """Loading must fail when a policy's ``load`` returns the wrong type."""
    original_policy_ensemble = PolicyEnsemble([LoadReturnsWrongTypePolicy()])
    original_policy_ensemble.train([], None)
    original_policy_ensemble.persist(str(tmpdir))

    with pytest.raises(Exception):
        PolicyEnsemble.load(str(tmpdir))
| import pytest
from rasa_core.policies import Policy
from rasa_core.policies.ensemble import PolicyEnsemble
class WorkingPolicy(Policy):
    """Minimal Policy stub whose persist/load round-trip succeeds."""

    @classmethod
    def load(cls, path):
        return WorkingPolicy()

    def persist(self, path):
        pass

    def train(self, training_trackers, domain, **kwargs):
        pass

    def predict_action_probabilities(self, tracker, domain):
        pass

    def __eq__(self, other):
        # equality by type only, so a freshly loaded instance compares equal
        return isinstance(other, WorkingPolicy)
def test_policy_loading_simple(tmpdir):
    """Persisting then loading an ensemble round-trips its policies."""
    original_policy_ensemble = PolicyEnsemble([WorkingPolicy()])
    original_policy_ensemble.train([], None)
    # str(...): persist/load expect a plain path string; passing the
    # py.path.local fixture object breaks os.path handling on Python 3.5
    original_policy_ensemble.persist(str(tmpdir))

    loaded_policy_ensemble = PolicyEnsemble.load(str(tmpdir))
    assert original_policy_ensemble.policies == loaded_policy_ensemble.policies
class LoadReturnsNonePolicy(Policy):
    """Policy stub whose ``load`` misbehaves by returning None."""

    @classmethod
    def load(cls, path):
        return None

    def persist(self, path):
        pass

    def train(self, training_trackers, domain, **kwargs):
        pass

    def predict_action_probabilities(self, tracker, domain):
        pass
def test_policy_loading_load_returns_none(tmpdir):
    """Loading must fail when a policy's ``load`` returns None."""
    original_policy_ensemble = PolicyEnsemble([LoadReturnsNonePolicy()])
    original_policy_ensemble.train([], None)
    # str(...): pass a plain path string; py.path.local breaks on Python 3.5
    original_policy_ensemble.persist(str(tmpdir))

    with pytest.raises(Exception):
        PolicyEnsemble.load(str(tmpdir))
class LoadReturnsWrongTypePolicy(Policy):
    """Policy stub whose ``load`` misbehaves by returning a non-Policy value."""

    @classmethod
    def load(cls, path):
        return ""

    def persist(self, path):
        pass

    def train(self, training_trackers, domain, **kwargs):
        pass

    def predict_action_probabilities(self, tracker, domain):
        pass
def test_policy_loading_load_returns_wrong_type(tmpdir):
    """Loading must fail when a policy's ``load`` returns the wrong type."""
    original_policy_ensemble = PolicyEnsemble([LoadReturnsWrongTypePolicy()])
    original_policy_ensemble.train([], None)
    # str(...): pass a plain path string; py.path.local breaks on Python 3.5
    original_policy_ensemble.persist(str(tmpdir))

    with pytest.raises(Exception):
        PolicyEnsemble.load(str(tmpdir))
| Python | 0.998424 |
ed410e81af61699a16c34c1edbbaa18a80bcdcfe | use global DocSimServer instance in views | docsim/documents/views.py | docsim/documents/views.py | from ujson import dumps
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from rest_framework.generics import ListAPIView, RetrieveAPIView
from .docsimserver import DocSimServer
from .models import Cluster, Document
from .serializers import ClusterSerializer
# HTTP 202 Accepted: the document was stored for asynchronous processing.
ACCEPTED = 202

# Module-level singleton so the similarity server is created once per process.
# NOTE(review): instantiated at import time — confirm the startup cost of
# DocSimServer() is acceptable for every process importing this module.
DSS = DocSimServer()
@csrf_exempt
@require_POST
def add_or_update(request):
    """Create or update a Document from POSTed ``id`` and ``text``.

    Returns 202 Accepted on success, 400 when either field is missing.
    """
    doc_id = request.POST.get('id')
    body = request.POST.get('text')
    if not (doc_id and body):
        return HttpResponseBadRequest()
    Document(id=doc_id, text=body).save()
    return HttpResponse(status=ACCEPTED)
class ClusterList(ListAPIView):
    """Read-only list endpoint for all Cluster objects."""
    model = Cluster
    serializer_class = ClusterSerializer
class ClusterDetail(RetrieveAPIView):
    """Read-only detail endpoint for a single Cluster."""
    model = Cluster
    serializer_class = ClusterSerializer
@csrf_exempt
@require_POST
def find_similar(request):
    """Return documents similar to the POSTed ``text`` as a JSON response.

    Optional POST parameters: ``min_score`` (float, default .8),
    ``max_results`` (int, default 10) and ``id``; when ``id`` is given the
    document is also saved and added to the shared similarity index.
    """
    try:
        text = request.POST['text']
        min_score = float(request.POST.get('min_score', .8))
        max_results = int(request.POST.get('max_results', 10))
    except (KeyError, TypeError, ValueError):
        # only parameter problems map to 400; the original bare ``except:``
        # also swallowed SystemExit/KeyboardInterrupt
        return HttpResponseBadRequest()
    id = request.POST.get('id')
    doc = Document(id=id, text=text)
    tokens = doc.tokens()
    similar = DSS.find_similar({'tokens': tokens}, min_score=min_score,
                               max_results=max_results)
    if id:
        doc.save()
        # keep the shared index in sync with the stored document
        DSS.server.index([{'id': id, 'tokens': tokens}])
    return HttpResponse(content=dumps(similar), content_type='text/json')
| from ujson import dumps
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from rest_framework.generics import ListAPIView, RetrieveAPIView
from .docsimserver import DocSimServer
from .models import Cluster, Document
from .serializers import ClusterSerializer
# HTTP 202 Accepted: the document was stored for asynchronous processing.
ACCEPTED = 202
@csrf_exempt
@require_POST
def add_or_update(request):
    """Create or update a Document from POSTed ``id`` and ``text``;
    202 Accepted on success, 400 when either field is missing."""
    id = request.POST.get('id')
    text = request.POST.get('text')
    if id and text:
        Document(id=id, text=text).save()
        return HttpResponse(status=ACCEPTED)
    else:
        return HttpResponseBadRequest()
class ClusterList(ListAPIView):
    """Read-only list endpoint for all Cluster objects."""
    model = Cluster
    serializer_class = ClusterSerializer
class ClusterDetail(RetrieveAPIView):
    """Read-only detail endpoint for a single Cluster."""
    model = Cluster
    serializer_class = ClusterSerializer
@csrf_exempt
@require_POST
def find_similar(request):
    """Return documents similar to the POSTed ``text`` as a JSON response.

    Optional POST parameters: ``min_score`` (float, default .8),
    ``max_results`` (int, default 10) and ``id``; when ``id`` is given the
    document is also saved and indexed.
    """
    try:
        text = request.POST['text']
        min_score = float(request.POST.get('min_score', .8))
        max_results = int(request.POST.get('max_results', 10))
    except (KeyError, TypeError, ValueError):
        # only parameter problems map to 400; the original bare ``except:``
        # also swallowed SystemExit/KeyboardInterrupt
        return HttpResponseBadRequest()
    id = request.POST.get('id')
    doc = Document(id=id, text=text)
    dss = DocSimServer()
    tokens = doc.tokens()
    similar = dss.find_similar({'tokens': tokens}, min_score=min_score,
                               max_results=max_results)
    if id:
        doc.save()
        dss.server.index([{'id': id, 'tokens': tokens}])
    return HttpResponse(content=dumps(similar), content_type='text/json')
| Python | 0 |
829d68f842c5076be7a8b2c3963c032977fe2f47 | Bump to 4.4-dp2. | pebble_tool/version.py | pebble_tool/version.py | version_base = (4, 4, 0)
version_suffix = 'dp2'
if version_suffix is None:
__version_info__ = version_base
else:
__version_info__ = version_base + (version_suffix,)
__version__ = '{}.{}'.format(*version_base)
if version_base[2] != 0:
__version__ += '.{}'.format(version_base[2])
if version_suffix is not None:
__version__ += '-{}'.format(version_suffix)
| version_base = (4, 4, 0)
version_suffix = 'dp1'
if version_suffix is None:
__version_info__ = version_base
else:
__version_info__ = version_base + (version_suffix,)
__version__ = '{}.{}'.format(*version_base)
if version_base[2] != 0:
__version__ += '.{}'.format(version_base[2])
if version_suffix is not None:
__version__ += '-{}'.format(version_suffix)
| Python | 0.000001 |
bc91c7abdc5754917642614930ad24d5db169c9a | simplify settings | shotglass/shotglass/settings.py | shotglass/shotglass/settings.py | import os
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
'django_extensions',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ALLOWED_HOSTS = []
LANGUAGE_CODE = 'en-us'
ROOT_URLCONF = 'shotglass.urls'
SECRET_KEY = 'qjg2s4mxb$mmv0e222yg7ot2obfg8rh+u7s@7l!fwv1@r*fa_n'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
WSGI_APPLICATION = 'shotglass.wsgi.application'
STATIC_URL = '/static/'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(asctime)s %(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
},
},
}
| # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qjg2s4mxb$mmv0e222yg7ot2obfg8rh+u7s@7l!fwv1@r*fa_n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
# 'source',
'django_extensions',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'shotglass.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'shotglass.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(asctime)s %(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
},
},
}
| Python | 0.00049 |
752132f83cacb15273625f819eed1dab1d558e97 | Make sure all relevant fields are shown in the admin interface | dictionary/admin.py | dictionary/admin.py | from daisyproducer.dictionary.models import Word
from django.contrib import admin
class WordAdmin(admin.ModelAdmin):
list_display = ('untranslated', 'grade1', 'grade2', 'type', 'isConfirmed', 'isLocal')
ordering = ('untranslated',)
search_fields = ('untranslated',)
admin.site.register(Word, WordAdmin)
| from daisyproducer.dictionary.models import Word
from django.contrib import admin
class WordAdmin(admin.ModelAdmin):
list_display = ('untranslated', 'grade1', 'grade2', 'type', 'isConfirmed')
ordering = ('untranslated',)
search_fields = ('untranslated',)
admin.site.register(Word, WordAdmin)
| Python | 0 |
bda269c5b745703cf517222e004caf0233b40699 | refactor p4io to io | tests/test_get_data.py | tests/test_get_data.py | from planet4 import io
import datetime as dt
def test_get_numbers_from_date_from_fname():
fname1 = '/a/b/c/2014-06-02_some_name.h5'
assert io.split_date_from_fname(fname1) == [2014, 6, 2]
def test_get_datetime_object_from_fname():
fname1 = '/a/b/c/2014-06-02_some_name.h5'
dt_obj = dt.datetime(2014, 6, 2)
assert dt_obj == io.get_dt_from_fname(fname1)
def test_from_2_files_get_latest_file(monkeypatch):
import glob
fname1 = '/a/b/c/2014-06-02_some_name.h5'
fname2 = '/a/b/c/2014-06-09_some_name.h5'
def mockreturn(path):
return [fname1, fname2]
monkeypatch.setattr(glob, 'glob', mockreturn)
x = io.get_current_database_fname()
assert x == fname2
| from planet4 import p4io
import datetime as dt
def test_get_numbers_from_date_from_fname():
fname1 = '/a/b/c/2014-06-02_some_name.h5'
assert p4io.split_date_from_fname(fname1) == [2014, 6, 2]
def test_get_datetime_object_from_fname():
fname1 = '/a/b/c/2014-06-02_some_name.h5'
dt_obj = dt.datetime(2014, 6, 2)
assert dt_obj == p4io.get_dt_from_fname(fname1)
def test_from_2_files_get_latest_file(monkeypatch):
import glob
fname1 = '/a/b/c/2014-06-02_some_name.h5'
fname2 = '/a/b/c/2014-06-09_some_name.h5'
def mockreturn(path):
return [fname1, fname2]
monkeypatch.setattr(glob, 'glob', mockreturn)
x = p4io.get_current_database_fname()
assert x == fname2
| Python | 0.999999 |
9b06a061a4bc439ea96761ead0a1397470cfff56 | update tests | tests/test_labeling.py | tests/test_labeling.py | from __future__ import print_function
from builtins import zip
from builtins import object
from usaddress import parse, GROUP_LABEL
from parserator.training import readTrainingData
import unittest
class TestSimpleAddresses(object) : # for test generators, must inherit from object
def test_simple_addresses(self):
test_file = 'measure_performance/test_data/simple_address_patterns.xml'
data = list(readTrainingData([test_file], GROUP_LABEL))
for labeled_address in data :
address_text, components = labeled_address
_, labels_true = list(zip(*components))
_, labels_pred = list(zip(*parse(address_text)))
yield equals, address_text, labels_pred, labels_true
class TestSyntheticAddresses(object) :
def test_synthetic_addresses(self):
test_file = 'measure_performance/test_data/synthetic_osm_data.xml'
data = list(readTrainingData([test_file], GROUP_LABEL))
for labeled_address in data :
address_text, components = labeled_address
_, labels_true = list(zip(*components))
_, labels_pred = list(zip(*parse(address_text)))
yield equals, address_text, labels_pred, labels_true
class TestUS50Addresses(object) :
def test_us50(self):
test_file = 'measure_performance/test_data/us50_test_tagged.xml'
data = list(readTrainingData([test_file], GROUP_LABEL))
for labeled_address in data :
address_text, components = labeled_address
_, labels_true = list(zip(*components))
_, labels_pred = list(zip(*parse(address_text)))
yield fuzzyEquals, address_text, labels_pred, labels_true
def equals(addr,
labels_pred,
labels_true) :
prettyPrint(addr, labels_pred, labels_true)
assert labels_pred == labels_true
def fuzzyEquals(addr,
labels_pred,
labels_true) :
labels = []
fuzzy_labels = []
for label in labels_pred:
if label.startswith('StreetName') :
fuzzy_labels.append('StreetName')
elif label.startswith('AddressNumber') :
fuzzy_labels.append('AddressNumber')
elif label == ('Null') :
fuzzy_labels.append('NotAddress')
else:
fuzzy_labels.append(label)
for label in labels_true:
labels.append(label)
prettyPrint(addr, fuzzy_labels, labels)
assert fuzzy_labels == labels
def prettyPrint(addr, predicted, true) :
print("ADDRESS: ", addr)
print("fuzzy pred: ", predicted)
print("true: ", true)
if __name__== "__main__":
unittest.main()
| from __future__ import print_function
from builtins import zip
from builtins import object
from usaddress import parse, GROUP_LABEL
from parserator.training import readTrainingData
import unittest
class TestSimpleAddresses(object) : # for test generators, must inherit from object
def test_simple_addresses(self):
test_file = 'measure_performance/test_data/simple_address_patterns.xml'
data = list(readTrainingData([test_file], GROUP_LABEL))
for labeled_address in data :
address_text, components = labeled_address
_, labels_true = list(zip(*components))
_, labels_pred = list(zip(*parse(address_text)))
yield equals, address_text, labels_pred, labels_true
class TestSyntheticAddresses(object) :
def test_synthetic_addresses(self):
test_file = 'measure_performance/test_data/synthetic_osm_data.xml'
data = list(readTrainingData([test_file], GROUP_LABEL))
for labeled_address in data :
address_text, components = labeled_address
_, labels_true = list(zip(*components))
_, labels_pred = list(zip(*parse(address_text)))
yield equals, address_text, labels_pred, labels_true
class TestUS50Addresses(object) :
def test_us50(self):
test_file = 'measure_performance/test_data/us50_test_tagged.xml'
data = list(readTrainingData([test_file], GROUP_LABEL))
for labeled_address in data :
address_text, components = labeled_address
_, labels_true = list(zip(*components))
_, labels_pred = list(zip(*parse(address_text)))
yield fuzzyEquals, address_text, labels_pred, labels_true
def equals(addr,
labels_pred,
labels_true) :
prettyPrint(addr, labels_pred, labels_true)
assert labels_pred == labels_true
def fuzzyEquals(addr,
labels_pred,
labels_true) :
labels = []
fuzzy_labels = []
for label in labels_pred:
if label.startswith('StreetName') :
fuzzy_labels.append('StreetName')
elif label.startswith('AddressNumber') :
fuzzy_labels.append('AddressNumber')
else:
fuzzy_labels.append(label)
for label in labels_true:
labels.append(label)
prettyPrint(addr, fuzzy_labels, labels)
assert fuzzy_labels == labels
def prettyPrint(addr, predicted, true) :
print("ADDRESS: ", addr)
print("fuzzy pred: ", predicted)
print("true: ", true)
if __name__== "__main__":
unittest.main()
| Python | 0.000001 |
ab574b6c40b6e58f396c9522be864a78478617c1 | Remove TestMainLoop.test_concurrency | tests/test_mainloop.py | tests/test_mainloop.py | # -*- Mode: Python -*-
import os
import sys
import select
import signal
import time
import unittest
from gi.repository import GLib
from compathelper import _bytes
class TestMainLoop(unittest.TestCase):
@unittest.skipUnless(hasattr(os, "fork"), "no os.fork available")
def test_exception_handling(self):
pipe_r, pipe_w = os.pipe()
pid = os.fork()
if pid == 0:
os.close(pipe_w)
select.select([pipe_r], [], [])
os.close(pipe_r)
os._exit(1)
def child_died(pid, status, loop):
loop.quit()
raise Exception("deadbabe")
loop = GLib.MainLoop()
GLib.child_watch_add(GLib.PRIORITY_DEFAULT, pid, child_died, loop)
os.close(pipe_r)
os.write(pipe_w, _bytes("Y"))
os.close(pipe_w)
def excepthook(type, value, traceback):
self.assertTrue(type is Exception)
self.assertEqual(value.args[0], "deadbabe")
sys.excepthook = excepthook
try:
got_exception = False
try:
loop.run()
except:
got_exception = True
finally:
sys.excepthook = sys.__excepthook__
#
# The exception should be handled (by printing it)
# immediately on return from child_died() rather
# than here. See bug #303573
#
self.assertFalse(got_exception)
@unittest.skipUnless(hasattr(os, "fork"), "no os.fork available")
def test_sigint(self):
pid = os.fork()
if pid == 0:
time.sleep(0.5)
os.kill(os.getppid(), signal.SIGINT)
os._exit(0)
loop = GLib.MainLoop()
try:
loop.run()
self.fail('expected KeyboardInterrupt exception')
except KeyboardInterrupt:
pass
self.assertFalse(loop.is_running())
os.waitpid(pid, 0)
| # -*- Mode: Python -*-
import os
import sys
import select
import signal
import time
import unittest
try:
from _thread import start_new_thread
start_new_thread # pyflakes
except ImportError:
# Python 2
from thread import start_new_thread
from gi.repository import GLib
from compathelper import _bytes
class TestMainLoop(unittest.TestCase):
@unittest.skipUnless(hasattr(os, "fork"), "no os.fork available")
def test_exception_handling(self):
pipe_r, pipe_w = os.pipe()
pid = os.fork()
if pid == 0:
os.close(pipe_w)
select.select([pipe_r], [], [])
os.close(pipe_r)
os._exit(1)
def child_died(pid, status, loop):
loop.quit()
raise Exception("deadbabe")
loop = GLib.MainLoop()
GLib.child_watch_add(GLib.PRIORITY_DEFAULT, pid, child_died, loop)
os.close(pipe_r)
os.write(pipe_w, _bytes("Y"))
os.close(pipe_w)
def excepthook(type, value, traceback):
self.assertTrue(type is Exception)
self.assertEqual(value.args[0], "deadbabe")
sys.excepthook = excepthook
try:
got_exception = False
try:
loop.run()
except:
got_exception = True
finally:
sys.excepthook = sys.__excepthook__
#
# The exception should be handled (by printing it)
# immediately on return from child_died() rather
# than here. See bug #303573
#
self.assertFalse(got_exception)
@unittest.skipUnless(hasattr(signal, "SIGUSR1"), "no SIGUSR1")
def test_concurrency(self):
def on_usr1(signum, frame):
pass
try:
# create a thread which will terminate upon SIGUSR1 by way of
# interrupting sleep()
orig_handler = signal.signal(signal.SIGUSR1, on_usr1)
start_new_thread(time.sleep, (10,))
# now create two main loops
loop1 = GLib.MainLoop()
loop2 = GLib.MainLoop()
GLib.timeout_add(100, lambda: os.kill(os.getpid(), signal.SIGUSR1))
GLib.timeout_add(500, loop1.quit)
loop1.run()
loop2.quit()
finally:
signal.signal(signal.SIGUSR1, orig_handler)
@unittest.skipUnless(hasattr(os, "fork"), "no os.fork available")
def test_sigint(self):
pid = os.fork()
if pid == 0:
time.sleep(0.5)
os.kill(os.getppid(), signal.SIGINT)
os._exit(0)
loop = GLib.MainLoop()
try:
loop.run()
self.fail('expected KeyboardInterrupt exception')
except KeyboardInterrupt:
pass
self.assertFalse(loop.is_running())
os.waitpid(pid, 0)
| Python | 0.013365 |
3cc7e0cebc8a7a7410ce6b239e55db0cf55b1dc8 | Fix broken tests in test_messages | tests/test_messages.py | tests/test_messages.py | from datetime import date
import unittest
from mock import patch
from six import u
from twilio.rest.resources import Messages
DEFAULT = {
'From': None,
'DateSent<': None,
'DateSent>': None,
'DateSent': None,
}
class MessageTest(unittest.TestCase):
def setUp(self):
self.resource = Messages("foo", ("sid", "token"))
self.params = DEFAULT.copy()
def test_list_on(self):
with patch.object(self.resource, 'get_instances') as mock:
self.resource.list(date_sent=date(2011, 1, 1))
self.params['DateSent'] = "2011-01-01"
mock.assert_called_with(self.params)
def test_list_after(self):
with patch.object(self.resource, 'get_instances') as mock:
self.resource.list(after=date(2011, 1, 1))
self.params['DateSent>'] = "2011-01-01"
mock.assert_called_with(self.params)
def test_list_before(self):
with patch.object(self.resource, 'get_instances') as mock:
self.resource.list(before=date(2011, 1, 1))
self.params['DateSent<'] = "2011-01-01"
mock.assert_called_with(self.params)
def test_create(self):
with patch.object(self.resource, 'create_instance') as mock:
self.resource.create(
from_='+14155551234',
to='+14155556789',
body=u('ahoy hoy'),
)
mock.assert_called_with(
{
'From': '+14155551234',
'to': '+14155556789',
'body': u('ahoy hoy'),
},
)
def test_delete(self):
with patch.object(self.resource, 'delete_instance') as mock:
self.resource.delete('MM123')
mock.assert_called_with('MM123')
def test_redact(self):
with patch.object(self.resource, 'update_instance') as mock:
self.resource.redact('MM123')
mock.assert_called_with(sid='MM123', body={'Body': ''})
| from datetime import date
import unittest
from mock import patch
from six import u
from twilio.rest.resources import Messages
DEFAULT = {
'From': None,
'DateSent<': None,
'DateSent>': None,
'DateSent': None,
}
class MessageTest(unittest.TestCase):
def setUp(self):
self.resource = Messages("foo", ("sid", "token"))
self.params = DEFAULT.copy()
def test_list_on(self):
with patch.object(self.resource, 'get_instances') as mock:
self.resource.list(date_sent=date(2011, 1, 1))
self.params['DateSent'] = "2011-01-01"
mock.assert_called_with(self.params)
def test_list_after(self):
with patch.object(self.resource, 'get_instances') as mock:
self.resource.list(after=date(2011, 1, 1))
self.params['DateSent>'] = "2011-01-01"
mock.assert_called_with(self.params)
def test_list_before(self):
with patch.object(self.resource, 'get_instances') as mock:
self.resource.list(before=date(2011, 1, 1))
self.params['DateSent<'] = "2011-01-01"
mock.assert_called_with(self.params)
def test_create(self):
with patch.object(self.resource, 'create_instance') as mock:
self.resource.create(
from_='+14155551234',
to='+14155556789',
body=u('ahoy hoy'),
)
mock.assert_called_with(
{
'from': '+14155551234',
'to': '+14155556789',
'body': u('ahoy hoy'),
},
)
def test_delete(self):
with patch.object(self.resource, 'delete_instance') as mock:
self.resource.delete('MM123')
mock.assert_called_with('MM123')
def test_redact(self):
with patch.object(self.resource, 'update_instance') as mock:
self.resource.redact('MM123')
mock.assert_called_with('MM123', {'Body': ''})
| Python | 0.000648 |
f6ecf6a45e2749261a20869aca5dfca6d7c03494 | Correct method doc. | qiprofile_rest_client/helpers/database.py | qiprofile_rest_client/helpers/database.py | """Mongo Engine interaction utilities."""
def get_or_create(klass, key=None, **non_key):
"""
This function stands in for the Mongo Engine ``get_or_create``
collection method which was deprecated in mongoengine v0.8.0
and dropped in mongoengine v0.10.0, since MongoDB does not
support transactions.
If there is an object of the given Mongo Engine data model
class which matches the primary key, then that object is
returned. Otherwise, a new object is created with the content
prescribed by both the primary and non-primary parameters.
The create step is an upsert, i.e. a new object is created only
if it does not yet exist. The upsert allows for the small
possibility that an object is created after the fetch attempt
but before the create attempt. In that situation, the existing
object non-key content is modified and the modified object is
returned.
:Note: The idiom used in this function modifies the solution
proposed in http://stackoverflow.com/questions/25846462/mongoengine-replacing-get-or-create-with-upsert-update-one/25863633#25863633.
That StackOverflow work-around returns the following error:
ValueError: update only works with $ operators
The work-around to the StackOverflow work-around is to call
the data model class *update_one* method rather than *modify*.
:param klass: the Mongo Engine data model class
:param key: the secondary field key {attribute: value}
dictionary, or None if no fields comprise a secondary key
:param non_key: the non-key {attribute: value} dictionary
:return: the existing or new object
"""
try:
# Search by primary key.
return klass.objects.get(**key)
except klass.DoesNotExist:
# Create the new object as an upsert. Specify the MongoDB Engine
# set__*attribute* modification options for each non-primary
# key (attribute, value) pair.
mod_opts = {'set__' + attr: val for attr, val in non_key.iteritems()}
return klass.objects(**key).update_one(upsert=True, **mod_opts)
| """Mongo Engine interaction utilities."""
def get_or_create(klass, pk, **non_pk):
"""
This function stands in for the Mongo Engine ``get_or_create``
collection method which was deprecated in mongoengine v0.8.0
and dropped in mongoengine v0.10.0, since MongoDB does not
support transactions.
If there is an object of the given Mongo Engine data model
class which matches the primary key, then that object is
returned. Otherwise, a new object is created with the content
prescribed by both the primary and non-primary parameters.
The create step is an upsert, i.e. a new object is created only
if it does not yet exist. The upsert allows for the small
possibility that an object is created after the fetch attempt
but before the create attempt. In that situation, the existing
object non-key content is modified and the modified object is
returned.
:Note: The idiom used in this function modifies the solution
proposed in http://stackoverflow.com/questions/25846462/mongoengine-replacing-get-or-create-with-upsert-update-one/25863633#25863633.
That StackOverflow work-around returns the following error:
ValueError: update only works with $ operators
The work-around to the StackOverflow work-around is to use
call *update* rather than *modify*.
:param klass: the Mongo Engine data model class
:param pk: the primary key {attribute: value} dictionary
:param non_pk: the non-key {attribute: value} dictionary
:return: the existing or new object
"""
try:
return klass.objects.get(**pk)
except klass.DoesNotExist:
mod_opts = {'set__' + attr: val for attr, val in non_pk.iteritems()}
return klass.objects(**pk).update_one(upsert=True, **mod_opts)
| Python | 0 |
c2df896183f80fe3ca0eab259874bc4385d399e9 | Clean up detrius in parallel test file | tests/test_parallel.py | tests/test_parallel.py | from __future__ import with_statement
from fabric.api import run, parallel, env, hide
from utils import FabricTest, eq_
from server import server, RESPONSES
class TestParallel(FabricTest):
@server()
@parallel
def test_parallel(self):
"""
Want to do a simple call and respond
"""
env.pool_size = 10
cmd = "ls /simple"
with hide('everything'):
eq_(run(cmd), RESPONSES[cmd])
| from __future__ import with_statement
from datetime import datetime
import copy
import getpass
import sys
import paramiko
from nose.tools import with_setup
from fudge import (Fake, clear_calls, clear_expectations, patch_object, verify,
with_patched_object, patched_context, with_fakes)
from fabric.context_managers import settings, hide, show
from fabric.network import (HostConnectionCache, join_host_strings, normalize,
denormalize)
from fabric.io import output_loop
import fabric.network # So I can call patch_object correctly. Sigh.
from fabric.state import env, output, _get_system_username
from fabric.operations import run, sudo
from fabric.decorators import parallel
from utils import *
from server import (server, PORT, RESPONSES, PASSWORDS, CLIENT_PRIVKEY, USER,
CLIENT_PRIVKEY_PASSPHRASE)
class TestParallel(FabricTest):
@server()
@parallel
def test_parallel(self):
"""
Want to do a simple call and respond
"""
env.pool_size = 10
cmd = "ls /simple"
with hide('everything'):
eq_(run(cmd), RESPONSES[cmd])
| Python | 0 |
8f86eacf1b85a0c497f9e8586a59cc19e6a0484f | Stop passing a recorder argument unecessarily in tests | tests/test_pipeline.py | tests/test_pipeline.py | from __future__ import print_function
import pytest
from plumbium.processresult import record, pipeline, call
class DummyRecorder(object):
def write(self, results):
self.results = results
@pytest.fixture
def simple_pipeline():
@record('an_output')
def recorded_function():
call(['echo', 'test output'])
return 'test_result'
def a_pipeline():
recorded_function()
return a_pipeline
@pytest.fixture
def failing_pipeline():
@record('an_output')
def recorded_function():
raise IOError
def a_pipeline():
recorded_function()
return a_pipeline
def test_result(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
pipeline.run('test', simple_pipeline, str(tmpdir))
print(pipeline.results)
assert pipeline.results[0]['an_output'] == 'test_result'
def test_stdout_captured(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
pipeline.run('test', simple_pipeline, str(tmpdir))
proc = pipeline.results[0].as_dict()
assert proc['printed_output'] == 'test output\n'
def test_exception_captured(failing_pipeline, tmpdir):
with tmpdir.as_cwd():
pipeline.run('test', failing_pipeline, str(tmpdir))
proc = pipeline.results[0].as_dict()
assert 'IOError' in proc['exception']
def test_save_filename(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
pipeline.run(
'test',
simple_pipeline,
str(tmpdir),
metadata={'test': 1},
filename='result_file_{metadata[test]:03d}'
)
assert 'result_file_001.tar.gz' in [f.basename for f in tmpdir.listdir()]
| from __future__ import print_function
import pytest
from plumbium.processresult import record, pipeline, call
class DummyRecorder(object):
def write(self, results):
self.results = results
@pytest.fixture
def simple_pipeline():
@record('an_output')
def recorded_function():
call(['echo', 'test output'])
return 'test_result'
def a_pipeline():
recorded_function()
return a_pipeline
@pytest.fixture
def failing_pipeline():
@record('an_output')
def recorded_function():
raise IOError
def a_pipeline():
recorded_function()
return a_pipeline
def test_result(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
pipeline.run('test', simple_pipeline, str(tmpdir))
print(pipeline.results)
assert pipeline.results[0]['an_output'] == 'test_result'
def test_stdout_captured(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
recorder = DummyRecorder()
pipeline.run('test', simple_pipeline, str(tmpdir), recorder=recorder)
proc = pipeline.results[0].as_dict()
assert proc['printed_output'] == 'test output\n'
def test_exception_captured(failing_pipeline, tmpdir):
with tmpdir.as_cwd():
recorder = DummyRecorder()
pipeline.run('test', failing_pipeline, str(tmpdir), recorder=recorder)
proc = pipeline.results[0].as_dict()
assert 'IOError' in proc['exception']
def test_save_filename(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
pipeline.run(
'test',
simple_pipeline,
str(tmpdir),
metadata={'test': 1},
filename='result_file_{metadata[test]:03d}'
)
assert 'result_file_001.tar.gz' in [f.basename for f in tmpdir.listdir()]
| Python | 0.000002 |
7b75f508bf651bdeb57bdc4d263ced26434054c8 | add pct test | tests/test_pvmodule.py | tests/test_pvmodule.py | """
Tests for pvmodules.
"""
from nose.tools import ok_
from pvmismatch.pvmismatch_lib.pvmodule import PVmodule, TCT96, PCT96
def test_calc_mod():
pvmod = PVmodule()
ok_(isinstance(pvmod, PVmodule))
return pvmod
def test_calc_TCT_mod():
pvmod = PVmodule(cell_pos=TCT96)
ok_(isinstance(pvmod, PVmodule))
return pvmod
def test_calc_PCT_mod():
pvmod = PVmodule(cell_pos=PCT96)
ok_(isinstance(pvmod, PVmodule))
return pvmod
if __name__ == "__main__":
test_calc_mod()
test_calc_TCT_mod()
| """
Tests for pvmodules.
"""
from nose.tools import ok_
from pvmismatch.pvmismatch_lib.pvmodule import PVmodule, TCT96
def test_calc_mod():
pvmod = PVmodule()
ok_(isinstance(pvmod, PVmodule))
return pvmod
def test_calc_TCT_mod():
pvmod = PVmodule(cell_pos=TCT96)
ok_(isinstance(pvmod, PVmodule))
return pvmod
if __name__ == "__main__":
test_calc_mod()
test_calc_TCT_mod()
| Python | 0.000014 |
20d1ab60c718869d86deed5410d5aef428042195 | remove unused json import | tests/test_redirect.py | tests/test_redirect.py | import pytest
from urllib.parse import quote
from sanic.response import text, redirect
@pytest.fixture
def redirect_app(app):
@app.route('/redirect_init')
async def redirect_init(request):
return redirect("/redirect_target")
@app.route('/redirect_init_with_301')
async def redirect_init_with_301(request):
return redirect("/redirect_target", status=301)
@app.route('/redirect_target')
async def redirect_target(request):
return text('OK')
@app.route('/1')
def handler(request):
return redirect('/2')
@app.route('/2')
def handler(request):
return redirect('/3')
@app.route('/3')
def handler(request):
return text('OK')
@app.route('/redirect_with_header_injection')
async def redirect_with_header_injection(request):
return redirect("/unsafe\ntest-header: test-value\n\ntest-body")
return app
def test_redirect_default_302(redirect_app):
"""
We expect a 302 default status code and the headers to be set.
"""
request, response = redirect_app.test_client.get(
'/redirect_init',
allow_redirects=False)
assert response.status == 302
assert response.headers["Location"] == "/redirect_target"
assert response.headers["Content-Type"] == 'text/html; charset=utf-8'
def test_redirect_headers_none(redirect_app):
request, response = redirect_app.test_client.get(
uri="/redirect_init",
headers=None,
allow_redirects=False)
assert response.status == 302
assert response.headers["Location"] == "/redirect_target"
def test_redirect_with_301(redirect_app):
"""
Test redirection with a different status code.
"""
request, response = redirect_app.test_client.get(
"/redirect_init_with_301",
allow_redirects=False)
assert response.status == 301
assert response.headers["Location"] == "/redirect_target"
def test_get_then_redirect_follow_redirect(redirect_app):
"""
With `allow_redirects` we expect a 200.
"""
request, response = redirect_app.test_client.get(
"/redirect_init",
allow_redirects=True)
assert response.status == 200
assert response.text == 'OK'
def test_chained_redirect(redirect_app):
"""Test test_client is working for redirection"""
request, response = redirect_app.test_client.get('/1')
assert request.url.endswith('/1')
assert response.status == 200
assert response.text == 'OK'
try:
assert response.url.endswith('/3')
except AttributeError:
assert response.url.path.endswith('/3')
def test_redirect_with_header_injection(redirect_app):
"""
Test redirection to a URL with header and body injections.
"""
request, response = redirect_app.test_client.get(
"/redirect_with_header_injection",
allow_redirects=False)
assert response.status == 302
assert "test-header" not in response.headers
assert not response.text.startswith('test-body')
@pytest.mark.parametrize("test_str", ["sanic-test", "sanictest", "sanic test"])
async def test_redirect_with_params(app, test_client, test_str):
@app.route("/api/v1/test/<test>/")
async def init_handler(request, test):
assert test == test_str
return redirect("/api/v2/test/{}/".format(quote(test)))
@app.route("/api/v2/test/<test>/")
async def target_handler(request, test):
assert test == test_str
return text("OK")
test_cli = await test_client(app)
response = await test_cli.get("/api/v1/test/{}/".format(quote(test_str)))
assert response.status == 200
txt = await response.text()
assert txt == "OK"
| import pytest
import json
from urllib.parse import quote
from sanic.response import text, redirect
@pytest.fixture
def redirect_app(app):
    """App fixture wiring up every redirect route used by the tests below."""
    @app.route('/redirect_init')
    async def redirect_init(request):
        return redirect("/redirect_target")

    @app.route('/redirect_init_with_301')
    async def redirect_init_with_301(request):
        return redirect("/redirect_target", status=301)

    @app.route('/redirect_target')
    async def redirect_target(request):
        return text('OK')

    # Each chained handler gets a unique name: re-using ``handler`` for all
    # three shadowed the earlier definitions (flake8 F811).  Sanic captures
    # the function at decoration time, so renaming is behavior-preserving.
    @app.route('/1')
    def step_one(request):
        return redirect('/2')

    @app.route('/2')
    def step_two(request):
        return redirect('/3')

    @app.route('/3')
    def step_three(request):
        return text('OK')

    @app.route('/redirect_with_header_injection')
    async def redirect_with_header_injection(request):
        return redirect("/unsafe\ntest-header: test-value\n\ntest-body")

    return app
def test_redirect_default_302(redirect_app):
    """The default redirect is a 302 with Location and Content-Type set."""
    _, response = redirect_app.test_client.get(
        '/redirect_init', allow_redirects=False)
    headers = response.headers
    assert response.status == 302
    assert headers["Location"] == "/redirect_target"
    assert headers["Content-Type"] == 'text/html; charset=utf-8'
def test_redirect_headers_none(redirect_app):
    """headers=None on the client call still produces a normal 302."""
    _, response = redirect_app.test_client.get(
        uri="/redirect_init", headers=None, allow_redirects=False)
    assert response.status == 302
    assert response.headers["Location"] == "/redirect_target"
def test_redirect_with_301(redirect_app):
    """An explicit status=301 is honoured instead of the 302 default."""
    _, response = redirect_app.test_client.get(
        "/redirect_init_with_301", allow_redirects=False)
    assert response.status == 301
    assert response.headers["Location"] == "/redirect_target"
def test_get_then_redirect_follow_redirect(redirect_app):
    """With allow_redirects=True the client ends up on the 200 target."""
    _, response = redirect_app.test_client.get(
        "/redirect_init", allow_redirects=True)
    assert response.status == 200
    assert response.text == 'OK'
def test_chained_redirect(redirect_app):
    """The client follows the whole /1 -> /2 -> /3 redirect chain."""
    req, resp = redirect_app.test_client.get('/1')
    assert req.url.endswith('/1')
    assert resp.status == 200
    assert resp.text == 'OK'
    # resp.url is a str on older HTTP clients and a URL object (exposing
    # .path) on newer ones; handle both shapes.
    try:
        final = resp.url
        assert final.endswith('/3')
    except AttributeError:
        assert resp.url.path.endswith('/3')
def test_redirect_with_header_injection(redirect_app):
    """Newlines in the redirect target must not smuggle headers or a body."""
    _, response = redirect_app.test_client.get(
        "/redirect_with_header_injection", allow_redirects=False)
    assert response.status == 302
    assert "test-header" not in response.headers
    assert not response.text.startswith('test-body')
@pytest.mark.parametrize("test_str", ["sanic-test", "sanictest", "sanic test"])
async def test_redirect_with_params(app, test_client, test_str):
@app.route("/api/v1/test/<test>/")
async def init_handler(request, test):
assert test == test_str
return redirect("/api/v2/test/{}/".format(quote(test)))
@app.route("/api/v2/test/<test>/")
async def target_handler(request, test):
assert test == test_str
return text("OK")
test_cli = await test_client(app)
response = await test_cli.get("/api/v1/test/{}/".format(quote(test_str)))
assert response.status == 200
txt = await response.text()
assert txt == "OK"
| Python | 0.000002 |
9744226621e27d4bd5d19a52b75b718e86bfef87 | Add extra filter for equipment | lims/equipment/views.py | lims/equipment/views.py |
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.exceptions import PermissionDenied
import django_filters
from lims.permissions.permissions import IsInAdminGroupOrRO
from .models import Equipment, EquipmentReservation
from .serializers import EquipmentSerializer, EquipmentReservationSerializer
class EquipmentViewSet(viewsets.ModelViewSet):
    """CRUD API for :class:`Equipment`.

    Reads are open; writes require admin-group membership (enforced by
    ``IsInAdminGroupOrRO``).
    """
    queryset = Equipment.objects.all()
    serializer_class = EquipmentSerializer
    # Exact-match query-string filters; free-text ?search= covers the name.
    filter_fields = ('can_reserve', 'status',)
    search_fields = ('name',)
    permission_classes = (IsInAdminGroupOrRO,)
class EquipmentReservationFilter(django_filters.FilterSet):
    """Query-string filters for reservations.

    Besides exact matches, ``start__gte`` and ``end__lte`` are exposed so
    clients can ask for reservations within a time window.
    """
    class Meta:
        model = EquipmentReservation
        fields = {
            'id': ['exact'],
            'start': ['exact', 'gte'],
            'end': ['exact', 'lte'],
            'equipment_reserved': ['exact'],
        }
class EquipmentReservationViewSet(viewsets.ModelViewSet):
    """CRUD API for equipment reservations.

    Any authenticated user may create a reservation; reservations created
    by staff are auto-confirmed.  Only the reserving user or staff may
    update or delete a reservation.
    """
    queryset = EquipmentReservation.objects.all()
    serializer_class = EquipmentReservationSerializer
    filter_class = EquipmentReservationFilter

    def _is_staff(self, user):
        # Membership of the "staff" Django group (not User.is_staff).
        return user.groups.filter(name='staff').exists()

    def perform_create(self, serializer):
        # Staff reservations skip the confirmation step entirely.
        if self._is_staff(self.request.user):
            serializer.validated_data['is_confirmed'] = True
            serializer.validated_data['confirmed_by'] = self.request.user
        serializer.save(reserved_by=self.request.user)

    def perform_update(self, serializer):
        if (serializer.instance.reserved_by == self.request.user or
                self._is_staff(self.request.user)):
            serializer.save()
        else:
            raise PermissionDenied()

    def destroy(self, request, pk=None):
        # Fetch once: the original called get_object() twice, doing two DB
        # lookups (and two object-permission checks) per delete.
        reservation = self.get_object()
        if (request.user == reservation.reserved_by or
                self._is_staff(request.user)):
            return super().destroy(request, reservation.id)
        return Response(
            {'message': 'You must have permission to delete'}, status=403)
|
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.exceptions import PermissionDenied
import django_filters
from lims.permissions.permissions import IsInAdminGroupOrRO
from .models import Equipment, EquipmentReservation
from .serializers import EquipmentSerializer, EquipmentReservationSerializer
class EquipmentViewSet(viewsets.ModelViewSet):
    """CRUD API for :class:`Equipment`.

    Reads are open; writes require admin-group membership (enforced by
    ``IsInAdminGroupOrRO``).
    """
    queryset = Equipment.objects.all()
    serializer_class = EquipmentSerializer
    # Exact-match query-string filter; free-text ?search= covers the name.
    filter_fields = ('can_reserve',)
    search_fields = ('name',)
    permission_classes = (IsInAdminGroupOrRO,)
class EquipmentReservationFilter(django_filters.FilterSet):
    """Query-string filters for reservations.

    Besides exact matches, ``start__gte`` and ``end__lte`` are exposed so
    clients can ask for reservations within a time window.
    """
    class Meta:
        model = EquipmentReservation
        fields = {
            'id': ['exact'],
            'start': ['exact', 'gte'],
            'end': ['exact', 'lte'],
            'equipment_reserved': ['exact'],
        }
class EquipmentReservationViewSet(viewsets.ModelViewSet):
    """CRUD API for equipment reservations.

    Any authenticated user may create a reservation; reservations created
    by staff are auto-confirmed.  Only the reserving user or staff may
    update or delete a reservation.
    """
    queryset = EquipmentReservation.objects.all()
    serializer_class = EquipmentReservationSerializer
    filter_class = EquipmentReservationFilter

    def _is_staff(self, user):
        # Membership of the "staff" Django group (not User.is_staff).
        return user.groups.filter(name='staff').exists()

    def perform_create(self, serializer):
        # Staff reservations skip the confirmation step entirely.
        if self._is_staff(self.request.user):
            serializer.validated_data['is_confirmed'] = True
            serializer.validated_data['confirmed_by'] = self.request.user
        serializer.save(reserved_by=self.request.user)

    def perform_update(self, serializer):
        if (serializer.instance.reserved_by == self.request.user or
                self._is_staff(self.request.user)):
            serializer.save()
        else:
            raise PermissionDenied()

    def destroy(self, request, pk=None):
        # Fetch once: the original called get_object() twice, doing two DB
        # lookups (and two object-permission checks) per delete.
        reservation = self.get_object()
        if (request.user == reservation.reserved_by or
                self._is_staff(request.user)):
            return super().destroy(request, reservation.id)
        return Response(
            {'message': 'You must have permission to delete'}, status=403)
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.