| commit (string, length 40) | subject (string, length 1-3.25k) | old_file (string, length 4-311) | new_file (string, length 4-311) | old_contents (string, length 0-26.3k) | lang (string, 3 classes) | proba (float64, 0-1) | diff (string, length 0-7.82k) |
|---|---|---|---|---|---|---|---|
1e26e986ed1f5fa2050e7a04363939a8c83c1043
|
fix test of SSD.predict
|
tests/links_tests/model_tests/ssd_tests/test_ssd.py
|
tests/links_tests/model_tests/ssd_tests/test_ssd.py
|
import numpy as np
import unittest

import chainer
from chainer import testing
from chainer.testing import attr

from chainercv.links.model.ssd import Multibox
from chainercv.links.model.ssd import SSD


def _random_array(xp, shape):
    return xp.array(
        np.random.uniform(-1, 1, size=shape), dtype=np.float32)


class DummyExtractor(chainer.Link):
    insize = 32
    grids = (10, 4, 1)

    def __call__(self, x):
        n_sample = x.shape[0]
        n_dims = (32, 16, 8)
        return [
            chainer.Variable(
                _random_array(self.xp, (n_sample, n_dim, grid, grid)))
            for grid, n_dim in zip(self.grids, n_dims)]


class DummySSD(SSD):

    def __init__(self, n_fg_class):
        super(DummySSD, self).__init__(
            extractor=DummyExtractor(),
            multibox=Multibox(
                n_class=n_fg_class + 1,
                aspect_ratios=((2,), (2, 3), (2,))),
            steps=(0.1, 0.25, 1),
            sizes=(0.1, 0.25, 1, 1.2),
            mean=(0, 1, 2))


@testing.parameterize(
    {'n_fg_class': 1},
    {'n_fg_class': 5},
    {'n_fg_class': 20},
)
class TestSSD(unittest.TestCase):

    def setUp(self):
        self.link = DummySSD(n_fg_class=self.n_fg_class)
        self.n_bbox = 10 * 10 * 4 + 4 * 4 * 6 + 1 * 1 * 4

    def _check_default_bbox(self):
        self.assertIsInstance(self.link._default_bbox, self.link.xp.ndarray)
        self.assertEqual(self.link._default_bbox.shape, (self.n_bbox, 4))

    def test_default_bbox_cpu(self):
        self._check_default_bbox()

    @attr.gpu
    def test_default_bbox_gpu(self):
        self.link.to_gpu()
        self._check_default_bbox()

    def _check_decode(self):
        loc = _random_array(self.link.xp, (1, self.n_bbox, 4))
        conf = _random_array(
            self.link.xp, (1, self.n_bbox, self.n_fg_class + 1))
        bboxes, scores = self.link._decode(loc, conf)
        self.assertIsInstance(bboxes, self.link.xp.ndarray)
        self.assertEqual(bboxes.shape, (1, self.n_bbox, 4))
        self.assertIsInstance(scores, self.link.xp.ndarray)
        self.assertEqual(scores.shape, (1, self.n_bbox, self.n_fg_class + 1))

    def test_decode_cpu(self):
        self._check_decode()

    @attr.gpu
    def test_decode_gpu(self):
        self.link.to_gpu()
        self._check_decode()

    def _check_call(self):
        x = _random_array(self.link.xp, (1, 3, 32, 32))
        loc, conf = self.link(x)
        self.assertIsInstance(loc, chainer.Variable)
        self.assertIsInstance(loc.data, self.link.xp.ndarray)
        self.assertEqual(loc.shape, (1, self.n_bbox, 4))
        self.assertIsInstance(conf, chainer.Variable)
        self.assertIsInstance(conf.data, self.link.xp.ndarray)
        self.assertEqual(conf.shape, (1, self.n_bbox, self.n_fg_class + 1))

    def test_call_cpu(self):
        self._check_call()

    @attr.gpu
    def test_call_gpu(self):
        self.link.to_gpu()
        self._check_call()

    def _check_suppress(self):
        raw_bbox = _random_array(self.link.xp, (self.n_bbox, 4))
        raw_score = _random_array(
            self.link.xp, (self.n_bbox, self.n_fg_class + 1))
        bbox, label, score = self.link._suppress(raw_bbox, raw_score)
        self.assertIsInstance(bbox, self.link.xp.ndarray)
        self.assertEqual(bbox.ndim, 2)
        self.assertLessEqual(bbox.shape[0], self.n_bbox * self.n_fg_class)
        self.assertEqual(bbox.shape[1], 4)
        self.assertIsInstance(label, self.link.xp.ndarray)
        self.assertEqual(label.ndim, 1)
        self.assertEqual(label.shape[0], bbox.shape[0])
        self.assertIsInstance(score, self.link.xp.ndarray)
        self.assertEqual(score.ndim, 1)
        self.assertEqual(score.shape[0], bbox.shape[0])

    def test_suppress_cpu(self):
        self._check_suppress()

    @attr.gpu
    def test_suppress_gpu(self):
        self.link.to_gpu()
        self._check_suppress()

    def test_prepare(self):
        img = np.random.randint(0, 255, size=(3, 480, 640))
        img = self.link._prepare(img)
        self.assertEqual(img.shape, (3, self.link.insize, self.link.insize))

    def test_use_preset(self):
        self.link.nms_thresh = 0
        self.link.score_thresh = 0
        self.link.use_preset('visualize')
        self.assertEqual(self.link.nms_thresh, 0.45)
        self.assertEqual(self.link.score_thresh, 0.6)

        self.link.nms_thresh = 0
        self.link.score_thresh = 0
        self.link.use_preset('evaluate')
        self.assertEqual(self.link.nms_thresh, 0.45)
        self.assertEqual(self.link.score_thresh, 0.01)

        with self.assertRaises(ValueError):
            self.link.use_preset('unknown')

    def _check_predict(self):
        imgs = [
            _random_array(self.link.xp, (3, 640, 480)),
            _random_array(self.link.xp, (3, 320, 320))]
        bboxes, labels, scores = self.link.predict(imgs)
        self.assertEqual(len(bboxes), len(imgs))
        self.assertEqual(len(labels), len(imgs))
        self.assertEqual(len(scores), len(imgs))
        for bbox, label, score in zip(bboxes, labels, scores):
            self.assertIsInstance(bbox, self.link.xp.ndarray)
            self.assertEqual(bbox.ndim, 2)
            self.assertLessEqual(bbox.shape[0], self.n_bbox * self.n_fg_class)
            self.assertEqual(bbox.shape[1], 4)
            self.assertIsInstance(label, self.link.xp.ndarray)
            self.assertEqual(label.ndim, 1)
            self.assertEqual(label.shape[0], bbox.shape[0])
            self.assertIsInstance(score, self.link.xp.ndarray)
            self.assertEqual(score.ndim, 1)
            self.assertEqual(score.shape[0], bbox.shape[0])

    def test_predict_cpu(self):
        self._check_predict()

    @attr.gpu
    def test_predict_gpu(self):
        self.link.to_gpu()
        self._check_predict()


testing.run_module(__name__, __file__)
|
Python
| 0.000009
|
@@ -4757,35 +4757,25 @@
andom_array(
-self.link.x
+n
p, (3, 640,
@@ -4803,35 +4803,25 @@
andom_array(
-self.link.x
+n
p, (3, 320,
|
18b3c099feedb1eb10eeb6e8c358c77e5e7dd264
|
Add probability output to logistic regression
|
ML/regression.py
|
ML/regression.py
|
"""
Linear and logistic regression including regularization.
Currently, only L2 regularization is available.
Includes closed form, gradient descent, and SGD solvers.
"""
import numpy as np
from descentmethods import gradientdescent
class BaseRegression:
"""Base Class for implimenting Linear Regression"""
def __init__(self):
"""
Attributes::
_learned (bool): Keeps track of if Linear Regression has been fit
_weights (np.ndarray): vector of weights for linear regression
"""
self._learned = False
self._weights = np.NaN
@property
def learned(self):
return self._learned
@property
def weights(self):
return self._weights
@learned.setter
def learned(self, value):
self._learned = value
@weights.setter
def weights(self, value):
self._weights = value
class LinearRegression(BaseRegression):
"""Class for implimenting Linear Regression"""
def predict(self, X):
"""
Args:
X (np.ndarray): Test data of shape[n_samples, n_features]
Returns:
np.ndarray: shape[n_samples, 1], predicted values
Raises:
ValueError if model has not been fit
"""
if not self.learned:
raise NameError('Fit model first')
# Add column of 1s to X for bias
X = np.asarray(X)
X = np.column_stack((np.ones(np.shape(X)[0]), X))
prediction = np.dot(X, np.transpose(self.weights))
return prediction
def grad(self, X, y, weights):
"""
Computes the gradient (needed if using gradient descent).
Args:
X (np.ndarray): Training data of shape[n_samples, n_features]
y (np.ndarray): Target values of shape[n_samples, 1]
weights (np.ndarray): Optional use of gradient descent to
calculate weights.
if False, uses closed form solution to calculate weights.
Returns:
np.array: the gradient of the linear regression cost function
"""
hypothesis = np.dot(X, weights) - y
gradient = np.dot(np.transpose(X), hypothesis) / np.size(y)
return gradient
def fit(self, X, y, gradient=False, reg_parameter=0):
"""
Currently, only L2 regularization is implemented.
Args:
X (np.ndarray): Training data of shape[n_samples, n_features]
y (np.ndarray): Target values of shape[n_samples, 1]
gradient (bool): Optional use of gradient descent to
calculate weights.
if False, uses closed form solution to calculate weights.
reg_parameter (float): float to determine strength of
regulatrization penalty if 0, then no linear regression
without regularization is performed.
Returns: an instance of self
"""
y = np.asarray(y)
X = np.asarray(X)
X = np.column_stack((np.ones(np.shape(X)[0]), X))
if gradient:
self.weights = gradientdescent(X, y, self.grad,
reg_param=reg_parameter)
else:
# Calculate weights (closed form solution)
XtX_lambaI = np.dot(np.transpose(X), X) + reg_parameter * \
np.identity(len(np.dot(np.transpose(X), X)))
self.weights = np.dot(np.linalg.pinv(XtX_lambaI),
np.dot(np.transpose(X), y))
self.learned = True
return self
class LogisticRegression(BaseRegression):
"""Logistic Regression classifier with gradient descent implementation"""
@staticmethod
def logistic_function(logistic_input):
"""
Args:
logistic_input (np.ndarray): array of shape[n_samples, 1]
Returns:
np.ndarray: shape[n_samples, 1], logistic transformation of data
"""
return 1 / (1 + np.exp(-logistic_input))
def grad(self, X, y, weights):
"""
Args:
X (np.ndarray): Training data of shape[n_samples, n_features]
y (np.ndarray): Target values of shape[n_samples, 1]
weights (np.ndarray): Optional use of gradient descent to
calculate weights.
if False, uses closed form solution to calculate weights.
Returns:
np.ndarray: the gradient of the linear regression cost function
"""
hypothesis = self.logistic_function(np.dot(X, weights)) - y
gradient = np.dot(np.transpose(X), hypothesis) / np.size(y)
return gradient
def predict(self, X):
"""
Args:
X (np.ndarray): Training data of shape[n_samples, n_features]
Returns:
np.ndarray: shape[n_samples, 1], the predicted values
Raises:
ValueError if model has not been fit
"""
if not self.learned:
raise NameError('Fit model first')
# Add column of 1s to X for bias
X = np.asarray(X)
X = np.column_stack((np.ones(np.shape(X)[0]), X))
prediction = self.logistic_function(np.dot(X,
np.transpose(self.weights)))
return np.round(prediction)
def fit(self, X, y, reg_parameter=0):
"""
Currently, only L2 regularization is implemented.
Args:
X (np.ndarray): Training data of shape[n_samples, n_features]
y (np.ndarray): Target values of shape[n_samples, 1]
reg_parameter (float): float to determine strength of
regulatrization penalty.
if 0, then no linear regression without regularization
is performed
Returns: an instance of self
Raises:
ValueError if y contains values other than 0 and 1
"""
y = np.asarray(y)
if False in np.in1d(y, [0, 1]):
raise NameError('y required to contain only 0 and 1')
X = np.asarray(X)
X = np.column_stack((np.ones(np.shape(X)[0]), X))
self.weights = gradientdescent(X, y, self.grad,
reg_param=reg_parameter)
self.learned = True
return self
|
Python
| 0.999999
|
@@ -886,16 +886,17 @@
value%0A%0A
+%0A
class Li
@@ -4659,32 +4659,51 @@
predict(self, X
+, probability=False
):%0A %22%22%22%0A
@@ -4780,32 +4780,146 @@
les, n_features%5D
+%0A probability (bool): If True, return probabilities%0A If False, return class predictions.
%0A%0A Return
@@ -5396,24 +5396,78 @@
.weights)))%0A
+ if probability:%0A return prediction%0A
retu
|
b9c953cffd0c9961c22c0c671648f5e5a3e4426c
|
Update server
|
alchemy_server.py
|
alchemy_server.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 30 01:14:12 2017

@author: colm
"""
from flask import Flask, jsonify
import os
from models import Charity, Logo, Description
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import pandas as pd

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['SQLALCHEMY_DATABASE_URI']


@app.route("/gci")
def gci():
    global session

    query = session.query(Charity)\
        .leftjoin(Logo, Charity.name == Logo.name)\
        .leftjoin(Description, Charity.name == Description.name)
    charities = pd.read_sql(query.statment, session.bind)
    query = session.query(Charity.category).distinct()
    categories = pd.read_sql(query.statment, session.bind)
    payload = {'categories':categories.values(), 'charities':charities.to_dict('index')}
    return jsonify(payload)


if __name__ == "__main__":
    db = create_engine(os.environ['SQLALCHEMY_DATABASE_URI'])
    Session = sessionmaker(bind=db)
    session = Session()

    app.run(host='0.0.0.0')
    print('test')
|
Python
| 0.000001
|
@@ -440,20 +440,16 @@
session%0A
-
%0A que
@@ -474,24 +474,79 @@
(Charity
+, Description.description, Logo.logo_url, Logo.has_face
)%5C%0A .
leftjoin
@@ -533,28 +533,24 @@
face)%5C%0A .
-left
join(Logo, C
@@ -585,12 +585,8 @@
.
-left
join
@@ -633,20 +633,16 @@
n.name)%0A
-
%0A cha
@@ -664,38 +664,43 @@
d_sql(query.stat
+e
ment,
+con=
session.bind)%0A
@@ -691,30 +691,143 @@
session.bind
-)%0A
+, index_col = 'name')%0A charities = charities%5Bcharities%5B'has_face'%5D == False%5D%0A charities.drop('has_face', axis=1)%0A
%0A query =
@@ -909,21 +909,28 @@
ery.stat
+e
ment,
+ con =
session
@@ -936,24 +936,91 @@
n.bind)%0A
+categories = categories%5B~categories%5B'category'%5D.str.contains(',')%5D%0A
%0A payload
@@ -1053,16 +1053,23 @@
s.values
+.tolist
(), 'cha
@@ -1283,20 +1283,16 @@
ssion()%0A
-
%0A app
@@ -1315,23 +1315,4 @@
.0')
-%0A print('test')%0A
|
bdfcbe357950106e369a3e5fd25b2bdba7cc7f7c
|
add missing coverage
|
test_log_utils.py
|
test_log_utils.py
|
'''
Created on Oct 1, 2015

@author: kashefy
'''
import os
import sys
import shutil
import tempfile
from nose.tools import assert_equal, assert_false, \
    assert_is_instance, assert_is_none, assert_less, assert_true

import log_utils as lu

CURRENT_MODULE_PATH = os.path.abspath(sys.modules[__name__].__file__)
TEST_DATA_DIRNAME = 'test_data'
TEST_LOG_FILENAME = 'caffe.hostname.username.log.INFO.20150917-163712.31405'


class TestPID:

    def test_pid_from_str(self):
        n = 26943
        res = lu.pid_from_str('%s' % n)
        assert_is_instance(res, int)
        assert_equal(res, n)

    def test_pid_from_str_neg(self):
        n = -26943
        res = lu.pid_from_str('%s' % n)
        assert_is_instance(res, int)
        assert_equal(res, n)

    def test_pid_from_str_invalid_alpha(self):
        n = 26943
        x = ['a%s' % n, '%sa' % n, '269a43']
        for s in x:
            res = lu.pid_from_str(s)
            assert_is_instance(res, int)
            assert_equal(res, -1)

    def test_pid_from_logname(self):
        s = 'caffe.host.user.log.INFO.20151001-132750.26943'
        res = lu.pid_from_logname(s)
        assert_is_instance(res, int)
        assert_equal(res, 26943)

        n = 77775
        s = 'log.%s' % n
        res = lu.pid_from_logname(s)
        assert_is_instance(res, int)
        assert_equal(res, n)

    def test_pid_from_logname_invalid(self):
        s = 'caffe.host.user.log.INFO.20151001-132750.26943.txt'
        res = lu.pid_from_logname(s)
        assert_is_instance(res, int)
        assert_equal(res, -1)

    def test_read_pid(self):
        fpath = os.path.join(os.path.dirname(CURRENT_MODULE_PATH),
                             TEST_DATA_DIRNAME,
                             TEST_LOG_FILENAME)
        result = lu.read_pid(fpath)
        assert_is_instance(result, int)
        assert_equal(result, 31405)

    def test_read_pid_from_content(self):
        fpath = os.path.join(os.path.dirname(CURRENT_MODULE_PATH),
                             TEST_DATA_DIRNAME,
                             TEST_LOG_FILENAME)
        path_temp_dir = tempfile.mkdtemp()
        fpath2 = os.path.join(path_temp_dir, "foo.txt")
        shutil.copyfile(fpath, fpath2)
        try:
            assert_less(lu.pid_from_logname(fpath2), 0)
            result = lu.read_pid(fpath2)
            assert_is_instance(result, int)
            assert_equal(result, 31405)
        except Exception:
            pass
        shutil.rmtree(path_temp_dir)


class TestCaffeLog:

    @classmethod
    def setup_class(self):
        self.dir_tmp = tempfile.mkdtemp()
        self.path_real_log = os.path.join(os.path.dirname(CURRENT_MODULE_PATH),
                                          TEST_DATA_DIRNAME,
                                          TEST_LOG_FILENAME)

    @classmethod
    def teardown_class(self):
        shutil.rmtree(self.dir_tmp)

    def test_is_caffe_log_invalid_dir(self):
        assert_true(os.path.isdir(self.dir_tmp))
        assert_false(lu.is_caffe_log(self.dir_tmp))

    def test_is_caffe_log(self):
        assert_true(lu.is_caffe_log(self.path_real_log))

    def test_is_caffe_log_invalid_prefix(self):
        fpath = os.path.join(self.dir_tmp,
                             "foo.hostname.username.log.INFO.20150917-163712.31405")
        with open(fpath, 'w') as f:
            f.write('log file')
        assert_false(lu.is_caffe_log(fpath))

    def test_is_caffe_log_invalid_content(self):
        fpath = os.path.join(self.dir_tmp,
                             "caffe.hostname.username.log.INFO.20150917-163712.31405")
        with open(fpath, 'w') as f:
            f.write('foo')
        assert_false(lu.is_caffe_log(fpath))

    def test_is_caffe_info_log(self):
        assert_true(lu.is_caffe_log(self.path_real_log))

    def test_is_caffe_info_log_invalid_fname(self):
        fpath = os.path.join(self.dir_tmp,
                             "foo.hostname.username.log.ERROR.20150917-163712.31405")
        with open(fpath, 'w') as f:
            f.write('log file')
        assert_false(lu.is_caffe_info_log(fpath))


class TestFindLine:

    @classmethod
    def setup_class(self):
        self.dir_tmp = tempfile.mkdtemp()

    @classmethod
    def teardown_class(self):
        shutil.rmtree(self.dir_tmp)

    def test_find_line(self):
        fpath = os.path.join(self.dir_tmp, "foo.txt")
        with open(fpath, 'w') as f:
            f.write('line one\n')
            f.write('line two\n')
            f.write('LINE x\n')
            f.write('line y\n')
            f.write('last line\n')
        assert_is_none(lu.find_line(fpath, 'hello'))
        assert_equal(lu.find_line(fpath, 'line'), 'line one' + os.linesep)
        assert_equal(lu.find_line(fpath, 'LINE'), 'LINE x' + os.linesep)
|
Python
| 0.000005
|
@@ -143,16 +143,31 @@
t_false,
+ assert_raises,
%5C%0A a
@@ -2718,32 +2718,530 @@
r)%0A %0A
+ def test_read_pid_invalid(self):%0A %0A path_temp_dir = tempfile.mkdtemp()%0A fpath = os.path.join(path_temp_dir, TEST_LOG_FILENAME)%0A %0A with open(fpath, 'w') as f:%0A f.write('log file')%0A %0A assert_true(lu.is_caffe_info_log(fpath))%0A %0A fpath2 = os.path.join(path_temp_dir, %22foo.txt%22)%0A shutil.copyfile(fpath, fpath2)%0A %0A assert_raises(IOError, lu.read_pid, fpath2)%0A %0A shutil.rmtree(path_temp_dir)%0A
%0Aclass T
|
434e459059bba2a1e52e953813caae532a3cb16b
|
Update test_consume_4
|
test_wordcount.py
|
test_wordcount.py
|
import os.path
import tempfile

import wordcount_lib


def _make_testfile(filename, data):
    "Make a temp file containing the given data; return full path to file."
    tempdir = tempfile.mkdtemp(prefix='wordcounttest_')
    testfile = os.path.join(tempdir, filename)
    with open(testfile, 'wt') as fp:
        fp.write(data)
    return testfile


def test_consume_1():
    # do a basic test of the consume function.
    testfile = _make_testfile('sometext.txt', 'a b cc\nddd')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 10
    assert words == 4
    assert lines == 2


def test_consume_2():
    # do another basic test of the consume function.
    testfile = _make_testfile('sometext.txt', 'a\nb\ncc\nddd\ne')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 12 # includes whitespace in char count
    assert words == 5
    assert lines == 5


def test_consume_3():
    # check something tricky: whitespace at beginning & end of line
    testfile = _make_testfile('sometext.txt', ' a b c ')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 7 # includes whitespace in char count
    assert words == 3
    assert lines == 1


def test_consume_4():
    # check something tricky: whitespace at beginning & end of line
    testfile = _make_testfile('sometext.txt', ' a b c d e')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 9 # includes whitespace in char count
    assert words == 5
    assert lines == 1
|
Python
| 0.000003
|
@@ -1502,9 +1502,10 @@
==
-9
+10
|
2fa092add3508b774c58e880089c18c3275df840
|
Set block_align on target population if given
|
backend/populate_targets.py
|
backend/populate_targets.py
|
import django
import os
import yaml

from backend.settings import BASE_DIR
from django.db import IntegrityError

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
django.setup()

from breach.models import Target


def create_target(target):
    method = ''
    for m in Target.METHOD_CHOICES:
        if target['method'] == m[1]:
            method = m[0]
            break
    if method:
        target['method'] = method
    else:
        print '[!] Invalid method for target "{}".'.format(target['name'])
        return

    target_args = {
        'name': target['name'],
        'endpoint': target['endpoint'],
        'prefix': target['prefix'],
        'alphabet': target['alphabet'],
        'secretlength': target['secretlength'],
        'alignmentalphabet': target['alignmentalphabet'],
        'recordscardinality': target['recordscardinality'],
        'method': target['method']
    }

    if 'maxreflectionlength' in target:
        target_args['maxreflectionlength'] = target['maxreflectionlength']

    t = Target(**target_args)
    t.save()

    print '''Created Target:
\tname: {}
\tendpoint: {}
\tprefix: {}
\talphabet: {}
\tsecretlength: {}
\talignmentalphabet: {}
\trecordscardinality: {}
\tmethod: {}'''.format(
        t.name,
        t.endpoint,
        t.prefix,
        t.alphabet,
        t.secretlength,
        t.alignmentalphabet,
        t.recordscardinality,
        t.method
    )


if __name__ == '__main__':
    try:
        with open(os.path.join(BASE_DIR, 'target_config.yml'), 'r') as ymlconf:
            cfg = yaml.load(ymlconf)
    except IOError, err:
        print 'IOError: %s' % err
        exit(1)

    targets = cfg.items()
    for t in targets:
        target = t[1]
        target['name'] = t[0]
        try:
            create_target(target)
        except (IntegrityError, ValueError), err:
            if isinstance(err, IntegrityError):
                print '[!] Target "{}" already exists.'.format(target['name'])
            elif isinstance(err, ValueError):
                print '[!] Invalid parameters for target "{}".'.format(target['name'])
|
Python
| 0.000001
|
@@ -1024,16 +1024,107 @@
length'%5D
+%0A if 'block_align' in target:%0A target_args%5B'block_align'%5D = target%5B'block_align'%5D
%0A%0A t
|
829941e9d4675645752fa207c461dd596da6264c
|
refactor html template selection
|
satchmo/apps/satchmo_store/mail.py
|
satchmo/apps/satchmo_store/mail.py
|
from django.conf import settings
from django.template import loader, Context, TemplateDoesNotExist
from livesettings import config_value
import os.path
from socket import error as SocketError
import logging

log = logging.getLogger('satchmo_store.mail')

if "mailer" in settings.INSTALLED_APPS:
    from mailer import send_mail
else:
    from django.core.mail import send_mail

from django.core.mail import EmailMultiAlternatives


class NoRecipientsException(StandardError):
    pass


def send_store_mail(subject, context, template, recipients_list=None,
                    format_subject=False, send_to_store=False,
                    fail_silently=False):
    """
    :parameter: subject: A string.
    :parameter: format_subject: Determines whether the *subject* parameter
        is formatted. Only the %(shop_name)s specifier is supported now.
    :parameter: context: A dictionary to use when rendering the message body.
        This dictionary overwrites an internal dictionary which provides the key
        `shop_name`.
    :parameter: template: The path of the template to use when rendering the
        message body.

    If store config is set to enable HTML emails, will attempt to find the HTML
    template and send it.
    """
    from satchmo_store.shop.models import Config

    shop_config = Config.objects.get_current()
    shop_email = shop_config.store_email
    shop_name = shop_config.store_name
    send_html = config_value('SHOP', 'HTML_EMAIL')

    if not shop_email:
        log.warn('No email address configured for the shop. Using admin settings.')
        shop_email = settings.ADMINS[0][1]

    c_dict = {'shop_name': shop_name}

    if format_subject:
        subject = subject % c_dict

    c_dict.update(context)
    c = Context(c_dict)

    t = loader.get_template(template)
    body = t.render(c)

    if send_html:
        base_dir, base_name = os.path.split(template)
        file_name, ext = os.path.splitext(base_name)
        template_name = file_name + '.html'
        if settings.DEBUG:
            log.info("Attempting to send html mail.")
        try:
            html_t = loader.get_template(os.path.join(base_dir, template_name))
            html_body = html_t.render(c)
        except TemplateDoesNotExist:
            log.warn('Unable to find html email template %s. Falling back to text only email.' % os.path.join(base_dir, template_name))
            send_html = False

    recipients = recipients_list or []

    if send_to_store:
        recipients.append(shop_email)

    if not recipients:
        raise NoRecipientsException

    try:
        if send_html:
            msg = EmailMultiAlternatives(subject, body, shop_email, recipients)
            msg.attach_alternative(html_body, "text/html")
            msg.send(fail_silently=fail_silently)
        else:
            send_mail(subject, body, shop_email, recipients,
                      fail_silently=fail_silently)
    except SocketError, e:
        if settings.DEBUG:
            log.error('Error sending mail: %s' % e)
            log.warn('Ignoring email error, since you are running in DEBUG mode. Email was:\nTo:%s\nSubject: %s\n---\n%s', ",".join(recipients), subject, body)
        else:
            log.fatal('Error sending mail: %s' % e)
            raise IOError('Could not send email. Please make sure your email settings are correct and that you are not being blocked by your ISP.')
|
Python
| 0
|
@@ -1767,70 +1767,8 @@
t)%0A%0A
- t = loader.get_template(template)%0A body = t.render(c)%0A%0A
@@ -1773,32 +1773,32 @@
if send_html:%0A
+
base_dir
@@ -2037,21 +2037,16 @@
-html_
t = load
@@ -2104,49 +2104,8 @@
e))%0A
- html_body = html_t.render(c)%0A
@@ -2304,16 +2304,105 @@
False%0A%0A
+ if not send_html:%0A t = loader.get_template(template)%0A%0A body = t.render(c)%0A%0A
reci
@@ -2665,16 +2665,16 @@
pients)%0A
+
@@ -2704,13 +2704,8 @@
ive(
-html_
body
|
e2b28182e60f5a8fda474ac27e4dbcdaab12c016
|
Add default Sentry site so we can tell diff between prod and staging
|
config/settings/production.py
|
config/settings/production.py
|
# -*- coding: utf-8 -*-
from .base import *

import dj_database_url


#######################
# DEBUG CONFIGURATION #
#######################

# https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-DEBUG
DEBUG = env('DJANGO_DEBUG', False)


##########################
# DATABASE CONFIGURATION #
##########################

# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
    'default': dj_database_url.config()
}

# https://docs.djangoproject.com/en/1.8/topics/db/transactions/#tying-transactions-to-http-requests
DATABASES['default']['ATOMIC_REQUESTS'] = True


#######################
# CACHE CONFIGURATION #
#######################

# https://docs.djangoproject.com/en/1.8/ref/settings/#caches
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}


##########################
# TEMPLATE CONFIGURATION #
##########################

# https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-TEMPLATES
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [path.join(APPS_DIR, 'templates')],
        'OPTIONS': {
            'loaders': [
                ('django.template.loaders.cached.Loader', [
                    'django.template.loaders.filesystem.Loader',
                    'django.template.loaders.app_directories.Loader',
                ])
            ],
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]


#############################
# STATIC FILE CONFIGURATION #
#############################

# https://docs.djangoproject.com/en/1.8/ref/settings/#staticfiles-storage
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'


######################
# HOST CONFIGURATION #
######################

# https://docs.djangoproject.com/en/1.8/ref/settings/#allowed-hosts
# https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production
ALLOWED_HOSTS = ['*.texastribune.org']


#########################
# LOGGING CONFIGURATION #
#########################

LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'root': {
        'level': 'WARNING',
        'handlers': ['sentry'],
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
        },
    },
    'handlers': {
        'sentry': {
            'level': 'ERROR',
            'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        }
    },
    'loggers': {
        'django.db.backends': {
            'level': 'ERROR',
            'handlers': ['console'],
            'propagate': False,
        },
        'raven': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'sentry.errors': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
    },
}


##########################
# SECURITY CONFIGURATION #
##########################

# https://docs.djangoproject.com/en/1.8/ref/settings/#secret-key
# This key should only be used for development and testing!
SECRET_KEY = env('SECRET_KEY')

# https://docs.djangoproject.com/en/1.8/ref/settings/#secure-content-type-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = True

# https://docs.djangoproject.com/en/1.8/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True

# https://docs.djangoproject.com/en/1.8/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True

# https://docs.djangoproject.com/en/1.8/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True

# https://docs.djangoproject.com/en/1.8/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True

# https://docs.djangoproject.com/en/1.8/ref/settings/#x-frame-options
X_FRAME_OPTIONS = 'DENY'


########################
# SENTRY CONFIGURATION #
########################

# https://getsentry.com/for/django/
RAVEN_CONFIG = {
    'dsn': env('SENTRY_DSN'),
}

INSTALLED_APPS = INSTALLED_APPS + (
    'raven.contrib.django.raven_compat',
)
|
Python
| 0
|
@@ -4446,16 +4446,65 @@
_DSN'),%0A
+ 'site': env('SENTRY_SITE', 'Public Schools')%0A
%7D%0A%0AINSTA
|
3e141b9352a153757604a323b385719f445639c6
|
add feature
|
tierpsy/analysis/vid_subsample/createSampleVideo.py
|
tierpsy/analysis/vid_subsample/createSampleVideo.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 18 18:22:12 2016

@author: ajaver
"""
import os
import cv2
import h5py
import numpy as np

from tierpsy.helper.params import read_fps
from tierpsy.helper.misc import TimeCounter, print_flush


def getSubSampleVidName(masked_image_file):
    # used by AnalysisPoints.py and CheckFinished.py
    return masked_image_file.replace('.hdf5', '_subsample.avi')


def _getCorrectedTimeVec(fid, tot_frames):
    '''time vector used to account for missing frames'''
    if '/timestamp/raw' in fid:
        timestamp_ind = fid['/timestamp/raw'][:]
    else:
        # if there is not valid timestamp field considered that there are not missing frames
        return np.arange(tot_frames)

    # remove any nan, I notice that sometimes the last number is a nan
    timestamp_ind = timestamp_ind[~np.isnan(timestamp_ind)]

    if timestamp_ind.size < tot_frames-1:  # invalid timestamp
        # if there is not valid frames skip
        return np.arange(tot_frames)

    tot_timestamps = int(timestamp_ind[-1])
    #%%
    # make sure to compensate for missing frames, so the video will have similar length.
    tt_vec = np.full(tot_timestamps+1, np.nan)
    current_frame = 0
    for ii in range(tot_timestamps+1):
        tt_vec[ii] = current_frame
        current_timestamp = timestamp_ind[current_frame]
        if current_timestamp <= ii:
            current_frame += 1

    return tt_vec


def createSampleVideo(masked_image_file, sample_video_name = '', time_factor = 8,
                      size_factor = 5, dflt_fps=30, codec='MPEG'):
    #%%
    if not sample_video_name:
        sample_video_name = getSubSampleVidName(masked_image_file)

    # initialize timers
    base_name = masked_image_file.rpartition('.')[0].rpartition(os.sep)[-1]
    progressTime = TimeCounter('{} Generating subsampled video.'.format(base_name))

    with h5py.File(masked_image_file, 'r') as fid:
        masks = fid['/mask']
        tot_frames, im_h, im_w = masks.shape
        im_h, im_w = im_h//size_factor, im_w//size_factor

        fps, is_default_timestamp = read_fps(masked_image_file, dflt_fps)

        tt_vec = _getCorrectedTimeVec(fid, tot_frames)
        #%%
        # codec values that work 'H264' #'MPEG' #XVID
        vid_writer = cv2.VideoWriter(sample_video_name, \
            cv2.VideoWriter_fourcc(*codec), fps/2, (im_w, im_h), isColor=False)
        assert vid_writer.isOpened()

        for frame_number in range(0, tot_frames, time_factor*2):
            current_frame = tt_vec[frame_number]
            img = masks[current_frame]
            im_new = cv2.resize(img, (im_w, im_h))
            vid_writer.write(im_new)

            if frame_number % (500*time_factor) == 0:
                # calculate the progress and put it in a string
                print_flush(progressTime.get_str(frame_number))

    vid_writer.release()
    print_flush(progressTime.get_str(frame_number) + ' DONE.')
    #%%


if __name__ == '__main__':
    #mask_file_name = '/Volumes/behavgenom_archive$/Avelino/Worm_Rig_Tests/Agar_Test/MaskedVideos/Agar_Screening_101116/N2_N10_F1-3_Set1_Pos3_Ch6_12112016_002739.hdf5'
    #masked_image_file = '/Volumes/behavgenom_archive$/Avelino/Worm_Rig_Tests/Agar_Test/MaskedVideos/Agar_Screening_101116/unc-9_N3_F1-3_Set1_Pos3_Ch4_12112016_002739.hdf5'
    masked_image_file = r'C:\Users\wormrig\Documents\GitHub\Multiworm_Tracking\Tests\data\test_1\MaskedVideos\Capture_Ch1_18062015_140908.hdf5'
    createSampleVideo(masked_image_file)
|
Python
| 0
|
@@ -1467,16 +1467,37 @@
e_file,
+%0A
sample_v
@@ -1512,16 +1512,37 @@
e = '',
+%0A
time_fac
@@ -1571,17 +1571,16 @@
-
size_fac
@@ -1591,36 +1591,236 @@
= 5,
- dflt_fps=30, codec='MPEG'):
+%0A skip_factor = 2, %0A dflt_fps=30, %0A codec='MPEG'):%0A #skip factor is to reduce the size of the movie by using less frames (so we use 15fps for example instead of 30fps)%0A
%0A
@@ -2624,17 +2624,27 @@
c), fps/
-2
+skip_factor
, (im_w,
@@ -2760,16 +2760,20 @@
frames,
+int(
time_fac
@@ -2776,17 +2776,28 @@
_factor*
-2
+skip_factor)
):%0A
|
abd2ad6098cb0bc827a8bebf12f21f1131dc83fa
|
Change version number
|
fluxghost/__init__.py
|
fluxghost/__init__.py
|
__version__ = "0.8.0"
DEBUG = False
|
Python
| 0.000009
|
@@ -12,17 +12,17 @@
= %220.8.
-0
+1
%22%0ADEBUG
|
52eed6f6d771045b2c06a941db17665785e90b23
|
return an error exit code if tests failed
|
tests/__init__.py
|
tests/__init__.py
|
import unittest

import parse
import extent


def load_tests():
    return unittest.TestSuite([parse.load_tests(), extent.load_tests()])


if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=2).run(load_tests())
|
Python
| 0.001138
|
@@ -1,12 +1,23 @@
+import sys%0A
import unitt
@@ -171,16 +171,25 @@
__%22:%0A
+ result =
unittes
@@ -236,8 +236,63 @@
ests())%0A
+ if not result.wasSuccessful():%0A sys.exit(1)%0A
|
a0d8be58248eaa3d314c624cd4150afc1d3dd203
|
Fix DereferrablePanelTestCase.tearDownClass
|
tests/__init__.py
|
tests/__init__.py
|
import sublime

from textwrap import dedent
from unittesting import DeferrableTestCase


class DereferrablePanelTestCase(DeferrableTestCase):

    @classmethod
    def setUpClass(cls):
        """
        Set up global test environment once for all tests owned by this class.
        """
        cls.window = sublime.active_window()
        cls.view = cls.window.create_output_panel("MarkdownUnitTests", unlisted=True)

        settings = cls.view.settings()
        settings.set("auto_indent", False)
        settings.set("detect_indentation", False)
        settings.set("fold_buttons", False)
        settings.set("gutter", False)
        settings.set("line_numbers", False)
        settings.set("scroll_past_end", False)
        settings.set("syntax", "Packages/MarkdownEditing/syntaxes/Markdown.sublime-syntax")
        settings.set("word_wrap", False)

        cls.view = cls.window.create_output_panel("MarkdownUnitTests", unlisted=True)

    @classmethod
    def tearDownClass(cls):
        """
        Teardown global test environment once all tests finished.
        """
        cls.view = cls.window.destroy_output_panel("MarkdownUnitTests")

    @classmethod
    def addCaretAt(cls, row, col):
        """
        Add caret to given point (row, col)

        :param row: The natural 1-based row number. 1=first row
        :param col: The natural 1-based column number. 1=first column
        """
        cls.view.sel().add(cls.textPoint(row, col))

    @classmethod
    def setCaretTo(cls, row, col):
        """
        Move caret to given point (row, col)

        :param row: The natural 1-based row number. 1=first row
        :param col: The natural 1-based column number. 1=first column
        """
        cls.view.sel().clear()
        cls.view.sel().add(cls.textPoint(row, col))

    @classmethod
    def setBlockText(cls, text):
        """
        Replace everything with given block text

        :param text: The triple quoted block text to put into scratch view.
        """
        cls.setText(dedent(text).strip("\n"))

    @classmethod
    def setText(cls, text):
        """
        Replace everything with given text

        :param text: The text to put into scratch view.
        """
        cls.view.run_command("select_all")
        cls.view.run_command("right_delete")
        cls.view.run_command("insert", {"characters": text})

    @classmethod
    def getText(cls):
        """
        Return view's text content
        """
        return cls.view.substr(sublime.Region(0, cls.view.size()))

    @classmethod
    def getRow(cls, row):
        """
        Return row's text content.

        :param row: The natural 1-based row number. 1=first row
        """
        return cls.view.substr(cls.view.line(cls.textPoint(row, 0)))

    @classmethod
    def textPoint(cls, row, col):
        """
        Return textpoint for given row,col coordinats.

        :param row: The natural 1-based row number. 1=first row
        :param col: The natural 1-based column number. 1=first column
        """
        return cls.view.text_point(row - 1, col - 1)

    def assertEqualBlockText(self, text):
        """
        Assert view containing `text` after detenting and stripping whitespace.

        :param text:
            Triple quoted text, which is detented and stripped
            before being compared with view's content.
        """
        self.assertEqual(self.getText(), dedent(text).strip("\n"))

    def assertEqualText(self, text):
        """
        Assert view containing `text`.

        :param text: The text expected to be equal with view's content.
        """
        self.assertEqual(self.getText(), text)
|
Python
| 0.000001
|
@@ -1075,35 +1075,24 @@
%22%22%22%0A
- cls.view =
cls.window.
|
37ea71485ed8b1b1130a3cff06ba3081dae75e39
|
Switch to BytesIO where possible, use StringIO.StringIO elsewhere.
|
tests/__init__.py
|
tests/__init__.py
|
"""Testing helpers and base classes for better isolation."""
from contextlib import contextmanager
import datetime
import errno
import logging
import os
import StringIO
import subprocess
from tempfile import NamedTemporaryFile
import unittest
from mock import patch
import yaml
import utility
@contextmanager
def stdout_guard():
stdout = StringIO.StringIO()
with patch('sys.stdout', stdout):
yield
if stdout.getvalue() != '':
raise AssertionError(
'Value written to stdout: {}'.format(stdout.getvalue()))
def use_context(test_case, context):
result = context.__enter__()
test_case.addCleanup(context.__exit__, None, None, None)
return result
class TestCase(unittest.TestCase):
"""TestCase provides a better isolated version of unittest.TestCase."""
log_level = logging.INFO
test_environ = {}
def setUp(self):
super(TestCase, self).setUp()
def _must_not_Popen(*args, **kwargs):
"""Tests may patch Popen but should never call it."""
self.fail("subprocess.Popen(*{!r}, **{!r}) called".format(
args, kwargs))
self.addCleanup(setattr, subprocess, "Popen", subprocess.Popen)
subprocess.Popen = _must_not_Popen
self.addCleanup(setattr, os, "environ", os.environ)
os.environ = dict(self.test_environ)
setup_test_logging(self, self.log_level)
def assertIsTrue(self, expr, msg=None):
"""Assert that expr is the True object."""
self.assertIs(expr, True, msg)
def assertIsFalse(self, expr, msg=None):
"""Assert that expr is the False object."""
self.assertIs(expr, False, msg)
def addContext(self, context):
"""Enter context manager for the remainder of the test, then leave.
This can be used in place of a with block in setUp, which must return
and may not yield. Note that exceptions will not be passed in when
calling __exit__."""
self.addCleanup(context.__exit__, None, None, None)
return context.__enter__()
class FakeHomeTestCase(TestCase):
"""FakeHomeTestCase creates an isolated home dir for Juju to use."""
def setUp(self):
super(FakeHomeTestCase, self).setUp()
self.home_dir = use_context(self, utility.temp_dir())
os.environ['HOME'] = self.home_dir
os.environ['PATH'] = os.path.join(self.home_dir, '.local', 'bin')
self.juju_home = os.path.join(self.home_dir, '.juju')
os.mkdir(self.juju_home)
self.set_public_clouds(get_default_public_clouds())
def set_public_clouds(self, data_dict):
"""Set the data in the public-clouds.yaml file.
:param data_dict: A dictionary of data, which is used to overwrite
the data in public-clouds.yaml, or None, in which case the file
is removed."""
dest_file = os.path.join(self.home_dir, '.juju/public-clouds.yaml')
if data_dict is None:
try:
os.remove(dest_file)
except OSError as error:
if error.errno != errno.ENOENT:
raise
else:
with open(dest_file, 'w') as file:
yaml.safe_dump(data_dict, file)
def setup_test_logging(testcase, level=None):
log = logging.getLogger()
testcase.addCleanup(setattr, log, 'handlers', log.handlers)
log.handlers = []
testcase.log_stream = StringIO.StringIO()
handler = logging.StreamHandler(testcase.log_stream)
handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
log.addHandler(handler)
if level is not None:
testcase.addCleanup(log.setLevel, log.level)
log.setLevel(level)
# suppress nosetests
setup_test_logging.__test__ = False
@contextmanager
def parse_error(test_case):
stderr = StringIO.StringIO()
with test_case.assertRaises(SystemExit):
with patch('sys.stderr', stderr):
yield stderr
@contextmanager
def temp_os_env(key, value):
"""Set the environment key to value for the context, then restore it."""
org_value = os.environ.get(key, '')
os.environ[key] = value
try:
yield
finally:
os.environ[key] = org_value
def assert_juju_call(test_case, mock_method, client, expected_args,
call_index=None):
"""Check a mock's positional arguments.
:param test_case: The test case currently being run.
:param mock_mothod: The mock object to be checked.
:param client: Ignored.
:param expected_args: The expected positional arguments for the call.
:param call_index: Index of the call to check, if None checks first call
and checks for only one call."""
if call_index is None:
test_case.assertEqual(len(mock_method.mock_calls), 1)
call_index = 0
empty, args, kwargs = mock_method.mock_calls[call_index]
test_case.assertEqual(args, (expected_args,))
class FakePopen(object):
"""Create an artifical version of the Popen class."""
def __init__(self, out, err, returncode):
self._out = out
self._err = err
self._code = returncode
def communicate(self):
self.returncode = self._code
return self._out, self._err
def poll(self):
return self._code
@contextmanager
def observable_temp_file():
"""Get a name which is used to create temporary files in the context."""
temporary_file = NamedTemporaryFile(delete=False)
try:
with temporary_file as temp_file:
with patch('jujupy.NamedTemporaryFile',
return_value=temp_file):
with patch.object(temp_file, '__exit__'):
yield temp_file
finally:
try:
os.unlink(temporary_file.name)
except OSError as e:
# File may have already been deleted, e.g. by temp_yaml_file.
if e.errno != errno.ENOENT:
raise
@contextmanager
def client_past_deadline(client):
"""Create a client patched to be past its deadline."""
soft_deadline = datetime.datetime(2015, 1, 2, 3, 4, 6)
now = soft_deadline + datetime.timedelta(seconds=1)
old_soft_deadline = client._backend.soft_deadline
client._backend.soft_deadline = soft_deadline
try:
with patch.object(client._backend, '_now', return_value=now,
autospec=True):
yield client
finally:
client._backend.soft_deadline = old_soft_deadline
def get_default_public_clouds():
"""The dict used to fill public-clouds.yaml by FakeHomeTestCase."""
return {
'clouds': {
'foo': {
'type': 'foo',
'auth-types': ['access-key'],
'regions': {
# This is the fake juju endpoint:
'bar': {'endpoint': 'bar.foo.example.com'},
'fee': {'endpoint': 'fee.foo.example.com'},
'fi': {'endpoint': 'fi.foo.example.com'},
'foe': {'endpoint': 'foe.foo.example.com'},
'fum': {'endpoint': 'fum.foo.example.com'},
}
},
'qux': {
'type': 'fake',
'auth-types': ['access-key'],
'regions': {
'north': {'endpoint': 'north.qux.example.com'},
'south': {'endpoint': 'south.qux.example.com'},
}
},
}
}
|
Python
| 0
|
@@ -147,16 +147,97 @@
port os%0A
+import io%0Atry:%0A from StringIO import StringIO%0Aexcept ImportError:%0A from io
import S
@@ -421,31 +421,24 @@
tdout =
-StringIO.String
+io.Bytes
IO()%0A
@@ -3504,25 +3504,16 @@
tream =
-StringIO.
StringIO
@@ -3902,23 +3902,16 @@
r =
-StringIO.String
+io.Bytes
IO()
|
175ce2f3b7fe5932166c6bd2ef3596517d71a9f8
|
Work around weird API results
|
foomodules/Lichess.py
|
foomodules/Lichess.py
|
import requests
import time

from datetime import timedelta, datetime

import babel.dates

import foomodules.Base as Base


_LAST_REQUEST = None


def check_and_set_ratelimit():
    global _LAST_REQUEST
    if _LAST_REQUEST is None:
        return True
    now = time.monotonic()
    if now - _LAST_REQUEST < 0.8:
        return False
    _LAST_REQUEST = now
    return True


def long_ratelimit(dt=61):
    global _LAST_REQUEST
    _LAST_REQUEST = time.monotonic() + dt


WHITE = "♔"
BLACK = "♚"

COLOURMAP = {
    "white": WHITE,
    "black": BLACK,
}


class Games(Base.ArgparseCommand):
    def __init__(self, command_name="!games", **kwargs):
        super().__init__(command_name, **kwargs)

        def username(s):
            if any(c in "&?/ " for c in s):
                raise ValueError("not a valid user name")
            if len(s) > 256:
                raise ValueError("you can’t be serious")
            return s.casefold()

        self.argparse.add_argument(
            "user",
            metavar="USERNAME",
            type=username,
            help="User whose games to query"
        )
        self.argparse.add_argument(
            "--in-progress", "--playing",
            action="store_true",
            default=False,
            help="Limit to games in progress"
        )
        self.argparse.add_argument(
            "--rated",
            action="store_true",
            default=False,
            help="Limit to rated games"
        )
        self.argparse.add_argument(
            "-n",
            dest="amount",
            type=int,
            default=3,
            help="Number of games to fetch (up to 10, default: 3)"
        )

    def _format_analysis(self, analysis):
        return "{blunder}/{mistake}/{inaccuracy}".format(
            **analysis
        )

    def _call(self, msg, args, errorSink=None):
        if not (1 <= args.amount <= 10):
            self.reply(msg, "invalid amount of games")
            return

        if not check_and_set_ratelimit():
            self.reply(msg, "please wait a bit")
            return

        req = requests.get(
            "https://en.lichess.org/api/user/{}/games".format(
                args.user,
            ),
            params={
                "playing": str(int(args.in_progress)),
                "rated": str(int(args.rated)),
                "nb": str(args.amount),
            }
        )
        if req.status_code == 429:
            self.reply(
                msg,
                "explicit rate limit from server, "
                "please wait at least one minute"
            )
            return
        elif req.status_code != 200:
            self.reply(
                msg,
                "{} {}".format(req.status_code, req.reason)
            )
            return

        items = []
        now = datetime.utcnow()
        for game in req.json()["currentPageResults"]:
            uid1 = game["players"]["white"].get("userId", "anon")
            uid2 = game["players"]["black"].get("userId", "anon")
            if game["color"] == "white":
                vs = "{} vs. {} {}".format(WHITE, uid2, BLACK)
            else:
                vs = "{} vs. {} {}".format(BLACK, uid1, WHITE)

            status = game["status"]

            misc = []
            if "winner" in game:
                misc.append("{} won".format(COLOURMAP[game["winner"]]))

            try:
                analysis = game["players"]["white"]["analysis"]
            except KeyError:
                pass
            else:
                misc.append("{} {}".format(
                    WHITE,
                    self._format_analysis(analysis))
                )

            try:
                analysis = game["players"]["black"]["analysis"]
            except KeyError:
                pass
            else:
                misc.append("{} {}".format(
                    BLACK,
                    self._format_analysis(analysis))
                )

            lastmove = "never"
            try:
                lastmove_t = game["lastMoveAt"]
            except KeyError:
                pass
            else:
                try:
                    lastmove_t = datetime.utcfromtimestamp(
                        lastmove_t/1000
                    )
                except ValueError:
                    pass
                else:
                    lastmove = babel.dates.format_timedelta(
                        now - lastmove_t,
                        format="short",
                        locale="en_GB",
                    )

            items.append(
                "{url} • {vs}, {status}, {variant} variant, "
                "{nturns} turns ({lastmove}){misc}".format(
                    url="https://lichess.org/{}".format(game["id"]),
                    vs=vs,
                    status=status,
                    variant=game["variant"],
                    nturns=game["turns"],
                    lastmove=lastmove,
                    misc=(", "+", ".join(misc)) if misc else ""
                )
            )

        if items:
            self.reply(
                msg,
                "\n".join([""]+items)
            )
        else:
            self.reply(
                msg,
                "no games found"
            )
|
Python
| 0
|
@@ -3043,24 +3043,26 @@
%0A
+ #
if game%5B%22co
@@ -3082,32 +3082,34 @@
te%22:%0A
+ #
vs = %22%7B%7D vs
@@ -3147,32 +3147,34 @@
ACK)%0A
+ #
else:%0A
@@ -3171,24 +3171,26 @@
%0A
+ #
vs = %22%7B
@@ -3230,16 +3230,131 @@
WHITE)%0A%0A
+ vs = %22%7B%7D %7B%7D vs. %7B%7D %7B%7D%22.format(%0A WHITE, uid1,%0A BLACK, uid2%0A )%0A%0A
|
7c2f34990dc3bf5b4736541a6e9faf88a07581fa
|
remove useless import
|
tests/__init__.py
|
tests/__init__.py
|
import asyncio
import asynctest
import logging
import os
from functools import wraps

import shortuuid
from typing import Generator, Any
from yarl import URL

from aio_pika import Connection, connect, Channel, Queue, Exchange


log = logging.getLogger(__name__)

for logger_name in ('pika.channel', 'pika.callback', 'pika.connection'):
    logging.getLogger(logger_name).setLevel(logging.INFO)

logging.basicConfig(level=logging.DEBUG)


AMQP_URL = URL(os.getenv("AMQP_URL", "amqp://guest:guest@localhost"))

if not AMQP_URL.path:
    AMQP_URL.path = '/'


class AsyncTestCase(asynctest.TestCase):
    forbid_get_event_loop = True

    def get_random_name(self, *args):
        prefix = ['test']
        for item in args:
            prefix.append(item)
        prefix.append(shortuuid.uuid())

        return ".".join(prefix)


class BaseTestCase(AsyncTestCase):

    async def create_connection(self, cleanup=True) -> Generator[Any, None, Connection]:
        client = await connect(AMQP_URL, loop=self.loop)

        if cleanup:
            self.addCleanup(client.close)

        return client

    async def create_channel(self, connection=None, cleanup=True, **kwargs) -> Generator[Any, None, Channel]:
        if connection is None:
            connection = await self.create_connection()

        channel = await connection.channel(**kwargs)

        if cleanup:
            self.addCleanup(channel.close)

        return channel

    async def declare_queue(self, *args, **kwargs) -> Generator[Any, None, Queue]:
        if 'channel' not in kwargs:
            channel = await self.create_channel()
        else:
            channel = kwargs.pop('channel')

        queue = await channel.declare_queue(*args, **kwargs)
        self.addCleanup(queue.delete)
        return queue

    async def declare_exchange(self, *args, **kwargs) -> Generator[Any, None, Exchange]:
        if 'channel' not in kwargs:
            channel = await self.create_channel()
        else:
            channel = kwargs.pop('channel')

        exchange = await channel.declare_exchange(*args, **kwargs)
        self.addCleanup(exchange.delete)
        return exchange


def timeout(timeout_sec=5):
    def decorator(func):
        @wraps(func)
        async def wrap(self, *args, **kwargs):
            loop = self.loop
            task = loop.create_task(func(self, *args, **kwargs))

            def on_timeout():
                if task.done():
                    return
                task.cancel()

            self.loop.call_later(timeout_sec, on_timeout)
            return await task
        return wrap
    return decorator
|
Python
| 0.000004
|
@@ -1,19 +1,4 @@
-import asyncio%0A
impo
|
1858b0ae7f70798f3d11ecca1af55719a52def49
|
Fix downgrade in migration
|
neutron/db/migration/alembic_migrations/versions/2a6d0b51f4bb_cisco_plugin_cleanup.py
|
neutron/db/migration/alembic_migrations/versions/2a6d0b51f4bb_cisco_plugin_cleanup.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""cisco plugin cleanup

Revision ID: 2a6d0b51f4bb
Revises: 1d76643bcec4
Create Date: 2013-01-17 22:24:37.730466

"""

# revision identifiers, used by Alembic.
revision = '2a6d0b51f4bb'
down_revision = '1d76643bcec4'

# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.plugins.cisco.network_plugin.PluginV2'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_table(u'portprofile_bindings')
    op.drop_table(u'portprofiles')
    op.drop_table(u'port_bindings')


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.create_table(
        u'port_bindings',
        sa.Column(u'id', sa.Integer(), autoincrement=True,
                  nullable=False),
        sa.Column(u'port_id', sa.String(255), nullable=False),
        sa.Column(u'blade_intf_dn', sa.String(255), nullable=False),
        sa.Column(u'portprofile_name', sa.String(255),
                  nullable=True),
        sa.Column(u'vlan_name', sa.String(255), nullable=True),
        sa.Column(u'vlan_id', sa.Integer(), nullable=True),
        sa.Column(u'qos', sa.String(255), nullable=True),
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'vif_id', sa.String(255), nullable=True),
        sa.PrimaryKeyConstraint(u'id')
    )
    op.create_table(
        u'portprofiles',
        sa.Column(u'uuid', sa.String(255), nullable=False),
        sa.Column(u'name', sa.String(255), nullable=True),
        sa.Column(u'vlan_id', sa.Integer(), nullable=True),
        sa.Column(u'qos', sa.String(255), nullable=True),
        sa.PrimaryKeyConstraint(u'uuid')
    )
    op.create_table(
        u'portprofile_bindings',
        sa.Column(u'id', sa.String(255), nullable=False),
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'port_id', sa.Integer(), nullable=True),
        sa.Column(u'portprofile_id', sa.String(255), nullable=True),
        sa.Column(u'portprofile_id', sa.Boolean(), nullable=False),
        sa.ForeignKeyConstraint(['portprofile_id'], ['portprofiles.uuid'], ),
        sa.ForeignKeyConstraint(['ports'], ['ports.id'], ),
        sa.PrimaryKeyConstraint(u'id')
    )
|
Python
| 0.00003
|
@@ -2735,32 +2735,34 @@
ort_id', sa.
-Integer(
+String(255
), nullable=
@@ -2853,38 +2853,31 @@
sa.Column(u'
-portprofile_id
+default
', sa.Boolea
@@ -3014,17 +3014,19 @@
t(%5B'port
-s
+_id
'%5D, %5B'po
|
bb94d126ae9ff86efc00cfbda5f3fff375490e16
|
Add missing import to tests/__init__.py.
|
tests/__init__.py
|
tests/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2011, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# only, as published by the Free Software Foundation.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License version 3 for more details
# (a copy is included in the LICENSE file that accompanied this code).
#
# You should have received a copy of the GNU Lesser General Public License
# version 3 along with OpenQuake. If not, see
# <http://www.gnu.org/licenses/lgpl-3.0.txt> for a copy of the LGPLv3 License.
from alchemy_db_utils_unittest import *
from black_box_tests import *
from bulk_insert_unittest import *
from cache_gc_unittest import *
from db_loader_unittest import *
from db_loader_unittest import *
from deterministic_hazard_unittest import *
from deterministic_risk_unittest import *
from geo_unittest import *
from handlers_unittest import *
from hazard_classical_unittest import *
from hazard_nrml_unittest import *
from hazard_unittest import *
from java_unittest import *
from job_unittest import *
from kvs_unittest import *
from logs_unittest import *
from loss_map_output_unittest import *
from loss_output_unittest import *
from output_hazard_unittest import *
from output_risk_unittest import *
from output_unittest import *
from output_writers_unittest import *
from parser_exposure_portfolio_unittest import *
from parser_hazard_curve_unittest import *
from parser_hazard_map_unittest import *
from parser_vulnerability_model_unittest import *
from probabilistic_unittest import *
from producer_unittest import *
from risk_job_unittest import *
from risk_parser_unittest import *
from risk_unittest import *
from schema_unittest import *
from shapes_unittest import *
from tools_dbmaint_unittest import *
from utils_general_unittest import *
from utils_tasks_unittest import *
from utils_version_unittest import *
from validator_unittest import *
import glob
import os
import sys
for path in glob.glob(os.path.join(os.path.dirname(__file__), '*test*.py')):
    test = os.path.splitext(os.path.basename(path))[0]
    module = 'tests.' + test
    if module not in sys.modules:
        print >>sys.stderr, "Potential missing import of " + module
|
Python
| 0.999554
|
@@ -1238,24 +1238,58 @@
st import *%0A
+from input_risk_unittest import *%0A
from java_un
|
51e26f24d224978b58c21ab3e083960356429f23
|
Add tests for BCS operators without subscripts
|
tests/bcs_test.py
|
tests/bcs_test.py
|
"""Test for the reduced BCS Hamiltonian."""
import pytest
from sympy import KroneckerDelta
from drudge import ReducedBCSDrudge
@pytest.fixture(scope='module')
def rbcs(spark_ctx):
"""Initialize the environment for a reduced BCS problem."""
return ReducedBCSDrudge(spark_ctx)
def test_rbcs_has_basic_commutations(rbcs: ReducedBCSDrudge):
"""Test the basic commutation rules for Reduced BCS problem."""
dr = rbcs
p = dr.names
# Test access of the basic operators.
n_, pdag_, p_ = rbcs.cartan, rbcs.raise_, rbcs.lower
assert n_ is p.N
assert pdag_ is p.Pdag
assert p_ is p.P
# Test commutation on the same site.
i_ = p.i
n_i = dr.sum(n_[i_])
pdag_i = dr.sum(pdag_[i_])
p_i = dr.sum(p_[i_])
comm = (n_i | pdag_i).simplify()
assert comm == dr.sum(2 * pdag_i)
comm = (n_i | p_i).simplify()
assert comm == dr.sum(-2 * p_i)
comm = (p_i | pdag_i).simplify()
assert comm == dr.sum(1 - n_i)
# Test commutation on different ranges. This ensures that the delta
# simplifier is working properly.
a_ = p.a
n_a = dr.sum(n_[a_])
pdag_a = dr.sum(pdag_[a_])
p_a = dr.sum(p_[a_])
comm = (n_i | pdag_a).simplify()
assert comm == 0
comm = (n_i | p_a).simplify()
assert comm == 0
comm = (p_i | pdag_a).simplify()
assert comm == 0
comm = (p_i | n_a).simplify()
assert comm == 0
def test_rbcs_has_basic_commutations_in_fermi(rbcs: ReducedBCSDrudge):
"""Test the pairing commutation rules in terms of fermion operators.
This function primarily tests the internal function of the reduced BCS
Hamiltonian.
"""
dr = rbcs
p = dr.names
# Here we are only interested in the same site, since different sites are so
# easily commutative.
# Test commutation on the same site.
a_ = p.a
n_a = dr.sum(dr.cartan[a_])
pdag_a = dr.sum(dr.raise_[a_])
p_a = dr.sum(dr.lower[a_])
comm = n_a | pdag_a
assert comm.n_terms == 2
diff = dr._transl2fermi(comm - dr.sum(2 * pdag_a))
assert diff.simplify() == 0
comm = n_a | p_a
diff = dr._transl2fermi(comm - dr.sum(-2 * p_a))
assert diff.simplify() == 0
comm = p_a | pdag_a
diff = dr._transl2fermi(comm - dr.sum(1 - n_a))
assert diff.simplify() == 0
def test_rbcs_gives_vev(rbcs: ReducedBCSDrudge):
"""Test VEV utility for reduced BCS problem."""
dr = rbcs
p = dr.names
n_, pdag_, p_ = rbcs.cartan, rbcs.raise_, rbcs.lower
i_ = p.i
j_ = p.j
a_ = p.a
res = dr.eval_vev(dr.sum(n_[i_]))
assert res.simplify() == 2
res = dr.eval_vev(dr.sum(n_[a_]))
assert res.simplify() == 0
# Test tensor methods.
res = dr.sum(pdag_[j_] * p_[i_]).eval_vev()
assert res == dr.sum(KroneckerDelta(j_, i_).simplify())
def test_rbcs_has_ham(rbcs: ReducedBCSDrudge):
"""Test the Hamiltonian for the reduced BCS problem."""
dr = rbcs
ham = dr.ham
# Here we tentatively just test the number of terms.
assert ham.n_terms == 4 + 2
|
Python
| 0
|
@@ -613,16 +613,244 @@
is p.P%0A%0A
+ # Test commutation without subscripts.%0A comm = dr.simplify(n_ %7C pdag_)%0A assert comm == 2 * pdag_%0A comm = dr.simplify(n_ %7C p_)%0A assert comm == -2 * p_%0A comm = dr.simplify(p_ %7C pdag_)%0A assert comm == 1 - n_%0A%0A
# Te
|
3225abc4006378d0b9f1e861116aac8116d47ec0
|
fix wrong indent
|
monasca_notification/plugins/slack_notifier.py
|
monasca_notification/plugins/slack_notifier.py
|
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import urlparse
import requests
from monasca_notification.plugins import abstract_notifier
"""
notification.address = https://slack.com/api/chat.postMessage?token=token&channel=#channel
Slack documentation about tokens:
1. Login to your slack account via browser and check the following pages
a. https://api.slack.com/docs/oauth-test-tokens
b. https://api.slack.com/tokens
"""
class SlackNotifier(abstract_notifier.AbstractNotifier):
def __init__(self, log):
self._log = log
def config(self, config_dict):
self._config = {'timeout': 5}
self._config.update(config_dict)
@property
def type(self):
return "slack"
@property
def statsd_name(self):
return 'sent_slack_count'
def _build_slack_message(self, notification):
"""Builds slack message body
"""
slack_request = dict(text=notification.alarm_description)
return json.dumps(slack_request)
def send_notification(self, notification):
"""Send the notification via slack
Posts on the given url
"""
slack_message = self._build_slack_message(notification)
address = notification.address
# "#" is reserved character and replace it with ascii equivalent
# Slack room has "#" as first character
address = address.replace("#", "%23")
parsed_url = urlparse.urlsplit(address)
query_params = urlparse.parse_qs(parsed_url.query)
# URL without query params
url = urlparse.urljoin(address, urlparse.urlparse(address).path)
# Default option is to do cert verification
verify = self._config.get('insecure', False)
# If ca_certs is specified, do cert validation and ignore insecure flag
if (self._config.get("ca_certs")):
verify = self._config.get("ca_certs")
proxyDict = None
if (self._config.get("proxy")):
proxyDict = {"https": self._config.get("proxy")}
try:
# Posting on the given URL
self._log.debug("Sending to the url {0} , with query_params {1}".format(url, query_params))
result = requests.post(url=url,
data=slack_message,
verify=verify,
params=query_params,
proxies=proxyDict,
timeout=self._config['timeout'])
result.raise_for_status()
if result.headers['content-type'] == 'application/json':
response = result.json()
if response.get('ok'):
self._log.debug("Notification successfully posted.")
return True
else:
self._log.warning("Received an error message {} when trying to send to slack on URL {}."
.format(response.get("error"), url))
return False
except Exception as ex:
self._log.exception("Error trying to send to slack on URL {}: {}".format(url, ex.message))
return False
|
Python
| 0.984856
|
@@ -3382,36 +3382,8 @@
.%22)%0A
- return True%0A
@@ -3578,32 +3578,36 @@
l))%0A
+
return False%0A
@@ -3603,16 +3603,40 @@
n False%0A
+ return True%0A
|
4bafa90acca39a3d3fa5df0303d885c810244700
|
Add URL
|
lc034_find_first_and_last_position_of_element_in_sorted_array.py
|
lc034_find_first_and_last_position_of_element_in_sorted_array.py
|
"""Leetcode 34. Find First and Last Position of Element in Sorted Array
Medium
Given an array of integers nums sorted in ascending order,
find the starting and ending position of a given target value.
Your algorithm's runtime complexity must be in the order of O(log n).
If the target is not found in the array, return [-1, -1].
Example 1:
Input: nums = [5,7,7,8,8,10], target = 8
Output: [3,4]
Example 2:
Input: nums = [5,7,7,8,8,10], target = 6
Output: [-1,-1]
"""
class Solution(object):
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
Time complexity: O(logn), where n is the length of nums.
Space complexity: O(1).
"""
        # Apply 2 binary searches to update the result [-1, -1].
res = [-1, -1]
if not nums:
return res
# Apply the 1st binary search to search target's left position.
first, last = 0, len(nums) - 1
while first < last:
mid = first + (last - first) // 2
if nums[mid] < target:
first = mid + 1
else:
last = mid
if nums[first] != target:
return res
else:
res[0] = first
# Apply the 2nd binary search to search target's right position.
last = len(nums) - 1
while first < last:
# Make mid biased to the right.
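            # (without the bias, first = mid could loop forever when last == first + 1)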
mid = first + (last - first) // 2 + 1
if nums[mid] > target:
last = mid - 1
else:
first = mid
res[1] = last
return res
def main():
# Ans: [3,4]
nums = [5,7,7,8,8,10]
target = 8
print Solution().searchRange(nums, target)
# Ans: [-1,-1]
nums = [5,7,7,8,8,10]
target = 6
print Solution().searchRange(nums, target)
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -73,16 +73,108 @@
Medium%0A%0A
+URL: https://leetcode.com/problems/find-first-and-last-position-of-element-in-sorted-array%0A%0A
Given an
|
89ec39a1479bf532a5fc2ef7b2f9a4afdfde075f
|
Use Pattern from typing
|
src/python/m5/ext/pystats/group.py
|
src/python/m5/ext/pystats/group.py
|
# Copyright (c) 2021 The Regents of The University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from typing import Callable, Dict, Iterator, List, Optional, Union
from .jsonserializable import JsonSerializable
from .statistic import Scalar, Statistic
from .timeconversion import TimeConversion
class Group(JsonSerializable):
"""
    Used to create the hierarchical stats structure. A Group object contains a
    map of labeled Groups, Statistics, Lists of Groups, or Lists of Statistics.
"""
type: Optional[str]
time_conversion: Optional[TimeConversion]
def __init__(self, type: Optional[str] = None,
time_conversion: Optional[TimeConversion] = None,
**kwargs: Dict[str, Union["Group",Statistic,List["Group"],
List["Statistic"]]]):
if type is None:
self.type = "Group"
else:
self.type = type
self.time_conversion = time_conversion
for key,value in kwargs.items():
setattr(self, key, value)
def children(self, predicate: Optional[Callable[[str], bool]] = None
) -> Iterator[Union["Group", Statistic]]:
""" Iterate through all of the children, optionally with a predicate
```
        >>> system.children(lambda _name: 'cpu' in _name)
[cpu0, cpu1, cpu2]
```
:param: predicate(str) -> bool: Optional. Each child's name is passed
to this function. If it returns true, then the child is
yielded. Otherwise, the child is skipped.
If not provided then all children are returned.
"""
for attr in self.__dict__:
# Check the provided predicate. If not a match, skip this child
if predicate and not predicate(attr): continue
obj = getattr(self, attr)
if isinstance(obj, Group) or isinstance(obj, Statistic):
yield obj
def find(self, name: str) -> Iterator[Union["Group", Statistic]]:
""" Find all stats that match the name
This function searches all of the "children" in this group. It yields
the set of attributes (children) that have the `name` as a substring.
The order of the objects returned by the generator is arbitrary.
```
>>> system.find('cpu')
[cpu0, cpu1, cpu2, cpu3, other_cpu, ...]
```
This is useful for performing aggregates over substats. For instance:
```
>>> total_instructions = sum([cpu.exec_context.thread_0.numInsts.value
for cpu in simstat.system.find('cpu')])
100000
```
:param: name: The name to search for
"""
        yield from self.children(lambda _name: name in _name)
def find_re(self, regex: Union[str, re.Pattern]
) -> Iterator[Union["Group", Statistic]]:
""" Find all stats that match the name
This function searches all of the "children" in this group. It yields
        the set of attributes (children) that have the `name` matching the
regex provided. The order of the objects returned by the generator is
arbitrary.
```
>>> system.find_re('cpu[0-9]')
[cpu0, cpu1, cpu2]
```
Note: The above will not match `cpu_other`.
:param: regex: The regular expression used to search. Can be a
precompiled regex or a string in regex format
"""
if isinstance(regex, str):
regex = re.compile(regex)
yield from self.children(lambda _name: regex.search(_name))
class Vector(Group):
"""
The Vector class is used to store vector information. However, in gem5
Vectors, in practise, hold information that is more like a dictionary of
Scalar Values. This class may change, and may be merged into Group in
accordance to decisions made in relation to
https://gem5.atlassian.net/browse/GEM5-867.
"""
def __init__(self, scalar_map: Dict[str,Scalar]):
super(Vector, self).__init__(
type="Vector",
time_conversion=None,
**scalar_map,
)
|
Python
| 0
|
@@ -1613,16 +1613,25 @@
ptional,
+ Pattern,
Union%0A%0A
@@ -4324,19 +4324,16 @@
on%5Bstr,
-re.
Pattern%5D
|
f2dcee8364087209b7f160806a023ce2dc198466
|
Remove hidden keys
|
bdp/platform/frontend/src/bdp_fe/jobconf/views_util.py
|
bdp/platform/frontend/src/bdp_fe/jobconf/views_util.py
|
"""
Utility functions for controllers.
"""
from random import choice
from django.conf import settings
from django.http import Http404
from django.shortcuts import get_object_or_404
from models import CustomJobModel
from pymongo import Connection
from pymongo.errors import AutoReconnect, ConnectionFailure
from bdp_fe.jobconf.models import Job
from bdp_fe.middleware403 import Http403
HIDDEN_KEYS = ['_id', 'job_id']
class NoResultsError(Exception):
pass
class NoConnectionError(Exception):
pass
class MongoRecord(object):
"""
A MongoRecord is a document from a Mongo database, but with additional
methods to allow for easier display.
"""
def __init__(self, raw_mongo_document, primary_key):
self.document = raw_mongo_document
self.pk = primary_key
def get_primary_key(self):
"""Gets the value of the primary key for this record"""
return self.document[self.pk]
def get_fields(self):
"""Gets the values of all non-primary keys for this record"""
ans = {}
for k, v in self.document.iteritems():
if k != self.pk:
ans.setdefault(k, v)
return ans
def safe_int_param(query_dict, param_name, default_value=None):
"""
Safe conversion of query parameters to int.
By default, returns None for absent or non-integer values.
"""
try:
return int(query_dict.get(param_name, ''))
except ValueError:
return default_value
def get_owned_job_or_40x(request, job_id):
try:
job = get_object_or_404(Job, pk=int(job_id))
except ValueError:
        raise Http404()
if job.user == request.user:
return job
else:
raise Http403()
def retrieve_results(job_id, primary_key):
ans = []
jobmodel = CustomJobModel.objects.get(id=job_id)
mongo_url = jobmodel.mongo_url()
mongo_db = jobmodel.job.user.username
mongo_collection = 'job_%s' % jobmodel.job.id
try:
connection = Connection(mongo_url)
db = connection[mongo_db]
job_results = db[mongo_collection]
if not primary_key:
some_result = job_results.find_one()
if not some_result:
raise NoResultsError
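            # No primary key given: pick an arbitrary visible field to act as one.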
primary_key = choice([k for k in some_result.keys()
if k not in HIDDEN_KEYS])
for job_result in job_results.find():
mongo_result = MongoRecord(job_result, primary_key)
ans.append(mongo_result)
return ans
    except (AutoReconnect, ConnectionFailure):
raise NoConnectionError
|
Python
| 0.000002
|
@@ -414,23 +414,8 @@
= %5B
-'_id', 'job_id'
%5D%0A%0Ac
|
49c64731fab1de1fc08b61a70190930b829d70d3
|
Remove import for random
|
src/python/m5/internal/__init__.py
|
src/python/m5/internal/__init__.py
|
# Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import core
import debug
import event
import random
import stats
import trace
|
Python
| 0.000003
|
@@ -1611,22 +1611,8 @@
ent%0A
-import random%0A
impo
|
f3c7f0d488bcd41ed3fb19d83e78fa1436315a08
|
Add test for improved pretty printing behaviour
|
bindings/pyroot/pythonizations/test/pretty_printing.py
|
bindings/pyroot/pythonizations/test/pretty_printing.py
|
import unittest
import ROOT
class PrettyPrinting(unittest.TestCase):
# Helpers
def _print(self, obj):
print("print({}) -> {}".format(repr(obj), obj))
# Tests
def test_RVec(self):
x = ROOT.ROOT.VecOps.RVec("float")(4)
for i in range(x.size()):
x[i] = i
self._print(x)
self.assertIn("{ 0", x.__str__())
def test_STLVector(self):
x = ROOT.std.vector("float")(4)
for i in range(x.size()):
x[i] = i
self._print(x)
self.assertIn("{ 0", x.__str__())
def test_STLMap(self):
x = ROOT.std.map("string", "int")()
for i, s in enumerate(["foo", "bar"]):
x[s] = i
self._print(x)
self.assertIn("foo", x.__str__())
self.assertIn("bar", x.__str__())
def test_STLPair(self):
x = ROOT.std.pair("string", "int")("foo", 42)
self._print(x)
self.assertIn("foo", x.__str__())
def test_STLString(self):
# std::string is not pythonized with the pretty printing, because:
# 1. gInterpreter->ToString("s") returns ""s""
# 2. cppyy already does the right thing
s = ROOT.std.string("x")
self.assertEqual(str(s), "x")
def test_TH1F(self):
x = ROOT.TH1F("name", "title", 10, 0, 1)
self._print(x)
self.assertEqual("Name: name Title: title NbinsX: 10", x.__str__())
def test_user_class(self):
# Test fall-back to __repr__
ROOT.gInterpreter.Declare('class MyClass {};')
x = ROOT.MyClass()
self._print(x)
s = x.__str__()
r = x.__repr__()
self.assertIn("MyClass object at", s)
self.assertEqual(s, r)
def test_null_object(self):
# ROOT-9935: test null proxied cpp object
x = ROOT.MakeNullPointer("TTree")
s = x.__str__()
r = x.__repr__()
self.assertIn("TTree object at", s)
self.assertEqual(s, r)
    # TNamed and TObject are not pythonized because these objects are touched
    # by PyROOT before any pythonizations are added. As a result, the classes
    # are not piped through the pythonizor functions again.
"""
def test_TNamed(self):
x = ROOT.TNamed("name", "title")
self._print(x)
self.assertEqual("Name: name Title: title", x.__str__())
def test_TObject(self):
x = ROOT.TObject()
self._print(x)
self.assertEqual("Name: TObject Title: Basic ROOT object", x.__str__())
"""
if __name__ == '__main__':
unittest.main()
|
Python
| 0
|
@@ -1960,16 +1960,749 @@
(s, r)%0A%0A
+ def test_user_class_with_str(self):%0A # ROOT-10967: Respect existing __str__ method defined in C++%0A ROOT.gInterpreter.Declare('struct MyClassWithStr %7B std::string __str__() %7B return %22foo%22; %7D %7D;')%0A x = ROOT.MyClassWithStr()%0A self._print(x)%0A s = x.__str__()%0A r = x.__repr__()%0A self.assertIn(%22MyClassWithStr object at%22, r)%0A self.assertEqual(s, %22foo%22)%0A%0A # Test inherited class%0A ROOT.gInterpreter.Declare('struct MyClassWithStr2 : public MyClassWithStr %7B %7D;')%0A x2 = ROOT.MyClassWithStr2()%0A self._print(x2)%0A s2 = x2.__str__()%0A r2 = x2.__repr__()%0A self.assertIn(%22MyClassWithStr2 object at%22, r2)%0A self.assertEqual(s2, %22foo%22)%0A%0A
%0A # T
|
01c88b514c64f001fc7824a30b8609a425d646ef
|
Set defaults for CI and DETERMINISTIC_TESTS. (#653)
|
tests/conftest.py
|
tests/conftest.py
|
# -*- coding: utf-8 -*-
'''
General-purpose fixtures for vdirsyncer's testsuite.
'''
import logging
import os
import click_log
from hypothesis import HealthCheck, Verbosity, settings
import pytest
@pytest.fixture(autouse=True)
def setup_logging():
click_log.basic_config('vdirsyncer').setLevel(logging.DEBUG)
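# Fall back to a no-op benchmark fixture when pytest-benchmark is not installed.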
try:
import pytest_benchmark
except ImportError:
@pytest.fixture
def benchmark():
return lambda x: x()
else:
del pytest_benchmark
settings.suppress_health_check = [HealthCheck.too_slow]
settings.register_profile("ci", settings(
max_examples=1000,
verbosity=Verbosity.verbose,
))
settings.register_profile("deterministic", settings(
derandomize=True,
))
if os.environ['DETERMINISTIC_TESTS'].lower() == 'true':
settings.load_profile("deterministic")
elif os.environ['CI'].lower() == 'true':
settings.load_profile("ci")
|
Python
| 0
|
@@ -719,17 +719,21 @@
.environ
-%5B
+.get(
'DETERMI
@@ -745,17 +745,26 @@
C_TESTS'
-%5D
+, 'false')
.lower()
@@ -837,14 +837,27 @@
iron
-%5B'CI'%5D
+.get('CI', 'false')
.low
|
88bba8a6145f67fd65e4062123db295601c92000
|
Fix lint errors
|
tests/conftest.py
|
tests/conftest.py
|
# -*- encoding: utf-8
import os
from hotchocolate import Site
import hotchocolate.cli as hcli
# TODO: Tidy this up, and don't duplicate code from cli.py
curdir = os.path.abspath(os.curdir)
os.chdir('tests/examplesite')
site = Site.from_folder('content')
site.build()
os.chdir(curdir)
|
Python
| 0.000396
|
@@ -61,40 +61,8 @@
ite%0A
-import hotchocolate.cli as hcli%0A
%0A%0A#
|
34fcbd340f1e045dc39e0468c9126d7ab13cc94d
|
Fix npm install during tests
|
tests/conftest.py
|
tests/conftest.py
|
import os
import os.path
import json
from pathlib import Path
import subprocess
import tarfile
import pytest
import py
def pytest_addoption(parser):
parser.addoption("--elm-version", default='0.19.1',
help="specify the version of Elm to test")
def pytest_generate_tests(metafunc):
if 'elm_version' in metafunc.fixturenames:
metafunc.parametrize(
"elm_version",
[metafunc.config.getoption('elm_version')],
scope='session')
def elm_stuff_fixture_path(elm_version):
filename = '{}-core-elm-stuff.tar.gz'.format(elm_version)
return py.path.local(__file__).dirpath('fixtures', filename)
def elm_core_fixture_path(elm_version):
filename = '{}-elm-core.tar.gz'.format(elm_version)
return py.path.local(__file__).dirpath('fixtures', filename)
# Helper function for use in tests
def install_elm(to: Path, elm_version: str) -> Path:
npm_package = {
'dependencies': {
'elm': _get_npm_version_range(elm_version)
}
}
with open(str(to / 'package.json'), 'w') as f:
json.dump(npm_package, f)
subprocess.check_call(('npm', 'install'), cwd=str(to))
return to / 'node_modules' / '.bin' / 'elm'
def _get_npm_version_range(elm_version: str) -> str:
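    # Convert an Elm-style constraint like "1.0.0 <= v < 2.0.0" into the
    # npm-style range ">=1.0.0 <2.0.0"; exact versions pass through unchanged.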
if _is_exact(elm_version):
return elm_version
min_version, gt_op, _, lt_op, max_version = elm_version.split(' ')
return '{gt_op}{min_version} {lt_op}{max_version}'.format(
min_version=min_version,
gt_op=_flip_inequality_op(gt_op),
lt_op=lt_op,
max_version=max_version,
)
def _is_exact(elm_version: str) -> bool:
return ' ' not in elm_version
def _flip_inequality_op(op: str) -> str:
# assume there's only one < or >
return op.replace('>', '<').replace('<', '>')
@pytest.fixture
def fixture_path():
return py.path.local(__file__).dirpath('fixtures')
@pytest.fixture
def module_fixture_path(elm_version):
return py.path.local(__file__).dirpath('fixtures', elm_version)
@pytest.fixture
def mock_popular_packages(mocker):
mocker.patch('elm_doc.catalog_tasks.missing_popular_packages', return_value=[])
@pytest.fixture(scope='session')
def elm(tmpdir_factory, elm_version):
tmpdir = tmpdir_factory.mktemp('elm-{}'.format(elm_version))
return str(install_elm(Path(str(tmpdir)), elm_version))
@pytest.fixture
def make_elm_project(mocker, module_fixture_path):
def for_version(elm_version, root_dir, sources={}, package_overrides={}, copy_elm_stuff=False):
'''
:param elm_version: Version of Elm to specify in elm.json
:param root_dir: Directory to create an Elm project in
:param sources: A mapping of source directory relative to root_dir to source files to copy
from tests/fixtures/{elm_version}/
:param package_overrides: Properties to override in the generated elm.json
:param copy_elm_stuff: Whether to copy a cache of core Elm libraries
'''
root_dir.ensure('project', dir=True)
project_dir = root_dir.join('project')
source_dirs = list(sources.keys())
elm_package = dict(default_elm_package[elm_version], **{'source-directories': source_dirs})
elm_package.update(package_overrides)
elm_package_filename = 'elm-package.json' if elm_version == '0.18.0' else 'elm.json'
project_dir.join(elm_package_filename).write(json.dumps(elm_package))
if copy_elm_stuff:
if elm_version == '0.18.0':
_extract_tarball(elm_stuff_fixture_path(elm_version), project_dir)
else:
_extract_tarball(elm_core_fixture_path(elm_version), root_dir)
for source_dir, modules in sources.items():
project_dir.ensure(source_dir, dir=True)
for module in modules:
project_dir.join(source_dir, module).write(module_fixture_path.join(module).read())
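        # Isolate the Elm package cache by pointing ELM_HOME at a per-test directory.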
elm_home = str(root_dir.join('.elm'))
mocker.patch('elm_doc.elm_platform.ELM_HOME', Path(elm_home))
mocker.patch.dict(os.environ, {'ELM_HOME': elm_home})
return project_dir
return for_version
def _extract_tarball(tarball, dest):
with dest.as_cwd():
with tarfile.open(str(tarball)) as tar:
tar.extractall()
default_elm_package = {}
default_elm_package['0.18.0'] = {
"version": "1.0.0",
"summary": "helpful summary of your project, less than 80 characters",
"repository": "https://github.com/user/project.git",
"license": "BSD3",
"source-directories": [
"."
],
"exposed-modules": [],
"dependencies": {
"elm-lang/core": "5.0.0 <= v < 6.0.0",
"elm-lang/html": "2.0.0 <= v < 3.0.0"
},
"elm-version": "0.18.0 <= v < 0.19.0"
}
default_elm_package['0.19.0'] = {
"type": "application",
"source-directories": [
"."
],
"elm-version": "0.19.0",
"dependencies": {
"direct": {
"elm/core": "1.0.2",
"elm/html": "1.0.0"
},
"indirect": {
"elm/json": "1.1.2",
"elm/virtual-dom": "1.0.2"
}
},
"test-dependencies": {
"direct": {},
"indirect": {}
}
}
|
Python
| 0.000006
|
@@ -1326,16 +1326,38 @@
return
+'latest-%7Bv%7D'.format(v=
elm_vers
@@ -1359,16 +1359,17 @@
_version
+)
%0A min
|
8dc79a0a1b99d1742ae297db7da26a0404e5ec33
|
Fix pep8
|
tests/conftest.py
|
tests/conftest.py
|
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from models import Base
from config import create_new_sqla
from helpers import get_video_douban_ids
test_database_url = 'sqlite:///test.db'
@pytest.fixture(scope='session')
def session(request):
sqla = create_new_sqla(test_database_url, echo=False)
session = sqla['session']
engine = sqla['engine']
Base.metadata.create_all(engine)
def teardown():
Base.metadata.drop_all(engine)
request.addfinalizer(teardown)
return session
@pytest.fixture
def douban_movie_ids():
return list(get_video_douban_ids())
|
Python
| 0.000001
|
@@ -554,16 +554,17 @@
ession%0A%0A
+%0A
@pytest.
|
195cabda3d599f5d6f2a7d0e5090f1c2f057ff0c
|
fix appveyor env variable
|
tests/fixtures.py
|
tests/fixtures.py
|
import pytest
from lcopt import LcoptModel
from lcopt.interact import FlaskSandbox
from lcopt.utils import DEFAULT_DB_NAME, check_for_config
import os
import brightway2 as bw2
MODEL_NAME = 'modelName'
PARAMETER_DATA = '[{"id":"p_3_2","Name":"Tea leaves","Unit":"kg","Normal tea":"0.01","Black tea":"0.01"},{"id":"p_0_2","Name":"Boiling water","Unit":"l","Normal tea":"1","Black tea":"1"},{"id":"p_2_4","Name":"Black tea","Unit":"l","Normal tea":"0.8","Black tea":"0.8"},{"id":"kettle_power","Name":"Power rating of kettle, kW","Unit":"","Normal tea":"1.5","Black tea":"1.5"},{"id":"boil_time","Name":"Kettle boiling time, mins","Unit":"","Normal tea":"3","Black tea":"3"},{"id":"milk_density","Name":"Density of milk","Unit":"","Normal tea":"1.035","Black tea":"1.035"},{"id":"milk_amount","Name":"Amount of milk, l","Unit":"","Normal tea":"0.02","Black tea":"0"}]'
EXISTING_PROCESS_NAME = 'Process 1'
EXISTING_PROCESS_NAME_2 = 'Process 2'
FINAL_PROCESS_NAME = 'Process 3'
NEW_PROCESS_NAME = 'Process 4'
NEW_OUTPUT_NAME = 'Output 4'
EXISTING_INPUT_NAME = 'Input 2'
ELECTRICITY_NAME = "market for electricity, medium voltage {DE} [kilowatt hour]"
ELECTRICITY_ID = "('Ecoinvent3_3_cutoff', '8a1ef516cc78d560d3a677357b366de2')"
CO2_NAME = "Carbon dioxide, fossil (emission to air) [kilogram]"
CO2_ID = "('biosphere3', '349b29d1-3e58-4c66-98b9-9d1a076efd2e')"
TEST_MODEL_NAME = "Test_model"
FULL_MODEL_PATH = r"assets/{}".format(TEST_MODEL_NAME)
IS_TRAVIS = 'TRAVIS' in os.environ
if IS_TRAVIS or APPVEYOR:
EI_USERNAME = os.environ['EI_USERNAME']
EI_PASSWORD = os.environ['EI_PASSWORD']
WRITE_CONFIG = False
else:
config = check_for_config()
if config is not None:
if "ecoinvent" in config:
EI_USERNAME = config['ecoinvent'].get('username')
EI_PASSWORD = config['ecoinvent'].get('password')
WRITE_CONFIG = False
@pytest.fixture
def blank_model():
return LcoptModel(MODEL_NAME, ei_username=EI_USERNAME, ei_password=EI_PASSWORD, write_config=WRITE_CONFIG)
@pytest.fixture
def forwast_model():
return LcoptModel(MODEL_NAME, useForwast=True)
@pytest.fixture
def populated_model(blank_model):
model = blank_model
name = 'test_process_1'
unit = 'kilogram'
output_name = 'test_output_1'
exchanges = [{'name':output_name, 'type':'production', 'unit':unit, 'lcopt_type':'intermediate'}]
    location = 'GLO'
model.create_process(name, exchanges, location, unit)
name = 'test_process_2'
unit = 'kilogram'
output_name = 'test_output_2'
exchanges = [{'name':output_name, 'type':'production', 'unit':unit, 'lcopt_type':'intermediate'}]
    location = 'GLO'
model.create_process(name, exchanges, location, unit)
return model
@pytest.fixture
def linked_model(populated_model):
db = populated_model.database
source = populated_model.get_exchange('test_process_1')
source_exc = db['items'][source]['exchanges']
source_output = [x['input'] for x in source_exc if x['type'] == 'production'][0]
target = populated_model.get_exchange('test_process_2')
new_exchange = {'amount': 1,
'comment': 'technosphere exchange of {}'.format('test'),
'input': source_output,
'type': 'technosphere',
'uncertainty type': 1}
db['items'][target]['exchanges'].append(new_exchange)
populated_model.parameter_scan()
return populated_model
@pytest.fixture
def parameterised_model(linked_model):
linked_model.add_parameter('test_parameter', description = 'test description', default = 1)
param_ids = [x for x in linked_model.params]
e_param_name = 'test_parameter'
linked_model.add_parameter(e_param_name, description = 'test description', default = 1)
new_function = "{}*2".format(e_param_name)
parameter = linked_model.params[param_ids[0]]
parameter['function'] = new_function
return linked_model
@pytest.fixture
def fully_formed_model():
script_path = os.path.dirname(os.path.realpath(__file__))
loadpath = os.path.join(script_path, FULL_MODEL_PATH)
return LcoptModel(load = loadpath, ei_username=EI_USERNAME, ei_password=EI_PASSWORD, write_config=WRITE_CONFIG)
@pytest.fixture
def blank_app(blank_model):
sandbox = FlaskSandbox(blank_model)
return sandbox.create_app()
@pytest.fixture
def blank_flask_client(blank_app):
blank_app.config['TESTING'] = True
return blank_app.test_client()
@pytest.fixture
def app(fully_formed_model):
sandbox = FlaskSandbox(fully_formed_model)
app = sandbox.create_app()
return app
@pytest.fixture
def flask_client(app):
app.config['TESTING'] = True
return app.test_client()
|
Python
| 0.000001
|
@@ -1478,16 +1478,55 @@
.environ
+%0AIS_APPVEYOR = 'APPVEYOR' in os.environ
%0A%0Aif IS_
@@ -1535,16 +1535,19 @@
AVIS or
+IS_
APPVEYOR
|
74cfff61731e19d115566741a4f3a1af68431141
|
Add extensions for `gyp`
|
identify/extensions.py
|
identify/extensions.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
EXTENSIONS = {
'apinotes': {'text', 'apinotes'},
'asar': {'binary', 'asar'},
'bash': {'text', 'shell', 'bash'},
'bat': {'text', 'batch'},
'bmp': {'binary', 'image', 'bitmap'},
'bz2': {'binary', 'bzip2'},
'c': {'text', 'c'},
'cc': {'text', 'c++'},
'cfg': {'text'},
'cmake': {'text', 'cmake'},
'cnf': {'text'},
'coffee': {'text', 'coffee'},
'conf': {'text'},
'cpp': {'text', 'c++'},
'crt': {'text', 'pem'},
'cson': {'text', 'cson'},
'css': {'text', 'css'},
'csv': {'text', 'csv'},
'def': {'text', 'def'},
'ejs': {'text', 'ejs'},
'eot': {'binary', 'eot'},
'eps': {'binary', 'eps'},
'erb': {'text', 'erb'},
'exe': {'binary'},
'eyaml': {'text', 'yaml'},
'feature': {'text', 'gherkin'},
'fish': {'text', 'fish'},
'gemspec': {'text', 'ruby'},
'gif': {'binary', 'image', 'gif'},
'go': {'text', 'go'},
'gotmpl': {'text', 'gotmpl'},
'gradle': {'text', 'groovy'},
'groovy': {'text', 'groovy'},
'gyb': {'text', 'gyb'},
'gz': {'binary', 'gzip'},
'h': {'text', 'header', 'c', 'c++'},
'hpp': {'text', 'header', 'c++'},
'htm': {'text', 'html'},
'html': {'text', 'html'},
'icns': {'binary', 'icns'},
'ico': {'binary', 'icon'},
'idl': {'text', 'idl'},
'inc': {'text', 'inc'},
'ini': {'text', 'ini'},
'jade': {'text', 'jade'},
'jar': {'binary', 'zip', 'jar'},
'java': {'text', 'java'},
'jinja': {'text', 'jinja'},
'jinja2': {'text', 'jinja'},
'jpeg': {'binary', 'image', 'jpeg'},
'jpg': {'binary', 'image', 'jpeg'},
'js': {'text', 'javascript'},
'json': {'text', 'json'},
'jsx': {'text', 'jsx'},
'key': {'text', 'pem'},
'less': {'text', 'less'},
'm': {'text', 'c'},
'manifest': {'text', 'manifest'},
'map': {'text', 'map'},
'markdown': {'text', 'markdown'},
'md': {'text', 'markdown'},
'mk': {'text', 'makefile'},
'mm': {'text', 'c++'},
'modulemap': {'text', 'modulemap'},
'ngdoc': {'text', 'ngdoc'},
'otf': {'binary', 'otf'},
'p12': {'binary', 'p12'},
'patch': {'text', 'diff'},
'pdf': {'binary', 'pdf'},
'pem': {'text', 'pem'},
'php': {'text', 'php'},
'php4': {'text', 'php'},
'php5': {'text', 'php'},
'pl': {'text', 'perl'},
'plist': {'text', 'plist'},
'png': {'binary', 'image', 'png'},
'po': {'text', 'pofile'},
'pp': {'text', 'puppet'},
'proto': {'text', 'proto'},
'py': {'text', 'python'},
'r': {'text', 'r'},
'rb': {'text', 'ruby'},
'rs': {'text', 'rust'},
'rst': {'text', 'rst'},
's': {'text', 'asm'},
'scss': {'text', 'scss'},
'sh': {'text', 'shell'},
'sls': {'text', 'salt'},
'so': {'binary'},
'spec': {'text', 'spec'},
'sql': {'text', 'sql'},
'svg': {'text', 'svg'},
'swf': {'binary', 'swf'},
'swift': {'text', 'swift'},
'swiftdeps': {'text', 'swiftdeps'},
'tac': {'text', 'twisted', 'python'},
'tar': {'binary', 'tar'},
'tgz': {'binary', 'gzip'},
'thrift': {'text', 'thrift'},
'tiff': {'binary', 'image', 'tiff'},
'toml': {'text', 'toml'},
'tf': {'text', 'terraform'},
'ts': {'text', 'ts'},
'ttf': {'binary', 'ttf'},
'txt': {'text', 'plain-text'},
'vdx': {'text', 'vdx'},
'vim': {'text', 'vim'},
'wav': {'binary', 'audio', 'wav'},
'whl': {'binary', 'wheel', 'zip'},
'woff': {'binary', 'woff'},
'woff2': {'binary', 'woff2'},
'xml': {'text', 'xml'},
'yaml': {'text', 'yaml'},
'yml': {'text', 'yaml'},
'zip': {'binary', 'zip'},
'zsh': {'text', 'shell', 'zsh'},
}
NAMES = {
'.babelrc': {'text', 'json', 'babelrc'},
'.bowerrc': {'text', 'json', 'bowerrc'},
'.coveragerc': {'text', 'ini', 'coveragerc'},
'.dockerignore': {'text', 'dockerignore'},
'.editorconfig': {'text', 'editorconfig'},
'.gitattributes': {'text', 'gitattributes'},
'.gitignore': {'text', 'gitignore'},
'.gitmodules': {'text', 'gitmodules'},
'.jshintrc': {'text', 'json', 'jshintrc'},
'.mailmap': {'text', 'mailmap'},
'.npmignore': {'text', 'npmignore'},
'AUTHORS': EXTENSIONS['txt'],
'COPYING': EXTENSIONS['txt'],
'Dockerfile': {'text', 'dockerfile'},
'Gemfile': EXTENSIONS['rb'],
'Jenkinsfile': {'text', 'groovy'},
'LICENSE': EXTENSIONS['txt'],
'MAINTAINERS': EXTENSIONS['txt'],
'Makefile': EXTENSIONS['mk'],
'NOTICE': EXTENSIONS['txt'],
'PATENTS': EXTENSIONS['txt'],
'README': EXTENSIONS['txt'],
'Rakefile': EXTENSIONS['rb'],
'setup.cfg': EXTENSIONS['ini'],
}
|
Python
| 0
|
@@ -1137,24 +1137,101 @@
t', 'gyb'%7D,%0A
+ 'gyp': %7B'text', 'gyp', 'python'%7D,%0A 'gypi': %7B'text', 'gyp', 'python'%7D,%0A
'gz': %7B'
|
ab81767d7504bc3016786780902d8c3997e37f64
|
Add option to use proxies in JiraHook
|
airflow/contrib/hooks/jira_hook.py
|
airflow/contrib/hooks/jira_hook.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from jira import JIRA
from jira.exceptions import JIRAError
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
class JiraHook(BaseHook, LoggingMixin):
"""
    Jira interaction hook, a wrapper around the JIRA Python SDK.
:param jira_conn_id: reference to a pre-defined Jira Connection
:type jira_conn_id: string
"""
def __init__(self,
jira_conn_id='jira_default'):
super(JiraHook, self).__init__(jira_conn_id)
self.jira_conn_id = jira_conn_id
self.client = None
self.get_conn()
def get_conn(self):
if not self.client:
self.log.debug('Creating Jira client for conn_id: %s', self.jira_conn_id)
get_server_info = True
validate = True
extra_options = {}
conn = None
if self.jira_conn_id is not None:
conn = self.get_connection(self.jira_conn_id)
if conn.extra is not None:
extra_options = conn.extra_dejson
# only required attributes are taken for now,
# more can be added ex: async, logging, max_retries
# verify
if 'verify' in extra_options \
and extra_options['verify'].lower() == 'false':
extra_options['verify'] = False
# validate
if 'validate' in extra_options \
and extra_options['validate'].lower() == 'false':
validate = False
if 'get_server_info' in extra_options \
and extra_options['get_server_info'].lower() == 'false':
get_server_info = False
try:
self.client = JIRA(conn.host,
options=extra_options,
basic_auth=(conn.login, conn.password),
get_server_info=get_server_info,
validate=validate)
except JIRAError as jira_error:
raise AirflowException('Failed to create jira client, jira error: %s'
% str(jira_error))
except Exception as e:
raise AirflowException('Failed to create jira client, error: %s'
% str(e))
return self.client
|
Python
| 0
|
@@ -1301,16 +1301,47 @@
default'
+,%0A proxies=None
):%0A
@@ -1429,16 +1429,47 @@
conn_id%0A
+ self.proxies = proxies%0A
@@ -3065,16 +3065,77 @@
validate
+,%0A proxies=self.proxies
)%0A
|
423ec9d9b38be990ab7dca027877e1c12f3d07fe
|
add in django-registration update media url
|
imagr_site/settings.py
|
imagr_site/settings.py
|
"""
Django settings for imagr_site project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_0)ionh8p(-xw=uh-3_8un)^xo+=&obsad&lhohn-d93j(p!21'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
AUTH_USER_MODEL = 'imagr_users.ImagrUser'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'imagr_users',
'imagr_images',
'south',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'imagr_site.urls'
WSGI_APPLICATION = 'imagr_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = 'http://localhost:8000/media/'
MEDIA_ROOT = BASE_DIR + "/media/"
|
Python
| 0
|
@@ -1100,16 +1100,36 @@
south',%0A
+ 'registration',%0A
)%0A%0AMIDDL
@@ -2110,29 +2110,8 @@
= '
-http://localhost:8000
/med
|
1675252b3442ff4e32881ce1c28f1753c521fb3f
|
Remove the main from the new file
|
code/spearmint-configs/dbnmnist/mnistdbn.py
|
code/spearmint-configs/dbnmnist/mnistdbn.py
|
"""Spearmint for the DBN module in pydeeplearn."""
__author__ = "Mihaela Rosca"
__contact__ = "mihaela.c.rosca@gmail.com"
import argparse
from lib import deepbelief as db
from lib.common import *
from lib.activationfunctions import *
from read import readmnist
parser = argparse.ArgumentParser(description='digit recognition')
parser.add_argument('--path',dest='path', type = str, default="/data/mcr10/project/pydeeplearn/code/MNIST",
help="the path to the MNIST files")
parser.add_argument('--trainSize', type=int, default=100,
help='the number of tranining cases to be considered')
parser.add_argument('--testSize', type=int, default=10,
help='the number of testing cases to be considered')
args = parser.parse_args()
def trainDBN(unsupervisedLearningRate,
supervisedLearningRate,
visibleDropout,
hiddenDropout,
miniBatchSize,
momentumMax,
maxEpochs):
trainVectors, trainLabels =\
readmnist.read(0, args.trainSize, digits=None, bTrain=True, path=args.path)
testVectors, testLabels =\
readmnist.read(args.trainSize, args.trainSize + args.testSize,
digits=None, bTrain=True, path=args.path)
trainVectors, trainLabels = shuffle(trainVectors, trainLabels)
trainVectors = np.array(trainVectors, dtype='float')
trainingScaledVectors = scale(trainVectors)
testVectors = np.array(testVectors, dtype='float')
testingScaledVectors = scale(testVectors)
trainVectorLabels = labelsToVectors(trainLabels, 10)
net = db.DBN(5, [784, 1000, 1000, 1000, 10],
binary=False,
unsupervisedLearningRate=unsupervisedLearningRate,
supervisedLearningRate=supervisedLearningRate,
momentumMax=momentumMax,
nesterovMomentum=True,
rbmNesterovMomentum=True,
activationFunction=Rectified(),
rbmActivationFunctionVisible=Identity(),
rbmActivationFunctionHidden=RectifiedNoisy(),
rmsprop=True,
visibleDropout=0.8,
hiddenDropout=0.5,
weightDecayL1=0,
weightDecayL2=0,
rbmHiddenDropout=1.0,
rbmVisibleDropout=1.0,
miniBatchSize=miniBatchSize,
# TODO: make this a learned param
preTrainEpochs=100,
sparsityConstraintRbm=False,
sparsityTragetRbm=0.01,
sparsityRegularizationRbm=None)
net.train(trainingScaledVectors, trainVectorLabels,
maxEpochs=maxEpochs, validation=False)
proabilities, predicted = net.classify(testingScaledVectors)
error = getClassificationError(predicted, testLabels)
print "error", error
return error
# Write a function like this called 'main'
def main(job_id, params):
print 'params', params
return trainDBN(unsupervisedLearningRate=params['unsupervisedLearningRate'][0],
supervisedLearningRate=params['supervisedLearningRate'][0],
visibleDropout=params['visibleDropout'][0],
hiddenDropout=params['hiddenDropout'][0],
miniBatchSize=params['miniBatchSize'][0],
momentumMax=params['momentumMax'][0],
maxEpochs=params['maxEpochs'][0])
if __name__ == '__main__':
params = {
'unsupervisedLearningRate': [0],
'supervisedLearningRate': [0],
'visibleDropout': [0],
'hiddenDropout': [0],
'miniBatchSize': [0],
'momentumMax': [0],
'maxEpochs': [0]
}
main(1, params)
|
Python
| 0.000002
|
@@ -3452,261 +3452,4 @@
%5D)%0A%0A
-if __name__ == '__main__':%0A params = %7B%0A 'unsupervisedLearningRate': %5B0%5D,%0A 'supervisedLearningRate': %5B0%5D,%0A 'visibleDropout': %5B0%5D,%0A 'hiddenDropout': %5B0%5D,%0A 'miniBatchSize': %5B0%5D,%0A 'momentumMax': %5B0%5D,%0A 'maxEpochs': %5B0%5D%0A %7D%0A main(1, params)
|
ac754a6a711edc9b3628499ae18e74892efd7f98
|
Add recording interaction print statements
|
src/tdl/runner/recording_system.py
|
src/tdl/runner/recording_system.py
|
import unirest
RECORDING_SYSTEM_ENDPOINT = "http://localhost:41375"
class RecordingEvent:
def __init__(self):
pass
ROUND_START = 'new'
ROUND_SOLUTION_DEPLOY = 'deploy'
ROUND_COMPLETED = 'done'
class RecordingSystem:
def __init__(self, recording_required):
self._recording_required = recording_required
def is_recording_system_ok(self):
return RecordingSystem.is_running() if self._recording_required else True
@staticmethod
def is_running():
try:
response = unirest.get("{}/status".format(RECORDING_SYSTEM_ENDPOINT))
if response.code == 200 and response.body.startswith("OK"):
return True
except Exception as e:
print("Could not reach recording system: {}".format(str(e)))
return False
def notify_event(self, round_id, event_name):
self._send_post("/notify", round_id + "/" + event_name)
def tell_to_stop(self):
self._send_post("/stop", "")
def _send_post(self, endpoint, body):
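        # Bail out early if the recording system is required but unreachable.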
if not self.is_recording_system_ok():
return
try:
response = unirest.post("{}{}".format(RECORDING_SYSTEM_ENDPOINT, endpoint),
params=body)
if response.code != 200:
print("Recording system returned code: {}".format(response.code))
return
if not response.body.startswith("ACK"):
print("Recording system returned body: {}".format(response.body))
except Exception as e:
print("Could not reach recording system: {}".format(str(e)))
def on_new_round(self, round_id):
self.notify_event(round_id, RecordingEvent.ROUND_START)
|
Python
| 0.000004
|
@@ -873,16 +873,92 @@
_name):%0A
+ print('Notify round %22%7B%7D%22, event %22%7B%7D%22'.format(round_id, event_name))%0A
@@ -1034,32 +1034,75 @@
_to_stop(self):%0A
+ print('Stopping recording system')%0A
self._se
|
0b3247c23d37c372d3f3984391b976fa904d00c6
|
bump to v1.4.0 (#5975)
|
var/spack/repos/builtin/packages/miniamr/package.py
|
var/spack/repos/builtin/packages/miniamr/package.py
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Miniamr(MakefilePackage):
"""Proxy Application. 3D stencil calculation with
Adaptive Mesh Refinement (AMR)
"""
homepage = "https://mantevo.org"
url = "http://mantevo.org/downloads/releaseTarballs/miniapps/MiniAMR/miniAMR_1.0_all.tgz"
tags = ['proxy-app', 'ecp-proxy-app']
version('1.0', '812e5aaaab99689a4e9381a3bbd718a6')
variant('mpi', default=True, description='Build with MPI support')
depends_on('mpi', when="+mpi")
@property
def build_targets(self):
targets = []
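        # Select the MPI or serial makefile and the matching build directory.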
if '+mpi' in self.spec:
targets.append('CC={0}'.format(self.spec['mpi'].mpicc))
targets.append('LDLIBS=-lm')
targets.append('--file=Makefile.mpi')
targets.append('--directory=miniAMR_ref')
else:
targets.append('--file=Makefile.serial')
targets.append('--directory=miniAMR_serial')
return targets
def install(self, spec, prefix):
# Manual installation
mkdir(prefix.bin)
mkdir(prefix.doc)
if '+mpi' in spec:
install('miniAMR_ref/miniAMR.x', prefix.bin)
else:
install('miniAMR_serial/miniAMR.x', prefix.bin)
# Install Support Documents
install('miniAMR_ref/README', prefix.doc)
|
Python
| 0
|
@@ -1432,83 +1432,56 @@
http
+s
://
-mantevo.org/downloads/releaseTarballs/miniapps/MiniAMR/miniAMR_1.0_all.t
+github.com/Mantevo/miniAMR/archive/v1.4.tar.
gz%22%0A
@@ -1543,45 +1543,47 @@
('1.
+4.
0', '
-812e5aaaab99689a4e9381a3bbd718a6
+3aab0247047a94e343709cf2e51cc46e
')%0A%0A
@@ -1888,17 +1888,44 @@
('LD
-LIBS=-lm'
+=%7B0%7D'.format(self.spec%5B'mpi'%5D.mpicc)
)%0A
@@ -1954,29 +1954,34 @@
nd('
---file=Makefile.mpi')
+LDLIBS=-lm')%0A else:
%0A
@@ -2009,105 +2009,104 @@
nd('
---directory=miniAMR_ref')%0A else:%0A targets.append('--file=Makefile.serial')%0A
+CC=%7B0%7D'.format(self.compiler.cc))%0A targets.append('LD=%7B0%7D'.format(self.compiler.cc))%0A
@@ -2137,30 +2137,19 @@
rectory=
-miniAMR_serial
+ref
')%0A%0A
@@ -2301,140 +2301,22 @@
i
-f '+mpi' in spec:%0A install('miniAMR_ref/miniAMR.x', prefix.bin)%0A else:%0A install('miniAMR_serial/miniAMR
+nstall('ref/ma
.x',
@@ -2328,17 +2328,16 @@
ix.bin)%0A
-%0A
@@ -2385,16 +2385,8 @@
ll('
-miniAMR_
ref/
|
c982b49e8091972a54e18233c95b21c594687200
|
fix package references
|
xcloud/xcloud.py
|
xcloud/xcloud.py
|
#!/usr/bin/env python
# coding: utf-8
import argparse
import logging
import os
from fnmatch import fnmatch
import yaml
import utils
import cloudoptions.CloudOptions
_log = logging.getLogger(__name__)
def main():
try:
options = {}
config_file = os.path.expanduser('~/.xcloud')
if os.path.exists(config_file):
with open(config_file, 'r') as fhd:
options = yaml.load(fhd)
parser = argparse.ArgumentParser(description='Provision servers in openstack.')
parser.add_argument(
'-l',
'--log-level',
dest='log_level',
default='INFO',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help='optional. Set the log level.')
# parser.add_argument('-c', '--config', default='~/.xcloud', required=True, help='x-cloud config file used to '
# 'store openstack information')
parser.add_argument('-u', '--username', default=os.environ.get('XCLOUD_USERNAME', None),
help='username to access openstack')
parser.add_argument('-p', '--password', default=os.environ.get('XCLOUD_PASSWORD', None),
help='password to access openstack')
parser.add_argument('-f', '--file', default=None, required=True, help='provision manifest file')
subparsers = parser.add_subparsers(help='actions')
parser_a = subparsers.add_parser('scale', help='resize servers')
parser_a.add_argument('-w', '--watch', default=False, action='store_true', help='watch the servers')
parser_a.add_argument('-r', '--replicas', default=None, type=int, help='number of servers to scale to')
parser_a.add_argument('--rebuild', default=False, action='store_true', help='rebuild the servers one-by-one')
parser_a.add_argument('--max-age', default='1d', type=utils.tdelta, help='max age to rebuild servers')
parser_a.add_argument('-n', '--name', default='*', type=str, help='name of cluster')
parser_a.set_defaults(func=scale_cli)
parser_a = subparsers.add_parser('update', help='update servers')
parser_a.add_argument('-n', '--name', default='*', type=str, help='name of cluster')
parser_a.set_defaults(func=update_cli)
parser_a = subparsers.add_parser('delete-all', help='delete all servers')
parser_a.add_argument('-n', '--name', default='*', type=str, help='name of cluster')
parser_a.set_defaults(func=delete_all_cli)
parser_a = subparsers.add_parser('list', help='list all servers')
parser_a.add_argument('-n', '--name', default='*', type=str, help='name of cluster')
parser_a.set_defaults(func=list_cli)
args = parser.parse_args()
logging.getLogger('requests').setLevel(logging.ERROR)
fmt = "[%(relativeCreated)-8d] %(levelname)s %(module)s: %(message)s"
logging.basicConfig(level=getattr(logging, args.log_level), format=fmt)
args.func(args)
except KeyboardInterrupt:
exit(0)
def delete_all_cli(args):
all_options = CloudOptions.create_from_file(args.file, args)
for options in all_options:
option_name = options['name']
if fnmatch(option_name, args.name):
cloud = Cloud.create(options)
cloud.delete_all()
def scale_cli(args):
all_options = CloudOptions.create_from_file(args.file, args)
for options in all_options:
option_name = options['name']
if fnmatch(option_name, args.name):
cloud = Cloud.create(options)
cloud.scale(args)
def update_cli(args):
all_options = CloudOptions.create_from_file(args.file, args)
for options in all_options:
option_name = options['name']
if fnmatch(option_name, args.name):
cloud = Cloud.create(options)
cloud.update_servers(args)
def list_cli(args):
all_options = CloudOptions.create_from_file(args.file, args)
for options in all_options:
option_name = options['name']
if fnmatch(option_name, args.name):
cloud = Cloud.create(options)
servers = cloud.find_servers(option_name)
print 'CLUSTER: %s' % option_name
print ''
print ' %s %s %s' % ('fqdn'.ljust(30), 'ip'.ljust(15), 'floater'.ljust(15))
print ' %s %s %s' % ('-' * 30, '-' * 15, '-' * 30)
for server in servers:
print ' %s %s %s' % (server.fqdn.ljust(30), server.fixed_ip.ljust(15), server.metadata['floating_ip'].ljust(15))
print ''
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -128,24 +128,59 @@
t utils%0A
-%0Aimport
+from xcloud.cloud import Cloud%0Afrom xcloud.
cloudopt
@@ -183,17 +183,24 @@
doptions
-.
+ import
CloudOpt
|
7b2b2fb96957b9ea94ddb0656fd047724564f31d
|
add v1.1 (#9289)
|
var/spack/repos/builtin/packages/sw4lite/package.py
|
var/spack/repos/builtin/packages/sw4lite/package.py
|
##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import glob
class Sw4lite(MakefilePackage):
"""Sw4lite is a bare bone version of SW4 intended for testing
performance optimizations in a few important numerical kernels of SW4."""
tags = ['proxy-app', 'ecp-proxy-app']
homepage = "https://geodynamics.org/cig/software/sw4"
url = "https://github.com/geodynamics/sw4lite/archive/v1.0.zip"
git = "https://github.com/geodynamics/sw4lite.git"
version('develop', branch='master')
version('1.0', '3d911165f4f2ff6d5f9c1bd56ab6723f')
variant('openmp', default=True, description='Build with OpenMP support')
variant('precision', default='double', values=('float', 'double'),
multi=False, description='Floating point precision')
variant('ckernel', default=False, description='C or Fortran kernel')
depends_on('blas')
depends_on('lapack')
depends_on('mpi')
parallel = False
@property
def build_targets(self):
targets = []
spec = self.spec
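        # Pick the include directory matching the requested floating-point precision.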
if spec.variants['precision'].value == 'double':
cxxflags = ['-I../src', '-I../src/double']
else:
cxxflags = ['-I../src', '-I../src/float']
cflags = []
fflags = []
if '+openmp' in self.spec:
cflags.append('-DSW4_OPENMP')
cflags.append(self.compiler.openmp_flag)
cxxflags.append('-DSW4_OPENMP')
cxxflags.append(self.compiler.openmp_flag)
fflags.append(self.compiler.openmp_flag)
if spec.variants['ckernel'].value is True:
cxxflags.append('-DSW4_CROUTINES')
targets.append('ckernel=yes')
targets.append('FC=' + spec['mpi'].mpifc)
targets.append('CXX=' + spec['mpi'].mpicxx)
targets.append('CFLAGS={0}'.format(' '.join(cflags)))
targets.append('CXXFLAGS={0}'.format(' '.join(cxxflags)))
targets.append('FFLAGS={0}'.format(' '.join(fflags)))
targets.append('EXTRA_CXX_FLAGS=')
targets.append('EXTRA_FORT_FLAGS=')
lapack_blas = spec['lapack'].libs + spec['blas'].libs
if spec.satisfies('%gcc'):
targets.append('EXTRA_LINK_FLAGS={0} -lgfortran'
.format(lapack_blas.ld_flags))
else:
targets.append('EXTRA_LINK_FLAGS={0}'.format(lapack_blas.ld_flags))
return targets
def install(self, spec, prefix):
mkdir(prefix.bin)
exe_name = glob.glob('*/sw4lite')[0]
install(exe_name, prefix.bin)
install_tree('tests', prefix.tests)
|
Python
| 0
|
@@ -1692,16 +1692,110 @@
aster')%0A
+ version('1.1', sha256='34b5f7b56f9e40474c14abebcaa024192de018de6beb6dafee53d3db5b07c6d3')%0A
vers
|
779393e6c18539c97ff3bdaeb471253170645bc2
|
Update group.py
|
web-app/numeter_webapp/configuration/forms/group.py
|
web-app/numeter_webapp/configuration/forms/group.py
|
"""
Group Form module.
"""
from django import forms
from django.utils.translation import ugettext_lazy as _
from djangular.forms.angular_model import NgModelFormMixin
from core.models import Group
class Group_Form(forms.ModelForm):
"""Simple Group Form"""
class Meta:
model = Group
widgets = {
'name': forms.TextInput({'placeholder':_('Name'), 'class':'span', 'ng-model': 'tabIndex.form.name'}),
}
def get_submit_url(self):
"""Return url matching with creation or updating."""
if self.instance.id:
return self.instance.get_rest_detail_url()
else:
return self.instance.get_rest_list_url()
def get_submit_method(self):
"""Return method matching with creation or updating."""
if self.instance.id:
return 'PATCH'
else:
return 'POST'
|
Python
| 0
|
@@ -106,67 +106,8 @@
s _%0A
-from djangular.forms.angular_model import NgModelFormMixin%0A
from
|
7ca6dd5cd84222845db331afd97fc2f314999cff
|
fix yaspin.compat module docstring
|
yaspin/compat.py
|
yaspin/compat.py
|
# -*- coding: utf-8 -*-
"""
tests.compat
~~~~~~~~~~~~~
Compatibility layer.
"""
import sys
PY2 = sys.version_info[0] == 2
if PY2:
builtin_str = str
bytes = str
str = unicode # noqa
def iteritems(dct):
return dct.iteritems()
else:
builtin_str = str
bytes = bytes
str = str
def iteritems(dct):
return dct.items()
|
Python
| 0
|
@@ -26,13 +26,14 @@
%22%22%22%0A
-tests
+yaspin
.com
|
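The shim keeps call sites version-agnostic; a self-contained sketch of the same pattern in use (the spinner data here is made up):

import sys
PY2 = sys.version_info[0] == 2

def iteritems(dct):
    # mirrors the module above: lazy iterator on Py2, items view on Py3
    return dct.iteritems() if PY2 else dct.items()

for key, value in iteritems({'frames': '|/-\\', 'interval': 80}):
    print(key, value)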
9a15095217b0f1f1ed154ccba7950c041ff01260
|
FIX default event type to avoid null values error
|
website_event_compassion/models/event_compassion.py
|
website_event_compassion/models/event_compassion.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import models, fields, api
from odoo.addons.website.models.website import slug
class EventCompassion(models.Model):
_name = 'crm.event.compassion'
_inherit = ['crm.event.compassion', 'website.published.mixin',
'translatable.model', 'website.seo.metadata']
name = fields.Char(translate=True)
website_description = fields.Html(translate=True, sanitize=False)
thank_you_text = fields.Html(translate=True)
picture_1 = fields.Binary('Banner image', attachment=True)
filename_1 = fields.Char(compute='_compute_filenames')
website_side_info = fields.Html(
string='Side info', translate=True, sanitize=False
)
event_type_id = fields.Many2one(
'event.type', 'Type', required=True,
# Avoids selecting generic events
domain=[('id', '>', 1)],
)
type = fields.Selection(compute='_compute_event_type', store=True)
odoo_event_id = fields.Many2one('event.event')
accepts_registrations = fields.Boolean(
related='event_type_id.accepts_registrations')
seats_expected = fields.Integer(related='odoo_event_id.seats_expected')
@api.multi
def _compute_website_url(self):
for event in self:
event.website_url = "/event/{}".format(slug(event))
@api.multi
def _compute_filenames(self):
for event in self:
event.filename_1 = event.name + '-1.jpg'
@api.multi
@api.depends('event_type_id')
def _compute_event_type(self):
sport = self.env.ref('website_event_compassion.event_type_sport')
stand = self.env.ref('website_event_compassion.event_type_stand')
concert = self.env.ref('website_event_compassion.event_type_concert')
pres = self.env.ref('website_event_compassion.event_type_presentation')
meeting = self.env.ref('website_event_compassion.event_type_meeting')
group = self.env.ref('website_event_compassion.event_type_group_visit')
youth = self.env.ref('website_event_compassion.event_type_youth_trip')
indiv = self.env.ref(
'website_event_compassion.event_type_individual_visit')
for event in self:
if event.event_type_id == sport:
event.type = 'sport'
elif event.event_type_id == stand:
event.type = 'stand'
elif event.event_type_id == concert:
event.type = 'concert'
elif event.event_type_id == pres:
event.type = 'presentation'
elif event.event_type_id == meeting:
event.type = 'meeting'
elif event.event_type_id in group | youth | indiv:
event.type = 'tour'
def open_registrations(self):
"""
This will create an event.event record and link it to the Compassion
Event. It's useful for adding participants and managing e-mails
and participant list.
:return: action opening the wizard
"""
return {
'name': 'Open event registrations',
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'crm.event.compassion.open.wizard',
'context': self.env.context,
'target': 'new',
}
def open_participants(self):
return {
'name': 'Manage participants',
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'kanban,tree,form,calendar,graph',
'res_model': 'event.registration',
'domain': [('event_id', '=', self.odoo_event_id.id)],
'context': self.with_context(
default_compassion_event_id=self.id,
default_event_type_id=self.event_type_id.id,
default_event_id=self.odoo_event_id.id,
default_amount_objective=self.odoo_event_id.
participants_amount_objective
).env.context,
'target': 'current',
}
|
Python
| 0
|
@@ -1209,16 +1209,25 @@
lection(
+%0A
compute=
@@ -1248,16 +1248,35 @@
t_type',
+ default='meeting',
store=T
|
739f72ae0bd873ac8d51789e90988d609b08a803
|
Add typos and nonsense to pass distribution plan
|
inpassing/pass_util.py
|
inpassing/pass_util.py
|
# Copyright (c) 2016 Luke San Antonio Bialecki
# All rights reserved.
from sqlalchemy.sql import and_, or_
from .models import Pass, PassRequest, db
def get_user_passes(user_id):
"""Returns all owned, borrowed and requested passes of a user."""
# Find pending and successful requests
pending_requests = db.session.query(PassRequest).filter(
and_(PassRequest.requestor_id == user_id,
PassRequest.assigned_pass_id == None)
).all()
successful_requests = db.session.query(PassRequest).filter(
and_(PassRequest.requestor_id == user_id,
PassRequest.assigned_pass_id != None)
).all()
# Borrowed passes are ones that are not owned by this user but are currently
# being used / borrowed.
borrowed_passes = db.session.query(Pass).filter(
and_(Pass.owner_id != user_id,
Pass.user_id == user_id)
).all()
# All non-pending passes related to this user
passes = borrowed_passes[:]
# Note that the request state ID and spot num can be different from
# what was actually assigned, so we have to use the values from the assigned
# pass object itself.
passes.extend([req.assigned_pass for req in successful_requests])
ret = []
ret.extend([{
'pass_id': pas.id,
'org_id': pas.org_id,
'pending': False,
'owned': pas.owner_id == user_id,
'using': ((pas.owner_id == user_id and pas.user_id == None) or
pas.user_id == user_id),
'state_id': pas.state_id,
'spot_num': pas.spot_num,
} for pas in passes])
ret.extend([{
'request_id': req.id,
'org_id': req.org_id,
'pending': True,
'owned': False,
'using': False,
'request_time': req.request_time.isoformat(),
'state_id': req.state_id,
'spot_num': req.spot_num,
} for req in pending_requests])
return ret
def distribute_passes(users):
"""Distribute / lend passes to new users with a fancy magic algorithm.
= Proposed algorithm
1. For each user, weight the time since their last borrow and how many
borrows overall to form their score.
2. Sort users by score
3. Give pass to top user.
= Stupid Ideas
1. First come, first serve.
2. Distribute a pass to a random seeking individual at a random time after
the pass goes up for grabs.
3. Give Luke the pass. Always.
= Smart Ideas
$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
$ 1. Pay to for each pass. Price scales with the score detailed above. $
$ 2. Have users play that gambling game where you drop a ball on pegs $
$ and it randomly goes left or right until the bottom. The ball in the $
$ center hole gets the pass. Each ball costs the user one ad viewing. $
$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
"""
|
Python
| 0
|
@@ -2193,16 +2193,17 @@
by score
+.
%0A 3.
@@ -2220,16 +2220,64 @@
to t
-op
+he
user
-.
+ with the highest score.%0A 4. ???%0A 5. Profit
%0A%0A
@@ -2367,15 +2367,17 @@
dom
+(
seeking
+)
ind
@@ -2470,14 +2470,16 @@
ss.
+*
Always
+*
.%0A%0A
@@ -2586,14 +2586,17 @@
1.
-Pay to
+Users pay
for
@@ -2605,19 +2605,24 @@
ach pass
-. P
+ where p
rice sca
@@ -2644,23 +2644,92 @@
ore
-detailed above.
+ $%0A $ detailed above.
$%0A
@@ -3031,20 +3031,21 @@
$$$$$$$$$$$%0A
+%0A
%22%22%22%0A
|
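A minimal sketch of the "proposed algorithm" step from the docstring, assuming each user record carries a last_borrow timestamp and a borrow_count, with the two weights left as free parameters:

from datetime import datetime, timedelta

def score(user, now=None, w_time=1.0, w_count=2.0):
    # higher score: waited longer since the last borrow, borrowed less overall
    now = now or datetime.utcnow()
    days_waiting = (now - user['last_borrow']).days
    return w_time * days_waiting - w_count * user['borrow_count']

users = [
    {'id': 1, 'last_borrow': datetime.utcnow() - timedelta(days=10), 'borrow_count': 3},
    {'id': 2, 'last_borrow': datetime.utcnow() - timedelta(days=2), 'borrow_count': 0},
]
winner = max(users, key=score)   # sort by score, give the pass to the top user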
b9fc0685b3adb05a5049cfa9b68676e00878d48a
|
Add .fillna(0)
|
instagram_collector.py
|
instagram_collector.py
|
import sys
from settings import instgram_access_token
from api import InstagramAPI, Alchemy
import pandas as pd
def following_users(api, user_name):
instgram_user_id = api.user_id(user_name=user_name)
following_users = api.follows_list(user_id=instgram_user_id)
return following_users
def userinfo_list(api, following_users):
userinfo_list = []
for user in following_users:
entries = api.media_list(user["user_id"])
for entry in entries:
tag_list = Alchemy.tag_list(image_url=entry['url'])
if tag_list is None:
return userinfo_list
entry.update({'tag_list': tag_list})
tags = [entry['tag_list'] for entry in entries]
df = pd.DataFrame(tags).fillna(0)
user_summary = df.sum()
user_summary = user_summary.to_dict()
user.update(user_summary)
userinfo_list.append(user)
return userinfo_list
if __name__ == '__main__':
argvs = sys.argv
argc = len(argvs)
if len(argvs) != 2:
print('Usage: # python %s INSTAGRAM_USER_NAME' % argvs[0])
quit()
instgram_user_name = argvs[1]
api = InstagramAPI(access_token=instgram_access_token)
following_users = following_users(api, instgram_user_name)
following_users = following_users[0:40]
userinfo_list = userinfo_list(api, following_users)
users_df = pd.DataFrame(userinfo_list)
users_df.to_csv("user_tags.csv")
# for following_user in following_users:
# # entries = api.media_list(user_name=following_user)
# # for entry in entries:
# # image_url = entry["url"]
# # tag_list = Alchemy.tag_list(image_url=image_url)
# # entry.update({"tag_list": tag_list})
# # print(entry)
# # print(entries)
print(userinfo_list)
|
Python
| 0.000001
|
@@ -1410,16 +1410,26 @@
fo_list)
+.fillna(0)
%0A use
|
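The .fillna(0) added by this commit matters because the dicts built per user rarely share the same keys, so pd.DataFrame fills the gaps with NaN; replacing those with 0 gives every user an explicit zero count before the CSV export. A small illustration with made-up tags:

import pandas as pd

rows = [{'cat': 2, 'dog': 1}, {'dog': 3, 'bird': 1}]
df = pd.DataFrame(rows)   # keys missing from a row become NaN
print(df.fillna(0))       # NaN -> 0.0, so every cell is a real count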
1483b7946f929ee6dc8d5a8e972c712af35d4aea
|
Add capacity to save parsed objects to models in management command "process_xslt"
|
xml_json_import/management/commands/process_xslt.py
|
xml_json_import/management/commands/process_xslt.py
|
from django.core.management.base import BaseCommand
from lxml import etree, html
import urllib2
from os import path
class Command(BaseCommand):
help = 'Processes XSLT transformation on a fetched by URL resource and outputs the result'
def add_arguments(self, parser):
parser.add_argument('url', help='URL to fetch source XML')
parser.add_argument('xslt_file', help='Path to XSLT transformation file')
parser.add_argument('--validate', action='store_true',
help='Validate against Relax NG schema after transformation')
rng_file = path.join(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))), 'schema.rng')
parser.add_argument('--rng_file', default=rng_file,
help='Path to RELAX NG file. Defaults to schema.rng in module dir. '
'Used only if --validate is set')
def handle(self, *args, **options):
response = urllib2.urlopen(options['url'])
encoding = response.headers.getparam('charset')
content_type = response.info().type
if 'xml' in content_type:
source_etree = etree.parse(response)
elif 'html' in content_type:
source_etree = html.parse(response)
xslt_etree = etree.parse(options['xslt_file'])
transform = etree.XSLT(xslt_etree)
transformed_etree = transform(source_etree)
output = etree.tostring(transformed_etree, pretty_print=True, encoding=encoding)
print '<?xml version="1.0" encoding="' + encoding + '"?>\n' + output
if options['validate']:
rng_file_etree = etree.parse(options['rng_file'])
relaxng = etree.RelaxNG(rng_file_etree)
try:
relaxng.assertValid(transformed_etree)
print 'Document is valid'
except etree.DocumentInvalid as ex:
print 'Document is not valid: ' + str(ex)
|
Python
| 0
|
@@ -112,16 +112,34 @@
rt path%0D
+%0Aimport importlib%0D
%0A%0D%0Aclass
@@ -944,16 +944,443 @@
s set')%0D
+%0A parser.add_argument('--save', action='store_true', %0D%0A help='Save data to the model. Successful validation against Relax NG '%0D%0A 'schema is required. Model names and fields in transformed XML '%0D%0A 'must represent existing models and fields. Otherwise import '%0D%0A 'will break with an exception')%0D
%0A%0D%0A d
@@ -2092,16 +2092,35 @@
lidate'%5D
+ or options%5B'save'%5D
:%0D%0A
@@ -2296,32 +2296,32 @@
sformed_etree)%0D%0A
-
@@ -2347,16 +2347,949 @@
valid'%0D%0A
+ if options%5B'save'%5D:%0D%0A saved_objects_count = 0%0D%0A for model_element in transformed_etree.xpath('//model'):%0D%0A application_name, model_name = model_element.attrib%5B'model'%5D.split('.')%0D%0A models_import_str = application_name + '.models'%0D%0A models = importlib.import_module(models_import_str)%0D%0A model = getattr(models, model_name)%0D%0A for item_element in model_element.xpath('.//item'):%0D%0A obj = model()%0D%0A for field_element in item_element.xpath('.//field'):%0D%0A setattr(obj, field_element.attrib%5B'name'%5D, field_element.text)%0D%0A obj.save()%0D%0A saved_objects_count += 1%0D%0A print 'Saved objects: ' + str(saved_objects_count)%0D%0A
|
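The --save branch added by this commit resolves Django models dynamically from the app.Model strings found in the transformed XML; the importlib pattern on its own, with hypothetical app and model names:

import importlib

def resolve_model(dotted_name):
    # 'shop.Product' -> the Product class from the shop.models module
    app_label, model_name = dotted_name.split('.')
    models = importlib.import_module(app_label + '.models')
    return getattr(models, model_name)

# obj = resolve_model('shop.Product')()   # hypothetical app/model
# setattr(obj, 'name', 'example')
# obj.save()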
c1d8cf4a2aea0ece6bc301c30fa9dde36d9d9cc6
|
Version bump [ci skip]
|
bulbs/__init__.py
|
bulbs/__init__.py
|
__version__ = "0.3.4"
|
Python
| 0
|
@@ -16,7 +16,7 @@
0.3.
-4
+5
%22%0A
|
5ac7bba7ba8f411ed8daaf2055fde56eda152b6c
|
Add missing context processor to test app
|
tests/runtests.py
|
tests/runtests.py
|
#!/usr/bin/env python
import os
import sys
from optparse import OptionParser
AVAILABLE_DATABASES = {
'psql': {'ENGINE': 'django.db.backends.postgresql_psycopg2'},
'mysql': {'ENGINE': 'django.db.backends.mysql'},
'sqlite': {'ENGINE': 'django.db.backends.sqlite3'},
}
def main():
# Parse the command-line options.
parser = OptionParser()
parser.add_option(
"-v", "--verbosity",
action="store",
dest="verbosity",
default="1",
type="choice",
choices=["0", "1", "2", "3"],
help="Verbosity level; 0=minimal output, 1=normal output, 2=all output",
)
parser.add_option(
"--noinput",
action="store_false",
dest="interactive",
default=True,
help="Tells Django to NOT prompt the user for input of any kind.",
)
parser.add_option(
"--failfast",
action="store_true",
dest="failfast",
default=False,
help="Tells Django to stop running the test suite after first failed test.",
)
parser.add_option(
"-d", "--database",
action="store",
dest="database",
default="sqlite",
type="choice",
choices=list(AVAILABLE_DATABASES.keys()),
help="Select database backend for tests. Available choices: {}".format(
', '.join(AVAILABLE_DATABASES.keys())),
)
options, args = parser.parse_args()
# Configure Django.
from django.conf import settings
# database settings
if options.database:
database_setting = AVAILABLE_DATABASES[options.database]
if options.database == "sqlite":
database_default_name = os.path.join(os.path.dirname(__file__), "db.sqlite3")
else:
database_default_name = "test_project"
database_setting.update(dict(
NAME=os.environ.get("DB_NAME", database_default_name),
USER=os.environ.get("DB_USER", ""),
PASSWORD=os.environ.get("DB_PASSWORD", "")))
else:
database_setting = dict(
ENGINE=os.environ.get("DB_ENGINE", 'django.db.backends.sqlite3'),
NAME=os.environ.get("DB_NAME", os.path.join(os.path.dirname(__file__), "db.sqlite3")),
USER=os.environ.get("DB_USER", ""),
PASSWORD=os.environ.get("DB_PASSWORD", ""))
settings.configure(
DEBUG=False,
DATABASES={
"default": database_setting
},
ROOT_URLCONF='test_watson.urls',
INSTALLED_APPS=(
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.admin",
"watson",
"test_watson",
),
MIDDLEWARE_CLASSES=(
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
),
USE_TZ=True,
STATIC_URL="/static/",
TEST_RUNNER="django.test.runner.DiscoverRunner",
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
}],
)
# Run Django setup (1.7+).
import django
try:
django.setup()
except AttributeError:
pass # This is Django < 1.7
# Configure the test runner.
from django.test.utils import get_runner
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=int(options.verbosity),
interactive=options.interactive,
failfast=options.failfast,
)
# Run the tests.
failures = test_runner.run_tests(["test_watson"])
if failures:
sys.exit(failures)
if __name__ == "__main__":
main()
|
Python
| 0.000001
|
@@ -3380,16 +3380,112 @@
ates'%5D,%0A
+ 'OPTIONS': %7B'context_processors': %5B'django.contrib.auth.context_processors.auth'%5D%7D,%0A
|
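After this patch the TEMPLATES entry carries an OPTIONS key, so request.user and friends resolve inside templates during tests; the resulting setting, reconstructed from the hunk:

TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'DIRS': ['templates'],
    'APP_DIRS': True,
    'OPTIONS': {'context_processors': ['django.contrib.auth.context_processors.auth']},
}]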
d4d57718674e078a71c860d53b7157f4e75dfec2
|
Fix mistakes with message printing in tester and tidy up output a little bit.
|
tests/runtests.py
|
tests/runtests.py
|
#!/usr/bin/env python
"""
Simple script to run test OpenCL programs
"""
import argparse
try:
# Python 3
import configparser as cp
except ImportError:
# Python 2.x
import ConfigParser as cp
import copy
import glob
import logging
import os
import re
import subprocess
import shutil
import sys
def printError(string):
logging.info('\033[0;32m*** {} ***\033[0m'.format(string))
class LibTest(object):
def __init__(self, path):
self.path = os.path.abspath(path)
def _run(self, extra_env):
logging.info('*** Running : {} ***'.format(self.path))
env = copy.deepcopy(os.environ)
env.update(extra_env)
logging.debug('env: {}'.format(env))
expectedOutputDir = os.path.join(os.path.dirname(self.path), "gvki-0")
assert not os.path.exists(expectedOutputDir)
retcode = subprocess.call(self.path, env=env, stdin=None, stdout=None, stderr=None, cwd=os.path.dirname(self.path), shell=False)
if retcode != 0:
logging.error('\033[0;31m*** {} failed ***\033[0m'.format(self.path))
else:
logging.info('\033[0;32m*** {} passed ***\033[0m'.format(self.path))
if not os.path.exists(expectedOutputDir):
PrintError('OutputDir missing')
return 1
else:
expectedJSONFile = os.path.join(expectedOutputDir, 'log.json')
if not os.path.exists(expectedJSONFile):
return 1
# FIXME: Check the contents of the JSON file and kernel(s) look right
# There should be at least one kernel
recordedKernels=glob.glob(expectedOutputDir + os.path.sep + '*.cl')
logging.info('Recorded kernels: {}'.format(recordedKernels))
if len(recordedKernels) == 0:
PrintError('No kernels recorded')
return 1
logging.info('')
logging.info('')
# FIXME: Check stuff was recorded
return 0 if retcode == 0 else 1
# So we can sort tests
def __lt__(self, other):
return self.path < other.path
class MacroLibTest(LibTest):
def run(self):
return self._run({})
class PreloadLibTest(LibTest):
def __init__(self, path, libPath):
super(PreloadLibTest, self).__init__(path)
self.libPath = libPath
def __lt__(self, other):
return self.path < other.path
def run(self):
if sys.platform == 'darwin':
return self._run({ 'DYLD_INSERT_LIBRARIES': self.libPath})
else:
return self._run({ 'LD_PRELOAD': self.libPath})
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('directory', help='Directory to scan for test OpenCL programs')
parser.add_argument('-l', '--loglevel', type=str, default="info",choices=['debug','info','warning','error','critical'])
parsedArgs = parser.parse_args(args)
logging.basicConfig(level=getattr(logging, parsedArgs.loglevel.upper(), None))
if not os.path.isdir(parsedArgs.directory):
logging.error('"{}" is not a directory'.format(parsedArgs.directory))
return 1
directoryContainingConfig = os.path.dirname(__file__)
# Load config file
configFilePath = os.path.join(directoryContainingConfig, 'config.cfg')
logging.info('Looking for config file "{}"'.format(configFilePath))
config = cp.ConfigParser()
config.read(configFilePath)
preloadlibPath = config.get('settings', 'preloadLibPath')
preloadlibPath = preloadlibPath.replace("//","/")
logging.debug('preloadLibPath is "{}"'.format(preloadlibPath))
if not os.path.exists(preloadlibPath):
logging.error('preloadLibPath "{}" does not exist'.format(preloadlibPath))
return 1
logging.info('Scanning for tests in "{}"'.format(parsedArgs.directory))
tests = [ ]
for (dirpath, dirnames, filenames) in os.walk(parsedArgs.directory):
for f in filenames:
if f.endswith('_gvki_preload'):
tests.append( PreloadLibTest( os.path.join(dirpath, f), preloadlibPath))
elif f.endswith('_gvki_macro'):
tests.append( MacroLibTest( os.path.join(dirpath, f)))
# clean up any old output directories
for directory in dirnames:
if re.match(r'gvki-\d+', directory):
toRemove = os.path.join(dirpath, directory)
logging.info('Deleting {}'.format(toRemove))
shutil.rmtree(toRemove)
logging.info('Found {} tests'.format(len(tests)))
tests.sort()
count = 0
for test in tests:
logging.info('Running test: {}'.format(test.path))
count += test.run()
logging.info('Failures {}'.format(count))
return count != 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Python
| 0
|
@@ -318,21 +318,98 @@
ntError(
-strin
+msg):%0A logging.error('%5C033%5B0;31m*** %7B%7D ***%5C033%5B0m'.format(msg))%0A%0Adef printOk(ms
g):%0A
@@ -462,18 +462,16 @@
mat(
+m
s
-trin
g))%0A%0A
+%0A
clas
@@ -1083,63 +1083,34 @@
-logging.error('%5C033%5B0;31m*** %7B%7D failed ***%5C033%5B0m
+printError('%7B%7D failed
'.for
-m
at(s
@@ -1150,56 +1150,26 @@
-logging.info('%5C033%5B0;32m*** %7B%7D passed ***%5C033%5B0m
+printOk('%7B%7D passed
'.fo
@@ -1248,33 +1248,33 @@
-P
+p
rintError('Outpu
@@ -1866,17 +1866,17 @@
-P
+p
rintErro
@@ -4783,30 +4783,28 @@
()%0A%0A
-logging.info('
+msg = '# of
Failures
@@ -4821,16 +4821,89 @@
t(count)
+%0A if count == 0:%0A printOk(msg)%0A else:%0A printError(msg
)%0A%0A r
|
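The tidy-up funnels the ANSI escapes through two helpers instead of repeating them inline; a standalone version of the same pattern:

import logging

RED, GREEN, RESET = '\033[0;31m', '\033[0;32m', '\033[0m'

def print_error(msg):
    logging.error('%s*** %s ***%s', RED, msg, RESET)

def print_ok(msg):
    logging.info('%s*** %s ***%s', GREEN, msg, RESET)

logging.basicConfig(level=logging.INFO)
print_ok('3 tests passed')      # green banner on ANSI-capable terminals
print_error('1 test failed')    # red banner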
84cdde09d574d2a52446bd751445747407733b22
|
Remove print statement
|
tests/settings.py
|
tests/settings.py
|
import uuid
import os.path
from django.conf import global_settings, settings
from oscar import OSCAR_MAIN_TEMPLATE_DIR, get_core_apps
from oscar.defaults import * # noqa
from accounts import TEMPLATE_DIR as ACCOUNTS_TEMPLATE_DIR
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
}
STATICFILES_FINDERS=(
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
SECRET_KEY = str(uuid.uuid4())
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.flatpages',
'accounts',
'compressor',
'widget_tweaks',
] + get_core_apps()
MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES + (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'oscar.apps.basket.middleware.BasketMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
'oscar.apps.search.context_processors.search_form',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.core.context_processors.metadata',
)
DEBUG=False
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'
}
}
ROOT_URLCONF = 'tests.urls'
TEMPLATE_DIRS = (
OSCAR_MAIN_TEMPLATE_DIR,
os.path.join(OSCAR_MAIN_TEMPLATE_DIR, 'templates'),
ACCOUNTS_TEMPLATE_DIR,
# Include sandbox templates as they patch from templates that
# are in Oscar 0.4 but not 0.3
'sandbox/templates',
)
print TEMPLATE_DIRS
STATIC_URL='/static/'
COMPRESS_ROOT=''
COMPRESS_ENABLED=False
SITE_ID=1
ACCOUNTS_UNIT_NAME='Giftcard'
NOSE_ARGS=['--nocapture']
USE_TZ=True
DDF_FILL_NULLABLE_FIELDS=False
ACCOUNTS_DEFERRED_INCOME_ACCOUNT_TYPES=('Test accounts',)
|
Python
| 0.007015
|
@@ -1904,29 +1904,8 @@
%0A)%0A%0A
-print TEMPLATE_DIRS%0A%0A
STAT
|
3612a418a443b724332aa6759e30eefd8122df3e
|
Update expected package name and version
|
tests/test_cli.py
|
tests/test_cli.py
|
import pytest
import json
import os.path
from click.testing import CliRunner
from elm_doc import cli
@pytest.fixture
def runner():
return CliRunner()
def test_cli_missing_arg(tmpdir, runner):
with tmpdir.as_cwd():
result = runner.invoke(cli.main)
assert result.exception
assert result.exit_code == 2
def test_cli_invalid_mount_at(tmpdir, runner):
with tmpdir.as_cwd():
result = runner.invoke(cli.main, ['--output', 'docs', '.', '--mount-at', 'elm'])
assert result.exception
assert result.exit_code == 2
assert 'mount-at' in result.output
def test_cli_non_binary_elm_make(tmpdir, runner):
with tmpdir.as_cwd():
tmpdir.join('elm-make').write('binwrapped elm!')
result = runner.invoke(cli.main, ['--output', 'docs', '.', '--elm-make', 'elm-make'])
assert result.exception
assert result.exit_code == 2, result.output
assert 'elm-make' in result.output
def test_cli_non_existent_elm_make(tmpdir, runner):
with tmpdir.as_cwd():
result = runner.invoke(cli.main, ['--output', 'docs', '.', '--elm-make', 'elm-make'])
assert result.exception
assert result.exit_code == 2, result.output
assert 'elm-make' in result.output
def test_cli_in_empty_project(tmpdir, runner):
with tmpdir.as_cwd():
result = runner.invoke(cli.main, ['--output', 'docs', '.'])
assert result.exception
assert result.exit_code != 0
def test_cli_doit_only_arg_in_real_project(tmpdir, runner, elm_version, make_elm_project):
project_dir = make_elm_project(elm_version, tmpdir, copy_elm_stuff=True)
with tmpdir.as_cwd():
tmpdir.mkdir('docs')
result = runner.invoke(cli.main, ['--output', 'docs', project_dir.basename, '--doit-args', 'clean', '--dry-run'])
assert not result.exception, result.output
assert result.exit_code == 0
assert tmpdir.join('docs').check(exists=True)
def test_cli_in_real_project(tmpdir, runner, overlayer, elm_version, make_elm_project):
modules = ['Main.elm']
project_dir = make_elm_project(elm_version, tmpdir, copy_elm_stuff=True, modules=modules)
output_dir = tmpdir.join('docs')
with tmpdir.as_cwd():
project_dir.join('README.md').write('hello')
result = runner.invoke(cli.main, ['--output', 'docs', project_dir.basename])
assert not result.exception, result.output
assert result.exit_code == 0
assert output_dir.join('assets').check(dir=True)
assert output_dir.join('artifacts').check(dir=True)
elm_lang_html_docs_path = output_dir.join(
'packages', 'elm-lang', 'html', '2.0.0', 'documentation.json')
assert elm_lang_html_docs_path.check()
package_dir = output_dir.join('packages', 'user', 'project', '1.0.0')
package_latest_link = package_dir.dirpath('latest')
assert package_latest_link.check(dir=True, link=True)
assert not os.path.isabs(package_latest_link.readlink())
assert package_dir.join('README.md').check()
package_index = package_dir.join('index.html')
assert package_index.check()
package_main = package_dir.join('Main')
assert package_main.check()
package_docs = package_dir.join('documentation.json')
assert package_docs.check()
assert json.loads(package_docs.read())[0]['name'] == 'Main'
all_packages = output_dir.join('all-packages')
assert all_packages.check()
assert len(json.loads(all_packages.read())) > 0
new_packages = output_dir.join('new-packages')
assert new_packages.check()
assert len(json.loads(new_packages.read())) > 0
def test_cli_validate_real_project(
tmpdir, runner, overlayer, elm_version, make_elm_project):
modules = ['Main.elm']
project_dir = make_elm_project(elm_version, tmpdir, copy_elm_stuff=True, modules=modules)
output_dir = tmpdir.join('docs')
with tmpdir.as_cwd():
project_dir.join('README.md').write('hello')
result = runner.invoke(cli.main, ['--validate', project_dir.basename])
assert not result.exception, result.output
assert result.exit_code == 0
assert output_dir.check(exists=False)
def test_cli_validate_subset_of_real_project_with_forced_exclusion(
tmpdir, runner, overlayer, elm_version, make_elm_project):
modules = ['Main.elm', 'MissingModuleComment.elm']
project_dir = make_elm_project(elm_version, tmpdir, copy_elm_stuff=True, modules=modules)
output_dir = tmpdir.join('docs')
with tmpdir.as_cwd():
project_dir.join('README.md').write('hello')
result = runner.invoke(cli.main, [
project_dir.basename,
os.path.join(project_dir.basename, 'Main.elm'),
os.path.join(project_dir.basename, 'MissingModuleComment.elm'),
'--validate',
'--exclude',
'MissingModuleComment',
'--force-exclusion',
])
assert not result.exception, result.output
assert result.exit_code == 0
# validation should not output anything
assert output_dir.check(exists=False)
def test_cli_validate_invalid_project_with_masked_exclude(
tmpdir, runner, overlayer, elm_version, make_elm_project, request):
modules = ['MissingModuleComment.elm', 'PublicFunctionNotInAtDocs.elm']
project_dir = make_elm_project(elm_version, tmpdir, copy_elm_stuff=True, modules=modules)
output_dir = tmpdir.join('docs')
with tmpdir.as_cwd():
result = runner.invoke(cli.main, ['--output', 'docs', '--validate', project_dir.basename])
problem_lines = [line for line in result.output.splitlines()
if 'NO DOCS' in line or 'DOCS MISTAKE' in line]
assert len(problem_lines) == 2
# traceback should be suppressed
assert 'CalledProcessError' not in result.output
assert result.exception, result.output
assert result.exit_code == 1
assert output_dir.check(exists=False)
|
Python
| 0
|
@@ -2667,13 +2667,8 @@
'elm
--lang
', '
@@ -2675,17 +2675,17 @@
html', '
-2
+1
.0.0', '
|
26116bb984f7a970c67bcdc01ff026a3fc5f0905
|
create secondary parses
|
tests/test_ddl.py
|
tests/test_ddl.py
|
from pytest import fixture
from cdm.ddl import parse_line, create_vertex, create_vertex_index,\
CreateVertex, \
CreateEdge, CreateProperty, CreateIndex, CreateGraph
def test_create_graph():
s = "CREATE GRAPH jon"
parsed = parse_line(s)
assert isinstance(parsed, CreateGraph)
assert "system.createGraph('jon').build()" in str(parsed)
def test_create_vertex_label():
cmd = "CREATE vertex movie"
result = create_vertex.parseString(cmd)[0]
assert isinstance(result, CreateVertex)
result = parse_line(cmd)
assert isinstance(result, CreateVertex)
assert result.label == "movie"
assert "buildVertexLabel" in str(result)
assert "movie" in str(result)
result2 = parse_line("CREATE vertex label movie")
assert isinstance(result2, CreateVertex)
def test_create_edge_label():
result = parse_line("CREATE edge rated")
assert isinstance(result, CreateEdge)
assert result.label == "rated"
result2 = parse_line("CREATE edge label rated")
assert isinstance(result2, CreateEdge)
def test_create_property():
result = parse_line("CREATE PROPERTY name text")
assert isinstance(result, CreateProperty)
result = parse_line("CREATE PROPERTY name TEXT")
assert isinstance(result, CreateProperty)
"""
graph.schema().vertexLabel("ip").buildVertexIndex("ipById").materialized().byPropertyKey("id").add()
Secondary
graph.schema().vertexLabel("ip").buildVertexIndex("ipByCountry").secondary().byPropertyKey("country").add()
Search
graph.schema().vertexLabel("swid").buildVertexIndex("search").search().byPropertyKey("dob").add()
"""
def test_create_index_fulltext():
s = "CREATE materialized INDEX movie_title_idx ON VERTEX movie(title )"
result = create_vertex_index.parseString(s)
# result = parse_line()
# assert isinstance(result, CreateIndex)
#
# def test_create_index_materialize():
# result = parse_line("CREATE INDEX movie_title_idx ON movie(title) SEARCH");
# result = parse_line("CREATE INDEX user_id_idx ON movie(user_id) MATERIALIZED")
|
Python
| 0.000281
|
@@ -1798,16 +1798,137 @@
ring(s)%0A
+ s = %22CREATE secondary INDEX movie_title_idx ON VERTEX movie(title )%22%0A result = create_vertex_index.parseString(s)%0A
%0A%0A#
|
8802611f515df7b123f907efb6f7ffac9f11a42f
|
create mock ami and add test for ami list.
|
tests/test_ec2.py
|
tests/test_ec2.py
|
from __future__ import (absolute_import, print_function, unicode_literals)
from acli.output.ec2 import (output_ec2_list, output_ec2_info)
from acli.services.ec2 import (ec2_list, ec2_info, ec2_summary)
from acli.config import Config
from moto import mock_ec2
import pytest
from boto3.session import Session
session = Session(region_name="eu-west-1")
@pytest.yield_fixture(scope='function')
def ec2_instances():
"""EC2 mock service"""
mock = mock_ec2()
mock.start()
client = session.client('ec2')
client.create_security_group(GroupName='group1', Description='my first sec group')
reservations = client.run_instances(ImageId='ami-12345', MinCount=2, MaxCount=2, SecurityGroups=['group1'])
for i, s in enumerate(reservations.get('Instances')):
client.create_tags(
Resources=[s.get('InstanceId')],
Tags=[{'Key': 'Name', 'Value': 'Bob'}])
ec2_resource = session.resource('ec2')
all_instances = ec2_resource.instances.all()
yield all_instances
mock.stop()
config = Config(cli_args={'--region': 'eu-west-1',
'--access_key_id': 'AKIAIOSFODNN7EXAMPLE',
'--secret_access_key': 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY'})
def test_ec2_list_service(ec2_instances):
with pytest.raises(SystemExit):
assert ec2_list(aws_config=config)
@mock_ec2
def test_ec2_list_service_no_instances():
with pytest.raises(SystemExit):
assert ec2_list(aws_config=config)
def test_ec2_info_service(ec2_instances):
with pytest.raises(SystemExit):
assert ec2_info(aws_config=config, instance_id=list(ec2_instances)[0].id)
def test_ec2_list_output(ec2_instances):
with pytest.raises(SystemExit):
assert output_ec2_list(output_media='console', instances=ec2_instances)
def test_ec2_output(ec2_instances):
with pytest.raises(SystemExit):
instance = list(ec2_instances)[0]
assert output_ec2_info(output_media='console', instance=instance)
# def test_ec2_summary(ec2_instances):
# with pytest.raises(SystemExit):
# instance = list(ec2_instances)[0]
# assert ec2_summary(aws_config=config)
|
Python
| 0
|
@@ -193,18 +193,28 @@
_summary
+, ami_list
)%0A
-
from acl
@@ -1033,16 +1033,550 @@
top()%0A%0A%0A
+@pytest.yield_fixture(scope='function')%0Adef amis():%0A %22%22%22AMI mock service%22%22%22%0A mock = mock_ec2()%0A mock.start()%0A client = session.client('ec2')%0A reservation = client.run_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)%0A instance = reservation.get('Instances')%5B0%5D%0A image_id = client.create_image(InstanceId=instance.get('InstanceId'),%0A Name=%22test-ami%22,%0A Description=%22this is a test ami%22)%0A yield client.describe_images()%0A mock.stop()%0A%0A
config =
@@ -2550,16 +2550,129 @@
ance)%0A%0A%0A
+def test_ami_list_service(amis):%0A with pytest.raises(SystemExit):%0A assert ami_list(aws_config=config)%0A%0A
# def te
|
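The new amis fixture follows the usual moto pattern: start the mock, create a real-looking resource through the ordinary boto3 client, and yield the query result. Condensed into a standalone sketch, using the same mock_ec2 decorator already used elsewhere in this file:

from moto import mock_ec2
from boto3.session import Session

@mock_ec2
def list_mock_amis():
    client = Session(region_name='eu-west-1').client('ec2')
    instance = client.run_instances(ImageId='ami-1234abcd',
                                    MinCount=1, MaxCount=1)['Instances'][0]
    client.create_image(InstanceId=instance['InstanceId'],
                        Name='test-ami', Description='this is a test ami')
    return client.describe_images()['Images']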
581eb398360cff5de1488fa06890195c808f8d10
|
fix make requests test
|
tests/test_run.py
|
tests/test_run.py
|
# coding=utf-8
from os.path import join
import pytest
from xpaw.spider import Spider
from xpaw.cmdline import main
from xpaw.run import run_crawler, run_spider, make_requests
from xpaw.http import HttpRequest, HttpResponse
from xpaw.errors import ClientError, HttpError
def test_run_crawler(tmpdir):
proj_name = 'test_run_crawler'
proj_dir = join(str(tmpdir), proj_name)
main(argv=['xpaw', 'init', proj_dir])
run_crawler(proj_dir, log_level='DEBUG')
def test_run_crawler_bad_config(tmpdir, capsys):
proj_dir = join(str(tmpdir))
config_file = join(proj_dir, 'config.py')
with open(config_file, 'w') as f:
f.write('bad config')
with pytest.raises(SyntaxError):
run_crawler(proj_dir, log_level='DEBUG')
_, _ = capsys.readouterr()
def test_failed_to_create_cluster(tmpdir, capsys):
proj_dir = join(str(tmpdir))
with pytest.raises(Exception):
run_crawler(proj_dir, log_level='DEBUG')
_, _ = capsys.readouterr()
class DummySpider(Spider):
def start_requests(self):
pass
def parse(self, response):
pass
def test_run_spider():
run_spider(DummySpider, log_level='DEBUG')
def test_make_requests():
requests = [None, 'http://localhost:8080',
'http://python.org/', HttpRequest('http://python.org'),
'http://httpbin.org/status/404']
results = make_requests(requests, log_level='DEBUG')
assert len(results) == len(requests)
assert results[0] is None
assert isinstance(results[1], ClientError)
assert isinstance(results[2], HttpResponse) and results[2].status == 200
assert isinstance(results[3], HttpResponse) and results[3].status == 200
assert isinstance(results[4], HttpError) and results[4].response.status == 404
|
Python
| 0.000002
|
@@ -1231,22 +1231,15 @@
p://
-localhost:8080
+unknonw
',%0A
|
4ba0a99a626e54cd7ca68692c5135bcd6b2f8d3a
|
Add test for STL vertex order
|
tests/test_stl.py
|
tests/test_stl.py
|
"""
Check things related to STL files
"""
try:
from . import generic as g
except BaseException:
import generic as g
class STLTests(g.unittest.TestCase):
def test_header(self):
m = g.get_mesh('featuretype.STL')
# make sure we have the right mesh
assert g.np.isclose(m.volume, 11.627733431196749, atol=1e-6)
# should have saved the header from the STL file
assert len(m.metadata['header']) > 0
# should have saved the STL face attributes
assert len(m.face_attributes['stl']) == len(m.faces)
assert len(m.faces) > 1000
# add a non-correlated face attribute, which should be ignored
m.face_attributes['nah'] = 10
# remove all faces except three random ones
m.update_faces([1, 3, 4])
# faces and face attributes should be untouched
assert len(m.faces) == 3
assert len(m.face_attributes['stl']) == 3
# attribute that wasn't len(m.faces) shouldn't have been touched
assert m.face_attributes['nah'] == 10
def test_attrib(self):
m = g.get_mesh('featuretype.STL')
len_vertices = len(m.vertices)
# assign some random vertex attributes
random = g.np.random.random(len(m.vertices))
m.vertex_attributes['random'] = random
m.vertex_attributes['nah'] = 20
# should have saved the STL face attributes
assert len(m.face_attributes['stl']) == len(m.faces)
assert len(m.faces) > 1000
# add a non-correlated face attribute, which should be ignored
m.face_attributes['nah'] = 10
# remove all faces except three random ones
m.update_faces([1, 3, 4])
# faces and face attributes should be untouched
assert len(m.faces) == 3
assert len(m.face_attributes['stl']) == 3
# attribute that wasn't len(m.faces) shouldn't have been touched
assert m.face_attributes['nah'] == 10
# check all vertices are still in place
assert m.vertex_attributes['nah'] == 20
assert g.np.allclose(random, m.vertex_attributes['random'])
assert len(m.vertices) == len_vertices
# remove all vertices except four
v_mask = [0, 1, 2, 3]
m.update_vertices(v_mask)
# make sure things are still correct
assert m.vertex_attributes['nah'] == 20
assert g.np.allclose(m.vertex_attributes['random'], random[v_mask])
assert len(m.vertices) == len(v_mask)
def test_ascii_multibody(self):
s = g.get_mesh('multibody.stl')
assert len(s.geometry) == 2
def test_empty(self):
# demo files to check
empty_files = ['stl_empty_ascii.stl',
'stl_empty_bin.stl']
for empty_file in empty_files:
e = g.get_mesh('emptyIO/' + empty_file)
# result should be an empty scene without vertices
assert isinstance(e, g.trimesh.Scene)
assert not hasattr(e, 'vertices')
# create export
try:
e.export(file_type='ply')
except BaseException:
return
raise ValueError("Shouldn't export empty scenes!")
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
|
Python
| 0
|
@@ -3198,16 +3198,664 @@
enes!%22)%0A
+ %0A def test_vertex_order(self):%0A # removing doubles should respect the vertex order%0A m_raw = g.get_mesh('featuretype.STL', process=False)%0A m_proc = g.get_mesh('featuretype.STL', process=True)%0A%0A verts_raw = g.trimesh.grouping.hashable_rows(m_raw.vertices)%0A verts_proc = g.trimesh.grouping.hashable_rows(m_proc.vertices) %0A%0A # go through all processed verts%0A # find index in unprocessed mesh%0A idxs = %5B%5D%0A for vert in verts_proc:%0A idxs.append(g.np.where(verts_raw == vert)%5B0%5D%5B0%5D)%0A%0A # indices should be increasing%0A assert (g.np.diff(idxs) %3E= 0).all()%0A%0A%0A
%0A%0Aif __n
|
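The new test pins down order preservation by locating each processed vertex's first occurrence in the raw array and asserting those indices never decrease; the same idea on toy data, with np.unique(return_index=True) standing in for trimesh's merge step:

import numpy as np

raw = np.array([[0., 0.], [1., 1.], [0., 0.], [2., 2.]])  # one duplicate row

# keep first occurrences in first-seen order (plain np.unique would sort)
_, first = np.unique(raw, axis=0, return_index=True)
processed = raw[np.sort(first)]

idxs = [np.where((raw == v).all(axis=1))[0][0] for v in processed]
assert (np.diff(idxs) >= 0).all()   # indices must be non-decreasing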
f30b658275a62294593d31175e1e13118140abb7
|
Fix flake8 in test_vcs.py
|
tests/test_vcs.py
|
tests/test_vcs.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_vcs
------------
Tests for `cookiecutter.vcs` module.
"""
import locale
import os
import pytest
import subprocess
import unittest
from cookiecutter.compat import patch
from cookiecutter import exceptions, utils, vcs
from tests.skipif_markers import skipif_no_network
try:
no_network = os.environ[u'DISABLE_NETWORK_TESTS']
except KeyError:
no_network = False
encoding = locale.getdefaultlocale()[1]
@skipif_no_network
def test_git_clone():
repo_dir = vcs.clone(
'https://github.com/audreyr/cookiecutter-pypackage.git'
)
assert repo_dir == 'cookiecutter-pypackage'
assert os.path.isfile('cookiecutter-pypackage/README.rst')
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
@skipif_no_network
def test_git_clone_checkout():
repo_dir = vcs.clone(
'https://github.com/audreyr/cookiecutter-pypackage.git',
'console-script'
)
git_dir = 'cookiecutter-pypackage'
assert repo_dir == git_dir
assert os.path.isfile(os.path.join('cookiecutter-pypackage', 'README.rst'))
proc = subprocess.Popen(
['git', 'symbolic-ref', 'HEAD'],
cwd=git_dir,
stdout=subprocess.PIPE
)
symbolic_ref = proc.communicate()[0]
branch = symbolic_ref.decode(encoding).strip().split('/')[-1]
assert 'console-script' == branch
if os.path.isdir(git_dir):
utils.rmtree(git_dir)
@skipif_no_network
def test_git_clone_custom_dir():
os.makedirs("tests/custom_dir1/custom_dir2/")
repo_dir = vcs.clone(
repo_url='https://github.com/audreyr/cookiecutter-pypackage.git',
checkout=None,
clone_to_dir="tests/custom_dir1/custom_dir2/"
)
with utils.work_in("tests/custom_dir1/custom_dir2/"):
test_dir = 'tests/custom_dir1/custom_dir2/cookiecutter-pypackage'
assert repo_dir == test_dir.replace("/", os.sep)
assert os.path.isfile('cookiecutter-pypackage/README.rst')
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
if os.path.isdir('tests/custom_dir1'):
utils.rmtree('tests/custom_dir1')
@skipif_no_network
def test_hg_clone():
repo_dir = vcs.clone(
'https://bitbucket.org/pokoli/cookiecutter-trytonmodule'
)
assert repo_dir == 'cookiecutter-trytonmodule'
assert os.path.isfile('cookiecutter-trytonmodule/README.rst')
if os.path.isdir('cookiecutter-trytonmodule'):
utils.rmtree('cookiecutter-trytonmodule')
@skipif_no_network
def test_vcs_not_installed(monkeypatch):
monkeypatch.setattr(
'cookiecutter.vcs.identify_repo',
lambda x: u'stringthatisntashellcommand'
)
with pytest.raises(exceptions.VCSNotInstalled):
vcs.clone("http://norepotypespecified.com")
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000001
|
@@ -186,46 +186,8 @@
st%0A%0A
-from cookiecutter.compat import patch%0A
from
@@ -424,16 +424,17 @@
e()%5B1%5D%0A%0A
+%0A
@skipif_
@@ -2519,16 +2519,17 @@
dule')%0A%0A
+%0A
@skipif_
|
e5fb49914cb8ff5bd336ad7362e409d3b34eb534
|
make -a short for --archive
|
teuthology/run.py
|
teuthology/run.py
|
import argparse
import os
import yaml
def config_file(string):
config = {}
try:
with file(string) as f:
g = yaml.safe_load_all(f)
for new in g:
config.update(new)
except IOError, e:
raise argparse.ArgumentTypeError(str(e))
return config
class MergeConfig(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
config = getattr(namespace, self.dest)
from teuthology.misc import deep_merge
for new in values:
deep_merge(config, new)
def parse_args():
parser = argparse.ArgumentParser(description='Run ceph integration tests')
parser.add_argument(
'-v', '--verbose',
action='store_true', default=None,
help='be more verbose',
)
parser.add_argument(
'config',
metavar='CONFFILE',
nargs='+',
type=config_file,
action=MergeConfig,
default={},
help='config file to read',
)
parser.add_argument(
'--archive',
metavar='DIR',
help='path to archive results in',
)
parser.add_argument(
'--description',
help='job description',
)
parser.add_argument(
'--owner',
help='job owner',
)
parser.add_argument(
'--lock',
action='store_true',
default=False,
help='lock machines for the duration of the run',
)
parser.add_argument(
'--block',
action='store_true',
default=False,
help='block until locking machines succeeds (use with --lock)',
)
args = parser.parse_args()
return args
def main():
from gevent import monkey; monkey.patch_all()
from .orchestra import monkey; monkey.patch_all()
import logging
log = logging.getLogger(__name__)
ctx = parse_args()
loglevel = logging.INFO
if ctx.verbose:
loglevel = logging.DEBUG
logging.basicConfig(
level=loglevel,
)
if 'targets' in ctx.config and 'roles' in ctx.config:
targets = len(ctx.config['targets'])
roles = len(ctx.config['roles'])
assert targets >= roles, \
'%d targets are needed for all roles but found %d listed.' % (roles, targets)
if ctx.block:
assert ctx.lock, \
'the --block option is only supported with the --lock option'
from teuthology.misc import read_config
read_config(ctx)
log.debug('\n '.join(['Config:', ] + yaml.safe_dump(ctx.config, default_flow_style=False).splitlines()))
ctx.summary = dict(success=True)
if ctx.owner is None:
from teuthology.misc import get_user
ctx.owner = get_user()
ctx.summary['owner'] = ctx.owner
if ctx.description is not None:
ctx.summary['description'] = ctx.description
if ctx.archive is not None:
os.mkdir(ctx.archive)
handler = logging.FileHandler(
filename=os.path.join(ctx.archive, 'teuthology.log'),
)
formatter = logging.Formatter(
fmt='%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s',
datefmt='%Y-%m-%dT%H:%M:%S',
)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
with file(os.path.join(ctx.archive, 'pid'), 'w') as f:
f.write('%d' % os.getpid())
with file(os.path.join(ctx.archive, 'owner'), 'w') as f:
f.write(ctx.owner + '\n')
for task in ctx.config['tasks']:
assert 'kernel' not in task, \
'kernel installation should be a base-level item, not part of the tasks list'
init_tasks = []
if ctx.lock:
assert 'targets' not in ctx.config, \
'You cannot specify targets in a config file when using the --lock option'
init_tasks.append({'internal.lock_machines': len(ctx.config['roles'])})
init_tasks.extend([
{'internal.save_config': None},
{'internal.check_lock': None},
{'internal.connect': None},
{'internal.check_conflict': None},
])
if 'kernel' in ctx.config:
init_tasks.append({'kernel': ctx.config['kernel']})
init_tasks.extend([
{'internal.base': None},
{'internal.archive': None},
{'internal.coredump': None},
{'internal.syslog': None},
{'internal.timer': None},
])
ctx.config['tasks'][:0] = init_tasks
from teuthology.run_tasks import run_tasks
try:
run_tasks(tasks=ctx.config['tasks'], ctx=ctx)
finally:
if not ctx.summary.get('success') and ctx.config.get('nuke-on-error'):
from teuthology.nuke import nuke
# only unlock if we locked them in the first place
nuke(ctx, log, ctx.lock)
if ctx.archive is not None:
with file(os.path.join(ctx.archive, 'summary.yaml'), 'w') as f:
yaml.safe_dump(ctx.summary, f, default_flow_style=False)
if not ctx.summary.get('success', True):
import sys
sys.exit(1)
def schedule():
parser = argparse.ArgumentParser(description='Schedule ceph integration tests')
parser.add_argument(
'config',
metavar='CONFFILE',
nargs='*',
type=config_file,
action=MergeConfig,
default={},
help='config file to read',
)
parser.add_argument(
'--name',
help='name of suite run the job is part of',
)
parser.add_argument(
'--last-in-suite',
action='store_true',
default=False,
help='mark the last job in a suite so suite post-processing can be run',
)
parser.add_argument(
'--email',
help='where to send the results of a suite (only applies to the last job in a suite)',
)
parser.add_argument(
'--timeout',
help='how many seconds to wait for jobs to finish before emailing results (only applies to the last job in a suite',
type=int,
)
parser.add_argument(
'--description',
help='job description',
)
parser.add_argument(
'--owner',
help='job owner',
)
parser.add_argument(
'--delete',
metavar='JOBID',
type=int,
nargs='*',
help='list of jobs to remove from the queue',
)
parser.add_argument(
'-v', '--verbose',
action='store_true',
default=False,
help='be more verbose',
)
ctx = parser.parse_args()
if not ctx.last_in_suite:
assert not ctx.email, '--email is only applicable to the last job in a suite'
assert not ctx.timeout, '--timeout is only applicable to the last job in a suite'
from teuthology.misc import read_config, get_user
if ctx.owner is None:
ctx.owner = 'scheduled_{user}'.format(user=get_user())
read_config(ctx)
import teuthology.queue
beanstalk = teuthology.queue.connect(ctx)
beanstalk.use('teuthology')
if ctx.delete:
for jobid in ctx.delete:
job = beanstalk.peek(jobid)
if job is None:
print 'job {jid} is not in the queue'.format(jid=jobid)
else:
job.delete()
return
job_config = dict(
config=ctx.config,
name=ctx.name,
last_in_suite=ctx.last_in_suite,
email=ctx.email,
description=ctx.description,
owner=ctx.owner,
verbose=ctx.verbose,
)
if ctx.timeout is not None:
job_config['results_timeout'] = ctx.timeout
job = yaml.safe_dump(job_config)
jid = beanstalk.put(job, ttr=60*60*24)
print 'Job scheduled with ID {jid}'.format(jid=jid)
|
Python
| 0.000871
|
@@ -1041,24 +1041,30 @@
ent(%0A
+ '-a',
'--archive'
|
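argparse accepts any number of option strings per argument and derives the destination from the long form, so the short flag is a one-line addition; in isolation:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-a', '--archive', metavar='DIR',
                    help='path to archive results in')
args = parser.parse_args(['-a', '/tmp/run1'])
assert args.archive == '/tmp/run1'   # both spellings land in the same dest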
40c8bec919f2e04befb021d51706f39793eb77a2
|
Fix typo
|
tfyarn/factory.py
|
tfyarn/factory.py
|
from __future__ import print_function
from tfyarn.clusterspecgen_client import ClusterSpecGenClient
import os
import socket
import tensorflow
import time
def createClusterSpec(job_name, task_index, application_id=None, container_id=None, am_address=None):
if application_id is None:
application_id = os.environ['APPLICATION_ID']
if container_id is None:
container_id = os.environ['CONTAINER_ID']
if am_address is None:
am_address = os.environ['AM_ADDRESS']
client = ClusterSpecGenClient(am_address)
host = socket.gethostname()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
port = s.getsockname()[1]
client.register_container(application_id, container_id, host, port, job_name, task_index)
while True:
time.sleep(0.2)
cluster_spec_list = client.get_cluster_spec()
if cluster_spec_list is None:
print(container_id + ': createTrainServer: clusterSpec: None')
pass
elif len(cluster_spec_list) == 0:
print(container_id + ': createTrainServer: clusterSpec: (empty)')
pass
else:
break
workers = []
pses = []
last_worker_task_id = -1
last_ps_task_id = -1
for container in cluster_spec_list:
if container.jobName == 'worker':
assert container.taskIndex == last_worker_task_id + 1
last_worker_task_id = container.taskIndex
workers.append(container.ip + ':' + str(container.port))
elif container.jobName == 'ps':
assert container.taskIndex == last_ps_task_id + 1
last_ps_task_id = container.taskIndex
pses.append(container.ip + ':' + str(container.port))
cluster_spec_map = {'worker': workers, 'ps': pses}
print(container_id + ': createTrainServer: clusterSpec: ', end='')
print(cluster_spec_map)
s.close()
return tensorflow.train.ClusterSpec(cluster_spec_map)
|
Python
| 0.999999
|
@@ -1216,25 +1216,28 @@
orker_task_i
-d
+ndex
= -1%0A la
@@ -1248,17 +1248,20 @@
s_task_i
-d
+ndex
= -1%0A
@@ -1396,25 +1396,28 @@
orker_task_i
-d
+ndex
+ 1%0A
@@ -1435,25 +1435,28 @@
orker_task_i
-d
+ndex
= container
@@ -1631,17 +1631,20 @@
s_task_i
-d
+ndex
+ 1%0A
@@ -1666,17 +1666,20 @@
s_task_i
-d
+ndex
= conta
|
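The loop in factory.py ultimately just builds the standard two-job cluster map that tf.train.ClusterSpec expects; the target structure, with made-up addresses:

import tensorflow

cluster_spec_map = {
    'worker': ['host1:2222', 'host2:2222'],   # task_index 0, 1
    'ps': ['host3:2222'],                     # task_index 0
}
spec = tensorflow.train.ClusterSpec(cluster_spec_map)
print(spec.job_tasks('worker'))   # ['host1:2222', 'host2:2222']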
4663589ae44437344ec88dc96dc2ca9bdf55b581
|
add metric AUC
|
tgboost/metric.py
|
tgboost/metric.py
|
import numpy as np
def accuracy(preds, labels):
return np.mean(labels == preds.round())
def error(preds, labels):
return 1.0 - accuracy(preds,labels)
def mean_square_error(preds, labels):
return np.mean(np.square(preds - labels))
def mean_absolute_error(preds, labels):
return np.mean(np.abs(preds - labels))
metrics = {"acc": accuracy,
"error": error,
"mse": mean_square_error,
"mae": mean_absolute_error}
def get_metric(eval_metric):
return metrics[eval_metric]
|
Python
| 0.999999
|
@@ -327,16 +327,961 @@
bels))%0A%0A
+%0Adef tied_rank(x):%0A sorted_x = sorted(zip(x,range(len(x))))%0A r = %5B0 for k in x%5D%0A cur_val = sorted_x%5B0%5D%5B0%5D%0A last_rank = 0%0A for i in range(len(sorted_x)):%0A if cur_val != sorted_x%5Bi%5D%5B0%5D:%0A cur_val = sorted_x%5Bi%5D%5B0%5D%0A for j in range(last_rank, i):%0A r%5Bsorted_x%5Bj%5D%5B1%5D%5D = float(last_rank+1+i)/2.0%0A last_rank = i%0A if i==len(sorted_x)-1:%0A for j in range(last_rank, i+1):%0A r%5Bsorted_x%5Bj%5D%5B1%5D%5D = float(last_rank+i+2)/2.0%0A return r%0A%0A%0A# the auc code is from https://github.com/benhamner/Metrics, thanks benhamner%0Adef auc(posterior, actual):%0A r = tied_rank(posterior)%0A num_positive = len(%5B0 for x in actual if x==1%5D)%0A num_negative = len(actual)-num_positive%0A sum_positive = sum(%5Br%5Bi%5D for i in range(len(r)) if actual%5Bi%5D==1%5D)%0A auc = ((sum_positive - num_positive*(num_positive+1)/2.0) /%0A (num_negative*num_positive))%0A return auc%0A%0A
metrics
@@ -1401,16 +1401,39 @@
te_error
+,%0A %22auc%22: auc
%7D%0A%0A%0Adef
|
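The added auc() relies on the rank-statistic identity AUC = (S_pos - n_pos(n_pos+1)/2) / (n_pos * n_neg), where S_pos is the sum of the tie-averaged, 1-based ranks of the positives; a self-contained check on a hand-computable case:

def rank_auc(scores, labels):
    order = sorted(range(len(scores)), key=lambda i: scores[i])
    ranks = [0.0] * len(scores)
    i = 0
    while i < len(order):                 # walk groups of tied scores
        j = i
        while j + 1 < len(order) and scores[order[j + 1]] == scores[order[i]]:
            j += 1
        avg = (i + j) / 2.0 + 1.0         # average 1-based rank of the tie group
        for k in range(i, j + 1):
            ranks[order[k]] = avg
        i = j + 1
    n_pos = sum(labels)
    n_neg = len(labels) - n_pos
    s_pos = sum(r for r, y in zip(ranks, labels) if y == 1)
    return (s_pos - n_pos * (n_pos + 1) / 2.0) / (n_pos * n_neg)

assert rank_auc([0.1, 0.4, 0.8, 0.9], [0, 0, 1, 1]) == 1.0   # perfect ranking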
d0c921f2397e8f5d324f83e11b2bc51cfed3c4ea
|
Rename variable inside MutableHeaders implementation
|
starlette/datastructures.py
|
starlette/datastructures.py
|
import typing
from urllib.parse import parse_qsl, urlparse
class URL(str):
@property
def components(self):
if not hasattr(self, "_components"):
self._components = urlparse(self)
return self._components
@property
def scheme(self):
return self.components.scheme
@property
def netloc(self):
return self.components.netloc
@property
def path(self):
return self.components.path
@property
def params(self):
return self.components.params
@property
def query(self):
return self.components.query
@property
def fragment(self):
return self.components.fragment
@property
def username(self):
return self.components.username
@property
def password(self):
return self.components.password
@property
def hostname(self):
return self.components.hostname
@property
def port(self):
return self.components.port
# Type annotations for valid `__init__` values to QueryParams and Headers.
StrPairs = typing.Sequence[typing.Tuple[str, str]]
StrDict = typing.Mapping[str, str]
class QueryParams(typing.Mapping[str, str]):
"""
An immutable multidict.
"""
def __init__(
self, value: typing.Union[str, typing.Union[StrDict, StrPairs]] = None
) -> None:
if value is None:
value = []
elif isinstance(value, str):
value = parse_qsl(value)
if hasattr(value, "items"):
items = list(typing.cast(StrDict, value).items())
else:
items = list(typing.cast(StrPairs, value))
self._dict = {k: v for k, v in reversed(items)}
self._list = items
def getlist(self, key: str) -> typing.List[str]:
return [item_value for item_key, item_value in self._list if item_key == key]
def keys(self):
return [key for key, value in self._list]
def values(self):
return [value for key, value in self._list]
def items(self):
return list(self._list)
def get(self, key, default=None):
if key in self._dict:
return self._dict[key]
else:
return default
def __getitem__(self, key):
return self._dict[key]
def __contains__(self, key):
return key in self._dict
def __iter__(self):
return iter(self._list)
def __len__(self):
return len(self._list)
def __eq__(self, other):
if not isinstance(other, QueryParams):
other = QueryParams(other)
return sorted(self._list) == sorted(other._list)
def __repr__(self):
return "QueryParams(%s)" % repr(self._list)
class Headers(typing.Mapping[str, str]):
"""
An immutable, case-insensitive multidict.
"""
def __init__(self, raw_headers=None) -> None:
if raw_headers is None:
self._list = []
else:
for header_key, header_value in raw_headers:
assert isinstance(header_key, bytes)
assert isinstance(header_value, bytes)
assert header_key == header_key.lower()
self._list = raw_headers
def keys(self):
return [key.decode("latin-1") for key, value in self._list]
def values(self):
return [value.decode("latin-1") for key, value in self._list]
def items(self):
return [
(key.decode("latin-1"), value.decode("latin-1"))
for key, value in self._list
]
def get(self, key: str, default: str = None):
try:
return self[key]
except KeyError:
return default
def getlist(self, key: str) -> typing.List[str]:
get_header_key = key.lower().encode("latin-1")
return [
item_value.decode("latin-1")
for item_key, item_value in self._list
if item_key == get_header_key
]
def mutablecopy(self):
return MutableHeaders(self._list[:])
def __getitem__(self, key: str):
get_header_key = key.lower().encode("latin-1")
for header_key, header_value in self._list:
if header_key == get_header_key:
return header_value.decode("latin-1")
raise KeyError(key)
def __contains__(self, key: str):
get_header_key = key.lower().encode("latin-1")
for header_key, header_value in self._list:
if header_key == get_header_key:
return True
return False
def __iter__(self):
return iter(self.items())
def __len__(self):
return len(self._list)
def __eq__(self, other):
if not isinstance(other, Headers):
return False
return sorted(self._list) == sorted(other._list)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self.items()))
class MutableHeaders(Headers):
def __setitem__(self, key: str, value: str):
"""
Set the header `key` to `value`, removing any duplicate entries.
Retains insertion order.
"""
set_key = key.lower().encode("latin-1")
set_value = value.encode("latin-1")
pop_indexes = []
for idx, (item_key, item_value) in enumerate(self._list):
if item_key == set_key:
pop_indexes.append(idx)
for idx in reversed(pop_indexes[1:]):
del self._list[idx]
if pop_indexes:
idx = pop_indexes[0]
self._list[idx] = (set_key, set_value)
else:
self._list.append((set_key, set_value))
def __delitem__(self, key: str):
"""
Remove the header `key`.
"""
del_key = key.lower().encode("latin-1")
pop_indexes = []
for idx, (item_key, item_value) in enumerate(self._list):
if item_key == del_key:
pop_indexes.append(idx)
for idx in reversed(pop_indexes):
del self._list[idx]
def setdefault(self, key: str, value: str):
"""
If the header `key` does not exist, then set it to `value`.
Returns the header value.
"""
set_key = key.lower().encode("latin-1")
set_value = value.encode("latin-1")
for idx, (item_key, item_value) in enumerate(self._list):
if item_key == set_key:
return item_value.decode("latin-1")
self._list.append((set_key, set_value))
return value
|
Python
| 0
|
@@ -5200,35 +5200,37 @@
in-1%22)%0A%0A
-pop
+found
_indexes = %5B%5D%0A
@@ -5337,35 +5337,37 @@
-pop
+found
_indexes.append(
@@ -5392,35 +5392,37 @@
idx in reversed(
-pop
+found
_indexes%5B1:%5D):%0A
@@ -5464,19 +5464,21 @@
if
-pop
+found
_indexes
@@ -5497,19 +5497,21 @@
idx =
-pop
+found
_indexes
|
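The hunks above are a pure rename inside MutableHeaders.__setitem__: pop_indexes becomes found_indexes, with no change in behaviour. A minimal standalone sketch of that logic after the patch, where set_header and the raw list are illustrative stand-ins for the method and self._list:

def set_header(raw, key, value):
    set_key = key.lower().encode("latin-1")
    set_value = value.encode("latin-1")
    found_indexes = [i for i, (k, _) in enumerate(raw) if k == set_key]
    for i in reversed(found_indexes[1:]):  # drop duplicates, back to front
        del raw[i]
    if found_indexes:
        raw[found_indexes[0]] = (set_key, set_value)  # overwrite the survivor
    else:
        raw.append((set_key, set_value))

raw = [(b"accept", b"text/html"), (b"accept", b"*/*"), (b"host", b"example.com")]
set_header(raw, "Accept", "application/json")
assert raw == [(b"accept", b"application/json"), (b"host", b"example.com")]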
00140b48d7473c0f6738e5bc7894370baee9ef30
|
Remove debugging
|
IATISimpleTester/lib/helpers.py
|
IATISimpleTester/lib/helpers.py
|
from collections import defaultdict
import re
from lxml import etree
from IATISimpleTester import app
# given an expression list and the name of an expression,
# select it,
def select_expression(expression_list, expression_name, default_expression_name=None):
expression_dicts = {x["id"]: x for x in expression_list}
if expression_name not in expression_dicts:
expression_name = default_expression_name
return expression_name, expression_dicts.get(expression_name)
def slugify(inp):
return inp.lower().replace(' ', '-')
def pprint(explanation):
print(explanation)
explanation = explanation.strip().capitalize().replace('\n', '<br>') + '.'
return re.sub(r'`([^`]*)`', r'<code>\1</code>', explanation)
|
Python
| 0.000005
|
@@ -572,31 +572,8 @@
n):%0A
- print(explanation)%0A
|
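The diff removes the stray print(explanation) left in pprint, so the function now only normalises the text and wraps backticked spans in <code> tags. A quick probe of the patched function (standalone; the regex behaviour is unchanged):

import re

def pprint(explanation):
    # the debugging print(explanation) call is gone after this commit
    explanation = explanation.strip().capitalize().replace('\n', '<br>') + '.'
    return re.sub(r'`([^`]*)`', r'<code>\1</code>', explanation)

print(pprint("the `budget` element is missing"))
# -> The <code>budget</code> element is missing.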
c7abba5e48eb243e9174c72a374c3c60a9d12fc6
|
fix limiter init signature
|
thetis/limiter.py
|
thetis/limiter.py
|
"""
Slope limiter implementation.
Tuomas Karna 2015-08-26
"""
from __future__ import absolute_import
from .utility import *
from .firedrake import VertexBasedLimiter
import ufl
from pyop2.profiling import timed_region, timed_function, timed_stage # NOQA
def assert_function_space(fs, family, degree):
"""
Checks the family and degree of function space.
Raises AssertionError if function space differs.
If the function space lies on an extruded mesh, checks both spaces of the
outer product.
"""
ufl_elem = fs.ufl_element()
if ufl_elem.family() == 'TensorProductElement':
if ufl_elem.num_sub_elements() > 0:
# VectorElement case
assert isinstance(ufl_elem, ufl.VectorElement)
ufl_elem = ufl_elem.sub_elements()[0]
# extruded mesh
assert ufl_elem._A.family() == family,\
'horizontal space must be {0:s}'.format(family)
assert ufl_elem._B.family() == family,\
'vertical space must be {0:s}'.format(family)
assert ufl_elem._A.degree() == degree,\
'degree of horizontal space must be {0:d}'.format(degree)
assert ufl_elem._B.degree() == degree,\
'degree of vertical space must be {0:d}'.format(degree)
else:
# assume 2D mesh
assert ufl_elem.family() == family,\
'function space must be {0:s}'.format(family)
assert ufl_elem.degree() == degree,\
'degree of function space must be {0:d}'.format(degree)
class VertexBasedP1DGLimiter(VertexBasedLimiter):
"""
Vertex based limiter for P1DG tracer fields.
Based on firedrake implementation by Andrew McRae.
[1] Kuzmin Dmitri (2010). A vertex-based hierarchical slope limiter
for p-adaptive discontinuous Galerkin methods. Journal of Computational
and Applied Mathematics, 233(12):3077-3085.
http://dx.doi.org/10.1016/j.cam.2009.05.028
"""
def __init__(self, p1dg_space, p1cg_space, p0_space):
"""
Initialize limiter.
Parameters
----------
p1dg_space : FunctionSpace instance
P1DG function space where the scalar field belongs to
p1cg_space : FunctionSpace instance
Corresponding continuous function space (for min/max limits)
p0_space : FunctionSpace instance
Corresponding P0 function space (for centroids)
"""
assert_function_space(p1dg_space, 'Discontinuous Lagrange', 1)
super(VertexBasedP1DGLimiter, self).__init__(p1dg_space)
self.mesh = self.P0.mesh()
self.is_2d = self.mesh.geometric_dimension() == 2
def compute_bounds(self, field):
"""
Re-compute min/max values of all neighbouring centroids
"""
# Call general-purpose bound computation.
super(VertexBasedP1DGLimiter, self).compute_bounds(field)
# NOTE This does not limit solution at lateral boundaries at all
# NOTE Omit for now
# # Add nodal values from lateral boundaries
# par_loop("""
# for (int i=0; i<qmax.dofs; i++) {
# qmax[i][0] = fmax(qmax[i][0], field[i][0]);
# qmin[i][0] = fmin(qmin[i][0], field[i][0]);
# }""",
# ds,
# {'qmax': (self.max_field, RW),
# 'qmin': (self.min_field, RW),
# 'field': (field, READ)})
if not self.is_2d:
# Add nodal values from surface/bottom boundaries
# NOTE calling firedrake par_loop with measure=ds_t raises an error
bottom_nodes = self.P1CG.bt_masks['geometric'][0]
top_nodes = self.P1CG.bt_masks['geometric'][1]
bottom_idx = op2.Global(len(bottom_nodes), bottom_nodes, dtype=np.int32, name='node_idx')
top_idx = op2.Global(len(top_nodes), top_nodes, dtype=np.int32, name='node_idx')
code = """
void my_kernel(double **qmax, double **qmin, double **centroids, int *idx) {
double face_mean = 0;
for (int i=0; i<%(nnodes)d; i++) {
face_mean += centroids[idx[i]][0];
}
face_mean /= %(nnodes)d;
for (int i=0; i<%(nnodes)d; i++) {
qmax[idx[i]][0] = fmax(qmax[idx[i]][0], face_mean);
qmin[idx[i]][0] = fmin(qmin[idx[i]][0], face_mean);
}
}"""
kernel = op2.Kernel(code % {'nnodes': len(bottom_nodes)}, 'my_kernel')
op2.par_loop(kernel, self.mesh.cell_set,
self.max_field.dat(op2.WRITE, self.max_field.function_space().cell_node_map()),
self.min_field.dat(op2.WRITE, self.min_field.function_space().cell_node_map()),
field.dat(op2.READ, field.function_space().cell_node_map()),
bottom_idx(op2.READ),
iterate=op2.ON_BOTTOM)
op2.par_loop(kernel, self.mesh.cell_set,
self.max_field.dat(op2.WRITE, self.max_field.function_space().cell_node_map()),
self.min_field.dat(op2.WRITE, self.min_field.function_space().cell_node_map()),
field.dat(op2.READ, field.function_space().cell_node_map()),
top_idx(op2.READ),
iterate=op2.ON_TOP)
def apply(self, field):
with timed_stage('limiter'):
super(VertexBasedP1DGLimiter, self).apply(field)
|
Python
| 0
|
@@ -1965,30 +1965,8 @@
pace
-, p1cg_space, p0_space
):%0A
|
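The hunk trims VertexBasedP1DGLimiter.__init__ from three function-space arguments to one, matching firedrake's VertexBasedLimiter, which derives the auxiliary spaces itself (the class body above already reads self.P0 and self.P1CG without ever setting them). A pure-Python analogue of the fix, with the base class as a stand-in for firedrake's:

class VertexBasedLimiter:                  # stand-in for the firedrake base
    def __init__(self, p1dg_space):
        # the real base class builds the P1CG/P0 spaces from p1dg_space
        self.P1CG = "P1CG<%s>" % p1dg_space
        self.P0 = "P0<%s>" % p1dg_space

class VertexBasedP1DGLimiter(VertexBasedLimiter):
    def __init__(self, p1dg_space):        # p1cg_space and p0_space dropped
        super().__init__(p1dg_space)

lim = VertexBasedP1DGLimiter("P1DG<mesh>")
assert lim.P0 == "P0<P1DG<mesh>>"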
979782d1cfc492d8c609cf02eea0d449bc9abe3f
|
Fix path for windows platform (#742)
|
caravel/config.py
|
caravel/config.py
|
"""The main config file for Caravel
All configuration in this file can be overridden by providing a caravel_config
in your PYTHONPATH as there is a ``from caravel_config import *``
at the end of this file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from dateutil import tz
from flask_appbuilder.security.manager import AUTH_DB
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# ---------------------------------------------------------
# Caravel specific config
# ---------------------------------------------------------
ROW_LIMIT = 50000
CARAVEL_WORKERS = 16
CARAVEL_WEBSERVER_PORT = 8088
CARAVEL_WEBSERVER_TIMEOUT = 60
CUSTOM_SECURITY_MANAGER = None
# ---------------------------------------------------------
# Your App secret key
SECRET_KEY = '\2\1thisismyscretkey\1\2\e\y\y\h' # noqa
# The SQLAlchemy connection string.
SQLALCHEMY_DATABASE_URI = 'sqlite:////tmp/caravel.db'
# SQLALCHEMY_DATABASE_URI = 'mysql://myapp@localhost/myapp'
# SQLALCHEMY_DATABASE_URI = 'postgresql://root:password@localhost/myapp'
# Flask-WTF flag for CSRF
CSRF_ENABLED = True
# Whether to run the web server in debug mode or not
DEBUG = False
# Whether to show the stacktrace on 500 error
SHOW_STACKTRACE = True
# ------------------------------
# GLOBALS FOR APP Builder
# ------------------------------
# Uncomment to setup Your App name
APP_NAME = "Caravel"
# Uncomment to setup an App icon
APP_ICON = "/static/assets/images/caravel_logo.png"
# Druid query timezone
# tz.tzutc() : Using utc timezone
# tz.tzlocal() : Using local timezone
# other tz can be overridden by providing a local_config
DRUID_IS_ACTIVE = True
DRUID_TZ = tz.tzutc()
# ----------------------------------------------------
# AUTHENTICATION CONFIG
# ----------------------------------------------------
# The authentication type
# AUTH_OID : Is for OpenID
# AUTH_DB : Is for database (username/password)
# AUTH_LDAP : Is for LDAP
# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server
AUTH_TYPE = AUTH_DB
# Uncomment to setup Full admin role name
# AUTH_ROLE_ADMIN = 'Admin'
# Uncomment to setup Public role name, no authentication needed
# AUTH_ROLE_PUBLIC = 'Public'
# Will allow user self registration
# AUTH_USER_REGISTRATION = True
# The default user self registration role
# AUTH_USER_REGISTRATION_ROLE = "Public"
# When using LDAP Auth, setup the ldap server
# AUTH_LDAP_SERVER = "ldap://ldapserver.new"
# Uncomment to setup OpenID providers example for OpenID authentication
# OPENID_PROVIDERS = [
# { 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },
# { 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' },
# { 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },
# { 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }]
# ---------------------------------------------------
# Roles config
# ---------------------------------------------------
# Grant public role the same set of permissions as for the GAMMA role.
# This is useful if one wants to enable anonymous users to view
# dashboards. Explicit grant on specific datasets is still required.
PUBLIC_ROLE_LIKE_GAMMA = False
# ---------------------------------------------------
# Babel config for translations
# ---------------------------------------------------
# Setup default language
BABEL_DEFAULT_LOCALE = 'en'
# Your application default translation path
BABEL_DEFAULT_FOLDER = 'babel/translations'
# The allowed translation for you app
LANGUAGES = {
'en': {'flag': 'us', 'name': 'English'},
# 'fr': {'flag': 'fr', 'name': 'French'},
# 'zh': {'flag': 'cn', 'name': 'Chinese'},
}
# ---------------------------------------------------
# Image and file configuration
# ---------------------------------------------------
# The file upload folder, when using models with files
UPLOAD_FOLDER = BASE_DIR + '/app/static/uploads/'
# The image upload folder, when using models with images
IMG_UPLOAD_FOLDER = BASE_DIR + '/app/static/uploads/'
# The image upload url, when using models with images
IMG_UPLOAD_URL = '/static/uploads/'
# Setup image size default is (300, 200, True)
# IMG_SIZE = (300, 200, True)
CACHE_DEFAULT_TIMEOUT = None
CACHE_CONFIG = {'CACHE_TYPE': 'null'}
# CORS Options
ENABLE_CORS = False
CORS_OPTIONS = {}
# ---------------------------------------------------
# List of viz_types not allowed in your environment
# For example: Blacklist pivot table and treemap:
# VIZ_TYPE_BLACKLIST = ['pivot_table', 'treemap']
# ---------------------------------------------------
VIZ_TYPE_BLACKLIST = []
# ---------------------------------------------------
# List of data sources not to be refreshed in druid cluster
# ---------------------------------------------------
DRUID_DATA_SOURCE_BLACKLIST = []
"""
1) http://docs.python-guide.org/en/latest/writing/logging/
2) https://docs.python.org/2/library/logging.config.html
"""
# Console Log Settings
LOG_FORMAT = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
LOG_LEVEL = 'DEBUG'
# ---------------------------------------------------
# Enable Time Rotate Log Handler
# ---------------------------------------------------
# LOG_LEVEL = DEBUG, INFO, WARNING, ERROR, CRITICAL
ENABLE_TIME_ROTATE = False
TIME_ROTATE_LOG_LEVEL = 'DEBUG'
FILENAME = '/tmp/caravel.log'
ROLLOVER = 'midnight'
INTERVAL = 1
BACKUP_COUNT = 30
# Set this API key to enable Mapbox visualizations
MAPBOX_API_KEY = ""
try:
from caravel_config import * # noqa
except ImportError:
pass
if not CACHE_DEFAULT_TIMEOUT:
CACHE_DEFAULT_TIMEOUT = CACHE_CONFIG.get('CACHE_DEFAULT_TIMEOUT')
|
Python
| 0.000001
|
@@ -1009,16 +1009,188 @@
vel.db'%0A
+# this is for platform specific: %22nt%22 is for windows, %22posix%22 is *nix (including Mac)%0Aif os.name == %22nt%22:%0A SQLALCHEMY_DATABASE_URI = 'sqlite:///c:%5C%5Ctmp%5C%5Ccaravel.db' %0A
# SQLALC
|
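Decoded, the hunk inserts an os.name guard right after the default URI. Replayed as plain Python:

import os

SQLALCHEMY_DATABASE_URI = 'sqlite:////tmp/caravel.db'
# os.name is "nt" on Windows and "posix" on *nix including Mac (as the
# added comment says), so the /tmp default is swapped for a Windows path
if os.name == "nt":
    SQLALCHEMY_DATABASE_URI = 'sqlite:///c:\\tmp\\caravel.db'

print(os.name, SQLALCHEMY_DATABASE_URI)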
303a8c149c30d4dd1d9c833c6716d5ab0da88e04
|
Change version number to 1.2.
|
cbclib/version.py
|
cbclib/version.py
|
"""a cbclib version storage module."""
version_tuple = (1, 1, 1)
full_version = "%d.%d.%d" % version_tuple
|
Python
| 0.999741
|
@@ -53,20 +53,20 @@
e = (1,
-1, 1
+2, 0
)%0Afull_v
|
994b50c3856e01d3cec712515efe11c0f286781e
|
Remove deprecated alias
|
ipywidgets/__init__.py
|
ipywidgets/__init__.py
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Interactive widgets for the Jupyter notebook.
Provide simple interactive controls in the notebook.
Each Widget corresponds to an object in Python and Javascript,
with controls on the page.
To put a Widget on the page, you can display it with Jupyter's display machinery::
from ipywidgets import IntSlider
slider = IntSlider(min=1, max=10)
display(slider)
Moving the slider will change the value. Most Widgets have a current value,
accessible as a `value` attribute.
"""
import os
from IPython import get_ipython
from ._version import version_info, __version__, __protocol_version__, __jupyter_widgets_controls_version__, __jupyter_widgets_base_version__
from .widgets import *
from traitlets import link, dlink
def load_ipython_extension(ip):
"""Set up Jupyter to work with widgets"""
if not hasattr(ip, 'kernel'):
return
register_comm_target(ip.kernel)
def register_comm_target(kernel=None):
"""Register the jupyter.widget comm target"""
if kernel is None:
kernel = get_ipython().kernel
kernel.comm_manager.register_target('jupyter.widget', Widget.handle_comm_opened)
# deprecated alias
handle_kernel = register_comm_target
def _handle_ipython():
"""Register with the comm target at import if running in Jupyter"""
ip = get_ipython()
if ip is None:
return
load_ipython_extension(ip)
_handle_ipython()
|
Python
| 0.000003
|
@@ -828,17 +828,16 @@
dlink%0A%0A
-%0A
def load
@@ -992,17 +992,16 @@
ernel)%0A%0A
-%0A
def regi
@@ -1232,65 +1232,8 @@
d)%0A%0A
-# deprecated alias%0Ahandle_kernel = register_comm_target%0A%0A
def
|
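The diff deletes the handle_kernel alias outright (plus two blank lines), so any caller still using it breaks immediately. Had a transition period been wanted, a warning shim is the usual softer option; this is a generic, hypothetical sketch, not what ipywidgets did:

import warnings

def register_comm_target(kernel=None):
    return kernel  # stand-in for the real registration

def handle_kernel(kernel=None):
    # hypothetical shim; the actual commit removes the alias with no warning
    warnings.warn("handle_kernel is deprecated; use register_comm_target",
                  DeprecationWarning, stacklevel=2)
    return register_comm_target(kernel)

handle_kernel()  # emits a DeprecationWarning, then delegates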
d152332a72ada99f20106b613531a1046786480f
|
fix jupyter notebook import warning
|
lightning/visualization.py
|
lightning/visualization.py
|
import requests
import json
import webbrowser
import random
import string
class Visualization(object):
def __init__(self, session=None, json=None, auth=None):
self.session = session
self.id = json.get('id')
self.auth = auth
if self.session.lgn.ipython_enabled:
from IPython.kernel.comm import Comm
self.comm = Comm('lightning', {'id': self.id})
self.comm_handlers = {}
self.comm.on_msg(self._handle_comm_message)
def _format_url(self, url):
if not url.endswith('/'):
url += '/'
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
return url + '?host=' + quote(self.session.host)
def _update_image(self, image):
url = self.session.host + '/sessions/' + str(self.session.id) + '/visualizations/' + str(self.id) + '/data/images'
url = self._format_url(url)
files = {'file': image}
return requests.put(url, files=files, data={'type': 'image'}, auth=self.auth)
def _append_image(self, image):
url = self.session.host + '/sessions/' + str(self.session.id) + '/visualizations/' + str(self.id) + '/data/images'
url = self._format_url(url)
files = {'file': image}
return requests.post(url, files=files, data={'type': 'image'}, auth=self.auth)
def _append_data(self, data=None, field=None):
payload = {'data': data}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = self.session.host + '/sessions/' + str(self.session.id) + '/visualizations/' + str(self.id) + '/data/'
if field:
url += field
url = self._format_url(url)
return requests.post(url, data=json.dumps(payload), headers=headers, auth=self.auth)
def _update_data(self, data=None, field=None):
payload = {'data': data}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = self.session.host + '/sessions/' + str(self.session.id) + '/visualizations/' + str(self.id) + '/data/'
if field:
url += field
url = self._format_url(url)
return requests.put(url, data=json.dumps(payload), headers=headers, auth=self.auth)
def get_permalink(self):
return self.session.host + '/visualizations/' + str(self.id)
def get_public_link(self):
return self.get_permalink() + '/public/'
def get_embed_link(self):
return self._format_url(self.get_permalink() + '/embed')
def get_html(self):
r = requests.get(self.get_embed_link(), auth=self.auth)
return r.text
def open(self):
webbrowser.open(self.get_public_link())
def delete(self):
url = self.get_permalink()
return requests.delete(url)
def on(self, event_name, handler):
if self.session.lgn.ipython_enabled:
self.comm_handlers[event_name] = handler
else:
raise Exception('The current implementation of this method is only compatible with IPython.')
def _handle_comm_message(self, message):
# Parsing logic taken from similar code in matplotlib
message = json.loads(message['content']['data'])
if message['type'] in self.comm_handlers:
self.comm_handlers[message['type']](message['data'])
@classmethod
def _create(cls, session=None, data=None, images=None, type=None, options=None, description=None):
if options is None:
options = {}
url = session.host + '/sessions/' + str(session.id) + '/visualizations'
if not images:
payload = {'data': data, 'type': type, 'options': options}
if description:
payload['description'] = description
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = requests.post(url, data=json.dumps(payload), headers=headers, auth=session.auth)
if r.status_code == 404:
raise Exception(r.text)
elif not r.status_code == requests.codes.ok:
raise Exception('Problem uploading data')
viz = cls(session=session, json=r.json(), auth=session.auth)
else:
first_image, remaining_images = images[0], images[1:]
files = {'file': first_image}
payload = {'type': type, 'options': json.dumps(options)}
if description:
payload['description'] = description
r = requests.post(url, files=files, data=payload, auth=session.auth)
if r.status_code == 404:
raise Exception(r.text)
elif not r.status_code == requests.codes.ok:
raise Exception('Problem uploading images')
viz = cls(session=session, json=r.json(), auth=session.auth)
for image in remaining_images:
viz._append_image(image)
return viz
class VisualizationLocal(object):
def __init__(self, html):
self._html = html
@classmethod
def _create(cls, data=None, images=None, type=None, options=None):
import base64
from jinja2 import Template, escape
t = Template(cls.load_template())
options = escape(json.dumps(options))
random_id = 'A' + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(9))
fields = {'viz': type, 'options': options, 'viz_id': random_id}
if images:
bytes = ['data:image/png;base64,' + base64.b64encode(img) + ',' for img in images]
fields['images'] = escape(json.dumps(bytes))
else:
data = escape(json.dumps(data))
fields['data'] = data
html = t.render(**fields)
viz = cls(html)
return viz
def get_html(self):
"""
Return html for this local visualization.
Assumes that Javascript has already been embedded,
to be used for rendering in notebooks.
"""
return self._html
def save_html(self, filename=None, overwrite=False):
"""
Save self-contained html to a file.
Parameters
----------
filename : str
The filename to save to
"""
if filename is None:
raise ValueError('Please provide a filename, e.g. viz.save_html(filename="viz.html").')
import os
base = self._html
js = self.load_embed()
if os.path.exists(filename):
if overwrite is False:
raise ValueError("File '%s' exists. To ovewrite call save_html with overwrite=True."
% os.path.abspath(filename))
else:
os.remove(filename)
with open(filename, "wb") as f:
f.write(base.encode('utf-8'))
f.write('<script>' + js.encode('utf-8') + '</script>')
@staticmethod
def load_template():
import os
location = os.path.join(os.path.dirname(__file__), 'lib/template.html')
return open(location).read()
@staticmethod
def load_embed():
import os
location = os.path.join(os.path.dirname(__file__), 'lib/embed.js')
import codecs
return codecs.open(location, "r", "utf-8").read()
|
Python
| 0.000001
|
@@ -311,24 +311,19 @@
from
-IPython.
+ipy
kernel.c
|
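The one-word hunk retargets the Comm import at ipykernel, its home since the kernel was split out of IPython 4. A tolerant variant of the same migration, for code that must run on both layouts (a sketch; it needs one of the two packages installed):

try:
    from ipykernel.comm import Comm       # IPython >= 4 layout
except ImportError:
    from IPython.kernel.comm import Comm  # pre-split fallback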
8d5d8cc8d61596a62513039d79abb57f274333ef
|
Set version as 0.9.0
|
alignak_backend_client/__init__.py
|
alignak_backend_client/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Alignak REST backend client library
This module is a Python library used for the REST API of the Alignak backend
"""
# Application version and manifest
VERSION = (0, 7, 0)
__application__ = u"Alignak Backend client"
__short_version__ = '.'.join((str(each) for each in VERSION[:2]))
__version__ = '.'.join((str(each) for each in VERSION[:4]))
__author__ = u"Alignak team"
__author_email__ = u"david.durieux@alignak.net"
__copyright__ = u"(c) 2015-2017 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__description__ = u"Alignak backend client library"
__releasenotes__ = u"""Alignak backend client library"""
__git_url__ = "https://github.com/Alignak-monitoring-contrib/alignak-backend-client"
__doc_url__ = "http://alignak-backend-client.readthedocs.org"
__classifiers__ = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration'
]
# Application manifest
manifest = {
'name': __application__,
'version': __version__,
'author': __author__,
'description': __description__,
'copyright': __copyright__,
'license': __license__,
'release': __releasenotes__,
'doc': __doc_url__
}
|
Python
| 0.999418
|
@@ -221,17 +221,17 @@
N = (0,
-7
+9
, 0)%0A%0A__
|
4317960a50c06dea0521d08266057825b3e4bcde
|
Fix the makefile issue under Windows.
|
tools/makefile.py
|
tools/makefile.py
|
import os
import sys
from utils import *
from utils import _make_path_relative
import rtconfig
makefile = '''phony := all
all:
include config.mk
ifneq ($(MAKE_LIB),1)
TARGET := rtthread.elf
include src.mk
endif
$(if $(strip $(RTT_ROOT)),,$(error RTT_ROOT not defined))
include $(RTT_ROOT)/tools/rtthread.mk
'''
def TargetMakefile(env):
project = ProjectInfo(env)
BSP_ROOT = os.path.abspath(env['BSP_ROOT'])
RTT_ROOT = os.path.abspath(env['RTT_ROOT'])
match_bsp = False
if BSP_ROOT.startswith(RTT_ROOT):
match_bsp = True
make = open('config.mk', 'w')
make.write('BSP_ROOT ?= %s\n' % BSP_ROOT.replace('\\', '\\\\'))
make.write('RTT_ROOT ?= %s\n' % RTT_ROOT.replace('\\', '\\\\'))
make.write('\n')
cross = os.path.abspath(rtconfig.EXEC_PATH)
cross = os.path.join(cross, rtconfig.PREFIX)
make.write('CROSS_COMPILE ?=%s' % cross.replace('\\', '\\\\'))
make.write('\n')
make.write('\n')
make.write('CFLAGS :=%s' % (rtconfig.CFLAGS))
make.write('\n')
make.write('AFLAGS :=%s' % (rtconfig.AFLAGS))
make.write('\n')
make.write('LFLAGS :=%s' % (rtconfig.LFLAGS))
make.write('\n')
if 'CXXFLAGS' in dir(rtconfig):
make.write('CXXFLAGS :=%s' % (rtconfig.CXXFLAGS))
make.write('\n')
make.write('\n')
Files = project['FILES']
Headers = project['HEADERS']
CPPDEFINES = project['CPPDEFINES']
paths = [os.path.normpath(i) for i in project['CPPPATH']]
CPPPATH = []
for path in paths:
fn = os.path.normpath(path)
if match_bsp:
if fn.startswith(BSP_ROOT):
fn = '$(BSP_ROOT)' + fn.replace(BSP_ROOT, '')
elif fn.startswith(RTT_ROOT):
fn = '$(RTT_ROOT)' + fn.replace(RTT_ROOT, '')
else:
if fn.startswith(RTT_ROOT):
fn = '$(RTT_ROOT)' + fn.replace(RTT_ROOT, '')
elif fn.startswith(BSP_ROOT):
fn = '$(BSP_ROOT)' + fn.replace(BSP_ROOT, '')
CPPPATH.append(fn)
path = ''
paths = CPPPATH
for item in paths:
path += '\t-I%s \\\n' % item
make.write('CPPPATHS :=')
if path[0] == '\t': path = path[1:]
length = len(path)
if path[length - 2] == '\\': path = path[:length - 2]
make.write(path)
make.write('\n')
make.write('\n')
defines = ''
for item in project['CPPDEFINES']:
defines += ' -D%s' % item
make.write('DEFINES :=')
make.write(defines)
make.write('\n')
files = Files
Files = []
for file in files:
fn = os.path.normpath(file)
if match_bsp:
if fn.startswith(BSP_ROOT):
fn = '$(BSP_ROOT)' + fn.replace(BSP_ROOT, '')
elif fn.startswith(RTT_ROOT):
fn = '$(RTT_ROOT)' + fn.replace(RTT_ROOT, '')
else:
if fn.startswith(RTT_ROOT):
fn = '$(RTT_ROOT)' + fn.replace(RTT_ROOT, '')
elif fn.startswith(BSP_ROOT):
fn = '$(BSP_ROOT)' + fn.replace(BSP_ROOT, '')
Files.append(fn)
# print(fn)
src = open('src.mk', 'w')
files = Files
src.write('SRC_FILES :=\n')
for item in files:
src.write('SRC_FILES +=%s\n' % item)
make = open('Makefile', 'w')
make.write(makefile)
make.close()
return
|
Python
| 0
|
@@ -639,36 +639,33 @@
.replace('%5C%5C', '
-%5C%5C%5C%5C
+/
'))%0A make.wri
@@ -704,36 +704,33 @@
.replace('%5C%5C', '
-%5C%5C%5C%5C
+/
'))%0A make.wri
@@ -3228,16 +3228,35 @@
' %25 item
+.replace('%5C%5C', '/')
)%0A%0A m
|
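The hunks swap the escaped-backslash replacement for forward slashes in the BSP_ROOT/RTT_ROOT lines and apply the same normalisation to each SRC_FILES entry. A runnable before/after comparison of the string handling:

path = 'C:\\rt-thread\\bsp\\stm32f4'
# pre-fix: doubled backslashes in config.mk -> C:\\rt-thread\\bsp\\stm32f4
print('BSP_ROOT ?= %s' % path.replace('\\', '\\\\'))
# post-fix: forward slashes, which GNU make accepts on Windows as well
print('BSP_ROOT ?= %s' % path.replace('\\', '/'))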
a482b8136cacc4a498b59978c41b84d2e551becc
|
Copy the .msi as output by virgo-base
|
tools/pkgutils.py
|
tools/pkgutils.py
|
#!/usr/bin/env python
import os
import errno
import platform
import sys
import subprocess
# Figure out what type of package to build based on platform info
#
# TODO: Windows does MSI?
deb = ['debian', 'ubuntu']
rpm = ['redhat', 'fedora', 'suse', 'opensuse', 'centos']
dist = platform.dist()[0].lower()
def pkg_type():
if dist in deb:
return "deb"
if dist in rpm:
return "rpm"
if sys.platform == "win32":
return "windows"
return None
def pkg_dir():
system = platform.system().lower()
machine = platform.machine().lower()
addon = ""
if system == "freebsd":
system = system + platform.release().lower()[0]
if system == "linux":
dist = platform.dist()
if dist[0] == 'debian':
if dist[1][0] == '6':
dist = [dist[0], 'squeeze']
else:
dist = [dist[0], 'undefined']
# Lower case everyting (looking at you Ubuntu)
dist = tuple([x.lower() for x in dist])
# Treat all redhat 5.* versions the same
# redhat-5.5 becomes redhat-5
if (dist[0] == "redhat" or dist[0] == "centos"):
major = dist[1].split(".")[0]
distro = dist[0]
# http://bugs.centos.org/view.php?id=5197
# CentOS 5.7 identifies as redhat
if int(major) <= 5 and distro == "redhat":
f = open('/etc/redhat-release')
new_dist = f.read().lower().split(" ")[0]
if new_dist == "centos":
distro = "centos"
dist = (distro, major)
dist = "%s-%s" % dist[:2]
return "%s-%s" % (dist, machine)
return "%s-%s%s" % (system, machine, addon)
def sh(cmd):
print cmd
rv = subprocess.call(cmd, shell=True)
if rv != 0:
print "Exit Code: %s" % (rv)
sys.exit(1)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST:
pass
else:
raise
def package_binary():
pkgType = pkg_type()
if pkgType == 'windows':
return 'rackspace-monitoring-agent.msi'
return 'rackspace-monitoring-agent'
def system_info():
# gather system, machine, and distro info
machine = platform.machine()
system = platform.system().lower()
return (machine, system, pkg_dir())
def _git_describe(is_exact, git_dir, cwd):
describe = "git "
if cwd:
describe = "%s --git-dir=%s/.git --work-tree=%s " % (describe, git_dir, cwd)
if is_exact:
options = "--exact-match"
else:
options = "--always"
describe = "%s describe --tags %s" % (describe, options)
p = subprocess.Popen(describe,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
cwd=cwd)
version, errors = p.communicate()
if errors:
raise ValueError("The command failed:\n%s\n%s" % (describe, errors))
return version
# git describe return "0.1-143-ga554734"
# git_describe() returns {'release': '143', 'tag': '0.1', 'hash': 'ga554734'}
def git_describe(is_exact=False, split=True, cwd=None):
try:
version = _git_describe(is_exact, cwd, cwd)
except ValueError:
version = ""
if not version:
version = _git_describe(is_exact, "..", cwd)
version = version.strip()
if split:
version = version.split('-')
return version
def git_head():
p = subprocess.Popen('git rev-parse HEAD',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
version, errors = p.communicate()
return version.strip()
def package_builder_dir():
"""returns the directory that is packaged into rpms/debs.
This is useful because the builders may specify different cflags, etc., which
interfere with generating symbols files."""
pkgType = pkg_type()
basePath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
if pkgType == 'deb':
buildDirArgs = [basePath, 'out', 'Debug']
elif pkgType == 'rpm':
v = git_describe()
buildDirArgs = [basePath, 'out']
buildDirArgs += ('rpmbuild', 'BUILD', "rackspace-monitoring-agent-%s" % v[0])
buildDirArgs += ('out', 'Debug')
elif pkgType == 'windows':
buildDirArgs = [basePath, 'Release']
else:
raise AttributeError('Unsupported pkg type, %s' % (pkgType))
return os.path.join(*buildDirArgs)
if __name__ == "__main__":
print pkg_type()
|
Python
| 0.000001
|
@@ -2150,34 +2150,13 @@
rn '
-rackspace-monitoring-agent
+virgo
.msi
@@ -4368,16 +4368,22 @@
ePath, '
+base%5C%5C
Release'
|
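After the two hunks, the Windows binary name matches what the virgo-base build emits and the Windows build directory becomes base\\Release under the repo root. A sketch of the patched helper, reworked to take pkg_type as a parameter so it runs standalone:

def package_binary(pkg_type):
    if pkg_type == 'windows':
        return 'virgo.msi'  # the .msi exactly as output by virgo-base
    return 'rackspace-monitoring-agent'

assert package_binary('windows') == 'virgo.msi'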
26d93980bcf2e8463c8fc390059563d74b189418
|
Create commit_zero (was forgotten) in repoinit.py
|
tools/repoinit.py
|
tools/repoinit.py
|
#!/usr/bin/env python3
# Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to initialise a Github repo to be
used as a basis for a Wrap db entry. Also calculates a basic
upstream.wrap."""
import datetime
import git
import hashlib
import os
import shutil
import sys
import urllib.request
upstream_templ = '''[wrap-file]
directory = %s
source_url = %s
source_filename = %s
source_hash = %s
'''
readme = '''This repository contains a Meson build definition for project {reponame}.
For more information please see http://mesonbuild.com.
'''
mit_license = '''Copyright (c) {year} The Meson development team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def initialize(reponame):
repo = git.Repo.init('.')
with open('readme.txt', 'w') as ofile:
ofile.write(readme.format(reponame=reponame))
with open('LICENSE.build', 'w') as ofile:
ofile.write(mit_license.format(year=datetime.datetime.now().year))
repo.index.add(['readme.txt', 'LICENSE.build'])
repo.index.commit('Created repository for project %s.' % reponame)
origin = repo.create_remote('origin', 'git@github.com:mesonbuild/%s.git' % reponame)
origin.push(repo.head.ref.name)
shutil.rmtree('.git')
os.unlink('readme.txt')
def build_upstream_wrap(zipurl, filename, directory):
with urllib.request.urlopen(zipurl) as r:
data = r.read()
open(filename, 'wb').write(data)
h = hashlib.sha256()
h.update(data)
dhash = h.hexdigest()
with open('upstream.wrap', 'w') as ofile:
ofile.write(upstream_templ % (directory, zipurl, filename, dhash))
if __name__ == '__main__':
if len(sys.argv) != 5:
print(sys.argv[0], '<reponame> <zipurl> <filename> <directory>')
sys.exit(1)
reponame = sys.argv[1]
zipurl = sys.argv[2]
filename = sys.argv[3]
directory = sys.argv[4]
initialize(reponame)
build_upstream_wrap(zipurl, filename, directory)
print('Done, now do the branching + stuff.')
|
Python
| 0.000001
|
@@ -2517,24 +2517,33 @@
build'%5D)%0A
+ commit =
repo.index.
@@ -2590,32 +2590,81 @@
s.' %25 reponame)%0A
+ tag = repo.create_tag('commit_zero', commit)%0A
origin = rep
@@ -2732,24 +2732,24 @@
%25 reponame)%0A
-
origin.p
@@ -2768,24 +2768,45 @@
d.ref.name)%0A
+ origin.push(tag)%0A
shutil.r
|
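Decoded, the hunks capture the initial commit object, tag it commit_zero, and push the tag alongside the branch. A compact replay with GitPython (repo path, file names, and remote URL are illustrative; running it end to end needs those files and a reachable remote):

import git  # GitPython

repo = git.Repo.init('.')
repo.index.add(['readme.txt', 'LICENSE.build'])
commit = repo.index.commit('Created repository for project demo.')
tag = repo.create_tag('commit_zero', commit)  # the tag this commit adds
origin = repo.create_remote('origin', 'git@github.com:example/demo.git')
origin.push(repo.head.ref.name)
origin.push(tag)                              # ...and now also pushes it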
298553d4caae67c9a8deeb2288b320689f1e4014
|
return List instead of ES response object (#458)
|
analytics_data_api/v0/documents.py
|
analytics_data_api/v0/documents.py
|
from django.conf import settings
from elasticsearch_dsl import Date, Document, Float, Integer, Keyword, Q
from analytics_data_api.constants import learner
class RosterUpdate(Document):
"""
Index which store last update date of passed index.
"""
date = Date()
target_index = Keyword()
class Index:
name = settings.ELASTICSEARCH_LEARNERS_UPDATE_INDEX
settings = settings.ELASTICSEARCH_INDEX_SETTINGS
@classmethod
def get_last_updated(cls):
return cls.search().query('term', target_index=settings.ELASTICSEARCH_LEARNERS_INDEX).execute()
class RosterEntry(Document):
"""
Index which store learner information of a course.
"""
course_id = Keyword()
user_id = Integer()
username = Keyword()
name = Keyword()
email = Keyword()
language = Keyword()
location = Keyword()
year_of_birth = Integer()
level_of_education = Keyword()
gender = Keyword()
mailing_address = Keyword()
city = Keyword()
country = Keyword()
goals = Keyword()
enrollment_mode = Keyword()
cohort = Keyword()
segments = Keyword() # segments is an array/list of strings
problems_attempted = Integer()
problems_completed = Integer()
problem_attempts_per_completed = Float()
# Useful for ordering problem_attempts_per_completed (because results can include null, which is
# different from zero). attempt_ratio_order is equal to the number of problem attempts if
# problem_attempts_per_completed is > 1 and set to -problem_attempts if
# problem_attempts_per_completed = 1.
attempt_ratio_order = Integer()
discussion_contributions = Integer()
enrollment_date = Date()
videos_viewed = Integer()
last_updated = Date()
class Index:
name = settings.ELASTICSEARCH_LEARNERS_INDEX
settings = settings.ELASTICSEARCH_INDEX_SETTINGS
@classmethod
def get_course_user(cls, course_id, username):
"""
Search learner in course.
"""
return cls.search().query('term', course_id=course_id).query('term', username=username).execute()
@classmethod
def get_users_in_course(
cls,
course_id,
segments=None,
ignore_segments=None,
cohort=None,
enrollment_mode=None,
text_search=None,
sort_policies=None,
):
"""
Construct a search query for all users in `course_id` and return the Search object.
sort_policies is an array, where the first element is the primary sort.
Elements in the array are dicts with fields: order_by (field to sort by)
and sort_order (either 'asc' or 'desc'). Default to 'username' and 'asc'.
Raises `ValueError` if both `segments` and `ignore_segments` are provided.
"""
if not sort_policies:
sort_policies = [{'order_by': None, 'sort_order': None}]
# set default sort policy to 'username' and 'asc'
for field, default in [('order_by', 'username'), ('sort_order', 'asc')]:
if sort_policies[0][field] is None:
sort_policies[0][field] = default
# Error handling
if segments and ignore_segments:
raise ValueError('Cannot combine `segments` and `ignore_segments` parameters.')
for segment in (segments or []) + (ignore_segments or []):
if segment not in learner.SEGMENTS:
raise ValueError("segments/ignore_segments value '{segment}' must be one of: ({segments})".format(
segment=segment, segments=', '.join(learner.SEGMENTS)
))
order_by_options = (
'username', 'email', 'discussion_contributions', 'problems_attempted', 'problems_completed',
'problem_attempts_per_completed', 'attempt_ratio_order', 'videos_viewed'
)
sort_order_options = ('asc', 'desc')
for sort_policy in sort_policies:
if sort_policy['order_by'] not in order_by_options:
raise ValueError("order_by value '{order_by}' must be one of: ({order_by_options})".format(
order_by=sort_policy['order_by'], order_by_options=', '.join(order_by_options)
))
if sort_policy['sort_order'] not in sort_order_options:
raise ValueError("sort_order value '{sort_order}' must be one of: ({sort_order_options})".format(
sort_order=sort_policy['sort_order'], sort_order_options=', '.join(sort_order_options)
))
search = cls.search()
search.query = Q('bool', must=[Q('term', course_id=course_id)])
# Filtering/Search
if segments:
search.query.must.append(Q('bool', should=[Q('term', segments=segment) for segment in segments]))
elif ignore_segments:
for segment in ignore_segments:
search = search.query(~Q('term', segments=segment)) # pylint: disable=invalid-unary-operand-type
if cohort:
search = search.query('term', cohort=cohort)
if enrollment_mode:
search = search.query('term', enrollment_mode=enrollment_mode)
if text_search:
search.query.must.append(Q('multi_match', query=text_search, fields=['name', 'username', 'email']))
# construct the sort hierarchy
search_request = search.sort(*[
{
sort_policy['order_by']: {
'order': sort_policy['sort_order'],
# ordering of missing fields
'missing': '_last' if sort_policy['sort_order'] == 'asc' else '_first'
}
}
for sort_policy in sort_policies
])
return search_request.execute()
@classmethod
def get_course_metadata(cls, course_id):
"""
Returns the number of students belonging to particular cohorts,
segments, and enrollment modes within a course. Returns data in the
following format:
{
'cohorts': {
<cohort_name>: <learner_count>
},
'segments': {
<segment_name>: <learner_count>
},
'enrollment_modes': {
<enrollment_mode_name>: <learner_count>
}
}
"""
# Use the configured default page size to set the number of aggregate search results.
page_size = getattr(settings, 'AGGREGATE_PAGE_SIZE', 10)
search = cls.search()
search.query = Q('bool', must=[Q('term', course_id=course_id)])
search.aggs.bucket('enrollment_modes', 'terms', field='enrollment_mode', size=page_size)
search.aggs.bucket('segments', 'terms', field='segments', size=page_size)
search.aggs.bucket('cohorts', 'terms', field='cohort', size=page_size)
response = search.execute().to_dict()
# Build up the map of aggregation name to count
aggregations = {
agg_field: {
bucket['key']: bucket['doc_count']
for bucket in agg_item['buckets']
}
for agg_field, agg_item in response['aggregations'].items()
}
# Add default values of 0 for segments with no learners
for segment in learner.SEGMENTS:
if segment not in aggregations['segments']:
aggregations['segments'][segment] = 0
return aggregations
|
Python
| 0.000012
|
@@ -1,12 +1,27 @@
+import logging%0A
%0Afrom django
@@ -166,16 +166,54 @@
earner%0A%0A
+logger = logging.getLogger(__name__)%0A%0A
%0Aclass R
@@ -5823,20 +5823,19 @@
re
-turn
+s =
search_
@@ -5843,32 +5843,257 @@
equest.execute()
+%0A # debugging, to be removed%0A logger.warning(res.__class__.__name__)%0A items = res.hits%0A if items:%0A logger.warning(items%5B0%5D)%0A logger.warning(dir(items%5B0%5D))%0A return items
%0A%0A @classmeth
|
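Despite the subject line, the hunk still carries logging marked "# debugging, to be removed"; the durable change is returning response.hits instead of the raw elasticsearch-dsl Response. Stripped of that scaffolding, the patched tail of get_users_in_course reduces to this sketch:

def run_search(search_request):
    res = search_request.execute()
    return res.hits  # list-like of Hit documents, not the Response object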
d2ebde1d3fbee6b5f6856768e0fee6f7273e9a55
|
Fix inconsistent field name
|
accelerator_abstract/models/base_mentor_program_office_hour.py
|
accelerator_abstract/models/base_mentor_program_office_hour.py
|
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
import swapper
from django.conf import settings
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from accelerator_abstract.models.accelerator_model import AcceleratorModel
HOUR_IS_PAST_MESSAGE = "This office hour is in the past"
HOUR_HAS_BEEN_CANCELED_MESSAGE = "This office hour has been canceled"
HOUR_NOT_SPECIFIED_MESSAGE = "Office hour has not been specified"
HOUR_OWNED_BY_ANOTHER_MESSAGE = "This office hour is owned by another user"
@python_2_unicode_compatible
class BaseMentorProgramOfficeHour(AcceleratorModel):
program = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label, "Program"),
on_delete=models.SET_NULL,
null=True,
blank=True,
)
mentor = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='mentor_officehours',
on_delete=models.CASCADE)
finalist = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name="Finalist",
blank=True,
null=True,
related_name='finalist_officehours',
on_delete=models.CASCADE)
startup = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label, "Startup"),
blank=True,
null=True,
related_name='startup_officehours',
on_delete=models.SET_NULL)
start_date_time = models.DateTimeField(db_index=True)
end_date_time = models.DateTimeField(db_index=True)
description = models.TextField(blank=True)
location = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label, "Location"),
null=True,
blank=True,
on_delete=models.CASCADE)
notify_reservation = models.BooleanField(default=True)
topics = models.TextField(blank=True)
meeting_info = models.CharField(max_length=256, blank=True, default="")
class Meta(AcceleratorModel.Meta):
db_table = '{}_mentorprogramofficehour'.format(
AcceleratorModel.Meta.app_label)
abstract = True
verbose_name = "Office Hour"
unique_together = ('program', 'mentor', 'start_date_time')
ordering = ['start_date_time']
def __str__(self):
hour_type = "Reserved"
if self.is_open():
hour_type = "Open"
return "%s office hour with %s" % (hour_type, self.mentor)
def is_open(self):
return not bool(self.finalist)
|
Python
| 0.000022
|
@@ -2014,36 +2014,65 @@
models.
-Text
+Char
Field(
-blank=True
+max_length=2000, blank=True, default=%22%22
)%0A me
|
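The hunk actually changes the field's type rather than its name: topics goes from TextField(blank=True) to a bounded CharField declared in the same style as meeting_info. Post-patch excerpt as a sketch (a simplified abstract model; defining it requires a configured Django project):

from django.db import models

class OfficeHour(models.Model):  # simplified stand-in for the abstract base
    topics = models.CharField(max_length=2000, blank=True, default="")
    meeting_info = models.CharField(max_length=256, blank=True, default="")

    class Meta:
        abstract = True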
2c42b84a5ffd7ce42295488271781c08ab372bd3
|
add website_multi_company_portal to demo addons
|
website_multi_company/__manifest__.py
|
website_multi_company/__manifest__.py
|
# -*- coding: utf-8 -*-
{
"name": """Real Multi Website""",
"summary": """Yes, you can set up multi-company, multi-website, multi-theme, multi-eCommerce on a single database!""",
"category": "eCommerce",
"live_test_url": "http://apps.it-projects.info/shop/product/website-multi-company?version=10.0",
"images": ['images/website_multi_company_main.png'],
"version": "1.2.0",
"application": False,
"author": "IT-Projects LLC, Ivan Yelizariev",
"support": "apps@it-projects.info",
"website": "https://twitter.com/yelizariev",
"license": "LGPL-3",
"price": 400.00,
"currency": "EUR",
"depends": [
"website",
"website_multi_theme",
"ir_config_parameter_multi_company",
],
"external_dependencies": {"python": [], "bin": []},
"data": [
"views/website_views.xml",
"views/website_templates.xml",
"views/website_menu_views.xml",
"views/website_theme_views.xml",
"views/res_config_views.xml",
],
"qweb": [
],
"demo": [
# "data/website_demo.xml",
],
"post_load": "post_load",
"pre_init_hook": None,
"post_init_hook": None,
"auto_install": False,
"installable": True,
"demo_title": "Real Multi Website",
"demo_addons": [
"website_multi_company_sale",
],
"demo_addons_hidden": [
"website_multi_company_demo",
],
"demo_url": "website-multi-company",
"demo_summary": "The module allows to set up multi-company, multi-website, multi-theme, multi-eCommerce on a single database!",
"demo_images": [
"images/website_multi_company_main.png",
]
}
|
Python
| 0
|
@@ -1328,24 +1328,64 @@
pany_sale%22,%0A
+ %22website_multi_company_portal%22,%0A
%5D,%0A %22
|
08300a23fc06b9fa46435cf83e62778064b95424
|
Fix support for xarray < 0.11 until it is released.
|
cfgrib/cfgrib_.py
|
cfgrib/cfgrib_.py
|
#
# Copyright 2017-2018 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
from __future__ import absolute_import, division, print_function
import numpy as np
from xarray import Variable
from xarray.core import indexing
from xarray.core.utils import Frozen, FrozenOrderedDict
from xarray.backends.common import AbstractDataStore, BackendArray
from xarray.backends.locks import ensure_lock, SerializableLock
# FIXME: Add a dedicated lock just in case, even if ecCodes is supposed to be thread-safe in most
# circumstances. See: https://confluence.ecmwf.int/display/ECC/Frequently+Asked+Questions
ECCODES_LOCK = SerializableLock()
class CfGribArrayWrapper(BackendArray):
def __init__(self, datastore, array):
self.datastore = datastore
self.shape = array.shape
self.dtype = array.dtype
self.array = array
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.OUTER, self._getitem)
def _getitem(self, key):
with self.datastore.lock:
return self.array[key]
class CfGribDataStore(AbstractDataStore):
"""
Implements the ``xr.AbstractDataStore`` read-only API for a GRIB file.
"""
def __init__(self, filename, lock=None, **backend_kwargs):
import cfgrib
if lock is None:
lock = ECCODES_LOCK
self.lock = ensure_lock(lock)
# NOTE: filter_by_keys is a dict, but CachingFileManager only accepts hashable types
if 'filter_by_keys' in backend_kwargs:
backend_kwargs['filter_by_keys'] = tuple(backend_kwargs['filter_by_keys'].items())
self.ds = cfgrib.open_file(filename, mode='r', **backend_kwargs)
def open_store_variable(self, name, var):
if isinstance(var.data, np.ndarray):
data = var.data
else:
data = indexing.LazilyOuterIndexedArray(CfGribArrayWrapper(self, var.data))
encoding = self.ds.encoding.copy()
encoding['original_shape'] = var.data.shape
return Variable(var.dimensions, data, var.attributes, encoding)
def get_variables(self):
return FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in self.ds.variables.items())
def get_attrs(self):
return Frozen(self.ds.attributes)
def get_dimensions(self):
return Frozen(self.ds.dimensions)
def get_encoding(self):
encoding = {}
encoding['unlimited_dims'] = {k for k, v in self.ds.dimensions.items() if v is None}
return encoding
|
Python
| 0
|
@@ -956,16 +956,25 @@
ndArray%0A
+try:%0A
from xar
@@ -1029,16 +1029,276 @@
bleLock%0A
+except ImportError:%0A # no locking for xarray %3C= 0.11%0A def ensure_lock(lock):%0A return lock%0A%0A class SerializableLock(object):%0A def __enter__(self):%0A pass%0A%0A def __exit__(self, exc_type, exc_val, exc_tb):%0A pass%0A%0A
%0A# FIXME
|
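Decoded from the URL-escaped hunks, the compatibility shim wraps the locks import and supplies no-op fallbacks for xarray <= 0.11:

try:
    from xarray.backends.locks import ensure_lock, SerializableLock
except ImportError:
    # no locking for xarray <= 0.11
    def ensure_lock(lock):
        return lock

    class SerializableLock(object):
        def __enter__(self):
            pass

        def __exit__(self, exc_type, exc_val, exc_tb):
            pass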
51da6fd489d6a35e4b74d1813861e305e04e1eeb
|
Bring up to speed with project architecture changes.
|
scripts/99-create-group-project.py
|
scripts/99-create-group-project.py
|
#!/usr/bin/python3
import argparse
import csv
import fnmatch
import os
import shutil
parser = argparse.ArgumentParser(description='Create a group project.')
parser.add_argument('--project', required=True, help='Directory with a set of aerial images.')
parser.add_argument('source', metavar='source-projects', nargs='+',
help='input projects')
args = parser.parse_args()
project_dir = args.project
analysis_dir = os.path.join(project_dir, "ImageAnalysis")
meta_dir = os.path.join(analysis_dir, "meta")
models_dir = os.path.join(analysis_dir, "models")
if not os.path.isdir(project_dir):
os.makedirs(project_dir)
if not os.path.isdir(analysis_dir):
os.makedirs(analysis_dir)
if not os.path.isdir(meta_dir):
os.makedirs(meta_dir)
if not os.path.isdir(models_dir):
os.makedirs(models_dir)
# quick input sanity check
for p in args.source:
if not os.path.isdir(p):
print("Cannot find source project:", p)
print("Aborting.")
quit()
# add symbolic links to source images from each project
print("Creating symbolic links to source image files...")
for p in args.source:
for image in sorted(os.listdir(p)):
if fnmatch.fnmatch(image, '*.jpg') or fnmatch.fnmatch(image, '*.JPG'):
src = os.path.join(p, image)
dest = os.path.join(project_dir, image)
if os.path.exists(dest):
print("Warning, dest already exists:", dest)
else:
os.symlink(src, dest)
# create a combo pix4d.csv file
print("Assembling combination pix4d.csv file")
full_list = []
fields = None
for p in args.source:
csv_path = os.path.join(p, "pix4d.csv")
with open(csv_path, newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if fields is None:
fields = row.keys()
full_list.append(row)
csv_path = os.path.join(project_dir, "pix4d.csv")
with open(csv_path, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fields)
writer.writeheader()
for row in full_list:
writer.writerow(row)
# copy config.json from first listed source project
print("Copying config.json from source project")
config_src = os.path.join(args.source[0], "ImageAnalysis", "config.json")
config_dest = os.path.join(analysis_dir, "config.json")
if os.path.exists(config_src):
shutil.copyfile(config_src, config_dest)
# populate the meta directory
print("Populating the meta directory with symbolic links.")
for p in args.source:
meta_src = os.path.join(p, "ImageAnalysis", "meta")
for file in sorted(os.listdir(meta_src)):
if fnmatch.fnmatch(file, '*.feat') or fnmatch.fnmatch(file, '*.desc'):
src = os.path.join(meta_src, file)
dest = os.path.join(meta_dir, file)
if os.path.exists(dest):
print("Warning, dest already exists:", dest)
else:
os.symlink(src, dest)
if fnmatch.fnmatch(file, '*.match'):
src = os.path.join(meta_src, file)
dest = os.path.join(meta_dir, file)
shutil.copyfile(src, dest)
# populate the models directory
print("Populating the models directory with symbolic links.")
for p in args.source:
models_src = os.path.join(p, "ImageAnalysis", "models")
for file in sorted(os.listdir(models_src)):
if fnmatch.fnmatch(file, '*.jpg') or fnmatch.fnmatch(file, '*.JPG'):
src = os.path.join(mdoels_src, file)
dest = os.path.join(models_dir, file)
if os.path.exists(dest):
print("Warning, dest already exists:", dest)
else:
os.symlink(src, dest)
print("Now run the 2a set poses script to create the image.json files, initial poses, and update the project NED reference point")
print("Skip the 3a detect features script")
print("Run the 4a matching script, taking advantage of all the matches that were found for the individual groups")
print("After matching, run the optimizer and rendering scripts.")
|
Python
| 0
|
@@ -566,16 +566,112 @@
models%22)
+%0Astate_dir = os.path.join(analysis_dir, %22state%22)%0Acache_dir = os.path.join(analysis_dir, %22cache%22)
%0A%0Aif not
@@ -918,16 +918,138 @@
s_dir)%0A%0A
+if not os.path.isdir(state_dir):%0A os.makedirs(state_dir)%0A%0Aif not os.path.isdir(cache_dir):%0A os.makedirs(cache_dir)%0A%0A
# quick
@@ -2901,20 +2901,20 @@
ile, '*.
-feat
+json
') or fn
@@ -2936,20 +2936,21 @@
ile, '*.
-desc
+match
'):%0A
@@ -3056,191 +3056,330 @@
-if os.path.exists(dest):%0A print(%22Warning, dest already exists:%22, dest)%0A else:%0A os.symlink(src, dest)%0A if fnmatch.fnmatch(file, '*.match
+shutil.copyfile(src, dest)%0A%0A# populate the cache directory%0Aprint(%22Populating the cache directory with symbolic links.%22)%0Afor p in args.source:%0A cache_src = os.path.join(p, %22ImageAnalysis%22, %22cache%22)%0A for file in sorted(os.listdir(cache_src)):%0A if fnmatch.fnmatch(file, '*.feat') or fnmatch.fnmatch(file, '*.desc
'):%0A
@@ -3401,36 +3401,37 @@
= os.path.join(
-meta
+cache
_src, file)%0A
@@ -3450,36 +3450,37 @@
= os.path.join(
-meta
+cache
_dir, file)%0A
@@ -3483,39 +3483,154 @@
-shutil.copyfile
+if os.path.exists(dest):%0A print(%22Warning, dest already exists:%22, dest)%0A else:%0A os.symlink
(src, dest)%0A
@@ -3967,10 +3967,10 @@
in(m
-d
o
+d
els_
|
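Besides fixing the mdoels_src typo, the hunks add state and cache directories, move the .feat/.desc symlinks from meta into the new cache directory, and have meta copy .json/.match files instead. The resulting directory setup, condensed into a runnable sketch (the project path is illustrative):

import os

analysis_dir = os.path.join("project", "ImageAnalysis")
# post-commit layout: meta keeps .json/.match copies, cache takes the
# .feat/.desc symlinks, and state sits alongside them
for sub in ("meta", "models", "state", "cache"):
    os.makedirs(os.path.join(analysis_dir, sub), exist_ok=True)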
b671d67aaf80df9297213973659c59a4ebd72e08
|
test file changed
|
pycqed/tests/analysis_v2/test_Two_state_T1_analysis.py
|
pycqed/tests/analysis_v2/test_Two_state_T1_analysis.py
|
import unittest
import pycqed as pq
import os
from pycqed.analysis_v2 import measurement_analysis as ma
from pycqed.analysis_v2 import Two_state_T1_analysis as Ta
class Test_efT1_analysis(unittest.TestCase):
@classmethod
def setUpClass(self):
self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
ma.a_tools.datadir = self.datadir
def test_efT1_analysis(self):
Ta.efT1_analysis(
t_start='20180606_144110', auto=True, close_figs=False)
|
Python
| 0.000001
|
@@ -492,8 +492,64 @@
=False)%0A
+ self.fit_res%5B'fit_res_P0'%5D.params%5B'tau1'%5D.value%0A
|
196b9547b4dbcbfbf4891c7fd3ea3b9944018430
|
Revert "Revert "Added script for cron job to load surveys to database.""
|
scripts/cronRefreshEdxQualtrics.py
|
scripts/cronRefreshEdxQualtrics.py
|
from surveyextractor import QualtricsExtractor
import getopt, sys
# Script for scheduling regular EdxQualtrics updates
# Usage for cron should be "cronRefreshEdxQualtrics.py -m -s -r"
qe = QualtricsExtractor()
opts, args = getopt.getopt(sys.argv[1:], 'amsr', ['--reset', '--loadmeta', '--loadsurveys', '--loadresponses'])
for opt, arg in opts:
if opt in ('-a', '--reset'):
qe.resetMetadata()
qe.resetSurveys()
qe.resetResponses()
elif opt in ('-m', '--loadmeta'):
qe.loadSurveyMetadata()
elif opt in ('-s', '--loadsurvey'):
qe.resetSurveys()
qe.loadSurveyData()
elif opt in ('-r', '--loadresponses'):
qe.loadResponseData()
|
Python
| 0
|
@@ -57,15 +57,23 @@
topt
-,
+%0Aimport
sys%0A%0A
+##
# Sc
@@ -121,16 +121,18 @@
updates%0A
+##
# Usage
@@ -189,16 +189,138 @@
-s -r%22%0A%0A
+# Append directory for dependencies to PYTHONPATH%0Asys.path.append(%22/home/dataman/Code/qualtrics_etl/src/qualtrics_etl/%22)%0A%0A
qe = Qua
|
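The re-landed script splits the getopt/sys import onto separate lines and widens PYTHONPATH before importing its dependency. Replayed as a sketch; the appended path is the one hard-coded in the diff and exists only on the deployment host:

import sys

sys.path.append("/home/dataman/Code/qualtrics_etl/src/qualtrics_etl/")
# from surveyextractor import QualtricsExtractor  # now resolvable there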
adfdfb46d9264f5733926d9ed5a95aece3326921
|
fix failing api_encode test for python3
|
janrain/capture/api.py
|
janrain/capture/api.py
|
""" Base class for making API calls to the Janrain API. """
# pylint: disable=E0611
from janrain.capture.exceptions import ApiResponseError
from janrain.utils import utf8_encode
from json import dumps as to_json
from contextlib import closing
from base64 import b64encode
from hashlib import sha1
import hmac
import time
import logging
logger = logging.getLogger(__name__)
# Use a try/catch when importing requests so that the setup.py script can still
# import from __init__.py without failing.
try:
import requests
except ImportError:
logger.warn("Missing 'requests' module. Install using 'pip install " \
"requests'.")
def api_encode(value):
"""
Encodes a native Python value in a way that the API expects. Encodes lists
and dicts to JSON and boolean values to 'true' or 'false'.
Args:
value - The Python value to encode.
Returns:
The value encoded for the Janrain API.
"""
if isinstance(value, (dict, list, tuple)):
return to_json(value).encode('utf-8')
if value is True:
return 'true'
if value is False:
return 'false'
try:
if isinstance(value, basestring):
return value.encode('utf-8')
except NameError:
if isinstance(value, str):
return value.encode('utf-8')
return value
def api_decode(value):
"""Convert api encoded values from utf-8 back to unicode"""
try:
return value.decode('utf-8')
except AttributeError:
return value
def generate_signature(api_call, unsigned_params):
"""
Sign the API call by generating an "Authentication" header.
Args:
api_call - The API endpoint as a relative URL.
unsigned_params - A dictionary of parameters in the POST to the API.
Returns:
A 2-tuple containing the HTTP headers needed to sign the request and
the modified parameters which should be sent to the request.
"""
params = unsigned_params.copy()
params = {k: api_decode(v) for k, v in params.items()}
# Do not POST authentication parameters. Use them to create an
# authentication header instead.
access_token = params.pop('access_token', None)
client_id = params.pop('client_id', None)
client_secret = params.pop('client_secret', None)
headers = {}
if access_token:
# Simply use the access token if provided rather than id/secret
headers['Authorization'] = "OAuth {}".format(access_token)
else:
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
data = "{}\n{}\n".format(api_call, timestamp)
if params:
kv_str = ["{}={}".format(k, v)
for k, v in params.items()]
kv_str.sort()
data += "\n".join(kv_str) + "\n"
sha1_str = hmac.new(
client_secret.encode('utf-8'),
utf8_encode(data),
sha1
).digest()
hash_str = b64encode(sha1_str)
headers['Date'] = timestamp
signature = "Signature {}:{}".format(
client_id,
hash_str.decode('utf-8'))
headers['Authorization'] = signature
logger.debug(signature)
return headers, params
def raise_api_exceptions(response):
"""
Parse the response from the API converting errors into exceptions.
Args:
response - The JSON response from the Janrain API.
Raises:
ApiResponseError
"""
if response['stat'] == 'error':
logger.debug("Response:\n" + to_json(response, indent=4))
try:
message = response['error_description']
except KeyError:
message = response['message']
raise ApiResponseError(response['code'], response['error'], \
message, response)
class Api(object):
"""
Base object for making API calls to the Janrain API.
Args:
api_url - Absolute URL to API.
defaults - A dictionary of default params to pass to every call.
compress - A boolean indicating to use gzip compression.
sign_requests - A boolean indicating to sign the requests.
Example:
defaults = {'client_id': "...", 'client_secret': "..."}
api = janrain.capture.Api("https://...", defaults)
count = api.call("entity.count", type_name="user")
"""
def __init__(self, api_url, defaults={}, compress=True, sign_requests=True):
if api_url[0:4] == "http":
self.api_url = api_url
else:
self.api_url = "https://" + api_url
self.defaults = defaults
self.sign_requests = sign_requests
self.compress = compress
def call(self, api_call, **kwargs):
"""
Low-level method for making API calls. It handles encoding the
parameters, constructing authentication headers, decoding the response,
and converting API error responses into Python exceptions.
Args:
api_call - The API endpoint as a relative URL.
Keyword Args:
Keyword arguments are specific to the api_call and can be found in
the Janrain API documentation at:
http://developers.janrain.com/documentation/capture/restful_api/
Raises:
ApiResponseError
"""
# Encode values for the API (JSON, bools, nulls)
params = self.defaults.copy()
for key, value in kwargs.items():
if value is not None:
params[key] = value
params = {k: api_encode(v) for k, v in params.items()}
if api_call[0] != "/":
api_call = "/" + api_call
url = self.api_url + api_call
logger.debug(url)
# Signing the request modifies the request object and params in-place.
# Sign the request *before* encoding and passing the params.
if self.sign_requests:
headers, params = generate_signature(api_call, params)
else:
headers = {}
# Print the parameters (for debugging)
print_params = params.copy()
if 'client_secret' in print_params:
print_params['client_secret'] = "REDACTED"
logger.debug(print_params)
# Accept gzip compression
if self.compress:
headers['Accept-encoding'] = 'gzip'
# Let any exceptions here get raised to the calling code. This includes
# things like connection errors and timeouts.
r = requests.post(url, headers=headers, data=params)
try:
raise_api_exceptions(r.json())
if r.status_code not in (200, 400, 401):
# /oauth/token returns 400 or 401
r.raise_for_status()
return r.json()
except ValueError:
# The response was not valid JSON (empty body, 5xx errors, etc.)
r.raise_for_status()
|
Python
| 0.000004
|
@@ -1076,16 +1076,32 @@
n 'true'
+.encode('utf-8')
%0A if
@@ -1138,16 +1138,32 @@
'false'
+.encode('utf-8')
%0A try
|
c047f33561f304a9932c1d43284c59ae51035c69
|
update client
|
chat/consumers.py
|
chat/consumers.py
|
import re
import json
import logging
from channels import Group
from channels.sessions import channel_session
from .models import Room
from .models import Player
log = logging.getLogger(__name__)
@channel_session
def ws_connect(message):
# Extract the room from the message. This expects message.path to be of the
# form /chat/{label}/, and finds a Room if the message path is applicable,
    # and if the Room exists. Otherwise, bails (meaning this is some other sort
# of websocket). So, this is effectively a version of _get_object_or_404.
try:
prefix, label = message['path'].decode('ascii').strip('/').split('/')
if prefix != 'chat':
log.debug('invalid ws path=%s', message['path'])
return
room = Room.objects.get(label=label)
except ValueError:
log.debug('invalid ws path=%s', message['path'])
return
except Room.DoesNotExist:
log.debug('ws room does not exist label=%s', label)
return
log.debug('chat connect room=%s client=%s:%s',
room.label, message['client'][0], message['client'][1])
# Need to be explicit about the channel layer so that testability works
# This may be a FIXME?
Group('chat-'+label).add(message.reply_channel)
message.channel_session['room'] = room.label
@channel_session
def ws_receive(message):
# Look up the room from the channel session, bailing if it doesn't exist
try:
label = message.channel_session['room']
room = Room.objects.get(label=label)
except KeyError:
log.debug('no room in channel_session')
return
except Room.DoesNotExist:
        log.debug('received message, but room does not exist label=%s', label)
return
# Parse out a chat message from the content text, bailing if it doesn't
# conform to the expected message format.
try:
data = json.loads(message['text'])
except ValueError:
log.debug("ws message isn't json text=%s", text)
return
if set(data.keys()) != set(('handle', 'message')):
log.debug("ws message unexpected format data=%s", data)
return
if data:
player = None
try:
player = room.players.filter(position=data['handle']).first()
except ValueError:
log.debug("something is wrong")
return
if player is not None:
if player.address != message.reply_channel.name:
log.debug("this room's position has been occupied by another guy")
return
else:
room.players.create(position=data['handle'],address=message.reply_channel.name)
log.debug('chat message room=%s handle=%s message=%s',
room.label, data['handle'], data['message'])
m = room.messages.create(**data)
# See above for the note about Group
Group('chat-'+label).send({'text': json.dumps(m.as_dict())})
@channel_session
def ws_disconnect(message):
try:
label = message.channel_session['room']
room = Room.objects.get(label=label)
Group('chat-'+label).discard(message.reply_channel)
except (KeyError, Room.DoesNotExist):
pass
|
Python
| 0.000001
|
@@ -3157,24 +3157,211 @@
ly_channel)%0A
+ player = room.player.filter(address=message.reply_channel.name).first()%0A if player is not None:%0A room.player.filter(address=message.reply_channel.name).delete()%0A
except (
|
f1111b6d7eb387e7287497c1853addd003a81f39
|
Add a length limit
|
chatterbox/irc.py
|
chatterbox/irc.py
|
import time
import random
import irc.bot
class Bot(irc.bot.SingleServerIRCBot):
def __init__(self, generator, channels, nickname, server, port=6667):
super().__init__([(server, port)], nickname, nickname)
self.generator = generator
self.channels_to_join = channels
self.nick = nickname
def on_nicknameinuse(self, c, e):
self.nick = c.get_nickname() + '_'
c.nick(self.nick)
def on_welcome(self, c, e):
for channel in self.channels_to_join:
c.join(channel)
def on_privmsg(self, c, e):
sentence = self.generator.generate_sentence()
time.sleep((random.random() + 1) * 0.015 * len(sentence))
c.privmsg(e.source.nick, sentence)
def on_pubmsg(self, c, e):
if self.nick in e.arguments[0]:
sentence = self.generator.generate_sentence()
time.sleep((random.random() + 1) * 0.015 * len(sentence))
c.privmsg(e.target, sentence)
|
Python
| 0.998833
|
@@ -611,32 +611,38 @@
erate_sentence()
+%5B:450%5D
%0A time.sl
@@ -864,16 +864,22 @@
ntence()
+%5B:450%5D
%0A
|
7e4554b98c4bd431431e5c22845a18ba842349e8
|
fix pol append logic error
|
scripts/mc_log_autocorrelations.py
|
scripts/mc_log_autocorrelations.py
|
#! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Record information about antenna autocorrelations, as logged into the Redis
server by the correlator software.
"""
from __future__ import absolute_import, division, print_function
import socket
import time
from builtins import int
import re
import datetime
from astropy.time import Time
import numpy as np
import redis
from hera_mc import autocorrelations, mc
# Preliminaries. We have a small validity check since the M&C design specifies
# that the memory, network, and system load are to be 5-minute averages.
MONITORING_INTERVAL = 60 # seconds
# End of config.
parser = mc.get_mc_argument_parser()
parser.add_argument("--redishost", "-r", default="redishost",
help="The hostname of the redis server.")
parser.add_argument('--redisport', '-p', default=6379,
help="Port for the redis server connection.")
parser.add_argument('--debug', action='store_true',
help='Print out debugging information.')
parser.add_argument('--noop', action='store_true',
help='Do not actually save information to the database.')
args = parser.parse_args()
db = mc.connect_to_mc_db(args)
# allocate the maximum size of the autocorrelations as a buffer.
auto = np.zeros(8192, dtype=np.float32)
# make a redis pool and connect to redis
redis_pool = redis.ConnectionPool(host=args.redishost, port=args.redisport)
rsession = redis.Redis(connection_pool=redis_pool)
with db.sessionmaker() as dbsession:
try:
while True:
hostname = socket.gethostname()
keys = [
k.decode("utf-8")
for k in rsession.keys()
if k.startswith(b"auto") and not k.endswith(b"timestamp")
]
ants = []
pols = []
for key in keys:
match = re.search(r"auto:(?P<ant>\d+)(?P<pol>e|n)", key)
if match is not None:
ant, pol = int(match.group("ant")), match.group("pol")
ants.append(ant)
pols.append(pols)
ants = np.unique(ants)
pols = np.unique(pols)
# We put an identical timestamp for all records. The records from the
# redis server also include timestamps (as JDs), but I think it's actually
# preferable to use our own clock here. Note that we also ensure that the
# records grabbed in one execution of this script have identical
# timestamps, which is a nice property.
auto_time = datetime.datetime.utcnow()
for ant in ants:
for pol in pols:
d = rsession.get("auto:{ant:d}{pol:s}".format(ant=ant, pol=pol))
if d is not None:
auto = np.frombuffer(d, dtype=np.float32)
# For now, we just compute the median:
ac = autocorrelations.Autocorrelations()
ac.time = auto_time
ac.antnum = ant
ac.polarization = pol
ac.measurement_type = autocorrelations.MeasurementTypes.median
# must turn np.float32 into plain Python float
ac.value = np.median(auto).item()
if args.debug:
print(auto.shape, repr(ac))
if not args.noop:
dbsession.add(ac)
dbsession.add_daemon_status('mc_log_autocorrelations',
hostname, Time.now(), 'good')
dbsession.commit()
time.sleep(MONITORING_INTERVAL)
except KeyboardInterrupt:
pass
except Exception:
dbsession.add_daemon_status('mc_log_autocorrelations',
hostname, Time.now(), 'errored')
dbsession.commit()
raise
|
Python
| 0.000003
|
@@ -2195,17 +2195,16 @@
pend(pol
-s
)%0A
|
58483cbd70f1ae5e55656b01238f26bd6da6f903
|
Change format
|
captain_hook/services/telegram/commands/base/base_command.py
|
captain_hook/services/telegram/commands/base/base_command.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import telegram
class BaseCommand:
def __init__(self, config):
self.config = config
self.telegram_bot = telegram.Bot(self.config["token"])
self.bot_info = self.telegram_bot.getMe()
def run(self, messageObj, config):
raise NotImplementedError
def sendMessage(self, chat_id,
text,
parse_mode=telegram.ParseMode.MARKDOWN,
disable_web_page_preview=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=None,
**kwargs):
try:
self.telegram_bot.sendMessage(chat_id,
text,
parse_mode,
disable_web_page_preview,
disable_notification,
reply_to_message_id,
reply_markup,
timeout,
**kwargs
)
except telegram.error.RetryAfter:
pass
def sendPhoto(self, chat_id,
photo='',
**kwargs):
try:
self.telegram_bot.send_photo(chat_id=chat_id, photo=photo, **kwargs)
except telegram.error.RetryAfter:
pass
def sendDocument(self, chat_id,
document='',
**kwargs):
try:
self.telegram_bot.send_document(chat_id=chat_id, document=document, **kwargs)
except telegram.error.RetryAfter:
pass
|
Python
| 0.000004
|
@@ -767,32 +767,17 @@
Message(
-chat_id,%0A
+%0A
@@ -776,35 +776,41 @@
-
+chat_id,%0A
@@ -819,34 +819,8 @@
xt,%0A
-
@@ -847,34 +847,8 @@
de,%0A
-
@@ -905,34 +905,8 @@
-
-
disa
@@ -943,34 +943,8 @@
-
-
repl
@@ -964,34 +964,8 @@
id,%0A
-
@@ -1010,124 +1010,42 @@
- timeout,%0A **kwargs%0A
+timeout,%0A **kwargs%0A
|
a59e30b87310ad2727844a5f01cdc566ab436b88
|
handle json load failure on response body from graphite
|
check_graphite.py
|
check_graphite.py
|
#!/usr/bin/env python
"""
check_graphite.py
~~~~~~~
:copyright: (c) 2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import json
import optparse
import urllib
import urllib2
import sys
NAGIOS_STATUSES = {
'OK': 0,
'WARNING': 1,
'CRITICAL': 2,
'UNKNOWN': 3
}
class Graphite(object):
def __init__(self, url, targets, time_from, time_until):
self.url = url.rstrip('/')
self.targets = targets
self.time_from = time_from
self.time_until = time_until
params = [('target', t) for t in self.targets] +\
[('from', self.time_from)] +\
[('until', self.time_until)] +\
[('format', 'json')]
self.full_url = self.url + '/render?' +\
urllib.urlencode(params)
def check_datapoints(self, datapoints, func, **kwargs):
if kwargs.get('threshold'):
return [x for x in datapoints if x and func(x, kwargs['threshold'])]
elif kwargs.get('compare'):
return [datapoints[x] for x in xrange(len(datapoints)) if func(datapoints[x], kwargs['compare'][x])]
def fetch_metrics(self):
try:
response = urllib2.urlopen(self.full_url)
if response.code != 200:
return None
else:
return json.loads(response.read())
except urllib2.URLError:
return None
def generate_output(self, datapoints, *args, **kwargs):
check_output = dict(OK=[], WARNING=[], CRITICAL=[])
warning = kwargs.get('warning', 0)
critical = kwargs.get('critical', 0)
target = kwargs.get('target', 'timeseries')
if len(args) > 1:
(warn_oob, crit_oob) = args
else:
crit_oob = [x for x in args[0] if x]
warn_oob = []
if crit_oob:
check_output['CRITICAL'].append('%s [crit=%f|datapoints=%s]' %\
(target, critical, ','.join(['%s' % str(x) for x in crit_oob])))
elif warn_oob:
check_output['WARNING'].append('%s [warn=%f|datapoints=%s]' %\
(target, warning, ','.join(['%s' % str(x) for x in warn_oob])))
else:
check_output['OK'].append('%s [warn=%0.3f|crit=%f|datapoints=%s]' %\
(target, warning, critical, ','.join(['%s' % str(x) for x in datapoints])))
return check_output
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('-U', '--graphite-url', dest='graphite_url',
default='http://localhost/',
metavar='URL',
help='Graphite URL [%default]')
parser.add_option('-t', '--target', dest='targets',
action='append',
help='Target to check')
parser.add_option('--from', dest='time_from',
help='From timestamp/date')
parser.add_option('--until', dest='time_until',
default='now',
help='Until timestamp/date [%default]')
parser.add_option('--percentile', dest='percentile',
default=0,
type='int',
metavar='PERCENT',
help='Use nPercentile Graphite function on the target (returns one datapoint)')
parser.add_option('--confidence', dest='confidence_bands',
default=False,
action='store_true',
help='Use holtWintersConfidenceBands Graphite function on the target')
parser.add_option('--over', dest='over',
default=True,
action='store_true',
help='Over specified WARNING or CRITICAL threshold [%default]')
parser.add_option('--under', dest='under',
default=False,
action='store_true',
help='Under specified WARNING or CRITICAL threshold [%default]')
parser.add_option('-W', dest='warning',
metavar='VALUE',
help='Warning if datapoints beyond VALUE')
parser.add_option('-C', dest='critical',
metavar='VALUE',
help='Critical if datapoints beyond VALUE')
(options, args) = parser.parse_args()
for mandatory in ['time_from', 'targets']:
if not options.__dict__[mandatory]:
print 'ERROR: missing option: --%s\n' % mandatory.replace('time_', '').replace('targets', 'target')
parser.print_help()
sys.exit(NAGIOS_STATUSES['UNKNOWN'])
real_from = options.time_from
if options.under:
options.over = False
if options.confidence_bands:
targets = [options.targets[0], 'holtWintersConfidenceBands(%s)' % options.targets[0]]
if options.over:
check_func = lambda x, y: x > y
else:
check_func = lambda x, y: x < y
check_threshold = None
from_slice = int(options.time_from) * -1
real_from = '-2w'
else:
for mandatory in ['warning', 'critical']:
if not options.__dict__[mandatory]:
print 'ERROR: missing option: --%s\n' % mandatory
parser.print_help()
sys.exit(NAGIOS_STATUSES['UNKNOWN'])
if options.percentile:
targets = ['nPercentile(%s, %d)' % (options.targets[0], options.percentile)]
else:
targets = options.targets
try:
warn = float(options.warning)
crit = float(options.critical)
if options.over:
check_func = lambda x, y: x > y
else:
check_func = lambda x, y: x < y
except ValueError:
print 'ERROR: WARNING or CRITICAL threshold is not a number\n'
parser.print_help()
sys.exit(NAGIOS_STATUSES['UNKNOWN'])
check_output = {}
graphite = Graphite(options.graphite_url, targets, real_from, options.time_until)
metric_data = graphite.fetch_metrics()
if metric_data:
if options.confidence_bands:
for target in metric_data:
if target['target'].startswith('holtWintersConfidenceUpper'):
if options.over:
expected_datapoints = [x[0] for x in target.get('datapoints', [])][from_slice:]
elif target['target'].startswith('holtWintersConfidenceLower'):
if options.under:
expected_datapoints = [x[0] for x in target.get('datapoints', [])][from_slice:]
else:
actual_datapoints = [x[0] for x in target.get('datapoints', [])][from_slice:]
target_name = target['target']
if actual_datapoints and expected_datapoints:
points_oob = graphite.check_datapoints(actual_datapoints, check_func, compare=expected_datapoints)
check_output[target['target']] = graphite.generate_output(actual_datapoints,
points_oob,
target=target['target'])
else:
for target in metric_data:
datapoints = [x[0] for x in target.get('datapoints', []) if x]
crit_oob = graphite.check_datapoints(datapoints, check_func, threshold=crit)
warn_oob = graphite.check_datapoints(datapoints, check_func, threshold=warn)
check_output[target['target']] = graphite.generate_output(datapoints,
warn_oob,
crit_oob,
target=target['target'],
warning=warn,
critical=crit)
else:
print 'CRITICAL: No output from Graphite!'
sys.exit(NAGIOS_STATUSES['CRITICAL'])
for target, messages in check_output.iteritems():
if messages['CRITICAL']:
exit_code = NAGIOS_STATUSES['CRITICAL']
elif messages['WARNING']:
exit_code = NAGIOS_STATUSES['WARNING']
else:
exit_code = NAGIOS_STATUSES['OK']
for status_code in ['CRITICAL', 'WARNING', 'OK']:
if messages[status_code]:
print '\n'.join(['%s: %s' % (status_code, status) for status in messages[status_code]])
sys.exit(exit_code)
|
Python
| 0
|
@@ -1366,24 +1366,35 @@
ib2.URLError
+, TypeError
:%0A
|
6f03120a57d40491e7d8245e10989a3e03b9481d
|
Set up task list for cook robot
|
se306/src/package1/scripts/cook.py
|
se306/src/package1/scripts/cook.py
|
#!/usr/bin/env python
import roslib
import rospy
import std_msgs.msg
import navigation
from std_msgs.msg import String
class Cook(navigation.Navigation):
''' When a message is passed out from the scheduler, determine whether it is
    relevant to this object. If so, take the necessary action
'''
def process_event(self, action_msg):
message = str(action_msg).split("data: ")[1]
if ('Cook.cook_' in message):
self.navigate.current_path = list(self.cook_path)
self.navigate.target_coordinate = self.navigate.current_path.pop(0)
def __init__(self):
self.rate = rospy.Rate(20)
# Create a navigation object which will be used to manage all the calls
# relating to movement. Passed the robot's name so that the publisher
        # and subscribers for its navigation can be set up.
#Eventually we will make this input a variable instead of hardcoded
self.navigate = navigation.Navigation("robot_2")
rospy.Subscriber("scheduler", String, self.process_event)
while not rospy.is_shutdown():
self.navigate.movement_publisher.publish(self.navigate.move_cmd)
self.rate.sleep()
if __name__ == '__main__':
rospy.init_node('cook_robot')
cook = Cook()
|
Python
| 0.999988
|
@@ -410,16 +410,142 @@
essage):
+%0A%0A%09%09%09self.task_list.append(message)%0A%09%09%09%0A%0A%0A%09def perform_task(self, task):%0A%0A%09%09self.status = %22active%22 %0A%0A%09%09if task ==%22Cook.cook_%22:
%0A%09%09%09self
@@ -662,16 +662,20 @@
pop(0)%0A%0A
+%0A%09%0A%0A
%09def __i
@@ -716,16 +716,61 @@
ate(20)%0A
+%09%09self.task_list = %5B%5D%0A%09%09self.status = %22idle%22%0A
%09%09# Crea
@@ -1179,16 +1179,16 @@
down():%0A
-
%09%09%09self.
@@ -1247,16 +1247,200 @@
ve_cmd)%0A
+%0A%09%09%09if (len(self.navigate.target_coordinate) == 0):%0A%09%09%09%09self.status = %22idle%22%0A%0A%0A%09%09%09if (len(self.task_list) %3E 0 and self.status == %22idle%22):%0A%09%09%09%09self.perform_task(self.task_list.pop(0))%0A%0A
%09%09%09self.
|
da55338b1bfc82bd303a3003fef881ceb3605b28
|
Make views time-centric, not date-centric
|
tracking/views.py
|
tracking/views.py
|
import logging
from datetime import timedelta
from django import forms
from django.shortcuts import render
from django.contrib.auth.decorators import permission_required
from django.utils.timezone import now
from tracking.models import Visitor, Pageview
from tracking.settings import TRACK_PAGEVIEWS
log = logging.getLogger(__file__)
# tracking wants to accept more formats than default, here they are
input_formats = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%Y-%m', # '2006-10'
'%Y', # '2006'
]
class DashboardForm(forms.Form):
start_time = forms.DateTimeField(
required=False, input_formats=input_formats)
end_time = forms.DateTimeField(
required=False, input_formats=input_formats)
@permission_required('tracking.view_visitor')
def dashboard(request):
"Counts, aggregations and more!"
end_time = now()
start_time = end_time - timedelta(days=1)
defaults = {'start_time': start_time, 'end_time': end_time}
form = DashboardForm(data=request.GET or defaults)
if form.is_valid():
start_time = form.cleaned_data['start_time']
end_time = form.cleaned_data['end_time']
# determine when tracking began
try:
track_start_time = Visitor.objects.earliest('start_time').start_time
except Visitor.DoesNotExist:
track_start_time = now()
# If the start_date is before tracking began, warn about incomplete data
warn_incomplete = (start_time < track_start_time)
# queries take `date` objects (for now)
start_date = start_time.date()
end_date = end_time.date()
user_stats = Visitor.objects.user_stats(start_date, end_date)
visitor_stats = Visitor.objects.stats(start_date, end_date)
if TRACK_PAGEVIEWS:
pageview_stats = Pageview.objects.stats(start_date, end_date)
else:
pageview_stats = None
context = {
'form': form,
'track_start_time': track_start_time,
'warn_incomplete': warn_incomplete,
'user_stats': user_stats,
'visitor_stats': visitor_stats,
'pageview_stats': pageview_stats,
}
return render(request, 'tracking/dashboard.html', context)
|
Python
| 0.999068
|
@@ -1648,75 +1648,8 @@
ow)%0A
- start_date = start_time.date()%0A end_date = end_time.date()%0A%0A
@@ -1690,37 +1690,37 @@
stats(start_
-dat
+tim
e, end_
-dat
+tim
e)%0A visit
@@ -1754,37 +1754,37 @@
stats(start_
-dat
+tim
e, end_
-dat
+tim
e)%0A if TR
@@ -1856,21 +1856,21 @@
art_
-dat
+tim
e, end_
-dat
+tim
e)%0A
|
419ab74f1bb0c0d46a1547cb1d3bc9ab16d5b925
|
Update cindy/setting.py
|
cindy/settings.py
|
cindy/settings.py
|
"""
Django settings for cindy project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9suovzr#od0ywfg9e422zpqqx1e0dnp%xaw3vv2y@7mbscpswn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cindy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cindy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
Python
| 0
|
@@ -457,17 +457,16 @@
e__)))%0A%0A
-%0A
# Quick-
@@ -825,18 +825,86 @@
OSTS = %5B
-%5D
%0A
+ 'localhost',%0A '127.0.0.1',%0A 'heyrict.pythonanywhere.com',%0A%5D
%0A%0A# Appl
@@ -2104,17 +2104,16 @@
ation'%0A%0A
-%0A
# Databa
@@ -2323,17 +2323,16 @@
%7D%0A%7D%0A%0A
-%0A
# Passwo
@@ -2468,32 +2468,40 @@
%0A 'NAME':
+%0A
'django.contrib
@@ -2581,32 +2581,40 @@
%0A 'NAME':
+%0A
'django.contrib
@@ -2684,32 +2684,40 @@
%0A 'NAME':
+%0A
'django.contrib
@@ -2796,16 +2796,24 @@
'NAME':
+%0A
'django
@@ -2883,17 +2883,16 @@
%7D,%0A%5D%0A%0A
-%0A
# Intern
@@ -2982,13 +2982,13 @@
= '
-en-us
+ja-jp
'%0A%0AT
@@ -3003,11 +3003,19 @@
= '
-UTC
+Japan/Tokyo
'%0A%0AU
@@ -3062,17 +3062,16 @@
= True%0A%0A
-%0A
# Static
|
2de8b9c95d5b0cbe9f990a0c6e82ae315c6aa21b
|
Add fp.sort property
|
claripy/ast/fp.py
|
claripy/ast/fp.py
|
from .bits import Bits
from ..ast.base import Base, _make_name
class FP(Bits):
def to_fp(self, rm, sort):
if rm is None:
rm = fp.RM.default()
return fpToFP(rm, self, sort)
def raw_to_fp(self):
return self
def to_bv(self):
return fpToIEEEBV(self)
def FPI(model, **kwargs):
kwargs['length'] = model.sort.length
return FP('I', (model,), **kwargs)
def FloatingPoint(name, sort, explicit_name=None):
n = _make_name(name, sort.length, explicit_name=explicit_name, prefix='FP_')
return FP('FP', (n, sort), variables={n}, symbolic=True, simplified=Base.FULL_SIMPLIFY, length=sort.length)
def FPV(*args):
return FPI(fp.FPV(*args), variables=set(), symbolic=False, simplified=Base.FULL_SIMPLIFY, eager=True)
#
# unbound floating point conversions
#
from .. import operations
from .. import fp
from .bv import BV
from .bool import Bool
def _fp_length_calc(a1, a2, a3=None):
if isinstance(a1, fp.RM) and a3 is None:
raise Exception()
if a3 is None:
return a2.length
else:
return a3.length
fpToFP = operations.op('fpToFP', object, FP, bound=False, calc_length=_fp_length_calc)
fpToFPUnsigned = operations.op('fpToFPUnsigned', (fp.RM, BV, fp.FSort), FP, bound=False, calc_length=_fp_length_calc)
fpFP = operations.op('fpFP', (BV, BV, BV), FP, bound=False,
calc_length=lambda a, b, c: a.length + b.length + c.length)
fpToIEEEBV = operations.op('fpToIEEEBV', (FP,), BV, bound=False, calc_length=lambda fp: fp.length)
fpToSBV = operations.op('fpToSBV', (fp.RM, FP, (int, long)), BV, bound=False, calc_length=lambda _rm, _fp, len: len)
fpToUBV = operations.op('fpToUBV', (fp.RM, FP, (int, long)), BV, bound=False, calc_length=lambda _rm, _fp, len: len)
#
# unbound float point comparisons
#
def _fp_cmp_check(a, b):
return a.length == b.length, "FP lengths must be the same"
fpEQ = operations.op('fpEQ', (FP, FP), Bool, bound=False, extra_check=_fp_cmp_check)
fpGT = operations.op('fpGT', (FP, FP), Bool, bound=False, extra_check=_fp_cmp_check)
fpGEQ = operations.op('fpGEQ', (FP, FP), Bool, bound=False, extra_check=_fp_cmp_check)
fpLT = operations.op('fpLT', (FP, FP), Bool, bound=False, extra_check=_fp_cmp_check)
fpLEQ = operations.op('fpLEQ', (FP, FP), Bool, bound=False, extra_check=_fp_cmp_check)
#
# unbound floating point arithmetic
#
def _fp_binop_check(rm, a, b): #pylint:disable=unused-argument
return a.length == b.length, "Lengths must be equal"
def _fp_binop_length(rm, a, b): #pylint:disable=unused-argument
return a.length
fpAbs = operations.op('fpAbs', (FP,), FP, bound=False, calc_length=lambda x: x.length)
fpNeg = operations.op('fpNeg', (FP,), FP, bound=False, calc_length=lambda x: x.length)
fpSub = operations.op('fpSub', (fp.RM, FP, FP), FP, bound=False, extra_check=_fp_binop_check, calc_length=_fp_binop_length)
fpAdd = operations.op('fpAdd', (fp.RM, FP, FP), FP, bound=False, extra_check=_fp_binop_check, calc_length=_fp_binop_length)
fpMul = operations.op('fpMul', (fp.RM, FP, FP), FP, bound=False, extra_check=_fp_binop_check, calc_length=_fp_binop_length)
fpDiv = operations.op('fpDiv', (fp.RM, FP, FP), FP, bound=False, extra_check=_fp_binop_check, calc_length=_fp_binop_length)
#
# bound fp operations
#
fp.__eq__ = operations.op('fpEQ', (FP, FP), Bool, extra_check=_fp_cmp_check)
|
Python
| 0.000001
|
@@ -300,16 +300,98 @@
(self)%0A%0A
+ @property%0A def sort(self):%0A return fp.FSort.from_size(self.length)%0A%0A
def FPI(
|
d19b72f42801dde328ae1e1d935c5df3a5797d4e
|
update manage.py for refactored appstate/config modules
|
app/manage.py
|
app/manage.py
|
import os
import sys
import scriptine
from scriptine.shell import sh
from geobox.web import create_app
def babel_init_lang_command(lang):
"Initialize new language."
sh('pybabel init -i geobox/web/translations/messages.pot -d geobox/web/translations -l %s' % (lang,))
def babel_refresh_command():
"Extract messages and update translation files."
# get directory of all extension that also use translations
import wtforms
wtforms_dir = os.path.dirname(wtforms.__file__)
extensions = ' '.join([wtforms_dir])
sh('pybabel extract -F babel.cfg -k lazy_gettext -k _l -o geobox/web/translations/messages.pot geobox/web geobox/model geobox/lib ' + extensions)
sh('pybabel update -i geobox/web/translations/messages.pot -d geobox/web/translations')
def babel_compile_command():
"Compile translations."
sh('pybabel compile -d geobox/web/translations')
def fixtures_command():
from geobox.config import GeoBoxState
from geobox.model.fixtures import add_fixtures
app_state = GeoBoxState.initialize()
if os.path.exists(app_state.db_filename):
os.remove(app_state.db_filename)
app_state = GeoBoxState.initialize()
session = app_state.user_db_session()
add_fixtures(session)
session.commit()
def init_db_command():
from geobox.config import GeoBoxState
from geobox.model.fixtures import add_fixtures
app_state = GeoBoxState.initialize()
if os.path.exists(app_state.db_filename):
os.remove(app_state.db_filename)
app_state = GeoBoxState.initialize()
session = app_state.user_db_session()
session.commit()
def webserver_command(config='./geobox.ini'):
from geobox.config import GeoBoxConfig, GeoBoxState
config = GeoBoxConfig.from_file(config)
if not config:
sys.exit(1)
app_state = GeoBoxState(config)
app = create_app(app_state)
# scriptine removed sub-command from argv,
# but Flask reloader needs complete sys.argv
sys.argv[1:1] = ['webserver']
app.run(port=config.get('web', 'port'))
if __name__ == '__main__':
scriptine.run()
|
Python
| 0
|
@@ -918,38 +918,40 @@
from geobox.
-config
+appstate
import GeoBoxSt
@@ -1298,38 +1298,40 @@
from geobox.
-config
+appstate
import GeoBoxSt
@@ -1682,22 +1682,24 @@
geobox.
-config
+appstate
import
@@ -1708,27 +1708,59 @@
oBox
-Config, GeoBoxState
+State%0A from geobox.defaults import GeoBoxConfig%0A
%0A
|
0b77e09ac16006d1baa6a5f4093b51c1a13863e9
|
Add as_dict method to Digit model
|
app/models.py
|
app/models.py
|
from app import db
class Digit(db.Model):
id = db.Column(db.INTEGER, primary_key=True)
label = db.Column(db.INTEGER)
tsne_x = db.Column(db.REAL)
tsne_y = db.Column(db.REAL)
tsne_z = db.Column(db.REAL)
array = db.Column(db.String)
image = db.Column(db.BLOB)
def __repr__(self):
return '<Digit %d %d>' % (self.id, self.label)
|
Python
| 0.000004
|
@@ -36,16 +36,46 @@
Model):%0A
+ __tablename__ = 'digits'%0A%0A
id =
@@ -281,39 +281,8 @@
ing)
-%0A image = db.Column(db.BLOB)
%0A%0A
@@ -358,8 +358,222 @@
.label)%0A
+%0A def as_dict(self, fields=None):%0A if not fields:%0A return %7Bc.name: getattr(self, c.name) for c in self.__table__.columns%7D%0A else:%0A return %7Bc: getattr(self, c) for c in fields%7D%0A
|
0929935874570fce5b64f3abbece781a4133b565
|
Disable flaky NaCl SRPC integration tests.
|
chrome/test/nacl_test_injection/buildbot_nacl_integration.py
|
chrome/test/nacl_test_injection/buildbot_nacl_integration.py
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
pwd = os.environ.get('PWD', '')
is_integration_bot = 'nacl-chrome' in pwd
# On the main Chrome waterfall, we may need to control where the tests are
# run.
# If there is serious skew in the PPAPI interface that causes all of
# the NaCl integration tests to fail, you can uncomment the
# following block. (Make sure you comment it out when the issues
# are resolved.) *However*, it is much preferred to add tests to
# the 'tests_to_disable' list below.
#if not is_integration_bot:
# return
tests_to_disable = []
# In general, you should disable tests inside this conditional. This turns
# them off on the main Chrome waterfall, but not on NaCl's integration bots.
# This makes it easier to see when things have been fixed NaCl side.
if not is_integration_bot:
# TODO(ncbray): Reenable when this issue is resolved.
# http://code.google.com/p/nativeclient/issues/detail?id=2091
tests_to_disable.append('run_ppapi_bad_browser_test')
# This thread safety stress test is flaky on at least Windows.
# See http://code.google.com/p/nativeclient/issues/detail?id=2124
# TODO(mseaborn): Reenable when this issue is resolved.
tests_to_disable.append('run_ppapi_ppb_var_browser_test')
    # The behavior of the URLRequest changed slightly and this test needs to be
# updated. http://code.google.com/p/chromium/issues/detail?id=94352
tests_to_disable.append('run_ppapi_ppb_url_request_info_browser_test')
if sys.platform == 'darwin':
# The following test is failing on Mac OS X 10.5. This may be
# because of a kernel bug that we might need to work around.
# See http://code.google.com/p/nativeclient/issues/detail?id=1835
# TODO(mseaborn): Remove this when the issue is resolved.
tests_to_disable.append('run_async_messaging_test')
# The following test fails on debug builds of Chromium.
# See http://code.google.com/p/nativeclient/issues/detail?id=2077
# TODO(mseaborn): Remove this when the issue is resolved.
tests_to_disable.append('run_ppapi_example_font_test')
script_dir = os.path.dirname(os.path.abspath(__file__))
test_dir = os.path.dirname(script_dir)
chrome_dir = os.path.dirname(test_dir)
src_dir = os.path.dirname(chrome_dir)
nacl_integration_script = os.path.join(
src_dir, 'native_client/build/buildbot_chrome_nacl_stage.py')
cmd = [sys.executable,
nacl_integration_script,
'--disable_tests=%s' % ','.join(tests_to_disable)] + args
sys.stdout.write('Running %s\n' % ' '.join(cmd))
sys.stdout.flush()
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
|
Python
| 0.000069
|
@@ -1701,16 +1701,514 @@
test')%0A%0A
+ # TODO(ncbray) why did these tests flake?%0A # http://code.google.com/p/nativeclient/issues/detail?id=2230%0A tests_to_disable.extend(%5B%0A 'run_pm_manifest_file_chrome_browser_test',%0A 'run_srpc_basic_chrome_browser_test',%0A 'run_srpc_hw_data_chrome_browser_test',%0A 'run_srpc_hw_chrome_browser_test',%0A 'run_srpc_manifest_file_chrome_browser_test',%0A 'run_srpc_nameservice_chrome_browser_test',%0A 'run_srpc_nrd_xfer_chrome_browser_test',%0A %5D)%0A%0A
if sys
|
58c2913d14a18e24761c50f16a2de09f452f9120
|
Fix Cleverbot.reset erroring without conversations
|
cleverbot/base.py
|
cleverbot/base.py
|
import pickle
from .utils import (GenericUnpickler, convo_property, ensure_file,
error_on_kwarg, get_slots)
class AttributeMixin(object):
__slots__ = ()
url = 'https://www.cleverbot.com/getreply'
def __getattr__(self, attr):
"""Allow access to the stored data through attributes."""
try:
return self.data[attr]
except KeyError:
message = "{!r} object has no attribute {!r}"
raise AttributeError(message.format(type(self).__name__, attr))
@property
def cs(self):
return self.data.get('cs')
@cs.setter
def cs(self, value):
self.data['cs'] = value
@cs.deleter
def cs(self):
self.data.pop('cs', None)
class CleverbotBase(AttributeMixin):
"""Base class for Cleverbot."""
def __init__(self, key, **kwargs): # Python 2 compatible keyword-only args
self.key = key
self.data = {}
if 'cs' in kwargs:
self.data['cs'] = kwargs.pop('cs')
self.timeout = kwargs.pop('timeout', None)
for tweak in ('tweak1', 'tweak2', 'tweak3'):
setattr(self, tweak, kwargs.pop(tweak, None))
self.conversations = None
error_on_kwarg(self.__init__, kwargs)
def __getstate__(self):
state = vars(self).copy()
del state['session']
return state
def __setstate__(self, state):
self.__init__(None) # Set the session
vars(self).update(state)
convos = self.conversations
if convos is None:
return
if isinstance(convos, dict):
convos = convos.values()
for convo in convos:
convo.session = self.session
def conversation(self, name, convo):
"""Initialize conversations if necessary and add the conversation to
it.
"""
if self.conversations is None:
self.conversations = {} if name is not None else []
if name is not None:
message = "Can't mix named conversations with nameless ones"
assert isinstance(self.conversations, dict), message
self.conversations[name] = convo
else:
message = "Can't mix nameless conversations with named ones"
assert isinstance(self.conversations, list), message
self.conversations.append(convo)
def reset(self):
"""Reset Cleverbot's stored data and all of its conversations."""
self.data = {}
for convo in self.conversations:
if isinstance(self.conversations, dict):
convo = self.conversations[convo]
convo.reset()
def save(self, file):
"""Save Cleverbot and all of its conversations into the specified file
object.
Arguments:
file: A filename or a file object that accepts bytes to save the
data to.
"""
with ensure_file(file, 'wb') as file:
pickle.dump(self, file, pickle.HIGHEST_PROTOCOL)
def load(self, file):
"""Load and replace Cleverbot's conversations with the previously saved
conversations from the file.
Arguments:
file: The filename or file object to load the saved conversations
from.
"""
cleverbot = load(type(self).__module__, file)
self.data = cleverbot.data
convos = cleverbot.conversations
self.conversations = convos
if convos is None:
return
if isinstance(convos, dict):
convos = convos.values()
for convo in convos:
convo.cleverbot = self
convo.session = self.session
class ConversationBase(AttributeMixin):
"""Base class for Conversation."""
__slots__ = ('cleverbot', 'data', '_key', '_timeout', '_tweak1', '_tweak2',
'_tweak3', 'session')
def __init__(self, cleverbot, **kwargs):
self.cleverbot = cleverbot
self.data = {}
for item in ('key', 'cs', 'timeout', 'tweak1', 'tweak2', 'tweak3'):
if item in kwargs:
setattr(self, item, kwargs.pop(item))
self.session = cleverbot.session
error_on_kwarg(self.__init__, kwargs)
def __getstate__(self):
return {item: getattr(self, item) for item in get_slots(type(self))
if hasattr(self, item) and item != 'session'}
def __setstate__(self, state):
for item, value in state.items():
setattr(self, item, value)
def reset(self):
self.data = {}
key = convo_property('key')
timeout = convo_property('timeout')
tweak1 = convo_property('tweak1')
tweak2 = convo_property('tweak2')
tweak3 = convo_property('tweak3')
class SayMixinBase(object):
__slots__ = ()
def _get_params(self, input, kwargs):
params = {
'key': self.key,
'input': input,
'cs': self.data.get('cs'),
'cb_settings_tweak1': self.tweak1,
'cb_settings_tweak2': self.tweak2,
'cb_settings_tweak3': self.tweak3,
'wrapper': 'cleverbot.py'
}
for tweak in ('tweak1', 'tweak2', 'tweak3'):
if tweak not in kwargs:
continue
setting = 'cb_settings_' + tweak
if setting in kwargs:
message = "Supplied both {!r} and {!r}"
raise TypeError(message.format(tweak, setting))
kwargs[setting] = kwargs.pop(tweak)
params.update(kwargs)
# aiohttp doesn't filter None values
return {key: value for key, value in params.items()
if value is not None}
def load(module, file):
with ensure_file(file, 'rb') as file:
return GenericUnpickler(file, module=module).load()
|
Python
| 0.000001
|
@@ -2495,28 +2495,24 @@
-for
convo
- in
+s =
self.co
@@ -2526,53 +2526,83 @@
ions
-:%0A if isinstance(self.conversation
+%0A if convos is None:%0A return%0A%0A if isinstance(convo
s, d
@@ -2623,45 +2623,61 @@
-
convo
+s
=
-self.conversations%5B
+convos.values()%0A for convo in
convo
-%5D
+s:
%0A
|
73f8895ae00f3d076c73bc49a03b870abb2a30cc
|
Fix typo
|
app/models.py
|
app/models.py
|
from django.db import models
import mongoengine
from mongoengine import Document, EmbeddedDocument
from mongoengine.fields import *
import os
# Create your models here.
class Greeting(models.Model):
when = models.DateTimeField('date created', auto_now_add=True)
USER = os.getenv('DATABASE_USER')
PASWORD = os.getenv('DATABASE_PASSWORD')
MONGODB_URI = "mongodb+srv://{}:{}@fikanotedb.ltkpy.mongodb.net/fikanotedb?retryWrites=true&w=majority".format(USER, PASWORD)
mongoengine.connect('fikanotedb', host=MONGODB_URI)
class Shownote(EmbeddedDocument):
url = URLField()
title = StringField()
date = DateTimeField()
class FikanoteDB(Document):
title = StringField()
number = IntField()
person = ListField(StringField())
agenda = StringField()
date = DateTimeField()
shownotes = ListField(EmbeddedDocumentField(Shownote))
meta = {'collection': 'fikanotedb'}
class AgendaDB(Document):
url = URLField()
title = StringField()
date = DateTimeField()
meta = {'collection': 'agendadb'}
|
Python
| 0.999999
|
@@ -299,16 +299,17 @@
ER')%0APAS
+S
WORD = o
@@ -372,13 +372,37 @@
v://
-%7B%7D:%7B%7D
+%22 + USER + %22:%22 + PASSWORD + %22
@fik
|
4ea456e991e64d00122d85030fa2161cca22b4a3
|
Remove dead import
|
appy/views.py
|
appy/views.py
|
from collections import defaultdict
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.context_processors import csrf
from django.http import JsonResponse
from django.shortcuts import redirect, render
from django.views.decorators.http import require_POST
from appy.models import Application, Position, Tag
from appy.utils import apply_for_position
def home(request):
return render(request, 'home.html')
@require_POST
def signup(request):
username = request.POST.get('username')
password = request.POST.get('password')
if username and password:
User.objects.create_user(username=username, password=password)
user = authenticate(username=username, password=password)
login(request, user)
return redirect('positions')
else:
return render(request, 'home.html', {'errors': 'Unable to create user'})
@require_POST
def login_view(request):
username = request.POST.get('username')
password = request.POST.get('password')
if username and password:
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return redirect('positions')
return render(request, 'home.html', {'errors': 'Unable to log in'})
def logout_view(request):
logout(request)
return redirect('home')
@login_required
def positions(request):
context = {}
if request.method == 'POST':
company = request.POST.get('company')
job_title = request.POST.get('job_title')
tag_search = request.POST.get('tag_search')
context.update({'company': company, 'job_title': job_title, 'tag_search': tag_search})
positions = search_positions(company, job_title, tag_search)
else:
positions = Position.objects.all()
applied_to = set([app.position for app in Application.objects.filter(user=request.user)])
for position in positions:
position.already_applied = position in applied_to
positions = sort_positions(positions, request.user)
context.update({'positions': positions})
return render(request, 'positions.html', context)
@login_required
def create_position(request):
if request.method == 'POST':
company = request.POST.get('company')
job_title = request.POST.get('job_title')
description = request.POST.get('description')
tags = []
tag_list = request.POST.get('tags')
for tag_name in tag_list.split(','):
tag_name = tag_name.strip()
try:
t = Tag.objects.get(description=tag_name)
except Tag.DoesNotExist:
t = Tag.objects.create(description=tag_name)
tags.append(t)
position = Position.objects.create(
company=company,
job_title=job_title,
description=description
)
for t in tags:
position.tags.add(t)
return render(request, 'create_position.html')
else:
return render(request, 'create_position.html')
def search_positions(company, job_title, tag_search):
positions = Position.objects.all()
if company:
positions = positions.filter(company__icontains=company)
if job_title:
positions = positions.filter(job_title__icontains=job_title)
if tag_search:
p_ids = set()
tags = Tag.objects.filter(description__icontains=tag_search)
for tag in tags:
for p in tag.position_set.all():
p_ids.add(p.id)
positions = positions.filter(id__in=p_ids)
return positions
def sort_positions(positions, user):
if Application.objects.filter(user=user).count() < 10:
return positions.order_by('-created_at')
user_tag_counts = defaultdict(int)
for app in Application.objects.filter(user=user).prefetch_related('position__tags'):
tags = app.position.tags.all()
for t in tags:
user_tag_counts[t.description] += 1
return sorted(positions, key=lambda p: recommendation_score(p, user_tag_counts), reverse=True)
def recommendation_score(position, user_tag_counts):
score = 0
tags = position.tags.all()
for t in tags:
score += user_tag_counts[t.description]
return score
@login_required
def applications(request):
applications = Application.objects.filter(user=request.user)
status_choices = Application.STATUS_CHOICES
return render(request, 'applications.html', {
'applications': applications,
'status_choices': status_choices,
})
@login_required
def apply(request):
user = request.user
position_id = request.POST.get('position_id')
position = Position.objects.get(id=position_id)
apply_for_position(position, user)
return JsonResponse({'success': True})
@login_required
def update_status(request):
app_id = request.POST.get('app_id')
new_status = request.POST.get('new_status')
app = Application.objects.get(user=request.user, id=app_id)
app.status = new_status
app.save()
return JsonResponse({'success': True})
@login_required
def delete_app(request):
app_id = request.POST.get('app_id')
app = Application.objects.get(user=request.user, id=app_id)
app.delete()
return JsonResponse({'success': True})
|
Python
| 0.000001
|
@@ -196,56 +196,8 @@
ser%0A
-from django.core.context_processors import csrf%0A
from
|
f181ef90e1a7a8e1c5676a4ffaf50ee8469305eb
|
Tweak armdecomp3.py
|
armdecomp3.py
|
armdecomp3.py
|
#!/usr/bin/env python3
import sys
from sys import argv, stdout
from os import SEEK_SET, SEEK_CUR, SEEK_END
from errno import EPIPE
from struct import pack, unpack
def bits(byte):
return ((byte >> 7) & 1,
(byte >> 6) & 1,
(byte >> 5) & 1,
(byte >> 4) & 1,
(byte >> 3) & 1,
(byte >> 2) & 1,
(byte >> 1) & 1,
(byte) & 1)
def decompress(indata):
"""Decompress LZSS-compressed bytes. Returns a bytearray."""
data = bytearray()
it = iter(indata)
def writebyte(b):
data.append(b)
def readbyte():
return next(it)
def readshort():
# big-endian
a = next(it)
b = next(it)
return (a << 8) | b
def copybyte():
data.append(next(it))
header = bytes(next(it) for _ in range(4))
assert header[0] == 0x10
decompressed_size, = unpack("<L", header[1:] + b"\x00")
while len(data) < decompressed_size:
b = readbyte()
if b == 0:
# dumb optimization
for _ in range(8):
copybyte()
continue
flags = bits(b)
for flag in flags:
if flag == 0:
try:
copybyte()
except StopIteration:
return data
elif flag == 1:
sh = readshort()
count = (sh >> 0xc) + 3
# +3 for overlays
# +1 for files
disp = (sh & 0xfff) + 3
for _ in range(count):
writebyte(data[-disp])
else:
raise ValueError(flag)
if decompressed_size <= len(data):
break
assert len(data) == decompressed_size
#extra = f.read()
#assert len(extra) == 0, repr(extra)
return data
def main(args):
f = open(args[0], "rb")
# grab the underlying binary stream
stdout = sys.stdout.detach()
# the compression header is at the end of the file
f.seek(-8, SEEK_END)
header = f.read(8)
# decompression goes backwards.
# end < here < start
# end_delta == here - decompression end address
# start_delta == decompression start address - here
end_delta, start_delta = unpack("<LL", header)
filelen = f.tell()
padding = end_delta >> 0x18
end_delta &= 0xFFFFFF
decompressed_size = start_delta + end_delta
f.seek(filelen - end_delta, SEEK_SET)
header = b'\x10' + pack("<L", decompressed_size)[:3]
data = bytearray()
data.extend(f.read(end_delta - padding))
data.extend(header[::-1])
data.reverse()
#stdout.write(data.tostring())
uncompressed_data = decompress(data)
uncompressed_data.reverse()
f.seek(0, SEEK_SET)
# first we write up to the portion of the file which was "overwritten" by
# the decompressed data, then the decompressed data itself.
# i wonder if it's possible for decompression to overtake the compressed
# data, so that the decompression code is reading its own output...
try:
stdout.write(f.read(filelen - end_delta))
stdout.write(uncompressed_data)
except IOError as e:
if e.errno == EPIPE:
# don't complain about a broken pipe
pass
else:
raise
def main2(args):
f = open(args[0], "rb")
data = f.read()
stdout = sys.stdout.detach()
stdout.write(decompress(data))
if __name__ == '__main__':
main(argv[1:])
#main2(argv[1:])
|
Python
| 0
|
@@ -419,24 +419,43 @@
press(indata
+, decompressed_size
):%0A %22%22%22De
@@ -813,145 +813,8 @@
))%0A%0A
- header = bytes(next(it) for _ in range(4))%0A assert header%5B0%5D == 0x10%0A decompressed_size, = unpack(%22%3CL%22, header%5B1:%5D + b%22%5Cx00%22)%0A%0A
@@ -2366,65 +2366,8 @@
T)%0A%0A
- header = b'%5Cx10' + pack(%22%3CL%22, decompressed_size)%5B:3%5D%0A
@@ -2385,16 +2385,16 @@
array()%0A
+
data
@@ -2434,38 +2434,8 @@
g))%0A
- data.extend(header%5B::-1%5D)%0A
@@ -2482,24 +2482,24 @@
ostring())%0A%0A
-
uncompre
@@ -2521,24 +2521,43 @@
ompress(data
+, decompressed_size
)%0A uncomp
|
a41c66089005388362a51edb5967f32b3a42c4ac
|
added error message in case input FMx is empty
|
commands/feature_matrix_construction/main/addDiscreteFeat.py
|
commands/feature_matrix_construction/main/addDiscreteFeat.py
|
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
import sys
import addIndicators
import miscIO
import miscTCGA
import tsvIO
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
NA_VALUE = -999999
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def getFeatureList(featFile):
featDict = {}
try:
fh = file(featFile)
for aLine in fh:
aLine = aLine.strip()
tokenList = aLine.split('\t')
if (len(tokenList) == 2):
featDict[tokenList[0]] = int(tokenList[1])
else:
featDict[tokenList[0]] = 4
except:
doNothing = 1
return (featDict)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def addDiscreteFeature(dataD, featName, numLevels):
print " "
print " in addDiscreteFeature ... "
# the feature matrix has thousands of features x hundreds of patients
rowLabels = dataD['rowLabels']
colLabels = dataD['colLabels']
numRow = len(rowLabels)
numCol = len(colLabels)
dataMatrix = dataD['dataMatrix']
print " %d rows x %d columns " % (numRow, numCol)
# print rowLabels[:5]
# print rowLabels[-5:]
# find the specific feature ...
kFind = []
for iRow in range(numRow):
if (rowLabels[iRow].find(featName) >= 0):
kFind += [iRow]
if (len(kFind) == 0):
print " ERROR ... did not find this feature <%s> " % featName
print " "
return (dataD)
elif (len(kFind) > 1):
print " ERROR ... found too many similar features "
print featName
print kFind
for kk in kFind:
print rowLabels[kk]
print " "
return (dataD)
iRow = kFind[0]
origRowLabel = rowLabels[iRow]
if (not origRowLabel.startswith("N:")):
print " ERROR ... does not make sense to discretized a non-continuous feature "
print origRowLabel
print " "
return (dataD)
print " --> discretizing this feature : <%s> " % origRowLabel
curVec = []
for iCol in range(numCol):
curVal = dataMatrix[iRow][iCol]
if (curVal != NA_VALUE):
curVec += [curVal]
curVec.sort()
# print curVec[:5]
# print curVec[-5:]
# print " "
nPos = len(curVec)
# print " "
# print nPos
numCat = numLevels
print " "
print " category thresholds, counts, etc: "
threshVals = [0] * numCat
for iCat in range(numCat - 1):
iPos = int(float(nPos) * float(iCat + 1) / float(numCat) - 0.5)
if (iPos < 0):
iPos = 0
if (iPos >= nPos):
iPos = nPos - 1
threshVals[iCat] = curVec[iPos]
print iCat, iPos, threshVals[iCat]
threshVals[-1] = curVec[-1] + 0.1
print numCat - 1, nPos, threshVals[-1]
# in case we have a bunch of identical values, make sure that the
# threshold values are all unique and then use that unique list (uTV)
uTV = []
for iTV in threshVals:
if (iTV not in uTV):
uTV += [iTV]
print " --> uTV : ", uTV
catCounts = {}
newVec = ["NA"] * numCol
for iCol in range(numCol):
curVal = dataMatrix[iRow][iCol]
if (curVal != NA_VALUE):
iVal = 0
while (uTV[iVal] < curVal):
iVal += 1
newVec[iCol] = "C%d" % iVal
try:
catCounts[newVec[iCol]] += 1
except:
catCounts[newVec[iCol]] = 1
print " "
print " catCounts : ", len(catCounts), catCounts
print " "
if (origRowLabel[-1] == ":"):
newName = "C:" + origRowLabel[2:] + "cat"
else:
newName = "C:" + origRowLabel[2:] + "_cat"
# now we have the new data ...
print " "
print " adding new feature: ", newName
print " "
newRowLabels = rowLabels + [newName]
print len(rowLabels), len(newRowLabels)
newMatrix = dataMatrix + [newVec]
print len(dataMatrix), len(newMatrix)
newD = {}
newD['rowLabels'] = newRowLabels
newD['colLabels'] = colLabels
newD['dataType'] = dataD['dataType']
newD['dataMatrix'] = newMatrix
return (newD)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
if __name__ == "__main__":
if (1):
if (len(sys.argv) == 4):
inFile = sys.argv[1]
outFile = sys.argv[2]
featFile = sys.argv[3]
else:
print " "
print " Usage: %s <input TSV file> <output TSV file> <featList file> "
print " "
sys.exit(-1)
print " "
print " Running : %s %s %s %s " % (sys.argv[0], sys.argv[1], sys.argv[2], sys.argv[3])
print " "
print " "
# first read in the feature names list
featDict = getFeatureList(featFile)
# now read in the input feature matrix ...
dataD = tsvIO.readTSV(inFile)
# loop over specified features and add a discretized version of each one
for aFeat in featDict.keys():
numLevels = featDict[aFeat]
print " "
print " "
print " **************************************** "
print aFeat, numLevels
print " **************************************** "
print " "
dataD = addDiscreteFeature(dataD, aFeat, numLevels)
rowLabels = dataD['rowLabels']
tmpMatrix = dataD['dataMatrix']
(rowLabels, tmpMatrix) = addIndicators.addIndicators4oneFeat(
rowLabels[-1], rowLabels, tmpMatrix)
# and write the matrix back out
tsvIO.writeTSV_dataMatrix(dataD, 0, 0, outFile)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
|
Python
| 0.999045
|
@@ -5063,24 +5063,132 @@
SV(inFile)%0A%0A
+ if (len(tmpD) == 0):%0A print %22 in addDiscreteFeat ... no input data ... nothing to do here ... %22%0A%0A
# loop o
|
3fce35932346eaf74db97a5325518b2d13575b4f
|
debug and keep ids as list
|
xmlrpc_operation_invoice/operation.py
|
xmlrpc_operation_invoice/operation.py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import xmlrpclib
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class XmlrpcOperation(orm.Model):
''' Model name: XmlrpcOperation
'''
_inherit = 'xmlrpc.operation'
# ------------------
# Override function:
# ------------------
def execute_operation(self, cr, uid, operation, parameter, context=None):
        ''' Virtual function that will be overridden
            operation: 'invoice' in this module
context: xmlrpc context dict
'''
try:
if operation != 'invoice':
# Super call for other cases:
return super(XmlrpcOperation, self).execute_operation(
cr, uid, operation, parameter, context=context)
server_pool = self.pool.get('xmlrpc.server')
xmlrpc_server = server_pool.get_xmlrpc_server(
cr, uid, context=context)
res = xmlrpc_server.execute('invoice', parameter)
if res.get('error', False):
_logger.error(res['error'])
# TODO raise
# TODO confirm export!
        except Exception:
_logger.error(sys.exc_info())
raise osv.except_osv(
_('Connect error:'), _('XMLRPC connecting server'))
return res
class AccountInvoice(orm.Model):
''' Add export function to invoice obj
'''
_inherit = 'account.invoice'
def dummy_button(self, cr, uid, ids, context=None):
        ''' Used to show an icon as a button
'''
return True
def xmlrpc_export_invoice(self, cr, uid, ids, context=None):
''' Export current invoice
# TODO manage list of invoices?
'''
def clean_description(value):
            ''' Remove \n and \t and return the first 40 chars
'''
value = value.replace('\n', ' ')
value = value.replace('\t', ' ')
return value[:40]
assert len(ids) == 1, 'No multi export for now' # TODO remove!!!
# TODO use with validate trigger for get the number
parameter = {}
# Generate string for export file:
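        # Fixed-width record layout, one record per invoice line: header
        # block (doc type, serie, number, date, transport reason, customer,
        # agent), detail block (row type, code, description, UOM, quantity,
        # price, tax, commission, discounts, account), foot block (payment
        # code), terminated by a Windows CR/LF.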
        mask = '%s%s%s%s' % (  # 3 blocks + CR/LF, split for readability:
'%-2s%-2s%-6s%-8s%-2s%-8s%-8s', #header
'%-1s%-16s%-60s%-2s%10.2f%10.3f%-5s%-5s%-50s%-10s%-8s', #row
'%-3s', #foot
'\r\n', # Win CR
)
parameter['input_file_string'] = ''
for invoice in self.browse(cr, uid, ids, context=context):
if not invoice.number:
raise osv.except_osv(
_('XMLRPC sync error'),
_('Invoice must be validated!'))
for line in invoice.invoice_line:
parameter['input_file_string'] += self.pool.get(
'xmlrpc.server').clean_as_ascii(
mask % (
# -------------------------------------------------
# Header:
# -------------------------------------------------
# Doc (2)
invoice.journal_id.account_code,
# Serie (2)
invoice.journal_id.account_serie,
# N.(6N) # val.
int(invoice.number.split('/')[-1]),
# Date (8)
'%s%s%s' % (
invoice.date_invoice[:4],
invoice.date_invoice[5:7],
invoice.date_invoice[8:10],
),
# Transport reason (2)
invoice.transportation_reason_id.import_id or '',
# Customer code (8)
invoice.partner_id.sql_customer_code or '',
# Agent code (8)
invoice.mx_agent_id.sql_agent_code or \
invoice.mx_agent_id.sql_supplier_code or '',
# -------------------------------------------------
# Detail:
# -------------------------------------------------
                        # Row type, 1 char (D, R, T)
'R',
# Code (16)
line.product_id.default_code or '',
# Description (60)
clean_description(
line.name if line.use_text_description \
else line.product_id.name),
# UOM (2)
line.product_id.uom_id.account_ref or '',
# Q. 10N (2 dec.)
line.quantity,
# Price 10N (3 dec.)
line.price_unit,
# Tax (5)
line.invoice_line_tax_id[0].account_ref \
if line.invoice_line_tax_id else '',
# Provv. (5)
0,
# Discount (50)
line.multi_discount_rates or '',
# Discount numeric (10)
line.discount or '',
# Account (8)
line.account_id.account_ref or '',
# -------------------------------------------------
# Foot:
# -------------------------------------------------
                        # Payment code (3)
invoice.payment_term.import_id \
if invoice.payment_term else '',
# TODO bank
))
res = self.pool.get('xmlrpc.operation').execute_operation(
cr, uid, 'invoice', parameter=parameter, context=context)
result_string_file = res.get('result_string_file', False)
if result_string_file:
if result_string_file.startswith('OK'):
# TODO test if number passed if for correct invoice number!
self.write(cr, uid, ids[0], {
'xmlrpc_sync': True,
}, context=context)
return True
# TODO write better error
raise osv.except_osv(
_('Sync error:'),
            _('Cannot sync with accounting! (return result not present)'),
)
return False
_columns = {
        'xmlrpc_sync': fields.boolean('XMLRPC synchronized'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0
|
@@ -8002,16 +8002,60 @@
('OK'):%0A
+ import pdb; pdb.set_trace()%0A
@@ -8161,19 +8161,16 @@
uid, ids
-%5B0%5D
, %7B%0A
@@ -8580,25 +8580,16 @@
%7D
-%0A
%0A%0A# vim:
|
69d9a36eb9d4536d9999395016759ec0ba23ad82
|
Fix playlist preview function
|
zou/app/services/playlists_service.py
|
zou/app/services/playlists_service.py
|
from zou.app.models.playlist import Playlist
from zou.app.models.preview_file import PreviewFile
from zou.app.utils import fields
from zou.app.services import shots_service, tasks_service
from zou.app.services.exception import PlaylistNotFoundException
def all_playlists_for_project(project_id):
return fields.serialize_value(Playlist.get_all_by(project_id=project_id))
def get_playlist_with_preview_file_revisions(playlist_id):
playlist = Playlist.get(playlist_id)
if playlist is None:
raise PlaylistNotFoundException()
playlist_dict = playlist.serialize()
if playlist_dict["shots"] is None:
playlist_dict["shots"] = []
for shot in playlist_dict["shots"]:
shot["preview_files"] = shots_service.get_preview_files_for_shot(
shot["shot_id"]
)
return playlist_dict
def get_preview_files_for_shot(shot_id):
tasks = tasks_service.get_tasks_for_shot(shot_id)
previews = {}
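    # map each task type to its preview file revisions, newest first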
for task in tasks:
preview_files = PreviewFile.query \
.filter_by(task_id=task["id"]) \
.order_by(PreviewFile.revision.desc()) \
.all()
task_type_id = task["task_type_id"]
if len(preview_files) > 0:
previews[task_type_id] = [
{
"id": str(preview_file.id),
"revision": preview_file.revision
}
for preview_file in preview_files
            ]  # Do not add too many fields, to avoid building overly large responses
return previews
|
Python
| 0.000019
|
@@ -737,22 +737,8 @@
%5D =
-shots_service.
get_
|
eb0dda4cc79ac9383038095f648ef8c16e374611
|
Fix bug
|
storjreports/send_storj_reports.py
|
storjreports/send_storj_reports.py
|
#!/usr/bin/env python3
import requests
import json
import os
import re
import uuid
import subprocess
import pkg_resources
import multiprocessing
from os import scandir
SERVER_UUID = None
STORJSHARE_PATH = None
STORJ_WINDOWS_VERSION = '0.2.0'
def examine_configs(path, windows=False):
if windows == False:
version = pkg_resources.get_distribution("storjdash").version
else:
version = STORJ_WINDOWS_VERSION
#version = '0.6.3'
if windows == False:
storj_node_pairs = examine_storjstatus()
else:
storj_node_pairs = None
#storj_node_pairs = None
#print(storj_node_pairs)
report_uuid = str(uuid.uuid4())
potential_config_files = os.scandir(path)
mp_args = []
for config_file in potential_config_files:
if config_file.is_file():
mp_args.append([config_file.name, config_file.path, report_uuid, storj_node_pairs, version, windows])
pool = multiprocessing.Pool()
results = pool.starmap(send_report, mp_args)
print(results)
print('All reports sent')
def get_size_of_path(path):
try:
details = scandir(path)
except FileNotFoundError:
print('Path does not exist: ' + path)
return 0
except PermissionError:
        print('Access Denied: ' + path)
return 0
size = 0
for item in details:
if item.is_dir():
try:
size += get_size_of_path(item.path)
except PermissionError:
pass
else:
try:
size += item.stat().st_size
except FileNotFoundError:
pass
except PermissionError:
pass
return size
def examine_storjstatus():
env = os.environ
env['PATH'] = os.environ.get('PATH') + ':' + STORJSHARE_PATH
proc = subprocess.Popen(['storjshare', 'status'], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
results = proc.communicate()
#print(results)
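    # the status table is drawn with UTF-8 box-drawing characters, all of
    # which start with the byte 0xe2, so splitting on that byte isolates
    # the individual table cells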
cells = results[0].split(b'\xe2')
#print(cells)
node_pairs = {}
for index, cell in enumerate(cells):
if '/' in str(cell):
#print(cells[index-15:index+1])
node_id = None
if 'running' in str(cells[index-12]):
node_id = cells[index-13].split(b'\x94\x82')[1]
else:
node_id = cells[index-12].split(b'\x94\x82')[1]
node_path = cells[index].split(b'\x86\x92')[1]
node_id = node_id.strip().decode('utf-8')
node_path = node_path.strip().decode('utf-8')
node_pairs[node_path] = node_id
return node_pairs
def send_report(config_file_name, config_file_path, report_uuid, storj_node_pairs, version, windows):
node_name = config_file_name.split('.')[0]
try:
open_config_file = open(config_file_path, 'r', encoding='utf-8')
config_contents = open_config_file.read()
except UnicodeDecodeError:
try:
open_config_file = open(config_file_path, 'r', encoding='latin-1')
config_contents = open_config_file.read()
except UnicodeDecodeError:
try:
open_config_file = open(config_file_path, 'r')
config_contents = open_config_file.read()
except UnicodeDecodeError:
print('Unable to read config file: ' + config_file_path)
return
config_contents = re.sub(r'\\\n', '', config_contents)
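    # storjshare configs are JSON with //-style comments; strip them, but
    # skip lines containing URLs where "//" is part of the scheme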
regexed_config = ''
for line in config_contents.split('\n'):
if 'https' not in line:
regexed_config += (re.sub(r'//.*', '', line) + '\n')
#print(regexed_config)
try:
json_config = json.loads(regexed_config)
except json.JSONDecodeError:
try:
regexed_config = re.sub(r'https.*\n', '"', regexed_config)
json_config = json.loads(regexed_config)
except json.JSONDecodeError:
print('Unable to decode JSON file: ' + config_file.name)
return False
try:
storage_path = json_config['storagePath']
capacity_line = json_config['storageAllocation']
except KeyError:
print('Missing Keys in Config File')
return False
current_size = get_size_of_path(storage_path)
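    # storageAllocation may be an int (bytes) or a string such as "500GB"
    # or "2TB"; normalise it to bytes using decimal units (1 GB = 10**9 B)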
if type(capacity_line) != int:
if 'GB' in capacity_line:
capacity_gb = float(capacity_line.split('GB')[0])
capacity = capacity_gb * 1000 * 1000000
elif 'TB' in capacity_line:
capacity_gb = float(capacity_line.split('TB')[0])
capacity = float(capacity_gb * 1000 * 1000 * 1000000)
else:
capacity = float(capacity_line.split('B')[0])
else:
capacity = float(capacity_line)
report_json = {
'server_uuid': SERVER_UUID,
'report_uuid': report_uuid,
'node_name': node_name,
'current_size': current_size,
'node_capacity': capacity,
'version': version
}
if windows is False:
if storage_path in storj_node_pairs.keys():
report_json['storj_node_id'] = storj_node_pairs[storage_path]
else:
report_json['storj_node_id'] = node_name
print('Sending report for node ' + node_name)
print(report_json)
if windows is True:
try:
import servicemanager
            servicemanager.LogInfoMsg('Sending report for node ' + node_name)
servicemanager.LogInfoMsg(str(report_json))
except ImportError:
pass
requests.post('https://www.storjdash.com/report', json=report_json)
return report_json
def main():
global SERVER_UUID
global STORJSHARE_PATH
try:
settings_file = open('/etc/storjdash.json', 'r')
settings_contents = settings_file.read()
try:
settings = json.loads(settings_contents)
SERVER_UUID = settings['server_uuid']
STORJSHARE_PATH = settings['storjshare_path']
examine_configs(settings['configs_directory'])
except KeyError:
print('Invalid config file. Exiting.')
except json.JSONDecodeError:
print('Corrupted config file. Exiting.')
except FileNotFoundError:
print('Settings File Not Found. Exiting.')
exit(1)
def windows_main():
global SERVER_UUID
try:
import winreg
local_machine_reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
        storj_dash_settings = winreg.OpenKey(local_machine_reg, r'Software\StorJDash\StorJDashClient\Settings')
try:
SERVER_UUID = winreg.QueryValueEx(storj_dash_settings, 'serverID')[0]
configs_directory = winreg.QueryValueEx(storj_dash_settings, 'configPath')[0]
print(SERVER_UUID)
print(configs_directory)
examine_configs(configs_directory, windows=True)
except FileNotFoundError:
try:
import servicemanager
servicemanager.LogInfoMsg('Registry Keys Missing. Server not properly setup.')
exit(1)
except ImportError:
exit(1)
except ImportError:
print('Unable to import winreg. Are you on the right OS?')
exit(1)
|
Python
| 0.000001
|
@@ -3967,25 +3967,25 @@
config_file
-.
+_
name)%0A
|
75f28330cd5cf0eea2ec99d8c3f9bf53de18d46c
|
correct typo
|
aot/config.py
|
aot/config.py
|
import logging
import toml
from os.path import exists
class Config:
CONF_FILE_TEMPLATE = 'config/config.{type}.toml'
def __init__(self):
self._config = None
def __getitem__(self, key):
if self._config is None:
raise RuntimeError(
'Configuration is not loaded. '
'Call load_config(type) before trying to use the coniguration',
)
else:
return self._config[key]
def load_config(self, type, version='latest'):
config_path = self.CONF_FILE_TEMPLATE.format(type=type)
if type == 'dev' and not exists(config_path):
docker_config_file = self.CONF_FILE_TEMPLATE.format(type='docker')
logging.info(f'Note: {config_path} not found, using {docker_config_file}')
config_path = docker_config_file
with open(config_path, 'r') as config_file:
self._config = toml.load(config_file)
self._set_version_in_socket_name('api', version)
self._set_version_in_socket_name('cache', version)
def _set_version_in_socket_name(self, section_name, version):
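        # fill in the optional {version} placeholder in the configured
        # socket name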
socket = self._config[section_name].get('socket', None)
if socket:
socket = socket.format(version=version)
self._config[section_name]['socket'] = socket
config = Config()
|
Python
| 0.000888
|
@@ -384,16 +384,17 @@
the con
+f
iguratio
|
f2b49f524319cc6df2f6fcaabff114cc9156faf7
|
make 'urls' to be consistent.
|
OIPA/api/dataset/serializers.py
|
OIPA/api/dataset/serializers.py
|
from django.urls import reverse
from rest_framework.serializers import (
HyperlinkedIdentityField, HyperlinkedRelatedField, ModelSerializer,
SerializerMethodField
)
from api.generics.serializers import DynamicFieldsModelSerializer
from iati.models import Activity
from iati_synchroniser.models import Dataset, DatasetNote, Publisher
class DatasetNoteSerializer(ModelSerializer):
class Meta:
model = DatasetNote
fields = (
'model',
'iati_identifier',
'exception_type',
'field',
'message',
'line_number',
'variable')
class SimplePublisherSerializer(DynamicFieldsModelSerializer):
url = HyperlinkedIdentityField(view_name='publishers:publisher-detail')
class Meta:
model = Publisher
fields = (
'id',
'url',
'publisher_iati_id',
'display_name',
'name')
class SimpleDatasetSerializer(DynamicFieldsModelSerializer):
url = HyperlinkedIdentityField(view_name='datasets:dataset-detail')
publisher = HyperlinkedRelatedField(
view_name='publishers:publisher-detail',
read_only=True)
type = SerializerMethodField()
class Meta:
model = Dataset
fields = (
'id',
'iati_id',
'type',
'url',
'name',
'title',
'filetype',
'publisher',
'source_url',
'iati_version',
'added_manually',
)
def get_type(self, obj):
return obj.get_filetype_display()
class DatasetSerializer(DynamicFieldsModelSerializer):
url = HyperlinkedIdentityField(view_name='datasets:dataset-detail')
publisher = SimplePublisherSerializer()
filetype = SerializerMethodField()
activities = SerializerMethodField()
activity_count = SerializerMethodField()
    notes = HyperlinkedIdentityField(
        view_name='datasets:dataset-notes')
internal_url = SerializerMethodField()
class Meta:
model = Dataset
fields = (
'id',
'iati_id',
'url',
'name',
'title',
'filetype',
'publisher',
'source_url',
'activities',
'activity_count',
'activities_count_in_xml',
'activities_count_in_database',
'date_created',
'date_updated',
'last_found_in_registry',
'iati_version',
'sha1',
'note_count',
'notes',
'added_manually',
'is_parsed',
'export_in_progress',
'parse_in_progress',
'internal_url'
)
def get_filetype(self, obj):
return obj.get_filetype_display()
def get_activities(self, obj):
request = self.context.get('request')
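        # build an absolute link to the activity list, filtered down to
        # this dataset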
url = request.build_absolute_uri(reverse('activities:activity-list'))
return url + '?dataset=' + str(obj.id)
def get_activity_count(self, obj):
return Activity.objects.filter(dataset=obj.id).count()
def get_internal_url(self, obj):
request = self.context.get('request')
# Get internal url from the XML file in the local static folder
internal_url = obj.get_internal_url()
if internal_url:
return request.build_absolute_uri(internal_url)
return None
|
Python
| 0.000002
|
@@ -3109,43 +3109,257 @@
re
-turn url + '?dataset=' + str(obj.id
+quest_format = self.context.get('request').query_params.get('format')%0A return url + '?dataset=' + str(obj.id) + '&format=%7B' %5C%0A 'request_format%7D'.format(%0A request_format=request_format
)%0A%0A
|
fa6c7b32284bc4159e95b7bc339dab7517b2c255
|
add sql example
|
client/ReadAir.py
|
client/ReadAir.py
|
# -*- coding: utf-8 -*-
import serial, time, MySQLdb, re
from socketIO_client import SocketIO, LoggingNamespace
# open a mysql connection
conn=MySQLdb.connect(host="localhost",user="airnow",passwd="password",db="airnow",charset="utf8")
''' SQL to create table:
CREATE TABLE IF NOT EXISTS `air_logs` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`pm25` float NOT NULL,
`aqi` int(11) NOT NULL,
  `time` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
'''
sql = "INSERT INTO air_logs(`pm25`,`aqi`,`time`) VALUES(%s,%s,NOW())"
t = serial.Serial("com4", 2400) # serial port and baudrate
i = 0
with SocketIO('localhost', 8000, LoggingNamespace) as socketIO: # connect socket.io server
while True:
i = i + 1
        line = t.readline() # read one reading from serial port
        socketIO.emit('airnow', line) # raise event to socket.io server
# record data to mysql
        if i == 30: # after ~30 reads (about 30 seconds), insert one record into the database
i = 0 # reset counter
cursor = conn.cursor()
            vals = re.split('[:; ]', line) # the line from the serial port looks like: "PM2.5:11.53; AQI:15;"
param = (vals[1], vals[4]) # put PM2.5 value and AQI value to param
n = cursor.execute(sql, param) # execute the sql query
cursor.execute("commit")
#print str #Debug
cursor.close()
# close mysql connection
conn.close()
|
Python
| 0.000243
|
@@ -114,131 +114,131 @@
ce%0A%0A
-# open a mysql connection%0Aconn=MySQLdb.connect(host=%22localhost%22,user=%22airnow%22,passwd=%22password%22,db=%22airnow%22,charset=%22utf8%22)
+%0A''' SQL to create database:%0ACREATE DATABASE IF NOT EXISTS %60airnow%60 DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci;%0A'''
%0A%0A''
@@ -522,16 +522,139 @@
8;%0A'''%0A%0A
+# open mysql connection%0Aconn=MySQLdb.connect(host=%22localhost%22,user=%22airnow%22,passwd=%22password%22,db=%22airnow%22,charset=%22utf8%22)%0A%0A
sql = %22I
|
2f2861f153d0ba0d088ffe95b196b4154b59ce31
|
Replace constants with literal value.
|
seqr/management/commands/check_bam_cram_paths_tests.py
|
seqr/management/commands/check_bam_cram_paths_tests.py
|
import mock
from io import BytesIO
from django.core.management import call_command
from django.test import TestCase
EXPECTED_EXCEPTION_MSG = 'Error at /readviz/NA19675.cram (Individual: NA19675_1): Error accessing "/readviz/NA19675.cram" \n---- DONE ----\nChecked 1 samples\n1 failed samples: NA19675_1\n'
EXPECTED_NORMAL_MSG = 'Error at /readviz/NA19675.cram (Individual: NA19675_1): Error accessing "/readviz/NA19675.cram" \n---- DONE ----\nChecked 1 samples\n1 failed samples: NA19675_1\n'
class CheckBamCramPathsTest(TestCase):
fixtures = ['users', '1kg_project']
@mock.patch('seqr.views.utils.dataset_utils.validate_alignment_dataset_path')
def test_normal_command(self, mock_validate_path):
mock_validate_path.return_value = ""
out = BytesIO()
call_command('check_bam_cram_paths', u'1kg project n\u00e5me with uni\u00e7\u00f8de', stdout=out)
self.assertEqual(EXPECTED_NORMAL_MSG, out.getvalue())
@mock.patch('seqr.views.utils.dataset_utils.validate_alignment_dataset_path')
def test_exception_command(self, mock_validate_path):
mock_validate_path.side_effect = Exception('Error accessing "/readviz/NA19675.cram"')
out = BytesIO()
call_command('check_bam_cram_paths', u'1kg project n\u00e5me with uni\u00e7\u00f8de', stdout=out)
self.assertEqual(EXPECTED_EXCEPTION_MSG, out.getvalue())
|
Python
| 0.000003
|
@@ -114,385 +114,8 @@
ase%0A
-EXPECTED_EXCEPTION_MSG = 'Error at /readviz/NA19675.cram (Individual: NA19675_1): Error accessing %22/readviz/NA19675.cram%22 %5Cn---- DONE ----%5CnChecked 1 samples%5Cn1 failed samples: NA19675_1%5Cn'%0AEXPECTED_NORMAL_MSG = 'Error at /readviz/NA19675.cram (Individual: NA19675_1): Error accessing %22/readviz/NA19675.cram%22 %5Cn---- DONE ----%5CnChecked 1 samples%5Cn1 failed samples: NA19675_1%5Cn'%0A
%0A%0Acl
@@ -534,28 +534,198 @@
ual(
-EXPECTED_NORMAL_MSG,
+'Error at /readviz/NA19675.cram (Individual: NA19675_1): Error accessing %22/readviz/NA19675.cram%22 %5Cn---- DONE ----%5CnChecked 1 samples%5Cn1 failed samples: NA19675_1%5Cn',%0A
out
@@ -1132,31 +1132,198 @@
ual(
-EXPECTED_EXCEPTION_MSG,
+'Error at /readviz/NA19675.cram (Individual: NA19675_1): Error accessing %22/readviz/NA19675.cram%22 %5Cn---- DONE ----%5CnChecked 1 samples%5Cn1 failed samples: NA19675_1%5Cn',%0A
out
|
66f2e9cc8085f51348c797d5a6a2b011370e4c2f
|
Edit method for pages
|
fudcon/ui/backend/views.py
|
fudcon/ui/backend/views.py
|
# -*- coding: utf-8 -*-
from flask import (Blueprint,
redirect, render_template,
url_for, flash)
from fudcon.app import is_fudcon_admin, app
from fudcon.database import db
from fudcon.modules.contents.forms import AddPage
from fudcon.modules.contents.models import Content
bp = Blueprint('admin', __name__, url_prefix='/admin')
items_per_page = app.config['ITEMS_PER_PAGE']
@bp.route('/', methods=['GET', 'POST'])
@is_fudcon_admin
def index():
""" Admin blueprint for this application
"""
return render_template('backend/index.html',
title='Administration')
@bp.route('/pages', methods=['GET','POST'])
@bp.route('/pages/<int:page>', methods=['GET', 'POST'])
@is_fudcon_admin
def pages(page=1):
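    # paginate(page, per_page, error_out=False): out-of-range pages return
    # an empty page instead of raising a 404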
paginate_params = (page, items_per_page, False)
queryset = Content.query.paginate(*paginate_params)
return render_template('backend/pages.html',
title='List pages',
pages=queryset)
@bp.route('/pages/add', methods=['GET', 'POST'])
def add_page():
""" Add page to the application
"""
form = AddPage()
action = url_for('admin.add_page')
if form.validate_on_submit():
content = Content(title=form.title.data,
description=form.description.data,
content_type=form.content_type.data,
is_on_user_menu=form.is_on_user_menu.data,
tag=form.tag.data,
active=form.active.data)
db.session.add(content)
db.session.commit()
flash('Page created')
return redirect(url_for('admin.page'))
return render_template('backend/pages_actions.html',
form=form,
title='Add page',
action=action)
|
Python
| 0.000001
|
@@ -408,16 +408,17 @@
PAGE'%5D%0A%0A
+%0A
@bp.rout
@@ -669,16 +669,17 @@
=%5B'GET',
+
'POST'%5D)
@@ -836,17 +836,16 @@
eryset =
-
Content
@@ -882,13 +882,8 @@
ms)%0A
- %0A
@@ -1792,16 +1792,16 @@
m=form,%0A
-
@@ -1879,8 +1879,649 @@
action)%0A
+%0A%0A@bp.route('/pages/edit/%3Cint:page_id%3E', methods=%5B'GET', 'POST'%5D)%0A@is_fudcon_admin%0Adef edit_page(page_id):%0A query_edit_page = Content.query.filter(Content.id ==%0A page_id).first_or_404()%0A form = AddPage(obj=query_edit_page)%0A action = url_for('admin.edit_page', page_id=page_id)%0A if form.validate_on_submit():%0A form.populate_obj(query_edit_page)%0A db.session.commit()%0A flash('Page edited')%0A return redirect(url_for('admin.page'))%0A return render_template('backend/pages_actions',%0A form=form,%0A action=action)%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.