commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
9358060c648c0ee71498f173dcbf6fc839ba6ff8
|
Update expected release date
|
src/penn_chime/constants.py
|
src/penn_chime/constants.py
|
"""Constants."""
from datetime import date
"""
This reflects a date from which previously-run reports will no
longer match current results, indicating when users should
re-run their reports
"""
CHANGE_DATE = date(year=2020, month=4, day=6)
VERSION = 'v1.1.3'
DATE_FORMAT = "%b, %d" # see https://strftime.org
DOCS_URL = "https://code-for-philly.gitbook.io/chime"
EPSILON = 1.0e-7
FLOAT_INPUT_MIN = 0.0001
FLOAT_INPUT_STEP = 0.1
|
Python
| 0
|
@@ -236,9 +236,9 @@
day=
-6
+8
)%0AVE
|
da6c8157e688c8c721bd66e5779ce6f550a5a7e2
|
remove useless code in PathPayment
|
stellar_sdk/operation/path_payment.py
|
stellar_sdk/operation/path_payment.py
|
import warnings
from decimal import Decimal
from typing import List, Union
from .operation import Operation
from ..asset import Asset
from ..keypair import Keypair
from ..xdr import Xdr
from ..strkey import StrKey
from .utils import check_ed25519_public_key, check_amount
class PathPayment(Operation):
"""The :class:`PathPayment` object, which represents a PathPayment
operation on Stellar's network.
Sends an amount in a specific asset to a destination account through a path
of offers. This allows the asset sent (e.g., 450 XLM) to be different from
the asset received (e.g, 6 BTC).
Threshold: Medium
:param destination: The destination account to send to.
:param send_asset: The asset to pay with.
:param send_max: The maximum amount of send_asset to send.
:param dest_asset: The asset the destination will receive.
:param dest_amount: The amount the destination receives.
:param path: A list of Asset objects to use as the path.
:param source: The source account for the payment. Defaults to the
transaction's source account.
"""
def __init__(
self,
destination: str,
send_asset: Asset,
send_max: Union[str, Decimal],
dest_asset: Asset,
dest_amount: Union[str, Decimal],
path: List[Asset],
source: str = None,
) -> None:
warnings.warn(
"Will be removed in version v2.0.0-alpha6, "
"use stellar_sdk.operation.PathPaymentStrictReceive",
DeprecationWarning,
)
super().__init__(source)
check_ed25519_public_key(destination)
check_amount(send_max)
check_amount(dest_amount)
self.destination = destination
self.send_asset = send_asset
self.send_max = send_max
self.dest_asset = dest_asset
self.dest_amount = dest_amount
self.path = path # a list of paths/assets
@classmethod
def _type_code(cls) -> int:
return -1
def _to_operation_body(self) -> Xdr.nullclass:
destination = Keypair.from_public_key(self.destination).xdr_account_id()
send_asset = self.send_asset.to_xdr_object()
dest_asset = self.dest_asset.to_xdr_object()
path = [asset.to_xdr_object() for asset in self.path]
path_payment_strict_receive_op = Xdr.types.PathPaymentStrictReceiveOp(
send_asset,
Operation.to_xdr_amount(self.send_max),
destination,
dest_asset,
Operation.to_xdr_amount(self.dest_amount),
path,
)
body = Xdr.nullclass()
body.type = Xdr.const.PATH_PAYMENT_STRICT_RECEIVE
body.pathPaymentStrictReceiveOp = path_payment_strict_receive_op
return body
@classmethod
def from_xdr_object(
cls, operation_xdr_object: Xdr.types.Operation
) -> "PathPayment":
"""Creates a :class:`PathPayment` object from an XDR Operation
object.
"""
source = Operation.get_source_from_xdr_obj(operation_xdr_object)
destination = StrKey.encode_ed25519_public_key(
operation_xdr_object.body.pathPaymentStrictReceiveOp.destination.ed25519
)
send_asset = Asset.from_xdr_object(
operation_xdr_object.body.pathPaymentStrictReceiveOp.sendAsset
)
dest_asset = Asset.from_xdr_object(
operation_xdr_object.body.pathPaymentStrictReceiveOp.destAsset
)
send_max = Operation.from_xdr_amount(
operation_xdr_object.body.pathPaymentStrictReceiveOp.sendMax
)
dest_amount = Operation.from_xdr_amount(
operation_xdr_object.body.pathPaymentStrictReceiveOp.destAmount
)
path = []
if operation_xdr_object.body.pathPaymentStrictReceiveOp.path:
for x in operation_xdr_object.body.pathPaymentStrictReceiveOp.path:
path.append(Asset.from_xdr_object(x))
return cls(
source=source,
destination=destination,
send_asset=send_asset,
send_max=send_max,
dest_asset=dest_asset,
dest_amount=dest_amount,
path=path,
)
|
Python
| 0.000077
|
@@ -79,160 +79,88 @@
om .
-operation import Operation%0Afrom ..asset import Asset%0Afrom ..keypair import Keypair%0Afrom ..xdr import Xdr%0Afrom ..strkey import StrKey
+path_payment_strict_receive import PathPaymentStrictReceive
%0Afrom .
-utils
+.asset
import
chec
@@ -159,76 +159,58 @@
ort
-check_ed25519_public_key, check_amount%0A%0A%0Aclass PathPayment(Operation
+Asset%0A%0A%0Aclass PathPayment(PathPaymentStrictReceive
):%0A
@@ -443,17 +443,16 @@
nt (e.g.
-,
450 XLM
@@ -501,17 +501,17 @@
ved (e.g
-,
+.
6 BTC).
@@ -1028,30 +1028,13 @@
t__(
-%0A self,%0A
+self,
des
@@ -1047,24 +1047,16 @@
on: str,
-%0A
send_as
@@ -1066,24 +1066,16 @@
: Asset,
-%0A
send_ma
@@ -1097,24 +1097,16 @@
ecimal%5D,
-%0A
dest_as
@@ -1113,24 +1113,33 @@
set: Asset,%0A
+
dest
@@ -1167,24 +1167,16 @@
ecimal%5D,
-%0A
path: L
@@ -1186,24 +1186,16 @@
%5BAsset%5D,
-%0A
source:
@@ -1205,22 +1205,16 @@
r = None
-,%0A
) -%3E Non
@@ -1396,33 +1396,32 @@
ning,%0A )%0A
-%0A
super().
@@ -1433,2646 +1433,78 @@
t__(
-source)%0A check_ed25519_public_key(destination)%0A check_amount(send_max)%0A check_amount(dest_amount)%0A self.destination = destination%0A self.send_asset = send_asset%0A self.send_max = send_max%0A self.dest_asset = dest_asset%0A self.dest_amount = dest_amount%0A self.path = path # a list of paths/assets%0A%0A @classmethod%0A def _type_code(cls) -%3E int:%0A return -1%0A%0A def _to_operation_body(self) -%3E Xdr.nullclass:%0A destination = Keypair.from_public_key(self.destination).xdr_account_id()%0A send_asset = self.send_asset.to_xdr_object()%0A dest_asset = self.dest_asset.to_xdr_object()%0A path = %5Basset.to_xdr_object() for asset in self.path%5D%0A%0A path_payment_strict_receive_op = Xdr.types.PathPaymentStrictReceiveOp(%0A send_asset,%0A Operation.to_xdr_amount(self.send_max),%0A destination,%0A dest_asset,%0A Operation.to_xdr_amount(self.dest_amount),%0A path,%0A )%0A body = Xdr.nullclass()%0A body.type = Xdr.const.PATH_PAYMENT_STRICT_RECEIVE%0A body.pathPaymentStrictReceiveOp = path_payment_strict_receive_op%0A return body%0A%0A @classmethod%0A def from_xdr_object(%0A cls, operation_xdr_object: Xdr.types.Operation%0A ) -%3E %22PathPayment%22:%0A %22%22%22Creates a :class:%60PathPayment%60 object from an XDR Operation%0A object.%0A%0A %22%22%22%0A source = Operation.get_source_from_xdr_obj(operation_xdr_object)%0A destination = StrKey.encode_ed25519_public_key(%0A operation_xdr_object.body.pathPaymentStrictReceiveOp.destination.ed25519%0A )%0A%0A send_asset = Asset.from_xdr_object(%0A operation_xdr_object.body.pathPaymentStrictReceiveOp.sendAsset%0A )%0A dest_asset = Asset.from_xdr_object(%0A operation_xdr_object.body.pathPaymentStrictReceiveOp.destAsset%0A )%0A send_max = Operation.from_xdr_amount(%0A operation_xdr_object.body.pathPaymentStrictReceiveOp.sendMax%0A )%0A dest_amount = Operation.from_xdr_amount(%0A operation_xdr_object.body.pathPaymentStrictReceiveOp.destAmount%0A )%0A%0A path = %5B%5D%0A if 
operation_xdr_object.body.pathPaymentStrictReceiveOp.path:%0A for x in operation_xdr_object.body.pathPaymentStrictReceiveOp.path:%0A path.append(Asset.from_xdr_object(x))%0A%0A return cls(%0A source=source,%0A destination=destination,%0A send_asset=send_asset,%0A send_max=send_max,%0A dest_asset=dest_asset,%0A dest_amount=dest_amount,%0A path=path,%0A
+destination, send_asset, send_max, dest_asset, dest_amount, path, source
)%0A
|
f126155a832ce1af26a09353338613727e319ac2
|
Fix worker stat parting
|
worker/worker.py
|
worker/worker.py
|
import os
import os.path
import stat
import platform
import tempfile
from time import sleep
import trueskill
import zip
import backend
from compiler import *
from sandbox import *
import smtplib
from email.mime.text import MIMEText
import configparser
parser = configparser.ConfigParser()
parser.read("../halite.ini")
RUN_GAME_FILE_NAME = "runGame.sh"
HALITE_EMAIL = "halite@halite.io"
HALITE_EMAIL_PASSWORD = parser["email"]["password"]
def makePath(path):
"""Deletes anything residing at path, creates path, and chmods the directory"""
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
os.chmod(path, 0o777)
def sendEmail(subject, body, recipient):
print("Sending email")
msg = MIMEText(body, "html")
msg['Subject'] = subject
msg['From'] = HALITE_EMAIL
msg['To'] = recipient
s = smtplib.SMTP('smtp.gmail.com:587')
s.ehlo()
s.starttls();
s.login(HALITE_EMAIL, HALITE_EMAIL_PASSWORD)
s.sendmail(HALITE_EMAIL, [recipient], msg.as_string())
s.quit()
def compile(user, backend):
"""Downloads and compiles a bot. Posts the compiled bot files to the manager."""
print("Compiling a bot with userID %s" % (user["userID"]))
workingPath = "workingPath"
makePath(workingPath)
botPath = backend.storeBotLocally(int(user["userID"]), workingPath)
zip.unpack(botPath)
language, errors = compile_anything(workingPath)
didCompile = True if errors == None else False
if didCompile:
print("Bot did compile")
zip.zipFolder(workingPath, os.path.join(workingPath, user["userID"]+".zip"))
backend.storeBotRemotely(int(user["userID"]), os.path.join(workingPath, user["userID"]+".zip"))
else:
print("Bot did not compile")
print(str(errors))
sendEmail("Halite Bot Compilation Error", "<h2>The bot that you recently submitted to the Halite competition would not compile on our servers.</h2> Our autocompile script thought that your bot was written in \""+language+".\" Here is a description of the error:<br><pre><code>"+"<br>".join(errors)+"</code></pre>", user["email"])
backend.compileResult(int(user["userID"]), didCompile, language)
shutil.rmtree(workingPath)
def runGame(width, height, users, backend):
"""Downloads compiled bots, runs a game, and posts the results of the game"""
print("Running game with width %d, height %d, and users %s" % (width, height, str(users)))
# Download players to current directory
for user in users:
userDir = str(user["userID"])
if os.path.isdir(userDir):
shutil.rmtree(userDir)
os.mkdir(userDir)
zip.unpack(backend.storeBotLocally(user["userID"], userDir))
# Run game within sandbox
runGameCommand = " ".join(["./"+RUN_GAME_FILE_NAME, str(width), str(height), users[0]["userID"], users[1]["userID"]])
print("Run game command: " + runGameCommand)
print("Game output:")
sandbox = Sandbox(os.getcwd())
sandbox.start("sh -c '"+runGameCommand+"'")
lines = []
while True:
line = sandbox.read_line(200)
if line == None:
break
print(line)
lines.append(line)
replayPath = lines[0]
# Get player ranks and scores by parsing shellOutput
for line in range(len(lines)-len(users), len(lines)):
components = line.split(" ")
playerTag = int(components[0])
users[playerTag-1]["playerTag"] = playerTag
users[playerTag-1]["rank"] = int(components[1])
users[playerTag-1]["territoryAverage"] = float(components[2])
users[playerTag-1]["strengthAverage"] = float(components[3])
users[playerTag-1]["productionAverage"] = float(components[4])
users[playerTag-1]["stillPercentage"] = float(components[5])
users[playerTag-1]["allianceAverage"] = float(components[6])
users[playerTag-1]["turnTimeAverage"] = float(components[7])
# Update trueskill mu and sigma values
teams = [[trueskill.Rating(mu=float(user['mu']), sigma=float(user['sigma']))] for user in users]
newRatings = trueskill.rate(teams)
for a in range(len(newRatings)):
users[a]['mu'] = newRatings[a][0].mu
users[a]['sigma'] = newRatings[a][0].sigma
backend.gameResult(users, replayPath)
os.remove(replayPath)
if __name__ == "__main__":
print("Starting up worker...")
while True:
task = backend.getTask()
if task != None:
print("Got new task: " + str(task))
if task["type"] == "compile":
compile(task["user"], backend)
elif task["type"] == "game":
runGame(int(task["width"]), int(task["height"]), task["users"], backend)
else:
print("Unknown task")
else:
print("No task available. Sleeping for 2 seconds")
sleep(2)
|
Python
| 0.018142
|
@@ -3054,16 +3054,21 @@
for line
+Index
in rang
@@ -3124,16 +3124,28 @@
s = line
+s%5BlineIndex%5D
.split(%22
|
0b6f3e85b5aa427125d6b4e8e4ee8223825c5c51
|
add more test cases
|
numpy/random/tests/test_random.py
|
numpy/random/tests/test_random.py
|
from numpy.testing import *
from numpy import random
import numpy as np
class TestRegression(TestCase):
def test_VonMises_range(self):
"""Make sure generated random variables are in [-pi, pi].
Regression test for ticket #986.
"""
for mu in np.linspace(-7., 7., 5):
r = random.mtrand.vonmises(mu,1,50)
assert np.all(r > -np.pi) and np.all(r <= np.pi)
def test_hypergeometric_range(self) :
"""Test for ticket #921"""
assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0))
def test_logseries_convergence(self) :
"""Test for ticket #923"""
N = 1000
np.random.seed(0)
rvsn = np.random.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / float(N)
msg = "Frequency was %f, should be > 0.45" % freq
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / float(N)
msg = "Frequency was %f, should be < 0.23" % freq
assert_(freq < 0.23, msg)
class TestMultinomial(TestCase):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert -5 <= random.randint(-5,-1) < -1
x = random.randint(-5,-1,5)
assert np.all(-5 <= x)
assert np.all(x < -1)
class TestSetState(TestCase):
def setUp(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert np.all(old == new)
def test_gaussian_reset(self):
""" Make sure the cached every-other-Gaussian is reset.
"""
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert np.all(old == new)
def test_gaussian_reset_in_media_res(self):
""" When the state is saved with a cached Gaussian, make sure the cached
Gaussian is restored.
"""
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert np.all(old == new)
def test_backwards_compatibility(self):
""" Make sure we can accept old state tuples that do not have the cached
Gaussian value.
"""
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert np.all(x1 == x2)
assert np.all(x1 == x3)
def test_negative_binomial(self):
""" Ensure that the negative binomial results take floating point
arguments without truncation.
"""
self.prng.negative_binomial(0.5, 0.5)
class Test_MTRand(TestCase):
def test_binomial(self):
n, p, N = 10, 0.5, 1000
sample = np.random.binomial(n, p, N)
avg = 1.0 * sum(sample) / N / n
self.assert_(0.45 < avg < 0.55)
self.assertRaises(ValueError, np.random.binomial, -1, 0.5)
self.assertRaises(ValueError, np.random.binomial, 10, -0.1)
self.assertRaises(ValueError, np.random.binomial, 10, 1.1)
def test_binomial_array(self):
# See if first argument can also be an array
a = np.array([10, 20, 5])
b = np.random.binomial(a, 0.4)
self.assert_(b.shape, (3,))
self.assertRaises(ValueError, np.random.binomial, a, -0.1)
a = np.array([10, -20, 5])
self.assertRaises(ValueError, np.random.binomial, a, 0.4)
if __name__ == "__main__":
run_module_suite()
|
Python
| 0
|
@@ -3924,33 +3924,33 @@
n array%0A
-a
+n
= np.array(%5B10,
@@ -3950,32 +3950,71 @@
ay(%5B10, 20, 5%5D)%0A
+ p = np.array(%5B0.4, 0.7, 0.2%5D)%0A%0A
b = np.r
@@ -4028,17 +4028,17 @@
inomial(
-a
+n
, 0.4)%0A
@@ -4069,24 +4069,172 @@
ape, (3,))%0A%0A
+ b = np.random.binomial(5, p)%0A self.assert_(b.shape, (3,))%0A%0A b = np.random.binomial(n, p)%0A self.assert_(b.shape, (3,))%0A%0A
self
@@ -4279,17 +4279,17 @@
nomial,
-a
+n
, -0.1)%0A
@@ -4296,17 +4296,17 @@
-a
+n
= np.ar
@@ -4381,17 +4381,17 @@
nomial,
-a
+n
, 0.4)%0A%0A
|
2de30c0acdbcc2560ee7c9c472df956441cb2bab
|
use better filterType
|
nvchecker_source/vsmarketplace.py
|
nvchecker_source/vsmarketplace.py
|
# MIT licensed
# Copyright (c) 2013-2021 Th3Whit3Wolf <the.white.wolf.is.1337@gmail.com>, et al.
from nvchecker.api import (
VersionResult, Entry, AsyncCache, KeyManager,
TemporaryError, session, GetVersionError,
)
API_URL = 'https://marketplace.visualstudio.com/_apis/public/gallery/extensionquery'
HEADERS = {
'Accept': 'application/json;api-version=6.1-preview.1',
'Content-Type': 'application/json'
}
async def get_version(name: str, conf: Entry, *, cache: AsyncCache, **kwargs):
name = conf.get('vsmarketplace') or name
q = {
'filters': [
{
'criteria': [
{
'filterType': 8,
'value': 'Microsoft.VisualStudio.Code'
},
{
'filterType': 10,
'value': name
},
{
'filterType': 12,
'value': '4096'
}
],
'pageNumber': 1,
'pageSize': 2,
'sortBy': 0,
'sortOrder': 0
}
],
'assetTypes': [],
'flags': 946
}
res = await session.post(
API_URL,
headers = HEADERS,
json = q,
)
j = res.json()
version = j['results'][0]['extensions'][0]['versions'][0]['version']
return version
|
Python
| 0
|
@@ -735,10 +735,9 @@
e':
-10
+7
,%0A
@@ -1196,16 +1196,17 @@
return version
+%0A
|
355372ff51a84c0a6d7d86c0ef1fb12def341436
|
Add the score to Engine.chat return values
|
invada/engine.py
|
invada/engine.py
|
# -*- coding: utf-8 -*-
class Engine:
def __init__(self,
response_pairs,
knowledge={}):
self.response_pairs = response_pairs
self.knowledge = knowledge
def chat(self, user_utterance, context):
best_score = 0
best_response_pair = None
best_captured = {}
for response_pair in self.response_pairs:
captured = response_pair.match(user_utterance, self.knowledge)
if captured is None:
continue
score = response_pair.score(captured, context, self.knowledge)
if best_score < score:
best_score, best_response_pair, best_captured = score, response_pair, captured
return best_response_pair.generate(best_captured, context, self.knowledge)
|
Python
| 0.000033
|
@@ -738,12 +738,29 @@
re
-turn
+sponse, new_context =
bes
@@ -804,28 +804,77 @@
d, context, self.knowledge)%0A
+ return response, new_context, best_score%0A
|
ef29e402c58751a938cb11cee480ac4f4e31aef5
|
Add warning
|
invoke/config.py
|
invoke/config.py
|
from .vendor.etcaetera.config import Config as EtcConfig
from .vendor.etcaetera.adapter import File
class Config(object):
"""
Invoke's primary configuration handling class.
See :doc:`/concepts/configuration` for details on the configuration system
this class implements, including the :ref:`configuration hierarchy
<config-hierarchy>`.
Lightly wraps ``etcaetera.config.Config``, allowing for another level of
configurability (re: which files are loaded and in what order) as well as
convenient access to configuration values, which may be accessed using
dict syntax::
config['foo']
or attribute syntax::
config.foo
Nesting works the same way - dict config values are transparently turned
into objects which honor both the dictionary protocol and the
attribute-access method::
config['foo']['bar']
config.foo.bar
"""
def __init__(self):
"""
Creates a new config object, but does not load any configuration data.
.. note::
To load configuration data, call `~.Config.load` after
initialization.
For convenience, keyword arguments not listed below will be interpreted
as top-level configuration keys, so one may say e.g.::
c = Config(my_setting='my_value')
print(c['my_setting']) # => 'my_value'
:param str global_prefix:
Path & partial filename for the global config file location. Should
include everything but the dot & file extension.
The final result (including extension) will be turned into a fully
qualified file path and have system-appropriate expansion performed
(tildes and so forth).
Default: ``/etc/invoke`` (e.g. ``/etc/invoke.yaml`` or
``/etc/invoke.json``).
:param str user_prefix:
Like ``global_prefix`` but for the per-user config file.
Default: ``~/.invoke`` (e.g. ``~/.invoke.yaml``).
"""
pass
def load(self):
"""
Performs loading and merging of all config sources.
See :ref:`config-hierarchy` for details on load order and file
locations.
"""
pass
|
Python
| 0.000002
|
@@ -672,16 +672,243 @@
ig.foo%0A%0A
+ .. warning::%0A Any %22real%22 attributes (methods, etc) on %60Config%60 take precedence over%0A settings values - so if you e.g. have a top level setting named%0A %60%60load%60%60, you *must* use dict syntax to access it.%0A%0A
Nest
|
e7154e767f3c82bc06e36aad4618a7518912144f
|
Decode the output text into a str
|
invoke/runner.py
|
invoke/runner.py
|
import os
import pty
import select
import sys
from .vendor import pexpect
from .monkey import Popen, PIPE
from .exceptions import Failure
class Result(object):
"""
A container for information about the result of a command execution.
`Result` instances have the following attributes:
* ``stdout``: The subprocess' standard output, as a multiline string.
* ``stderr``: Same as ``stdout`` but containing standard error (unless
the process was invoked via a pty; see `run`.)
* ``exited``: An integer representing the subprocess' exit/return code.
* ``return_code``: An alias to ``exited``.
* ``ok``: A boolean equivalent to ``exited == 0``.
* ``failed``: The inverse of ``ok``: ``True`` if the program exited with a
nonzero return code.
* ``pty``: A boolean describing whether the subprocess was invoked with a
pty or not; see `run`.
* ``pty_exception``: Typically ``None``, but may be an exception object if
``pty`` was ``True`` and ``run()`` had to swallow an apparently-spurious
``OSError``. Solely for sanity checking/debugging purposes.
"""
# TODO: inherit from namedtuple instead? heh
def __init__(self, stdout, stderr, exited, pty, pty_exception=None):
self.exited = self.return_code = exited
self.stdout = stdout
self.stderr = stderr
self.pty = pty
self.pty_exception = pty_exception
def __nonzero__(self):
# Holy mismatch between name and implementation, Batman!
return self.exited == 0
def __str__(self):
ret = ["Command exited with status %s." % self.exited]
for x in ('stdout', 'stderr'):
val = getattr(self, x)
ret.append("""=== %s ===
%s
""" % (x, val.rstrip()) if val else "(no %s)" % x)
return "\n".join(ret)
@property
def ok(self):
return self.exited == 0
@property
def failed(self):
return not self.ok
def normalize_hide(val):
hide_vals = (None, 'out', 'stdout', 'err', 'stderr', 'both')
if val not in hide_vals:
raise ValueError("'hide' got %r which is not in %r" % (val, hide_vals,))
if val is None:
hide = ()
elif val is 'both':
hide = ('out', 'err')
elif val is 'stdout':
hide = ('out',)
elif val is 'stderr':
hide = ('err',)
else:
hide = (val,)
return hide
def run(command, warn=False, hide=None, pty=False):
"""
Execute ``command`` in a local subprocess, returning a `Result` object.
A `Failure` exception (which contains a reference to the `Result` that
would otherwise have been returned) is raised if the subprocess terminates
with a nonzero return code. This behavior may be disabled by setting
``warn=True``.
To disable copying the subprocess' stdout and/or stderr to the controlling
terminal, specify ``hide='out'``, ``hide='err'`` or ``hide='both'``. (The
default value is ``None``, meaning to print everything.)
.. note::
Stdout and stderr are always captured and stored in the ``Result``
object, regardless of ``hide``'s value.
By default, ``run`` connects directly to the invoked subprocess and reads
its stdout/stderr streams. Some programs will buffer differently (or even
behave differently) in this situation compared to using an actual terminal
or pty. To use a pty, specify ``pty=True``.
.. warning::
Due to their nature, ptys have a single output stream, so the ability
to tell stdout apart from stderr is **not possible** when ``pty=True``.
As such, all output will appear on your local stdout and be captured
into the ``stdout`` result attribute. Stderr and ``stderr`` will always
be empty when ``pty=True``.
"""
if pty:
hide = normalize_hide(hide)
out = []
def out_filter(text):
out.append(text)
if 'out' not in hide:
return text
else:
return ""
wrapped_cmd = "/bin/bash -c \"%s\"" % command
p = pexpect.spawn(wrapped_cmd)
# Ensure pexpect doesn't barf with OSError if we fall off the end of
# the child's input on some platforms (e.g. Linux).
exception = None
try:
p.interact(output_filter=out_filter)
except OSError as e:
# Only capture the OSError we expect
if "Input/output error" not in e:
raise
# Ensure it ties off the child, sets exitstatus, etc
p.close()
# Capture the exception in case it's NOT the OSError we think it
# is and folks need to debug
exception = e
result = Result(stdout="".join(out), stderr="", exited=p.exitstatus,
pty=pty, pty_exception=exception)
else:
process = Popen(command,
shell=True,
stdout=PIPE,
stderr=PIPE,
hide=normalize_hide(hide)
)
stdout, stderr = process.communicate()
result = Result(stdout=stdout, stderr=stderr,
exited=process.returncode, pty=pty)
if not (result or warn):
raise Failure(result)
return result
|
Python
| 1
|
@@ -3916,16 +3916,44 @@
end(text
+.decode(sys.stdout.encoding)
)%0A
|
aa459c2db7f1995fda486ef80c30b541ff1895d8
|
Remove unnessesaty params
|
ocds/databridge/contrib/client.py
|
ocds/databridge/contrib/client.py
|
import requests
import requests.adapters
from gevent.pool import Pool
import logging
logger = logging.getLogger(__name__)
class APIClient(object):
def __init__(self, api_key, api_host, api_version, **options):
self.base_url = "{}/api/{}".format(api_host, api_version)
self.session = requests.Session()
self.session.auth = (api_key, '')
self.session.headers = {"Accept": "applicaiton/json",
"Content-type": "application/json"}
resourse = options.get('resourse', 'tenders')
self.resourse_url = "{}/{}".format(self.base_url, resourse)
APIAdapter = requests.adapters.HTTPAdapter(max_retries=5,
pool_connections=50,
pool_maxsize=30)
self.session.mount(self.resourse_url, APIAdapter)
# retrieve cookie
self.session.head("{}/{}".format(self.base_url, 'spore'))
self.pool = Pool(10)
def get_tenders(self, params=None):
if not params:
params = {'feed': 'chages'}
resp = self.session.get(self.resourse_url, params=params)
if resp.ok:
return resp.json()
else:
resp.raise_for_status()
def get_tender(self, tender_id, params=None):
resp = self.session.get(
"{}/{}".format(self.resourse_url, tender_id), params=params
)
if resp.ok:
return resp.json()['data']
else:
resp.raise_for_status()
def fetch(self, tender_ids):
resp = self.pool.map(self.get_tender, [t['id'] for t in tender_ids])
return [r for r in resp]
def get_retreive_clients(api_key, api_host, api_version):
forward = APIClient(api_key, api_host, api_version)
backward = APIClient(api_key, api_host, api_version)
origin_cookie = forward.session.cookies
backward.session.cookies = origin_cookie
return origin_cookie, forward, backward
|
Python
| 0.000007
|
@@ -1308,29 +1308,16 @@
ender_id
-, params=None
):%0A
@@ -1400,31 +1400,16 @@
nder_id)
-, params=params
%0A
@@ -1662,16 +1662,21 @@
in resp
+ if r
%5D%0A%0A%0Adef
|
48cb3e901917c598294c5431c66efe6ed56e465a
|
set DEBUG to true
|
wsgi/settings.py
|
wsgi/settings.py
|
import os
MONGO_HOST = os.getenv('OPENSHIFT_NOSQL_DB_HOST')
MONGO_PORT = os.getenv('OPENSHIFT_NOSQL_DB_PORT')
MONGO_USERNAME = os.getenv('OPENSHIFT_NOSQL_DB_USERNAME')
MONGO_PASSWORD = os.getenv('OPENSHIFT_NOSQL_DB_PASSWORD')
PRIV_KEY = os.getenv('OPENSHIFT_DATA_DIR') + '/server_private.pem'
PUB_KEY = os.getenv('OPENSHIFT_DATA_DIR') + '/server_public.pem'
|
Python
| 0.999549
|
@@ -349,12 +349,26 @@
public.pem'%0A
+%0ADEBUG = True%0A
|
99f45d201b3513096bf8ebe7c877c836d8e6611a
|
Add logging to web client
|
clients/web/rewebclient/rewebclient.py
|
clients/web/rewebclient/rewebclient.py
|
from flask import Flask, request, render_template, flash, redirect, url_for
from reclient.client import ReClient, ReClientException
import os
DEBUG = False
SECRET_KEY = 'CHANGE ME'
app = Flask(__name__)
app.config.from_object(__name__)
app.config.from_envvar('REWEBCLIENT_SETTINGS', silent=True)
app.config['RE_FRONTEND_URL'] = app.config.get('RE_FRONTEND_URL', None) or os.getenv('RE_FRONTEND_URL')
if app.config['RE_FRONTEND_URL'] is None:
raise RuntimeError("RE_FRONTEND_URL environment variable must be set and point to a reliable-email web frontend!")
client = ReClient(app.config['RE_FRONTEND_URL'])
@app.route('/', methods=['GET', 'POST'])
def index():
# We could use something like WTForms here, but I'll just keep it simple.
# I'm ignoring all kinds of i'llformed user input, and let the web frontend handle the small amount of validation
if request.method == 'POST':
try:
client.submit(
request.form.get('subject', ''),
request.form.get('body', ''),
request.form.get('to_email', ''),
request.form.get('to_name', '')
)
flash(u'Frontend returned a OK, job submitted!')
except ReClientException, ex:
flash(u'Job failed submission: %s' % ex.message)
redirect(url_for('index'))
return render_template('index.html')
if __name__ == '__main__':
app.run()
|
Python
| 0.000001
|
@@ -135,16 +135,31 @@
mport os
+%0Aimport logging
%0A%0ADEBUG
@@ -383,11 +383,86 @@
one)
- or
+%0Aif app.config%5B'RE_FRONTEND_URL'%5D is None:%0A app.config%5B'RE_FRONTEND_URL'%5D =
os.
@@ -649,16 +649,249 @@
end!%22)%0A%0A
+# Logging%0Aif app.config.get('LOG', None) is not None:%0A file_handler = logging.FileHandler(app.config%5B'LOG'%5D)%0A file_handler.setLevel(logging.DEBUG)%0A app.logger.addHandler(file_handler)%0A app.logger.setLevel(logging.DEBUG)%0A%0A
client =
@@ -1747,8 +1747,9 @@
pp.run()
+%0A
|
bffb0c7fb099039afb444cfc641ae7b1978c59f8
|
Exit main script when no observations found
|
ircelsos/main.py
|
ircelsos/main.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 07 23:11:39 2015
@author: Joris Van den Bossche
"""
from __future__ import print_function
def main():
import argparse
parser = argparse.ArgumentParser(
prog='ircelsos',
description='Download air quality data from the SOS of IRCEL - CELINE.')
parser.add_argument('pollutant',
help='The pollutant')
parser.add_argument('--station', '-s', nargs=1,
help='Station number. If no provided, use all available'
' stations for that pollutant')
parser.add_argument('--period', '-p', type=str, nargs=2,
help='Period of the measurements given as "start stop"')
args = parser.parse_args()
from .query_ircelsos import query_ircelsos
from .sosparser import get_observations, parse_observation
print("SOS of IRCEL - CELINE")
print("Downloading ...")
pollutant = args.pollutant
if args.station:
station = args.station[0]
else:
station = None
response = query_ircelsos(pollutant, station, args.period[0],
args.period[1])
observations = get_observations(response)
if not observations:
print('No observations found')
for obs in observations:
st_info, raw_data = parse_observation(obs)
filename = '{0}_{1}.csv'.format(pollutant, st_info['name'])
print("Writing file '{}'".format(filename))
with open(filename, 'w') as f:
f.writelines(raw_data.replace(';', '\n'))
|
Python
| 0
|
@@ -1271,16 +1271,54 @@
found')
+%0A import sys%0A sys.exit()
%0A%0A fo
|
641c7da63b2d7255ed3039d5c26574faa060b333
|
Stop altering the glance API URL
|
openstack_dashboard/api/glance.py
|
openstack_dashboard/api/glance.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import itertools
import logging
import thread
from django.conf import settings
import six.moves.urllib.parse as urlparse
import glanceclient as glance_client
from horizon.utils import functions as utils
from openstack_dashboard.api import base
LOG = logging.getLogger(__name__)
def glanceclient(request):
o = urlparse.urlparse(base.url_for(request, 'image'))
url = "://".join((o.scheme, o.netloc))
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
LOG.debug('glanceclient connection created using token "%s" and url "%s"'
% (request.user.token.id, url))
return glance_client.Client('1', url, token=request.user.token.id,
insecure=insecure, cacert=cacert)
def image_delete(request, image_id):
return glanceclient(request).images.delete(image_id)
def image_get(request, image_id):
"""Returns an Image object populated with metadata for image
with supplied identifier.
"""
image = glanceclient(request).images.get(image_id)
if not hasattr(image, 'name'):
image.name = None
return image
def image_list_detailed(request, marker=None, filters=None, paginate=False):
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
page_size = utils.get_page_size(request)
if paginate:
request_size = page_size + 1
else:
request_size = limit
kwargs = {'filters': filters or {}}
if marker:
kwargs['marker'] = marker
images_iter = glanceclient(request).images.list(page_size=request_size,
limit=limit,
**kwargs)
has_more_data = False
if paginate:
images = list(itertools.islice(images_iter, request_size))
if len(images) > page_size:
images.pop(-1)
has_more_data = True
else:
images = list(images_iter)
return (images, has_more_data)
def image_update(request, image_id, **kwargs):
return glanceclient(request).images.update(image_id, **kwargs)
def image_create(request, **kwargs):
copy_from = None
if kwargs.get('copy_from'):
copy_from = kwargs.pop('copy_from')
image = glanceclient(request).images.create(**kwargs)
if copy_from:
thread.start_new_thread(image_update,
(request, image.id),
{'copy_from': copy_from})
return image
|
Python
| 0.000011
|
@@ -925,50 +925,8 @@
ings
-%0Aimport six.moves.urllib.parse as urlparse
%0A%0Aim
@@ -1121,30 +1121,14 @@
-o = urlparse.urlparse(
+url =
base
@@ -1157,52 +1157,8 @@
ge')
-)%0A url = %22://%22.join((o.scheme, o.netloc))
%0A
|
8dcca03935514321a35f8aabe6e3367dfd8b802e
|
Version bump
|
jasy/__init__.py
|
jasy/__init__.py
|
#
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
#
"""
**Jasy - Web Tooling Framework**
Jasy is a powerful Python3-based tooling framework.
It makes it easy to manage heavy web projects.
Its main goal is to offer an API which could be used by developers to write their custom build/deployment scripts.
"""
__version__ = "1.0.2"
__author__ = "Sebastian Werner <info@sebastian-werner.net>"
import os.path
datadir = os.path.join(os.path.dirname(__file__), "data")
def info():
"""
Prints information about Jasy to the console.
"""
import jasy.core.Console as Console
print("Jasy %s is a powerful web tooling framework" % __version__)
print("Copyright (c) 2010-2012 Zynga Inc. %s" % Console.colorize("http://zynga.com/", "underline"))
print("Visit %s for details." % Console.colorize("https://github.com/zynga/jasy", "underline"))
print()
class UserError(Exception):
"""
Standard Jasy error class raised whenever something happens which the system understands (somehow excepected)
"""
pass
|
Python
| 0.000001
|
@@ -342,11 +342,18 @@
%221.
-0.2
+1.0-alpha1
%22%0A__
|
e85e1021ae20ebecb344c592f60f2ad6607a1db1
|
refactor rename variables for clarity
|
src/main/python/pybuilder/plugins/filter_resources_plugin.py
|
src/main/python/pybuilder/plugins/filter_resources_plugin.py
|
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2014 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
from pybuilder.core import init, after, use_plugin
from pybuilder.utils import apply_on_files, read_file, write_file
use_plugin("core")
@init
def init_filter_resources_plugin(project):
project.set_property_if_unset("filter_resources_target", "$dir_target")
project.set_property_if_unset("filter_resources_glob", [])
@after("package", only_once=True)
def filter_resources(project, logger):
globs = project.get_mandatory_property("filter_resources_glob")
if not globs:
logger.warn("No resources to filter configured. Consider removing plugin.")
return
target = project.expand_path("$filter_resources_target")
logger.info("Filter resources matching %s in %s", " ".join(globs), target)
project_dict_wrapper = ProjectDictWrapper(project)
apply_on_files(target, filter_resource, globs, project_dict_wrapper, logger)
def filter_resource(absolute_file_name, relative_file_name, dictionary, logger):
logger.debug("Filtering resource %s", absolute_file_name)
content = "".join(read_file(absolute_file_name))
filtered = string.Template(content).safe_substitute(dictionary)
write_file(absolute_file_name, filtered)
class ProjectDictWrapper(object):
def __init__(self, project):
self.project = project
def __getitem__(self, key):
default_value = "${%s}" % key
fallback_value = self.project.get_property(key, default_value)
return getattr(self.project, key, fallback_value)
|
Python
| 0.000002
|
@@ -1886,24 +1886,25 @@
er(object):%0A
+%0A
def __in
@@ -1996,29 +1996,51 @@
-default_value
+fallback_when_no_substitution_found
= %22$%7B%25s
@@ -2056,24 +2056,44 @@
+project_property_or_
fallback
_value =
@@ -2084,22 +2084,16 @@
fallback
-_value
= self.
@@ -2121,22 +2121,109 @@
key,
- default_value
+%0A fallback_when_no_substitution_found
)%0A
@@ -2262,24 +2262,44 @@
t, key,
+project_property_or_
fallback
_value)%0A
@@ -2294,12 +2294,6 @@
back
-_value
)%0A
|
ab49b0be58975156f96bd5340da8d06f5b8626a5
|
Change to batch_size = 64
|
tensorflow_examples/models/nmt_with_attention/distributed_test.py
|
tensorflow_examples/models/nmt_with_attention/distributed_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for distributed nmt_with_attention."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf # TF2
from tensorflow_examples.models.nmt_with_attention import distributed_train
from tensorflow_examples.models.nmt_with_attention import utils
assert tf.__version__.startswith('2')
class NmtDistributedBenchmark(tf.test.Benchmark):
def __init__(self, output_dir=None, **kwargs):
self.output_dir = output_dir
def benchmark_one_epoch_1_gpu(self):
kwargs = utils.get_common_kwargs()
kwargs.update({'enable_function': False})
self._run_and_report_benchmark(**kwargs)
def benchmark_one_epoch_1_gpu_function(self):
kwargs = utils.get_common_kwargs()
self._run_and_report_benchmark(**kwargs)
def benchmark_ten_epochs_2_gpus(self):
kwargs = utils.get_common_kwargs()
kwargs.update({'epochs': 10, 'num_gpu': 2, 'batch_size': 128})
self._run_and_report_benchmark(**kwargs)
def _run_and_report_benchmark(self, **kwargs):
start_time_sec = time.time()
train_loss, test_loss = distributed_train.main(**kwargs)
wall_time_sec = time.time() - start_time_sec
extras = {'train_loss': train_loss,
'test_loss': test_loss}
self.report_benchmark(
wall_time=wall_time_sec, extras=extras)
if __name__ == '__main__':
tf.test.main()
|
Python
| 0.000689
|
@@ -1645,11 +1645,10 @@
e':
-128
+64
%7D)%0A
|
dd800a1b0ea3dfc1043d3a9ba867d8634d512613
|
version 0.3.2
|
YesssSMS/const.py
|
YesssSMS/const.py
|
"""constants for YesssSMS."""
VERSION = "0.3.2a2"
_UNSUPPORTED_CHARS_STRING = "<strong>Achtung:</strong> Ihre SMS konnte nicht \
versendet werden, da sie folgende ungültige Zeichen enthält:"
_LOGIN_ERROR_STRING = "<strong>Login nicht erfolgreich"
_LOGIN_LOCKED_MESS = "Wegen 3 ungültigen Login-Versuchen ist Ihr Account für \
eine Stunde gesperrt."
_LOGIN_LOCKED_MESS_ENG = "because of 3 failed login-attempts, your account \
has been suspended for one hour"
_UNSUPPORTED_CHARS_STRING = "<strong>Achtung:</strong> Ihre SMS konnte nicht \
versendet werden, da sie folgende ungültige Zeichen enthält:"
_SMS_SENDING_SUCCESSFUL_STRING = ">Ihre SMS wurde erfolgreich verschickt!<"
# <div class='alert alert-warning'>Lieber yesss! Kunde,<br /><br />Ihre Karte \
# wurde deaktiviert, da Sie innerhalb der letzten 12 Monate nicht mehr \
# aufgeladen haben. Bitte laden Sie zur Aktivierung Ihrer SIM-Karte Ihr \
# Guthaben wieder auf, da andernfalls in Kürze die Rufnummer gelöscht wird.\
# <br /><br />Ihr yesss! Team</div>
_ACCOUNT_LOCKED_WARNING = ">Ihre Karte wurde deaktiviert, da Sie innerhalb \
der letzten 12 Monate nicht mehr aufgeladen haben."
HELP = {'to_help': 'Recipient phone number in the format: +436601234567',
'desc': 'Send an SMS via the yesss.at website',
'configfile': "Path of a config-file. Default paths are: \
'/etc/yessssms.conf' and '~/.config/yessssms.conf'. \
An example file is yessssms_sample.conf.",
'login': 'Your phone number (eg. 06501234567), used to login at \
yesss.at',
'password': """Your password, it\'s not recommended to use this. \
Use a config-file instead (see: -c, --configfile).""",
'message': 'Message to be sent by SMS',
'version': 'print version information.',
'test': 'send a test message to yourself',
'print-config-file': 'prints a sample config file, that can be piped \
into eg. ~/.config/yessssms.conf.',
}
CONFIG_FILE_CONTENT = """[YESSS_AT]
YESSS_LOGIN = 06501234567
YESSS_PASSWD = mySecretPassword
# you can define a default recipient (will be overridden by -t option)
# YESSS_TO = +43664123123123
"""
CONFIG_FILE_PATHS = ["/etc/yessssms.conf",
"~/.config/yessssms.conf",
]
|
Python
| 0.000003
|
@@ -43,10 +43,8 @@
.3.2
-a2
%22%0A_U
|
a59ec3963e0726c291dbde0d26a5f3468e88966c
|
Add call to audit_orders from cli.py.
|
2017-code/cli.py
|
2017-code/cli.py
|
# cli.py
# Ronald L. Rivest (with Karim Husayn Karimi)
# July 22, 2017
# python3
"""
Command-line parser and dispatch
"""
import argparse
import multi
import election_specification
import ids
import audit
import reported
##############################################################################
# Command-line arguments
def parse_args():
parser = argparse.ArgumentParser(description="""multi.py: A Bayesian post-election audit program for an
election with multiple contests and multiple paper ballot
collections.""")
#v1 and v2:
# Mandatory argument is dirname
parser.add_argument("election_dirname", help="""
The name for this election of the subdirectory within the elections root directory.""")
# All others are optional
# First group sets parameters: election_name, elections_root, audit_seed
parser.add_argument("--election_name", help="""
Human-readable name of the election.""",
default="TestElection")
parser.add_argument("--elections_root", help="""The directory where the subdirectory for the
election is to be found. Defaults to "./elections".""",
default="./elections")
parser.add_argument("--audit_seed",
help="""Seed for the random number generator used for
auditing (arbitrary nonnegative integer). (If omitted, uses clock.)""")
## v2:
parser.add_argument("--read_specification", action="store_true", help="""
Read and check election specification.""")
parser.add_argument("--read_reported", action="store_true", help="""
Read and check reported election data and results.""")
parser.add_argument("--read_seed", action="store_true", help="""
Read audit seed.""")
parser.add_argument("--make_orders", action="store_true", help="""
Make audit orders files.""")
parser.add_argument("--read_audited", action="store_true", help="""
Read and check audited votes.""")
parser.add_argument("--stage",
help="""Run stage STAGE of the audit (may specify "ALL").""")
args = parser.parse_args()
# print("Command line arguments:", args)
return args
def process_args(e, args):
e.election_dirname = ids.filename_safe(args.election_dirname)
e.election_name = args.election_name
ELECTIONS_ROOT = args.elections_root
audit.set_audit_seed(e, args.audit_seed)
if args.read_specification:
# print("read_specification")
election_specification.get_election_specification(e)
elif args.read_reported:
print("read_reported")
election_specification.get_election_specification(e)
reported.get_election_data(e)
elif args.read_seed:
print("read_seed")
election_specification.get_election_specification(e)
reported.get_election_data(e)
audit.get_audit_parameters(e, args)
elif args.make_orders:
print("make_orders")
elif args.read_audited:
print("read_audited")
elif args.stage:
print("stage", args.stage)
election_specification.get_election_specification(e)
reported.get_election_data(e)
audit.get_audit_parameters(e, args)
audit.audit(e, args)
|
Python
| 0
|
@@ -3135,24 +3135,69 @@
ke_orders%22)%0A
+ audit_orders.compute_audit_orders(e)%0A
elif arg
|
75c6a6d602d7a9ce5bbad076eefcd8ecb60e88ed
|
Set un-compile-able working_scope error message to CRITICAL
|
AutoSetSyntax.py
|
AutoSetSyntax.py
|
import logging
import os
import re
import sublime
import sublime_plugin
import sys
sys.path.insert(0, os.path.dirname(__file__))
from SyntaxMappings import SyntaxMappings
PLUGIN_NAME = 'AutoSetSyntax'
PLUGIN_DIR = "Packages/%s" % PLUGIN_NAME
PLUGIN_SETTINGS = PLUGIN_NAME + '.sublime-settings'
LOG_LEVEL_DEFAULT = 'INFO'
LOG_FORMAT = "%(name)s: [%(levelname)s] %(message)s"
settings = None
workingScopeRegex = None
syntaxMappings = None
loggingStreamHandler = None
logger = None
def plugin_unloaded():
global settings, loggingStreamHandler, logger
settings.clear_on_change('log_level')
settings.clear_on_change('syntax_mapping')
settings.clear_on_change('working_scope')
logger.removeHandler(loggingStreamHandler)
def plugin_loaded():
global settings, workingScopeRegex, syntaxMappings, loggingStreamHandler, logger
settings = sublime.load_settings(PLUGIN_SETTINGS)
# create logger stream handler
loggingStreamHandler = logging.StreamHandler()
loggingStreamHandler.setFormatter(logging.Formatter(LOG_FORMAT))
# config logger
logger = logging.getLogger(PLUGIN_NAME)
logger.addHandler(loggingStreamHandler)
applyLogLevel()
syntaxMappings = SyntaxMappings(settings=settings, logger=logger)
compileWorkingScope()
# when the user settings is modified...
settings.add_on_change('log_level', applyLogLevel)
settings.add_on_change('syntax_mapping', syntaxMappings.buildSyntaxMappings)
settings.add_on_change('working_scope', compileWorkingScope)
def applyLogLevel():
""" apply log_level to this plugin """
global settings, logger
logLevel = settings.get('log_level')
try:
logger.setLevel(logging._levelNames[logLevel])
except:
logger.warning('unknown "{0}": {1} (assumed "{2}")'.format('log_level', logLevel, LOG_LEVEL_DEFAULT))
logger.setLevel(logging._levelNames[LOG_LEVEL_DEFAULT])
def compileWorkingScope():
""" compile workingScope into regex object to get better speed """
global settings, workingScopeRegex, logger
workingScope = settings.get('working_scope')
try:
workingScopeRegex = re.compile(workingScope)
except:
errorMessage = 'regex compilation failed in user settings "{0}": {1}'.format('working_scope', workingScope)
logger.error(errorMessage)
sublime.error_message(errorMessage)
workingScopeRegex = None
class AutoSetNewFileSyntax(sublime_plugin.EventListener):
global settings, workingScopeRegex, syntaxMappings
def on_activated_async(self, view):
""" called when a view gains input focus """
if (
self.isEventListenerEnabled('on_activated_async') and
self.isOnWorkingScope(view)
):
view.run_command('auto_set_syntax')
def on_clone_async(self, view):
""" called when a view is cloned from an existing one """
if (
self.isEventListenerEnabled('on_clone_async') and
self.isOnWorkingScope(view)
):
view.run_command('auto_set_syntax')
def on_load_async(self, view):
""" called when the file is finished loading """
if (
self.isEventListenerEnabled('on_load_async') and
self.isOnWorkingScope(view)
):
view.run_command('auto_set_syntax')
def on_modified_async(self, view):
""" called after changes have been made to a view """
if (
self.isEventListenerEnabled('on_modified_async') and
self.isOnlyOneCursor(view) and
self.isFirstCursorNearBeginning(view) and
self.isOnWorkingScope(view)
):
view.run_command('auto_set_syntax')
def on_new_async(self, view):
""" called when a new buffer is created """
if (
self.isEventListenerEnabled('on_new_async') and
self.isOnWorkingScope(view)
):
view.run_command('auto_set_syntax')
def on_post_text_command(self, view, command_name, args):
""" called after a text command has been executed """
if (
self.isOnWorkingScope(view) and
(
self.isEventListenerEnabled('on_post_paste') and
(
command_name == 'patse' or
command_name == 'paste_and_indent'
)
)
):
view.run_command('auto_set_syntax')
def on_pre_save_async(self, view):
""" called just before a view is saved """
if (
self.isEventListenerEnabled('on_pre_save_async') and
self.isOnWorkingScope(view)
):
view.run_command('auto_set_syntax')
def isEventListenerEnabled(self, event):
""" check a event listener is enabled """
try:
return settings.get('event_listeners', None)[event]
except:
logger.warning('"{0}" is not set in user settings (assumed true)'.format('event_listeners -> '+event))
return True
def isOnlyOneCursor(self, view):
""" check there is only one cursor """
return len(view.sel()) == 1
def isFirstCursorNearBeginning(self, view):
""" check the cursor is at first few lines """
return view.rowcol(view.sel()[0].a)[0] < 2
def isOnWorkingScope(self, view):
""" check the scope of the first line is matched by working_scope """
if (
workingScopeRegex is None or
workingScopeRegex.search(view.scope_name(0)) is None
):
return False
return True
class autoSetSyntaxCommand(sublime_plugin.TextCommand):
global settings, syntaxMappings
def run(self, edit):
""" match the first line and set the corresponding syntax """
firstLine = self.getPartialFirstLine()
for syntaxMapping in syntaxMappings.value():
syntaxFile, firstLineMatchRegexes = syntaxMapping
for firstLineMatchRegex in firstLineMatchRegexes:
if firstLineMatchRegex.search(firstLine) is not None:
self.view.set_syntax_file(syntaxFile)
return
def getPartialFirstLine(self):
""" get the (partial) first line """
region = self.view.line(0)
firstLineLengthMax = settings.get('first_line_length_max')
if firstLineLengthMax >= 0:
# if the first line is longer than the max line length,
# then we use the max line length
region = sublime.Region(0, min(region.end(), firstLineLengthMax))
return self.view.substr(region)
|
Python
| 0.000406
|
@@ -2315,21 +2315,24 @@
logger.
-error
+critical
(errorMe
|
ab22a41382f739313d8e5484b4f3d54745e0a888
|
Removed urllib2 import. should fix #1
|
BitcoinTicker.py
|
BitcoinTicker.py
|
import sublime
import sublime_plugin
try:
from urllib.request import urlopen
from urllib.parse import urlparse
import urllib2
except ImportError:
from urlparse import urlparse
from urllib import urlopen
import json
import re
class BitcoinTicker(sublime_plugin.EventListener):
def check_for_calc(self):
"""
If enabled in settings, searches the view for a bitcoin amount to convert and
replaces string with converted value.
Supported formats that will be searched:
1 BTC
0.252 btc
.5 btc
13.303 BTC
"""
settings = sublime.load_settings(__name__ + '.sublime-settings')
convert_strings = settings.get('convert_strings')
if convert_strings:
regex = r'([-+]?[0-9]*\.?[0-9]+)\s*btc'
extractions = []
regions = self.view.find_all(regex, sublime.IGNORECASE, "$1", extractions)
added_length = 0
btc_in_usd, exchange_name = self.get_current_exchange()
for index, region in enumerate(regions):
amount = float(extractions[index])
result = btc_in_usd * amount
edit = self.view.begin_edit()
added_length += self.view.insert(edit, region.end() + added_length, " => $%.2f (%s)" % (result, exchange_name))
self.view.end_edit(edit)
def update_status(self):
"""
Updates the view's status bar with the current exchange rate
"""
self.view.set_status('btc', "$%.2f (%s)" % self.get_current_exchange())
def get_current_exchange(self):
"""
Makes API call to exchange (determined via settings) to retrieve latest
exchange rate.
Exchanges:
1 - Mt.Gox
2 - Bitfloor
Returns a tuple consisting of the current exchange rate of 1 bitcoin in USD
as well as the name of the exchange.
"""
settings = sublime.load_settings(__name__ + '.sublime-settings')
exchange = settings.get('exchange')
if exchange == 1:
url = 'http://data.mtgox.com/api/1/BTCUSD/ticker'
req = urlparse(url)
resp = json.load(urlopen(req.geturl()))
exchange_name = 'Mt.Gox'
btc_in_usd = float(resp['return']['last']['value'])
elif exchange == 2:
url = 'https://api.bitfloor.com/ticker/1'
req = urlparse(url)
resp = json.load(urlopen(req.geturl()))
exchange_name = 'Bitfloor'
btc_in_usd = float(resp['price'])
return (btc_in_usd, exchange_name)
def on_load(self, view):
self.view = view
settings = sublime.load_settings(__name__ + '.sublime-settings')
settings.add_on_change('exchange', self.update_status)
settings.add_on_change('convert_strings', self.check_for_calc)
sublime.set_timeout(self.update_status, 10)
def on_post_save(self, view):
self.view = view
sublime.set_timeout(self.update_status, 10)
self.check_for_calc()
|
Python
| 0.999632
|
@@ -113,25 +113,8 @@
rse%0A
- import urllib2%0A
exce
|
11e0bf0c39893077b18a0c395f23e42751cf7b44
|
Update model documentation
|
Luna/Model.py
|
Luna/Model.py
|
#!/usr/bin/env python
#This is free and unencumbered software released into the public domain.
#
#Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
#software, either in source code form or as a compiled binary, for any purpose,
#commercial or non-commercial, and by any means.
#
#In jurisdictions that recognize copyright laws, the author or authors of this
#software dedicate any and all copyright interest in the software to the public
#domain. We make this dedication for the benefit of the public at large and to
#the detriment of our heirs and successors. We intend this dedication to be an
#overt act of relinquishment in perpetuity of all present and future rights to
#this software under copyright law.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
#ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
#WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#For more information, please refer to <https://unlicense.org/>
"""
Provides a system for the model part of the model-view-presenter paradigm.
For a safe model system, every model should follow the following rules:
- All data must be private inside a class (with field names starting with two
underscores).
- All functions that change the data must have the ``setter`` decorator.
"""
from functools import wraps #To retain the documentation and name of the wrapped functions by these decorators.
from inspect import getargspec #Getting the number of arguments of a function, and setting up listeners for every member.
from weakref import WeakKeyDictionary,WeakSet #To automatically remove listeners and signallers if their class instances are removed.
from Luna.Logger import warning
def model(originalClass):
"""
.. function:: model(originalClass)
Modifies a class such that it becomes part of the model.
This adds signals for each of the class' members, and a ``listenTo`` method.
Using the ``listenTo`` method, a different function can be registered to be
called every time a certain member changes. This is intended to be used by a
viewer to update its view of the model towards the user.
:param originalClass: The class to turn into a part of the model.
:return: The same class, but with added hooks into every of its members that
can be listened to.
"""
originalInit = originalClass.__init__
@wraps(originalClass.__init__)
def newInit(self,*args,**kwargs): #Create a new __init__ function.
self.__listeners = {}
for member in dir(self): #Add a signal for all static members.
#We're creating a WeakKeyDictionary that contains lists of methods.
#These are keyed by the instance of the method, which is a weak reference.
#So if the instance is removed, the methods are removed too.
self.__listeners[member] = WeakKeyDictionary() #Keyed by instance, values are lists of functions.
originalInit(self,*args,**kwargs)
originalClass.__init__ = newInit #Replace the old __init__ with the new one.
originalSetAttr = originalClass.__setattr__ #Create a new __setattr__ function.
@wraps(originalSetAttr)
def newSetAttr(self,name,value):
originalSetAttr(self,name,value)
if name == "__listeners": #Don't want this to be triggered before the listener construction has been set up.
return
if not name in self.__listeners: #Attribute added dynamically. Add a signal for it.
self.__listeners[name] = WeakKeyDictionary()
for instance in self.__listeners[name]: #Call anything that listens to this member.
for listener in self.__listeners[name][instance]:
listener()
originalClass.__setattr__ = newSetAttr #Replace the old __setattr__ with the new one.
def listenTo(self,member,function):
"""
.. function:: listenTo(member,function)
Hooks a function to be called whenever the specified member is changed.
Note that this only works on local members, not on static members.
:param member: The name of the member to listen to, as string.
:param function: The function to call whenever that member is changed.
"""
if not member in self.__listeners:
warning("No member \"{member}\" found to listen to.",member = member)
return
if len(getargspec(function).args) > 1: #Function has arguments. Can't have that.
warning("Listener function {function} must not have any arguments.",function = str(function))
return
if not function.__self__ in self.__listeners[member]:
self.__listeners[member][function.__self__] = []
self.__listeners[member][function.__self__].append(function) #Add this function as listener.
originalClass.listenTo = listenTo #Add the listenTo method.
return originalClass #Return a modified class.
|
Python
| 0
|
@@ -1334,17 +1334,17 @@
m.%0A%0A
-For a saf
+To use th
e mo
@@ -1357,187 +1357,400 @@
stem
-, every model should follow the following rules:%0A- All data must be private inside a class (with field names starting with two%0A underscores).%0A- All functions that change
+ properly, the following code changes are required:%0A- Give all classes that are part of the model the %60%60@model%60%60 decorator.%0A- To let the view update whenever the model is changed, call the %60%60listenTo%60%60%0A function of the model. Supply the member of the model that contains the data,%0A and a reference to the function in the view that will update the view.%0A- The listening functions of
the
-data
+view
mus
@@ -1760,32 +1760,100 @@
ave
-the %60%60setter%60%60 decorator
+no parameters other than%0A %60%60self%60%60. Otherwise they can't be called by the signalling system
.%0A%22%22
|
df0434112364e984cff3fc6d00ea53335d90477a
|
Fix caclulation of lambda. Lambda should use B_theta at edge, here called b_theta_vacuum.
|
external_stability.py
|
external_stability.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 08 20:42:30 2014
@author: Jens von der Linden
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future.builtins import (ascii, bytes, chr, dict, filter, hex, input,
int, map, next, oct, open, pow, range, round,
str, super, zip)
"""Python 3.x compatibility"""
import scipy.special as spec
from scipy.interpolate import splev
import analytic_condition as ac
def external_stability(params, xi, xi_der, dim_less=False):
r"""
Returns external external stability and dW.
"""
a = params['a']
b_z = splev(a, params['b_z'])
b_theta = splev(a, params['b_theta'])
m = params['m']
k = params['k']
magnetic_potential_energy_ratio = params['magnetic_potential_energy_ratio']
if params['b'] == 'infinity':
lambda_term = lambda_infinity(**{'a': a, 'k': k, 'm': m})
else:
assert isinstance(params['b'], float), "b must be 'infinity' or of \
type float."
lambda_term = lambda_boundary(**{'a': a, 'b': b, 'k': k, 'm': m})
f_term = capital_f(**{'a': a, 'k': k, 'm': m, 'b_theta': b_theta,
'b_z': b_z})
f_adjoint_term = f_adjoint(**{'a': a, 'k': k, 'm': m, 'b_theta': b_theta,
'b_z': b_z})
k_0_sq_term = k_0(**{'k': k, 'm': m, 'a': a})
term1 = f_term**2*a*xi_der/(k_0_sq_term*xi)
term2 = f_term*f_adjoint_term/(k_0_sq_term)
term3 = a**2*f_term**2*lambda_term
delta_w = (term1+term2*term3)*xi**2
if dim_less:
delta_w = magnetic_potential_energy_ratio * delta_w
stable = delta_w > 0
return stable, delta_w
def lambda_infinity(a, k, m):
r"""
Return lambda term for wall at infinity.
"""
k_a = spec.kv(m, abs(k)*a)
k_a_prime = spec.kvp(m, abs(k)*a)
return -k_a/(abs(k*a)*k_a_prime)
def lambda_boundary(a, b, k, m):
r"""
Return lambda term for wall at radius b.
"""
k_a = spec.kv(m, abs(k)*a)
k_a_prime = spec.kvp(m, abs(k)*a)
k_b_prime = spec.kvp(m, abs(k)*b)
i_a = spec.iv(m, abs(k)*a)
i_a_prime = spec.ivp(m, abs(k)*a)
i_b_prime = spec.ivp(m, abs(k)*b)
factor1 = -k_a/(abs(k*a)*k_a_prime)
factor2_num = 1. - k_b_prime*i_a/(i_b_prime*k_a)
factor2_denom = 1. - k_b_prime*i_a_prime/(i_b_prime*k_a_prime)
return factor1*factor2_num/factor2_denom
def k_0(k, m, a):
r"""
Return k_0 term.
"""
return k**2 + m**2 / a**2
def f_adjoint(a, k, m, b_theta, b_z):
r"""
Return adjoint F.
"""
return k*b_z - m*b_theta/a
def capital_f(a, k, m, b_theta, b_z):
r"""
Return F.
"""
return k*b_z + m*b_theta/a
def external_stability_from_notes(params, xi, xi_der, dim_less=False):
a = params['a']
b_z = splev(a, params['b_z'])
b_theta = splev(a, params['b_theta'])
m = params['m']
k_bar = params['k']
magnetic_potential_energy_ratio = params['magnetic_potential_energy_ratio']
term_params = {'a': a, 'k_bar': k_bar, 'm': m, 'b_z': b_z,
'b_theta': b_theta, 'xi': xi, 'xi_der': xi_der}
delta_w = (plasma_term_from_notes(**term_params) -
vacuum_term_from_notes(**term_params))
if dim_less:
delta_w = magnetic_potential_energy_ratio * delta_w
stable = delta_w > 0
return stable, delta_w
def plasma_term_from_notes(a, k_bar, m, b_z, b_theta, xi, xi_der):
r"""
Returns plasma energy term as in my derivation.
"""
f_term = a*(k_bar*b_z + m*b_theta)**2/(k_bar**2 + m**2)
h_term = (k_bar**2*b_z**2 - m**2*b_theta**2)/(k_bar**2 + m**2)
return xi**2 * (f_term*xi_der / xi + h_term)
def vacuum_term_from_notes(a, k_bar, m, b_z, b_theta, xi, xi_der):
r"""
Returns vacuum energy term as in my derivation.
"""
k_a = spec.kv(m, abs(k_bar))
k_a_prime = spec.kvp(m, abs(k_bar))
term1 = (k_bar*b_z + m*b_theta)**2
term2 = xi**2/k_bar*k_a/k_a_prime
return term1*term2
def external_stability_from_analytic_condition(params, xi, xi_der,
dim_less=False):
r"""
"""
a = params['a']
b_z = splev(a, params['b_z'])
b_theta_vacuum = splev(a, params['b_theta'])
m = -params['m']
k_bar = params['k']
core_radius = params['core_radius']
b_theta_plasma = splev(core_radius, params['b_theta'])
epsilon = b_theta_plasma / b_theta_vacuum
lambda_bar = 2*b_theta_plasma / (b_z*a)
delta = xi_der*a / xi
dW = ac.conditions_without_interface(k_bar, lambda_bar, epsilon, m, delta)
stable = dW > 0
return stable, dW
|
Python
| 0.000002
|
@@ -4553,22 +4553,22 @@
b_theta_
-plasma
+vacuum
/ (b_z*
|
93ad5396bb1d574c86a6b3323199e75fe3bb34f4
|
implement protection for non existing directories
|
PyAnalysisTools/base/ShellUtils.py
|
PyAnalysisTools/base/ShellUtils.py
|
import shutil
import os
import subprocess
def make_dirs(path):
path = os.path.expanduser(path)
if os.path.exists(path):
return
try:
os.makedirs(path)
except OSError as e:
raise OSError
def resolve_path_from_symbolic_links(symbolic_link, relative_path):
def is_symbolic_link(path):
return os.path.islink(path)
if symbolic_link is None or relative_path is None:
return relative_path
if os.path.isabs(relative_path):
return relative_path
if not symbolic_link.endswith("/"):
symbolic_link += "/"
top_level_dir = symbolic_link.split("/")
for n in range(1, len(top_level_dir)):
if is_symbolic_link("/".join(top_level_dir[:-n])):
return os.path.abspath(os.path.join(symbolic_link, relative_path))
return relative_path
def move(src, dest):
try:
shutil.move(src, dest)
except IOError:
raise
def copy(src, dest):
try:
shutil.copy(src, dest)
except:
raise
def remove_directory(path, safe=False):
if safe:
try:
os.removedirs(path)
except OSError:
raise
else:
try:
shutil.rmtree(path)
except OSError as e:
raise e
def source(script_name):
pipe = subprocess.Popen(". %s; env" % script_name, stdout=subprocess.PIPE, shell=True)
output = pipe.communicate()[0]
output = filter(lambda l: len(l.split("=")) == 2, output.splitlines())
env = dict((line.split("=", 1) for line in output))
os.environ.update(env)
|
Python
| 0
|
@@ -1052,24 +1052,72 @@
afe=False):%0A
+ if not os.path.exists(path):%0A return%0A
if safe:
|
188affe12f31973741ae9b429d8aed757fff0d85
|
Fixing timestamp = sec * 1000
|
rockmylight/rockmylight/rml/views.py
|
rockmylight/rockmylight/rml/views.py
|
from django.shortcuts import render
from django.http import JsonResponse
import time
# Create your views here.
def main(request):
context = {}
return render(request, 'rml/main.html', context)
def jam(request):
context = {}
return render(request, 'rml/jam.html', context)
# API part
INTERVAL = 0.5
NUM_OF_FRAMES = 120
COLORS = ['002b36', '073642', '586e75', '657b83',
'839496', '93a1a1', 'eee8d5', 'fdf6e3']
def next_color(color):
index = COLORS.index(color)
if index + 1 == len(COLORS):
return COLORS[0]
return COLORS[index + 1]
def api_dj(request, session_id=1):
data = {}
# number of connected clients in the grid
data['num_of_clients'] = 6
data['frames'] = []
start_time = int(time.time())
color = COLORS[0]
for frame_index in range(NUM_OF_FRAMES):
frame = {}
frame['timestamp'] = start_time * 1000 + frame_index * INTERVAL
frame['color'] = color
color = next_color(color)
data['frames'].append(frame)
repsonse = JsonResponse(data)
return repsonse
|
Python
| 0.998776
|
@@ -878,16 +878,17 @@
amp'%5D =
+(
start_ti
@@ -894,15 +894,8 @@
ime
-* 1000
+ fr
@@ -914,16 +914,24 @@
INTERVAL
+) * 1000
%0A
|
bd3af9f250dc49a0872447b57493c69ba6dad09c
|
Fix encoding
|
RadioBox/RadioBox/player/player.py
|
RadioBox/RadioBox/player/player.py
|
import vlc
import urllib2
import threading
class Player:
"""Represent radio Player."""
def __init__(self):
self.vlc_player = None
self.error_callback = None
self.title_recieved_callback = None
self.media_state_changed_callback = None
self._init_vlc_player()
def _init_vlc_player(self):
"""Init player instance."""
if self.vlc_player is not None:
self.vlc_player.release()
self.vlc_player = vlc.MediaPlayer()
self.current_station = None
if self.media_state_changed_callback is not None:
self.media_state_changed_callback(self.vlc_player.get_state())
def _media_state_changed(self, event):
"""Callback on state changed event.
Args:
event (Event): Event instance.
"""
if self.vlc_player is not None and self.media_state_changed_callback is not None:
self.media_state_changed_callback(self.vlc_player.get_state())
if self.vlc_player is not None and self.vlc_player.get_state() == vlc.State.Error:
if (self.error_callback is not None):
if self.current_station is not None:
self.error_callback('Error playing stream \'{0}\''.format(self.current_station.name))
else:
self.error_callback('Unknown error')
def _get_stream_title_sync(self):
"""Get radio stream title sync. Calls title_recieved_callback when title received."""
if self.current_station is None:
return
request = urllib2.Request(self.current_station.url)
request.add_header('Icy-MetaData', 1)
try:
response = urllib2.urlopen(request)
icy_metaint_header = response.headers.get('icy-metaint')
icy_description_header = response.headers.get('icy-description')
if icy_metaint_header is not None:
metaint = int(icy_metaint_header)
read_buffer = metaint+255
content = response.read(read_buffer)
title = content[metaint:].split("'")[1].encode('utf8')
if self.title_recieved_callback is not None:
self.title_recieved_callback(title)
elif icy_description_header is not None:
if self.title_recieved_callback is not None:
self.title_recieved_callback(icy_description_header.encode('utf8'))
else:
if self.title_recieved_callback is not None:
self.title_recieved_callback(' - ')
except:
if self.title_recieved_callback is not None:
self.title_recieved_callback(' - ')
def is_playing(self):
"""Is music playing.
Returns:
true - if music playing, false - otherwise.
"""
if self.vlc_player is None:
self._init_vlc_player()
return self.vlc_player.get_state() == vlc.State.Playing
def play(self):
"""Play radio station if it is setted."""
if self.vlc_player is None:
self._init_vlc_player()
if self.vlc_player.get_media() is not None:
self.vlc_player.play()
def stop(self):
"""Stop radio station."""
if self.vlc_player is None:
self._init_vlc_player()
if self.is_playing():
self.vlc_player.stop()
def set_station(self, station):
"""Set station given in parameter.
Args:
station (Station): station to set.
"""
self.current_station = station
if self.current_station is None:
self._init_vlc_player()
else:
if self.vlc_player is None:
self._init_vlc_player()
if self.is_playing():
self.stop()
self.vlc_player.set_mrl(self.current_station.url)
self.vlc_player.get_media().event_manager().event_attach(
vlc.EventType.MediaStateChanged, self._media_state_changed)
self.play()
else:
self.vlc_player.set_mrl(self.current_station.url)
self.vlc_player.get_media().event_manager().event_attach(
vlc.EventType.MediaStateChanged, self._media_state_changed)
def set_volume(self, value):
"""Set audio volume.
Args:
value (int): volume value.
"""
if 0 <= value <= 100:
if self.vlc_player is None:
self._init_vlc_player()
self.vlc_player.audio_set_volume(value)
def get_stream_title(self):
"""Get radio stream title async. Calls title_recieved_callback when title received."""
if self.is_playing():
threading.Thread(target=self._get_stream_title_sync).start()
def set_error_callback(self, callback):
"""Set callback when error appearin playing station.
Args:
callback (func): callback to set.
"""
self.error_callback = callback
def set_title_recieved_callback(self, callback):
"""Set callback when station title received.
Args:
callback (func): callback to set.
"""
self.title_recieved_callback = callback
def set_media_state_changed_callback(self, callback):
"""Set callback when media state changed.
Args:
callback (func): callback to set.
"""
self.media_state_changed_callback = callback
|
Python
| 0.998377
|
@@ -2126,31 +2126,16 @@
(%22'%22)%5B1%5D
-.encode('utf8')
%0A%0A
@@ -2434,23 +2434,8 @@
ader
-.encode('utf8')
)%0A
|
fc48d25a4fa5b3b623e2796ce6ddb3aa40e99163
|
Add test for drift vector C lib
|
CDJSVis/tests/test_filtering.py
|
CDJSVis/tests/test_filtering.py
|
"""
Unit tests for filtering C module
"""
import unittest
import numpy as np
from ..filtering import _filtering
################################################################################
class TestSpecieFilterC(unittest.TestCase):
"""
Test specie filter
"""
def test_specieFilter(self):
"""
Specie filter
"""
N = 5
specieArray = np.asarray([0,0,1,0,1], dtype=np.int32)
NScalars = 0
fullScalars = np.asarray([], dtype=np.float64)
visibleAtoms = np.arange(N, dtype=np.int32)
visibleSpecieArray = np.asarray([1], dtype=np.int32)
nvis = _filtering.specieFilter(visibleAtoms, visibleSpecieArray, specieArray, NScalars, fullScalars)
self.assertEqual(nvis, 2)
self.assertEqual(visibleAtoms[0], 2)
self.assertEqual(visibleAtoms[1], 4)
visibleAtoms = np.arange(N, dtype=np.int32)
visibleSpecieArray = np.asarray([0], dtype=np.int32)
nvis = _filtering.specieFilter(visibleAtoms, visibleSpecieArray, specieArray, NScalars, fullScalars)
self.assertEqual(nvis, 3)
self.assertEqual(visibleAtoms[0], 0)
self.assertEqual(visibleAtoms[1], 1)
self.assertEqual(visibleAtoms[2], 3)
visibleAtoms = np.arange(N, dtype=np.int32)
visibleSpecieArray = np.asarray([0,1], dtype=np.int32)
nvis = _filtering.specieFilter(visibleAtoms, visibleSpecieArray, specieArray, NScalars, fullScalars)
self.assertEqual(nvis, 5)
self.assertEqual(visibleAtoms[0], 0)
self.assertEqual(visibleAtoms[1], 1)
self.assertEqual(visibleAtoms[2], 2)
self.assertEqual(visibleAtoms[3], 3)
self.assertEqual(visibleAtoms[4], 4)
visibleSpecieArray = np.asarray([], dtype=np.int32)
nvis = _filtering.specieFilter(visibleAtoms, visibleSpecieArray, specieArray, NScalars, fullScalars)
self.assertEqual(nvis, 0)
def test_specieFilterFullScalars(self):
"""
Specie filter full scalars
"""
N = 5
specieArray = np.asarray([0,0,1,0,1], dtype=np.int32)
NScalars = 2
visibleAtoms = np.arange(N, dtype=np.int32)
visibleSpecieArray = np.asarray([0], dtype=np.int32)
fullScalars = np.arange(NScalars*N, dtype=np.float64)
nvis = _filtering.specieFilter(visibleAtoms, visibleSpecieArray, specieArray, NScalars, fullScalars)
self.assertEqual(nvis, 3)
self.assertEqual(visibleAtoms[0], 0)
self.assertEqual(visibleAtoms[1], 1)
self.assertEqual(visibleAtoms[2], 3)
self.assertEqual(fullScalars[0], 0)
self.assertEqual(fullScalars[1], 1)
self.assertEqual(fullScalars[2], 3)
self.assertEqual(fullScalars[5], 5)
self.assertEqual(fullScalars[6], 6)
self.assertEqual(fullScalars[7], 8)
visibleAtoms = np.arange(N, dtype=np.int32)
visibleSpecieArray = np.asarray([1], dtype=np.int32)
fullScalars = np.arange(NScalars*N, dtype=np.float64)
nvis = _filtering.specieFilter(visibleAtoms, visibleSpecieArray, specieArray, NScalars, fullScalars)
self.assertEqual(nvis, 2)
self.assertEqual(visibleAtoms[0], 2)
self.assertEqual(visibleAtoms[1], 4)
self.assertEqual(fullScalars[0], 2)
self.assertEqual(fullScalars[1], 4)
self.assertEqual(fullScalars[5], 7)
self.assertEqual(fullScalars[6], 9)
|
Python
| 0
|
@@ -215,17 +215,16 @@
ieFilter
-C
(unittes
@@ -3512,8 +3512,875 @@
%5B6%5D, 9)%0A
+%0A################################################################################%0A%0Aclass TestCalculateDrift(unittest.TestCase):%0A %22%22%22%0A Test calculate drift vector%0A %0A %22%22%22%0A def test_calculateDrift(self):%0A %22%22%22%0A Calculate drift vector%0A %0A %22%22%22%0A N = 2%0A p = np.asarray(%5B2.0, 0.0, 99.0, 98.0, 1.0, 1.0%5D, dtype=np.float64)%0A r = np.asarray(%5B3.0, 0.5, 1.0, 97.0, 99.0, 0.5%5D, dtype=np.float64)%0A cellDims = np.asarray(%5B100, 100, 100%5D, dtype=np.float64)%0A pbc = np.ones(3, np.int32)%0A driftVector = np.zeros(3, np.float64)%0A %0A ret = _filtering.calculate_drift_vector(N, p, r, cellDims, pbc, driftVector)%0A %0A self.assertEqual(ret, 0)%0A self.assertEqual(driftVector%5B0%5D, 0.0)%0A self.assertEqual(driftVector%5B1%5D, 0.75)%0A self.assertEqual(driftVector%5B2%5D, -0.75)%0A
|
632f70d64bac45365974db834a3a6ddcb16e13ad
|
Add GuardianModelMixin in users/models.py
|
feder/users/models.py
|
feder/users/models.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib.auth.models import AbstractUser
from django.utils.encoding import python_2_unicode_compatible
# from django.db import models
# from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class User(AbstractUser):
def __str__(self):
return self.username
|
Python
| 0
|
@@ -193,98 +193,54 @@
ble%0A
-%0A%0A#
from
-django.db import models%0A# from django.utils.translation import ugettext_lazy as _
+guardian.mixins import GuardianUserMixin%0A
%0A%0A@p
@@ -277,16 +277,35 @@
ss User(
+GuardianUserMixin,
Abstract
|
1386527d268b99aac41ee8ddb047d60fe4948274
|
Modify exception handler
|
appkit/app.py
|
appkit/app.py
|
#!/usr/bin/env python
# coding=utf8
from gi.repository import Gtk, WebKit
import urlparse
import os
import tempfile
import mimetypes
import codecs
import sys
import re
Gtk.init('')
class UrlMappingError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class App(object):
"""App
Application class
"""
url_pattern = dict()
document = None # Root DOM
def __init__(self, app_path=None):
app_path = os.path.abspath(os.path.dirname(sys.argv[0]))
window = Gtk.Window()
window.set_title('AppKit')
webkit_web_view = WebKit.WebView()
settings = webkit_web_view.get_settings()
settings.set_property('enable-universal-access-from-file-uris', True)
settings.set_property('enable-developer-extras', True)
settings.set_property('default-encoding', 'utf-8')
window.set_default_size(800, 600)
scrollWindow = Gtk.ScrolledWindow()
scrollWindow.add(webkit_web_view)
window.add(scrollWindow)
window.connect('destroy', Gtk.main_quit)
webkit_web_view.connect(
'notify::load-status',
self.on_notify_load_status)
webkit_web_view.connect(
'resource-request-starting',
self.on_web_view_resource_request_starting)
webkit_web_view.connect(
'resource-response-received',
self.on_web_view_resource_response_received)
webkit_web_view.connect(
'resource-load-finished',
self.on_resource_load_finished)
webkit_web_view.connect(
'navigation_policy_decision_requested',
self.on_navigation_policy_decision_requested)
webkit_main_frame = webkit_web_view.get_main_frame()
webkit_main_frame.connect(
'resource-request-starting',
self.on_web_frame_resource_request_starting)
webkit_main_frame.connect(
'resource-response-received',
self.on_web_frame_resource_response_received)
window.show_all()
self.window = window
self.webkit_web_view = webkit_web_view
self.webkit_main_frame = webkit_main_frame
self.app_path = app_path
def url_map_to_function(self, url):
match_list = list()
for pattern in self.url_pattern:
m = re.match(pattern, url)
if m:
match_list.append(m)
if len(match_list) == 0:
raise UrlMappingError('Can\'t find matched url')
elif len(match_list) > 1:
raise UrlMappingError('Found more than one matched urls')
m = match_list[0]
args = list(m.groups())
kw = m.groupdict()
for value in kw.values():
args.remove(value)
return self.url_pattern[m.re.pattern](*args, **kw)
def route(self, pattern=None):
def decorator(fn):
self.url_pattern[pattern] = fn
return fn
return decorator
def on_notify_load_status(self, webkitView, *args, **kwargs):
"""Callback function when the page was loaded completely
FYI, this function will be called after $(document).ready()
in jQuery
"""
status = webkitView.get_load_status()
if status == status.FINISHED:
print 'Load finished'
def on_navigation_policy_decision_requested(
self,
webkit_web_view,
webkit_web_frame,
webkit_network_request,
webkit_web_navigation_action,
webkit_web_policy_dicision):
print 'navigation_policy_decision_requested'
def on_web_view_resource_request_starting(
self,
web_view,
web_frame,
web_resource,
network_request,
network_response=None):
print 'web_view_resource_request_starting'
def on_web_view_resource_response_received(
self,
web_view,
web_frame,
web_resource,
network_response,
*arg, **kw):
print 'web_view Resource response received'
def on_web_frame_resource_request_starting(
self,
web_frame,
web_resource,
network_request,
network_response=None):
print 'web_frame_resource_request_starting'
url = urlparse.unquote(network_request.get_uri())
url = urlparse.urlparse(url.decode('utf-8'))
if url.scheme == 'app':
if url.netloc == '':
result = self.url_map_to_function(url.path)
# Make sure result is <tuple>
if isinstance(result, unicode) or \
isinstance(result, str):
result = (result,)
(content, mimetype) = response(*result)
print type(content)
file_ext = mimetypes.guess_extension(mimetype)
tmp_file_path = tempfile.mkstemp(suffix=file_ext)[1]
f = codecs.open(tmp_file_path, 'w', encoding='utf-8')
f.write(content)
f.close()
network_request.set_uri('file://' + tmp_file_path + '?tmp=1')
elif url.netloc == 'file':
file_path = self.app_path + url.path
file_path = os.path.normcase(file_path)
network_request.set_uri('file://' + file_path)
def on_web_frame_resource_response_received(
self,
web_frame,
web_resource,
network_response,
*arg, **kw):
print 'web_frame Resource response received'
url = urlparse.urlparse(network_response.get_uri())
url = urlparse.urlparse(url.path)
query = urlparse.parse_qs(url.query)
if 'tmp' in query:
print url.path
os.remove(url.path)
def on_resource_load_finished(
self,
web_view, web_frame, web_resource,
*args, **kw):
print 'resource load finished'
def run(self):
self.webkit_web_view.load_uri('app:///')
Gtk.main()
def _get_app_path(self, path='.'):
print self.__file__
def request_handler():
pass
def response(content=None, mimetype='text/html'):
return (content, mimetype)
|
Python
| 0.000002
|
@@ -181,158 +181,8 @@
)%0A%0A%0A
-class UrlMappingError(Exception):%0A def __init__(self, value):%0A self.value = value%0A%0A def __str__(self):%0A return repr(self.value)%0A%0A%0A
clas
@@ -2347,12 +2347,11 @@
st)
-== 0
+%3E 1
:%0A
@@ -2370,36 +2370,38 @@
ise
-UrlMappingError('Can%5C't find
+Exception('Found more than one
mat
@@ -2408,27 +2408,29 @@
ched url
+s
')%0A
+%0A
elif len
@@ -2425,88 +2425,99 @@
-elif len(match_list) %3E 1:%0A raise UrlMappingError('Found more than one
+try:%0A m = match_list%5B0%5D%0A except:%0A raise Exception('Can%5C't find
mat
@@ -2528,39 +2528,12 @@
url
-s
')%0A%0A
- m = match_list%5B0%5D%0A
|
81b2519f575d35d2f1b735bcaef1901539ee06fa
|
refactor mgmt cmd update-toplist to use just CouchDB
|
mygpo/directory/management/commands/update-toplist.py
|
mygpo/directory/management/commands/update-toplist.py
|
from datetime import datetime
from django.core.management.base import BaseCommand
from couchdbkit import ResourceConflict
from mygpo.core.models import Podcast, SubscriberData
from mygpo.users.models import PodcastUserState
from mygpo.utils import progress, multi_request_view
from mygpo.decorators import repeat_on_conflict
class Command(BaseCommand):
def handle(self, *args, **options):
started = datetime.now()
entries = multi_request_view(Podcast, 'core/podcasts_by_oldid', include_docs=True)
total = Podcast.view('core/podcasts_by_oldid', limit=0).total_rows
for n, entry in enumerate(entries):
subscriber_count = self.get_subscriber_count(entry.get_id())
self.update(entry=entry, started=started, subscriber_count=subscriber_count)
progress(n, total)
@repeat_on_conflict(['entry'])
def update(self, entry, started, subscriber_count):
data = SubscriberData(
timestamp = started,
subscriber_count = max(0, subscriber_count),
)
entry.subscribers.append(data)
entry.save()
@staticmethod
def get_subscriber_count(podcast_id):
db = PodcastUserState.get_db()
x = db.view('users/subscriptions_by_podcast',
startkey = [podcast_id, None],
endkey = [podcast_id, {}],
)
return x.count()
|
Python
| 0
|
@@ -81,49 +81,8 @@
nd%0A%0A
-from couchdbkit import ResourceConflict%0A%0A
from
@@ -215,28 +215,8 @@
ress
-, multi_request_view
%0Afro
@@ -328,24 +328,75 @@
*options):%0A%0A
+ # couchdbkit doesn't preserve microseconds%0A
star
@@ -414,103 +414,81 @@
ime.
+utc
now()
-%0A entries = multi_request_view(Podcast, 'core/podcasts_by_oldid', include_docs=True
+.replace(microsecond=0)%0A%0A podcasts = Podcast.all_podcasts(
)%0A
@@ -576,21 +576,23 @@
for n,
-entry
+podcast
in enum
@@ -601,14 +601,15 @@
ate(
-entrie
+podcast
s):%0A
@@ -665,21 +665,23 @@
r_count(
-entry
+podcast
.get_id(
@@ -711,19 +711,23 @@
ate(
-entry=entry
+podcast=podcast
, st
@@ -835,21 +835,23 @@
flict(%5B'
-entry
+podcast
'%5D)%0A
@@ -867,21 +867,23 @@
e(self,
-entry
+podcast
, starte
@@ -904,16 +904,148 @@
count):%0A
+%0A # We've already updated this podcast%0A if started in %5Be.timestamp for e in podcast.subscribers%5D:%0A return%0A%0A
@@ -1178,24 +1178,25 @@
)%0A
+%0A
entry.su
@@ -1187,21 +1187,23 @@
-entry
+podcast
.subscri
@@ -1210,20 +1210,73 @@
bers
-.append(data
+ = sorted(podcast.subscribers + %5Bdata%5D, key=lambda e: e.timestamp
)%0A
@@ -1285,13 +1285,15 @@
-entry
+podcast
.sav
@@ -1476,16 +1476,19 @@
tartkey
+
= %5Bpodca
@@ -1526,16 +1526,19 @@
ndkey
+
= %5Bpodca
@@ -1549,16 +1549,121 @@
d, %7B%7D%5D,%0A
+ reduce = True,%0A group = True,%0A group_level = 2,%0A
|
689ac4c7bea84cbebe2c0183d70cf78ecd64bdc7
|
fix pylint error
|
tensorflow/python/keras/layers/preprocessing/text_vectorization_distribution_test.py
|
tensorflow/python/keras/layers/preprocessing/text_vectorization_distribution_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution tests for keras.layers.preprocessing.text_vectorization."""
import numpy as np
from tensorflow.python import keras
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations as ds_combinations
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.framework import config
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_combinations as combinations
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.distribute import strategy_combinations
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.keras.layers.preprocessing import text_vectorization
from tensorflow.python.platform import test
@ds_combinations.generate(
combinations.combine(
strategy=strategy_combinations.all_strategies +
strategy_combinations.multi_worker_mirrored_strategies,
mode=["eager"]))
class TextVectorizationDistributionTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def test_distribution_strategy_output(self, strategy):
# TODO(b/180614455): remove this check when MLIR bridge is always enabled.
if backend.is_tpu_strategy(strategy):
self.skipTest("This test needs MLIR bridge on TPU.")
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_array).batch(
2, drop_remainder=True)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
config.set_soft_device_placement(True)
with strategy.scope():
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_dataset)
self.assertAllEqual(expected_output, output_dataset)
def test_distribution_strategy_output_with_adapt(self, strategy):
# TODO(b/180614455): remove this check when MLIR bridge is always enabled.
if backend.is_tpu_strategy(strategy):
self.skipTest("This test needs MLIR bridge on TPU.")
vocab_data = [[
"earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
"and", "fire"
]]
vocab_dataset = dataset_ops.Dataset.from_tensors(vocab_data)
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_array).batch(
2, drop_remainder=True)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
config.set_soft_device_placement(True)
with strategy.scope():
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.adapt(vocab_dataset)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_dataset)
self.assertAllEqual(expected_output, output_dataset)
if __name__ == "__main__":
v2_compat.enable_v2_behavior()
multi_process_runner.test_main()
|
Python
| 0.000002
|
@@ -1547,52 +1547,8 @@
ion%0A
-from tensorflow.python.platform import test%0A
%0A%0A@d
|
98c0d5ffbf16f7ef35f366370ab2c7e0d719d84e
|
change timezone
|
hbapi/settings/base.py
|
hbapi/settings/base.py
|
"""
Django settings for hbapi project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import redis
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f8v9+#zv88j6yqx6tp1vhb3bz#n^2aj^)=l4c!!scmv54&pzy1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'djmoney',
'records.apps.RecordsConfig',
'budgets.apps.BudgetsConfig',
'userprofile.apps.UserprofileConfig',
'bootstrap3'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hbapi.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hbapi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {}
AUTH_USER_MODEL = 'userprofile.User'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, '../static'),
]
# Redis keys
REDIS_KEY_USER_TAGS = 'user_tags_%s'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '[%(levelname)s] %(asctime)s %(pathname)s[%(lineno)d] %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
},
'loggers': {
'apps': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
}
}
}
|
Python
| 0.00006
|
@@ -3066,11 +3066,25 @@
= '
-UTC
+America/Vancouver
'%0A%0AU
|
54f8c9e50cdd24532b6c9fe5e6fdf410b60dc79f
|
Change to fresh environment before loading bmis.
|
bmibabel/build.py
|
bmibabel/build.py
|
#! /usr/bin/env python
"""Build a BMI implementation."""
from __future__ import print_function
import os
import sys
import types
import subprocess
import yaml
from scripting.contexts import setenv, homebrew_hidden
from .fetch import load_bmi_components
from .files import install_data_files
from .utils import cd, read_first_of
from .bocca import make_project, build_project, ProjectExistsError
_BUILD_FILES = ['.bmi.yaml', '.bmi.yml', os.path.join('.bmi', 'api.yaml'),
os.path.join('.bmi', 'api.yml')]
def load_script(dir='.'):
"""Load a build script for an API description file.
Parameters
----------
dir : str, optional
Path to folder that contains description file.
Returns
-------
dict
Build description
Raises
------
RuntimeError is the build is not a supported build type.
"""
with cd(dir):
(_, contents) = read_first_of(_BUILD_FILES)
api = yaml.load(contents)
if isinstance(api['build'], dict) and 'brew' in api['build']:
return brew_install_instructions(api['build']['brew'])
else:
return bash_install_instructions(api['build'])
def brew_install_instructions(brewer):
"""Shell commands to install a formula with Homebrew.
Parameters
----------
brewer : dict
Description of Homebrew formula.
Returns
-------
list
Shell commands needed to install the formula.
"""
opts = brewer.get('options', [])
formula = brewer['formula']
if isinstance(opts, types.StringTypes):
opts = [opts]
return [' '.join(['brew', 'install', formula] + opts)]
def bash_install_instructions(script):
"""Shell commands to install something.
Parameters
----------
script : str or iterable
Shell commands as a string or a list of strings.
Returns
-------
list
Shell commands.
"""
if isinstance(script, types.StringTypes):
return [script]
else:
return script
def render_output_block(output, indent=4):
"""Format a block of output text.
Parameters
----------
output : str
Text to format.
indent : int, optional
Number of spaces to indent text.
Returns
-------
str
Formatted text.
"""
lines = output.split(os.linesep)
return os.linesep.join([' ' * indent + line for line in lines])
def execute_build(instructions, prefix='/usr/local'):
"""Build an API from a description.
Parameters
----------
build : dict
Build description.
"""
import pexpect
os.environ['CSDMS_PREFIX'] = prefix
child = pexpect.spawn('bash', echo=False)
prompt = r"\[bmi-babel\]\$ "
child.sendline(r"PS1='[bmi-babel]\$ '")
child.expect([pexpect.TIMEOUT, prompt], timeout=10)
for instruction in instructions:
print('==> %s' % instruction, file=sys.stderr)
child.sendline(instruction + ' || echo FAIL')
i = child.expect([pexpect.TIMEOUT, prompt, 'FAIL'], timeout=300)
print(render_output_block(child.before), file=sys.stderr)
if i == 0 or i == 2:
break
def babel_config(var):
return subprocess.check_output(['babel-config', '--query-var={var}'.format(var=var)]).strip()
def setup_build_env(prefix):
java_home = babel_config('JAVAPREFIX')
build_env = {
'PATH': os.pathsep.join(
[os.path.join(sys.prefix, 'bin'), '/usr/bin', '/bin',
'/usr/sbin', '/etc', '/usr/lib']),
'CC': babel_config('CC'),
'CXX': babel_config('CXX'),
'USER': 'nobody',
'PKG_CONFIG_PATH': os.path.join(sys.prefix, 'lib', 'pkgconfig'),
'PREFIX': prefix,
}
if not java_home:
warnings.warn('JAVA_HOME is not set.')
else:
build_env['PATH'] = os.pathsep.join([
os.path.join(java_home, 'bin'),
build_env['PATH']])
return build_env
def babelize(path_to_bmi, prefix=None, build=True, install=True):
"""Babelize a BMI component.
Parameters
----------
path_to_bmi : str
Path to BMI metadata folder.
prefix : str, optional
Path to installation base.
build : bool, optional
Create a bocca project and then build it.
install : bool, optional
Create, and build a bocca project and then install it.
"""
prefix = prefix or os.path.join(os.sep, 'usr', 'local')
build = build or install
proj = load_bmi_components(path_to_bmi, install_prefix=prefix,
build_api=False)
with homebrew_hidden():
with setenv(setup_build_env(prefix)):
try:
build_dir = make_project(proj, clobber=True)
except ProjectExistsError as error:
print('The specified project (%s) already exists. Exiting.' % error)
return None
if build:
build_project(build_dir, prefix=prefix, install=install)
if install:
proj = load_bmi_components(path_to_bmi, install_prefix=prefix,
build_api=False)
for bmi in proj['bmi']:
install_data_files(bmi['path'], prefix,
include_metadata=True)
def execute_api_build(dir='.', prefix='/usr/local'):
"""Build an API from a file.
Parameters
----------
dir : str, optional
Path to folder that contains description file.
"""
execute_build(build.load_script(dir=dir), prefix=prefix)
|
Python
| 0
|
@@ -3450,16 +3450,58 @@
'bin'),
+ # '/usr/local/gfortran/bin',%0A
'/usr/b
@@ -4506,16 +4506,201 @@
nstall%0A%0A
+ with homebrew_hidden():%0A with setenv(setup_build_env(prefix)):%0A for k, v in os.environ.items():%0A print('%7Bkey%7D=%7Bval%7D'.format(key=k, val=v))%0A%0A
proj
@@ -4781,32 +4781,40 @@
+
build_api=False)
@@ -4818,82 +4818,8 @@
se)%0A
-%0A with homebrew_hidden():%0A with setenv(setup_build_env(prefix)):
%0A
|
85761d00814d1835ace72adb13a43b07b1f5536d
|
Fix issue #18, don't follow symlinks by default
|
botbot/checker.py
|
botbot/checker.py
|
"""Base class for checking file trees"""
import stat
import os
import time
from botbot import problist as pl
class Checker:
"""
Holds a set of checks that can be run on a file to make sure that
it's suitable for the shared directory. Runs checks recursively on a
given path.
"""
# checks is a set of all the checking functions this checker knows of. All
# checkers return a number signifying a specific problem with the
# file specified in the path.
def __init__(self):
self.checks = set() # All checks to perform
self.probs = pl.ProblemList() # List of files with their issues
self.info = {
'files': 0,
'problems': 0,
'time': 0
} # Information about the previous check
def register(self, func):
"""
Add a new checking function to the set, or a list/tuple of
functions.
"""
if hasattr(func, '__call__'):
self.checks.add(func)
else:
for f in list(func):
self.checks.add(f)
def check_tree(self, path):
"""
Run all the checks on every file in the specified path,
recursively. Returns a list of tuples. Each tuple contains 2
elements: the first is the path of the file, and the second is
a list of issues with the file at that path. If link is True,
follow symlinks.
"""
path = os.path.abspath(path)
start = path # Currently unused, could be used to judge depth
to_check = [path]
extime = time.time()
while len(to_check) > 0:
chk_path = to_check.pop()
try:
if stat.S_ISDIR(os.stat(chk_path).st_mode):
new = [os.path.join(chk_path, f) for f in os.listdir(chk_path)]
to_check.extend(new)
else:
self.check_file(chk_path)
except FileNotFoundError:
self.probs.add_problem(chk_path, 'PROB_BROKEN_LINK')
except PermissionError:
self.probs.add_problem(chk_path, 'PROB_DIR_NOT_WRITABLE')
self.info['time'] = time.time() - extime
def check_file(self, chk_path):
"""Check a file against all checkers"""
for check in self.checks:
prob = check(chk_path)
if prob is not None:
self.probs.add_problem(chk_path, prob)
self.info['problems'] += 1
self.info['files'] += 1
def pretty_print_issues(self, verbose):
"""
Print a list of issues with their fixes. Only print issues which
are in problist, unless verbose is true, in which case print
all messages.
TODO: Move into ReportWriter
"""
# Print general statistics
infostring = "Found {problems} problems over {files} files in {time:.2f} seconds."
print(infostring.format(**self.info))
def is_link(path):
"""Check if the given path is a symbolic link"""
return os.path.islink(path) or os.path.abspath(path) != os.path.realpath(path)
|
Python
| 0
|
@@ -1093,24 +1093,36 @@
e(self, path
+, link=False
):%0A %22
@@ -1705,16 +1705,98 @@
if
+not link and is_link(chk_path):%0A continue%0A elif
stat.S_I
@@ -2865,17 +2865,16 @@
tWriter%0A
-%0A
|
48e09e446943b695cc7208bc2a7cad7e53437957
|
Bump to 0.1.1 since I apparently pushed 0.1.0 at some point =/
|
botox/__init__.py
|
botox/__init__.py
|
__version__ = "0.1.0"
|
Python
| 0
|
@@ -12,11 +12,11 @@
= %220.1.
-0
+1
%22%0A
|
b56eb07e06c41dd46d7adaeb0a9b9863c3e165c6
|
Fix mono
|
executors/mono_executor.py
|
executors/mono_executor.py
|
import sys
import os
import re
import errno
from collections import defaultdict
from cptbox import CHROOTSecurity, ALLOW
from cptbox.syscalls import *
from .base_executor import CompiledExecutor
from judgeenv import env
CS_FS = ['.*\.so', '/proc/(?:self/|xen)', '/dev/shm/', '/proc/stat', '/usr/lib/mono',
'/etc/nsswitch.conf$', '/etc/passwd$', '/etc/mono/', '/dev/null$', '.*/.mono/',
'/sys/']
WRITE_FS = ['/proc/self/task/\d+/comm$', '/dev/shm/mono\.\d+$']
UNLINK_FS = re.compile('/dev/shm/mono.\d+$')
class MonoExecutor(CompiledExecutor):
name = 'MONO'
nproc = -1 # If you use Mono on Windows you are doing it wrong.
def get_compiled_file(self):
return self._file('%s.exe' % self.problem)
def get_cmdline(self):
return ['mono', self._executable]
def get_executable(self):
return env['runtime']['mono']
def get_security(self):
fs = CS_FS + [self._dir]
sec = CHROOTSecurity(fs)
sec[sys_sched_getaffinity] = ALLOW
sec[sys_statfs] = ALLOW
sec[sys_ftruncate64] = ALLOW
sec[sys_clock_getres] = ALLOW
sec[sys_socketcall] = ALLOW
sec[sys_sched_yield] = ALLOW
fs = sec.fs_jail
write_fs = re.compile('|'.join(WRITE_FS))
writable = defaultdict(bool)
writable[1] = writable[2] = True
def handle_open(debugger):
file = debugger.readstr(debugger.uarg0)
if fs.match(file) is None:
print>>sys.stderr, 'Not allowed to access:', file
return False
can = write_fs.match(file) is not None
def update():
writable[debugger.result] = can
debugger.on_return(update)
return True
def handle_close(debugger):
writable[debugger.arg0] = False
return True
def handle_dup(debugger):
writable[debugger.arg1] = writable[debugger.arg0]
return True
def handle_write(debugger):
return writable[debugger.arg0]
def handle_ftruncate(debugger):
return writable[debugger.arg0]
def handle_kill(debugger):
# Mono likes to signal other instances of it, but doesn't care if it fails.
def kill_return():
debugger.result = -errno.EPERM
if debugger.arg0 != debugger.pid:
debugger.syscall = debugger.getpid_syscall
debugger.on_return(kill_return)
return True
def unlink(debugger):
path = debugger.readstr(debugger.uarg0)
if UNLINK_FS.match(path) is None:
print 'Not allowed to unlink:', UNLINK_FS
return False
return True
def handle_socket(debugger):
def socket_return():
debugger.result = -errno.EACCES
debugger.syscall = debugger.getpid_syscall
debugger.on_return(socket_return)
return True
sec[sys_open] = handle_open
sec[sys_close] = handle_close
sec[sys_dup2] = handle_dup
sec[sys_dup3] = handle_dup
sec[sys_write] = handle_write
sec[sys_ftruncate] = handle_ftruncate
sec[sys_kill] = handle_kill
sec[sys_tgkill] = handle_kill
sec[sys_unlink] = unlink
sec[sys_socket] = handle_socket
return sec
@classmethod
def initialize(cls):
if 'mono' not in env['runtime'] or not os.path.isfile(env['runtime']['mono']):
return False
return super(MonoExecutor, cls).initialize()
|
Python
| 0.000018
|
@@ -3440,16 +3440,30 @@
lize(cls
+, sandbox=True
):%0A
@@ -3620,10 +3620,25 @@
tialize(
+sandbox=sandbox
)%0A
|
7b46291dcfd4dd77fd4b41f22d1a382dac722c4a
|
allow store_temp_file to take a file-like object for chunked copy
|
lib/cuckoo/common/utils.py
|
lib/cuckoo/common/utils.py
|
# Copyright (C) 2010-2012 Cuckoo Sandbox Developers.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import ntpath
import string
import tempfile
import xmlrpclib
from datetime import datetime
from lib.cuckoo.common.exceptions import CuckooOperationalError
def create_folders(root=".", folders=[]):
"""Create directories.
@param root: root path.
@param folders: folders list to be created.
@raise CuckooOperationalError: if fails to create folder.
"""
for folder in folders:
if os.path.exists(os.path.join(root, folder)):
continue
else:
create_folder(root, folder)
def create_folder(root=".", folder=None):
"""Create directory.
@param root: root path.
@param folder: folder name to be created.
@raise CuckooOperationalError: if fails to create folder.
"""
if not os.path.exists(os.path.join(root, folder)) and folder:
try:
folder_path = os.path.join(root, folder)
os.makedirs(folder_path)
except OSError as e:
raise CuckooOperationalError("Unable to create folder: %s"
% folder_path)
def convert_char(c):
"""Escapes characters.
@param c: dirty char.
@return: sanitized char.
"""
if c in string.printable:
return c
else:
return r'\x%02x' % ord(c)
def convert_to_printable(s):
"""Convert char to printable.
@param s: string.
@return: sanitized string.
"""
return ''.join(convert_char(c) for c in s)
def datetime_to_iso(timestamp):
"""Parse a datatime string and returns a datetime in iso format.
@param timestamp: timestamp string
@return: ISO datetime
"""
return datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S').isoformat()
def get_filename_from_path(path):
"""Cross-platform filename extraction from path.
@param path: file path.
@return: filename.
"""
dirpath, filename = ntpath.split(path)
return filename if filename else ntpath.basename(dirpath)
def store_temp_file(filedata, filename):
"""Store a temporary file.
@param filedata: content of the original file.
@param filename: name of the original file.
@return: path to the temporary file.
"""
filename = get_filename_from_path(filename)
tmppath = tempfile.gettempdir()
targetpath = os.path.join(tmppath, "cuckoo-tmp")
if not os.path.exists(targetpath):
os.mkdir(targetpath)
tmp_dir = tempfile.mkdtemp(prefix="upload_", dir=targetpath)
tmp_file_path = os.path.join(tmp_dir, filename)
tmp_file = open(tmp_file_path, "wb")
tmp_file.write(filedata)
tmp_file.close()
return tmp_file_path
# xmlrpc + timeout - still a bit ugly - but at least gets rid of setdefaulttimeout
# inspired by
# http://stackoverflow.com/questions/372365/set-timeout-for-xmlrpclib-serverproxy
# (although their stuff was messy, this is cleaner)
class TimeoutServer(xmlrpclib.ServerProxy):
def __init__(self, *args, **kwargs):
timeout = kwargs.pop('timeout', None)
kwargs['transport'] = TimeoutTransport(timeout=timeout)
xmlrpclib.ServerProxy.__init__(self, *args, **kwargs)
def _set_timeout(self, timeout):
t = self._ServerProxy__transport
t.timeout = timeout
# if we still have a socket we need to update that as well
if hasattr(t, '_connection') and t._connection[1] and t._connection[1].sock:
t._connection[1].sock.settimeout(timeout)
class TimeoutTransport(xmlrpclib.Transport):
def __init__(self, *args, **kwargs):
self.timeout = kwargs.pop('timeout', None)
xmlrpclib.Transport.__init__(self, *args, **kwargs)
def make_connection(self, *args, **kwargs):
conn = xmlrpclib.Transport.make_connection(self, *args, **kwargs)
if self.timeout != None: conn.timeout = self.timeout
return conn
# http://stackoverflow.com/questions/6760685/creating-a-singleton-in-python
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
|
Python
| 0
|
@@ -2709,24 +2709,258 @@
, %22wb%22)%0A
+%0A # if filedata is file object, do chunked copy%0A if hasattr(filedata, 'read'):%0A chunk = filedata.read(1024)%0A while chunk:%0A tmp_file.write(chunk)%0A chunk = filedata.read(1024)%0A else:%0A
tmp_file.wri
@@ -2972,16 +2972,17 @@
ledata)%0A
+%0A
tmp_
|
68b1b9d824da9225b8b568348a56d5770195d8f8
|
Fix method with classmethod
|
openassessment/xblock/openassesment_template_mixin.py
|
openassessment/xblock/openassesment_template_mixin.py
|
class OpenAssessmentTemplatesMixin(object):
"""
This helps to get templates for different type of assessment that is
offered.
"""
@classmethod
def templates(cls):
"""
Returns a list of dictionary field: value objects that describe possible templates.
VALID_ASSESSMENT_TYPES needs to be declared as a class variable to use it.
"""
templates = []
for assesment_type in cls.VALID_ASSESSMENT_TYPES:
template_id = assesment_type
display_name = cls.VALID_ASSESSMENT_TYPES_DISPLAY_NAMES.get(
assesment_type)
template = cls._create_template_dict(template_id, display_name)
templates.append(template)
return templates
def _create_template_dict(cls, template_id, display_name):
"""
Returns a template dictionary which can be used with Studio API
"""
return {
"template_id": template_id,
"metadata": {
"display_name": display_name,
}
}
|
Python
| 0
|
@@ -749,16 +749,33 @@
plates%0A%0A
+ @classmethod%0A
def
|
c966f50fb63875fed88cf00a6e793796c315577e
|
Change default Balancer.{x,y,z} to True
|
hoomd/tuner/balance.py
|
hoomd/tuner/balance.py
|
from hoomd.operation import _Tuner
from hoomd.parameterdicts import ParameterDict
from hoomd.typeconverter import OnlyType
from hoomd.util import trigger_preprocessing
from hoomd.trigger import Trigger
from hoomd import _hoomd
class LoadBalancer(_Tuner):
R""" Adjusts the boundaries of a domain decomposition on a regular 3D grid.
Args:
x (bool): If True, balance in x dimension.
y (bool): If True, balance in y dimension.
z (bool): If True, balance in z dimension.
tolerance (float): Load imbalance tolerance (if <= 1.0, balance every
step).
maxiter (int): Maximum number of iterations to attempt in a single step.
period (int): Balancing will be attempted every \a period time steps
phase (int): When -1, start on the current time step. When >= 0, execute
on steps where *(step + phase) % period == 0*.
Every *period* steps, the boundaries of the processor domains are adjusted
to distribute the particle load close to evenly between them. The load
imbalance is defined as the number of particles owned by a rank divided by
the average number of particles per rank if the particles had a uniform
distribution:
.. math::
I = \frac{N(i)}{N / P}
where :math:` N(i) ` is the number of particles on processor :math:`i`,
:math:`N` is the total number of particles, and :math:`P` is the number of
ranks.
In order to adjust the load imbalance, the sizes are rescaled by the inverse
of the imbalance factor. To reduce oscillations and communication overhead,
a domain cannot move more than 5% of its current size in a single
rebalancing step, and the edge of a domain cannot move more than half the
distance to its neighbors.
Simulations with interfaces (so that there is a particle density gradient)
or clustering should benefit from load balancing. The potential speedup is
roughly :math:`I-1.0`, so that if the largest imbalance is 1.4, then the
user can expect a roughly 40% speedup in the simulation. This is of course
an estimate that assumes that all algorithms are roughly linear in
:math:`N`, all GPUs are fully occupied, and the simulation is limited by the
speed of the slowest processor. It also assumes that all particles roughly
equal. If you have a simulation where, for example, some particles have
significantly more pair force neighbors than others, this estimate of the
load imbalance may not produce the optimal results.
A load balancing adjustment is only performed when the maximum load
imbalance exceeds a *tolerance*. The ideal load balance is 1.0, so setting
*tolerance* less than 1.0 will force an adjustment every *period*. The load
balancer can attempt multiple iterations of balancing every *period*, and up
to *maxiter* attempts can be made. The optimal values of *period* and
*maxiter* will depend on your simulation.
Load balancing can be performed independently and sequentially for each
dimension of the simulation box. A small performance increase may be
obtained by disabling load balancing along dimensions that are known to be
homogeneous. For example, if there is a planar vapor-liquid interface
normal to the :math:`z` axis, then it may be advantageous to disable
balancing along :math:`x` and :math:`y`.
In systems that are well-behaved, there is minimal overhead of balancing
with a small *period*. However, if the system is not capable of being
balanced (for example, due to the density distribution or minimum domain
size), having a small *period* and high *maxiter* may lead to a large
performance loss. In such systems, it is currently best to either balance
infrequently or to balance once in a short test run and then set the
decomposition statically in a separate initialization.
Balancing is ignored if there is no domain decomposition available (MPI is
not built or is running on a single rank).
"""
def __init__(self, trigger, x=False, y=False, z=False, tolerance=1.02,
max_iterations=1):
defaults = dict(x=x, y=y, z=z, tolerance=tolerance,
max_iterations=max_iterations, trigger=trigger)
self._param_dict = ParameterDict(
x=bool, y=bool, z=bool, max_iterations=int, tolerance=float,
trigger=OnlyType(Trigger, preprocess=trigger_preprocessing))
self._param_dict.update(defaults)
def attach(self, simulation):
if simulation.device.mode == 'gpu':
cpp_cls = getattr(_hoomd, 'LoadBalancerGPU')
else:
cpp_cls = getattr(_hoomd, 'LoadBalancer')
self._cpp_obj = cpp_cls(simulation.state._cpp_sys_def, self.trigger)
super().attach(simulation)
|
Python
| 0.999991
|
@@ -4070,30 +4070,27 @@
, x=
-Fals
+Tru
e, y=
-Fals
+Tru
e, z=
-Fals
+Tru
e, t
|
f3be05b95d7692d0891ee80d3d9c8fb87810dc5c
|
update unit test
|
every_election/apps/api/tests/test_api_election_endpoint.py
|
every_election/apps/api/tests/test_api_election_endpoint.py
|
import json
from datetime import datetime, timedelta
import vcr
from rest_framework.test import APITestCase
from elections.tests.factories import ElectionFactory
from organisations.tests.factories import (
OrganisationFactory, OrganisationDivisionFactory)
class TestElectionAPIQueries(APITestCase):
lat = 51.5010089365
lon = -0.141587600123
fixtures = ['onspd.json']
def test_election_endpoint(self):
id = ElectionFactory(group=None).election_id
resp = self.client.get("/api/elections/")
data = resp.json()
assert len(data['results']) == 1
assert data['results'][0]['election_id'] == id
def test_election_endpoint_current(self):
id_current = ElectionFactory(
group=None, poll_open_date=datetime.today()).election_id
id_future = ElectionFactory( # noqa
group=None,
poll_open_date=datetime.today() - timedelta(days=60)).election_id
resp = self.client.get("/api/elections/?current")
data = resp.json()
assert len(data['results']) == 1
assert data['results'][0]['election_id'] == id_current
assert data['results'][0]['current'] == True
def test_election_endpoint_future(self):
ElectionFactory(
group=None,
poll_open_date=datetime.today(),
election_id="local.place-name-future-election.2017-03-23")
ElectionFactory(
group=None, poll_open_date=datetime.today() - timedelta(days=1))
resp = self.client.get("/api/elections/?future")
data = resp.json()
assert len(data['results']) == 1
assert data['results'][0]['election_id'] == \
"local.place-name-future-election.2017-03-23"
def test_election_endpoint_for_postcode(self):
election_id = "local.place-name.2017-03-23"
ElectionFactory(group=None, election_id=election_id)
ElectionFactory(group=None, geography=None)
resp = self.client.get("/api/elections/?postcode=SW1A1AA")
data = resp.json()
assert len(data['results']) == 1
assert data['results'][0]['election_id'] == election_id
def test_election_endpoint_for_postcode_jsonp(self):
election_id = "local.place-name.2017-03-23"
ElectionFactory(group=None, election_id=election_id)
ElectionFactory(group=None, geography=None)
url = "/api/elections/?postcode=SW1A1AA" + \
"&format=jsonp&callback=a_callback_string"
resp = self.client.get(url)
assert resp.content.decode('utf8').startswith("a_callback_string(")
@vcr.use_cassette(
'fixtures/vcr_cassettes/test_election_for_bad_postcode.yaml')
def test_election_endpoint_for_bad_postcode(self):
election_id = "local.place-name.2017-03-23"
ElectionFactory(group=None, election_id=election_id)
ElectionFactory(group=None, geography=None)
resp = self.client.get("/api/elections/?postcode=SW1A1AX")
data = resp.json()
assert data['detail'] == "Invalid postcode"
def test_election_endpoint_for_lat_lng(self):
election_id = "local.place-name.2017-03-23"
ElectionFactory(group=None, election_id=election_id)
ElectionFactory(group=None, geography=None)
resp = self.client.get(
"/api/elections/?coords=51.5010089365,-0.141587600123")
data = resp.json()
assert data['results'][0]['election_id'] == election_id
assert len(data['results']) == 1
def test_all_expected_fields_returned(self):
org = OrganisationFactory()
org_div = OrganisationDivisionFactory(
organisation=org, territory_code="ENG")
ElectionFactory(group=None, organisation=org, division=org_div)
resp = self.client.get("/api/elections/")
assert resp.json() == json.loads("""
{
"next": null,
"previous": null,
"results": [
{
"group_type": null,
"current": false,
"poll_open_date": "2017-03-23",
"election_id": "local.place-name-0.2017-03-23",
"group": null,
"division": {
"name": "Division 0",
"slug": "0",
"geography_curie": "test:0",
"divisionset": {
"start_date": "2017-05-04",
"legislation_url": "https://example.com/the-law",
"short_title": "Made up boundary changes",
"notes": "This is just for testing.",
"end_date": "2025-05-03",
"consultation_url": "https://example.com/consultation",
"mapit_generation_id": ""
},
"mapit_generation_high": null,
"seats_total": null,
"division_election_sub_type": "",
"division_subtype": "",
"mapit_generation_low": null,
"division_type": "test",
"official_identifier": "0",
"territory_code": "ENG"
},
"election_type": {
"name": "Local elections",
"election_type": "local"
},
"explanation": null,
"voting_system": {
"slug": "",
"name": "",
"uses_party_lists": false
},
"children": [],
"election_subtype": null,
"organisation": {
"slug": "org-0",
"territory_code": "ENG",
"organisation_subtype": "",
"common_name": "Organisation 0",
"official_name": "The Organisation 0 Council",
"organisation_type": "local-authority",
"election_name": "",
"official_identifier": "0",
"gss": "E000000"
},
"election_title": "Election 0",
"elected_role": "Councillor",
"seats_contested": 1,
"tmp_election_id": null
}
],
"count": 1
}
""")
|
Python
| 0
|
@@ -6531,16 +6531,54 @@
ion_id%22:
+ null,%0A %22metadata%22:
null%0A
@@ -6654,11 +6654,8 @@
%22%22%22)%0A
-%0A%0A%0A
|
fa0b16b46fe014be9009bc595bee719cc1fdcc31
|
don't divide by zero
|
apps/amo/management/commands/clean_redis.py
|
apps/amo/management/commands/clean_redis.py
|
import logging
import os
import socket
import subprocess
import sys
import tempfile
import time
from django.core.management.base import BaseCommand
import redisutils
import redis as redislib
log = logging.getLogger('z.redis')
# We process the keys in chunks of size CHUNK.
CHUNK = 3000
# Remove any sets with less than MIN or more than MAX elements.
MIN = 10
MAX = 50
# Expire keys after EXPIRE seconds.
EXPIRE = 60 * 5
# Calling redis can raise raise these errors.
RedisError = redislib.RedisError, socket.error
def vacuum(master, slave):
def keys():
ks = slave.keys()
log.info('There are %s keys to clean up.' % len(ks))
ks = iter(ks)
while 1:
buffer = []
for _ in xrange(CHUNK):
try:
buffer.append(ks.next())
except StopIteration:
yield buffer
return
yield buffer
tmp = tempfile.NamedTemporaryFile(delete=False)
for ks in keys():
tmp.write('\n'.join(ks))
tmp.close()
# It's hard to get Python to clean up the memory from slave.keys(), so
# we'll let the OS do it. argv[0] is a dummy argument, the rest get passed
# like a normal command line.
os.execl(sys.executable, 'argv[0]', sys.argv[0], sys.argv[1], tmp.name)
def cleanup(master, slave, filename):
tmp = open(filename)
total = [1, 0]
p = subprocess.Popen(['wc', '-l', filename], stdout=subprocess.PIPE)
total[0] = int(p.communicate()[0].strip().split()[0])
def file_keys():
while 1:
buffer = []
for _ in xrange(CHUNK):
line = tmp.readline()
if line:
buffer.append(line.strip())
else:
yield buffer
return
yield buffer
num = 0
for ks in file_keys():
pipe = slave.pipeline()
for k in ks:
pipe.scard(k)
try:
drop = [k for k, size in zip(ks, pipe.execute())
if 0 < size < MIN or size > MAX]
except RedisError:
continue
num += len(ks)
percent = round(float(num) / total[0] * 100, 1)
total[1] += len(drop)
log.debug('[%s %.1f%%] Dropping %s keys.' % (num, percent, len(drop)))
pipe = master.pipeline()
for k in drop:
pipe.expire(k, EXPIRE)
try:
pipe.execute()
except RedisError:
continue
time.sleep(1) # Poor man's rate limiting.
if total[0]:
log.info('Dropped %s keys [%.1f%%].' %
(total[1], round(float(total[1]) / total[0] * 100, 1)))
class Command(BaseCommand):
help = "Clean up the redis used by cache machine."
def handle(self, *args, **kw):
try:
master = redisutils.connections['cache']
slave = redisutils.connections['cache_slave']
except Exception:
log.error('Could not connect to redis.', exc_info=True)
return
if args:
filename = args[0]
try:
cleanup(master, slave, filename)
finally:
os.unlink(filename)
else:
vacuum(master, slave)
|
Python
| 0.999407
|
@@ -2225,16 +2225,35 @@
100, 1)
+ if total%5B0%5D else 0
%0A
|
69582dd80518ccc29fc8de9cf5bff54caf62468b
|
Truncate to exact length
|
src/sentry/utils/strings.py
|
src/sentry/utils/strings.py
|
"""
sentry.utils.strings
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import base64
import zlib
def truncatechars(value, arg):
"""
Truncates a string after a certain number of chars.
Argument: Number of chars to truncate after.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
if len(value) > length:
return value[:length] + '...'
return value
def compress(value):
return base64.b64encode(zlib.compress(value))
def decompress(value):
return zlib.decompress(base64.b64decode(value))
def gunzip(value):
return zlib.decompress(value, 16 + zlib.MAX_WBITS)
|
Python
| 0.000002
|
@@ -529,16 +529,20 @@
%5B:length
+ - 3
%5D + '...
|
81543597b6ac33d46615cca4f66683a0e03e384b
|
Set username better
|
fabric/testing/fixtures.py
|
fabric/testing/fixtures.py
|
"""
`pytest <https://pytest.org>`_ fixtures for easy use of Fabric test helpers.
To get Fabric plus this module's dependencies (as well as those of the main
`fabric.testing.base` module which these fixtures wrap), ``pip install
fabric[pytest]``.
The simplest way to get these fixtures loaded into your test suite so Pytest
notices them is to import them into a ``conftest.py`` (`docs
<http://pytest.readthedocs.io/en/latest/fixture.html#conftest-py-sharing-fixture-functions>`_).
For example, if you intend to use the `remote` and `client` fixtures::
from fabric.testing.fixtures import client, remote
.. versionadded:: 2.1
"""
try:
from pytest import fixture
from mock import patch, Mock
except ImportError:
import warnings
warning = (
"You appear to be missing some optional test-related dependencies;"
"please 'pip install fabric[pytest]'."
)
warnings.warn(warning, ImportWarning)
raise
from .. import Connection
from ..transfer import Transfer
# TODO: if we find a lot of people somehow ending up _with_ pytest but
# _without_ mock and other deps from testing.base, consider doing the
# try/except here too. But, really?
from .base import MockRemote, MockSFTP
@fixture
def connection():
"""
Yields a `.Connection` object with mocked methods.
Specifically, the primary API members (`.Connection.run`,
`.Connection.local`, etc) are replaced with ``mock.Mock`` instances.
Furthermore, ``run.in_stream`` is set to ``False`` to avoid attempts to
read from stdin (which typically plays poorly with pytest and other
capturing test runners).
.. versionadded:: 2.1
"""
c = Connection("host")
c.config.run.in_stream = False
c.run = Mock()
c.local = Mock()
# TODO: rest of API should get mocked too
# TODO: is there a nice way to mesh with MockRemote et al? Is that ever
# really that useful for code that just wants to assert about how run() and
# friends were called?
yield c
#: A convenience rebinding of `connection`.
#:
#: .. versionadded:: 2.1
cxn = connection
@fixture
def remote():
"""
Fixture allowing setup of a mocked remote session & access to sub-mocks.
Yields a `.MockRemote` object (which may need to be updated via
`.MockRemote.expect`, `.MockRemote.expect_sessions`, etc; otherwise a
default session will be used) & calls `.MockRemote.stop` on teardown.
.. versionadded:: 2.1
"""
remote = MockRemote()
yield remote
remote.stop()
@fixture
def sftp():
"""
Fixture allowing setup of a mocked remote SFTP session.
Yields a 3-tuple of: Transfer() object, SFTPClient object, and mocked OS
module.
For many/most tests which only want the Transfer and/or SFTPClient objects,
see `sftp_objs` and `transfer` which wrap this fixture.
.. versionadded:: 2.1
"""
mock = MockSFTP(autostart=False)
client, mock_os = mock.start()
transfer = Transfer(Connection("host"))
yield transfer, client, mock_os
# TODO: old mock_sftp() lacked any 'stop'...why? feels bad man
@fixture
def sftp_objs(sftp):
"""
Wrapper for `sftp` which only yields the Transfer and SFTPClient.
.. versionadded:: 2.1
"""
yield sftp[:2]
@fixture
def transfer(sftp):
"""
Wrapper for `sftp` which only yields the Transfer object.
.. versionadded:: 2.1
"""
yield sftp[0]
@fixture
def client():
"""
Mocks `~paramiko.client.SSHClient` for testing calls to ``connect()``.
Yields a mocked ``SSHClient`` instance.
This fixture updates `~paramiko.client.SSHClient.get_transport` to return a
mock that appears active on first check, then inactive after, matching most
tests' needs by default:
- `.Connection` instantiates, with a None ``.transport``.
- Calls to ``.open()`` test ``.is_connected``, which returns ``False`` when
``.transport`` is falsey, and so the first open will call
``SSHClient.connect`` regardless.
- ``.open()`` then sets ``.transport`` to ``SSHClient.get_transport()``, so
``Connection.transport`` is effectively
``client.get_transport.return_value``.
- Subsequent activity will want to think the mocked SSHClient is
"connected", meaning we want the mocked transport's ``.active`` to be
``True``.
- This includes `.Connection.close`, which short-circuits if
``.is_connected``; having a statically ``True`` active flag means a full
open -> close cycle will run without error. (Only tests that double-close
or double-open should have issues here.)
End result is that:
- ``.is_connected`` behaves False after instantiation and before ``.open``,
then True after ``.open``
- ``.close`` will work normally on 1st call
- ``.close`` will behave "incorrectly" on subsequent calls (since it'll
think connection is still live.) Tests that check the idempotency of
``.close`` will need to tweak their mock mid-test.
For 'full' fake remote session interaction (i.e. stdout/err
reading/writing, channel opens, etc) see `remote`.
.. versionadded:: 2.1
"""
with patch("fabric.connection.SSHClient") as SSHClient:
client = SSHClient.return_value
client.get_transport.return_value = Mock(active=True)
yield client
|
Python
| 0.000013
|
@@ -1323,17 +1323,96 @@
ifically
-,
+:%0A%0A - the hostname is set to %60%60%22host%22%60%60 and the username to %60%60%22user%22%60%60;%0A -
the pri
@@ -1447,20 +1447,16 @@
on.run%60,
-%0A
%60.Conne
@@ -1477,16 +1477,22 @@
etc) are
+%0A
replace
@@ -1525,27 +1525,19 @@
nces
-.%0A
+;
%0A
-Furthermore,
+- the
%60%60r
@@ -1550,16 +1550,30 @@
stream%60%60
+ config option
is set
@@ -1606,15 +1606,17 @@
mpts
- to%0A
+%0A
+to
rea
@@ -1684,16 +1684,18 @@
her%0A
+
capturin
@@ -1709,17 +1709,17 @@
runners)
-.
+;
%0A%0A ..
@@ -1765,21 +1765,39 @@
nection(
-%22
host
+=%22host%22, user=%22user
%22)%0A c
|
a8fb92840ff487c61564175efbf637fec538b480
|
Add signup view to fix error
|
features/gestalten/urls.py
|
features/gestalten/urls.py
|
from allauth.socialaccount import views as socialaccount_views
from allauth.socialaccount.providers.facebook import views as facebook_views
from django.conf.urls import url
from . import views
urlpatterns = [
url(
r'^stadt/gestalten/$',
views.List.as_view(),
name='gestalten'),
url(
r'^stadt/gestalten/(?P<pk>[0-9]+)/edit/$',
views.Update.as_view(),
name='gestalt-update'),
url(
r'^stadt/gestalten/(?P<pk>[0-9]+)/edit/avatar/$',
views.UpdateAvatar.as_view(),
name='gestalt-avatar-update'),
url(
r'^stadt/gestalten/(?P<pk>[0-9]+)/edit/background/$',
views.UpdateBackground.as_view(),
name='gestalt-background-update'),
url(
r'^stadt/login/$',
views.Login.as_view(),
name='login'),
url(r'^stadt/login/cancelled/$',
socialaccount_views.login_cancelled,
name='socialaccount_login_cancelled'),
url(r'^stadt/login/error/$',
socialaccount_views.login_error,
name='socialaccount_login_error'),
url(r'^stadt/login/facebook/$',
facebook_views.oauth2_login,
name='facebook_login'),
url(r'^stadt/login/facebook/callback/$',
facebook_views.oauth2_callback,
name='facebook_callback'),
url(r'^stadt/login/facebook/token/$',
facebook_views.login_by_token,
name='facebook_login_by_token'),
]
|
Python
| 0
|
@@ -1064,24 +1064,133 @@
n_error'),%0A%0A
+ url(r'%5Estadt/login/signup/$',%0A socialaccount_views.signup,%0A name='socialaccount_signup'),%0A%0A
url(r'%5Es
|
1d52996a88eb5aed643fe61ee959bd88373401b3
|
Throw a linebreak in there upon completion
|
filebutler_upload/utils.py
|
filebutler_upload/utils.py
|
from datetime import datetime, timedelta
import sys
class ProgressBar(object):
def __init__(self, filename, fmt):
self.filename = filename
self.fmt = fmt
self.progress = 0
self.total = 0
self.time_started = datetime.now()
self.time_updated = self.time_started
def __call__(self, current, total):
self.progress = current
self.total = total
if datetime.now() - self.time_updated > timedelta(seconds=0.5):
output = self.fmt.format(
filename=self.filename,
percent=self.get_percent(),
speed=self.get_mbps()
)
sys.stdout.write('\r' + output)
sys.stdout.flush()
self.time_updated = datetime.now()
def get_percent(self):
return self.progress / float(self.total)
def get_mbps(self):
time_delta = datetime.now() - self.time_started
if not time_delta.seconds:
return 0
return self.progress * 8 / float(time_delta.seconds) / 1000 / 1000
|
Python
| 0.000003
|
@@ -406,16 +406,56 @@
= total
+%0A final_update = current == total
%0A%0A
@@ -518,16 +518,32 @@
nds=0.5)
+ or final_update
:%0A
@@ -755,16 +755,86 @@
output)%0A
+%0A if final_update:%0A sys.stdout.write('%5Cn')%0A%0A
|
fba588f2df41a698adcf96382856c1fa80c191a7
|
change genexp back to list comprehension in multinomial_coefficients
|
sympycore/arithmetic/number_theory.py
|
sympycore/arithmetic/number_theory.py
|
"""Provides algorithms from number theory.
"""
from .numbers import FractionTuple, normalized_fraction, Complex, Float, div
__all__ = ['gcd', 'lcm', 'factorial',
'integer_digits', 'real_digits',
'multinomial_coefficients']
__docformat__ = "restructuredtext en"
def factorial(n, memo=[1, 1]):
"""Return n factorial (for integers n >= 0 only)."""
if n < 0:
raise ValueError
k = len(memo)
if n < k:
return memo[n]
p = memo[-1]
while k <= n:
p *= k
k += 1
if k < 100:
memo.append(p)
return p
def gcd(*args):
"""Calculate the greatest common divisor (GCD) of the arguments."""
L = len(args)
if L == 0: return 0
if L == 1: return args[0]
if L == 2:
a, b = args
while b:
a, b = b, a % b
return a
return gcd(gcd(args[0], args[1]), *args[2:])
def lcm(*args):
"""Calculate the least common multiple (LCM) of the arguments."""
L = len(args)
if L == 0: return 0
if L == 1: return args[0]
if L == 2: return div(args[0]*args[1], gcd(*args))
return lcm(lcm(args[0], args[1]), *args[2:])
# TODO: this could use the faster implementation in mpmath
def integer_digits(n, base=10):
"""Return a list of the digits of abs(n) in the given base."""
assert base > 1
assert isinstance(n, (int, long))
n = abs(n)
if not n:
return [0]
L = []
while n:
n, digit = divmod(n, base)
L.append(int(digit))
return L[::-1]
# TODO: this could (also?) be implemented as an endless generator
def real_digits(x, base=10, truncation=10):
"""Return ``(L, d)`` where L is a list of digits of ``abs(x)`` in
the given base and ``d`` is the (signed) distance from the leading
digit to the radix point.
For example, 1234.56 becomes ``([1, 2, 3, 4, 5, 6], 4)`` and 0.001
becomes ``([1], -2)``. If, during the generation of fractional
digits, the length reaches `truncation` digits, the iteration is
stopped."""
assert base > 1
assert isinstance(x, (int, long, FractionTuple))
if x == 0:
return ([0], 1)
x = abs(x)
exponent = 0
while x < 1:
x *= base
exponent -= 1
integer, fraction = divmod(x, 1)
L = integer_digits(integer, base)
exponent += len(L)
if fraction:
p, q = fraction
for i in xrange(truncation - len(L)):
p = (p % q) * base
if not p:
break
L.append(int(p//q))
return L, exponent
def binomial_coefficients(n):
"""Return a dictionary containing pairs {(k1,k2) : C_kn} where
C_kn are binomial coefficients and n=k1+k2."""
d = {(0, n):1, (n, 0):1}
a = 1
for k in xrange(1, n//2+1):
a = (a * (n-k+1))//k
d[k, n-k] = d[n-k, k] = a
return d
def binomial_coefficients_list(n):
d = [1] * (n+1)
a = 1
for k in xrange(1, n//2+1):
a = (a * (n-k+1))//k
d[k] = d[n-k] = a
return d
def multinomial_coefficients(m, n, _tuple=tuple, _zip=zip):
"""Return a dictionary containing pairs ``{(k1,k2,..,km) : C_kn}``
where ``C_kn`` are multinomial coefficients such that
``n=k1+k2+..+km``.
For example:
>>> print multinomial_coefficients(2,5)
{(3, 2): 10, (1, 4): 5, (2, 3): 10, (5, 0): 1, (0, 5): 1, (4, 1): 5}
The algorithm is based on the following result:
Consider a polynomial and it's ``m``-th exponent::
P(x) = sum_{i=0}^m p_i x^k
P(x)^n = sum_{k=0}^{m n} a(n,k) x^k
The coefficients ``a(n,k)`` can be computed using the
J.C.P. Miller Pure Recurrence [see D.E.Knuth, Seminumerical
Algorithms, The art of Computer Programming v.2, Addison
Wesley, Reading, 1981;]::
a(n,k) = 1/(k p_0) sum_{i=1}^m p_i ((n+1)i-k) a(n,k-i),
where ``a(n,0) = p_0^n``.
"""
if m==2:
return binomial_coefficients(n)
symbols = [(0,)*i + (1,) + (0,)*(m-i-1) for i in range(m)]
s0 = symbols[0]
p0 = [_tuple(aa-bb for aa,bb in _zip(s,s0)) for s in symbols]
r = {_tuple(aa*n for aa in s0):1}
r_get = r.get
r_update = r.update
l = [0] * (n*(m-1)+1)
l[0] = r.items()
for k in xrange(1, n*(m-1)+1):
d = {}
d_get = d.get
for i in xrange(1, min(m,k+1)):
nn = (n+1)*i-k
if not nn:
continue
t = p0[i]
for t2, c2 in l[k-i]:
tt = _tuple(aa+bb for aa,bb in _zip(t2,t))
cc = nn * c2
b = d_get(tt)
if b is None:
d[tt] = cc
else:
cc = b + cc
if cc:
d[tt] = cc
else:
del d[tt]
r1 = [(t, c//k) for (t, c) in d.iteritems()]
l[k] = r1
r_update(r1)
return r
|
Python
| 0
|
@@ -4662,16 +4662,17 @@
_tuple(
+%5B
aa+bb fo
@@ -4692,16 +4692,17 @@
ip(t2,t)
+%5D
)%0D%0A
|
42eae4634f4bab5649298a65889a4b1a3149d586
|
Use new invalidate_many cache invalidation to invalidate the event_push_actions cache appropriately.
|
synapse/storage/event_push_actions.py
|
synapse/storage/event_push_actions.py
|
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import SQLBaseStore
from twisted.internet import defer
from synapse.util.caches.descriptors import cachedInlineCallbacks
import logging
import ujson as json
logger = logging.getLogger(__name__)
class EventPushActionsStore(SQLBaseStore):
@defer.inlineCallbacks
def set_push_actions_for_event_and_users(self, event, tuples):
"""
:param event: the event set actions for
:param tuples: list of tuples of (user_id, profile_tag, actions)
"""
values = []
for uid, profile_tag, actions in tuples:
values.append({
'room_id': event.room_id,
'event_id': event.event_id,
'user_id': uid,
'profile_tag': profile_tag,
'actions': json.dumps(actions)
})
yield self.runInteraction(
"set_actions_for_event_and_users",
self._simple_insert_many_txn,
"event_push_actions",
values
)
@cachedInlineCallbacks(num_args=3)
def get_unread_event_push_actions_by_room_for_user(
self, room_id, user_id, last_read_event_id
):
def _get_unread_event_push_actions_by_room(txn):
sql = (
"SELECT stream_ordering, topological_ordering"
" FROM events"
" WHERE room_id = ? AND event_id = ?"
)
txn.execute(
sql, (room_id, last_read_event_id)
)
results = txn.fetchall()
if len(results) == 0:
return []
stream_ordering = results[0][0]
topological_ordering = results[0][1]
sql = (
"SELECT ea.event_id, ea.actions"
" FROM event_push_actions ea, events e"
" WHERE ea.room_id = e.room_id"
" AND ea.event_id = e.event_id"
" AND ea.user_id = ?"
" AND ea.room_id = ?"
" AND ("
" e.topological_ordering > ?"
" OR (e.topological_ordering = ? AND e.stream_ordering > ?)"
")"
)
txn.execute(sql, (
user_id, room_id,
topological_ordering, topological_ordering, stream_ordering
)
)
return [
{"event_id": row[0], "actions": json.loads(row[1])}
for row in txn.fetchall()
]
ret = yield self.runInteraction(
"get_unread_event_push_actions_by_room",
_get_unread_event_push_actions_by_room
)
defer.returnValue(ret)
@defer.inlineCallbacks
def remove_push_actions_for_event_id(self, room_id, event_id):
def f(txn):
txn.execute(
"DELETE FROM event_push_actions WHERE room_id = ? AND event_id = ?",
(room_id, event_id)
)
yield self.runInteraction(
"remove_push_actions_for_event_id",
f
)
|
Python
| 0
|
@@ -1422,35 +1422,98 @@
-yield self.runInteraction(%0A
+def f(txn):%0A for uid, _, __ in tuples:%0A txn.call_after(%0A
@@ -1524,42 +1524,135 @@
-%22
se
-t_actions_for_event_and_users%22,
+lf.get_unread_event_push_actions_by_room_for_user.invalidate_many,%0A (event.room_id, uid)%0A )
%0A
@@ -1656,24 +1656,31 @@
+return
self._simple
@@ -1695,29 +1695,21 @@
many_txn
-,%0A
+(txn,
%22event_
@@ -1722,35 +1722,122 @@
ctions%22,
-%0A values
+ values)%0A%0A yield self.runInteraction(%0A %22set_actions_for_event_and_users%22,%0A f,
%0A
@@ -1877,16 +1877,26 @@
m_args=3
+, lru=True
)%0A de
@@ -3627,32 +3627,264 @@
def f(txn):%0A
+ # Sad that we have to blow away the cache for the whole room here%0A txn.call_after(%0A self.get_unread_event_push_actions_by_room_for_user.invalidate_many,%0A (room_id,)%0A )%0A
txn.
|
07f96a22afe2d010809d03077d9cdd5ecb43d017
|
Update data source unique name migration to support another name of constraint
|
migrations/0020_change_ds_name_to_non_uniqe.py
|
migrations/0020_change_ds_name_to_non_uniqe.py
|
from redash.models import db
from playhouse.migrate import PostgresqlMigrator, migrate
if __name__ == '__main__':
migrator = PostgresqlMigrator(db.database)
with db.database.transaction():
# Change the uniqueness constraint on data source name to be (org, name):
db.database.execute_sql("ALTER TABLE data_sources DROP CONSTRAINT unique_name")
migrate(
migrator.add_index('data_sources', ('org_id', 'name'), unique=True)
)
db.close_db(None)
|
Python
| 0
|
@@ -22,16 +22,30 @@
port db%0A
+import peewee%0A
from pla
@@ -300,87 +300,559 @@
-db.database.execute_sql(%22ALTER TABLE data_sources DROP CONSTRAINT unique_name%22)
+success = False%0A for constraint in %5B'unique_name', 'data_sources_name'%5D:%0A try:%0A db.database.execute_sql(%22ALTER TABLE data_sources DROP CONSTRAINT %7B%7D%22.format(constraint))%0A success = True%0A break%0A except peewee.ProgrammingError:%0A db.close_db(None)%0A%0A if not success:%0A print %22Failed removing uniqueness constraint on data source name.%22%0A print %22Please verify its name in the schema, update the migration and run again.%22%0A exit()%0A
%0A
|
bc5621afa044a486ef7514e1654224102b3cfd54
|
Rename chunk list
|
RecordingApp/app/src/scripts/get_chunks.py
|
RecordingApp/app/src/scripts/get_chunks.py
|
""" Script to generate a json file containing book name, number of
chapters, number of chunks """
import json
import urllib.request
import re
RESULT_JSON_NAME = "chunks.json"
with open("catalog.json") as file:
DATA = json.load(file)
OUTPUT = []
#skip obs for now, loop over all books
for x in range(1, 67):
#gives book name and order (the books are stored out of order in the json)
slug = DATA[x]["slug"]
sort = DATA[x]["sort"]
#Get languages.json
url_lang_cat = DATA[x]["lang_catalog"]
response_lang_cat = urllib.request.urlopen(url_lang_cat)
lang_catalog = json.loads(response_lang_cat.read().decode('utf-8'))
name = lang_catalog[0]["project"]["name"]
#Get resources.json
#0 is for udb, are chunks the same for both?
url_res = lang_catalog[0]["res_catalog"]
response_res = urllib.request.urlopen(url_res)
res_cat = json.loads(response_res.read().decode('utf-8'))
#Get the usfm file
url_usfm = res_cat[0]["usfm"]
response_usfm = urllib.request.urlopen(url_usfm)
usfm_data = response_usfm.read().decode('utf-8')
lines = usfm_data.splitlines()
#keep a count of \c and \s5 tags (chapter and chunk respectively)
chapter = 0
num_chunks = 0
chunk_list = []
for line in lines:
chunk_match = re.search(r'\\s5', line)
#add to the number of chunks seen so far
if chunk_match:
num_chunks += 1
#on a new chapter, append the number of chunks tallied and reset the count
chapter_match = re.search(r'\\c', line)
if chapter_match:
chunk_list.append(num_chunks)
num_chunks = 0
chapter += 1
#append the last chapter
chunk_list.append(num_chunks+1)
#Account for the off by one introduced from chunks coming before chapters
chunk_list_fixed = []
length = len(chunk_list)-1
#eliminate chapter "0"
for i in range(length):
chunk_list_fixed.append(chunk_list[i+1])
#create a dictionary to store the book's data
book = {}
book['slug'] = slug
book['name'] = name
book['sort'] = sort
book['chapters'] = len(chunk_list_fixed)
book['chunks'] = chunk_list_fixed
#add to the list of books
OUTPUT.append(book)
#output all book data to a json file
with open(RESULT_JSON_NAME, 'w') as outfile:
json.dump(OUTPUT, outfile)
|
Python
| 0.000004
|
@@ -1237,24 +1237,30 @@
0%0A ch
-unk_list
+apters_in_book
= %5B%5D%0A
@@ -1595,32 +1595,38 @@
ch
-unk_list
+apters_in_book
.append(num_
@@ -1720,24 +1720,30 @@
r%0A ch
-unk_list
+apters_in_book
.append(
@@ -1879,24 +1879,30 @@
= len(ch
-unk_list
+apters_in_book
)-1%0A
@@ -1986,24 +1986,30 @@
ppend(ch
-unk_list
+apters_in_book
%5Bi+1%5D)%0A%0A
@@ -2257,16 +2257,16 @@
f books%0A
-
OUTP
@@ -2282,16 +2282,72 @@
(book)%0A%0A
+ break # DEBUG -- only process one book for testing%0A%0A
#output
|
d949c21c4b0a54a9a697a07bf12e22a98dc59ff1
|
Add `attach` method so we can wrap apps like WSGI middleware
|
flask_mustache/__init__.py
|
flask_mustache/__init__.py
|
# flask-mustache Flask plugin
import os
from jinja2 import Template
from flask import current_app, Blueprint
__all__ = ('FlaskMustache',)
mustache_app = Blueprint('mustache', __name__, static_folder='static')
class FlaskMustache(object):
"Wrapper to inject Mustache stuff into Flask"
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
self.app = app
app.register_blueprint(mustache_app)
# set up global `mustache` function
app.jinja_env.globals['mustache'] = mustache
# attach context processor with template content
app.context_processor(mustache_templates)
# context processor
def mustache_templates():
"Returns the content of all Mustache templates in the Jinja environment"
# short circuit development
if current_app.debug:
return {}
# get all the templates this env knows about
all_templates = current_app.jinja_loader.list_templates()
mustache_templates = {}
for template_name in all_templates:
# TODO: make this configurable
# we only want a specific extension
if template_name.endswith('mustache'):
# throw away everything except the file content
template, _, _ = \
current_app.jinja_loader.get_source(current_app.jinja_env,
template_name)
mustache_templates[template_name] = template
# now we need to render the templates
template_string = """{% if mustache_templates %}
{% for template_name, content in mustache_templates.items() %}
<script type="text/x-mustache-template" id="{{ template_name|replace('/', '-') }}" charset="utf-8">
{{ content|e }}
</script>
{% endfor %}
{% endif %}"""
context = {
'mustache_templates': mustache_templates
}
# returns the full HTML, ready to use in JavaScript
return {'mustache_templates': Template(template_string).render(context)}
# template helper function
def mustache(template, **kwargs):
"""Usage:
{{ mustache('path/to/whatever.mustache', key=value, key1=value1.. keyn=valuen) }}
This uses the regular Jinja2 loader to find the templates, so your *.mustache files
will need to be available in that path.
"""
template, _, _ = current_app.jinja_loader.get_source(current_app.jinja_env, template)
return pystache.render(template, kwargs, encoding='utf-8')
|
Python
| 0
|
@@ -709,16 +709,184 @@
lates)%0A%0A
+ @staticmethod%0A def attach(app):%0A %22This is written so it can work like WSGI middleware%22%0A # noop%0A _ = FlaskMustache(app)%0A%0A return app%0A%0A
%0A# conte
|
d4d21340bb6b86a510c16ceeb6c27fe3a5a5b90e
|
add DataNode to carry inputs to Flows
|
flow/orchestrator/types.py
|
flow/orchestrator/types.py
|
#!/usr/bin/env python
import redisom as rom
import time
__all__ = ['NodeBase', 'Flow', 'NodeFailedError', 'NodeAlreadyCompletedError',
'Status', 'StartNode', 'StopNode']
# FIXME: collect service names
ORCHESTRATOR = "orchestrator"
def _timestamp(redis_ts):
return redis_ts[0] + redis_ts[1] * 1e-6
class Status(object):
new = "new"
dispatched = "dispatched"
running = "running"
success = "success"
failure = "failure"
cancelled = "cancelled"
_completed_values = set([success, failure])
@staticmethod
def done(status):
return status in _completed_values
class NodeFailedError(RuntimeError):
def __init__(self, node_key, msg):
self.node_key = node_key
RuntimeError.__init__(self, "Node %s failed: %s" %(node_key, msg))
class NodeAlreadyCompletedError(RuntimeError):
def __init__(self, node_key):
self.node_key = node_key
RuntimeError.__init__(self, "Node %s already completed!" %node_key)
class NodeBase(rom.Object):
execute_timestamp = rom.Property(rom.Scalar)
complete_timestamp = rom.Property(rom.Scalar)
flow_key = rom.Property(rom.Scalar)
indegree = rom.Property(rom.Scalar)
name = rom.Property(rom.Scalar)
status = rom.Property(rom.Scalar)
successors = rom.Property(rom.Set)
input_connections = rom.Property(rom.Hash, value_decoder=rom.json_dec,
value_encoder=rom.json_enc)
outputs = rom.Property(rom.Hash, value_decoder=rom.json_dec,
value_encoder=rom.json_enc)
@property
def duration(self):
if not self.execute_timestamp.value:
return None
if not self.complete_timestamp.value:
end = _timestamp(self._connection.time())
else:
end = float(self.complete_timestamp.value)
beg = float(self.execute_timestamp.value)
return end - beg
@property
def environment(self):
local_proxy = getattr(self, 'hidden_environment', None)
if local_proxy.value:
return local_proxy
if self.flow:
return self.flow.environment
@environment.setter
def environment(self, value):
self.hidden_environment.value = value
@property
def working_directory(self):
local_proxy = getattr(self, 'hidden_working_directory', None)
if local_proxy.value:
return local_proxy
if self.flow:
return self.flow.working_directory
@working_directory.setter
def working_directory(self, value):
self.hidden_working_directory.value = value
@property
def user_id(self):
local_proxy = getattr(self, 'hidden_user_id', None)
if local_proxy.value:
return local_proxy
if self.flow:
return self.flow.user_id
@user_id.setter
def user_id(self, value):
self.hidden_user_id.value = value
@property
def inputs(self):
try:
inp_conn = self.input_connections
if not inp_conn:
return None
except:
return None
rv = {}
for idx, props in inp_conn.iteritems():
idx = int(idx)
node = self.flow.node(idx)
outputs = node.outputs
if props:
vals = outputs.values(props.values())
rv.update(zip(props.keys(), vals))
else:
rv.update(outputs)
return rv
@property
def now(self):
return _timestamp(self._connection.time())
@property
def flow(self):
return rom.get_object(self._connection, self.flow_key.value)
def execute(self, services):
if self.execute_timestamp.setnx(self.now):
print "Executing '%s' (key=%s)" % (str(self.name), self.key)
if self.status != Status.cancelled:
self._execute(services)
else:
self.fail(services)
def complete(self, services):
self.status = Status.success
if self.complete_timestamp.setnx(self.now):
for succ_idx in self.successors:
node = self.flow.node(succ_idx)
idg = node.indegree.increment(-1)
if idg == 0:
services[ORCHESTRATOR].execute_node(node.key)
else:
raise NodeAlreadyCompletedError(self.key)
def cancel(self, services):
print "Cancelling", self.name
self.status = Status.cancelled
def fail(self, services):
print "Failing", self.name
self.status = Status.failure
for succ_idx in self.successors:
node = self.flow.node(succ_idx)
node.cancel(services)
if node.indegree.increment(-1) == 0:
node.fail(services)
def _execute(self, services):
raise NotImplementedError("_execute not implemented in %s" %
self.__class__.__name__)
class StartNode(NodeBase):
@property
def inputs(self):
return self.outputs
def _execute(self, services):
self.flow.execute_timestamp.setnx(self.now)
self.complete(services)
class StopNode(NodeBase):
def _execute(self, services):
inputs = self.inputs
if inputs:
self.flow.outputs = inputs
self.complete(services)
def complete(self, services):
print "Completing a stop node (%s)!" % self.name
self.flow.complete(services)
def fail(self, services):
self.flow.fail(services)
def cancel(self, services):
self.flow.cancel(services)
class Flow(NodeBase):
node_keys = rom.Property(rom.List)
hidden_environment = rom.Property(rom.Hash)
hidden_user_id = rom.Property(rom.Scalar)
hidden_working_directory = rom.Property(rom.Scalar)
@property
def flow(self):
flow_key = self.flow_key.value
if not flow_key:
return self
return rom.get_object(self._connection, flow_key)
def node(self, idx):
key = self.node_keys[idx]
if key:
return rom.get_object(self._connection, key)
def _execute(self, services):
services[ORCHESTRATOR].execute_node(self.node_keys[0])
class SleepNode(NodeBase):
sleep_time = rom.Property(rom.Scalar)
def _execute(self, services):
sleep_time = self.sleep_time.value
if sleep_time:
time.sleep(float(sleep_time))
self.complete(services)
|
Python
| 0
|
@@ -3169,19 +3169,19 @@
for
-idx
+key
, props
@@ -3221,60 +3221,50 @@
-idx = int(idx)%0A node = self.flow.node(idx
+node = rom.get_object(self.connection, key
)%0A
@@ -5628,24 +5628,89 @@
services)%0A%0A%0A
+class DataNode(NodeBase):%0A outputs = rom.Property(rom.Hash)%0A%0A%0A
class Flow(N
|
c1c5fbdc2d7cda67668df38d91a2becf546fa852
|
Update transform config in development
|
backdrop/transformers/config/development.py
|
backdrop/transformers/config/development.py
|
TRANSFORMER_AMQP_URL = 'amqp://transformer:notarealpw@localhost:5672/%2Ftransformations'
STAGECRAFT_URL = 'http://localhost:3204'
STAGECRAFT_OAUTH_TOKEN = 'development-oauth-access-token'
BACKDROP_READ_URL = 'http://localhost:3038/data'
BACKDROP_WRITE_URL = 'http://localhost:3039/data'
|
Python
| 0
|
@@ -122,11 +122,11 @@
st:3
-204
+103
'%0AST
@@ -213,22 +213,32 @@
p://
-localhost:3038
+backdrop-read.dev.gov.uk
/dat
@@ -273,22 +273,33 @@
p://
-localhost:3039
+backdrop-write.dev.gov.uk
/dat
|
13c74e663dd511f53e6c0b1bb37b5baa12bba016
|
add tokens for fco transaction buckets
|
backdrop/write/config/development_tokens.py
|
backdrop/write/config/development_tokens.py
|
TOKENS = {
'_foo_bucket': '_foo_bucket-bearer-token',
'bucket': 'bucket-bearer-token',
'foo': 'foo-bearer-token',
'foo_bucket': 'foo_bucket-bearer-token',
'licensing': 'licensing-bearer-token',
'licensing_journey': 'licensing_journey-bearer-token'
}
|
Python
| 0
|
@@ -264,11 +264,563 @@
r-token'
+,%0A 'pay_legalisation_post_journey': 'pay_legalisation_post_journey-bearer-token',%0A 'pay_legalisation_drop_off_journey': 'pay_legalisation_drop_off_journey-bearer-token',%0A 'pay_register_birth_abroad_journey': 'pay_register_birth_abroad_journey-bearer-token',%0A 'pay_register_death_abroad_journey': 'pay_register_death_abroad_journey-bearer-token',%0A 'pay_foreign_marriage_certificates_journey': 'pay_foreign_marriage_certificates_journey-bearer-token',%0A 'deposit_foreign_marriage_journey': 'deposit_foreign_marriage_journey-bearer-token'
%0A%7D%0A
|
700af658169cdb861ff15341c3a03443f207c02e
|
Update __init__.py
|
tendrl/node_agent/manager/__init__.py
|
tendrl/node_agent/manager/__init__.py
|
import signal
import threading
from tendrl.commons import manager as commons_manager
from tendrl.commons import TendrlNS
from tendrl.commons.utils import log_utils as logger
from tendrl.node_agent.provisioner.gluster.manager import \
ProvisioningManager as GlusterProvisioningManager
from tendrl import node_agent
from tendrl.node_agent.message.handler import MessageHandler
from tendrl.node_agent import node_sync
from tendrl.integrations.gluster import GlusterIntegrationNS
class NodeAgentManager(commons_manager.Manager):
def __init__(self):
# Initialize the state sync thread which gets the underlying
# node details and pushes the same to etcd
super(NodeAgentManager, self).__init__(
NS.state_sync_thread,
message_handler_thread=NS.message_handler_thread
)
node_sync.platform_detect.sync()
node_sync.sds_detect.sync()
def main():
# NS.node_agent contains the config object,
# hence initialize it before any other NS
node_agent.NodeAgentNS()
# Init NS.tendrl
TendrlNS()
# Init NS.provisioning
# TODO(team) remove NS.provisioner and use NS.provisioning.{ceph, gluster}
# provisioning.ProvisioningNS()
# Init NS.integrations.ceph
# TODO(team) add all short circuited ceph(import/create) NS.tendrl.flows
# to NS.integrations.ceph
# ceph.CephIntegrationNS()
# Init NS.integrations.gluster
# TODO(team) add all short circuited ceph(import/create) NS.tendrl.flows
# to NS.integrations.ceph
GlusterIntegrationNS()
# Compile all definitions
NS.compiled_definitions = \
NS.node_agent.objects.CompiledDefinitions()
NS.compiled_definitions.merge_definitions([
NS.tendrl.definitions, NS.node_agent.definitions,
NS.integrations.gluster.definitions])
NS.node_agent.compiled_definitions = NS.compiled_definitions
# Every process needs to set a NS.type
# Allowed types are "node", "integration", "monitoring"
NS.type = "node"
NS.first_node_inventory_sync = True
NS.state_sync_thread = node_sync.NodeAgentSyncThread()
NS.compiled_definitions.save()
NS.node_context.save()
NS.tendrl_context.save()
NS.node_agent.definitions.save()
# NS.integrations.ceph.definitions.save()
NS.node_agent.config.save()
NS.publisher_id = "node_agent"
NS.message_handler_thread = MessageHandler()
NS.gluster_provisioner = GlusterProvisioningManager(
NS.tendrl.definitions.get_parsed_defs()["namespace.tendrl"][
'gluster_provisioner']
)
if NS.config.data.get("with_internal_profiling", False):
from tendrl.commons import profiler
profiler.start()
NS.gluster_sds_sync_running = False
m = NodeAgentManager()
m.start()
complete = threading.Event()
def shutdown(signum, frame):
logger.log(
"debug",
NS.publisher_id,
{"message": "Signal handler: stopping"}
)
complete.set()
m.stop()
if NS.gluster_sds_sync_running:
NS.gluster_integrations_sync_thread.stop()
def reload_config(signum, frame):
logger.log(
"debug",
NS.publisher_id,
{"message": "Signal handler: SIGHUP"}
)
NS.node_agent.ns.setup_common_objects()
signal.signal(signal.SIGTERM, shutdown)
signal.signal(signal.SIGINT, shutdown)
signal.signal(signal.SIGHUP, reload_config)
while not complete.is_set():
complete.wait(timeout=1)
if __name__ == "__main__":
main()
|
Python
| 0.000072
|
@@ -829,86 +829,8 @@
)%0A%0A
- node_sync.platform_detect.sync()%0A node_sync.sds_detect.sync()%0A%0A
%0Adef
|
7b27423bef813befe1bb9dd5cb14843d847bff42
|
Fix mailhog settings
|
backend/project_name/settings/local_base.py
|
backend/project_name/settings/local_base.py
|
from .base import * # noqa
DEBUG = True
HOST = "http://localhost:8000"
SECRET_KEY = "secret"
DATABASES = {
"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": base_dir_join("db.sqlite3"),}
}
STATIC_ROOT = base_dir_join("staticfiles")
STATIC_URL = "/static/"
MEDIA_ROOT = base_dir_join("mediafiles")
MEDIA_URL = "/media/"
DEFAULT_FILE_STORAGE = "django.core.files.storage.FileSystemStorage"
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.StaticFilesStorage"
AUTH_PASSWORD_VALIDATORS = [] # allow easy passwords only on local
# Celery
CELERY_TASK_ALWAYS_EAGER = True
CELERY_TASK_EAGER_PROPAGATES = True
# Email
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = config("EMAIL_HOST")
EMAIL_HOST_USER = config("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = config("EMAIL_HOST_PASSWORD")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {"standard": {"format": "%(levelname)-8s [%(asctime)s] %(name)s: %(message)s"},},
"handlers": {
"console": {"level": "DEBUG", "class": "logging.StreamHandler", "formatter": "standard",},
},
"loggers": {
"": {"handlers": ["console"], "level": "INFO"},
"celery": {"handlers": ["console"], "level": "INFO"},
},
}
JS_REVERSE_JS_MINIFY = False
|
Python
| 0
|
@@ -637,17 +637,38 @@
%0A# Email
+ settings for mailhog
%0A
-
EMAIL_BA
@@ -738,162 +738,35 @@
T =
-config(%22EMAIL_HOST%22)%0AEMAIL_HOST_USER = config(%22EMAIL_HOST_USER%22)%0AEMAIL_HOST_PASSWORD = config(%22EMAIL_HOST_PASSWORD%22)%0AEMAIL_PORT = 587%0AEMAIL_USE_TLS = True
+'mailhog'%0AEMAIL_PORT = 1025
%0A%0A#
|
bce815a12a3ce18d23644c08beda5f97271e559e
|
update token
|
forge/tests/test_github.py
|
forge/tests/test_github.py
|
# Copyright 2017 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time, os
from forge.tasks import TaskError
from forge.github import Github
from .common import mktree
from tempfile import mkdtemp
from shutil import rmtree
token = "8c91e6c758b16e7b5d7f0676d3475f9fa33693dd"
def test_list():
gh = Github(token)
repos = gh.list("forgeorg")
assert repos == [(u'forgeorg/foo', u'https://github.com/forgeorg/foo.git')]
def test_pull():
gh = Github(token)
repos = gh.list("forgeorg")
name, url = repos[0]
output = mkdtemp()
gh.pull(url, os.path.join(output, name))
assert os.path.exists(os.path.join(output, name, "README.md"))
rmtree(output)
def test_exists():
gh = Github(token)
assert gh.exists("https://github.com/forgeorg/foo.git")
assert not gh.exists("https://github.com/forgeorg/nosuchrepo.git")
unauth_gh = Github(None)
try:
unauth_gh.exists("https://github.com/forgeorg/nosuchrepo.git")
assert False
except TaskError, e:
assert "Authentication failed" in str(e)
def test_clone():
gh = Github(token)
output = mkdtemp()
gh.clone("https://github.com/forgeorg/foo.git", os.path.join(output, 'foo'))
assert os.path.exists(os.path.join(output, 'foo', "README.md"))
rmtree(output)
|
Python
| 0.000001
|
@@ -757,58 +757,345 @@
ee%0A%0A
-token = %228c91e6c758b16e7b5d7f0676d3475f9fa33693dd%22
+# github will deactivate this token if it detects it in our source, so%0A# we obfuscate it slightly%0Anumbers = %5B48, 49, 51, 99, 99, 101, 52, 51, 48, 53, 54, 100, 57, 56, 97, 50,%0A 55, 97, 54, 53, 55, 55, 49, 48, 49, 55, 48, 54, 55, 102, 100, 48,%0A 102, 57, 49, 51, 97, 48, 102, 51%5D%0Atoken = %22%22.join(chr(c) for c in numbers)
%0A%0Ade
|
9722390c4fa1a6bb5b9e8d66a53219bcc2447b39
|
Use zoom 13 tiles for station tests, so that the station is more likely to not be the only one in the tile, which provides a better test of the rank.
|
test/507-routes-via-stop-positions.py
|
test/507-routes-via-stop-positions.py
|
stations = [
(17, 38596, 49262, 'Penn Station', 895371274L, 1, [
'2100-2297', # Acela Express
'68-69', # Adirondack
'50-51', # Cardinal
'79-80', # Carolinian
'19-20', # Crescent
'230-296', # Empire Service
'600-674', # Keystone Service
'63', # Maple Leaf (Northbound)
'64', # Maple Leaf (Southbound)
'89-90', # Palmetto
'42-43', # Pennsylvanian
'97-98', # Silver Meteor
'91-92', # Silver Star
'54-57', # Vermonter
]),
(17, 37639, 49960, 'Camden Station', 845910705L, 2, ['Camden Line']),
(17, 20958, 50667, 'Castro MUNI', 297863017L, 1, ['K', 'L', 'M', 'T']),
(17, 38163, 49642, '30th Street', 32272623L, 1, [
'2100-2297', # Acela Express
'79-80', # Carolinian
'19-20', # Crescent
'600-674', # Keystone Service
'82-198', # Northeast Regional (Boston/Springfield & Lynchburg)
'89-90', # Palmetto
'Chestnut Hill West Line', # SEPTA - Chestnut Hill West Line
'Cynwyd Line', # SEPTA - Cynwyd Line
'Media/Elwyn Line', # SEPTA - Media/Elwyn Line
'Trenton Line', # SEPTA - Trenton Line
'Wilmington/Newark Line', # SEPTA - Wilmington/Newark Line
'91-92', # Silver Star
])
]
for z, x, y, name, osm_id, expected_rank, expected_routes in stations:
with features_in_tile_layer(z, x, y, 'pois') as pois:
found = False
for poi in pois:
props = poi['properties']
if props['id'] == osm_id:
found = True
routes = props.get('transit_routes', list())
rank = props['kind_tile_rank']
if rank > expected_rank:
raise Exception("Found %r, and was expecting a rank "
"of %r or less, but got %r."
% (name, expected_rank, rank))
for r in expected_routes:
count = 0
for route in routes:
if r in route:
count = count + 1
if count == 0:
raise Exception("Found %r, and was expecting at "
"least one %r route, but found "
"none. Routes: %r"
% (name, r, routes))
if not found:
raise Exception("Did not find %r (ID=%r) in tile." % (name, osm_id))
|
Python
| 0
|
@@ -16,23 +16,21 @@
(1
-7, 38596, 49262
+3, 2412, 3078
, 'P
@@ -539,23 +539,21 @@
(1
-7, 37639, 49960
+3, 2352, 3122
, 'C
@@ -580,17 +580,17 @@
10705L,
-2
+5
, %5B'Camd
@@ -611,23 +611,21 @@
(1
-7, 20958, 50
+3, 1309, 31
66
-7
, 'C
@@ -688,22 +688,20 @@
(1
-7
+3
,
+2
38
-163, 4964
+5, 310
2, '
|
307e0c4bbd7e76c9a8becf39df539413fef20e60
|
Add line magic %cpp
|
bindings/pyroot/ROOTaaS/iPyROOT/cppmagic.py
|
bindings/pyroot/ROOTaaS/iPyROOT/cppmagic.py
|
import IPython.core.magic as ipym
import ROOT
import utils
@ipym.magics_class
class CppMagics(ipym.Magics):
@ipym.cell_magic
def cpp(self, line, cell=None):
"""Inject into root."""
if cell:
utils.processCppCode(cell)
def load_ipython_extension(ipython):
ipython.register_magics(CppMagics)
|
Python
| 0.000003
|
@@ -112,16 +112,21 @@
@ipym.
+line_
cell_mag
@@ -128,16 +128,16 @@
l_magic%0A
-
def
@@ -211,16 +211,100 @@
if cell
+ is None: # this is a line magic%0A utils.processCppCode(line)%0A else
:%0A
@@ -374,16 +374,16 @@
ython):%0A
+
ipyt
@@ -413,9 +413,8 @@
Magics)%0A
-%0A
|
e90ee25e43cc284e9b6db9eab8413f22b2292fa3
|
Allow to force registration
|
frontend/custom/courses.py
|
frontend/custom/courses.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Université Catholique de Louvain.
#
# This file is part of INGInious.
#
# INGInious is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INGInious is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with INGInious. If not, see <http://www.gnu.org/licenses/>.
""" A course class with some modification for users """
from collections import OrderedDict
from datetime import datetime
from common.courses import Course
from frontend.accessible_time import AccessibleTime
from frontend.base import get_database
from frontend.custom.tasks import FrontendTask
class FrontendCourse(Course):
""" A course with some modification for users """
_task_class = FrontendTask
def __init__(self, courseid):
Course.__init__(self, courseid)
if self._content.get('nofrontend', False):
raise Exception("That course is not allowed to be displayed directly in the frontend")
if "name" in self._content and "admins" in self._content and isinstance(self._content["admins"], list):
self._name = self._content['name']
self._admins = self._content['admins']
self._accessible = AccessibleTime(self._content.get("accessible", None))
self._registration = AccessibleTime(self._content.get("registration", None))
self._registration_password = self._content.get('registration_password', None)
else:
raise Exception("Course has an invalid json description: " + courseid)
def get_name(self):
""" Return the name of this course """
return self._name
def get_admins(self):
""" Return a list containing the username of the administrators of this course """
return self._admins
def is_open_to_non_admin(self):
""" Returns true if the course is accessible by users that are not administrator of this course """
return self._accessible.is_open()
def is_open_to_user(self, username):
""" Returns true if the course is open to this user """
return (self._accessible.is_open() and self.is_user_registered(username)) or username in self.get_admins()
def is_registration_possible(self):
""" Returns true if users can register for this course """
return self._accessible.is_open() and self._registration.is_open()
def is_password_needed_for_registration(self):
""" Returns true if a password is needed for registration """
return self._registration_password is not None
def get_registration_password(self):
""" Returns the password needed for registration (None if there is no password) """
return self._registration_password
def register_user(self, username, password=None):
""" Register a user to the course. Returns True if the registration succeeded, False else. """
if not self.is_registration_possible():
return False
if self.is_password_needed_for_registration() and self._registration_password != password:
return False
if self.is_open_to_user(username):
return False # already registered?
get_database().registration.insert({"username": username, "courseid": self.get_id(), "date": datetime.now()})
return True
def unregister_user(self, username):
""" Unregister a user from this course """
get_database().registration.remove({"username": username, "courseid": self.get_id()})
def is_user_registered(self, username):
""" Returns True if the user is registered """
return (get_database().registration.find_one({"username": username, "courseid": self.get_id()}) is not None) or username in self.get_admins()
def get_registered_users(self, with_admins=True):
""" Get all the usernames that are registered to this course (in no particular order)"""
l = [entry['username'] for entry in list(get_database().registration.find({"courseid": self.get_id()}, {"username": True, "_id": False}))]
if with_admins:
return list(set(l + self.get_admins()))
else:
return l
def get_accessibility(self):
""" Return the AccessibleTime object associated with the accessibility of this course """
return self._accessible
def get_registration_accessibility(self):
""" Return the AccessibleTime object associated with the registration """
return self._registration
def get_user_completion_percentage(self):
""" Returns the percentage (integer) of completion of this course by the current user """
import frontend.user as User
cache = User.get_data().get_course_data(self.get_id())
if cache is None:
return 0
return int(cache["task_succeeded"] * 100 / cache["total_tasks"])
def get_user_last_submissions(self, limit=5):
""" Returns a given number (default 5) of submissions of task from this course """
from frontend.submission_manager import get_user_last_submissions as extern_get_user_last_submissions
task_ids = []
for task_id in self.get_tasks():
task_ids.append(task_id)
return extern_get_user_last_submissions({"courseid": self.get_id(), "taskid": {"$in": task_ids}}, limit)
def get_tasks(self):
return OrderedDict(sorted(Course.get_tasks(self).items(), key=lambda t: t[1].get_order()))
|
Python
| 0
|
@@ -3213,19 +3213,32 @@
ord=None
+, force=False
):%0A
-
@@ -3328,24 +3328,50 @@
e else. %22%22%22%0A
+ if not force:%0A
if n
@@ -3410,32 +3410,36 @@
():%0A
+
return False%0A
@@ -3427,32 +3427,36 @@
return False%0A
+
if self.
@@ -3538,16 +3538,20 @@
ssword:%0A
+
|
0a3164a47854ed17765d567afc7fc6a05aa0fd21
|
Fix bug in commonsdownloader with argument names
|
commonsdownloader/commonsdownloader.py
|
commonsdownloader/commonsdownloader.py
|
#!/usr/bin/python
# -=- encoding: latin-1 -=-
"""Download files from Wikimedia Commons."""
import os
import logging
import argparse
from thumbnaildownload import download_file
def get_file_names_from_textfile(textfile_handler):
"""Yield the file names and widths by parsing a given text fileahandler."""
for line in textfile_handler:
line = line.rstrip()
try:
(image_name, width) = line.split(',')
except ValueError:
image_name = line
width = None
yield (image_name, width)
def download_with_file_list(file_list, output_path):
"""Download files from a given textfile list."""
for (file_name, width) in get_file_names_from_textfile(args.file_list):
download_file(file_name, args.output_path, width=width)
def download_from_files(files, output_path, width):
"""Download files from a given file list."""
for file_name in files:
download_file(file_name, output_path, width=width)
class Folder(argparse.Action):
"""An argparse action for directories."""
def __call__(self, parser, namespace, values, option_string=None):
prospective_dir = values
if not os.path.isdir(prospective_dir):
msg = "Folder:{0} is not a valid path".format(prospective_dir)
raise argparse.ArgumentTypeError(msg)
else:
setattr(namespace, self.dest, prospective_dir)
def main():
"""Main method, entry point of the script."""
from argparse import ArgumentParser
description = "Download a bunch of thumbnails from Wikimedia Commons"
parser = ArgumentParser(description=description)
parser.add_argument("files",
nargs='*',
metavar="FILES",
help='A list of filenames')
parser.add_argument("-l", "--list", metavar="LIST",
dest="file_list",
type=argparse.FileType('r'),
help='A list of files <filename,width>')
parser.add_argument("-o", "--output", metavar="FOLDER",
dest="output_path",
action=Folder,
default=os.getcwd(),
help='The directory to download the files to')
parser.add_argument("-w", "--width",
dest="width",
type=int,
default=100,
help='The width of the thumbnail (default: 100)')
parser.add_argument("-v",
action="count",
dest="verbose",
default=0,
help="Verbosity level. -v for INFO, -vv for DEBUG")
args = parser.parse_args()
logging_map = {0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG}
logging.basicConfig(level=logging_map[args.verbose])
logging.info("Starting")
if args.file_list:
download_from_file_list(args.file_list, args.output_path)
elif args.files:
download_from_files(args.files, args.output_path, args.width)
else:
parser.print_help()
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -714,21 +714,16 @@
extfile(
-args.
file_lis
@@ -759,21 +759,16 @@
e_name,
-args.
output_p
|
082cc2590f7b263e37fe214e3c4e6fc86039327a
|
correct pyunit
|
h2o-py/tests/testdir_algos/deeplearning/pyunit_tweedie_weightsDeeplearning.py
|
h2o-py/tests/testdir_algos/deeplearning/pyunit_tweedie_weightsDeeplearning.py
|
import sys
sys.path.insert(1, "../../../")
import h2o
#def tweedie_weights(ip,port):
h2o.init()
data = h2o.import_frame(h2o.locate("smalldata/glm_test/cancar_logIn.csv"))
data["C1M3"] = (data["Class"] == 1 and data["Merit"] == 3).asfactor()
data["C3M3"] = (data["Class"] == 3 and data["Merit"] == 3).asfactor()
data["C4M3"] = (data["Class"] == 4 and data["Merit"] == 3).asfactor()
data["C1M2"] = (data["Class"] == 1 and data["Merit"] == 2).asfactor()
data["Merit"] = data["Merit"].asfactor()
data["Class"] = data["Class"].asfactor()
loss = data["Cost"] / data["Insured"]
loss.setName(0,"Loss")
cancar = loss.cbind(data)
# Without weights
myX = ["Merit","Class","C1M3","C4M3"]
dl = h2o.deeplearning(x = cancar[myX],y = cancar["Loss"],distribution ="tweedie",hidden = [1],epochs = 1000,
train_samples_per_iteration = -1,reproducible = True,activation = "Tanh",balance_classes = False,
force_load_balance = False, seed = 2353123,tweedie_power = 1.5,score_training_samples = 0,
score_validation_samples = 0)
mean_residual_deviance = dl.mean_residual_deviance()
# With weights
dl = h2o.deeplearning(x = cancar[myX],y = cancar["Loss"],distribution ="tweedie",hidden = [1],epochs = 1000,
train_samples_per_iteration = -1,reproducible = True,activation = "Tanh",balance_classes = False,
force_load_balance = False, seed = 2353123,tweedie_power = 1.5,score_training_samples = 0,
score_validation_samples = 0,weights_column = "Insured",training_frame = cancar)
if __name__ == "__main__":
h2o.run_test(sys.argv, tweedie_weights)
|
Python
| 0.998797
|
@@ -48,17 +48,16 @@
rt h2o%0A%0A
-#
def twee
@@ -82,20 +82,13 @@
t):%0A
-h2o.init()%0A%0A
+%0A
data
@@ -158,16 +158,20 @@
.csv%22))%0A
+
data%5B%22C1
@@ -228,24 +228,28 @@
.asfactor()%0A
+
data%5B%22C3M3%22%5D
@@ -302,24 +302,28 @@
.asfactor()%0A
+
data%5B%22C4M3%22%5D
@@ -376,24 +376,28 @@
.asfactor()%0A
+
data%5B%22C1M2%22%5D
@@ -450,24 +450,28 @@
.asfactor()%0A
+
data%5B%22Merit%22
@@ -499,16 +499,20 @@
actor()%0A
+
data%5B%22Cl
@@ -544,16 +544,20 @@
actor()%0A
+
loss = d
@@ -586,16 +586,20 @@
sured%22%5D%0A
+
loss.set
@@ -613,16 +613,20 @@
%22Loss%22)%0A
+
cancar =
@@ -644,16 +644,20 @@
(data)%0A%0A
+
# Withou
@@ -666,16 +666,20 @@
weights%0A
+
myX = %5B%22
@@ -708,16 +708,20 @@
%22C4M3%22%5D%0A
+
dl = h2o
@@ -835,32 +835,36 @@
+
train_samples_pe
@@ -959,32 +959,36 @@
+
+
force_load_balan
@@ -1076,32 +1076,36 @@
+
+
score_validation
@@ -1119,16 +1119,20 @@
s = 0)%0A%0A
+
mean_res
@@ -1177,16 +1177,20 @@
ance()%0A%0A
+
# With w
@@ -1196,16 +1196,20 @@
weights%0A
+
dl = h2o
@@ -1323,32 +1323,36 @@
+
train_samples_pe
@@ -1447,32 +1447,36 @@
+
force_load_balan
@@ -1542,32 +1542,36 @@
ng_samples = 0,%0A
+
|
f4c96433a3b42eb13dced4d7c00a5ab78c8ee38f
|
add pickle tests for Hit as well
|
test_elasticsearch_dsl/test_result.py
|
test_elasticsearch_dsl/test_result.py
|
import pickle
from datetime import date
from pytest import raises, fixture
from elasticsearch_dsl import response, Search, DocType, Date, Object
from elasticsearch_dsl.aggs import Terms
from elasticsearch_dsl.response.aggs import AggData, BucketData, Bucket
@fixture
def agg_response(aggs_search, aggs_data):
return response.Response(aggs_search, aggs_data)
def test_agg_response_is_pickleable(agg_response):
agg_response.hits
r = pickle.loads(pickle.dumps(agg_response))
assert r == agg_response
def test_response_is_pickleable(dummy_response):
res = response.Response(Search(), dummy_response)
res.hits
r = pickle.loads(pickle.dumps(res))
assert r == res
def test_response_stores_search(dummy_response):
s = Search()
r = response.Response(s, dummy_response)
assert r._search is s
def test_attribute_error_in_hits_is_not_hidden(dummy_response):
def f(hit):
raise AttributeError()
s = Search().doc_type(employee=f)
r = response.Response(s, dummy_response)
with raises(TypeError):
r.hits
def test_interactive_helpers(dummy_response):
res = response.Response(Search(), dummy_response)
hits = res.hits
h = hits[0]
rhits = "[<Hit(test-index/company/elasticsearch): %s>, <Hit(test-index/employee/42): %s...}>, <Hit(test-index/employee/47): %s...}>, <Hit(test-index/employee/53): {}>]" % (
repr(dummy_response['hits']['hits'][0]['_source']),
repr(dummy_response['hits']['hits'][1]['_source'])[:60],
repr(dummy_response['hits']['hits'][2]['_source'])[:60],
)
assert res
assert '<Response: %s>' % rhits == repr(res)
assert rhits == repr(hits)
assert set(['meta', 'city', 'name']) == set(dir(h))
assert "<Hit(test-index/company/elasticsearch): %r>" % dummy_response['hits']['hits'][0]['_source'] == repr(h)
def test_empty_response_is_false(dummy_response):
dummy_response['hits']['hits'] = []
res = response.Response(Search(), dummy_response)
assert not res
def test_len_response(dummy_response):
res = response.Response(Search(), dummy_response)
assert len(res) == 4
def test_iterating_over_response_gives_you_hits(dummy_response):
res = response.Response(Search(), dummy_response)
hits = list(h for h in res)
assert res.success()
assert 123 == res.took
assert 4 == len(hits)
assert all(isinstance(h, response.Hit) for h in hits)
h = hits[0]
assert 'test-index' == h.meta.index
assert 'company' == h.meta.doc_type
assert 'elasticsearch' == h.meta.id
assert 12 == h.meta.score
assert hits[1].meta.parent == 'elasticsearch'
def test_hits_get_wrapped_to_contain_additional_attrs(dummy_response):
res = response.Response(Search(), dummy_response)
hits = res.hits
assert 123 == hits.total
assert 12.0 == hits.max_score
def test_hits_provide_dot_and_bracket_access_to_attrs(dummy_response):
res = response.Response(Search(), dummy_response)
h = res.hits[0]
assert 'Elasticsearch' == h.name
assert 'Elasticsearch' == h['name']
assert 'Honza' == res.hits[2].name.first
with raises(KeyError):
h['not_there']
with raises(AttributeError):
h.not_there
def test_slicing_on_response_slices_on_hits(dummy_response):
res = response.Response(Search(), dummy_response)
assert res[0] is res.hits[0]
assert res[::-1] == res.hits[::-1]
def test_aggregation_base(agg_response):
assert agg_response.aggs is agg_response.aggregations
assert isinstance(agg_response.aggs, response.AggResponse)
def test_aggregations_can_be_iterated_over(agg_response):
aggs = [a for a in agg_response.aggs]
assert len(aggs) == 2
assert all(map(lambda a: isinstance(a, AggData), aggs))
def test_aggregations_can_be_retrieved_by_name(agg_response, aggs_search):
a = agg_response.aggs['popular_files']
assert isinstance(a, BucketData)
assert isinstance(a.meta.agg, Terms)
assert a.meta.agg is aggs_search.aggs.aggs['popular_files']
def test_bucket_response_can_be_iterated_over(agg_response):
popular_files = agg_response.aggregations.popular_files
buckets = [b for b in popular_files]
assert all(isinstance(b, Bucket) for b in buckets)
assert buckets == popular_files.buckets
def test_bucket_keys_get_deserialized(aggs_data, aggs_search):
class Commit(DocType):
info = Object(properties={'committed_date': Date()})
aggs_search._doc_type_map = {'commit': Commit}
agg_response = response.Response(aggs_search, aggs_data)
per_month = agg_response.aggregations.per_month
for b in per_month:
assert isinstance(b.key, date)
|
Python
| 0
|
@@ -685,24 +685,200 @@
t r == res%0A%0A
+def test_hit_is_pickleable(dummy_response):%0A res = response.Response(Search(), dummy_response)%0A hits = pickle.loads(pickle.dumps(res.hits))%0A%0A assert hits == res.hits%0A%0A
def test_res
|
169dda227f85f77ac52a4295e8fb7acd1b3184f5
|
Make byte-separator mandatory in MAC addresses
|
core/observables/mac_address.py
|
core/observables/mac_address.py
|
from __future__ import unicode_literals
import re
from core.observables import Observable
class MacAddress(Observable):
regex = r'(?P<search>(([0-9A-Fa-f]{1,2}[.:-]?){5,7}([0-9A-Fa-f]{1,2})))'
exclude_fields = Observable.exclude_fields
DISPLAY_FIELDS = Observable.DISPLAY_FIELDS
@classmethod
def is_valid(cls, match):
value = match.group('search')
return len(value) > 0
def normalize(self):
self.value = re.sub(r'[.:\-]', '', self.value)
self.value = self.value.upper()
self.value = \
':'.join([self.value[i:i + 2] for i in range(0, len(self.value), 2)])
|
Python
| 0
|
@@ -165,17 +165,16 @@
,2%7D%5B.:-%5D
-?
)%7B5,7%7D(%5B
@@ -431,37 +431,32 @@
(self):%0A
-self.
value = re.sub(r
@@ -484,40 +484,8 @@
lue)
-%0A self.value = self.value
.upp
@@ -510,17 +510,25 @@
value =
-%5C
+':'.join(
%0A
@@ -536,23 +536,8 @@
-':'.join(%5Bself.
valu
@@ -556,16 +556,17 @@
or i in
+x
range(0,
@@ -570,21 +570,16 @@
(0, len(
-self.
value),
@@ -580,11 +580,19 @@
lue), 2)
-%5D
+%0A
)%0A
|
a28f8fe4427c12c2523b16903325d0362b53123e
|
Drop version dependency
|
acme/setup.py
|
acme/setup.py
|
import sys
from setuptools import setup
from setuptools import find_packages
version = '0.2.0.dev0'
install_requires = [
# load_pem_private/public_key (>=0.6)
# rsa_recover_prime_factors (>=0.8)
'cryptography>=0.8',
'ndg-httpsclient', # urllib3 InsecurePlatformWarning (#304)
'pyasn1', # urllib3 InsecurePlatformWarning (#304)
# Connection.set_tlsext_host_name (>=0.13), X509Req.get_extensions (>=0.15)
'PyOpenSSL>=0.15',
'pyrfc3339',
'pytz',
'requests',
'setuptools', # pkg_resources
'six',
'werkzeug',
]
# env markers in extras_require cause problems with older pip: #517
if sys.version_info < (2, 7):
install_requires.extend([
# only some distros recognize stdlib argparse as already satisfying
'argparse',
'mock<1.1.0',
])
else:
install_requires.append('mock')
docs_extras = [
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
'sphinxcontrib-programoutput',
]
testing_extras = [
'nose',
'tox',
]
setup(
name='acme',
version=version,
description='ACME protocol implementation in Python',
url='https://github.com/letsencrypt/letsencrypt',
author="Let's Encrypt Project",
author_email='client-dev@letsencrypt.org',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'docs': docs_extras,
'testing': testing_extras,
},
entry_points={
'console_scripts': [
'jws = acme.jose.jws:CLI.run',
],
},
test_suite='acme',
)
|
Python
| 0.000001
|
@@ -396,41 +396,8 @@
.13)
-, X509Req.get_extensions (%3E=0.15)
%0A
@@ -412,17 +412,17 @@
SSL%3E=0.1
-5
+3
',%0A '
|
cf2af12d926370d83e909e0d38d2c774553e0408
|
Fix handshake
|
YamTorrent.py
|
YamTorrent.py
|
#!/usr/bin/env python3
import sys
import requests
import hashlib
import bencodepy
import struct
import socket
def DEBUG(s):
if debugging:
print(s)
def ERROR(s):
print(s)
exit()
def main():
# open file in binary
try:
torrentfile = open(sys.argv[1], "rb").read()
except IOError:
ERROR("BAD FILE NAME: " + sys.argv[1])
DEBUG("BEGINNING")
# dictionary of torrent file
# torrentdict = bencode.bdecode(torrentfile)
torrentdict = bencodepy.decode(torrentfile)
# print(torrentdict)
# print(type(torrentdict))
# re-bencode the info section
info = torrentdict[b"info"]
# print(info)
bencodedinfo = bencodepy.encode(info)
# print(info)
# print(bencodedinfo)
#COMPUTE PARAMETERS FOR ANNOUNCE
# SHA1 hash of info section
sha1 = hashlib.sha1(bencodedinfo)
info_hash = sha1.digest()
# print(type(bencodedinfo))
# for char in info_hash:
# print(hex(char))
# print(char)
peer_id = (hashlib.sha1(b"0")).digest()
port = b'6881'
uploaded = b'0'
downloaded = b'0'
try:
left = 0
for f in info[b'files']:
left += f[b'length']
except KeyError:
left = info[b'length']
compact = b'1'
event = b'started'
url = torrentdict[b'announce']
p = {'info_hash': info_hash, 'peer_id': peer_id, 'port': port, 'uploaded': uploaded, 'downloaded': downloaded, 'left': left, 'compact': compact, 'event': event}
#CONTACT TRACKER
r = requests.get(url.decode(), params=p)
# print(info_hash)
# print(bencodedinfo)
# with open("temp.txt",'wb') as f:
# f.write(r.text.encode())
DEBUG('URL')
DEBUG(r.url)
DEBUG('END URL')
DEBUG('CONTENT')
DEBUG(r.content)
DEBUG('END CONTENT')
try:
response = bencodepy.decode(r.content)
except bencodepy.exceptions.DecodingError:
ERROR("BAD RESPONSE")
#COMPUTE PEERS
peers = response[b'peers']
peers_list = []
for i in range(0,len(peers),6):
peer_dict = {}
#not sure if these are right
peer_dict['ip'] = socket.inet_ntoa(peers[i:i+4])
peer_dict['ip_int'] = struct.unpack("!L",peers[i:i+4])[0]
peer_dict['port'] = struct.unpack("!H",peers[i+4:i+6])[0]
peers_list.append(peer_dict)
DEBUG(peers_list)
first_peer = peers_list[0]
first_connection = socket.create_connection((first_peer['ip'],first_peer['port']))
DEBUG(type(first_connection))
handshake = b"handshake: " + struct.pack('!B',19) + b"BitTorrent protocol" + bytearray(8) + info_hash + peer_id
DEBUG(handshake)
DEBUG(len(handshake))
DEBUG(len(info_hash))
DEBUG(len(peer_id))
first_connection.sendall(handshake)
peer_response = first_connection.recv(4096)
DEBUG(peer_response)
if __name__ == '__main__':
debugging = True
main()
|
Python
| 0.000004
|
@@ -105,17 +105,16 @@
ocket%0A%0A%0A
-%0A
def DEBU
@@ -115,16 +115,17 @@
f DEBUG(
+*
s):%0A
@@ -156,12 +156,14 @@
int(
+*
s)%0A%0A
+%0A
def
@@ -168,16 +168,17 @@
f ERROR(
+*
s):%0A
@@ -183,16 +183,17 @@
print(
+*
s)%0A e
@@ -1489,20 +1489,16 @@
event%7D%0A
-
%0A #CO
@@ -1955,17 +1955,16 @@
TE PEERS
-
%0A%0A pe
@@ -2529,25 +2529,8 @@
ke =
- b%22handshake: %22 +
str
@@ -2803,24 +2803,46 @@
%0A%0A DEBUG(
+%22handshake response%22,
peer_respons
|
877a7ff09056ea7ca03f0b31eb4ef8e30ac9d3fa
|
Change names we expect in spreadsheet
|
openprescribing/pipeline/management/commands/import_pcns.py
|
openprescribing/pipeline/management/commands/import_pcns.py
|
from django.core.management import BaseCommand
from django.db import transaction
from frontend.models import PCN, Practice
from openpyxl import load_workbook
class Command(BaseCommand):
help = "This command imports PCNs and PCN mappings"
def add_arguments(self, parser):
parser.add_argument("--filename")
def handle(self, *args, **kwargs):
workbook = load_workbook(kwargs["filename"])
details_sheet = workbook.get_sheet_by_name("PCN Details")
members_sheet = workbook.get_sheet_by_name("PCN Core Partner Details")
pcn_details = {}
for code, name in self.get_pcn_details_from_sheet(details_sheet):
pcn_details[code] = {"name": name, "members": set()}
for practice_code, pcn_code in self.get_pcn_members_from_sheet(members_sheet):
pcn_details[pcn_code]["members"].add(practice_code)
with transaction.atomic():
for code, details in pcn_details.items():
PCN.objects.update_or_create(
code=code, defaults={"name": details["name"]}
)
Practice.objects.filter(code__in=details["members"]).update(pcn=code)
def get_pcn_details_from_sheet(self, sheet):
rows = ([cell.value for cell in row] for row in sheet.rows)
headers = next(rows)
CODE_COL = headers.index("PCN Code")
NAME_COL = headers.index("PCN Name")
for n, row in enumerate(rows, start=2):
code = row[CODE_COL]
name = row[NAME_COL]
# Skip blank lines
if not code and not name:
continue
if not code or not name:
raise ValueError("Blank code or name on row {}".format(n))
yield code, name
def get_pcn_members_from_sheet(self, sheet):
rows = ([cell.value for cell in row] for row in sheet.rows)
headers = next(rows)
PRACTICE_COL = headers.index("Partner Organisation Code")
PCN_COL = headers.index("PCN Code")
for n, row in enumerate(rows, start=2):
practice_code = row[PRACTICE_COL]
pcn_code = row[PCN_COL]
# Skip blank lines
if not practice_code and not pcn_code:
continue
if not practice_code or not pcn_code:
raise ValueError("Blank code on row {}".format(n))
yield practice_code, pcn_code
|
Python
| 0
|
@@ -467,17 +467,16 @@
ame(%22PCN
-
Details%22
@@ -1958,17 +1958,18 @@
%22Partner
-
+%5Cn
Organisa
@@ -1972,17 +1972,18 @@
nisation
-
+%5Cn
Code%22)%0A
|
6495a032d61d24df3c86b7cdf1d02debb6dcbfda
|
fix doc of camvid
|
chainercv/datasets/camvid/camvid_dataset.py
|
chainercv/datasets/camvid/camvid_dataset.py
|
import glob
import os
import shutil
import numpy as np
import chainer
from chainer.dataset import download
from chainercv import utils
from chainercv.utils import read_image
root = 'pfnet/chainercv/camvid'
url = 'https://github.com/alexgkendall/SegNet-Tutorial/archive/master.zip'
camvid_label_names = (
'Sky',
'Building',
'Pole',
'Road_marking',
'Road',
'Pavement',
'Tree',
'SignSymbol',
'Fence,'
'Car',
'Pedestrian',
'Bicyclist',
'Unlabelled',
)
camvid_label_colors = (
(128, 128, 128),
(128, 0, 0),
(192, 192, 128),
(255, 69, 0),
(128, 64, 128),
(60, 40, 222),
(128, 128, 0),
(192, 128, 128),
(64, 64, 128),
(64, 0, 128),
(64, 64, 0),
(0, 128, 192),
(0, 0, 0),
)
def get_camvid():
data_root = download.get_dataset_directory(root)
download_file_path = utils.cached_download(url)
if len(glob.glob(os.path.join(data_root, '*'))) != 9:
utils.extractall(
download_file_path, data_root, os.path.splitext(url)[1])
data_dir = os.path.join(data_root, 'SegNet-Tutorial-master/CamVid')
if os.path.exists(data_dir):
for fn in glob.glob(os.path.join(data_dir, '*')):
shutil.move(fn, os.path.join(data_root, os.path.basename(fn)))
shutil.rmtree(os.path.dirname(data_dir))
return data_root
class CamVidDataset(chainer.dataset.DatasetMixin):
"""Dataset class for a semantic segmantion task on CamVid `u`_.
.. _`u`: https://github.com/alexgkendall/SegNet-Tutorial/tree/master/CamVid
Args:
data_dir (string): Path to the root of the training data. If this is
:obj:`auto`, this class will automatically download data for you
under :obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/camvid`.
split ({'train', 'val', 'test'}): Select from dataset splits used
in VOC.
"""
def __init__(self, data_dir='auto', split='train'):
if split not in ['train', 'val', 'test']:
raise ValueError(
'Please pick split from \'train\', \'val\', \'test\'')
if data_dir == 'auto':
data_dir = get_camvid()
img_list_filename = os.path.join(data_dir, '{}.txt'.format(split))
self.filenames = [
[os.path.join(data_dir, fn.replace('/SegNet/CamVid/', ''))
for fn in line.split()] for line in open(img_list_filename)]
def __len__(self):
return len(self.filenames)
def get_example(self, i):
"""Returns the i-th example.
Returns a color image and a label image. Both of them are in CHW
format.
Args:
i (int): The index of the example.
Returns:
tuple of color image and label whose shapes are (3, H, W) and
(H, W) respectively. H and W are height and width of the images.
The dtype of the color image is :obj:`numpy.float32` and
the dtype of the label image is :obj:`numpy.int32`.
"""
if i >= len(self):
raise IndexError('index is too large')
image_fn, label_fn = self.filenames[i]
img = read_image(image_fn, color=True)
label_map = read_image(label_fn, dtype=np.int32, color=False)[0]
return img, label_map
|
Python
| 0
|
@@ -2601,48 +2601,8 @@
age.
- Both of them are in CHW%0A format.
%0A%0A
|
14cee1112f2a506f4ec547b80e897036f601ab6d
|
Fix tests/utils/http_requests.py
|
chroma-manager/tests/utils/http_requests.py
|
chroma-manager/tests/utils/http_requests.py
|
#!/usr/bin/env python
#
# ========================================================
# Copyright (c) 2012 Whamcloud, Inc. All rights reserved.
# ========================================================
import json
import requests
from urlparse import urljoin
class HttpRequests(object):
    """Thin wrapper over a ``requests`` session.

    Resolves every relative URL against a fixed server base URL, defaults
    the session to JSON in/out, and wraps results in ``HttpResponse``.
    """

    def __init__(self, server_http_url='', *args, **kwargs):
        self.server_http_url = server_http_url
        self.session = requests.session(
            headers={"Accept": "application/json",
                     "Content-type": "application/json"})

    def get(self, url, **kwargs):
        full_url = urljoin(self.server_http_url, url)
        return HttpResponse(self.session.get(full_url, **kwargs))

    def post(self, url, body=None, **kwargs):
        # A dict body is serialised to JSON unless the caller already
        # supplied raw `data`.
        if body and 'data' not in kwargs:
            kwargs['data'] = json.dumps(body)
        full_url = urljoin(self.server_http_url, url)
        return HttpResponse(self.session.post(full_url, **kwargs))

    def put(self, url, body=None, **kwargs):
        if body and 'data' not in kwargs:
            kwargs['data'] = json.dumps(body)
        full_url = urljoin(self.server_http_url, url)
        return HttpResponse(self.session.put(full_url, **kwargs))

    def delete(self, url, **kwargs):
        full_url = urljoin(self.server_http_url, url)
        return HttpResponse(self.session.delete(full_url, **kwargs))

    def request(self, method, url, **kwargs):
        full_url = urljoin(self.server_http_url, url)
        return HttpResponse(self.session.request(method, full_url, **kwargs))
class HttpResponse(requests.Response):
def __init__(self, response, *args, **kwargs):
super(HttpResponse, self).__init__(*args, **kwargs)
self.__dict__.update(response.__dict__.copy())
@property
def json(self):
if self.text == '[]':
return []
else:
try:
return json.loads(self.text)
except ValueError:
print "Bad JSON: %s" % self.text
raise
@property
def successful(self):
# TODO: Make better
return 200 <= self.status_code < 300
class AuthorizedHttpRequests(HttpRequests):
    """``HttpRequests`` that opens a session and authenticates up front."""

    def __init__(self, username, password, *args, **kwargs):
        super(AuthorizedHttpRequests, self).__init__(*args, **kwargs)

        session_response = self.get("/api/session/")
        if not session_response.successful:
            raise RuntimeError("Failed to open session")
        # Carry the CSRF token in both header and cookie for later requests.
        csrf = session_response.cookies['csrftoken']
        self.session.headers['X-CSRFToken'] = csrf
        self.session.cookies['csrftoken'] = csrf
        self.session.cookies['sessionid'] = session_response.cookies['sessionid']

        login_response = self.post(
            "/api/session/",
            data=json.dumps({'username': username, 'password': password}))
        if not login_response.successful:
            raise RuntimeError("Failed to authenticate")
|
Python
| 0.000001
|
@@ -433,16 +433,39 @@
session(
+)%0A self.session.
headers
@@ -496,16 +496,48 @@
n/json%22,
+%0A
%22Conten
@@ -564,17 +564,52 @@
n/json%22%7D
-)
+%0A self.session.verify = False
%0A%0A de
|
af819b3758d87b2214211a4bb0300e0c4b0a3057
|
Add a test for issue #1934
|
tests/functional/test_install_reqs.py
|
tests/functional/test_install_reqs.py
|
import os.path
import textwrap
import pytest
from tests.lib import (pyversion, path_to_url,
_create_test_package_with_subdirectory)
from tests.lib.local_repos import local_checkout
def test_requirements_file(script):
"""
Test installing from a requirements file.
"""
# A second, version-capped requirement exercises version pinning.
other_lib_name, other_lib_version = 'anyjson', '0.3'
script.scratch_path.join("initools-req.txt").write(textwrap.dedent("""\
INITools==0.2
# and something else to test out:
%s<=%s
""" % (other_lib_name, other_lib_version)))
result = script.pip(
'install', '-r', script.scratch_path / 'initools-req.txt'
)
# Both requirements must end up installed into site-packages.
assert (
script.site_packages / 'INITools-0.2-py%s.egg-info' %
pyversion in result.files_created
)
assert script.site_packages / 'initools' in result.files_created
assert result.files_created[script.site_packages / other_lib_name].dir
# The egg-info directory name encodes name, version and python version.
fn = '%s-%s-py%s.egg-info' % (other_lib_name, other_lib_version, pyversion)
assert result.files_created[script.site_packages / fn].dir
def test_schema_check_in_requirements_file(script):
    """
    Test installing from a requirements file with an invalid vcs schema..
    """
    # A bare git:// URL (no "git+" vcs prefix) is not a valid requirement,
    # so the pip invocation is expected to fail.
    bad_requirement = (
        "git://github.com/alex/django-fixture-generator.git"
        "#egg=fixture_generator"
    )
    script.scratch_path.join("file-egg-req.txt").write(
        "\n%s\n" % (bad_requirement)
    )

    with pytest.raises(AssertionError):
        script.pip(
            "install", "-vvv", "-r", script.scratch_path / "file-egg-req.txt"
        )
def test_relative_requirements_file(script, data):
"""
Test installing from a requirements file with a relative path with an
egg= definition..
"""
# Build a file:// URL containing a relative ".." hop plus an #egg fragment.
url = path_to_url(
os.path.join(data.root, "packages", "..", "packages", "FSPkg")
) + '#egg=FSPkg'
script.scratch_path.join("file-egg-req.txt").write(textwrap.dedent("""\
%s
""" % url))
result = script.pip(
'install', '-vvv', '-r', script.scratch_path / 'file-egg-req.txt'
)
# FSPkg must be installed despite the relative path in its URL.
assert (
script.site_packages / 'FSPkg-0.1dev-py%s.egg-info' % pyversion
) in result.files_created, str(result)
assert (script.site_packages / 'fspkg') in result.files_created, (
str(result.stdout)
)
def test_multiple_requirements_files(script, tmpdir):
"""
Test installing from multiple nested requirements files.
"""
other_lib_name, other_lib_version = 'anyjson', '0.3'
# The top-level requirements file contains an editable VCS requirement
# and a nested "-r" reference to a second requirements file.
script.scratch_path.join("initools-req.txt").write(
textwrap.dedent("""
-e %s@10#egg=INITools-dev
-r %s-req.txt
""") %
(
local_checkout(
'svn+http://svn.colorstudy.com/INITools/trunk',
tmpdir.join("cache"),
),
other_lib_name
),
)
# The nested requirements file pins the second library.
script.scratch_path.join("%s-req.txt" % other_lib_name).write(
"%s<=%s" % (other_lib_name, other_lib_version)
)
result = script.pip(
'install', '-r', script.scratch_path / 'initools-req.txt'
)
# The nested requirement is installed normally...
assert result.files_created[script.site_packages / other_lib_name].dir
fn = '%s-%s-py%s.egg-info' % (other_lib_name, other_lib_version, pyversion)
assert result.files_created[script.site_packages / fn].dir
# ...while the editable requirement is checked out under venv/src.
assert script.venv / 'src' / 'initools' in result.files_created
def test_respect_order_in_requirements_file(script, data):
"""Packages must be downloaded in the order listed in the file."""
script.scratch_path.join("frameworks-req.txt").write(textwrap.dedent("""\
parent
child
simple
"""))
result = script.pip(
'install', '--no-index', '-f', data.find_links, '-r',
script.scratch_path / 'frameworks-req.txt'
)
# Recover the download order from pip's log output.
downloaded = [line for line in result.stdout.split('\n')
if 'Downloading/unpacking' in line]
assert 'parent' in downloaded[0], (
'First download should be "parent" but was "%s"' % downloaded[0]
)
assert 'child' in downloaded[1], (
'Second download should be "child" but was "%s"' % downloaded[1]
)
assert 'simple' in downloaded[2], (
'Third download should be "simple" but was "%s"' % downloaded[2]
)
def test_install_local_editable_with_extras(script, data):
    """An editable install with an extras specifier pulls in the extras."""
    pkg_dir = data.packages.join("LocalExtras")
    res = script.pip(
        'install', '-e', pkg_dir + '[bar]', '--process-dependency-links',
        expect_error=False,
    )
    # Editable installs register through easy-install.pth plus an egg-link.
    assert script.site_packages / 'easy-install.pth' in res.files_updated, (
        str(res)
    )
    assert (
        script.site_packages / 'LocalExtras.egg-link' in res.files_created
    ), str(res)
    # The [bar] extra must have installed its dependency.
    assert script.site_packages / 'simple' in res.files_created, str(res)
def test_install_local_editable_with_subdirectory(script):
    """An editable VCS install honours the ``subdirectory`` URL fragment."""
    pkg_path = _create_test_package_with_subdirectory(script,
                                                      'version_subdir')
    vcs_url = '%s#egg=version_subpkg&subdirectory=version_subdir' % (
        'git+file://%s' % pkg_path,)
    result = script.pip('install', '-e', vcs_url)

    result.assert_installed('version-subpkg', sub_dir='version_subdir')
|
Python
| 0
|
@@ -4685,24 +4685,275 @@
str(res)%0A%0A%0A
+def test_install_collected_dependancies_first(script):%0A result = script.pip(%0A 'install', 'paramiko',%0A )%0A text = %5Bline for line in result.stdout.split('%5Cn')%0A if 'Installing' in line%5D%5B0%5D%0A assert text.endswith('paramiko')%0A%0A%0A
def test_ins
|
e7a632718f379fb1ede70d1086f55279e4251e11
|
fix geotag access - not an obj
|
cinder/scheduler/filters/geo_tags_filter.py
|
cinder/scheduler/filters/geo_tags_filter.py
|
# Copyright (c) 2014 Intel
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import db
from cinder.openstack.common import log as logging
from cinder.openstack.common.scheduler import filters
LOG = logging.getLogger(__name__)
class GeoTagsFilter(filters.BaseHostFilter):
    """GeoTags Filter."""

    def host_passes(self, host_state, filter_properties):
        """Return True if the host's geo tag allows placing volumes on it.

        Hosts with no geo tag pass by default; tagged hosts pass only when
        the tag is marked 'valid'.
        """
        #(licostan): Add geotag data to the host_state instead of
        #querying it...
        #TODO: add scheduler hints to cinder.
        metadata_hints = filter_properties.get('metadata') or {}
        # Reserved for the finer-grained hint checks below; currently unused.
        gt_hints = metadata_hints.get('geo_tags', None)

        context = filter_properties['context']
        geo_tag = db.geo_tag_get_by_node_name(context, host_state.host)

        if not geo_tag:
            LOG.info('NO GEO TAG FOUND FOR %s' % host_state.host)
            return True

        #do other geotags check here based on gt-hints
        # BUG FIX: the DB layer returns a mapping, not an object, so the
        # status must be read with item access, not attribute access.
        if geo_tag['valid_invalid'].lower() == 'valid':
            LOG.info('GEO TAG FOUND FOR %s' % host_state.host)
            return True

        LOG.info('GEO TAG INVALID FOR %s' % host_state.host)
        return False
|
Python
| 0.000001
|
@@ -1536,17 +1536,18 @@
geo_tag
-.
+%5B'
valid_in
@@ -1551,16 +1551,18 @@
_invalid
+'%5D
.lower()
|
16ab5dcf1f6e52f89435adccdfa7021ce24e29a8
|
fix formatting via make fix
|
tests/metal/contrib/test_baselines.py
|
tests/metal/contrib/test_baselines.py
|
import numpy as np
import torch
from metal.end_model import SparseLogisticRegression
def test_sparselogreg(self):
    """Confirm sparse logreg can overfit, works on padded data"""
    F = 1000  # total number of possible features
    N = 50  # number of data points
    S = [10, 100]  # range of features per data point

    # Build a zero-padded feature-index matrix: each row holds a random
    # number of random feature ids, the remainder stays 0 (the pad index).
    X = np.zeros((N, S[1]))
    for row in range(N):
        n_feats = np.random.randint(S[0], S[1])
        X[row, :n_feats] = np.random.randint(F, size=(1, n_feats))
    X = torch.from_numpy(X).long()
    Y = torch.from_numpy(np.random.randint(1, 3, size=(N,)))

    em = SparseLogisticRegression(
        seed=1, input_dim=F, padding_idx=0, verbose=False
    )
    em.train_model((X, Y), n_epochs=5, optimizer="sgd", lr=0.0005)
    # The padding embedding row must remain exactly zero after training.
    self.assertEqual(float(em.network[-1].W.weight.data[0, :].sum()), 0.0)
    score = em.score((X, Y), verbose=False)
    self.assertGreater(score, 0.95)
|
Python
| 0
|
@@ -1,13 +1,12 @@
-%0A
import numpy
|
9535fcb8cb811ed570bf20dd29857722dc46a1f3
|
update expressions test to use unittest2 assertItemsEqual
|
tests/modeltests/expressions/tests.py
|
tests/modeltests/expressions/tests.py
|
from django.test import TestCase
from django.db.models import F
from django.core.exceptions import FieldError
from models import Employee, Company
class ExpressionsTestCase(TestCase):
"""Tests for F() expressions in filters, updates and attribute assignment."""
fixtures = ['f_expression_testdata.json']
def assertItemsEqual(self, a, b):
# Order-insensitive sequence comparison.
#fixme, replace with unittest2 function
return self.assertEqual(sorted(a), sorted(b))
def test_basic_f_expression(self):
"""F() can reference sibling fields in filters and updates."""
company_query = Company.objects.values('name','num_employees',
'num_chairs'
).order_by('name',
'num_employees',
'num_chairs')
# We can filter for companies where the number of employees is
# greater than the number of chairs.
self.assertItemsEqual(company_query.filter(
num_employees__gt=F('num_chairs')),
[{'num_chairs': 5, 'name': u'Example Inc.',
'num_employees': 2300},
{'num_chairs': 1, 'name': u'Test GmbH',
'num_employees': 32}])
# We can set one field to have the value of another field Make
# sure we have enough chairs
company_query.update(num_chairs=F('num_employees'))
self.assertItemsEqual(company_query,
[{'num_chairs': 2300, 'name': u'Example Inc.',
'num_employees': 2300},
{'num_chairs': 3, 'name': u'Foobar Ltd.',
'num_employees': 3},
{'num_chairs': 32, 'name': u'Test GmbH',
'num_employees': 32}])
# We can perform arithmetic operations in expressions. Make
# sure we have 2 spare chairs
company_query.update(num_chairs=F('num_employees')+2)
self.assertItemsEqual(company_query,
[{'num_chairs': 2302, 'name': u'Example Inc.',
'num_employees': 2300},
{'num_chairs': 5, 'name': u'Foobar Ltd.',
'num_employees': 3},
{'num_chairs': 34, 'name': u'Test GmbH',
'num_employees': 32}])
# Law of order of operations is followed
company_query.update(num_chairs=F('num_employees') +
2 * F('num_employees'))
self.assertItemsEqual(company_query,
[{'num_chairs': 6900, 'name': u'Example Inc.',
'num_employees': 2300},
{'num_chairs': 9, 'name': u'Foobar Ltd.',
'num_employees': 3},
{'num_chairs': 96, 'name': u'Test GmbH',
'num_employees': 32}])
# Law of order of operations can be overridden by parentheses
company_query.update(num_chairs=((F('num_employees') + 2) *
F('num_employees')))
self.assertItemsEqual(company_query,
[{'num_chairs': 5294600, 'name': u'Example Inc.',
'num_employees': 2300},
{'num_chairs': 15, 'name': u'Foobar Ltd.',
'num_employees': 3},
{'num_chairs': 1088, 'name': u'Test GmbH',
'num_employees': 32}])
# The relation of a foreign key can become copied over to an
# other foreign key.
self.assertEqual(Company.objects.update(point_of_contact=F('ceo')), 3)
self.assertEqual(repr([c.point_of_contact for
c in Company.objects.all()]),
'[<Employee: Joe Smith>, <Employee: Frank Meyer>, <Employee: Max Mustermann>]')
def test_f_expression_spanning_join(self):
"""F() can traverse relations in filters but not in bulk updates."""
# F Expressions can also span joins
self.assertQuerysetEqual(
Company.objects.filter(
ceo__firstname=F('point_of_contact__firstname')
).distinct().order_by('name'),
['<Company: Foobar Ltd.>', '<Company: Test GmbH>'])
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).update(name='foo')
self.assertEqual(Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).get().name,
u'foo')
# Updating through a join is not allowed and must raise FieldError.
self.assertRaises(FieldError,
Company.objects.exclude(ceo__firstname=F('point_of_contact__firstname')).update,
name=F('point_of_contact__lastname'))
def test_f_expression_update_attribute(self):
"""F() assigned to an instance attribute is resolved on save()."""
# F expressions can be used to update attributes on single objects
test_gmbh = Company.objects.get(name='Test GmbH')
self.assertEqual(test_gmbh.num_employees, 32)
test_gmbh.num_employees = F('num_employees') + 4
test_gmbh.save()
test_gmbh = Company.objects.get(pk=test_gmbh.pk)
self.assertEqual(test_gmbh.num_employees, 36)
# F expressions cannot be used to update attributes which are
# foreign keys, or attributes which involve joins.
test_gmbh.point_of_contact = None
test_gmbh.save()
self.assertEqual(test_gmbh.point_of_contact, None)
self.assertRaises(ValueError,
setattr,
test_gmbh, 'point_of_contact', F('ceo'))
test_gmbh.point_of_contact = test_gmbh.ceo
test_gmbh.save()
test_gmbh.name = F('ceo__last_name')
self.assertRaises(FieldError,
test_gmbh.save)
# F expressions cannot be used to update attributes on objects
# which do not yet exist in the database
acme = Company(name='The Acme Widget Co.', num_employees=12,
num_chairs=5, ceo=test_gmbh.ceo)
acme.num_employees = F('num_employees') + 16
self.assertRaises(TypeError,
acme.save)
|
Python
| 0.000001
|
@@ -231,149 +231,8 @@
'%5D%0A%0A
- def assertItemsEqual(self, a, b):%0A #fixme, replace with unittest2 function%0A return self.assertEqual(sorted(a), sorted(b))%0A%0A
|
0341c38dff42ae5e86353c6d53c2d30aabca555e
|
update py-jupyter-client and new setuptools dependency (#13425)
|
var/spack/repos/builtin/packages/py-jupyter-client/package.py
|
var/spack/repos/builtin/packages/py-jupyter-client/package.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJupyterClient(PythonPackage):
"""Jupyter protocol client APIs"""

# Upstream project and source tarball locations.
homepage = "https://github.com/jupyter/jupyter_client"
url = "https://github.com/jupyter/jupyter_client/archive/4.4.0.tar.gz"

# Known releases, newest first, pinned by sha256.
version('4.4.0', sha256='2fda7fe1af35f0b4a77c4a2fd4ee38ac3666ed7f4d92a5b6ff8aaf764c38e199')
version('4.3.0', sha256='90b6ea3ced910ed94c5d558373490a81b33c672d877c1ffdc76b281e3216f1f6')
version('4.2.2', sha256='bf3e8ea4c44f07dbe2991e41031f6dab242734be424f4d40b72cc58a12c7d2ca')
version('4.2.1', sha256='547d443fb38ea667b468a6625ac374d476f8ac90fe17c3e35d75cab3cb8d40ba')
version('4.2.0', sha256='00eab54615fb10f1e508d8e7a952fbeeb2a82cd67b17582bd61be51a08a61d89')
version('4.1.1', sha256='ca6f3f66d5dc1e9bca81696ae607a93d652210c3ee9385a7c31c067d5ba88e6e')
version('4.1.0', sha256='ecf76a159381ec9880fd2c31388c6983b1d855f92f0292cf0667a90dd63f51c0')
version('4.0.0', sha256='33b15abb1307d8d3716b0d3b5d07aa22fdfbbf65a9f1aedf478a274a6adc11c0')

# Runtime/build dependencies.
depends_on('python@2.7:2.8,3.3:')
depends_on('py-traitlets', type=('build', 'run'))
depends_on('py-jupyter-core', type=('build', 'run'))
depends_on('py-pyzmq@13:', type=('build', 'run'))
|
Python
| 0
|
@@ -431,16 +431,112 @@
ar.gz%22%0A%0A
+ version('5.3.4', sha256='2af6f0e0e4d88009b11103490bea0bfb405c1c470e226c2b7b17c10e5dda9734')%0A
vers
@@ -1332,16 +1332,112 @@
.8,3.3:'
+, type=('build', 'run'))%0A depends_on('python@2.7:2.8,3.5:', type=('build', 'run'), when='@5:'
)%0A de
@@ -1575,28 +1575,232 @@
3:', type=('build', 'run'))%0A
+ depends_on('py-python-dateutil@2.1:', type=('build', 'run'), when='@5:')%0A depends_on('py-tornado@4.1:', type=('build', 'run'), when='@5:')%0A depends_on('py-setuptools', type='build', when='@5:')%0A
|
79bf0829769e456750d7904866e65ae289f1cd46
|
add missing logfile write flushes
|
_log/dslog.py
|
_log/dslog.py
|
# -*- coding:utf8 -*-
import io
import os
import sys
import traceback
# Log stream targets; init() rebinds these to a file handle when file
# logging is enabled.
LOG_STDOUT=sys.stdout
LOG_STDERR=sys.stderr

# Whether the log_* helpers should write at all, and the default log name.
logging_to_file=True
logfile="devsetup.log"
def init(project_folder, write_to_log=True):
    """Initialise logging.

    With ``write_to_log`` False, keep logging to the console (screen).
    Otherwise open ``<project_folder>/<logfile>`` and point both log
    streams at it.
    """
    global LOG_STDOUT
    global LOG_STDERR
    global logging_to_file
    global logfile

    # special case - we want to log directly to the screen
    if not write_to_log:
        logging_to_file = False
        return

    # Logging to a file: open it and redirect both streams there.
    log_filename = os.path.join(project_folder, logfile)
    logfile_handle = io.open(log_filename, "w")
    LOG_STDOUT = logfile_handle
    LOG_STDERR = logfile_handle
def convert_command_to_string(cmd):
    """Render an argv-style list as one shell-like string.

    Arguments containing a space are quoted: with single quotes when the
    argument itself contains a single quote, otherwise with double quotes.
    """
    pieces = []
    for arg in cmd:
        if ' ' in arg:
            if "'" in arg:
                pieces.append("'" + arg + "'")
            else:
                pieces.append('"' + arg + '"')
        else:
            pieces.append(arg)
    return ' '.join(pieces)
def log_command_output(output):
    """Write each line of a command's captured output to the log."""
    global LOG_STDOUT
    for line in output:
        LOG_STDOUT.write(unicode(line))
    # BUG FIX: flush after writing so the on-disk log stays complete even
    # if the process dies later (other log_* helpers already flush).
    LOG_STDOUT.flush()
def log_command_start(cmd):
    """Log, shell-prompt style, the command that is about to run."""
    global LOG_STDOUT
    message = "$ " + convert_command_to_string(cmd) + "\n"
    LOG_STDOUT.write(unicode(message))
    LOG_STDOUT.flush()
def log_command_result(retval):
    """Log the exit status of the command that just finished."""
    global LOG_STDOUT
    message = '# ... command exited with value ' + str(retval) + "\n\n"
    LOG_STDOUT.write(unicode(message))
    LOG_STDOUT.flush()
def log_comment(msg):
    """Write a one-line comment to the log (no-op when logging to screen)."""
    global LOG_STDOUT
    global logging_to_file
    if logging_to_file:
        LOG_STDOUT.write(unicode("# " + msg + "\n"))
        # BUG FIX: flush so the log file is current after every comment.
        LOG_STDOUT.flush()
def log_comment_result(msg):
    """Write a result-style comment (with trailing blank line) to the log."""
    global LOG_STDOUT
    global logging_to_file
    if logging_to_file:
        LOG_STDOUT.write(unicode("# ... " + msg + "\n\n"))
        # BUG FIX: flush so the log file is current after every write.
        LOG_STDOUT.flush()
def log_last_exception():
    """Append the traceback of the current exception to the log."""
    if logging_to_file:
        LOG_STDOUT.write(unicode("This resulted in the following exception:\n\n"))
        output = traceback.format_exc()
        LOG_STDOUT.write(unicode(output))
        # BUG FIX: flush so the traceback is on disk even if we crash next.
        LOG_STDOUT.flush()
def log_new_operation(operation):
    """Write a banner marking the start of a new operation."""
    global LOG_STDOUT
    global logging_to_file
    if logging_to_file:
        LOG_STDOUT.write(unicode("# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n"))
        LOG_STDOUT.write(unicode("# " + operation + "\n\n"))
        # BUG FIX: flush so the banner is on disk immediately.
        LOG_STDOUT.flush()
def log_operation_okay(operation):
    """Write a closing banner marking an operation as successful."""
    global LOG_STDOUT
    global logging_to_file
    if logging_to_file:
        LOG_STDOUT.write(unicode("# OKAY: " + operation + "\n"))
        LOG_STDOUT.write(unicode("# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n"))
        # BUG FIX: flush so the outcome is on disk immediately.
        LOG_STDOUT.flush()
def log_operation_failed(operation):
    """Write a closing banner marking an operation as failed."""
    global LOG_STDOUT
    global logging_to_file
    if logging_to_file:
        LOG_STDOUT.write(unicode("# FAILED: " + operation + "\n"))
        LOG_STDOUT.write(unicode("# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n"))
        # BUG FIX: flush so the outcome is on disk immediately.
        LOG_STDOUT.flush()
def log_operation_skipped(operation):
    """Write a closing banner marking an operation as skipped."""
    global LOG_STDOUT
    global logging_to_file
    if logging_to_file:
        LOG_STDOUT.write(unicode("# SKIPPED: " + operation + "\n"))
        LOG_STDOUT.write(unicode("# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n"))
        # BUG FIX: flush so the outcome is on disk immediately.
        LOG_STDOUT.flush()
|
Python
| 0.000031
|
@@ -1117,24 +1117,45 @@
ode(line))%0A%0A
+%09LOG_STDOUT.flush()%0A%0A
def log_comm
@@ -1608,24 +1608,45 @@
msg + %22%5Cn%22))
+%0A%09%09LOG_STDOUT.flush()
%0A%0Adef log_co
@@ -1773,32 +1773,53 @@
+ msg + %22%5Cn%5Cn%22))
+%0A%09%09LOG_STDOUT.flush()
%0A%0Adef log_last_e
@@ -1997,16 +1997,37 @@
output))
+%0A%09%09LOG_STDOUT.flush()
%0A%0Adef lo
@@ -2238,24 +2238,45 @@
n + %22%5Cn%5Cn%22))
+%0A%09%09LOG_STDOUT.flush()
%0A%0Adef log_op
@@ -2486,32 +2486,53 @@
=-=-=-=-=%5Cn%5Cn%22))
+%0A%09%09LOG_STDOUT.flush()
%0A%0Adef log_operat
@@ -2750,16 +2750,37 @@
=%5Cn%5Cn%22))
+%0A%09%09LOG_STDOUT.flush()
%0A%0Adef lo
@@ -2930,32 +2930,32 @@
ration + %22%5Cn%22))%0A
-
%09%09LOG_STDOUT.wri
@@ -2988,28 +2988,50 @@
=-=-=-=-=-=-=-=-=-=-=%5Cn%5Cn%22))
+%0A%09%09LOG_STDOUT.flush()%0A
|
186c509f14968e9d51a6a7d3a7a23ed07eabc286
|
Enable spam bug detection in all products (#1106)
|
auto_nag/scripts/spambug.py
|
auto_nag/scripts/spambug.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from auto_nag import people
from auto_nag.bugbug_utils import get_bug_ids_classification
from auto_nag.bzcleaner import BzCleaner
from auto_nag.utils import nice_round
COMMENT = """
The [Bugbug](https://github.com/mozilla/bugbug/) bot thinks this bug is invalid.
If you think the bot is wrong, please reopen the bug and move it back to its prior component.
Be aware this is a production bug database used by the Mozilla community to develop Firefox, and other products.
Filing test bugs here wastes the time of all our contributors, volunteers, as well as paid employees.
If you continue to abuse bugzilla.mozilla.org your account will be disabled.
""".strip()
class SpamBug(BzCleaner):
"""Bugzilla cleaner that flags likely spam bugs via the bugbug classifier."""
def __init__(self):
super().__init__()
# bug_id -> report row; filled by get_bugs, consumed by get_autofix_change.
self.autofix_bugs = {}
self.people = people.People.get_instance()

def description(self):
return "[Using ML] Detect spam bugs"

def columns(self):
# Columns of the generated report.
return ["id", "summary", "confidence"]

def sort_columns(self):
# Sort by descending confidence, then by descending bug id.
return lambda p: (-p[2], -int(p[0]))

def handle_bug(self, bug, data):
# Bugs filed by Mozilla staff are never treated as spam.
reporter = bug["creator"]
if self.people.is_mozilla(reporter):
return None

return bug

def get_bz_params(self, date):
"""Bugzilla query: open bugs filed since `date` by non-editbugs users."""
start_date, _ = self.get_dates(date)
return {
"include_fields": ["id", "groups", "summary", "creator"],
# Ignore closed bugs.
"bug_status": "__open__",
"f1": "reporter",
"v1": "%group.editbugs%",
"o1": "notsubstring",
"f2": "creation_ts",
"o2": "greaterthan",
"v2": start_date,
}

# NOTE(review): mutable default `bug_ids=[]` — harmless here because the
# list is only rebound, never mutated, but `None` would be safer.
def get_bugs(self, date="today", bug_ids=[]):
"""Fetch candidate bugs and keep those classified as spam with
confidence above the configured threshold."""
# Retrieve the bugs with the fields defined in get_bz_params
raw_bugs = super().get_bugs(date=date, bug_ids=bug_ids, chunk_size=7000)

if len(raw_bugs) == 0:
return {}

# Extract the bug ids
bug_ids = list(raw_bugs.keys())

# Classify those bugs
bugs = get_bug_ids_classification("spambug", bug_ids)

for bug_id in sorted(bugs.keys()):
bug_data = bugs[bug_id]

if not bug_data.get("available", True):
# The bug was not available, it was either removed or is a
# security bug
continue

if not {"prob", "index"}.issubset(bug_data.keys()):
raise Exception(f"Invalid bug response {bug_id}: {bug_data!r}")

bug = raw_bugs[bug_id]
# prob[1] is the probability of the positive (spam) class.
prob = bug_data["prob"]

if prob[1] < self.get_config("confidence_threshold"):
continue

self.autofix_bugs[bug_id] = {
"id": bug_id,
"summary": bug["summary"],
"confidence": nice_round(prob[1]),
}

return self.autofix_bugs

def get_autofix_change(self):
"""Build the Bugzilla change moving each flagged bug to Invalid Bugs."""
result = {}
for bug_id in self.autofix_bugs:
result[bug_id] = {
"comment": {
# NOTE(review): COMMENT contains no format fields, so the
# confidence argument is ignored by .format — confirm intent.
"body": COMMENT.format(self.autofix_bugs[bug_id]["confidence"])
},
"product": "Invalid Bugs",
"component": "General",
"version": "unspecified",
"milestone": "---",
"status": "RESOLVED",
"resolution": "INVALID",
}

return result
# Entry point: run the spam-bug cleaner end-to-end when invoked directly.
if __name__ == "__main__":
SpamBug().run()
|
Python
| 0
|
@@ -1091,16 +1091,74 @@
bugs%22%0A%0A
+ def has_default_products(self):%0A return False%0A%0A
def
|
2cdec051040aeb49cde4e4d8425b675b1c395d3c
|
Allow specifying edge properties (color/width)
|
autonetkit/ank_messaging.py
|
autonetkit/ank_messaging.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import socket
import autonetkit.ank_json
import autonetkit.config as config
import autonetkit.log as log
from autonetkit.ank_utils import call_log
use_http_post = config.settings['Http Post']['active']
if use_http_post:
import urllib
@call_log
def format_http_url(host=None, port=None, route='publish'):
    """Build the visualisation-server URL, falling back to the configured
    host and port when neither is supplied."""
    if not host and not port:
        host = config.settings['Http Post']['server']
        port = config.settings['Http Post']['port']
    return 'http://%s:%s/%s' % (host, port, route)
default_http_url = format_http_url()
@call_log
def update_http(
anm=None,
nidb=None,
http_url=None,
uuid=None,
):
if http_url is None:
http_url = default_http_url
if anm and nidb:
body = autonetkit.ank_json.dumps(anm, nidb)
elif anm:
body = autonetkit.ank_json.dumps(anm)
else:
import json
body = json.dumps({}) # blank to test visualisation server running
if uuid is None:
uuid = get_uuid(anm)
params = urllib.urlencode({'body': body, 'type': 'anm',
'uuid': uuid})
try:
data = urllib.urlopen(http_url, params).read()
log.debug(data)
except IOError, e:
log.info('Unable to connect to visualisation server %s'
% http_url)
return
if not anm:
# testing
log.info('Visualisation server running')
@call_log
def get_uuid(anm):
    """Return the configured session UUID, or the single-user fallback."""
    try:
        return config.settings['Http Post']['uuid']
    except KeyError:
        log.warning('UUID not set, returning singleuser uuid')
        return 'singleuser'
@call_log
def highlight(
nodes=None,
edges=None,
paths=None,
path=None,
uuid='singleuser',
http_url=None,
):
"""POST a highlight request (nodes, edges and paths, reduced to plain
ids) to the visualisation server."""
if http_url is None:
http_url = default_http_url
if not paths:
paths = []
if path:
paths.append(path)
if nodes is None:
nodes = []
if edges is None:
edges = []

# Reduce node/edge objects to their ids; pass through plain ids as-is.
def nfilter(n):
try:
return n.id
except AttributeError:
return n # likely already a node id (string)

def efilter(e):
try:
return (e.src.id, e.dst.id)
except AttributeError:
return e # likely already edge (src, dst) id tuple (string)

nodes = [nfilter(n) for n in nodes]
edges = [efilter(e) for e in edges]
filtered_paths = []
for path in paths:
# TODO: tidy this logic
if isinstance(path, dict) and 'path' in path:
path_data = path # use as-is
else:
# NOTE(review): is_verified is computed but never used — the
# 'verified' flag below is commented out; confirm intent.
import random
is_verified = bool(random.randint(0, 1))
# path_data = {'path': path, 'verified': is_verified}
path_data = {'path': path}

path_data['path'] = [nfilter(n) for n in path_data['path']]
filtered_paths.append(path_data)

# TODO: remove "highlight" from json, use as url params to distinguish
import json
body = json.dumps({'nodes': nodes, 'edges': edges,
'paths': filtered_paths})

params = urllib.urlencode({'body': body, 'type': 'highlight',
'uuid': uuid})
# TODO: split this common function out, create at runtime so don't need to keep reading config
try:
data = urllib.urlopen(http_url, params).read()
except IOError, e:
log.info('Unable to connect to HTTP Server %s: %s' % (http_url,
e))
|
Python
| 0
|
@@ -2364,24 +2364,515 @@
nodes%5D%0A
+#TODO: allow node annotations also%0A%0A filtered_edges = %5B%5D%0A for edge in edges:%0A if isinstance(edge, dict) and 'edge' in edge:%0A edge_data = dict(edge) # use as-is (but make copy)%0A else:%0A edge_data = %7B'edge': edge%7D # no extra data%0A%0A edge_data%5B'src'%5D = edge_data%5B'edge'%5D.src.id%0A edge_data%5B'dst'%5D = edge_data%5B'edge'%5D.dst.id%0A del edge_data%5B'edge'%5D # remove now have extracted the src/dst%0A filtered_edges.append(edge_data)%0A%0A #
edges = %5Befi
@@ -2891,24 +2891,24 @@
e in edges%5D%0A
-
filtered
@@ -3054,21 +3054,27 @@
_data =
+dict(
path
+)
# use a
@@ -3075,17 +3075,34 @@
use as-
-s
+is (but make copy)
%0A
@@ -3493,24 +3493,24 @@
import json%0A
-
body = j
@@ -3545,16 +3545,25 @@
edges':
+filtered_
edges,%0A
|
bbe835c8aa561d8db58e116f0e55a5b19c4f9ca4
|
Fix sitemap memory consumption during generation
|
firecares/sitemaps.py
|
firecares/sitemaps.py
|
from django.contrib import sitemaps
from firecares.firestation.models import FireDepartment
from django.db.models import Max
from django.core.urlresolvers import reverse
class BaseSitemap(sitemaps.Sitemap):
    """Sitemap of the fixed, always-present site pages."""
    protocol = 'https'

    def items(self):
        # Named URL patterns for the static pages.
        return ['media', 'models_performance_score', 'models_community_risk',
                'safe_grades', 'login', 'contact_us', 'firedepartment_list']

    def priority(self, item):
        # Static pages always carry maximum priority.
        return 1

    def location(self, item):
        return reverse(item)
class DepartmentsSitemap(sitemaps.Sitemap):
    """Sitemap of non-archived fire departments, weighted by population."""
    protocol = 'https'
    # Updated by items(); seeded to 1 so priority() never divides by zero.
    max_population = 1

    def items(self):
        # PERF: restrict the loaded columns — materializing full model
        # instances for every department made sitemap generation very
        # memory-hungry.  Any deferred field is still lazily loaded on
        # access, so behavior is unchanged.
        queryset = FireDepartment.objects.filter(archived=False).only(
            'population', 'featured', 'name', 'modified')
        self.max_population = queryset.aggregate(
            Max('population'))['population__max']
        return queryset

    def location(self, item):
        return item.get_absolute_url()

    def priority(self, item):
        if item.featured is True:
            return 1
        if item.population is None:
            return 0
        # adding a bit to the total so featured items are always above others
        priority = item.population / float(self.max_population + 0.1)
        return priority

    def lastmod(self, item):
        return item.modified
|
Python
| 0
|
@@ -688,16 +688,55 @@
d=False)
+.only('population', 'featured', 'name')
%0A
|
2596cdfa5cb194e2fbfd7e2317f53938b222203f
|
Change template variable to TEMPLATE_DIR.
|
firmant/writers/j2.py
|
firmant/writers/j2.py
|
# Copyright (c) 2010, Robert Escriva
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Firmant nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from jinja2 import Environment
from jinja2 import FileSystemLoader
from firmant.i18n import _
from firmant.writers import EntryWriter
from firmant.writers import Writer
from firmant import utils
class Jinja2Base(Writer):
    '''Shared plumbing for Jinja2-backed writers: the template environment,
    template-mapper lookup, and safe writing of rendered pages to disk.
    '''

    @property
    def environment(self):
        '''A Jinja2 environment loading templates from the configured dir.'''
        loader = FileSystemLoader(self.settings['JINJA2_TEMPLATE_DIR'])
        env = Environment(loader=loader)
        return env

    @property
    def template_mapper(self):
        '''Return the a class for mapping entries/feeds/tags to templates.
        '''
        default = Jinja2TemplateMapper(self.settings)
        return self.settings.get('TEMPLATE_MAPPER', default)

    def save_to_disk(self, path, data):
        '''Save the data to ``path`` on disk.

        :func:`firmant.utils.safe_mkdir` will be called on the
        :func:`os.path.dirname` of ``path``.  Returns True on success and
        False (after logging) when the directory or file cannot be created.
        '''
        try:
            utils.safe_mkdir(os.path.dirname(path))
        except OSError:
            # BUG FIX: a stray bare `raise` sat at the top of this handler,
            # re-raising before the error could be logged and leaving the
            # lines below unreachable.  Log and report failure, mirroring
            # the IOError branch.
            self.log.error(_('cannot create dir: %s') % path)
            return False
        try:
            f = open(path, 'w+')
        except IOError:
            self.log.error(_('cannot open file: %s') % path)
            return False
        f.write(data.encode('utf-8'))
        f.close()
        return True
class Jinja2TemplateMapper(object):
    '''Map entries to the templates used to render them.

    Eventually it will be possible to have per-entry templates override the
    defaults.
    '''

    def __init__(self, settings):
        self.settings = settings

    def single_entry(self, entry):
        '''Template path for a single entry page.'''
        return 'entries/single.html'

    def entry_year(self, year):
        '''Template path for a yearly archive; ``year`` is a string.'''
        return 'entries/year.html'

    def entry_month(self, year, month):
        '''Template path for a monthly archive; both args are strings.'''
        return 'entries/month.html'

    def entry_day(self, year, month, day):
        '''Template path for a daily archive; all args are strings.'''
        return 'entries/day.html'
class Jinja2SingleEntry(EntryWriter, Jinja2Base):
    '''Render each entry to ``<OUTPUT_DIR>/<entry path>/index.html``.'''

    def write(self):
        env = self.environment
        if not self.write_preconditions():
            return
        for entry in self.entries:
            self.log_processing(entry)
            out_path = os.path.join(
                self.settings['OUTPUT_DIR'], self.path(entry), 'index.html')
            mapper = self.template_mapper
            template = env.get_template(mapper.single_entry(entry))
            rendered = template.render({'entry': entry})
            self.save_to_disk(out_path, rendered)
class Jinja2ArchiveYearsEntry(EntryWriter, Jinja2Base):
def write(self):
env = self.environment
if not self.write_preconditions(): return
years = EntryWriter.split_years(self.entries)
mapr = self.template_mapper
for year, entries in years:
year = str(year)
tmpl = env.get_template(mapr.entry_year(year))
data = tmpl.render({'entries': entries, 'year': year})
path = os.path.join(self.settings['OUTPUT_DIR'], year, 'index.html')
self.log.info(_('processing yearly archive: %s') % path)
self.save_to_disk(path, data)
class Jinja2ArchiveMonthsEntry(EntryWriter, Jinja2Base):
def write(self):
env = self.environment
if not self.write_preconditions(): return
months = EntryWriter.split_months(self.entries)
mapr = self.template_mapper
for (year, month), entries in months:
year = str(year)
month = str(month)
tmpl = env.get_template(mapr.entry_month(year, month))
data = tmpl.render({'entries': entries, 'year': year,
'month': month})
path = os.path.join(self.settings['OUTPUT_DIR'], year, month)
path = os.path.join(path, 'index.html')
self.log.info(_('processing monthly archive: %s') % path)
self.save_to_disk(path, data)
class Jinja2ArchiveDaysEntry(EntryWriter, Jinja2Base):
def write(self):
env = self.environment
if not self.write_preconditions(): return
days = EntryWriter.split_days(self.entries)
mapr = self.template_mapper
for (year, month, day), entries in days:
year = str(year)
month = str(month)
day = str(day)
tmpl = env.get_template(mapr.entry_day(year, month, day))
data = tmpl.render({'entries': entries, 'year': year,
'month': month, 'day': day})
path = os.path.join(self.settings['OUTPUT_DIR'], year, month, day)
path = os.path.join(path, 'index.html')
self.log.info(_('processing daily archive: %s') % path)
self.save_to_disk(path, data)
|
Python
| 0
|
@@ -1880,15 +1880,8 @@
gs%5B'
-JINJA2_
TEMP
|
9c7d1deba7dbde9285e49cb2966b1d242ac8ddc2
|
Use sphinxapi if available
|
flask_sphinxsearch.py
|
flask_sphinxsearch.py
|
import sphinxsearch
from flask import current_app
# Find the stack on which we want to store the database connection.
# Starting with Flask 0.9, the _app_ctx_stack is the correct one,
# before that we need to use the _request_ctx_stack.
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
class Sphinx(object):
"""
Simple wrapper around the `SphinxClient` object.
Usage:
from flask.ext.sphinxsearch import Sphinx
from myapp import app
sphinx = Sphinx(myapp)
print sphinx.client.Query("query")
"""
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
self.app = app
app.config.setdefault('SPHINX_HOST', 'localhost')
app.config.setdefault('SPHINX_PORT', 3312)
def connect(self):
client = sphinxsearch.SphinxClient()
client.SetServer(
current_app.config['SPHINX_HOST'],
current_app.config['SPHINX_PORT'])
return client
@property
def client(self):
ctx = stack.top
if ctx is not None:
if not hasattr(ctx, 'sphinxclient'):
ctx.sphinxclient = self.connect()
return ctx.sphinxclient
# set constants on the Sphinx object, for ease of use
for key in dir(sphinxsearch):
if key == key.upper():
setattr(Sphinx, key,
getattr(sphinxsearch, key))
|
Python
| 0
|
@@ -1,12 +1,78 @@
+try:%0A import sphinxapi as sphinxsearch%0Aexcept ImportError:%0A
import sphin
|
726fa619627f449371f8cdd6df266d4c92aaad5d
|
Fix flaky NL test
|
samples/snippets/ocr_nl/main_test.py
|
samples/snippets/ocr_nl/main_test.py
|
#!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for main."""
import re
import zipfile
import main
_TEST_IMAGE_URI = 'gs://{}/language/image8.png'
def test_batch_empty():
for batch_size in range(1, 10):
assert len(
list(main.batch([], batch_size=batch_size))) == 0
def test_batch_single():
for batch_size in range(1, 10):
batched = tuple(main.batch([1], batch_size=batch_size))
assert batched == ((1,),)
def test_single_image_returns_text(cloud_config):
vision_api_client = main.VisionApi()
image_path = _TEST_IMAGE_URI.format(cloud_config.storage_bucket)
texts = vision_api_client.detect_text([image_path])
assert image_path in texts
_, document = main.extract_description(texts[image_path])
assert "daughter" in document
assert "Bennet" in document
assert "hat" in document
def test_single_nonimage_returns_error():
vision_api_client = main.VisionApi()
texts = vision_api_client.detect_text(['README.md'])
assert "README.md" not in texts
def test_text_returns_entities():
text = "Holmes and Watson walked to the cafe."
text_analyzer = main.TextAnalyzer()
entities = text_analyzer.nl_detect(text)
assert len(entities) == 2
etype, ename, salience, wurl = text_analyzer.extract_entity_info(
entities[0])
assert ename == 'holmes'
assert wurl == 'http://en.wikipedia.org/wiki/Sherlock_Holmes'
def test_entities_list(cloud_config):
vision_api_client = main.VisionApi()
image_path = _TEST_IMAGE_URI.format(cloud_config.storage_bucket)
texts = vision_api_client.detect_text([image_path])
locale, document = main.extract_description(texts[image_path])
text_analyzer = main.TextAnalyzer()
entities = text_analyzer.nl_detect(document)
assert entities
etype, ename, salience, wurl = text_analyzer.extract_entity_info(
entities[0])
assert ename == 'bennet'
assert wurl == 'http://en.wikipedia.org/wiki/Mr_Bennet'
def test_main(remote_resource, tmpdir, capsys):
images_path = str(tmpdir.mkdir('images'))
# First, pull down some test data
zip_path = remote_resource('language/ocr_nl-images-small.zip', tmpdir)
# Extract it to the image directory
with zipfile.ZipFile(zip_path) as zfile:
zfile.extractall(images_path)
main.main(images_path, str(tmpdir.join('ocr_nl.db')))
stdout, _ = capsys.readouterr()
assert re.search(r'google was found with count', stdout)
|
Python
| 0.999382
|
@@ -1802,20 +1802,16 @@
ert
-len(
entities
) ==
@@ -1810,14 +1810,8 @@
ties
-) == 2
%0A
|
e959f849550fe4cfd2f2230c149a9bc0cb01bfe4
|
bump version
|
jose/__init__.py
|
jose/__init__.py
|
__version__ = "2.0.0"
__author__ = 'Michael Davis'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Michael Davis'
from .exceptions import JOSEError
from .exceptions import JWSError
from .exceptions import ExpiredSignatureError
from .exceptions import JWTError
|
Python
| 0
|
@@ -13,17 +13,17 @@
= %222.0.
-0
+1
%22%0A__auth
|
7f38e297dcfc9a664af092f48a9dc596f5f6c27b
|
Fix PermissionError: [Errno 13] Permission denied on Windows
|
scipy/sparse/tests/test_matrix_io.py
|
scipy/sparse/tests/test_matrix_io.py
|
import numpy as np
import tempfile
from numpy.testing import assert_array_almost_equal, run_module_suite, assert_
from scipy.sparse import csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix, save_npz, load_npz
def _save_and_load(matrix):
with tempfile.NamedTemporaryFile(suffix='.npz') as file:
file = file.name
save_npz(file, matrix)
loaded_matrix = load_npz(file)
return loaded_matrix
def _check_save_and_load(dense_matrix):
for matrix_class in [csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix]:
matrix = matrix_class(dense_matrix)
loaded_matrix = _save_and_load(matrix)
assert_(type(loaded_matrix) is matrix_class)
assert_(loaded_matrix.shape == dense_matrix.shape)
assert_(loaded_matrix.dtype == dense_matrix.dtype)
assert_array_almost_equal(loaded_matrix.toarray(), dense_matrix)
def test_save_and_load_random():
N = 10
np.random.seed(0)
dense_matrix = np.random.random((N, N))
dense_matrix[dense_matrix > 0.7] = 0
_check_save_and_load(dense_matrix)
def test_save_and_load_empty():
dense_matrix = np.zeros((4,6))
_check_save_and_load(dense_matrix)
def test_save_and_load_one_entry():
dense_matrix = np.zeros((4,6))
dense_matrix[1,2] = 1
_check_save_and_load(dense_matrix)
if __name__ == "__main__":
run_module_suite()
|
Python
| 0
|
@@ -1,12 +1,22 @@
+import os%0A
import numpy
@@ -256,102 +256,84 @@
ix):
- %0A with tempfile.NamedTemporaryFile(suffix='.npz') as file:%0A file = file.name
+%0A fd, tmpfile = tempfile.mkstemp(suffix='.npz')%0A os.close(fd)%0A try:
%0A
@@ -342,24 +342,27 @@
save_npz(
+tmp
file, matrix
@@ -396,16 +396,59 @@
oad_npz(
+tmpfile)%0A finally:%0A os.remove(tmp
file)%0A
|
b5e234185d8032db5d7193a46326cac7a17acedf
|
Add days_back argument to consume, and set null emails to '' rather than None
|
scrapi/consumers/scitech/consumer.py
|
scrapi/consumers/scitech/consumer.py
|
from scrapi_tools import lint
from scrapi_tools.document import RawDocument, NormalizedDocument
from lxml import etree
import requests
import datetime
import re
TODAY = datetime.date.today()
YESTERDAY = TODAY - datetime.timedelta(1)
NAME = 'SciTech'
def consume(start_date=YESTERDAY.strftime('%m/%d/%Y'), end_date=None, **kwargs):
"""A function for querying the SciTech Connect database for raw XML. The XML is chunked into smaller pieces, each representing data
about an article/report. If there are multiple pages of results, this function iterates through all the pages."""
base_url = 'http://www.osti.gov/scitech/scitechxml'
parameters = kwargs
parameters['EntryDateFrom'] = start_date
parameters['EntryDateTo'] = end_date
parameters['page'] = 0
morepages = 'true'
xml_list = []
elements_url = 'http://purl.org/dc/elements/1.1/'
while morepages == 'true':
xml = requests.get(base_url, params=parameters).text
xml_root = etree.XML(xml.encode('utf-8'))
for record in xml_root.find('records'):
xml_list.append(RawDocument({
'doc': etree.tostring(record, encoding='ASCII'),
'source': NAME,
'doc_id': record.find(str(etree.QName(elements_url, 'ostiId'))).text,
'filetype': 'xml',
}))
parameters['page'] += 1
morepages = xml_root.find('records').attrib['morepages']
return xml_list
def normalize(raw_doc, timestamp):
"""A function for parsing the list of XML objects returned by the consume function.
Returns a list of Json objects in a format that can be recognized by the OSF scrapi."""
raw_doc = raw_doc.get('doc')
terms_url = 'http://purl.org/dc/terms/'
elements_url = 'http://purl.org/dc/elements/1.1/'
record = etree.XML(raw_doc)
contributor_list = record.find(str(etree.QName(elements_url, 'creator'))).text.split(';')
# for now, scitech does not grab emails, but it could soon?
contributors = []
for name in contributor_list:
name = name.strip()
if name[0] in ['/', ',', 'et. al']:
continue
if '[' in name:
name = name[:name.index('[')].strip()
contributor = {}
contributor['full_name'] = name
contributor['email'] = None
contributors.append(contributor)
tags = record.find(str(etree.QName(elements_url, 'subject'))).text
tags = re.split(',(?!\s\&)|;', tags) if tags is not None else []
tags = [tag.strip() for tag in tags]
return NormalizedDocument({
'title': record.find(str(etree.QName(elements_url, 'title'))).text,
'contributors': contributors,
'properties': {
'doi': record.find(str(etree.QName(elements_url, 'doi'))).text,
'description': record.find(str(etree.QName(elements_url, 'description'))).text,
'article_type': record.find(str(etree.QName(elements_url, 'type'))).text,
'url': record.find(str(etree.QName(terms_url, 'identifier-purl'))).text,
'date_entered': record.find(str(etree.QName(elements_url, 'dateEntry'))).text,
'research_org': record.find(str(etree.QName(terms_url, 'publisherResearch'))).text,
'research_sponsor': record.find(str(etree.QName(terms_url, 'publisherSponsor'))).text,
'tags': tags,
'date_published': record.find(str(etree.QName(elements_url, 'date'))).text
},
'meta': {},
'id': record.find(str(etree.QName(elements_url, 'ostiId'))).text,
'source': NAME,
'timestamp': str(timestamp)
})
if __name__ == '__main__':
print(lint(consume, normalize))
|
Python
| 0.000354
|
@@ -262,49 +262,19 @@
ume(
-start_date=YESTERDAY.strftime('%25m/%25d/%25Y')
+days_back=1
, en
@@ -552,16 +552,94 @@
es.%22%22%22%0A%0A
+ start_date = (TODAY - datetime.timedelta(days_back)).strftime('%25m/%25d/%25Y')%0A
base
@@ -2360,20 +2360,18 @@
ail'%5D =
-None
+''
%0A
|
ea8aee109883d0f1efc9041e5219f893512d2e26
|
Fix comment
|
src/olympia/reviews/serializers.py
|
src/olympia/reviews/serializers.py
|
import re
from urllib2 import unquote
from django.utils.translation import ugettext as _
from rest_framework import serializers
from rest_framework.relations import PrimaryKeyRelatedField
from olympia.addons.serializers import SimpleVersionSerializer
from olympia.reviews.forms import ReviewForm
from olympia.reviews.models import Review
from olympia.users.serializers import BaseUserSerializer
from olympia.versions.models import Version
class BaseReviewSerializer(serializers.ModelSerializer):
# title and body are TranslatedFields, but there is never more than one
# translation for each review - it's essentially useless. Because of that
# we use a simple CharField in the API, hiding the fact that it's a
# TranslatedField underneath.
addon = serializers.SerializerMethodField()
body = serializers.CharField(allow_null=True, required=False)
is_latest = serializers.BooleanField(read_only=True)
previous_count = serializers.IntegerField(read_only=True)
title = serializers.CharField(allow_null=True, required=False)
user = BaseUserSerializer(read_only=True)
class Meta:
model = Review
fields = ('id', 'addon', 'body', 'created', 'is_latest',
'previous_count', 'title', 'user')
def get_addon(self, obj):
# We only return the addon id and slug for convenience, so just return
# them directly to avoid instantiating a full serializer. Also avoid
# database queries if possible by re-using the addon object from the
# view if there is one.
addon = self.context['view'].get_addon_object() or obj.addon
return {
'id': addon.id,
'slug': addon.slug
}
def validate(self, data):
data = super(BaseReviewSerializer, self).validate(data)
request = self.context['request']
data['user_responsible'] = request.user
# There are a few fields that need to be set at creation time and never
# modified afterwards:
if not self.partial:
# Because we want to avoid extra queries, addon is a
# serializerMethodField, which means it needs to be validated
# manually. Fortunately the view does most of the work for us.
data['addon'] = self.context['view'].get_addon_object()
if data['addon'] is None:
raise serializers.ValidationError(
{'addon': _('This field is required.')})
# Get the user from the request, don't allow clients to pick one
# themselves.
data['user'] = request.user
# Also include the user ip adress.
data['ip_address'] = request.META.get('REMOTE_ADDR', '')
else:
# When editing, you can't change the add-on.
if self.context['request'].data.get('addon'):
raise serializers.ValidationError(
{'addon': _(u"You can't change the add-on of a review once"
u" it has been created.")})
# Clean up body and automatically flag the review if an URL was in it.
body = data.get('body', '')
if body:
if '<br>' in body:
data['body'] = re.sub('<br>', '\n', body)
# Unquote the body when searching for links, in case someone tries
# 'example%2ecom'.
if ReviewForm.link_pattern.search(unquote(body)) is not None:
data['flag'] = True
data['editorreview'] = True
return data
class ReviewSerializerReply(BaseReviewSerializer):
"""Serializer used for replies only."""
body = serializers.CharField(
allow_null=False, required=True, allow_blank=False)
def to_representation(self, obj):
should_access_deleted = getattr(
self.context['view'], 'should_access_deleted_reviews', False)
if obj.deleted and not should_access_deleted:
return None
return super(ReviewSerializerReply, self).to_representation(obj)
def validate(self, data):
# review_object is set on the view by the reply() method.
data['reply_to'] = self.context['view'].review_object
# When a reply is made on top of an existing deleted reply, we make
# an edit instead, so we need to make sure `deleted` is reset.
data['deleted'] = False
if data['reply_to'].reply_to:
# Only one level of replying is allowed, so if it's already a
# reply, we shouldn't allow that.
raise serializers.ValidationError(
_(u"You can't reply to a review that is already a reply."))
data = super(ReviewSerializerReply, self).validate(data)
return data
class ReviewVersionSerializer(SimpleVersionSerializer):
class Meta:
model = Version
fields = ('id', 'version')
def to_internal_value(self, data):
"""Resolve the version only by `id`."""
# Version queryset is unfiltered, the version is checked more
# thoroughly in `ReviewSerializer.validate()` method.
field = PrimaryKeyRelatedField(queryset=Version.unfiltered)
return field.to_internal_value(data)
class ReviewSerializer(BaseReviewSerializer):
reply = ReviewSerializerReply(read_only=True)
rating = serializers.IntegerField(min_value=1, max_value=5)
version = ReviewVersionSerializer()
class Meta:
model = Review
fields = BaseReviewSerializer.Meta.fields + (
'rating', 'reply', 'version')
def validate_version(self, version):
if self.partial:
raise serializers.ValidationError(
_(u"You can't change the version of the add-on reviewed once "
u"the review has been created."))
addon = self.context['view'].get_addon_object()
if not addon:
# BaseReviewSerializer.validate() should complain about that, not
# this method.
return None
if version.addon_id != addon.pk or not version.is_public():
raise serializers.ValidationError(
_(u"This version of the add-on doesn't exist or isn't "
u"public."))
return version
def validate(self, data):
data = super(ReviewSerializer, self).validate(data)
if not self.partial:
if data['addon'].authors.filter(pk=data['user'].pk).exists():
raise serializers.ValidationError(
_(u"You can't leave a review on your own add-on."))
review_exists_on_this_version = Review.objects.filter(
addon=data['addon'], user=data['user'],
version=data['version']).exists()
if review_exists_on_this_version:
raise serializers.ValidationError(
_(u"You can't leave more than one review for the same "
u"version of an add-on."))
return data
|
Python
| 0
|
@@ -2116,17 +2116,17 @@
#
-s
+S
erialize
|
81b64f139dba88b744e6067f7a48ce1bdaff785c
|
Change variable names.
|
avenue/web.py
|
avenue/web.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Michael Babich
# See LICENSE.txt or http://opensource.org/licenses/MIT
'''Acts as an interface between what Flask serves and what goes on in
the rest of the application.
'''
from avenue import app, api
from flask import render_template, make_response, redirect
def url_generator():
'''This function acts on a list of URLs, a text rule for each URL,
and a function that says what to do to that text rule to serve a
page. The action_list associates a subset of URLs with a
particular function to be used as the action for that group.
'''
data = api.read_data('forum')
threads = data['threads']
def forum_set_tags():
'''Turns strings containing tag names into tag objects that
can be used to generate HTML/CSS renderings of the tag.
'''
for thread in threads:
for post in threads[thread]['posts']:
if 'tags' in post:
for i in range(len(post['tags'])):
post['tags'][i] = data['tags'][post['tags'][i]]
def forum_page(name):
'''Makes a forum page of the given thread name.
'''
thread = threads[name]
html_title = '%s :: %s :: %s' % (thread['title'], data['forum'],
data['site'])
main_title = '%s -- %s' % (data['site'], data['forum'])
title = { 'html' : html_title,
'main' : main_title,
'thread' : thread['title'],
'url' : data['forum_url'] }
return render_template('forum.html',
style='night',
sidebar=data['navbar'],
title=title,
posts=thread['posts'],
threaded=thread['threaded'])
def setup_url_rule(urls, action):
'''Sets up URL rules, given a dictionary of urls and a function
that they will act on.
'''
def url_page_function(text):
'''Returns a function that is associated with the URL
page. This function is called when the URL page is
requested. The anonymous (lambda) function does a
particular action given a particular string, text. It's
set up this way because the text fed into the action
function is always the same for a particular web page.
'''
return lambda: action(text)
for url in urls:
app.add_url_rule(url, url, url_page_function(urls[url]))
forum_set_tags()
action_list = [('redirect', redirect),
('forum_urls', forum_page),
('css', lambda theme:
api.make_css(data['style'][theme]))]
for action in action_list:
setup_url_rule(data['urls'][action[0]], action[1])
|
Python
| 0.000001
|
@@ -1213,22 +1213,16 @@
html
-_title
= '%25s :
@@ -1271,49 +1271,8 @@
m'%5D,
-%0A
dat
@@ -1278,25 +1278,24 @@
ta%5B'site'%5D)%0A
-%0A
main
@@ -1294,22 +1294,16 @@
main
-_title
= '%25s -
@@ -1374,22 +1374,16 @@
: html
-_title
,%0A
@@ -1413,14 +1413,8 @@
main
-_title
,%0A
|
bc62bd28340d27fbfde164ea3c2f184922ddb9e9
|
add Spirent like profile
|
scripts/astf/http_manual_tunables.py
|
scripts/astf/http_manual_tunables.py
|
# Example for creating your program by specifying buffers to send, without relaying on pcap file
from trex_astf_lib.api import *
# we can send either Python bytes type as below:
http_req = b'GET /3384 HTTP/1.1\r\nHost: 22.0.0.3\r\nConnection: Keep-Alive\r\nUser-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\r\nAccept: */*\r\nAccept-Language: en-us\r\nAccept-Encoding: gzip, deflate, compress\r\n\r\n'
# or we can send Python string containing ascii chars, as below:
http_response = 'HTTP/1.1 200 OK\r\nServer: Microsoft-IIS/6.0\r\nContent-Type: text/html\r\nContent-Length: 32000\r\n\r\n<html><pre>'+('*'*11*1024)+'</pre></html>'
class Prof1():
def __init__(self):
pass # tunables
def create_profile(self):
# client commands
prog_c = ASTFProgram()
prog_c.connect();
prog_c.send(http_req)
prog_c.recv(len(http_response))
prog_c.delay(10);
prog_s = ASTFProgram()
prog_s.recv(len(http_req))
prog_s.send(http_response)
prog_s.wait_for_peer_close()
# ip generator
ip_gen_c = ASTFIPGenDist(ip_range=["16.0.0.0", "16.0.0.255"], distribution="seq")
ip_gen_s = ASTFIPGenDist(ip_range=["48.0.0.0", "48.0.255.255"], distribution="seq")
ip_gen = ASTFIPGen(glob=ASTFIPGenGlobal(ip_offset="1.0.0.0"),
dist_client=ip_gen_c,
dist_server=ip_gen_s)
info = ASTFGlobalInfo()
info.tcp.mss = 1100
info.tcp.initwnd = 1
info.tcp.no_delay = 1
# template
temp_c = ASTFTCPClientTemplate(program=prog_c, ip_gen=ip_gen)
temp_s = ASTFTCPServerTemplate(program=prog_s) # using default association
template = ASTFTemplate(client_template=temp_c, server_template=temp_s)
# profile
profile = ASTFProfile(default_ip_gen=ip_gen, templates=template,
default_c_glob_info=info,
default_s_glob_info=info)
return profile
def get_profile(self, **kwargs):
return self.create_profile()
def register():
return Prof1()
|
Python
| 0
|
@@ -1025,16 +1025,42 @@
p_req))%0A
+ prog_s.delay(10);%0A
@@ -1559,16 +1559,161 @@
= 1100%0A
+ info.tcp.rxbufsize = 1102 # split the buffer to MSS and ack every buffer, no need the no_delay option%0A info.tcp.txbufsize = 1100%0A
@@ -1733,32 +1733,33 @@
wnd = 1%0A
+#
info.tcp.no_dela
@@ -1763,16 +1763,47 @@
elay = 1
+%0A info.tcp.do_rfc1323 =0
%0A%0A
|
fbc1ee6391734175a603aff5c95108a499d6001b
|
add more agronomic fields
|
scripts/cscap/set_dashboard_links.py
|
scripts/cscap/set_dashboard_links.py
|
"""
Assign links in the dashboard to generated spreadsheets?
"""
import util
import ConfigParser
import gdata.spreadsheets.client
import gdata.docs.client
config = ConfigParser.ConfigParser()
config.read('mytokens.cfg')
# Get me a client, stat
spr_client = util.get_spreadsheet_client(config)
docs_client = util.get_docs_client(config)
# Go get row 1
cell_feed = spr_client.get_cells( config.get('cscap', 'dashboard'),
'od6', query=gdata.spreadsheets.client.CellQuery(
min_row=1, max_row=1))
column_ids = [""] * 100
for entry in cell_feed.entry:
pos = entry.title.text
text = entry.cell.input_value
column_ids[ int(entry.cell.col) ] = text
lookuprefs = {
'agr1': 'Agronomic Data',
'agr34': 'Agronomic Data',
'agr37': 'Agronomic Data',
'agr38': 'Agronomic Data',
'agr39': 'Agronomic Data',
'agr40': 'Agronomic Data',
'soil2': 'Soil Bulk Density and Water Retention Data',
'soil15': 'Soil Nitrate Data',
'soil22': 'Soil Nitrate Data',
}
varconv = {
'soil2': 'waterretentionat0bar',
'soil15': 'soilnitratespringsampling',
'soil22': 'soil22soilammoniumoptional',
}
def do_row(row):
cell_feed = spr_client.get_cells( config.get('cscap', 'dashboard'),
'od6', query=gdata.spreadsheets.client.CellQuery(
min_row=row, max_row=row))
firstcolumn = cell_feed.entry[0]
varname = firstcolumn.cell.input_value.split()[0].lower()
spreadtitle = lookuprefs.get(varname)
if spreadtitle is None:
print 'ERROR: Do not know how to reference %s in lookuprefs' % (
varname,)
return
for entry in cell_feed.entry[1:]:
siteid = column_ids[ int(entry.cell.col) ]
query = gdata.docs.client.DocsQuery(show_collections='true',
title='%s %s' % (siteid, spreadtitle))
# We need to go search for the spreadsheet
resources = docs_client.GetAllResources(query=query)
if len(resources) == 0:
print 'Can not find spread title: |%s %s|' % (siteid, spreadtitle,)
continue
if len(resources) == 2:
print 'Duplicate spread title: |%s %s|' % (siteid, spreadtitle,)
for res in resources:
print siteid, res.title.text, res.get_html_link().href
continue
# Get the list feed for this spreadsheet
list_feed = spr_client.get_list_feed(
resources[0].get_id().split("/")[-1][14:], 'od7')
misses = 0
na = False
lookupcol = varconv.get(varname, varname)
for entry2 in list_feed.entry:
data = entry2.to_dict()
if not data.has_key(lookupcol):
na = True
break
if data[lookupcol] is None:
misses += 1
if na:
print 'Could not find header: %s in spreadtitle: %s %s' % (lookupcol,
siteid, spreadtitle)
uri = resources[0].get_html_link().href
if na:
entry.cell.input_value = 'N/A'
elif misses == 0:
entry.cell.input_value = 'Complete!'
else:
entry.cell.input_value = '=hyperlink("%s", "Entry")' % (uri,)
spr_client.update(entry)
for i in range(6,7):
do_row(i)
|
Python
| 0
|
@@ -777,32 +777,1336 @@
gronomic Data',%0A
+ 'agr2': 'Agronomic Data',%0A 'agr3': 'Agronomic Data',%0A 'agr4': 'Agronomic Data',%0A 'agr5': 'Agronomic Data',%0A 'agr6': 'Agronomic Data',%0A 'agr7': 'Agronomic Data',%0A 'agr8': 'Agronomic Data',%0A 'agr9': 'Agronomic Data',%0A 'agr10': 'Agronomic Data',%0A 'agr11': 'Agronomic Data',%0A 'agr12': 'Agronomic Data',%0A 'agr13': 'Agronomic Data',%0A 'agr14': 'Agronomic Data',%0A 'agr15': 'Agronomic Data',%0A 'agr16': 'Agronomic Data',%0A 'agr17': 'Agronomic Data',%0A 'agr18': 'Agronomic Data',%0A 'agr19': 'Agronomic Data',%0A 'agr20': 'Agronomic Data',%0A 'agr21': 'Agronomic Data',%0A 'agr22': 'Agronomic Data',%0A 'agr23': 'Agronomic Data',%0A 'agr24': 'Agronomic Data',%0A 'agr25': 'Agronomic Data',%0A 'agr26': 'Agronomic Data',%0A 'agr27': 'Agronomic Data',%0A 'agr28': 'Agronomic Data',%0A 'agr29': 'Agronomic Data',%0A 'agr30': 'Agronomic Data',%0A 'agr31': 'Agronomic Data',%0A 'agr32': 'Agronomic Data',%0A 'agr33': 'Agronomic Data',%0A
'a
@@ -4962,11 +4962,12 @@
nge(
-6,7
+9,12
):%0A
|
b1277cd79102a30a894e370ab15773e6d86569ec
|
fix n/a causing issues for OT0010 ingest, sigh
|
scripts/ingestors/other/parse0010.py
|
scripts/ingestors/other/parse0010.py
|
"""ISU Agronomy Hall Vantage Pro 2 OT0010"""
from __future__ import print_function
import datetime
import re
import os
import sys
import pytz
from pyiem.datatypes import speed, temperature, humidity
from pyiem.observation import Observation
from pyiem.meteorology import dewpoint
from pyiem.util import get_dbconn
def main():
"""Go Main Go"""
iemaccess = get_dbconn('iem')
cursor = iemaccess.cursor()
valid = datetime.datetime.utcnow()
valid = valid.replace(tzinfo=pytz.utc)
valid = valid.astimezone(pytz.timezone("America/Chicago"))
fn = valid.strftime("/mesonet/ARCHIVE/data/%Y/%m/%d/text/ot/ot0010.dat")
if not os.path.isfile(fn):
sys.exit(0)
lines = open(fn, "r").readlines()
lastline = lines[-1].strip()
tokens = re.split(r"[\s+]+", lastline)
if len(tokens) != 20:
return
tparts = re.split(":", tokens[3])
valid = valid.replace(hour=int(tparts[0]),
minute=int(tparts[1]), second=0, microsecond=0)
iem = Observation("OT0010", "OT", valid)
iem.data['tmpf'] = float(tokens[4])
iem.data['max_tmpf'] = float(tokens[5])
iem.data['min_tmpf'] = float(tokens[6])
iem.data['relh'] = int(tokens[7])
iem.data['dwpf'] = dewpoint(temperature(iem.data['tmpf'], 'F'),
humidity(iem.data['relh'], '%')).value("F")
iem.data['sknt'] = speed(float(tokens[8]), 'mph').value('KT')
iem.data['drct'] = int(tokens[9])
iem.data['max_sknt'] = speed(float(tokens[10]), 'mph').value('KT')
iem.data['alti'] = float(tokens[12])
iem.data['pday'] = float(tokens[13])
iem.data['srad'] = float(tokens[18])
iem.save(cursor)
cursor.close()
iemaccess.commit()
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -1631,24 +1631,57 @@
ta%5B'srad'%5D =
+ None if tokens%5B18%5D == 'n/a' else
float(token
|
76e46db3248f8612c01a3fd598c95e08f864bc08
|
version 1.0.0
|
src/stratis_cli/_version.py
|
src/stratis_cli/_version.py
|
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Version information.
.. moduleauthor:: mulhern <amulhern@redhat.com>
"""
__version_info__ = (0, 9, 0)
__version__ = '.'.join(str(x) for x in __version_info__)
|
Python
| 0
|
@@ -683,12 +683,12 @@
= (
-0, 9
+1, 0
, 0)
|
c7a79f81734f360a232b2f91630872ad56a1ffa4
|
clean up audio init
|
amen/audio.py
|
amen/audio.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import librosa
from amen.timing_list import TimingList
class Audio(object):
"""
Audio object: should wrap the output from libRosa.
"""
def __init__(self, file_path, convert_to_mono=False, sample_rate=22050):
"""
Opens a file path, loads it with librosa.
"""
self.file_path = file_path
y, sr = librosa.load(file_path, mono=convert_to_mono, sr=sample_rate)
self.sample_rate = float(sr)
self.raw_samples = y
if convert_to_mono:
self.num_channels = 1
else:
self.num_channels = 2
self.duration = len(self.raw_samples) / self.sample_rate
self.timings = self.create_timings()
def create_timings(self):
timings = {}
timings['beats'] = TimingList('beats', self.get_beats(), self)
return timings
def get_beats(self):
y_mono = librosa.to_mono(self.raw_samples)
tempo, beat_frames = librosa.beat.beat_track(
y=y_mono, sr=self.sample_rate, trim=False)
# convert frames to times
beat_times = librosa.frames_to_time(beat_frames, sr=self.sample_rate)
# make the list of (start, duration)s that TimingList expects
starts_durs = []
for i, start in enumerate(beat_times[:-1]):
starts_durs.append((start, beat_times[i+1] - start))
# now get the last one
starts_durs.append((beat_times[-1], self.duration - beat_times[-1]))
return starts_durs
|
Python
| 0.000025
|
@@ -532,174 +532,91 @@
-if convert_to_mono:%0A self.num_channels = 1%0A else:%0A self.num_channels = 2%0A self.duration = len(self.raw_samples) / self.sample_rate
+self.num_channels = y.ndim%0A self.duration = librosa.get_duration(y=y, sr=sr)
%0A
|
4aca30e376b2310e2436fdb799bf3cae1c9a1d2b
|
Define global variables to clean up tests
|
dakota_utils/tests/test_file.py
|
dakota_utils/tests/test_file.py
|
#! /usr/bin/env python
#
# Tests for dakota_utils.file.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import tempfile
import shutil
from dakota_utils.file import *
def setup_module():
print('File tests:')
os.environ['_test_tmp_dir'] = tempfile.mkdtemp()
def teardown_module():
shutil.rmtree(os.environ['_test_tmp_dir'])
@raises(TypeError)
def test_remove_zero_arguments():
'''
Tests for no input parameter to remove().
'''
remove()
@raises(TypeError)
def test_remove_file_zero_arguments():
'''
Tests for no input parameter to remove_file().
'''
remove_file()
@raises(TypeError)
def test_remove_directory_zero_arguments():
'''
Tests for no input parameter to remove_directory().
'''
remove_directory()
@raises(TypeError)
def test_touch_zero_arguments():
'''
Tests for no input parameter to touch().
'''
touch()
def test_remove_file_does_not_exist():
'''
Tests deleting a nonexistent file with remove_file().
'''
fname = 'vwbwguv00240cnwuncdsv'
remove_file(os.path.join(os.environ['_test_tmp_dir'], fname))
def test_remove_directory_does_not_exist():
'''
Tests deleting a nonexistent directory with remove_directory().
'''
dname = 'vwbwguv00240cnwuncdsv'
remove_directory(os.path.join(os.environ['_test_tmp_dir'], dname))
def test_remove_does_not_exist():
'''
Tests deleting a nonexistent file or directory with remove().
'''
name = 'vwbwguv00240cnwuncdsv'
remove(os.path.join(os.environ['_test_tmp_dir'], name))
def test_remove_file():
'''
Tests that remove_file() deletes a file.
'''
bname = 'delete_me'
fname = os.path.join(os.environ['_test_tmp_dir'], bname)
touch(fname)
remove_file(fname)
assert_false(os.path.exists(fname))
def test_remove_directory():
'''
Tests that remove_directory() deletes a directory.
'''
bname = 'delete_me'
dname = os.path.join(os.environ['_test_tmp_dir'], bname)
os.mkdir(dname)
remove_directory(dname)
assert_false(os.path.exists(dname))
def test_remove_a_file():
'''
Tests that remove() deletes a file. (Uses touch)
'''
bname = 'delete_me'
fname = os.path.join(os.environ['_test_tmp_dir'], bname)
touch(fname)
remove(fname)
assert_false(os.path.exists(fname))
def test_remove_a_directory():
'''
Tests that remove() deletes a directory.
'''
bname = 'delete_me'
dname = os.path.join(os.environ['_test_tmp_dir'], bname)
os.mkdir(dname)
remove(dname)
assert_false(os.path.exists(dname))
def test_touch():
'''
Tests that touch() makes a file. (Uses remove)
'''
bname = 'a_file'
fname = os.path.join(os.environ['_test_tmp_dir'], bname)
touch(fname)
assert_true(os.path.exists(fname))
remove(fname)
|
Python
| 0.000003
|
@@ -224,16 +224,96 @@
port *%0A%0A
+nondir = 'vwbwguv00240cnwuncdsv'%0Anonfile = nondir + '.pro'%0Abname = 'delete_me'%0A%0A
def setu
@@ -1153,44 +1153,8 @@
%0A
- fname = 'vwbwguv00240cnwuncdsv'%0A
@@ -1207,20 +1207,22 @@
_dir'%5D,
-fnam
+nonfil
e))%0A%0Adef
@@ -1354,44 +1354,8 @@
%0A
- dname = 'vwbwguv00240cnwuncdsv'%0A
@@ -1413,21 +1413,22 @@
_dir'%5D,
-dname
+nondir
))%0A%0Adef
@@ -1551,38 +1551,66 @@
-name = 'vwbwguv00240cnwuncdsv'
+remove(os.path.join(os.environ%5B'_test_tmp_dir'%5D, nonfile))
%0A
@@ -1660,19 +1660,21 @@
dir'%5D, n
-ame
+ondir
))%0A%0Adef
@@ -1762,32 +1762,8 @@
%0A
- bname = 'delete_me'%0A
@@ -2008,32 +2008,8 @@
%0A
- bname = 'delete_me'%0A
@@ -2253,32 +2253,8 @@
'''%0A
- bname = 'delete_me'%0A
@@ -2486,32 +2486,8 @@
%0A
- bname = 'delete_me'%0A
@@ -2711,29 +2711,8 @@
'''%0A
- bname = 'a_file'%0A
@@ -2753,37 +2753,40 @@
test_tmp_dir'%5D,
-bname
+'a_file'
)%0A touch(fnam
|
489f230b618a84f1cd94aad73038ada73e526a4a
|
fix a crash
|
dashboard/containers/web/app.py
|
dashboard/containers/web/app.py
|
from flask import Flask, render_template, send_from_directory
import rethinkdb as r
import json
import os
app = Flask(__name__, static_url_path='')
@app.route('/css/<path>/')
def send_css(path):
return send_from_directory('static/css', path)
@app.route('/js/<path>/')
def send_js(path):
return send_from_directory('static/js', path)
@app.route('/')
def dashboard():
db_host = os.environ.get("RETHINKDB_DRIVER_SERVICE_HOST", 'db')
r.connect(db_host, 28015).repl()
db = r.db('fontbakery')
# db.table('cached_stats').index_create('familyname').run();
fonts_prod = list(db.table('cached_stats').order_by(index='familyname').filter({"commit": "prod"}).run())
return render_template("dashboard.html", prod=fonts_prod)
@app.route('/testsuite/')
def testsuite_overview():
if 1: #try:
db_host = os.environ.get("RETHINKDB_DRIVER_SERVICE_HOST", 'db')
r.connect(db_host, 28015).repl()
db = r.db('fontbakery')
targets = db.table('check_results').filter({"commit": "prod"}).run()
families = []
checks = {}
num_targets = 0
for target in targets:
num_targets +=1
if target['familyname'] not in families:
families.append(target['familyname'])
for check in target['results']:
desc = check['description']
result = check['result']
if desc not in checks.keys():
checks[desc] = {'OK':0,
'Total': 0,
'ERROR': 0,
'WARNING': 0,
'SKIP':0,
'HOTFIX': 0,
'INFO': 0}
checks[desc][result] += 1
checks[desc]['Total'] += 1
return render_template("testsuite.html",
checks=checks,
num_targets=num_targets,
num_families=len(families)
)
# except:
# return render_template("under_deployment.html")
@app.route('/details/<familyname>/errorlog/')
def family_error_log(familyname):
db_host = os.environ.get("RETHINKDB_DRIVER_SERVICE_HOST", 'db')
r.connect(db_host, 28015).repl()
db = r.db('fontbakery')
family = db.table('fb_log').filter({"familyname": familyname}).run()
logs = list(family)
return render_template("error_log.html",
logs=logs)
@app.route('/details/<familyname>/')
def family_details(familyname):
if 1: #try:
db_host = os.environ.get("RETHINKDB_DRIVER_SERVICE_HOST", 'db')
r.connect(db_host, 28015).repl()
db = r.db('fontbakery')
fonts_prod = list(db.table('check_results').filter({"commit": "prod", "familyname": familyname}).run())
fonts_dev = list(db.table('check_results').filter({"HEAD": True, "familyname": familyname}).run())
family_prod = db.table('cached_stats').filter({"commit": "prod", "familyname": familyname}).run().next()
family_dev = []
try:
family_dev = db.table('cached_stats').filter({"HEAD": True, "familyname": familyname}).run().next()
except:
pass
chart_data = [["Results", "Occurrences"]]
delta = {}
for k in family_prod['summary']:
if k != "Total":
chart_data.append([k, family_prod['summary'][k]])
if family_dev != []:
delta[k] = (family_dev['summary'][k] - family_prod['summary'][k])
for f in fonts_dev + fonts_prod:
if '-' in f['fontname'] and '.ttf' in f['fontname']:
f['stylename'] = f['fontname'].split('-')[1].split('.ttf')[0]
else:
f['stylename'] = "{} (bad name)".format(f['fontname'])
# I think that the rearrangement of data below could be avoided by crafting a smarter database schema...
fonts = []
for p in fonts_prod:
for d in fonts_dev:
if d['stylename'] == p['stylename']:
fonts.append([p, d])
return render_template("family_details.html",
delta=delta,
fonts=fonts,
familyname=familyname,
chart_data=json.dumps(chart_data),
giturl=family_prod['giturl'])
# except:
# return render_template("under_deployment.html")
if __name__ == "__main__":
app.run("0.0.0.0", debug=True)
|
Python
| 0.0005
|
@@ -3254,16 +3254,47 @@
ev != %5B%5D
+ and k in family_dev%5B'summary'%5D
:%0A
|
50b8195bf34244d7e5cc9595818227db83999566
|
Fix type error updating the failures count.
|
api/common.py
|
api/common.py
|
#!/usr/bin/python
# Copyright (C) 2012 Humbug, Inc. All rights reserved.
import simplejson
import requests
import time
import traceback
import urlparse
import sys
# Check that we have a recent enough version
# Older versions don't provide the 'json' attribute on responses.
assert(requests.__version__ > '0.12')
class HumbugAPI():
def __init__(self, email, api_key, verbose=False, retry_on_errors=True,
site="https://humbughq.com", client="API"):
self.api_key = api_key
self.email = email
self.verbose = verbose
self.base_url = site
self.retry_on_errors = retry_on_errors
self.client_name = client
def do_api_query(self, request, url, longpolling = False):
had_error_retry = False
request["email"] = self.email
request["api-key"] = self.api_key
request["client"] = self.client_name
request["failures"] = 0
for (key, val) in request.iteritems():
if not (isinstance(val, str) or isinstance(val, unicode)):
request[key] = simplejson.dumps(val)
while True:
try:
res = requests.post(urlparse.urljoin(self.base_url, url), data=request,
verify=True, timeout=55)
# On 50x errors, try again after a short sleep
if str(res.status_code).startswith('5') and self.retry_on_errors:
if self.verbose:
if not had_error_retry:
sys.stdout.write("connection error %s -- retrying." % (res.status_code,))
had_error_retry = True
request["failures"] += 1
else:
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(1)
continue
except (requests.exceptions.Timeout, requests.exceptions.SSLError) as e:
# Timeouts are either a Timeout or an SSLError; we
# want the later exception handlers to deal with any
# non-timeout other SSLErrors
if (isinstance(e, requests.exceptions.SSLError) and
str(e) != "The read operation timed out"):
raise
if longpolling:
# When longpolling, we expect the timeout to fire,
# and the correct response is to just retry
continue
else:
return {'msg': "Connection error:\n%s" % traceback.format_exc(),
"result": "connection-error"}
except requests.exceptions.ConnectionError:
if self.retry_on_errors:
if self.verbose:
if not had_error_retry:
sys.stdout.write("connection error -- retrying.")
had_error_retry = True
request["failures"] += 1
else:
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(1)
continue
return {'msg': "Connection error:\n%s" % traceback.format_exc(),
"result": "connection-error"}
except Exception:
# We'll split this out into more cases as we encounter new bugs.
return {'msg': "Unexpected error:\n%s" % traceback.format_exc(),
"result": "unexpected-error"}
if self.verbose and had_error_retry:
print "Success!"
if res.json is not None:
return res.json
return {'msg': res.text, "result": "http-error",
"status_code": res.status_code}
def send_message(self, request):
return self.do_api_query(request, "/api/v1/send_message")
def get_messages(self, request = {}):
return self.do_api_query(request, "/api/v1/get_messages",
longpolling=True)
def get_profile(self, request = {}):
return self.do_api_query(request, "/api/v1/get_profile")
def get_public_streams(self, request = {}):
return self.do_api_query(request, "/api/v1/get_public_streams")
def get_subscriptions(self, request = {}):
return self.do_api_query(request, "/api/v1/get_subscriptions")
def subscribe(self, streams):
request = {'streams': streams}
return self.do_api_query(request, "/api/v1/subscribe")
def call_on_each_message(self, callback, options = {}):
max_message_id = None
while True:
if max_message_id is not None:
options["first"] = "0"
options["last"] = str(max_message_id)
res = self.get_messages(options)
if 'error' in res.get('result'):
if self.verbose:
if res["result"] == "http-error":
print "HTTP error fetching messages -- probably a server restart"
elif res["result"] == "connection-error":
print "Connection error fetching messages -- probably server is temporarily down?"
else:
print "Server returned error:\n%s" % res["msg"]
# TODO: Make this back off once it's more reliable
time.sleep(1)
continue
for message in sorted(res['messages'], key=lambda x: int(x["id"])):
max_message_id = max(max_message_id, int(message["id"]))
callback(message)
|
Python
| 0
|
@@ -887,40 +887,8 @@
name
-%0A request%5B%22failures%22%5D = 0
%0A%0A
@@ -1057,16 +1057,49 @@
s(val)%0A%0A
+ request%5B%22failures%22%5D = 0%0A%0A
|
1c56aeb3d96dbb26da62203d690b4ff49b4b5c0e
|
bump version to 0.5.2
|
abstar/version.py
|
abstar/version.py
|
# Store the version here so:
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module module
__version__ = '0.5.1'
|
Python
| 0.000001
|
@@ -202,10 +202,10 @@
= '0.5.
-1
+2
'
|
609cffb674ba0494bbe450d8ce7839168a3d5a0a
|
remove unnecessary code from forms
|
accounts/forms.py
|
accounts/forms.py
|
# -*- coding: utf-8 -*-
try:
from django.contrib.auth import get_user_model
except ImportError:
from django.contrib.auth.models import User
else:
User = get_user_model()
from django import forms
from django.utils.translation import ugettext_lazy as _
class ProfileEditForm(forms.ModelForm):
email = forms.RegexField(label=_("email"), max_length=75, regex=r"^[\w.@+-]+$")
password1 = forms.CharField(widget=forms.PasswordInput, label=_("Password"), required=False)
password2 = forms.CharField(widget=forms.PasswordInput, label=_("Password (again)"), required=False)
class Meta:
model = User
fields = ('username', 'email', 'first_name', 'last_name', 'picture',
'occupation', 'city', 'site', 'biography',)
def clean_username(self):
return self.instance.username
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(_("The two password fields didn't match."))
return password2
def save(self, commit=True):
if self.cleaned_data['password1']:
self.instance.set_password(self.cleaned_data['password1'])
return super(ProfileEditForm, self).save(commit=commit)
|
Python
| 0.000007
|
@@ -21,17 +21,8 @@
-*-%0A
-try:%0A
from
@@ -68,110 +68,8 @@
del%0A
-except ImportError:%0A from django.contrib.auth.models import User%0Aelse:%0A User = get_user_model()%0A
from
@@ -146,16 +146,40 @@
y as _%0A%0A
+User = get_user_model()%0A
%0Aclass P
|
35848b160341f3528cdeac59f2027820a33ba39d
|
Add the always-'0' indicator field to the header record.
|
account_hmrc_esl_declaration/wizard/account_vat_esl.py
|
account_hmrc_esl_declaration/wizard/account_vat_esl.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# ECSL Export for HMRC
# Copyright (C) 2015 OpusVL (<http://opusvl.com/>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import csv
import re
from operator import methodcaller
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from openerp import models, fields, api
from ..maybe import odoo_maybe
_INDICATOR_MAP = {
# Mapping from transaction_indicator_type to the code used in the CSV file
'b2b_goods': '0',
'triangular': '2',
'b2b_services': '3',
}
class AccountVatESLWizard(models.TransientModel):
# Based on odoo/addons/account/wizard/account_vat.py from upstream Odoo.
# Code used and modified under AGPL v3.
_name = 'account.vat.esl'
_description = 'EC Sales Declaration'
_inherit = 'account.common.report'
period_from = fields.Many2one(string='Period', required=True) # We only care about one period
based_on = fields.Selection(
# Looking at how account.vat.declaration uses this, I think this field may be completely
# redundant. It's in the model, but hidden to the user in the UI. So perhaps only
# 'invoices' makes sense here.
default='invoices',
required=True,
readonly=True,
selection=[
('invoices', 'Invoices'),
('payments', 'Payments'),
],
string='Based on',
)
chart_tax_id = fields.Many2one(
comodel_name='account.tax.code',
string='Chart of Tax',
required=True,
default=methodcaller('_default_chart_of_taxes'),
)
def _default_chart_of_taxes(self):
taxes = self.env['account.tax.code'].search(
[
('company_id', '=', self.env.user.company_id.id),
('name', '=ilike', '%Total value of EC sales, ex VAT%'),
],
limit=1,
)
return taxes and taxes.id or False
@api.multi
def create_esl(self):
"""This should be triggered by the form.
"""
self.ensure_one()
return self.env['report'].get_action(self, 'account_hmrc_esl_declaration.esl_csv')
def declaration_year(self):
"""Return year of declaration in YYYY format."""
# NOTE This assumes period name is in MM/YYYY format
return self.period_from.name.split('/')[1]
def declaration_month(self):
"""Return month of declaration in MM format."""
# NOTE This assumes period name is in MM/YYYY format
return self.period_from.name.split('/')[0]
@api.multi
def esl_csv_records(self):
"""Return the CSV records in HMRC-compatible format as a list of rows.
"""
self.ensure_one()
company = self.chart_tax_id.company_id
title_record = ['HMRC_CAT_ESL_BULK_SUBMISSION_FILE']
header_record = [
odoo_maybe(company.vat, strip_leading_letters),
company.subsidiary_identifier,
self.declaration_year(),
self.declaration_month(),
'GBP',
company.name[:35], # NOTE truncating might not be sufficient
]
return [ title_record, header_record ] + self._detail_records()
@api.multi
def _detail_records(self):
self.ensure_one()
return [
# TODO
]
@api.multi
def esl_csv_data(self):
"""Return the CSV data as a string.
"""
data = StringIO()
csv.writer(data).writerows(self.esl_csv_records())
return data.getvalue()
def strip_leading_letters(instr):
"""Strip the leading letters off a string.
>>> strip_leading_letters('GB12345678')
'12345678'
"""
return re.sub(r'^[A-Z]+', r'', instr, count=1)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0
|
@@ -3889,16 +3889,97 @@
ficient%0A
+ '0', # %22the indicator field (this will always be '0')%22%0A
|
67be76a3d65fa846c8888ef5415ec3df5ef9ab87
|
Add test for expired tokens
|
accounts/tests.py
|
accounts/tests.py
|
"""accounts app unittests
"""
import base64
from time import sleep
from django.contrib.auth import get_user_model
from django.test import TestCase
from accounts.token import LoginTokenGenerator
TEST_EMAIL = 'newvisitor@example.com'
class WelcomePageTest(TestCase):
"""Tests relating to the welcome_page view.
"""
def test_uses_welcome_template(self):
"""The root url should response with the welcome page template.
"""
response = self.client.get('/')
self.assertTemplateUsed(response, 'accounts/welcome.html')
class UserModelTest(TestCase):
"""Tests for passwordless user model.
"""
def test_user_valid_with_only_email(self):
"""Should not raise if the user model is happy with email only.
"""
user = get_user_model()(email=TEST_EMAIL)
user.full_clean()
def test_users_are_authenticated(self):
"""User objects should be authenticated for views/templates.
"""
user = get_user_model()()
self.assertTrue(user.is_authenticated())
class TokenGeneratorTest(TestCase):
"""Tests for login token model.
"""
def setUp(self):
self.generator = LoginTokenGenerator()
def test_unique_tokens_generated(self):
"""Tokens generated one second apart should differ.
"""
token1 = self.generator.create_token(TEST_EMAIL)
sleep(1)
token2 = self.generator.create_token(TEST_EMAIL)
self.assertNotEqual(token1, token2)
def test_email_recovered_from_token(self):
"""A consumed token should yield the original email address.
"""
token = self.generator.create_token(TEST_EMAIL)
email = self.generator.consume_token(token)
self.assertEqual(email, TEST_EMAIL)
def test_modified_token_fails(self):
"""A modified token returns None instead of an email.
"""
token = self.generator.create_token(TEST_EMAIL)
split_token = base64.urlsafe_b64decode(
token.encode()
).decode().split('@')
split_token[0] = 'maliciousvisitor'
malicious_token = base64.urlsafe_b64encode(
'@'.join(split_token).encode()
).decode()
self.assertIsNone(self.generator.consume_token(malicious_token))
|
Python
| 0
|
@@ -1947,32 +1947,87 @@
oken(TEST_EMAIL)
+%0A%0A # Modify the email address which is 'signed'.
%0A split_t
@@ -2116,16 +2116,17 @@
it('@')%0A
+%0A
@@ -2271,24 +2271,25 @@
).decode()%0A
+%0A
self
@@ -2349,8 +2349,342 @@
token))%0A
+%0A def test_expired_token_fails(self):%0A %22%22%22A token which has expired returns None instead of an email.%0A%0A %22%22%22%0A token = self.generator.create_token(TEST_EMAIL)%0A sleep(1) # Ensure the token is more than 0 seconds old.%0A email = self.generator.consume_token(token, 0)%0A self.assertIsNone(email)%0A
|
5a4f05cb0f3a00a2d4faf828bd7850085c302541
|
Implement functionality to delete logs created by digital justice users
|
cla_backend/apps/cla_eventlog/management/commands/find_and_delete_old_cases.py
|
cla_backend/apps/cla_eventlog/management/commands/find_and_delete_old_cases.py
|
import sys
from django.core.management.base import BaseCommand
from dateutil.relativedelta import relativedelta
from legalaid.models import Case
from cla_butler.tasks import DeleteOldData
class FindAndDeleteCasesUsingCreationTime(DeleteOldData):
def get_eligible_cases(self):
two_years = self.now - relativedelta(years=2)
return Case.objects.filter(created__lte=two_years).exclude(log__created__gte=two_years)
class Command(BaseCommand):
help = (
"Find or delete cases that are 2 years old or over that were not deleted prior to the task command being fixed"
)
def handle(self, *args, **kwargs):
instance = FindAndDeleteCasesUsingCreationTime()
cases = instance.get_eligible_cases()
django_command = sys.argv[1]
if django_command == "test": # If command is run in test
if args and args[0] == "delete":
instance.run()
else:
return cases
else: # If command is run in terminal
if args and args[0] == "delete":
if len(args) > 1 and args[1] == "no-input":
instance.run()
else:
answer = raw_input(
"Number of cases that will be deleted: {0}\nAre you sure about this? (Yes/No) ".format(
cases.count()
)
)
if answer == "Yes":
instance.run()
else:
print("Number of cases to be deleted: " + str(cases.count()))
|
Python
| 0.002474
|
@@ -139,16 +139,52 @@
rt Case%0A
+from cla_eventlog.models import Log%0A
from cla
@@ -464,16 +464,150 @@
years)%0A%0A
+ def get_digital_justice_user_logs(self):%0A return Log.objects.filter(created_by__email__endswith=%22digital.justice.gov.uk%22)%0A%0A
%0Aclass C
@@ -638,17 +638,38 @@
help =
-(
+%22%22%22%0A Use cases:
%0A
@@ -669,17 +669,19 @@
-%22
+1.
Find or
@@ -781,22 +781,1368 @@
ng fixed
-%22
%0A
+ 2. Delete logs created by users with a @digital.justice.gov.uk email%0A %22%22%22%0A%0A def handle_test_command(self, args, cases):%0A digital_justice_user_logs = self.instance.get_digital_justice_user_logs()%0A if args%5B0%5D == %22delete%22:%0A self.instance.run()%0A elif args%5B0%5D == %22delete-logs%22:%0A self.instance._delete_objects(digital_justice_user_logs)%0A%0A def handle_terminal_command(self, args, cases):%0A digital_justice_user_logs = self.instance.get_digital_justice_user_logs()%0A if args%5B0%5D == %22delete%22:%0A if len(args) %3E 1 and args%5B1%5D == %22no-input%22:%0A self.instance.run()%0A else:%0A answer = raw_input(%0A %22Number of cases that will be deleted: %7B0%7D%5CnAre you sure about this? (Yes/No) %22.format(%0A cases.count()%0A )%0A )%0A if answer == %22Yes%22:%0A self.instance.run()%0A elif args%5B0%5D == %22delete-logs%22:%0A answer = raw_input(%0A %22Number of digital justice user logs that will be deleted: %7B0%7D%5CnAre you sure about this? (Yes/No) %22.format(%0A digital_justice_user_logs.count()%0A )%0A )%0A if answer == %22Yes%22:%0A self.instance._delete_objects(digital_justice_user_logs
)%0A%0A d
@@ -2175,32 +2175,37 @@
wargs):%0A
+self.
instance = FindA
@@ -2253,16 +2253,21 @@
cases =
+self.
instance
@@ -2415,63 +2415,62 @@
args
- and args%5B0%5D == %22delete%22:%0A instance.run(
+:%0A self.handle_test_command(args, cases
)%0A
@@ -2584,470 +2584,66 @@
args
- and args%5B0%5D == %22delete%22:%0A if len(args) %3E 1 and args%5B1%5D == %22no-input%22:%0A instance.run()%0A else:%0A answer = raw_input(%0A %22Number of cases that will be deleted: %7B0%7D%5CnAre you sure about this? (Yes/No) %22.format(%0A cases.count()%0A )%0A )%0A if answer == %22Yes%22:%0A instance.run(
+:%0A self.handle_terminal_command(args, cases
)%0A
|
f5c5c7de8af6ae5251ac1d878569c2692e119a04
|
Set the login/logout URLs so authentication works.
|
adapt/settings.py
|
adapt/settings.py
|
"""
Django settings for adapt project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY', 'changeme')
# SECURITY WARNING: don't run with debug turned on in production!
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'import_export',
'adapt',
'clients',
'reports',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'adapt.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'adapt.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Detroit'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# Parse database configuration from $DATABASE_URL
import dj_database_url
db_config = dj_database_url.config()
if db_config:
DATABASES['default'] = db_config
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
if os.getenv('DEV'):
DEBUG = True
else:
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 60*60*24*365
|
Python
| 0
|
@@ -3410,8 +3410,66 @@
*24*365%0A
+%0ALOGIN_URL = '/admin/login/'%0ALOGOUT_URL = '/admin/logout/'
|
5530b4f99c22d9734cafefa89c6b7fc12ce339ed
|
Fix bug in create_html_page - undefined variable (#8700)
|
contrib/confluence/src/python/pants/contrib/confluence/util/confluence_util.py
|
contrib/confluence/src/python/pants/contrib/confluence/util/confluence_util.py
|
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import getpass
import logging
import mimetypes
from os.path import basename
from urllib.parse import quote_plus
from xmlrpc.client import Binary
from xmlrpc.client import Error as XMLRPCError
from xmlrpc.client import ServerProxy
log = logging.getLogger(__name__)
# Copied from `twitter.common.confluence`
# Copyright 2012 Twitter, Inc.
"""Code to ease publishing text to Confluence wikis."""
mimetypes.init()
class ConfluenceError(Exception):
"""Indicates a problem performing an action with confluence."""
class Confluence:
"""Interface for fetching and storing data in confluence."""
def __init__(self, api_entrypoint, server_url, session_token, content_format='markdown'):
"""Initialize with an established confluence connection."""
self._api_entrypoint = api_entrypoint
self._server_url = server_url
self._session_token = session_token
self._content_format = content_format
@staticmethod
def login(confluence_url, user=None, api_entrypoint='confluence2'):
"""Prompts the user to log in to confluence, and returns a Confluence object.
:param confluence_url: Base url of wiki, e.g. https://confluence.atlassian.com/
:param user: Username
:param api_entrypoint: 'confluence1' or None results in Confluence 3.x. The default
value is 'confluence2' which results in Confluence 4.x or 5.x
:rtype: returns a connected Confluence instance
raises ConfluenceError if login is unsuccessful.
"""
server = ServerProxy(confluence_url + '/rpc/xmlrpc')
user = user or getpass.getuser()
password = getpass.getpass('Please enter confluence password for %s: ' % user)
if api_entrypoint in (None, 'confluence1'):
# TODO(???) didn't handle this JSirois review comment:
# Can you just switch on in create_html_page?
# Alternatively store a lambda here in each branch.
api = server.confluence1
fmt = 'markdown'
elif api_entrypoint == 'confluence2':
api = server.confluence2
fmt = 'xhtml'
else:
raise ConfluenceError("Don't understand api_entrypoint %s" % api_entrypoint)
try:
return Confluence(api, confluence_url, api.login(user, password), fmt)
except XMLRPCError as e:
raise ConfluenceError('Failed to log in to %s: %s' % (confluence_url, e))
@staticmethod
def get_url(server_url, wiki_space, page_title):
""" return the url for a confluence page in a given space and with a given
title. """
return '%s/display/%s/%s' % (server_url, wiki_space, quote_plus(page_title))
def logout(self):
"""Terminates the session and connection to the server.
Upon completion, the invoking instance is no longer usable to communicate with confluence.
"""
self._api_entrypoint.logout(self._session_token)
def getpage(self, wiki_space, page_title):
""" Fetches a page object.
Returns None if the page does not exist or otherwise could not be fetched.
"""
try:
return self._api_entrypoint.getPage(self._session_token, wiki_space, page_title)
except XMLRPCError as e:
log.warning('Failed to fetch page %s: %s' % (page_title, e))
return None
def storepage(self, page):
"""Stores a page object, updating the page if it already exists.
returns the stored page, or None if the page could not be stored.
"""
try:
return self._api_entrypoint.storePage(self._session_token, page)
except XMLRPCError as e:
log.error('Failed to store page %s: %s' % (page.get('title', '[unknown title]'), e))
return None
def removepage(self, page):
"""Deletes a page from confluence.
raises ConfluenceError if the page could not be removed.
"""
try:
self._api_entrypoint.removePage(self._session_token, page)
except XMLRPCError as e:
raise ConfluenceError('Failed to delete page: %s' % e)
def create(self, space, title, content, parent_page=None, **pageoptions):
""" Create a new confluence page with the given title and content. Additional page options
available in the xmlrpc api can be specified as kwargs.
returns the created page or None if the page could not be stored.
raises ConfluenceError if a parent page was specified but could not be found.
"""
pagedef = dict(
space = space,
title = title,
url = Confluence.get_url(self._server_url, space, title),
content = content,
contentStatus = 'current',
current = True
)
pagedef.update(**pageoptions)
if parent_page:
# Get the parent page id.
parent_page_obj = self.getpage(space, parent_page)
if parent_page_obj is None:
raise ConfluenceError('Failed to find parent page %s in space %s' % (parent_page, space))
pagedef['parentId'] = parent_page_obj['id']
# Now create the page
return self.storepage(pagedef)
def create_html_page(self, space, title, html, parent_page=None, **pageoptions):
    """Create a page that renders raw *html*, wrapped for the configured markup.

    The wrapper depends on self._content_format: 'markdown' fences the
    markup with {html} markers, 'xhtml' embeds it in an html macro CDATA
    block. Returns the stored page (see create()), or None on failure.
    Raises ConfluenceError for an unsupported content format.
    """
    if self._content_format == 'markdown':
        content = '{html}\n\n%s\n\n{html}' % html
    elif self._content_format == 'xhtml':
        content = '''<ac:macro ac:name="html">
<ac:plain-text-body><![CDATA[%s]]></ac:plain-text-body>
</ac:macro>''' % html
    else:
        # Bug fix: the message previously interpolated the *builtin*
        # format() function ("%s" % format) instead of the configured
        # format name, producing a nonsense error message.
        raise ConfluenceError("Don't know how to convert %s to HTML" % self._content_format)
    return self.create(space, title, content, parent_page, **pageoptions)
def addattachment(self, page, filename):
    """Upload the file at *filename* as an attachment on *page*.

    Note: the whole file is read into memory before uploading.
    Returns the attachment record, or None when the file cannot be
    read or the upload is rejected (both failures are logged).
    Raises ConfluenceError when the MIME type cannot be guessed.
    """
    mime_type = mimetypes.guess_type(filename, strict=False)[0]
    if not mime_type:
        raise ConfluenceError('Failed to detect MIME type of %s' % filename)
    try:
        with open(filename, 'rb') as f:
            file_data = f.read()
        attachment = {'fileName': basename(filename), 'contentType': mime_type}
        return self._api_entrypoint.addAttachment(self._session_token,
                                                  page['id'],
                                                  attachment,
                                                  Binary(file_data))
    except (IOError, OSError) as e:
        log.error('Failed to read data from file %s: %s' % (filename, str(e)))
        return None
    except XMLRPCError:
        log.error('Failed to add file attachment %s to page: %s' %
                  (filename, page.get('title', '[unknown title]')))
        return None
|
Python
| 0
|
@@ -5226,16 +5226,17 @@
ntent =
+f
'''%3Cac:m
@@ -5295,18 +5295,22 @@
!%5BCDATA%5B
-%25s
+%7Bhtml%7D
%5D%5D%3E%3C/ac:
@@ -5350,23 +5350,16 @@
acro%3E'''
- %25 html
%0A els
@@ -5385,24 +5385,25 @@
luenceError(
+f
%22Don't know
@@ -5421,28 +5421,39 @@
ert
-%25s to HTML%22 %25 format
+%7Bself._content_format%7D to HTML%22
)%0A
|
3e7d52cc9715963eb9a31c2a5e4085d323ddb78f
|
Fix how silence_coroutine handles exceptions on handler
|
again/decorate.py
|
again/decorate.py
|
from functools import wraps
# ANSI escape sequences used to colour the log/logx output.
RED = '\033[91m'
BLUE = '\033[94m'
BOLD = '\033[1m'
END = '\033[0m'


def _default_handler(e, *args, **kwargs):
    """Default exception handler: swallow the exception and return None."""
    pass


def _matches(e, target_exceptions):
    """Return True when exception *e* matches *target_exceptions*.

    *target_exceptions* may be a single exception class or a list of
    exception classes.
    """
    if isinstance(target_exceptions, list):
        return any(isinstance(e, each) for each in target_exceptions)
    return isinstance(e, target_exceptions)


def silence(target_exceptions: list, exception_handler=_default_handler):
    """Decorator factory that silences the given exception types.

    When the wrapped function raises a matching exception, the
    exception_handler is invoked with (exception, *args, **kwargs) and
    its return value becomes the call's result. Non-matching exceptions
    are re-raised unchanged.
    """
    def decor(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                if _matches(e, target_exceptions):
                    return exception_handler(e, *args, **kwargs)
                # Bare raise preserves the original traceback
                # (raise e would restart it here).
                raise
        return wrapper
    return decor


def silence_coroutine(target_exceptions: list, exception_handler=_default_handler):
    """Generator/coroutine flavour of silence().

    Bug fix: matching exceptions are now passed to exception_handler
    and its result is returned (becoming the generator's StopIteration
    value); previously the single-class case re-raised matched
    exceptions and the list case silently swallowed unmatched ones.
    Non-matching exceptions are re-raised.
    """
    def decor(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                yield from func(*args, **kwargs)
            except Exception as e:
                if _matches(e, target_exceptions):
                    return exception_handler(e, *args, **kwargs)
                raise
        return wrapper
    return decor
"""
logs parameters and result - takes no arguments
"""
def func(*args, **kwargs):
arg_string = ""
for i in range(0, len(args)):
var_name = fn.__code__.co_varnames[i]
if var_name != "self":
arg_string += var_name + ":" + str(args[i]) + ","
arg_string = arg_string[0:len(arg_string) - 1]
string = (RED + BOLD + '>> ' + END + 'Calling {0}({1})'.format(fn.__code__.co_name, arg_string))
if len(kwargs):
string = (RED + BOLD + '>> ' + END + 'Calling {0} with args {1} and kwargs {2}'.format(fn.__code__.co_name,
arg_string, kwargs))
print(string)
result = fn(*args, **kwargs)
string = BLUE + BOLD + '<< ' + END + 'Return {0} with result :{1}'.format(fn.__code__.co_name, result)
print(string)
return result
return func
def logx(supress_args=None, supress_all_args=False, supress_result=False, receiver=None):
    """
    logs parameters and result
    takes arguments
    supress_args - list of parameter names to supress
    supress_all_args - boolean to supress all arguments
    supress_result - boolean to supress result
    receiver - custom logging function which takes a string as input; defaults to logging on stdout
    """
    if supress_args is None:
        # Default changed from a shared mutable [] to None; behaviour is
        # unchanged for callers (the list was never mutated).
        supress_args = []

    def decorator(fn):
        @wraps(fn)  # preserve fn's metadata on the wrapper (was missing)
        def func(*args, **kwargs):
            def emit(message):
                # Route output through the receiver when provided, stdout otherwise.
                if receiver:
                    receiver(message)
                else:
                    print(message)

            if not supress_all_args:
                pairs = []
                for i in range(len(args)):
                    var_name = fn.__code__.co_varnames[i]
                    if var_name != "self" and var_name not in supress_args:
                        pairs.append(var_name + ":" + str(args[i]))
                arg_string = ",".join(pairs)
                string = (RED + BOLD + '>> ' + END + 'Calling {0}({1})'.format(fn.__code__.co_name, arg_string))
                if len(kwargs):
                    string = (
                        RED + BOLD + '>> ' + END + 'Calling {0} with args {1} and kwargs {2}'.format(
                            fn.__code__.co_name,
                            arg_string, kwargs))
                emit(string)
            result = fn(*args, **kwargs)
            if not supress_result:
                string = BLUE + BOLD + '<< ' + END + 'Return {0} with result :{1}'.format(fn.__code__.co_name, result)
                emit(string)
            return result
        return func
    return decorator
def value_check(arg_name, pos, allowed_values):
    """
    allows value checking at runtime for args or kwargs

    arg_name - keyword name of the checked parameter
    pos - positional index of the checked parameter
    allowed_values - container of values the parameter may take

    Raises ValueError when the checked parameter is supplied (either
    positionally or by keyword) with a value outside allowed_values.
    When the parameter is absent entirely, the call is delegated to fn
    so python itself reports the missing argument.
    """
    def decorator(fn):
        @wraps(fn)
        def logic(*args, **kwargs):
            if pos < len(args):
                # Parameter supplied positionally.
                if args[pos] not in allowed_values:
                    raise ValueError(
                        "'{0}' at position {1} not in allowed values {2}".format(args[pos], pos, allowed_values))
            elif arg_name in kwargs:
                # Parameter supplied by keyword. Message fixed: it used to
                # claim the kwarg name itself was disallowed.
                if kwargs[arg_name] not in allowed_values:
                    raise ValueError(
                        "'{0}' for '{1}' not in allowed values {2}".format(
                            kwargs[arg_name], arg_name, allowed_values))
            # Parameter absent (e.g. partially applied call): let python
            # handle it. The original silently returned None in the
            # no-args/missing-kwarg case without ever calling fn.
            return fn(*args, **kwargs)
        return logic
    return decorator
def type_check(arg_name, pos, reqd_type):
    """
    allows type checking at runtime for args or kwargs

    arg_name - keyword name of the checked parameter
    pos - positional index of the checked parameter
    reqd_type - type (or tuple of types) the parameter must be an instance of

    Raises TypeError when the checked parameter is supplied (either
    positionally or by keyword) with a value of the wrong type. When
    the parameter is absent entirely, the call is delegated to fn so
    python itself reports the missing argument.
    """
    def decorator(fn):
        @wraps(fn)
        def logic(*args, **kwargs):
            if pos < len(args):
                # Parameter supplied positionally.
                if not isinstance(args[pos], reqd_type):
                    raise TypeError("'{0}' at position {1} not of type {2}".format(args[pos], pos, reqd_type))
            elif arg_name in kwargs:
                # Parameter supplied by keyword.
                if not isinstance(kwargs[arg_name], reqd_type):
                    raise TypeError("'{0}' is not of type {1}".format(arg_name, reqd_type))
            # Parameter absent (e.g. partially applied call): let python
            # handle it. The original silently returned None in the
            # no-args/missing-kwarg case without ever calling fn.
            return fn(*args, **kwargs)
        return logic
    return decorator
|
Python
| 0.000536
|
@@ -133,16 +133,20 @@
kwargs):
+%0A
pass%0A%0A%0A
@@ -1286,26 +1286,22 @@
-yield from
+return
excepti
@@ -1402,32 +1402,101 @@
et_exceptions):%0A
+ return exception_handler(e, *args, **kwargs)%0A
@@ -7250,29 +7250,28 @@
logic%0A%0A return decorator
-%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.