| commit (string, 40 chars) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars) |
|---|---|---|---|---|---|---|---|
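A minimal sketch of loading and filtering a table like this, assuming it is published on the Hugging Face Hub; the dataset ID below is a placeholder, since the excerpt does not name one:

```python
from datasets import load_dataset

# "user/commit-diffs" is a hypothetical placeholder, not a real dataset ID.
ds = load_dataset("user/commit-diffs", split="train")

# Keep Python rows whose classifier probability clears a chosen threshold.
python_rows = ds.filter(lambda row: row["lang"] == "Python" and row["proba"] > 0.5)

for row in python_rows.select(range(3)):
    print(row["commit"][:8], row["subject"])
```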
da26428a6f7adf58e7cfed8ece61fc42ed76345e
|
Remove commented out code. pep8/pyflakes
|
src/recore/amqp.py
|
src/recore/amqp.py
|
# -*- coding: utf-8 -*-
# Copyright © 2014 SEE AUTHORS FILE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import json
import pika
import recore.fsm
import threading
MQ_CONF = {}
connection = None
out = logging.getLogger('recore.amqp')
def init_amqp(mq):
"""Open a channel to our AMQP server"""
import recore.amqp
recore.amqp.MQ_CONF = mq
creds = pika.credentials.PlainCredentials(mq['NAME'], mq['PASSWORD'])
params = pika.ConnectionParameters(
host=str(mq['SERVER']),
credentials=creds)
out.debug('Attemtping to open channel...')
connect_string = "amqp://%s:******@%s:%s/%s" % (
mq['NAME'], mq['SERVER'], mq['PORT'], mq['EXCHANGE'])
recore.amqp.connection = pika.SelectConnection(parameters=params,
on_open_callback=on_open)
return recore.amqp.connection
def on_open(connection):
"""
Call back when a connection is opened.
"""
out.debug("Opened AMQP connection")
connection.channel(on_channel_open)
def on_channel_open(channel):
"""
Call back when a channel is opened.
"""
out.debug("MQ channel opened. Declaring exchange ...")
channel.exchange_declare(exchange=MQ_CONF['EXCHANGE'],
durable=True,
exchange_type='topic')
consumer_tag = channel.basic_consume(
receive,
queue=MQ_CONF['QUEUE'])
# def watch_the_queue(channel, connection, queue_name):
# """Begin consuming messages `queue_name` on the bus. Set our default
# callback handler
# """
# channel.basic_consume(receive,
# queue=queue_name)
# try:
# notify = logging.getLogger('recore.stdout')
# notify.info('FSM online and listening for messages')
# out = logging.getLogger('recore')
# out.debug('Consuming messages from queue: %s' % queue_name)
# except KeyboardInterrupt:
# channel.close()
# connection.close()
# pass
def receive(ch, method, properties, body):
"""
Callback for watching the FSM queue
"""
out = logging.getLogger('recore')
notify = logging.getLogger('recore.stdout')
msg = json.loads(body)
topic = method.routing_key
out.debug("Message: %s" % msg)
ch.basic_ack(delivery_tag=method.delivery_tag)
if topic == 'job.create':
id = None
try:
# We need to get the name of the temporary
# queue to respond back on.
notify.info("new job create for: %s" % msg['project'])
out.info(
"New job requested, starting release "
"process for %s ..." % msg["project"])
notify.debug("Job message: %s" % msg)
reply_to = properties.reply_to
id = recore.job.create.release(
ch, msg['project'], reply_to, msg['dynamic'])
except KeyError, ke:
notify.info("Missing an expected key in message: %s" % ke)
out.error("Missing an expected key in message: %s" % ke)
return
if id:
# Skip this try/except until we work all the bugs out of the FSM
# try:
runner = recore.fsm.FSM(id)
runner.start()
while runner.isAlive():
runner.join(0.3)
# except Exception, e:
# notify.error(str(e))
else:
out.warn("Unknown routing key %s. Doing nothing ...")
notify.info("IDK what this is: %s" % topic)
notify.info("end receive() routine")
out.debug("end receive() routine")
|
Python
| 0.000071
|
@@ -772,25 +772,8 @@
fsm%0A
-import threading%0A
%0A%0AMQ
@@ -1135,55 +1135,9 @@
ds)%0A
- out.debug('Attemtping to open channel...')
%0A
+
@@ -1243,24 +1243,109 @@
EXCHANGE'%5D)%0A
+ out.debug('Attemtping to open channel with connect string: %25s' %25 connect_string)%0A
recore.a
@@ -1514,16 +1514,17 @@
ection%0A%0A
+%0A
def on_o
@@ -2075,596 +2075,32 @@
'%5D)%0A
-%0A%0A# def watch_the_queue(channel, connection, queue_name):%0A# %22%22%22Begin consuming messages %60queue_name%60 on the bus. Set our default%0A# callback handler%0A# %22%22%22%0A# channel.basic_consume(receive,%0A# queue=queue_name)%0A# try:%0A# notify = logging.getLogger('recore.stdout')%0A# notify.info('FSM online and listening for messages')%0A# out = logging.getLogger('recore')%0A# out.debug('Consuming messages from queue: %25s' %25 queue_name)%0A# except KeyboardInterrupt:%0A# channel.close()%0A# connection.close()%0A# pass
+ return consumer_tag%0A
%0A%0Ade
|
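The `diff` column percent-encodes control characters (`%0A` for newline, `%22` for a double quote, `%5B`/`%5D` for brackets, `%25` for a literal percent sign). A minimal decoding sketch, assuming plain percent-encoding throughout:

```python
from urllib.parse import unquote

def decode_diff(raw: str) -> str:
    # Use unquote, not unquote_plus, so literal '+' hunk markers survive.
    return unquote(raw)

# Fragment from the row above: "ection%0A%0A" decodes to "ection\n\n".
print(decode_diff("ection%0A%0A +%0A def on_o"))
```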
af06b3ec3594564ba4f822760f3b436dd7fa0b73
|
drop _ScanIterPairs helper
|
aioredis/util.py
|
aioredis/util.py
|
from urllib.parse import urlparse, parse_qsl
from .log import logger
_NOTSET = object()
# NOTE: never put here anything else;
# just this basic types
_converters = {
bytes: lambda val: val,
bytearray: lambda val: val,
str: lambda val: val.encode('utf-8'),
int: lambda val: str(val).encode('utf-8'),
float: lambda val: str(val).encode('utf-8'),
}
def _bytes_len(sized):
return str(len(sized)).encode('utf-8')
def encode_command(*args):
"""Encodes arguments into redis bulk-strings array.
Raises TypeError if any of args not of bytearray, bytes, float, int, or str
type.
"""
buf = bytearray()
def add(data):
return buf.extend(data + b'\r\n')
add(b'*' + _bytes_len(args))
for arg in args:
if type(arg) in _converters:
barg = _converters[type(arg)](arg)
add(b'$' + _bytes_len(barg))
add(barg)
else:
raise TypeError("Argument {!r} expected to be of bytearray, bytes,"
" float, int, or str type".format(arg))
return buf
def decode(obj, encoding):
if isinstance(obj, bytes):
return obj.decode(encoding)
elif isinstance(obj, list):
return [decode(o, encoding) for o in obj]
return obj
async def wait_ok(fut):
res = await fut
if res in (b'QUEUED', 'QUEUED'):
return res
return res in (b'OK', 'OK')
async def wait_convert(fut, type_, **kwargs):
result = await fut
if result in (b'QUEUED', 'QUEUED'):
return result
return type_(result, **kwargs)
async def wait_make_dict(fut):
res = await fut
if res in (b'QUEUED', 'QUEUED'):
return res
it = iter(res)
return dict(zip(it, it))
class coerced_keys_dict(dict):
def __getitem__(self, other):
if not isinstance(other, bytes):
other = _converters[type(other)](other)
return dict.__getitem__(self, other)
def __contains__(self, other):
if not isinstance(other, bytes):
other = _converters[type(other)](other)
return dict.__contains__(self, other)
class _BaseScanIter:
__slots__ = ('_scan', '_cur', '_ret')
def __init__(self, scan):
self._scan = scan
self._cur = b'0'
self._ret = []
def __aiter__(self):
return self
class _ScanIter(_BaseScanIter):
async def __anext__(self):
while not self._ret and self._cur:
self._cur, self._ret = await self._scan(self._cur)
if not self._cur and not self._ret:
raise StopAsyncIteration # noqa
else:
ret = self._ret.pop(0)
return ret
class _ScanIterPairs(_BaseScanIter):
async def __anext__(self):
while not self._ret and self._cur:
self._cur, ret = await self._scan(self._cur)
self._ret = list(zip(ret[::2], ret[1::2]))
if not self._cur and not self._ret:
raise StopAsyncIteration # noqa
else:
ret = self._ret.pop(0)
return ret
def _set_result(fut, result, *info):
if fut.done():
logger.debug("Waiter future is already done %r %r", fut, info)
assert fut.cancelled(), (
"waiting future is in wrong state", fut, result, info)
else:
fut.set_result(result)
def _set_exception(fut, exception):
if fut.done():
logger.debug("Waiter future is already done %r", fut)
assert fut.cancelled(), (
"waiting future is in wrong state", fut, exception)
else:
fut.set_exception(exception)
def parse_url(url):
"""Parse Redis connection URI.
Parse according to IANA specs:
* https://www.iana.org/assignments/uri-schemes/prov/redis
* https://www.iana.org/assignments/uri-schemes/prov/rediss
Also more rules applied:
* empty scheme is treated as unix socket path no further parsing is done.
* 'unix://' scheme is treated as unix socket path and parsed.
* Multiple query parameter values and blank values are considered error.
* DB number specified as path and as query parameter is considered error.
* Password specified in userinfo and as query parameter is
considered error.
"""
r = urlparse(url)
assert r.scheme in ('', 'redis', 'rediss', 'unix'), (
"Unsupported URI scheme", r.scheme)
if r.scheme == '':
return url, {}
query = {}
for p, v in parse_qsl(r.query, keep_blank_values=True):
assert p not in query, ("Multiple parameters are not allowed", p, v)
assert v, ("Empty parameters are not allowed", p, v)
query[p] = v
if r.scheme == 'unix':
assert r.path, ("Empty path is not allowed", url)
assert not r.netloc, (
"Netlocation is not allowed for unix scheme", r.netloc)
return r.path, _parse_uri_options(query, '', r.password)
address = (r.hostname or 'localhost', int(r.port or 6379))
path = r.path
if path.startswith('/'):
path = r.path[1:]
options = _parse_uri_options(query, path, r.password)
if r.scheme == 'rediss':
options['ssl'] = True
return address, options
def _parse_uri_options(params, path, password):
def parse_db_num(val):
if not val:
return
assert val.isdecimal(), ("Invalid decimal integer", val)
assert val == '0' or not val.startswith('0'), (
"Expected integer without leading zeroes", val)
return int(val)
options = {}
db1 = parse_db_num(path)
db2 = parse_db_num(params.get('db'))
assert db1 is None or db2 is None, (
"Single DB value expected, got path and query", db1, db2)
if db1 is not None:
options['db'] = db1
elif db2 is not None:
options['db'] = db2
password2 = params.get('password')
assert not password or not password2, (
"Single password value is expected, got in net location and query")
if password:
options['password'] = password
elif password2:
options['password'] = password2
if 'encoding' in params:
options['encoding'] = params['encoding']
if 'ssl' in params:
assert params['ssl'] in ('true', 'false'), (
"Expected 'ssl' param to be 'true' or 'false' only",
params['ssl'])
options['ssl'] = params['ssl'] == 'true'
if 'timeout' in params:
options['timeout'] = float(params['timeout'])
return options
|
Python
| 0
|
@@ -2126,20 +2126,16 @@
%0Aclass _
-Base
ScanIter
@@ -2136,16 +2136,17 @@
anIter:%0A
+%0A
__sl
@@ -2335,42 +2335,8 @@
lf%0A%0A
-%0Aclass _ScanIter(_BaseScanIter):%0A%0A
@@ -2397,32 +2397,32 @@
and self._cur:%0A
+
self
@@ -2635,395 +2635,8 @@
t%0A%0A%0A
-class _ScanIterPairs(_BaseScanIter):%0A%0A async def __anext__(self):%0A while not self._ret and self._cur:%0A self._cur, ret = await self._scan(self._cur)%0A self._ret = list(zip(ret%5B::2%5D, ret%5B1::2%5D))%0A if not self._cur and not self._ret:%0A raise StopAsyncIteration # noqa%0A else:%0A ret = self._ret.pop(0)%0A return ret%0A%0A%0A
def
|
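The `@@ -start,len +start,len @@` headers in these diffs appear to index characters, not lines: offsets such as 2126 in the row above exceed the line count of `aioredis/util.py`, and in the first row's diff the `@@ -772,25 +772,8 @@` hunk checks out in characters (4 context + 17 removed + 4 context = 25 old; 4 + 4 = 8 new). Under that assumption, the post-commit file size can be predicted from the headers alone:

```python
import re

HUNK_HEADER = re.compile(r"@@ -(\d+),(\d+) \+(\d+),(\d+) @@")

def predicted_new_length(old_length: int, decoded_diff: str) -> int:
    # Sum per-hunk size changes: new span length minus old span length.
    delta = sum(int(new) - int(old)
                for _, old, _, new in HUNK_HEADER.findall(decoded_diff))
    return old_length + delta
```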
c7feaaf8bdec70c9c5eda402eedb411c5976abfd
|
Fix a typo. Cut some unnecessary output.
|
src/count/countGoodKeysByTarget.py
|
src/count/countGoodKeysByTarget.py
|
"""Given a SEQ(Text, Text) input file to use as an RDD, where the
value field is supposed to be a dictionary in JSON, count the number
of occurances of each unique key in the set of dictionaries, for each
publisher. Print the resulting map (key => count), sorted by key."""
import argparse
import json
import sys
from pyspark import SparkContext
def getKeysByTarget(value):
global goodJsonRecords, badJsonRecords, noExtractionsCount, noTitleCount, noTitleAttribsCount, noTitleAttribsTargetCount, noUrlCount
try:
d = json.loads(value)
goodJsonRecords += 1
except:
badJsonRecords += 1
return iter([])
goodTargetName = False
if "extractions" not in d:
targetName = "(No extractions)"
noExtractionsCount += 1
extractions = None
else:
extractions = d["extractions"]
if "title" not in extractions:
targetName = "(No title)"
noTitleCount += 1
elif "attribs" not in extractions["title"]:
targetName = "(No title attribs)"
noTitleAttribsCount += 1
elif "target" not in extractions["title"]["attribs"]:
targetName = "(No title attribs target)"
noTitleAttribsTargetCount += 1
else:
targetName = extractions["title"]["attribs"]["target"]
goodTargetName = True
if not goodTargetName:
if "url" not in d:
noUrlCount =+ 1
else:
# Go for URL:
url = d["url"]
httpPart, emptyPart, domainName, remainder = url.split("/", 3)
if domainName:
targetName = domainName + " " + targetName
results = [ json.dumps(targetName + ": " + key) for key in d.keys() ]
if extractions:
results.extend([ json.dumps(targetName + ": extractions: " + key) for key in extractions.keys() ])
return iter(results)
def getKeysByDomainName(value):
global goodJsonRecords, badJsonRecords, noExtractionsCount, noUrlCount, noDomainNameCount
try:
d = json.loads(value)
goodJsonRecords += 1
except:
badJsonRecords += 1
return iter([])
if "url" not in d:
domainName = "(no url)"
noUrlCount =+ 1
else:
url = d["url"]
splitUrl = url.split("/")
if len(splitUrl) < 3:
domainName="(bad url)"
else:
httpPart, emptyPart, domainName = splitUrl[:3]
if not domainName:
domainName="(no domain name)"
# Reduce the domain name in an ad-hoc way:
components = domainName.split(".")
if len(components) >= 2:
if components[-2] in ["com"] and len(components) >= 3:
domainName = ".".join(components[-3:])
else:
domainName = ".".join(components[-2:])
if "extractions" not in d:
noExtractionsCount += 1
extractions = None
else:
extractions = d["extractions"]
results = [ json.dumps(domainName + ": " + key) for key in d.keys() ]
if extractions:
results.extend([ json.dumps(domainName + ": extractions: " + key) for key in extractions.keys() ])
return iter(results)
def main(argv=None):
'''this is called if run from command line'''
parser = argparse.ArgumentParser()
parser.add_argument('-i','--input', help="Seq input file on cluster.", required=True)
parser.add_argument('-u','--byUrl', help="Group by URL domain name.", required=False, action='store_true')
args = parser.parse_args()
sc = SparkContext()
global goodJsonRecords, badJsonRecords, noExtractionsCount, noTitleCount, noTitleAttribsCount, noTitleAttribsTargetCount, noUrlCount, noDomainNameCount
goodJsonRecords = sc.accumulator(0)
badJsonRecords = sc.accumulator(0)
noUrlCount = sc.accumulator(0)
noDomainNameCount = sc.accumulator(0)
noExtractionsCount = sc.accumulator(0)
noTitleCount = sc.accumulator(0)
noTitleAttribsCount = sc.accumulator(0)
noTitleAttribsTargetCount = sc.accumulator(0)
data = sc.sequenceFile(args.input, "org.apache.hadoop.io.Text", "org.apache.hadoop.io.Text")
if args.byUrl:
keyCounts = data.values().flatMap(getKeysByDomainName).countByValue()
else:
keyCounts = data.values().flatMap(getKeysByTarget).countByValue()
print "========================================"
print "goodJsonRecords = %d" % goodJsonRecords.value
print "badJsonRecords = %d" % badJsonRecords.value
print "noUrlCount = %d" % noUrlCount.value
print "noComainNameCount = %d" % noDomainNameCount.value
print "noExtractionsCount = %d" % noExtractionsCount.value
print "noTitleCount = %d" % noTitleCount.value
print "noTitleAttribsCount = %d" % noTitleAttribsCount.value
print "noTitleAttribsTargetCount = %d" % noTitleAttribsTargetCount.value
print "========================================"
for k in sorted(keyCounts):
print k, keyCounts[k]
print "========================================"
sc.stop()
# call main() if this is run as standalone
if __name__ == "__main__":
sys.exit(main())
|
Python
| 0.003516
|
@@ -4618,17 +4618,17 @@
rint %22no
-C
+D
omainNam
@@ -4721,32 +4721,59 @@
ionsCount.value%0A
+ if not args.byUrl:%0A
print %22noTit
@@ -4799,32 +4799,36 @@
itleCount.value%0A
+
print %22noTit
@@ -4868,32 +4868,36 @@
ribsCount.value%0A
+
print %22noTit
|
cb3b30ee147938380930ecb81f2160b311dcad8c
|
Check index data size
|
karabo_data/validation.py
|
karabo_data/validation.py
|
from argparse import ArgumentParser
from functools import partial
from glob import glob
import h5py
import numpy as np
import os
import sys
from .reader import DataCollection, H5File, FileAccess
class ValidationError(Exception):
def __init__(self, problems):
self.problems = problems
def __str__(self):
lines = []
for prob in self.problems:
lines.extend(['', prob['msg']])
for k, v in sorted(prob.items()):
if k != 'msg':
lines.append(" {}: {}".format(k, v))
return '\n'.join(lines)
class FileValidator:
def __init__(self, file: FileAccess):
self.file = file
self.filename = file.filename
self.problems = []
def validate(self):
problems = self.run_checks()
if problems:
raise ValidationError(problems)
def run_checks(self):
self.problems = []
self.check_indices()
self.check_trainids()
return self.problems
def record(self, msg, **kwargs):
self.problems.append(dict(msg=msg, file=self.filename, **kwargs))
def check_trainids(self):
ds_path = 'INDEX/trainId'
train_ids = self.file.file[ds_path][:]
if (train_ids == 0).any():
first0 = train_ids.tolist().index(0)
if not (train_ids[first0:] == 0).all():
self.record(
'Zeroes in trainId index before last train ID', dataset=ds_path
)
nonzero_tids = train_ids[train_ids != 0]
else:
nonzero_tids = train_ids
if len(nonzero_tids) > 1:
non_incr = (nonzero_tids[1:] <= nonzero_tids[:-1]).nonzero()[0]
if non_incr.size > 0:
pos = non_incr[0]
self.record(
'Train IDs are not strictly increasing, e.g. at {} ({} >= {})'.format(
pos, nonzero_tids[pos], nonzero_tids[pos + 1]
),
dataset=ds_path,
)
def check_indices(self):
for src in self.file.instrument_sources:
src_groups = set()
for key in self.file.get_keys(src):
ds_path = 'INSTRUMENT/{}/{}'.format(src, key.replace('.', '/'))
group = key.split('.', 1)[0]
src_groups.add((src, group))
first, count = self.file.get_index(src, group)
data_dim0 = self.file.file[ds_path].shape[0]
if np.any((first + count) > data_dim0):
max_end = (first + count).max()
self.record(
'Index referring to data ({}) outside dataset ({})'.format(
max_end, data_dim0
),
dataset=ds_path,
)
for src, group in src_groups:
record = partial(self.record, dataset='INDEX/{}/{}'.format(src, group))
first, count = self.file._read_index(src, group)
check_index_contiguous(first, count, record)
def check_index_contiguous(firsts, counts, record):
probs = []
if firsts[0] != 0:
record("Index doesn't start at 0")
gaps = firsts[1:].astype(np.int64) - (firsts + counts)[:-1]
gap_ixs = (gaps > 0).nonzero()[0]
if gap_ixs.size > 0:
pos = gap_ixs[0]
record("Gaps ({}) in index, e.g. at {} ({} + {} < {})".format(
gap_ixs.size, pos, firsts[pos], counts[pos], firsts[pos+1]
))
overlap_ixs = (gaps < 0).nonzero()[0]
if overlap_ixs.size > 0:
pos = overlap_ixs[0]
record("Overlaps ({}) in index, e.g. at {} ({} + {} > {})".format(
overlap_ixs.size, pos, firsts[pos], counts[pos], firsts[pos + 1]
))
return probs
class RunValidator:
def __init__(self, run_dir: str):
files = []
self.files_excluded = []
self.run_dir = run_dir
for path in glob(os.path.join(run_dir, '*.h5')):
try:
fa = FileAccess(h5py.File(path, 'r'))
except Exception as e:
self.files_excluded.append((path, e))
else:
files.append(fa)
self.run = DataCollection(files)
self.problems = []
def validate(self):
problems = self.run_checks()
if problems:
raise ValidationError(problems)
def run_checks(self):
self.problems = []
self.check_files_openable()
self.check_files()
return self.problems
def check_files_openable(self):
for path, err in self.files_excluded:
self.problems.append(dict(msg="Could not open file", file=path, error=err))
if not self.run.files:
self.problems.append(
dict(msg="No usable files found", directory=self.run_dir)
)
def check_files(self):
for f in self.run.files:
fv = FileValidator(f)
self.problems.extend(fv.run_checks())
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
ap = ArgumentParser(prog='karabo-data-validate')
ap.add_argument('path', help="HDF5 file or run directory of HDF5 files.")
args = ap.parse_args(argv)
path = args.path
if os.path.isdir(path):
print("Checking run directory:", path)
validator = RunValidator(path)
else:
print("Checking file:", path)
validator = FileValidator(H5File(path).files[0])
try:
validator.validate()
print("No problems found")
except ValidationError as ve:
print("Validation failed!")
print(str(ve))
return 1
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0.000001
|
@@ -3039,31 +3039,889 @@
ile.
-_read_index(src, group)
+get_index(src, group)%0A%0A if (first.ndim != 1) or (count.ndim != 1):%0A record(%0A %22Index first / count are not 1D%22,%0A first_shape=first.shape,%0A count_shape=count.shape,%0A )%0A continue%0A%0A if first.shape != count.shape:%0A record(%0A %22Index first & count have different number of entries%22,%0A first_shape=first.shape,%0A count_shape=count.shape,%0A )%0A%0A if first.shape != self.file.train_ids.shape:%0A record(%0A %22Index has wrong number of entries%22,%0A index_shape=first.shape,%0A trainids_shape=self.file.train_ids.shape,%0A )%0A
%0A
|
ce2cf07d9fa9dc3bdd229b1cbb56745784e3049d
|
Fix stray char.
|
law/sandbox/docker.py
|
law/sandbox/docker.py
|
# -*- coding: utf-8 -*-
"""
Docker sandbox implementation.
"""
__all__ = ["DockerSandbox"]
from law.sandbox.base import Sandbox
class DockerSandbox(Sandbox):
sandbox_type = "docker"
@property
def image(self):
return self.name
def cmd(self, task, task_cmd):
# get args for the docker command as configured in the task
docker_args = getattr(task, "docker_args", ["--rm"]):
if isinstance(docker_args, (list, tuple)):
docker_args = " ".join(str(arg) for arg in docker_args)
cmd = "docker run {docker_args} {image} \"{task_cmd}\""
cmd = cmd.format(docker_args=docker_args, image=self.image, task_cmd=task_cmd)
return cmd
|
Python
| 0
|
@@ -412,17 +412,16 @@
%22--rm%22%5D)
-:
%0A
|
33d817f78c1b4a1ce18247020caa8d5144f38210
|
modify score in annotation.qc.uniq() to include gene length
|
annotation/qc.py
|
annotation/qc.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Run quality control (QC) on gene annotation. MAKER output was used during
testing. Several aspects of annotation QC are implemented in this script.
- Trim UTRs. MAKER sometimes predict UTRs that extend into other genes.
- Remove overlapping models.
"""
import sys
from jcvi.formats.gff import Gff, get_piles, make_index, import_feats, \
populate_children
from jcvi.formats.base import must_open
from jcvi.utils.range import Range, range_minmax, range_chain
from jcvi.apps.base import OptionParser, ActionDispatcher
def main():
actions = (
('trimUTR', 'remove UTRs in the annotation set'),
('uniq', 'remove overlapping gene models'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def uniq(args):
"""
%prog uniq gffile
Remove overlapping gene models. Similar to formats.gff.uniq(), overlapping
'piles' are processed, one by one.
Here, we use a different algorithm, that retains the best non-overlapping
subset witin each pile, rather than single best model. Scoring function is
also different, rather than based on score or span, we optimize for the
subset that show the best EAD score.
"""
p = OptionParser(uniq.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gffile, = args
gff = Gff(gffile)
gene_register = {}
for g in gff:
if g.type != "mRNA":
continue
score = 100 - int(round(float(g.attributes["_AED"][0]) * 100))
gene_register[g.parent] = score
allgenes = import_feats(gffile)
g = get_piles(allgenes)
bestids = set()
for group in g:
ranges = [Range(x.seqid, x.start, x.end, \
gene_register[x.accn], x.accn) for x in group]
selected_chain, score = range_chain(ranges)
bestids |= set(x.id for x in selected_chain)
removed = set(x.accn for x in allgenes) - bestids
fw = open("removed.ids", "w")
print >> fw, "\n".join(sorted(removed))
fw.close()
populate_children(opts.outfile, bestids, gffile, "gene")
def get_cds_minmax(g, cid, level=2):
cds = [x for x in g.children(cid, level) if x.featuretype == "CDS"]
cdsranges = [(x.start, x.end) for x in cds]
return range_minmax(cdsranges)
def trim(c, start, end):
cstart, cend = c.start, c.end
# Trim coordinates for feature c based on overlap to start and end
c.start, c.end = max(cstart, start), min(cend, end)
if c.start != cstart or c.end != cend:
print >> sys.stderr, c.id, \
"[{0}, {1}] => [{2}, {3}]".format(cstart, cend, c.start, c.end)
else:
print >> sys.stderr, c.id, "no change"
def trimUTR(args):
"""
%prog trimUTR gffile
Remove UTRs in the annotation set.
"""
p = OptionParser(trimUTR.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gffile, = args
g = make_index(gffile)
gff = Gff(gffile)
mRNA_register = {}
fw = must_open(opts.outfile, "w")
for c in gff:
cid, ctype = c.accn, c.type
if ctype == "gene":
start, end = get_cds_minmax(g, cid)
trim(c, start, end)
elif ctype == "mRNA":
start, end = get_cds_minmax(g, cid, level=1)
trim(c, start, end)
mRNA_register[cid] = (start, end)
elif ctype != "CDS":
start, end = mRNA_register[c.parent]
trim(c, start, end)
if c.start > c.end:
print >> sys.stderr, cid, \
"destroyed [{0} > {1}]".format(c.start, c.end)
else:
print >> fw, c
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -454,16 +454,53 @@
st_open%0A
+from jcvi.formats.sizes import Sizes%0A
from jcv
@@ -869,24 +869,33 @@
uniq gffile
+ cdsfasta
%0A%0A Remove
@@ -1270,18 +1270,76 @@
est
-EAD score.
+combined score. Score is defined by:%0A%0A score = (1 - AED) * length
%0A
@@ -1447,33 +1447,33 @@
if len(args) !=
-1
+2
:%0A sys.ex
@@ -1499,32 +1499,41 @@
())%0A%0A gffile,
+ cdsfasta
= args%0A gff
@@ -1542,24 +1542,60 @@
Gff(gffile)%0A
+ sizes = Sizes(cdsfasta).mapping%0A
gene_reg
@@ -1685,32 +1685,14 @@
-score = 100 - int(round(
+aed =
floa
@@ -1721,16 +1721,8 @@
%5B0%5D)
- * 100))
%0A
@@ -1752,21 +1752,41 @@
rent%5D =
-score
+(1 - aed) * sizes%5Bg.accn%5D
%0A%0A al
|
699085edd1db5aa7a827a16ffffcbcc9a69cbf52
|
Add forgotten imports for bucketlist endpoints
|
app/endpoints.py
|
app/endpoints.py
|
from flask import request, Blueprint
from flask_restful import Api
from controllers.accounts_manager import LoginResource, RegisterResource
from controllers.bucketlist import GetAllBucketLists, GetBucketList
from controllers.bucketlist_items import BucketListItems
bucketlist_blueprint = Blueprint('bucket_list', __name__)
api = Api(bucketlist_blueprint)
# login routes
api.add_resource(RegisterResource, '/auth/register')
api.add_resource(LoginResource, '/auth/login')
# bucketlist routes
api.add_resource(BucketListsResource, '/bucketlists')
api.add_resource(BucketListResource, '/bucketlists/<int:id>')
# bucketlist items routes
api.add_resource(BucketListItems,
'/bucketlists/<int:bucketlist_id>/items',
'/bucketlists/<int:bucketlist_id>/items/<int:item_id>')
|
Python
| 0
|
@@ -173,14 +173,8 @@
ort
-GetAll
Buck
@@ -184,13 +184,18 @@
ists
-, Get
+Resource,
Buck
@@ -200,16 +200,24 @@
cketList
+Resource
%0Afrom co
|
6908060af5b872e54d42f63e580591931b7ff230
|
Check empty string
|
museum_site/scroll.py
|
museum_site/scroll.py
|
from django.db import models
class Scroll(models.Model):
# Constants
SCROLL_TOP = """```
╞╤══════════════════════════════════════════════╤╡
│ Scroll ### │
╞════════════════════════════════════════════════╡
│ • • • • • • • • •│"""
SCROLL_BOTTOM = """\n │ • • • • • • • • •│
╞╧══════════════════════════════════════════════╧╡```"""
# Fields
identifier = models.IntegerField()
content = models.TextField(
default="",
help_text="Lines starting with @ will be skipped. Initial whitespace is trimmed by DB, so an extra @ line is a fix."
)
source = models.CharField(max_length=160)
published = models.BooleanField(default=False)
suggestion = models.CharField(max_length=500, blank=True, default="")
class Meta:
ordering = ["-id"]
def __str__(self):
return "Scroll #{} ID:{} Pub:{}".format(self.identifier, self.id, self.published)
def lines(self):
return self.content.split("\n")
def render_for_discord(self):
lines = self.lines()
output = self.SCROLL_TOP.replace("###", ("000"+str(self.identifier))[-3:])
for line in lines:
line = line.replace("\r", "")
line = line.replace("\n", "")
if line[0] == "@":
continue
output += "\n │ " + (line + " " * 42)[:42] + " │ "
output += self.SCROLL_BOTTOM
return output
|
Python
| 0.026724
|
@@ -1324,16 +1324,25 @@
if
+line and
line%5B0%5D
|
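The fix in the row above guards against empty lines: on an empty string, `line[0]` raises IndexError, while `line and line[0] == "@"` short-circuits to a falsy value. For illustration:

```python
line = ""
# line[0] == "@"   # IndexError: string index out of range on an empty line
print(bool(line and line[0] == "@"))  # prints False; the check short-circuits
```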
43a348865dcc21e9d88ebf05fd794fed2b7b350c
|
Update suite
|
mx.irbuilder/suite.py
|
mx.irbuilder/suite.py
|
suite = {
"mxversion" : "5.70.2",
"name" : "java-llvm-ir-builder",
"versionConflictResolution" : "latest",
"imports" : {
"suites" : [
{
"name" : "sulong",
"version" : "38a5bad302f48d676f15a0b3fd9b02f6f3a8abdd",
"urls" : [
{
"url" : "https://github.com/pointhi/sulong",
"kind" : "git"
},
]
},
],
},
"javac.lint.overrides" : "none",
"projects" : {
"at.pointhi.irbuilder.irwriter" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"sulong:SULONG",
],
"checkstyle" : "at.pointhi.irbuilder.irwriter",
"javaCompliance" : "1.8",
"license" : "BSD-new",
},
"at.pointhi.irbuilder.test": {
"subDir": "projects",
"sourceDirs": ["src"],
"dependencies": [
"at.pointhi.irbuilder.irwriter",
"sulong:SULONG",
"sulong:SULONG_TEST",
"mx:JUNIT",
],
"checkstyle": "at.pointhi.irbuilder.irwriter",
"javaCompliance": "1.8",
"license": "BSD-new",
},
},
"distributions" : {
"IRWRITER" : {
"path" : "build/irwriter.jar",
"subDir" : "graal",
"sourcesPath" : "build/irbuilder.src.zip",
"mainClass" : "at.pointhi.irbuilder.irwriter.SourceParser",
"dependencies" : [
"at.pointhi.irbuilder.irwriter"
],
"distDependencies" : [
"sulong:SULONG",
]
},
"IRWRITER_TEST" : {
"path" : "build/irwriter_test.jar",
"subDir" : "graal",
"sourcesPath" : "build/irwriter_test.src.zip",
"dependencies" : [
"at.pointhi.irbuilder.test"
],
"exclude" : [
"mx:JUNIT"
],
"distDependencies" : [
"IRWRITER",
"sulong:SULONG",
"sulong:SULONG_TEST",
]
},
}
}
|
Python
| 0.000001
|
@@ -234,48 +234,48 @@
: %22
-38a5bad302f48d676f15a0b3fd9b02f6f3a8abdd
+f25a652b20e9c2c7d99fbd3844b64a44da5547a6
%22,%0A
|
e00b7c612f34c938a3d42dada006874ffea021c8
|
complete localizer
|
app/localizer.py
|
app/localizer.py
|
# -*- coding: utf-8 -*-
"""
localizer
localize bounding boxes and pad rest of image with zeros (255, 255, 255)
"""
import os
import cv2
import numpy as np
from app.cv.serializer import deserialize_json
from app.settings import CV_SAMPLE_PATH, BOUNDINGBOX
test_image = CV_SAMPLE_PATH + 'pos/img_00003.jpg'
class Localizer(object):
def __init__(self, path_to_image):
self.image = cv2.imread(path_to_image, -1)
self.fname = os.path.split(path_to_image)[1]
self.bboxes = \
deserialize_json(BOUNDINGBOX)[self.fname]['annotations']
@property
def factory(self):
"""yield bounding boxes"""
for bbox in self.bboxes:
x = int(bbox['x'])
y = int(bbox['y'])
height = int(bbox['height'])
width = int(bbox['width'])
yield x, x + width, y, y + height
def new_image(self):
background = np.zeros(shape=self.image.shape)
# highlight image with (1, 1, 1) on background of zeros
for x, x_end, y, y_end in self.factory:
background[x: x_end, y: y_end] = [1, 1, 1]
# mirrir original image's bounding boxes into new
self.output_image = np.mutiply(self.image, background)
def show(self):
cv2.imshow("Display window", self.output_image)
cv2.waitKey(0)
# # image read as it is in as BGR
# image = cv2.imread(test_image, -1)
# b = image[2: 10, 3: 11, :]
# print(b)
# c = np.zeros(shape=(8, 8, 3))
# c[3, 3] = (1, 1, 1)
# d = np.multiply(b, c)
# print(d)
|
Python
| 0.000001
|
@@ -153,226 +153,329 @@
np%0A
-%0Afrom app.cv.serializer import deserialize_json%0Afrom app.settings import CV_SAMPLE_PATH, BOUNDINGBOX%0A%0Atest_image = CV_SAMPLE_PATH + 'pos/img_00003.jpg'%0A%0A%0Aclass Localizer(object):%0A%0A def __init__(self, path_to_image):
+import multiprocessing as mp%0A%0Afrom app.pipeline import generate_data_skeleton%0Afrom app.cv.serializer import deserialize_json%0Afrom app.settings import BOUNDINGBOX, IMAGE_PATH%0A%0A%0Aclass Localizer(object):%0A%0A def __init__(self, path_to_image):%0A # cv2 loads image in BGR channel order%0A self.path = path_to_image
%0A
@@ -575,16 +575,34 @@
age)%5B1%5D%0A
+%0A try:%0A
@@ -617,16 +617,20 @@
xes = %5C%0A
+
@@ -689,16 +689,108 @@
ations'%5D
+%0A except IndexError:%0A self.bboxes = None%0A%0A self.output_image = None
%0A%0A @p
@@ -805,16 +805,28 @@
def
+coordinates_
factory(
@@ -1101,17 +1101,17 @@
def
-new_image
+declutter
(sel
@@ -1122,26 +1122,28 @@
-background
+filter_layer
= np.ze
@@ -1234,16 +1234,44 @@
f zeros%0A
+ if self.bboxes:%0A
@@ -1301,16 +1301,28 @@
in self.
+coordinates_
factory:
@@ -1338,52 +1338,67 @@
-background%5Bx: x
+ filter_layer%5By: y
_end,
-y: y
+x: x
_end
+, :
%5D =
-%5B1
+(1.
, 1
+.
, 1
-%5D%0A%0A
+.)%0A
@@ -1407,64 +1407,189 @@
#
-mirrir original image's bounding boxes into new%0A
+elementwise multiplication of filter layer and original image%0A self.output_image = cv2.convertScaleAbs(self.image * filter_layer)%0A elif not self.bboxes:%0A
self
@@ -1576,32 +1576,34 @@
oxes:%0A
+
self.output_imag
@@ -1610,42 +1610,38 @@
e =
-np.mutiply(self.image, background)
+self.image%0A return self
%0A%0A
@@ -1682,22 +1682,14 @@
ow(%22
-Display window
+output
%22, s
@@ -1734,205 +1734,320 @@
0)%0A%0A
-%0A# # image read as it is in as BGR%0A# image = cv2.imread(test_image, -1)%0A# b = image%5B2: 10, 3: 11, :%5D%0A# print(b)%0A# c = np.zeros(shape=(8, 8, 3))%0A# c%5B3, 3%5D = (1, 1, 1)%0A# d = np.multiply(b, c)%0A# print(d
+ def write(self):%0A print('writing %7B%7D'.format(self.path))%0A cv2.imwrite(self.path, self.output_image)%0A%0A%0Adef localize(path_to_image):%0A Localizer(path_to_image).declutter().write()%0A%0A%0Apaths_to_images = generate_data_skeleton(IMAGE_PATH)%5B0%5D%0A%0Awith mp.Pool(10) as p:%0A p.map(localize, paths_to_images
)%0A
|
73b9246164994049d291d5b482d4dbf2ca41a124
|
Rename master branch to main
|
tests/app/test_accessibility_statement.py
|
tests/app/test_accessibility_statement.py
|
import re
import subprocess
from datetime import datetime
def test_last_review_date():
statement_file_path = "app/templates/views/accessibility_statement.html"
# test local changes against master for a full diff of what will be merged
statement_diff = subprocess.run(
[f"git diff --exit-code origin/master -- {statement_file_path}"], stdout=subprocess.PIPE, shell=True
)
# if statement has changed, test the review date was part of those changes
if statement_diff.returncode == 1:
raw_diff = statement_diff.stdout.decode("utf-8")
today = datetime.now().strftime("%d %B %Y")
with open(statement_file_path, "r") as statement_file:
current_review_date = re.search(
(r'"Last updated": "(\d{1,2} [A-Z]{1}[a-z]+ \d{4})"'), statement_file.read()
).group(1)
# guard against changes that don't need to update the review date
if current_review_date != today:
assert '"Last updated": "' in raw_diff
|
Python
| 0.999013
|
@@ -195,20 +195,18 @@
ainst ma
-ster
+in
for a f
@@ -319,12 +319,10 @@
n/ma
-ster
+in
--
|
bb3208c3a60de65a7288e494e718e340b55ad27c
|
Update bad path test
|
tests/commands/load/test_load_case_cmd.py
|
tests/commands/load/test_load_case_cmd.py
|
# -*- coding: utf-8 -*-
import os
import tempfile
import pytest
from scout.commands import cli
from scout.demo import load_path, ped_path
from scout.parse import case
from scout.server.extensions import store
def test_load_case_no_yaml_no_ped(mock_app, institute_obj):
"""Test loading a case into scout without any config file"""
case_owner = institute_obj["_id"]
# WHEN case is loaded without any config file
runner = mock_app.test_cli_runner()
result = runner.invoke(cli, ["load", "case", "--owner", case_owner])
# THEN it should return error
assert result.exit_code != 0
assert "Please provide either scout config or ped" in result.output
def test_load_case_from_ped(mock_app, institute_obj, case_obj):
"""Test loading a case into scout from a ped file. It requires providing case genome build in the prompt."""
# GIVEN a database with no cases
store.delete_case(case_id=case_obj["_id"])
assert store.case_collection.find_one() is None
case_owner = institute_obj["_id"]
# WHEN case is loaded using a ped file it will also require a genome build
runner = mock_app.test_cli_runner()
result = runner.invoke(
cli, ["load", "case", "--owner", case_owner, "--ped", ped_path], input="37"
)
# THEN case should be saved correctly
assert result.exit_code == 0
case_obj = store.case_collection.find_one()
# WITH the expected genome build
assert case_obj["genome_build"] == 37
def test_load_case_from_yaml(mock_app, institute_obj, case_obj):
"""Testing the scout load case command"""
runner = mock_app.test_cli_runner()
assert runner
# remove case from real populated database using adapter
store.delete_case(case_id=case_obj["_id"])
assert store.case_collection.find_one() is None
res = store.institute_collection.find({"_id": "cust000"})
assert sum(1 for i in res) == 1
# Make sure the scout config file is available
assert os.path.exists(load_path)
# Test command to upload case using demo resources:
result = runner.invoke(cli, ["load", "case", load_path])
assert result.exit_code == 0
assert sum(1 for i in store.case_collection.find()) == 1
def test_load_case_KeyError(mock_app, institute_obj, case_obj, monkeypatch):
"""Test loading a case with a config file that will trigger keyError"""
runner = mock_app.test_cli_runner()
# GIVEN a patched scout add_smn_info function that will raise KeyError
def mock_smn_info(*args):
raise KeyError
monkeypatch.setattr(case, "add_smn_info", mock_smn_info)
# GIVEN a database with no cases
store.delete_case(case_id=case_obj["_id"])
assert store.case_collection.find_one() is None
# WHEN case is loaded using a a yaml file
runner = mock_app.test_cli_runner()
result = runner.invoke(cli, ["load", "case", load_path])
# THEN it should trigger KeyError
assert result.exit_code == 1
assert "KeyError" in result.output
def test_load_case_SyntaxError(mock_app, institute_obj, case_obj, monkeypatch):
"""Test loading a case with a config file that will trigger KeyError"""
runner = mock_app.test_cli_runner()
# GIVEN a patched `parse_case` function that will raise KeyError
def mock_parse_case(*args):
raise SyntaxError
monkeypatch.setattr(case, "add_smn_info", mock_parse_case)
# GIVEN a database with no cases
store.delete_case(case_id=case_obj["_id"])
assert store.case_collection.find_one() is None
# WHEN case is loaded using a a yaml file
runner = mock_app.test_cli_runner()
result = runner.invoke(cli, ["load", "case", load_path])
# THEN call will fail with KeyError
assert result.exit_code == 1
assert "SyntaxError" in result.output
def test_load_case_KeyMissing(mock_app, institute_obj, case_obj):
# GIVEN a config setup with 'sample_id' missing
runner = mock_app.test_cli_runner()
assert runner
# remove case from real populated database using adapter
store.delete_case(case_id=case_obj["_id"])
assert store.case_collection.find_one() is None
res = store.institute_collection.find({"_id": "cust000"})
assert sum(1 for i in res) == 1
# Make sure the scout config file is available
assert os.path.exists(load_path)
temp_conf = os.path.join(tempfile.gettempdir(), "temp.conf")
content = []
with open(load_path) as f:
content = f.readlines()
# Remove a mandatory key value from config value content
content.remove("family: 'internal_id'\n")
with open(temp_conf, mode="wt") as f:
f.write("".join(content))
# WHEN: config is loaded
result = runner.invoke(cli, ["load", "case", temp_conf])
# THEN KeyError is caught and exit value is non-zero
assert result.exit_code != 0
def test_load_case_NoConf(mock_app, institute_obj, case_obj):
# GIVEN a load command missing path to config
runner = mock_app.test_cli_runner()
assert runner
# remove case from real populated database using adapter
store.delete_case(case_id=case_obj["_id"])
assert store.case_collection.find_one() is None
res = store.institute_collection.find({"_id": "cust000"})
assert sum(1 for i in res) == 1
no_load_path = ""
# WHEN load command is run
result = runner.invoke(cli, ["load", "case", no_load_path])
# THEN error in exit status
assert result.exit_code != 0
def test_load_case_BadPath(mock_app, institute_obj, case_obj):
# GIVEN a config setup with an incorrect path configured
runner = mock_app.test_cli_runner()
assert runner
# remove case from real populated database using adapter
store.delete_case(case_id=case_obj["_id"])
assert store.case_collection.find_one() is None
res = store.institute_collection.find({"_id": "cust000"})
assert sum(1 for i in res) == 1
# Make sure the scout config file is available
assert os.path.exists(load_path)
temp_conf = os.path.join(tempfile.gettempdir(), "temp.conf")
content = []
with open(load_path) as f:
content = f.readlines()
# Remove a mandatory key value from config value content
content.remove("vcf_snv: scout/demo/643594.clinical.vcf.gz\n")
content.append("vcf_snv: scout/demo/incorrect_path/643594.clinical.vcf.gz\n")
with open(temp_conf, mode="wt") as f:
f.write("".join(content))
# WHEN: config is loaded
result = runner.invoke(cli, ["load", "case", temp_conf])
# THEN KeyError is caught and exit value is non-zero
assert result.exit_code == 1
assert "Exception: bad path" in result.output
|
Python
| 0.000001
|
@@ -6592,26 +6592,20 @@
t %22E
-xception: bad path
+rror opening
%22 in
|
561bf654507ab0ed3176fb4bc1b9e0976ff7c72a
|
Make whitespace flake8 compliant
|
tests/integration/long/test_large_data.py
|
tests/integration/long/test_large_data.py
|
import Queue
from struct import pack
import unittest
import cassandra
from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.decoder import dict_factory
from cassandra.query import SimpleStatement
from tests.integration.long.utils import create_schema
# Converts an integer to an string of letters
def create_column_name(i):
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
column_name = ''
while True:
column_name += letters[i % 10]
i /= 10
if not i:
break
return column_name
class LargeDataTests(unittest.TestCase):
def setUp(self):
self.keyspace = 'large_data'
def wide_rows(self, session, table, key):
# Write via async futures
futures = Queue.Queue(maxsize=121)
for i in range(100000):
if i > 0 and i % 120 == 0:
# clear the existing queue
while True:
try:
futures.get_nowait().result()
except Queue.Empty:
break
statement = SimpleStatement('INSERT INTO %s (k, i) VALUES (%s, %s)'
% (table, key, i),
consistency_level=ConsistencyLevel.QUORUM)
future = session.execute_async(statement)
futures.put_nowait(future)
while True:
try:
futures.get_nowait().result()
except Queue.Empty:
break
# Read
results = session.execute('SELECT i FROM %s WHERE k=%s' % (table, key))
# Verify
for i, row in enumerate(results):
self.assertEqual(row['i'], i)
def wide_batch_rows(self, session, table, key):
# Write
statement = 'BEGIN BATCH '
for i in range(2000):
statement += 'INSERT INTO %s (k, i) VALUES (%s, %s) ' % (table, key, i)
statement += 'APPLY BATCH'
statement = SimpleStatement(statement, consistency_level=ConsistencyLevel.QUORUM)
session.execute(statement)
# Read
results = session.execute('SELECT i FROM %s WHERE k=%s' % (table, key))
# Verify
i = 0
for row in results:
self.assertEqual(row['i'], i)
i += 1
def wide_byte_rows(self, session, table, key):
# Build small ByteBuffer sample
bb = '0xCAFE'
# Write
for i in range(1000000):
statement = SimpleStatement('INSERT INTO %s (k, i, v) VALUES (%s, %s, %s)'
% (table, key, i, str(bb)),
consistency_level=ConsistencyLevel.QUORUM)
session.execute(statement)
# Read
results = session.execute('SELECT i FROM %s WHERE k=%s' % (table, key))
# Verify
bb = pack('>H', 0xCAFE)
i = 0
for row in results:
self.assertEqual(row['i'], bb)
i += 1
def large_text(self, session, table, key):
# Create ultra-long text
text = 'a' * 1000000
# Write
session.execute(SimpleStatement("INSERT INTO %s (k, txt) VALUES (%s, '%s')"
% (table, key, text),
consistency_level=ConsistencyLevel.QUORUM))
# Read
result = session.execute('SELECT * FROM %s WHERE k=%s' % (table, key))
# Verify
for row in result:
self.assertEqual(row['txt'], text)
def wide_table(self, session, table, key):
# Write
insert_statement = 'INSERT INTO %s (key, '
insert_statement += ', '.join(create_column_name(i) for i in range(330))
insert_statement += ') VALUES (%s, '
insert_statement += ', '.join(str(i) for i in range(330))
insert_statement += ')'
insert_statement = insert_statement % (table, key)
session.execute(SimpleStatement(insert_statement, consistency_level=ConsistencyLevel.QUORUM))
# Read
result = session.execute('SELECT * FROM %s WHERE key=%s' % (table, key))
# Verify
for row in result:
for i in range(330):
self.assertEqual(row[create_column_name(i)], i)
def test_wide_rows(self):
table = 'wide_rows'
cluster = Cluster()
session = cluster.connect()
session.row_factory = dict_factory
create_schema(session, self.keyspace)
session.execute('CREATE TABLE %s (k INT, i INT, PRIMARY KEY(k, i))' % table)
self.wide_rows(session, table, 0)
def test_wide_batch_rows(self):
table = 'wide_batch_rows'
cluster = Cluster()
session = cluster.connect()
session.row_factory = dict_factory
create_schema(session, self.keyspace)
session.execute('CREATE TABLE %s (k INT, i INT, PRIMARY KEY(k, i))' % table)
self.wide_batch_rows(session, table, 0)
def test_wide_byte_rows(self):
table = 'wide_byte_rows'
cluster = Cluster()
session = cluster.connect()
session.row_factory = dict_factory
create_schema(session, self.keyspace)
session.execute('CREATE TABLE %s (k INT, i INT, v BLOB, PRIMARY KEY(k, i))' % table)
self.wide_byte_rows(session, table, 0)
def test_large_text(self):
table = 'large_text'
cluster = Cluster()
session = cluster.connect()
session.row_factory = dict_factory
create_schema(session, self.keyspace)
session.execute('CREATE TABLE %s (k int PRIMARY KEY, txt text)' % table)
self.large_text(session, table, 0)
def test_wide_table(self):
table = 'wide_table'
cluster = Cluster()
session = cluster.connect()
session.row_factory = dict_factory
create_schema(session, self.keyspace)
table_declaration = 'CREATE TABLE %s (key INT PRIMARY KEY, '
table_declaration += ' INT, '.join(create_column_name(i) for i in range(330))
table_declaration += ' INT)'
session.execute(table_declaration % table)
self.wide_table(session, table, 0)
|
Python
| 0.00578
|
@@ -4319,19 +4319,16 @@
)%5D, i)%0A%0A
-%0A%0A%0A
def
@@ -4653,33 +4653,32 @@
ion, table, 0)%0A%0A
-%0A
def test_wid
@@ -5013,33 +5013,32 @@
ion, table, 0)%0A%0A
-%0A
def test_wid
@@ -5727,17 +5727,16 @@
le, 0)%0A%0A
-%0A
def
|
5f5bdcf5c6b6fb70dc94945d463c5200a46699d6
|
revert unfinished task test
|
tests/integration/unfinished_task_test.py
|
tests/integration/unfinished_task_test.py
|
# third party
import pytest
# syft absolute
import syft as sy
from syft.core.node.common.action.save_object_action import SaveObjectAction
from syft.core.store.storeable_object import StorableObject
@pytest.mark.general
def test_unfinished_task(get_clients) -> None:
print("running test_unfinished_task")
client = get_clients(1)[0]
list_pointer = sy.lib.python.List().send(client)
int_pointer = sy.lib.python.Int(1).send(client)
int_pointer.block_with_timeout(secs=10)
int_obj = int_pointer.get()
list_pointer.append(int_pointer)
storeable_object = StorableObject(id=int_pointer.id_at_location, data=int_obj)
save_object_action = SaveObjectAction(obj=storeable_object, address=client.address)
client.send_immediate_msg_without_reply(msg=save_object_action)
list_pointer.block_with_timeout(secs=10)
assert list_pointer.get() == [1]
|
Python
| 0.000167
|
@@ -1,8 +1,30 @@
+# stdlib%0Aimport time%0A%0A
# third
@@ -472,46 +472,20 @@
-int_pointer.block_with_timeout(secs=10
+time.sleep(5
)%0A
@@ -798,47 +798,20 @@
-list_pointer.block_with_timeout(secs=10
+time.sleep(5
)%0A
|
da3a4e8036a5933a9ce00f42795c8ca398925c38
|
Update geogig_init_repo.py
|
lib/rogue/geogig_init_repo.py
|
lib/rogue/geogig_init_repo.py
|
from base64 import b64encode
from optparse import make_option
import json
import urllib
import urllib2
import argparse
import time
import os
import subprocess
#==#
import _geogig_init_repo
#==#
parser = argparse.ArgumentParser(description='Initialize GeoGig repository and optionally add to GeoServer instance. If you want to add the GeoGig repo include the optional parameters.')
parser.add_argument("--path", help="The location in the filesystem of the Geogig repository.")
parser.add_argument("--name", help="The name of the GeoGig repo and data store in GeoServer.")
parser.add_argument("--geoserver", help="The url of the GeoServer servicing the GeoGig repository.")
parser.add_argument("--workspace", help="The GeoServer workspace to use for the data store.")
#parser.add_argument("--path", help="The location in the filesystem of the Geogig repository.")
parser.add_argument("--username", help="The username to use for basic auth requests.")
parser.add_argument("--password", help="The password to use for basic auth requests.")
parser.add_argument('--verbose', '-v', default=0, action='count', help="Print out intermediate status messages.")
parser.add_argument("--publish_datastore", default=0, action='count', help="Publish datastore in GeoServer for GeoGig repository")
parser.add_argument('--publish_layers', default=0, action='count', help="Publish layers from GeoGig data store")
args = parser.parse_args()
#==#
_geogig_init_repo.run(args)
|
Python
| 0.000001
|
@@ -585,17 +585,24 @@
rgument(
-%22
+'-gs', '
--geoser
@@ -604,17 +604,17 @@
eoserver
-%22
+'
, help=%22
@@ -693,17 +693,24 @@
rgument(
-%22
+'-ws', '
--worksp
@@ -712,17 +712,17 @@
orkspace
-%22
+'
, help=%22
|
64cbe20e2a415d4ee294862acc02a6a7682d7af3
|
Isolate memcache test from network
|
tests/nydus/db/backends/memcache/tests.py
|
tests/nydus/db/backends/memcache/tests.py
|
from __future__ import absolute_import
from tests import BaseTest
from nydus.db import create_cluster
from nydus.db.base import BaseCluster
from nydus.db.backends.memcache import Memcache
import mock
import pylibmc
class MemcacheTest(BaseTest):
def setUp(self):
self.memcache = Memcache(num=0)
def test_provides_retryable_exceptions(self):
self.assertEquals(Memcache.retryable_exceptions, frozenset([pylibmc.Error]))
def test_provides_identifier(self):
self.assertEquals(self.memcache.identifier, str(self.memcache.identifier))
@mock.patch('pylibmc.Client')
def test_client_instantiates_with_kwargs(self, Client):
client = Memcache(num=0)
client.connect()
self.assertEquals(Client.call_count, 1)
Client.assert_any_call(['localhost:11211'], binary=True, behaviors=None)
def test_with_cluster(self):
p = BaseCluster(hosts={0: self.memcache})
self.assertEquals(p.get('MemcacheTest_with_cluster'), None)
@mock.patch('pylibmc.Client')
def test_map_does_pipeline(self, Client):
cluster = create_cluster({
'engine': 'nydus.db.backends.memcache.Memcache',
'router': 'nydus.db.routers.RoundRobinRouter',
'hosts': {
0: {'binary': True},
1: {'binary': True},
2: {'binary': True},
3: {'binary': True},
}
})
with cluster.map() as conn:
conn.set('a', 1)
conn.set('b', 2)
conn.set('c', 3)
conn.set('d', 4)
conn.set('e', 5)
conn.set('f', 6)
conn.set('g', 7)
self.assertEqual(Client().set.call_count, 7)
self.assertEqual(Client.call_count, 5)
self.assertEqual(len(conn.get_results()), 7)
@mock.patch('pylibmc.Client')
def test_pipeline_get_multi(self, Client):
cluster = create_cluster({
'engine': 'nydus.db.backends.memcache.Memcache',
'router': 'nydus.db.routers.RoundRobinRouter',
'hosts': {
0: {'binary': True},
1: {'binary': True},
}
})
keys = ['a', 'b', 'c', 'd', 'e', 'f']
with cluster.map() as conn:
for key in keys:
conn.get(key)
self.assertEqual(len(conn.get_results()), len(keys))
self.assertEqual(Client().get.call_count, 0)
# Note: This is two because it should execute the command once for each
# of the two servers.
self.assertEqual(Client().get_multi.call_count, 2)
|
Python
| 0.000001
|
@@ -846,24 +846,62 @@
iors=None)%0A%0A
+ @mock.patch('pylibmc.Client.get')%0A
def test
@@ -914,24 +914,29 @@
cluster(self
+, get
):%0A p
@@ -988,65 +988,166 @@
-self.assertEquals(p.get('MemcacheTest_with_cluster'), Non
+result = p.get('MemcacheTest_with_cluster')%0A get.assert_called_once_with('MemcacheTest_with_cluster')%0A self.assertEquals(result, get.return_valu
e)%0A%0A
|
a19a52a42486eaa8e849d2f0a175f9a76497029d
|
bump version number
|
intercom/__init__.py
|
intercom/__init__.py
|
__version__ = "0.0.6"
|
Python
| 0.000004
|
@@ -16,6 +16,7 @@
0.0.
-6%22
+7%22%0A
|
4324418262824f59e9b38dc01673f694d434f7d4
|
add check
|
lesscpy/plib/call.py
|
lesscpy/plib/call.py
|
"""
"""
import re
from urllib.parse import quote as urlquote
from .node import Node
import lesscpy.lessc.utility as utility
import lesscpy.lessc.color as Color
class Call(Node):
def parse(self, scope):
name = ''.join(self.tokens.pop(0))
parsed = self.process(self.tokens, scope)
if name == '%(':
name = 'sformat'
elif name == '~':
name = 'e'
color = Color.Color()
args = [t for t in parsed
if type(t) is not str or t not in '(),']
if hasattr(self, name):
try:
return getattr(self, name)(*args)
except ValueError:
pass
if hasattr(color, name):
try:
return getattr(color, name)(*args)
except ValueError:
pass
return name + ''.join([p for p in parsed])
def e(self, string):
""" Less Escape.
@param string: value
@return string
"""
return utility.destring(string.strip('~'))
def sformat(self, *args):
""" String format
@param list: values
@return string
"""
format = args[0]
items = []
m = re.findall('(%[asdA])', format)
i = 1
for n in m:
v = {
'%d' : int,
'%A' : urlquote,
'%s' : utility.destring,
}.get(n, str)(args[i])
items.append(v)
i += 1
format = format.replace('%A', '%s')
return format % tuple(items)
def increment(self, v):
""" Increment function
@param Mixed: value
@return: incremented value
"""
n, u = utility.analyze_number(v)
return utility.with_unit(n+1, u)
def decrement(self, v):
""" Decrement function
@param Mixed: value
@return: incremented value
"""
n, u = utility.analyze_number(v)
return utility.with_unit(n-1, u)
def add(self, *args):
""" Add integers
@param list: values
@return: int
"""
return sum([int(v) for v in args])
def round(self, v):
""" Round number
@param Mixed: value
@return: rounded value
"""
n, u = utility.analyze_number(v)
return utility.with_unit(round(float(n)), u)
|
Python
| 0
|
@@ -196,24 +196,56 @@
lf, scope):%0A
+ if not self.parsed:%0A
name
@@ -275,16 +275,20 @@
pop(0))%0A
+
@@ -333,24 +333,28 @@
pe)%0A
+
+
if name == '
@@ -358,16 +358,20 @@
= '%25(':%0A
+
@@ -391,16 +391,20 @@
format'%0A
+
@@ -425,32 +425,36 @@
~':%0A
+
name = 'e'%0A
@@ -448,16 +448,20 @@
e = 'e'%0A
+
@@ -490,16 +490,20 @@
+
+
args = %5B
@@ -521,16 +521,20 @@
parsed %0A
+
@@ -582,32 +582,36 @@
'(),'%5D%0A
+
+
if hasattr(self,
@@ -622,32 +622,36 @@
e):%0A
+
try:%0A
@@ -647,32 +647,36 @@
+
+
return getattr(s
@@ -685,32 +685,36 @@
f, name)(*args)%0A
+
exce
@@ -736,32 +736,36 @@
+
pass%0A if
@@ -753,32 +753,36 @@
pass%0A
+
+
if hasattr(color
@@ -794,32 +794,36 @@
e):%0A
+
try:%0A
@@ -819,32 +819,36 @@
+
+
return getattr(c
@@ -870,32 +870,36 @@
gs)%0A
+
except ValueErro
@@ -893,32 +893,36 @@
ept ValueError:%0A
+
@@ -934,22 +934,33 @@
-return
+ self.parsed =
name +
@@ -988,16 +988,43 @@
arsed%5D)%0A
+ return self.parsed%0A
%0A
|
53a6446064fc055313e2377773242773f3c75870
|
Remove worthless logging
|
investor/Investor.py
|
investor/Investor.py
|
#!/usr/bin/env python3
import datetime
import json
import requests
import logging
import time
# investor imports
from investor import LoanFilter
from investor import Loan
class Investor:
'A simple class to interact with your LendingClub account'
def __init__(self, iid, authKey, investAmt=25, productionMode=False):
self.iid = iid
self.headers = { 'Authorization' : authKey, 'Accept' : 'application/json', 'Content-type' : 'application/json' }
self.endpoint_root = 'https://api.lendingclub.com/api/investor/v1/'
self.investAmt = investAmt
self.productionMode = productionMode
self.logger = logging.getLogger(__name__)
self.time_delay = datetime.timedelta(seconds=1) # We must wait one second between requests
self.last_request_ts = datetime.datetime.min # No requests have been made yet
self.filters = []
self.my_note_ids = [ x['loanId'] for x in self.get_notes_owned() ]
def __set_ts(self):
self.last_request_ts = datetime.datetime.now()
return
def __get_ts(self):
return self.last_request_ts
def __execute_delay(self):
cur_time = datetime.datetime.now()
delta = cur_time - self.__get_ts()
if delta < self.time_delay:
# Round up sleep time to the nearest second
sleep_time = (delta + datetime.timedelta(milliseconds=999)).seconds
time.sleep(sleep_time)
return
def __execute_get(self, url):
self.__execute_delay()
endpoint = self.endpoint_root + url
response = requests.get(endpoint, headers=self.headers)
self.logger.debug('Endpoint: %s\nHeaders: %s' % (endpoint, self.headers))
self.__set_ts()
if not response:
self.logger.error('Error occurred during GET: %s\n HTTP response: %s' % (url, response.status_code))
return response.text
def __execute_post(self, url, payload=None):
self.__execute_delay()
endpoint = self.endpoint_root + url
response = requests.post(endpoint, data=payload, headers=self.headers)
self.logger.debug('Endpoint: %s\nData: %s\nHeaders: %s' % (endpoint, payload, self.headers))
self.__set_ts()
if not response:
self.logger.error('Error occurred during POST: %s\n HTTP response: %s' % (url, response.status_code))
return response.text
def __apply_filters(self, loans):
# First, filter out loans we already own
num_loans = len(loans)
loans = [ loan for loan in loans if loan['id'] not in self.my_note_ids ]
if num_loans != len(loans):
self.logger.info('Filtering out loan(s) already invested in')
# Second, apply user defined filters
for f in self.filters:
loans = [ loan for loan in loans if f.apply(loan) ]
return loans
def __get_loans(self, showAll=False):
loans = []
listings_json = self.__execute_get('loans/listing?showAll=%s' % (showAll))
try:
raw_loans = json.loads(listings_json)['loans']
loans = [ Loan.Loan(raw_loan) for raw_loan in raw_loans ]
except:
# Key error, most likely
self.logger.warning('Loan retrieval failed. Response text:\n -- %s' % (listings_json))
return loans
def add_filters(self, filters):
self.filters.extend(filters)
def test_filters(self):
loans = self.__get_loans(showAll=True)
for f in self.filters:
self.logger.info('Testing filter: %s' % (f))
for l in loans:
f.apply(l)
def get_cash(self):
cash = self.__execute_get('accounts/%s/availablecash' % (self.iid))
if not cash:
return 0
return json.loads(cash)['availableCash']
def get_new_loans(self, showAll=False):
for _ in range(1,140):
loans = self.__get_loans(showAll)
loans = self.__apply_filters(loans)
if len(loans):
return loans
return []
def get_notes_owned(self):
mynotes = self.__execute_get('accounts/%s/notes' % (self.iid))
if mynotes:
return [ Loan.Loan(raw_loan) for raw_loan in json.loads(mynotes)['myNotes'] ]
else:
self.logger.warning('Error retrieving owned notes: %s' % (mynotes))
return None
def submit_order(self, loans, portfolio=None):
if self.productionMode:
# Portfolio parameter can either be a dictionary or portfolio ID
portfolioId = None
if isinstance(portfolio, dict):
portfolioId = portfolio['portfolioId']
elif isinstance(portfolio, str):
portfolioId = portfolio
elif portfolio is not None:
self.logger.error('Invalid portfolio type passed to submit_order()')
            # Construct the order payload
loan_dict = [ { 'loanId' : loan['id'], 'requestedAmount' : self.investAmt } for loan in loans ]
if portfolioId:
for loan in loans:
loan.update({ 'portfolioId' : portfolioId })
order = json.dumps({ "aid" : self.iid, "orders" : loan_dict })
return self.__execute_post('accounts/%s/orders' % (self.iid), payload=order)
else:
self.logger.info('Running in test mode. Skipping loan order')
return None
def add_funds(self, amount):
if self.productionMode:
payload = json.dumps({ 'amount' : amount, 'transferFrequency' : 'LOAD_NOW' })
return self.__execute_post('accounts/%s/funds/add' % (self.iid), payload=payload)
else:
self.logger.info('Running in test mode. Skipping money transfer.')
return None
def get_pending_transfers(self):
xfers = json.loads(self.__execute_get('accounts/%s/funds/pending' % (self.iid)))
if 'transfers' in xfers:
return xfers['transfers']
else:
return []
def get_portfolios(self):
portfolios = json.loads(self.__execute_get('accounts/%s/portfolios' % (self.iid)))
try:
return portfolios['myPortfolios']
except KeyError:
return []
def get_portfolio(self, name, create=False):
# Return requested portfolio, if it exists
portfolios = self.get_portfolios()
for p in portfolios:
if p['portfolioName'] == name:
return p
# Portfolio doesn't exist
if create:
return self.create_portfolio(name)
def create_portfolio(self, portfolioName, portfolioDescription=None):
if self.productionMode:
payload = json.dumps({ 'aid' : self.iid, 'portfolioName' : portfolioName, 'portfolioDescription' : portfolioDescription })
return self.__execute_post('accounts/%d/portfolios' % (self.iid), payload=payload)
else:
self.logger.info('Running in test mode. Skipping portfolio creation.')
return None
|
Python
| 0.000001
|
@@ -2330,103 +2330,8 @@
ds %5D
-%0A%09%09if num_loans != len(loans):%0A%09%09%09self.logger.info('Filtering out loan(s) already invested in')
%0A%0A%09%09
|
b4abd0045178f3368fb1ddc0ba5b96094c933c22
|
Verify domain exists before scanning
|
musubi/scan.py
|
musubi/scan.py
|
"""
Scan multiple DNSBLs for an IP address or domain.
Copyright (c) 2012, Rob Cakebread
All rights reserved.
If you give the domain, musubi will try to find all your IP addresses
for each mail server by querying MX DNS records and then doing a lookup
for the IPs. If your mail server uses round-robin DNS, this of course
won't find all the IPs. You must find out the IP CIDR range and then
give that, e.g.
musubi scan 192.0.64.0/24
"""
import logging
import dns
from cliff.lister import Lister
from .dnsbl import Base
from IPy import IP
import requests
from .netdns import get_mx_hosts, ips_from_domains, get_txt, build_query, \
net_calc
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
DNSBL_LIST = 'http://musubi.cakebread.info/dnsbl.txt'
# Try to get list of working DNSBLs checked hourly, experimental.
# TODO Add options to use local list, pipe in, etc.
req = requests.get(DNSBL_LIST)
if req.status_code == 200:
BASE_DNSBLS = req.text.split()
else:
from .dnsbllist import BASE_DNSBLS
class Scan(Lister):
"""Scan multiple DNSBLs by IP or domain"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(Scan, self).get_parser(prog_name)
parser.add_argument('scan', nargs='?', default=None)
return parser
def dnsbl_check(self, ip):
backend = Base(ip=ip, dnsbls=BASE_DNSBLS)
return backend.check()
def dnsbl_scanner(self, rdata, ip):
for dnsbl, blacklisted in self.dnsbl_check(ip):
# Scan.log.debug('Testing: %s' % dnsbl)
if blacklisted:
Scan.log.debug('blacklisted: %s' % dnsbl)
try:
query = build_query(ip, dnsbl)
txt = get_txt(query)[0]
except dns.resolver.NoAnswer:
Scan.log.debug("No TXT record for %s" % query)
rdata.append(
(ip,
dnsbl,
blacklisted,
txt,)
)
return rdata
def take_action(self, parsed_args):
"""This could be a lot prettier if I used these as arguments
instead of trying to detect input type --IP --domain --range
It's just easier to use without them, hmm.
"""
arg = parsed_args.scan
rdata = []
if "/" in arg:
# CIDR notation
ips = net_calc(arg)
else:
try:
# Throw exception if it's not an IP and then try domain name
ip = IP(arg)
ips = [ip]
except ValueError:
hosts = get_mx_hosts(arg)
ips = ips_from_domains(hosts)
for ip in ips:
ip = str(ip)
rdata = self.dnsbl_scanner(rdata, ip)
if not len(rdata):
# TODO: Check cliff docs for better way to exit if no results!
rdata.append((("", "", "", "")))
Scan.log.debug(rdata)
return (('IP', 'DNSBL Host', 'Response Code', 'DNS TXT Record'), rdata)
|
Python
| 0
|
@@ -646,16 +646,31 @@
net_calc
+, verify_domain
%0A%0Areques
@@ -2677,16 +2677,59 @@
eError:%0A
+ if verify_domain(arg):%0A
@@ -2758,24 +2758,28 @@
_hosts(arg)%0A
+
@@ -2812,16 +2812,112 @@
(hosts)%0A
+ else:%0A raise RuntimeError('Can not lookup domain: %25s' %25 arg)%0A
@@ -3109,16 +3109,16 @@
esults!%0A
-
@@ -3154,16 +3154,109 @@
, %22%22)))%0A
+ #raise RuntimeError('%25s is not listed on any DNSBLs monitored by Musubi.' %25 arg)%0A
|
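
The patch gates the MX lookup behind a new `verify_domain()` helper imported from `.netdns`; the helper's body is not part of this record. A plausible sketch with dnspython (which this module already uses), assuming dnspython 2.x, where older releases spell the call `dns.resolver.query`:

import dns.exception
import dns.resolver

def verify_domain(domain):
    """Return True if the domain resolves to at least one A record."""
    try:
        dns.resolver.resolve(domain, "A")
        return True
    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer,
            dns.resolver.NoNameservers, dns.exception.Timeout):
        return False
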
17ef07228722df5c15d48dc799074e1b0136831d
|
fix dict.has_key usage for python3
|
myhdl/_enum.py
|
myhdl/_enum.py
|
# This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2008 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Module that implements enum.
"""
from __future__ import absolute_import
from myhdl._bin import bin
from myhdl._Signal import _Signal
from myhdl._compat import string_types
class EnumType(object):
def __init__(self):
raise TypeError("class EnumType is only intended for type checking on subclasses")
class EnumItemType(object):
def __init__(self):
raise TypeError("class EnumItemType is only intended for type checking on subclasses")
supported_encodings = ("binary", "one_hot", "one_cold")
def enum(*names, **kwargs):
# args = args
encoding = kwargs.get('encoding', None)
if encoding is not None and encoding not in supported_encodings:
raise ValueError("Unsupported enum encoding: %s\n Supported encodings: %s" % \
(encoding, supported_encodings))
if encoding in ("one_hot", "one_cold"):
nrbits = len(names)
else: # binary as default
nrbits = len(bin(len(names)-1))
codedict = {}
i = 0
for name in names:
if not isinstance(name, string_types):
raise TypeError()
if codedict.has_key(name):
raise ValueError("enum literals should be unique")
if encoding == "one_hot":
code = bin(1<<i, nrbits)
elif encoding == "one_cold":
code = bin(~(1<<i), nrbits)
else: # binary as default
code = bin(i, nrbits)
if len(code) > nrbits:
code = code[-nrbits:]
codedict[name] = code
i += 1
class EnumItem(EnumItemType):
def __init__(self, index, name, val, type):
self._index = index
self._name = name
self._val = val
self._nrbits = type._nrbits
self._nritems = type._nritems
self._type = type
def __repr__(self):
return self._name
__str__ = __repr__
def __hex__(self):
return hex(int(self._val, 2))
__str__ = __repr__
def _toVerilog(self, dontcare=False):
val = self._val
if dontcare:
if encoding == "one_hot":
val = val.replace('0', '?')
elif encoding == "one_cold":
val = val.replace('1', '?')
return "%d'b%s" % (self._nrbits, val)
def _toVHDL(self):
return self._name
def __copy__(self):
return self
def __deepcopy__(self, memo=None):
return self
def _notImplementedCompare(self, other):
raise NotImplementedError
__le__ = __ge__ = __lt__ = __gt__ = _notImplementedCompare
def __eq__(self, other):
if isinstance(other, _Signal):
other = other._val
if not isinstance(other, EnumItemType) or type(self) is not type(other):
raise TypeError("Type mismatch in enum item comparison")
return self is other
def __ne__(self, other):
if isinstance(other, _Signal):
other = other._val
if not isinstance(other, EnumItemType) or type(self) is not type(other):
raise TypeError("Type mismatch in enum item comparison")
return self is not other
class Enum(EnumType):
def __init__(self, names, codedict, nrbits, encoding):
self.__dict__['_names'] = names
self.__dict__['_nrbits'] = nrbits
self.__dict__['_nritems'] = len(names)
self.__dict__['_codedict'] = codedict
self.__dict__['_encoding'] = encoding
self.__dict__['_name'] = None
for index, name in enumerate(names):
val = codedict[name]
self.__dict__[name] = EnumItem(index, name, val, self)
def __setattr__(self, attr, val):
raise AttributeError("Cannot assign to enum attributes")
def __len__(self):
return len(self._names)
def __repr__(self):
return "<Enum: %s>" % ", ".join(names)
__str__ = __repr__
def _setName(self, name):
typename = "t_enum_%s" % name
self.__dict__['_name'] = typename
_toVHDL = __str__
def _toVHDL(self):
typename = self.__dict__['_name']
str = "type %s is (\n " % typename
str += ",\n ".join(self._names)
str += "\n);"
if self._encoding is not None:
codes = " ".join([self._codedict[name] for name in self._names])
str += '\nattribute enum_encoding of %s: type is "%s";' % (typename, codes)
return str
return Enum(names, codedict, nrbits, encoding)
|
Python
| 0.000009
|
@@ -2010,24 +2010,32 @@
if
+name in
codedict
.has_key
@@ -2030,22 +2030,8 @@
dict
-.has_key(name)
:%0A
|
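
`dict.has_key()` was removed in Python 3, which is what this one-token patch addresses; the `in` operator is the replacement and already worked on Python 2. For instance:

codedict = {"IDLE": "00", "RUN": "01"}

# Python 2 only, gone in Python 3:
#   if codedict.has_key("IDLE"): ...

# Portable and idiomatic on both:
if "IDLE" in codedict:
    print("duplicate enum literal")
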
d23a53f5c97a3939952ecb8f39d24603fe0d4bab
|
bump `datadog-checks-base` version (#9718)
|
mysql/setup.py
|
mysql/setup.py
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open
from os import path
from setuptools import setup
HERE = path.abspath(path.dirname(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "mysql", "__about__.py")) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog-checks-base>=20.1.0'
setup(
name='datadog-mysql',
version=ABOUT['__version__'],
description='The MySQL check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent mysql check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='packages@datadoghq.com',
# License
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.mysql'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
extras_require={'deps': get_dependencies()},
# Extra files to ship with the wheel package
include_package_data=True,
)
|
Python
| 0
|
@@ -756,17 +756,17 @@
ase%3E=20.
-1
+2
.0'%0A%0Aset
|
1ecf42f474b17e01de12d235a29b08e7f18d0726
|
bump version to v1.10.3
|
ndd/package.py
|
ndd/package.py
|
# -*- coding: utf-8 -*-
"""Template package file"""
__title__ = 'ndd'
__version__ = '1.10.2'
__author__ = 'Simone Marsili'
__summary__ = ''
__url__ = 'https://github.com/simomarsili/ndd'
__email__ = 'simo.marsili@gmail.com'
__license__ = 'BSD 3-Clause'
__copyright__ = 'Copyright (c) 2020, Simone Marsili'
__classifiers__ = [
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
|
Python
| 0
|
@@ -83,17 +83,17 @@
= '1.10.
-2
+3
'%0A__auth
|
527ccd5790aa08d33387b43fd25beb2ed20335c7
|
remove defaults, use self.asserts
|
tensorflow/python/ops/script_ops_test.py
|
tensorflow/python/ops/script_ops_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for script operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import script_ops
from tensorflow.python.ops.script_ops import numpy_function
from tensorflow.python.platform import test
class NumpyFunctionTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_numpy_arguments(self):
def plus(a, b):
return a + b
actual_result = script_ops.numpy_function(plus, [1, 2], dtypes.int32)
expect_result = constant_op.constant(3, dtypes.int32)
self.assertAllEqual(actual_result, expect_result)
def test_stateless_flag(self):
call_count = 0
def plus(a, b):
      nonlocal call_count
call_count += 1
return a + b
@def_function.function
def tensor_plus_stateful(a, b):
return numpy_function(plus, [a, b], dtypes.int32, stateful=True)
@def_function.function
def tensor_plus_stateless(a, b):
return numpy_function(plus, [a, b], dtypes.int32, stateful=False)
@def_function.function(autograph=False)
def tensor_double_plus_stateless(a, b):
sum1 = tensor_plus_stateless(a, b)
sum2 = tensor_plus_stateless(a, b)
return sum1 + sum2
# different argument
tensor_double_plus_stateless(
constant_op.constant(1, dtype=dtypes.int32),
constant_op.constant(2, dtype=dtypes.int32),
)
assert call_count == 1 # +1 as only the first one was executed
@def_function.function(autograph=False)
def tensor_double_plus_stateful(a, b):
sum1 = tensor_plus_stateful(a, b)
sum2 = tensor_plus_stateful(a, b)
return sum1 + sum2
tensor_double_plus_stateful(
constant_op.constant(3, dtype=dtypes.int32),
constant_op.constant(4, dtype=dtypes.int32),
)
assert call_count == 3 # +2 as it is stateful, both were executed
if __name__ == "__main__":
test.main()
|
Python
| 0.000028
|
@@ -2216,36 +2216,16 @@
nstant(1
-, dtype=dtypes.int32
),%0A
@@ -2247,36 +2247,16 @@
nstant(2
-, dtype=dtypes.int32
),%0A )
@@ -2252,39 +2252,49 @@
t(2),%0A )%0A
+self.
assert
-
+Equal(
call_count == 1
@@ -2291,13 +2291,12 @@
ount
- ==
+,
1
+)
#
@@ -2588,36 +2588,16 @@
nstant(3
-, dtype=dtypes.int32
),%0A
@@ -2623,28 +2623,8 @@
nt(4
-, dtype=dtypes.int32
),%0A
@@ -2658,15 +2658,25 @@
+self.
assert
-
+Equal(
call
@@ -2685,13 +2685,12 @@
ount
- ==
+,
3
+)
#
@@ -2732,17 +2732,16 @@
cuted%0A%0A%0A
-%0A
if __nam
|
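
The substitution trades bare `assert` statements for `unittest`'s `self.assertEqual`, which reports both operands on failure and, unlike `assert`, is not stripped when Python runs with `-O`. A minimal self-contained illustration:

import unittest

class CounterTest(unittest.TestCase):
    def test_count(self):
        count = 1
        # assert count == 1          # terse on failure, skipped under -O
        self.assertEqual(count, 1)   # prints expected vs. actual on failure

if __name__ == "__main__":
    unittest.main()
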
10f0807b9ab85bfa6f6bbb4ed533e1a8af642571
|
fix bug in raw service
|
lib/svtplay_dl/service/raw.py
|
lib/svtplay_dl/service/raw.py
|
from __future__ import absolute_import
import os
import re
from svtplay_dl.service import Service
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.fetcher.dash import dashparse
class Raw(Service):
def get(self):
if self.exclude():
return
extention = False
filename = os.path.basename(self.url[:self.url.rfind("/")])
if self.options.output and os.path.isdir(self.options.output):
self.options.output = os.path.join(os.path.dirname(self.options.output), filename)
extention = True
elif self.options.output is None:
self.options.output = filename
extention = True
streams = []
if re.search(".f4m", self.url):
if extention:
self.options.output = "{0}.flv".format(self.options.output)
streams.append(hdsparse(self.options, self.http.request("get", self.url, params={"hdcore": "3.7.0"}), self.url))
if re.search(".m3u8", self.url):
streams.append(hlsparse(self.options, self.http.request("get", self.url), self.url))
if re.search(".mpd", self.url):
streams.append(dashparse(self.options, self.http.request("get", self.url), self.url))
for stream in streams:
for n in list(stream.keys()):
yield stream[n]
|
Python
| 0
|
@@ -1317,16 +1317,43 @@
treams:%0A
+ if stream:%0A
@@ -1386,16 +1386,20 @@
eys()):%0A
+
|
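
The fix guards the iteration because the parser helpers (`hdsparse`, `hlsparse`, `dashparse`) evidently can return `None` when a URL yields nothing, and `None.keys()` raises `AttributeError`. The pattern in isolation, with made-up data:

streams = [{"720p": "a.m3u8"}, None, {}]

for stream in streams:
    if stream:                        # skips None and empty dicts
        for n in list(stream.keys()):
            print(n, "->", stream[n])
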
905c6d82c0b568788cd755cb5a98b0e24550f9a5
|
test .to() method on particle collection
|
streams/nbody/tests/test_particles.py
|
streams/nbody/tests/test_particles.py
|
# coding: utf-8
""" """
from __future__ import absolute_import, unicode_literals, division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
# Third-party
import astropy.units as u
import numpy as np
import pytest
from ...misc.units import UnitSystem
from ..particles import *
usys = UnitSystem(u.kpc, u.Myr, u.M_sun)
def test_particlecollection_init():
# Init with individual arrays of ndim=1
r = np.random.random(3)*u.kpc
v = np.random.random(3)*u.km/u.s
m = np.random.random()*u.M_sun
with pytest.raises(ValueError):
pc = ParticleCollection(r=r, v=v, m=m, unit_system=usys)
r = np.random.random(size=(10,3))*u.kpc
v = np.random.random(size=(10,3))*u.kpc/u.Myr
m = np.random.random(10)*u.M_sun
pc = ParticleCollection(r=r, v=v, m=m, unit_system=usys)
assert np.all(pc.r.value == r.value)
assert np.all(pc.v.value == v.value)
assert np.all(pc.m.value == m.value)
assert np.all(pc._r == r.value)
assert np.all(pc._v == v.value)
assert np.all(pc._m == m.value)
def test_acceleration():
r = np.array([[1.,0.],
[0, 1.],
[-1., 0.],
[0., -1.]])*u.kpc
v = np.zeros_like(r.value)*u.km/u.s
m = np.random.random()*u.M_sun
pc = ParticleCollection(r=r, v=v, m=m, unit_system=usys)
pc.acceleration_at(np.array([0.,0.])*u.kpc, m=1.*u.M_sun)
a = pc.acceleration_at(np.array([[0.5,0.5], [0.0,0.0], [-0.5, -0.5]])*u.kpc,
m=[1.,1.,1.]*u.M_sun)
def test_merge():
# test merging two particle collections
r = np.random.random(size=(10,3))*u.kpc
v = np.random.random(size=(10,3))*u.km/u.s
m = np.random.random(10)*u.M_sun
pc1 = ParticleCollection(r=r, v=v, m=m, unit_system=usys)
r = np.random.random(size=(10,3))*u.kpc
v = np.random.random(size=(10,3))*u.km/u.s
m = np.random.random(10)*u.M_sun
pc2 = ParticleCollection(r=r, v=v, m=m, unit_system=usys)
pc_merged = pc1.merge(pc2)
assert pc_merged._r.shape == (20,3)
|
Python
| 0
|
@@ -2123,12 +2123,444 @@
pe == (20,3)
+%0A%0Adef test_to():%0A r = np.random.random(size=(10,3))*u.kpc%0A v = np.random.random(size=(10,3))*u.kpc/u.Myr%0A m = np.random.random(10)*u.M_sun%0A %0A pc = ParticleCollection(r=r, v=v, m=m, unit_system=usys)%0A %0A usys2 = UnitSystem(u.km, u.s, u.kg) %0A pc2 = pc.to(usys2)%0A %0A assert np.all(pc2._r == r.to(u.km).value)%0A assert np.all(pc2._v == v.to(u.km/u.s).value)%0A assert np.all(pc2._m == m.to(u.kg).value)
|
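
The appended `test_to` exercises conversion between unit systems; the astropy mechanic underneath is `Quantity.to()`. A stripped-down version of the same check, assuming astropy and numpy are installed:

import astropy.units as u
import numpy as np

r = np.random.random(size=(10, 3)) * u.kpc
r_km = r.to(u.km)                     # same physical values, new unit
assert np.allclose(r_km.to(u.kpc).value, r.value)
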
6a2aa6051c7922d1b2b37824d92634a4880e9ff2
|
Correct semantic version format.
|
tensorflow_probability/python/version.py
|
tensorflow_probability/python/version.py
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define TensorFlow Probability version information."""
# We follow Semantic Versioning (https://semver.org/)
_MAJOR_VERSION = '0'
_MINOR_VERSION = '5'
_PATCH_VERSION = '0'
# When building releases, we can update this value on the release branch to
# reflect the current release candidate ('rc0', 'rc1') or, finally, the official
# stable release (indicated by `_VERSION_SUFFIX = ''`). Outside the context of a
# release branch, the current version is by default assumed to be a
# 'development' version, labeled 'dev'.
_VERSION_SUFFIX = 'dev'
# Example, '0.4.0.dev'
__version__ = '.'.join(s for s in [
_MAJOR_VERSION,
_MINOR_VERSION,
_PATCH_VERSION,
_VERSION_SUFFIX,
] if s) # Prevent trailing dot when version suffix is empty.
|
Python
| 0.00053
|
@@ -1235,17 +1235,17 @@
, '0.4.0
-.
+-
dev'%0A__v
@@ -1268,19 +1268,8 @@
oin(
-s for s in
%5B%0A
@@ -1326,19 +1326,21 @@
ERSION,%0A
-
+%5D)%0Aif
_VERSIO
@@ -1351,68 +1351,67 @@
FFIX
-,%0A%5D if s) # Prevent trailing dot when version suffix is empty.
+:%0A __version__ = '%7B%7D-%7B%7D'.format(__version__, _VERSION_SUFFIX)
%0A
|
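
Semantic Versioning attaches pre-release identifiers with a hyphen (`0.5.0-dev`), not a dot, which is what this patch enforces. Rendered as plain code, the patched logic is roughly:

_MAJOR_VERSION, _MINOR_VERSION, _PATCH_VERSION = "0", "5", "0"
_VERSION_SUFFIX = "dev"

__version__ = ".".join([_MAJOR_VERSION, _MINOR_VERSION, _PATCH_VERSION])
if _VERSION_SUFFIX:
    __version__ = "{}-{}".format(__version__, _VERSION_SUFFIX)

assert __version__ == "0.5.0-dev"     # an empty suffix leaves plain 0.5.0
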
82665b999fb07e3ebc41de8132ba9d22dc04140c
|
Change version number back to 0.8.0.dev
|
neo/version.py
|
neo/version.py
|
# -*- coding: utf-8 -*-
version = '0.7.1'
|
Python
| 0.000001
|
@@ -34,9 +34,13 @@
'0.
-7.1
+8.0.dev
'%0A
|
a42ae6f2c761809813b9851bc1e449e3dac685ba
|
Remove mongo _id from resource.
|
superdesk/items.py
|
superdesk/items.py
|
from datetime import datetime
from flask import request, url_for
from . import mongo
from . import rest
from .auth import auth_required
from .utils import get_random_string
from .io.reuters_token import ReutersTokenProvider
tokenProvider = ReutersTokenProvider()
class ItemConflictException(Exception):
pass
def format_item(item):
item.setdefault('self_url', url_for('item', guid=item.get('guid')))
for content in item.get('contents', []):
if content.get('href'):
content['href'] = '%s?auth_token=%s' % (content.get('href'), tokenProvider.get_token())
return item
def save_item(data):
now = datetime.utcnow()
data.setdefault('guid', generate_guid())
data.setdefault('firstCreated', now)
data.setdefault('versionCreated', now)
item = mongo.db.items.find_one({'guid': data.get('guid')})
if item and item.get('versionCreated').time() >= data.get('versionCreated').time():
raise ItemConflictException()
elif item:
data['_id'] = item.get('_id')
mongo.db.items.save(data)
return data
def update_item(data, guid):
data.pop('_id', None)
data['versionCreated'] = datetime.utcnow()
item = mongo.db.items.find_one({'guid': guid})
item.update(data)
mongo.db.items.save(item)
return item
def generate_guid():
guid = get_random_string()
while mongo.db.items.find_one({'guid': guid}):
guid = get_random_string()
return guid
def get_last_updated():
item = mongo.db.items.find_one(fields=['versionCreated'], sort=[('versionCreated', -1)])
if item:
return item.get('versionCreated')
class ItemListResource(rest.Resource):
def get_query(self):
query = {}
query.setdefault('itemClass', 'icls:composite')
if request.args.get('q'):
query['headline'] = {'$regex': request.args.get('q'), '$options': 'i'}
if request.args.get('itemClass'):
query['itemClass'] = {'$in': request.args.get('itemClass').split(",")}
return query
@auth_required
def get(self):
skip = int(request.args.get('skip', 0))
limit = int(request.args.get('limit', 25))
query = self.get_query()
raw_items = mongo.db.items.find(query).sort('versionCreated', -1).skip(skip).limit(limit + 1)
items = [format_item(item) for item in raw_items]
return {'items': items[:limit], 'has_next': len(items) > limit, 'has_prev': skip > 0}
@auth_required
def post(self):
item = save_item(request.get_json())
return item, 201
class ItemResource(rest.Resource):
def _get_item(self, guid):
return mongo.db.items.find_one_or_404({'guid': guid})
@auth_required
def get(self, guid):
item = self._get_item(guid)
return format_item(item)
@auth_required
def put(self, guid):
data = request.get_json()
item = update_item(data, guid)
return format_item(item)
|
Python
| 0
|
@@ -333,16 +333,42 @@
(item):%0A
+ item.pop('_id', None)%0A
item
@@ -1127,34 +1127,8 @@
d):%0A
- data.pop('_id', None)%0A
@@ -2245,23 +2245,21 @@
).sort('
-version
+first
Created'
|
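
Moving the `pop('_id', None)` into `format_item()` drops Mongo's internal id (an `ObjectId`, which is not JSON-serialisable) on every response path instead of only the update path. Note the hunk also swaps the list sort key from `versionCreated` to `firstCreated`, which the subject line does not mention. The essence of the id change:

def format_item(item):
    item.pop('_id', None)      # harmless no-op when the key is absent
    return item

doc = {'_id': '507f1f77bcf86cd799439011', 'guid': 'abc'}
assert '_id' not in format_item(doc)
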
d1cc7c4cc0122c3f1d3e21941ad0726b30718b25
|
use num
|
test/functional/feature_llmqdkgerrors.py
|
test/functional/feature_llmqdkgerrors.py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import DashTestFramework
'''
llmq-dkgerrors.py
Simulate and check DKG errors
'''
class LLMQDKGErrors(DashTestFramework):
def set_test_params(self):
self.set_dash_test_params(4, 3, [["-whitelist=127.0.0.1"]] * 4, fast_dip3_enforcement=True)
def run_test(self):
self.sync_blocks(self.nodes, timeout=60*5)
self.confirm_mns()
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
self.wait_for_sporks_same()
# Mine one quorum without simulating any errors
qh = self.mine_quorum()
self.assert_member_valid(qh, self.mninfo[0].proTxHash, True)
# Lets omit the contribution
self.mninfo[0].node.quorum('dkgsimerror', 'contribution-omit', True)
qh = self.mine_quorum(expected_contributions=2)
self.assert_member_valid(qh, self.mninfo[0].proTxHash, False)
# Lets lie in the contribution but provide a correct justification
self.mninfo[0].node.quorum('dkgsimerror', 'contribution-omit', False)
self.mninfo[0].node.quorum('dkgsimerror', 'contribution-lie', True)
qh = self.mine_quorum(expected_contributions=3, expected_complaints=2, expected_justifications=1)
self.assert_member_valid(qh, self.mninfo[0].proTxHash, True)
# Lets lie in the contribution and then omit the justification
self.mninfo[0].node.quorum('dkgsimerror', 'justify-omit', True)
qh = self.mine_quorum(expected_contributions=3, expected_complaints=2)
self.assert_member_valid(qh, self.mninfo[0].proTxHash, False)
# Heal some damage (don't get PoSe banned)
self.heal_masternodes(33)
# Lets lie in the contribution and then also lie in the justification
self.mninfo[0].node.quorum('dkgsimerror', 'justify-omit', False)
self.mninfo[0].node.quorum('dkgsimerror', 'justify-lie', True)
qh = self.mine_quorum(expected_contributions=3, expected_complaints=2, expected_justifications=1)
self.assert_member_valid(qh, self.mninfo[0].proTxHash, False)
# Lets lie about another MN
self.mninfo[0].node.quorum('dkgsimerror', 'contribution-lie', False)
self.mninfo[0].node.quorum('dkgsimerror', 'justify-lie', False)
self.mninfo[0].node.quorum('dkgsimerror', 'complain-lie', True)
qh = self.mine_quorum(expected_contributions=3, expected_complaints=1, expected_justifications=2)
self.assert_member_valid(qh, self.mninfo[0].proTxHash, True)
        # Lets omit 1 premature commitment
self.mninfo[0].node.quorum('dkgsimerror', 'complain-lie', False)
self.mninfo[0].node.quorum('dkgsimerror', 'commit-omit', True)
qh = self.mine_quorum(expected_contributions=3, expected_complaints=0, expected_justifications=0, expected_commitments=2)
self.assert_member_valid(qh, self.mninfo[0].proTxHash, True)
        # Lets lie in 1 premature commitment
self.mninfo[0].node.quorum('dkgsimerror', 'commit-omit', False)
self.mninfo[0].node.quorum('dkgsimerror', 'commit-lie', True)
qh = self.mine_quorum(expected_contributions=3, expected_complaints=0, expected_justifications=0, expected_commitments=2)
self.assert_member_valid(qh, self.mninfo[0].proTxHash, True)
def assert_member_valid(self, quorumHash, proTxHash, expectedValid):
q = self.nodes[0].quorum('info', 100, quorumHash, True)
for m in q['members']:
if m['proTxHash'] == proTxHash:
if expectedValid:
assert(m['valid'])
else:
assert(not m['valid'])
else:
assert(m['valid'])
def heal_masternodes(self, blockCount):
# We're not testing PoSe here, so lets heal the MNs :)
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 4070908800)
self.wait_for_sporks_same()
for i in range(blockCount):
self.bump_mocktime(1)
self.nodes[0].generate(1)
self.sync_all()
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
self.wait_for_sporks_same()
def confirm_mns(self):
while True:
diff = self.nodes[0].protx("diff", 1, self.nodes[0].getblockcount())
found_unconfirmed = False
for mn in diff["mnList"]:
if int(mn["confirmedHash"], 16) == 0:
found_unconfirmed = True
break
if not found_unconfirmed:
break
self.nodes[0].generate(1)
self.sync_blocks()
if __name__ == '__main__':
LLMQDKGErrors().main()
|
Python
| 0.00001
|
@@ -957,36 +957,33 @@
ribution-omit',
-True
+1
)%0A qh = s
@@ -1233,37 +1233,33 @@
ribution-omit',
-False
+0
)%0A self.m
@@ -1306,36 +1306,33 @@
tribution-lie',
-True
+1
)%0A qh = s
@@ -1622,36 +1622,33 @@
'justify-omit',
-True
+1
)%0A qh = s
@@ -2005,37 +2005,33 @@
'justify-omit',
-False
+0
)%0A self.m
@@ -2077,28 +2077,25 @@
stify-lie',
-True
+1
)%0A qh
@@ -2359,37 +2359,33 @@
tribution-lie',
-False
+0
)%0A self.m
@@ -2431,29 +2431,25 @@
stify-lie',
-False
+0
)%0A se
@@ -2500,28 +2500,25 @@
plain-lie',
-True
+1
)%0A qh
@@ -2793,21 +2793,17 @@
n-lie',
-False
+0
)%0A
@@ -2861,20 +2861,17 @@
-omit',
-True
+1
)%0A
@@ -3175,21 +3175,17 @@
-omit',
-False
+0
)%0A
@@ -3242,20 +3242,17 @@
t-lie',
-True
+1
)%0A
|
b3761729b156367229b5cd8895d225cb13d3267a
|
Fix example `Set-Based Column Map Expectation` template import (#6134)
|
examples/expectations/set_based_column_map_expectation_template.py
|
examples/expectations/set_based_column_map_expectation_template.py
|
"""
This is a template for creating custom SetBasedColumnMapExpectations.
For detailed instructions on how to use it, please see:
https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_set_based_column_map_expectations
"""
from great_expectations.expectations.regex_based_column_map_expectation import (
SetBasedColumnMapExpectation,
)
# <snippet>
# This class defines the Expectation itself
class ExpectColumnValuesToBeInSomeSet(SetBasedColumnMapExpectation):
"""TODO: Add a docstring here"""
# These values will be used to configure the metric created by your expectation
set_ = []
set_camel_name = "SetName"
set_semantic_name = None
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = []
# Here your regex is used to create a custom metric for this expectation
map_metric = SetBasedColumnMapExpectation.register_metric(
set_camel_name=set_camel_name,
set_=set_,
)
# This object contains metadata for display in the public Gallery
library_metadata = {
"tags": ["set-based"], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@your_name_here", # Don't forget to add your github handle here!
],
}
# </snippet>
if __name__ == "__main__":
ExpectColumnValuesToBeInSomeSet().print_diagnostic_checklist()
|
Python
| 0
|
@@ -312,21 +312,19 @@
tations.
-regex
+set
_based_c
|
bfc291e36abc95683a6483f8bb5ff0a2c6465631
|
Add missing "cpp" fragment to `swift_module_alias`.
|
swift/internal/swift_module_alias.bzl
|
swift/internal/swift_module_alias.bzl
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the `swift_module_alias` rule."""
load("@bazel_skylib//lib:dicts.bzl", "dicts")
load(":api.bzl", "swift_common")
load(":compiling.bzl", "new_objc_provider")
load(":derived_files.bzl", "derived_files")
load(":linking.bzl", "register_libraries_to_link")
load(":providers.bzl", "SwiftInfo", "SwiftToolchainInfo")
load(":utils.bzl", "compact", "create_cc_info", "get_providers")
def _swift_module_alias_impl(ctx):
deps = ctx.attr.deps
module_mapping = {
dep[SwiftInfo].module_name: dep.label
for dep in deps
if dep[SwiftInfo].module_name
}
module_name = ctx.attr.module_name
if not module_name:
module_name = swift_common.derive_module_name(ctx.label)
# Print a warning message directing users to the new modules that they need to
# import. This "nag" is intended to prevent users from misusing this rule to
# simply forward imported modules.
warning = """\n
WARNING: The Swift target \"{target}\" (defining module {module_name}) is \
deprecated. Please update your BUILD targets and Swift code to import the \
following dependencies instead:\n\n""".format(
target = str(ctx.label),
module_name = module_name,
)
for dep_module_name, dep_target in module_mapping.items():
warning += ' - "{target}" (import {module_name})\n'.format(
target = str(dep_target),
module_name = dep_module_name,
)
print(warning + "\n")
# Generate a source file that imports each of the deps using `@_exported`.
reexport_src = derived_files.reexport_modules_src(ctx.actions, ctx.label.name)
ctx.actions.write(
content = "\n".join([
"@_exported import {}".format(module)
for module in module_mapping.keys()
]),
output = reexport_src,
)
swift_toolchain = ctx.attr._toolchain[SwiftToolchainInfo]
feature_configuration = swift_common.configure_features(
ctx = ctx,
requested_features = ctx.features,
swift_toolchain = swift_toolchain,
unsupported_features = ctx.disabled_features,
)
compilation_outputs = swift_common.compile(
actions = ctx.actions,
bin_dir = ctx.bin_dir,
copts = ["-parse-as-library"],
deps = deps,
feature_configuration = feature_configuration,
genfiles_dir = ctx.genfiles_dir,
module_name = module_name,
srcs = [reexport_src],
swift_toolchain = swift_toolchain,
target_name = ctx.label.name,
)
library_to_link = register_libraries_to_link(
actions = ctx.actions,
alwayslink = False,
cc_feature_configuration = swift_common.cc_feature_configuration(
feature_configuration = feature_configuration,
),
is_dynamic = False,
is_static = True,
library_name = ctx.label.name,
objects = compilation_outputs.object_files,
swift_toolchain = swift_toolchain,
)
providers = [
DefaultInfo(
files = depset(compact([
compilation_outputs.swiftdoc,
compilation_outputs.swiftmodule,
library_to_link.dynamic_library,
library_to_link.pic_static_library,
])),
),
create_cc_info(
cc_infos = get_providers(deps, CcInfo),
compilation_outputs = compilation_outputs,
libraries_to_link = [library_to_link],
),
swift_common.create_swift_info(
swiftdocs = [compilation_outputs.swiftdoc],
swiftmodules = [compilation_outputs.swiftmodule],
swift_infos = get_providers(deps, SwiftInfo),
),
]
# Propagate an `objc` provider if the toolchain supports Objective-C interop,
# which allows `objc_library` targets to import `swift_library` targets.
if swift_toolchain.supports_objc_interop:
providers.append(new_objc_provider(
deps = deps,
include_path = ctx.bin_dir.path,
link_inputs = compilation_outputs.linker_inputs,
linkopts = compilation_outputs.linker_flags,
module_map = compilation_outputs.generated_module_map,
static_archives = compact([library_to_link.pic_static_library]),
swiftmodules = [compilation_outputs.swiftmodule],
objc_header = compilation_outputs.generated_header,
))
return providers
swift_module_alias = rule(
attrs = dicts.add(
swift_common.toolchain_attrs(),
{
"module_name": attr.string(
doc = """
The name of the Swift module being built.
If left unspecified, the module name will be computed based on the target's
build label, by stripping the leading `//` and replacing `/`, `:`, and other
non-identifier characters with underscores.
""",
),
"deps": attr.label_list(
doc = """
A list of targets that are dependencies of the target being built, which will be
linked into that target. Allowed kinds are `swift_import` and `swift_library`
(or anything else propagating `SwiftInfo`).
""",
providers = [[SwiftInfo]],
),
},
),
doc = """
Creates a Swift module that re-exports other modules.
This rule effectively creates an "alias" for one or more modules such that a
client can import the alias module and it will implicitly import those
dependencies. It should be used primarily as a way to migrate users when a
module name is being changed. An alias that depends on more than one module can
be used to split a large module into smaller, more targeted modules.
Symbols in the original modules can be accessed through either the original
module name or the alias module name, so callers can be migrated separately
after moving the physical build target as needed. (An exception to this is
runtime type metadata, which only encodes the module name of the type where the
symbol is defined; it is not repeated by the alias module.)
This rule unconditionally prints a message directing users to migrate from the
alias to the aliased modules---this is intended to prevent misuse of this rule
to create "umbrella modules".
> Caution: This rule uses the undocumented `@_exported` feature to re-export the
> `deps` in the new module. You depend on undocumented features at your own
> risk, as they may change in a future version of Swift.
""",
implementation = _swift_module_alias_impl,
)
|
Python
| 0.00031
|
@@ -7042,24 +7042,49 @@
Swift.%0A%22%22%22,%0A
+ fragments = %5B%22cpp%22%5D,%0A
implemen
|
407bb78c34b769f8d993853761234c60e1fbeabd
|
Update util.py
|
tabpy-server/tabpy_server/app/util.py
|
tabpy-server/tabpy_server/app/util.py
|
import csv
import logging
import os
from datetime import datetime
from OpenSSL import crypto
logger = logging.getLogger(__name__)
def log_and_raise(msg, exception_type):
'''
Log the message and raise an exception of specified type
'''
logger.fatal(msg)
raise exception_type(msg)
def validate_cert(cert_file_path):
with open(cert_file_path, 'r') as f:
cert_buf = f.read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_buf)
date_format, encoding = '%Y%m%d%H%M%SZ', 'ascii'
not_before = datetime.strptime(
cert.get_notBefore().decode(encoding), date_format)
not_after = datetime.strptime(
cert.get_notAfter().decode(encoding), date_format)
now = datetime.now()
https_error = 'Error using HTTPS: '
if now < not_before:
log_and_raise(https_error +
'The certificate provided is not valid until {}.'.format(
not_before), RuntimeError)
if now > not_after:
log_and_raise(https_error +
f'The certificate provided expired on {not_after}.',
RuntimeError)
def parse_pwd_file(pwd_file_name):
'''
Parses passwords file and returns set of credentials.
Parameters
----------
pwd_file_name : str
Passwords file name.
Returns
-------
succeeded : bool
True if specified file was parsed successfully.
False if there were any issues with parsing specified file.
credentials : dict
Credentials from the file. Empty if succeeded is False.
'''
logger.info('Parsing passwords file {}...'.format(pwd_file_name))
if not os.path.isfile(pwd_file_name):
logger.fatal('Passwords file {} not found'.format(pwd_file_name))
return False, {}
credentials = {}
with open(pwd_file_name) as pwd_file:
pwd_file_reader = csv.reader(pwd_file, delimiter=' ')
for row in pwd_file_reader:
# skip empty lines
if len(row) == 0:
continue
# skip commented lines
if row[0][0] == '#':
continue
if len(row) != 2:
logger.error(
'Incorrect entry "{}" '
'in password file'.format(row))
return False, {}
login = row[0].lower()
if login in credentials:
logger.error(
'Multiple entries for username {} '
'in password file'.format(login))
return False, {}
if(len(row[1]) > 0):
credentials[login] = row[1]
logger.debug('Found username {}'.format(login))
else:
logger.warning('Found username {} but no password'
.format(row[0]))
return False, {}
logger.info("Authentication is enabled")
return True, credentials
|
Python
| 0.000001
|
@@ -2895,28 +2895,16 @@
lse, %7B%7D%0A
-
%0A log
|
99e9ef79178d6e2dffd8ec7ed12b3edbd8b7d0f1
|
Add basket total to context
|
longclaw/longclawbasket/views.py
|
longclaw/longclawbasket/views.py
|
from django.shortcuts import render
from django.views.generic import ListView
from longclaw.longclawbasket.models import BasketItem
from longclaw.longclawbasket import utils
class BasketView(ListView):
model = BasketItem
template_name = "longclawbasket/basket.html"
def get_context_data(self, **kwargs):
items, _ = utils.get_basket_items(self.request)
return {"basket": items}
|
Python
| 0.99994
|
@@ -378,29 +378,115 @@
-return %7B%22basket%22: items
+total_price = sum(item.total() for item in items)%0A return %7B%22basket%22: items, %22total_price%22: total_price
%7D%0A
|
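
The context now carries a pre-computed basket total alongside the items, so the template needs no arithmetic. The same computation outside Django, where `total()` on a basket line is assumed to return price times quantity:

class Line:
    def __init__(self, price, quantity):
        self.price, self.quantity = price, quantity

    def total(self):
        return self.price * self.quantity

items = [Line(10, 2), Line(5, 1)]
context = {"basket": items,
           "total_price": sum(item.total() for item in items)}
assert context["total_price"] == 25
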
1d07732e0fae0dca9eae1d89de913a1e124e32fc
|
Disable some prod optimisations
|
lutrisweb/settings/production.py
|
lutrisweb/settings/production.py
|
import os
from base import * # noqa
DEBUG = False
MEDIA_URL = '//lutris.net/media/'
FILES_ROOT = '/srv/files'
ALLOWED_HOSTS = ['.lutris.net', '.lutris.net.', ]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'lutris',
'USER': 'lutris',
'PASSWORD': os.environ['DATABASE_PASSWORD'],
'HOST': 'localhost',
'CONN_MAX_AGE': 600,
}
}
EMAIL_HOST = os.environ['EMAIL_HOST']
EMAIL_HOST_USER = os.environ['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD']
EMAIL_USE_TLS = True
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
STEAM_API_KEY = os.environ['STEAM_API_KEY']
|
Python
| 0
|
@@ -390,16 +390,17 @@
+#
'CONN_MA
@@ -712,16 +712,18 @@
er',%0A)%0A%0A
+#
SESSION_
|
a281790b2ed4d16ab7d7611a9faa6f418f53826e
|
Create log if it doesn't exist
|
swutils.py
|
swutils.py
|
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
swutils
~~~~~~~
Provides methods for interacting with ScraperWiki boxes
Examples:
Schedule a job::
job = lambda x: 'hello %s' % x
exception_handler = ExceptionHandler('reubano@gmail.com').handler
run_or_schedule(job, True, exception_handler)
Attributes:
SCHEDULE_TIME (str): Time of the day to run the scheduled job.
"""
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
import time
import smtplib
import logging
import schedule as sch
import scraperwiki
from os import environ
from email.mime.text import MIMEText
from testfixtures import LogCapture
__version__ = '0.6.2'
__title__ = 'swutils'
__author__ = 'Reuben Cummings'
__description__ = 'ScraperWiki box utility library'
__email__ = 'reubano@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Reuben Cummings'
SCHEDULE_TIME = '10:30'
class ExceptionHandler(object):
"""Creates a logging exception handler with email notifications
Note: the following doctests assume you have a running Postfix server
https://www.garron.me/en/mac/postfix-relay-gmail-mac-os-x-local-smtp.html
"""
def __init__(self, to, logfile='log.txt', logname=''):
""" ExceptionHandler constructor
Args:
to (str): The email recipient
logfile (str): The logfile (default: log.txt)
logname (str): The logger name (default: '')
Examples:
>>> ExceptionHandler('reubano@gmail.com') # doctest: +ELLIPSIS
<swutils.ExceptionHandler object at 0x...>
"""
logging.basicConfig(filename=logfile, level=logging.DEBUG)
self.to = to
self.logfile = logfile
self.logger = logging.getLogger(logname)
def email(self, subject=None, text=None, host='localhost'):
""" Sends the email notification
Args:
subject (str): The email subject (default: localhost).
text (str): The email content (default: None).
host (str): The email host server (default: localhost).
Examples:
>>> to = 'reubano@gmail.com'
>>> ExceptionHandler(to).email('hello world') # doctest: +ELLIPSIS
<smtplib.SMTP instance at 0x...>
"""
user = environ.get('USER')
body = 'https://scraperwiki.com/dataset/%s\n\n%s' % (user, text)
msg = MIMEText(body)
msg['From'] = '%s@scraperwiki.com' % user
msg['Subject'] = subject or 'scraperwiki box %s failed' % user
msg['To'] = self.to
# Send the message via our own SMTP server, but don't include the
# envelope header.
s = smtplib.SMTP(host)
s.sendmail(msg['From'], [msg['To']], msg.as_string())
s.quit()
return s
def handler(self, func):
""" Creates the exception handler
Args:
func (func): The func to catch exceptions from
Examples:
>>> import os
>>> from tempfile import NamedTemporaryFile
>>> f = NamedTemporaryFile(delete=False)
>>> to = 'reubano@gmail.com'
>>> exc_handler = ExceptionHandler(to, f.name).handler
>>> job = exc_handler(lambda x: x * 2)
>>> print(job(2))
4
>>> with LogCapture() as l:
... job(None)
... print(l)
root ERROR
unsupported operand type(s) for *: 'NoneType' and 'int'
>>> os.unlink(f.name)
Returns:
func: the exception handler
"""
def wrapper(*args, **kwargs):
try:
res = func(*args, **kwargs)
except Exception as e:
self.logger.exception(str(e))
scraperwiki.status('error', 'Error collecting data')
with open(self.logfile, 'rb') as f:
self.email(text=f.read())
else:
scraperwiki.status('ok')
return res
return wrapper
def run_or_schedule(job, schedule=False, exception_handler=None):
""" Runs a job and optionally schedules it to run later
Args:
job (func): The func to run
schedule (bool): Schedule `func` to run in the future (default: False)
exception_handler (func): The exception handler to wrap the function in
(default: None)
Examples:
>>> from pprint import pprint
>>> from functools import partial
>>> job = partial(pprint, 'hello world')
>>> run_or_schedule(job)
u'hello world'
>>> exception_handler = ExceptionHandler('reubano@gmail.com').handler
>>> run_or_schedule(job, False, exception_handler)
u'hello world'
"""
if exception_handler and schedule:
job = exception_handler(job)
job()
if schedule:
sch.every(1).day.at(SCHEDULE_TIME).do(job)
while True:
sch.run_pending()
time.sleep(1)
|
Python
| 0
|
@@ -1815,24 +1815,94 @@
ger(logname)
+%0A open(logfile, 'w').close() if not p.exists(logfile) else None
%0A%0A def em
|
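
The added guard touches the logfile before the logger needs it. The hunk calls `p.exists`, so the full commit presumably also imports `os.path` under the alias `p`; that import is outside the context shown here. Spelled out without the alias:

import os.path

logfile = "log.txt"
if not os.path.exists(logfile):
    open(logfile, "w").close()     # create an empty file, i.e. "touch"
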
59a34073226d317c8d02e60ee13ac1e7a84cb41c
|
Patch for an Issue #1057
|
lib/core/bigarray.py
|
lib/core/bigarray.py
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
try:
import cPickle as pickle
except:
import pickle
import os
import tempfile
from lib.core.exception import SqlmapSystemException
from lib.core.settings import BIGARRAY_CHUNK_LENGTH
from lib.core.settings import BIGARRAY_TEMP_PREFIX
class Cache(object):
"""
Auxiliary class used for storing cached chunks
"""
def __init__(self, index, data, dirty):
self.index = index
self.data = data
self.dirty = dirty
class BigArray(list):
"""
List-like class used for storing large amounts of data (disk cached)
"""
def __init__(self):
self.chunks = [[]]
self.cache = None
self.filenames = set()
self._os_remove = os.remove
def append(self, value):
self.chunks[-1].append(value)
if len(self.chunks[-1]) >= BIGARRAY_CHUNK_LENGTH:
filename = self._dump(self.chunks[-1])
self.chunks[-1] = filename
self.chunks.append([])
def extend(self, value):
for _ in value:
self.append(_)
def pop(self):
if len(self.chunks[-1]) < 1:
self.chunks.pop()
with open(self.chunks[-1], "rb") as fp:
self.chunks[-1] = pickle.load(fp)
return self.chunks[-1].pop()
def index(self, value):
for index in xrange(len(self)):
if self[index] == value:
return index
return ValueError, "%s is not in list" % value
def _dump(self, value):
try:
handle, filename = tempfile.mkstemp(prefix=BIGARRAY_TEMP_PREFIX)
self.filenames.add(filename)
os.close(handle)
with open(filename, "w+b") as fp:
pickle.dump(value, fp, pickle.HIGHEST_PROTOCOL)
return filename
except IOError, ex:
errMsg = "exception occurred while storing data "
errMsg += "to a temporary file ('%s')" % ex
raise SqlmapSystemException, errMsg
def _checkcache(self, index):
if (self.cache and self.cache.index != index and self.cache.dirty):
filename = self._dump(self.cache.data)
self.chunks[self.cache.index] = filename
if not (self.cache and self.cache.index == index):
with open(self.chunks[index], "rb") as fp:
self.cache = Cache(index, pickle.load(fp), False)
def __getstate__(self):
return self.chunks, self.filenames
def __setstate__(self, state):
self.__init__()
self.chunks, self.filenames = state
def __getslice__(self, i, j):
retval = BigArray()
i = max(0, len(self) + i if i < 0 else i)
j = min(len(self), len(self) + j if j < 0 else j)
for _ in xrange(i, j):
retval.append(self[_])
return retval
def __getitem__(self, y):
if y < 0:
y += len(self)
index = y / BIGARRAY_CHUNK_LENGTH
offset = y % BIGARRAY_CHUNK_LENGTH
chunk = self.chunks[index]
if isinstance(chunk, list):
return chunk[offset]
else:
self._checkcache(index)
return self.cache.data[offset]
def __setitem__(self, y, value):
index = y / BIGARRAY_CHUNK_LENGTH
offset = y % BIGARRAY_CHUNK_LENGTH
chunk = self.chunks[index]
if isinstance(chunk, list):
chunk[offset] = value
else:
self._checkcache(index)
self.cache.data[offset] = value
self.cache.dirty = True
def __repr__(self):
return "%s%s" % ("..." if len(self.chunks) > 1 else "", self.chunks[-1].__repr__())
def __iter__(self):
for i in xrange(len(self)):
yield self[i]
def __len__(self):
return len(self.chunks[-1]) if len(self.chunks) == 1 else (len(self.chunks) - 1) * BIGARRAY_CHUNK_LENGTH + len(self.chunks[-1])
|
Python
| 0
|
@@ -1269,16 +1269,37 @@
s.pop()%0A
+ try:%0A
@@ -1350,32 +1350,36 @@
+
self.chunks%5B-1%5D
@@ -1396,16 +1396,231 @@
oad(fp)%0A
+ except IOError, ex:%0A errMsg = %22exception occurred while retrieving data %22%0A errMsg += %22from a temporary file ('%25s')%22 %25 ex%0A raise SqlmapSystemException, errMsg%0A
@@ -2625,32 +2625,53 @@
ndex == index):%0A
+ try:%0A
with
@@ -2701,32 +2701,36 @@
%5D, %22rb%22) as fp:%0A
+
@@ -2778,16 +2778,231 @@
, False)
+%0A except IOError, ex:%0A errMsg = %22exception occurred while retrieving data %22%0A errMsg += %22from a temporary file ('%25s')%22 %25 ex%0A raise SqlmapSystemException, errMsg
%0A%0A de
|
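
The patch wraps both deserialisation sites so that a vanished or unreadable temp file surfaces as the project's own `SqlmapSystemException` instead of a bare `IOError`. The same shape in Python 3 syntax (the original is Python 2, hence `raise X, msg`), with a stand-in exception class:

import pickle

class SqlmapSystemException(Exception):
    pass

def load_chunk(filename):
    try:
        with open(filename, "rb") as fp:
            return pickle.load(fp)
    except IOError as ex:
        errMsg = "exception occurred while retrieving data "
        errMsg += "from a temporary file ('%s')" % ex
        raise SqlmapSystemException(errMsg)
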
cd9fc5a6ea8925de67041408d96a63beccf573a2
|
add docopt
|
taksman.py
|
taksman.py
|
#!/usr/bin/env python
import os
import errno
import re
from pprint import pprint
def show_by_course(tasks):
courses = set(tasks[name].get('course') for name in tasks)
courses -= set([None])
courses = sorted(courses)
for course in courses:
print
print "Course: %s" % course
course_tasks = filter(
lambda name: tasks[name].get('course') == course,
tasks)
for name in course_tasks:
print "> %s" % name
def read_tasks(db_root):
""" Load tasks from db. """
entry_names = os.listdir(os.path.join(db_root, "entry"))
entry_paths = {filename: os.path.join(db_root, "entry", filename) for filename in entry_names}
tasks = {name: read_task(entry_paths[name]) for name in entry_names}
return tasks
def read_task(filepath):
""" Read a task from a file. """
task = {}
task['body'] = ""
with open(filepath, 'r') as f:
reading_headers = True
for line in f.readlines():
header_match = re.match(r"(?P<field>\w+): +(?P<value>.*)$", line)
if reading_headers and header_match:
field = header_match.group('field')
value = header_match.group('value')
assert field != 'body'
assert field not in task
task[field] = value.rstrip()
else:
reading_headers = False
task['body'] += line.rstrip() + "\n"
task['body'] = task['body'].rstrip()
return task
def ensure_db(db_root):
""" Make the storage directories exist. """
mkdir_p(os.path.join(db_root, "entry"))
mkdir_p(os.path.join(db_root, "done"))
mkdir_p(os.path.join(db_root, "template"))
def mkdir_p(path):
""" no error if existing, make parent directories as needed """
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
if __name__ == "__main__":
db_root = "tasks"
ensure_db(db_root)
tasks = read_tasks(db_root)
# pprint(tasks)
show_by_course(tasks)
|
Python
| 0
|
@@ -15,16 +15,243 @@
python%0A
+%22%22%22 Assignment management tool for school.%0AUsage:%0A taksman.py (-h %7C --help)%0A taksman.py add %3Centry%3E%0A taksman.py course%0A taksman.py date%0A taksman.py debug%0A%0AExamples:%0A taksman.py add 033-reading%0A%0AOptions:%0A -h, --help%0A%22%22%22%0A%0A
import o
@@ -300,16 +300,42 @@
t pprint
+%0Afrom docopt import docopt
%0A%0Adef sh
@@ -2343,50 +2343,340 @@
ot)%0A
+%0A
-# pprint(tasks)%0A show_by_course(tasks)
+arguments = docopt(__doc__)%0A if arguments%5B'debug'%5D:%0A pprint(tasks)%0A elif arguments%5B'add'%5D:%0A raise Exception(%22not implemented%22)%0A elif arguments%5B'course'%5D:%0A show_by_course(tasks)%0A elif arguments%5B'course'%5D:%0A raise Exception(%22not implemented%22)%0A else:%0A print %22Whoops, unhandled input.%22
%0A
|
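
docopt builds the argument parser straight from the usage text in the module docstring and returns a dict keyed by command and option names, which is what the new `arguments = docopt(__doc__)` dispatch reads. (Incidentally, the patched dispatch tests `arguments['course']` twice; the second branch was presumably meant to be `arguments['date']`.) A minimal runnable sketch of the pattern, with illustrative names; requires `pip install docopt`:

"""Usage:
  demo.py (-h | --help)
  demo.py add <entry>
  demo.py course
"""
from docopt import docopt

if __name__ == "__main__":
    arguments = docopt(__doc__)
    if arguments["add"]:
        print("adding", arguments["<entry>"])
    elif arguments["course"]:
        print("listing by course")

Running `python demo.py add foo` prints `adding foo`; invoking it with no arguments makes docopt print the usage text and exit.
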
616f2419774136b6cd98bc6dbee31bf39a99acea
|
add zhihu special spider
|
DataHouse/zhihu/zhihu_special_spider.py
|
DataHouse/zhihu/zhihu_special_spider.py
|
"""
a web spider for Zhihu Special
"""
import random
import os
import time
import logging
import requests
from pymongo import MongoClient
import pandas as pd
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='zhihu_course.log',
filemode='w')
def crawl(pagenum):
    url_pattern = 'https://api.zhihu.com/lives/special_lists?limit=10&offset=%d&subtype=special_list' % pagenum
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Accept-Encoding': 'gzip, deflate, br',
'Host': 'api.zhihu.com',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
}
cookies = dict(
cookies_are='')
response = requests.get(url=url_pattern, headers=headers, cookies=cookies)
if response.status_code == 200:
live_json = response.json()
time.sleep(random.randint(2, 5)) # a range between 2s and 5s
return live_json
else:
print('ERROR, code is %d' % response.status_code)
return None
def recursive_crawl():
"""
recursively crawl all Zhihu special data
:return:
"Version:1.0
"""
offset = 10
while True:
try:
obj = crawl(offset)
if obj is not None and len(obj['data']) > 0:
for _ in obj['data']:
insert_item(_)
print('insert one item successfully~')
offset += 10
else:
break
except:
logging.error('https://api.zhihu.com/lives/special_lists?limit=10&offset=%d&subtype=special_list' % offset)
def insert_item(item):
"""
insert an item into MongoDB
:param item:
:return:
:Version:1.0
"""
client = MongoClient()
db = client.zhihu.special
result = db.insert_one(item)
if __name__ == '__main__':
recursive_crawl()
|
Python
| 0
|
@@ -1678,16 +1678,58 @@
data'%5D:%0A
+ if _ is not None:%0A
@@ -1755,16 +1755,20 @@
item(_)%0A
+
|
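The pagination contract recursive_crawl relies on, reduced to a runnable skeleton; fetch_page and the toy pages mapping are stand-ins introduced here for illustration, not part of the Zhihu API.

# Offset pagination: keep requesting pages until an empty page terminates the loop.
def paginate(fetch_page, step=10):
    offset = step
    items = []
    while True:
        data = fetch_page(offset)
        if not data:
            break
        items.extend(data)
        offset += step
    return items

# Toy source: two non-empty pages, then an empty page ends the crawl.
pages = {10: [1, 2], 20: [3], 30: []}
print(paginate(lambda off: pages.get(off, [])))  # [1, 2, 3]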
362312ad1a26dbecf0c4942c9a6e7042cbaab3bd
|
Test the rest of Roman masters
|
test-mm.py
|
test-mm.py
|
from psautohint import autohint
from psautohint import psautohint
def getFonts(masters, baseDir):
options = autohint.ACOptions()
options.quiet = True
fonts = []
infos = []
for master in masters:
path = "%s/%s/font.ufo" % (baseDir, master)
font = autohint.openUFOFile(path, None, False, options)
font.useProcessedLayer = False
names = font.getGlyphList()
_, fontDictList = font.getfdInfo(font.getPSName(), path, False, False, [], [], names)
info = fontDictList[0].getFontInfo()
fonts.append(font)
infos.append(info)
return fonts, infos
def getGlyphList(fonts):
glyphList = fonts[0].getGlyphList()
assert all([font.getGlyphList() == glyphList for font in fonts])
return glyphList
def mmHint(masters, fonts, infos, glyphList):
hinted = []
for name in glyphList:
glyphs = []
print("Hinting %s" % name)
for i, (font, info) in enumerate(zip(fonts, infos)):
glyph = font.convertToBez(name, False, True)[0]
if not glyph:
glyph = "%%%s\n" % name
if i == 0:
glyph = psautohint.autohint(info, [glyph], False, False, False, False)[0]
glyphs.append(glyph)
try:
            glyphs = psautohint.autohintmm(infos[0], [glyphs], masters, True)
except:
for i, glyph in enumerate(glyphs):
print(masters[i])
print(glyph)
raise
hinted.append(glyphs)
return hinted
def main():
masters = ["Black", "ExtraLight"]
fonts, infos = getFonts(masters, "tests/data/source-code-pro")
glyphList = getGlyphList(fonts)
hinted = mmHint(masters, fonts, infos, glyphList)
if __name__ == "__main__":
main()
|
Python
| 0.000002
|
@@ -1573,27 +1573,77 @@
= %5B%22
-Black%22, %22ExtraLight
+Regular%22, %22Light%22, %22ExtraLight%22, %22Medium%22, %22Semibold%22, %22Bold%22, %22Black
%22%5D%0A
|
6aa5e2c95c0f529aa2803395779ca7274d5795b1
|
Bump version to 1.0.1-machtfit-67
|
src/oscar/__init__.py
|
src/oscar/__init__.py
|
import os
# Use 'dev', 'beta', or 'final' as the 4th element to indicate release type.
VERSION = (1, 0, 1, 'machtfit', 66)
def get_short_version():
return '%s.%s' % (VERSION[0], VERSION[1])
def get_version():
return '{}.{}.{}-{}-{}'.format(*VERSION)
# Cheeky setting that allows each template to be accessible by two paths.
# Eg: the template 'oscar/templates/oscar/base.html' can be accessed via both
# 'base.html' and 'oscar/base.html'. This allows Oscar's templates to be
# extended by templates with the same filename
OSCAR_MAIN_TEMPLATE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'templates/oscar')
OSCAR_CORE_APPS = [
'oscar',
'oscar.apps.checkout',
'oscar.apps.address',
'oscar.apps.shipping',
'oscar.apps.catalogue',
'oscar.apps.partner',
'oscar.apps.basket',
'oscar.apps.payment',
'oscar.apps.offer',
'oscar.apps.order',
'oscar.apps.customer',
'oscar.apps.dashboard',
# 3rd-party apps that oscar depends on
'treebeard',
'sorl.thumbnail',
'django_tables2',
]
def get_core_apps(overrides=None):
"""
Return a list of oscar's apps amended with any passed overrides
"""
if not overrides:
return OSCAR_CORE_APPS
def get_app_label(app_label, overrides):
pattern = app_label.replace('oscar.apps.', '')
for override in overrides:
if override.endswith(pattern):
if 'dashboard' in override and 'dashboard' not in pattern:
continue
return override
return app_label
apps = []
for app_label in OSCAR_CORE_APPS:
apps.append(get_app_label(app_label, overrides))
return apps
|
Python
| 0
|
@@ -115,17 +115,17 @@
tfit', 6
-6
+7
)%0A%0A%0Adef
|
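For reference, the formatting in get_version applied to the bumped tuple from the diff above:

# What get_version() and get_short_version() yield after the bump to 67.
VERSION = (1, 0, 1, 'machtfit', 67)
print('{}.{}.{}-{}-{}'.format(*VERSION))   # 1.0.1-machtfit-67
print('%s.%s' % (VERSION[0], VERSION[1]))  # 1.0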
20f705417f9fce36c1c568d1fc2ee1a65373c336
|
Fix MRPC link (#1247)
|
tensor2tensor/data_generators/mrpc.py
|
tensor2tensor/data_generators/mrpc.py
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for the MSR Paraphrase Corpus."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import registry
import tensorflow as tf
EOS = text_encoder.EOS
@registry.register_problem
class MSRParaphraseCorpus(text_problems.TextConcat2ClassProblem):
"""MSR Paraphrase Identification problems."""
# Link to data from GLUE: https://gluebenchmark.com/tasks
DEV_IDS = ("https://firebasestorage.googleapis.com/v0/b/"
"mtl-sentence-representations.appspot.com/o/"
"data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-"
"4bd7-99a5-5e00222e0faf")
MRPC_TRAIN = ("https://s3.amazonaws.com/senteval/senteval_data/"
"msr_paraphrase_train.txt")
MRPC_TEST = ("https://s3.amazonaws.com/senteval/senteval_data/"
"msr_paraphrase_test.txt")
DATA_DIR = "MRPC"
@property
def is_generate_per_split(self):
return True
@property
def dataset_splits(self):
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": 10,
}, {
"split": problem.DatasetSplit.EVAL,
"shards": 1,
}]
@property
def approx_vocab_size(self):
return 2**13 # 8k vocab suffices for this small dataset.
@property
def num_classes(self):
return 2
def class_labels(self, data_dir):
del data_dir
return ["not_paraphrase", "paraphrase"]
def _maybe_download_corpora(self, tmp_dir):
mrpc_dir = os.path.join(tmp_dir, self.DATA_DIR)
tf.gfile.MakeDirs(mrpc_dir)
mrpc_train_finalpath = os.path.join(mrpc_dir, "msr_paraphrase_train.txt")
mrpc_test_finalpath = os.path.join(mrpc_dir, "msr_paraphrase_test.txt")
mrpc_dev_ids_finalpath = os.path.join(mrpc_dir, "dev_ids.tsv")
def download_file(tdir, filepath, url):
if not tf.gfile.Exists(filepath):
generator_utils.maybe_download(tdir, filepath, url)
download_file(mrpc_dir, mrpc_train_finalpath, self.MRPC_TRAIN)
download_file(mrpc_dir, mrpc_test_finalpath, self.MRPC_TEST)
download_file(mrpc_dir, mrpc_dev_ids_finalpath, self.DEV_IDS)
return mrpc_dir
def example_generator(self, filename, dev_ids):
for idx, line in enumerate(tf.gfile.Open(filename, "rb")):
if idx == 0: continue # skip header
if six.PY2:
line = unicode(line.strip(), "utf-8")
else:
line = line.strip().decode("utf-8")
l, id1, id2, s1, s2 = line.split("\t")
if dev_ids and [id1, id2] not in dev_ids:
continue
inputs = [[s1, s2], [s2, s1]]
for inp in inputs:
yield {
"inputs": inp,
"label": int(l)
}
def generate_samples(self, data_dir, tmp_dir, dataset_split):
mrpc_dir = self._maybe_download_corpora(tmp_dir)
filesplit = "msr_paraphrase_train.txt"
dev_ids = []
if dataset_split != problem.DatasetSplit.TRAIN:
for row in tf.gfile.Open(os.path.join(mrpc_dir, "dev_ids.tsv")):
dev_ids.append(row.strip().split("\t"))
filename = os.path.join(mrpc_dir, filesplit)
for example in self.example_generator(filename, dev_ids):
yield example
@registry.register_problem
class MSRParaphraseCorpusCharacters(MSRParaphraseCorpus):
"""MSR Paraphrase Identification problems, character level"""
@property
def vocab_type(self):
return text_problems.VocabType.CHARACTER
def global_task_id(self):
return problem.TaskID.EN_SIM
|
Python
| 0
|
@@ -1440,16 +1440,24 @@
a%252F
-WNLI.zip
+mrpc_dev_ids.tsv
?alt
@@ -1473,21 +1473,21 @@
ken=
-068ad0a0-ded7
+ec5c0836-31d5
-%22%0A
@@ -1504,29 +1504,29 @@
%224
-bd7-99a5-5e00222e0faf
+8f4-b431-7480817f1adc
%22)%0A
|
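A toy, dependency-free rendition of the dev-ids filtering in example_generator above; the TSV rows and ids are invented samples, and tf.gfile is replaced by an in-memory list.

# Keep only sentence pairs whose (id1, id2) appear in dev_ids; each kept pair
# is emitted in both orderings, as in the generator above.
def gen_examples(rows, dev_ids):
    for idx, line in enumerate(rows):
        if idx == 0:
            continue  # skip the header row
        label, id1, id2, s1, s2 = line.strip().split("\t")
        if dev_ids and [id1, id2] not in dev_ids:
            continue
        for inp in ([s1, s2], [s2, s1]):
            yield {"inputs": inp, "label": int(label)}

rows = [
    "label\tid1\tid2\ts1\ts2",
    "1\ta\tb\tA cat sat.\tA cat was sitting.",
    "0\tc\td\tIt rained.\tThe sun shone.",
]
print(list(gen_examples(rows, dev_ids=[["a", "b"]])))
# two dicts: the (s1, s2) and (s2, s1) orderings of the first pair only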
d9c2bb2de79db80bc94509cb6a23de7f85e6e899
|
update tests
|
tests/test_pecanstreet_dataset_adapter.py
|
tests/test_pecanstreet_dataset_adapter.py
|
import sys
sys.path.append('../')
from disaggregator import PecanStreetDatasetAdapter
import unittest
class PecanStreetDatasetAdapterTestCase(unittest.TestCase):
def setUp(self):
db_url = "postgresql://USERNAME:PASSWORD@db.wiki-energy.org:5432/postgres"
self.psda = PecanStreetDatasetAdapter(db_url)
def test_get_table_names(self):
s_tables = self.psda.get_table_names('shared')
c_tables = self.psda.get_table_names('curated')
r_tables = self.psda.get_table_names('raw')
self.assertIn('group1_disaggregated_2012_12', c_tables,
'curated schema has correct tables')
self.assertIn('egauge_15min_2013', r_tables,
'raw schema has correct tables')
self.assertIn('validated_01_2014', s_tables,
'shared schema has correct tables')
def test_table_metadata(self):
ids,cols = self.psda.get_table_metadata('shared','validated_01_2014')
self.assertIn(744,ids,'shared table 01 2014 has dataid 744')
self.assertIn('use',cols,'shared table 01 2014 has column "use"')
self.assertIn('air1',cols,'shared table 01 2014 has column "air1"')
pass
def test_get_month_traces(self):
# traces = self.pdsa.get_month_traces('shared','validated_01_2014')
# trace = p.get_month_traces_wo_time_align('shared',str(tables[0]),i[0])
pass
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000001
|
@@ -1414,16 +1414,36 @@
pass%0A%0A
+fast = TestSuite()%0A%0A
if __nam
|
e1da85d46f84a35198959881b55196db4e0a67c4
|
Fix loading of description.yaml
|
lava_results_app/utils.py
|
lava_results_app/utils.py
|
import os
import yaml
import logging
import subprocess
from django.utils.translation import ungettext_lazy
from django.conf import settings
from django.http import Http404
from linaro_django_xmlrpc.models import AuthToken
def help_max_length(max_length):
return ungettext_lazy( # pylint: disable=no-member
u"Maximum length: {0} character",
u"Maximum length: {0} characters",
max_length).format(max_length)
class StreamEcho(object): # pylint: disable=too-few-public-methods
def write(self, value): # pylint: disable=no-self-use,
return value
def description_filename(job):
filename = os.path.join(job.output_dir, 'description.yaml')
if not os.path.exists(filename):
return None
return filename
def description_data(job):
logger = logging.getLogger('lava_results_app')
filename = description_filename(job)
if not filename:
return {}
try:
data = yaml.load(open(filename, 'r'))
except yaml.YAMLError:
logger.error("Unable to parse description for %s" % job.id)
return {}
if not data:
return {}
return data
# FIXME: relocate these two functions into dbutils to avoid needing django settings here.
# other functions in utils can be run outside django. Remove import of AuthToken.
def anonymous_token(request, job):
querydict = request.GET
user = querydict.get('user', default=None)
token = querydict.get('token', default=None)
# safe to call with (None, None) - returns None
auth_user = AuthToken.get_user_for_secret(username=user, secret=token)
if not user and not job.is_public:
raise Http404("Job %d requires authentication to view." % job.id)
if not auth_user:
raise Http404("User '%s' is not able to view job %d" % (user, job.id))
return auth_user
def check_request_auth(request, job):
if job.is_public:
return
if not request.user.is_authenticated():
# handle anonymous access
auth_user = anonymous_token(request, job)
if not auth_user or not job.can_view(auth_user):
raise Http404("User '%s' is not able to view job %d" % (request.user, job.id))
elif not job.can_view(request.user):
raise Http404("User '%s' is not able to view job %d" % (request.user.username, job.id))
def debian_package_version():
"""
Relies on Debian Policy rules for the existence of the
changelog. Distributions not derived from Debian will
return an empty string.
"""
changelog = '/usr/share/doc/lava-server/changelog.Debian.gz'
if os.path.exists(changelog):
deb_version = subprocess.check_output((
'dpkg-query', '-W', "-f=${Version}\n", 'lava-server')).strip().decode('utf-8')
# example version returned would be '2016.11-1'
return deb_version
|
Python
| 0.000155
|
@@ -753,24 +753,1137 @@
filename%0A%0A%0A
+class V2Loader(yaml.Loader):%0A def remove_pipeline_module(self, suffix, node):%0A if 'lava_dispatcher.pipeline' in suffix:%0A suffix = suffix.replace('lava_dispatcher.pipeline', 'lava_dispatcher')%0A return self.construct_python_object(suffix, node)%0A%0A def remove_pipeline_module_name(self, suffix, node):%0A if 'lava_dispatcher.pipeline' in suffix:%0A suffix = suffix.replace('lava_dispatcher.pipeline', 'lava_dispatcher')%0A return self.construct_python_name(suffix, node)%0A%0A def remove_pipeline_module_new(self, suffix, node):%0A if 'lava_dispatcher.pipeline' in suffix:%0A suffix = suffix.replace('lava_dispatcher.pipeline', 'lava_dispatcher')%0A return self.construct_python_object_new(suffix, node)%0A%0A%0AV2Loader.add_multi_constructor(%0A u'tag:yaml.org,2002:python/name:',%0A V2Loader.remove_pipeline_module_name)%0AV2Loader.add_multi_constructor(%0A u'tag:yaml.org,2002:python/object:',%0A V2Loader.remove_pipeline_module)%0AV2Loader.add_multi_constructor(%0A u'tag:yaml.org,2002:python/object/new:',%0A V2Loader.remove_pipeline_module_new)%0A%0A%0A
def descript
@@ -2081,16 +2081,33 @@
me, 'r')
+, Loader=V2Loader
)%0A ex
|
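Beyond the lava-specific V2Loader, the explicit Loader= argument the diff adds is also what newer PyYAML expects; a bare yaml.load(stream) has emitted a warning since PyYAML 5.1. A minimal safe-loading example:

import yaml

doc = "pipeline: [deploy, boot, test]"
data = yaml.load(doc, Loader=yaml.SafeLoader)
print(data)  # {'pipeline': ['deploy', 'boot', 'test']}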
52ebe157585019c9be01b22638fff924ba328892
|
Increase delay (to fix tests that are failing randomly on travis but are always passing on my locale machine)
|
test/test_modes/test_goto_assignments.py
|
test/test_modes/test_goto_assignments.py
|
"""
Test the autocomplete mode
"""
from pyqode.core.api import TextHelper
from pyqode.qt import QtCore, QtWidgets
from pyqode.qt.QtTest import QTest
from pyqode.python import modes as pymodes
from test.helpers import editor_open
def get_mode(editor):
return editor.modes.get(pymodes.GoToAssignmentsMode)
@editor_open(__file__)
def test_enabled(editor):
mode = get_mode(editor)
assert mode.enabled
mode.enabled = False
mode.enabled = True
@editor_open(__file__)
def test_goto_variable(editor):
editor.clear()
code = "a = 15\nprint(a)"
editor.setPlainText(code)
mode = get_mode(editor)
TextHelper(editor).goto_line(2, len('print(a)') - 2)
mode.request_goto()
QTest.qWait(1000)
assert TextHelper(editor).current_line_nbr() == 0
out = False
def _on_out_of_doc(*args):
global out
out = True
@editor_open(__file__)
def test_goto_out_of_doc(editor):
global out
out = False
editor.clear()
code = "import logging\nlogging.basicConfig()"
editor.setPlainText(code)
mode = get_mode(editor)
TextHelper(editor).goto_line(1, len('logging.basicConfig()') - 4)
mode.out_of_doc.connect(_on_out_of_doc)
assert out is False
mode.request_goto()
QTest.qWait(1000)
assert out is True
flg_multi = False
def accept_dlg():
global flg_multi
flg_multi = True
widgets = QtWidgets.QApplication.instance().topLevelWidgets()
for w in widgets:
if isinstance(w, QtWidgets.QDialog):
QTest.keyPress(w, QtCore.Qt.Key_Tab)
QTest.keyPress(w, QtCore.Qt.Key_Tab)
QTest.keyPress(w, QtCore.Qt.Key_Return)
@editor_open(__file__)
def test_multiple_results(editor):
global flg_multi
editor.clear()
code = "import os\nos.path.abspath('..')"
editor.setPlainText(code)
mode = get_mode(editor)
TextHelper(editor).goto_line(1, 4)
QTest.qWait(1000)
mode.request_goto()
assert flg_multi is False
QtCore.QTimer.singleShot(1000, accept_dlg)
QTest.qWait(1000)
assert flg_multi is True
@editor_open(__file__)
def test_make_unique(editor):
seq = ['a', 'b', 'c', 'a']
mode = get_mode(editor)
new_seq = mode._unique(seq)
assert len(new_seq) == len(seq) - 1
|
Python
| 0
|
@@ -708,33 +708,33 @@
QTest.qWait(
-1
+5
000)%0A assert
@@ -1237,33 +1237,33 @@
QTest.qWait(
-1
+5
000)%0A assert
|
93f912b9eb3a17ab24b0a7a67ad2297a7bae6e91
|
Fix .aar building on Mac
|
tensorflow/lite/java/aar_with_jni.bzl
|
tensorflow/lite/java/aar_with_jni.bzl
|
"""Generate zipped aar file including different variants of .so in jni folder."""
load("@build_bazel_rules_android//android:rules.bzl", "android_binary")
def aar_with_jni(
name,
android_library,
headers = None,
flatten_headers = False):
"""Generates an Android AAR given an Android library target.
Args:
name: Name of the generated .aar file.
android_library: The `android_library` target to package. Note that the
AAR will contain *only that library's .jar` sources. It does not
package the transitive closure of all Java source dependencies.
headers: Optional list of headers that will be included in the
generated .aar file. This is useful for distributing self-contained
.aars with native libs that can be used directly by native clients.
flatten_headers: Whether to flatten the output paths of included headers.
"""
# Generate dummy AndroidManifest.xml for dummy apk usage
# (dummy apk is generated by <name>_dummy_app_for_so target below)
native.genrule(
name = name + "_binary_manifest_generator",
outs = [name + "_generated_AndroidManifest.xml"],
cmd = """
cat > $(OUTS) <<EOF
<manifest
xmlns:android="http://schemas.android.com/apk/res/android"
package="dummy.package.for.so">
<uses-sdk android:minSdkVersion="999"/>
</manifest>
EOF
""",
)
# Generate dummy apk including .so files and later we extract out
# .so files and throw away the apk.
android_binary(
name = name + "_dummy_app_for_so",
manifest = name + "_generated_AndroidManifest.xml",
custom_package = "dummy.package.for.so",
deps = [android_library],
# In some platforms we don't have an Android SDK/NDK and this target
# can't be built. We need to prevent the build system from trying to
# use the target in that case.
tags = [
"manual",
"no_cuda_on_cpu_tap",
],
)
srcs = [android_library + ".aar", name + "_dummy_app_for_so_unsigned.apk"]
cmd = """
cp $(location {0}.aar) $(location :{1}.aar)
chmod +w $(location :{1}.aar)
origdir=$$PWD
cd $$(mktemp -d)
unzip $$origdir/$(location :{1}_dummy_app_for_so_unsigned.apk) "lib/*"
cp -r lib jni
zip -r $$origdir/$(location :{1}.aar) jni/*/*.so
""".format(android_library, name)
if headers:
srcs += headers
cmd += """
mkdir headers
"""
for src in headers:
if flatten_headers:
cmd += """
cp -rL $$origdir/$(location {0}) headers/$$(basename $(location {0}))
""".format(src)
else:
cmd += """
mkdir -p headers/$$(dirname $(location {0}))
cp -rL $$origdir/$(location {0}) headers/$(location {0})
""".format(src)
cmd += "zip -r $$origdir/$(location :{0}.aar) headers".format(name)
native.genrule(
name = name,
srcs = srcs,
outs = [name + ".aar"],
tags = ["manual"],
cmd = cmd,
)
|
Python
| 0.000002
|
@@ -2565,33 +2565,33 @@
cp -
-r
+R
L $$origdir/$(lo
@@ -2805,17 +2805,17 @@
cp -
-r
+R
L $$orig
|
eb5d7f91286779ff0f3b6d7c829967f74ef1db7a
|
replace managers by plain functions
|
testbot.py
|
testbot.py
|
# -*- coding: utf-8 -*-
from bot import Tofbot
import unittest
from collections import namedtuple
def print_resp(msg):
print (" -> %s" % msg)
class TestTofbot(Tofbot):
def __init__(self, nick, name, chan, origin):
chans = [chan]
self.nick = nick
Tofbot.__init__(self, nick, name, chans, debug=False)
self.chan = chan
self.origin = origin
self.cb = None
def msg(self, chan, msg):
if self.cb:
self.cb(msg)
else:
print_resp(msg)
def send(self, msg):
print ("<- %s" % msg)
self.dispatch(self.origin, [msg, 'PRIVMSG', self.chan])
def kick(self, msg=None):
if msg is None:
msg = self.nick
self.dispatch(self.origin, [msg, 'KICK', self.chan, self.nick])
class BotAction:
def __init__(self, bot, action):
"""
If length=None, just expect one and return it (not a list).
"""
self.bot = bot
self.action = action
self.msgs = []
def __enter__(self):
def capture_out(msg):
self.msgs.append(msg)
self.bot.cb = capture_out
self.action()
return self.msgs
def __exit__(self, *args):
pass
def bot_input(bot, msg):
return BotAction(bot, lambda: bot.send(msg))
def bot_kick(bot, msg=None):
return BotAction(bot, lambda: bot.kick(msg))
class TestCase(unittest.TestCase):
def setUp(self):
nick = "testbot"
name = "Test Bot"
chan = "#chan"
Origin = namedtuple('Origin', ['sender', 'nick'])
origin = Origin('sender', 'nick')
self.bot = TestTofbot(nick, name, chan, origin)
cmds = ['!set autoTofadeThreshold 100']
for cmd in cmds:
self.bot.dispatch(origin, [cmd, 'BOTCONFIG', 'PRIVMSG', '#config'])
self.bot.joined = True
def _io(self, inp, outp):
"""
Test that a given input produces a given output.
"""
with bot_input(self.bot, inp) as l:
if isinstance(outp, str):
outp = [outp]
self.assertEqual(l, outp)
def test_set_allowed(self):
msg = "!set autoTofadeThreshold 9000"
self.bot.send(msg)
self._io("!get autoTofadeThreshold", "autoTofadeThreshold = 9000")
def test_kick(self):
with bot_kick(self.bot) as l:
self.assertEqual(l, ["respawn, LOL"])
def test_kick_reason(self):
with bot_kick(self.bot, "tais toi") as l:
self.assertEqual(l, ["comment Γ§a, tais toi ?"])
def test_dassin(self):
self._io("tu sais", "je n'ai jamais Γ©tΓ© aussi heureux que ce matin-lΓ ")
def test_donnezmoi(self):
self._io("donnez moi un lol", ['L', 'O', 'L'])
|
Python
| 0.000041
|
@@ -808,259 +808,52 @@
)%0A%0A%0A
-class BotAction:%0A def __init__(self,
+def
bot
-,
+_
action
-):%0A %22%22%22%0A If length=None, just expect one and return it (not a list).%0A %22%22%22%0A self.bot = bot%0A self.action = action%0A self.msgs = %5B%5D%0A%0A def __enter__(self):%0A
+(bot, action):%0A msgs = %5B%5D%0A%0A
@@ -882,25 +882,16 @@
- self.
msgs.app
@@ -900,33 +900,24 @@
d(msg)%0A%0A
- self.
bot.cb = cap
@@ -929,25 +929,16 @@
out%0A
- self.
action()
@@ -946,75 +946,21 @@
-
return
-self.
msgs%0A%0A
- def __exit__(self, *args):%0A pass%0A%0A
%0Adef
@@ -984,36 +984,37 @@
sg):%0A return
-BotA
+bot_a
ction(bot, lambd
@@ -1077,12 +1077,13 @@
urn
-BotA
+bot_a
ctio
@@ -1702,28 +1702,27 @@
%22%22%22%0A
-with
+l =
bot_input(s
@@ -1734,22 +1734,16 @@
ot, inp)
- as l:
%0A
@@ -1743,20 +1743,16 @@
-
if isins
@@ -1781,20 +1781,16 @@
-
outp = %5B
@@ -1795,20 +1795,16 @@
%5Boutp%5D%0A
-
@@ -2036,36 +2036,35 @@
(self):%0A
-with
+l =
bot_kick(self.b
@@ -2066,27 +2066,17 @@
elf.bot)
- as l:%0A
+%0A
@@ -2158,12 +2158,11 @@
-with
+l =
bot
@@ -2192,19 +2192,9 @@
oi%22)
- as l:%0A
+%0A
|
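The shape of the refactor above ("replace managers by plain functions"), in isolation: the captured-message list is returned directly instead of through __enter__/__exit__. Bot here is a trivial stand-in, not the real Tofbot.

# A plain function replaces the BotAction context manager: run the action,
# collect everything the bot emits, and return the list.
class Bot:
    def __init__(self):
        self.cb = None
    def send(self, msg):
        if self.cb:
            self.cb("echo: " + msg)

def bot_action(bot, action):
    msgs = []
    bot.cb = msgs.append
    action()
    return msgs

bot = Bot()
print(bot_action(bot, lambda: bot.send("hi")))  # ['echo: hi']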
935754be7aa6efc5b39096edee24c3d987ae32e7
|
fix #55: default value issue
|
myql/contrib/table/binder.py
|
myql/contrib/table/binder.py
|
from base import Base, BaseInput, BasePaging
from xml.etree import cElementTree as xtree
class Binder(Base):
"""Class describing binders : select, insert, update, delete
name : select, insert, update, delete
itemPath : dotted path i.e : products.product
produces : json or xml
urls : list of urls related to the api
inputs : list of InputKey object
"""
def __init__(self, name, itemPath, produces, pollingFrequencySeconds=30, urls=[], inputs=[], paging=None):
"""Initializes the class
"""
self.name = name
self.itemPath = itemPath
self.pollingFrequencySeconds = str(pollingFrequencySeconds)
self.produces = produces
self.urls = urls
self.inputs = inputs
self.paging = paging
# Builds the element tree
self.etree = self._buildElementTree()
# Adding urls
if urls:
[ self.addUrl(url) for url in urls ]
# Adding inputs passed as parameters
if inputs:
[ self.addInput(key) for key in inputs ]
# Adding paging
if paging:
self.addPaging(paging)
def __repr__(self):
return "<Binder:{0}>".format(self.name)
def _buildElementTree(self,):
"""Builds ElementTree out of Binder object
"""
t_binder = xtree.Element(self.name)
for item in self.__dict__.items():
if item[0] not in ('name', 'inputs', 'urls', 'paging'):
t_binder.set(*item)
return t_binder
def addUrl(self, url):
"""Adds url to binder
"""
if not url in self.urls:
self.urls.append(url)
root = self.etree
t_urls = root.find('urls')
if not t_urls:
t_urls = xtree.SubElement(root, 'urls')
t_url = xtree.SubElement(t_urls, 'url')
t_url.text = url
return True
def removeUrl(self, url):
"""Removes a specified url of a binder
"""
root = self.etree
t_urls = root.find('urls')
if not t_urls:
return False
for t_url in t_urls.findall('url'):
if t_url.text == url.strip():
t_urls.remove(t_url)
if url in self.urls:
self.urls.remove(url)
return True
return False
def addInput(self, key):
"""Add key element to the binder
"""
if not key in self.inputs:
self.inputs.append(key)
root = self.etree
t_input = root.find('inputs')
if not t_input :
t_input = xtree.SubElement(root, 'inputs')
t_input.append(key.etree)
return True
def removeInput(self, key_id):
"""Removes an input from a binder
"""
root = self.etree
t_inputs = root.find('inputs')
keys = t_inputs.findall('key')
key = [ key for key in keys if key.get('id') == key_id ]
try:
t_inputs.remove(key[0])
return True
except Exception, e:
print(e)
return False
def addPaging(self, paging):
"""Adds paging to binder
"""
if not self.paging:
self.paging = paging
root = self.etree
try:
root.append(paging.etree)
return True
except Exception, e:
print(e)
return False
def removePaging(self,):
"""Removes paging from Binder
"""
root = self.etree
t_paging = root.find('paging')
try:
root.remove(t_paging)
return True
except Exception, e:
print(e)
return False
class InputKey(BaseInput):
"""Class representing a key of an Input
"""
def __init__(self, *args, **kwargs):
super(InputKey, self).__init__('key', *args, **kwargs)
class InputValue(BaseInput):
"""Class representing value under an Input
"""
def __init__(self, *args, **kwargs):
super(InputValue, self).__init__('value', *args, **kwargs)
class InputMap(BaseInput):
"""Class representing map under an Input
"""
def __init__(self, *args, **kwargs):
super(InputMap, self).__init__('map', *args, **kwargs)
class PagingPage(BasePaging):
def __init__(self, start, pageSize, total):
super(PagingPage, self).__init__('page', start=start, pageSize=pageSize, total=total)
class PagingOffset(BasePaging):
def __init__(self, matrix, start, pageSize, total):
super(PagingOffset, self).__init__('offset', matrix, start=start, pageSize=pageSize, total=total)
self.matrix = str(matrix).lower()
self.etree.set('matrix', self.matrix)
class PagingUrl(BasePaging):
def __init__(self, nextpage):
super(PagingUrl, self).__init__('url', nextpage=nextpage)
class BinderMeta(type):
INPUT_KEY = ['name', 'itemPath', 'produces', 'pollingFrequencySeconds', 'urls', 'keys', 'pages']
def __new__(cls, name, bases, dct):
if name != 'BinderModel':
binder_attr = {key: value for (key, value) in dct.items() if key in cls.INPUT_KEY}
binder_attr['inputs'] = [ value for value in dct.values() if isinstance(value, BaseInput)]
paging = [ value for value in dct.values() if isinstance(value, BasePaging)]
if paging :
binder_attr['paging'] = paging[0]
binder = Binder(**binder_attr)
if dct.get('function',None):
binder.addFunction(func_code='', from_file=dct['function'])
dct = { key : value for (key, value) in dct.items() if key in ('__module__', '__metaclass__')}
dct['binder'] = binder
# Add KeyException Management
return super(BinderMeta,cls).__new__(cls, name, (Binder,), dct)
def toxml(cls,):
return xtree.tostring(cls.binder.etree)
class BinderModel(Binder):
__metaclass__ = BinderMeta
|
Python
| 0.000001
|
@@ -475,9 +475,8 @@
nds=
-3
0, u
@@ -678,16 +678,51 @@
Seconds)
+ if pollingFrequencySeconds else ''
%0A
@@ -1522,16 +1522,28 @@
paging')
+ and item%5B1%5D
:%0A
|
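The default-value fix above, reduced to the single conditional it introduces; 0 acts as the new "unset" default so no attribute text is emitted when the caller passes nothing.

# Only stringify a polling frequency that was actually given.
def polling_attr(pollingFrequencySeconds=0):
    return str(pollingFrequencySeconds) if pollingFrequencySeconds else ''

print(repr(polling_attr()))    # ''
print(repr(polling_attr(30)))  # '30'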
dca9df0e0a01fb383500bd8e8172263e19a1d453
|
Fix tests
|
lcapy/tests/test_super.py
|
lcapy/tests/test_super.py
|
from lcapy import *
import unittest
import sympy as sym
class LcapyTester(unittest.TestCase):
"""Unit tests for lcapy
"""
def assertEqual2(self, ans1, ans2, comment):
try:
self.assertEqual(ans1, ans2, comment)
except AssertionError as e:
ans1.pprint()
ans2.pprint()
raise AssertionError(e)
def test_Voltage_properties(self):
self.assertEqual(SuperpositionVoltage(3).is_dc, True, "Voltage(3).is_dc")
self.assertEqual(SuperpositionVoltage(PhasorVoltage(3)).is_ac, True, "Voltage(Vphasor(3)).is_ac")
self.assertEqual(SuperpositionVoltage(ConstantVoltage(2), PhasorVoltage(3)).is_ac, False,
"Voltage(Vconst(2), Vphasor(3)).is_ac")
self.assertEqual(SuperpositionVoltage(ConstantVoltage(2), PhasorVoltage(3)).is_ac, False,
"Voltage(Vconst(2), Vphasor(3)).is_dc")
def test_Voltage_add_sub_dc(self):
self.assertEqual2(SuperpositionVoltage(3).dc, 3, "Voltage(3).dc")
self.assertEqual2(SuperpositionVoltage(2, 3).dc, 5, "Voltage(2, 3).dc")
self.assertEqual2(SuperpositionVoltage(2, 3).ac, {}, "Voltage(2, 3).ac")
self.assertEqual2(-SuperpositionVoltage(2).dc, -2, "-Voltage(2).dc")
self.assertEqual2(SuperpositionVoltage(2) + SuperpositionVoltage(3), SuperpositionVoltage(5),
"Voltage(2) + Voltage(3)")
self.assertEqual2(SuperpositionVoltage(2) - SuperpositionVoltage(3), SuperpositionVoltage(-1),
"Voltage(2) - Voltage(3)")
def test_Current_add_sub_dc(self):
self.assertEqual2(SuperpositionCurrent(3).dc, 3, "Current(3).dc")
self.assertEqual2(SuperpositionCurrent(2, 3).dc, 5, "Current(2, 3).dc")
self.assertEqual2(SuperpositionCurrent(2, 3).ac, {}, "Current(2, 3).ac")
self.assertEqual2(-SuperpositionCurrent(2).dc, -2, "-Current(2).dc")
self.assertEqual2(SuperpositionCurrent(2) + SuperpositionCurrent(3), SuperpositionCurrent(5),
"Current(2) + Current(3)")
self.assertEqual2(SuperpositionCurrent(2) - SuperpositionCurrent(3), SuperpositionCurrent(-1),
"Current(2) - Current(3)")
def test_Voltage_noise(self):
self.assertEqual((AngularFourierDomainNoiseVoltage(3) + AngularFourierDomainNoiseVoltage(4)).expr, AngularFourierDomainNoiseVoltage(5).expr, "Vnoisy(3) + Vnoisy(4)")
self.assertEqual((SuperpositionVoltage(AngularFourierDomainNoiseVoltage(3)) + SuperpositionVoltage(AngularFourierDomainNoiseVoltage(4))).n.expr,
SuperpositionVoltage(AngularFourierDomainNoiseVoltage(5)).n.expr,
"Voltage(Vnoisy(3)) + Voltage(Vnoisy(4))")
def test_Voltage_has(self):
a = SuperpositionVoltage('3 * exp(-t) * t * a')
self.assertEqual(a.has(3), True, "has(3)")
self.assertEqual(a.has(4), False, "has(4)")
self.assertEqual(a.has(t), True, "has(t)")
self.assertEqual(a.has_symbol(t), True, "has_symbol(t)")
self.assertEqual(a.has_symbol('a'), True, "has_symbol(a)")
self.assertEqual(a.has_symbol('b'), False, "has_symbol(b)")
def test_Voltage_transform(self):
V1 = SuperpositionVoltage('3 * exp(-2 * t)')
self.assertEqual(V1.transform(s), 3 / (s + 2), 'transform(s)')
self.assertEqual(V1.transform(jomega), 3 / (j * omega + 2), 'transform(jomega)')
V2 = SuperpositionVoltage('3 * exp(-2 * t) * u(t)')
self.assertEqual(V2.transform(s), 3 / (s + 2), 'transform(s)')
self.assertEqual(V2.transform(jomega), 3 / (j * omega + 2), 'transform(jomega)')
self.assertEqual(simplify(V2.transform(f) - 3 / (j * 2 * pi * f + 2)), 0, 'transform(f)')
def test_Voltage_subs(self):
a = SuperpositionVoltage('V1')
b = a.subs('V1', 1)
c = SuperpositionVoltage(1)
self.assertEqual(b, c, "Voltage.subs")
def test_voltage_decompose(self):
V1 = SuperpositionVoltage('1 + 3 * u(t) + cos(2 * pi * 3 * t)')
self.assertEqual(V1.dc, 1, '.dc')
self.assertEqual(V1.transient, expr('3 * u(t)'), '.transient')
def test_Voltage_oneport(self):
V1 = V(3)
self.assertEqual(V1.V.oneport().V, V1.V, 'oneport')
def test_Current_oneport(self):
I1 = I(3)
self.assertEqual(I1.I.oneport().I, I1.I, 'oneport')
def test_Vname(self):
self.assertEqual(Vname('V', 't'), 'v(t)', 'v(t)')
self.assertEqual(Vname('V', 's'), 'V(s)', 'V(s)')
self.assertEqual(Vname('V', 'dc'), 'V', 'V')
def test_Iname(self):
self.assertEqual(Iname('I', 't'), 'i(t)', 'i(t)')
self.assertEqual(Iname('I', 's'), 'I(s)', 'I(s)')
self.assertEqual(Iname('I', 'dc'), 'I', 'I')
def test_Voltage_phasor(self):
V = SuperpositionVoltage(3 * sin(7 * t) + 2 * cos(14 * t))
self.assertEqual(V[7].magnitude, expr(3), 'magnitude')
self.assertEqual(V[14].omega, 14, 'omega')
|
Python
| 0.000003
|
@@ -13,16 +13,122 @@
mport *%0A
+from lcapy.phasor import PhasorDomainVoltage%0Afrom lcapy.superposition_voltage import SuperpositionVoltage%0A
import u
@@ -640,24 +640,30 @@
ltage(Phasor
+Domain
Voltage(3)).
@@ -768,32 +768,38 @@
ltage(2), Phasor
+Domain
Voltage(3)).is_a
@@ -946,16 +946,22 @@
, Phasor
+Domain
Voltage(
@@ -4781,24 +4781,65 @@
)', 'V(s)')%0A
+ # TODO: remove cache requirement%0A
self
@@ -4862,24 +4862,36 @@
me('V', 'dc'
+, cache=True
), 'V', 'V')
@@ -5091,16 +5091,28 @@
I', 'dc'
+, cache=True
), 'I',
|
b60ee89e5a0c8d689a08994325210b60e4a7fd23
|
add Cell properties edit_url, self_url and new_url
|
src/ekklesia_portal/helper/cell.py
|
src/ekklesia_portal/helper/cell.py
|
from __future__ import annotations
from webob import Request
from typing import Any, Iterable, Dict, Type, ClassVar
import case_conversion
import inspect
import jinja2
from ekklesia_portal.helper.utils import cached_property
from markupsafe import Markup
_cell_registry: Dict[Any, Cell] = {}
def find_cell_by_model_instance(model) -> Cell:
return _cell_registry[model.__class__]
class CellMeta(type):
"""
Registers Cell types that are bound to a Model class.
"""
def __init__(cls, name, bases, attrs, **kwargs):
model = getattr(cls, 'model', None)
if model:
_cell_registry[model] = cls
if 'template_prefix' not in cls.__dict__:
module_path = cls.__module__.split('.')
if module_path[1] == 'concepts':
cls.template_prefix = module_path[2]
else:
cls.template_prefix = None
return super().__init__(name, bases, attrs)
def __new__(meta, name, bases, dct):
# only for subclasses, not for Cell class
if bases:
for k, v in dct.items():
if (not k.startswith('_')
and inspect.isfunction(v)
and not hasattr(v, '_view')
and len(inspect.signature(v).parameters) == 1):
# turn functions with single argument (self) into cached properties
dct[k] = cached_property(v)
return super().__new__(meta, name, bases, dct)
class Cell(metaclass=CellMeta):
"""
View model base class which is basically a wrapper around a template.
Templates can access attributes of the cell and some selected model properties directly.
"""
model: ClassVar[Any]
model_properties: Iterable[str] = []
layout = True
template_prefix: ClassVar[str]
#: class that should be used to mark safe HTML output. Must be a subclass of str.
markup_class: Type[str] = Markup
def __init__(self,
model,
request: Request,
collection: Iterable=None,
layout: bool=None,
parent: Cell=None,
template_path: str=None,
**options) -> None:
"""
"""
self._model = model
self._request = request
self.current_user = request.current_user
self._app = request.app
self._s = request.app.settings
self.parent = parent
self.collection = collection
self._template_path = template_path
self.options = options
# if no parent is set, the layout is enabled by default. This can be overriden by the `layout` arg
if layout is not None:
self.layout = layout
elif parent is None:
self.layout = True
else:
self.layout = False
@property
def template_path(self) -> str:
if self._template_path is None:
cell_name = self.__class__.__name__
if not cell_name.endswith('Cell'):
raise Exception('Cell name does not end with Cell, you must override template_path!')
name = case_conversion.snakecase(cell_name[:-len('Cell')])
if self.template_prefix is not None:
self._template_path = f"{self.template_prefix}/{name}.j2.jade"
else:
self._template_path = f"{name}.j2.jade"
return self._template_path
def render_template(self, template_path) -> str:
return self.markup_class(self._request.render_template(template_path, _cell=self))
def show(self):
return self.render_template(self.template_path)
# template helpers
def link(self, model, name='', *args, **kwargs) -> str:
return self._request.link(model, name, *args, **kwargs)
def class_link(self, model_class, variables: Dict[str, Any], name='', *args, **kwargs) -> str:
return self._request.class_link(model_class, variables, name, *args, **kwargs)
def cell(self, model, layout: bool=None, view_name='', **options) -> Cell:
"""Look up a cell by model and create an instance.
The parent cell is set to self which also means that it will be rendered without layout by default.
"""
cell_class = find_cell_by_model_instance(model)
return cell_class(model, self._request, layout=layout, parent=self, **options)
def render_cell(self,
model=None,
view_name: str=None,
collection: Iterable=None,
separator: str=None,
layout: bool=None,
**options) -> str:
"""Look up a cell by model and render it to HTML.
The parent cell is set to self which also means that it will be rendered without layout by default.
"""
view_method = view_name if view_name is not None else 'show'
if collection is not None:
if model is not None:
raise ValueError("model and collection arguments cannot be used together!")
parts = [getattr(self.cell(item, layout=layout, **options), view_method)() for item in collection]
if separator is None:
separator = "\n"
return self.markup_class(separator.join(parts))
else:
return getattr(self.cell(model, layout=layout, **options), view_method)()
@staticmethod
def view(func):
"""Decorator for cell methods that can be used as alternative views.
"""
func._view = True
return func
@cached_property
def self_link(self) -> str:
return self.link(self._model)
# magic starts here...
def __getattr__(self, name):
if name in self.model_properties:
return getattr(self._model, name)
raise AttributeError(f"{self.__class__.__name__} has no attribute '{name}'." \
" Is it from the model? Did you forget to add it to 'model_properties'?")
def __getitem__(self, name):
try:
return getattr(self, name)
except AttributeError as e:
# standard __getitem__ raises an KeyError, let's do the same
raise KeyError(e.args[0])
def __contains__(self, name):
return name in self.model_properties or hasattr(self, name)
class JinjaCellContext(jinja2.runtime.Context):
"""
Custom jinja context with the ability to look up template variables in a cell (view model)
"""
def __init__(self, environment, parent, name, blocks):
super().__init__(environment, parent, name, blocks)
self._cell = parent.get('_cell')
def resolve_or_missing(self, key):
if self._cell is not None:
if key == "_request":
return self._cell._request
if key in self._cell:
return self._cell[key]
return super().resolve_or_missing(key)
def __contains__(self, name):
if self._cell and name in self._cell:
return True
return super().__contains__(name)
class JinjaCellEnvironment(jinja2.Environment):
"""
Example jinja environment class which uses the JinjaCellContext
"""
context_class = JinjaCellContext
|
Python
| 0
|
@@ -5704,16 +5704,305 @@
model)%0A%0A
+ @cached_property%0A def self_url(self) -%3E str:%0A return self.link(self._model)%0A%0A @cached_property%0A def edit_url(self) -%3E str:%0A return self.link(self._model, '+edit')%0A%0A @cached_property%0A def new_url(self) -%3E str:%0A return self.link(self._model, '+new')%0A%0A
# ma
|
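A condensed sketch of the CellMeta behaviour above, using Python 3.8+'s functools.cached_property in place of the project's own helper (a substitution on my part): one-argument methods become attributes computed once per instance.

import inspect
from functools import cached_property

class AutoCachedMeta(type):
    def __new__(meta, name, bases, dct):
        if bases:  # only transform subclasses, not the base class itself
            for k, v in list(dct.items()):
                if (not k.startswith('_') and inspect.isfunction(v)
                        and len(inspect.signature(v).parameters) == 1):
                    dct[k] = cached_property(v)
        return super().__new__(meta, name, bases, dct)

class Base(metaclass=AutoCachedMeta):
    pass

class Demo(Base):
    def answer(self):
        print("computed once")
        return 42

d = Demo()
print(d.answer, d.answer)  # "computed once" prints a single time, then: 42 42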
48e280177123902001e4ff6fb3e178190b435054
|
fix test for Exscript.workqueue.MainLoop.
|
tests/Exscript/workqueue/MainLoopTest.py
|
tests/Exscript/workqueue/MainLoopTest.py
|
import sys, unittest, re, os.path, threading
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src'))
from Exscript.workqueue import MainLoop
class MainLoopTest(unittest.TestCase):
CORRELATE = MainLoop
def setUp(self):
pass
def testMainLoop(self):
lock = threading.Lock()
data = {'sum': 0, 'randsum': 0}
ml = MainLoop.MainLoop()
nop = lambda x: None
for i in range(12345):
ml.enqueue(nop, name = 'test', times = 1, data = None)
self.assertEqual(0, data['sum'])
# Note: Further testing is done in WorkQueueTest.py
def suite():
return unittest.TestLoader().loadTestsFromTestCase(MainLoopTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity = 2).run(suite())
|
Python
| 0
|
@@ -163,16 +163,62 @@
MainLoop
+%0Afrom Exscript.workqueue.Job import ProcessJob
%0A%0Aclass
@@ -444,16 +444,26 @@
ainLoop(
+ProcessJob
)%0A
|
f62c53af583657ee13d220edbb25803bbc3c9c22
|
Fix style
|
tests/cupy_tests/core_tests/test_core.py
|
tests/cupy_tests/core_tests/test_core.py
|
import unittest
import numpy
import cupy
from cupy.core import core
class TestGetSize(unittest.TestCase):
def test_none(self):
self.assertEqual(core.get_size(None), ())
def test_list(self):
self.assertEqual(core.get_size([1, 2]), (1, 2))
def test_tuple(self):
self.assertEqual(core.get_size((1, 2)), (1, 2))
def test_int(self):
self.assertEqual(core.get_size(1), (1,))
def test_invalid(self):
with self.assertRaises(ValueError):
core.get_size(1.0)
class TestInternalProd(unittest.TestCase):
def test_empty(self):
self.assertEqual(core.internal_prod([]), 1)
def test_one(self):
self.assertEqual(core.internal_prod([2]), 2)
def test_two(self):
self.assertEqual(core.internal_prod([2, 3]), 6)
class TestGetStridesForNocopyReshape(unittest.TestCase):
def test_different_size(self):
a = core.ndarray((2, 3))
self.assertEqual(core._get_strides_for_nocopy_reshape(a, (1, 5)),
[])
def test_one(self):
a = core.ndarray((1,), dtype=cupy.int32)
self.assertEqual(core._get_strides_for_nocopy_reshape(a, (1, 1, 1)),
[4, 4, 4])
def test_normal(self):
        # TODO(unno): write test for normal case
pass
class TestGetContiguousStrides(unittest.TestCase):
def test_zero(self):
self.assertEqual(core._get_contiguous_strides((), 1), [])
def test_one(self):
self.assertEqual(core._get_contiguous_strides((1,), 2), [2])
def test_two(self):
self.assertEqual(core._get_contiguous_strides((1, 2), 3), [6, 3])
def test_three(self):
self.assertEqual(core._get_contiguous_strides((1, 2, 3), 4),
[24, 12, 4])
class TestGetCContiguity(unittest.TestCase):
def test_zero_in_shape(self):
self.assertTrue(core._get_c_contiguity((1, 0, 1), (1, 1, 1), 3))
def test_normal(self):
# TODO(unno): write test for normal case
pass
class TestInferUnknownDimension(unittest.TestCase):
def test_known_all(self):
self.assertEqual(core._infer_unknown_dimension((1, 2, 3), 6),
[1, 2, 3])
def test_multiple_unknown(self):
with self.assertRaises(ValueError):
core._infer_unknown_dimension((-1, 1, -1), 10)
def test_infer(self):
self.assertEqual(core._infer_unknown_dimension((-1, 2, 3), 12),
[2, 2, 3])
class TestArray(unittest.TestCase):
def test_unsupported_type(self):
arr = numpy.ndarray((2,3), dtype=object)
with self.assertRaises(ValueError):
core.array(arr)
|
Python
| 0.000001
|
@@ -2603,16 +2603,17 @@
rray((2,
+
3), dtyp
|
523216bbf6f21757651e41ac307bc296041b7963
|
load nonlinux_config if the platform is not linux
|
tests/docker/test_async_docker_client.py
|
tests/docker/test_async_docker_client.py
|
import os
import warnings
from tornado.testing import AsyncTestCase, gen_test
from remoteappmanager.docker.async_docker_client import AsyncDockerClient
from tests.docker.config import nonlinux_config
from tests import utils
class TestAsyncDockerClient(AsyncTestCase):
def setUp(self):
super().setUp()
# Due to a python requests design choice, we receive a warning about
# leaking connection. This is expected and pretty much out of our
# authority but it can be annoying in tests, hence we suppress the
# warning. See issue simphony-remote/10
warnings.filterwarnings(action="ignore",
message="unclosed",
category=ResourceWarning)
def tearDown(self):
super().tearDown()
warnings.filterwarnings(action="default",
message="unclosed",
category=ResourceWarning)
@gen_test
def test_info(self):
client = AsyncDockerClient()
client.client = utils.mock_docker_client()
response = yield client.info()
# Test contents of response
self.assertIsInstance(response, dict)
self.assertIn("ID", response)
@gen_test
def test_real_connection(self):
config = None
if "DOCKER_HOST" not in os.environ:
config = nonlinux_config()
if not os.path.exists(config.tls_cert):
self.skipTest("Certificates are not available. Skipping.")
client = AsyncDockerClient(config=config)
response = yield client.info()
# Test contents of response
self.assertIsInstance(response, dict)
self.assertIn("ID", response)
|
Python
| 0.000477
|
@@ -3,16 +3,27 @@
port os%0A
+import sys%0A
import w
@@ -1325,16 +1325,17 @@
= None%0A
+%0A
@@ -1368,16 +1368,44 @@
.environ
+ and sys.platform != 'linux'
:%0A
|
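The guard added by the diff above, isolated into a pure function for clarity; pick_config and its string return value are illustrative stand-ins for the real config object.

import os
import sys

# Fall back to the non-Linux config only when DOCKER_HOST is unset
# and the platform is not Linux.
def pick_config(environ=os.environ, platform=sys.platform):
    if "DOCKER_HOST" not in environ and platform != "linux":
        return "nonlinux_config"   # stand-in for nonlinux_config()
    return None                    # default client configuration

print(pick_config(environ={}, platform="darwin"))                          # nonlinux_config
print(pick_config(environ={"DOCKER_HOST": "tcp://x"}, platform="darwin"))  # None
print(pick_config(environ={}, platform="linux"))                           # None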
6bec22cd51288c94dff40cf0c973b975538040d5
|
Increase timeout for test_long_running_job test
|
tests/integration/minion/test_timeout.py
|
tests/integration/minion/test_timeout.py
|
# -*- coding: utf-8 -*-
'''
Tests for various minion timeouts
'''
# Import Python libs
from __future__ import absolute_import
import os
import sys
import salt.utils.platform
# Import Salt Testing libs
from tests.support.case import ShellCase
class MinionTimeoutTestCase(ShellCase):
'''
Test minion timing functions
'''
def test_long_running_job(self):
'''
Test that we will wait longer than the job timeout for a minion to
return.
'''
# Launch the command
sleep_length = 30
if salt.utils.platform.is_windows():
popen_kwargs = {'env': dict(os.environ, PYTHONPATH=';'.join(sys.path))}
else:
popen_kwargs = None
ret = self.run_salt(
'minion test.sleep {0}'.format(sleep_length),
timeout=45,
catch_stderr=True,
popen_kwargs=popen_kwargs,
)
self.assertTrue(isinstance(ret[0], list), 'Return is not a list. Minion'
' may have returned error: {0}'.format(ret))
self.assertEqual(len(ret[0]), 2, 'Standard out wrong length {}'.format(ret))
self.assertTrue('True' in ret[0][1], 'Minion did not return True after '
'{0} seconds. ret={1}'.format(sleep_length, ret))
|
Python
| 0.000008
|
@@ -822,10 +822,10 @@
out=
-45
+90
,%0A
|
d03c3ed9212fc341964295ff167c512daecbb8bb
|
Improve auto_send validation
|
sbc_email/models/correspondence.py
|
sbc_email/models/correspondence.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Roman Zoller, Emanuel Cino, Michael Sandoz
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp import models, fields, api
class Correspondence(models.Model):
_inherit = 'correspondence'
email_id = fields.Many2one('mail.mail', 'E-mail')
email_sent_date = fields.Datetime(
'E-mail sent',
related='email_id.sent_date', store=True)
email_read = fields.Boolean(
compute='_compute_email_read', store=True
)
##########################################################################
# FIELDS METHODS #
##########################################################################
@api.multi
@api.depends('email_id.state')
def _compute_email_read(self):
for letter in self:
email = letter.email_id
if email and email.state == 'received':
letter.email_read = True
else:
letter.email_read = False
##########################################################################
# PUBLIC METHODS #
##########################################################################
@api.one
def process_letter(self):
""" Method called when B2S letter is Published. This will send the
letter to the sponsor via Sendgrid e-mail.
:param: download_image: Set to False to avoid downloading the
letter image from GMC and attaching it.
"""
super(Correspondence, self).process_letter()
partner = self.correspondant_id
if partner.email and partner.delivery_preference == 'digital' and not\
self.email_id:
template = False
if self.partner_needs_explanations():
template = self.env.ref('sbc_email.change_system')
else:
template = self.env.ref('sbc_email.new_letter')
# Create email
email_vals = {
'email_from': self.env['ir.config_parameter'].get_param(
'sbc_email.from_address'),
'recipient_ids': [(4, partner.id)],
}
# EXCEPTION FOR DEMAUREX : send to Delafontaine
if partner.ref == '1502623':
email_vals['email_to'] = 'eric.delafontaine@aligro.ch'
del email_vals['recipient_ids']
self.email_id = self.env['mail.compose.message'].with_context(
lang=partner.lang).create_emails(
template, self.id, email_vals)
if self._can_auto_send():
self.email_id.send_sendgrid()
def get_image(self, user=None):
""" Mark the e-mail as read. """
data = super(Correspondence, self).get_image(user)
# User is None if the sponsor called the service.
if self.email_id and self.email_id.state == 'sent' and user is None:
self.email_id.state = 'received'
return data
@api.multi
def partner_needs_explanations(self):
""" Returns true if the partner never received explanations
about the new correspondence system. The partner should have a
sponsorship that began before the transition of system.
"""
self.ensure_one()
partner_id = self.correspondant_id.id
oldest_sponsorship = self.env['recurring.contract'].search([
('correspondant_id', '=', partner_id),
('type', 'like', 'S')], order='activation_date asc', limit=1)
activation_date = fields.Date.from_string(
oldest_sponsorship.activation_date)
transition_date = fields.Date.from_string('2016-01-25')
other_letters = self.search([
('correspondant_id', '=', partner_id),
('direction', '=', 'Beneficiary To Supporter'),
('id', '!=', self.id)])
return (activation_date < transition_date and not other_letters)
##########################################################################
# PRIVATE METHODS #
##########################################################################
def _can_auto_send(self):
""" Tells if we can automatically send the letter by e-mail or should
require manual validation before.
"""
self.ensure_one()
valid = False
# If sponsor does not need translation, valid is True by default
common = self.supporter_languages_ids & self.beneficiary_language_ids
if common:
valid = (
self.sponsorship_id.state == 'active' and
self.communication_type_ids.name != 'Final Letter' and
self.correspondant_id.ref != '1502623' # Demaurex
)
else:
# Check that the translation is filled
valid = self.page_ids.filtered('translated_text')
return valid
|
Python
| 0.000001
|
@@ -4800,72 +4800,52 @@
-# If sponsor does not need translation, valid is True by default
+partner_langs = self.supporter_languages_ids
%0A
@@ -4862,35 +4862,20 @@
n =
-self.suppo
+pa
rt
+n
er_lang
-uages_id
s &
@@ -4923,16 +4923,79 @@
common:%0A
+ types = self.communication_type_ids.mapped('name')%0A
@@ -5086,58 +5086,155 @@
-self.communication_type_ids.name != 'Final Letter'
+'Final Letter' not in types and%0A 'New Sponsor Letter' not in types and%0A self.translation_language_id in partner_langs
and
@@ -5441,16 +5441,84 @@
d_text')
+ and %5C%0A self.translation_language_id in partner_langs
%0A
|
0bb2ebc52e720a3d693ca14f3621fd710ea36d4b
|
use make_result_iq
|
tests/twisted/vcard/test-avatar-async.py
|
tests/twisted/vcard/test-avatar-async.py
|
"""
Test support for retrieving avatars asynchronously using RequestAvatars.
"""
import base64
import hashlib
from servicetest import EventPattern
from gabbletest import exec_test, acknowledge_iq
def test(q, bus, conn, stream):
conn.Connect()
_, iq_event = q.expect_many(
EventPattern('dbus-signal', signal='StatusChanged', args=[0, 1]),
EventPattern('stream-iq', to=None, query_ns='vcard-temp',
query_name='vCard'))
acknowledge_iq(stream, iq_event.stanza)
handle = conn.RequestHandles(1, ['bob@foo.com'])[0]
conn.Avatars.RequestAvatars([handle])
iq_event = q.expect('stream-iq', to='bob@foo.com', query_ns='vcard-temp',
query_name='vCard')
iq = iq_event.stanza
vcard = iq_event.query
photo = vcard.addElement('PHOTO')
photo.addElement('TYPE', content='image/png')
photo.addElement('BINVAL', content=base64.b64encode('hello'))
iq['type'] = 'result'
stream.send(iq)
event = q.expect('dbus-signal', signal='AvatarRetrieved')
assert event.args[0] == handle
assert event.args[1] == hashlib.sha1('hello').hexdigest()
assert event.args[2] == 'hello'
assert event.args[3] == 'image/png'
# Request again; this request should be satisfied from the avatar cache.
conn.Avatars.RequestAvatars([handle])
event = q.demand('dbus-signal', signal='AvatarRetrieved')
assert event.args[0] == handle
assert event.args[1] == hashlib.sha1('hello').hexdigest()
assert event.args[2] == 'hello'
assert event.args[3] == 'image/png'
conn.Disconnect()
q.expect('dbus-signal', signal='StatusChanged', args=[2, 1])
if __name__ == '__main__':
exec_test(test)
|
Python
| 0.000003
|
@@ -191,16 +191,32 @@
ledge_iq
+, make_result_iq
%0A%0Adef te
@@ -725,16 +725,39 @@
iq =
+ make_result_iq(stream,
iq_even
@@ -764,16 +764,17 @@
t.stanza
+)
%0A vca
@@ -784,20 +784,28 @@
= iq
-_event.query
+.firstChildElement()
%0A
@@ -959,34 +959,8 @@
'))%0A
- iq%5B'type'%5D = 'result'%0A
|
cf4d8318557d971cee1869fe8cbac82cc6316020
|
Change expected exception
|
plotly/tests/test_core/test_file/test_file.py
|
plotly/tests/test_core/test_file/test_file.py
|
"""
test_meta:
==========
A module intended for use with Nose.
"""
import random
import string
import requests
from unittest import TestCase
from nose.plugins.attrib import attr
import plotly.plotly as py
from plotly.exceptions import PlotlyRequestError
@attr('slow')
class FolderAPITestCase(TestCase):
def setUp(self):
py.sign_in('PythonTest', '9v9f20pext')
def _random_filename(self):
choice_chars = string.ascii_letters + string.digits
random_chars = [random.choice(choice_chars) for _ in range(10)]
unique_filename = 'Valid Folder ' + ''.join(random_chars)
return unique_filename
def test_create_folder(self):
try:
py.file_ops.mkdirs(self._random_filename())
except PlotlyRequestError as e:
self.fail('Expected this *not* to fail! Status: {}'
.format(e.status_code))
def test_create_nested_folders(self):
first_folder = self._random_filename()
nested_folder = '{0}/{1}'.format(first_folder, self._random_filename())
try:
py.file_ops.mkdirs(nested_folder)
except PlotlyRequestError as e:
self.fail('Expected this *not* to fail! Status: {}'
.format(e.status_code))
def test_duplicate_folders(self):
first_folder = self._random_filename()
py.file_ops.mkdirs(first_folder)
try:
py.file_ops.mkdirs(first_folder)
except requests.exceptions.RequestException as e:
self.assertTrue(400 <= e.response.status_code < 500)
else:
self.fail('Expected this to fail!')
|
Python
| 0.000002
|
@@ -1470,44 +1470,26 @@
ept
-requests.exceptions.RequestException
+PlotlyRequestError
as
@@ -1532,17 +1532,8 @@
= e.
-response.
stat
|
80c5f67f483ad24308c3a348b1c5a82780459c6b
|
Modify cql_sac_benchmark to use rlds data.
|
tf_agents/benchmark/cql_sac_benchmark.py
|
tf_agents/benchmark/cql_sac_benchmark.py
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Executes CQL-SAC benchmarks.
Benchmarks for CQL Kumar20 are based on https://arxiv.org/abs/2006.04779.
"""
import os
import time
from absl import logging
import gin
import tensorflow as tf
from tf_agents.benchmark import utils
from tf_agents.benchmark.perfzero_benchmark import PerfZeroBenchmark
from tf_agents.examples.cql_sac.kumar20 import cql_sac_train_eval
# TODO(b/205172779): Data needs moved to a team based URL.
# LINT.IfChange
TRANSITIONS_DIR_NAME = 'transitions'
# LINT.ThenChange(tf_agents/copy.bara.sky)
class CqlSacKumar20Return(PerfZeroBenchmark):
"""Benchmark return tests for CQL-SAC Kumar20."""
def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
"""Benchmarks for CQL-SAC Kumar20.
Args:
output_dir: directory where to output e.g. log files
root_data_dir: directory under which to look for dataset
**kwargs: arbitrary named arguments. This is needed to make the
constructor forward compatible in case PerfZero provides more named
arguments before updating the constructor.
"""
self.root_data_dir = os.path.join(root_data_dir, TRANSITIONS_DIR_NAME)
super(CqlSacKumar20Return, self).__init__(output_dir=output_dir)
def benchmark_halfcheetah_medium_v0(self):
"""Benchmarks MuJoCo HalfCheetah to 1M steps."""
self.setUp()
output_dir = self._get_test_output_dir('halfcheetah_medium_v0_02_eval')
dataset_path = self.root_data_dir
start_time_sec = time.time()
gin.parse_config_file(
'tf_agents/examples/cql_sac/kumar20/configs/mujoco_medium.gin'
)
cql_sac_train_eval.train_eval(
dataset_path=dataset_path,
root_dir=output_dir,
env_name='halfcheetah-medium-v0',
num_gradient_updates=500000, # Number of iterations.
learner_iterations_per_call=500,
data_shuffle_buffer_size=10000,
data_num_shards=50,
data_parallel_reads=500,
data_prefetch=1000000,
eval_interval=10000)
wall_time_sec = time.time() - start_time_sec
event_file = utils.find_event_log(os.path.join(output_dir, 'eval'))
values, _ = utils.extract_event_log_values(
event_file, 'Metrics/AverageReturn', start_step=10000)
# Min/Max ranges are very large to only hard fail if very broken. The system
# monitoring the results owns looking for anomalies. These numbers are based
# on the results that we were getting in MLCompass as of 04-NOV-2021.
# Results at 500k steps and 1M steps are similar enough to not make it worth
# running 1M.
metric_500k = self.build_metric(
'average_return_at_env_step500000',
values[500000],
min_value=4400,
max_value=5400)
self.report_benchmark(
wall_time=wall_time_sec, metrics=[metric_500k], extras={})
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
tf.test.main()
|
Python
| 0.00147
|
@@ -969,164 +969,8 @@
al%0A%0A
-# TODO(b/205172779): Data needs moved to a team based URL.%0A# LINT.IfChange%0ATRANSITIONS_DIR_NAME = 'transitions'%0A# LINT.ThenChange(tf_agents/copy.bara.sky)%0A%0A
%0Acla
@@ -1515,83 +1515,8 @@
%22%22%22%0A
- self.root_data_dir = os.path.join(root_data_dir, TRANSITIONS_DIR_NAME)%0A
@@ -1776,46 +1776,8 @@
l')%0A
- dataset_path = self.root_data_dir%0A
@@ -1948,43 +1948,8 @@
al(%0A
- dataset_path=dataset_path,%0A
@@ -2011,24 +2011,82 @@
medium-v0',%0A
+ dataset_name='d4rl_mujoco_halfcheetah/v0-medium',%0A
num_
|
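Decoded from the %-escaped hunks above, the commit drops the local transitions-directory plumbing (TRANSITIONS_DIR_NAME, root_data_dir, dataset_path) and names the dataset directly instead. A sketch of the train_eval call as it reads after the change, with every other argument kept from the record:

cql_sac_train_eval.train_eval(
    root_dir=output_dir,
    env_name='halfcheetah-medium-v0',
    dataset_name='d4rl_mujoco_halfcheetah/v0-medium',  # replaces dataset_path
    num_gradient_updates=500000,  # Number of iterations.
    learner_iterations_per_call=500,
    data_shuffle_buffer_size=10000,
    data_num_shards=50,
    data_parallel_reads=500,
    data_prefetch=1000000,
    eval_interval=10000)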
8ea43f44d6bca215909f6a3435b89f596442a863
|
add get_permission_object to PollExportView, fixes #3700
|
meinberlin/apps/polls/exports.py
|
meinberlin/apps/polls/exports.py
|
from django.utils.translation import ugettext as _
from rules.contrib.views import PermissionRequiredMixin
from adhocracy4.comments.models import Comment
from adhocracy4.exports import mixins
from adhocracy4.exports import views as export_views
from adhocracy4.polls import models as poll_models
from meinberlin.apps.users.models import User
class PollCommentExportView(
PermissionRequiredMixin,
mixins.ExportModelFieldsMixin,
mixins.UserGeneratedContentExportMixin,
mixins.ItemExportWithLinkMixin,
mixins.CommentExportWithRepliesToMixin,
export_views.BaseItemExportView
):
model = Comment
fields = ['id', 'comment', 'created']
permission_required = 'a4projects.change_project'
def get_permission_object(self):
return self.module.project
def get_queryset(self):
comments = (
Comment.objects.filter(poll__module=self.module) |
Comment.objects.filter(parent_comment__poll__module=self.module)
)
return comments
def get_virtual_fields(self, virtual):
virtual.setdefault('id', _('ID'))
virtual.setdefault('comment', _('Comment'))
virtual.setdefault('created', _('Created'))
return super().get_virtual_fields(virtual)
@property
def raise_exception(self):
return self.request.user.is_authenticated
class PollExportView(
PermissionRequiredMixin,
export_views.BaseItemExportView
):
permission_required = 'a4projects.change_project'
def get_queryset(self):
creators_vote = poll_models.Vote.objects.filter(
choice__question__poll=self.poll).values_list('creator', flat=True)
creators_answer = poll_models.Answer.objects.filter(
question__poll=self.poll).values_list('creator', flat=True)
creator_ids = list(set(creators_vote).union(set(creators_answer)))
return User.objects.filter(pk__in=creator_ids)
@property
def poll(self):
return poll_models.Poll.objects.get(module=self.module)
@property
def questions(self):
return self.poll.questions.all()
def get_virtual_fields(self, virtual):
virtual = super().get_virtual_fields(virtual)
for question in self.questions:
if question.is_open:
virtual = \
self.get_virtual_field_open_question(virtual, question)
else:
virtual = \
self.get_virtual_field_choice_question(virtual, question)
return virtual
def get_virtual_field_choice_question(self, virtual, choice_question):
for choice in choice_question.choices.all():
identifier = 'Q' + str(choice_question.pk) + '_A' + str(choice.pk)
virtual[(choice, False)] = identifier
if choice.is_other_choice:
identifier_answer = identifier + '_text'
virtual[(choice, True)] = identifier_answer
return virtual
def get_virtual_field_open_question(self, virtual, open_question):
identifier = 'Q' + str(open_question.pk)
virtual[(open_question, False)] = identifier
identifier_answer = identifier + '_text'
virtual[(open_question, True)] = identifier_answer
return virtual
def get_field_data(self, user, field):
field_object, is_text_field = field
if type(field_object) == poll_models.Choice:
votes_qs = poll_models.Vote.objects.filter(
choice=field_object,
creator=user)
if not is_text_field:
value = int(votes_qs.exists())
else:
vote = votes_qs.first()
if vote:
value = poll_models.OtherVote.objects.get(vote=vote).answer
else:
value = ''
else: # field_object is question
answers_qs = poll_models.Answer.objects.filter(
question=field_object,
creator=user)
if not is_text_field:
value = int(answers_qs.exists())
else:
answer = answers_qs.first()
if answer:
value = answer.answer
else:
value = ''
return value
|
Python
| 0
|
@@ -1510,32 +1510,105 @@
hange_project'%0A%0A
+ def get_permission_object(self):%0A return self.module.project%0A%0A
def get_quer
|
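Decoded, the hunk inserts a permission-object hook into PollExportView, mirroring the one PollCommentExportView already defines. A sketch of the class after the change (only the affected part shown):

class PollExportView(
    PermissionRequiredMixin,
    export_views.BaseItemExportView
):
    permission_required = 'a4projects.change_project'

    def get_permission_object(self):
        # Check 'a4projects.change_project' against the project that owns
        # this module, as the comment export view above already does.
        return self.module.project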
6cfc94d8a03439c55808090aa5e3a4f35c288887
|
Use assert_allclose so we can see the appveyor failure
|
menpodetect/tests/opencv_test.py
|
menpodetect/tests/opencv_test.py
|
from menpodetect.opencv import (load_opencv_frontal_face_detector,
load_opencv_eye_detector)
import menpo.io as mio
takeo = mio.import_builtin_asset.takeo_ppm()
def test_frontal_face_detector():
takeo_copy = takeo.copy()
opencv_detector = load_opencv_frontal_face_detector()
pcs = opencv_detector(takeo_copy)
assert len(pcs) == 1
assert takeo_copy.n_channels == 3
assert takeo_copy.landmarks['opencv_0'][None].n_points == 4
def test_frontal_face_detector_min_neighbors():
takeo_copy = takeo.copy()
opencv_detector = load_opencv_frontal_face_detector()
pcs = opencv_detector(takeo_copy, min_neighbours=100)
assert len(pcs) == 0
assert takeo_copy.n_channels == 3
def test_eye_detector():
takeo_copy = takeo.copy()
opencv_detector = load_opencv_eye_detector()
pcs = opencv_detector(takeo_copy, min_size=(5, 5))
assert len(pcs) == 1
assert takeo_copy.n_channels == 3
assert takeo_copy.landmarks['opencv_0'][None].n_points == 4
|
Python
| 0
|
@@ -1,8 +1,50 @@
+from numpy.testing import assert_allclose%0A
from men
@@ -944,33 +944,42 @@
)%0A assert
-
+_allclose(
len(pcs)
== 1%0A as
@@ -958,37 +958,37 @@
llclose(len(pcs)
+,
-==
1
+)
%0A assert take
|
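The hunks replace a bare assert on the detection count with numpy's assert_allclose, whose failure message prints the actual and desired values; that is what makes the AppVeyor failure visible. A self-contained sketch; the detections list here is a hypothetical stand-in for the pcs returned by the detector:

from numpy.testing import assert_allclose

detections = ['face_0']  # hypothetical stand-in for pcs
# On mismatch this raises with the actual vs. desired count in the
# message, unlike a bare `assert len(detections) == 1`.
assert_allclose(len(detections), 1)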
b8d0a7cbac6ab2415a1d059a1f68428e9312f3cb
|
Make our error page handlers work on Django 2.0 (#969)
|
judge/views/error.py
|
judge/views/error.py
|
import traceback
from django.shortcuts import render
from django.utils.translation import gettext as _
def error(request, context, status):
return render(request, 'error.html', context=context, status=status)
def error404(request):
# TODO: "panic: go back"
return render(request, 'generic-message.html', {
'title': _('404 error'),
'message': _('Could not find page "%s"') % request.path
}, status=404)
def error403(request):
return error(request, {'id': 'unauthorized_access',
'description': _('no permission for %s') % request.path,
'code': 403}, 403)
def error500(request):
return error(request, {'id': 'invalid_state',
'description': _('corrupt page %s') % request.path,
'traceback': traceback.format_exc(),
'code': 500}, 500)
|
Python
| 0
|
@@ -227,24 +227,40 @@
r404(request
+, exception=None
):%0A # TOD
@@ -462,32 +462,48 @@
error403(request
+, exception=None
):%0A return er
|
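Django 2.0 calls the 404 and 403 handlers as handler(request, exception), so the hunks give both views an optional second parameter; error500 keeps its single-argument signature, matching how Django invokes the 500 handler. The resulting views, decoded:

def error404(request, exception=None):
    # TODO: "panic: go back"
    return render(request, 'generic-message.html', {
        'title': _('404 error'),
        'message': _('Could not find page "%s"') % request.path
    }, status=404)


def error403(request, exception=None):
    return error(request, {'id': 'unauthorized_access',
                           'description': _('no permission for %s') % request.path,
                           'code': 403}, 403)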
4e92dabe65416a3a751a0b38e75512b6daa1ba38
|
Remove useless imports
|
ticketshop/ticketapp/tests/test_views.py
|
ticketshop/ticketapp/tests/test_views.py
|
from django.test import Client
from django.contrib.auth.models import User
from django.contrib.messages.storage.base import Message
from django.contrib.messages.constants import ERROR
from django.test import TestCase
from ..models import TicketType, Ticket, TicketPurchase, Coupon
class TicketPurchaseViewTest(TestCase):
def test_getForm(self):
"""
Test that we can get the purchase form
"""
self.assertContains(self.client.get("/"), "name")
class TestConfirmationView(TestCase):
def setUp(self):
        # It appears that client.session only works
        # for non-anonymous users: setup Test User
User.objects.create_user('user', 'user@site.com', 'password')
# Login
self.client.login(username='user', password='password')
# Create data
tt = TicketType.objects.create( name = "Standard ticket", price = 100 )
self.purchase = TicketPurchase.objects.create(
name = "Bruce Wayne",
email = "bruce@wayneenterprise.com" )
self.purchase.ticket_set.create( name = "Batman", ticket_type = tt )
self.purchase.ticket_set.create( name = "Catwoman", ticket_type = tt )
self.invoice_id = self.purchase.invoice_id
def test_itRedirectToTheHomePageWhenThereIsNoSessionData(self):
"""
        Test that /confirm/ redirects to / when the session doesn't
contain any purchase data
"""
self.assertRedirects(self.client.get('/confirm/'), '/')
def test_itDisplaysTheContactName(self):
"""
Test that the view displays the contact name
"""
session = self.client.session
session['invoice_id'] = self.invoice_id
session.save()
self.assertContains(self.client.get('/confirm/'), "Bruce Wayne" )
self.assertContains(self.client.get('/confirm/'), "bruce@wayneenterprise.com" )
self.assertContains(self.client.get('/confirm/'), "bruce@wayneenterprise.com" )
def test_itDisplaysTheTotal(self):
"""
Test that the view displays the total amount
"""
session = self.client.session
session['invoice_id'] = self.invoice_id
session.save()
self.assertContains(self.client.get('/confirm/'), "<b>Total:</b> 200 SEK" )
class TestPaypalView(TestCase):
def test_2(self):
self.client.get("/paypal/")
|
Python
| 0.000007
|
@@ -71,117 +71,8 @@
User
-%0Afrom django.contrib.messages.storage.base import Message%0Afrom django.contrib.messages.constants import ERROR
%0A%0Afr
@@ -145,32 +145,16 @@
cket
-, TicketPurchase, Coupon
+Purchase
%0A%0Acl
|
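After both hunks the import block keeps only what the tests use: Client, User and TestCase, plus TicketType and TicketPurchase from the app's models. Decoded result:

from django.test import Client
from django.contrib.auth.models import User

from django.test import TestCase

from ..models import TicketType, TicketPurchase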
bec1d224771daefd9ce18c81b14f550e59b1577a
|
DidelEntity.__getattr__ raises the correct exception
|
didel/base.py
|
didel/base.py
|
# -*- coding: UTF-8 -*-
try:
from urlparse import urljoin
except ImportError: # Python 3
from urllib.parse import urljoin
from bs4 import BeautifulSoup
ROOT_URL = 'http://didel.script.univ-paris-diderot.fr'
class DidelError(Exception):
"""
Base exception for Didel errors
"""
pass
class DidelEntity(object):
"""
Common base for all fetchable entities. It provides a convenient way to
fetch a page describing an entity and populate the object with it.
Usage: ::
class MyEntity(DidelEntity):
def __init__(self, someArg):
self.path = '/foo/bar/qux/%s.html' % someArg
super(MyEntity, self).__init__()
def populate(self, soup, session, **kw):
# populate the object with ``soup``
self.title = soup.select('h1')[0].get_text()
The entity can then be populated: ::
s = Session()
m = MyEntity("foo")
m.fetch(s)
print m.title
"""
def __init__(self, *args, **kwargs):
self._resources = {}
def fetch(self, session):
"""
Fetch ``self.path`` using the given session and call ``self.populate``
on the returned text.
It sets ``self.session`` to the given session and ``self._populated``
to ``True``.
"""
if not hasattr(self, 'populate') or self.is_populated():
return False
if not hasattr(self, 'path'):
return False
url = urljoin(ROOT_URL, self.path)
resp = session.get(url)
if not resp.ok:
return False
soup = BeautifulSoup(resp.text, 'lxml')
setattr(self, 'session', session)
self.populate(soup, session)
setattr(self, '_populated', True)
return True
def populate(self, soup, session, **kwargs):
"""
This should be implemented by subclasses
"""
raise NotImplementedError
def is_populated(self):
"""
Test if the element has been populated
"""
return hasattr(self, '_populated')
def add_resource(self, name, value):
"""
Add a subresource to this element. It should be a ``DidelEntity``.
``name`` will be used as an attribute name which will, when first
acceded, populate the subresource and cache it.
"""
self._resources[name] = value
def __getattr__(self, name):
"""
        Lazily populate subresources when they're accessed
"""
if name not in self._resources:
raise TypeError("'%s' has no attribute '%s'" % (self, name))
if not self.is_populated():
raise DidelError('%s is not populated' % repr(self))
res = self._resources[name]
res.fetch(self.session)
setattr(self, name, res)
return res
def __getitem__(self, idx):
"""
        Lazily populate subresources when they're accessed
"""
el = super(DidelEntity, self).__getitem__(idx)
el.fetch(self.session)
return el
|
Python
| 0.195904
|
@@ -2597,11 +2597,16 @@
ise
-Typ
+Attribut
eErr
|
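The one-word hunk matters because Python's attribute machinery treats only AttributeError as "attribute missing": with TypeError, hasattr() and three-argument getattr() would propagate the error instead of returning False or the default. A runnable sketch (Demo is a hypothetical minimal class):

class Demo(object):
    def __getattr__(self, name):
        # Must be AttributeError, as in the fixed DidelEntity.__getattr__;
        # any other exception type escapes hasattr()/getattr().
        raise AttributeError("'%s' has no attribute '%s'" % (type(self).__name__, name))

assert not hasattr(Demo(), 'missing')
assert getattr(Demo(), 'missing', 'fallback') == 'fallback'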
d65bc5b70dfa381f650dc4c1e136680b8f6c9649
|
Improve documentation in example
|
kafka/transaction.py
|
kafka/transaction.py
|
"""
Transactional commit and rollback semantics for consumer.
"""
from logging import getLogger
from kafka.common import check_error, OffsetCommitRequest, OffsetOutOfRangeError
class KafkaTransaction(object):
"""
Provides transactional commit/rollback semantics around a `SimpleConsumer`.
Usage assumes that `auto_commit` is disabled, that messages are consumed in
batches, and that the consuming process will record its own successful
processing of each message. Both the commit and rollback operations respect
a "high-water mark" to ensure that last unsuccessfully processed message
will be retried.
Example:
consumer = SimpleConsumer(client, group, topic, auto_commit=False)
consumer.provide_partition_info()
while some_condition:
with KafkaTransaction(consumer) as transaction:
messages = consumer.get_messages(count, block=False)
for partition, message in messages:
if can_process(message.value):
transaction.mark(partition, message.offset)
else:
break
if not transaction:
sleep(delay)
These semantics allow for deferred message processing (e.g. if `can_process`
compares message time to clock time) and for repeated processing of the last
unsuccessful message (until some external error is resolved).
"""
def __init__(self, consumer):
"""
:param consumer: an instance of `SimpleConsumer`
"""
self.consumer = consumer
self.initial_offsets = None
self.high_water_mark = None
self.logger = getLogger("kafka.transaction")
def mark(self, partition, offset):
"""
Set the high-water mark in the current transaction.
In order to know the current partition, it is helpful to initialize
the consumer to provide partition info via:
consumer.provide_partition_info()
"""
max_offset = max(offset + 1, self.high_water_mark.get(partition, 0))
self.logger.debug("Setting high-water mark to: %s",
{partition: max_offset})
self.high_water_mark[partition] = max_offset
def __nonzero__(self):
"""
Return whether any operations were marked in the transaction.
"""
return bool(self.high_water_mark)
def __enter__(self):
"""
Start a new transaction:
- Record the initial offsets for rollback
- Reset the high-water mark
"""
self.initial_offsets = dict(self.consumer.offsets)
self.high_water_mark = dict()
self.logger.debug("Starting transaction at: %s", self.initial_offsets)
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
End a transaction.
- If there was no exception, commit up to the current high-water mark.
        - If there was an offset out of range error, attempt to find the correct
initial offset.
- If there was any other error, roll back to the initial offsets.
"""
if exc_type is None:
self.commit()
elif isinstance(exc_value, OffsetOutOfRangeError):
self.handle_out_of_range()
return True
else:
self.rollback()
def commit(self):
"""
Commit this transaction:
- If the high-water mark has moved, commit up to and position the
consumer at the high-water mark.
- Otherwise, reset to the consumer to the initial offsets.
"""
if self.high_water_mark:
self.logger.info("Committing transaction: %s", self.high_water_mark)
self.commit_partition_offsets(self.high_water_mark)
self.update_consumer_offsets(self.high_water_mark)
else:
self.update_consumer_offsets(self.initial_offsets)
def rollback(self):
"""
Rollback this transaction:
- Position the consumer at the initial offsets.
"""
self.logger.info("Rolling back transaction: %s", self.initial_offsets)
self.update_consumer_offsets(self.initial_offsets)
def commit_partition_offsets(self, partition_offsets):
"""
Commit explicit partition/offset pairs.
"""
self.logger.debug("Committing partition offsets: %s", partition_offsets)
commit_requests = [
OffsetCommitRequest(self.consumer.topic, partition, offset, None)
for partition, offset in partition_offsets.items()
]
commit_responses = self.consumer.client.send_offset_commit_request(
self.consumer.group,
commit_requests,
)
for commit_response in commit_responses:
check_error(commit_response)
def update_consumer_offsets(self, partition_offsets):
"""
Update consumer offsets to explicit positions.
"""
self.logger.debug("Updating consumer offsets to: %s", partition_offsets)
for partition, offset in partition_offsets.items():
self.consumer.offsets[partition] = offset
# consumer keeps other offset states beyond its `offsets` dictionary,
# a relative seek with zero delta forces the consumer to reset to the
# current value of the `offsets` dictionary
self.consumer.seek(0, 1)
def handle_out_of_range(self):
"""
Handle out of range condition by seeking to the beginning of valid
ranges.
This assumes that an out of range doesn't happen by seeking past the end
of valid ranges -- which is far less likely.
"""
self.logger.info("Seeking beginning of partition on out of range error")
self.consumer.seek(0, 0)
|
Python
| 0.000002
|
@@ -758,16 +758,60 @@
n_info()
+%0A consumer.fetch_last_known_offsets()
%0A%0A
@@ -1062,14 +1062,8 @@
sage
-.value
):%0A
|
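Decoded, both hunks touch only the usage example in the class docstring: the consumer additionally fetches its last known offsets before the loop, and can_process now receives the whole message rather than message.value. The example as it reads afterwards (SimpleConsumer, can_process, count and delay are names from the docstring, not defined here):

consumer = SimpleConsumer(client, group, topic, auto_commit=False)
consumer.provide_partition_info()
consumer.fetch_last_known_offsets()

while some_condition:
    with KafkaTransaction(consumer) as transaction:
        messages = consumer.get_messages(count, block=False)
        for partition, message in messages:
            if can_process(message):
                transaction.mark(partition, message.offset)
            else:
                break
        if not transaction:
            sleep(delay)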
2b383dd8d4a00b5f355d83cd0cb2e142e312d754
|
fix drop database function
|
dbaas/drivers/mysqldb.py
|
dbaas/drivers/mysqldb.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
import _mysql as mysqldb
import _mysql_exceptions
from contextlib import contextmanager
from . import BaseDriver, DatabaseInfraStatus, AuthenticationError, ConnectionError, GenericDriverError, \
DatabaseAlreadyExists, CredentialAlreadyExists, InvalidCredential, DatabaseStatus
from util import make_db_random_password
LOG = logging.getLogger(__name__)
MYSQL_TIMEOUT = 5
ER_DB_CREATE_EXISTS = 1007
ER_ACCESS_DENIED_ERROR = 1045
ER_CAN_NOT_CONNECT = 2003
ER_CANNOT_USER = 1396
class MySQL(BaseDriver):
default_port = 3306
def get_connection(self):
my_instance = self.databaseinfra.instances.get(databaseinfra__name=self.databaseinfra.name)
return "mysql://<user>:<password>@%s" % (my_instance.address)
def __get_admin_connection(self, instance=None):
if instance:
return instance.address, instance.port
my_instance = self.databaseinfra.instances.get(databaseinfra__name=self.databaseinfra.name)
return my_instance.address, my_instance.port
def __mysql_client__(self, instance, database='mysql'):
connection_address, connection_port = self.__get_admin_connection(instance)
try:
LOG.debug('Connecting to mysql databaseinfra %s', self.databaseinfra)
client = mysqldb.connect(host=connection_address, port=int(connection_port),
user=self.databaseinfra.user, passwd=self.databaseinfra.password,
db=database, connect_timeout=MYSQL_TIMEOUT)
LOG.debug('Successfully connected to mysql databaseinfra %s', self.databaseinfra)
return client
except Exception, e:
raise e
@contextmanager
def mysqldb(self, instance=None, database=None):
client = None
try:
yield self.__mysql_client__(instance)
except _mysql_exceptions.OperationalError, e:
if e.args[0] == ER_ACCESS_DENIED_ERROR:
raise AuthenticationError(e.args[1])
elif e.args[0] == ER_CAN_NOT_CONNECT:
raise ConnectionError(e.args[1])
else:
raise GenericDriverError(e.args)
finally:
try:
if client:
LOG.debug('Disconnecting mysql databaseinfra %s', self.databaseinfra)
client.close()
except:
LOG.warn('Error disconnecting from databaseinfra %s. Ignoring...', self.databaseinfra, exc_info=True)
def check_status(self, instance=None):
with self.mysqldb(instance=instance) as client:
try:
client.query("""SELECT 1""")
except _mysql_exceptions.OperationalError, e:
raise ConnectionError(e.args[1])
def info(self):
databaseinfra_status = DatabaseInfraStatus(databaseinfra_model=self.databaseinfra)
with self.mysqldb() as client:
client.query("SELECT VERSION()")
r = client.store_result()
databaseinfra_status.version = r.fetch_row()[0][0]
client.query("SHOW DATABASES")
r = client.store_result()
my_all_dbs = r.fetch_row(maxrows=0, how=1)
client.query("SELECT table_schema 'Database', SUM( data_length + index_length) 'Size' \
FROM information_schema.TABLES GROUP BY table_schema")
r = client.store_result()
db_sizes = r.fetch_row(maxrows=0, how=1)
all_dbs = {}
for database in db_sizes:
all_dbs[database['Database']] = int(database['Size'])
for database in my_all_dbs:
db_status = DatabaseStatus(database)
db_status.total_size_in_bytes = 0
if database['Database'] in all_dbs:
db_status.used_size_in_bytes = all_dbs[database['Database']]
else:
db_status.used_size_in_bytes = 0
databaseinfra_status.databases_status[database['Database']] = db_status
databaseinfra_status.used_size_in_bytes = sum(all_dbs.values())
return databaseinfra_status
def create_database(self, database):
LOG.info("creating database %s" % database.name)
with self.mysqldb(database=database) as mysql_database:
try:
mysql_database.query("CREATE DATABASE %s" % database.name)
except _mysql_exceptions.ProgrammingError, e:
if e.args[0] == ER_DB_CREATE_EXISTS:
raise DatabaseAlreadyExists(e.args[1])
else:
raise GenericDriverError(e.args)
def create_user(self, credential, roles=["ALL PRIVILEGES"]):
LOG.info("creating user %s to %s" % (credential.user, credential.database))
with self.mysqldb(database=credential.database) as mysql_database:
try:
                # the first release allows every host to connect to the database
mysql_database.query("GRANT %s ON %s.* TO '%s'@'%%' IDENTIFIED BY '%s'" %
(','.join(roles), credential.database, credential.user, credential.password, ))
except:
raise CredentialAlreadyExists()
def remove_database(self, database):
LOG.info("removing database %s" % database.name)
with self.mysqldb() as mysql_database:
try:
mysql_database.query("DROP DATABASE %s" % database.name)
except _mysql_exceptions.OperationalError, e:
print "@@@@@@@@@@@@@@@@@@@@@@@@@@@@ tratado"
print dir(e)
print type(e)
print "args"
print e.args
print "message"
print e.message
except Exception, e:
print "@@@@@@@@@@@@@@@@@@@@@@@@@@@@ nao tratado"
print dir(e)
print type(e)
def update_user(self, credential):
self.create_user(credential)
def remove_user(self, credential):
LOG.info("removing user %s from %s" % (credential.user, credential.database))
with self.mysqldb(database=credential.database) as mysql_database:
try:
mysql_database.query("DROP USER '%s'@'%%'" % credential.user)
except _mysql_exceptions.OperationalError, e:
if e.args[0] == ER_CANNOT_USER:
raise InvalidCredential(e.args[1])
else:
raise GenericDriverError(e.args)
def change_default_pwd(self, instance):
with self.mysqldb(instance=instance) as client:
new_password = make_db_random_password()
client.query("SET PASSWORD FOR '%s'@'%%' = PASSWORD('%s')" %
(instance.databaseinfra.user, new_password))
return new_password
|
Python
| 0
|
@@ -369,16 +369,38 @@
seStatus
+, DatabaseDoesNotExist
%0Afrom ut
@@ -515,16 +515,41 @@
= 1007%0A
+ER_DB_DROP_EXISTS = 1008%0A
ER_ACCES
@@ -580,49 +580,49 @@
_CAN
-_
NOT_
-CONNECT = 2003
+USER = 1396
%0AER_CAN
+_
NOT_
-USER = 1396
+CONNECT = 2003
%0A%0A%0Ac
@@ -5721,170 +5721,100 @@
-print %22@@@@@@@@@@@@@@@@@@@@@@@@@@@@ tratado%22%0A print dir(e)%0A%0A print type(e)%0A print %22args%22%0A print
+if e.args%5B0%5D == ER_DB_DROP_EXISTS:%0A raise DatabaseDoesNotExist(
e.args
+%5B1%5D)
%0A
@@ -5830,23 +5830,13 @@
-print %22message%22
+else:
%0A
@@ -5852,179 +5852,43 @@
-print e.message%0A except Exception, e:%0A print %22@@@@@@@@@@@@@@@@@@@@@@@@@@@@ nao tratado%22%0A print dir(e)%0A print type(e
+ raise GenericDriverError(e.args
)%0A%0A
|
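Assembled, the fix imports DatabaseDoesNotExist, defines ER_DB_DROP_EXISTS = 1008 (MySQL's "can't drop database; database doesn't exist" error), and replaces the stray debug prints with real error handling. A sketch of the resulting method, in the module's Python 2 style:

ER_DB_DROP_EXISTS = 1008

def remove_database(self, database):
    LOG.info("removing database %s" % database.name)
    with self.mysqldb() as mysql_database:
        try:
            mysql_database.query("DROP DATABASE %s" % database.name)
        except _mysql_exceptions.OperationalError, e:
            if e.args[0] == ER_DB_DROP_EXISTS:
                raise DatabaseDoesNotExist(e.args[1])
            else:
                raise GenericDriverError(e.args)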
ac0a9cec36925f630735f5109e5507923ddd0067
|
Remove stray debug print
|
openprescribing/dmd/management/commands/import_ncso_concessions.py
|
openprescribing/dmd/management/commands/import_ncso_concessions.py
|
# coding=utf8
import io
import re
from backports import csv
from django.core.management import BaseCommand
from dmd.models import NCSOConcession, DMDVmpp
def convert_ncso_name(name):
# Some NCSO records have non-breaking spaces
name = name.replace(u'\xa0', '')
# Some NCSO records have multiple spaces
name = re.sub(' +', ' ', name)
# Some NCSO records are "(new)"
name = name.replace(' (new)', '')
# dm+d uses "microgram" or "micrograms", usually with these rules
name = name.replace('mcg ', 'microgram ')
name = name.replace('mcg/', 'micrograms/')
# dm+d uses "microgram" rather than "0.X.mg"
name = name.replace('0.5mg', '500microgram')
name = name.replace('0.25mg', '250microgram')
# dm+d uses "square cm"
name = name.replace('sq cm', 'square cm')
# dm+d records measured in mg/ml have a space before the final "ml"
# eg: Abacavir 20mg/ml oral solution sugar free 240 ml
name = re.sub(r'(\d)ml$', r'\1 ml', name)
# dm+d records have "gram$" not "g$"
# eg: Estriol 0.01% cream 80 gram
name = re.sub(r'(\d)g$', r'\1 gram', name)
# Misc.
name = name.replace('Oral Susp SF', 'oral suspension sugar free')
name = name.replace('gastro- resistant', 'gastro-resistant')
name = name.replace('/ml', '/1ml')
return name
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--filename', required=True)
def handle(self, *args, **kwargs):
filename = kwargs['filename']
match = re.search('ncso_concessions_(\d{4}_\d{2}).csv', filename)
year_and_month = match.groups()[0]
vmpps = DMDVmpp.objects.values('nm', 'vppid')
with io.open(filename, encoding='utf8') as f:
fieldnames = ['drug', 'pack_size', 'price_concession']
reader = csv.DictReader(f, fieldnames=fieldnames)
for record in reader:
print(record)
                match = re.match(u'£(\d+)\.(\d\d)', record['price_concession'])
price_concession_pence = 100 * int(match.groups()[0]) \
+ int(match.groups()[1])
if NCSOConcession.objects.filter(
year_and_month=year_and_month,
drug=record['drug'],
pack_size=record['pack_size'],
).exists():
continue
concession = NCSOConcession(
year_and_month=year_and_month,
drug=record['drug'],
pack_size=record['pack_size'],
price_concession_pence=price_concession_pence
)
ncso_name_raw = u'{} {}'.format(
record['drug'],
record['pack_size']
)
ncso_name = convert_ncso_name(ncso_name_raw)
for vmpp in vmpps:
# NCSO records are inconsistent with slashes
vpmm_name_reg = vmpp['nm'].lower()
vpmm_name_reg = re.sub(' */ *', '/', vpmm_name_reg)
ncso_name_reg = ncso_name.lower()
ncso_name_reg = re.sub(' */ *', '/', ncso_name_reg)
if (vpmm_name_reg == ncso_name_reg or
vpmm_name_reg.startswith(ncso_name_reg + ' ')):
concession.vmpp_id = vmpp['vppid']
break
else:
previous_concession = NCSOConcession.objects.filter(
drug=concession.drug,
pack_size=concession.pack_size,
).first()
if previous_concession is not None:
concession.vmpp_id = previous_concession.vmpp_id
else:
print(u'No match found for {}'.format(ncso_name_raw))
concession.save()
|
Python
| 0.000003
|
@@ -1919,38 +1919,8 @@
er:%0A
- print(record)%0A
|
f7e93c3d4f9699d8d6b1ea3d0c9587b59e9c6552
|
disable run once variable
|
src/netius/servers/wsgi.py
|
src/netius/servers/wsgi.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (C) 2008-2012 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hive Netius System. If not, see <http://www.gnu.org/licenses/>.
__author__ = "JoΓ£o MagalhΓ£es joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2012 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "GNU General Public License (GPL), Version 3"
""" The license for the module """
import sys
import netius
import http
class WSGIServer(http.HTTPServer):
"""
    Base class for the creation of a wsgi compliant server;
the server should be initialized with the "target" app
object as reference and a mount point.
"""
def __init__(self, app, mount = "", *args, **kwargs):
http.HTTPServer.__init__(self, *args, **kwargs)
self.app = app
self.mount = mount
self.mount_l = len(mount)
def on_data_http(self, connection, parser):
http.HTTPServer.on_data_http(self, connection, parser)
        # closure method to be used to close the current connection in
# case that's required by the current connection headers
def close(connection):
self.delay(connection.close)
        # method created as a closure that handles the starting of
# response as defined in the wsgi standards
def start_response(status, headers):
return self._start_response(connection, status, headers)
# retrieves the path for the current request and then retrieves
# the query string part for it also, after that computes the
# path info value as the substring of the path without the mount
path = parser.get_path()
query = parser.get_query()
path_info = path[self.mount_l:]
# initializes the environment map (structure) with all the cgi based
# variables that should enable the application to handle the request
# and respond to it in accordance
environ = dict(
REQUEST_METHOD = parser.method.upper(),
SCRIPT_NAME = self.mount,
PATH_INFO = path_info,
QUERY_STRING = query,
CONTENT_TYPE = parser.headers.get("content-type", ""),
CONTENT_LENGTH = "" if parser.content_l == -1 else parser.content_l,
SERVER_NAME = netius.NAME,
SERVER_PORT = self.port,
SERVER_PROTOCOL = parser.version_s
)
# updates the environment map with all the structures referring
        # to the wsgi specifications; note that the message is retrieved
# as a buffer to be able to handle the file specific operations
environ["wsgi.version"] = (1, 0)
environ["wsgi.url_scheme"] = "https" if connection.ssl else "http"
environ["wsgi.input"] = parser.get_message_b()
environ["wsgi.errors"] = sys.stderr
environ["wsgi.multithread"] = True
environ["wsgi.multiprocess"] = True
environ["wsgi.run_once"] = True
# iterates over all the header values that have been received
# to set them in the environment map to be used by the wsgi
        # infra-structure, note that their name is capitalized as defined
# in the standard specification
for key, value in parser.headers.iteritems():
key = "HTTP_" + key.replace("-", "_").upper()
environ[key] = value
# runs the app logic with the provided environment map and start
        # response closure and then iterates over the complete set of values
# in the returned iterator to send the messages to the other end
sequence = self.app(environ, start_response)
for value in sequence: connection.send(value)
        # in case the connection is not meant to be kept alive we
        # must set the callback of the flush operation to the close
# function so that the connection is closed
if parser.keep_alive: callback = None
else: callback = close
# runs the flush operation in the connection setting the proper
# callback method for it so that the connection state is defined
# in the proper way (closed or kept untouched)
connection.flush(callback = callback)
def _start_response(self, connection, status, headers):
# retrieves the parser object from the connection and uses
# it to retrieve the string version of the http version
parser = connection.parser
version_s = parser.version_s
# verifies if the current connection is using a chunked based
# stream as this will affect some of the decisions
is_chunked = connection.is_chunked()
# converts the provided list of header tuples into a key
# values based map so that it may be used more easily
headers = dict(headers)
# checks if the provided headers map contains the definition
        # of the content length; in case it does not, it unsets the keep
# alive setting in the parser because the keep alive setting
# requires the content length to be defined or the target
# encoding type to be chunked
has_length = "Content-Length" in headers
if not has_length: parser.keep_alive = is_chunked
# applies the base (static) headers to the headers map and then
# applies the parser based values to the headers map, these
# values should be dynamic and based in the current state
# finally applies the connection related headers to the current
# map of headers so that the proper values are filtered and added
self._apply_base(headers)
self._apply_parser(parser, headers)
self._apply_connection(connection, headers)
# creates the list that will hold the various header string and
# that is going to be used as buffer and then generates the various
# lines for the headers and sets them in the buffer list
buffer = []
buffer.append("%s %s\r\n" % (version_s, status))
for key, value in headers.iteritems():
buffer.append("%s: %s\r\n" % (key, value))
buffer.append("\r\n")
# joins the header strings list as the data string that contains
# the headers and then sends the value through the connection
data = "".join(buffer)
connection.send_plain(data)
if __name__ == "__main__":
import logging
def app(environ, start_response):
status = "200 OK"
contents = "Hello World"
content_l = len(contents)
headers = (
("Content-Length", content_l),
("Content-type", "text/plain"),
("Connection", "keep-alive")
)
start_response(status, headers)
return (contents,)
server = WSGIServer(app = app, level = logging.INFO)
server.serve(env = True)
|
Python
| 0.000001
|
@@ -3940,19 +3940,20 @@
nce%22%5D =
-Tru
+Fals
e%0D%0A%0D%0A
|
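The single functional change: per PEP 3333, wsgi.run_once should evaluate true only when the server expects to invoke the application just once in the life of its process (CGI-style); a long-running server like this one should advertise False so frameworks don't optimise for one-shot execution:

environ["wsgi.run_once"] = False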
31e3f4486eba2d933582a00a643700ac2f51ab56
|
add blank string for null column
|
optional/_data_generation/create_SNPChrPosOnRef_bcp_with_allele.py
|
optional/_data_generation/create_SNPChrPosOnRef_bcp_with_allele.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
import gzip
from pyfasta import Fasta
path_to_fasta = sys.argv[1]
path_to_bcp = sys.argv[2]
# GRCh37.p13
# $ wget -r ftp://ftp.ncbi.nlm.nih.gov/genbank/genomes/Eukaryotes/vertebrates_mammals/Homo_sapiens/GRCh37.p13/Primary_Assembly/assembled_chromosomes/FASTA/
# $ for x in {1..22} X Y; do gzip -dc chr${x}.fa.gz >> GRCh37.p13.fa; done
# path_to_fasta = 'path_to_/GRCh37.p13.fa'
r = re.compile('Homo sapiens chromosome ([0-9XY]+),')
fasta = Fasta(path_to_fasta, key_fn=lambda key: r.search(key).group(1))
def get_allele(chrom, pos):
return fasta.sequence({'chr': str(chrom), 'start': int(pos), 'stop': int(pos)}, one_based=True)
if __name__ == '__main__':
# path_to_bcp = 'path_to_/b141_SNPChrPosOnRef_GRCh37p13.bcp.gz' # GRCh37.p13
# path_to_bcp = 'path_to_/b141_SNPChrPosOnRef.bcp.gz' # GRCh38
with gzip.open(path_to_bcp) as fin:
for line in fin:
record = line.split('\t')
# No chrom & pos
if record[1] in ('NotOn', 'Multi', 'Un'):
print '\t'.join([record[0], record[1]])
# chrom == Pseudoautosomal Region (PAR)
elif record[1] == 'PAR':
allele = get_allele('Y', int(record[2])+1) # chrom = Y (PAR) # TODO: or skip?
print '\t'.join([record[0], record[1], record[2], record[3], allele])
# chrom == MT # TODO: add chrMT.fa
elif record[1] == 'MT':
allele = ''
print '\t'.join([record[0], record[1], record[2], record[3], allele])
# No pos
elif record[2] == '':
print '\t'.join([record[0], record[1]])
else:
allele = get_allele(str(record[1]), int(record[2])+1) # 0-based to 1-based
print '\t'.join([record[0], record[1], record[2], record[3], allele]) # snp_id, chr, pos(0-based), orien, allele
|
Python
| 0.999683
|
@@ -1110,32 +1110,44 @@
rd%5B0%5D, record%5B1%5D
+, '', '', ''
%5D)%0A%0A
@@ -1740,16 +1740,28 @@
ecord%5B1%5D
+, '', '', ''
%5D)%0A%0A
|
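Decoded, both hunks pad the short rows so every output line carries the same five tab-separated fields (snp_id, chr, pos, orien, allele). The padded branches, in the script's Python 2 style:

# No chrom & pos
if record[1] in ('NotOn', 'Multi', 'Un'):
    print '\t'.join([record[0], record[1], '', '', ''])
# ... other branches unchanged ...
# No pos
elif record[2] == '':
    print '\t'.join([record[0], record[1], '', '', ''])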
6572d939bf841da1a63791f8cf21050bdbd71601
|
Convert for+if to list comprehension
|
plugins/VersionUpgrade/VersionUpgrade21to22/Profile.py
|
plugins/VersionUpgrade/VersionUpgrade21to22/Profile.py
|
# Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
import UM.Settings.SettingsError #To indicate that a file is of incorrect format.
import configparser #To read config files.
import io #To write config files to strings as if they were files.
## Creates a new profile instance by parsing a serialised profile in version 1
# of the file format.
#
# \param serialised The serialised form of a profile in version 1.
# \return A profile instance, or None if the file format is incorrect.
def importFrom(serialised):
try:
return Profile(serialised)
except (configparser.Error, SettingsError.InvalidFormatError, SettingsError.InvalidVersionError):
return None
## A representation of a profile used as intermediary form for conversion from
# one format to the other.
class Profile:
## Reads version 1 of the file format, storing it in memory.
#
# \param serialised A string with the contents of a machine instance file.
def __init__(self, serialised):
parser = configparser.ConfigParser(interpolation = None)
parser.read_string(serialised)
# Check correctness.
if not parser.has_section("general"):
raise SettingsError.InvalidFormatError("general")
if not parser.has_option("general", "version") or int(parser.get("general", "version")) != 1: # Hard-coded profile version here. If this number changes the entire function needs to change.
raise SettingsError.InvalidVersionError("Version upgrade intermediary version 1")
# Parse the general section.
self._name = parser.get("general", "name")
self._type = parser.get("general", "type", fallback = None)
if "weight" in parser["general"]:
self._weight = int(parser.get("general", "weight"))
else:
self._weight = None
self._machine_type_id = parser.get("general", "machine_type", fallback = None)
self._machine_variant_name = parser.get("general", "machine_variant", fallback = None)
self._machine_instance_name = parser.get("general", "machine_instance", fallback = None)
if "material" in parser["general"]:
self._material_name = parser.get("general", "material")
elif self._type == "material":
self._material_name = parser.get("general", "name", fallback = None)
else:
self._material_name = None
# Parse the settings.
self._settings = {}
if parser.has_section("settings"):
for key, value in parser["settings"].items():
self._settings[key] = value
# Parse the defaults and the disabled defaults.
self._changed_settings_defaults = {}
if parser.has_section("defaults"):
for key, value in parser["defaults"].items():
self._changed_settings_defaults[key] = value
self._disabled_settings_defaults = []
if parser.has_section("disabled_defaults"):
disabled_defaults_string = parser.get("disabled_defaults", "values")
for item in disabled_defaults_string.split(","):
if item != "":
self._disabled_settings_defaults.append(item)
## Serialises this profile as file format version 2.
#
# \return A serialised form of this profile, serialised in version 2 of
# the file format.
def export(self):
import VersionUpgrade21to22 # Import here to prevent circular dependencies.
config = configparser.ConfigParser(interpolation = None)
config.add_section("general")
config.set("general", "version", "2") # Hard-coded profile version 2
config.set("general", "name", self._name)
if self._type:
config.set("general", "type", self._type)
if self._weight:
config.set("general", "weight", self._weight)
if self._machine_type_id:
config.set("general", "machine_type", self._machine_type_id)
if self._machine_variant_name:
config.set("general", "machine_variant", self._machine_variant_name)
if self._machine_instance_name:
config.set("general", "machine_instance", self._machine_instance_name)
if self._material_name and self._type != "material":
config.set("general", "material", self._material_name)
if self._settings:
VersionUpgrade21to22.VersionUpgrade21to22.translateSettings(self._settings)
config.add_section("settings")
for key, value in self._settings.items():
config.set("settings", key, str(value))
if self._changed_settings_defaults:
VersionUpgrade21to22.VersionUpgrade21to22.translateSettings(self._changed_settings_defaults)
config.add_section("defaults")
for key, value in self._changed_settings_defaults.items():
config.set("defaults", key, str(value))
if self._disabled_settings_defaults:
VersionUpgrade21to22.VersionUpgrade21to22.translateSettingNames(self._disabled_settings_defaults)
config.add_section("disabled_defaults")
disabled_defaults_string = str(self._disabled_settings_defaults[0]) # Must be at least 1 item, otherwise we wouldn't enter this if statement.
for item in self._disabled_settings_defaults[1:]:
disabled_defaults_string += "," + str(item)
output = io.StringIO()
config.write(output)
return output.getvalue()
|
Python
| 0
|
@@ -3097,32 +3097,73 @@
s%22)%0A
+self._disabled_settings_defaults = %5Bitem
for item in disa
@@ -3197,106 +3197,41 @@
%22,%22)
-:%0A if item != %22%22:%0A self._disabled_settings_defaults.append(item)
+ if item != %22%22%5D # Split by comma.
%0A%0A
|
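The hunks fold the append loop into a single comprehension with the same empty-string filter. A runnable sketch of the behaviour; the values string is a hypothetical example:

values = "infill_speed,,travel_speed,"
disabled = [item for item in values.split(",") if item != ""]  # Split by comma.
assert disabled == ["infill_speed", "travel_speed"]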
364a3ac475eac1895fa58b48003ee8b786d012cc
|
Version bump
|
oauth_api/__init__.py
|
oauth_api/__init__.py
|
__version__ = '0.5.2'
|
Python
| 0.000001
|
@@ -16,7 +16,7 @@
0.5.
-2
+3
'%0A
|
1f98e497136ce3d9da7e63a6dc7c3f67fedf50b5
|
Save the observation if the form was valid.
|
observations/views.py
|
observations/views.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import messages
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic.edit import FormView
from braces.views import LoginRequiredMixin
from .forms import ObservationForm, BatchUploadForm
class AddObservationView(FormView):
"""
Add a single observation.
"""
form_class = ObservationForm
template_name = "observations/add_observation.html"
success_url = reverse_lazy('observations:add_observation')
class UploadObservationsView(LoginRequiredMixin, FormView):
"""
Upload a file of observations.
"""
form_class = BatchUploadForm
template_name = "observations/upload_observations.html"
success_url = reverse_lazy('observations:upload_observations')
def form_valid(self, form):
form.process_file()
messages.success(self.request, _("File uploaded successfully!"))
return super(UploadObservationsView, self).form_valid(form)
|
Python
| 0.000001
|
@@ -582,16 +582,239 @@
tion')%0A%0A
+ def form_valid(self, form):%0A observation = form.save(commit=False)%0A observation.observer = self.request.observer%0A observation.save()%0A return super(AddObservationView, self).form_valid(form)%0A%0A
%0Aclass U
|
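Decoded, the hunk gives AddObservationView a form_valid that defers the model save, stamps the observer, then persists. Note the hunk reads the observer from self.request.observer (not self.request.user), exactly as encoded:

    def form_valid(self, form):
        observation = form.save(commit=False)
        observation.observer = self.request.observer
        observation.save()
        return super(AddObservationView, self).form_valid(form)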
6353dd8caa3656b8c37280bcccd56cfaa78ff67a
|
Add API for making authenticated API requests
|
valohai_cli/api.py
|
valohai_cli/api.py
|
import platform
from urllib.parse import urljoin, urlparse
import requests
from requests.auth import AuthBase
from valohai_cli import __version__ as VERSION
from valohai_cli.exceptions import APIError, ConfigurationError
from valohai_cli.settings import settings
class TokenAuth(AuthBase):
def __init__(self, netloc, token):
super(TokenAuth, self).__init__()
self.netloc = netloc
self.token = token
def __call__(self, request):
if not request.headers.get('Authorization') and urlparse(request.url).netloc == self.netloc:
if self.token:
request.headers['Authorization'] = 'Token %s' % self.token
return request
class APISession(requests.Session):
def __init__(self, base_url, token=None):
super(APISession, self).__init__()
self.base_url = base_url
self.base_netloc = urlparse(self.base_url).netloc
self.auth = TokenAuth(self.base_netloc, token)
self.headers['Accept'] = 'application/json'
self.headers['User-Agent'] = 'valohai-cli/%s (%s)' % (
VERSION,
';'.join(platform.uname()),
)
def prepare_request(self, request):
url_netloc = urlparse(request.url).netloc
if not url_netloc:
request.url = urljoin(self.base_url, request.url)
return super(APISession, self).prepare_request(request)
def request(self, method, url, **kwargs):
handle_errors = bool(kwargs.pop('handle_errors', True))
resp = super(APISession, self).request(method, url, **kwargs)
if handle_errors and resp.status_code >= 400:
raise APIError(resp)
return resp
@classmethod
def from_settings(cls):
host = settings.get('host')
token = settings.get('token')
if not (host and token):
raise ConfigurationError('You\'re not logged in; try `vh login` first.')
return APISession(host, token)
|
Python
| 0.000001
|
@@ -69,16 +69,62 @@
equests%0A
+from click.globals import get_current_context%0A
from req
@@ -304,16 +304,57 @@
ettings%0A
+from valohai_cli.utils import force_text%0A
%0A%0Aclass
@@ -1767,57 +1767,197 @@
sp%0A%0A
- @classmethod%0A def from_settings(cls):
+%0Adef _get_current_api_session():%0A %22%22%22%0A Get an API session, either from the Click context cache, or a new one from the config.%0A%0A :return: API session%0A :rtype: APISession
%0A
+%22%22%22%0A
@@ -1988,20 +1988,16 @@
t')%0A
-
-
token =
@@ -2018,28 +2018,24 @@
token')%0A
-
-
if not (host
@@ -2043,28 +2043,24 @@
and token):%0A
-
rais
@@ -2136,39 +2136,710 @@
- return APISession(host, token
+ctx = get_current_context(silent=True) or object()%0A cache_key = force_text('_api_session_%25s_%25s' %25 (host, token))%0A session = getattr(ctx, cache_key, None)%0A if not session:%0A session = APISession(host, token)%0A setattr(ctx, cache_key, session)%0A return session%0A%0A%0Adef request(method, url, **kwargs):%0A %22%22%22%0A Make an authenticated API request.%0A%0A See the documentation for %60requests.Session.request()%60.%0A%0A :param method: HTTP Method%0A :param url: URL%0A :param kwargs: Other kwargs, see %60requests.Session.request()%60%0A :return: requests.Response%0A :rtype: requests.Response%0A %22%22%22%0A session = _get_current_api_session()%0A return session.request(method, url, **kwargs
)%0A
|
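Assembled from the hunks: two new imports (click's get_current_context, valohai_cli's force_text), from_settings dissolved into a module-level session getter that caches on the Click context, and a request() wrapper for one-off authenticated calls. Sketched result, with the docstrings condensed to comments:

def _get_current_api_session():
    # Get an API session, either from the Click context cache, or a new
    # one built from the configured host and token.
    host = settings.get('host')
    token = settings.get('token')
    if not (host and token):
        raise ConfigurationError('You\'re not logged in; try `vh login` first.')
    ctx = get_current_context(silent=True) or object()
    cache_key = force_text('_api_session_%s_%s' % (host, token))
    session = getattr(ctx, cache_key, None)
    if not session:
        session = APISession(host, token)
        setattr(ctx, cache_key, session)
    return session


def request(method, url, **kwargs):
    # Make an authenticated API request; see requests.Session.request().
    session = _get_current_api_session()
    return session.request(method, url, **kwargs)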
513b2ca1d3499e3786f1769ce67c41ba16b70419
|
switch the default prompt to "" from None
|
virtualenv/core.py
|
virtualenv/core.py
|
import sys
import click
from virtualenv import __version__
from virtualenv.builders.legacy import LegacyBuilder
from virtualenv.builders.venv import VenvBuilder
def select_builder(python, builders=None):
# Determine what Python we're going to be using. If this is None we'll use
# the Python which we're currently running under.
if python is None:
python = sys.executable
# If we were not given a list of builders we'll default to one that
# contains both of our builders
if builders is None:
builders = [VenvBuilder, LegacyBuilder]
# Loop over our builders and return the first one that is acceptable for
# the target Python.
for builder in builders:
if builder.check_available(python):
return builder
    # If we got to this point then we haven't selected a builder, so we need
# to raise an error.
raise RuntimeError("No available builders for the target Python.")
def create(destination, python=None, **kwargs):
    # Determine which builder to use based on the capabilities of the target
# python.
builder_type = select_builder(python)
# Instantiate our selected builder with the values given to us, and then
# create our virtual environment using the given builder.
builder = builder_type(python=python, **kwargs)
builder.create(destination)
@click.command(
context_settings={
"help_option_names": ["-h", "--help"],
},
epilog=(
"Once an environment has been created, you may wish to activate it by "
"sourcing an activate script in its bin directory."
),
)
@click.version_option(version=__version__)
@click.option("-v", "--verbose", count=True, help="Increase verbosity.")
@click.option("-q", "--quiet", count=True, help="Decrease verbosity.")
@click.option(
"-p", "--python",
help=(
"The Python interpreter to use in the newly created virtual "
"environment."
),
)
@click.option(
"--clear",
is_flag=True,
help="Clear out the virtual environment and start from scratch.",
)
@click.option(
"--system-site-packages/--no-site-packages",
default=False,
help="Give the virtual environment access to the global site-packages.",
)
@click.option(
"--always-copy",
is_flag=True, help="Always copy files rather than symlinking.",
)
@click.option(
"--relocatable",
is_flag=True,
help=(
"Make an EXISTING virtualenv environment relocatable. This fixes up "
"scripts and makes all .pth files relative."
),
)
@click.option(
"--setuptools/--no-setuptools",
default=True,
help="Install setuptools into the new virtual environment.",
)
@click.option(
"--pip/--no-pip",
default=True,
help="Install pip into the new virtual environment.",
)
@click.option(
"--extra-search-dir",
multiple=True,
help=(
"Directory to look for setuptools/pip distributions in. This option "
"can be used multiple times."
),
)
@click.option(
"--prompt",
help="Provides an alternative prompt prefix for this environment.",
)
@click.argument("destination")
def cli(destination,
verbose=0,
quiet=0,
python=None,
system_site_packages=False,
clear=False,
always_copy=False,
prompt=None,
relocatable=False,
extra_search_dir=None,
pip=True,
setuptools=True):
"""
Creates virtual python environments in a target directory.
"""
create(
destination,
python=python,
system_site_packages=system_site_packages,
clear=clear,
pip=pip,
setuptools=setuptools,
extra_search_dirs=extra_search_dir,
prompt=prompt,
)
|
Python
| 0.999907
|
@@ -3021,16 +3021,32 @@
rompt%22,%0A
+ default=%22%22,%0A
help
|
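The one-line hunk makes the omitted flag yield an empty string rather than None, so downstream prompt handling can concatenate it without a None guard. The option after the change:

@click.option(
    "--prompt",
    default="",
    help="Provides an alternative prompt prefix for this environment.",
)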
1b2a1bb5f4c99f80c3664a40796939732e9fe91c
|
bump dev version
|
bndl/__init__.py
|
bndl/__init__.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging.config
import os.path
from bndl.util.conf import Config, String
from bndl.util.log import install_trace_logging
from bndl.util.objects import LazyObject
# Expose a global BNDL configuration
conf = LazyObject(Config)
# Configure Logging
logging_conf = String('logging.conf')
install_trace_logging()
logging.captureWarnings(True)
if os.path.exists(conf['bndl.logging_conf']):
logging.config.fileConfig(conf['bndl.logging_conf'], disable_existing_loggers=False)
# BNDL version info
__version_info__ = (0, 6, 0)
__version__ = '.'.join(map(str, __version_info__))
|
Python
| 0
|
@@ -1074,12 +1074,20 @@
(0,
-6
+7
, 0
+, 'dev2'
)%0A__
|
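Decoded, the version tuple gains a dev tag, and since __version__ dot-joins the tuple the rendered string changes accordingly; a runnable check:

__version_info__ = (0, 7, 0, 'dev2')
__version__ = '.'.join(map(str, __version_info__))
assert __version__ == '0.7.0.dev2'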
ee3ee6810f1f8fcc535e29f0f2a2af425dcea7c4
|
add db_handler instance to lint_github
|
lintable_lintball/lintball.py
|
lintable_lintball/lintball.py
|
# Copyright 2015-2016 Capstone Team G
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
from typing import List
from uuid import uuid4
from lintable_git.git_handler import GitHandler
from lintable_lintball.lint_error import LintError
from lintable_lintball.lint_report import LintReport
from lintable_lintball.lint_wrapper import LintWrapper
from lintable_lintball.runner import runner
from lintable_linters.whitespace_file_linter import WhitespaceFileLinter
from lintable_processes.log_handler import LogHandler
from lintable_processes.process_handler import ProcessHandler
@runner.task(serializer='json')
def lint_github(payload: json, task_id=uuid4()):
if payload['action'] != 'opened' and payload['action'] != 'synchronized':
return
repo_url = 'https://github.com/{full_name}.git'.format(
full_name=payload['repository']['full_name'])
sha1_a = payload['pull_request']['head']['sha']
sha1_b = payload['pull_request']['base']['sha']
process_handler = ProcessHandler(repo=repo_url, uuid=task_id,
logger=LogHandler(logging.getLogger()))
git_handler = GitHandler(process_handler=process_handler,
repo_url=repo_url,
sha1_a=sha1_a,
sha1_b=sha1_b)
lint_process(git_handler, process_handler)
return
def lint_process(git_handler: GitHandler,
process_handler: ProcessHandler,
linters=None):
if linters is None:
linters = [WhitespaceFileLinter()]
git_handler.started()
git_handler.clone_repo()
git_handler.retrieve_changed_files_from_commit()
lintball(process_handler, linters)
return
def lintball(handler: ProcessHandler, linters: List[LintWrapper]):
a_path = os.path.join(handler.local_path, 'a')
b_path = os.path.join(handler.local_path, 'b')
lint_errors = {}
for filename in handler.files:
a_file = os.path.join(a_path, filename)
b_file = os.path.join(b_path, filename)
a_results = lint(a_file, linters, handler) if os.path.exists(
a_file) else []
b_results = lint(b_file, linters, handler) if os.path.exists(
b_file) else []
lint_errors[filename] = [results for results in b_results if
results not in a_results]
lint_report = LintReport(errors=lint_errors)
handler.report(lint_report)
handler.finish()
return
def lint(filename: str, linters: List[LintWrapper], handler: ProcessHandler) -> List[LintError]:
lint_errors = []
for linter in linters:
handler.lint_file(linter=str(linter), file=filename)
lint_errors.extend(linter.lint(filename))
return lint_errors
|
Python
| 0
|
@@ -663,16 +663,101 @@
uuid4%0A%0A
+from lintable_db.database import DatabaseHandler%0Afrom lintable_db.models import User%0A
from lin
@@ -1072,16 +1072,68 @@
eLinter%0A
+from lintable_processes.db_handler import DBHandler%0A
from lin
@@ -1322,16 +1322,49 @@
uid4()):
+%0A logger = logging.getLogger()
%0A%0A if
@@ -1451,16 +1451,352 @@
return%0A%0A
+ github_id = payload%5B'repository'%5D%5B'owner'%5D%5B'id'%5D%0A%0A owner = DatabaseHandler.get_user(github_id)%0A%0A oauth_key = owner.get_oauth_token() if isinstance(owner, User) else None%0A%0A if oauth_key is None:%0A logger.error('Unable to locate oauth_token for %7Buser%7D with id of %7Bid%7D'.format(user=owner, id=github_id))%0A return%0A%0A
repo
@@ -1811,16 +1811,27 @@
https://
+%7Boauth_key%7D
github.c
@@ -1858,16 +1858,45 @@
format(%0A
+ oauth_key=oauth_key,%0A
@@ -2043,24 +2043,83 @@
e'%5D%5B'sha'%5D%0A%0A
+ repo_id = payload%5B'pull_request'%5D%5B'repository'%5D%5B'id'%5D%0A%0A
process_
@@ -2157,16 +2157,53 @@
epo_url,
+%0A
uuid=ta
@@ -2268,31 +2268,86 @@
ler(logg
-ing.getLogger()
+er),%0A db=DBHandler(repo_id=repo_id
))%0A%0A
|
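Reassembled, lint_github now resolves the repository owner's OAuth token via DatabaseHandler before cloning, bails out with a logged error when no token is found, and threads a DBHandler (keyed by the pull request's repository id) into the ProcessHandler. One detail worth noting: the URL template interpolates the token immediately before github.com with no '@' separator, exactly as encoded in the hunk. Sketch of the added flow inside lint_github:

logger = logging.getLogger()

github_id = payload['repository']['owner']['id']
owner = DatabaseHandler.get_user(github_id)
oauth_key = owner.get_oauth_token() if isinstance(owner, User) else None

if oauth_key is None:
    logger.error('Unable to locate oauth_token for {user} with id of {id}'.format(
        user=owner, id=github_id))
    return

repo_url = 'https://{oauth_key}github.com/{full_name}.git'.format(
    oauth_key=oauth_key,
    full_name=payload['repository']['full_name'])

repo_id = payload['pull_request']['repository']['id']
process_handler = ProcessHandler(repo=repo_url,
                                 uuid=task_id,
                                 logger=LogHandler(logger),
                                 db=DBHandler(repo_id=repo_id))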
01fe200a09b0e5987116364c954da576200893f8
|
Version 1.1.1
|
livinglots_owners/__init__.py
|
livinglots_owners/__init__.py
|
__version__ = '1.1.0'
|
Python
| 0
|
@@ -16,7 +16,7 @@
1.1.
-0
+1
'%0A
|
891e37943b568fa342e01c5f4439501713bf4681
|
fix paths
|
data_processing.py
|
data_processing.py
|
#!/usr/bin/python
"""
Flask app to run data retrieval tasks for Open Humans
"""
import imp
import json
import logging
import os
import pkgutil
import sys
from functools import partial
import requests
from celery.signals import (after_setup_logger, after_task_publish,
task_postrun, task_prerun)
from flask import Flask, request
from flask_sslify import SSLify
from raven.contrib.flask import Sentry
from werkzeug.contrib.fixers import ProxyFix
from celery_worker import make_worker
app = Flask(__name__)
DEBUG = os.getenv('DEBUG', False)
PORT = os.getenv('PORT', 5000)
# A mapping of name/source argument pairs to send to the create_datafiles
# method
EXTRA_DATA = {
'american_gut': {
'survey_ids': 'surveyIds',
},
'wildlife': {
'files': 'files',
},
}
DATAFILES = {}
logging.basicConfig(level=logging.DEBUG if DEBUG else logging.INFO)
logging.info('Starting data-processing')
# trust X-Forwarded-For on Heroku for better debugging information with Sentry
if os.getenv('HEROKU') == 'true':
app.wsgi_app = ProxyFix(app.wsgi_app)
app.config.update(
DEBUG=DEBUG,
CELERY_BROKER_URL=os.environ.get('CLOUDAMQP_URL', 'amqp://'),
CELERY_ACCEPT_CONTENT=['json'],
CELERY_TASK_SERIALIZER='json',
CELERY_RESULT_SERIALIZER='json',
CELERY_SEND_EVENTS=False,
CELERYD_LOG_COLOR=True,
BROKER_POOL_LIMIT=0)
sentry = Sentry(app)
sslify = SSLify(app)
celery_worker = make_worker(app)
@after_setup_logger.connect
def after_setup_logger_cb(logger, **kwargs):
"""
Update the Celery logger's level.
"""
if DEBUG:
logger.setLevel(logging.DEBUG)
def debug_json(value):
"""
Return a human-readable representation of JSON data.
"""
return json.dumps(value, sort_keys=True, indent=2, separators=(',', ': '))
def make_task_data(task_id, task_state):
"""
Format task data for the Open Humans update endpoint.
"""
return {
'task_data': {
'task_id': task_id,
'task_state': task_state,
}
}
@celery_worker.task
def task_update(update_url, task_data):
"""
The 'after_task_publish' signal runs synchronously so we use celery itself
to run it asynchronously.
"""
logging.info('Sending queued update')
requests.post(update_url, json=task_data)
@after_task_publish.connect
def task_sent_handler_cb(sender=None, body=None, **other_kwargs):
"""
Send update that task has been sent to queue.
"""
if sender == 'data_processing.task_update':
return
logging.debug('after_task_publish body: %s', debug_json(body))
update_url = body['kwargs'].get('update_url')
task_id = body['kwargs'].get('task_id')
if not update_url or not task_id:
return
task_data = make_task_data(task_id, 'QUEUED')
logging.info('Scheduling after_task_publish update')
task_update.apply_async(args=[update_url, task_data], queue='priority')
@task_prerun.connect
def task_prerun_handler_cb(sender=None, kwargs=None, **other_kwargs):
"""
Send update that task is starting run.
"""
if sender == task_update:
return
logging.debug('task_prerun kwargs: %s', debug_json(kwargs))
update_url = kwargs.get('update_url')
task_id = kwargs.get('task_id')
if not update_url or not task_id:
return
task_data = make_task_data(task_id, 'INITIATED')
logging.info('Scheduling task_prerun update')
task_update.apply_async(args=[update_url, task_data], queue='priority')
@task_postrun.connect
def task_postrun_handler_cb(sender=None, state=None, kwargs=None,
**other_kwargs):
"""
Send update that task run is complete.
"""
if sender == task_update:
return
logging.debug('task_postrun kwargs: %s', debug_json(kwargs))
update_url = kwargs.get('update_url')
task_id = kwargs.get('task_id')
if not update_url or not task_id:
return
task_data = make_task_data(task_id, state)
logging.info('Scheduling task_postrun update')
task_update.apply_async(args=[update_url, task_data], queue='priority')
def load_sources():
"""
A generator that iterates and loads all of the modules in the sources/
directory.
"""
source_path = [os.path.join(sys.path[0], 'sources')]
for _, name, _ in pkgutil.iter_modules(source_path):
f, pathname, desc = imp.find_module(name, source_path)
yield (name, imp.load_module(name, f, pathname, desc))
@celery_worker.task
def datafiles_task(name, **task_params):
"""
A Celery task that runs a create_datafiles method and applies the argument
mappings from EXTRA_DATA.
"""
mapping = EXTRA_DATA.get(name)
if mapping:
for key, value in mapping.items():
if value not in task_params['data']:
return
task_params[key] = task_params['data'][value]
DATAFILES[name](sentry=sentry, **task_params)
def generic_handler(name):
datafiles_task.delay(name, **request.json['task_params'])
return '{} dataset started'.format(name)
def add_rules():
for name, source in load_sources():
DATAFILES[name] = source.create_datafiles
app.add_url_rule('/{}/'.format(name),
name,
partial(generic_handler, name),
methods=['GET', 'POST'])
@app.route('/', methods=['GET', 'POST'])
def index():
"""
Main page for the app, primarily to give Pingdom something to monitor.
"""
return 'Open Humans Data Processing'
add_rules()
|
Python
| 0.000267
|
@@ -4327,19 +4327,59 @@
oin(
-sys.path%5B0%5D
+%0A os.path.dirname(os.path.abspath(__file__))
, 's
@@ -5230,24 +5230,67 @@
_sources():%0A
+ logging.info('Adding %22%25s%22', name)%0A%0A
DATA
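Read back from the two hunks above, the fix anchors the sources/ directory to the module file instead of sys.path[0], and logs each source as its rule is registered. A plausible reconstruction of the patched fragments (indentation approximate, not the verbatim commit):

# In load_sources(): resolve sources/ relative to this file, so plugins
# are still found when the app is launched from another directory.
source_path = [os.path.join(
    os.path.dirname(os.path.abspath(__file__)), 'sources')]

# In add_rules(): log each source module as its URL rule is added.
for name, source in load_sources():
    logging.info('Adding "%s"', name)
    DATAFILES[name] = source.create_datafiles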
|
b159d28dc965e60843f2617b4ae40d6c04cd2604
|
Optimize sensitive areas API
|
geotrek/api/v2/views/sensitivity.py
|
geotrek/api/v2/views/sensitivity.py
|
from __future__ import unicode_literals
from django.conf import settings
from django.db.models import F, Case, When
from django_filters.rest_framework.backends import DjangoFilterBackend
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from geotrek.api.v2 import serializers as api_serializers, \
viewsets as api_viewsets
from geotrek.api.v2.functions import Transform, Buffer, GeometryType
from geotrek.sensitivity import models as sensitivity_models
from ..filters import GeotrekQueryParamsFilter, GeotrekInBBoxFilter, GeotrekSensitiveAreaFilter
class SensitiveAreaViewSet(api_viewsets.GeotrekViewset):
filter_backends = (
DjangoFilterBackend,
GeotrekQueryParamsFilter,
GeotrekInBBoxFilter,
GeotrekSensitiveAreaFilter,
)
serializer_class = api_serializers.SensitiveAreaListSerializer
serializer_detail_class = api_serializers.SensitiveAreaListSerializer
permission_classes = [IsAuthenticatedOrReadOnly]
authentication_classes = []
queryset = sensitivity_models.SensitiveArea.objects.existing() \
.filter(published=True) \
.prefetch_related('species') \
.annotate(geom_type=GeometryType(F('geom'))) \
.annotate(geom2d_transformed=Case(
When(geom_type='POINT', then=Transform(Buffer(F('geom'), F('species__radius'), 4), settings.API_SRID)),
When(geom_type='POLYGON', then=Transform(F('geom'), settings.API_SRID))
))
|
Python
| 0.000001
|
@@ -1106,24 +1106,74 @@
hed=True) %5C%0A
+ .select_related('species', 'structure') %5C%0A
.pre
@@ -1194,16 +1194,27 @@
'species
+__practices
') %5C%0A
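Decoded, the hunks pull each area's species and structure rows into the main query with select_related and swap the bare 'species' prefetch for 'species__practices', avoiding per-row follow-up queries. A sketch of the resulting queryset head, reconstructed from the fragments:

queryset = sensitivity_models.SensitiveArea.objects.existing() \
    .filter(published=True) \
    .select_related('species', 'structure') \
    .prefetch_related('species__practices') \
    .annotate(geom_type=GeometryType(F('geom')))  # geometry annotations as above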
|
9433fa8970341cb2d024bceb0e23e93fbfb71393
|
Update python test
|
solidity/python/FormulaTestSale.py
|
solidity/python/FormulaTestSale.py
|
from sys import argv
from decimal import Decimal
from random import randrange
from Formula import calculateSaleReturn
def formulaTest(supply,reserve,ratio,amount):
fixed = Decimal(calculateSaleReturn(supply,reserve,ratio,amount))
real = Decimal(reserve)*(1-(1-Decimal(amount)/Decimal(supply))**(100/Decimal(ratio)))
if fixed > real:
error = []
error.append('error occurred on:')
error.append('supply = {}'.format(supply))
error.append('reserve = {}'.format(reserve))
error.append('ratio = {}'.format(ratio))
error.append('amount = {}'.format(amount))
error.append('fixed = {}'.format(fixed))
error.append('real = {}'.format(real))
raise BaseException('\n'.join(error))
return fixed/real
size = int(argv[1]) if len(argv) > 1 else 0
if size == 0:
size = input('How many test-cases would you like to execute? ')
n = 0
worstAccuracy = 1
numOfFailures = 0
while n < size: # avoid creating a large range in memory
supply = randrange(2,10**26)
reserve = randrange(1,10**23)
ratio = randrange(1,99)
amount = randrange(1,supply)
try:
accuracy = formulaTest(supply,reserve,ratio,amount)
worstAccuracy = min(worstAccuracy,accuracy)
except Exception,error:
accuracy = 0
numOfFailures += 1
except BaseException,error:
print error
break
print 'Test #{}: accuracy = {:.12f}, worst accuracy = {:.12f}, num of failures = {}'.format(n,accuracy,worstAccuracy,numOfFailures)
n += 1
|
Python
| 0.000001
|
@@ -919,14 +919,8 @@
)%0A%0A%0A
-n = 0%0A
wors
@@ -955,64 +955,32 @@
= 0%0A
-while n %3C size: # avoid creating a large range in memory
+%0A%0Afor n in xrange(size):
%0A
@@ -1511,15 +1511,4 @@
es)%0A
- n += 1%0A
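The hunks drop the hand-rolled counter (n = 0 ... n += 1) in favour of Python 2's xrange. A sketch of the updated driver loop, body elided:

worstAccuracy = 1
numOfFailures = 0

for n in xrange(size):
    supply = randrange(2, 10**26)
    reserve = randrange(1, 10**23)
    ratio = randrange(1, 99)
    amount = randrange(1, supply)
    # ... accuracy bookkeeping and the progress print are unchanged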
|
a0bac1e9020233f886f939fa6b0b7b15f7ea70f9
|
set daemon to false when executing standard commands
|
lux/core/commands/__init__.py
|
lux/core/commands/__init__.py
|
'''
.. autoclass:: Command
:members:
:member-order: bysource
'''
import argparse
import logging
from pulsar import Setting, Application, ImproperlyConfigured, isawaitable
from pulsar.utils.config import Config, LogLevel, Debug, LogHandlers
from lux import __version__
from lux.utils.async import maybe_green
class ConfigError(Exception):
def __init__(self, config_file):
self.config_file = config_file
class CommandError(ImproperlyConfigured):
pass
def service_parser(services, description, help=True):
description = description or 'services to run'
p = argparse.ArgumentParser(
description=description, add_help=help)
p.add_argument('service', nargs='?', choices=services,
help='Service to run')
return p
class ConsoleParser(object):
'''A class for parsing the console inputs.
Used as base class for both :class:`.Command` and :class:`.App`
'''
help = None
option_list = ()
default_option_list = (LogLevel(),
LogHandlers(default=['console']),
Debug())
@property
def config_module(self):
raise NotImplementedError
def get_version(self):
raise NotImplementedError
def get_parser(self, **params):
parser = argparse.ArgumentParser(**params)
parser.add_argument('--version',
action='version',
version=self.get_version(),
help="Show version number and exit")
config = Setting('config',
('-c', '--config'),
default=self.config_module,
desc=('python dotted path to a Lux/Pulsar config '
' file, where settings can be specified.'))
config.add_argument(parser, True)
for opt in self.default_option_list:
opt.add_argument(parser, True)
for opt in self.option_list:
opt.add_argument(parser, True)
return parser
class LuxApp(Application):
def __call__(self):
try:
return super().__call__()
except ImproperlyConfigured:
pass
def on_config(self, actor):
"""This is just a dummy app and therefore we don't want to add
it to the arbiter monitor collection
"""
return False
class LuxCommand(ConsoleParser):
'''Signature class for lux commands.
A :class:`.LuxCommand` is never initialised directly, instead,
the :meth:`.Application.get_command` method is used to retrieve it and
executed by its callable method.
.. attribute:: name
Command name, given by the module name containing the Command.
.. attribute:: app
The :class:`.Application` running this :class:`.Command`.
.. attribute:: stdout
The file object corresponding to the output streams of this command.
Default: ``sys.stdout``
.. attribute:: stderr
The file object corresponding to the error streams of this command.
Default: ``sys.stderr``
'''
pulsar_config_include = ('log_level', 'log_handlers', 'debug', 'config')
def __init__(self, name, app):
self.name = name
self.app = app
def __call__(self, argv, **params):
app = self.pulsar_app(argv)
app()
# Make sure the wsgi handler is created
assert self.app.wsgi_handler()
result = maybe_green(self.app, self.run, app.cfg, **params)
if isawaitable(result) and not self.app._loop.is_running():
result = self.app._loop.run_until_complete(result)
return result
def get_version(self):
"""Return the :class:`.Command` version.
By default it is the same version as lux.
"""
return __version__
@property
def config_module(self):
return self.app.config_module
def run(self, argv, **params):
'''Run this :class:`Command`.
This is the only method which needs implementing by subclasses.
'''
raise NotImplementedError
@property
def logger(self):
return logging.getLogger('lux.%s' % self.name)
def write(self, stream=''):
'''Write ``stream`` to the :attr:`stdout`.'''
self.app.write(stream)
def write_err(self, stream=''):
'''Write ``stream`` to the :attr:`stderr`.'''
self.app.write_err(stream)
def pulsar_app(self, argv, application=None, log_name='lux', **kw):
app = self.app
if application is None:
application = LuxApp
cfg = Config(include=self.pulsar_config_include)
else:
cfg = application.cfg.copy()
for setting in self.option_list:
cfg.settings[setting.name] = setting.copy()
return application(callable=app.callable,
description=self.help,
epilog=app.config.get('EPILOG'),
cfg=cfg,
argv=argv,
log_name=log_name,
version=app.meta.version,
debug=app.debug,
config=app.config_module,
**kw)
|
Python
| 0.000006
|
@@ -3360,16 +3360,47 @@
p(argv)%0A
+ app.cfg.daemon = False%0A
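Decoded, the hunk flips daemon mode off right after the pulsar app is built, so standard commands always stay in the foreground whatever the config says. Reconstructed call site:

def __call__(self, argv, **params):
    app = self.pulsar_app(argv)
    app.cfg.daemon = False  # never daemonize standard commands
    app()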
|
6d31622fa4170966cf67a6cfa77895efa796991e
|
put proper dotpay gateway link
|
getpaid/backends/dotpay/__init__.py
|
getpaid/backends/dotpay/__init__.py
|
import datetime
from decimal import Decimal
import hashlib
import logging
from django.utils import six
from six.moves.urllib.parse import urlencode
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _
from getpaid import signals
from getpaid.backends import PaymentProcessorBase
from getpaid.utils import get_domain
logger = logging.getLogger('getpaid.backends.dotpay')
class DotpayTransactionStatus:
STARTED = 1
FINISHED = 2
REJECTED = 3
REFUNDED = 4
RECLAMATION = 5
class PaymentProcessor(PaymentProcessorBase):
BACKEND = 'getpaid.backends.dotpay'
BACKEND_NAME = _('Dotpay')
BACKEND_ACCEPTED_CURRENCY = ('PLN', 'EUR', 'USD', 'GBP', 'JPY', 'CZK', 'SEK')
BACKEND_LOGO_URL = 'getpaid/backends/dotpay/dotpay_logo.png'
_ALLOWED_IP = ('195.150.9.37', )
_ACCEPTED_LANGS = ('pl', 'en', 'de', 'it', 'fr', 'es', 'cz', 'ru', 'bg')
_GATEWAY_URL = 'https://ssl.dotpay.eu/'
_ONLINE_SIG_FIELDS = ('id', 'control', 't_id', 'amount', 'email', 'service', 'code', 'username', 'password', 't_status')
@staticmethod
def compute_sig(params, fields, PIN):
text = PIN + ":" + (u":".join(map(lambda field: params.get(field, ''), fields)))
return hashlib.md5(text.encode('utf8')).hexdigest()
@staticmethod
def online(params, ip):
allowed_ip = PaymentProcessor.get_backend_setting('allowed_ip', PaymentProcessor._ALLOWED_IP)
if len(allowed_ip) != 0 and ip not in allowed_ip:
logger.warning('Got message from not allowed IP %s' % str(allowed_ip))
return 'IP ERR'
PIN = PaymentProcessor.get_backend_setting('PIN', '')
if params['md5'] != PaymentProcessor.compute_sig(params, PaymentProcessor._ONLINE_SIG_FIELDS, PIN):
logger.warning('Got message with wrong sig, %s' % str(params))
return u'SIG ERR'
try:
params['id'] = int(params['id'])
except ValueError:
return u'ID ERR'
if params['id'] != int(PaymentProcessor.get_backend_setting('id')):
return u'ID ERR'
from getpaid.models import Payment
try:
payment = Payment.objects.get(pk=int(params['control']))
except (ValueError, Payment.DoesNotExist):
logger.error('Got message for non existing Payment, %s' % str(params))
return u'PAYMENT ERR'
amount, currency = params.get('orginal_amount', params['amount'] + ' PLN').split(' ')
if currency != payment.currency.upper():
logger.error('Got message with wrong currency, %s' % str(params))
return u'CURRENCY ERR'
payment.external_id = params.get('t_id', '')
payment.description = params.get('email', '')
if int(params['t_status']) == DotpayTransactionStatus.FINISHED:
payment.amount_paid = Decimal(amount)
payment.paid_on = datetime.datetime.utcnow().replace(tzinfo=utc)
if payment.amount <= Decimal(amount):
# Amount is correct or it is overpaid
payment.change_status('paid')
else:
payment.change_status('partially_paid')
elif int(params['t_status']) in [DotpayTransactionStatus.REJECTED, DotpayTransactionStatus.RECLAMATION, DotpayTransactionStatus.REFUNDED]:
payment.change_status('failed')
return u'OK'
def get_URLC(self):
urlc = reverse('getpaid-dotpay-online')
if PaymentProcessor.get_backend_setting('force_ssl', False):
return u'https://%s%s' % (get_domain(), urlc)
else:
return u'http://%s%s' % (get_domain(), urlc)
def get_URL(self, pk):
url = reverse('getpaid-dotpay-return', kwargs={'pk': pk})
if PaymentProcessor.get_backend_setting('force_ssl', False):
return u'https://%s%s' % (get_domain(), url)
else:
return u'http://%s%s' % (get_domain(), url)
def get_gateway_url(self, request):
"""
Routes a payment to Gateway, should return URL for redirection.
"""
params = {
'id': PaymentProcessor.get_backend_setting('id'),
'description': self.get_order_description(self.payment, self.payment.order),
'amount': self.payment.amount,
'currency': self.payment.currency,
'type': 0, # show "return" button after finished payment
'control': self.payment.pk,
'URL': self.get_URL(self.payment.pk),
'URLC': self.get_URLC(),
}
user_data = {
'email': None,
'lang': None,
}
signals.user_data_query.send(sender=None, order=self.payment.order, user_data=user_data)
if user_data['email']:
params['email'] = user_data['email']
if user_data['lang'] and user_data['lang'].lower() in PaymentProcessor._ACCEPTED_LANGS:
params['lang'] = user_data['lang'].lower()
elif PaymentProcessor.get_backend_setting('lang', False) and \
PaymentProcessor.get_backend_setting('lang').lower() in PaymentProcessor._ACCEPTED_LANGS:
params['lang'] = PaymentProcessor.get_backend_setting('lang').lower()
if PaymentProcessor.get_backend_setting('onlinetransfer', False):
params['onlinetransfer'] = 1
if PaymentProcessor.get_backend_setting('p_email', False):
params['p_email'] = PaymentProcessor.get_backend_setting('p_email')
if PaymentProcessor.get_backend_setting('p_info', False):
params['p_info'] = PaymentProcessor.get_backend_setting('p_info')
if PaymentProcessor.get_backend_setting('tax', False):
params['tax'] = 1
gateway_url = PaymentProcessor.get_backend_setting('gateway_url', self._GATEWAY_URL)
if PaymentProcessor.get_backend_setting('method', 'get').lower() == 'post':
return gateway_url, 'POST', params
elif PaymentProcessor.get_backend_setting('method', 'get').lower() == 'get':
for key in params.keys():
params[key] = six.text_type(params[key]).encode('utf-8')
return gateway_url + '?' + urlencode(params), "GET", {}
else:
raise ImproperlyConfigured('Dotpay payment backend accepts only GET or POST')
|
Python
| 0
|
@@ -1050,10 +1050,13 @@
pay.
-eu
+pl/t2
/'%0A
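The character diff is terse; decoded, it rewrites the gateway constant as:

_GATEWAY_URL = 'https://ssl.dotpay.pl/t2/'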
|
e364bdf7723ca45ac1000eda13a76cf1b19f0ad8
|
Remove a debug print
|
plugins/plugin_node_manager/src/plugin_node_manager/launch_item.py
|
plugins/plugin_node_manager/src/plugin_node_manager/launch_item.py
|
#!/usr/bin/env python
################################################################################
#
# Copyright Airbus Group SAS 2015
# All rights reserved.
#
# File Name : setup.py
# Authors : Martin Matignon
#
# If you find any bug or if you have any question please contact
# Adolfo Suarez Roos <adolfo.suarez@airbus.com>
# Martin Matignon <martin.matignon.external@airbus.com>
#
#
################################################################################
import rospy
import time
import os
import roslaunch
import subprocess
from roslib.packages import get_pkg_dir
from python_qt_binding.QtGui import *
from python_qt_binding.QtCore import *
def widget_creator(obj_ui):
widget = QWidget()
layout = QHBoxLayout(widget)
layout.setSpacing(6)
layout.setContentsMargins(0, 0, 0, 0)
spacer_left = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
spacer_right = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
layout.addItem(spacer_left)
layout.addWidget(obj_ui)
layout.addItem(spacer_right)
return widget
class LaunchItem:
def __init__(self, launch, machine):
self.launch_name = QLabel(launch)
self.launch_name.setContentsMargins(0,0,10,0)
self.launch_name.setMinimumHeight(40)
self.combo_machines = QComboBox()
self.combo_machines.setMinimumHeight(40)
self.combo_machines.addItem('cobotgui-dev:127.0.0.1')
self.combo_machines.addItem('cobot:192.168.0.1')
rsc = os.path.join(get_pkg_dir('plugin_node_manager'),'resources')
icon_launch = QIcon(rsc+'/launch.png')
self.button_launch = QPushButton()
self.button_launch.setIcon(icon_launch)
self.button_launch.setIconSize(QSize(30,30))
self.button_launch.setFixedSize(QSize(100,40))
self.button_launch.clicked.connect(self._launch_node_slot)
self.button_launch_widget = widget_creator(self.button_launch)
def _launch_node_slot(self):
print 'coucou'
rospy.loginfo('%s::_launch_node()'%self.launch_name.text())
subprocess.Popen(['roslaunch',
'node_launchers',
self.launch_name.text()])
#End of file
|
Python
| 0.000028
|
@@ -2087,40 +2087,8 @@
%0A
- print 'coucou'%0A %0A
|
3c916451ebb584a72fb0a92c2a577427ff10003c
|
Make Height Change Also Be A Valid Ping
|
dataserv/Farmer.py
|
dataserv/Farmer.py
|
import hashlib
from dataserv.run import db
from datetime import datetime
from sqlalchemy import DateTime
from dataserv.Validator import is_btc_address
def sha256(content):
"""Finds the sha256 hash of the content."""
content = content.encode('utf-8')
return hashlib.sha256(content).hexdigest()
class Farmer(db.Model):
id = db.Column(db.Integer, primary_key=True)
btc_addr = db.Column(db.String(35), unique=True)
last_seen = db.Column(DateTime, default=datetime.utcnow)
height = db.Column(db.Integer, default=0)
def __init__(self, btc_addr, last_seen=None):
"""
A farmer is an un-trusted client that provides some disk space
in exchange for payment. We use this object to keep track of
farmers connected to this node.
"""
self.btc_addr = btc_addr
self.last_seen = last_seen
def __repr__(self):
return '<Farmer BTC Address: %r>' % self.btc_addr
def is_btc_address(self):
"""Check if the address is a valid Bitcoin public key."""
return is_btc_address(self.btc_addr)
def validate(self, register=False):
"""Make sure this farmer fits the rules for this node."""
# check if this is a valid BTC address or not
if not self.is_btc_address():
raise ValueError("Invalid BTC Address.")
elif self.exists() and register:
raise LookupError("Address Already Is Registered.")
elif not self.exists() and not register:
raise LookupError("Address Not Registered.")
def register(self):
"""Add the farmer to the database."""
self.validate(True)
# If everything works correctly then commit to database.
db.session.add(self)
db.session.commit()
def exists(self):
"""Check to see if this address is already listed."""
query = db.session.query(Farmer.btc_addr)
return query.filter(Farmer.btc_addr == self.btc_addr).count() > 0
def lookup(self):
"""Return the Farmer object for the bitcoin address passed."""
self.validate()
farmer = Farmer.query.filter_by(btc_addr=self.btc_addr).first()
return farmer
def ping(self):
"""
Keep-alive for the farmer. Validation can take a long time, so
we just want to know if they are still there.
"""
farmer = self.lookup()
farmer.last_seen = datetime.utcnow()
db.session.commit()
# TODO: Actually do an audit.
def audit(self):
"""
Complete a cryptographic audit of files stored on the farmer. If
the farmer completes an audit we also update when we last saw them.
"""
self.ping()
def set_height(self, height):
"""Set the farmers advertised height."""
self.validate()
farmer = self.lookup()
farmer.height = height
db.session.commit()
return self.height
|
Python
| 0
|
@@ -2820,24 +2820,75 @@
validate()%0A%0A
+ self.ping() # also serves as a valid ping%0A
farm
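Decoded, the hunk makes a height report double as a keep-alive by pinging before the update. Plausible resulting method:

def set_height(self, height):
    """Set the farmer's advertised height."""
    self.validate()
    self.ping()  # also serves as a valid ping
    farmer = self.lookup()
    farmer.height = height
    db.session.commit()
    return self.height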
|
efc6f47616bc2128eee885383f43cf9d42bdb374
|
Include non-partners who are owners or payers
|
mchimp_generationsocis-sql.py
|
mchimp_generationsocis-sql.py
|
#!/usr/bin/env python
#-*- coding: utf8 -*-
import psycopg2
import config
import dbutils
import codecs
import sys
from consolemsg import step, error, fail, warn
def esPersonaFisica(soci) :
return 0 if soci.nif[2] in "ABCDEFGHJNPQRSUVW" else 1
def ambPuntDeMilers(numero) :
return '{:,}'.format(numero).replace(',','.')
db = psycopg2.connect(**config.psycopg)
with db.cursor() as cursor :
cursor.execute("""\
SELECT
soci_id,
name,
nsoci,
nif,
lang,
consumannual,
ncontractes,
email,
already_invested IS NOT NULL AS already_invested,
FALSE
FROM (
SELECT DISTINCT ON (sub.soci_id)
sub.soci_id as soci_id,
sub.name AS name,
sub.nsoci AS nsoci,
sub.nif AS nif,
sub.lang AS lang,
sub.consumannual AS consumannual,
sub.ncontractes AS ncontractes,
address.email,
FALSE
FROM (
SELECT
soci.id AS soci_id,
soci.name AS name,
soci.ref AS nsoci,
soci.vat AS nif,
soci.lang AS lang,
SUM(cups.conany_kwh) AS consumannual,
COUNT(cups.conany_kwh) AS ncontractes,
FALSE
FROM res_partner AS soci
LEFT JOIN
giscedata_polissa AS pol ON (
pol.titular = soci.id OR
pol.pagador = soci.id
)
LEFT JOIN
giscedata_cups_ps AS cups ON cups.id = pol.cups
LEFT JOIN
res_partner_category_rel AS cat ON cat.partner_id = soci.id
WHERE
cat.category_id = 8 AND
soci.active AND
pol.active AND
pol.state = 'activa' AND
cups.active AND
TRUE
GROUP BY
soci.id
ORDER BY
soci.id ASC
) AS sub
LEFT JOIN
res_partner_address AS address ON (address.partner_id = sub.soci_id)
WHERE
address.active AND
address.email IS NOT NULL AND
address.email != '' AND
TRUE
GROUP BY
sub.soci_id,
sub.name,
sub.nsoci,
sub.nif,
sub.lang,
sub.consumannual,
sub.ncontractes,
address.email,
TRUE
) AS result
LEFT JOIN (
SELECT DISTINCT
partner_id AS already_invested
FROM payment_line AS line
LEFT JOIN
payment_order AS remesa ON remesa.id = line.order_id
WHERE
remesa.mode = 19
) AS investments ON already_invested = soci_id
WHERE
TRUE
ORDER BY
name ASC
;
""")
shareUse = 170
recommendedPercent = 70
shareCost = 100
print u'\t'.join(unicode(x) for x in [
'ID',
'Name',
'Call name',
'Soci',
'NIF',
'E-mail',
'Language',
'Legal entity',
'Contracts',
'Anual use',
'Recommended shares',
'Covered use',
'Recommended investment',
'Already invested',
'Unknown use',
'Small use',
])
for line in dbutils.fetchNs(cursor) :
try:
totalUse = line.consumannual
if totalUse is None:
warn("Soci {} amb consum null".format(
line.nsoci))
totalUse = 0
# continue
if totalUse * recommendedPercent < shareUse * 100 :
error("El soci {} no te prou consum ({})".format(line.nsoci, totalUse))
# continue
if line.nif[:2] != 'ES':
warn("Soci amb un VAT code no espanyol: {}".format(line.nif[:2]))
recommendedShares = (totalUse*recommendedPercent/100) // shareUse
recommendedInvestment = recommendedShares * shareCost
print '\t'.join(
str(x)
.replace('\t',' ')
.replace('\n',' ')
.replace('\r',' ')
for x in [
line.soci_id,
line.name,
line.name.split(',')[-1].strip() if esPersonaFisica(line) else '',
line.nsoci[1:].lstrip('0'),
line.nif[2:],
line.email,
line.lang,
0 if esPersonaFisica(line) else 1,
line.ncontractes,
ambPuntDeMilers(totalUse),
ambPuntDeMilers(recommendedShares),
ambPuntDeMilers(recommendedShares * shareUse),
ambPuntDeMilers(recommendedInvestment),
1 if line.already_invested else 0,
1 if totalUse is None else 0,
1 if totalUse * recommendedPercent < shareUse * 100 else 0,
])
except Exception as e:
import traceback
error("Error processant soci {}\n{}\n{}".format(
line.nsoci,
e,
"\n".join(traceback.format_stack()),
))
|
Python
| 0
|
@@ -154,16 +154,54 @@
il, warn
+%0Afrom namespace import namespace as ns
%0A%0Adef es
@@ -696,16 +696,62 @@
vested,%0A
+ ARRAY%5B8%5D @%3E categories AS essoci,%0A
@@ -1117,32 +1117,60 @@
address.email,%0A
+ categories,%0A
@@ -1520,32 +1520,94 @@
AS ncontractes,%0A
+ ARRAY_AGG(cat.category_id) as categories,%0A
@@ -2029,16 +2029,36 @@
S cat ON
+%0A
cat.par
@@ -2101,52 +2101,8 @@
ERE%0A
- cat.category_id = 8 AND%0A
@@ -2916,32 +2916,60 @@
address.email,%0A
+ categories,%0A
@@ -3871,32 +3871,54 @@
'Small use',%0A
+ 'Is Partner',%0A
%5D)%0A%0A%0A
@@ -4636,29 +4636,8 @@
st%0A%0A
- %0A
@@ -5585,32 +5585,69 @@
100 else 0,%0A
+ 1 if line.essoci else 0 %0A
%5D)%0A
@@ -5886,16 +5886,55 @@
)) %0A
+ error(ns(cas=line).dump())%0A
%0A%0A%0A%0A%0A%0A%0A%0A
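Taken together, the hunks stop filtering on category 8 in the WHERE clause: each member's categories are aggregated with ARRAY_AGG(cat.category_id), an essoci flag is derived in the outer query via PostgreSQL array containment (ARRAY[8] @> categories), the report gains an 'Is Partner' column fed by that flag, and failures also dump the row through a namespace helper. A hypothetical Python illustration of the flag logic only (made-up rows, not the commit's code):

rows = [
    {'name': 'a', 'categories': [8, 3]},  # partner: category 8 present
    {'name': 'b', 'categories': [5]},     # owner/payer only
]
for row in rows:
    row['essoci'] = 8 in row['categories']  # mirrors ARRAY[8] @> categories
print([(r['name'], 1 if r['essoci'] else 0) for r in rows])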
|
31caceefaa2f6b6dc7d2601d8537e613ce600743
|
Use account's static groups instead of a conversation's groups for dialogue group state
|
go/apps/dialogue/view_definition.py
|
go/apps/dialogue/view_definition.py
|
import json
from django.http import HttpResponse
from django.forms import Form
from go.api.go_api import client
from go.api.go_api.client import GoApiError
from go.conversation.view_definition import (
ConversationViewDefinitionBase, ConversationTemplateView)
class DialogueEditView(ConversationTemplateView):
"""This app is a unique and special snowflake, so it gets special views.
"""
view_name = 'edit'
path_suffix = 'edit/'
template_base = 'dialogue'
def get(self, request, conversation):
r = client.rpc(
request.session.session_key, 'conversation.dialogue.get_poll',
[request.user_api.user_account_key,
conversation.key])
if r.status_code != 200:
raise GoApiError(
"Failed to load dialogue from Go API:"
" (%r) %r." % (r.status_code, r.text))
model_data = {
'campaign_id': request.user_api.user_account_key,
'conversation_key': conversation.key,
'groups': [g.get_data() for g in conversation.get_groups()],
'urls': {
'show': self.get_view_url(
'show',
conversation_key=conversation.key)
}
}
model_data.update(r.json['result']['poll'])
return self.render_to_response({
'conversation': conversation,
'session_id': request.session.session_key,
'model_data': json.dumps(model_data),
})
class UserDataView(ConversationTemplateView):
view_name = 'user_data'
path_suffix = 'users.csv'
def get(self, request, conversation):
# TODO: write new CSV data export
csv_data = "TODO: write data export."
return HttpResponse(csv_data, content_type='application/csv')
class SendDialogueForm(Form):
# TODO: Something better than this?
pass
class ConversationViewDefinition(ConversationViewDefinitionBase):
edit_view = DialogueEditView
extra_views = (
UserDataView,
)
action_forms = {
'send_jsbox': SendDialogueForm,
}
|
Python
| 0.000001
|
@@ -873,16 +873,129 @@
text))%0A%0A
+ contact_store = conversation.user_api.contact_store%0A groups = contact_store.list_static_groups()%0A%0A
@@ -1170,33 +1170,14 @@
in
-conversation.get_
groups
-()
%5D,%0A
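Decoded, the hunks fetch the account's contact store and feed its static groups into the model data in place of conversation.get_groups(). Reconstructed fragment:

contact_store = conversation.user_api.contact_store
groups = contact_store.list_static_groups()

model_data = {
    'campaign_id': request.user_api.user_account_key,
    'conversation_key': conversation.key,
    'groups': [g.get_data() for g in groups],
    # ... the 'urls' entry is unchanged
}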
|
efcf5628225e5c07bcc1f3a9741557a12d4421aa
|
Remove dictionary pruning
|
src/project/corpus.py
|
src/project/corpus.py
|
import sys
import codecs
from os import listdir
from os.path import isdir, isfile, join, splitext
from random import sample
from math import ceil
from nltk.corpus import stopwords
from gensim.interfaces import TransformationABC
from gensim.corpora import Dictionary, MmCorpus, TextCorpus
from gensim.utils import ClippedCorpus
ignore_words = stopwords.words("english")
class Corpus(object):
"""Wrapper class around Corpus streaming"""
def __init__(self, directory=None, dictionary=None, distributions=None, corpus=None, max_docs=None):
if directory:
docs = self.get_docs(directory, distributions, max_docs)
if not dictionary:
""" Construct dictionary without having all texts in memory, based off the example in the Gensim docs"""
dictionary = Dictionary(filter_common(codecs.open(doc, encoding='utf-8').read().lower().split()) for doc in docs)
once_words = [id for id, freq in dictionary.dfs.iteritems() if freq is 1]
dictionary.filter_tokens(once_words) # Exclude if appears once
dictionary.compactify() # Remove gaps in ids left by removing words
dictionary.filter_extremes(no_below=20) # Filter if in less than 10 docs
self.dictionary = dictionary
else:
self.dictionary = Dictionary.load(dictionary)
self.docs = PaperCorpus(docs)
elif dictionary and corpus:
self.dictionary = Dictionary.load(dictionary)
self.docs = MmCorpus(corpus)
else:
self.dictionary = Dictionary([])
self.docs = PaperCorpus([])
self.transformation = IdentityTransformation()
self.train_time = None
self.sim_index = None
return
def __iter__(self):
# Apply transformation to corpus if it exists
self.docs = self.transformation[self.docs]
if type(self.docs) is PaperCorpus:
# Need to convert to a vector representation if still in plain text
for doc in self.docs.get_texts():
yield self.dictionary.doc2bow(doc)
else:
docs = self.transformation[self.docs]
for doc in docs:
yield doc
def save(self, dictionary_file="corpus.dict", corpus_file="corpus.mm", sup_file=None):
if dictionary_file:
Dictionary.save(self.dictionary, dictionary_file)
if corpus_file:
MmCorpus.serialize(corpus_file, self)
if sup_file and type(self.docs) is PaperCorpus:
self.docs.save(sup_file)
@classmethod
def load(cls, dictionary_file=None, corpus_file=None, sup_file=None):
if isfile(dictionary_file) and isfile(corpus_file):
return cls(dictionary=dictionary_file, corpus=corpus_file)
return False
def __len__(self):
return len(self.docs)
def transform_corpus(self, transformation):
"""
Function to transform one corpus representation into another. Applying transformations can be costly
as they are done on the fly. Save to disk first if access will be frequent
:param transformation: Transformation to be applied to the corpus
:return:
"""
self.docs = self.transformation[self.docs]
transformed_model = transformation(self.docs)
self.transformation = transformed_model
return
def clip_corpus(self, max_docs=None):
"""
Function to clip a corpus to a max size; if max_docs is None then the corpus remains its current size
:param max_docs:
:return:
"""
self.docs = ClippedCorpus(self.docs, max_docs)
def get_train_time(self):
return self.train_time
def _build_sim_index(self, index_dir=None, num_features=None):
pass
@staticmethod
def _is_corpus_file(directory, doc):
return isfile(join(directory, doc)) and splitext(doc)[-1] == ".txt"
@staticmethod
def get_docs(directory, distributions=None, max_docs=None):
if distributions:
if max_docs and distributions and max_docs <= distributions["total"]:
max_dis = max_docs / distributions["total"]
else:
max_dis = 1
docs = list()
for name in distributions:
if name is "total":
continue
current_dir = join(directory, name)
temp = [join(current_dir, doc) for doc in listdir(current_dir) if Corpus._is_corpus_file(current_dir, doc)]
select_amount = int(ceil(len(temp) * max_dis))
docs.extend(sample(temp, select_amount))
else:
docs = [join(directory, doc) for doc in listdir(directory) if Corpus._is_corpus_file(directory, doc)]
return docs
class PaperCorpus(TextCorpus):
# Wrap plain text document streaming - allows us to apply transformations to it
def get_texts(self):
for doc in self.input:
handle = codecs.open(doc, encoding='utf-8')
yield filter_common(handle.read().lower().split())
def save(self, sup_file):
file_log = open(sup_file, 'a+')
for doc in self.input:
file_log.write("%s\n" % doc)
file_log.close()
class IdentityTransformation(TransformationABC):
# Identity transformation which returns the input corpus
def __getitem__(self, vec):
return vec
def corpus_equal(corpus1, corpus2):
if len(corpus1) == len(corpus2):
for doc1, doc2 in zip(corpus1, corpus2):
if doc1 != doc2:
return False
return True
def filter_common(word_list):
words = [word for word in word_list if len(word) > 1]
return words
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]) and isfile(sys.argv[2]) and isfile(sys.argv[3]):
load_corpus = Corpus()
corpus = Corpus(directory=sys.argv[1])
# TODO: Write proper tests
# corpus.transform_corpus(models.TfidfModel)
corpus.save(dictionary_file=sys.argv[2], corpus_file=sys.argv[3])
load_corpus.load(dictionary_file=sys.argv[2], corpus_file=sys.argv[3])
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__":
main()
|
Python
| 0.000001
|
@@ -908,24 +908,40 @@
doc in docs
+, prune_at=None
)%0A
@@ -1258,16 +1258,29 @@
below=20
+, keep_n=None
) # Fil
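Decoded, the hunks pass prune_at=None when building the gensim Dictionary and keep_n=None to filter_extremes, so no vocabulary is silently discarded while streaming. Sketch:

dictionary = Dictionary(
    (filter_common(codecs.open(doc, encoding='utf-8').read().lower().split())
     for doc in docs),
    prune_at=None)  # never prune the growing vocabulary
# ... once-word filtering and compactify() unchanged, then:
dictionary.filter_extremes(no_below=20, keep_n=None)  # no cap on kept tokens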
|
513817ef4ede24ce7609afb9d025107d8f96532b
|
Fix test on Windows
|
gouda/tests/test_decode_barcodes.py
|
gouda/tests/test_decode_barcodes.py
|
import unittest
import shutil
from pathlib import Path
from gouda.engines import ZbarEngine
from gouda.scripts.decode_barcodes import main
from utils import temp_directory_with_files
TESTDATA = Path(__file__).parent.joinpath('test_data')
@unittest.skipUnless(ZbarEngine.available(), 'ZbarEngine unavailable')
class TestRename(unittest.TestCase):
def test_rename(self):
"File is renamed with value of barcode"
with temp_directory_with_files(TESTDATA.joinpath('code128.png')) as tempdir:
main(['zbar', '--action=rename', unicode(tempdir)])
self.assertEqual(
['Stegosaurus.png'],
[path.name for path in sorted(tempdir.iterdir())]
)
def test_rename_multiple(self):
"File with multiple barcodes results in renamed / copied to three files"
with temp_directory_with_files(TESTDATA.joinpath('BM001128287.jpg')) as tempdir:
main(['zbar', '--action=rename', unicode(tempdir)])
self.assertEqual(
['BM001128286.jpg', 'BM001128287.jpg', 'BM001128288.jpg'],
[path.name for path in sorted(tempdir.iterdir())]
)
def test_rename_with_collisions(self):
"Files with same barcode values results in just a single rename"
with temp_directory_with_files(TESTDATA.joinpath('code128.png')) as tempdir:
shutil.copy(
unicode(TESTDATA.joinpath('code128.png')),
unicode(tempdir.joinpath('first copy.png'))
)
shutil.copy(
unicode(TESTDATA.joinpath('code128.png')),
unicode(tempdir.joinpath('second copy.png'))
)
main(['zbar', '--action=rename', unicode(tempdir)])
self.assertEqual(
['Stegosaurus.png', 'first copy.png', 'second copy.png'],
[path.name for path in sorted(tempdir.iterdir())]
)
def test_rename_avoid_collisions(self):
"Files with same barcode values results in new files with suffixes"
with temp_directory_with_files(TESTDATA.joinpath('code128.png')) as tempdir:
shutil.copy(
unicode(TESTDATA.joinpath('code128.png')),
unicode(tempdir.joinpath('first copy.png'))
)
shutil.copy(
unicode(TESTDATA.joinpath('code128.png')),
unicode(tempdir.joinpath('second copy.png'))
)
main(['zbar', '--action=rename', unicode(tempdir), '--avoid-collisions'])
print([path.name for path in sorted(tempdir.iterdir())])
self.assertEqual(
['Stegosaurus-1.png', 'Stegosaurus-2.png', 'Stegosaurus.png'],
[path.name for path in sorted(tempdir.iterdir())]
)
if __name__ == '__main__':
unittest.main()
|
Python
| 0
|
@@ -1917,32 +1917,54 @@
empdir.iterdir()
+, key=lambda p: p.name
)%5D%0A )
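Decoded, the fix sorts directory entries by name so the expected list is deterministic across platforms (Windows and POSIX need not agree on iteration or comparison order). Resulting assertion:

self.assertEqual(
    ['Stegosaurus-1.png', 'Stegosaurus-2.png', 'Stegosaurus.png'],
    [path.name for path in sorted(tempdir.iterdir(), key=lambda p: p.name)]
)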
|
db22f7a508524409f5e03fdbcbf6a394670ebbde
|
Use built-in auth views
|
sweettooth/auth/urls.py
|
sweettooth/auth/urls.py
|
from django.conf.urls.defaults import patterns, url
from django.views.generic import TemplateView
urlpatterns = patterns('',
url(r'login/$', 'django.contrib.auth.views.login', dict(template_name='login.html'), name='login'),
url(r'logout/$', 'django.contrib.auth.views.logout', name='logout'),
url(r'register/$', 'auth.views.register', name='register'),
)
|
Python
| 0.000001
|
@@ -174,16 +174,24 @@
.login',
+%0A
dict(te
@@ -288,16 +288,59 @@
logout',
+%0A dict(template_name='logout.html'),
name='l
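Decoded, the logout route gains a template_name just as login already had, and the login entry is rewrapped. Reconstructed urlpatterns:

urlpatterns = patterns('',
    url(r'login/$', 'django.contrib.auth.views.login',
        dict(template_name='login.html'), name='login'),
    url(r'logout/$', 'django.contrib.auth.views.logout',
        dict(template_name='logout.html'),
        name='logout'),
    url(r'register/$', 'auth.views.register', name='register'),
)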
|
91a30d8e5cd18e3c5e6c5f00e48f44d6b33346b5
|
clean up cache initialization in mail completer
|
roles/dotfiles/files/.vim/rplugin/python3/deoplete/sources/mail.py
|
roles/dotfiles/files/.vim/rplugin/python3/deoplete/sources/mail.py
|
from .base import Base
from itertools import chain
from deoplete.util import parse_buffer_pattern, getlines
import re
from subprocess import PIPE, Popen
import string
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.filetypes = ['mail']
self.name = 'mail'
self.mark = '[@]'
self.matchers = ['matcher_length', 'matcher_full_fuzzy']
self.min_pattern_length = 0
self.limit = 1000000
self.__pattern = re.compile('^(Bcc|Cc|From|Reply-To|To):(.*, ?| ?)')
self.__wrapper = self.__find_reattach_to_user_namespace_binary()
self.__binary = self.__find_lbdbq_binary()
self.__candidates = None
def on_event(self, context):
self.__candidates = []
data = self.__lbdbq('.')
if data:
for line in data:
try:
address, name, source = line.strip().split('\t')
if name:
address = name + ' <' + address + '>'
self.__candidates.append({'word': address, 'kind': source})
except:
pass
def gather_candidates(self, context):
result = self.__pattern.search(context['input'])
if result is not None:
if not self.__candidates:
self.on_event(context)
return self.__candidates
def __find_lbdbq_binary(self):
return self.vim.call('exepath', 'lbdbq')
def __find_reattach_to_user_namespace_binary(self):
return self.vim.call('exepath', 'reattach-to-user-namespace')
def __lbdbq(self, query):
if not self.__binary:
return None
if self.__wrapper:
command = [self.__wrapper, self.__binary, query]
else:
command = [self.__binary, query]
try:
process = Popen(command, stderr = PIPE, stdout = PIPE)
out, err = process.communicate()
if not process.returncode:
lines = out.decode('utf-8').split('\n')
if len(lines) > 1:
lines.pop(0)
return lines
except:
pass
|
Python
| 0.000001
|
@@ -721,32 +721,316 @@
self, context):%0A
+ self.__cache()%0A%0A def gather_candidates(self, context):%0A result = self.__pattern.search(context%5B'input'%5D)%0A if result is not None:%0A if not self.__candidates:%0A self.__cache()%0A return self.__candidates%0A%0A def __cache(self):%0A
self.__c
@@ -1439,253 +1439,8 @@
ss%0A%0A
- def gather_candidates(self, context):%0A result = self.__pattern.search(context%5B'input'%5D)%0A if result is not None:%0A if not self.__candidates:%0A self.on_event(context)%0A return self.__candidates%0A%0A
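Decoded, the refactor funnels both entry points through a private __cache() method instead of gather_candidates calling on_event. Plausible resulting shape inside the Source class (the cache body itself is unchanged):

def on_event(self, context):
    self.__cache()

def gather_candidates(self, context):
    result = self.__pattern.search(context['input'])
    if result is not None:
        if not self.__candidates:
            self.__cache()
        return self.__candidates

def __cache(self):
    self.__candidates = []
    # ... lbdbq query and candidate parsing as before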
|
87d2780a710e98c3b824583a2cf2607461bce35c
|
remove an unnecessary import
|
roles/dotfiles/files/.vim/rplugin/python3/deoplete/sources/mail.py
|
roles/dotfiles/files/.vim/rplugin/python3/deoplete/sources/mail.py
|
from .base import Base
from itertools import chain
from deoplete.util import parse_buffer_pattern, getlines
import re
from subprocess import PIPE, Popen
import string
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.filetypes = ['mail']
self.name = 'mail'
self.mark = '[@]'
self.matchers = ['matcher_length', 'matcher_full_fuzzy']
self.sorters = ['sorter_smart']
self.min_pattern_length = 0
self.limit = 1000000
self.__pattern = re.compile('^(Bcc|Cc|From|Reply-To|To):(.*, ?| ?)')
self.__wrapper = self.__find_reattach_to_user_namespace_binary()
self.__binary = self.__find_lbdbq_binary()
self.__candidates = None
def on_event(self, context):
self.__cache()
def gather_candidates(self, context):
result = self.__pattern.search(context['input'])
if result is not None:
if not self.__candidates:
self.__cache()
return self.__candidates
def __cache(self):
self.__candidates = []
data = self.__lbdbq('.')
if data:
for line in data:
try:
address, name, source = line.strip().split('\t')
if name:
address = name + ' <' + address + '>'
self.__candidates.append({'word': address, 'kind': source})
except:
pass
def __find_lbdbq_binary(self):
return self.vim.call('exepath', 'lbdbq')
def __find_reattach_to_user_namespace_binary(self):
return self.vim.call('exepath', 'reattach-to-user-namespace')
def __lbdbq(self, query):
if not self.__binary:
return None
if self.__wrapper:
command = [self.__wrapper, self.__binary, query]
else:
command = [self.__binary, query]
try:
process = Popen(command, stderr = PIPE, stdout = PIPE)
out, err = process.communicate()
if not process.returncode:
lines = out.decode('utf-8').split('\n')
if len(lines) > 1:
lines.pop(0)
return lines
except:
pass
|
Python
| 0.000005
|
@@ -151,22 +151,8 @@
open
-%0Aimport string
%0A%0Acl
|
92f0dd46bbc1f6fa8d9539d026d6ec1e968cbcfc
|
Drop inaccessible code from singleton.py
|
sympy/core/singleton.py
|
sympy/core/singleton.py
|
"""Singleton mechanism"""
from .assumptions import ManagedProperties
class SingletonRegistry(object):
"""
A map from singleton classes to the corresponding instances.
"""
def __init__(self):
self._classes_to_install = {}
# Dict of classes that have been registered, but that have not yet been
# installed as an attribute of this SingletonRegistry.
# Installation automatically happens at the first attempt to access the
# attribute.
# The purpose of this is to allow registration during class
# initialization during import, but not trigger object creation until
# actual use (which should not happen until after all imports are
# finished).
def register(self, cls):
self._classes_to_install[cls.__name__] = cls
def __setattr__(self, name, obj):
setattr(self.__class__, name, obj)
def __delattr__(self, name):
delattr(self.__class__, name)
def __getattr__(self, name):
"""Python calls __getattr__ if no attribute of that name was installed
yet.
This __getattr__ checks whether a class with the requested name was
already registered but not installed; if not, it raises an AttributeError.
Otherwise, retrieves the class, calculates its singleton value, installs
it as an attribute of the given name, and unregisters the class."""
if name not in self._classes_to_install:
raise AttributeError(
"Attribute '%s' was not installed on SymPy registry %s" % (
name, self))
class_to_install = self._classes_to_install[name]
value_to_install = class_to_install()
self.__setattr__(name, value_to_install)
del self._classes_to_install[name]
return value_to_install
def __repr__(self):
return "S"
S = SingletonRegistry()
class Singleton(ManagedProperties):
"""
Metaclass for singleton classes.
A singleton class has only one instance which is returned every time the
class is instantiated. Additionally, this instance can be accessed through
the global registry object S as S.<class_name>.
Examples
========
>>> from sympy import S, Basic
>>> from sympy.core.singleton import Singleton
>>> class MySingleton(Basic, metaclass=Singleton):
... pass
>>> Basic() is Basic()
False
>>> MySingleton() is MySingleton()
True
>>> S.MySingleton is MySingleton()
True
Notes
=====
Instance creation is delayed until the first time the value is accessed.
This metaclass is a subclass of ManagedProperties because that is the
metaclass of many classes that need to be Singletons (Python does not allow
subclasses to have a different metaclass than the superclass, except the
subclass may use a subclassed metaclass).
"""
_instances = {}
"Maps singleton classes to their instances."
def __new__(cls, *args, **kwargs):
result = super(Singleton, cls).__new__(cls, *args, **kwargs)
S.register(result)
return result
def __call__(self, *args, **kwargs):
# Called when application code says SomeClass(), where SomeClass is a
# class of which Singleton is the metaclass.
# __call__ is invoked first, before __new__() and __init__().
if self not in Singleton._instances:
# Invokes the standard constructor of SomeClass.
Singleton._instances[self] = \
super(Singleton, self).__call__(*args, **kwargs)
return Singleton._instances[self]
# Inject pickling support.
def __getnewargs__(self):
return ()
self.__getnewargs__ = __getnewargs__
|
Python
| 0.000001
|
@@ -898,80 +898,8 @@
j)%0A%0A
- def __delattr__(self, name):%0A delattr(self.__class__, name)%0A%0A
@@ -3543,141 +3543,4 @@
lf%5D%0A
-%0A # Inject pickling support.%0A def __getnewargs__(self):%0A return ()%0A self.__getnewargs__ = __getnewargs__%0A
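Both deletions are dead weight: the pickling-support block sat after the return statement in __call__ and so could never execute, and the registry's presumably unused __delattr__ goes with it. Decoded, __call__ ends up as:

def __call__(self, *args, **kwargs):
    if self not in Singleton._instances:
        Singleton._instances[self] = \
            super(Singleton, self).__call__(*args, **kwargs)
    return Singleton._instances[self]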
|
9b3702c9240ed533ccf9f08ba666103234a53c6c
|
Return the room_alias when GETing public rooms.
|
synapse/storage/room.py
|
synapse/storage/room.py
|
# -*- coding: utf-8 -*-
# Copyright 2014 matrix.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from sqlite3 import IntegrityError
from synapse.api.errors import StoreError
from synapse.api.events.room import RoomTopicEvent
from ._base import SQLBaseStore, Table
import collections
import json
import logging
logger = logging.getLogger(__name__)
class RoomStore(SQLBaseStore):
@defer.inlineCallbacks
def store_room(self, room_id, room_creator_user_id, is_public):
"""Stores a room.
Args:
room_id (str): The desired room ID, can be None.
room_creator_user_id (str): The user ID of the room creator.
is_public (bool): True to indicate that this room should appear in
public room lists.
Raises:
StoreError if the room could not be stored.
"""
try:
yield self._simple_insert(RoomsTable.table_name, dict(
room_id=room_id,
creator=room_creator_user_id,
is_public=is_public
))
except IntegrityError:
raise StoreError(409, "Room ID in use.")
except Exception as e:
logger.error("store_room with room_id=%s failed: %s", room_id, e)
raise StoreError(500, "Problem creating room.")
def store_room_config(self, room_id, visibility):
return self._simple_update_one(
table=RoomsTable.table_name,
keyvalues={"room_id": room_id},
updatevalues={"is_public": visibility}
)
def get_room(self, room_id):
"""Retrieve a room.
Args:
room_id (str): The ID of the room to retrieve.
Returns:
A namedtuple containing the room information, or an empty list.
"""
query = RoomsTable.select_statement("room_id=?")
return self._execute(
RoomsTable.decode_single_result, query, room_id,
)
@defer.inlineCallbacks
def get_rooms(self, is_public, with_topics):
"""Retrieve a list of all public rooms.
Args:
is_public (bool): True if the rooms returned should be public.
with_topics (bool): True to include the current topic for the room
in the response.
Returns:
A list of room dicts containing at least a "room_id" key, and a
"topic" key if one is set and with_topic=True.
"""
room_data_type = RoomTopicEvent.TYPE
public = 1 if is_public else 0
latest_topic = ("SELECT max(room_data.id) FROM room_data WHERE "
+ "room_data.type = ? GROUP BY room_id")
query = ("SELECT rooms.*, room_data.content FROM rooms LEFT JOIN "
+ "room_data ON rooms.room_id = room_data.room_id WHERE "
+ "(room_data.id IN (" + latest_topic + ") "
+ "OR room_data.id IS NULL) AND rooms.is_public = ?")
res = yield self._execute(
self.cursor_to_dict, query, room_data_type, public
)
# return only the keys the specification expects
ret_keys = ["room_id", "topic"]
# extract topic from the json (icky) FIXME
for i, room_row in enumerate(res):
try:
content_json = json.loads(room_row["content"])
room_row["topic"] = content_json["topic"]
except:
pass # no topic set
# filter the dict based on ret_keys
res[i] = {k: v for k, v in room_row.iteritems() if k in ret_keys}
defer.returnValue(res)
class RoomsTable(Table):
table_name = "rooms"
fields = [
"room_id",
"is_public",
"creator"
]
EntryType = collections.namedtuple("RoomEntry", fields)
|
Python
| 0.000003
|
@@ -3249,20 +3249,161 @@
tent
- FROM rooms
+, room_alias FROM rooms %22%0A + %22LEFT JOIN %22%0A + %22room_aliases ON room_aliases.room_id = rooms.room_id %22%0A + %22
LEFT
@@ -3823,16 +3823,30 @@
%22topic%22
+, %22room_alias%22
%5D%0A%0A
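Decoded, the query joins room_aliases so each public room row carries its alias, and room_alias joins the whitelisted response keys. Reconstructed fragments:

query = ("SELECT rooms.*, room_data.content, room_alias FROM rooms "
         + "LEFT JOIN "
         + "room_aliases ON room_aliases.room_id = rooms.room_id "
         + "LEFT JOIN "
         + "room_data ON rooms.room_id = room_data.room_id WHERE "
         + "(room_data.id IN (" + latest_topic + ") "
         + "OR room_data.id IS NULL) AND rooms.is_public = ?")

ret_keys = ["room_id", "topic", "room_alias"]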
|
32fba62d157953eaeea6e5885a7ea860632a1945
|
rename filter function and set the second parameter as required
|
sync_settings/helper.py
|
sync_settings/helper.py
|
# -*- coding: utf-8 -*-
import os, re
from urllib import parse
def getDifference (setA, setB):
return list(filter(lambda el: el not in setB, setA))
def getHomePath (fl = ""):
if isinstance(fl, str) and fl != "":
return joinPath((os.path.expanduser('~'), fl))
return os.path.expanduser('~')
def existsPath(path, isFolder = False):
opath = os.path
if isinstance(path, str) and path != "" and opath.exists(path):
if (isFolder and opath.isdir(path)): return True
if (not isFolder and opath.isfile(path)): return True
return False
def joinPath (pathTuple):
if isinstance(pathTuple, tuple) and len(pathTuple) > 1:
return os.path.join(*pathTuple)
return None
def getFiles (path):
if existsPath(path, True):
f = []
for root, dirs, files in os.walk(path):
f.extend([joinPath((root, file)) for file in files])
return f
return []
def excludeByPatterns (elements, patterns = []):
isValidElements = isinstance(elements, list) and len(elements) > 0
isValidPattern = isinstance(patterns, list) and len(patterns) > 0
results = []
if isValidElements and isValidPattern:
for element in elements:
for pattern in patterns:
extension = '.' + element.split(os.extsep)[-1]
filename = os.path.basename(element)
if element.startswith(pattern) and existsPath(pattern, True) and existsPath(joinPath((pattern, filename))):
results.append(element)
elif (extension == pattern or element == pattern) and existsPath(element):
results.append(element)
return getDifference(elements, results)
return elements
def encodePath(path):
if isinstance(path, str) and len(path) > 0:
return parse.quote(path)
return None
def decodePath(path):
if isinstance(path, str) and len(path) > 0:
return parse.unquote(path)
return None
|
Python
| 0
|
@@ -884,16 +884,21 @@
exclude
+Files
ByPatter
@@ -919,21 +919,16 @@
patterns
- = %5B%5D
):%0A isV
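Decoded, the function becomes excludeFilesByPatterns and patterns loses its default, so callers must now pass it explicitly:

def excludeFilesByPatterns (elements, patterns):
    isValidElements = isinstance(elements, list) and len(elements) > 0
    isValidPattern = isinstance(patterns, list) and len(patterns) > 0
    # ... body unchanged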
|
9d53e369e9757c659c72ca1e8bbb8eea8080ab2d
|
Add possibility to pass an output stream to the update method
|
updater.py
|
updater.py
|
import configparser
import hashlib
import json
import os
import requests
def go_through_files(cur_dir, data, repo_name, bw_list, is_whitelist):
updated = False
for content in data:
path = os.path.join(cur_dir, content['name'])
print(path)
# check if file is in the black/whitelist
if (content["name"] in bw_list) != is_whitelist:
print("file found in blacklist/not found in whitelist")
continue
# if there is a directory go through it per recursive call
if(content["type"] == "dir"):
print("file is directory")
os.makedirs(path, exist_ok=True)
resp = requests.get(url=content['url'])
if go_through_files(path, json.loads(resp.text), repo_name, bw_list, is_whitelist):
updated = True
continue
try:
# check if the file is there
# hash the current file
with open(path, "r", encoding="utf-8") as f:
sha1 = hashlib.sha1()
sha1.update(f.read().encode("utf-8"))
hashoff = format(sha1.hexdigest())
except IOError: # if no file is offline always download
hashoff = None
# download the most recent file
resp = requests.get(url=content["download_url"])
if hashoff:
# hash the most recent file
sha1 = hashlib.sha1()
sha1.update(resp.text.encode('utf-8'))
hashon = format(sha1.hexdigest())
# compare hash of the offline and online file and overwrite if they are
# different
if not hashoff or (hashon != hashoff):
updated = True
print("updating {}", path)
with open(path, "w", encoding="utf-8") as f:
f.write(resp.text)
else:
print("no difference found")
return updated
def update():
config = configparser.ConfigParser()
config.read_file(open('updater.settings'))
is_whitelist = config.getboolean("Section1", "whitelist")
repo_name = config.get("Section1", "repo")
bw_list = str(config.get("Section1", "list")).split("\n")
# get a list of files in the repo
resp = requests.get(url="https://api.github.com/repos/" + repo_name + "/contents")
data = json.loads(resp.text)
# check these files
return go_through_files("", data, repo_name, bw_list, is_whitelist)
if __name__ == '__main__':
update()
|
Python
| 0
|
@@ -49,16 +49,27 @@
mport os
+%0Aimport sys
%0A%0Aimport
@@ -144,24 +144,32 @@
is_whitelist
+, output
):%0A updat
@@ -276,16 +276,29 @@
int(path
+, file=output
)%0A%0A
@@ -461,24 +461,37 @@
n whitelist%22
+, file=output
)%0A
@@ -644,16 +644,29 @@
rectory%22
+, file=output
)%0A
@@ -849,16 +849,24 @@
hitelist
+, output
):%0A
@@ -1797,16 +1797,29 @@
%7D%22, path
+, file=output
)%0A
@@ -1957,16 +1957,29 @@
e found%22
+, file=output
)%0A re
@@ -2004,16 +2004,33 @@
update(
+output=sys.stdout
):%0A c
@@ -2535,16 +2535,24 @@
hitelist
+, output
)%0A%0A%0Aif _
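Decoded, an output stream threads through both functions (defaulting to sys.stdout) and every print call gains file=output. Sketch of the changed signatures with one representative call; elided parts are unchanged:

import sys

def go_through_files(cur_dir, data, repo_name, bw_list, is_whitelist, output):
    for content in data:
        print(os.path.join(cur_dir, content['name']), file=output)
        # ... remaining prints likewise gain file=output,
        # and the recursive call passes output along

def update(output=sys.stdout):
    # ... config loading and repo listing as before, then:
    return go_through_files("", data, repo_name, bw_list, is_whitelist, output)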
|
e4bde56715b838116e2a0c06be20c8391570d0ab
|
Refactor popup creation into a function
|
updates.py
|
updates.py
|
#!/usr/bin/python3
# requires system Python and the python3-apt package
from collections import OrderedDict # Starting with Python 3.7, we could just use vanilla dicts
import apt # ImportError? apt install python3-apt
def describe(pkg):
# Python 3.7 equivalent:
# return {"Name": pkg.name, "Installed": pkg.installed.version, "Candidate": pkg.candidate.version}
return OrderedDict((("Name", pkg.name), ("Current", pkg.installed.version), ("Target", pkg.candidate.version)))
def show_packages(scr, upgrades, auto):
def print(s="", *args):
scr.addstr(str(s) + "\n", *args)
desc = [describe(pkg) for pkg in upgrades]
widths = OrderedDict((x, len(x)) for x in desc[0]) # Start with header widths
for d in desc:
for col in d:
widths[col] = max(widths[col], len(d[col]))
fmt = "[%s] " + " ".join("%%-%ds" % col for col in widths.values())
print(fmt % ("*", *widths), curses.A_BOLD)
print("--- " + " ".join("-" * col for col in widths.values()))
# TODO: Also adjust for insufficient width? Currently will quietly
# truncate lines at the available width, which isn't bad if it's
# just a character or two, but could be wasteful with long pkgnames.
pkg = 0
action = [" "] * len(upgrades)
lastheight = None
popup = None
def toggle(pkg, act):
action[pkg] = " " if action[pkg] == act else act
scr.addstr(pkg % perpage + 2, 1, action[pkg])
while True:
height, _ = scr.getmaxyx()
if height != lastheight:
# Note that a resize event is sent through as a pseudo-key, so
# this will trigger immediately, without waiting for the next
# actual key.
lastheight, lastpage = height, None
scr.setscrreg(0, height - 1)
perpage = min(height - 8, len(upgrades))
scr.move(perpage + 2, 0)
scr.clrtobot()
print()
if auto: print("Plus %d auto-installed packages." % auto)
print("Select packages to upgrade, then Enter to apply.")
print("Press ? for help, or I for more info on a package [TODO]")
pagestart = pkg - pkg % perpage
if pagestart != lastpage:
lastpage = pagestart
# Update (only if the page has changed)
for i, d in enumerate(desc[pagestart : pagestart + perpage]):
scr.addstr(i + 2, 0, fmt % (action[pagestart + i], *d.values()))
# Erase any spare space, including the mandatory blank at the end
for i in range(i + 1, perpage + 1):
# Is this the best way to clear a line??
scr.move(i + 2, 0)
scr.clrtoeol()
scr.setscrreg(2, perpage + 4)
scr.move((pkg % perpage) + 2, 1)
key = scr.getkey()
if popup:
# Restricted key handling when a popup is open
if key in "?Qq":
popup = None
scr.touchwin()
scr.refresh()
curses.curs_set(2)
continue
if key == "Q" or key == "q": return []
if key == "\n": break
if key == "KEY_UP": pkg = (pkg - 1) % len(upgrades)
if key == "KEY_DOWN": pkg = (pkg + 1) % len(upgrades)
if key == "KEY_MOUSE": TODO = curses.getmouse()
if key == " ": toggle(pkg, "I")
if key == "?":
popup = curses.newwin(5, 20, 2, 1)
popup.erase()
popup.border()
popup.touchwin()
popup.refresh()
curses.curs_set(0)
if key == "I" or key == "i":
# TODO: Show a new window with package info
# Show the from and to versions, optionally the changelog,
# and ideally, the list of other packages that would be
# upgraded along with this one (its out-of-date deps).
pass
# TODO: Have a way to mark auto from here? What about remove?
# action[pkg] = "A"
# Remove should be equiv of "apt --purge autoremove pkgname" if poss
# (but ideally shouldn't disrupt other autoremovables).
# scr.addstr(height - 2, 0, repr(key)); scr.clrtoeol()
return [pkg for pkg, ac in zip(upgrades, action) if ac == "I"]
def main():
cache = apt.Cache()
cache.open()
upgrades = []
auto = 0
for pkg in cache:
if not pkg.is_installed: continue # This is checking upgrades only
if pkg.candidate == pkg.installed: continue # Already up-to-date
if pkg.is_auto_installed:
# Ignore (but summarize) autoinstalled packages
auto += 1
continue
upgrades.append(pkg)
if not upgrades:
print("Everything up-to-date.")
return
global curses; import curses
upgrades = curses.wrapper(show_packages, upgrades, auto)
if not upgrades: return
# if "simulate": print(upgrades); return
for pkg in upgrades:
pkg.mark_upgrade()
# TODO: Show progress while it downloads? Not sure why the default progress
# isn't being shown. Might need to subclass apt.progress.text.AcquireProgress?
cache.commit()
if __name__ == "__main__":
main()
|
Python
| 0.000001
|
@@ -1353,16 +1353,257 @@
 n[pkg])
+	def make_popup(lines):
+		nonlocal popup
+		popup = curses.newwin(min(height - 3, 8), width - 4, 2, 2)
+		popup.erase()
+		popup.border()
+		for i, line in enumerate(lines):
+			popup.addstr(i + 1, 1, line)
+		popup.refresh()
+		curses.curs_set(0)
 	while T
@@ -1617,17 +1617,21 @@
 height, 
-_
+width
 = scr.g
@@ -1639,16 +1639,44 @@
 tmaxyx()
+  # Also used by make_popup()
 		if he
@@ -3210,137 +3210,81 @@
-			popup = curses.newwin(5, 20, 2, 1)
-			popup.erase()
-			popup.border()
-			popup.touchwin()
-			popup.refresh()
-			curses.curs_set(0)
+			make_popup(["Hello, world", "Testing testing", "This would be help info"])
|
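The tool in the record above drives python-apt interactively; its non-interactive core reduces to a short runnable sketch. The apt calls below are the real python-apt API, and the filtering simply mirrors main() above:

import apt

# Collect manually installed packages that have a pending upgrade,
# mirroring the filtering in main() above.
cache = apt.Cache()
cache.open()
upgrades = [pkg for pkg in cache
            if pkg.is_installed                 # installed packages only
            and pkg.candidate != pkg.installed  # an upgrade is available
            and not pkg.is_auto_installed]      # skip auto-installed deps

for pkg in upgrades:
    pkg.mark_upgrade()
# cache.commit() would then download and apply the marked upgrades.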
c3c703c6d8b434da40beef6202bf2cbdc01e50a1
|
Add configured tests
|
gym/wrappers/tests/test_wrappers.py
|
gym/wrappers/tests/test_wrappers.py
|
import gym
from gym import error
from gym import wrappers
from gym.wrappers import SkipWrapper
import tempfile
import shutil
def test_skip():
every_two_frame = SkipWrapper(2)
env = gym.make("FrozenLake-v0")
env = every_two_frame(env)
obs = env.reset()
env.render()
def test_no_double_wrapping():
temp = tempfile.mkdtemp()
try:
env = gym.make("FrozenLake-v0")
env = wrappers.Monitor(env, temp)
try:
env = wrappers.Monitor(env, temp)
except error.DoubleWrapperError:
pass
else:
assert False, "Should not allow double wrapping"
env.close()
finally:
shutil.rmtree(temp)
if __name__ == '__main__':
test_no_double_wrapping()
|
Python
| 0.000001
|
@@ -282,16 +282,612 @@
 nder()
 
+def test_configured():
+    env = gym.make("FrozenLake-v0")
+    env = wrappers.TimeLimit(env)
+    env.configure()
+
+    # Make sure all layers of wrapping are configured
+    assert env._configured
+    assert env.env._configured
+    env.close()
+
+def test_double_configured():
+    env = gym.make("FrozenLake-v0")
+    every_two_frame = SkipWrapper(2)
+    env = every_two_frame(env)
+
+    env = wrappers.TimeLimit(env)
+    env.configure()
+
+    # Make sure all layers of wrapping are configured
+    assert env._configured
+    assert env.env._configured
+    assert env.env.env._configured
+    env.close()
 
 def tes
@@ -1256,16 +1256,16 @@
 inally:
+        
@@ -1288,63 +1288,4 @@
 mp)
-
-
-if __name__ == '__main__':
-    test_no_double_wrapping()
|
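The tests added by this diff assert that configure() reaches every layer of a wrapper stack. A toy sketch of how such propagation can work — illustrative only, not gym's actual implementation:

class Wrapper:
    """Toy wrapper chain; env is the wrapped layer (None at the base)."""
    def __init__(self, env=None):
        self.env = env
        self._configured = False

    def configure(self):
        # Mark this layer, then recurse so every inner layer is marked too.
        self._configured = True
        if self.env is not None:
            self.env.configure()

stack = Wrapper(Wrapper(Wrapper()))  # three nested layers
stack.configure()
assert stack._configured and stack.env._configured and stack.env.env._configured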
fa0174185832fac608cc1b65255231a73aac630a
|
fix evacuate call on branched lient
|
healing/handler_plugins/evacuate.py
|
healing/handler_plugins/evacuate.py
|
from healing.handler_plugins import base
from healing import exceptions
from healing.openstack.common import log as logging
from healing import utils
LOG = logging.getLogger(__name__)
class Evacuate(base.HandlerPluginBase):
"""evacuate VM plugin.
Data format in action_meta is:
'evacuate_host': True if evacuating the entire host
"""
DESCRIPTION = "Evacuate VM (shared storage)"
NAME = "evacuate"
def start(self, ctx, data):
""" do something... spawn thread?
:param data ActionData Object
shared_storage?
"""
if not self.can_execute(data):
raise exceptions.ActionInProgress()
self.register_action(data)
try:
client = utils.get_nova_client(ctx)
lista = client.servers.evacuate(data.target_resource,
on_shared_storage=True,
find_host=True)
self.current_action.output = "Output: " + str(lista)
except Exception as e:
LOG.exception(e)
self.current_action.output = e.message
self.stop(data, True)
return None
self.stop(data)
return self.current_action.id
def stop(self, data, error=False, message=None):
        # This will probably work as long as we are not in a thread; if we
        # change that, add the id to the data and context.
if error:
self.current_action.error()
else:
self.current_action.stop()
self.current_action.save()
LOG.debug("Task stopped")
def can_execute(self, data, ctx=None):
"""
:param data ActionData Obj
move to parent?
"""
return super(Evacuate, self).can_execute(data, ctx=ctx)
|
Python
| 0
|
@@ -810,16 +810,23 @@
 vacuate(
+server=
 data.tar
@@ -883,16 +883,27 @@
+ host=None,
 on_shar
@@ -921,68 +921,8 @@
 True
-,
- find_host=True
 )
|
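Decoded, the patch above rewrites the novaclient call to keyword arguments and drops find_host, which the branched client no longer accepts. The patched call site then reads roughly as follows (utils.get_nova_client and data.target_resource come from the plugin above; host=None leaves target selection to the scheduler):

client = utils.get_nova_client(ctx)
# Keyword arguments match the branched client's evacuate() signature;
# host=None lets nova pick the destination host.
lista = client.servers.evacuate(server=data.target_resource,
                                host=None,
                                on_shared_storage=True)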
5336ff3967f4e297237045ca0914ae5257e3a767
|
fix csv output in one autoplot
|
htdocs/plotting/auto/scripts/p92.py
|
htdocs/plotting/auto/scripts/p92.py
|
import psycopg2.extras
import pyiem.nws.vtec as vtec
import datetime
import pandas as pd
def get_description():
""" Return a dict describing how to call this plotter """
d = dict()
d['data'] = True
d['cache'] = 3600
d['description'] = """This map depicts the number of days since a
Weather Forecast Office has issued a given VTEC product."""
d['arguments'] = [
dict(type='phenomena', name='phenomena',
default='TO', label='Select Watch/Warning Phenomena Type:'),
dict(type='significance', name='significance',
default='W', label='Select Watch/Warning Significance Level:'),
]
return d
def plotter(fdict):
""" Go """
import matplotlib
matplotlib.use('agg')
from pyiem.plot import MapPlot
utc = datetime.datetime.utcnow()
bins = [0, 1, 14, 31, 91, 182, 273, 365, 730, 1460, 2920, 3800]
pgconn = psycopg2.connect(database='postgis', host='iemdb', user='nobody')
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phenomena = fdict.get('phenomena', 'TO')
significance = fdict.get('significance', 'W')
cursor.execute("""
select wfo, extract(days from ('TODAY'::date - max(issue))) as m
from warnings where significance = %s and phenomena = %s
GROUP by wfo ORDER by m ASC
""", (significance, phenomena))
data = {}
rows = []
for row in cursor:
wfo = row[0] if row[0] != 'JSJ' else 'SJU'
rows.append(dict(wfo=wfo, days=row[1]))
data[wfo] = max([row[1], 0])
df = pd.DataFrame(rows)
m = MapPlot(sector='nws', axisbg='white', nocaption=True,
title='Days since Last %s %s by NWS Office' % (
vtec._phenDict.get(phenomena, phenomena),
vtec._sigDict.get(significance, significance)),
subtitle='Valid %s' % (utc.strftime("%d %b %Y %H%M UTC"),))
m.fill_cwas(data, bins=bins, ilabel=True, units='Days',
lblformat='%.0f')
return m.fig, df
|
Python
| 0.000046
|
@@ -1568,16 +1568,54 @@
 me(rows)
+    df.set_index('wfo', inplace=True)
 
     m
|
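This one-line fix matters for the CSV endpoint because to_csv() emits the DataFrame index as the leading labeled column rather than a bare integer index. A small self-contained illustration (the sample rows are made up):

import pandas as pd

rows = [dict(wfo='DMX', days=12), dict(wfo='SJU', days=40)]
df = pd.DataFrame(rows)
df.set_index('wfo', inplace=True)  # the fix from the diff above

print(df.to_csv())
# wfo,days
# DMX,12
# SJU,40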
a8d639cbac2439c0079b86b72dd3daee6505e9d0
|
Update version file
|
version.py
|
version.py
|
"""Versioning controlled via Git Tag, check setup.py"""
__version__ = "0.3.2"
|
Python
| 0
|
@@ -73,7 +73,7 @@
 0.3.
-2
+3
 "
|
360523b6d71c06f13f2a26b775c52cf598f380a0
|
Update version.py to include public domain declaration.
|
version.py
|
version.py
|
"""Calculate the current package version number based on git tags.
This module provides `read_version_git` to read the output of "git describe"
and modify its output to conform to the versioning scheme that
setuptools uses (see PEP 386). Releases must be tagged with the following
format:
v<num>(.<num>)+ [ {a|b|c|rc} <num> (.<num>)* ]
This module also provides `read_version_file` and `write_version_file` to
read and write the version number to a file. These functions should be used
to write the git version to a file so that it can be used in a release
distribution and can be read at runtime.
To use this module, import it in your setup.py file, define a
get_version() function, and use its result as your package version:
import os
import version
here = os.path.abspath(os.path.dirname(__file__))
def get_version(*file_paths):
try:
# read version from git tags
ver = version.read_version_git()
except:
# read version from file
ver = version.read_version_file(here, *file_paths)
else:
# write version to file if we got it successfully from git
version.write_version_file(ver, here, *file_paths)
return ver
setup(
name="<SAMPLE>"
version=get_version('<SAMPLE>', '_version.py'),
.
.
.
)
This will automatically update the '<SAMPLE>/_version.py' file, where
'<SAMPLE>' is assumed to be the package directory. The '_version.py' file
should *not* be checked into git but it *should* be included in sdist
tarballs (this will be done automatically if written as a '.py' file in the
package directory as suggested). You should also include this module in your
manifest. To do these things, run:
echo include version.py >> MANIFEST.in
echo _version.py >> <SAMPLE>/.gitignore
You can also import the package version at runtime by including the line
from ._version import __version__
in the __init__.py file of your package <SAMPLE>.
With that setup, a new release can be labelled by simply invoking:
git tag -s v1.0
The original idea for this module is due to Douglas Creager, with PEP 386
modifications by Michal Nazarewicz. Here is a nice write-up of the original:
http://dcreager.net/2010/02/10/setuptools-git-version-numbers/
"""
import codecs
import os
import re
import subprocess
# http://www.python.org/dev/peps/pep-0386/
_PEP386_SHORT_VERSION_RE = r'\d+(?:\.\d+)+(?:(?:[abc]|rc)\d+(?:\.\d+)*)?'
_PEP386_VERSION_RE = r'^%s(?:\.post\d+)?(?:\.dev\d+)?$' % (
_PEP386_SHORT_VERSION_RE)
_GIT_DESCRIPTION_RE = r'^v(?P<ver>%s)-(?P<commits>\d+)-g(?P<sha>[\da-f]+)$' % (
_PEP386_SHORT_VERSION_RE)
# read version number using 'git describe'
def read_version_git():
# read version number using 'git describe'
cmd = 'git describe --tags --long --match v[0-9]*.*'.split()
try:
git_description = subprocess.check_output(cmd).decode().strip()
except subprocess.CalledProcessError:
raise RuntimeError('Unable to get version number from git tags')
desc_match = re.search(_GIT_DESCRIPTION_RE, git_description)
if not desc_match:
raise ValueError('Git description (%s) is not a valid PEP386 version' %
(git_description,))
commits = int(desc_match.group('commits'))
if not commits:
version = desc_match.group('ver')
else:
version = '%s.post%d.dev%d' % (
desc_match.group('ver'),
commits,
int(desc_match.group('sha'), 16)
)
return version
# write the version number to a source file
def write_version_file(version, *file_paths):
# write version number to source file
version_msg = '# Do not edit this file, versioning is governed by git tags'
# Open in Latin-1 so that we avoid encoding errors.
# Use codecs.open for Python 2 compatibility
with codecs.open(os.path.join(*file_paths), 'w', 'latin1') as f:
f.write(version_msg + os.linesep
+ "__version__ = '{0}'".format(version))
# Read the version number from a source file.
# Why read it, and not import?
# see https://groups.google.com/d/topic/pypa-dev/0PkjVpcxTzQ/discussion
def read_version_file(*file_paths):
# Open in Latin-1 so that we avoid encoding errors.
# Use codecs.open for Python 2 compatibility
with codecs.open(os.path.join(*file_paths), 'r', 'latin1') as f:
version_file = f.read()
# The version line must have the form
# __version__ = 'ver'
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string in source file.")
if __name__ == '__main__':
    print(read_version_git())
|
Python
| 0
|
@@ -1,8 +1,55 @@
+# This file is placed into the public domain.
+
 """Calcu
|
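The conversion performed by read_version_git above can be sanity-checked standalone; the regexes are copied from the module, and the sample tag description is made up:

import re

_PEP386_SHORT_VERSION_RE = r'\d+(?:\.\d+)+(?:(?:[abc]|rc)\d+(?:\.\d+)*)?'
_GIT_DESCRIPTION_RE = (r'^v(?P<ver>%s)-(?P<commits>\d+)-g(?P<sha>[\da-f]+)$'
                       % _PEP386_SHORT_VERSION_RE)

m = re.search(_GIT_DESCRIPTION_RE, 'v1.0-3-g7f3a2b1')
version = '%s.post%d.dev%d' % (m.group('ver'),
                               int(m.group('commits')),
                               int(m.group('sha'), 16))
print(version)  # 1.0.post3.dev133407409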