| commit (stringlengths 40-40) | subject (stringlengths 1-3.25k) | old_file (stringlengths 4-311) | new_file (stringlengths 4-311) | old_contents (stringlengths 0-26.3k) | lang (stringclasses, 3 values) | proba (float64, 0-1) | diff (stringlengths 0-7.82k) |
|---|---|---|---|---|---|---|---|
a8104d2765ef97b698f108192dfc0b334498151a
|
Add ability to look up other rietveld instances
|
my_reviews.py
|
my_reviews.py
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Get rietveld stats.
Example:
- my_reviews.py -o me@chromium.org -Q for stats for last quarter.
"""
import datetime
import optparse
import os
import sys
import rietveld
def print_reviews(owner, reviewer, created_after, created_before):
"""Prints issues with the filter.
Set with_messages=True to the search() call below if you want each message too.
If you only want issue numbers, use keys_only=True in the search() call.
You can then use remote.get_issue_properties(issue, True) to get the data per
issue.
"""
instance_url = 'codereview.chromium.org'
remote = rietveld.Rietveld(instance_url, None, None)
# See def search() in rietveld.py to see all the filters you can use.
for issue in remote.search(
owner=owner,
reviewer=reviewer,
created_after=created_after,
created_before=created_before,
keys_only=False,
with_messages=False,
):
# By default, hide commit-bot and the domain.
reviewers = set(r.split('@', 1)[0] for r in issue['reviewers'])
reviewers -= set(('commit-bot',))
# Strip time.
timestamp = issue['created'][:10]
# More information is available, print issue.keys() to see them.
print '%d: %s %s' % (issue['issue'], timestamp, ', '.join(reviewers))
def get_previous_quarter(today):
"""There are four quarters, 01-03, 04-06, 07-09, 10-12.
If today is in the last month of a quarter, assume it's the current quarter
that is requested.
"""
year = today.year
month = today.month - (today.month % 3)
if not month:
month = 12
year -= 1
previous_month = month - 2
return (
'%d-%02d-01' % (year, previous_month),
'%d-%02d-01' % (year, month))
def main():
parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
parser.add_option('-o', '--owner')
parser.add_option('-r', '--reviewer')
parser.add_option('-c', '--created_after')
parser.add_option('-C', '--created_before')
parser.add_option('-Q', '--last_quarter', action='store_true')
# Remove description formatting
parser.format_description = lambda x: parser.description
options, args = parser.parse_args()
if args:
parser.error('Args unsupported')
if not options.owner and not options.reviewer:
options.owner = os.environ['EMAIL_ADDRESS']
if '@' not in options.owner:
parser.error('Please specify at least -o or -r')
print 'Defaulting to owner=%s' % options.owner
if options.last_quarter:
today = datetime.date.today()
options.created_after, options.created_before = get_previous_quarter(today)
print 'Using range %s to %s' % (
options.created_after, options.created_before)
print_reviews(
options.owner, options.reviewer,
options.created_after, options.created_before)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0.000001
|
@@ -424,16 +424,30 @@
d_before
+, instance_url
):%0A %22%22%22
@@ -731,51 +731,8 @@
%22%22%22%0A
- instance_url = 'codereview.chromium.org'%0A
re
@@ -2166,16 +2166,95 @@
_true')%0A
+ parser.add_option('-i', '--instance_url', default='codereview.chromium.org')%0A
# Remo
@@ -2996,24 +2996,52 @@
eated_before
+,%0A options.instance_url
)%0A return 0
|
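A worked trace of the `get_previous_quarter` helper shown in this row may help; the logic is reproduced verbatim, and the dates below are illustrative inputs, not part of the dataset:

```python
import datetime

def get_previous_quarter(today):
    # Same arithmetic as in my_reviews.py above, reproduced for the trace.
    year = today.year
    month = today.month - (today.month % 3)
    if not month:
        month = 12
        year -= 1
    previous_month = month - 2
    return ('%d-%02d-01' % (year, previous_month),
            '%d-%02d-01' % (year, month))

print(get_previous_quarter(datetime.date(2011, 7, 15)))
# ('2011-04-01', '2011-06-01'): Q2, with the end date falling on the
# first day of the quarter's final month.
print(get_previous_quarter(datetime.date(2011, 1, 15)))
# ('2010-10-01', '2010-12-01'): wraps into the previous year.
```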
30d4f070a1b10d0748dea10c244dc9ee0f5d2aa7
|
change STDOUT to PIPE to get ssh connection module working
|
lib/ansible/runner/connection/ssh.py
|
lib/ansible/runner/connection/ssh.py
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
################################################
import os
import time
import subprocess
import shlex
import pipes
import random
import select
import fcntl
from ansible import errors
class SSHConnection(object):
''' ssh based connections '''
def __init__(self, runner, host, port):
self.runner = runner
self.host = host
self.port = port
def connect(self):
''' connect to the remote host '''
self.common_args = ["-o", "StrictHostKeyChecking=no"]
if self.port is not None:
self.common_args += ["-o", "Port=%d" % (self.port)]
if self.runner.private_key_file is not None:
self.common_args += ["-o", "IdentityFile="+self.runner.private_key_file]
extra_args = os.getenv("ANSIBLE_SSH_ARGS", None)
if extra_args is not None:
self.common_args += shlex.split(extra_args)
else:
self.common_args += ["-o", "ControlMaster=auto",
"-o", "ControlPersist=60s",
"-o", "ControlPath=/tmp/ansible-ssh-%h-%p-%r"]
self.userhost = "%s@%s" % (self.runner.remote_user, self.host)
return self
def exec_command(self, cmd, tmp_path,sudo_user,sudoable=False):
''' run a command on the remote host '''
ssh_cmd = ["ssh", "-tt", "-q"] + self.common_args + [self.userhost]
if self.runner.sudo and sudoable:
# Rather than detect if sudo wants a password this time, -k makes
# sudo always ask for a password if one is required. The "--"
# tells sudo that this is the end of sudo options and the command
# follows. Passing a quoted compound command to sudo (or sudo -s)
# directly doesn't work, so we shellquote it with pipes.quote()
# and pass the quoted string to the user's shell. We loop reading
# output until we see the randomly-generated sudo prompt set with
# the -p option.
randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
prompt = '[sudo via ansible, key=%s] password: ' % randbits
sudocmd = 'sudo -k && sudo -p "%s" -u %s -- "$SHELL" -c %s' % (
prompt, sudo_user, pipes.quote(cmd))
sudo_output = ''
ssh_cmd.append(sudocmd)
p = subprocess.Popen(ssh_cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if self.runner.sudo_pass:
fcntl.fcntl(p.stdout, fcntl.F_SETFL,
fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while not sudo_output.endswith(prompt):
rfd, wfd, efd = select.select([p.stdout], [],
[p.stdout], self.runner.timeout)
if p.stdout in rfd:
chunk = p.stdout.read()
if not chunk:
raise errors.AnsibleError('ssh connection closed waiting for sudo password prompt')
sudo_output += chunk
else:
(stdout, stderr) = p.communicate()
raise errors.AnsibleError('ssh connection error waiting for sudo password prompt')
p.stdin.write(self.runner.sudo_pass + '\n')
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
else:
ssh_cmd.append(cmd)
p = subprocess.Popen(ssh_cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# We can't use p.communicate here because the ControlMaster may have stdout open as well
p.stdin.close()
stdout = ''
while p.poll() is None:
rfd, wfd, efd = select.select([p.stdout], [], [p.stdout], 1)
if p.stdout in rfd:
stdout += os.read(p.stdout.fileno(), 1024)
if p.returncode != 0 and stdout.find('Bad configuration option: ControlPersist') != -1:
raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" before running again')
return ('', stdout, '')
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
if not os.path.exists(in_path):
raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
sftp_cmd = ["sftp"] + self.common_args + [self.userhost]
p = subprocess.Popen(sftp_cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate("put %s %s\n" % (in_path, out_path))
if p.returncode != 0:
raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr))
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
if not os.path.exists(in_path):
raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
sftp_cmd = ["sftp"] + self.common_args + [self.userhost]
p = subprocess.Popen(sftp_cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate("get %s %s\n" % (in_path, out_path))
if p.returncode != 0:
raise errors.AnsibleError("failed to transfer file from %s:\n%s\n%s" % (in_path, stdout, stderr))
def close(self):
''' terminate the connection '''
pass
|
Python
| 0
|
@@ -3207,22 +3207,20 @@
process.
-STDOUT
+PIPE
)%0A
@@ -4424,14 +4424,12 @@
ess.
-STDOUT
+PIPE
)%0A%0A
|
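The diff in this row swaps `stderr=subprocess.STDOUT` for `stderr=subprocess.PIPE`. As background (a generic sketch, not the Ansible code): with `STDOUT` the error stream is folded into stdout and `communicate()` returns `None` for stderr, while `PIPE` captures the two streams separately:

```python
import subprocess

cmd = ["sh", "-c", "echo out; echo err 1>&2"]

# Merged: stderr is interleaved into stdout, nothing comes back separately.
merged = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
out, err = merged.communicate()
assert b"err" in out and err is None

# Separate pipes: stdout and stderr can be inspected independently.
split = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
out, err = split.communicate()
assert b"err" in err and b"err" not in out
```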
8a11d39892f9fb2b7506e0381144606e5f4e0a34
|
Use highest protocol for log dumping
|
libs/blocks/blocks/extensions/saveload.py
|
libs/blocks/blocks/extensions/saveload.py
|
"""Extensions for saving and loading the state of a training process."""
import os.path
import logging
from six.moves import cPickle
from blocks.extensions import SimpleExtension, TrainingExtension
from blocks.utils import reraise_as
from blocks.serialization import secure_dump, load, load_parameter_values
logger = logging.getLogger(__name__)
LOADED_FROM = "loaded_from"
SAVED_TO = "saved_to"
class Checkpoint(SimpleExtension):
"""Saves a pickled version of the main loop to the disk.
The pickled main loop can be later reloaded and training can be
resumed.
Makes a `SAVED_TO` record in the log with the serialization destination
in the case of success and ``None`` in the case of failure. The
value of the record is a tuple of paths to which saving was done
(there can be more than one if the user added a condition
with an argument, see :meth:`do` docs).
Parameters
----------
path : str
The destination path for pickling.
save_separately : list of str, optional
The list of the main loop's attributes to be pickled separately
to their own files. The paths will be formed by adding
the attribute name preceded by an underscore before the
`path` extension. The whole main loop will still be pickled
as usual.
use_cpickle : bool
See documentation of :func:`~blocks.serialization.dump`.
Notes
-----
Using pickling for saving the whole main loop object comes with
certain limitations:
* Theano computation graphs built in the GPU-mode
(`theano.config.device == "gpu"`) can not be used in the usual mode
(and vice-versa). Therefore using this extension binds you to using
only one kind of device.
"""
def __init__(self, path, save_separately=None, use_cpickle=False,
**kwargs):
kwargs.setdefault("after_training", True)
super(Checkpoint, self).__init__(**kwargs)
if not save_separately:
save_separately = []
self.path = path
self.save_separately = save_separately
self.use_cpickle = use_cpickle
def save_separately_filenames(self, path):
"""Compute paths for separately saved attributes.
Parameters
----------
path : str
Path to which the main checkpoint file is being saved.
Returns
-------
paths : dict
A dictionary mapping attribute names to derived paths
based on the `path` passed in as an argument.
"""
root, ext = os.path.splitext(path)
return {attribute: root + "_" + attribute + ext
for attribute in self.save_separately}
def do(self, callback_name, *args):
"""Pickle the main loop object to the disk.
If `*args` contain an argument from user, it is treated as
saving path to be used instead of the one given at the
construction stage.
"""
_, from_user = self.parse_args(callback_name, args)
try:
path = self.path
if from_user:
path, = from_user
secure_dump(self.main_loop, path, use_cpickle=self.use_cpickle)
filenames = self.save_separately_filenames(path)
for attribute in self.save_separately:
secure_dump(getattr(self.main_loop, attribute),
filenames[attribute], cPickle.dump)
except Exception:
path = None
raise
finally:
already_saved_to = self.main_loop.log.current_row.get(SAVED_TO, ())
self.main_loop.log.current_row[SAVED_TO] = (already_saved_to +
(path,))
class Load(TrainingExtension):
"""Loads a saved checkpoint into the main loop.
Makes a `LOADED_FROM` record in the log with the dump path.
Parameters
----------
path : str
The path to the folder with dump.
load_iteration_state : bool
If `True`, load the iteration state. This can be useful when your
model has very long epochs, and you want to resume when you were in
the middle of one. Defaults to `False`.
load_log : bool
If `True`, load the old log and continue logging from there.
Convenient because you end up with a single log of the entire
training history. Defaults to `False`.
Notes
-----
Requires the model to be created entirely using bricks, with a unique
path/name for each brick, so that the parameters can be matched to
their values.
In order to load the iteration state and the log, the saved model needs
to be unpickled. Note that resuming training this way is still not
entirely seamless because e.g. extensions will not be reloaded.
"""
def __init__(self, path, load_iteration_state=False, load_log=False,
**kwargs):
super(Load, self).__init__(**kwargs)
self.path = path
self.load_iteration_state = load_iteration_state
self.load_log = load_log
def load_to(self, main_loop):
main_loop.model.set_parameter_values(load_parameter_values(self.path))
if self.load_iteration_state or self.load_log:
with open(self.path, "rb") as source:
loaded_main_loop = load(source)
if self.load_log:
main_loop.log = loaded_main_loop.log
if self.load_iteration_state:
main_loop.iteration_state = loaded_main_loop.iteration_state
def before_training(self):
if not os.path.exists(self.path):
logger.warning("No dump found")
return
logger.info("loading model from {}".format(self.path))
try:
self.load_to(self.main_loop)
self.main_loop.log.current_row[LOADED_FROM] = self.path
except Exception:
reraise_as("Failed to load the state")
|
Python
| 0.000012
|
@@ -3448,16 +3448,57 @@
kle.dump
+,%0A protocol=-1
)%0A
|
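The `protocol=-1` argument this diff adds is standard `pickle`/`cPickle` behaviour: a negative protocol number selects `pickle.HIGHEST_PROTOCOL`, generally the fastest and most compact encoding. A minimal sketch:

```python
import pickle

data = {"iterations": list(range(1000))}

# protocol=-1 is shorthand for pickle.HIGHEST_PROTOCOL.
blob = pickle.dumps(data, protocol=-1)
assert pickle.loads(blob) == data
```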
f4408cb2feb5a28a5117fefebe782a61ea80de96
|
fix res_company
|
hr_employee_time_clock/models/__init__.py
|
hr_employee_time_clock/models/__init__.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2016 - now Bytebrand Outsourcing AG (<http://www.bytebrand.net>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import hr_timesheet_sheet
from . import hr_attendance
from . import hr_holidays_public
from . import employee_attendance_analytic
from . import resource_calendar
from . import hr_holidays
from . import account_analytic_line
from . import hr_department
from . import hr_employee
from . import hr_timesheet_sheet_day
from . import hr_timesheet_sheet_account
# from . import res_company
from . import hr_contract
# from . import res_config_settings
from . import res_users
|
Python
| 0.000001
|
@@ -1362,18 +1362,16 @@
account%0A
-#
from . i
|
6c80293a9895ccd611cdc4533d4474ad21e0523d
|
Mark failing baikal tests as xfail
|
tests/storage/dav/test_caldav.py
|
tests/storage/dav/test_caldav.py
|
import datetime
from textwrap import dedent
import pytest
import requests.exceptions
from . import dav_server
from . import DAVStorageTests
from .. import format_item
from tests import EVENT_TEMPLATE
from tests import TASK_TEMPLATE
from tests import VCARD_TEMPLATE
from vdirsyncer import exceptions
from vdirsyncer.storage.dav import CalDAVStorage
class TestCalDAVStorage(DAVStorageTests):
storage_class = CalDAVStorage
@pytest.fixture(params=["VTODO", "VEVENT"])
def item_type(self, request):
return request.param
def test_doesnt_accept_vcard(self, item_type, get_storage_args):
s = self.storage_class(item_types=(item_type,), **get_storage_args())
try:
s.upload(format_item(VCARD_TEMPLATE))
except (exceptions.Error, requests.exceptions.HTTPError):
pass
assert not list(s.list())
# The `arg` param is not named `item_types` because that would hit
# https://bitbucket.org/pytest-dev/pytest/issue/745/
@pytest.mark.parametrize(
"arg,calls_num",
[
(("VTODO",), 1),
(("VEVENT",), 1),
(("VTODO", "VEVENT"), 2),
(("VTODO", "VEVENT", "VJOURNAL"), 3),
((), 1),
],
)
def test_item_types_performance(
self, get_storage_args, arg, calls_num, monkeypatch
):
s = self.storage_class(item_types=arg, **get_storage_args())
old_parse = s._parse_prop_responses
calls = []
def new_parse(*a, **kw):
calls.append(None)
return old_parse(*a, **kw)
monkeypatch.setattr(s, "_parse_prop_responses", new_parse)
list(s.list())
assert len(calls) == calls_num
@pytest.mark.xfail(
dav_server == "radicale", reason="Radicale doesn't support timeranges."
)
def test_timerange_correctness(self, get_storage_args):
start_date = datetime.datetime(2013, 9, 10)
end_date = datetime.datetime(2013, 9, 13)
s = self.storage_class(
start_date=start_date, end_date=end_date, **get_storage_args()
)
too_old_item = format_item(
dedent(
"""
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//hacksw/handcal//NONSGML v1.0//EN
BEGIN:VEVENT
DTSTART:19970714T170000Z
DTEND:19970715T035959Z
SUMMARY:Bastille Day Party
X-SOMETHING:{r}
UID:{r}
END:VEVENT
END:VCALENDAR
"""
).strip()
)
too_new_item = format_item(
dedent(
"""
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//hacksw/handcal//NONSGML v1.0//EN
BEGIN:VEVENT
DTSTART:20150714T170000Z
DTEND:20150715T035959Z
SUMMARY:Another Bastille Day Party
X-SOMETHING:{r}
UID:{r}
END:VEVENT
END:VCALENDAR
"""
).strip()
)
good_item = format_item(
dedent(
"""
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//hacksw/handcal//NONSGML v1.0//EN
BEGIN:VEVENT
DTSTART:20130911T170000Z
DTEND:20130912T035959Z
SUMMARY:What's with all these Bastille Day Partys
X-SOMETHING:{r}
UID:{r}
END:VEVENT
END:VCALENDAR
"""
).strip()
)
s.upload(too_old_item)
s.upload(too_new_item)
expected_href, _ = s.upload(good_item)
((actual_href, _),) = s.list()
assert actual_href == expected_href
def test_invalid_resource(self, monkeypatch, get_storage_args):
calls = []
args = get_storage_args(collection=None)
def request(session, method, url, **kwargs):
assert url == args["url"]
calls.append(None)
r = requests.Response()
r.status_code = 200
r._content = b"Hello World."
return r
monkeypatch.setattr("requests.sessions.Session.request", request)
with pytest.raises(ValueError):
s = self.storage_class(**args)
list(s.list())
assert len(calls) == 1
@pytest.mark.skipif(dav_server == "icloud", reason="iCloud only accepts VEVENT")
@pytest.mark.skipif(
dav_server == "fastmail", reason="Fastmail has non-standard hadling of VTODOs."
)
def test_item_types_general(self, s):
event = s.upload(format_item(EVENT_TEMPLATE))[0]
task = s.upload(format_item(TASK_TEMPLATE))[0]
s.item_types = ("VTODO", "VEVENT")
def hrefs():
return {href for href, etag in s.list()}
assert hrefs() == {event, task}
s.item_types = ("VTODO",)
assert hrefs() == {task}
s.item_types = ("VEVENT",)
assert hrefs() == {event}
s.item_types = ()
assert hrefs() == {event, task}
|
Python
| 0.000003
|
@@ -534,16 +534,93 @@
.param%0A%0A
+ @pytest.mark.xfail(dav_server == %22baikal%22, reason=%22Baikal returns 500.%22)%0A
def
@@ -1314,24 +1314,101 @@
%5D,%0A )%0A
+ @pytest.mark.xfail(dav_server == %22baikal%22, reason=%22Baikal returns 500.%22)%0A
def test
@@ -4677,32 +4677,109 @@
VTODOs.%22%0A )%0A
+ @pytest.mark.xfail(dav_server == %22baikal%22, reason=%22Baikal returns 500.%22)%0A
def test_ite
|
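The marker added three times in this diff uses pytest's conditional `xfail`: the first positional argument is a boolean, and the test is reported as XFAIL (rather than a hard failure) only while it holds. A standalone sketch, with a stand-in value for the `dav_server` import used in the tests above:

```python
import pytest

dav_server = "baikal"  # stand-in for the value imported in the tests above

@pytest.mark.xfail(dav_server == "baikal", reason="Baikal returns 500.")
def test_server_returns_ok():
    status = 500 if dav_server == "baikal" else 200  # simulated response
    assert status == 200
```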
c7299a7eb742f8f0a88e69369416829331eb67eb
|
Fix overextends tag with cached template loader
|
mezzanine/template/loader_tags.py
|
mezzanine/template/loader_tags.py
|
from __future__ import unicode_literals
from future.builtins import map
import os
from itertools import chain
from django.template import Template, TemplateSyntaxError, TemplateDoesNotExist
from django.template.loader_tags import ExtendsNode
from mezzanine import template
register = template.Library()
class OverExtendsNode(ExtendsNode):
"""
Allows the template ``foo/bar.html`` to extend ``foo/bar.html``,
given that there is another version of it that can be loaded. This
allows templates to be created in a project that extend their app
template counterparts, or even app templates that extend other app
templates with the same relative name/path.
We use our own version of ``find_template`` that uses an explicit
list of template directories to search for the template, based on
the directories that the known template loaders
(``app_directories`` and ``filesystem``) use. This list gets stored
in the template context, and each time a template is found, its
absolute path gets removed from the list, so that subsequent
searches for the same relative name/path can find parent templates
in other directories, which allows circular inheritance to occur.
Django's ``app_directories``, ``filesystem``, and ``cached``
loaders are supported. The ``eggs`` loader, and any loader that
implements ``load_template_source`` with a source string returned,
should also theoretically work.
"""
def find_template(self, name, context, peeking=False):
"""
Replacement for Django's ``find_template`` that uses the current
template context to keep track of which template directories it
has used when finding a template. This allows multiple templates
with the same relative name/path to be discovered, so that
circular template inheritance can occur.
"""
# These imports want settings, which aren't available when this
# module is imported to ``add_to_builtins``, so do them here.
import django.template.loaders.app_directories as app_directories
from mezzanine.conf import settings
# Store a dictionary in the template context mapping template
# names to the lists of template directories available to
# search for that template. Each time a template is loaded, its
# origin directory is removed from its directories list.
context_name = "OVEREXTENDS_DIRS"
if context_name not in context:
context[context_name] = {}
if name not in context[context_name]:
all_dirs = (
list(chain.from_iterable(
[template_engine.get('DIRS', [])
for template_engine in settings.TEMPLATES])) +
list(app_directories.get_app_template_dirs('templates')))
# os.path.abspath is needed under uWSGI, and also ensures we
# have consistent path separators across different OSes.
context[context_name][name] = list(map(os.path.abspath, all_dirs))
# Build a list of template loaders to use. For loaders that wrap
# other loaders like the ``cached`` template loader, unwind its
# internal loaders and add those instead.
loaders = []
loader_names = set(chain.from_iterable(
[template_engine.get('OPTIONS', {}).get('loaders', [])
for template_engine in settings.TEMPLATES]))
loader_names.update( # default template loaders
['django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'])
for loader_name in loader_names:
loader = context.template.engine.find_template_loader(loader_name)
loaders.extend(getattr(loader, "loaders", [loader]))
# Go through the loaders and try to find the template. When
# found, removed its absolute path from the context dict so
# that it won't be used again when the same relative name/path
# is requested.
for loader in loaders:
dirs = context[context_name][name]
try:
source, path = loader.load_template_source(name, dirs)
except TemplateDoesNotExist:
pass
else:
# Only remove the absolute path for the initial call in
# get_parent, and not when we're peeking during the
# second call.
if not peeking:
remove_path = os.path.abspath(path[:-len(name) - 1])
context[context_name][name].remove(remove_path)
return Template(source)
raise TemplateDoesNotExist(name)
def get_parent(self, context):
"""
Load the parent template using our own ``find_template``, which
will cause its absolute path to not be used again. Then peek at
the first node, and if its parent arg is the same as the
current parent arg, we know circular inheritance is going to
occur, in which case we try and find the template again, with
the absolute directory removed from the search list.
"""
parent = self.parent_name.resolve(context)
# If parent is a template object, just return it.
if hasattr(parent, "render"):
return parent
template = self.find_template(parent, context)
for node in template.nodelist:
if (isinstance(node, ExtendsNode) and
node.parent_name.resolve(context) == parent):
return self.find_template(parent, context, peeking=True)
return template
@register.tag
def overextends(parser, token):
"""
Extended version of Django's ``extends`` tag that allows circular
inheritance to occur, eg a template can both be overridden and
extended at once.
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument" % bits[0])
parent_name = parser.compile_filter(bits[1])
nodelist = parser.parse()
if nodelist.get_nodes_by_type(ExtendsNode):
raise TemplateSyntaxError("'%s' cannot appear more than once "
"in the same template" % bits[0])
return OverExtendsNode(nodelist, parent_name, None)
|
Python
| 0
|
@@ -3297,413 +3297,21 @@
-loader_names = set(chain.from_iterable(%0A %5Btemplate_engine.get('OPTIONS', %7B%7D).get('loaders', %5B%5D)%0A for template_engine in settings.TEMPLATES%5D))%0A loader_names.update( # default template loaders%0A %5B'django.template.loaders.filesystem.Loader',%0A 'django.template.loaders.app_directories.Loader'%5D)%0A for loader_name in loader_names:%0A loader =
+for loader in
con
@@ -3331,21 +3331,16 @@
.engine.
-find_
template
@@ -3350,21 +3350,10 @@
ader
-(loader_name)
+s:
%0A
|
f71a4ca03b8c7c63816bab57a71f9d28a7139e2d
|
Add justification for utility method as comment.
|
contrib/vcloud/vcloud_util.py
|
contrib/vcloud/vcloud_util.py
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import collections
import os
import sys
import benchexec.util
sys.dont_write_bytecode = True # prevent creation of .pyc files
def parse_vcloud_run_result(values):
result_values = collections.OrderedDict()
def parse_time_value(s):
if s[-1] != "s":
raise ValueError('Cannot parse "{0}" as a time value.'.format(s))
return float(s[:-1])
def set_exitcode(new):
if "exitcode" in result_values:
old = result_values["exitcode"]
assert (
old == new
), "Inconsistent exit codes {} and {} from VerifierCloud".format(old, new)
else:
result_values["exitcode"] = new
for key, value in values:
value = value.strip()
if key in ["cputime", "walltime"]:
result_values[key] = parse_time_value(value)
elif key == "memory":
result_values["memory"] = int(value.strip("B"))
elif key == "exitcode":
set_exitcode(benchexec.util.ProcessExitCode.from_raw(int(value)))
elif key == "returnvalue":
set_exitcode(benchexec.util.ProcessExitCode.create(value=int(value)))
elif key == "exitsignal":
set_exitcode(benchexec.util.ProcessExitCode.create(signal=int(value)))
elif (
key in ["host", "terminationreason", "cpuCores", "memoryNodes", "starttime"]
or key.startswith("blkio-")
or key.startswith("cpuenergy")
or key.startswith("energy-")
or key.startswith("cputime-cpu")
):
result_values[key] = value
elif key not in ["command", "timeLimit", "coreLimit", "memoryLimit"]:
result_values["vcloud-" + key] = value
return result_values
def parse_frequency_value(s):
if not s:
return s
s = s.strip()
pos = len(s)
while pos and not s[pos - 1].isdigit():
pos -= 1
number = float(s[:pos])
unit = s[pos:].strip()
if not unit or unit == "Hz":
return int(number)
elif unit == "kHz":
return int(number * 1000)
elif unit == "MHz":
return int(number * 1000 * 1000)
elif unit == "GHz":
return int(number * 1000 * 1000 * 1000)
else:
raise ValueError(
"unknown unit: {} (allowed are Hz, kHz, MHz, and GHz)".format(unit)
)
def is_windows():
return os.name == "nt"
def force_linux_path(path):
if is_windows():
return path.replace("\\", "/")
return path
|
Python
| 0
|
@@ -2005,24 +2005,107 @@
y_value(s):%0A
+ # Contrary to benchexec.util.parse_frequency_value, this handles float values.%0A
if not s
|
96158b6b5a153db6b9a5e5d40699efefc728a9b3
|
Make our LiveWidget handle a 'topics' property along with 'topic'
|
moksha/api/widgets/live/live.py
|
moksha/api/widgets/live/live.py
|
# This file is part of Moksha.
# Copyright (C) 2008-2009 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Luke Macken <lmacken@redhat.com>
import moksha
from tw.api import Widget
from moksha.exc import MokshaException
from moksha.api.widgets.stomp import StompWidget, stomp_subscribe, stomp_unsubscribe
class LiveWidget(Widget):
""" A live streaming widget.
This widget handles automatically subscribing your widget to any given
topics, and registers all of the stomp callbacks.
"""
engine_name = 'mako'
def update_params(self, d):
""" Register this widgets stomp callbacks """
super(LiveWidget, self).update_params(d)
topics = d.get('topic', getattr(self, 'topic', None))
if not topics:
raise MokshaException('You must specify a `topic` to subscribe to')
topics = isinstance(topics, list) and topics or [topics]
for callback in StompWidget.callbacks:
if callback == 'onmessageframe':
for topic in topics:
cb = getattr(self, 'onmessage').replace('${id}', self.id)
moksha.stomp[callback][topic].append(cb)
elif callback == 'onconnectedframe':
moksha.stomp['onconnectedframe'].append(stomp_subscribe(topics))
elif callback in self.params:
moksha.stomp[callback].append(getattr(self, callback))
def get_topics(self):
topics = []
for key in ('topic', 'topics'):
if hasattr(self, key):
topic = getattr(self, key)
if topic:
if isinstance(topic, basestring):
map(topics.append, topic.split())
else:
topics += topic
return topics
# Moksha Topic subscription handling methods
subscribe_topics = stomp_subscribe
unsubscribe_topics = stomp_unsubscribe
|
Python
| 0
|
@@ -1351,15 +1351,57 @@
ic',
+ d.get('topics', getattr(self, 'topics',
None))
+))
%0A
|
410f02a4f657f9a8b9c839f3e08b176f443de9e8
|
Handle cases when the searched word is only part of the person's name.
|
linkedin_scraper/spiders/people_search.py
|
linkedin_scraper/spiders/people_search.py
|
from os import environ
from scrapy_splash import SplashRequest
from scrapy.spiders.init import InitSpider
from scrapy.http import Request, FormRequest
class PeopleSearchSpider(InitSpider):
name = 'people_search'
allowed_domains = ['linkedin.com']
login_page = 'https://www.linkedin.com/uas/login'
def __init__(self, *args, **kwargs):
try:
self.username = (kwargs.pop('username', None) or
environ['SPIDER_USERNAME'])
self.password = (kwargs.pop('password', None) or
environ['SPIDER_PASSWORD'])
except KeyError:
raise Exception('Both username and password need to be specified '
'by -a option or SPIDER_<PARAM> environment var')
query = kwargs.pop('query', 'Mateusz+Moneta')
self.start_urls = [
'https://www.linkedin.com/vsearch/f?type=people&keywords=%s' % query
]
super().__init__(*args, **kwargs)
def init_request(self):
return Request(url=self.login_page, callback=self.login)
def login(self, response):
return FormRequest.from_response(
response, callback=self.check_login_response,
formdata={'session_key': self.username,
'session_password': self.password})
def parse(self, response):
for search_result in response.css('li.mod.result.people'):
*first_name, last_name = search_result.css('b::text').extract()
yield {
'first_name': ' '.join(first_name),
'last_name': last_name,
}
def check_login_response(self, response):
if b'Sign Out' in response.body:
self.logger.debug("Successfully logged in. Let's start crawling!")
return self.initialized()
self.logger.error('Login failed!')
def make_requests_from_url(self, url):
# Do SplashRequest instead of regular one to be able to evaluate
# JavaScript responsible for dynamic page generation.
return SplashRequest(url)
|
Python
| 0.000001
|
@@ -1446,71 +1446,162 @@
-*first_name, last_name = search_result.css('b::text').extract()
+names = search_result.css('a.title.main-headline').xpath(%0A 'string(.)').extract_first()%0A *first_name, last_name = names.split()%0A
%0A
|
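The replacement line in this diff leans on Python 3 extended iterable unpacking: `*first_name, last_name = names.split()` collects every token but the last into a list, which is what lets a multi-word first name survive when the searched word only partially matches. For illustration:

```python
names = "Jose Maria Garcia"

*first_name, last_name = names.split()
print(" ".join(first_name))  # Jose Maria
print(last_name)             # Garcia
```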
0e0a0de8f6be116ba00a6938586dcbc315c4db3f
|
Clarify that symbolic representations are Tensors
|
cleverhans/model.py
|
cleverhans/model.py
|
from abc import ABCMeta
class Model(object):
"""
An abstract interface for model wrappers that exposes model symbols
needed for making an attack. This abstraction removes the dependency on
any specific neural network package (e.g. Keras) from the core
code of CleverHans. It can also simplify exposing the hidden features of a
model when a specific package does not directly expose them.
"""
__metaclass__ = ABCMeta
def __init__(self):
pass
def __call__(self, *args, **kwargs):
"""
For compatibility with functions used as model definitions (taking
an input tensor and returning the tensor giving the output
of the model on that input).
"""
return self.get_probs(*args, **kwargs)
def get_layer(self, x, layer):
"""
Expose the hidden features of a model given a layer name.
:param x: A symbolic representation of the network input
:param layer: The name of the hidden layer to return features at.
:return: A symbolic representation of the hidden features
:raise: NoSuchLayerError if `layer` is not in the model.
"""
# Return the symbolic representation for this layer.
output = self.fprop(x)
try:
requested = output[layer]
except KeyError:
raise NoSuchLayerError()
return requested
def get_logits(self, x):
"""
:param x: A symbolic representation of the network input
:return: A symbolic representation of the output logits (i.e., the
values fed as inputs to the softmax layer).
"""
return self.get_layer(x, 'logits')
def get_probs(self, x):
"""
:param x: A symbolic representation of the network input
:return: A symbolic representation of the output probabilities (i.e.,
the output values produced by the softmax layer).
"""
try:
return self.get_layer(x, 'probs')
except NoSuchLayerError:
pass
except NotImplementedError:
pass
import tensorflow as tf
return tf.nn.softmax(self.get_logits(x))
def get_layer_names(self):
"""
:return: a list of names for the layers that can be exposed by this
model abstraction.
"""
if hasattr(self, 'layer_names'):
return self.layer_names
raise NotImplementedError('`get_layer_names` not implemented.')
def fprop(self, x):
"""
Exposes all the layers of the model returned by get_layer_names.
:param x: A symbolic representation of the network input
:return: A dictionary mapping layer names to the symbolic
representation of their output.
"""
raise NotImplementedError('`fprop` not implemented.')
def get_params(self):
"""
Provides access to the model's parameters.
:return: A list of all Variables defining the model parameters.
"""
raise NotImplementedError()
class CallableModelWrapper(Model):
def __init__(self, callable_fn, output_layer):
"""
Wrap a callable function that takes a tensor as input and returns
a tensor as output with the given layer name.
:param callable_fn: The callable function taking a tensor and
returning a given layer as output.
:param output_layer: A string of the output layer returned by the
function. (Usually either "probs" or "logits".)
"""
self.output_layer = output_layer
self.callable_fn = callable_fn
def get_layer_names(self):
return [self.output_layer]
def fprop(self, x):
return {self.output_layer: self.callable_fn(x)}
class NoSuchLayerError(ValueError):
"""Raised when a layer that does not exist is requested."""
|
Python
| 0.999999
|
@@ -923,32 +923,41 @@
c representation
+ (Tensor)
of the network
@@ -1070,32 +1070,41 @@
c representation
+ (Tensor)
of the hidden f
@@ -1232,16 +1232,25 @@
entation
+ (Tensor)
for thi
@@ -1504,32 +1504,41 @@
c representation
+ (Tensor)
of the network
@@ -1577,32 +1577,41 @@
c representation
+ (Tensor)
of the output l
@@ -1615,16 +1615,24 @@
t logits
+%0A
(i.e.,
@@ -1634,33 +1634,16 @@
.e., the
-%0A
values
@@ -1810,32 +1810,41 @@
c representation
+ (Tensor)
of the network
@@ -1883,32 +1883,41 @@
c representation
+ (Tensor)
of the output p
@@ -1914,16 +1914,24 @@
e output
+%0A
probabi
@@ -1943,32 +1943,16 @@
s (i.e.,
-%0A
the out
@@ -2708,24 +2708,33 @@
presentation
+ (Tensor)
of the netw
|
655fcce56abd0d3f0da9b52e911636d931157443
|
bump version
|
dockercloud/__init__.py
|
dockercloud/__init__.py
|
import base64
import logging
import os
import requests
from future.standard_library import install_aliases
install_aliases()
from dockercloud.api import auth
from dockercloud.api.service import Service
from dockercloud.api.container import Container
from dockercloud.api.repository import Repository
from dockercloud.api.node import Node
from dockercloud.api.action import Action
from dockercloud.api.nodecluster import NodeCluster
from dockercloud.api.nodetype import NodeType
from dockercloud.api.nodeprovider import Provider
from dockercloud.api.noderegion import Region
from dockercloud.api.tag import Tag
from dockercloud.api.trigger import Trigger
from dockercloud.api.stack import Stack
from dockercloud.api.exceptions import ApiError, AuthError, ObjectNotFound, NonUniqueIdentifier
from dockercloud.api.utils import Utils
from dockercloud.api.events import Events
from dockercloud.api.nodeaz import AZ
__version__ = '1.0.5'
dockercloud_auth = os.environ.get('DOCKERCLOUD_AUTH')
basic_auth = auth.load_from_file("~/.docker/config.json")
if os.environ.get('DOCKERCLOUD_USER') and os.environ.get('DOCKERCLOUD_PASS'):
basic_auth = base64.b64encode("%s:%s" % (os.environ.get('DOCKERCLOUD_USER'), os.environ.get('DOCKERCLOUD_PASS')))
if os.environ.get('DOCKERCLOUD_USER') and os.environ.get('DOCKERCLOUD_APIKEY'):
basic_auth = base64.b64encode("%s:%s" % (os.environ.get('DOCKERCLOUD_USER'), os.environ.get('DOCKERCLOUD_APIKEY')))
rest_host = os.environ.get("DOCKERCLOUD_REST_HOST") or 'https://cloud.docker.com/'
stream_host = os.environ.get("DOCKERCLOUD_STREAM_HOST") or 'wss://ws.cloud.docker.com/'
namespace = os.environ.get('DOCKERCLOUD_NAMESPACE')
user_agent = None
logging.basicConfig()
logger = logging.getLogger("python-dockercloud")
try:
requests.packages.urllib3.disable_warnings()
except:
pass
|
Python
| 0
|
@@ -930,9 +930,9 @@
1.0.
-5
+6
'%0A%0Ad
|
6589c5cc30c228e5aacd77184310e9afd9dc0345
|
Fix test
|
tests/test_contributors_views.py
|
tests/test_contributors_views.py
|
# -*- coding: utf-8 -*-
from nose.tools import * # noqa; PEP8 asserts
from tests.factories import ProjectFactory, NodeFactory, AuthUserFactory
from tests.base import OsfTestCase, fake
from framework.auth.decorators import Auth
from website.profile import utils
class TestContributorUtils(OsfTestCase):
def setUp(self):
super(TestContributorUtils, self).setUp()
self.project = ProjectFactory()
def test_serialize_user(self):
serialized = utils.serialize_user(self.project.creator, self.project)
assert_true(serialized['visible'])
assert_equal(serialized['permission'], 'admin')
def test_serialize_user_admin(self):
serialized = utils.serialize_user(self.project.creator, self.project, admin=True)
assert_false(serialized['visible'])
assert_equal(serialized['permission'], 'read')
class TestContributorViews(OsfTestCase):
def setUp(self):
super(TestContributorViews, self).setUp()
self.user = AuthUserFactory()
self.auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
def test_get_contributors_no_limit(self):
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=False,
)
self.project.save()
url = self.project.api_url_for('get_contributors')
res = self.app.get(url, auth=self.user.auth)
# Should be two visible contributors on the project
assert_equal(
len(res.json['contributors']),
2,
)
def test_get_contributors_with_limit(self):
# Add five contributors
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=False,
)
self.project.save()
# Set limit to three contributors
url = self.project.api_url_for('get_contributors', limit=3)
res = self.app.get(url, auth=self.user.auth)
# Should be three visible contributors on the project
assert_equal(
len(res.json['contributors']),
3,
)
# There should be two 'more' contributors not shown
assert_equal(
(res.json['more']),
2,
)
def test_get_contributors_from_parent(self):
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=True,
)
self.project.add_contributor(
AuthUserFactory(),
auth=self.auth,
visible=False,
)
self.project.save()
component = NodeFactory(parent=self.project, creator=self.user)
url = component.api_url_for('get_contributors_from_parent')
res = self.app.get(url, auth=self.user.auth)
# Should be one contributor to the parent who is both visible and
# not a contributor on the component
assert_equal(
len(res.json['contributors']),
1,
)
|
Python
| 0.000004
|
@@ -3500,19 +3500,19 @@
ould be
-one
+all
contrib
@@ -3519,46 +3519,38 @@
utor
- to the parent who is both visible and
+s, client-side handles marking
%0A
@@ -3559,14 +3559,8 @@
#
- not a
con
@@ -3571,25 +3571,46 @@
utor
- on the component
+s that are already added to the child.
%0A
@@ -3687,9 +3687,9 @@
-1
+2
,%0A
|
b0c5d485543e123c985336d054b6f20d60634221
|
Add new kumquat settings.py file from the origin. We need to find another solution in the future to overwrite config files
|
copy/tmp/kumquat-settings.py
|
copy/tmp/kumquat-settings.py
|
"""
Django settings for kumquat_web project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import sys, os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'messagegroups',
'kumquat',
'web',
'mysql',
'ftp',
'status',
'mail',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'kumquat_web.urls'
WSGI_APPLICATION = 'kumquat_web.wsgi.application'
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Redirect after LOGIN
LOGIN_REDIRECT_URL = '/'
# Logging
# https://docs.djangoproject.com/en/dev/topics/logging/
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
# Only use logging if debug is false and it's a production environment
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
'datefmt': '%Y-%m-%dT%H:%M:%S',
},
'simple': {
'format': '%(levelname)s %(message)s',
'datefmt': '%Y-%m-%dT%H:%M:%S',
},
},
'handlers': {
# Log to stdout
'console': {
'class':'logging.StreamHandler',
'stream': sys.stdout,
},
# Log to syslog because this is much cleaner than extra file
'syslog': {
'class': 'logging.handlers.SysLogHandler',
'facility': 'local1',
'address': ('127.0.0.1', 514),
'formatter': 'simple',
},
},
'loggers': {
# Might as well log any errors anywhere else in Django
'django': {
'handlers': ['console', 'syslog'],
'level': 'ERROR',
'propagate': False,
},
},
}
|
Python
| 0
|
@@ -330,16 +330,56 @@
sys, os%0A
+from django.conf import global_settings%0A
BASE_DIR
@@ -1328,16 +1328,140 @@
es'),)%0A%0A
+TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (%0A%09'django_settings_export.settings_export',%0A)%0A%0A
# Intern
@@ -1747,31 +1747,8 @@
/'%0A%0A
-# Redirect after LOGIN%0A
LOGI
@@ -2836,8 +2836,196 @@
%0A%09%7D,%0A%7D%0A%0A
+# kumquat%0AKUMQUAT_WEBMAIL_URL = ''%0AKUMQUAT_PHPMYADMIN_URL = ''%0A%0A# Allow the following variables in the template%0ASETTINGS_EXPORT = %5B%0A%09'KUMQUAT_WEBMAIL_URL',%0A%09'KUMQUAT_PHPMYADMIN_URL',%0A%5D%0A
|
b41e563c866a8918a65253c1cbb1a1fa5c44c212
|
Placate angry bot
|
xunit-autolabeler-v2/ast_parser/python/test_data/new_tests/fixture_detection_test.py
|
xunit-autolabeler-v2/ast_parser/python/test_data/new_tests/fixture_detection_test.py
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def setup():
pass
def init():
pass
def setup_something():
pass
def init_something():
pass
def another_setup():
pass
def another_init():
pass
|
Python
| 0.999805
|
@@ -8,17 +8,17 @@
ight 202
-0
+1
Google
|
ef150166d49504475b3d37c8754e35e5f1356e9f
|
make sql form operations more closely match couch operations
|
corehq/form_processor/serializers.py
|
corehq/form_processor/serializers.py
|
from jsonfield import JSONField
from rest_framework import serializers
from corehq.apps.commtrack.models import StockState
from corehq.form_processor.models import (
CommCareCaseIndexSQL, CommCareCaseSQL, CaseTransaction,
XFormInstanceSQL, XFormOperationSQL, XFormAttachmentSQL,
LedgerValue)
class DeletableModelSerializer(serializers.ModelSerializer):
"""
A ModelSerializer that takes an additional `fields` argument that
controls which fields should be displayed.
"""
def __init__(self, instance=None, *args, **kwargs):
super(DeletableModelSerializer, self).__init__(instance=instance, *args, **kwargs)
if instance is not None and not instance.is_deleted:
self.fields.pop('deletion_id')
self.fields.pop('deleted_on')
class XFormOperationSQLSerializer(serializers.ModelSerializer):
class Meta:
model = XFormOperationSQL
class XFormAttachmentSQLSerializer(serializers.ModelSerializer):
id = serializers.UUIDField(source="attachment_id")
class Meta:
model = XFormAttachmentSQL
fields = ('id', 'content_type', 'content_length')
class XFormInstanceSQLSerializer(DeletableModelSerializer):
_id = serializers.CharField(source='form_id')
doc_type = serializers.CharField()
history = XFormOperationSQLSerializer(many=True, read_only=True)
form = serializers.JSONField(source='form_data')
auth_context = serializers.DictField()
openrosa_headers = serializers.DictField()
external_blobs = serializers.JSONField(source='serialized_attachments')
class Meta:
model = XFormInstanceSQL
exclude = ('id', 'form_id')
def __init__(self, *args, **kwargs):
include_attachments = kwargs.pop('include_attachments', False)
if not include_attachments:
self.fields.pop('external_blobs')
super(XFormInstanceSQLSerializer, self).__init__(*args, **kwargs)
class XFormStateField(serializers.ChoiceField):
def __init__(self, **kwargs):
super(XFormStateField, self).__init__(XFormInstanceSQL.STATES, **kwargs)
def get_attribute(self, obj):
choice = super(serializers.ChoiceField, self).get_attribute(obj)
readable_state = []
for state, state_slug in self.choices.iteritems():
if choice & state:
readable_state.append(state_slug)
return ' / '.join(readable_state)
class JsonFieldSerializerMixin(object):
serializer_field_mapping = {}
serializer_field_mapping.update(DeletableModelSerializer.serializer_field_mapping)
serializer_field_mapping[JSONField] = serializers.JSONField
class XFormInstanceSQLRawDocSerializer(JsonFieldSerializerMixin, DeletableModelSerializer):
state = XFormStateField()
class Meta:
model = XFormInstanceSQL
class CommCareCaseIndexSQLSerializer(serializers.ModelSerializer):
case_id = serializers.CharField()
relationship = serializers.CharField()
class Meta:
model = CommCareCaseIndexSQL
fields = ('case_id', 'identifier', 'referenced_id', 'referenced_type', 'relationship')
class CaseTransactionActionSerializer(serializers.ModelSerializer):
xform_id = serializers.CharField(source='form_id')
date = serializers.DateTimeField(source='server_date')
class Meta:
model = CaseTransaction
fields = ('xform_id', 'server_date', 'date', 'sync_log_id')
class CaseTransactionactionRawDocSerializer(JsonFieldSerializerMixin, CaseTransactionActionSerializer):
type = serializers.CharField(source='readable_type')
class Meta:
model = CaseTransaction
fields = ('form_id', 'server_date', 'date', 'sync_log_id', 'type', 'details')
class CommCareCaseSQLRawDocSerializer(JsonFieldSerializerMixin, DeletableModelSerializer):
indices = CommCareCaseIndexSQLSerializer(many=True, read_only=True)
transactions = CaseTransactionactionRawDocSerializer(many=True, read_only=True, source='non_revoked_transactions')
class Meta:
model = CommCareCaseSQL
class CommCareCaseSQLSerializer(DeletableModelSerializer):
_id = serializers.CharField(source='case_id')
doc_type = serializers.CharField()
user_id = serializers.CharField(source='modified_by')
indices = CommCareCaseIndexSQLSerializer(many=True, read_only=True)
actions = CaseTransactionActionSerializer(many=True, read_only=True, source='non_revoked_transactions')
case_json = serializers.JSONField()
xform_ids = serializers.ListField()
class Meta:
model = CommCareCaseSQL
exclude = ('case_json',)
class CommCareCaseSQLAPISerializer(serializers.ModelSerializer):
"""This serializer is for presenting a case in json for APIs to access"""
user_id = serializers.CharField(source='modified_by')
date_closed = serializers.DateTimeField(source='closed_on')
date_modified = serializers.DateTimeField(source='modified_on')
properties = serializers.JSONField(source='get_properties_in_api_format')
server_date_modified = serializers.DateTimeField(source='server_modified_on')
server_date_opened = serializers.DateTimeField(source='opened_on')
indices = serializers.JSONField(source='get_index_map')
attachments = serializers.JSONField(source='get_attachment_map')
reverse_indices = serializers.JSONField(source='get_reverse_index_map')
def __init__(self, *args, **kwargs):
lite = kwargs.pop('lite', False)
if lite:
self.fields.pop('reverse_indices')
super(CommCareCaseSQLAPISerializer, self).__init__(*args, **kwargs)
class Meta:
model = CommCareCaseSQL
fields = (
'domain',
'case_id',
'user_id',
'closed',
'xform_ids',
'date_closed',
'date_modified',
'server_date_modified',
'server_date_opened',
'properties',
'indices',
'reverse_indices',
'attachments',
)
class LedgerValueSerializer(serializers.ModelSerializer):
_id = serializers.CharField(source='ledger_id')
case_id = serializers.CharField()
class Meta:
model = LedgerValue
exclude = ('id', 'case')
class StockStateSerializer(serializers.ModelSerializer):
_id = serializers.IntegerField(source='id')
entry_id = serializers.CharField(source='product_id')
location_id = serializers.CharField(source='sql_location.location_id')
balance = serializers.IntegerField(source='stock_on_hand')
last_modified = serializers.DateTimeField(source='last_modified_date')
domain = serializers.CharField()
class Meta:
model = StockState
exclude = (
'id',
'product_id',
'stock_on_hand',
'last_modified_date',
'sql_product',
'sql_location',
)
|
Python
| 0.000003
|
@@ -850,24 +850,76 @@
erializer):%0A
+ user = serializers.CharField(source=%22user_id%22)%0A%0A
class Me
@@ -952,24 +952,68 @@
perationSQL%0A
+ exclude = ('id', 'form', 'user_id')%0A
%0A%0Aclass XFor
|
d16e9decb8a8bc4f2ea8adc81327e2290a532c69
|
update method names
|
hwtLib/structManipulators/structReader.py
|
hwtLib/structManipulators/structReader.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.code import StaticForEach, connect
from hwt.hdlObjects.frameTmpl import FrameTmpl
from hwt.hdlObjects.transTmpl import TransTmpl
from hwt.hdlObjects.types.struct import HStruct
from hwt.interfaces.std import Handshaked, Signal
from hwt.interfaces.structIntf import StructIntf
from hwt.interfaces.utils import propagateClkRstn, addClkRstn
from hwt.synthesizer.interfaceLevel.unit import Unit
from hwt.synthesizer.param import Param
from hwtLib.amba.axiDatapumpIntf import AxiRDatapumpIntf
from hwtLib.amba.axis import AxiStream_withoutSTRB
from hwtLib.amba.axis_comp.frameParser import AxiS_frameParser
from hwtLib.handshaked.builder import HsBuilder
from hwtLib.handshaked.streamNode import StreamNode
class StructReader(AxiS_frameParser):
"""
This unit downloads required structure fields over rDatapump
interface from address specified by get interface
:ivar MAX_DUMMY_WORDS: Param, specifies maximum dummy bus words between fields
if there is more of ignored space transaction will be split to
:ivar ID: Param, id for transactions on bus
:ivar READ_ACK: Param, if true ready on "get" will be set only
when component is in idle (if false "get"
is regular handshaked interface)
:ivar SHARED_READY: Param, if this is true field interfaces
will be of type VldSynced and single ready signal
will be used for all else every interface
will be instance of Handshaked and it
will have it's own ready(rd) signal
:attention: interfaces of field will not send data in same time
.. aafig::
get (base addr) +---------+
+---------------- +------> field0 |
| | +---------+
bus req +--v---+-+
<------------+ | +---------+
| reader +----> field1 |
+------------> | +---------+
bus data +-------+-+
| +---------+
+------> field2 |
+---------+
:note: names in the picture are just illustrative
"""
def __init__(self, structT, tmpl=None, frames=None):
"""
:param structT: instance of HStruct which specifies data format to download
:param tmpl: instance of TransTmpl for this structT
:param frames: list of FrameTmpl instances for this tmpl
:note: if tmpl and frames are None they are resolved from structT parseTemplate
:note: this unit can parse sequence of frames, if they are specified by "frames"
:attention: interfaces for each field in struct will be dynamically created
:attention: structT can not contain fields with variable size like HStream
"""
Unit.__init__(self)
assert isinstance(structT, HStruct)
self._structT = structT
if tmpl is not None:
assert frames is not None, "tmpl and frames can be used only together"
else:
assert frames is None, "tmpl and frames can be used only together"
self._tmpl = tmpl
self._frames = frames
def _config(self):
self.ID = Param(0)
AxiRDatapumpIntf._config(self)
self.READ_ACK = Param(False)
self.SHARED_READY = Param(False)
def maxWordIndex(self):
return max(map(lambda f: f.endBitAddr - 1, self._frames)) // int(self.DATA_WIDTH)
def parseTemplate(self):
if self._tmpl is None:
self._tmpl = TransTmpl(self._structT)
if self._frames is None:
DW = int(self.DATA_WIDTH)
frames = FrameTmpl.framesFromTransTmpl(
self._tmpl,
DW,
trimPaddingWordsOnStart=True,
trimPaddingWordsOnEnd=True)
self._frames = list(frames)
def _declr(self):
addClkRstn(self)
self.dataOut = StructIntf(self._structT,
self.createInterfaceForField)
self.get = Handshaked() # data signal is addr of structure to download
self.get._replaceParam("DATA_WIDTH", self.ADDR_WIDTH)
self.parseTemplate()
with self._paramsShared():
# interface for communication with datapump
self.rDatapump = AxiRDatapumpIntf()
self.rDatapump.MAX_LEN.set(self.maxWordIndex() + 1)
self.parser = AxiS_frameParser(AxiStream_withoutSTRB,
self._structT,
tmpl=self._tmpl,
frames=self._frames)
self.parser.SYNCHRONIZE_BY_LAST.set(False)
if self.SHARED_READY:
self.ready = Signal()
def _impl(self):
propagateClkRstn(self)
req = self.rDatapump.req
req.id ** self.ID
req.rem ** 0
if self.READ_ACK:
get = self.get
else:
get = HsBuilder(self, self.get).buff().end
def f(frame, indx):
s = [req.addr ** (get.data + frame.startBitAddr // 8),
req.len ** (frame.getWordCnt() - 1),
req.vld ** get.vld
]
isLastFrame = indx == len(self._frames) - 1
if isLastFrame:
rd = req.rd
else:
rd = 0
s.append(get.rd ** rd)
ack = StreamNode(masters=[get], slaves=[self.rDatapump.req]).ack()
return s, ack
StaticForEach(self, self._frames, f)
r = self.rDatapump.r
connect(r, self.parser.dataIn, exclude=[r.id, r.strb])
for _, field in self._tmpl.walkFlatten():
myIntf = self.dataOut._fieldsToInterfaces[field.origin]
parserIntf = self.parser.dataOut._fieldsToInterfaces[field.origin]
myIntf ** parserIntf
if __name__ == "__main__":
from hwtLib.types.ctypes import uint16_t, uint32_t, uint64_t
from hwt.synthesizer.shortcuts import toRtl
s = HStruct(
(uint64_t, "item0"), # tuples (type, name) where type has to be instance of Bits type
(uint64_t, None), # name = None means this field will be ignored
(uint64_t, "item1"),
(uint64_t, None),
(uint16_t, "item2"),
(uint16_t, "item3"),
(uint32_t, "item4"),
(uint32_t, None),
(uint64_t, "item5"), # this word is split on two bus words
(uint32_t, None),
(uint64_t, None),
(uint64_t, None),
(uint64_t, None),
(uint64_t, "item6"),
(uint64_t, "item7"),
)
u = StructReader(s)
print(toRtl(u))
|
Python
| 0.000001
|
@@ -4070,31 +4070,20 @@
elf.
-createInterfaceForField
+_mkFieldIntf
)%0A%0A
|
7947ed60730d46810e768e6c7baa27f739e8259e
|
Break up caps-cache.py
|
tests/twisted/caps/caps-cache.py
|
tests/twisted/caps/caps-cache.py
|
"""
Test that requesting a caps set once is enough with a hash and that we need 5
confirmations without a hash.
"""
from twisted.words.xish import xpath
from servicetest import EventPattern, assertEquals, assertContains
from gabbletest import exec_test, make_presence
import constants as cs
import ns
from caps_helper import (
compute_caps_hash, make_caps_disco_reply, fake_client_dataforms,
)
client = 'http://telepathy.freedesktop.org/fake-client'
features = [
'http://jabber.org/protocol/jingle',
'http://jabber.org/protocol/jingle/description/audio',
'http://www.google.com/transport/p2p',
]
def presence_and_disco(q, conn, stream, contact, disco,
caps, dataforms={}):
h = conn.RequestHandles(cs.HT_CONTACT, [contact])[0]
stream.send(make_presence(contact, status='hello'))
q.expect_many(
EventPattern('dbus-signal', signal='PresenceUpdate',
args=[{h:
(0L, {u'available': {'message': 'hello'}})}]),
EventPattern('dbus-signal', signal='PresencesChanged',
args=[{h:
(2, u'available', 'hello')}]))
# no special capabilities
assertEquals([(h, cs.CHANNEL_TYPE_TEXT, 3, 0)],
conn.Capabilities.GetCapabilities([h]))
# send updated presence with Jingle caps info
stream.send(make_presence(contact, status='hello', caps=caps))
if disco:
# Gabble looks up our capabilities
event = q.expect('stream-iq', to=contact, query_ns=ns.DISCO_INFO)
assertEquals(client + '#' + caps['ver'], event.query['node'])
# send good reply
result = make_caps_disco_reply(stream, event.stanza, features,
dataforms)
stream.send(result)
# we can now do audio calls
event = q.expect('dbus-signal', signal='CapabilitiesChanged')
assertContains((h, cs.CHANNEL_TYPE_STREAMED_MEDIA, 3, cs.MEDIA_CAP_AUDIO),
conn.Capabilities.GetCapabilities([h]))
def test(q, bus, conn, stream):
conn.Connect()
q.expect('dbus-signal', signal='StatusChanged',
args=[cs.CONN_STATUS_CONNECTED, cs.CSR_REQUESTED])
caps = {
'node': client,
'ver': '0.1',
}
presence_and_disco(q, conn, stream, 'bob1@foo.com/Foo', True, caps)
presence_and_disco(q, conn, stream, 'bob2@foo.com/Foo', True, caps)
presence_and_disco(q, conn, stream, 'bob3@foo.com/Foo', True, caps)
presence_and_disco(q, conn, stream, 'bob4@foo.com/Foo', True, caps)
presence_and_disco(q, conn, stream, 'bob5@foo.com/Foo', True, caps)
# we have 5 different contacts that confirm
presence_and_disco(q, conn, stream, 'bob6@foo.com/Foo', False, caps)
presence_and_disco(q, conn, stream, 'bob7@foo.com/Foo', False, caps)
caps = {
'node': client,
'ver': compute_caps_hash([], features, fake_client_dataforms),
'hash': 'sha-1',
}
presence_and_disco(q, conn, stream, 'bilbo1@foo.com/Foo', True, caps,
fake_client_dataforms)
# 1 contact is enough with hash
presence_and_disco(q, conn, stream, 'bilbo2@foo.com/Foo', False, caps,
fake_client_dataforms)
presence_and_disco(q, conn, stream, 'bilbo3@foo.com/Foo', False, caps,
fake_client_dataforms)
if __name__ == '__main__':
exec_test(test)
|
Python
| 0
|
@@ -715,16 +715,266 @@
ms=%7B%7D):%0A
+ h = send_presence(q, conn, stream, contact, caps)%0A%0A if disco:%0A stanza = expect_disco(q, contact, caps)%0A send_disco_reply(stream, stanza, dataforms)%0A%0A expect_caps(q, conn, h)%0A%0Adef send_presence(q, conn, stream, contact, caps):%0A
h =
@@ -1625,38 +1625,70 @@
caps))%0A%0A
-if disco:%0A
+return h%0A%0Adef expect_disco(q, contact, caps):%0A
# Gabble
@@ -1718,20 +1718,16 @@
ies%0A
-
-
event =
@@ -1784,20 +1784,16 @@
O_INFO)%0A
-
asse
@@ -1851,24 +1851,94 @@
ode'%5D)%0A%0A
+return event.stanza%0A%0Adef send_disco_reply(stream, stanza, dataforms):%0A
# send g
@@ -1951,20 +1951,16 @@
ply%0A
-
-
result =
@@ -1990,22 +1990,16 @@
stream,
-event.
stanza,
@@ -2007,28 +2007,16 @@
eatures,
-%0A
datafor
@@ -2023,20 +2023,16 @@
ms)%0A
-
-
stream.s
@@ -2044,16 +2044,45 @@
esult)%0A%0A
+def expect_caps(q, conn, h):%0A
# we
|
4617750140daf87e6e686bce19497a0e4e8bea75
|
remove out of context request checking
|
tests/test_ember_osf_web.py
|
tests/test_ember_osf_web.py
|
import mock
from flask import request
from tests.base import OsfTestCase
from website.ember_osf_web.decorators import ember_flag_is_active
from osf_tests.factories import FlagFactory, UserFactory
from django.contrib.auth.models import Group
class TestEmberFlagIsActive(OsfTestCase):
def setUp(self):
super(TestEmberFlagIsActive, self).setUp()
self.flag = FlagFactory(name='active_flag')
FlagFactory(name='inactive_flag', everyone=False).save()
self.mock_func = lambda: 'test value'
@mock.patch('website.ember_osf_web.decorators.use_ember_app')
def test_use_ember_app(self, mock_use_ember_app):
ember_flag_is_active('active_flag')(self.mock_func)()
mock_use_ember_app.assert_called_with()
@mock.patch('website.ember_osf_web.decorators.use_ember_app')
def test_dont_use_ember_app(self, mock_use_ember_app):
        # mock over the external module 'waffle.flag_is_active', not ours
import api.waffle.utils
api.waffle.utils.waffle.flag_is_active = mock.Mock(return_value=False)
ember_flag_is_active('inactive_flag')(self.mock_func)()
assert not mock_use_ember_app.called
@mock.patch('api.waffle.utils._get_current_user')
@mock.patch('website.ember_osf_web.decorators.use_ember_app')
def test_ember_flag_is_active_authenticated_user(self, mock_use_ember_app, mock__get_current_user):
        # mock over the external module 'waffle.flag_is_active', not ours
import api.waffle.utils
api.waffle.utils.waffle.flag_is_active = mock.Mock(return_value=True)
user = UserFactory()
mock__get_current_user.return_value = user
ember_flag_is_active('active_flag')(self.mock_func)()
api.waffle.utils.waffle.flag_is_active.assert_called_with(request, 'active_flag')
assert request.user == user
mock_use_ember_app.assert_called_with()
@mock.patch('api.waffle.utils._get_current_user', return_value=None)
@mock.patch('website.ember_osf_web.decorators.use_ember_app')
def test_ember_flag_is_active_unauthenticated_user(self, mock_use_ember_app, mock__get_current_user):
        # mock over the external module 'waffle.flag_is_active', not ours
import api.waffle.utils
api.waffle.utils.waffle.flag_is_active = mock.Mock(return_value=True)
ember_flag_is_active('active_flag')(self.mock_func)()
group = Group.objects.create(name='foo')
self.flag.groups.add(group)
api.waffle.utils.waffle.flag_is_active.assert_called_with(request, 'active_flag')
assert not request.user.is_authenticated
mock_use_ember_app.assert_called_with()
|
Python
| 0.000001
|
@@ -1809,44 +1809,8 @@
g')%0A
- assert request.user == user%0A
@@ -2522,57 +2522,8 @@
g')%0A
- assert not request.user.is_authenticated%0A
|
dae16f72b9ca5d96c7f894601aa3a69facbbb00e
|
Fix memory limit in MongoDB while loading logs (#5)
|
scripts/load_logs_to_mongodb.py
|
scripts/load_logs_to_mongodb.py
|
import os
import sys
from datetime import datetime
from collections import defaultdict
from pymongo import MongoClient
logs_file = open(sys.argv[1])
article_urls = set()
article_views = defaultdict(list) # article_url: list of user's id's
article_times = {}
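# Each input line is assumed (illustratively) to carry three tab-separated
# fields: a timestamp fragment (with the "GET", "Z" and ".ms" decorations that
# the code below strips off), the article URL, and the user id.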
for line in logs_file:
try:
timestamp, url, user = line.strip().split('\t')
    except ValueError:  # unpacking fails when the line lacks three tab-separated fields
        continue
timestamp = timestamp.strip(' GET').strip('Z')
# Delete ms from timestamp
timestamp = ''.join(timestamp.split('.')[:-1])
event_time = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S')
if not url or not user:
continue
if not url.startswith('https://tvrain.ru/'):
continue
article_urls.add(url)
article_views[url].append(user)
# Save time of only first event
if url not in article_times:
article_times[url] = event_time
mongodb_client = MongoClient(os.environ['MONGODB_URL'])
db = mongodb_client.tvrain
parsed_articles = db.tvrain
articles = db.articles
for article in parsed_articles.find():
if article['url'] not in article_urls:
continue
articles.insert_one({
'_id': article['_id'],
'title': article['title'],
'text': article['text'],
'views': article_views[article['url']],
'time': article_times[article['url']]
})
|
Python
| 0
|
@@ -997,16 +997,53 @@
articles
+%0A# Clear articles%0Aarticles.remove(%7B%7D)
%0A%0Afor ar
@@ -1127,32 +1127,234 @@
continue%0A
+ views = article_views%5Barticle%5B'url'%5D%5D%0A compressed_views = %5B%5D%0A # Save only every 10th view%0A for i in range(len(views)):%0A if i %25 10 == 0:%0A compressed_views.append(views%5Bi%5D)%0A
articles.ins
@@ -1483,37 +1483,24 @@
s':
-article_views%5Barticle%5B'url'%5D%5D
+compressed_views
,%0A
|
3375c9cd3311bff8ff3ab07c361e18c68226784c
|
remove stray print
|
mc2/controllers/base/managers/rabbitmq.py
|
mc2/controllers/base/managers/rabbitmq.py
|
import base64
import hashlib
import random
import time
import uuid
from django.conf import settings
from pyrabbit.api import Client
from pyrabbit.http import HTTPError
class ControllerRabbitMQManager(object):
def __init__(self, controller):
"""
A helper manager to get to connect to RabbitMQ
:param controller Controller: A Controller model instance
"""
self.ctrl = controller
self.client = Client(
settings.RABBITMQ_API_HOST,
settings.RABBITMQ_API_USERNAME,
settings.RABBITMQ_API_PASSWORD)
print self.client
def _create_password(self):
        # Guaranteed random dice rolls
return base64.b64encode(
hashlib.sha1(uuid.uuid1().hex).hexdigest())[:24]
def _create_username(self):
return base64.b64encode(str(
time.time() + random.random() * time.time())).strip('=').lower()
def create_rabbitmq_vhost(self):
"""
Attempts to create a new vhost. Returns false if vhost already exists.
The new username/password will be saved on the controller if a new
vhost was created
:returns: bool
"""
try:
self.client.get_vhost(self.ctrl.rabbitmq_vhost_name)
return False # already exists
except HTTPError:
pass
self.client.create_vhost(self.ctrl.rabbitmq_vhost_name)
# create user/pass
username = self._create_username()
password = self._create_password()
self.client.create_user(username, password)
# save newly created username/pass
self.ctrl.rabbitmq_vhost_username = username
self.ctrl.rabbitmq_vhost_password = password
self.ctrl.rabbitmq_vhost_host = settings.RABBITMQ_APP_HOST
self.ctrl.save()
self.client.set_vhost_permissions(
self.ctrl.rabbitmq_vhost_name, username, '.*', '.*', '.*')
return True
|
Python
| 0.000215
|
@@ -581,34 +581,8 @@
ORD)
-%0A print self.client
%0A%0A
|
6a9aceef33230a36b5d98eb1dae68884f57de53a
|
Remove print in test suite
|
tests/twisted/test-caps-cache.py
|
tests/twisted/test-caps-cache.py
|
"""
Test that requesting a caps set once is enough with a hash and that we need 5
confirmations without a hash.
"""
import dbus
import sys
from twisted.words.xish import domish, xpath
from gabbletest import exec_test, make_result_iq
text = 'org.freedesktop.Telepathy.Channel.Type.Text'
sm = 'org.freedesktop.Telepathy.Channel.Type.StreamedMedia'
caps_iface = 'org.freedesktop.Telepathy.Connection.Interface.Capabilities'
def make_presence(from_jid, type, status):
presence = domish.Element((None, 'presence'))
if from_jid is not None:
presence['from'] = from_jid
if type is not None:
presence['type'] = type
if status is not None:
presence.addElement('status', content=status)
return presence
def presence_add_caps(presence, ver, client, hash=None):
c = presence.addElement(('http://jabber.org/protocol/caps', 'c'))
c['node'] = client
c['ver'] = ver
if hash is not None:
c['hash'] = hash
return presence
def _test_without_hash(q, bus, conn, stream, contact, contact_handle, client, disco):
presence = make_presence(contact, None, 'hello')
stream.send(presence)
event = q.expect('dbus-signal', signal='PresenceUpdate',
args=[{contact_handle: (0L, {u'available': {'message': 'hello'}})}])
# no special capabilities
basic_caps = [(contact_handle, text, 3, 0)]
assert conn.Capabilities.GetCapabilities([contact_handle]) == basic_caps
# send updated presence with Jingle caps info
presence = make_presence(contact, None, 'hello')
presence = presence_add_caps(presence, '0.1', client)
print str(presence)
stream.send(presence)
if disco:
# Gabble looks up our capabilities
event = q.expect('stream-iq', to=contact,
query_ns='http://jabber.org/protocol/disco#info')
query_node = xpath.queryForNodes('/iq/query', event.stanza)[0]
assert query_node.attributes['node'] == \
client + '#' + '0.1'
# send good reply
result = make_result_iq(stream, event.stanza)
query = result.firstChildElement()
feature = query.addElement('feature')
feature['var'] = 'http://jabber.org/protocol/jingle'
feature = query.addElement('feature')
feature['var'] = 'http://jabber.org/protocol/jingle/description/audio'
feature = query.addElement('feature')
feature['var'] = 'http://www.google.com/transport/p2p'
stream.send(result)
# we can now do audio calls
event = q.expect('dbus-signal', signal='CapabilitiesChanged')
def _test_with_hash(q, bus, conn, stream, contact, contact_handle, client, disco):
presence = make_presence(contact, None, 'hello')
stream.send(presence)
event = q.expect('dbus-signal', signal='PresenceUpdate',
args=[{contact_handle: (0L, {u'available': {'message': 'hello'}})}])
# no special capabilities
basic_caps = [(contact_handle, text, 3, 0)]
assert conn.Capabilities.GetCapabilities([contact_handle]) == basic_caps
# send updated presence with Jingle caps info
presence = make_presence(contact, None, 'hello')
c = presence.addElement(('http://jabber.org/protocol/caps', 'c'))
c['node'] = client
c['ver'] = 'CzO+nkbflbxu1pgzOQSIi8gOyDc=' # good hash
c['hash'] = 'sha-1'
stream.send(presence)
if disco:
# Gabble looks up our capabilities
event = q.expect('stream-iq', to=contact,
query_ns='http://jabber.org/protocol/disco#info')
query_node = xpath.queryForNodes('/iq/query', event.stanza)[0]
assert query_node.attributes['node'] == \
client + '#' + c['ver']
# send good reply
result = make_result_iq(stream, event.stanza)
query = result.firstChildElement()
query['node'] = client + '#' + c['ver']
feature = query.addElement('feature')
feature['var'] = 'http://jabber.org/protocol/jingle'
feature = query.addElement('feature')
feature['var'] = 'http://jabber.org/protocol/jingle/description/audio'
feature = query.addElement('feature')
feature['var'] = 'http://www.google.com/transport/p2p'
query.addRawXml("""
<x type='result' xmlns='jabber:x:data'>
<field var='FORM_TYPE' type='hidden'>
<value>urn:xmpp:dataforms:softwareinfo</value>
</field>
<field var='software'>
<value>A Fake Client with Twisted</value>
</field>
<field var='software_version'>
<value>5.11.2-svn-20080512</value>
</field>
<field var='os'>
<value>Debian GNU/Linux unstable (sid) unstable sid</value>
</field>
<field var='os_version'>
<value>2.6.24-1-amd64</value>
</field>
</x>
""")
stream.send(result)
# we can now do audio calls
event = q.expect('dbus-signal', signal='CapabilitiesChanged')
def test(q, bus, conn, stream):
conn.Connect()
q.expect('dbus-signal', signal='StatusChanged', args=[0, 1])
client = 'http://telepathy.freedesktop.org/fake-client'
_test_without_hash(q, bus, conn, stream, 'bob1@foo.com/Foo', 2L, client, 1)
_test_without_hash(q, bus, conn, stream, 'bob2@foo.com/Foo', 3L, client, 1)
_test_without_hash(q, bus, conn, stream, 'bob3@foo.com/Foo', 4L, client, 1)
_test_without_hash(q, bus, conn, stream, 'bob4@foo.com/Foo', 5L, client, 1)
_test_without_hash(q, bus, conn, stream, 'bob5@foo.com/Foo', 6L, client, 1)
# we have 5 different contacts that confirm
_test_without_hash(q, bus, conn, stream, 'bob6@foo.com/Foo', 7L, client, 0)
_test_with_hash(q, bus, conn, stream, 'bilbo1@foo.com/Foo', 8L, client, 1)
# 1 contact is enough with hash
_test_with_hash(q, bus, conn, stream, 'bilbo2@foo.com/Foo', 9L, client, 0)
conn.Disconnect()
q.expect('dbus-signal', signal='StatusChanged', args=[2, 1])
if __name__ == '__main__':
exec_test(test)
|
Python
| 0.000003
|
@@ -1605,32 +1605,8 @@
nt)%0A
- print str(presence)%0A
|
2cee1d5bff32831a9c15755e7482057ac7b9a39a
|
Update packets.py
|
cs143sim/packets.py
|
cs143sim/packets.py
|
"""This module contains all packet definitions.
.. autosummary::
Packet
DataPacket
RouterPacket
.. moduleauthor:: Lan Hongjian <lanhongjianlr@gmail.com>
.. moduleauthor:: Yamei Ou <oym111@gmail.com>
.. moduleauthor:: Samuel Richerd <dondiego152@gmail.com>
.. moduleauthor:: Jan Van Bruggen <jancvanbruggen@gmail.com>
.. moduleauthor:: Junlin Zhang <neicullyn@gmail.com>
"""
from cs143sim.constants import PACKET_SIZE
class Packet(object):
"""Representation of a quantum of information
Packets carry information along the network, between :class:`Hosts <.Host>`
or :class:`Routers <.Router>`.
:param destination: destination :class:`.Host` or :class:`.Router`
:param source: source :class:`.Host` or :class:`.Router`
:param str timestamp: time at which the packet was created
:ivar destination: destination :class:`.Host` or :class:`.Router`
:ivar source: source :class:`.Host` or :class:`.Router`
:ivar str timestamp: time at which the packet was created
"""
def __init__(self, destination, source, timestamp):
self.timestamp = timestamp
self.source = source
self.destination = destination
self.size = PACKET_SIZE
class DataPacket(Packet):
"""A packet used for transferring data
:param destination: destination :class:`.Host` or :class:`.Router`
:param source: source :class:`.Host` or :class:`.Router`
:param str timestamp: time at which the packet was created
"""
def __init__(self, destination, source, timestamp, acknowledgement, number):
# TODO: define number and acknowledgement in docstring
super(DataPacket, self).__init__(timestamp=timestamp, source=source,
destination=destination)
self.number = number
self.acknowledgement = acknowledgement
class RouterPacket(Packet):
"""A packet used to update routing tables
:param source: source :class:`.Host` or :class:`.Router`
:param str timestamp: time at which the packet was created
"""
def __init__(self, source, timestamp, router_table, acknowledgement):
# TODO: define router_table in docstring
super(RouterPacket, self).__init__(timestamp=timestamp, source=source,
destination=0)
self.router_table = router_table
self.acknowledgement = acknowledgement
|
Python
| 0.000001
|
@@ -2350,16 +2350,40 @@
r_table%0A
+ self.number = 0%0A
|
d043e50a636ffa06f3383a81c37eb8947f4f8fa1
|
Implement a method to switch a pair of engine and partition simultaneously
|
modin/__init__.py
|
modin/__init__.py
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import os
import warnings
from packaging import version
import collections
from ._version import get_versions
def custom_formatwarning(msg, category, *args, **kwargs):
# ignore everything except the message
return "{}: {}\n".format(category.__name__, msg)
warnings.formatwarning = custom_formatwarning
# Filter numpy version warnings because they are not relevant
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="Large object of size")
warnings.filterwarnings(
"ignore",
message="The pandas.datetime class is deprecated and will be removed from pandas in a future version. "
"Import from datetime module instead.",
)
def get_execution_engine():
# In the future, when there are multiple engines and different ways of
# backing the DataFrame, there will have to be some changed logic here to
# decide these things. In the meantime, we will use the currently supported
# execution engine + backing (Pandas + Ray).
if "MODIN_ENGINE" in os.environ:
# .title allows variants like ray, RAY, Ray
return os.environ["MODIN_ENGINE"].title()
else:
if "MODIN_DEBUG" in os.environ:
return "Python"
else:
try:
import ray
except ImportError:
pass
else:
if version.parse(ray.__version__) != version.parse("0.8.6"):
raise ImportError(
"Please `pip install modin[ray]` to install compatible Ray version."
)
return "Ray"
try:
import dask
import distributed
except ImportError:
raise ImportError(
"Please `pip install modin[ray]` or `modin[dask]` to install an engine"
)
else:
if version.parse(dask.__version__) < version.parse(
"2.1.0"
) or version.parse(distributed.__version__) < version.parse("2.3.2"):
raise ImportError(
"Please `pip install modin[dask]` to install compatible Dask version."
)
return "Dask"
def get_partition_format():
# See note above about engine + backing.
return os.environ.get("MODIN_BACKEND", "Pandas").title()
class Publisher(object):
def __init__(self, name, value):
self.name = name
self.__value = value.title()
self.__subs = set()
self.__once = collections.defaultdict(set)
def subscribe(self, callback):
self.__subs.add(callback)
callback(self)
def once(self, onvalue, callback):
onvalue = onvalue.title()
if onvalue == self.__value:
callback(self)
else:
self.__once[onvalue].add(callback)
def get(self):
return self.__value
def put(self, value):
value = value.title() # normalize the value
oldvalue, self.__value = self.__value, value
if oldvalue != value:
for callback in self.__subs:
callback(self)
try:
once = self.__once[value]
except KeyError:
return
if once:
for callback in once:
callback(self)
del self.__once[value]
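# A minimal usage sketch (illustrative) of the Publisher above; subscribers
# receive the Publisher instance on every change and read the value via get():
#   engine = Publisher(name="engine", value="Ray")
#   engine.subscribe(lambda pub: print(pub.get()))  # fires immediately: "Ray"
#   engine.put("Dask")                              # fires again: "Dask"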
execution_engine = Publisher(name="execution_engine", value=get_execution_engine())
partition_format = Publisher(name="partition_format", value=get_partition_format())
# We don't want these used outside of this file.
del get_execution_engine
del get_partition_format
__version__ = get_versions()["version"]
del get_versions
|
Python
| 0
|
@@ -3738,11 +3738,23 @@
def
+_
put
+_nocallback
(sel
@@ -3866,24 +3866,91 @@
alue, value%0A
+ return oldvalue%0A%0A def _check_callbacks(self, oldvalue):%0A
if o
@@ -3957,19 +3957,26 @@
ldvalue
-!
+=
=
+self.__
value:%0A
@@ -3970,32 +3970,47 @@
lf.__value:%0A
+ return%0A
for call
@@ -4034,36 +4034,32 @@
bs:%0A
-
-
callback(self)%0A
@@ -4069,409 +4069,1270 @@
- try:%0A once = self.__once%5Bvalue%5D%0A except KeyError:%0A return%0A if once:%0A for callback in once:%0A callback(self)%0A del self.__once%5Bvalue%5D%0A%0A%0Aexecution_engine = Publisher(name=%22execution_engine%22, value=get_execution_engine())%0Apartition_format = Publisher(name=%22partition_format%22, value=get_partition_format())
+once = self.__once.pop(self.__value, ())%0A for callback in once:%0A callback(self)%0A%0A def put(self, value):%0A self._check_callbacks(self._put_nocallback(value))%0A%0A%0Aexecution_engine = Publisher(name=%22execution_engine%22, value=get_execution_engine())%0Apartition_format = Publisher(name=%22partition_format%22, value=get_partition_format())%0A%0A%0Adef set_backends(engine=None, partition=None):%0A %22%22%22%0A Method to set the _pair_ of execution engine and partition format simultaneously.%0A This is needed because there might be cases where switching one by one would be%0A impossible, as not all pairs of values are meaningful.%0A%0A The method returns pair of old values, so it is easy to return back.%0A %22%22%22%0A old_engine, old_partition = None, None%0A # defer callbacks until both entities are set%0A if engine is not None:%0A old_engine = execution_engine._put_nocallback(engine)%0A if partition is not None:%0A old_partition = partition_format._put_nocallback(engine)%0A # execute callbacks if something was changed%0A if old_engine is not None:%0A execution_engine._check_callbacks(old_engine)%0A if old_partition is not None:%0A partition_format._check_callbacks(old_partition)%0A%0A return old_engine, old_partition%0A
%0A%0A#
|
0c35c0f7fe126b87eccdf4f69933b84927956658
|
Fix account __type__
|
module/plugins/accounts/XFileSharingPro.py
|
module/plugins/accounts/XFileSharingPro.py
|
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.XFSPAccount import XFSPAccount
class XFileSharingPro(XFSPAccount):
__name__ = "XFileSharingPro"
__type__ = "crypter"
__version__ = "0.01"
__description__ = """XFileSharingPro dummy account plugin for hook"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
def init(self):
pattern = self.core.pluginManager.hosterPlugins[self.__name__]['pattern']
self.HOSTER_NAME = re.match(pattern, self.pyfile.url).group(1).lower()
|
Python
| 0
|
@@ -180,15 +180,15 @@
= %22
-crypter
+account
%22%0A
@@ -207,17 +207,17 @@
_ = %220.0
-1
+2
%22%0A%0A _
@@ -256,13 +256,21 @@
Pro
-dummy
+multi-purpose
acc
@@ -284,17 +284,8 @@
ugin
- for hook
%22%22%22%0A
|
a396d3e7b4de10710c2f2e0beab0ef82acaf866b
|
Create first test
|
web/impact/impact/tests/test_track_api_calls.py
|
web/impact/impact/tests/test_track_api_calls.py
|
from django.test import (
RequestFactory,
TestCase,
)
from mock import patch
class TestTrackAPICalls(TestCase):
def test_when_user_auth(self):
pass
def test_when_no_user_auth(self):
pass
|
Python
| 0
|
@@ -23,28 +23,8 @@
t (%0A
- RequestFactory,%0A
@@ -56,16 +56,22 @@
port
+ mock,
patch%0A%0A
%0Acla
@@ -66,16 +66,68 @@
patch%0A%0A
+from impact.tests.api_test_case import APITestCase%0A%0A
%0Aclass T
@@ -143,16 +143,19 @@
PICalls(
+API
TestCase
@@ -165,51 +165,306 @@
-def test_when_user_auth(self):%0A pass
+@patch('impact.middleware.track_api_calls.TrackAPICalls.process_request.logger')%0A def test_when_user_authenticated(self, logger_info_patch):%0A with self.login(email=self.basic_user().email):%0A response = self.client.get(/)%0A logger_info_patch.info.assert_called_with()%0A
%0A%0A
@@ -491,16 +491,25 @@
ser_auth
+enticated
(self):%0A
|
200e75471f9c49d64cfa558ceed267b77a0b5627
|
Replace cumulative product with exponentiation.
|
cupy/_creation/matrix.py
|
cupy/_creation/matrix.py
|
import numpy
import cupy
from cupy import _core
def diag(v, k=0):
"""Returns a diagonal or a diagonal array.
Args:
v (array-like): Array or array-like object.
k (int): Index of diagonals. Zero indicates the main diagonal, a
positive value an upper diagonal, and a negative value a lower
diagonal.
Returns:
cupy.ndarray: If ``v`` indicates a 1-D array, then it returns a 2-D
array with the specified diagonal filled by ``v``. If ``v`` indicates a
2-D array, then it returns the specified diagonal of ``v``. In latter
case, if ``v`` is a :class:`cupy.ndarray` object, then its view is
returned.
.. seealso:: :func:`numpy.diag`
"""
if isinstance(v, cupy.ndarray):
ndim = v.ndim
else:
ndim = numpy.ndim(v)
if ndim == 1:
v = cupy.array(v)
if ndim == 2:
# to save bandwidth, don't copy non-diag elements to GPU
v = numpy.array(v)
if ndim == 1:
size = v.size + abs(k)
ret = cupy.zeros((size, size), dtype=v.dtype)
ret.diagonal(k)[:] = v
return ret
elif ndim == 2:
return cupy.array(v.diagonal(k))
else:
raise ValueError('Input must be 1- or 2-d.')
def diagflat(v, k=0):
"""Creates a diagonal array from the flattened input.
Args:
v (array-like): Array or array-like object.
k (int): Index of diagonals. See :func:`cupy.diag` for detail.
Returns:
cupy.ndarray: A 2-D diagonal array with the diagonal copied from ``v``.
.. seealso:: :func:`numpy.diagflat`
"""
if numpy.isscalar(v):
v = numpy.asarray(v)
return cupy.diag(v.ravel(), k)
_tri_kernel = _core.ElementwiseKernel(
'int32 m, int32 k',
'T out',
'''
int row = i / m;
int col = i % m;
out = (col <= row + k);
''',
'cupy_tri',
)
def tri(N, M=None, k=0, dtype=float):
"""Creates an array with ones at and below the given diagonal.
Args:
N (int): Number of rows.
M (int): Number of columns. ``M == N`` by default.
k (int): The sub-diagonal at and below which the array is filled. Zero
is the main diagonal, a positive value is above it, and a negative
value is below.
dtype: Data type specifier.
Returns:
cupy.ndarray: An array with ones at and below the given diagonal.
.. seealso:: :func:`numpy.tri`
"""
if M is None:
M = N
out = cupy.empty((N, M), dtype=dtype)
return _tri_kernel(M, k, out)
def tril(m, k=0):
"""Returns a lower triangle of an array.
Args:
m (array-like): Array or array-like object.
k (int): The diagonal above which to zero elements. Zero is the main
diagonal, a positive value is above it, and a negative value is
below.
Returns:
cupy.ndarray: A lower triangle of an array.
.. seealso:: :func:`numpy.tril`
"""
m = cupy.asarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return cupy.where(mask, m, m.dtype.type(0))
def triu(m, k=0):
"""Returns an upper triangle of an array.
Args:
m (array-like): Array or array-like object.
k (int): The diagonal below which to zero elements. Zero is the main
diagonal, a positive value is above it, and a negative value is
below.
Returns:
cupy.ndarray: An upper triangle of an array.
.. seealso:: :func:`numpy.triu`
"""
m = cupy.asarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return cupy.where(mask, m.dtype.type(0), m)
def vander(x, N=None, increasing=False):
"""Returns a Vandermonde matrix.
Args:
x (array-like): 1-D array or array-like object.
N (int, optional): Number of columns in the output.
``N = len(x)`` by default.
increasing (bool, optional): Order of the powers of the columns.
If True, the powers increase from right to left,
if False (the default) they are reversed.
Returns:
cupy.ndarray: A Vandermonde matrix.
.. seealso:: :func:`numpy.vander`
"""
x = cupy.asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = cupy.empty((len(x), N), dtype=numpy.promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
cupy.cumprod(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
# TODO(okuta): Implement mat
# TODO(okuta): Implement bmat
|
Python
| 0.999999
|
@@ -4479,141 +4479,60 @@
-if N %3E 0:%0A tmp%5B:, 0%5D = 1%0A if N %3E 1:%0A tmp%5B:, 1:%5D = x%5B:, None%5D%0A cupy.cumprod(tmp%5B:, 1:%5D, out=tmp%5B:, 1:%5D, axis=1
+cupy.power(x.reshape(-1, 1), cupy.arange(N), out=tmp
)%0A%0A
|
1fc456f00d9895358ee52e967edfdfc2512315d0
|
Update stackexchange.py
|
data_loaders/stackexchange.py
|
data_loaders/stackexchange.py
|
#
# stackexchange.py
# Mich, 2015-03-12
# Copyright (c) 2015 Datacratic Inc. All rights reserved.
#
import requests
import json
from datetime import datetime
def load_data(mldb, payload):
mldb.log("StackExchange data loader")
payload = json.loads(payload)
assert payload['site'], mldb.log("payload: site is undefined")
page = 0
has_more = True
key = None
if 'key' in payload:
key = payload['key']
site = payload['site'].encode("utf-8")
mldb.log("Got site:" + site.encode("utf-8"))
dataset_id = site.encode("utf-8") + '_dataset'
dataset_config = {
'type' : 'mutable',
'id' : dataset_id,
'params': { 'artifactUri' : 'file:///var/mldb/' + site + '_dataset.beh.gz' }
}
url = "/v1/datasets/" + dataset_id
result = mldb.perform("GET", url, [], {})
if result['statusCode'] == 200:
mldb.log("Dataset was already loaded")
return {
'datasetId' : dataset_id,
'count' : '?',
'quotaRemaining' : '?'
}
dataset = mldb.create_dataset(dataset_config)
mldb.log("stackexchange data loader created dataset " + dataset_id)
    now = datetime.now()  # placeholder date; the recorded features are timeless
count = 0
page = 0
quota_remaining = "Unknown"
while has_more:
page += 1
params = {
'site' : site,
'pagesize' : 100,
'page' : page
}
if key:
params['key'] = key
r = requests.get('https://api.stackexchange.com/2.2/questions',
params=params)
assert r.status_code == 200, mldb.log("Failed to fetch questions: "
+ r.content)
result = json.loads(r.content)
has_more = result['has_more']
quota_remaining = result['quota_remaining']
for question in result['items']:
if len(question['tags']) > 1:
triplet = [[question['question_id'], '1', now]]
for tag in question['tags']:
tag = tag.encode("utf-8")
if count == 0:
mldb.log("stackexchange data loader first line: {}, {}"
.format(tag, triplet))
dataset.record_row(tag, triplet)
if count == 0:
mldb.log("stackexchange data loader recorded first row")
count += 1
if count == 20000:
mldb.log("stackexchange data loader stopping at 20k lines")
has_more = False
break
else:
continue
break
dataset.commit()
mldb.log("Fetched {} tags".format(count))
return {
'datasetId' : dataset_id,
'count' : count,
'quotaRemaining' : quota_remaining
}
|
Python
| 0.000001
|
@@ -708,16 +708,17 @@
:///
-var/
mldb
+_data
/' +
|
55333de064cf81ee08d957f059bdd06641d50456
|
Document LocationT
|
transperth/location.py
|
transperth/location.py
|
import re
from lxml import etree
from collections import namedtuple
import requests
from . import BASE
from .utils import format_date, clean
from .exceptions import InvalidStopNumber, InvalidDirection
STOPNUM_RE = re.compile(r'\d{5}')
__all__ = [
'determine_location',
'Location',
'parse_locations',
'LocationT'
]
LocationT = namedtuple('LocationT', 'name,code')
is_location = lambda arg: isinstance(arg, Location)
are_locations = lambda *args: all(map(is_location, args))
def determine_location(from_loco, to_loco):
"""
Takes two location objects, and returns a dict of lists of LocationTs
mapping possible corresponding locations and their codes.
See :func:`parse_locations` for precise output format.
"""
assert are_locations(from_loco, to_loco)
URL = BASE + 'DesktopModules/JourneyPlanner/JP.aspx'
params = {
'jpDate': format_date(),
'jpDirection': 'B',
# 'jpAMPM': 'AM',
# 'jpHour': '11',
# 'jpMinute': '00',
'fSet': 'False',
'fGadget': 'False',
'mode': 't1,b1,f1,s1',
'jpnMaxJourneys': '5',
'jpMaxChanges': '-1',
'jpWalkChange': 'NORMAL',
'jpWheelchairOnly': '0'
}
params.update(from_loco.as_('from'))
params.update(to_loco.as_('to'))
return parse_locations(
requests.get(URL, params=params).text
)
def parse_locations(locations):
"""
Takes the (pure) XML from the locations request and returns in the format;
.. code-block:: python
{
"from": [
LocationT('<NAME>', '<CODE>')
]
}
"""
root = etree.XML(locations)
return {
element.tag.lower(): [
LocationT(
*clean(se.itertext())
)
for se in element
]
for element in root
}
class Location(object):
"""
Represents a location that has not been resolved into a (LocationT)
location code
"""
def __init__(self, data):
"""
        It is recommended that you use one of the specialised methods:
from_address, from_stop, or from_location
"""
self._data = {
'': 'Location', # this is required
'street': '',
'suburb': '',
'location': '',
'stop': ''
}
self._data.update(data)
@classmethod
def from_address(self, street: str, suburb: str) -> Location:
return Location({
'street': street,
'suburb': suburb,
'': 'Point'
})
@classmethod
def from_stop(self, stop_number: 'str or int') -> Location:
"""
Creates a Location from a transperth stop number.
Applies only to bus stops
"""
stop_number = str(stop_number)
if not STOPNUM_RE.match(stop_number):
raise InvalidStopNumber('Invalid stop number')
return Location({
'stop': stop_number,
'': 'Node'
})
@classmethod
def from_location(self, location: str) -> Location:
"""
        Creates a Location from an arbitrary location, such as:
        * Curtin University, Perth
        * Arena Joondalup
        :param location: arbitrary location
"""
return Location({
'location': location,
'': 'Location'
})
def as_(self, direction: str) -> dict:
"""
Formats the _data attribute so that it can be incorporated into
a request to transperth's api
.. code-block:: python
{
'to': str,
'toStreet': str,
'toSuburb': str,
'toLocation': str,
'toStop': str,
}
"""
if direction not in {'to', 'from'}:
            raise InvalidDirection("direction must be 'to' or 'from'")
self._data[''] = self._data[''].title()
return {
direction + k.title(): v
for k, v in self._data.items()
}
def __hash__(self):
items = sorted(
self._data.items(),
key=lambda i: i[0]
)
return hash(','.join(map(':'.join, items)))
def __eq__(self, other):
assert isinstance(other, Location)
return self.__hash__() == other.__hash__()
|
Python
| 0.000001
|
@@ -333,57 +333,8 @@
%0A%5D%0A%0A
-LocationT = namedtuple('LocationT', 'name,code')%0A
%0Ais_
@@ -1823,16 +1823,151 @@
%0A %7D%0A%0A
+class LocationT(namedtuple('LocationT', 'name,code')):%0A %22%22%22%0A Represents a location as considered by the transperth api%0A %22%22%22%0A%0A%0A
%0Aclass L
|
41e2fa118efc65a120af461e636c9ebe7e945ba4
|
Store original text in LogLine named tuples.
|
magichour/api/local/modelgen/preprocess.py
|
magichour/api/local/modelgen/preprocess.py
|
"""
This module contains functions for the initial preprocessing of log files. The functions here are responsible
for reading in data and converting it into LogLine named tuples (see the named tuple definitions in
magichour.api.local.util.namedtuples). Transform named tuples represent a preprocessing step that
replaces a pattern in a log line.
TODO: Add the ability to write custom preprocessing functions other than just transforms.
"""
import datetime
import gzip
import re
import time
import uuid
from magichour.api.local.util.log import get_logger
from magichour.api.local.util.namedtuples import LogLine, Transform
logger = get_logger(__name__)
def _read_lines(file_path):
fp = gzip.open(file_path, 'rb') if file_path.lower().endswith('.gz') else open(file_path, 'r')
for line in fp:
yield line
fp.close()
def read_log_file(file_path, ts_start_index, ts_end_index, ts_format=None, skip_num_chars=0, **kwargs):
"""
    Function to create LogLine named tuples from an input log file. Output from this function and get_transforms() is
    meant to be fed into transform_lines() in order to apply the Transforms to the created LogLines. This
function can be used by itself if you don't want to apply any Transforms, but keep in mind that writing and
applying custom Transforms will assist the templating process.
We make no underlying assumptions about the log file format other than that there is a timestamp associated with
each line. The rest of each line is considered associated text.
This function is a generator yielding LogLines. If you require a full list of LogLines then you will need
to iterate through the generator.
Unless there is an exception, the file is closed internally to the function.
Args:
file_path: path to log file. Open using gzip if file_path ends with .gz
ts_start_index: starting index for parsing timestamp
ts_end_index: end index for parsing timestamp
ts_format: optional datetime format to pass to datetime.datetime.strptime to parse timestamp. If not
specified, then the entire timestamp is parsed as a float.
skip_num_chars: optional number of characters to skip parsing at the beginning of each line (Default = 0)
Returns:
a generator yielding LogLine objects created from each line in file_path
"""
for line in _read_lines(file_path):
line = line[skip_num_chars:]
# Strip out timestamp and use ts_format to create time object.
ts_str = line[ts_start_index:ts_end_index].strip()
if ts_format:
ts = time.mktime(datetime.datetime.strptime(ts_str, ts_format).timetuple())
else:
ts = float(ts_str)
        text = (line[:ts_start_index] + line[ts_end_index:]).strip()  # everything except the timestamp
yield LogLine(str(uuid.uuid4()), ts, text, None, None , None)
def read_auditd_file(file_path, **kwargs):
for line in _read_lines(file_path):
ts = float(re.search(r'audit\(([0-9]+\.[0-9]+)', line).group(1))
yield LogLine(str(uuid.uuid4()), ts, line.rstrip(), None, None, None)
#####
def get_transforms(transforms_file):
"""
    Reads transforms from a file and returns a list of Transform named tuples. The output is meant to be fed into
    transform_lines(). A Transform is a named tuple that represents a pattern to replace in a log line.
    The pattern is replaced by a standard token specified in the transforms file.
The named tuple definition for Transform is:
Transform = namedtuple('Transform', ['id', 'type', 'name', 'transform', 'compiled'])
Args:
file_path: a path to a transforms file. See documentation for proper format for the transforms file.
Returns:
transforms: list of Transform named tuples
"""
transforms = []
with open(transforms_file, 'r') as fp:
for line in fp:
line = line.strip()
if len(line)==0 or line[0]=='#':
continue
t_id, t_type, t_name, t_transform = line.split(',', 3)
transform = Transform(t_id, t_type, t_name, r''+t_transform, re.compile(r''+t_transform))
transforms.append(transform)
return transforms
def transform_lines(lines, transforms):
"""
Function to return transformed LogLine named tuples by applying the specified Transforms on original
    LogLines (as generated by read_log_file()). Note that writing and applying custom Transforms will assist the
templating process and produce higher quality templates.
For Transform.type==REPLACE operations:
The Transform.transform regex must contain at least one capture group, which is saved and replaced with
the Transform.name.
This function is a generator yielding LogLines. If you require a full list of LogLines then you will need
to iterate through the generator.
See the comment in the function as to where to add additional transform types.
Args:
lines: iterable of LogLines named tuples.
transforms: iterable of Transform named tuples.
Returns:
a generator yielding LogLine objects
"""
for logline in lines:
replaceDict = {}
transformed = logline.text
for transform in transforms:
if transform.type == 'REPLACE':
# save first capture group of each match
matches = [m for m in transform.compiled.finditer(transformed)]
if matches:
replaceDict[transform.name] = [m.group(1) for m in matches]
for m in reversed(matches):
# replace first capture group of each match with transform.name; reverse order to keep match start/end aligned
transformed = transformed[:m.start(1)] + transform.name + transformed[m.end(1):]
# elif transform.type == 'EXAMPLE':
# Handle other transform types here.
# do stuff
else:
# catch misspelled transform types
raise NotImplementedError('%s Transform not implemented'%transform.type)
yield LogLine(str(uuid.uuid4()), logline.ts, transformed, None, replaceDict, None)
def cardinality_transformed_lines(lines, verbose=False):
"""
Diagnostic function to compute cardinality of transforms. Computes number of unique lines after
applying transform_lines().
Args:
lines: iterable of LogLine objects output by transform_lines()
verbose: True = print evaluated results (default=False)
Returns:
(countLines, countUniqueLines, percentUniqueLines, uniqLines)
countLines: count of lines
countUniqueLines: count of unique lines
percentUniqueLines: 100.0 * countUniqueLines / countLines
uniqLines: dictionary[uniq_line_text] = number of occurrences of uniq_line
"""
from collections import defaultdict
from pprint import pformat
uniqLines = defaultdict(int)
for logline in lines:
uniqLines[logline.text] += 1
countLines = len(lines)
countUniqueLines = len(uniqLines)
percentUniqueLines = 100.0 * countUniqueLines / countLines
logger.info("Transform cardinality: (%d / %d) = %f%%; (uniqueTransformedLines / totalLines) = %%uniqueTransformedLines" % (countUniqueLines, countLines, percentUniqueLines))
if verbose:
sorted_uniqLines = [(uniqLines[text], text) for text in sorted(uniqLines.keys())]
e = []
for occurrences, text in sorted_uniqLines:
e.append("%10d: %s" % (occurrences, text))
logger.info("Transformed Lines: %d" % countUniqueLines)
logger.info("\n"+pformat(e))
return (countLines, countUniqueLines, percentUniqueLines, uniqLines)
|
Python
| 0.000001
|
@@ -6150,20 +6150,28 @@
formed,
-None
+logline.text
, replac
|
92e2d5530915a60e4fa4f256787301d156cfd05d
|
Replace 'assertFalse(a in b)' with 'assertNotIn(a, b)'
|
tests/unit/core/test_test_set.py
|
tests/unit/core/test_test_set.py
|
# -*- coding:utf-8 -*-
#
# Copyright (c) 2016 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from stevedore import extension
import testtools
from bandit.blacklists import utils
from bandit.core import extension_loader
from bandit.core import test_properties as test
from bandit.core import test_set
@test.checks('Str')
@test.test_id('B000')
def test_plugin():
sets = []
sets.append(utils.build_conf_dict(
'telnet', 'B401', ['telnetlib'],
'A telnet-related module is being imported. Telnet is '
'considered insecure. Use SSH or some other encrypted protocol.',
'HIGH'
))
sets.append(utils.build_conf_dict(
'marshal', 'B302', ['marshal.load', 'marshal.loads'],
'Deserialization with the marshal module is possibly dangerous.'
))
return {'Import': sets, 'ImportFrom': sets, 'Call': sets}
class BanditTestSetTests(testtools.TestCase):
def _make_test_manager(self, plugin):
return extension.ExtensionManager.make_test_instance(
[extension.Extension('test_plugin', None, test_plugin, None)])
def setUp(self):
super(BanditTestSetTests, self).setUp()
mngr = self._make_test_manager(mock.Mock)
self.patchExtMan = mock.patch('stevedore.extension.ExtensionManager')
self.mockExtMan = self.patchExtMan.start()
self.mockExtMan.return_value = mngr
self.old_ext_man = extension_loader.MANAGER
extension_loader.MANAGER = extension_loader.Manager()
self.config = mock.MagicMock()
self.config.get_setting.return_value = None
def tearDown(self):
self.patchExtMan.stop()
super(BanditTestSetTests, self).tearDown()
extension_loader.MANAGER = self.old_ext_man
def test_has_defaults(self):
ts = test_set.BanditTestSet(self.config)
self.assertEqual(len(ts.get_tests('Str')), 1)
def test_profile_include_id(self):
profile = {'include': ['B000']}
ts = test_set.BanditTestSet(self.config, profile)
self.assertEqual(len(ts.get_tests('Str')), 1)
def test_profile_exclude_id(self):
profile = {'exclude': ['B000']}
ts = test_set.BanditTestSet(self.config, profile)
self.assertEqual(len(ts.get_tests('Str')), 0)
def test_profile_include_none(self):
profile = {'include': []} # same as no include
ts = test_set.BanditTestSet(self.config, profile)
self.assertEqual(len(ts.get_tests('Str')), 1)
def test_profile_exclude_none(self):
profile = {'exclude': []} # same as no exclude
ts = test_set.BanditTestSet(self.config, profile)
self.assertEqual(len(ts.get_tests('Str')), 1)
def test_profile_has_builtin_blacklist(self):
ts = test_set.BanditTestSet(self.config)
self.assertEqual(len(ts.get_tests('Import')), 1)
self.assertEqual(len(ts.get_tests('ImportFrom')), 1)
self.assertEqual(len(ts.get_tests('Call')), 1)
def test_profile_exclude_builtin_blacklist(self):
profile = {'exclude': ['B001']}
ts = test_set.BanditTestSet(self.config, profile)
self.assertEqual(len(ts.get_tests('Import')), 0)
self.assertEqual(len(ts.get_tests('ImportFrom')), 0)
self.assertEqual(len(ts.get_tests('Call')), 0)
def test_profile_exclude_builtin_blacklist_specific(self):
profile = {'exclude': ['B302', 'B401']}
ts = test_set.BanditTestSet(self.config, profile)
self.assertEqual(len(ts.get_tests('Import')), 0)
self.assertEqual(len(ts.get_tests('ImportFrom')), 0)
self.assertEqual(len(ts.get_tests('Call')), 0)
def test_profile_filter_blacklist_none(self):
ts = test_set.BanditTestSet(self.config)
blacklist = ts.get_tests('Import')[0]
self.assertEqual(len(blacklist._config['Import']), 2)
self.assertEqual(len(blacklist._config['ImportFrom']), 2)
self.assertEqual(len(blacklist._config['Call']), 2)
def test_profile_filter_blacklist_one(self):
profile = {'exclude': ['B401']}
ts = test_set.BanditTestSet(self.config, profile)
blacklist = ts.get_tests('Import')[0]
self.assertEqual(len(blacklist._config['Import']), 1)
self.assertEqual(len(blacklist._config['ImportFrom']), 1)
self.assertEqual(len(blacklist._config['Call']), 1)
def test_profile_filter_blacklist_include(self):
profile = {'include': ['B001', 'B401']}
ts = test_set.BanditTestSet(self.config, profile)
blacklist = ts.get_tests('Import')[0]
self.assertEqual(len(blacklist._config['Import']), 1)
self.assertEqual(len(blacklist._config['ImportFrom']), 1)
self.assertEqual(len(blacklist._config['Call']), 1)
def test_profile_filter_blacklist_all(self):
profile = {'exclude': ['B401', 'B302']}
ts = test_set.BanditTestSet(self.config, profile)
        # if there is no blacklist data for a node type then we won't add a
# blacklist test to it, as this would be pointless.
self.assertEqual(len(ts.get_tests('Import')), 0)
self.assertEqual(len(ts.get_tests('ImportFrom')), 0)
self.assertEqual(len(ts.get_tests('Call')), 0)
def test_profile_blacklist_compat(self):
data = [utils.build_conf_dict(
'marshal', 'B302', ['marshal.load', 'marshal.loads'],
('Deserialization with the marshal module is possibly '
'dangerous.'))]
profile = {'include': ['B001'], 'blacklist': {'Call': data}}
ts = test_set.BanditTestSet(self.config, profile)
blacklist = ts.get_tests('Call')[0]
self.assertFalse('Import' in blacklist._config)
self.assertFalse('ImportFrom' in blacklist._config)
self.assertEqual(len(blacklist._config['Call']), 1)
|
Python
| 0.999999
|
@@ -6220,37 +6220,37 @@
self.assert
-False
+NotIn
('Import' in bla
@@ -6242,19 +6242,17 @@
'Import'
- in
+,
blackli
@@ -6286,13 +6286,13 @@
sert
-False
+NotIn
('Im
@@ -6300,19 +6300,17 @@
ortFrom'
- in
+,
blackli
|
2152a52f9e3bd2a1924fd76399fca2127c1961a9
|
Enable HTTPS option for social auth
|
tola/settings/local.py
|
tola/settings/local.py
|
from base import *
import os
from os.path import join, normpath
try:
DATABASES = {
'default': {
'ENGINE': os.environ["TOLA_DB_ENGINE"],
'NAME': os.environ["TOLA_DB_NAME"],
'USER': os.environ["TOLA_DB_USER"],
'PASSWORD': os.environ["TOLA_DB_PASS"],
'HOST': os.environ["TOLA_DB_HOST"],
'PORT': os.environ["TOLA_DB_PORT"],
}
}
except KeyError:
# Fallback for tests without environment variables configured
# Depends on os.environ for correct functionality
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'tolaactivity',
}
}
print("DATABASES: {}".format(DATABASES))
# Hosts/domain names that are valid for this site
if os.getenv('TOLA_HOSTNAME') is not None:
ALLOWED_HOSTS = os.getenv('TOLA_HOSTNAME').split(',')
USE_X_FORWARDED_HOST = True if os.getenv('TOLA_USE_X_FORWARDED_HOST') == 'True' else False
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('test', 'test@test.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True if os.getenv('TOLA_DEBUG') == 'True' else False
CORS_ORIGIN_ALLOW_ALL = True
########## SOCIAL AUTH CLIENT CONFIG ###########
GOOGLE_STEP2_URI = ''
GOOGLE_CLIENT_ID = ''
GOOGLE_CLIENT_SECRET = ''
SOCIAL_AUTH_LOGIN_REDIRECT_URL = os.getenv('SOCIAL_AUTH_LOGIN_REDIRECT_URL')
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.getenv('SOCIAL_AUTH_GOOGLE_OAUTH2_KEY')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.getenv('SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET')
SOCIAL_AUTH_MICROSOFT_GRAPH_KEY = os.getenv('SOCIAL_AUTH_MICROSOFT_GRAPH_KEY')
SOCIAL_AUTH_MICROSOFT_GRAPH_SECRET = os.getenv('SOCIAL_AUTH_MICROSOFT_GRAPH_SECRET')
SOCIAL_AUTH_MICROSOFT_GRAPH_REDIRECT_URL = os.getenv('SOCIAL_AUTH_MICROSOFT_GRAPH_REDIRECT_URL')
SOCIAL_AUTH_FIELDS_STORED_IN_SESSION = ['redirect_after_login']
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
########## END CACHE CONFIGURATION
######## If report server then limit navigation and allow access to public dashboards
REPORT_SERVER = False
OFFLINE_MODE = False
NON_LDAP = True
LOCAL_API_TOKEN = "ABC"
# Configure templates for API only version
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [normpath(join(SITE_ROOT, 'templates2'))],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
'builtins': [
'django.contrib.staticfiles.templatetags.staticfiles',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
},
},
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'ERROR',
'class': 'logging.FileHandler',
'filename': os.getenv('TOLA_ERROR_LOG', 'tola_activity_error.log'),
},
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['file', 'console'],
'level': 'INFO',
'propagate': True,
},
},
}
TOLA_ACTIVITY_URL = os.getenv('TOLA_ACTIVITY_URL') # frontend URL
TOLA_TRACK_URL = os.getenv('TOLA_TRACK_URL')
TOLA_TRACK_TOKEN = os.getenv('TOLA_TRACK_TOKEN')
ELASTICSEARCH_URL = os.getenv('ELASTICSEARCH_URL')
ELASTICSEARCH_INDEX_PREFIX = os.getenv('ELASTICSEARCH_INDEX_PREFIX')
|
Python
| 0.000003
|
@@ -1559,16 +1559,121 @@
RET = ''
+%0A%0ASOCIAL_AUTH_REDIRECT_IS_HTTPS = True if os.getenv('SOCIAL_AUTH_REDIRECT_IS_HTTPS') == 'True' else False
%0ASOCIAL_
|
9e577694d2f8665599d590299e58355dd7472011
|
Fix less
|
cupy/logic/comparison.py
|
cupy/logic/comparison.py
|
from cupy.logic import ufunc
def allclose(a, b, rtol=1e-05, atol=1e-08):
# TODO(beam2d): Implement it
raise NotImplementedError
def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False, allocator=None):
# TODO(beam2d): Implement it
raise NotImplementedError
def array_equal(a1, a2):
# TODO(beam2d): Implement it
raise NotImplementedError
def array_equiv(a1, a2):
# TODO(beam2d): Implement it
raise NotImplementedError
greater = ufunc.create_comparison(
'greater', '>',
'''Tests elementwise if ``x1 > x2``.
.. seealso:: :data:`numpy.greater`
''')
greater_equal = ufunc.create_comparison(
'greater_equal', '>=',
'''Tests elementwise if ``x1 >= x2``.
.. seealso:: :data:`numpy.greater_equal`
''')
less = ufunc.create_comparison(
'less', '<'
'''Tests elementwise if ``x1 < x2``.
.. seealso:: :data:`numpy.less`
''')
less_equal = ufunc.create_comparison(
'less_equal', '<=',
'''Tests elementwise if ``x1 <= x2``.
.. seealso:: :data:`numpy.less_equal`
''')
equal = ufunc.create_comparison(
'equal', '==',
'''Tests elementwise if ``x1 == x2``.
.. seealso:: :data:`numpy.equal`
''')
not_equal = ufunc.create_comparison(
'not_equal', '!=',
'''Tests elementwise if ``x1 != x2``.
.. seealso:: :data:`numpy.not_equal`
''')
|
Python
| 0.000092
|
@@ -817,16 +817,17 @@
ss', '%3C'
+,
%0A '''
|
244f3262989b0331a120eb546ca22c9bea9194e4
|
add DownloadDelta to the admin
|
crate_project/apps/packages/admin.py
|
crate_project/apps/packages/admin.py
|
from django.contrib import admin
from packages.models import Package, Release, ReleaseFile, TroveClassifier, PackageURI
from packages.models import ReleaseRequire, ReleaseProvide, ReleaseObsolete, ReleaseURI, ChangeLog
from packages.models import ReadTheDocsPackageSlug
class PackageURIAdmin(admin.TabularInline):
model = PackageURI
extra = 0
class PackageAdmin(admin.ModelAdmin):
inlines = [PackageURIAdmin]
list_display = ["name", "created", "modified", "downloads_synced_on"]
list_filter = ["created", "modified", "downloads_synced_on"]
search_fields = ["name"]
class ReleaseRequireInline(admin.TabularInline):
model = ReleaseRequire
extra = 0
class ReleaseProvideInline(admin.TabularInline):
model = ReleaseProvide
extra = 0
class ReleaseObsoleteInline(admin.TabularInline):
model = ReleaseObsolete
extra = 0
class ReleaseFileInline(admin.TabularInline):
model = ReleaseFile
extra = 0
class ReleaseURIInline(admin.TabularInline):
model = ReleaseURI
extra = 0
class ReleaseAdmin(admin.ModelAdmin):
inlines = [ReleaseURIInline, ReleaseFileInline, ReleaseRequireInline, ReleaseProvideInline, ReleaseObsoleteInline]
list_display = ["__unicode__", "package", "version", "summary", "author", "author_email", "maintainer", "maintainer_email", "created", "modified"]
list_filter = ["created", "modified", "hidden"]
search_fields = ["package__name", "version", "summary", "author", "author_email", "maintainer", "maintainer_email"]
raw_id_fields = ["package"]
class TroveClassifierAdmin(admin.ModelAdmin):
list_display = ["trove"]
search_fields = ["trove"]
class ReleaseFileAdmin(admin.ModelAdmin):
list_display = ["release", "type", "python_version", "downloads", "comment", "created", "modified"]
list_filter = ["type", "created", "modified"]
search_fields = ["release__package__name", "filename", "digest"]
raw_id_fields = ["release"]
class ChangeLogAdmin(admin.ModelAdmin):
list_display = ["package", "release", "type", "created", "modified"]
list_filter = ["type", "created", "modified"]
search_fields = ["package__name"]
raw_id_fields = ["package", "release"]
class ReadTheDocsPackageSlugAdmin(admin.ModelAdmin):
list_display = ["package", "slug"]
search_fields = ["package__name", "slug"]
raw_id_fields = ["package"]
admin.site.register(Package, PackageAdmin)
admin.site.register(Release, ReleaseAdmin)
admin.site.register(ReleaseFile, ReleaseFileAdmin)
admin.site.register(TroveClassifier, TroveClassifierAdmin)
admin.site.register(ChangeLog, ChangeLogAdmin)
admin.site.register(ReadTheDocsPackageSlug, ReadTheDocsPackageSlugAdmin)
|
Python
| 0
|
@@ -240,16 +240,31 @@
s import
+ DownloadDelta,
ReadThe
@@ -1968,24 +1968,242 @@
release%22%5D%0A%0A%0A
+class DownloadDeltaAdmin(admin.ModelAdmin):%0A list_display = %5B%22file%22, %22date%22, %22delta%22%5D%0A list_filter = %5B%22date%22%5D%0A search_fields = %5B%22file__release__package__name%22, %22file__filename%22%5D%0A raw_id_fields = %5B%22file%22%5D%0A%0A%0A
class Change
@@ -2800,24 +2800,79 @@
ifierAdmin)%0A
+admin.site.register(DownloadDelta, DownloadDeltaAdmin)%0A
admin.site.r
|
4c703480fe395ddef5faa6d388a472b7053f26af
|
Add debug command line option.
|
jskom/__main__.py
|
jskom/__main__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import asyncio
import logging
from hypercorn.asyncio import serve
from hypercorn.config import Config
from jskom import app, init_app
log = logging.getLogger("jskom.main")
def run(host, port):
# use 127.0.0.1 instead of localhost to avoid delays related to ipv6.
# http://werkzeug.pocoo.org/docs/serving/#troubleshooting
init_app()
config = Config()
config.bind = ["{}:{}".format(host, port)]
asyncio.run(serve(app, config), debug=True)
def main():
logging.basicConfig(format='%(asctime)s %(levelname)-7s %(name)-15s %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser(description='Jskom')
# use 127.0.0.1 instead of localhost to avoid delays related to ipv6.
# http://werkzeug.pocoo.org/docs/serving/#troubleshooting
parser.add_argument('--host', help='Hostname or IP to listen on',
default='127.0.0.1')
parser.add_argument('--port', help='Port to listen on',
type=int, default=5000)
args = parser.parse_args()
log.info("Using args: %s", args)
run(args.host, args.port)
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -640,13 +640,12 @@
ing.
-DEBUG
+INFO
)%0A%0A
@@ -700,16 +700,133 @@
'Jskom')
+%0A parser.add_argument(%0A '--debug', help='Enable debug logging',%0A default=False, action='store_true')
%0A%0A #
@@ -979,16 +979,25 @@
rgument(
+%0A
'--host'
@@ -1034,32 +1034,16 @@
en on',%0A
-
@@ -1087,16 +1087,25 @@
rgument(
+%0A
'--port'
@@ -1132,32 +1132,16 @@
en on',%0A
-
@@ -1195,16 +1195,283 @@
e_args()
+%0A loglevel = logging.DEBUG if args.debug else logging.INFO%0A logging.getLogger().setLevel(loglevel)%0A%0A if not args.debug:%0A # asyncio logs quite verbose also on INFO level, so set to WARNING.%0A logging.getLogger('asyncio').setLevel(logging.WARNING)
%0A%0A lo
|
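
The diff above gates the log level on a new --debug flag and quietens asyncio when the flag is off. The pattern in isolation, as a runnable sketch independent of jskom:

import argparse
import logging

parser = argparse.ArgumentParser(description='Example')
parser.add_argument(
    '--debug', help='Enable debug logging',
    default=False, action='store_true')
args = parser.parse_args()

logging.basicConfig(format='%(asctime)s %(levelname)-7s %(name)-15s %(message)s')
logging.getLogger().setLevel(logging.DEBUG if args.debug else logging.INFO)

if not args.debug:
    # asyncio logs quite verbosely even at INFO, so cap it at WARNING
    # unless the user explicitly asked for debug output.
    logging.getLogger('asyncio').setLevel(logging.WARNING)

logging.getLogger('example').info('debug=%s', args.debug)
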
cdcc807ecd7126f533bbc01721276d62a4a72732
|
fix all_docs dbs to work after flip
|
corehq/couchapps/__init__.py
|
corehq/couchapps/__init__.py
|
from corehq.preindex import CouchAppsPreindexPlugin
from django.conf import settings
CouchAppsPreindexPlugin.register('couchapps', __file__, {
'form_question_schema': 'meta',
'users_extra': (settings.USERS_GROUPS_DB, settings.NEW_USERS_GROUPS_DB),
'noneulized_users': (settings.USERS_GROUPS_DB, settings.NEW_USERS_GROUPS_DB),
'all_docs': (settings.USERS_GROUPS_DB, settings.NEW_USERS_GROUPS_DB),
})
|
Python
| 0
|
@@ -353,32 +353,12 @@
': (
-settings.USERS_GROUPS_DB
+None
, se
|
39f26d6bb46eeb96c54881ab9c0147051328b8e8
|
fix another misuse of the 1.0 DB API.
|
trac/tests/env.py
|
trac/tests/env.py
|
from __future__ import with_statement
from trac import db_default
from trac.core import ComponentManager
from trac.env import Environment
import os.path
import unittest
import tempfile
import shutil
class EnvironmentCreatedWithoutData(Environment):
def __init__(self, path, create=False, options=[]):
ComponentManager.__init__(self)
self.path = path
self.systeminfo = []
self._href = self._abs_href = None
if create:
self.create(options)
else:
self.verify()
self.setup_config()
class EmptyEnvironmentTestCase(unittest.TestCase):
def setUp(self):
env_path = os.path.join(tempfile.gettempdir(), 'trac-tempenv')
self.env = EnvironmentCreatedWithoutData(env_path, create=True)
def tearDown(self):
with self.env.db_query as db:
db.close()
self.env.shutdown() # really closes the db connections
shutil.rmtree(self.env.path)
def test_get_version(self):
"""Testing env.get_version"""
assert self.env.get_version() is False, self.env.get_version()
class EnvironmentTestCase(unittest.TestCase):
def setUp(self):
env_path = os.path.join(tempfile.gettempdir(), 'trac-tempenv')
self.env = Environment(env_path, create=True)
def tearDown(self):
with self.env.db_query as db:
db.close()
self.env.shutdown() # really closes the db connections
shutil.rmtree(self.env.path)
def test_get_version(self):
"""Testing env.get_version"""
assert self.env.get_version() == db_default.db_version
def test_get_known_users(self):
"""Testing env.get_known_users"""
with self.env.db_transaction as db:
db.executemany("INSERT INTO session VALUES (%s,%s,0)",
[('123', 0),('tom', 1), ('joe', 1), ('jane', 1)])
db.executemany("INSERT INTO session_attribute VALUES (%s,%s,%s,%s)",
[('123', 0, 'email', 'a@example.com'),
('tom', 1, 'name', 'Tom'),
('tom', 1, 'email', 'tom@example.com'),
('joe', 1, 'email', 'joe@example.com'),
('jane', 1, 'name', 'Jane')])
users = {}
for username, name, email in self.env.get_known_users():
users[username] = (name, email)
assert not users.has_key('anonymous')
self.assertEqual(('Tom', 'tom@example.com'), users['tom'])
self.assertEqual((None, 'joe@example.com'), users['joe'])
self.assertEqual(('Jane', None), users['jane'])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(EnvironmentTestCase, 'test'))
suite.addTest(unittest.makeSuite(EmptyEnvironmentTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
Python
| 0.000005
|
@@ -810,69 +810,8 @@
f):%0A
- with self.env.db_query as db:%0A db.close()%0A
@@ -1272,69 +1272,8 @@
f):%0A
- with self.env.db_query as db:%0A db.close()%0A
|
a27e667dedeaaa0aefadc3328149f311bb277c45
|
Update bottlespin.py
|
bottlespin/bottlespin.py
|
bottlespin/bottlespin.py
|
import discord
from discord.ext import commands
from random import choice
class Bottlespin:
"""Spins a bottle and lands on a random user."""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True, no_pm=True, alias=["bottlespin"])
async def spin(self, ctx, role):
"""Spin the bottle"""
roles = [ctx.message.server.roles]
role = discord.Role.name
if role in roles:
await self.bot.say(str(role))
await self.bot.say(str(roles))
author = ctx.message.author
server = ctx.message.server
if len(server.members) < 2:
await self.bot.say("`Not enough people are around to spin the bottle`")
return
if role in roles:
roleexist = True
else:
            await self.bot.say("`{} is not an existing role`".format(role))
return
if roleexist:
target = [m for m in server.members if m != author and role in [
s.name for s in m.roles] and str(m.status) == "online" or str(m.status) == "idle"]
else:
target = [m for m in server.members if m != author and str(
m.status) == "online" or str(m.status) == "idle"]
if not target:
if role:
                await self.bot.say("`Sorry I couldn't find anyone to point the bottle at with the role {}`".format(role))
            else:
                await self.bot.say("`Sorry I couldn't find anyone to point the bottle at`")
return
else:
target = choice(list(target))
            await self.bot.say("`{0.display_name}#{0.discriminator} spun the bottle and it landed on {1.display_name}#{1.discriminator}`".format(author, target))
def setup(bot):
n = Bottlespin(bot)
bot.add_cog(n)
|
Python
| 0
|
@@ -334,24 +334,62 @@
bottle%22%22%22%0A%0A
+ await self.bot.say(str(role))%0A
role
@@ -431,32 +431,41 @@
-role = discord.Role.name
+await self.bot.say(str(roles%5B1%5D))
%0A
|
7a25ace4851da30a252842b5d5e3a7efee90ce00
|
Raise error when /boundaries/set-slug URL points to a nonexistent set
|
boundaryservice/views.py
|
boundaryservice/views.py
|
from django.contrib.gis.db import models
from django.http import Http404
from boundaryservice.base_views import (ModelListView, ModelDetailView,
ModelGeoListView, ModelGeoDetailView)
from boundaryservice.models import BoundarySet, Boundary
class BoundarySetListView(ModelListView):
""" e.g. /boundary-set/ """
filterable_fields = ['name', 'domain', 'hierarchy']
model = BoundarySet
class BoundarySetDetailView(ModelDetailView):
""" e.g. /boundary-set/federal-electoral-districts/ """
model = BoundarySet
def get_object(self, request, qs, slug):
try:
return qs.get(slug=slug)
except BoundarySet.DoesNotExist:
raise Http404
class BoundaryListView(ModelGeoListView):
""" e.g. /boundary/federal-electoral-districts/
or /boundary/federal-electoral-districts/centroid """
filterable_fields = ['external_id', 'name']
allowed_geo_fields = ('shape', 'simple_shape', 'centroid')
default_geo_filter_field = 'shape'
model = Boundary
def filter(self, request, qs):
qs = super(BoundaryListView, self).filter(request, qs)
if 'intersects' in request.GET:
(set_slug, slug) = request.GET['intersects'].split('/')
try:
shape = Boundary.objects.filter(slug=slug, set=set_slug).values_list('shape', flat=True)[0]
except IndexError:
raise Http404
qs = qs.filter(models.Q(shape__covers=shape) | models.Q(shape__overlaps=shape))
if 'touches' in request.GET:
(set_slug, slug) = request.GET['touches'].split('/')
try:
shape = Boundary.objects.filter(slug=slug, set=set_slug).values_list('shape', flat=True)[0]
except IndexError:
raise Http404
qs = qs.filter(shape__touches=shape)
if 'sets' in request.GET:
set_slugs = request.GET['sets'].split(',')
qs = qs.filter(set__in=set_slugs)
return qs
def get_qs(self, request, set_slug=None):
qs = super(BoundaryListView, self).get_qs(request)
if set_slug:
return qs.filter(set=set_slug)
return qs
class BoundaryObjectGetterMixin(object):
model = Boundary
def get_object(self, request, qs, set_slug, slug):
try:
return qs.get(slug=slug, set=set_slug)
except Boundary.DoesNotExist:
raise Http404
class BoundaryDetailView(ModelDetailView, BoundaryObjectGetterMixin):
""" e.g. /boundary/federal-electoral-districts/outremont/ """
def __init__(self):
super(BoundaryDetailView, self).__init__()
self.base_qs = self.base_qs.defer('shape', 'simple_shape', 'centroid')
class BoundaryGeoDetailView(ModelGeoDetailView, BoundaryObjectGetterMixin):
""" e.g /boundary/federal-electoral-districts/outremont/shape """
allowed_geo_fields = ('shape', 'simple_shape', 'centroid')
|
Python
| 0
|
@@ -2157,16 +2157,117 @@
t_slug:%0A
+ if not BoundarySet.objects.filter(slug=set_slug).exists():%0A raise Http404%0A
|
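
The guard added above matters because qs.filter(set=set_slug) on a nonexistent set returns an empty queryset, which is indistinguishable from an existing-but-empty set. Probing with exists() first turns the nonexistent case into a 404. A sketch of the guard with the model passed in explicitly; it assumes a configured Django project and is not runnable standalone:

from django.http import Http404

def filter_by_set(qs, set_slug, boundary_set_model):
    # exists() issues a cheap SELECT ... LIMIT 1 instead of fetching
    # rows, then distinguishes "no such set" from "set with no rows".
    if set_slug:
        if not boundary_set_model.objects.filter(slug=set_slug).exists():
            raise Http404
        return qs.filter(set=set_slug)
    return qs
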
c63463ff040f79c605d6c0414261527dda3ed00a
|
Switch to new babel version in require test.
|
tests/test_jsinterpreter.py
|
tests/test_jsinterpreter.py
|
import unittest
from dukpy._dukpy import JSRuntimeError
import dukpy
from diffreport import report_diff
class TestJSInterpreter(unittest.TestCase):
def test_interpreter_keeps_context(self):
interpreter = dukpy.JSInterpreter()
ans = interpreter.evaljs("var o = {'value': 5}; o")
assert ans == {'value': 5}
ans = interpreter.evaljs("o.value += 1; o")
assert ans == {'value': 6}
def test_call_python(self):
def _say_hello(num, who):
return 'Hello ' + ' '.join([who]*num)
interpreter = dukpy.JSInterpreter()
interpreter.export_function('say_hello', _say_hello)
res = interpreter.evaljs("call_python('say_hello', 3, 'world')")
assert res == 'Hello world world world', res
def test_module_loader(self):
interpreter = dukpy.JSInterpreter()
res = interpreter.evaljs('''
babel = require('babel-6.14.0.min');
babel.transform(dukpy.es6code, {presets: ["es2015"]}).code;
''', es6code='let i=5;')
expected = '''"use strict";
var i = 5;'''
assert res == expected, report_diff(expected, res)
def test_module_loader_unexisting(self):
interpreter = dukpy.JSInterpreter()
with self.assertRaises(JSRuntimeError) as err:
interpreter.evaljs("require('missing_module');")
assert 'cannot find module: missing_module' in str(err.exception)
|
Python
| 0
|
@@ -915,10 +915,10 @@
l-6.
-14
+26
.0.m
|
18fb4c40d4460edf437aabfe7b88afa2905972f6
|
fix test docstring + pyflakes
|
mne/tests/test_source_estimate.py
|
mne/tests/test_source_estimate.py
|
import os.path as op
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from ..datasets import sample
from .. import stats
from .. import read_stc, write_stc, read_w, write_w, SourceEstimate, morph_data
from ..source_estimate import spatio_temporal_tris_connectivity, \
spatio_temporal_src_connectivity
examples_folder = op.join(op.dirname(__file__), '..', '..', 'examples')
data_path = sample.data_path(examples_folder)
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-lh.stc')
def test_io_stc():
"""Test IO for STC files
"""
stc = read_stc(fname)
write_stc("tmp.stc", stc['tmin'], stc['tstep'],
stc['vertices'], stc['data'])
stc2 = read_stc("tmp.stc")
assert_array_almost_equal(stc['data'], stc2['data'])
assert_array_almost_equal(stc['tmin'], stc2['tmin'])
assert_array_almost_equal(stc['vertices'], stc2['vertices'])
assert_array_almost_equal(stc['tstep'], stc2['tstep'])
def test_io_w():
"""Test IO for w files
"""
w_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-oct-6-fwd-sensmap')
src = SourceEstimate(w_fname)
src.save('tmp', ftype='w')
src2 = SourceEstimate('tmp-lh.w')
assert_array_almost_equal(src.data, src2.data)
assert_array_almost_equal(src.lh_vertno, src2.lh_vertno)
assert_array_almost_equal(src.rh_vertno, src2.rh_vertno)
def test_stc_arithmetic():
"""Test arithmetic for STC files
"""
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
stc = SourceEstimate(fname)
data = stc.data.copy()
out = list()
for a in [data, stc]:
a = a + a * 3 + 3 * a - a ** 2 / 2
a += a
a -= a
a /= 2 * a
a *= -a
a += 2
a -= 1
a *= -1
a /= 2
a **= 3
out.append(a)
assert_array_equal(out[0], out[1].data)
assert_array_equal(stc.sqrt().data, np.sqrt(stc.data))
def test_morph_data():
"""Test morphing of data
"""
subject_from = 'sample'
subject_to = 'fsaverage'
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
stc_from = SourceEstimate(fname)
stc_to = morph_data(subject_from, subject_to, stc_from,
grade=3, smooth=12)
stc_to.save('%s_audvis-meg' % subject_to)
mean_from = stc_from.data.mean(axis=0)
mean_to = stc_to.data.mean(axis=0)
assert_true(np.corrcoef(mean_to, mean_from).min() > 0.99)
def test_spatio_temporal_tris_connectivity():
"""Test spatio-temporal connectivity"""
tris = np.array([[0, 1, 2], [3, 4, 5]])
connectivity = spatio_temporal_tris_connectivity(tris, 2)
x = [1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
components = stats.cluster_level._get_components(np.array(x), connectivity)
assert_array_equal(components,
[0, 0, -2, -2, -2, -2, 0, -2, -2, -2, -2, 1])
def test_spatio_temporal_src_connectivity():
"""Test spatio-temporal connectivity"""
tris = np.array([[0, 1, 2], [3, 4, 5]])
src = [dict(), dict()]
connectivity = spatio_temporal_tris_connectivity(tris, 2)
src[0]['use_tris'] = np.array([[0, 1, 2]])
src[1]['use_tris'] = np.array([[0, 1, 2]])
connectivity2 = spatio_temporal_src_connectivity(src, 2)
assert_array_equal(connectivity.todense(), connectivity2.todense())
|
Python
| 0.000001
|
@@ -232,25 +232,8 @@
stc,
- read_w, write_w,
Sou
@@ -1823,17 +1823,16 @@
a /=
-
2 * a%0A
@@ -2655,32 +2655,47 @@
ral connectivity
+ from triangles
%22%22%22%0A tris = n
@@ -3104,16 +3104,36 @@
ectivity
+ from source spaaces
%22%22%22%0A
|
3681ada3917d5811e1e959270e1df0edea7ebf55
|
Update __init__.py
|
mapclientplugins/smoothfitstep/__init__.py
|
mapclientplugins/smoothfitstep/__init__.py
|
'''
MAP Client Plugin
'''
__version__ = '0.1.0'
__author__ = 'Richard Christie'
__stepname__ = 'smoothfit'
__location__ = ''
# import class that derives itself from the step mountpoint.
from mapclientplugins.smoothfitstep import step
# Import the resource file when the module is loaded,
# this enables the framework to use the step icon.
from . import resources_rc
|
Python
| 0.000072
|
@@ -377,8 +377,10 @@
urces_rc
+%0D%0A
|
379d2df1041605d3c8a21d543f9955601ee07558
|
Add threading to syncer
|
imageledger/management/commands/syncer.py
|
imageledger/management/commands/syncer.py
|
from collections import namedtuple
import itertools
import logging
from elasticsearch import helpers
from django.core.management.base import BaseCommand, CommandError
from django.db import connection, transaction
from imageledger import models, search
console = logging.StreamHandler()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
MAX_CONNECTION_RETRIES = 10
RETRY_WAIT = 5 # Number of seconds to wait before retrying
DEFAULT_CHUNK_SIZE = 1000
class Command(BaseCommand):
can_import_settings = True
requires_migrations_checks = True
def add_arguments(self, parser):
parser.add_argument("--verbose",
action="store_true",
default=False,
help="Be very chatty and run logging at DEBUG")
parser.add_argument("--chunk-size",
dest="chunk_size",
default=DEFAULT_CHUNK_SIZE,
type=int,
help="The number of records to batch process at once")
parser.add_argument("--with-fingerprinting",
dest="with_fingerprinting",
action="store_true",
help="Whether to run the expensive perceptual hash routine as part of syncing")
def handle(self, *args, **options):
if options['verbose']:
log.addHandler(console)
log.setLevel(logging.DEBUG)
self.sync_all_images(chunk_size=options['chunk_size'], with_fingerprinting=options['with_fingerprinting'])
def sync_all_images(self, chunk_size=DEFAULT_CHUNK_SIZE, with_fingerprinting=False, num_iterations=1000):
"""Sync all of the images, sorting from least-recently-synced"""
count = 0
while count < num_iterations:
imgs = models.Image.objects.all().order_by('-last_synced_with_source')[0:chunk_size]
for img in imgs:
img.sync(attempt_perceptual_hash=with_fingerprinting)
count += 1
|
Python
| 0.000001
|
@@ -59,16 +59,69 @@
logging
+%0Afrom multiprocessing.dummy import Pool as ThreadPool
%0A%0Afrom e
@@ -301,16 +301,17 @@
search%0A%0A
+%0A
console
@@ -1762,20 +1762,17 @@
rations=
-1000
+5
):%0A
@@ -1851,64 +1851,433 @@
-count = 0%0A while count %3C num_iterations:%0A
+pool = ThreadPool(4)%0A starts = %5Bi * chunk_size for i in range(0, num_iterations)%5D%0A pool.starmap(do_sync, zip(starts, itertools.repeat(chunk_size, num_iterations), itertools.repeat(with_fingerprinting, num_iterations)))%0A pool.close()%0A pool.join()%0A%0Adef do_sync(start, chunk_size, with_fingerprinting):%0A end = start + chunk_size%0A log.info(%22Starting sync in range from %25d to %25d...%22, start, end)%0A
@@ -2351,30 +2351,19 @@
e')%5B
-0:chunk_size%5D%0A
+start:end%5D%0A
@@ -2379,24 +2379,16 @@
n imgs:%0A
-
@@ -2445,31 +2445,4 @@
ng)%0A
- count += 1%0A
|
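
multiprocessing.dummy, used in the diff above, is a thread pool behind the multiprocessing Pool API, so starmap fans each (start, chunk_size, with_fingerprinting) tuple out to a worker thread. A self-contained sketch with a dummy work function standing in for the real per-chunk sync:

import itertools
from multiprocessing.dummy import Pool as ThreadPool

def do_sync(start, chunk_size, with_fingerprinting):
    # Stand-in for the real sync: each call would handle the records
    # in [start, start + chunk_size).
    return 'synced %d-%d (fingerprinting=%s)' % (
        start, start + chunk_size, with_fingerprinting)

def sync_all(chunk_size=1000, with_fingerprinting=False, num_iterations=5):
    pool = ThreadPool(4)
    starts = [i * chunk_size for i in range(num_iterations)]
    results = pool.starmap(do_sync, zip(
        starts,
        itertools.repeat(chunk_size, num_iterations),
        itertools.repeat(with_fingerprinting, num_iterations)))
    pool.close()
    pool.join()
    return results

print(sync_all())
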
33b7e9371305c4171594c21c154cd5724ea013cb
|
allow segment and overlap to be specified as parameters
|
scripts/nanopolish_makerange.py
|
scripts/nanopolish_makerange.py
|
import sys
from Bio import SeqIO
recs = [ (rec.name, len(rec.seq)) for rec in SeqIO.parse(open(sys.argv[1]), "fasta")]
SEGMENT_LENGTH = 50000
OVERLAP_LENGTH = 200
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
for n in xrange(0, length, SEGMENT_LENGTH):
if ( n + SEGMENT_LENGTH) > length:
print "%s:%d-%d" % (name, n, length - 1)
else:
print "%s:%d-%d" % (name, n, n + SEGMENT_LENGTH + OVERLAP_LENGTH)
|
Python
| 0
|
@@ -4,16 +4,32 @@
ort sys%0A
+import argparse%0A
from Bio
@@ -43,16 +43,386 @@
SeqIO%0A%0A
+parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')%0Aparser.add_argument('--segment-length', type=int, default=50000)%0Aparser.add_argument('--overlap-length', type=int, default=200)%0Aargs, extra = parser.parse_known_args()%0Aif len(extra) != 1:%0A sys.stderr.write(%22Error: a genome file is expected%5Cn%22)%0Afilename = extra%5B0%5D%0A%0A
recs = %5B
@@ -479,19 +479,16 @@
pen(
-sys.argv%5B1%5D
+filename
), %22
@@ -518,34 +518,64 @@
H =
-50000%0AOVERLAP_LENGTH = 200
+args.segment_length%0AOVERLAP_LENGTH = args.overlap_length
%0A%0Afo
|
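
Stripped of the FASTA parsing, the script's windowing logic is a few lines: fixed-size segments, each extended by the overlap, with the final segment clipped to the sequence end. A dependency-free sketch of that arithmetic:

def make_ranges(name, length, segment_length=50000, overlap_length=200):
    # Every segment except the last carries the overlap so downstream
    # consumers can stitch results back together without gaps.
    for n in range(0, length, segment_length):
        if n + segment_length > length:
            yield '%s:%d-%d' % (name, n, length - 1)
        else:
            yield '%s:%d-%d' % (name, n, n + segment_length + overlap_length)

for r in make_ranges('contig1', 120000):
    print(r)
# contig1:0-50200
# contig1:50000-100200
# contig1:100000-119999
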
0d474b3945c1b98d897c57184ff1b7a33f6c9a3d
|
add Caucasus to EU tourism chart
|
dataviz/eutourism.py
|
dataviz/eutourism.py
|
import sys
sys.path.append('..')
from charts import *
atlas = pd.read_csv("datasets/countries.csv").split_columns('country', "|").split_rows('country').set_index('country')
df = pd.read_csv("datasets/eutourism.csv").set_index("Destination").select(lambda c: atlas.continent[c] == "Europe")
df = df.fillna({'2015': df['2014']})[['2015','2015.1']]
df.columns = ['arrivals', 'receipts']
df = pd.concat([df, atlas[['population']]], join='inner', axis=1).filter_boolean(lambda df: df.population > 100000)
df['arrivals_pc'] = df['arrivals'] / df['population'] * 1000000
df['receipts_pc'] = df['receipts'] / df['population'] * 1000000
arrivals = df[['arrivals']].sort_values('arrivals', ascending=False)
receipts = df[['receipts']].sort_values('receipts', ascending=False)
arrivals_pc = df[['arrivals_pc']].sort_values('arrivals_pc', ascending=False)
receipts_pc = df[['receipts_pc']].sort_values('receipts_pc', ascending=False)
def rlabel(df, r):
return Image.from_row([
Image.new("RGBA", (180,0), 0).pin(Image.from_text("{}. {}".format(r+1, df.index[r].replace("United Kingdom", "UK")).replace("Czech Republic", "Czechia").upper(), arial(18, bold=True), padding=2), (0,0), align=(0,0.5)),
Image.from_url_with_cache(atlas.flag[df.index[r]]).resize((80,60)).pad(1, "grey").pad((2,0), "white")
], padding=1, bg="white")
def clabel(formatter, filterfn, col, c, r, v):
return Image.from_text(formatter(v), arial(16, bold=False), col, padding=3) if filterfn(v) else None
imgs = []
for title, df, ft, mx in [("total tourist arrivals (in millions)", arrivals,lambda v: "{:.1f}m".format(v/1000), 6000),
("tourist arrivals per thousand population", arrivals_pc,lambda v: "{:,}".format(int(v)).replace(',', ' '), 500),
("total tourist receipts (in US$ billions)", receipts,lambda v: "${:.1f}b".format(v/1000), 5000),
("tourist receipts per capita", receipts_pc,lambda v: "${:,}".format(int(v)).replace(',', ' '), 600)
]:
img = bar_chart(df, 62, 600, horizontal=True, spacing=1, colors=["#003399"], ymax=df.iloc[0][0], rlabels=partial(rlabel, df),
clabels={ BarChartLabelPosition.INSIDE : partial(clabel, ft, lambda v: v > mx, "white"),
BarChartLabelPosition.OUTSIDE : partial(clabel, ft, lambda v: v <= mx, "black") })
img = Image.from_column([Image.from_text(title.upper(), arial(24, bold=True)), img], padding=5, bg="white")
imgs.append(img)
chart = Image.from_row(imgs, bg="white", padding=5)
title = Image.from_column([
Image.from_text("International tourism in Europe, 2015".upper(), arial(90, bold=True), padding=(5,10,5,2)),
Image.from_text("figures from the United Nations World Tourism Organization*", arial(60, italics=True), padding=(5,2,5,10))], bg="white")
footer = Image.from_text("* chart excludes countries and territories with fewer than 100 thousand residents; arrival figures for Sweden and Slovakia are from 2014.", arial(32, italics=True), "black", padding=10)
img = Image.from_column([title, chart, footer], bg="white", padding=20)
img = img.resize_fixed_aspect(scale=0.5)
img.place(Image.from_text("/u/Udzu", font("arial", 14), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/eutourists.png")
|
Python
| 0
|
@@ -283,16 +283,61 @@
%22Europe%22
+ or c in %5B%22Georgia%22, %22Azerbaijan%22, %22Armenia%22%5D
)%0Adf = d
|
9061c071981c1123df60963dcfa83663d4495223
|
Remove thumbnail testing workarounds.
|
coverart_redirect/request.py
|
coverart_redirect/request.py
|
# Copyright (C) 2011 Lukas Lalinsky
# Copyright (C) 2011 Robert Kaye
# Distributed under the MIT license, see the LICENSE file for details.
import re
import os
import sys
import coverart_redirect
from coverart_redirect.utils import statuscode
from wsgiref.util import shift_path_info, request_uri
# FIXME: fix http status codes.
class CoverArtRedirect(object):
''' Handles index and redirect requests '''
def __init__(self, config, conn):
self.config = config
self.conn = conn
self.cmd = None
self.proto = None
if not self.config.database.musicbrainz_schema:
print "please configure musicbrainz database schema"
sys.exit (1)
if not self.config.database.coverart_schema:
print "please configure cover art archive database schema"
sys.exit (1)
def resolve_mbid (self, entity, mbid):
"""Handle the GID redirect. Query the database to see if the given release has been
merged into another release. If so, return the redirected MBID, otherwise return
the original MBID. """
schema = self.config.database.musicbrainz_schema
mbid = mbid.lower ()
query = """
SELECT release.gid
FROM """ + schema + """.release
JOIN """ + schema + """.release_gid_redirect
ON release_gid_redirect.new_id = release.id
WHERE release_gid_redirect.gid = %(mbid)s;
"""
row = self.conn.execute (query, { "mbid": mbid }).first ()
if row:
            return row[0]
return mbid
def resolve_cover(self, entity, mbid, type):
'''Get the frontiest or backiest cover image.'''
mbz = self.config.database.musicbrainz_schema
caa = self.config.database.coverart_schema
query = """
SELECT cover_art.id
FROM """ + caa + """.cover_art
JOIN """ + mbz + """.release ON release = release.id
WHERE release.gid = %(mbid)s
AND is_""" + type + """ = true;
"""
row = self.conn.execute (query, { "mbid": mbid }).first ()
if row:
return unicode(row[0]) + u".jpg"
return None
def handle_index(self):
'''Serve up the one static index page'''
try:
f = open(os.path.join(self.config.static_path, "index"))
except IOError:
return [statuscode (500), "Internal Server Error"]
txt = f.read()
f.close()
return [statuscode (200), txt]
def handle_dir(self, entity, mbid):
'''When the user requests no file, redirect to the root of the bucket to give the user an
        index of what is in the bucket'''
return [statuscode (307), "%s/mbid-%s/index.json" % (self.config.s3.prefix, mbid)]
def handle_redirect(self, entity, mbid, filename):
""" Handle the 307 redirect. """
if not filename:
return [statuscode (400), "no filename specified"]
# ------------------------------------------------------------------------------------------------
# Remove me for deploying this service for real. This code is for testing only!
# ------------------------------------------------------------------------------------------------
# REMOVE ME for testing only!
filename = filename.replace("-250", "_thumb")
filename = filename.replace("-500", "")
return [statuscode (307), "%s/mbid-%s/mbid-%s-%s" % (
self.config.s3.prefix, mbid, mbid, filename)]
def handle(self, environ):
'''Handle a request, parse and validate arguments and dispatch the request'''
entity = shift_path_info(environ)
if not entity:
return self.handle_index()
if entity != 'release':
return [statuscode (400), "Only release entities are currently supported"]
req_mbid = shift_path_info(environ)
if not req_mbid:
return [statuscode (400), "no MBID specified."]
if not re.match('[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$', req_mbid):
return [statuscode (400), "invalid MBID specified."]
mbid = self.resolve_mbid (entity, req_mbid)
if not mbid:
return [statuscode (404), "No %s found with identifier %s" % (entity, req_mbid)]
filename = shift_path_info(environ)
if not filename:
return self.handle_dir(entity, mbid)
if filename.startswith ('front'):
filename = self.resolve_cover (entity, mbid, 'front')
if not filename:
return [statuscode (404),
"No front cover image found for %s with identifier %s" % (entity, req_mbid)]
elif filename.startswith ('back'):
filename = self.resolve_cover (entity, mbid, 'back')
if not filename:
return [statuscode (404),
"No back cover image found for %s with identifier %s" % (entity, req_mbid)]
(code, response) = self.handle_redirect(entity, mbid, filename.encode('utf8'))
return code, response
if __name__ == '__main__':
from wsgiref.simple_server import make_server
httpd = make_server('localhost', 8051, application)
httpd.serve_forever()
|
Python
| 0
|
@@ -3040,451 +3040,8 @@
%22%5D%0A%0A
- # ------------------------------------------------------------------------------------------------%0A # Remove me for deploying this service for real. This code is for testing only!%0A # ------------------------------------------------------------------------------------------------%0A # REMOVE ME for testing only!%0A filename = filename.replace(%22-250%22, %22_thumb%22)%0A filename = filename.replace(%22-500%22, %22%22)%0A%0A
|
105a413b18456f9a505dd1ed4bf515987b4792d2
|
add --force option to management command to force all files to be pushed
|
mediasync/management/commands/syncmedia.py
|
mediasync/management/commands/syncmedia.py
|
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import mediasync
class Command(BaseCommand):
help = "Sync local media with S3"
args = '[options]'
requires_model_validation = False
option_list = BaseCommand.option_list + (
make_option("-f", "--force", dest="force", help="force files to sync", action="store_true"),
)
def handle(self, *args, **options):
force = options.get('force') or False
try:
mediasync.sync(force=force)
except ValueError, ve:
raise CommandError('%s\nUsage is mediasync %s' % (ve.message, self.args))
|
Python
| 0
|
@@ -320,17 +320,17 @@
ption(%22-
-f
+F
%22, %22--fo
|
47322c47197ce5f0300ed8efd9333e19fa8d45fd
|
Set initial user
|
tracker/models.py
|
tracker/models.py
|
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from django.dispatch.dispatcher import receiver
from django.db.models.signals import post_save, pre_save
from django.core.mail import EmailMultiAlternatives, EmailMessage, send_mail
from django.core.validators import RegexValidator
class Issue(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='issue_user')
handler = models.ForeignKey(User, on_delete=models.CASCADE, related_name='issue_handler',
default=User.objects.filter(username='admin')[0].id)
issue_name = models.CharField(max_length=25, blank=False)
description = models.CharField(max_length=100, blank=False)
time_raised = models.DateTimeField(default=timezone.now)
PRIORITY = (
('low', 'Low'),
('medium', 'Medium'),
('high', 'High'),
)
priority = models.CharField(max_length=7, choices=PRIORITY, default='low')
STATUS = (
('in_progress', 'In Progress'),
('resolved', 'Resolved'),
)
status = models.CharField(max_length=12, choices=STATUS, default='in-progress')
time_resolved = models.DateTimeField(default=timezone.now)
comments = models.CharField(max_length=255, default='', blank=True)
is_seen = models.BooleanField(default=False)
def __unicode__(self):
return unicode(self.issue_name)
class Notification(models.Model):
    user_to = models.ForeignKey(User, on_delete=models.CASCADE, related_name='notification_to')
note = models.CharField(max_length=255)
note_time = models.DateTimeField(default=timezone.now)
def __unicode__(self):
return unicode(self.note)
class Maintenance(models.Model):
request_name = models.CharField(max_length=25)
requested_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='request_user')
maintainer = models.ManyToManyField('Maintainer', default=User.objects.filter(username='admin')[0].id, blank=True, related_name='maintainer')
description = models.CharField(max_length=200)
time_requested = models.DateTimeField(default=timezone.now)
APPROVAL_STATUS = (
('approved', 'Approved'),
('rejected', 'Rejected'),
)
approval_status = models.CharField(max_length=10, choices=APPROVAL_STATUS, null=True, blank=True)
PROGRESS = (
('in-progress', 'In Progress'),
('resolved', 'Resolved'),
)
progress = models.CharField(max_length=12, choices=PROGRESS, default='in-progress')
time_resolved = models.DateTimeField(default=timezone.now)
comments = models.CharField(max_length=150, default='', blank=True)
#photo = models.URLField(max_length=200)
is_seen = models.BooleanField(default=False)
def __unicode__(self):
return unicode(self.request_name)
class Maintainer(models.Model):
contact_regex = RegexValidator(regex=r'^\+?1?\d{9,15}', message='Phone number must be in the format +123456789 (from 9 to 15 digits).')
user = models.ForeignKey(User, on_delete=models.CASCADE)
#maintenance = models.ForeignKey(Maintenance, on_delete=models.CASCADE, related_name='maintenance')
contact = models.CharField(validators=[contact_regex], max_length=16)
def __unicode__(self):
return unicode(self.user)
class Document(models.Model):
title = models.CharField(max_length=60)
url = models.URLField()
user = models.ForeignKey(User, on_delete=models.CASCADE)
document_category = models.ForeignKey('DocumentCategory', on_delete=models.PROTECT)
#keyword = models.CharField(max_length=100)
def __unicode__(self):
return unicode(self.title)
class DocumentCategory(models.Model):
category_name = models.CharField(max_length=50)
def __unicode__(self):
return unicode(self.category_name)
@receiver(pre_save, sender=Issue)
def issue_resolved(sender, instance, **kwargs):
if instance.status == 'resolved' or instance.comments:
instance.time_resolved = timezone.now()
instance.is_seen = True
@receiver(post_save, sender=Issue)
def issue_update(sender, instance, **kwargs):
if kwargs['created']:
subject = 'Issue Raised'
from_email = 'sterappdev@gmail.com'
to = instance.user.email
text_content = 'An issue has been raised on your site. Fulfill your responsibilities as admin.'
#email = EmailMessage(subject, text_content, from_email, [to],)
#email.send()
send_mail(subject, text_content, from_email, [to])
if instance.status == 'resolved':
message = 'Your issue, %s, has been resolved.' % instance.issue_name
new_notification = Notification.objects.create(
user_to = instance.user,
note = message,
note_time = timezone.now()
)
new_notification.save()
@receiver(post_save, sender=Maintenance)
def issue_saved(sender, instance, **kwargs):
if instance.approval_status:
user_email = instance.requested_user.email
subject = 'Status Update'
from_email = 'sterappdev@gmail.com'
text_content = 'There has been a change on your maintenance request: %s .' % (instance.request_name)
#email = EmailMessage(subject, text_content, from_email, [user_email], send(fail_silently)=True,)
#email.send()
send_mail(subject, text_content, from_email, [user_email])
|
Python
| 0.000001
|
@@ -580,32 +580,37 @@
er', %0A%09%09default=
+1)%0A%09#
User.objects.fil
@@ -1877,24 +1877,27 @@
ainer',
+%0A%09%09
default=
User.obj
@@ -1888,16 +1888,22 @@
default=
+1,%0A%09%09#
User.obj
@@ -1939,16 +1939,19 @@
%5B0%5D.id,
+%0A%09%09
blank=Tr
|
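
The underlying pitfall in this diff: default=User.objects.filter(username='admin')[0].id runs a database query the moment models.py is imported, which breaks migrations and any deployment where the admin row does not exist yet. Two safer spellings, sketched against the models above; the names IssueSketch and default_handler_id are illustrative, and the sketch assumes a configured Django project:

from django.contrib.auth.models import User
from django.db import models

def default_handler_id():
    # A callable default is evaluated lazily, at row-creation time,
    # and returns None instead of raising on an empty table.
    return User.objects.filter(username='admin').values_list('id', flat=True).first()

class IssueSketch(models.Model):
    # Option 1 (what the diff does): hard-code the primary key.
    handler = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name='sketch_handler',
        default=1)
    # Option 2: default=default_handler_id (note: no parentheses, so
    # the query runs when a row is created, not at import time).
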
a8f7507d34ab4f2cbefeadc64ccf88fb90696966
|
Kill speedups, not available on Ubuntu.
|
convert.py
|
convert.py
|
# Convert NYC building footprints and addresses into importable OSM files.
from fiona import collection
from lxml import etree
from lxml.etree import tostring
from rtree import index
from shapely.geometry import asShape
from shapely import speedups
from sys import argv
from glob import glob
import re
from pprint import pprint
speedups.enable()
# Converts given building and address shapefiles into corresponding OSM XML
# files.
def convert(buildingIn, addressIn, osmOut):
# Load all addresses.
addresses = []
with collection(addressIn, "r") as input:
for address in input:
shape = asShape(address['geometry'])
shape.original = address
addresses.append(shape)
# Load and index all buildings.
buildingIdx = index.Index()
buildings = []
with collection(buildingIn, "r") as input:
for building in input:
building['shape'] = asShape(building['geometry'])
building['properties']['addresses'] = []
buildings.append(building)
buildingIdx.add(len(buildings) - 1, building['shape'].bounds)
# Map addresses to buildings.
for address in addresses:
for i in buildingIdx.intersection(address.bounds):
if buildings[i]['shape'].contains(address):
buildings[i]['properties']['addresses'].append(
address.original)
# Generates a new osm id.
osmIds = dict(node = -1, way = -1, rel = -1)
def newOsmId(type):
osmIds[type] = osmIds[type] - 1
return osmIds[type]
## Formats multi part house numbers
def formatHousenumber(p):
def suffix(part1, part2, hyphen_type=None):
if not part2:
return str(part1)
if hyphen_type == 'U': # unit numbers
return part1 + '-' + part2
if len(part2) == 1 and part2.isalpha(): # single letter extensions
return part1 + part2
return part1 + ' ' + part2 # All others
number = suffix(p['HOUSE_NUMB'], p['HOUSE_NU_1'], p['HYPHEN_TYP'])
if p['HOUSE_NU_2']:
number = number + ' - ' + suffix(p['HOUSE_NU_2'], p['HOUSE_NU_3'])
return number
# Converts an address
def convertAddress(address):
result = dict()
if all (k in address for k in ('HOUSE_NUMB', 'STREET_NAM')):
if address['HOUSE_NUMB']:
result['addr:housenumber'] = formatHousenumber(address)
if address['STREET_NAM']:
if re.match('^(\d+)\w\w$', address['STREET_NAM']): # Test for 2ND, 14TH, 21ST
streetname = address['STREET_NAM'].lower()
else:
streetname = address['STREET_NAM'].title()
result['addr:street'] = streetname
if address['ZIPCODE']:
result['addr:postcode'] = str(int(address['ZIPCODE']))
return result
# Appends new node or returns existing if exists.
nodes = {}
def appendNewNode(coords, osmXml):
rlon = int(float(coords[0]*10**7))
rlat = int(float(coords[1]*10**7))
if (rlon, rlat) in nodes:
return nodes[(rlon, rlat)]
node = etree.Element('node', visible = 'true', id = str(newOsmId('node')))
node.set('lon', str(coords[0]))
node.set('lat', str(coords[1]))
nodes[(rlon, rlat)] = node
osmXml.append(node)
return node
def appendNewWay(coords, osmXml):
way = etree.Element('way', visible='true', id=str(newOsmId('way')))
firstNid = 0
for i, coord in enumerate(coords):
if i == 0: continue # the first and last coordinate are the same
node = appendNewNode(coord, osmXml)
if i == 1: firstNid = node.get('id')
way.append(etree.Element('nd', ref=node.get('id')))
way.append(etree.Element('nd', ref=firstNid)) # close way
osmXml.append(way)
return way
# Appends an address to a given node or way.
def appendAddress(address, element):
for k, v in convertAddress(address['properties']).iteritems():
element.append(etree.Element('tag', k=k, v=v))
# Appends a building to a given OSM xml document.
def appendBuilding(building, address, osmXml):
# Export building, create multipolygon if there are interior shapes.
interiors = []
try:
way = appendNewWay(list(building['shape'].exterior.coords), osmXml)
for interior in building['shape'].interiors:
interiors.append(appendNewWay(list(interior.coords), osmXml))
except AttributeError:
way = appendNewWay(list(building['shape'][0].exterior.coords), osmXml)
for interior in building['shape'][0].interiors:
interiors.append(appendNewWay(list(interior.coords), osmXml))
if len(interiors) > 0:
relation = etree.Element('relation', visible='true', id=str(newOsmId('way')))
relation.append(etree.Element('member', type='way', role='outer', ref=way.get('id')))
for interior in interiors:
relation.append(etree.Element('member', type='way', role='inner', ref=interior.get('id')))
relation.append(etree.Element('tag', k='type', v='multipolygon'))
osmXml.append(relation)
way = relation
way.append(etree.Element('tag', k='building', v='yes'))
if 'HEIGHT_ROO' in building['properties']:
height = round(((building['properties']['HEIGHT_ROO'] * 12) * 0.0254), 1)
way.append(etree.Element('tag', k='height', v=str(height)))
if 'BIN' in building['properties']:
way.append(etree.Element('tag', k='nycdoitt:bin', v=str(building['properties']['BIN'])))
if address: appendAddress(address, way)
    # Export buildings & addresses. Only export an address with its building if there is exactly
# one address per building. Export remaining addresses as individual nodes.
addresses = []
osmXml = etree.Element('osm', version='0.6', generator='alex@mapbox.com')
for building in buildings:
address = None
if len(building['properties']['addresses']) == 1:
address = building['properties']['addresses'][0]
else:
addresses.extend(building['properties']['addresses'])
appendBuilding(building, address, osmXml)
if (len(addresses) > 0):
for address in addresses:
node = appendNewNode(address['geometry']['coordinates'], osmXml)
appendAddress(address, node)
with open(osmOut, 'w') as outFile:
outFile.writelines(tostring(osmXml, pretty_print=True, xml_declaration=True, encoding='UTF-8'))
print "Exported " + osmOut
# Run conversions. Expects an chunks/addresses-[district id].shp for each
# chunks/buildings-[district id].shp. Optionally convert only one election district.
if (len(argv) == 2):
convert(
'chunks/buildings-%s.shp' % argv[1],
'chunks/addresses-%s.shp' % argv[1],
'osm/buildings-addresses-%s.osm' % argv[1])
else:
buildingFiles = glob("chunks/buildings-*.shp")
for buildingFile in buildingFiles:
matches = re.match('^.*-(\d+)\.shp$', buildingFile).groups(0)
convert(
buildingFile,
'chunks/addresses-%s.shp' % matches[0],
'osm/buildings-addresses-%s.osm' % matches[0])
|
Python
| 0
|
@@ -326,27 +326,8 @@
nt%0A%0A
-speedups.enable()%0A%0A
# Co
|
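
The commit above removes the speedups call entirely. Shapely also exposes an availability flag, so a guarded enable is a common alternative when the C extensions may or may not be compiled (a sketch of that alternative, not what this commit did):

from shapely import speedups

# speedups.available is False when the compiled extensions were not
# built (the Ubuntu situation the commit message refers to).
if speedups.available:
    speedups.enable()
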
0be6bddf8c92c461af57e7c61c2378c817fb0143
|
Make oppetarkiv work with --all-episodes again
|
lib/svtplay_dl/service/oppetarkiv.py
|
lib/svtplay_dl/service/oppetarkiv.py
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re
from svtplay_dl.service.svtplay import Svtplay
from svtplay_dl.log import log
class OppetArkiv(Svtplay):
supported_domains = ['oppetarkiv.se']
def find_all_episodes(self, options):
page = 1
data = self.get_urldata()
match = re.search(r'"/etikett/titel/([^"/]+)', data)
if match is None:
match = re.search(r'"http://www.oppetarkiv.se/etikett/titel/([^/]+)/', self.url)
if match is None:
log.error("Couldn't find title")
return
program = match.group(1)
more = True
episodes = []
n = 0
if options.all_last > 0:
sort = "tid_fallande"
else:
sort = "tid_stigande"
while more:
url = "http://www.oppetarkiv.se/etikett/titel/%s/?sida=%s&sort=%s&embed=true" % (program, page, sort)
data = self.http.request("get", url).text
visa = re.search(r'svtXColorDarkLightGrey', data)
if not visa:
more = False
regex = re.compile(r'href="(/video/[^"]+)"')
for match in regex.finditer(data):
if n == options.all_last:
break
episodes.append("http://www.oppetarkiv.se%s" % match.group(1))
n += 1
page += 1
return episodes
|
Python
| 0
|
@@ -693,28 +693,8 @@
(1)%0A
- more = True%0A
@@ -711,16 +711,16 @@
es = %5B%5D%0A
+
%0A
@@ -856,19 +856,19 @@
while
-mor
+Tru
e:%0A
@@ -1028,13 +1028,8 @@
url)
-.text
%0A
@@ -1041,111 +1041,87 @@
-visa = re.search(r'svtXColorDarkLightGrey', data)%0A if not visa:%0A more = False
+if data.status_code == 404:%0A break%0A%0A data = data.text
%0A
|
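
After this change the loop no longer scrapes an HTML marker to decide when to stop; it simply pages until the server answers 404. The bare pattern with requests, using the same URL shape as the file above:

import requests

def fetch_all_pages(program, sort='tid_stigande'):
    page = 1
    while True:
        url = ('http://www.oppetarkiv.se/etikett/titel/%s/'
               '?sida=%s&sort=%s&embed=true' % (program, page, sort))
        response = requests.get(url)
        if response.status_code == 404:
            break  # walked past the last page
        yield response.text
        page += 1
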
478c2ffc09fb189c4f36ccb82aad945b3db5f9b3
|
Fix secondaryFiles v1.1 updater (#1131)
|
cwltool/update.py
|
cwltool/update.py
|
from __future__ import absolute_import
import copy
import re
from typing import (Any, Callable, Dict, MutableMapping, MutableSequence,
Optional, Tuple, Union)
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad import validate
from schema_salad.ref_resolver import Loader # pylint: disable=unused-import
from six import string_types
from six.moves import urllib
from typing_extensions import Text
from schema_salad.sourceline import SourceLine
from .loghandler import _logger
# move to a regular typing import when Python 3.3-3.6 is no longer supported
from .utils import visit_class, visit_field, aslist
def v1_0to1_1(doc, loader, baseuri): # pylint: disable=unused-argument
# type: (Any, Loader, Text) -> Tuple[Any, Text]
"""Public updater for v1.0 to v1.1."""
doc = copy.deepcopy(doc)
rewrite = {
"http://commonwl.org/cwltool#WorkReuse": "WorkReuse",
"http://arvados.org/cwl#ReuseRequirement": "WorkReuse",
"http://commonwl.org/cwltool#TimeLimit": "ToolTimeLimit",
"http://commonwl.org/cwltool#NetworkAccess": "NetworkAccess",
"http://commonwl.org/cwltool#InplaceUpdateRequirement": "InplaceUpdateRequirement",
"http://commonwl.org/cwltool#LoadListingRequirement": "LoadListingRequirement"
}
def rewrite_requirements(t):
if "requirements" in t:
for r in t["requirements"]:
if r["class"] in rewrite:
r["class"] = rewrite[r["class"]]
if "hints" in t:
for r in t["hints"]:
if r["class"] in rewrite:
r["class"] = rewrite[r["class"]]
if "steps" in t:
for s in t["steps"]:
rewrite_requirements(s)
def update_secondaryFiles(t):
if isinstance(t, MutableSequence):
return [{"pattern": p} for p in t]
else:
return {"pattern": t}
def fix_inputBinding(t):
for i in t["inputs"]:
if "inputBinding" in i:
ib = i["inputBinding"]
for k in list(ib.keys()):
if k != "loadContents":
_logger.warning(SourceLine(ib, k).makeError("Will ignore field '%s' which is not valid in %s inputBinding" %
(k, t["class"])))
del ib[k]
visit_class(doc, ("CommandLineTool","Workflow"), rewrite_requirements)
visit_class(doc, ("ExpressionTool","Workflow"), fix_inputBinding)
visit_field(doc, "secondaryFiles", update_secondaryFiles)
upd = doc
if isinstance(upd, MutableMapping) and "$graph" in upd:
upd = upd["$graph"]
for proc in aslist(upd):
proc.setdefault("hints", [])
proc["hints"].insert(0, {"class": "NetworkAccess", "networkAccess": True})
proc["hints"].insert(0, {"class": "LoadListingRequirement", "loadListing": "deep_listing"})
if "cwlVersion" in proc:
del proc["cwlVersion"]
return (doc, "v1.1")
def v1_1_0dev1to1_1(doc, loader, baseuri): # pylint: disable=unused-argument
return (doc, "v1.1")
UPDATES = {
u"v1.0": v1_0to1_1,
u"v1.1": None
} # type: Dict[Text, Optional[Callable[[Any, Loader, Text], Tuple[Any, Text]]]]
DEVUPDATES = {
u"v1.0": v1_0to1_1,
u"v1.1.0-dev1": v1_1_0dev1to1_1,
u"v1.1": None
} # type: Dict[Text, Optional[Callable[[Any, Loader, Text], Tuple[Any, Text]]]]
ALLUPDATES = UPDATES.copy()
ALLUPDATES.update(DEVUPDATES)
INTERNAL_VERSION = u"v1.1"
def identity(doc, loader, baseuri): # pylint: disable=unused-argument
# type: (Any, Loader, Text) -> Tuple[Any, Union[Text, Text]]
"""Default, do-nothing, CWL document upgrade function."""
return (doc, doc["cwlVersion"])
def checkversion(doc, # type: Union[CommentedSeq, CommentedMap]
metadata, # type: CommentedMap
enable_dev # type: bool
):
# type: (...) -> Tuple[Union[CommentedSeq, CommentedMap], Text]
"""Check the validity of the version of the give CWL document.
Returns the document and the validated version string.
"""
cdoc = None # type: Optional[CommentedMap]
if isinstance(doc, CommentedSeq):
if not isinstance(metadata, CommentedMap):
raise Exception("Expected metadata to be CommentedMap")
lc = metadata.lc
metadata = copy.deepcopy(metadata)
metadata.lc.data = copy.copy(lc.data)
metadata.lc.filename = lc.filename
metadata[u"$graph"] = doc
cdoc = metadata
elif isinstance(doc, CommentedMap):
cdoc = doc
else:
raise Exception("Expected CommentedMap or CommentedSeq")
version = metadata[u"cwlVersion"]
cdoc["cwlVersion"] = version
if version not in UPDATES:
if version in DEVUPDATES:
if enable_dev:
pass
else:
keys = list(UPDATES.keys())
keys.sort()
raise validate.ValidationException(
u"Version '%s' is a development or deprecated version.\n "
"Update your document to a stable version (%s) or use "
"--enable-dev to enable support for development and "
"deprecated versions." % (version, ", ".join(keys)))
else:
raise validate.ValidationException(
u"Unrecognized version %s" % version)
return (cdoc, version)
def update(doc, loader, baseuri, enable_dev, metadata):
# type: (Union[CommentedSeq, CommentedMap], Loader, Text, bool, Any) -> Union[CommentedSeq, CommentedMap]
if (metadata.get("http://commonwl.org/cwltool#original_cwlVersion") or
(isinstance(doc, CommentedMap) and doc.get("http://commonwl.org/cwltool#original_cwlVersion"))):
return doc
(cdoc, originalversion) = checkversion(doc, metadata, enable_dev)
version = originalversion
(cdoc, version) = checkversion(doc, metadata, enable_dev)
nextupdate = identity # type: Optional[Callable[[Any, Loader, Text], Tuple[Any, Text]]]
while nextupdate:
(cdoc, version) = nextupdate(cdoc, loader, baseuri)
nextupdate = ALLUPDATES[version]
cdoc[u"cwlVersion"] = version
metadata[u"cwlVersion"] = version
metadata[u"http://commonwl.org/cwltool#original_cwlVersion"] = originalversion
cdoc[u"http://commonwl.org/cwltool#original_cwlVersion"] = originalversion
return cdoc
|
Python
| 0
|
@@ -1864,34 +1864,109 @@
rn %5B
-%7B%22pattern%22: p%7D for p in t%5D
+update_secondaryFiles(p) for p in t%5D%0A elif isinstance(t, MutableMapping):%0A return t
%0A
|
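
The fix makes update_secondaryFiles total over the three shapes a v1.0 document can present: a bare pattern string, a list of entries, or a mapping that is already in v1.1 form. The recursion in isolation, using plain list/dict checks so it runs standalone:

def update_secondary_files(t):
    # v1.0 allowed a plain pattern or a list of patterns; v1.1 wants
    # {"pattern": ...} objects. Recurse into lists, pass mappings through.
    if isinstance(t, list):
        return [update_secondary_files(p) for p in t]
    elif isinstance(t, dict):
        return t
    return {'pattern': t}

print(update_secondary_files('^.bai'))
print(update_secondary_files(['^.bai', {'pattern': '^.crai'}]))
# {'pattern': '^.bai'}
# [{'pattern': '^.bai'}, {'pattern': '^.crai'}]
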
a52b4097dfcb9fea26af0bc994426baecb97efc1
|
update image if streetview url
|
croplands_api/views/api/locations.py
|
croplands_api/views/api/locations.py
|
from croplands_api import api
from croplands_api.models import Location
from processors import api_roles, add_user_to_posted_data, remove_relations, debug_post
from records import save_record_state_to_history
from croplands_api.tasks.records import get_ndvi
def process_records(result=None, **kwargs):
"""
This processes all records that may have been posted as a relation of the location.
:param result:
:param kwargs:
:return: None
"""
for record in result['records']:
save_record_state_to_history(record)
def merge_same_location_lat_long(data=None, **kwargs):
"""
This preprocessor checks if the location already exists.
:param data:
:param kwargs:
:return:
"""
# TODO
pass
def change_field_names(data=None, **kwargs):
if 'photos' in data:
data['images'] = data['photos']
del data['photos']
def create(app):
api.create_api(Location,
app=app,
collection_name='locations',
methods=['GET', 'POST', 'PATCH', 'DELETE'],
preprocessors={
'POST': [change_field_names, add_user_to_posted_data, debug_post],
'PATCH_SINGLE': [api_roles(['mapping', 'validation', 'admin']), remove_relations],
'PATCH_MANY': [api_roles('admin'), remove_relations],
'DELETE': [api_roles('admin')]
},
postprocessors={
'POST': [process_records],
'PATCH_SINGLE': [],
'PATCH_MANY': [],
'DELETE': []
},
results_per_page=10)
|
Python
| 0.000005
|
@@ -225,38 +225,81 @@
api.
-tasks.records import get_ndvi%0A
+utils.s3 import upload_image%0Aimport requests%0Aimport uuid%0Aimport cStringIO
%0A%0Ade
@@ -927,16 +927,496 @@
tos'%5D%0A%0A%0A
+def check_for_street_view_image(data=None, **kwargs):%0A if 'images' not in data:%0A return%0A%0A for image in data%5B'images'%5D:%0A if 'source' in image and image%5B'source'%5D == 'streetview':%0A try:%0A r = requests.get(image%5B'url'%5D)%0A if r.status_code == 200:%0A url = 'images/streetview/' + str(uuid.uuid4()) + '.jpg'%0A image%5B'url'%5D = url%0A except Exception as e:%0A print(e)%0A%0A%0A
def crea
@@ -1714,16 +1714,77 @@
bug_post
+,%0A check_for_street_view_image
%5D,%0A
@@ -1864,16 +1864,56 @@
dmin'%5D),
+%0A
remove_
|
1d305388fd1c673096e327ea2c0259b955d64156
|
Update test_step_7.py
|
pySDC/tests/test_tutorials/test_step_7.py
|
pySDC/tests/test_tutorials/test_step_7.py
|
import os
import subprocess
import pytest
from pySDC.tutorial.step_7.B_pySDC_with_mpi4pyfft import main as main_B
@pytest.mark.fenics
def test_A():
from pySDC.tutorial.step_7.A_pySDC_with_FEniCS import main as main_A
main_A()
@pytest.mark.parallel
def test_B():
main_B()
@pytest.mark.parallel
def test_C_1x1():
# try to import MPI here, will fail if things go wrong (and not in the subprocess part)
import mpi4py
# Set python path once
my_env = os.environ.copy()
my_env['PYTHONPATH'] = '../../..:.'
cwd = '.'
# set up new/empty file for output
fname = 'step_7_C_out_1x1.txt'
f = open(fname, 'w')
f.close()
num_procs = 1
num_procs_space = 1
cmd = (
'mpirun -np '
+ str(num_procs)
+ ' python pySDC/tutorial/step_7/C_pySDC_with_PETSc.py '
+ str(num_procs_space)
+ ' '
+ fname
).split()
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd)
p.wait()
assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (p.returncode, num_procs)
@pytest.mark.parallel
def test_C_1x2():
# try to import MPI here, will fail if things go wrong (and not in the subprocess part)
import mpi4py
# Set python path once
my_env = os.environ.copy()
my_env['PYTHONPATH'] = '../../..:.'
cwd = '.'
fname = 'step_7_C_out_1x2.txt'
f = open(fname, 'w')
f.close()
num_procs = 2
num_procs_space = 2
cmd = (
'mpirun -np '
+ str(num_procs)
+ ' python pySDC/tutorial/step_7/C_pySDC_with_PETSc.py '
+ str(num_procs_space)
+ ' '
+ fname
).split()
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd)
p.wait()
assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (p.returncode, num_procs)
@pytest.mark.parallel
def test_C_2x2():
# try to import MPI here, will fail if things go wrong (and not in the subprocess part)
import mpi4py
# Set python path once
my_env = os.environ.copy()
my_env['PYTHONPATH'] = '../../..:.'
cwd = '.'
fname = 'step_7_C_out_2x2.txt'
f = open(fname, 'w')
f.close()
num_procs = 4
num_procs_space = 2
cmd = (
'mpirun -np '
+ str(num_procs)
+ ' python pySDC/tutorial/step_7/C_pySDC_with_PETSc.py '
+ str(num_procs_space)
+ ' '
+ fname
).split()
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd)
p.wait()
assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (p.returncode, num_procs)
|
Python
| 0.000014
|
@@ -228,24 +228,25 @@
main_A()%0A%0A
+%0A
@pytest.mark
@@ -283,16 +283,17 @@
in_B()%0A%0A
+%0A
@pytest.
|
928d498b5f67970f9ec75d62068e8cbec0fdc352
|
Update python3, flake8
|
ni_scanner.py
|
ni_scanner.py
|
from ConfigParser import SafeConfigParser
from utils.cli import CLI
from api.queue import Queue
from api.nerds import NerdsApi
from scanner.host import HostScanner
from scanner.exceptions import ScannerExeption
from utils.url import url_concat
import logging
FORMAT = '%(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('ni_scanner')
def process_host(queue, nerds_api):
item = queue.next("Host")
while item:
try:
queue.processing(item)
scanner = HostScanner(item)
nerds = scanner.process()
if not nerds:
# Error occured :(
logger.error("Unable to scan item "+str(item))
queue.failed(item)
else:
logger.debug("Posting nerds data")
nerds_api.send(nerds)
queue.done(item)
except ScannerExeption as e:
logger.error("%s",e)
failed(queue,item)
except Exception as e:
logger.error("Unable to process host %s got error: %s",item,str(e))
failed(queue,item)
item = queue.next("Host")
def failed(queue,item):
try:
queue.failed(item)
except Exception as e:
logger.error("Problem with reaching NI, got error: %s", e)
def main():
args = CLI().options()
try:
config = SafeConfigParser()
config.readfp(open(args.config))
except IOError as (errno, strerror):
logger.error("Config file '%s' is missing", args.config)
return None
## ready :)
api_user = config.get("NI", "api_user")
api_key = config.get("NI", "api_key")
queue_url = url_concat(config.get("NI", "url"), "scan_queue/")
queue = Queue(queue_url, api_user, api_key)
nerds_url = url_concat(config.get("NI", "url"), "nerds/")
nerds_api = NerdsApi(nerds_url, api_user, api_key)
process_host(queue, nerds_api)
if __name__ == "__main__":
main()
|
Python
| 0.000004
|
@@ -381,17 +381,16 @@
ner')%0A%0A%0A
-%0A
def proc
@@ -475,17 +475,16 @@
try:
-
%0A
@@ -550,17 +550,16 @@
er(item)
-
%0A
@@ -704,10 +704,13 @@
tem
-%22+
+%25s%22,
str(
@@ -958,16 +958,17 @@
or(%22%25s%22,
+
e)%0A
@@ -979,32 +979,33 @@
failed(queue,
+
item)%0A ex
@@ -1096,13 +1096,15 @@
%25s%22,
+
item,
+
str(
@@ -1128,24 +1128,25 @@
ailed(queue,
+
item)%0A
@@ -1174,16 +1174,17 @@
Host%22)%0A%0A
+%0A
def fail
@@ -1192,16 +1192,17 @@
d(queue,
+
item):%0A
@@ -1479,29 +1479,8 @@
rror
- as (errno, strerror)
:%0A
@@ -1567,17 +1567,16 @@
ne%0A #
-#
ready :
@@ -1896,20 +1896,16 @@
pi_key)%0A
-
%0A pro
@@ -1933,16 +1933,17 @@
s_api)%0A%0A
+%0A
if __nam
|
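The Python 3 part of this commit is the `except IOError as (errno, strerror):` line: that tuple unpacking is Python 2-only syntax and a SyntaxError under Python 3. A minimal sketch of the portable form, using a hypothetical file path:

try:
    f = open('scanner.conf')  # hypothetical path
except IOError as e:
    # Read errno/strerror as attributes instead of unpacking the exception.
    print('Config file is missing (errno %s: %s)' % (e.errno, e.strerror))
else:
    f.close()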
720c8a36cfff3270ae54e8c91c18d8ef4fb1c330
|
Remove no longer needed empty .jslib output workaround.
|
build_defs/internal_do_not_use/j2cl_java_library.bzl
|
build_defs/internal_do_not_use/j2cl_java_library.bzl
|
"""Common utilities for creating J2CL targets and providers."""
load(":j2cl_transpile.bzl", "J2CL_TRANSPILE_ATTRS", "j2cl_transpile")
load(":j2cl_js_common.bzl", "J2CL_JS_ATTRS", "JS_PROVIDER_NAME", "j2cl_js_provider")
# Constructor for the Bazel provider for J2CL.
_J2clInfo = provider(fields = ["_J2clJavaInfo"])
LibraryInfo = provider(fields = ["file"])
def _impl_j2cl_library(ctx):
# Categorize the sources.
js_srcs = []
java_srcs = []
for src in ctx.files.srcs:
(js_srcs if src.extension in ["js", "zip"] else java_srcs).append(src)
# Validate the attributes.
if not java_srcs:
if ctx.files.deps:
fail("deps not allowed without java srcs")
if js_srcs:
fail("js sources not allowed without java srcs")
java_provider = _java_compile(ctx, java_srcs)
js_zip, library_info = j2cl_transpile(ctx, java_provider, js_srcs)
js_outputs = [js_zip] if java_srcs else []
library_info = [library_info] if java_srcs else []
# This is a workaround to b/35847804 to make sure the zip ends up in the runfiles.
js_runfiles = _collect_runfiles(ctx, js_outputs, ctx.attr.deps + ctx.attr.exports)
# Write an empty .jslib output (work around b/38349075 and maybe others).
ctx.actions.write(ctx.outputs.dummy_jslib, "")
return struct(
providers = [
DefaultInfo(
files = depset(js_outputs + [ctx.outputs.jar, ctx.outputs.dummy_jslib]),
runfiles = js_runfiles,
),
_J2clInfo(_J2clJavaInfo = java_provider),
LibraryInfo(file = library_info),
],
**j2cl_js_provider(ctx, srcs = js_outputs, deps = ctx.attr.deps, exports = ctx.attr.exports)
)
def _collect_runfiles(ctx, files, deps):
transitive_runfiles = [d[DefaultInfo].default_runfiles.files for d in deps]
return ctx.runfiles(
files = files,
transitive_files = depset(transitive = transitive_runfiles),
)
def _java_compile(ctx, java_srcs):
stripped_java_srcs = [_strip_gwt_incompatible(ctx, java_srcs)] if java_srcs else []
java_deps = [d[_J2clInfo]._J2clJavaInfo for d in ctx.attr.deps if _J2clInfo in d]
java_exports = [d[_J2clInfo]._J2clJavaInfo for d in ctx.attr.exports if _J2clInfo in d]
plugins = [p[JavaInfo] for p in ctx.attr.plugins]
exported_plugins = [p[JavaInfo] for p in ctx.attr.exported_plugins]
return java_common.compile(
ctx,
source_files = ctx.files.srcs_hack,
source_jars = stripped_java_srcs,
deps = java_deps,
exports = java_exports,
plugins = plugins,
exported_plugins = exported_plugins,
output = ctx.outputs.jar,
java_toolchain = ctx.attr._java_toolchain,
host_javabase = ctx.attr._host_javabase,
javac_opts = java_common.default_javac_opts(ctx, java_toolchain_attr = "_java_toolchain"),
)
def _strip_gwt_incompatible(ctx, java_srcs):
output_file = ctx.actions.declare_file(ctx.label.name + "_stripped-src.jar")
args = ctx.actions.args()
args.use_param_file("@%s", use_always = True)
args.set_param_file_format("multiline")
args.add("-d", output_file)
args.add_all(java_srcs)
ctx.actions.run(
progress_message = "Stripping @GwtIncompatible from %s" % ctx.label.name,
inputs = java_srcs,
outputs = [output_file],
executable = ctx.executable._stripper,
arguments = [args],
env = dict(LANG = "en_US.UTF-8"),
execution_requirements = {"supports-workers": "1"},
mnemonic = "J2cl",
)
return output_file
_J2CL_LIB_ATTRS = {
# TODO(goktug): Try to limit this further.
"srcs": attr.label_list(allow_files = [".java", ".js", ".srcjar", ".jar", ".zip"]),
"srcs_hack": attr.label_list(allow_files = True),
"deps": attr.label_list(providers = [JS_PROVIDER_NAME]),
"exports": attr.label_list(providers = [JS_PROVIDER_NAME]),
"plugins": attr.label_list(providers = [JavaInfo]),
"exported_plugins": attr.label_list(providers = [JavaInfo]),
"javacopts": attr.string_list(),
"licenses": attr.license(),
"_java_toolchain": attr.label(
default = Label("@bazel_tools//tools/jdk:toolchain"),
),
"_host_javabase": attr.label(
default = Label("@bazel_tools//tools/jdk:current_host_java_runtime"),
cfg = "host",
),
"_stripper": attr.label(
default = Label("//build_defs/internal_do_not_use:GwtIncompatibleStripper", relative_to_caller_repository = False),
cfg = "host",
executable = True,
),
}
_J2CL_LIB_ATTRS.update(J2CL_TRANSPILE_ATTRS)
_J2CL_LIB_ATTRS.update(J2CL_JS_ATTRS)
j2cl_library = rule(
implementation = _impl_j2cl_library,
attrs = _J2CL_LIB_ATTRS,
fragments = ["java", "js"],
outputs = {
"jar": "lib%{name}.jar",
"srcjar": "lib%{name}-src.jar",
"zip_file": "%{name}.js.zip",
"dummy_jslib": "%{name}.jslib",
},
)
def _impl_java_import(ctx):
return struct(
providers = [_J2clInfo(_J2clJavaInfo = ctx.attr.jar[JavaInfo])],
**j2cl_js_provider(ctx)
)
# helper rule to convert a Java target to a J2CL target.
j2cl_java_import = rule(
implementation = _impl_java_import,
attrs = dict(J2CL_JS_ATTRS, **{
"jar": attr.label(providers = [JavaInfo]),
"licenses": attr.license(),
}),
fragments = ["java", "js"],
)
|
Python
| 0
|
@@ -1180,138 +1180,8 @@
s)%0A%0A
- # Write an empty .jslib output (work around b/38349075 and maybe others).%0A ctx.actions.write(ctx.outputs.dummy_jslib, %22%22)%0A%0A
@@ -1306,33 +1306,8 @@
.jar
-, ctx.outputs.dummy_jslib
%5D),%0A
@@ -4783,48 +4783,8 @@
p%22,%0A
- %22dummy_jslib%22: %22%25%7Bname%7D.jslib%22,%0A
|
c2b6210f7a6a2ed5fe3dcad4495161008d8cf399
|
Remove duplicate 'like'
|
meetup_facebook_bot/messenger/messaging.py
|
meetup_facebook_bot/messenger/messaging.py
|
# -*- coding: utf-8 -*-
import json
import requests
def send_rate_menu(access_token, user_id, talk, db_session):
""" Makes use of Quick Replies:
https://developers.facebook.com/docs/messenger-platform/send-api-reference/quick-replies
"""
if talk.is_liked_by(user_id, db_session):
rate_button_title = 'Убрать лайк'
else:
rate_button_title = 'Поставить лайк'
rate_menu_message_body = {
'text': '',
'quick_replies': [
{
'content_type': 'text',
'title': rate_button_title,
'payload': 'like talk %d' % talk.id
},
{
'content_type': 'text',
'title': 'Отменить',
'payload': 'cancel payload'
}
]
}
return send_message_to_facebook(access_token, user_id, rate_menu_message_body)
def send_like_confirmation(access_token, user_id, talk, db_session):
if talk.is_liked_by(user_id, db_session):
like_text_message = 'Поставил лайк'
else:
like_text_message = 'Убрал лайк лайк'
like_message_body = {
"message": {
"text": like_text_message
}
}
return send_message_to_facebook(access_token, user_id, like_message_body)
def send_schedule(access_token, user_id, talks, db_session):
""" Makes use of Generic Template:
https://developers.facebook.com/docs/messenger-platform/send-api-reference/generic-template
"""
elements = []
for talk in talks:
number_of_likes = talk.count_likes(db_session)
element_subtitle = 'Лайков: %d\nСпикер: %s' % (number_of_likes, talk.speaker.name)
rate_button_title = 'Оценить'
element = {
'title': talk.title,
'subtitle': element_subtitle,
'buttons': [
{
'type': 'postback',
'title': 'Получить подробности',
'payload': 'info talk %d' % talk.id
},
{
'type': 'postback',
'title': rate_button_title,
'payload': 'rate talk %d' % talk.id
},
{
'type': 'postback',
'title': 'Задать вопрос',
'payload': 'ask talk %d' % talk.id
}
]
}
elements.append(element)
schedule_message_body = {
'attachment': {
'type': 'template',
'payload': {
'template_type': 'generic',
'elements': elements
}
}
}
return send_message_to_facebook(access_token, user_id, schedule_message_body)
def send_talk_info(access_token, user_id, talk):
""" Send a simple Facebook message:
https://developers.facebook.com/docs/messenger-platform/send-api-reference/text-message
"""
title = talk.title
speaker = talk.speaker.name
description = talk.description or 'Нет описания.'
more_info_text = '"%s"\n\n%s:\n%s' % (title, speaker, description)
more_info = {
'text': more_info_text
}
return send_message_to_facebook(access_token, user_id, more_info)
def send_message_to_facebook(access_token, user_id, message_data):
headers = {
'Content-Type': 'application/json',
}
params = {
'access_token': access_token,
}
payload = {
'recipient': {
'id': user_id,
},
'message': message_data,
}
url = 'https://graph.facebook.com/v2.6/me/messages'
response = requests.post(url, headers=headers, params=params,
data=json.dumps(payload))
response.raise_for_status()
return response.json()
|
Python
| 0.00009
|
@@ -1095,13 +1095,8 @@
%D1%80%D0%B0%D0%BB
-%D0%BB%D0%B0%D0%B9%D0%BA
%D0%BB%D0%B0%D0%B9%D0%BA
|
496007543f941bb3ca46c011383f2673b9362e47
|
Bump development version
|
debreach/__init__.py
|
debreach/__init__.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from distutils import version
__version__ = '1.4.0'
version_info = version.StrictVersion(__version__).version
default_app_config = 'debreach.apps.DebreachConfig'
|
Python
| 0
|
@@ -113,9 +113,9 @@
1.4.
-0
+1
'%0Ave
|
e10ec71a57f8e0287335da3a82260e1fc8086963
|
Remove unnecessary pass statement (#1481)
|
thumbor/storages/file_storage.py
|
thumbor/storages/file_storage.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
import hashlib
import os
from datetime import datetime
from json import dumps, loads
from os.path import dirname, exists, getmtime, splitext
from shutil import move
from uuid import uuid4
from thumbor import storages
from thumbor.utils import logger
class Storage(storages.BaseStorage):
async def put(self, path, file_bytes):
file_abspath = self.path_on_filesystem(path)
temp_abspath = f"{file_abspath}.{str(uuid4()).replace('-', '')}"
file_dir_abspath = dirname(file_abspath)
logger.debug("creating tempfile for %s in %s...", path, temp_abspath)
self.ensure_dir(file_dir_abspath)
with open(temp_abspath, "wb") as _file:
_file.write(file_bytes)
logger.debug("moving tempfile %s to %s...", temp_abspath, file_abspath)
move(temp_abspath, file_abspath)
return path
async def put_crypto(self, path):
if not self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
return
file_abspath = self.path_on_filesystem(path)
file_dir_abspath = dirname(file_abspath)
self.ensure_dir(file_dir_abspath)
if not self.context.server.security_key:
raise RuntimeError(
"STORES_CRYPTO_KEY_FOR_EACH_IMAGE can't be "
"True if no SECURITY_KEY specified"
)
crypto_path = f"{splitext(file_abspath)[0]}.txt"
temp_abspath = f"{crypto_path}.{str(uuid4()).replace('-', '')}"
with open(temp_abspath, "wb") as _file:
try:
security_key = self.context.server.security_key.encode()
except (UnicodeDecodeError, AttributeError):
security_key = self.context.server.security_key
pass
_file.write(security_key)
move(temp_abspath, crypto_path)
logger.debug(
"Stored crypto at %s (security key: %s)",
crypto_path,
self.context.server.security_key,
)
return file_abspath
async def put_detector_data(self, path, data):
file_abspath = self.path_on_filesystem(path)
path = f"{splitext(file_abspath)[0]}.detectors.txt"
temp_abspath = f"{path}.{str(uuid4()).replace('-', '')}"
file_dir_abspath = dirname(file_abspath)
self.ensure_dir(file_dir_abspath)
with open(temp_abspath, "w", encoding="utf-8") as _file:
_file.write(dumps(data))
move(temp_abspath, path)
return file_abspath
async def get(self, path):
abs_path = self.path_on_filesystem(path)
resource_available = await self.exists(
path, path_on_filesystem=abs_path
)
if not resource_available:
return None
with open(self.path_on_filesystem(path), "rb") as source_file:
return source_file.read()
async def get_crypto(self, path):
file_abspath = self.path_on_filesystem(path)
crypto_file = f"{splitext(file_abspath)[0]}.txt"
if not exists(crypto_file):
return None
with open(crypto_file, "r", encoding="utf-8") as crypto_f:
return crypto_f.read()
async def get_detector_data(self, path):
file_abspath = self.path_on_filesystem(path)
path = f"{splitext(file_abspath)[0]}.detectors.txt"
resource_available = await self.exists(path, path_on_filesystem=path)
if not resource_available:
return None
with open(path, "r", encoding="utf-8") as detector_file:
return loads(detector_file.read())
def path_on_filesystem(self, path):
digest = hashlib.sha1(path.encode("utf-8")).hexdigest()
root_path = self.context.config.FILE_STORAGE_ROOT_PATH.rstrip("/")
return f"{root_path}/{digest[:2]}/{digest[2:]}"
async def exists(
self, path, path_on_filesystem=None
): # pylint: disable=arguments-differ
if path_on_filesystem is None:
path_on_filesystem = self.path_on_filesystem(path)
return os.path.exists(path_on_filesystem) and not self.__is_expired(
path_on_filesystem
)
async def remove(self, path):
n_path = self.path_on_filesystem(path)
return os.remove(n_path)
def __is_expired(self, path):
if self.context.config.STORAGE_EXPIRATION_SECONDS is None:
return False
timediff = datetime.now() - datetime.fromtimestamp(getmtime(path))
return (
timediff.total_seconds()
> self.context.config.STORAGE_EXPIRATION_SECONDS
)
|
Python
| 0.000099
|
@@ -1978,29 +1978,8 @@
key%0A
- pass%0A
|
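The removed `pass` sat after an assignment inside the `except` branch, so it was dead code rather than a behavior change. A standalone sketch of that encode-with-fallback pattern under Python 3; the helper name is illustrative:

def coerce_to_bytes(security_key):
    try:
        return security_key.encode()
    except (UnicodeDecodeError, AttributeError):
        # Already bytes (or not a str at all): pass it through unchanged.
        # No trailing `pass` is needed -- the return ends the branch.
        return security_key


assert coerce_to_bytes('abc') == b'abc'
assert coerce_to_bytes(b'abc') == b'abc'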
9cc88d9162f35cf0f8828ded5566f52f3bd506bd
|
Fix #387: fix deluge JSON queries
|
modules/deluge.py
|
modules/deluge.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import traceback
import sys
import os
import cherrypy
import htpc
import urllib2
import gzip
import socket
from json import loads, dumps
import logging
import cookielib
from StringIO import StringIO
from cherrypy.lib.auth2 import require
class Deluge:
cookieJar = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar))
def __init__(self):
self.logger = logging.getLogger('modules.deluge')
htpc.MODULES.append({
'name': 'Deluge',
'id': 'deluge',
'test': htpc.WEBDIR + 'deluge/ping',
'fields': [
{'type': 'bool', 'label': 'Enable', 'name': 'deluge_enable'},
{'type': 'text', 'label': 'Menu name', 'name': 'deluge_name'},
{'type': 'text', 'label': 'IP / Host *', 'name': 'deluge_host'},
{'type': 'text', 'label': 'Port *', 'name': 'deluge_port'},
{'type': 'bool', 'label': 'Use SSL', 'name': 'deluge_ssl'},
{'type': 'text', 'label': 'Basepath', 'name': 'deluge_basepath'},
{'type': 'password', 'label': 'Password', 'name': 'deluge_password'}
]})
@cherrypy.expose()
@require()
def index(self):
return htpc.LOOKUP.get_template('deluge.html').render(scriptname='deluge')
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def connected(self):
return self.fetch('web.connected')
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def connect(self,hostid):
return self.fetch('web.connect',[hostid])
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def get_hosts(self):
return self.fetch('web.get_hosts')
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def queue(self):
fields = ['progress','is_finished','ratio','name','download_payload_rate','upload_payload_rate','eta','state','hash','total_size']
return self.fetch('core.get_torrents_status', [[],fields])
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def stats(self):
fields = ["payload_download_rate","payload_upload_rate"]
return self.fetch('core.get_session_status',[fields])
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def start(self, torrentId):
torrents = [torrentId]
return self.fetch('core.resume_torrent', [torrents])
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def stop(self, torrentId):
torrents = [torrentId]
return self.fetch('core.pause_torrent',[torrents])
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def remove(self, torrentId, removeData):
removeDataBool = bool(int(removeData));
return self.fetch('core.remove_torrent', [torrentId,removeDataBool])
# Wrapper to access the Deluge Api
# If the first call fails, there probably is no valid Session ID so we try it again
def fetch(self, method, arguments=[]):
""" Do request to Deluge api """
self.logger.debug("Request deluge method: "+method)
# format post data
data = {'id':1,'method': method,'params':arguments}
response = self.read_data(data)
self.logger.debug ("response is %s" %response)
if response and response['error']:
self.auth()
response = self.read_data(data)
self.logger.debug ("response is %s" %response)
return response
def auth(self):
self.read_data({"method": "auth.login","params": [htpc.settings.get('deluge_password', '')],"id": 1})
def read_data(self,data):
try:
self.logger.debug("Read data from server")
host = htpc.settings.get('deluge_host', '')
port = str(htpc.settings.get('deluge_port', ''))
deluge_basepath = str(htpc.settings.get('deluge_basepath', ''))
ssl = 's' if htpc.settings.get('deluge_ssl') else ''
url = 'http' + ssl + '://' + host + ':' + str(port) + deluge_basepath + '/json'
post_data = dumps(data)
buf = StringIO( self.opener.open(url, post_data,1).read())
f = gzip.GzipFile(fileobj=buf)
response = loads(f.read())
self.logger.debug ("response for %s is %s" %(data,response))
return response
except urllib2.URLError:
self.logger.error ("can't connect with %s" %data)
return {'result':{},'error':"can't connect with %s" %data}
except socket.timeout:
self.logger.error ("timeout when connect with %s" %data)
return {'result':{},'error':"can't connect with %s" %data}
|
Python
| 0
|
@@ -4372,24 +4372,125 @@
dumps(data)%0A
+ req = urllib2.Request(url, data=post_data, headers=%7B'Content-Type': 'application/json'%7D)%0A
@@ -4504,17 +4504,16 @@
tringIO(
-
self.ope
@@ -4525,23 +4525,21 @@
pen(
-url, post_data,
+req, timeout=
1).r
|
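The fix moves the POST body into a urllib2.Request so a Content-Type: application/json header can be attached (Deluge's /json endpoint rejects the body otherwise), and makes the timeout an explicit keyword instead of a bare positional argument. A minimal Python 2 sketch; the URL is a placeholder:

import json
import urllib2


def post_json(url, payload, timeout=10):
    # Request lets us attach headers to what is otherwise a plain urlopen call.
    req = urllib2.Request(url, data=json.dumps(payload),
                          headers={'Content-Type': 'application/json'})
    return urllib2.urlopen(req, timeout=timeout)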
31d6ce09382035458eca2a310f99cb3c958ea604
|
Use main template environment for rendering document content
|
nib/render.py
|
nib/render.py
|
import jinja2
from jinja2 import Environment, FileSystemLoader, Template
from os import path
import time
jinja_filters = {}
def jinja(name):
def decorator(f):
jinja_filters[name] = f
return f
return decorator
class Render(object):
def __init__(self, options, documents):
self.options = options
self.documents = documents
self.loader = FileSystemLoader(path.abspath(options['template_path']))
self.env = Environment(loader=self.loader)
for name in jinja_filters:
self.env.filters[name] = jinja_filters[name]
self.site = dict(options['site'], documents=documents)
self.now = time.time()
def render_content(self, document):
params = {
'now': self.now,
'site': self.site,
'page': document,
}
params.update(document)
document.short = Template(document.short).render(**params)
document.content = Template(document.content).render(**params)
def render_template(self, document):
if 'template' in document:
template = self.env.get_template(document['template'])
params = {
'now': self.now,
'site': self.options['site'],
'page': document,
'content': document.content,
'short': document.short,
}
params.update(document)
return template.render(**params)
else:
return document.content
|
Python
| 0
|
@@ -893,32 +893,44 @@
ent.short =
-Template
+self.env.from_string
(document.sh
@@ -978,24 +978,36 @@
ntent =
-Template
+self.env.from_string
(documen
|
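The point of this change: a bare `Template(...)` compiles against a fresh default environment, so filters registered on `self.env` via the @jinja decorator were invisible when rendering document content, while `env.from_string` compiles against the configured environment. A runnable sketch:

from jinja2 import Environment, Template

env = Environment()
env.filters['shout'] = lambda s: s.upper() + '!'

# Compiled against the configured environment: the custom filter resolves.
print(env.from_string('{{ "hello" | shout }}').render())  # HELLO!

# A bare Template uses a fresh default environment, so compilation fails
# with TemplateAssertionError: no filter named 'shout'.
try:
    Template('{{ "hello" | shout }}')
except Exception as e:
    print(type(e).__name__)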
00ed3465351f3f99b736904baecc530c03cbc91b
|
test flac to flac conversion
|
unmollom/tests/test_speech_recognition.py
|
unmollom/tests/test_speech_recognition.py
|
# -*- coding: utf-8 -*-
__author__ = 'Flurin Rindisbacher'
import unittest
import os
import json
import uuid
from .. import speech_recognition
from ..exceptions import RecognitionException, CommunicationException
def server_response_stub(success_answer=True,response_text=''):
""" a stub for injecting our own server responses into
the GoogleSpeechRecognition class. testing Googles api does not really make sense"""
class ServerResponse(object):
def __init__(self, a, t):
self.ok = a
self.text = t
def send_request(url, data, headers):
return ServerResponse(success_answer, response_text)
return send_request
class TestGoogleSpeechRecognition(unittest.TestCase):
def setUp(self):
# set up some possible API server response
# request was unsuccessful
self.no_server_response = server_response_stub(False)
# server did response, but could not recognize the data
self.no_recognition = server_response_stub(True, '{"status":0,"id":"7eea7cfb6a09168431e8d76b10842947-1","hypotheses":[]}\n')
# server responds with code=200 but not in json
self.no_valid_json = server_response_stub(True, 'wtf?')
# server responds with json but without our expected elements
self.valid_but_wrong_json = server_response_stub(True, '{"blubb":0,"id":"7eea7cfb6a09168431e8d76b10842947-1"}\n')
# server successfully recognized "just an example"
self.recognition_success = server_response_stub(True, '{"status":0,"id":"7eea7cfb6a09168431e8d76b10842947-1","hypotheses":[{"utterance":"just an example","confidence":0.9}]}\n')
self.input_flac = os.path.dirname(os.path.realpath(__file__)) + '/files/1.flac'
def test_recognize_success(self):
cls = speech_recognition.GoogleSpeechRecognition()
cls.send_request = self.recognition_success
result = cls.recognize(self.input_flac, format='flac')
self.assertEqual(result['text'], 'just an example')
def test_recognize_failure(self):
cls = speech_recognition.GoogleSpeechRecognition()
cls.send_request = self.no_recognition
self.assertRaises(RecognitionException, cls.recognize, self.input_flac, 'flac')
def test_recognize_invalid_json(self):
cls = speech_recognition.GoogleSpeechRecognition()
cls.send_request = self.no_valid_json
self.assertRaises(RecognitionException, cls.recognize, self.input_flac, 'flac')
def test_recognize_valid_but_wrong_json(self):
cls = speech_recognition.GoogleSpeechRecognition()
cls.send_request = self.valid_but_wrong_json
self.assertRaises(RecognitionException, cls.recognize, self.input_flac, 'flac')
def test_recognize_no_server_response(self):
cls = speech_recognition.GoogleSpeechRecognition()
cls.send_request = self.no_server_response
self.assertRaises(CommunicationException, cls.recognize, self.input_flac, 'flac')
def test_recognize_wrong_file(self):
cls = speech_recognition.GoogleSpeechRecognition()
cls.send_request = self.no_server_response
self.assertRaises(IOError, cls.recognize, '/dev/nullwtf', 'flac')
def test_send_request(self):
cls = speech_recognition.GoogleSpeechRecognition()
randomdata = str(uuid.uuid4())
response = cls.send_request('https://httpbin.org/post', randomdata, cls.headers)
self.assertEqual(response.ok, True)
resp_obj = json.loads(response.text)
self.assertEqual(resp_obj['data'], randomdata)
for h in cls.headers:
self.assertEqual(resp_obj['headers'][h], cls.headers[h])
def test_conversion(self):
pass
# rename to test_compare_... to run this too.
# it's deactivated because its too slow
#def test_compare_recognition_functions(self):
def compare_recognition_functions(self):
"""
test the two recognize_*() functions
these are tested by calling the google API twice and comparing the result
"""
flac = open(self.input_flac,'rb').read()
result_file = speech_recognition.recognize_file(self.input_flac, 'flac')
result_data = speech_recognition.recognize(flac, 'flac')
self.assertEqual(result_file['text'], result_data['text'])
self.assertEqual(result_file['confidence'], result_data['confidence'])
|
Python
| 0.000001
|
@@ -3682,17 +3682,16 @@
rs%5Bh%5D)%0A%0A
-%0A
def
@@ -3705,16 +3705,31 @@
nversion
+_no_conv_needed
(self):%0A
@@ -3740,12 +3740,219 @@
-pass
+cls = speech_recognition.GoogleSpeechRecognition()%0A flac = open(self.input_flac, 'rb').read()%0A converted = cls.convert_to_flac(self.input_flac, 'flac')%0A self.assertEqual(flac, converted)%0A
%0A%0A
|
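The new test pins down that converting a file that is already FLAC is a byte-for-byte no-op. The real convert_to_flac presumably transcodes other formats; a sketch of just the no-op branch the test exercises, written as a free function:

def convert_to_flac(path, audio_format):
    if audio_format == 'flac':
        # Already FLAC: return the raw bytes unchanged so flac -> flac
        # round-trips exactly, as the new test asserts.
        with open(path, 'rb') as f:
            return f.read()
    raise NotImplementedError('transcoding %s -> flac is not sketched here' % audio_format)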
5ba7f7b0df17f95538c24b56c1c7f4a8a650bd16
|
Write str(eq) instead of srepr(eq)
|
transmute_expr.py
|
transmute_expr.py
|
import re
import json
import sympy
from sympy.codegen.ast import Assignment, CodeBlock
with open('transmute_data.json') as f:
DATA = json.load(f)
t= sympy.symbols('t')
# G = 1
# phi = sympy.MatrixSymbol('phi', G, 1)
phi = sympy.symbols('phi')
decay_rxs = ['bminus', 'bplus', 'ec', 'alpha', 'it', 'sf', 'bminus_n']
xs_rxs = ['gamma', 'z_2n', 'z_3n', 'alpha', 'fission', 'proton', 'gamma_1', 'z_2n_1']
gamma_base = '^gamma_([A-Z][a-z]?\d+)_'
def child_decays(nuc):
symbols = DATA['symbols']
expr = 0
for rx in decay_rxs:
r = re.compile(gamma_base + nuc + '_' + rx + '$')
for key in symbols:
m = r.match(key)
if m is not None:
parname = m.group(1)
gammaname = m.group(0)
break
else:
continue
gamma = symbols[gammaname]
lambda_par = symbols['lambda_' + parname]
par0 = sympy.symbols('{0}_0'.format(parname))
if lambda_par >= 0: # Avoid nan
expr += gamma * sympy.exp(lambda_par * t) * par0
return expr
def child_xss(nuc):
rxs = DATA['channels'][nuc]
terms = []
for rx in xs_rxs:
if rx not in rxs:
continue
parname = rxs[rx]
par0 = sympy.symbols('{0}_0'.format(parname))
# sigma_rx_par = sympy.MatrixSymbol('sigma_{0}_{1}'.format(rx, parname), 1, G)
sigma_rx_par = sympy.Symbol('sigma_{0}_{1}'.format(rx, parname))
# expr += sympy.exp((sigma_rx_par*phi)[0] * t) * par0
terms.append(sympy.exp((sigma_rx_par*phi) * t) * par0)
return sympy.Add(*terms)
def gennuc(nuc):
nuc0, nuc1 = sympy.symbols('{0}_0 {0}_1'.format(nuc))
lambda_nuc = DATA['symbols'].get('lambda_{0}'.format(nuc), sympy.oo)
# sigma_a_nuc = sympy.MatrixSymbol('sigma_a_{0}'.format(nuc), 1, G)
sigma_a_nuc = sympy.Symbol('sigma_a_{0}'.format(nuc))
# rhs = sympy.exp(-((sigma_a_nuc*phi)[0] + lambda_nuc)*t) * nuc0
if lambda_nuc == sympy.oo:
rhs = 0
else:
rhs = sympy.exp(-((sigma_a_nuc*phi) + lambda_nuc)*t) * nuc0
rhs += child_decays(nuc)
rhs += child_xss(nuc)
eq = Assignment(nuc1, rhs)
return eq
def generate_sigma_array():
sigma_symbols = [['sigma_{0}_{1}'.format(rx, nuc) for rx in xs_rxs + ['a']] for nuc in DATA['nucs']]
with open('sigma.json') as f:
sigma = json.load(f)
# We don't use all nucs
used_sigmas = set()
for i in sigma:
*_, nuc = i.rpartition('_')
if nuc in DATA['nucs']:
used_sigmas.add(i)
return [[sigma[i] if i in used_sigmas else 0.0 for i in j] for j in sigma_symbols]
if __name__ == '__main__':
system = CodeBlock(*list(map(gennuc, DATA['nucs'])))
sigma_symbols = sorted([i.name for i in system.free_symbols if
i.name.startswith('sigma')])
with open("sigma_array.txt", 'w') as f:
f.write('[' + ',\n'.join(map(str, generate_sigma_array())) + ']\n')
with open('system.txt', 'w') as f:
for eq in system.args:
f.write(sympy.srepr(eq) + '\n')
with open('system-C.txt', 'w') as f:
f.write(sympy.ccode(system))
#system_cse = system.cse()
#with open('system-cse.txt', 'w') as f:
# for eq in system_cse.args:
# f.write(str(eq) + '\n')
#with open('system-cse-C.txt', 'w') as f:
# f.write(sympy.ccode(system_cse))
|
Python
| 0.99951
|
@@ -3038,17 +3038,9 @@
te(s
-ympy.srep
+t
r(eq
|
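srepr produces the verbose, eval-able constructor form, while str gives the human-readable expression; that is what the small diff above swaps when writing system.txt. A runnable comparison:

import sympy

x, t = sympy.symbols('x t')
eq = sympy.exp(x * t)

print(str(eq))          # exp(t*x)
print(sympy.srepr(eq))  # exp(Mul(Symbol('t'), Symbol('x'))), exact form may vary by version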
00eea597cc43cb379bf8eca29be2ebe832dcb0f2
|
remove unused FileDecoder super-class in LiveDecoder
|
timeside/plugins/decoder/live.py
|
timeside/plugins/decoder/live.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2007-2013 Parisson
# Copyright (c) 2007 Olivier Guilyardi <olivier@samalyse.com>
# Copyright (c) 2007-2013 Guillaume Pellerin <pellerin@parisson.com>
# Copyright (c) 2010-2013 Paul Brossier <piem@piem.org>
#
# This file is part of TimeSide.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Authors:
# Paul Brossier <piem@piem.org>
# Guillaume Pellerin <yomguy@parisson.com>
# Thomas Fillon <thomas@parisson.com>
from __future__ import division
from timeside.core.decoder import Decoder, IDecoder, interfacedoc, implements
from timeside.core.tools.gstutils import MainloopThread, GLib, Gst
from timeside.plugins.decoder.file import FileDecoder
try:
import queue
except:
import Queue as queue
import threading
GST_APPSINK_MAX_BUFFERS = 10
QUEUE_SIZE = 10
# TODO:
# check if a soundcard device is available
# alsasrc = Gst.ElementFactory.make("alsasrc", "alsasrc")
# alsasrc.probe_get_values_name('device')
# ['hw:0,0']
class LiveDecoder(FileDecoder):
"""Live source Decoder based on Gstreamer
capturing audio from alsasrc
Construct a new LiveDecoder capturing audio from alsasrc
Parameters
----------
num_buffers : int, optional
Number of buffers to output before sending End Of Stream signal
(-1 = unlimited).
(Allowed values: >= -1, Default value: -1)
input_src : str, optional
Gstreamer source element
default to 'alsasrc'
possible values : 'autoaudiosrc', 'alsasrc', 'osssrc'
Examples
--------
>>> import timeside
>>> from timeside.core import get_processor
>>> live_decoder = get_processor('live_decoder')(num_buffers=5)
>>> waveform = get_processor('waveform_analyzer')()
>>> mp3_encoder = timeside.plugins.encoder.mp3.Mp3Encoder('/tmp/test_live.mp3',
... overwrite=True)
>>> pipe = (live_decoder | waveform | mp3_encoder)
>>> pipe.run() # doctest: +SKIP
>>> # Show the audio as captured by the decoder
>>> import matplotlib.pyplot as plt # doctest: +SKIP
>>> plt.plot(a.results['waveform_analyzer'].time, # doctest: +SKIP
a.results['waveform_analyzer'].data) # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
"""
implements(IDecoder)
# IProcessor methods
@staticmethod
@interfacedoc
def id():
return "live_decoder"
@staticmethod
@interfacedoc
def version():
return "1.0"
def __init__(self, num_buffers=-1, input_src='alsasrc'):
super(Decoder, self).__init__()
self.num_buffers = num_buffers
self.uri = None
self.uri_start = 0
self.uri_duration = None
self.is_segment = False
self.input_src = input_src
self._sha1 = ''
def setup(self, channels=None, samplerate=None, blocksize=None):
self.eod = False
self.last_buffer = None
# a lock to wait wait for gstreamer thread to be ready
self.discovered_cond = threading.Condition(threading.Lock())
self.discovered = False
# the output data format we want
if blocksize:
self.output_blocksize = blocksize
if samplerate:
self.output_samplerate = int(samplerate)
if channels:
self.output_channels = int(channels)
# Create the pipe with standard Gstreamer uridecodbin
self.pipe = '''%s num-buffers=%d name=src
! audioconvert name=audioconvert
! audioresample
! appsink name=sink sync=False async=True
''' % (self.input_src, self.num_buffers)
self.pipeline = Gst.parse_launch(self.pipe)
if self.output_channels:
caps_channels = int(self.output_channels)
else:
caps_channels = "[ 1, 2 ]"
if self.output_samplerate:
caps_samplerate = int(self.output_samplerate)
else:
caps_samplerate = "{ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 96000 }"
sink_caps = Gst.Caps("""audio/x-raw,
format=F32LE,
channels=(int)%s,
rate=(int)%s""" % (caps_channels, caps_samplerate))
self.src = self.pipeline.get_by_name('src')
self.conv = self.pipeline.get_by_name('audioconvert')
self.conv.get_pad("sink").connect("notify::caps", self._notify_caps_cb)
self.sink = self.pipeline.get_by_name('sink')
self.sink.set_property("caps", sink_caps)
self.sink.set_property('max-buffers', GST_APPSINK_MAX_BUFFERS)
self.sink.set_property("drop", False)
self.sink.set_property('emit-signals', True)
self.sink.connect("new-buffer", self._on_new_buffer_cb)
self.bus = self.pipeline.get_bus()
self.bus.add_signal_watch()
self.bus.connect('message', self._on_message_cb)
self.queue = queue.Queue(QUEUE_SIZE)
self.mainloop = GLib.MainLoop()
self.mainloopthread = MainloopThread(self.mainloop)
self.mainloopthread.start()
#self.mainloopthread = get_loop_thread()
##self.mainloop = self.mainloopthread.mainloop
# start pipeline
self.pipeline.set_state(Gst.State.PLAYING)
self.discovered_cond.acquire()
while not self.discovered:
# print 'waiting'
self.discovered_cond.wait()
self.discovered_cond.release()
if not hasattr(self, 'input_samplerate'):
if hasattr(self, 'error_msg'):
raise IOError(self.error_msg)
else:
raise IOError('no known audio stream found')
@interfacedoc
def process(self):
buf = self.queue.get()
if buf == Gst.MessageType.EOS:
return self.last_buffer, True
frames, eod = buf
return frames, eod
def release(self):
# TODO : check if stack support is needed here
#if self.stack:
# self.stack = False
# self.from_stack = True
pass
# IDecoder methods
if __name__ == "__main__":
import doctest
import timeside
doctest.testmod(timeside.plugins.decoder.live, verbose=True)
|
Python
| 0
|
@@ -1251,16 +1251,17 @@
ib, Gst%0A
+#
from tim
@@ -1610,16 +1610,18 @@
eDecoder
+:#
(FileDec
|
8a03a3fbcfdb22dc21e5539462a2b235e744abba
|
change open/close to with
|
output.py
|
output.py
|
def summarizeECG(instHR, avgHR, brady, tachy):
"""Create txt file summarizing ECG analysis
:param instHR: (int)
:param avgHR: (int)
:param brady: (int)
:param tachy: (int)
"""
#Calls hrdetector() to get instantaneous heart rate
#instHR = findInstHR()
#Calls findAvgHR() to get average heart rate
#avgHR = findAvgHR()
#Calls bradyTimes() to get times when bradycardia occurred
#brady = bradyTimes()
#Calls tachtimes() to get times when tachycardia occurred
#tachy = tachyTimes()
#Writes the output of the ECG analysis to an output file named ecgOutput.txt
ecgResults = open('ecgOutput.txt','w')
instHRstr = "Estimated instantaneous heart rate: %s" % str(instHR)
avgHRstr = "Estimated average heart rate: %s" % str(avgHR)
bradystr = "Bradycardia occurred at: %s" % str(brady)
tachystr = "Tachycardia occurred at: %s" % str(tachy)
ecgResults.write(instHRstr + ' BPM\n' + avgHRstr + ' BPM\n' + bradystr + ' sec\n' + tachystr + ' sec')
ecgResults.close()
|
Python
| 0
|
@@ -623,20 +623,12 @@
-ecgResults =
+with
ope
@@ -649,17 +649,36 @@
xt','w')
-%0A
+ as ecgResults:%0A
inst
@@ -744,16 +744,20 @@
HR)%0A
+
+
avgHRstr
@@ -803,24 +803,28 @@
str(avgHR)%0A
+
bradystr
@@ -869,16 +869,20 @@
(brady)%0A
+
tach
@@ -932,16 +932,20 @@
tachy)%0A%0A
+
ecgR
@@ -1047,27 +1047,4 @@
c')%0A
- ecgResults.close()%0A
|
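The rewrite trades explicit open/close for a context manager, which also closes the file if any write raises. A sketch of the resulting shape of the function, keeping the record's names:

def summarizeECG(instHR, avgHR, brady, tachy):
    with open('ecgOutput.txt', 'w') as ecgResults:
        # The context manager flushes and closes the file on exit,
        # even when one of the writes raises, so no explicit close().
        ecgResults.write('Estimated instantaneous heart rate: %s BPM\n' % instHR)
        ecgResults.write('Estimated average heart rate: %s BPM\n' % avgHR)
        ecgResults.write('Bradycardia occurred at: %s sec\n' % brady)
        ecgResults.write('Tachycardia occurred at: %s sec' % tachy)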
91613b0f2c5d393312b5bae60e71fce783097c2d
|
Move ptpos_to_wn() to lelesk
|
coolisf/common.py
|
coolisf/common.py
|
# -*- coding: utf-8 -*-
'''
Common functions
Latest version can be found at https://github.com/letuananh/intsem.fx
References:
ACE:
http://moin.delph-in.net/AceOptions
@author: Le Tuan Anh <tuananh.ke@gmail.com>
@license: MIT
'''
# Copyright (c) 2015, Le Tuan Anh <tuananh.ke@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
########################################################################
import os
import gzip
import logging
from chirptext import FileHelper
# ----------------------------------------------------------------------
# Configuration
# ----------------------------------------------------------------------
def getLogger():
return logging.getLogger(__name__)
# ----------------------------------------------------------------------
# Functions
# ----------------------------------------------------------------------
def read_file(file_path, mode='rt'):
file_path = FileHelper.abspath(file_path) # normalize path
if not os.path.isfile(file_path):
raise Exception("Input file not found: {}".format(file_path))
if file_path.endswith('.gz'):
with gzip.open(file_path, mode) as infile:
return infile.read()
else:
return FileHelper.read(file_path, mode)
def write_file(content, path=None):
''' Write content to a file, or to console if no path is provided '''
if isinstance(content, str):
mode = 'wt'
else:
mode = 'wb'
if path:
getLogger().debug("Writing content to {}".format(path))
if path.endswith('.gz'):
with gzip.open(path, mode) as outfile:
outfile.write(content)
else:
with open(path, mode) as outfile:
outfile.write(content)
else:
print(content)
def overlap(cfrom1, cto1, cfrom2, cto2):
if cfrom1 is None or cto1 is None or cfrom2 is None or cto2 is None:
raise ValueError("cfrom:cto must be numbers")
return (cfrom1 <= cfrom2 < cto1) or (cfrom2 <= cfrom1 < cto2)
def tags_to_concepts(sent):
''' Take concepts from sentence-level tags and create token-level concepts '''
for tag in sent.tags:
tokens = [tk for tk in sent.tokens if overlap(tag.cfrom, tag.cto, tk.cfrom, tk.cto)]
if tokens:
sent.new_concept(tag.label, tokens=tokens)
return sent
def ptpos_to_wn(ptpos):
''' Penn Treebank Project POS to WN '''
# Ref: http://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
# CC Coordinating conjunction
# CD Cardinal number
# DT Determiner
# EX Existential there
# FW Foreign word
# IN Preposition or subordinating conjunction
# -----------------------------
# JJ Adjective
# JJR Adjective, comparative
# JJS Adjective, superlative
# -----------------------------
# LS List item marker
# MD Modal
# -----------------------------
# NN Noun, singular or mass
# NNS Noun, plural
# NNP Proper noun, singular
# NNPS Proper noun, plural
# -----------------------------
# PDT Predeterminer
# POS Possessive ending
# PRP Personal pronoun
# PRP$ Possessive pronoun
# -----------------------------
# RB Adverb
# RBR Adverb, comparative
# RBS Adverb, superlative
# -----------------------------
# RP Particle
# SYM Symbol
# TO to
# UH Interjection
# -----------------------------
# VB Verb, base form
# VBD Verb, past tense
# VBG Verb, gerund or present participle
# VBN Verb, past participle
    # VBP Verb, non-3rd person singular present
# VBZ Verb, 3rd person singular present
# -----------------------------
    # WDT Wh-determiner
    # WP Wh-pronoun
    # WP$ Possessive wh-pronoun
    # WRB Wh-adverb
if ptpos.startswith('JJ'):
return 'a'
elif ptpos.startswith('NN'):
return 'n'
elif ptpos.startswith('RB'):
return 'r'
elif ptpos.startswith('VB'):
return 'v'
else:
return 'x'
def get_ep_lemma(ep):
''' Get lemma from a pyDelphin elementary predicate '''
# if ep.pred == 'named':
if ep.carg:
return ep.carg
elif ep.pred.pos == 'u' and ep.pred.sense == 'unknown' and "/" in ep.pred.lemma:
cutpoint = ep.pred.lemma.rfind('/')
return ep.pred.lemma[:cutpoint]
else:
return ep.pred.lemma
|
Python
| 0.00001
|
@@ -1500,16 +1500,52 @@
eHelper%0A
+from lelesk.util import ptpos_to_wn%0A
%0A%0A# ----
@@ -3394,1680 +3394,8 @@
t%0A%0A%0A
-def ptpos_to_wn(ptpos):%0A ''' Penn Treebank Project POS to WN '''%0A # Ref: http://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html%0A # CC Coordinating conjunction%0A # CD Cardinal number%0A # DT Determiner%0A # EX Existential there%0A # FW Foreign word%0A # IN Preposition or subordinating conjunction%0A # -----------------------------%0A # JJ Adjective%0A # JJR Adjective, comparative%0A # JJS Adjective, superlative%0A # -----------------------------%0A # LS List item marker%0A # MD Modal%0A # -----------------------------%0A # NN Noun, singular or mass%0A # NNS Noun, plural%0A # NNP Proper noun, singular%0A # NNPS Proper noun, plural%0A # -----------------------------%0A # PDT Predeterminer%0A # POS Possessive ending%0A # PRP Personal pronoun%0A # PRP$ Possessive pronoun%0A # -----------------------------%0A # RB Adverb%0A # RBR Adverb, comparative%0A # RBS Adverb, superlative%0A # -----------------------------%0A # RP Particle%0A # SYM Symbol%0A # TO to%0A # UH Interjection%0A # -----------------------------%0A # VB Verb, base form%0A # VBD Verb, past tense%0A # VBG Verb, gerund or present participle%0A # VBN Verb, past participle%0A # VBP Verb, non%C2%AD3rd person singular present%0A # VBZ Verb, 3rd person singular present%0A # -----------------------------%0A # WDT Wh%C2%ADdeterminer%0A # WP Wh%C2%ADpronoun%0A # WP$ Possessive wh%C2%ADpronoun%0A # WRB Wh%C2%ADadverb%0A if ptpos.startswith('JJ'):%0A return 'a'%0A elif ptpos.startswith('NN'):%0A return 'n'%0A elif ptpos.startswith('RB'):%0A return 'r'%0A elif ptpos.startswith('VB'):%0A return 'v'%0A else:%0A return 'x'%0A%0A%0A
def
|
b0a1f10d60abc6c9fc7751e3bae492976d3f3306
|
Update version 1.0.0.dev3 -> 1.0.0.dev4
|
dimod/package_info.py
|
dimod/package_info.py
|
__version__ = '1.0.0.dev3'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'acondello@dwavesys.com'
__description__ = 'A shared API for binary quadratic model samplers.'
|
Python
| 0.000001
|
@@ -21,9 +21,9 @@
.dev
-3
+4
'%0A__
|
eccc07a4639e1da98c09689295964e0f15c8068c
|
Add fix author functionality
|
dasem/runeberg.py
|
dasem/runeberg.py
|
"""runeberg.
Usage:
dasem.runeberg download-catalogue
dasem.runeberg catalogue-as-csv
Description
-----------
Runeberg is a digital library with primarily Nordic texts. It is available from
http://runeberg.org/
"""
from __future__ import absolute_import, division, print_function
from os.path import join
from re import DOTALL, UNICODE, findall
import sys
from pandas import DataFrame
import requests
from .config import data_directory
from .utils import make_data_directory
CATALOGUE_URL = 'http://runeberg.org/katalog.html'
CATALOGUE_FILENAME = 'katalog.html'
def fix_author(author):
"""Change surname-firstname order.
Parameters
----------
author : str
Author as string
Returns
-------
fixed_author : str
Changed author string.
Examples
--------
    >>> author = 'Lybeck, Mikael'
>>> fix_author(author)
'Mikael Lybeck'
"""
author_parts = author.split(', ')
if author_parts == 2:
fixed_author = author_parts[1] + ' ' + author_parts[0]
else:
fixed_author = author
return fixed_author
class Runeberg(object):
"""Runeberg.
Examples
--------
>>> runeberg = Runeberg()
>>> catalogue = runeberg.catalogue()
>>> danish_catalogue = catalogue.ix[catalogue.language == 'dk', :]
>>> len(danish_catalogue) > 300
True
"""
def download_catalogue(self):
"""Download and store locally the Runeberg catalogue."""
make_data_directory(data_directory(), 'runeberg')
filename = join(data_directory(), 'runeberg', CATALOGUE_FILENAME)
response = requests.get(CATALOGUE_URL)
with open(filename, 'w') as f:
f.write(response.content)
def catalogue(self):
"""Retrieve and parse Runeberg catalogue.
Returns
-------
books : pandas.DataFrame
Dataframe with book information.
"""
response = requests.get(CATALOGUE_URL)
flags = DOTALL | UNICODE
tables = findall(r'<table.*?</table>', response.text, flags=flags)
rows = findall(r'<tr.*?</tr>', tables[1], flags=flags)
books = []
for row in rows[1:]:
elements = findall('<td.*?</td>', row, flags=flags)
book_id, title = findall(r'/(.*?)/">(.*?)<',
elements[4], flags=flags)[0]
try:
author_id, author = findall(r'/authors/(.*?).html">(.*?)<',
elements[6], flags=flags)[0]
except:
author_id, author = '', ''
book = {
'type': findall(r'alt="(.*?)">', elements[0], flags=flags)[0],
'book_id': book_id,
'title': title,
'author_id': author_id,
'author': author,
'year': elements[8][15:-5],
'language': elements[10][-9:-7]
}
books.append(book)
return DataFrame(books)
def main():
"""Handle command-line interface."""
from docopt import docopt
arguments = docopt(__doc__)
if sys.stdout.encoding is None:
encoding = 'utf-8'
else:
encoding = sys.stdout.encoding
runeberg = Runeberg()
if arguments['download-catalogue']:
runeberg.download_catalogue()
elif arguments['catalogue-as-csv']:
print(runeberg.catalogue().to_csv(encoding=encoding))
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -952,16 +952,20 @@
%0A if
+len(
author_p
@@ -968,16 +968,17 @@
or_parts
+)
== 2:%0A
@@ -1740,32 +1740,49 @@
f catalogue(self
+, fix_author=True
):%0A %22%22%22Re
@@ -1928,16 +1928,166 @@
rmation.
+%0A fix_author : bool, optional%0A Determine if author names should be rearranged in firstname-surname%0A order %5Bdefault: True%5D
%0A%0A
@@ -2733,16 +2733,16 @@
except:%0A
-
@@ -2776,16 +2776,150 @@
'', ''%0A
+ if fix_author:%0A # fix_author name collision. TODO%0A author = globals()%5B'fix_author'%5D(author)%0A
|
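Besides plumbing the fix_author flag through catalogue(), the diff fixes a real bug: `if author_parts == 2:` compares a list to an int and is always False, so names were never reordered. The corrected helper in isolation:

def fix_author(author):
    """'Lybeck, Mikael' -> 'Mikael Lybeck'; anything else passes through."""
    author_parts = author.split(', ')
    if len(author_parts) == 2:  # the original compared the list itself to 2
        return author_parts[1] + ' ' + author_parts[0]
    return author


assert fix_author('Lybeck, Mikael') == 'Mikael Lybeck'
assert fix_author('Runeberg') == 'Runeberg'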
6ab60cc808bed0ae9cc98f3cf0c5b139e394b67c
|
Raise VM images exceptions appropriately
|
test/common/vmimages.py
|
test/common/vmimages.py
|
# -*- coding: utf-8 -*-
# This file is part of Cockpit.
#
# Copyright (C) 2013 Red Hat, Inc.
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import subprocess
import shutil
import tempfile
import time
from common import testinfra
BASE = testinfra.TEST_DIR
IMAGES = os.path.join(BASE, "images")
DATA = os.path.join(os.environ.get("TEST_DATA", BASE), "images")
DEVNULL = open("/dev/null", "r+")
CONFIG = "~/.config/image-stores"
DEFAULT = "https://fedorapeople.org/groups/cockpit/images/"
def download(link, force, stores):
if not os.path.exists(DATA):
os.makedirs(DATA)
dest = os.path.join(DATA, os.readlink(link))
# we have the file but there is not valid link
if os.path.exists(dest) and not os.path.exists(link):
os.symlink(dest, os.path.join(IMAGES, os.readlink(link)))
# file already exists
if not force and os.path.exists(dest):
return
if not stores:
config = os.path.expanduser(CONFIG)
if os.path.exists(config):
with open(config, 'r') as fp:
stores = fp.read().strip().split("\n")
else:
stores = []
stores.append(DEFAULT)
for store in stores:
try:
source = os.path.join(store, os.path.basename(dest)) + ".xz"
subprocess.check_call(["curl", "-s", "-f", "-I", source], stdout=DEVNULL)
break
except:
continue
sys.stderr.write("{0}\n".format(source))
(fd, temp) = tempfile.mkstemp(suffix=".partial", prefix=os.path.basename(dest), dir=DATA)
try:
curl = subprocess.Popen(["curl", "-#", "-f", source], stdout=subprocess.PIPE)
unxz = subprocess.Popen(["unxz", "--stdout", "-"], stdin=curl.stdout, stdout=fd)
curl.stdout.close()
ret = curl.wait()
if ret != 0:
raise("curl: unable to download image (returned: %s)" % ret)
ret = unxz.wait()
if ret != 0:
raise("unxz: unable to unpack image (returned: %s)" % ret)
os.close(fd)
shutil.move(temp, dest)
finally:
# if we had an error and the temp file is left over, delete it
if os.path.exists(temp):
os.unlink(temp)
# Handle alternate TEST_DATA
image_file = os.path.join(IMAGES, os.readlink(link))
if not os.path.exists(image_file):
os.symlink(os.path.abspath(dest), image_file)
def prune_images(force, dryrun):
now = time.time()
targets = []
for filename in os.listdir(IMAGES):
path = os.path.join(IMAGES, filename)
# only consider original image entries as trustworthy sources and ignore non-links
if path.endswith(".qcow2") or path.endswith(".partial") or not os.path.islink(path):
continue
target = os.readlink(path)
# if the path isn't absolute, it can resolve to either the images directory or here (might be the same)
if not os.path.isabs(target):
targets.append(os.path.join(IMAGES, target))
targets.append(os.path.join(DATA, target))
else:
targets.append(target)
for filename in os.listdir(DATA):
path = os.path.join(DATA, filename)
if not force and os.lstat(path).st_mtime > now - testinfra.IMAGE_EXPIRE * 86400:
continue
if os.path.isfile(path) and (path.endswith(".qcow2") or path.endswith(".partial")) and path not in targets:
sys.stderr.write("Pruning {0}\n".format(filename))
if not dryrun:
os.unlink(path)
# now prune broken links
for filename in os.listdir(IMAGES):
path = os.path.join(IMAGES, filename)
# don't prune original image entries and ignore non-links
if not path.endswith(".qcow2") or not os.path.islink(path):
continue
# if the link isn't valid, prune
if not os.path.isfile(path):
sys.stderr.write("Pruning link {0}\n".format(path))
if not dryrun:
os.unlink(path)
def every_image():
result = []
for filename in os.listdir(IMAGES):
link = os.path.join(IMAGES, filename)
if os.path.islink(link):
result.append(filename)
return result
def download_images(image_list, force, store):
for image in image_list:
link = os.path.join(IMAGES, image)
if not os.path.islink(link):
BaseException("image link does not exist: " + image)
download(link, force, store)
|
Python
| 0
|
@@ -2440,16 +2440,26 @@
raise
+ Exception
(%22curl:
@@ -2570,16 +2570,26 @@
raise
+ Exception
(%22unxz:
@@ -5017,12 +5017,14 @@
-Ba
+rai
se
+
Exce
|
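The diff repairs two distinct mistakes. `raise("...")` is just `raise "..."` with redundant parentheses, and strings are not exceptions, so it fails with a TypeError instead of the intended message; and `BaseException("...")` without `raise` builds the object and silently discards it. A sketch of the corrected pattern:

def check_download(ret):
    # Strings cannot be raised; construct and raise a real exception object.
    if ret != 0:
        raise Exception('curl: unable to download image (returned: %s)' % ret)


check_download(0)    # fine
# check_download(22) would raise: Exception('curl: ... (returned: 22)')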
034fa60d73468df21b6f75eb7a8130ab9a40cbae
|
Fix #3225
|
module/plugins/hoster/FilerNet.py
|
module/plugins/hoster/FilerNet.py
|
# -*- coding: utf-8 -*-
import os
import re
from ..captcha.ReCaptcha import ReCaptcha
from ..internal.SimpleHoster import SimpleHoster
class FilerNet(SimpleHoster):
__name__ = "FilerNet"
__type__ = "hoster"
__version__ = "0.27"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?filer\.net/get/\w+'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Filer.net hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "l.stickell@yahoo.it"),
("Walter Purcaro", "vuolter@gmail.com"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
INFO_PATTERN = r'<h1 class="page-header">Free Download (?P<N>\S+) <small>(?P<S>[\w.]+) (?P<U>[\w^_]+)</small></h1>'
OFFLINE_PATTERN = r'Nicht gefunden'
WAIT_PATTERN = r'var count = (\d+);'
LINK_FREE_PATTERN = LINK_PREMIUM_PATTERN = r'href="([^"]+)">Get download</a>'
def handle_free(self, pyfile):
inputs = self.parse_html_form(
input_names={'token': re.compile(r'.+')})[1]
if 'token' not in inputs:
self.error(_("Unable to detect token"))
self.data = self.load(pyfile.url, post={'token': inputs['token']})
inputs = self.parse_html_form(input_names={'hash': re.compile(r'.+')})[1]
if 'hash' not in inputs:
self.error(_("Unable to detect hash"))
self.captcha = ReCaptcha(pyfile)
response, challenge = self.captcha.challenge()
#: Avoid 'Correct catcha'
captcha_task = self.captcha.task
self.captcha.task = None
self.download(pyfile.url,
post={'g-recaptcha-response': response,
'hash': inputs['hash']})
#: Restore the captcha task
self.captcha.task = captcha_task
if self.scan_download(
{'html': re.compile(r'\A\s*<!DOCTYPE html')}) == "html":
self.log_warning(
_("There was HTML code in the downloaded file (%s)...bad captcha? The download will be restarted." %
self.pyfile.name))
os.remove(self.last_download)
self.retry_captcha()
else:
self.captcha.correct()
|
Python
| 0
|
@@ -238,9 +238,9 @@
%220.2
-7
+8
%22%0A
@@ -1345,29 +1345,16 @@
ml_form(
-%0A
input_na
@@ -1401,16 +1401,34 @@
if
+inputs is None or
'token'
@@ -1463,41 +1463,14 @@
elf.
-error(_(%22Unable to detect token%22)
+retry(
)%0A%0A
@@ -1637,16 +1637,34 @@
if
+inputs is None or
'hash' n
@@ -2193,33 +2193,16 @@
ownload(
-%0A
%7B'html':
|
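parse_html_form can return None in the inputs slot when no matching form is found, and `'token' not in None` raises a TypeError, so the guard has to test for None first; the fix also swaps error() for retry(). A generic sketch of the guard; the names are illustrative:

def extract_field(inputs, name):
    # Guard against None before the membership test:
    # `name in None` raises TypeError rather than returning False.
    if inputs is None or name not in inputs:
        return None
    return inputs[name]


assert extract_field(None, 'token') is None
assert extract_field({'token': 'abc'}, 'token') == 'abc'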
5514b8345e575732985643c70b01c220a80b9a0b
|
Set a custom timeout for the Elasticsearch indexer. (#51)
|
indexer_util/indexer_util/indexer_util.py
|
indexer_util/indexer_util/indexer_util.py
|
"""Utilities for Data Explorer indexers"""
import jsmin
import json
import logging
import os
import time
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionError
from elasticsearch.helpers import bulk
# Log to stderr.
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(filename)10s:%(lineno)s %(levelname)s %(message)s',
datefmt='%Y%m%d%H:%M:%S')
logger = logging.getLogger('indexer.util')
ES_TIMEOUT_SEC = 20
def parse_json_file(json_path):
"""Opens and returns JSON contents.
Args:
json_path: Relative or absolute path of JSON file
Returns:
Parsed JSON
"""
with open(json_path, 'r') as f:
# Remove comments using jsmin, as recommended by JSON creator
# (https://plus.google.com/+DouglasCrockfordEsq/posts/RK8qyGVaGSr).
jsonDict = json.loads(jsmin.jsmin(f.read()))
return jsonDict
# Keep in sync with convert_to_index_name() in data-explorer repo.
def _convert_to_index_name(s):
"""Converts a string to an Elasticsearch index name."""
# For Elasticsearch index name restrictions, see
# https://github.com/DataBiosphere/data-explorer-indexers/issues/5#issue-308168951
# Elasticsearch allows single quote in index names. However, they cause other
# problems. For example,
# "curl -XDELETE http://localhost:9200/nurse's_health_study" doesn't work.
# So also remove single quotes.
prohibited_chars = [
' ', '"', '*', '\\', '<', '|', ',', '>', '/', '?', '\''
]
for char in prohibited_chars:
s = s.replace(char, '_')
s = s.lower()
# Remove leading underscore.
if s.find('_', 0, 1) == 0:
s = s.lstrip('_')
print('Index name: %s' % s)
return s
def get_index_name(dataset_config_dir):
json_path = os.path.join(dataset_config_dir, 'dataset.json')
dataset_name = parse_json_file(json_path)['name']
return _convert_to_index_name(dataset_name)
def _wait_elasticsearch_healthy(es):
"""Waits for Elasticsearch to be healthy.
Args:
es: An Elasticsearch instance.
"""
# Don't print NewConnectionError's while we're waiting for Elasticsearch
# to come up.
start = time.time()
logging.getLogger("elasticsearch").setLevel(logging.ERROR)
for _ in range(0, ES_TIMEOUT_SEC):
try:
es.cluster.health(wait_for_status='yellow')
print('Elasticsearch took %d seconds to come up.' %
(time.time() - start))
break
except ConnectionError:
print('Elasticsearch not up yet, will try again.')
time.sleep(1)
else:
raise EnvironmentError("Elasticsearch failed to start.")
logging.getLogger("elasticsearch").setLevel(logging.INFO)
def maybe_create_elasticsearch_index(elasticsearch_url, index_name):
"""Creates Elasticsearchindex if it doesn't already exist."""
es = Elasticsearch([elasticsearch_url])
_wait_elasticsearch_healthy(es)
if es.indices.exists(index=index_name):
logger.info(
'Using existing %s index at %s.' % (index_name, elasticsearch_url))
else:
logger.info(
'Creating %s index at %s.' % (index_name, elasticsearch_url))
es.indices.create(index=index_name, body={})
return es
def bulk_index(es, index_name, docs_by_id):
# Use generator so we can index arbitrarily large iterators (like tables),
# without having to load into memory.
def es_actions(docs_by_id):
for _id, doc in docs_by_id:
yield ({
'_op_type': 'update',
'_index': index_name,
# type will go away in future versions of Elasticsearch. Just
# use any string here.
'_type': 'type',
'_id': _id,
'doc': doc,
'doc_as_upsert': True
})
bulk(es, es_actions(docs_by_id))
|
Python
| 0
|
@@ -3889,16 +3889,108 @@
%7D)%0A%0A
+ # For writing large amounts of data, the default timeout of 10s is sometimes not enough%0A
bulk
@@ -4016,10 +4016,30 @@
s_by_id)
+, request_timeout=60
)%0A
|
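elasticsearch.helpers.bulk forwards extra keyword arguments to the underlying client call, so request_timeout=60 raises the per-request cap above the client default of 10 seconds for these large writes. A minimal sketch; the URL and index name are placeholders:

from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk

es = Elasticsearch(['http://localhost:9200'])  # placeholder URL

actions = ({'_op_type': 'index', '_index': 'demo', '_type': 'type',
            '_id': i, '_source': {'n': i}} for i in range(100000))

# Extra kwargs pass through helpers.bulk to es.bulk, so each bulk request
# gets 60 seconds instead of the default 10 before timing out.
bulk(es, actions, request_timeout=60)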
518443854f7ef4466885d88cf7b379c626692da1
|
Add PlannedBudgetLimits to Budgets::Budget BudgetData
|
troposphere/budgets.py
|
troposphere/budgets.py
|
# Copyright (c) 2012-2018, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty
from .validators import boolean
class Spend(AWSProperty):
props = {
'Amount': (float, True),
'Unit': (basestring, True),
}
class CostTypes(AWSProperty):
props = {
'IncludeCredit': (boolean, False),
'IncludeDiscount': (boolean, False),
'IncludeOtherSubscription': (boolean, False),
'IncludeRecurring': (boolean, False),
'IncludeRefund': (boolean, False),
'IncludeSubscription': (boolean, False),
'IncludeSupport': (boolean, False),
'IncludeTax': (boolean, False),
'IncludeUpfront': (boolean, False),
'UseAmortized': (boolean, False),
'UseBlended': (boolean, False),
}
class TimePeriod(AWSProperty):
props = {
'End': (basestring, False),
'Start': (basestring, False),
}
class BudgetData(AWSProperty):
props = {
'BudgetLimit': (Spend, False),
'BudgetName': (basestring, False),
'BudgetType': (basestring, True),
'CostFilters': (dict, False),
'CostTypes': (CostTypes, False),
'TimePeriod': (TimePeriod, False),
'TimeUnit': (basestring, True),
}
class Notification(AWSProperty):
props = {
'ComparisonOperator': (basestring, True),
'NotificationType': (basestring, True),
'Threshold': (float, True),
'ThresholdType': (basestring, False),
}
class Subscriber(AWSProperty):
props = {
'Address': (basestring, True),
'SubscriptionType': (basestring, True),
}
class NotificationWithSubscribers(AWSProperty):
props = {
'Notification': (Notification, True),
'Subscribers': ([Subscriber], True),
}
class Budget(AWSObject):
resource_type = "AWS::Budgets::Budget"
props = {
'Budget': (BudgetData, True),
'NotificationsWithSubscribers':
([NotificationWithSubscribers], False),
}
|
Python
| 0.000006
|
@@ -21,9 +21,9 @@
-201
-8
+9
, Ma
@@ -108,16 +108,112 @@
icense.%0A
+#%0A# *** Do not modify - this file is autogenerated ***%0A# Resource specification version: 8.0.0%0A%0A
%0Afrom .
@@ -228,17 +228,30 @@
WSObject
-,
+%0Afrom . import
AWSProp
@@ -291,124 +291,38 @@
ean%0A
-%0A%0Aclass Spend(AWSProperty):%0A props = %7B%0A 'Amount': (float, True),%0A 'Unit': (basestring, True),%0A %7D
+from .validators import double
%0A%0A%0Ac
@@ -858,24 +858,142 @@
e),%0A %7D%0A%0A%0A
+class Spend(AWSProperty):%0A props = %7B%0A 'Amount': (double, True),%0A 'Unit': (basestring, True),%0A %7D%0A%0A%0A
class TimePe
@@ -1347,32 +1347,78 @@
tTypes, False),%0A
+ 'PlannedBudgetLimits': (dict, False),%0A
'TimePer
@@ -1663,13 +1663,14 @@
': (
-float
+double
, Tr
|
f612fafa3e4d7352b64d993390fb074686fe46b7
|
Update slack post format
|
chainer/ya/utils/slack.py
|
chainer/ya/utils/slack.py
|
import os
import requests
import json
from chainer.training import extension
class SlackPost(extension.Extension):
def __init__(self, token, channel, **kwargs):
self.token = token
self.channel = channel
self.priority = 50
def initialize(self, trainer):
try:
plot_report = trainer.get_extension("PlotReport")
except:
pass
else:
self.plotfilepath = os.path.join(trainer.out,
plot_report._file_name)
try:
args = trainer.get_extension("ArgumentBackup").args
except:
pass
else:
self.args = ["{}:\t{}".format(k,getattr(args, k))
for k in vars(args)]
def finalize(self):
msgs = ["Training finished"]
attachments = []
if hasattr(self, "args"):
msgs += self.args
if hasattr(self, "plotfilepath"):
data = {
"token": self.token,
"channels": self.channel,
"initial_comment": "\n".join(msgs),
}
files = {'file': open(self.plotfilepath, 'rb')}
requests.post("https://slack.com/api/files.upload",
data=data, files=files)
else:
data = {
"token": self.token,
"channel": self.channel,
"as_user": False,
"text": "\n".join(msgs),
"icon_url": "https://chainer.org/images/chainer_icon_red.png",
"unfurl_media": True,
"attachments": json.dumps(attachments),
"username": "Chainer Result",
}
requests.post("https://slack.com/api/chat.postMessage",
data=data)
|
Python
| 0
|
@@ -1,12 +1,24 @@
+import json%0A
import os%0Aim
@@ -15,16 +15,17 @@
port os%0A
+%0A
import r
@@ -32,28 +32,16 @@
equests%0A
-import json%0A
from cha
@@ -111,17 +111,16 @@
nsion):%0A
-%0A
def
@@ -681,16 +681,33 @@
args = %5B
+%0A
%22%7B%7D:%5Ct%7B%7D
@@ -717,16 +717,17 @@
ormat(k,
+
getattr(
@@ -739,33 +739,8 @@
k))
-%0A
for
@@ -755,16 +755,29 @@
rs(args)
+%0A
%5D%0A%0A d
@@ -1107,32 +1107,157 @@
%5Cn%22.join(msgs),%0A
+ %22icon_url%22: %22https://chainer.org/images/chainer_icon_red.png%22,%0A %22username%22: %22Chainer Result%22,%0A
%7D%0A
@@ -1317,32 +1317,38 @@
b')%7D%0A
+ ret =
requests.post(%22
@@ -1338,32 +1338,49 @@
= requests.post(
+%0A
%22https://slack.c
@@ -1404,58 +1404,266 @@
ad%22,
-%0A data=data, files=files)
+ data=data, files=files)%0A obj = ret.json()%0A attachments = %5B%7B%0A %22fallback%22: %22plot%22,%0A %22color%22: %22good%22,%0A %22title%22: %22Result%22,%0A %22image_url%22: obj%5B%22file%22%5D%5B%22url_private%22%5D,%0A %7D%5D
%0A
@@ -2091,16 +2091,22 @@
+ ret =
request
@@ -2112,16 +2112,33 @@
ts.post(
+%0A
%22https:/
@@ -2174,34 +2174,8 @@
ge%22,
-%0A
dat
|
48412195e020c7f2a549deb869d98f6a366d9552
|
improve workflow conversion
|
cwlupgrader/main.py
|
cwlupgrader/main.py
|
#!/usr/bin/env python
from __future__ import print_function
import ruamel.yaml
from typing import Any, Dict, List, Union
from collections import Mapping, MutableMapping, Sequence
import sys
import copy
def main(): # type: () -> int
for path in sys.argv[1:]:
with open(path) as entry:
document = ruamel.yaml.round_trip_load(entry)
if ('cwlVersion' in document
and document['cwlVersion'] == 'cwl:draft-3'):
draft3_to_v1_0(document)
else:
print("Skipping non draft-3 CWL document", file=sys.stderr)
print(ruamel.yaml.round_trip_dump(document))
return 0
def draft3_to_v1_0(document): # type: (Dict[str, Any]) -> None
_draft3_to_v1_0(document)
document['cwlVersion'] = 'v1.0'
def _draft3_to_v1_0(document):
# type: (MutableMapping[str, Any]) -> MutableMapping[str, Any]
if "class" in document:
if document["class"] == "Workflow":
for out in document["outputs"]:
out["outputSource"] = out["source"]
del out["source"]
elif document["class"] == "File":
document["location"] = document["path"]
del document["path"]
elif document["class"] == "CreateFileRequirement":
document["class"] = "InitialWorkDirRequirement"
document["listing"] = []
for filedef in document["fileDef"]:
document["listing"].append({
"entryname": filedef["filename"],
"entry": filedef["fileContent"]
})
del document["fileDef"]
elif document["class"] == "CommandLineTool":
setupCLTMappings(document)
if "secondaryFiles" in document:
for i, sf in enumerate(document["secondaryFiles"]):
if "$(" in sf or "${" in sf:
document["secondaryFiles"][i] = sf.replace(
'"path"', '"location"').replace(".path", ".location")
if "description" in document:
document["doc"] = document["description"]
del document["description"]
if isinstance(document, MutableMapping):
for key, value in document.items():
if isinstance(value, MutableMapping):
document[key] = _draft3_to_v1_0(value)
elif isinstance(value, list):
for index, entry in enumerate(value):
if isinstance(entry, MutableMapping):
value[index] = _draft3_to_v1_0(entry)
return document
def setupCLTMappings(document): # type: (MutableMapping[str, Any]) -> None
for paramType in ['inputs', 'outputs']:
params = {}
for param in document[paramType]:
paramID = param['id'].lstrip('#')
param['type'] = shortenType(param['type'])
if len(param) == 2 and 'type' in param:
params[paramID] = param['type']
else:
del param['id']
params[paramID] = param
document[paramType] = params
def shortenType(typeObj):
# type: (List[Any]) -> Union[str, List[Any]]
if isinstance(typeObj, str) or not isinstance(typeObj, Sequence):
return typeObj
newType = []
for entry in typeObj: # find arrays that we can shorten and do so
if isinstance(entry, Mapping):
if (entry['type'] == 'array' and
isinstance(entry['items'], str)):
entry = entry['items'] + '[]'
newType.extend([entry])
typeObj = newType
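# A two-element union containing 'null' is an optional type; CWL v1.0
# allows the shorthand 'X?' in place of ['null', X].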
if len(typeObj) == 2:
if 'null' in typeObj:
typeCopy = copy.deepcopy(typeObj)
typeCopy.remove('null')
if isinstance(typeCopy[0], str):
return typeCopy[0] + '?'
return typeObj
if __name__ == "__main__":
sys.exit(main())
|
Python
| 0.000002
|
@@ -962,16 +962,55 @@
kflow%22:%0A
+ inputOutputClean(document)%0A
@@ -1082,33 +1082,37 @@
urce%22%5D = out
-%5B
+.pop(
%22source%22
%5D%0A
@@ -1103,43 +1103,157 @@
rce%22
-%5D%0A del out%5B%22source
+).lstrip('#')%0A for step in document%5B%22steps%22%5D:%0A step%5B%22out%22%5D = step.pop(%22outputs%22)%0A for inp in step%5B%22inputs
%22%5D
+:
%0A
@@ -1249,32 +1249,390 @@
puts%22%5D:%0A
+ inp%5B%22id%22%5D = inp%5B%22id%22%5D%5Blen(step%5B%22id%22%5D)+1:%5D # remove step id prefix%0A inp%5B%22source%22%5D = inp%5B%22source%22%5D.lstrip('#')%0A step%5B%22in%22%5D = step.pop(%22inputs%22)%0A if %22scatter%22 in step:%0A step%5B%22scatter%22%5D = step%5B%22scatter%22%5D%5B # remove step prefix%0A len(step%5B%22id%22%5D)*2+3:%5D%0A
elif document%5B%22c
@@ -1687,41 +1687,8 @@
%22%5D =
- document%5B%22path%22%5D%0A del
doc
@@ -1696,16 +1696,20 @@
ment
-%5B
+.pop(
%22path%22
-%5D
+)
%0A
@@ -2180,32 +2180,32 @@
-setupCLTMappings
+inputOutputClean
(documen
@@ -3042,24 +3042,24 @@
def
-setupCLTMappings
+inputOutputClean
(doc
@@ -3158,28 +3158,8 @@
'%5D:%0A
- params = %7B%7D%0A
@@ -3213,18 +3213,22 @@
param
-ID
+%5B'id'%5D
= param
@@ -3254,24 +3254,60 @@
+if 'type' in param:%0A
param%5B'type'
@@ -3340,235 +3340,8 @@
e'%5D)
-%0A if len(param) == 2 and 'type' in param:%0A params%5BparamID%5D = param%5B'type'%5D%0A else:%0A del param%5B'id'%5D%0A params%5BparamID%5D = param%0A document%5BparamType%5D = params
%0A%0Ade
@@ -4076,16 +4076,17 @@
ypeObj%0A%0A
+%0A
if __nam
|
ba84f4a1b11f486d211254721397be43f8c9b07a
|
update __manifest__.py
|
tko_coexiste_coa/__manifest__.py
|
tko_coexiste_coa/__manifest__.py
|
# -*- coding: utf-8 -*-
# © 2017 TKO <http://tko.tko-br.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': 'Plano de Contas Brasileiro',
'summary': '',
'description': 'Plano de contas brasileiro adaptável a qualquer segmento.',
'author': 'TKO',
'category': 'l10n_br',
'license': 'AGPL-3',
'website': 'http://tko.tko-br.com',
'version': '10.0.0.0.0',
'application': False,
'installable': True,
'auto_install': False,
'depends': [
'account',
'br_account',
'account_parent',
],
'external_dependencies': {
'python': [],
'bin': [],
},
'init_xml': [],
'update_xml': [],
'css': [],
'demo_xml': [],
'test': [],
'data': [
'data/chart_data.xml',
'data/account.account.template.csv',
'data/chart_data_properties.xml',
# TODO Separate proprities for products vs. services (enhance data/chart_data_properties.xml)
# TODO Criar Contas Pai
# TODO Create & Import l10n_br Taxes
],
}
|
Python
| 0.000059
|
@@ -755,24 +755,66 @@
'data': %5B%0A
+ 'data/chart_data_properties.xml',%0A
'dat
@@ -881,49 +881,8 @@
v',%0A
- 'data/chart_data_properties.xml',
%0A
|
febb2e9369a706d7319d89851cac3dc9a1fd167e
|
add source of kyoko image
|
tsundiary/jinja_env.py
|
tsundiary/jinja_env.py
|
from tsundiary import app
app.jinja_env.globals.update(theme_nicename = {
'classic': 'Classic Orange',
'minimal': 'Minimal Black/Grey',
'misato-tachibana': 'Misato Tachibana',
'rei-ayanami': 'Rei Ayanami',
'saya': 'Saya',
'yuno': 'Yuno Gasai',
'kyoko-sakura': 'Kyoko Sakura',
'colorful': 'Based on favorite color'
})
app.jinja_env.globals.update(themes = ['classic', 'minimal', 'misato-tachibana', 'rei-ayanami', 'saya', 'yuno', 'colorful'])
app.jinja_env.globals.update(theme_creds = {
'misato-tachibana': '<a href="http://konachan.com/post/show/102801">Misato Tachibana vector source</a>',
'rei-ayanami': '<a href="http://megadud20.deviantart.com/art/Rei-Ayanami-Vector-214547575">Rei vector source</a>',
'saya': '<a href="http://www.zerochan.net/671274">Saya source</a>',
'kyoko-sakura': "An artist drew this Kyoko, I'm sure."
})
app.jinja_env.globals.update(theme_colors = [
('Red', '0,100,100'),
('Orange', '35,100,100'),
('Yellow', '50,100,100'),
('Green', '120,100,80'),
('Cyan', '180,100,80'),
('Blue', '215,100,100'),
('Purple', '270,100,100'),
('Black', '0,0,0'),
('Grey', '0,0,70'),
('White', '0,0,100'),
('Saya Green', '152,100,100'),
('Tsundiary Orange', '17,100,100'),
])
|
Python
| 0
|
@@ -838,46 +838,93 @@
a':
-%22An artist drew this Kyoko, I'm
+'%3Ca href=%22http://3071527.deviantart.com/art/kyoko-sakura-376238110%22%3EKyoko
s
+o
ur
-e.%22
+ce%3C/a%3E'
%0A%7D)%0A
|
fb9e2ec66f2c80b60ae565665f091b0ee47843a9
|
Remove six lib from install script
|
docs/scripts/install.py
|
docs/scripts/install.py
|
#!/usr/bin/env python
'''
File name: install.py
Author: Tim Anema
Date created: Sep 29, 2016
Date last modified: Nov 19 2020
Python Version: 2.x, 3.x
Description: Install script for themekit. It will download a release and make it executable
'''
import os, json, sys, hashlib
from six.moves.urllib.request import urlopen
class Installer(object):
LATEST_RELEASE_URL = "https://shopify-themekit.s3.amazonaws.com/releases/latest.json"
ARCH_MAPPING = {
"darwin x86_64": "darwin-amd64",
"darwin i386": "darwin-386",
"linux x86_64": "linux-amd64",
"linux i386": "linux-386",
"freebsd x86_64": "freebsd-amd64",
"freebsd i386": "freebsd-386"
}
def __init__(self, path="/usr/local/bin"):
self.install_path = os.path.expanduser(path)
self.bin_path = "%s/theme" % self.install_path
self.arch = self.__getArch()
print("Fetching release data")
self.release = json.loads(urlopen(Installer.LATEST_RELEASE_URL).read().decode("utf-8"))
print("Downloading version %s of Shopify Themekit" % self.release['version'])
self.__download()
print("Theme Kit has been installed at %s" % self.bin_path)
print('To verify themekit is working simply type "theme"')
def __getArch(self):
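# Builds a key like "linux x86_64" from uname output to look up in ARCH_MAPPING.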
pipe = os.popen("echo \"$(uname) $(uname -m)\"")
arch_name = pipe.readline().strip().lower()
pipe.close()
if arch_name not in Installer.ARCH_MAPPING:
print("Cannot find binary to match your architecture [%s]" % arch_name)
sys.exit("Please open an issue at https://github.com/Shopify/themekit/issues")
return Installer.ARCH_MAPPING[arch_name]
def __findReleasePlatform(self):
for index, platform in enumerate(self.release['platforms']):
if platform['name'] == self.arch:
return platform
def __download(self):
platform = self.__findReleasePlatform()
data = urlopen(platform['url']).read()
if hashlib.md5(data).hexdigest() != platform['digest']:
sys.exit("Downloaded binary did not match checksum.")
else:
print("Validated binary checksum")
if not os.path.exists(self.install_path):
os.makedirs(self.install_path)
with open(self.bin_path, "wb") as themefile:
themefile.write(data)
os.chmod(self.bin_path, 0o755)
Installer()
|
Python
| 0
|
@@ -45,12 +45,9 @@
tall
-.py
%0A
+
@@ -123,19 +123,19 @@
ed:
-Nov 19 2020
+Sep 14 2018
%0A
@@ -157,14 +157,9 @@
: 2.
-x, 3.x
+7
%0A
@@ -290,54 +290,8 @@
ib%0A%0A
-from six.moves.urllib.request import urlopen%0A%0A
clas
@@ -927,23 +927,26 @@
n.loads(
-urlopen
+self.__req
(Install
@@ -972,15 +972,8 @@
RL).
-read().
deco
@@ -1943,15 +1943,18 @@
a =
-urlopen
+self.__req
(pla
@@ -1970,15 +1970,8 @@
l'%5D)
-.read()
%0A
@@ -2338,16 +2338,16 @@
e(data)%0A
-
@@ -2378,16 +2378,253 @@
0o755)%0A%0A
+ def __req(self, url):%0A if sys.version_info%5B0%5D %3C 3:%0A import urllib%0A return urllib.urlopen(url).read()%0A else:%0A import urllib.request%0A return urllib.request.urlopen(url).read()%0A%0A
Installe
|
ec74aa53c889c854f9dc056578748201e69db0fb
|
Add basic logging support
|
tweak/__init__.py
|
tweak/__init__.py
|
#!/usr/bin/env python3
from __future__ import print_function, unicode_literals, division, absolute_import
import os, sys, json, errno, collections, atexit
class Config(collections.MutableMapping):
"""
Provides a self-contained (no dependencies outside the standard library), Python 2 and 3 compatible configuration
manager. Automatically saves and restores your application's configuration in your user home directory. Uses JSON
or optionally YAML for serialization. Supports dict-like methods and access semantics.
Examples:
config = Config()
config.host, config.port = "example.com", 9000
config.nested_config = {}
config.nested_config.foo = True
After restarting your application:
config = Config()
print(config)
>>> {'host': 'example.com', 'port': 9000, 'nested_config': {'foo': True}}
"""
_config_home = os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
def __init__(self, name=os.path.basename(__file__), save_on_exit=True, autosave=False, use_yaml=False, _parent=None, _data=None):
"""
:param name:
Name of the application that this config belongs to. This will be used as the name of the config directory.
:param save_on_exit: If True, save() will be called at Python interpreter exit (using an atexit handler).
:param autosave: If True, save() will be called after each attribute assignment.
"""
self._config_dir = os.path.join(self._config_home, name)
self._use_yaml = use_yaml
self._config_file = os.path.join(self._config_dir, "config.yml" if use_yaml else "config.json")
if save_on_exit:
atexit.register(self.save)
self._save_on_exit, self._autosave = save_on_exit, autosave
self._parent = _parent
if self._parent is None:
try:
with open(self._config_file) as fh:
self._load(fh)
except Exception:
self._data = {}
else:
self._data = _data
def _load(self, stream):
if self._use_yaml:
import yaml
class ConfigLoader(yaml.Loader):
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return self._as_config(yaml.Loader.construct_mapping(loader, node))
ConfigLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, ConfigLoader.construct_mapping)
self._data = yaml.load(stream, ConfigLoader) or {}
else:
self._data = json.load(stream, object_hook=self._as_config)
def _dump(self, stream):
if self._use_yaml:
import yaml
def config_representer(dumper, obj):
return dumper.represent_mapping(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, obj._data.items())
yaml.add_representer(self.__class__, config_representer)
yaml.dump(self._data, stream)
else:
json.dump(self._data, stream, default=lambda obj: obj._data)
def _as_config(self, d):
if isinstance(d, collections.MutableMapping):
return Config(save_on_exit=self._save_on_exit, autosave=self._autosave, _parent=self, _data=d)
return d
def save(self, mode=0o600):
"""
Serialize the config data to the user home directory.
:param mode: The octal Unix mode (permissions) for the config file.
"""
if self._parent is not None:
self._parent.save(mode=mode)
else:
try:
os.makedirs(self._config_dir)
except OSError as e:
if not (e.errno == errno.EEXIST and os.path.isdir(self._config_dir)):
raise
with open(self._config_file, "wb" if sys.version_info < (3, 0) else "w") as fh:
self._dump(fh)
os.chmod(self._config_file, mode)
def __getitem__(self, item):
if item not in self._data:
raise KeyError(item)
return self._data[item]
def __setitem__(self, key, value):
self._data[key] = self._as_config(value)
if self._autosave:
self.save()
def __getattr__(self, attr):
return self.__getitem__(attr)
def __setattr__(self, attr, value):
if attr.startswith("_"):
self.__dict__[attr] = value
else:
self.__setitem__(attr, value)
def __delitem__(self, key):
del self._data[key]
def __iter__(self):
for item in self._data:
yield item
def __len__(self):
return len(self._data)
def __repr__(self):
return repr(self._data)
|
Python
| 0
|
@@ -149,17 +149,26 @@
, atexit
+, logging
%0A
-
%0Aclass C
@@ -972,16 +972,58 @@
onfig%22))
+%0A _logger = logging.getLogger(__name__)
%0A%0A de
@@ -2017,24 +2017,113 @@
f._load(fh)%0A
+ self._logger.info(%22Loaded configuration from %25s%22, self._config_file)%0A
@@ -4069,24 +4069,24 @@
f._dump(fh)%0A
-
@@ -4118,16 +4118,88 @@
e, mode)
+%0A self._logger.debug(%22Saved config to %25s%22, self._config_file)
%0A%0A de
|
3cf93f7f640ef04a1be31d515c19cffec19cec45
|
Remove logging import unused
|
searchlightclient/osc/plugin.py
|
searchlightclient/osc/plugin.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
from osc_lib import utils
LOG = logging.getLogger(__name__)
DEFAULT_SEARCH_API_VERSION = '1'
API_VERSION_OPTION = 'os_search_api_version'
API_NAME = 'search'
API_VERSIONS = {
'1': 'searchlightclient.v1.client.Client',
}
def make_client(instance):
"""Returns a search service client"""
search_client = utils.get_client_class(
API_NAME,
instance._api_version[API_NAME],
API_VERSIONS)
# Set client http_log_debug to True if verbosity level is high enough
http_log_debug = utils.get_effective_log_level() <= logging.DEBUG
# Remember interface only if it is set
kwargs = utils.build_kwargs_dict('endpoint_type', instance._interface)
client = search_client(
session=instance.session,
http_log_debug=http_log_debug,
region_name=instance._region_name,
**kwargs
)
return client
def build_option_parser(parser):
"""Hook to add global options"""
parser.add_argument(
'--os-search-api-version',
metavar='<search-api-version>',
default=utils.env(
'OS_SEARCH_API_VERSION',
default=DEFAULT_SEARCH_API_VERSION),
help='Search API version, default=' +
DEFAULT_SEARCH_API_VERSION +
' (Env: OS_SEARCH_API_VERSION)')
return parser
|
Python
| 0.000001
|
@@ -606,43 +606,8 @@
ls%0A%0A
-LOG = logging.getLogger(__name__)%0A%0A
DEFA
|
18d66a1325e9c8825c4b33ea5438fe0ec8fcab33
|
Don't swallow the underlying decrypt error
|
decrypt-windows-ec2-passwd.py
|
decrypt-windows-ec2-passwd.py
|
#!/usr/bin/env python
import base64, binascii, getpass, optparse, sys
from Crypto.PublicKey import RSA
def pkcs1_unpad(text):
# From http://kfalck.net/2011/03/07/decoding-pkcs1-padding-in-python
if len(text) > 0 and text[0] == '\x02':
# Find end of padding marked by nul
pos = text.find('\x00')
if pos > 0:
return text[pos+1:]
return None
def long_to_bytes (val, endianness='big'):
# From http://stackoverflow.com/questions/8730927/convert-python-long-int-to-fixed-size-byte-array
# one (1) hex digit per four (4) bits
try:
#Python < 2.7 doesn't have bit_length =(
width = val.bit_length()
except:
width = len(val.__hex__()[2:-1]) * 4
# unhexlify wants an even multiple of eight (8) bits, but we don't
# want more digits than we need (hence the ternary-ish 'or')
width += 8 - ((width % 8) or 8)
# format width specifier: four (4) bits per hex digit
fmt = '%%0%dx' % (width // 4)
# prepend zero (0) to the width, to zero-pad the output
s = binascii.unhexlify(fmt % val)
if endianness == 'little':
# see http://stackoverflow.com/a/931095/309233
s = s[::-1]
return s
def decryptPassword(rsaKey, password):
# Undo the transport encoding: base64-decode, then read the bytes as an integer
encryptedData = base64.b64decode(password)
ciphertext = int(binascii.hexlify(encryptedData), 16)
#Decrypt it
plaintext = rsaKey.decrypt(ciphertext)
#This is the annoying part. long -> byte array
decryptedData = long_to_bytes(plaintext)
#Now Unpad it
unpaddedData = pkcs1_unpad(decryptedData)
#Done
return unpaddedData
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("-k", "--key", dest="keyfile", default="~/.ssh/id_rsa", help="location of your ssh private key")
parser.add_option("-p", "--password", dest="password", help="encrypted password")
(options, args) = parser.parse_args()
if not options.keyfile or not options.password:
parser.print_help()
sys.exit(-1)
#Open your keyfile
try:
keyFile = open(options.keyfile)
except:
print "Could not find file", options.keyfile
sys.exit(-1)
#Read file
keyLines = keyFile.readlines()
#Import it
try:
key = RSA.importKey(keyLines, passphrase=getpass.getpass('Encrypted Key Password (leave blank if none): '))
except ValueError:
print "Could not import SSH Key (Is it an RSA key? Is it password protected?)"
sys.exit(-1)
#Decrypt it
print ""
print "Password:", decryptPassword(key, options.password)
|
Python
| 0.998796
|
@@ -2574,16 +2574,20 @@
lueError
+, ex
:%0D%0A
@@ -2666,17 +2666,26 @@
tected?)
-%22
+: %25s%22 %25 ex
%0D%0A
|
57cb5546d0e832bae8b2171d42fc4428ebc6dc74
|
add try for imports
|
tumb_borg/authorize.py
|
tumb_borg/authorize.py
|
#!/usr/bin/python
from tumblpy import Tumblpy as T
from urlparse import urlparse, parse_qs
def authorize(KEY, SECRET, CALLBACK):
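# Three-legged OAuth 1.0a flow: fetch a request token, send the user to
# Tumblr to approve it, then trade the verifier for access tokens.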
def get_authorization_properties():
t = T(KEY, SECRET)
return t \
.get_authentication_tokens(
callback_url=CALLBACK)
auth_p = get_authorization_properties()
def get_auth_url():
print('Please connect with Tumblr via: \n%s' \
% auth_p['auth_url'])
result_url = \
raw_input("Copy and paste the accepting url: ")
return result_url
def query_string(url):
return { k: v[0] for k, v in
parse_qs(urlparse(url).query).items() }
def query_string_auth():
return query_string(get_auth_url())
def authorized_tokens():
q = query_string_auth()
t = T(KEY, SECRET,
q['oauth_token'],
auth_p['oauth_token_secret'])
return t.get_authorized_tokens(q['oauth_verifier'])
def authorized_t():
a = authorized_tokens()
return T(KEY, SECRET,
a['oauth_token'],
a['oauth_token_secret'])
return authorized_t()
|
Python
| 0
|
@@ -44,16 +44,93 @@
py as T%0A
+try:%0A from urllib.parse import urlparse, parse_qs%0Aexcept ImportError:%0A
from url
|
6e4ec337bf4c64768cd81df581c4dcf34131599d
|
use nano2sec, issue #33
|
buildtimetrend/stages.py
|
buildtimetrend/stages.py
|
# vim: set expandtab sw=4 ts=4:
'''
Reads timestamps.csv, calculates stage duration and saves the result
to an xml file
Copyright (C) 2014 Dieter Adriaenssens <ruleant@users.sourceforge.net>
This file is part of buildtime-trend
<https://github.com/ruleant/buildtime-trend/>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import csv
from buildtimetrend.tools import split_timestamp
from buildtimetrend.tools import check_file
from lxml import etree
class Stages(object):
'''
Build stages object.
It gathers timestamps from a csv file and calculates stage duration.
Output stages in xml format.
'''
def __init__(self):
self.stages = []
self.started_at = None
self.finished_at = None
def read_csv(self, csv_filename):
'''
Gathers timestamps from a csv file and calculates stage duration.
Parameters :
- csv_filename : csv filename containing timestamps
Returns false if file doesn't exist, true if it was read successfully.
'''
# load timestamps file
if not check_file(csv_filename):
return False
# read timestamps, calculate stage duration
with open(csv_filename, 'rb') as csv_data:
timestamps = csv.reader(csv_data, delimiter=',', quotechar='"')
self.parse_timestamps(timestamps)
return True
def total_duration(self):
'''Calculate total duration of all stages'''
total_duration = 0
# calculate total duration
for stage in self.stages:
total_duration += stage["duration"]
return total_duration
def to_xml(self):
'''Generates xml object from stages dictionary'''
root = etree.Element("stages")
for stage in self.stages:
root.append(etree.Element(
"stage", name=stage["name"],
duration=str(stage["duration"])))
return root
def to_xml_string(self):
'''Generates xml string from stages dictionary'''
return etree.tostring(self.to_xml(), pretty_print=True)
def parse_timestamps(self, timestamps):
'''
Parse timestamps and calculate stage durations
The timestamp of each stage is used as both the start point of its
stage and the endpoint of the previous stage.
On parsing each timestamp, the previous timestamp and previous
event name are used to calculate the duration of the previous stage.
For this reason, parsing the first timestamp line
doesn't produce a stage duration.
The parsing ends when an event with the name 'end' is encountered.
'''
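# Worked example (hypothetical rows): ("install", 100), ("build", 160),
# ("end", 200) produce stages install (60s, 100-160) and build (40s,
# 160-200); the "end" row itself never becomes a stage.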
previous_timestamp = 0
event_name = None
# iterate over all timestamps
for row in timestamps:
timestamp = int(row[1])
# list of possible end tags
end_tags = ['end', 'done', 'finished', 'completed']
# assign starting timestamp of first stage
# to started_at of the build job
if self.started_at is None:
self.started_at = split_timestamp(timestamp)
# skip calculating the duration of the first stage,
# the next timestamp is needed
if event_name is not None:
# finish parsing when an end timestamp is encountered
if event_name.lower() in end_tags:
self.finished_at = split_timestamp(previous_timestamp)
break
# calculate duration from current and previous timestamp
duration = timestamp - previous_timestamp
print 'Duration {0} : {1}s'.format(event_name, duration)
# add stage duration to stages dict
self.stages.append({
"name": event_name,
"started_at": split_timestamp(previous_timestamp),
"finished_at": split_timestamp(timestamp),
"duration": duration})
# event name of the timestamp is used in the next iteration
# the timestamp of the next stage is used as the ending timestamp
# of this stage
event_name = row[0]
previous_timestamp = timestamp
class Stage(object):
'''
Build stage object.
'''
def __init__(self):
self.data = {}
self.set_name("")
self.set_duration(0)
def set_name(self, name):
'''Set stage name'''
if name is None:
return False
self.data["name"] = str(name)
return True
def set_command(self, command):
'''Set stage command'''
if command is None:
return False
self.data["command"] = str(command)
return True
def set_started_at(self, timestamp):
'''Set time when stage was started'''
return self.set_timestamp("started_at", timestamp)
def set_started_at_nano(self, timestamp):
'''Set time when stage was started in nanoseconds'''
return self.set_timestamp_nano("started_at", timestamp)
def set_finished_at(self, timestamp):
'''Set time when stage was finished'''
return self.set_timestamp("finished_at", timestamp)
def set_finished_at_nano(self, timestamp):
'''Set time when stage was finished in nanoseconds'''
return self.set_timestamp_nano("finished_at", timestamp)
def set_timestamp(self, name, timestamp):
'''
Set timestamp
Param name timestamp name
Param timestamp seconds since epoch
'''
if timestamp is not None and name is not None:
try:
self.data[name] = split_timestamp(timestamp)
return True
except TypeError:
return False
return False
def set_timestamp_nano(self, name, timestamp):
'''
Set timestamp in nanoseconds
Param name timestamp name
Param timestamp nanoseconds since epoch
'''
return self.set_timestamp(name, float(timestamp) / float(1000000000))
def set_duration(self, duration):
'''Set stage duration in seconds'''
try:
duration = float(duration)
if duration >= 0:
self.data["duration"] = duration
return True
return False
except (ValueError, TypeError):
return False
def to_dict(self):
'''return stages data as dictionary'''
return self.data
|
Python
| 0
|
@@ -972,16 +972,58 @@
ck_file%0A
+from buildtimetrend.tools import nano2sec%0A
from lxm
@@ -6680,21 +6680,24 @@
p(name,
-float
+nano2sec
(timesta
@@ -6703,28 +6703,8 @@
amp)
- / float(1000000000)
)%0A%0A
|
483fea2f62ea336d6882076136de30d6cd719134
|
Update routers.py
|
directions/routers.py
|
directions/routers.py
|
"""
Classes for major routing providers
* Google
* Mapquest
* Mapquest Open
* Mapbox
Please consult the terms of service of each provider before using the service
Google - https://developers.google.com/maps/terms
Mapquest - Contact for licensed data agreement
Mapquest Open - http://developer.mapquest.com/web/info/terms-of-use
Mapbox - https://www.mapbox.com/tos/
"""
import itertools
import json
import polycomp
import requests
from base import Router, Route, Maneuver, Waypoint
class Google(Router):
url = 'http://maps.googleapis.com/maps/api/directions/json'
default_name = 'google'
def __init__(self, *args, **kwargs):
Router.__init__(self, *args, **kwargs)
# https://developers.google.com/maps/documentation/directions/
def _convert_coordinate(self, p, t=Waypoint.VIA):
if isinstance(p, basestring):
return p
if t == Waypoint.VIA:
via = 'via:'
else:
via = ''
# Google wants lat / lon
return '{via}{0[1]:.6f},{0[0]:.6f}'.format(p, via=via)
def _query_params(self, waypoints):
origin = waypoints[0]
destination = waypoints[-1]
vias = waypoints[1:-1]
# This assumes you're not running Python on a device with a location
# sensor.
payload = {
'origin': self._convert_coordinate(origin, t=None),
'destination': self._convert_coordinate(destination, t=None),
'sensor': 'false',
'units': 'metric',
}
if vias:
payload['waypoints'] = '|'.join(self._convert_coordinate(v)
for v in vias)
return payload
def raw_query(self, waypoints, **kwargs):
payload = self._query_params(waypoints)
payload.update(kwargs)
r = requests.get(self.url, params=payload)
r.raise_for_status()
return r.json()
def format_output(self, data):
routes = []
for r in data['routes']:
duration = sum(leg['duration']['value'] for leg in r['legs'])
distance = sum(leg['distance']['value'] for leg in r['legs'])
maneuvers = []
latlons = []
# Legs are the spans of the route between the desired waypoints. If
# there are no waypoints, there will only be 1 leg
for leg in r['legs']:
for step in leg['steps']:
loc = step['start_location']
m = Maneuver((loc['lng'], loc['lat']),
text=step['html_instructions'])
maneuvers.append(m)
latlons.append(
polycomp.decompress(step['polyline']['points']))
# latlons is a list of list of lat/lon coordinate pairs. The end
# point of each list is the same as the first point of the next
# list. Get rid of the duplicates
lines = [x[:-1] for x in latlons]
lines.append([latlons[-1][-1]]) # Add the very last point
points = itertools.chain(*lines)
# Reverse lat/lon to be lon/lat for GeoJSON
coords = [tuple(reversed(c)) for c in points]
route = Route(coords, distance, duration, maneuvers=maneuvers)
routes.append(route)
return routes
class Mapquest(Router):
# http://www.mapquestapi.com/directions/
url = 'http://www.mapquestapi.com/directions/v2/route'
default_name = 'mapquest'
def __init__(self, key, *args, **kwargs):
Router.__init__(self, *args, **kwargs)
self.key = key
def _convert_location(self, location, t=Waypoint.VIA):
if t == Waypoint.VIA:
via = 'v'
else:
via = 's'
if isinstance(location, basestring):
return {'street': location, 'type': via}
else:
return {'latLng': {'lat': location[1], 'lng': location[0]},
'type': via}
def _format_waypoints(self, waypoints):
# Mapquest takes in locations as an array
locations = [self._convert_location(waypoints[0], t=Waypoint.STOP)]
if waypoints:
locations.extend(self._convert_location(loc, t=Waypoint.VIA)
for loc in waypoints[1:-1])
locations.append(self._convert_location(waypoints[-1],
t=Waypoint.STOP))
return locations
def raw_query(self, waypoints, **kwargs):
params = {
'key': self.key,
'inFormat': 'json',
'outFormat': 'json'
}
locations = self._format_waypoints(waypoints)
data = {
'locations': locations,
'options': {
'avoidTimedConditions': False,
'shapeFormat': 'cmp',
'generalize': 0, # No simplification
'unit': 'k',
'locale': 'es',
'narrativeType': 'text'
},
}
data = json.dumps(data, separators=(',', ':'))
r = requests.post(self.url,
params=params,
data=data)
r.raise_for_status()
data = r.json()
status_code = data['info']['statuscode']
if status_code != 0:
raise Exception(data['info']['messages'][0])
return data
def format_output(self, data):
latlons = polycomp.decompress(data['route']['shape']['shapePoints'])
coords = [tuple(reversed(c)) for c in latlons]
duration = data['route']['time']
distance = data['route']['distance'] * 1000 # km to m
maneuvers = []
for leg in data['route']['legs']:
for m_in in leg['maneuvers']:
loc = m_in['startPoint']
m = Maneuver((loc['lng'], loc['lat']),
text=m_in['narrative'],
icon=m_in['iconUrl']
)
maneuvers.append(m)
r = Route(coords, distance, duration, maneuvers=maneuvers)
return [r]
class MapquestOpen(Mapquest):
# http://open.mapquestapi.com/directions/
# This is the same interface as Mapquest (for now) but just hits
# a different url
url = 'http://open.mapquestapi.com/directions/v2/route'
default_name = 'mapquestopen'
class Mapbox(Router):
default_name = 'mapbox'
# https://www.mapbox.com/developers/api/directions/
def __init__(self, mapid, *args, **kwargs):
Router.__init__(self, *args, **kwargs)
self.mapid = mapid
def _convert_coordinate(self, p):
return '{0[0]},{0[1]}'.format(p)
def raw_query(self, waypoints, **kwargs):
baseurl = 'http://api.tiles.mapbox.com/v3/{mapid}/directions/driving/{waypoints}.json'
formatted_points = ';'.join(self._convert_coordinate(p)
for p in waypoints)
url = baseurl.format(mapid=self.mapid, waypoints=formatted_points)
payload = {'alternatives': 'false'}
r = requests.get(url, params=payload)
r.raise_for_status()
return r.json()
def format_output(self, data):
routes = []
for r in data['routes']:
maneuvers = [Maneuver(s['maneuver']['location']['coordinates'],
text=s['maneuver']['instruction'])
for s in r['steps']]
route = Route(r['geometry']['coordinates'],
r['distance'],
r['duration'], maneuvers=maneuvers)
routes.append(route)
return routes
|
Python
| 0.000001
|
@@ -6009,16 +6009,18 @@
conUrl'%5D
+,%0A
@@ -6032,32 +6032,105 @@
+distance=m_in%5B'distance'%5D,%0A time=m_in%5B'time'%5D
%0A
|
e5fd6111d164cee574cb929849934e0b2c7a70a1
|
Add ArticleImportView tests
|
molo/core/api/tests/test_views.py
|
molo/core/api/tests/test_views.py
|
from django.contrib.auth.models import User
from django.test import Client, TestCase
from django.core.urlresolvers import reverse
from mock import patch
from molo.core.api.tests.utils import mocked_requests_get
from molo.core.tests.base import MoloTestCaseMixin
class MainImportViewTestCase(MoloTestCaseMixin, TestCase):
def setUp(self):
self.mk_main()
self.client = Client()
User.objects.create_superuser(
username="admin", email="admin@admin.com", password="admin"
)
self.client.login(username="admin", password="admin")
def test_raises_error_if_data_not_available(self):
form_data = {
"url": "http://localhost:8000/api/v2/pages/",
"content_type": "core.ArticlePage"
}
response = self.client.post(
reverse("molo_api:main-import"),
data=form_data,
follow=True
)
self.assertFormError(
response, "form", "url", [u"Please enter a valid URL."]
)
@patch("molo.core.api.forms.requests.get", side_effect=mocked_requests_get)
def test_redirects_to_parent_chooser(self, mock_get):
form_data = {
"url": "http://localhost:8000/",
"content_type": "core.ArticlePage"
}
response = self.client.post(
reverse("molo_api:main-import"),
data=form_data,
follow=True
)
self.assertContains(response, "Add Article")
class ArticleParentChooserView(MoloTestCaseMixin, TestCase):
def setUp(self):
self.mk_main()
self.client = Client()
User.objects.create_superuser(
username="admin", email="admin@admin.com", password="admin"
)
self.client.login(username="admin", password="admin")
def test_redirects_to_first_page_if_session_not_set(self):
response = self.client.get(reverse("molo_api:article-parent-chooser"))
self.assertEqual(
response["Location"],
reverse("molo_api:main-import")
)
def test_redirects_to_article_import(self):
pass
|
Python
| 0
|
@@ -1510,20 +1510,24 @@
tChooser
-View
+TestCase
(MoloTes
@@ -2070,48 +2070,616 @@
)%0A%0A
- def test_redirects_to_article_import
+%0Aclass ArticleImportViewTestCase(MoloTestCaseMixin, TestCase):%0A%0A def setUp(self):%0A self.mk_main()%0A self.client = Client()%0A User.objects.create_superuser(%0A username=%22admin%22, email=%22admin@admin.com%22, password=%22admin%22%0A )%0A self.client.login(username=%22admin%22, password=%22admin%22)%0A%0A def test_redirects_to_main_page_if_session_not_set(self):%0A response = self.client.get(reverse(%22molo_api:article-import%22))%0A self.assertEqual(%0A response%5B%22Location%22%5D,%0A reverse(%22molo_api:main-import%22)%0A )%0A%0A def test_articles_can_be_imported
(sel
@@ -2694,9 +2694,8 @@
pass
-%0A
|
1fcb7c2e12d7f025b032e58b30a3e35b1fa25ba5
|
fix path in windows tests
|
tests/test_snapshot_diff.py
|
tests/test_snapshot_diff.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import pytest
from functools import partial
from .shell import mkdtemp, mkdir, touch, mv
from watchdog.utils.dirsnapshot import DirectorySnapshot
from watchdog.utils.dirsnapshot import DirectorySnapshotDiff
from watchdog.utils import platform
skip_on_windows = pytest.mark.skipif(platform.is_windows(),
reason="Can't detect moves on windows file systems")
windows_only = pytest.mark.skipif(not platform.is_windows(),
reason="Should detect moves instead")
def wait():
time.sleep(0.5)
@pytest.fixture()
def tmpdir():
return mkdtemp()
@pytest.fixture()
def p(tmpdir, *args):
"""
Convenience function to join the temporary directory path
with the provided arguments.
"""
return partial(os.path.join, tmpdir)
def test_move_to(p):
mkdir(p('dir1'))
mkdir(p('dir2'))
touch(p('dir1', 'a'))
ref = DirectorySnapshot(p('dir2'))
mv(p('dir1/a'), p('dir2/b'))
diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p('dir2')))
assert diff.files_created == [p('dir2/b')]
def test_move_from(p):
mkdir(p('dir1'))
mkdir(p('dir2'))
touch(p('dir1', 'a'))
ref = DirectorySnapshot(p('dir1'))
mv(p('dir1/a'), p('dir2/b'))
diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p('dir1')))
assert diff.files_deleted == [p('dir1/a')]
@windows_only
def test_move_on_windows(p):
touch(p('a'))
ref = DirectorySnapshot(p(''))
mv(p('a'), p('b'))
diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p('')))
assert diff.files_created == [p('b')]
assert diff.files_deleted == [p('a')]
@skip_on_windows
def test_move_internal(p):
mkdir(p('dir1'))
mkdir(p('dir2'))
touch(p('dir1', 'a'))
ref = DirectorySnapshot(p(''))
mv(p('dir1/a'), p('dir2/b'))
diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p('')))
assert diff.files_moved == [(p('dir1/a'), p('dir2/b'))]
assert diff.files_created == []
assert diff.files_deleted == []
@skip_on_windows
def test_move_replace(p):
mkdir(p('dir1'))
mkdir(p('dir2'))
touch(p('dir1', 'a'))
touch(p('dir2', 'b'))
ref = DirectorySnapshot(p(''))
mv(p('dir1/a'), p('dir2/b'))
diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p('')))
assert diff.files_moved == [(p('dir1/a'), p('dir2/b'))]
assert diff.files_deleted == [p('dir2/b')]
assert diff.files_created == []
@windows_only
def test_move_replace_windows(p):
touch(p('a'))
wait()  # set a and b to different timestamps
touch(p('b'))
ref = DirectorySnapshot(p(''))
mv(p('a'), p('b'))
diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p('')))
assert diff.files_deleted == [p('a')]
assert diff.files_modified == [p('b')]
def test_dir_modify_on_create(p):
ref = DirectorySnapshot(p(''))
wait()
touch(p('a'))
diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p('')))
assert diff.dirs_modified == [p('')]
def test_dir_modify_on_move(p):
mkdir(p('dir1'))
mkdir(p('dir2'))
touch(p('dir1', 'a'))
ref = DirectorySnapshot(p(''))
wait()
mv(p('dir1/a'), p('dir2/b'))
diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p('')))
assert set(diff.dirs_modified) == set([p('dir1'), p('dir2')])
@skip_on_windows
def test_detect_modify_for_moved_files(p):
touch(p('a'))
ref = DirectorySnapshot(p(''))
wait()
touch(p('a'))
mv(p('a'), p('b'))
diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p('')))
assert diff.files_moved == [(p('a'), p('b'))]
assert diff.files_modified == [p('a')]
|
Python
| 0.000001
|
@@ -1577,33 +1577,36 @@
)%0A mv(p('dir1
-/
+', '
a'), p('dir2/b')
@@ -1593,33 +1593,36 @@
', 'a'), p('dir2
-/
+', '
b'))%0A diff =
@@ -1863,33 +1863,36 @@
)%0A mv(p('dir1
-/
+', '
a'), p('dir2/b')
@@ -1879,33 +1879,36 @@
', 'a'), p('dir2
-/
+', '
b'))%0A diff =
@@ -2005,17 +2005,20 @@
%5Bp('dir1
-/
+', '
a')%5D%0A%0A%0A@
|
3a49982dfe1a94159bb2543540ae3638688c7c31
|
make RAOB download backend more time forgiving
|
cgi-bin/request/raob.py
|
cgi-bin/request/raob.py
|
#!/usr/bin/env python
"""
Download interface for data from RAOB network
"""
import cgi
import datetime
import pytz
from pyiem.util import get_dbconn, ssw
from pyiem.network import Table as NetworkTable
def m(val):
"""Helper"""
if val is None:
return 'M'
return val
def fetcher(station, sts, ets):
"""Do fetching"""
dbconn = get_dbconn('postgis')
cursor = dbconn.cursor()
stations = [station, ]
if station.startswith("_"):
nt = NetworkTable("RAOB")
stations = nt.sts[station]['name'].split("--")[1].strip().split(",")
cursor.execute("""
SELECT f.valid at time zone 'UTC', p.levelcode, p.pressure, p.height,
p.tmpc, p.dwpc, p.drct, round((p.smps * 1.94384)::numeric,0),
p.bearing, p.range_miles, f.station from
raob_profile p JOIN raob_flights f on
(f.fid = p.fid) WHERE f.station in %s and valid >= %s and valid < %s
""", (tuple(stations), sts, ets))
ssw(("station,validUTC,levelcode,pressure_mb,height_m,tmpc,"
"dwpc,drct,speed_kts,bearing,range_sm\n"))
for row in cursor:
ssw(("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n"
) % (row[10], m(row[0]),
m(row[1]), m(row[2]), m(row[3]), m(row[4]),
m(row[5]), m(row[6]), m(row[7]),
m(row[8]), m(row[9])))
def main():
"""Go Main Go"""
form = cgi.FieldStorage()
sts = datetime.datetime.strptime(form.getfirst('sts', ''),
'%m/%d/%Y %H:%M')
sts = sts.replace(tzinfo=pytz.utc)
ets = datetime.datetime.strptime(form.getfirst('ets', ''),
'%m/%d/%Y %H:%M')
ets = ets.replace(tzinfo=pytz.utc)
station = form.getfirst('station', 'KOAX')[:4]
if form.getfirst('dl', None) is not None:
ssw('Content-type: application/octet-stream\n')
ssw(("Content-Disposition: attachment; filename=%s_%s_%s.txt\n\n"
) % (station, sts.strftime("%Y%m%d%H"),
ets.strftime("%Y%m%d%H")))
else:
ssw('Content-type: text/plain\n\n')
fetcher(station, sts, ets)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -69,16 +69,27 @@
ork%0A%22%22%22%0A
+import sys%0A
import c
@@ -410,16 +410,30 @@
.cursor(
+'raobstreamer'
)%0A st
@@ -1350,74 +1350,187 @@
def
-main():%0A %22%22%22Go Main Go%22%22%22%0A form = cgi.FieldStorage()%0A sts
+friendly_date(form, key):%0A %22%22%22More forgiving date conversion%22%22%22%0A val = form.getfirst(key)%0A try:%0A val = val.strip()%0A if len(val.split()) == 1:%0A dt
= d
@@ -1559,33 +1559,24 @@
ime(
-form.getfirst('sts', '
+val, '%25m/%25d/%25Y
')
-,
%0A
@@ -1572,32 +1572,38 @@
%25d/%25Y')%0A
+else:%0A
@@ -1598,32 +1598,52 @@
-
+dt = datetime.datetime.strptime(val,
'%25m/%25d/
@@ -1661,17 +1661,19 @@
-sts = sts
+ dt = dt
.rep
@@ -1693,169 +1693,390 @@
ytz.
-utc
+UTC
)%0A e
-ts = datetime.datetime.strptime(form.getfirst('ets', ''),%0A '%25m/%25d/%25Y %25H:%25M')%0A ets = ets.replace(tzinfo=pytz.utc)
+xcept Exception as _exp:%0A ssw('Content-type: text/plain%5Cn%5Cn')%0A ssw(('Invalid %25s date provided, should be %22%25%25m/%25%25d/%25%25Y %25%25H:%25%25M%22'%0A ' in UTC timezone'%0A ) %25 (key, ))%0A sys.exit()%0A return dt%0A%0A%0Adef main():%0A %22%22%22Go Main Go%22%22%22%0A form = cgi.FieldStorage()%0A sts = friendly_date(form, 'sts')%0A ets = friendly_date(form, 'ets')%0A
%0A
|
2b3214d52d3bd1230fb0af4a010642391e0bbfbb
|
fix keys if error is returned (#5)
|
datpy.py
|
datpy.py
|
import subprocess
import time
import cPickle
try:
import ujson as json
except:
import json
try:
import pandas as pd
except:
pd = False
class DatException(Exception):
pass
def on_error(log):
message = log.get('message')
if not message:
message = 'Unknown error. Please contact us at #dat in freenode on irc'
raise DatException('Error: ' + message)
def returns_version(func):
def inner(*args, **kwargs):
self = args[0]
log = func(*args, **kwargs)
self.version = log.get("version")
if not self.version:
on_error(log)
return self.version
return inner
class Dat:
def __init__(self, path=None):
self.path = path
self.version = None
def init(self, **kwargs):
p = self.process("dat init --no-prompt", kwargs)
return stream_out(p)
def checkout(self, version, **kwargs):
self.version = version
p = self.process("dat checkout " + version, kwargs)
return stream_out(p)
def datasets(self, **kwargs):
p = self.process("dat datasets", kwargs)
res = stream_out(p)
return res['datasets']
def destroy(self, **kwargs):
p = self.process("dat destroy --no-prompt", kwargs)
return stream_out(p)
def status(self, **kwargs):
p = self.process("dat status", kwargs)
return stream_out(p)
def read(self, key, **kwargs):
p = self.process("dat read " + key, kwargs)
return stream_out(p, parse=False)
@returns_version
def write(self, filename, data=None, **kwargs):
p = self.process("dat write {0} -".format(filename), kwargs)
return stream_in(p, data)
@returns_version
def write_file(self, filename, **kwargs):
p = self.process("dat write " + filename, kwargs)
return stream_out(p)
def process(self, cmd, opts):
if self.path:
opts['path'] = self.path
return process(cmd, opts)
def dataset(self, name):
return Dataset(self, name)
class Dataset:
def __init__(self, dat, dataset):
self.dat = dat
self.dataset = dataset
def keys(self, **kwargs):
p = self.process("dat keys", kwargs)
res = stream_out(p)
return res['keys']
@returns_version
def import_file(self, filename, **kwargs):
p = self.process("dat import " + filename, kwargs)
return stream_out(p)
@returns_version
def import_dataframe(self, dataframe, **kwargs):
## TODO: make streaming better by using a generator
p = self.process("dat import -", kwargs)
return stream_in(p, dataframe.to_csv())
def export_dataframe(self, **kwargs):
if not pd:
raise Exception("Can't find pandas. Is it available on your path?")
output = self.export(**kwargs)
frame = pd.DataFrame.from_dict(output)
return frame
def export(self, **kwargs):
p = self.process("dat export", kwargs)
return stream_out(p)
def process(self, cmd, opts):
if self.dat.path:
opts['path'] = self.dat.path
opts['dataset'] = self.dataset
return process(cmd, opts)
def process(cmd, opts):
"""
Creates a process.
Adds options (provided as keyword args) to the given cmd.
Parameters
----------
cmd: str
the command to add options
opts: dict
the options to add
"""
if opts is None:
opts = {}
cmd += ' --json '
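# Single-letter option names become "-k value"; longer names become "--key=value".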
for key, val in opts.iteritems():
if (len(key) == 1):
cmd += " -{0} {1}".format(key, val)
else:
cmd += " --{0}={1}".format(key, val)
return subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
def stream_in(p, data):
"""
Streams the given data into dat through the process's stdin
Parameters
----------
p: subprocess.Popen
the dat process to write to
data: str
the data to stream in
"""
stdout, stderr = p.communicate(input=data)
if p.returncode == 1:
raise DatException('Node.js error: ' + stderr)
else:
res = json.loads(stdout)
if type(res) == object and res.get('error'):
return on_error(res)
return res
def stream_out(p, parse=True):
"""
Streams the stdout from the given process into python
Parameters
----------
p: subprocess.Popen
the process to read stdout from
parse: boolean
if true, parse each output line as json
"""
res = []
for line in iter(p.stdout.readline, ''):
if parse:
line = json.loads(line.rstrip())
else:
line = line
res.append(line)
if len(res) == 1:
res = res[0]
if type(res) == object and res.get('error'):
return on_error(res)
if not parse:
res = ''.join(res)
subprocess.Popen.terminate(p)
return res
|
Python
| 0
|
@@ -2070,32 +2070,56 @@
= stream_out(p)%0A
+ if 'keys' in res:%0A
return res%5B'
@@ -2124,16 +2124,31 @@
%5B'keys'%5D
+%0A return res
%0A%0A @ret
|
aaba385ba46e9d9ed118630181eaa546f310923e
|
use VERSION constant, issue #6
|
buildtimetrend/travis.py
|
buildtimetrend/travis.py
|
# vim: set expandtab sw=4 ts=4:
'''
Interface to Travis CI API.
Copyright (C) 2014 Dieter Adriaenssens <ruleant@users.sourceforge.net>
This file is part of buildtime-trend
<https://github.com/ruleant/buildtime-trend/>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib2
import json
from buildtimetrend.tools import check_file
TRAVIS_ORG_API_URL = 'https://api.travis-ci.org/'
class TravisData(object):
'''
Gather data from Travis CI using the API
'''
def __init__(self, repo, build_id):
'''
Retrieve Travis CI build data using the API.
Param repo : GitHub repository slug (e.g. ruleant/buildtime-trend)
Param build_id : Travis CI build id (e.g. 158)
'''
self.build_data = {}
self.jobs_data = {}
self.repo = repo
self.api_url = TRAVIS_ORG_API_URL
self.build_id = str(build_id)
def get_build_data(self):
'''
Retrieve Travis CI build data.
'''
request = 'repos/' + self.repo + '/builds?number=' + self.build_id
self.build_data = self.json_request(request)
def get_build_jobs(self):
'''
Retrieve Travis CI build job data.
'''
if len(self.build_data) > 0:
for job_id in self.build_data['builds'][0]['job_ids']:
self.get_job_data(job_id)
def get_job_data(self, job_id):
'''
Retrieve Travis CI job data.
'''
self.jobs_data[str(job_id)] = self.json_request('jobs/' + str(job_id))
def get_job_log(self, job_id):
'''
Retrieve Travis CI job log.
'''
request_url = self.api_url + 'jobs/' + str(job_id) + '/log'
print "Request build job log : " + request_url
return urllib2.urlopen(request_url)
def parse_job_log(self, job_id):
'''
Parse Travis CI job log.
'''
self.parse_job_log_stream(self.get_job_log(job_id))
def parse_job_log_file(self, filename):
'''
Open a Travis CI log file and parse it.
Parameters :
- filename : filename of Travis CI log
Returns false if file doesn't exist, true if it was read successfully.
'''
# load timestamps file
if not check_file(filename):
return False
# read timestamps, calculate stage duration
with open(filename, 'rb') as file_stream:
self.parse_job_log_stream(file_stream)
return True
def parse_job_log_stream(self, stream):
'''
Parse Travis CI job log stream.
'''
import re
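# Travis CI wraps each build stage in travis_fold / travis_time markers
# terminated by CR + ESC control codes; the regexes below extract stage
# names and the start/finish/duration timestamps.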
for line in stream:
if 'travis_' in line:
print 'line : ' + line.replace('\x0d', '*').replace('\x1b', 'ESC')
# parse start fold tag
result = re.search(r'travis_fold:start:(\w+)\.(\d+)\x0d\x1b', line)
if result:
print
print 'start_tag : ' + result.group(1) + '.' + result.group(2)
# parse start time tag
result = re.search(r'travis_time:start:(.*)\x0d\x1b\[0K', line)
if result:
print
print 'start_hash : ' + result.group(1)
# parse command
result = re.search(r'\$\ (.*)', line)
if result:
print 'command : ' + result.group(1)
# parse end time tag
result = re.search(r'travis_time:end:(.*):start=(\d+),finish=(\d+),duration=(\d+)\x0d\x1b', line)
if result:
print 'end_hash : ' + result.group(1)
print 'start : ' + result.group(2)
print 'finish : ' + result.group(3)
print 'duration : ' + result.group(4)
# parse end fold tag
result = re.search(r'travis_fold:end:(\w+)\.(\d+)\x0d\x1b', line)
if result:
print 'end_tag : ' + result.group(1) + '.' + result.group(2)
def json_request(self, json_request):
'''
Retrieve Travis CI data using API.
'''
req = urllib2.Request(
self.api_url + json_request,
None,
{
# get version from Config class
'user-agent': 'buildtime-trend/0.2-dev',
'accept': 'application/vnd.travis-ci.2+json'
}
)
opener = urllib2.build_opener()
result = opener.open(req)
return json.load(result)
def get_started_at(self):
'''
Retrieve timestamp when build was started.
'''
if len(self.build_data) > 0:
return self.build_data['builds'][0]['started_at']
else:
return None
|
Python
| 0
|
@@ -881,16 +881,60 @@
eck_file
+%0Afrom buildtimetrend.settings import VERSION
%0A%0ATRAVIS
@@ -4856,56 +4856,8 @@
%7B%0A
- # get version from Config class%0A
@@ -4903,16 +4903,19 @@
end/
-0.2-dev'
+' + VERSION
,%0A
|
7124f56f3a9ac1185138d88c02648a3442c98606
|
Replace the inlined SDK path with apple_common call.
|
tools/build_defs/apple/swift.bzl
|
tools/build_defs/apple/swift.bzl
|
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Skylark rules for Swift."""
load("shared", "xcrun_action", "XCRUNWRAPPER_LABEL")
def _swift_target(cpu, sdk_version):
"""Returns a target triplet for Swift compiler."""
return "%s-apple-ios%s" % (cpu, sdk_version)
def _swift_library_impl(ctx):
"""Implementation for swift_library Skylark rule."""
cpu = ctx.fragments.apple.ios_cpu()
platform = ctx.fragments.apple.ios_cpu_platform()
sdk_version = ctx.fragments.apple.sdk_version_for_platform(platform)
target = _swift_target(cpu, sdk_version)
# Collect transitive dependencies.
dep_modules = []
dep_libs = []
for x in ctx.attr.deps:
swift_provider = x.swift
dep_libs.append(swift_provider.library)
dep_libs += swift_provider.transitive_libs
dep_modules.append(swift_provider.module)
dep_modules += swift_provider.transitive_modules
# TODO(b/28005753): Currently this is not really a library, but an object
# file, does not matter to the linker, but should be replaced with proper ar
# call.
output_lib = ctx.outputs.swift_lib
output_module = ctx.outputs.swift_module
srcs_args = [f.path for f in ctx.files.srcs]
  # TODO(b/28005582): Instead of including a dir for each dependency, output to
# a shared dir and include that?
include_dirs = set([x.dirname for x in dep_modules])
include_args = ["-I%s" % d for d in include_dirs]
args = [
"swift",
"-frontend",
"-emit-object",
"-emit-module-path", output_module.path,
"-module-name", ctx.label.name,
"-parse-as-library",
"-target", target,
# TODO(b/28049126): Replace this value with apple_toolchain call.
"-sdk", "__BAZEL_XCODE_SDKROOT__",
"-o", output_lib.path,
] + srcs_args + include_args
xcrun_action(ctx,
inputs = ctx.files.srcs + dep_modules + dep_libs,
outputs = (output_lib, output_module),
mnemonic = 'SwiftCompile',
arguments = args,
use_default_shell_env = False,
progress_message = ("Compiling Swift module %s (%d files)"
% (ctx.label.name, len(ctx.files.srcs))))
return struct(
swift=struct(
library=output_lib,
module=output_module,
transitive_libs=dep_libs,
transitive_modules=dep_modules),
objc_export=struct(
library=set([output_lib] + dep_libs),
)
)
swift_library = rule(
_swift_library_impl,
attrs = {
"srcs": attr.label_list(allow_files = FileType([".swift"])),
"deps": attr.label_list(providers=["swift"]),
"_xcrunwrapper": attr.label(
executable=True,
default=Label(XCRUNWRAPPER_LABEL))},
fragments = ["apple"],
outputs = {
"swift_lib": "%{name}.a",
"swift_module": "%{name}.swiftmodule",
},
)
"""
Builds a Swift module.
A module is a pair of static library (.a) + module header (.swiftmodule).
Dependent targets can import this module as "import RuleName".
Args:
srcs: Swift sources that comprise this module.
deps: Other Swift modules.
"""
|
Python
| 0.000011
|
@@ -2158,113 +2158,56 @@
-# TODO(b/28049126): Replace this value with apple_toolchain call.%0A %22-sdk%22, %22__BAZEL_XCODE_SDKROOT__%22
+%22-sdk%22, apple_common.apple_toolchain().sdk_dir()
,%0A
|
991320a060058fa1cd9e75d689c9ec99501bae08
|
Add exception and some comments
|
pygraphc/evaluation/DaviesBouldinIndex.py
|
pygraphc/evaluation/DaviesBouldinIndex.py
|
from __future__ import division
from pygraphc.similarity.CosineSimilarity import CosineSimilarity
from itertools import combinations, product
class DaviesBouldinIndex(object):
def __init__(self, clusters, preprocessed_logs, log_length):
self.clusters = clusters
self.preprocessed_logs = preprocessed_logs
self.log_length = log_length
self.cluster_centroids = {}
self.cluster_total_nodes = {}
self.total_cluster = 0
self.distance_buffer = {}
def __get_centroid(self, cluster=None):
centroid = ''
# centroid for a particular cluster
if cluster:
for log_id in cluster:
centroid += self.preprocessed_logs[log_id]
# centroid for the whole logs
else:
for log_id in self.preprocessed_logs:
centroid += self.preprocessed_logs[log_id]
return centroid
def __get_all_cluster_properties(self):
for cluster_id, log_ids in self.clusters.iteritems():
self.cluster_centroids[cluster_id] = self.__get_centroid(log_ids)
self.cluster_total_nodes[cluster_id] = len(log_ids)
self.total_cluster = len(self.clusters.keys())
def __get_distance(self, source, dest):
cs = CosineSimilarity()
distance = cs.get_cosine_similarity(source, dest)
self.distance_buffer[(source, dest)] = distance
return distance
def __check_distance(self, checked_pair):
if checked_pair in self.distance_buffer:
distance = self.distance_buffer[checked_pair]
else:
distance = None
return distance
def __get_dispersion(self):
cluster_dispersions = {}
for cluster_id, log_ids in self.clusters.iteritems():
distances = []
for log_id in log_ids:
distance = self.__check_distance((self.preprocessed_logs[log_id], self.cluster_centroids[cluster_id]))
if distance is None:
distance = self.__get_distance(self.preprocessed_logs[log_id], self.cluster_centroids[cluster_id])
distances.append(distance)
total_distance = sum(distances)
cluster_dispersions[cluster_id] = 1 / self.cluster_total_nodes[cluster_id] * total_distance
return cluster_dispersions
def __get_dissimilarity(self):
cluster_dissimilarity = {}
for cluster_id1, cluster_id2 in combinations(xrange(self.total_cluster), 2):
distance = self.__check_distance(())
if distance is None:
distance = self.__get_distance(self.cluster_centroids[cluster_id1], self.cluster_centroids[cluster_id2])
cluster_dissimilarity[(cluster_id1, cluster_id2)] = distance
return cluster_dissimilarity
def __get_similarity(self):
similarity = {}
cluster_dispersions = self.__get_dispersion()
cluster_dissimilarity = self.__get_dissimilarity()
for cluster_id1, cluster_id2 in combinations(xrange(self.total_cluster), 2):
similarity[(cluster_id1, cluster_id2)] = \
cluster_dispersions[cluster_id1] + cluster_dispersions[cluster_id2] / \
cluster_dissimilarity[(cluster_id1, cluster_id2)]
return similarity
def __get_r(self):
r = {}
similarity = self.__get_similarity()
similarity_keys = similarity.keys()
for cluster_id, log_ids in self.clusters.iteritems():
r_cluster = []
for cluster_id1, cluster_id2 in product(xrange(self.total_cluster), repeat=2):
if cluster_id == cluster_id1 and cluster_id1 != cluster_id2:
if (cluster_id1, cluster_id2) in similarity_keys:
r_cluster.append(similarity[(cluster_id1, cluster_id2)])
else:
r_cluster.append(similarity[(cluster_id2, cluster_id1)])
r[cluster_id] = max(r_cluster)
return r
def get_davies_bouldin(self):
self.__get_all_cluster_properties()
r = self.__get_r()
try:
db_index = 1 / self.total_cluster * sum(r.values())
except ZeroDivisionError:
db_index = 0.
return db_index
|
Python
| 0
|
@@ -680,34 +680,50 @@
centroid
-+
=
+ centroid + ' ' +
self.preprocess
@@ -869,10 +869,29 @@
oid
-+
=
+ ' '.join(%5Bcentroid,
sel
@@ -917,16 +917,18 @@
%5Blog_id%5D
+%5D)
%0A%0A
@@ -982,32 +982,65 @@
operties(self):%0A
+ # get cluster properties%0A
for clus
@@ -1323,24 +1323,68 @@
rce, dest):%0A
+ # get cosine similarity as distance%0A
cs =
@@ -1589,16 +1589,57 @@
_pair):%0A
+ # check distance is exist or not%0A
@@ -1829,32 +1829,93 @@
spersion(self):%0A
+ # get cluster dispersion (intra-cluster compactness)%0A
cluster_
@@ -2585,32 +2585,95 @@
milarity(self):%0A
+ # get cluster dissimilarity (inter-cluster separation)%0A
cluster_
@@ -2822,16 +2822,88 @@
stance((
+self.cluster_centroids%5Bcluster_id1%5D, self.cluster_centroids%5Bcluster_id2%5D
))%0A
@@ -3187,32 +3187,100 @@
milarity(self):%0A
+ # get similarity (ratio between compactness and separation)%0A
similari
@@ -3477,32 +3477,53 @@
l_cluster), 2):%0A
+ try:%0A
simi
@@ -3569,32 +3569,36 @@
+
+
cluster_dispersi
@@ -3661,32 +3661,36 @@
+
cluster_dissimil
@@ -3722,16 +3722,114 @@
er_id2)%5D
+%0A except ZeroDivisionError:%0A similarity%5B(cluster_id1, cluster_id2)%5D = 0.
%0A%0A
@@ -3864,32 +3864,48 @@
__get_r(self):%0A
+ # get R%0A
r = %7B%7D%0A
@@ -4569,16 +4569,16 @@
turn r%0A%0A
-
def
@@ -4595,32 +4595,67 @@
_bouldin(self):%0A
+ # get Davies-Bouldin index%0A
self.__g
|
88cc76833444b346dfe7ec614467d61ca82789ac
|
Revert "Revert "Improve logger/printer colours""
|
km3pipe/logger.py
|
km3pipe/logger.py
|
# Filename: logger.py
# pylint: disable=locally-disabled,C0103
"""
The logging facility.
"""
from hashlib import sha256
import socket
import logging
from .tools import colored, supports_color
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
__status__ = "Development"
loggers = {} # this holds all the registered loggers
# logging.basicConfig()
if supports_color():
logging.addLevelName(logging.INFO, "\033[1;32m%s\033[1;0m" %
logging.getLevelName(logging.INFO))
logging.addLevelName(logging.DEBUG, "\033[1;34m%s\033[1;0m" %
logging.getLevelName(logging.DEBUG))
logging.addLevelName(logging.WARNING, "\033[1;33m%s\033[1;0m" %
logging.getLevelName(logging.WARNING))
logging.addLevelName(logging.ERROR, "\033[1;31m%s\033[1;0m" %
logging.getLevelName(logging.ERROR))
logging.addLevelName(logging.CRITICAL, "\033[1;101m%s\033[1;0m" %
logging.getLevelName(logging.CRITICAL))
class LogIO(object):
"""Read/write logging information.
"""
def __init__(self, node, stream,
url='pi2089.physik.uni-erlangen.de',
port=28777):
self.node = node
self.stream = stream
self.url = url
self.port = port
self.sock = None
self.connect()
def send(self, message, level='info'):
message_string = "+log|{0}|{1}|{2}|{3}\r\n" \
.format(self.stream, self.node, level, message)
try:
self.sock.send(message_string)
except socket.error:
print("Lost connection, reconnecting...")
self.connect()
self.sock.send(message_string)
def connect(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.url, self.port))
def get_logger(name):
"""Helper function to get a logger"""
if name in loggers:
return loggers[name]
logger = logging.getLogger(name)
logger.propagate = False
pre, suf = hash_coloured_escapes(name) if supports_color() else ('', '')
formatter = logging.Formatter('%(levelname)s->{}%(name)s:{} %(message)s'
.format(pre, suf))
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
loggers[name] = logger
return logger
def set_level(name, level):
"""Set the log level for given logger"""
get_logger(name).setLevel(level)
def get_printer(name, color=None, ansi_code=None, force_color=False):
"""Return a function which prints a message with a coloured name prefix"""
if force_color or supports_color():
if color is None and ansi_code is None:
name = hash_coloured(name)
else:
name = colored(name, color=color, ansi_code=ansi_code)
prefix = name + ': '
def printer(text):
print(prefix + str(text))
return printer
def hash_coloured(text):
"""Return a ANSI coloured text based on its hash"""
ansi_code = int(sha256(text.encode('utf-8')).hexdigest(), 16) % 230
return colored(text, ansi_code=ansi_code)
def hash_coloured_escapes(text):
"""Return the ANSI hash colour prefix and suffix for a given text"""
ansi_code = int(sha256(text.encode('utf-8')).hexdigest(), 16) % 230
prefix, suffix = colored('SPLIT', ansi_code=ansi_code).split('SPLIT')
return prefix, suffix
|
Python
| 0.000059
|
@@ -2217,21 +2217,23 @@
%0A pre
+1
, suf
+1
= hash_
@@ -2289,16 +2289,124 @@
'', '')%0A
+ pre2, suf2 = hash_coloured_escapes(name + 'salt') %5C%0A if supports_color() else ('', '')%0A
form
@@ -2449,12 +2449,18 @@
me)s
--%3E%7B%7D
+ %7B%7D%E2%97%8F%7B%7D%E2%97%8F%7B%7D
%25(na
@@ -2464,18 +2464,16 @@
(name)s:
-%7B%7D
%25(messa
@@ -2523,21 +2523,29 @@
rmat(pre
+1, pre2
, suf
+1
))%0A c
@@ -3042,34 +3042,187 @@
-name = hash_coloured(name)
+cpre_1, csuf_1 = hash_coloured_escapes(name)%0A cpre_2, csuf_2 = hash_coloured_escapes(name + 'salt')%0A name = cpre_1 + '%E2%97%8F' + cpre_2 + '%E2%97%8F' + csuf_1 + ' ' + name
%0A
|
722de274d3ee9866c7580a7f95e32de1777e6a3b
|
Add note
|
csscms/properties_scraper.py
|
csscms/properties_scraper.py
|
from pyquery import PyQuery as pq
"""A quick and dirty scraper for w3c's css properties list."""
def strip_all_prefixes(string):
bad_prefixes = [
'text-text-',
'pos-',
'font-font-',
'nav-',
'class-',
'gen-',
'tab-'
]
for prefix in bad_prefixes:
string = string.replace(prefix, '')
return string
def normalize_w3c_link(url):
url = strip_all_prefixes(url)
return '-'.join(url.replace(
'.asp', '').replace('css3_pr_', '').replace('pr_', '').split('_'))
def load_all_w3c_props(root_url, max_open=None):
table_class = '.reference.notranslate'
data = {}
urls = []
doc = pq(url=root_url)
links = pq(doc).find(table_class).find('a')
def _process(_, selector):
if selector is not None:
prop = pq(selector).find('td').eq(0).text().strip()
if len(prop) > 0:
return urls.append(prop)
else:
return ''
for k, link in enumerate(links):
if max_open is not None:
if k >= max_open:
break
url = pq(link).attr('href')
follow_doc = pq(url='{}/{}'.format(root_url, url))
pq(follow_doc).find(table_class).find('tr').each(_process)
# Normalize property from w3c's url structure
url = normalize_w3c_link(url)
# Push all current options
data[url] = {'dropdown': True, 'props': urls}
# Mutable container, empty it out for reuse
urls = []
return data
print(load_all_w3c_props('http://www.w3schools.com/cssref/'))
|
Python
| 0
|
@@ -32,16 +32,17 @@
pq%0A%0A%0A%22%22%22
+%0A
A quick
@@ -89,16 +89,140 @@
es list.
+%0ASee css_properties.py for the example output. This is meant to be run once, except when new properties%0Aneed to be scraped.%0A
%22%22%22%0A%0A%0Ade
|
e38dfc9bc39199a07ec509a6ba7d07a3bcc12a3d
|
Fix all Python DSL incompatibilities for running tests.
|
tools/build_rules/java_rules.bzl
|
tools/build_rules/java_rules.bzl
|
"""Module containing java macros."""
load("@bazel_skylib//lib:collections.bzl", "collections")
load("//tools/build_rules:module_rules_for_tests.bzl", "convert_module_deps_to_test")
def _add_immutables(deps_arg, **kwargs):
kwargs[deps_arg] = collections.uniq(kwargs.get(deps_arg, []) + [
'//src/com/facebook/buck/core/util/immutables:immutables',
'//third-party/java/errorprone:error-prone-annotations',
'//third-party/java/immutables:immutables',
'//third-party/java/guava:guava',
'//third-party/java/jsr:jsr305',
])
kwargs['plugins'] = collections.uniq(kwargs.get('plugins', []) + [
'//third-party/java/immutables:processor'
])
return kwargs
def java_immutables_library(name, **kwargs):
return native.java_library(
name=name,
**_add_immutables('deps', **kwargs))
def _shallow_dict_copy_without_key(table, key_to_omit):
"""Returns a shallow copy of dict with key_to_omit omitted."""
return {key: table[key] for key in table if key != key_to_omit}
def java_test(
name,
vm_args=None,
labels=None,
run_test_separately=False,
has_immutable_types=False,
module_deps=[],
# deps, provided_deps and plugins are handled in kwargs so that immutables can be handled there
**kwargs
):
"""java_test wrapper that provides sensible defaults for buck tests.
Args:
name: name
vm_args: vm_args
labels: labels
run_test_separately: run_test_separately
has_immutable_types: has_immutable_types
module_deps: A list of modules this test depends on
**kwargs: kwargs
"""
extra_labels = ['run_as_bundle']
if run_test_separately:
extra_labels.append('serialize')
if has_immutable_types:
kwargs = _add_immutables('deps', **kwargs)
if 'deps' in kwargs:
deps = kwargs['deps']
kwargs = _shallow_dict_copy_without_key(kwargs, 'deps')
else:
deps = []
if 'env' in kwargs:
env = kwargs['env']
kwargs = _shallow_dict_copy_without_key(kwargs, 'env')
else:
env = {}
if '//src/com/facebook/buck/step/external:external' in deps:
env['EXTERNAL_STEP_RUNNER_JAR_FOR_BUCK_TEST'] = (
'$(location //src/com/facebook/buck/step/external:executor)')
native.java_test(
name=name,
deps=deps + [
# When actually running Buck, the launcher script loads the bootstrapper,
# and the bootstrapper loads the rest of Buck. For unit tests, which don't
# run Buck, we have to add a direct dependency on the bootstrapper in case
# they exercise code that uses it.
'//src/com/facebook/buck/cli/bootstrapper:bootstrapper_lib',
] + convert_module_deps_to_test(module_deps),
vm_args=[
# Add -XX:-UseSplitVerifier by default to work around:
# http://arihantwin.blogspot.com/2012/08/getting-error-illegal-local-variable.html
'-XX:-UseSplitVerifier',
# Don't use the system-installed JNA; extract it from the local jar.
'-Djna.nosys=true',
# Add -Dsun.zip.disableMemoryMapping=true to work around a JDK issue
# related to modifying JAR/ZIP files that have been loaded into memory:
#
# http://bugs.sun.com/view_bug.do?bug_id=7129299
#
# This has been observed to cause a problem in integration tests such as
# CachedTestIntegrationTest where `buck build //:test` is run repeatedly
# such that a corresponding `test.jar` file is overwritten several times.
# The CompiledClassFileFinder in JavaTestRule creates a java.util.zip.ZipFile
# to enumerate the zip entries in order to find the set of .class files
# in `test.jar`. This interleaving of reads and writes appears to match
# the conditions to trigger the issue reported on bugs.sun.com.
#
# Currently, we do not set this flag in bin/buck_common, as Buck does not
# normally modify the contents of buck-out after they are loaded into
# memory. However, we may need to use this flag when running buckd where
# references to zip files may be long-lived.
#
# Finally, note that when you specify this flag,
# `System.getProperty("sun.zip.disableMemoryMapping")` will return `null`
# even though you have specified the flag correctly. Apparently sun.misc.VM
# (http://www.docjar.com/html/api/sun/misc/VM.java.html) saves the property
# internally, but removes it from the set of system properties that are
# publicly accessible.
'-Dsun.zip.disableMemoryMapping=true',
] + (vm_args or []),
env=env,
run_test_separately=run_test_separately,
labels = (labels or []) + extra_labels,
**kwargs
)
def standard_java_test(
name,
run_test_separately = False,
vm_args = None,
fork_mode = 'none',
labels = None,
with_test_data = False,
**kwargs
):
if vm_args == None:
vm_args = ['-Xmx256M']
test_srcs = native.glob(["*Test.java"])
if len(test_srcs) > 0:
java_test(
name = name,
srcs = test_srcs,
resources = native.glob(['testdata/**'], exclude_directories=True) if with_test_data else [],
vm_args = vm_args,
run_test_separately = run_test_separately,
fork_mode = fork_mode,
labels = labels or [],
**kwargs
)
def _add_pf4j_plugin_framework(**kwargs):
kwargs["provided_deps"] = collections.uniq(kwargs.get("provided_deps", []) + [
"//third-party/java/pf4j:pf4j",
])
kwargs["plugins"] = collections.uniq(kwargs.get("plugins", []) + [
"//third-party/java/pf4j:processor",
])
kwargs["annotation_processor_params"] = collections.uniq(kwargs.get("annotation_processor_params", []) + [
"pf4j.storageClassName=org.pf4j.processor.ServiceProviderExtensionStorage",
])
return kwargs
def _add_buck_modules_annotation_processor(**kwargs):
kwargs["plugins"] = list(collections.uniq(kwargs.get("plugins", []) + [
"//src/com/facebook/buck/module/annotationprocessor:annotationprocessor",
]))
return kwargs
def java_library_with_plugins(name, **kwargs):
kwargs_with_immutables = _add_immutables("provided_deps", **kwargs)
kawgs_with_plugins = _add_pf4j_plugin_framework(**kwargs_with_immutables)
kawgs_with_buck_modules_annotation_processor = _add_buck_modules_annotation_processor(**kawgs_with_plugins)
return native.java_library(
name=name,
**kawgs_with_buck_modules_annotation_processor
)
|
Python
| 0.000007
|
@@ -5068,34 +5068,8 @@
**'%5D
-, exclude_directories=True
) if
|
4f9b97766f33dca8d83e528ab7a2953b9168b92c
|
Add preprocess logs and check number of processors
|
pygraphc/preprocess/ParallelPreprocess.py
|
pygraphc/preprocess/ParallelPreprocess.py
|
from re import sub
from nltk import corpus
import multiprocessing
class ParallelPreprocess(object):
def __init__(self, log_file, count_groups=None):
self.log_file = log_file
self.logs = []
self.log_length = 0
self.unique_events = []
self.unique_events_length = 0
self.event_attributes = {}
self.count_groups = count_groups
def __call__(self, line):
return self.__get_events(line)
def __read_log(self):
"""Read a log file.
"""
with open(self.log_file, 'rb') as f:
self.logs = f.readlines()
self.log_length = len(self.logs)
@staticmethod
def __get_events(logs_with_id):
log_index, line = logs_with_id
line = line.replace('.', '')
line = sub('[^a-zA-Z]', ' ', line)
line = line.replace('_', ' ')
# remove word with length only 1 character
line_split = line.split()
for index, word in enumerate(line_split):
if len(word) == 1:
line_split[index] = ''
# remove more than one space
line = ' '.join(line_split)
line = ' '.join(line.split())
# remove stopwords
stopwords = corpus.stopwords.words('english')
stopwords_result = [w.lower() for w in line.split() if w.lower() not in stopwords]
preprocessed_events = ' '.join(stopwords_result)
preprocessed_with_id = (log_index, preprocessed_events)
return preprocessed_with_id
def get_unique_events(self):
self.__read_log()
logs_with_id = []
for index, log in enumerate(self.logs):
logs_with_id.append((index, log))
pool = multiprocessing.Pool(processes=4)
events = pool.map(self, logs_with_id)
pool.close()
pool.join()
# get graph event_attributes
unique_events_only = {}
unique_event_id = 0
unique_events_list = []
for log_id, event in events:
event_split = event.split()
if event not in unique_events_only.values():
unique_events_only[unique_event_id] = event
self.event_attributes[unique_event_id] = {'preprocessed_event': event_split,
'cluster': unique_event_id,
'member': [log_id]}
unique_event_id += 1
unique_events_list.append(event_split)
else:
for index, attr in self.event_attributes.iteritems():
if event_split == attr['preprocessed_event']:
attr['member'].append(log_id)
# transpose unique events list
unique_events_transpose = map(list, zip(*unique_events_list))
# check if each transposed list has the same elements
true_status = []
for index, transposed in enumerate(unique_events_transpose):
status = all(item == transposed[0] for item in transposed)
if status:
true_status.append(index)
# remove repetitive words
for index, attr in self.event_attributes.iteritems():
attr['preprocessed_event'] = [y for x, y in enumerate(attr['preprocessed_event']) if x not in true_status]
attr['preprocessed_event'] = ' '.join(attr['preprocessed_event'])
# get unique events for networkx
self.unique_events_length = unique_event_id
for index, attr in self.event_attributes.iteritems():
self.unique_events.append((index, attr))
return self.unique_events
def get_unique_events_nopreprocess(self):
unique_event_id = 0
for event_id, words_split in self.count_groups.iteritems():
attr = {'preprocessed_event': ' '.join(list(words_split)),
'cluster': event_id,
'original_id': event_id}
self.unique_events.append((unique_event_id, attr))
self.event_attributes[unique_event_id] = attr
unique_event_id += 1
self.unique_events_length = len(self.unique_events)
return self.unique_events
|
Python
| 0
|
@@ -224,32 +224,66 @@
.log_length = 0%0A
+ self.preprocess_logs = %7B%7D%0A
self.uni
@@ -294,24 +294,24 @@
events = %5B%5D%0A
-
self
@@ -1560,32 +1560,52 @@
e_events(self):%0A
+ # read logs%0A
self.__r
@@ -1731,24 +1731,112 @@
dex, log))%0A%0A
+ # run preprocessing in parallel%0A total_cpu = multiprocessing.cpu_count()%0A
pool
@@ -1873,9 +1873,17 @@
ses=
-4
+total_cpu
)%0A
@@ -2794,16 +2794,16 @@
vent'%5D:%0A
-
@@ -2849,16 +2849,116 @@
og_id)%0A%0A
+ # get preprocessed logs as dictionary%0A self.preprocess_logs%5Blog_id%5D = event%0A%0A
|
d13204abb2cf5d341eff78416dd442c303042697
|
Modify add_occupant method to raise exception in case of a duplicate
|
classes/room.py
|
classes/room.py
|
class Room(object):
def __init__(self, room_name, room_type, max_persons):
self.room_name = room_name
self.room_type = room_type
self.max_persons = max_persons
self.persons = []
def add_occupant(self, person):
if len(self.persons) < self.max_persons:
self.persons.append(person)
print (person.person_type.title() + " " + person.person_name.title() + " " + person.person_surname.title() + " has been allocated " + self.room_type + " " + self.room_name.title())
else:
raise Exception(self.room_type.title() + " " + self.room_name.title() + " is at full capacity")
|
Python
| 0
|
@@ -244,16 +244,59 @@
erson):%0A
+ if person not in self.persons:%0A
@@ -340,32 +340,36 @@
ns:%0A
+
self.persons.app
@@ -380,16 +380,20 @@
person)%0A
+
@@ -577,16 +577,20 @@
itle())%0A
+
@@ -607,16 +607,20 @@
+
raise Ex
@@ -707,8 +707,239 @@
acity%22)%0A
+ else:%0A raise Exception(person.person_type.title() + %22 %22 + person.person_name.title() + %22 %22 + person.person_surname.title() + %22 is already among the occupants in %22 + self.room_type + %22 %22 + self.room_name.title())%0A
|
372eb236eb85a74f85ce489270b24cc789b3fb0e
|
Add docstrings to parser
|
npc/parser.py
|
npc/parser.py
|
#!/usr/bin/env python3.5
import re
import itertools
from os import path, walk
from collections import defaultdict
def get_characters(search_paths = ['.'], ignore_paths = []):
return itertools.chain.from_iterable((_parse_path(path, ignore_paths) for path in search_paths))
def _parse_path(start_path, ignore_paths = [], include_bare = False):
"""Parse all the character files in a directory
Set include_bare to True to scan files without an extension in addition to
.nwod files.
"""
if path.isfile(start_path):
return [_parse_character(start_path)]
characters = []
for dirpath, _, files in _walk_ignore(start_path, ignore_paths):
for name in files:
target_path = path.join(dirpath, name)
if target_path in ignore_paths:
continue
base, ext = path.splitext(name)
if ext == '.nwod' or (include_bare and not ext):
data = _parse_character(target_path)
characters.append(data)
return characters
def _walk_ignore(root, ignore):
def included(d):
return (path.join(dirpath, d) not in ignore) and (dirpath not in ignore)
for dirpath, dirnames, filenames in walk(root, followlinks=True):
dirnames[:] = [d for d in dirnames if included(d)]
yield dirpath, dirnames, filenames
def _parse_character(char_file_path):
"""Parse a single character file"""
name_re = re.compile('(?P<name>\w+(\s\w+)*)(?: - )?.*')
section_re = re.compile('^--.+--\s*$')
tag_re = re.compile('^@(?P<tag>\w+)\s+(?P<value>.*)$')
# Group-like tags. These all accept an accompanying `rank` tag.
group_tags = ['group', 'court', 'motley']
# derive character name from basename
basename = path.basename(char_file_path)
match = name_re.match(path.splitext(basename)[0])
name = match.group('name')
# rank uses a dict keyed by group name instead of an array
# description is always a plain string
char_properties = defaultdict(list)
char_properties.update({'name': [name], 'description': '', 'rank': defaultdict(list)})
with open(char_file_path, 'r') as char_file:
last_group = ''
previous_line_empty = False
for line in char_file:
# stop processing once we see game stats
if section_re.match(line):
break
match = tag_re.match(line)
if match:
tag = match.group('tag')
value = match.group('value')
if tag == 'changeling':
bits = value.split(maxsplit=1)
char_properties['type'].append('Changeling')
if len(bits):
char_properties['seeming'].append(bits[0])
if len(bits) > 1:
char_properties['kith'].append(bits[1])
continue
if tag == 'realname':
char_properties['name'][0] = value
continue
if tag in group_tags:
last_group = value
if tag == 'rank':
if last_group:
char_properties['rank'][last_group].append(value)
continue
else:
if line == "\n":
if not previous_line_empty:
previous_line_empty = True
else:
continue
else:
previous_line_empty = False
char_properties['description'] += line
continue
char_properties[tag].append(value)
char_properties['description'] = char_properties['description'].strip()
char_properties['path'] = char_file_path
return char_properties
|
Python
| 0.000002
|
@@ -170,16 +170,287 @@
= %5B%5D):%0A
+ %22%22%22%0A Get data from character files%0A%0A Args:%0A search_paths (list): Paths to search for character files%0A ignore_paths (list): Paths to exclude from the search%0A%0A Returns:%0A List of dictionaries containing parsed character information%0A %22%22%22%0A
retu
@@ -616,24 +616,29 @@
se):%0A %22%22%22
+%0A
Parse all th
@@ -655,18 +655,21 @@
r files
-in
+under
a direc
@@ -682,40 +682,158 @@
-Set include_bare to True to scan
+Args:%0A start_path (str): Path to search%0A ignore_paths (list): Pathsh to exclude%0A include_bare (bool): Whether to attempt to parse
fil
@@ -845,16 +845,28 @@
thout an
+%0A
extensi
@@ -882,20 +882,16 @@
ition to
-%0A
.nwod f
@@ -895,16 +895,92 @@
d files.
+%0A%0A Returns:%0A List of dictionaries containing parsed character data
%0A %22%22%22
@@ -1556,24 +1556,595 @@
-def included(d):
+%22%22%22%0A Recursively traverse a directory tree while ignoring certain paths.%0A%0A Args:%0A root (str): Directory to start at%0A ignore (list): Paths to skip over%0A%0A Yields:%0A A tuple (path, %5Bdirs%5D, %5Bfiles%5D) as from %60os.walk%60.%0A %22%22%22%0A def included(d):%0A %22%22%22%0A Determine whether a path should be searched%0A%0A Only skips this path if it, or its parent, is explicitly in the %60ignore%60%0A list.%0A%0A Args:%0A d (str): The path to check%0A%0A Returns:%0A True if d should be searched, false if it should be ignored%0A %22%22%22
%0A
@@ -2440,16 +2440,21 @@
%0A %22%22%22
+%0A
Parse a
@@ -2474,16 +2474,149 @@
ter file
+%0A%0A Args:%0A char_file_path (str): Path to the character file to parse%0A%0A Returns:%0A Dictionary of character data%0A
%22%22%22%0A
|
90d3f00cd8fea8fab9274069ac06ea461f8e4dfd
|
Send only pics and gifs to OOO_B_R.
|
channels/ooo_b_r/app.py
|
channels/ooo_b_r/app.py
|
#encoding:utf-8
from utils import get_url, weighted_random_subreddit
# Group chat https://yal.sh/dvdahoy
t_channel = '-1001065558871'
subreddit = weighted_random_subreddit({
'ANormalDayInRussia': 1.0,
'ANormalDayInAmerica': 0.1,
'ANormalDayInJapan': 0.01
})
def send_post(submission, r2t):
what, url, ext = get_url(submission)
title = submission.title
link = submission.shortlink
text = '{}\n{}'.format(title, link)
if what == 'text':
return False
elif what == 'other':
return False
elif what == 'album':
r2t.send_album(url)
return True
elif what in ('gif', 'img'):
return r2t.send_gif_img(what, url, ext, text)
else:
return False
|
Python
| 0
|
@@ -447,210 +447,8 @@
k)%0A%0A
- if what == 'text':%0A return False%0A elif what == 'other':%0A return False%0A elif what == 'album':%0A r2t.send_album(url)%0A return True%0A elif what in ('gif', 'img'):%0A
@@ -497,35 +497,4 @@
xt)%0A
- else:%0A return False%0A
|
35d8defa245d9ad1a038b297e9cfe711b65beb3c
|
fix sorting function
|
monolithe/specifications/specification.py
|
monolithe/specifications/specification.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from monolithe.lib import SDKUtils
from .specification_api import SpecificationAPI
from .specification_attribute import SpecificationAttribute
class Specification(object):
""" Defines a specification object
"""
def __init__(self, filename, monolithe_config=None, data=None):
""" Initializes a model object
Example:
name: EnterpriseNetwork
instance_name: enterprise_network
entity_name_plural: EnterpriseNetworks
instance_name_plural: enterprise_networks
rest_name: enterprisenetwork
resource_name: enterprisenetworks
package: network
"""
self.monolithe_config = monolithe_config
self.filename = filename
self.allows_create = False
self.allows_delete = False
self.allows_get = False
self.allows_update = False
self.description = None
self.entity_name_plural = None # the original name in plural
self.extends = []
self.instance_name = None # Name of the object as an instance
self.instance_name_plural = None # Name of the object as an instance of array or fetcher
self.is_root = False
self.package = None
self.resource_name = None # The name of the resource used in URI
self.rest_name = None # The remote name of the object
self._entity_name = None # The original name of the object
self.attributes = []
self.child_apis = []
self.parent_apis = []
if data:
self.from_dict(data=data)
@property
def entity_name(self):
"""
"""
return self._entity_name
@entity_name.setter
def entity_name(self, value):
"""
"""
self._entity_name = value
language = self.monolithe_config.language if self.monolithe_config else 'python'
if value:
self.instance_name = SDKUtils.get_name_in_language(name=value, language=language)
self.entity_name_plural = SDKUtils.get_entity_name_plural(singular_name=value)
self.instance_name_plural = SDKUtils.get_name_in_language(name=self.entity_name_plural, language=language)
def to_dict(self):
""" Transform the current specification to a dictionary
"""
data = {"model": {}}
data["model"]["description"] = self.description
data["model"]["entity_name"] = self.entity_name
data["model"]["package"] = self.package
data["model"]["resource_name"] = self.resource_name
data["model"]["rest_name"] = self.rest_name
data["model"]["extends"] = self.extends
data["model"]["get"] = self.allows_get
data["model"]["update"] = self.allows_update
data["model"]["create"] = self.allows_create
data["model"]["delete"] = self.allows_delete
data["model"]["root"] = self.is_root
data["attributes"] = []
for attribute in self.attributes:
data["attributes"].append(attribute.to_dict())
data["children"] = []
for api in self.child_apis:
data["children"].append(api.to_dict())
return data
def from_dict(self, data):
""" Fill the current object with information from the specification
"""
if "model" in data:
model = data["model"]
self.description = model["description"] if "description" in model else None
self.package = model["package"] if "package" in model else None
self.extends = model["extends"] if "extends" in model else []
self.entity_name = model["entity_name"] if "entity_name" in model else None
self.rest_name = model["rest_name"] if "rest_name" in model else None
self.resource_name = model["resource_name"] if "resource_name" in model else None
self.allows_get = model["get"] if "get" in model else False
self.allows_create = model["create"] if "create" in model else False
self.allows_update = model["update"] if "update" in model else False
self.allows_delete = model["delete"] if "delete" in model else False
self.is_root = model["root"] if "root" in model else False
if "attributes" in data:
self.attributes = self._get_attributes(data["attributes"])
if "children" in data:
self.child_apis = self._get_apis(data["children"])
def _get_apis(self, apis):
""" Process apis for the given model
Args:
model: the model processed
apis: the list of apis available for the current model
relations: dict containing all relations between resources
"""
ret = []
for data in apis:
ret.append(SpecificationAPI(specification=self, data=data))
return sorted(ret, key=lambda x: getattr(x, "specification"))
def _get_attributes(self, attributes):
"""
"""
ret = []
for data in attributes:
ret.append(SpecificationAttribute(specification=self, data=data))
return sorted(ret, key=lambda x: getattr(x, "name"))
|
Python
| 0.000074
|
@@ -6522,36 +6522,32 @@
sorted(ret,
-key=
lambda x
: getattr(x,
@@ -6538,36 +6538,41 @@
da x
-: getattr(x, %22specification%22
+, y: cmp(x.rest_name, y.rest_name
))%0A%0A
@@ -6799,20 +6799,16 @@
et,
-key=
lambda x
: ge
@@ -6807,26 +6807,30 @@
da x
-: getattr(x, %22
+, y: cmp(x.name, y.
name
-%22
))%0A
|
e309ed0a2f1f991e4015fcede373dccfe3843d97
|
Change version tag.
|
core/info/info.py
|
core/info/info.py
|
# -*- coding: utf-8 -*-
"""Informations.
+ Pyslvs version.
+ Module versions.
+ Help descriptions.
+ Check for update function.
"""
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2018"
__license__ = "AGPL"
__email__ = "pyslvs@gmail.com"
from sys import version_info
import platform
import argparse
import requests
from core.QtModules import (
QProgressDialog,
qVersion,
PYQT_VERSION_STR
)
Qt_Version = qVersion().strip()
PyQt_Version = PYQT_VERSION_STR.strip()
VERSION = (18, 3, 0, 'dev')
INFO = (
"Pyslvs {}.{}.{}({})".format(*VERSION),
"OS Type: {} {} [{}]".format(platform.system(), platform.release(), platform.machine()),
"Python Version: {v.major}.{v.minor}.{v.micro}({v.releaselevel})".format(v=version_info),
"Python Compiler: {}".format(platform.python_compiler()),
"Qt Version: {}".format(Qt_Version),
"PyQt Version: {}".format(PyQt_Version)
)
POWERBY = (
"Python IDE Eric 6",
"PyQt 5",
"dxfwrite",
"Cython",
"PyZMQ",
"openpyxl",
"psutil",
"peewee",
"Lark-parser",
"NetworkX",
"Pydot"
)
"""--help arguments"""
parser = argparse.ArgumentParser(
description="Pyslvs - Open Source Planar Linkage Mechanism Simulation and Mechanical Synthesis System. ",
epilog="Power by {}.".format(", ".join(POWERBY))
)
parser.add_argument('-v', '--version', action='version', help="show version information and exit", version=INFO[0])
parser.add_argument('r', metavar='FILE PATH', default=False, nargs='?', type=str, help="read workbook from the file path")
parser.add_argument('-i', metavar='START PATH', default=False, nargs='?', type=str, help="start Pyslvs in the specified path")
parser.add_argument('-w', action='store_true', help="show rebuild warning of canvas")
parser.add_argument('-f', '--fusion', action='store_true', help="run Pyslvs in Fusion style")
parser.add_argument('--full-screen', action='store_true', help="start Pyslvs with full-screen mode")
parser.add_argument('--server', metavar='PORT', default=False, nargs='?', type=str, help="start ZMQ server")
parser.add_argument('-d', '--debug-mode', action='store_true', help="do not connect to GUI console when opening")
parser.add_argument('-t', '--test', action='store_true', help="startup the program to test imported modules")
ARGUMENTS = parser.parse_args()
def check_update(progdlg: QProgressDialog) -> [str, bool]:
"""Check for update."""
m = progdlg.maximum()
from core.QtModules import QCoreApplication
for i in range(m):
QCoreApplication.processEvents()
if progdlg.wasCanceled():
return
next = list(VERSION[:m])
next[i] += 1
url = "https://github.com/KmolYuan/Pyslvs-PyQt5/releases/tag/v{}.{:02}.{}".format(*next)
request = requests.get(url)
progdlg.setValue(i + 1)
if request.status_code == 200:
progdlg.setValue(m)
return url
return False
|
Python
| 0
|
@@ -512,11 +512,15 @@
0, '
-dev
+release
')%0A%0A
|
35d2a174d671e29e08ad512f9bee08e150d39984
|
Save original, then parse amounts.
|
db/db.py
|
db/db.py
|
#!/usr/bin/python
import sys
import copy
import json
import getpass
import aesjsonfile
sys.path.append("../")
import config
def parse_amount(amount):
if type(amount) == int:
return amount
if "." not in amount:
amount += ".00"
return int(amount.replace("$","").replace(",","").replace(".",""))
class DB(object):
def __init__(self, username, password):
self.username = username
self.password = password
self.db = aesjsonfile.load("%s/%s.json"%(config.dbdir, self.username), self.password)
self.db.setdefault("transactions",[])
self.db.setdefault("balances",{})
self.db.setdefault("accounts",[])
def save(self):
aesjsonfile.dump("%s/%s.json"%(config.dbdir, self.username), self.db, self.password)
def accountstodo(self):
ret = copy.deepcopy(self.db["accounts"])
for acct in ret:
trans = self.search({"account":acct["name"]},limit=5)
acct["seenids"] = [x["id"] for x in trans]
if trans:
acct["lastcheck"] = trans[0]["date"]
return ret
def accounts(self):
ret = copy.deepcopy(self.db["accounts"])
for acct in ret:
acct.pop("password",None)
acct["subaccounts"] = []
for sub in self.db["balances"].get(acct["name"],{}):
acct["subaccounts"].append({"name": sub, "amount": self.db["balances"][acct["name"]][sub][0]["amount"],
"date": self.db["balances"][acct["name"]][sub][0]["lastdate"]})
return ret
def search(self, query={}, startdate="0", enddate = "9999", limit=100):
ret = []
for trans in self.db["transactions"]:
if trans["date"] < startdate or trans["date"] > enddate:
continue
if type(query) in [ str, unicode ]:
if query not in json.dumps(trans.values()):
continue
elif query and type(query) == dict:
for k in query:
if not trans.get(k) or query[k] not in trans[k]:
continue
ret.append(trans)
if len(trans) >= limit:
break
return ret
def getallids(self):
return [x["id"] for x in self.db["transactions"]]
def newtransactions(self, data):
for trans in data.get("transactions",[]):
if trans["id"] not in self.getallids():
for k,v in trans.iteritems():
trans["orig_"+k] = v
self.db["transactions"].append(trans)
self.db["transactions"].sort(cmp=lambda x,y: cmp(x["date"],y["date"]) or cmp(x["id"],y["id"]), reverse=True)
for bal in data.get("balances",[]):
amount = parse_amount(bal["balance"])
oldbal = self.db["balances"].setdefault(bal["account"],{}).setdefault(bal["subaccount"],[])
if oldbal and oldbal[0]["amount"] == amount:
oldbal[0]["lastdate"] = bal["date"]
else:
oldbal.insert(0, {"amount": amount, "firstdate": bal["date"], "lastdate": bal["date"]})
self.save()
return True
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.exit(1)
password = getpass.getpass()
db = DB(sys.argv[1],password)
print "accountstodo"
print json.dumps(db.accountstodo(), indent=2)
print "accounts"
print json.dumps(db.accounts(), indent=2)
print json.dumps(db.search(limit=10), indent=2)
|
Python
| 0
|
@@ -2559,16 +2559,139 @@
+k%5D = v%0A
+ trans%5B%22orig_amount_str%22%5D = trans%5B%22amount%22%5D%0A trans%5B%22amount%22%5D = parse_amount(trans%5B%22amount%22%5D)%0A
|
d966b0973da71f5c883697ddd12c2728b2a04cce
|
Improve git tag to version conversion
|
ci/cleanup-binary-tags.py
|
ci/cleanup-binary-tags.py
|
#!/usr/bin/env python3
import os
import subprocess
import re
import semver
def tag_to_version(tag):
version = re.sub(r'binary-', '', tag)
version = re.sub(r'-[x86|i686].*', '', version)
return version
subprocess.check_call('git pull --tags', shell=True)
tags = subprocess.check_output(
'git tag --list | grep binary', shell=True).decode('UTF-8').splitlines()
versions = sorted(list(set([tag_to_version(tag) for tag in tags])),
key=semver.parse_version_info)
versions_to_delete = versions[:-3]
cmd_delete_local = 'git tag --delete'
cmd_delete_remote = 'git push --delete '
GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')
if GITHUB_TOKEN:
cmd_delete_remote += (
'https://{}@github.com/autozimu/LanguageClient-neovim.git'
.format(GITHUB_TOKEN))
else:
cmd_delete_remote += 'origin'
for tag in tags:
if tag_to_version(tag) in versions_to_delete:
cmd_delete_local += ' ' + tag
cmd_delete_remote += ' ' + tag
if not cmd_delete_local.endswith('delete'):
subprocess.check_call(cmd_delete_local, shell=True)
if not (cmd_delete_remote.endswith('origin') or
cmd_delete_remote.endswith('.git')):
subprocess.check_call(cmd_delete_remote, shell=True)
|
Python
| 0.000001
|
@@ -104,116 +104,44 @@
-version = re.sub(r'binary-', '', tag)%0A version = re.sub(r'-%5Bx86%7Ci686%5D.*', '', version)%0A return version
+return tag.split('-')%5B1%5D.lstrip('v')
%0A%0A%0As
|
634c126a3a775419ae35365a3ea8ce6e1f3e4156
|
Update acg_gamer_com_tw_acgDetail.py
|
my-ACG/util/acg_gamer_com_tw_acgDetail.py
|
my-ACG/util/acg_gamer_com_tw_acgDetail.py
|
import argparse
import logging
import re
import requests
import pywikibot
from bs4 import BeautifulSoup
class AcgGamerComTwAcgDetail:
RATING_IMG = {
'ALL': 0,
'6TO12': 6,
'12TO18': 12,
'15TO18': 15,
'18UP': 18,
}
RATING_ITEM = {
0: 'Q46',
6: 'Q47',
12: 'Q48',
15: 'Q49',
18: 'Q50',
}
def getData(self, url):
text = requests.get(url).text
soup = BeautifulSoup(text, 'html.parser')
data = {}
box1listA = soup.find('ul', {'class': 'ACG-box1listA'})
episodes = re.search(r'播出集數:(\d+)', box1listA.text)
if episodes:
data['episodes'] = int(episodes.group(1))
year = re.search(r'當地(?:首播|發售):(\d{4})-(\d{2})-(\d{2})', box1listA.text)
if year:
data['year'] = year.group(1) + year.group(2) + year.group(3)
box1mark = soup.find('p', {'id': 'ACG-box1mark'})
for img in box1mark.findAll('img'):
m = re.search(r'TW-(.+?)\.gif', img.get('src'))
if m:
data['rating'] = self.RATING_IMG[m.group(1)]
break
return data
def _get_wbtime(self, year):
if len(year) == 4:
return pywikibot.WbTime(year=int(year), calendarmodel='http://www.wikidata.org/entity/Q1985727')
if len(year) == 6:
return pywikibot.WbTime(year=int(year[0:4]), month=int(year[4:6]), calendarmodel='http://www.wikidata.org/entity/Q1985727')
if len(year) == 8:
return pywikibot.WbTime(year=int(year[0:4]), month=int(year[4:6]), day=int(year[6:8]), calendarmodel='http://www.wikidata.org/entity/Q1985727')
return None
def updateItem(self, datasite, item):
itemlabel = item.get()['labels']['zh-tw']
logging.info('%s %s', item.id, itemlabel)
claims = item.get()['claims']
if 'P1' not in claims:
logging.error('\t No acg gamer claims')
return
url = claims['P1'][0].getTarget()
data = self.getData(url)
# 台灣分級
if 'rating' in data:
rating_exists = False
if 'P23' in claims:
for claim in claims['P23']:
if claim.getTarget().id == self.RATING_ITEM[data['rating']]:
rating_exists = True
if len(claim.sources) == 0:
rating_source = pywikibot.page.Claim(datasite, 'P1')
rating_source.setTarget(url)
logging.info('\t Add source to rating')
claim.addSource(rating_source)
if not rating_exists:
new_claim = pywikibot.page.Claim(datasite, 'P23')
new_claim.setTarget(pywikibot.ItemPage(datasite, self.RATING_ITEM[data['rating']]))
rating_source = pywikibot.page.Claim(datasite, 'P1')
rating_source.setTarget(url)
new_claim.addSource(rating_source)
logging.info('\t Add new rating %s', data['rating'])
item.addClaim(new_claim, summary='新增台灣分級')
# 年份
if 'year' in data:
if 'P29' in claims:
if claims['P29'][0].getTarget().precision < 11:
wbtime = self._get_wbtime(data['year'])
if wbtime:
logging.info('\t Update year to %s', data['year'])
claims['P29'][0].changeTarget(wbtime, summary='更新年份')
else:
wbtime = self._get_wbtime(data['year'])
if wbtime:
new_claim = pywikibot.page.Claim(datasite, 'P29')
new_claim.setTarget(wbtime)
logging.info('\t Add new year %s', data['year'])
item.addClaim(new_claim, summary='新增年份')
# 總集數
if 'episodes' in data:
# 已看集數
if 'P28' not in claims:
new_claim = pywikibot.page.Claim(datasite, 'P28')
new_claim.setTarget(pywikibot.WbQuantity(0, site=datasite))
logging.info('\t Add seen episodes')
item.addClaim(new_claim, summary='新增已看集數')
new_episodes = data['episodes']
if 'P27' in claims:
episodesValue = claims['P27'][0].getTarget()
old_episodes = episodesValue.amount
if new_episodes > old_episodes:
episodesValue.amount = new_episodes
logging.info('\t Update episodes from %s to %s', old_episodes, new_episodes)
claims['P27'][0].changeTarget(episodesValue, summary='更新總集數')
else:
new_claim = pywikibot.page.Claim(datasite, 'P27')
new_claim.setTarget(pywikibot.WbQuantity(new_episodes, site=datasite))
logging.info('\t Add new episodes %s', new_episodes)
item.addClaim(new_claim, summary='新增總集數')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('url')
args = parser.parse_args()
print(AcgGamerComTwAcgDetail().getData(args.url))
|
Python
| 0.000001
|
@@ -46,34 +46,33 @@
ort
-requests%0A%0Aimport pywikibot
+pywikibot%0Aimport requests
%0Afro
|