| commit | subject | old_file | new_file | old_contents | lang | proba | diff |
|---|---|---|---|---|---|---|---|
255202553c4fe336cfc3e2526806ae41cec2556c
|
Set default timezone on windows every time
|
module/web/settings.py
|
module/web/settings.py
|
# -*- coding: utf-8 -*-
# Django settings for pyload project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
import os.path
import sys
import django
SERVER_VERSION = "0.3.2"
PROJECT_DIR = os.path.dirname(__file__)
#chdir(dirname(abspath(__file__)) + sep)
PYLOAD_DIR = os.path.join(PROJECT_DIR,"..","..")
sys.path.append(PYLOAD_DIR)
sys.path.append(os.path.join(PYLOAD_DIR, "module"))
import InitHomeDir
sys.path.append(pypath)
from module.ConfigParser import ConfigParser
config = ConfigParser()
#DEBUG = config.get("general","debug")
try:
import module.web.ServerThread
if not module.web.ServerThread.core:
raise Exception
PYLOAD = module.web.ServerThread.core.server_methods
except:
import xmlrpclib
ssl = ""
if config.get("ssl", "activated"):
ssl = "s"
server_url = "http%s://%s:%s@%s:%s/" % (
ssl,
config.username,
config.password,
config.get("remote", "listenaddr"),
config.get("remote", "port")
)
PYLOAD = xmlrpclib.ServerProxy(server_url, allow_none=True)
TEMPLATE = config.get('webinterface','template')
DL_ROOT = os.path.join(PYLOAD_DIR, config.get('general','download_folder'))
LOG_ROOT = os.path.join(PYLOAD_DIR, config.get('log','log_folder'))
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
#DATABASE_NAME = os.path.join(PROJECT_DIR, 'pyload.db') # Or path to database file if using sqlite3.
DATABASE_NAME = 'pyload.db' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
if django.VERSION[0] > 1 or django.VERSION[1] > 1:
zone = None
else:
zone = 'Europe'
TIME_ZONE = zone
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = config.get("general","language")
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_DIR, "media/")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
#MEDIA_URL = 'http://localhost:8000/media'
MEDIA_URL = '/media/' + config.get('webinterface','template') + '/'
#MEDIA_URL = os.path.join(PROJECT_DIR, "media/")
LOGIN_REDIRECT_URL = "/"
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '+u%%1t&c7!e$0$*gu%w2$@to)h0!&x-r*9e+-=wa4*zxat%x^t'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.csrf.middleware.CsrfViewMiddleware',
'django.contrib.csrf.middleware.CsrfResponseMiddleware'
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, "templates"),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
#'django.contrib.sites',
'django.contrib.admin',
'pyload',
'ajax',
'cnl',
)
AUTH_PROFILE_MODULE = 'pyload.UserProfile'
LOGIN_URL = '/login/'
|
Python
| 0.000001
|
@@ -108,21 +108,16 @@
mport os
-.path
%0D%0Aimport
@@ -2501,16 +2501,17 @@
ne.%0D%0Aif
+(
django.V
@@ -2548,16 +2548,37 @@
N%5B1%5D %3E 1
+) and os.name != %22nt%22
:%0D%0A z
|
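Decoded for readability (the diff column is URL-encoded), this patch makes two changes to settings.py: `import os.path` becomes `import os`, and the timezone check gains a Windows guard, matching the commit subject. A sketch of the resulting block:

import os
import django

# After the patch: TIME_ZONE is only left to Django (zone = None) on
# non-Windows systems with Django >= 1.2; on Windows (os.name == "nt")
# the explicit zone is kept, so the default timezone is set every time.
if (django.VERSION[0] > 1 or django.VERSION[1] > 1) and os.name != "nt":
    zone = None
else:
    zone = 'Europe'
TIME_ZONE = zone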
1f59ad4a5fa14a420c683cfd8713c0eb31a9acec
|
Bump version number for aio.
|
rest_framework_swagger/__init__.py
|
rest_framework_swagger/__init__.py
|
VERSION = '0.3.2'
DEFAULT_SWAGGER_SETTINGS = {
'exclude_namespaces': [],
'api_version': '',
'api_key': '',
'token_type': 'Token',
'enabled_methods': ['get', 'post', 'put', 'patch', 'delete'],
'is_authenticated': False,
'is_superuser': False,
'permission_denied_handler': None,
'resource_access_handler': None,
'template_path': 'rest_framework_swagger/index.html',
'doc_expansion': 'none',
'base_path': ''
}
try:
from django.conf import settings
from django.test.signals import setting_changed
def load_settings(provided_settings):
global SWAGGER_SETTINGS
SWAGGER_SETTINGS = provided_settings
for key, value in DEFAULT_SWAGGER_SETTINGS.items():
if key not in SWAGGER_SETTINGS:
SWAGGER_SETTINGS[key] = value
def reload_settings(*args, **kwargs):
setting, value = kwargs['setting'], kwargs['value']
if setting == 'SWAGGER_SETTINGS':
load_settings(value)
load_settings(getattr(settings,
'SWAGGER_SETTINGS',
DEFAULT_SWAGGER_SETTINGS))
setting_changed.connect(reload_settings)
except:
SWAGGER_SETTINGS = DEFAULT_SWAGGER_SETTINGS
|
Python
| 0
|
@@ -12,9 +12,16 @@
0.3.
-2
+5-aio-v1
'%0A%0AD
|
59d98ffb3fb376826084682ebbbcaba43886619e
|
fix bug in SPLIT_PATCH_TXT_RE
|
restfulgit/porcelain/converters.py
|
restfulgit/porcelain/converters.py
|
# coding=utf-8
from __future__ import absolute_import, unicode_literals, print_function, division
import re
from flask import url_for
from restfulgit.plumbing.retrieval import get_commit
from restfulgit.plumbing.converters import convert_commit as _plumbing_convert_commit
from restfulgit.porcelain.retrieval import get_repo_description, get_diff
GIT_STATUS_TO_NAME = {
'M': 'modified',
'A': 'added',
'R': 'renamed',
'D': 'removed',
}
SPLIT_PATCH_TXT_RE = re.compile(r'^\+\+\+\ b\/(.*?)\n(@@.*?)(?=\n^diff|\n\Z)', re.M | re.S)
def convert_repo(repo_key):
description = get_repo_description(repo_key)
return {
"name": repo_key,
"full_name": repo_key,
"description": description,
"url": url_for('porcelain.get_repo_info', _external=True, repo_key=repo_key),
"branches_url": (url_for('porcelain.get_branches', _external=True, repo_key=repo_key).rstrip('/') + '{/branch}'),
"blobs_url": (url_for('plumbing.get_blob', _external=True, repo_key=repo_key, sha='').rstrip('/') + '{/sha}'),
"commits_url": (url_for('porcelain.get_commit', _external=True, repo_key=repo_key, branch_or_tag_or_sha='').rstrip('/') + '{/sha}'),
"git_commits_url": (url_for('plumbing.get_commit', _external=True, repo_key=repo_key, sha='').rstrip('/') + '{/sha}'),
"git_refs_url": (url_for('plumbing.get_refs', _external=True, repo_key=repo_key).rstrip('/') + '{/sha}'),
"git_tags_url": (url_for('plumbing.get_tag', _external=True, repo_key=repo_key, sha='').rstrip('/') + '{/sha}'),
"tags_url": url_for('porcelain.get_tags', _external=True, repo_key=repo_key),
"trees_url": (url_for('plumbing.get_tree', _external=True, repo_key=repo_key, sha='').rstrip('/') + '{/sha}'),
}
def convert_branch_summary(repo_key, branch):
url = url_for('porcelain.get_commit', _external=True, repo_key=repo_key, branch_or_tag_or_sha=unicode(branch.target))
return {
"name": branch.branch_name,
"commit": {
"sha": unicode(branch.target),
"url": url,
}
}
def convert_branch_verbose(repo_key, repo, branch):
url = url_for('porcelain.get_branch', _external=True, repo_key=repo_key, branch_name=branch.branch_name)
return {
"name": branch.branch_name,
"commit": convert_commit(repo_key, repo, branch.get_object()),
"url": url,
"_links": {
# For some reason GitHub API for branch does the self-link like this
# instead of with "url" as everywhere else.
"self": url,
}
}
def _filename_to_patch_from(diff):
matches = re.findall(SPLIT_PATCH_TXT_RE, diff.patch)
return dict(m for m in matches)
def _convert_patch(repo_key, commit, patch, filename_to_patch):
deleted = patch.status == 'D'
commit_sha = unicode(commit.id if not deleted else commit.parent_ids[0])
result = {
"sha": patch.new_oid if not deleted else patch.old_oid,
"status": GIT_STATUS_TO_NAME[patch.status],
"filename": patch.new_file_path,
"additions": patch.additions,
"deletions": patch.deletions,
"changes": patch.additions + patch.deletions,
"raw_url": url_for('porcelain.get_raw',
_external=True,
repo_key=repo_key,
branch_or_tag_or_sha=commit_sha,
file_path=patch.new_file_path),
"contents_url": url_for('porcelain.get_contents',
_external=True,
repo_key=repo_key,
file_path=patch.new_file_path,
ref=commit_sha),
}
if patch.new_file_path in filename_to_patch:
result['patch'] = filename_to_patch[patch.new_file_path]
return result
def convert_commit(repo_key, repo, commit, include_diff=False):
plain_commit_json = _plumbing_convert_commit(repo_key, commit, porcelain=True)
result = {
"commit": plain_commit_json,
"sha": plain_commit_json['sha'],
"author": plain_commit_json['author'],
"committer": plain_commit_json['committer'],
"url": url_for('porcelain.get_commit', _external=True,
repo_key=repo_key, branch_or_tag_or_sha=unicode(commit.id)),
"parents": [{
"sha": unicode(c.id),
"url": url_for('porcelain.get_commit', _external=True,
repo_key=repo_key, branch_or_tag_or_sha=unicode(c.id))
} for c in commit.parents],
}
if include_diff:
diff = get_diff(repo, commit)
patches = list(diff)
filename_to_patch = _filename_to_patch_from(diff)
patches_additions = sum(patch.additions for patch in patches)
patches_deletions = sum(patch.deletions for patch in patches)
result.update({
"stats": {
"additions": patches_additions,
"deletions": patches_deletions,
"total": patches_additions + patches_deletions,
},
"files": [_convert_patch(repo_key, commit, patch, filename_to_patch) for patch in patches],
})
return result
def convert_blame(repo_key, repo, blame, raw_lines, start_line):
annotated_lines = []
commit_shas = set()
for line_num, line in enumerate(raw_lines, start=start_line):
hunk = blame.for_line(line_num)
commit_sha = hunk.final_commit_id
commit_shas.add(commit_sha)
annotated_lines.append({
'commit': commit_sha,
'origPath': hunk.orig_path,
'lineNum': line_num,
'line': line,
})
return {
'lines': annotated_lines,
'commits': {
commit_sha: _plumbing_convert_commit(repo_key, get_commit(repo, commit_sha))
for commit_sha in commit_shas
}
}
|
Python
| 0.000001
|
@@ -496,17 +496,21 @@
%5C+%5C b%5C/(
-.
+%5B%5E%5Cn%5D
*?)%5Cn(@@
|
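Decoded, this diff replaces the filename group `(.*?)` in SPLIT_PATCH_TXT_RE with `([^\n]*?)`. Under re.S the dotted non-greedy group can backtrack across newlines when no hunk header follows, swallowing whole lines into the "filename"; confining the group to a single line makes the match fail cleanly instead. A minimal demonstration with a hypothetical input (not from the repo):

import re

OLD = re.compile(r'^\+\+\+\ b\/(.*?)\n(@@.*?)(?=\n^diff|\n\Z)', re.M | re.S)
NEW = re.compile(r'^\+\+\+\ b\/([^\n]*?)\n(@@.*?)(?=\n^diff|\n\Z)', re.M | re.S)

# A patch whose first file has no "@@" hunk (e.g. a binary change).
patch = ("+++ b/data.bin\n"
         "Binary files differ\n"
         "+++ b/code.py\n"
         "@@ -1 +1 @@\n-x\n+y\n")

# Old pattern: "." matches newlines under re.S, so the filename group
# backtracks across lines until it reaches the real hunk header.
print(OLD.findall(patch)[0][0])  # 'data.bin\nBinary files differ\n+++ b/code.py'
# Fixed pattern: the filename stays on one line, so only code.py matches.
print(NEW.findall(patch)[0][0])  # 'code.py'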
62a978256476754a7f604b2f872b7bd221930ac2
|
add test_debian_repo and test_nested_debian_repo
|
merfi/tests/test_repocollector.py
|
merfi/tests/test_repocollector.py
|
from merfi.collector import RepoCollector
from os.path import join, dirname
class TestRepoCollector(object):
def setup(self):
self.repos = RepoCollector(path='/', _eager=False)
def test_simple_tree(self, deb_repotree):
repos = RepoCollector(path=deb_repotree)
# The root of the deb_repotree fixture is itself a repository.
assert [r.path for r in repos] == [deb_repotree]
def test_path_is_absolute(self):
assert self.repos._abspath('/') == '/'
def test_path_is_not_absolute(self):
assert self.repos._abspath('directory').startswith('/')
def test_debian_release_files(self, deb_repotree):
repos = RepoCollector(deb_repotree)
release_files = repos.debian_release_files
# The root of the deb_repotree fixture is itself a repository.
expected = [
join(deb_repotree, 'dists', 'trusty', 'Release'),
join(deb_repotree, 'dists', 'xenial', 'Release'),
]
assert set(release_files) == set(expected)
def test_debian_nested_release_files(self, nested_deb_repotree):
# go one level up
path = dirname(nested_deb_repotree)
repos = RepoCollector(path)
release_files = repos.debian_release_files
expected = [
join(path, 'jewel', 'dists', 'trusty', 'Release'),
join(path, 'jewel', 'dists', 'xenial', 'Release'),
join(path, 'luminous', 'dists', 'trusty', 'Release'),
join(path, 'luminous', 'dists', 'xenial', 'Release'),
]
assert set(release_files) == set(expected)
|
Python
| 0
|
@@ -34,17 +34,26 @@
ollector
+, DebRepo
%0A
-
from os.
@@ -610,16 +610,226 @@
h('/')%0A%0A
+ def test_debian_repo(self, deb_repotree):%0A repos = RepoCollector(deb_repotree)%0A # The root of the deb_repotree fixture is itself a repository.%0A assert repos == %5BDebRepo(deb_repotree)%5D%0A%0A
def
@@ -1188,32 +1188,32 @@
se'),%0A %5D%0A
-
assert s
@@ -1248,16 +1248,370 @@
ected)%0A%0A
+ def test_nested_debian_repo(self, nested_deb_repotree):%0A # go one level up%0A path = dirname(nested_deb_repotree)%0A repos = RepoCollector(path)%0A # Verify that we found the two repo trees.%0A expected = %5BDebRepo(join(path, 'jewel')),%0A DebRepo(join(path, 'luminous'))%5D%0A assert repos == expected%0A%0A
def
|
461bb058e19495dd0f6b56e701631de79cec39ad
|
reorder arguments passed to jinja2_cli.utils.parse_and_load_contexts to omit passing enc parameter
|
jinja2_cli/utils.py
|
jinja2_cli/utils.py
|
"""
:copyright: (c) 2012 - 2015 by Satoru SATOH <ssato@redhat.com>
:license: BSD-3
"""
from __future__ import absolute_import
import codecs
import glob
import os.path
import sys
from .compat import ENCODING, from_iterable
try:
from anyconfig.api import container, load
except ImportError:
container = dict
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
raise ("Could not import any json module to load contexts!"
" Aborting...")
def load(filepath, _ftype):
return json.load(open(filepath))
def get_locale_sensitive_stdout(encoding=ENCODING):
return codecs.getwriter(encoding)(sys.stdout)
def uniq(xs):
"""Remove duplicates in given list with its order kept.
>>> uniq([])
[]
>>> uniq([1, 4, 5, 1, 2, 3, 5, 10])
[1, 4, 5, 2, 3, 10]
"""
acc = xs[:1]
for x in xs[1:]:
if x not in acc:
acc += [x]
return acc
def chaincalls(callables, x):
"""
:param callables: callable objects to apply to x in this order
:param x: Object to apply callables
"""
for c in callables:
assert callable(c), "%s is not callable object!" % str(c)
x = c(x)
return x
def normpath(path):
"""Normalize given path in various different forms.
>>> normpath("/tmp/../etc/hosts")
'/etc/hosts'
>>> normpath("~root/t")
'/root/t'
"""
if "~" in path:
fs = [os.path.expanduser, os.path.normpath, os.path.abspath]
else:
fs = [os.path.normpath, os.path.abspath]
return chaincalls(fs, path)
def flip(xy):
(x, y) = xy
return (y, x)
def concat(xss):
"""
>>> concat([[]])
[]
>>> concat((()))
[]
>>> concat([[1,2,3],[4,5]])
[1, 2, 3, 4, 5]
>>> concat([[1,2,3],[4,5,[6,7]]])
[1, 2, 3, 4, 5, [6, 7]]
>>> concat(((1,2,3),(4,5,[6,7])))
[1, 2, 3, 4, 5, [6, 7]]
>>> concat(((1,2,3),(4,5,[6,7])))
[1, 2, 3, 4, 5, [6, 7]]
>>> concat((i, i*2) for i in range(3))
[0, 0, 1, 2, 2, 4]
"""
return list(from_iterable(xs for xs in xss))
def parse_filespec(fspec, sep=':', gpat='*'):
"""
Parse given filespec `fspec` and return [(filetype, filepath)].
Because anyconfig.api.load should find correct file's type to load by the
file extension, this function will not try guessing file's type if not file
type is specified explicitly.
:param fspec: filespec
:param sep: a char separating filetype and filepath in filespec
:param gpat: a char for glob pattern
>>> parse_filespec("base.json")
[('base.json', None)]
>>> parse_filespec("json:base.json")
[('base.json', 'json')]
>>> parse_filespec("yaml:foo.yaml")
[('foo.yaml', 'yaml')]
>>> parse_filespec("yaml:foo.dat")
[('foo.dat', 'yaml')]
# FIXME: How to test this?
# >>> parse_filespec("yaml:bar/*.conf")
# [('bar/a.conf', 'yaml'), ('bar/b.conf', 'yaml')]
TODO: Allow '*' (glob pattern) in filepath when escaped with '\\', etc.
"""
tp = (ft, fp) = tuple(fspec.split(sep)) if sep in fspec else (None, fspec)
return [(fs, ft) for fs in sorted(glob.glob(fp))] \
if gpat in fspec else [flip(tp)]
def parse_and_load_contexts(contexts, enc=ENCODING, werr=False):
"""
:param contexts: list of context file specs
:param enc: Input encoding of context files (dummy param)
:param werr: Exit immediately if True and any errors occurrs
while loading context files
"""
ctx = container()
if contexts:
for fpath, ftype in concat(parse_filespec(f) for f in contexts):
diff = load(fpath, ftype)
ctx.update(diff)
return ctx
# vim:sw=4:ts=4:et:
|
Python
| 0.000001
|
@@ -3301,16 +3301,28 @@
ntexts,
+werr=False,
enc=ENCO
@@ -3325,28 +3325,16 @@
ENCODING
-, werr=False
):%0A %22
@@ -3388,70 +3388,8 @@
ecs%0A
- :param enc: Input encoding of context files (dummy param)%0A
@@ -3449,16 +3449,16 @@
occurrs%0A
-
@@ -3481,24 +3481,86 @@
ntext files%0A
+ :param enc: Input encoding of context files (dummy param)%0A
%22%22%22%0A
|
ecece212605bb588212a70588dc7fd4b67e85cc9
|
Corrected first two lines
|
roles/common/tests/test_default.py
|
roles/common/tests/test_default.py
|
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
'.molecule/ansible_inventory').get_hosts('all')
def test_hosts_file(File):
f = File('/etc/hosts')
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
|
Python
| 0.999691
|
@@ -1,10 +1,8 @@
-import
+from
tes
@@ -32,59 +32,48 @@
nner
-%0A%0Atestinfra_hosts = testinfra.utils.ansible_runner.
+ import AnsibleRunner%0Atestinfra_hosts =
Ansi
@@ -82,21 +82,16 @@
eRunner(
-%0A
'.molecu
|
19fd0b75e07311bb3eb863d132125325e3478424
|
Fix typo in docstring
|
byceps/services/user_avatar/models.py
|
byceps/services/user_avatar/models.py
|
"""
byceps.services.user_avatar.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from collections import namedtuple
from datetime import datetime
from pathlib import Path
from flask import current_app, url_for
from sqlalchemy.ext.hybrid import hybrid_property
from ...database import db, generate_uuid
from ...util.image.models import ImageType
from ...util.instances import ReprBuilder
class Avatar(db.Model):
"""A avatar image uploaded by a user."""
__tablename__ = 'user_avatars'
id = db.Column(db.Uuid, default=generate_uuid, primary_key=True)
created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
creator_id = db.Column(db.Uuid, db.ForeignKey('users.id'), nullable=False)
_image_type = db.Column('image_type', db.Unicode(4), nullable=False)
def __init__(self, creator_id, image_type):
self.creator_id = creator_id
self.image_type = image_type
@hybrid_property
def image_type(self):
image_type_str = self._image_type
if image_type_str is not None:
return ImageType[image_type_str]
@image_type.setter
def image_type(self, image_type):
self._image_type = image_type.name if (image_type is not None) else None
@property
def filename(self):
name_without_suffix = str(self.id)
suffix = '.' + self.image_type.name
return Path(name_without_suffix).with_suffix(suffix)
@property
def path(self):
path = current_app.config['PATH_USER_AVATAR_IMAGES']
return path / self.filename
@property
def url(self):
path = 'users/avatars/{}'.format(self.filename)
return url_for('global_file', filename=path)
def __repr__(self):
return ReprBuilder(self) \
.add_with_lookup('id') \
.add('image_type', self.image_type.name) \
.build()
class AvatarSelection(db.Model):
"""The selection of an avatar image to be used for a user."""
__tablename__ = 'user_avatar_selections'
user_id = db.Column(db.Uuid, db.ForeignKey('users.id'), primary_key=True)
user = db.relationship('User', backref=db.backref('avatar_selection', uselist=False))
avatar_id = db.Column(db.Uuid, db.ForeignKey('user_avatars.id'), unique=True, nullable=False)
avatar = db.relationship(Avatar)
def __init__(self, user_id, avatar_id):
self.user_id = user_id
self.avatar_id = avatar_id
AvatarCreationTuple = namedtuple('AvatarCreationTuple', 'created_at, url')
|
Python
| 0.013244
|
@@ -507,16 +507,17 @@
%22%22%22A
+n
avatar
|
fae78c016fd3c5fdbcb9a5f6365c9b93586de36e
|
Update generation.py
|
cea/optimization/master/generation.py
|
cea/optimization/master/generation.py
|
"""
Create individuals
"""
from __future__ import division
import random
from cea.optimization.constants import DH_CONVERSION_TECHNOLOGIES_SHARE, DC_CONVERSION_TECHNOLOGIES_SHARE
from cea.optimization.master.validation import validation_main
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "thomas@arch.ethz.ch"
__status__ = "Production"
def generate_main(individual_with_names_dict,
column_names,
column_names_buildings_heating,
column_names_buildings_cooling,
district_heating_network,
district_cooling_network):
"""
Creates an individual configuration for the evolutionary algorithm.
The individual is divided into four parts namely Heating technologies, Cooling Technologies, Heating Network
and Cooling Network
Heating Technologies: This block consists of heating technologies associated with % of the peak capacity each
technology is going to supply, i.e. 10.1520.2030, which translates into technology 1 corresponding to 15% of peak
capacity, technology 2 corresponding to 20% and technology 3 corresponding to 0%. 0% can also be just done by replacing
3 with 0. The technologies block is then followed by supply temperature of the DHN and the number of units it is
supplied to among AHU, ARU, SHU. So if it is 6 degrees C supplied by DHN to AHU and ARU, it is represented as 6.02.
The temperature is represented with 1 decimal point.
Cooling Technologies: This follows the same syntax as heating technologies, but will be represented with cooling
technologies. The block length of heating and cooling can be different.
Heating Network: Network of buildings connected to centralized heating
Cooling Network: Network of buildings connected to centralized cooling. Both these networks can be different, and will
always have a fixed length corresponding to the total number of buildings in the neighborhood
:param nBuildings: number of buildings
:type nBuildings: int
:return: individual: representation of values taken by the individual
:rtype: list
"""
# POPULATE INDIVIDUAL WE KEEP A DATAFRAME SO IT IS EASIER FOR THE PROGRAMMER TO KNOW WHAT IS GOING ON
if district_heating_network and district_cooling_network:
populated_individual_with_name_dict = populate_individual(individual_with_names_dict,
DH_CONVERSION_TECHNOLOGIES_SHARE,
column_names_buildings_heating)
populated_individual_with_name_dict = populate_individual(populated_individual_with_name_dict,
DC_CONVERSION_TECHNOLOGIES_SHARE,
column_names_buildings_cooling)
elif district_heating_network:
populated_individual_with_name_dict = populate_individual(individual_with_names_dict,
DH_CONVERSION_TECHNOLOGIES_SHARE,
column_names_buildings_heating)
elif district_cooling_network:
populated_individual_with_name_dict = populate_individual(individual_with_names_dict,
DC_CONVERSION_TECHNOLOGIES_SHARE,
column_names_buildings_cooling)
populated_individual_with_name_dict = validation_main(populated_individual_with_name_dict,
column_names_buildings_heating,
column_names_buildings_cooling,
district_heating_network,
district_cooling_network
)
# CONVERT BACK INTO AN INDIVIDUAL STRING IMPORTANT TO USE column_names to keep the order
individual = []
for column in column_names:
individual.append(populated_individual_with_name_dict[column])
return individual
def populate_individual(empty_individual_with_names_dict,
name_share_conversion_technologies,
columns_buildings_name):
# do it for the share of the units that are activated
for column, limits in name_share_conversion_technologies.iteritems():
lim_inf = limits["liminf"]
lim_sup = limits["limsup"]
empty_individual_with_names_dict[column] = round(random.uniform(lim_inf, lim_sup),2)
# do it for the buildings
for column in columns_buildings_name:
empty_individual_with_names_dict[column] = random.randint(0, 1)
return empty_individual_with_names_dict
def individual_to_barcode(individual,
building_names_all,
building_names_heating,
building_names_cooling,
column_names,
column_names_buildings_heating,
column_names_buildings_cooling):
"""
Reads the 0-1 combination of connected/disconnected buildings
and creates a list of strings type barcode i.e. ("12311111123012")
:param individual: list containing the combination of connected/disconnected buildings
:type individual: list
:return: indCombi: list of strings
:rtype: list
"""
# pair individual values with their names
individual_with_name_dict = dict(zip(column_names, individual))
DHN_barcode = ""
for name in column_names_buildings_heating:
if name in individual_with_name_dict.keys():
DHN_barcode += str(int(individual_with_name_dict[name]))
DCN_barcode = ""
for name in column_names_buildings_cooling:
if name in individual_with_name_dict.keys():
DCN_barcode += str(int(individual_with_name_dict[name]))
# calc building connectivity
building_connectivity_dict = calc_building_connectivity_dict(building_names_all,
building_names_heating,
building_names_cooling,
DHN_barcode,
DCN_barcode)
return DHN_barcode, DCN_barcode, individual_with_name_dict, building_connectivity_dict
def calc_building_connectivity_dict(building_names_all,
building_names_heating,
building_names_cooling,
DHN_barcode,
DCN_barcode):
data_heating_connections = []
data_cooling_connections = []
data_connectivity_heating = dict(zip(building_names_heating, DHN_barcode))
data_connectivity_cooling = dict(zip(building_names_cooling, DCN_barcode))
for building in building_names_all:
if building in data_connectivity_heating.keys():
data_heating_connections.append(data_connectivity_heating[building])
else:
data_heating_connections.append('0') #if it is not inside the network then it is disconnected
if building in data_connectivity_cooling.keys():
data_cooling_connections.append(data_connectivity_cooling[building])
else:
data_cooling_connections.append('0') #if it is not inside the network then it is disconnected
building_connectivity_dict = {
"Name": building_names_all,
"DH_connectivity": data_heating_connections,
"DC_connectivity": data_cooling_connections,
}
return building_connectivity_dict
|
Python
| 0.000001
|
@@ -4779,78 +4779,8 @@
():%0A
- lim_inf = limits%5B%22liminf%22%5D%0A lim_sup = limits%5B%22limsup%22%5D%0A
@@ -4851,24 +4851,16 @@
orm(
-lim_inf, lim_sup
+0.0, 1.0
),2)
|
ef03541b2b25ab9cf34deec554a19a32dad7fbec
|
Add new line to end of init file for Meta Writer application
|
tools/python/odin_data/meta_writer/__init__.py
|
tools/python/odin_data/meta_writer/__init__.py
|
from pkg_resources import require
require('pygelf==0.3.1')
require("h5py==2.8.0")
require('pyzmq==16.0.2')
|
Python
| 0
|
@@ -99,8 +99,9 @@
16.0.2')
+%0A
|
5eaf4ed148f36f6cf578c9d943ee32652628de64
|
Fix broken tests
|
xero/exceptions.py
|
xero/exceptions.py
|
from six.moves.urllib.parse import parse_qs
from xml.dom.minidom import parseString
import json
class XeroException(Exception):
def __init__(self, response, msg=None):
self.response = response
super(XeroException, self).__init__(msg)
class XeroNotVerified(Exception):
# Credentials haven't been verified
pass
class XeroBadRequest(XeroException):
# HTTP 400: Bad Request
def __init__(self, response):
if response.headers['content-type'].startswith('application/json'):
data = json.loads(response.text)
msg = "%s: %s" % (data['Type'], data['Message'])
self.errors = [err['Message']
for elem in data['Elements']
for err in elem['ValidationErrors']
]
super(XeroBadRequest, self).__init__(response, msg=msg)
elif response.headers['content-type'].startswith('text/html'):
payload = parse_qs(response.text)
self.errors = [
payload['oauth_problem'][0],
]
super(XeroBadRequest, self).__init__(response, payload['oauth_problem_advice'][0])
else:
# Extract the messages from the text.
# parseString takes byte content, not unicode.
dom = parseString(response.text.encode(response.encoding))
messages = dom.getElementsByTagName('Message')
msg = messages[0].childNodes[0].data
self.errors = [
m.childNodes[0].data for m in messages[1:]
]
super(XeroBadRequest, self).__init__(response, msg)
class XeroUnauthorized(XeroException):
# HTTP 401: Unauthorized
def __init__(self, response):
payload = parse_qs(response.text)
self.problem = payload['oauth_problem'][0]
super(XeroUnauthorized, self).__init__(response, payload['oauth_problem_advice'][0])
class XeroForbidden(XeroException):
# HTTP 403: Forbidden
def __init__(self, response):
super(XeroForbidden, self).__init__(response, response.text)
class XeroNotFound(XeroException):
# HTTP 404: Not Found
def __init__(self, response):
super(XeroNotFound, self).__init__(response, response.text)
class XeroUnsupportedMediaType(XeroException):
# HTTP 415: UnsupportedMediaType
def __init__(self, response):
super(XeroUnsupportedMediaType, self).__init__(response, response.text)
class XeroInternalError(XeroException):
# HTTP 500: Internal Error
def __init__(self, response):
super(XeroInternalError, self).__init__(response, response.text)
class XeroNotImplemented(XeroException):
# HTTP 501
def __init__(self, response):
# Extract the useful error message from the text.
# parseString takes byte content, not unicode.
dom = parseString(response.text.encode(response.encoding))
messages = dom.getElementsByTagName('Message')
msg = messages[0].childNodes[0].data
super(XeroNotImplemented, self).__init__(response, msg)
class XeroRateLimitExceeded(XeroException):
# HTTP 503 - Rate limit exceeded
def __init__(self, response, payload):
self.problem = payload['oauth_problem'][0]
super(XeroRateLimitExceeded, self).__init__(response, payload['oauth_problem_advice'][0])
class XeroNotAvailable(XeroException):
# HTTP 503 - Not available
def __init__(self, response):
super(XeroNotAvailable, self).__init__(response, response.text)
class XeroExceptionUnknown(XeroException):
# Any other exception.
pass
|
Python
| 0.000555
|
@@ -762,32 +762,74 @@
%5D%0A %5D%0A
+ self.problem = self.errors%5B0%5D%0A
supe
@@ -1077,32 +1077,74 @@
,%0A %5D%0A
+ self.problem = self.errors%5B0%5D%0A
supe
@@ -1619,32 +1619,74 @@
%5D%0A %5D%0A
+ self.problem = self.errors%5B0%5D%0A
supe
|
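Decoded, the diff inserts the same line into each of the three XeroBadRequest branches so the class exposes .problem like XeroUnauthorized and XeroRateLimitExceeded already do. The JSON branch, for example, becomes:

self.errors = [err['Message']
               for elem in data['Elements']
               for err in elem['ValidationErrors']
               ]
self.problem = self.errors[0]
super(XeroBadRequest, self).__init__(response, msg=msg)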
514074dee639b30fb56ec664804bdd3f533befda
|
Apply `cacheonceproperty` on props of Tree & Chunk.
|
xmlpumpkin/tree.py
|
xmlpumpkin/tree.py
|
# encoding: utf-8
from lxml import etree
XML_ENCODING = 'utf-8'
class Tree(object):
"""Tree accessor for CaboCha xml."""
def __init__(self, cabocha_xml):
self._element = etree.fromstring(
cabocha_xml.encode(XML_ENCODING),
)
@property
def chunks(self):
chunk_elems = self._element.findall('.//chunk')
chunks = tuple([Chunk(elem, self) for elem in chunk_elems])
return chunks
@property
def root(self):
for chunk in self.chunks:
if chunk.link_to_id == -1:
return chunk
return None
def chunk_by_id(self, chunk_id):
for chunk in self.chunks:
if chunk.id == chunk_id:
return chunk
return None
class Chunk(object):
"""CaboCha chunk object representation."""
def __init__(self, element, parent):
self._element = element
self._parent = parent
def __eq__(self, other):
return self._element == other._element
@property
def id(self):
return int(self._element.attrib['id'])
@property
def link_to_id(self):
return int(self._element.attrib['link'])
@property
def linked_from_ids(self):
return tuple([chunk.id for chunk in self.linked])
@property
def func_id(self):
return int(self._element.attrib['func'])
@property
def dep(self):
return self._parent.chunk_by_id(self.link_to_id)
@property
def linked(self):
to_id = self.id
return [
chunk for chunk
in self._parent.chunks
if chunk.link_to_id == to_id
]
@property
def surface(self):
tokens = self._tokens()
texts = [t.text for t in tokens]
return u''.join(texts)
@property
def func_surface(self):
tid = self.func_id
tokens = self._tokens()
for tok in tokens:
if int(tok.attrib['id']) == tid:
return tok.text
def _tokens(self):
return self._element.findall('.//tok')
|
Python
| 0
|
@@ -35,16 +35,53 @@
t etree%0A
+from .utils import cacheonceproperty%0A
%0A%0AXML_EN
@@ -294,32 +294,41 @@
)%0A%0A @
+cacheonce
property%0A def
@@ -490,24 +490,33 @@
hunks%0A%0A @
+cacheonce
property%0A
@@ -1065,24 +1065,33 @@
ement%0A%0A @
+cacheonce
property%0A
@@ -1150,32 +1150,41 @@
ib%5B'id'%5D)%0A%0A @
+cacheonce
property%0A def
@@ -1249,32 +1249,41 @@
%5B'link'%5D)%0A%0A @
+cacheonce
property%0A def
@@ -1362,32 +1362,41 @@
.linked%5D)%0A%0A @
+cacheonce
property%0A def
@@ -1458,32 +1458,41 @@
%5B'func'%5D)%0A%0A @
+cacheonce
property%0A def
@@ -1558,32 +1558,41 @@
nk_to_id)%0A%0A @
+cacheonce
property%0A def
@@ -1763,24 +1763,33 @@
%5D%0A%0A @
+cacheonce
property%0A
@@ -1913,21 +1913,30 @@
texts)%0A%0A
-
@
+cacheonce
property
|
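The diff imports cacheonceproperty from a .utils module whose source is not included in this row. Purely as a hypothetical sketch of the behaviour the name implies (compute once, cache on the instance), a minimal implementation could look like:

def cacheonceproperty(fn):
    # Hypothetical sketch: the real xmlpumpkin.utils implementation is
    # not shown in this row.  Stores the computed value on the instance
    # under a private attribute and returns it on later accesses.
    attr = '_cached_' + fn.__name__

    @property
    def wrapper(self):
        try:
            return getattr(self, attr)
        except AttributeError:
            value = fn(self)
            setattr(self, attr, value)
            return value
    return wrapper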
7aae3f244f15d31e4d5a0c844df5cbbb5a594e84
|
update mongostring
|
mongo.py
|
mongo.py
|
import os
import sys
import pymongo
from bson import BSON
from bson import json_util
MONGODB_URI_REMOTE = 'mongodb://Lars_2009:Lars65535@euve76271.serverprofi24.de:21060/larscgmtest'
MONGODB_URI_LOCAL = 'mongodb://aps:aps@127.0.0.1:27017/aps'
def getlast3():
try:
client = pymongo.MongoClient(MONGODB_URI_LOCAL)
except:
print('Error: Unable to Connect')
connection = None
db = client['aps']
cursor = db.entries.find({'type':'cal'}).sort('date', -1).limit(3)
for doc in cursor:
print (doc)
client.close()
if __name__ == '__main__':
getlast3()
|
Python
| 0.000001
|
@@ -83,107 +83,8 @@
il%0A%0A
-MONGODB_URI_REMOTE = 'mongodb://Lars_2009:Lars65535@euve76271.serverprofi24.de:21060/larscgmtest' %0A
MONG
|
46f3869465ff939bdc667c3e787a415197e679c2
|
fix device busy bugs
|
PyDockerMonitor/containerScheduler.py
|
PyDockerMonitor/containerScheduler.py
|
#!/usr/bin/python
import logging
from hostStatusUpdateResponse import ContainerCommand, HostResponse, ContainerResponse
from hostToContainerManager import CTContainerStatus
from collections import deque
from YarnCommand import YarnCommandType
log=logging.getLogger("RMDocker.ContainerScheduler")
MAX_BOOST=1
class ContainerScheduler:
name = "BASE_SCHEDULER"
def __init__(self,hostToContainerManager):
self.hostToCommands = {}
self.hostToContainerManager = hostToContainerManager
def register(self,host):
self.hostToCommands[host]=deque()
def deregister(self,host):
log.info("deregister host %s from ContainerScheduler",host)
del self.hostToCommands[host]
##if receive heartbeat from host, then make response
def notify(self, host, id=None, command=None, cgroupKeyValues=None):
##if we receive other commands from user or other schedulers
if id is not None:
self.hostToCommands[host].append(ContainerScheduler._make_contaienrResponse_(id,command,cgroupKeyValues))
else:
pass
##scheduler yarn commands
def schedule(self,command):
containerId = command.get_id()
if self.hostToContainerManager.getContainerByName(containerId) is None:
log.info("can not find container %s",containerId)
return False
container = self.hostToContainerManager.getContainerByName(containerId)
if command.get_type() == YarnCommandType.DEHYDRATE:
if container.getStatus() == CTContainerStatus.SUSPEND:
log.info("contianer %s is suspending, can not suspend again",containerId)
return False
else:
log.info("successfully suspend container %s",containerId)
self.suspendContainerResponse(container)
elif command.get_type() == YarnCommandType.RESUME:
if container.getStatus() != CTContainerStatus.SUSPEND:
log.info("contianer %s is not suspending, can not resume again",containerId)
return False
else:
log.info("successfully resume contaienr %s",containerId)
self.resumContainerResponse(container)
elif command.get_type() == YarnCommandType.UPDATE:
##TODO support in future release
pass
return True
@staticmethod
def getContainerMemoryUsage(container):
memory = 0
try:
memory=int(container.getCgroupValue("memory","memory.usage_in_bytes"))
except Exception as error:
log.eror("keyvalue error %s",error)
return memory/(1024*1024)
@staticmethod
def getContainerSwapUsage(container):
swap = 0
try:
swap= int(container.getCgroupValue("memory","memory.stat").strip().split(":")[1])
except Exception as error:
log.error("KeyValue eror %s",error)
return swap/(1024*1024)
@staticmethod
def getContainerMemoryLimit(container):
usage = 0
try:
usage= int(container.getCgroupValue("memory","memory.limit_in_bytes"))
except Exception as error:
log.error("KeyValue eror %s",error)
return usage/(1024*1024)
@staticmethod
def isToSuspend(container):
##get memory usage
memory_usage = ContainerScheduler.getContainerMemoryUsage(container) ##get swap usage
swap_usage = ContainerScheduler.getContainerSwapUsage(container)
##get memory limit
memory_limit = ContainerScheduler.getContainerMemoryLimit(container)
##if consume more than 500mb and memory usage is full
if memory_usage + swap_usage > memory_limit and swap_usage >= 500:
log.info("container %s is swapping",container.getName())
return True
else:
return False
def suspendContainerResponse(self,container):
log.info("enter suspend")
##set memory 1% of total memory
memory_value = int(ContainerScheduler.getContainerMemoryLimit(container))
old_limit = str(int(ContainerScheduler.getContainerMemoryLimit(container)))+"m"
container.put("memory","memory.limit_in_bytes",old_limit)
##set cpu usage 1% of total cpu frequency,suspense cpu first
quota = "10000"
period= "1000000"
cgroupCpuKeyValue ={
"cpu" :{
"cpu.cfs_period_us" :period,
"cpu.cfs_quota_us" :quota
}
}
containerCpuResponse = ContainerScheduler._make_containerResponse_(
id = container.getID(),
command = ContainerCommand.UPDATE_CGROUP_PARAMETER,
cgroupKeyValues=cgroupCpuKeyValue
)
self.hostToCommands[container.getHost()].append(containerCpuResponse)
##suspense memory incrementally
while memory_value > 128:
memory_value = memory_value * 0.9
memory_value_str = str(memory_value)+"m"
cgroupMemoryKeyValue={
"memory":{
"memory.limit_in_bytes":memory_value_str
}
}
containerMemoryResponse = ContainerScheduler._make_containerResponse_(
id = container.getID(),
command = ContainerCommand.UPDATE_CGROUP_PARAMETER,
cgroupKeyValues=cgroupMemoryKeyValue
)
self.hostToCommands[container.getHost()].append(containerMemoryResponse)
container.setStatus(CTContainerStatus.SUSPEND)
log.info("suspend container %s",container.getName())
def resumContainerResponse(self,container):
##set memory 1% of total memory
limit = container.get("memory","memory.limit_in_bytes")
##set cpu usage 100% of total cpu frequency
quota = "-1"
period= "1000000"
cgroupKeyValues={"memory":{
"memory.limit_in_bytes":limit
},
"cpu" :{
"cpu.cfs_period_us" :period,
"cpu.cfs_quota_us" :quota
}
}
containerResponse = ContainerScheduler._make_containerResponse_(
id = container.getID(),
command = ContainerCommand.UPDATE_CGROUP_PARAMETER,
cgroupKeyValues=cgroupKeyValues
)
log.info("resume container %s %s",container.getName(),limit)
container.setStatus(CTContainerStatus.RUN)
self.hostToCommands[container.getHost()].append(containerResponse)
def boostContainerResponse(self,container,limit):
limit=int(limit)
log.info("boost container %s",container.getName())
cgroupKeyValues={"memory":{"memory.limit_in_bytes":str(limit)+"m"}}
containerResponse = ContainerScheduler._make_containerResponse_(
id = container.getID(),
command = ContainerCommand.UPDATE_CGROUP_PARAMETER,
cgroupKeyValues=cgroupKeyValues
)
container.setStatus(CTContainerStatus.BOOST)
self.hostToCommands[container.getHost()].append(containerResponse)
def _make_hostResponse_(self,host):
commands = []
if len(self.hostToCommands[host]) > 0:
log.info("we have %d commands to host %s",len(self.hostToCommands[host]),host)
while len(self.hostToCommands[host]) > 0:
commands.append(self.hostToCommands[host].popleft())
hostResponse = HostResponse(host,commands)
return hostResponse
@staticmethod
def _make_containerResponse_(id, command=None, cgroupKeyValues=None):
containerResponse = ContainerResponse(id,command,cgroupKeyValues)
return containerResponse
|
Python
| 0.000001
|
@@ -5399,16 +5399,20 @@
value =
+int(
memory_v
@@ -5421,16 +5421,17 @@
ue * 0.9
+)
%0A
|
de3b4775b7dbcecc9c42e18c59b35485f83ca74a
|
Update max-chunks-to-make-sorted-i.py
|
Python/max-chunks-to-make-sorted-i.py
|
Python/max-chunks-to-make-sorted-i.py
|
# Time: O(n)
# Space: O(1)
class Solution(object):
def maxChunksToSorted(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
result, max_i = 0, 0
for i, v in enumerate(arr):
max_i = max(max_i, v)
if max_i == i:
result += 1
return result
|
Python
| 0.000001
|
@@ -22,16 +22,894 @@
: O(1)%0A%0A
+# Given an array arr that is a permutation of %5B0, 1, ..., arr.length - 1%5D,%0A# we split the array into some number of %22chunks%22 (partitions), and individually sort each chunk.%0A# After concatenating them, the result equals the sorted array.%0A#%0A# What is the most number of chunks we could have made?%0A#%0A# Example 1:%0A#%0A# Input: arr = %5B4,3,2,1,0%5D%0A# Output: 1%0A# Explanation:%0A# Splitting into two or more chunks will not return the required result.%0A# For example, splitting into %5B4, 3%5D, %5B2, 1, 0%5D will result in %5B3, 4, 0, 1, 2%5D, which isn't sorted.%0A#%0A# Example 2:%0A#%0A# Input: arr = %5B1,0,2,3,4%5D%0A# Output: 4%0A# Explanation:%0A# We can split into two chunks, such as %5B1, 0%5D, %5B2, 3, 4%5D.%0A# However, splitting into %5B1, 0%5D, %5B2%5D, %5B3%5D, %5B4%5D is the highest number of chunks possible.%0A#%0A# Note:%0A# - arr will have length in range %5B1, 10%5D.%0A# - arr%5Bi%5D will be a permutation of %5B0, 1, ..., arr.length - 1%5D.%0A%0A
class So
|
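The docstring examples added by this diff can be checked directly against the Solution class above: the greedy invariant is that a chunk may end at index i exactly when the running maximum equals i.

# Using the Solution class from this row's old_contents:
assert Solution().maxChunksToSorted([4, 3, 2, 1, 0]) == 1
assert Solution().maxChunksToSorted([1, 0, 2, 3, 4]) == 4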
846a683a4bd3b14d6585d9daad1a3790816719c8
|
set initial and default for byhour, byminute and bysecond to 0
|
planner/serializers/serializers.py
|
planner/serializers/serializers.py
|
from rest_framework import serializers
from planner.models.models import SimpleRule, RuleSetElement, \
RuleSet, BaseRule, DateTimeRule
import re
def pattern_validate(nbmax):
"""
returns a validator function for an interger
or a list of integers ; the length of the list
is lower than nbmax+2
"""
def validate(value):
pattern = "^(-?\d{1,2}(,-?\d{1,2}){0,"+str(nbmax)+"})?$"
if not re.match(pattern,value) :
raise serializers.ValidationError("Doesn't respect pattern : \
integer or sequence of integers (comma separated)")
return validate
class SimpleRuleSerializer(serializers.HyperlinkedModelSerializer):
byweekday = serializers.CharField(allow_blank=True)
bymonth = serializers.CharField(allow_blank=True)
bysetpos = serializers.CharField(allow_blank=True)
bymonthday = serializers.CharField(allow_blank=True)
byyearday = serializers.CharField(allow_blank=True)
byweekno = serializers.CharField(allow_blank=True)
byhour = serializers.CharField(allow_blank=True)
byminute = serializers.CharField(allow_blank=True)
bysecond = serializers.CharField(allow_blank=True)
byeaster = serializers.CharField(allow_blank=True)
def validate_byweekday(self,value):
weekday_pattern = "^$|^(MO|TU|WE|TH|FR|SA|SU)(\([+|-]\d\)){0,1}(,(MO|TU|WE|TH|FR|SA|SU)(\([+|-]\d\)){0,1})*$"
if not re.match(weekday_pattern,value):
raise serializers.ValidationError("Day not in \
'MO','TU','WE','TH','FR','SA','SU','MO(+1)', ...")
return value
def validate_month(self, value):
validation = pattern_validate(12)
validation(value)
return value
def validate_bysetpos(self, value):
validation = pattern_validate(15)
validation(value)
return value
def validate_bymonthday(self, value):
validation = pattern_validate(31)
validation(value)
return value
def validate_byyearday(self, value):
validation = pattern_validate(365)
validation(value)
return value
def validate_byweekno(self, value):
validation = pattern_validate(52)
validation(value)
return value
def validate_byhour(self, value):
validation = pattern_validate(24)
validation(value)
return value
def validate_byminute(self, value):
validation = pattern_validate(60)
validation(value)
return value
def validate_bysecond(self, value):
validation = pattern_validate(60)
validation(value)
return value
def validate_byeaster(self, value):
validation = pattern_validate(60)
validation(value)
return value
class Meta:
model = SimpleRule
fields = (
'url', 'content', 'name_fr', 'name_en', 'freq', 'wkst', 'byweekday', 'bymonth',
'bysetpos', 'bymonthday', 'byyearday', 'byweekno', 'byhour', 'byminute',
'bysecond', 'byeaster', 'next10')
class RuleSetElementSerializer(serializers.ModelSerializer):
class Meta:
model = RuleSetElement
fields = ('url', 'direction', 'ruleset', 'baserule', 'order')
class RuleSetSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = RuleSet
fields = ('url', 'name', 'elements',)
class BaseRuleSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = BaseRule
fields = ('id',)
def to_representation(self, obj):
if isinstance(obj, SimpleRule):
return SimpleRuleSerializer(obj, context=self.context).to_representation(obj)
elif isinstance(obj, DateTimeRule):
return DateTimeRuleSerializer(obj, context=self.context).to_representation(obj)
return super(BaseRuleSerializer, self).to_representation(obj)
class DateTimeRuleSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = DateTimeRule
fields=('url','datetime',)
|
Python
| 0.000001
|
@@ -1050,32 +1050,58 @@
allow_blank=True
+, default='0', initial='0'
)%0A byminute =
@@ -1131,32 +1131,58 @@
allow_blank=True
+, default='0', initial='0'
)%0A bysecond =
@@ -1212,32 +1212,58 @@
allow_blank=True
+, default='0', initial='0'
)%0A byeaster =
|
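Decoded, the diff gives the three time-component fields an explicit default and initial value of '0', leaving the other by* fields unchanged:

byhour = serializers.CharField(allow_blank=True, default='0', initial='0')
byminute = serializers.CharField(allow_blank=True, default='0', initial='0')
bysecond = serializers.CharField(allow_blank=True, default='0', initial='0')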
32fccb04bac6be7e79f6b05b727e5e847fef498c
|
Update misc.py
|
misc/misc.py
|
misc/misc.py
|
Python
| 0
|
@@ -1 +1,996 @@
+import discord%0Afrom discord.ext import commands%0Aimport random%0Aimport time%0A%0Aclass misc:%0A %22%22%22My custom cog that does stuff%22%22%22%0A def __init__(self, bot):%0A self.bot = bot%0A self.bank = Bank(bot, %22data/economy/bank.json%22)%0A%0A def role_colour():%0A #Rand between 0 - 256%0A a = random.randrange(0,256)%0A b = random.randrange(0,256)%0A c = random.randrange(0,256)%0A%0A if a != 0 or b != 0 or c != 0:%0A choice = random.randrange(1,4)%0A if choice === 1:%0A a = 0%0A if choice === 2:%0A b = 0%0A if choice === 3:%0A c = 0%0A%0A return a, b, c%0A%0A def change_colour(r, g, b):%0A picked_role = bot.role(%22400618311861272577%22)%0A bot.edit_role(role=picked_role, colour=bot.colour(r, g, b))%0A%0A def colour_loop():%0A while true:%0A change_colour(role_colour())%0A time.sleep(5)%0A%0A colour_loop()%0A %0Adef setup(bot):%0A bot.add_cog(Counter(bot))
%0A
|
|
4ec433f504bc6659889daff4097732287b4b563a
|
Allow for executeCommand to take the stdin input.
|
utils/lit/lit/util.py
|
utils/lit/lit/util.py
|
import errno
import itertools
import math
import os
import platform
import signal
import subprocess
import sys
def to_bytes(str):
# Encode to UTF-8 to get binary data.
return str.encode('utf-8')
def to_string(bytes):
if isinstance(bytes, str):
return bytes
return to_bytes(bytes)
def convert_string(bytes):
try:
return to_string(bytes.decode('utf-8'))
except UnicodeError:
return str(bytes)
def detectCPUs():
"""
Detects the number of CPUs on a system. Cribbed from pp.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(capture(['sysctl', '-n', 'hw.ncpu']))
# Windows:
if "NUMBER_OF_PROCESSORS" in os.environ:
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus > 0:
return ncpus
return 1 # Default
def mkdir_p(path):
"""mkdir_p(path) - Make the "path" directory, if it does not exist; this
will also make directories for any missing parent directories."""
if not path or os.path.exists(path):
return
parent = os.path.dirname(path)
if parent != path:
mkdir_p(parent)
try:
os.mkdir(path)
except OSError:
e = sys.exc_info()[1]
# Ignore EEXIST, which may occur during a race condition.
if e.errno != errno.EEXIST:
raise
def capture(args, env=None):
"""capture(command) - Run the given command (or argv list) in a shell and
return the standard output."""
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out,_ = p.communicate()
return convert_string(out)
def which(command, paths = None):
"""which(command, [paths]) - Look up the given command in the paths string
(or the PATH environment variable, if unspecified)."""
if paths is None:
paths = os.environ.get('PATH','')
# Check for absolute match first.
if os.path.isfile(command):
return command
# Would be nice if Python had a lib function for this.
if not paths:
paths = os.defpath
# Get suffixes to search.
# On Cygwin, 'PATHEXT' may exist but it should not be used.
if os.pathsep == ';':
pathext = os.environ.get('PATHEXT', '').split(';')
else:
pathext = ['']
# Search the paths...
for path in paths.split(os.pathsep):
for ext in pathext:
p = os.path.join(path, command + ext)
if os.path.exists(p):
return p
return None
def checkToolsPath(dir, tools):
for tool in tools:
if not os.path.exists(os.path.join(dir, tool)):
return False;
return True;
def whichTools(tools, paths):
for path in paths.split(os.pathsep):
if checkToolsPath(path, tools):
return path
return None
def printHistogram(items, title = 'Items'):
items.sort(key = lambda item: item[1])
maxValue = max([v for _,v in items])
# Select first "nice" bar height that produces more than 10 bars.
power = int(math.ceil(math.log(maxValue, 10)))
for inc in itertools.cycle((5, 2, 2.5, 1)):
barH = inc * 10**power
N = int(math.ceil(maxValue / barH))
if N > 10:
break
elif inc == 1:
power -= 1
histo = [set() for i in range(N)]
for name,v in items:
bin = min(int(N * v/maxValue), N-1)
histo[bin].add(name)
barW = 40
hr = '-' * (barW + 34)
print('\nSlowest %s:' % title)
print(hr)
for name,value in items[-20:]:
print('%.2fs: %s' % (value, name))
print('\n%s Times:' % title)
print(hr)
pDigits = int(math.ceil(math.log(maxValue, 10)))
pfDigits = max(0, 3-pDigits)
if pfDigits:
pDigits += pfDigits + 1
cDigits = int(math.ceil(math.log(len(items), 10)))
print("[%s] :: [%s] :: [%s]" % ('Range'.center((pDigits+1)*2 + 3),
'Percentage'.center(barW),
'Count'.center(cDigits*2 + 1)))
print(hr)
for i,row in enumerate(histo):
pct = float(len(row)) / len(items)
w = int(barW * pct)
print("[%*.*fs,%*.*fs) :: [%s%s] :: [%*d/%*d]" % (
pDigits, pfDigits, i*barH, pDigits, pfDigits, (i+1)*barH,
'*'*w, ' '*(barW-w), cDigits, len(row), cDigits, len(items)))
# Close extra file handles on UNIX (on Windows this cannot be done while
# also redirecting input).
kUseCloseFDs = not (platform.system() == 'Windows')
def executeCommand(command, cwd=None, env=None):
p = subprocess.Popen(command, cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env, close_fds=kUseCloseFDs)
out,err = p.communicate()
exitCode = p.wait()
# Detect Ctrl-C in subprocess.
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
# Ensure the resulting output is always of string type.
out = convert_string(out)
err = convert_string(err)
return out, err, exitCode
def usePlatformSdkOnDarwin(config, lit_config):
# On Darwin, support relocatable SDKs by providing Clang with a
# default system root path.
if 'darwin' in config.target_triple:
try:
cmd = subprocess.Popen(['xcrun', '--show-sdk-path'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = cmd.communicate()
out = out.strip()
res = cmd.wait()
except OSError:
res = -1
if res == 0 and out:
sdk_path = out
lit_config.note('using SDKROOT: %r' % sdk_path)
config.environment['SDKROOT'] = sdk_path
|
Python
| 0.000002
|
@@ -1335,17 +1335,16 @@
me(path)
-
%0A if
@@ -4801,24 +4801,36 @@
ne, env=None
+, input=None
):%0A p = s
@@ -5091,24 +5091,35 @@
communicate(
+input=input
)%0A exitCo
|
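Decoded, the diff adds an input parameter that is forwarded to communicate(), so callers can feed data to the child process's stdin (it also strips a trailing space after the dirname(path) line). The patched function begins:

def executeCommand(command, cwd=None, env=None, input=None):
    p = subprocess.Popen(command, cwd=cwd,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         env=env, close_fds=kUseCloseFDs)
    out, err = p.communicate(input=input)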
4b9948e665c78df468917b0906afc288244fa303
|
add doc back in.
|
osbs/exceptions.py
|
osbs/exceptions.py
|
"""
Exceptions raised by OSBS
"""
class OsbsException(Exception):
pass
class OsbsResponseException(OsbsException):
def __init__ (self, message, status_code, *args, **kwargs):
super (OsbsResponseException, self).__init__ (message, *args, **kwargs)
self.status_code = status_code
class OsbsNetworkException(OsbsException):
def __init__ (self, url, message, status_code, *args, **kwargs):
super (OsbsNetworkException, self).__init__ (message, *args, **kwargs)
self.url = url
self.status_code = status_code
|
Python
| 0
|
@@ -106,32 +106,91 @@
OsbsException):%0A
+ %22%22%22 OpenShift didn't respond with OK (200) status %22%22%22%0A%0A
def __init__
|
a7c11da50ecea2bbf8983ebbea0c3e91fcfb500a
|
Fix typo.
|
swig/python/package/test/codegen/codegen.py
|
swig/python/package/test/codegen/codegen.py
|
# simple code generation example
# note: to keep things simple, unused arithmetic operators
# for the format description are not implemented
# a realistic example would implement all operators
import formast
def api_name(name):
return "_".join(part.lower() for part in name.split())
class CodeGenIndent:
"""Implements indented printing."""
def __init__(self, parent=None):
if parent is None:
self.indent = 0
self.lines = []
else:
self.indent = parent.indent
self.lines = parent.lines
def print_(self, txt=None):
if txt is not None:
self.lines.append(" " * self.indent + txt)
else:
self.lines.append("")
def __str__(self):
return "\n".join(self.lines)
class CodeGenExprEval(formast.Visitor):
"""Generate code for expression."""
stack = []
def expr_uint(self, v):
self.stack.append(str(v))
def expr_id(self, i):
self.stack.append("self.%s" % api_name(i))
def expr_logical_and(self, e1, e2):
self.expr(e2)
self.expr(e1)
self.stack.append("(%s and %s)" % (self.stack.pop(), self.stack.pop()))
def expr_compare_le(self, e1, e2):
self.expr(e2)
self.expr(e1)
self.stack.append("(%s <= %s)" % (self.stack.pop(), self.stack.pop()))
def expr_compare_ge(self, e1, e2):
self.expr(e2)
self.expr(e1)
self.stack.append("(%s >= %s)" % (self.stack.pop(), self.stack.pop()))
class CodeGenClassInit(formast.Visitor, CodeGenIndent):
"""Generate __init__ body."""
def __init__(self, parent=None):
formast.Visitor.__init__(self)
CodeGenIndent.__init__(self, parent=parent)
def stats_attr(self, a):
if not a.arr1.is_initialized():
self.print_("self.%s = %s()" % (api_name(a.name), a.class_name))
else:
self.print_("self.%s = []" % api_name(a.name))
def stats_if(self, if_):
self.stats(if_.then)
if if_.else_.is_initialized():
self.stats(if_.else_.get())
class CodeGenClassRead(formast.Visitor, CodeGenIndent):
"""Generate read body."""
def __init__(self, parent=None):
formast.Visitor.__init__(self)
CodeGenIndent.__init__(self, parent=parent)
def stats_attr(self, a):
# everything is an integer, so this is rather simple
if not a.arr1.is_initialized():
self.print_("self.%s = struct.unpack('<i', stream.read(4))" % api_name(a.name))
else:
expr_eval = CodeGenExprEval()
expr_eval.expr(a.arr1.get())
self.print_("self.%s = [" % api_name(a.name))
self.indent += 1
self.print_("struct.unpack('<i', stream.read(4))")
self.print_("for i in range(%s)]" % expr_eval.stack.pop())
self.indent -= 1
# arr2 not used
def stats_if(self, if_):
expr_eval = CodeGenExprEval()
expr_eval.expr(if_.expr)
self.print_("if %s:" % expr_eval.stack.pop())
self.indent += 1
self.stats(if_.then)
self.indent -= 1
# else not used
class CodeGenModule(CodeGenIndent, formast.Visitor):
"""Generate module."""
def __init__(self, parent=None):
formast.Visitor.__init__(self)
CodeGenIndent.__init__(self, parent=parent)
def top_class(self, c):
self.print_("class %s:" % c.name)
self.indent += 1
if c.stats.is_initialized():
self.print_()
self.print_("def __init__(self):")
self.indent += 1
codegeninit = CodeGenClassInit(parent=self)
codegeninit.stats(c.stats.get())
self.indent -= 1
self.print_()
self.print_("def write(self, stream):")
self.indent += 1
codegenread = CodeGenClassRead(parent=self)
codegenread.stats(c.stats.get())
self.indent -= 1
else:
self.print_("pass")
self.indent -= 1
if __name__ == "__main__":
top = formast.Top()
with open("integers.xml", "rb") as stream:
formast.XmlParser().parse_string(stream.read(), top)
codegen = CodeGenModule()
codegen.top(top)
print(codegen)
|
Python
| 0.001604
|
@@ -3795,13 +3795,12 @@
def
-write
+read
(sel
|
e9a1ee7faef9b208e83173c39c62926553ab6b5f
|
mark issue as closed if resolution type is finished or fixed
|
src/survivor/tasks/sync.py
|
src/survivor/tasks/sync.py
|
"""
Synchronises local database with JIRA.
"""
import argparse
import iso8601
import itertools
from jira.client import JIRA
from survivor import config, init
from survivor.models import User, Issue
# max number of issues to have jira return for the project
MAX_ISSUE_RESULTS = 99999
def create_user(jira_user):
"Creates a `survivor.models.User` from a `jira.resources.User`."
user = User(login=jira_user.name)
user.name = jira_user.displayName
user.email = jira_user.emailAddress
user.avatar_url = jira_user.avatarUrls.__dict__['48x48']
return user.save()
def get_or_create_user(jira_user):
"""
Get or create a `survivor.models.User` from a partially-loaded
`jira.resources.User`.
"""
try:
return User.objects.get(login=jira_user.name)
except User.DoesNotExist:
return create_user(jira_user)
def create_issue(jira_issue):
"Creates a `survivor.models.Issue` from a `jira.resources.Issue`."
issue = Issue(key=jira_issue.key,
title=jira_issue.fields.description,
state=jira_issue.fields.status.name.lower(),
opened=iso8601.parse_date(jira_issue.fields.created),
updated=iso8601.parse_date(jira_issue.fields.updated),
url=jira_issue.self)
issue.reporter = get_or_create_user(jira_issue.fields.reporter)
if jira_issue.fields.resolutiondate:
issue.closed = iso8601.parse_date(jira_issue.fields.resolutiondate)
if jira_issue.fields.assignee:
issue.assignee = get_or_create_user(jira_issue.fields.assignee)
# TODO comments, labels
return issue.save()
def sync(types, verbose=False):
"Refresh selected collections from JIRA."
jira_project = config['jira.project']
jira_username = config['jira.username']
jira_password = config['jira.password']
jira_server = config['jira.server']
jira = JIRA(basic_auth=(jira_username, jira_password), options={'server': jira_server})
if 'users' in types:
User.drop_collection()
# FIXME: can this come from config?
for jira_user in jira.search_assignable_users_for_projects('', jira_project):
try:
user = create_user(jira_user)
except:
print 'Error creating user: %s' % jira_user.name
raise
if verbose: print 'created user: %s' % jira_user.name
if 'issues' in types:
Issue.drop_collection()
issues = jira.search_issues(
'project=%s and (status=OPEN or status=CLOSED)' % jira_project,
maxResults=MAX_ISSUE_RESULTS
)
for jira_issue in issues:
try:
issue = create_issue(jira_issue)
except:
print 'Error creating %s' % jira_issue.key
raise
if verbose: print 'created issue: %s' % jira_issue.key
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='Synchronises local DB with JIRA')
argparser.add_argument('model', nargs='*', help='model types to sync')
argparser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='verbose output')
args = argparser.parse_args()
types = args.model or ('users', 'issues')
init()
sync(types, args.verbose)
|
Python
| 0
|
@@ -1409,16 +1409,152 @@
ondate:%0A
+ resolution_type = jira_issue.fields.resolution.name%0A if resolution_type == %22Finished%22 or resolution_type == %22Fixed%22:%0A
|
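Decoded from the %0A-encoded hunk, the commit gates the closed timestamp on the resolution type; the re-indentation of the existing assignment is implied by the hunk rather than shown, so this sketch assumes it:

if jira_issue.fields.resolutiondate:
    resolution_type = jira_issue.fields.resolution.name
    if resolution_type == "Finished" or resolution_type == "Fixed":
        issue.closed = iso8601.parse_date(jira_issue.fields.resolutiondate)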
4c2b854354c14c7a00b3b3efb7ecbfd0712e6a16
|
make parse.py importable
|
parse.py
|
parse.py
|
#!/usr/bin/env python
import logging
import argparse
import os
import datetime
import re
import mmap
import contextlib
import multiprocessing
import itertools
from xml.sax import SAXException
import sys
sys.path.append( '.' )
sys.path.append( './lib/' )
from grant_handler import PatentGrant
from patSQL import *
from argconfig_parse import ArgHandler
xmlclasses = [AssigneeXML, CitationXML, ClassXML, InventorXML, \
PatentXML, PatdescXML, LawyerXML, ScirefXML, UsreldocXML]
regex = re.compile(r"""([<][?]xml version.*?[>]\s*[<][!]DOCTYPE\s+([A-Za-z-]+)\s+.*?/\2[>])""", re.S+re.I)
def list_files(directories, patentroot, xmlregex):
"""
Returns listing of all files within all directories relative to patentroot
whose filenames match xmlregex
"""
files = [patentroot+'/'+directory+'/'+fi for directory in directories for fi in \
os.listdir(patentroot+'/'+directory) \
if re.search(xmlregex, fi, re.I) != None]
if not files:
logging.error("No files matching {0} found in {1}/{2}".format(XMLREGEX,PATENTROOT,DIRECTORIES))
sys.exit(1)
return files
def parse_file(filename):
if not filename: return
parsed_xmls = []
size = os.stat(filename).st_size
with open(filename,'r') as f:
with contextlib.closing(mmap.mmap(f.fileno(), size, access=mmap.ACCESS_READ)) as m:
res = [x[0] for x in regex.findall(m)]
parsed_xmls.extend(res)
return parsed_xmls
def parallel_parse(filelist):
if not filelist: return
pool = multiprocessing.Pool(multiprocessing.cpu_count())
parsed = pool.imap(parse_file, filelist)
return itertools.chain.from_iterable(parsed)
def apply_xmlclass(us_patent_grant):
parsed_grants = []
try:
patobj = PatentGrant(us_patent_grant, True)
except Exception as e:
print e
return
for xmlclass in xmlclasses:
try:
parsed_grants.append(xmlclass(patobj))
except Exception as inst:
logging.error(type(inst))
logging.error(" - Error: %s" % (us_patent_grant[175:200]))
return parsed_grants
def parse_patent(grant_list):
parsed_grants = map(apply_xmlclass, grant_list)
parsed_grants = filter(lambda x: x, parsed_grants)
return itertools.chain.from_iterable(parsed_grants)
def load_sql(patent):
patent.insert_table()
# TODO: unittest
def build_tables(parsed_grants):
map(load_sql, parsed_grants)
def commit_tables():
assignee_table.commit();
citation_table.commit();
class_table.commit();
inventor_table.commit();
patent_table.commit();
patdesc_table.commit();
lawyer_table.commit();
sciref_table.commit();
usreldoc_table.commit();
if __name__ == '__main__':
args = ArgHandler(sys.argv[1:])
if args.invalid_config():
args.get_help()
DIRECTORIES = args.get_directory_list()
XMLREGEX = args.get_xmlregex()
PATENTROOT = args.get_patentroot()
VERBOSITY = args.get_verbosity()
logfile = "./" + 'xml-parsing.log'
logging.basicConfig(filename=logfile, level=VERBOSITY)
t1 = datetime.datetime.now()
files = list_files(DIRECTORIES, PATENTROOT, XMLREGEX)
parsed_xmls = parallel_parse(files)
parsed_grants = parse_patent(parsed_xmls)
build_tables(parsed_grants)
commit_tables()
#total_patents = len(parsed_xmls)
#total_errors = len(parsed_xmls) * len(xmlclasses) - len(parsed_grants)
#logging.info("Parsing started at %s", str(datetime.datetime.today()))
#logging.info("Time Elapsed: %s", datetime.datetime.now()-t1)
#logging.info("Total Patent Files: %d" % (len(files)))
#logging.info("Total Errors: %d", total_errors)
#logging.info("Total Patents: %d", total_patents)
|
Python
| 0.000134
|
@@ -2735,16 +2735,327 @@
mit();%0A%0A
+def main(directories, patentroot, xmlregex, verbosity):%0A logging.basicConfig(filename=logfile, level=verbosity)%0A files = list_files(directories, patentroot, xmlregex)%0A parsed_xmls = parallel_parse(files)%0A parsed_grants = parse_patent(parsed_xmls)%0A build_tables(parsed_grants)%0A commit_tables()%0A
%0Aif __na
@@ -3370,712 +3370,55 @@
-logging.basicConfig(filename=logfile, level=VERBOSITY)%0A%0A t1 = datetime.datetime.now()%0A%0A files = list_files(DIRECTORIES, PATENTROOT, XMLREGEX)%0A parsed_xmls = parallel_parse(files)%0A parsed_grants = parse_patent(parsed_xmls)%0A build_tables(parsed_grants)%0A commit_tables()%0A%0A #total_patents = len(parsed_xmls)%0A #total_errors = len(parsed_xmls) * len(xmlclasses) - len(parsed_grants)%0A%0A #logging.info(%22Parsing started at %25s%22, str(datetime.datetime.today()))%0A #logging.info(%22Time Elapsed: %25s%22, datetime.datetime.now()-t1)%0A #logging.info(%22Total Patent Files: %25d%22 %25 (len(files)))%0A #logging.info(%22Total Errors: %25d%22, total_errors)%0A #logging.info(%22Total Patents: %25d%22, total_patents
+main(DIRECTORIES, PATENTROOT, XMLREGEX, VERBOSITY
)%0A
|
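The two hunks above decode to a plain extraction refactor: the pipeline body moves into a main() function and the __main__ block shrinks to argument handling plus one call. Note that logfile stays a module-level name inside main(), exactly as committed:

def main(directories, patentroot, xmlregex, verbosity):
    logging.basicConfig(filename=logfile, level=verbosity)
    files = list_files(directories, patentroot, xmlregex)
    parsed_xmls = parallel_parse(files)
    parsed_grants = parse_patent(parsed_xmls)
    build_tables(parsed_grants)
    commit_tables()

if __name__ == '__main__':
    # argument handling as before, then:
    main(DIRECTORIES, PATENTROOT, XMLREGEX, VERBOSITY)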
0903a4227cc8facc6edc9e6dab43ef79a211f24c
|
Fix logic error in reporting
|
zuul/lib/gerrit.py
|
zuul/lib/gerrit.py
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import select
import json
import time
import subprocess
import Queue
import paramiko
import logging
import pprint
# TODO: switch this to paramiko?
class GerritWatcher(threading.Thread):
log = logging.getLogger("gerrit.GerritWatcher")
def __init__(self, gerrit, username, server, port=29418, keyfile=None):
threading.Thread.__init__(self)
self.username = username
self.keyfile = keyfile
self.server = server
self.port = port
self.proc = None
self.poll = select.poll()
self.gerrit = gerrit
def _open(self):
self.log.debug("Opening ssh connection to %s" % self.server)
cmd = ['/usr/bin/ssh', '-p', str(self.port)]
if self.keyfile:
cmd += ['-i', self.keyfile]
cmd += ['-l', self.username, self.server,
'gerrit', 'stream-events']
self.proc = subprocess.Popen(cmd,
bufsize=1,
stdin=None,
stdout=subprocess.PIPE,
stderr=None,
)
self.poll.register(self.proc.stdout)
def _close(self):
self.log.debug("Closing ssh connection")
try:
self.poll.unregister(self.proc.stdout)
except:
pass
try:
self.proc.kill()
except:
pass
self.proc = None
def _read(self):
l = self.proc.stdout.readline()
data = json.loads(l)
self.log.debug("Received data from Gerrit event stream: \n%s" % pprint.pformat(data))
self.gerrit.addEvent(data)
def _listen(self):
while True:
ret = self.poll.poll()
for (fd, event) in ret:
if fd == self.proc.stdout.fileno():
if event == select.POLLIN:
self._read()
else:
raise Exception("event on ssh connection")
def _run(self):
try:
if not self.proc:
self._open()
self._listen()
except:
self.log.exception("Exception on ssh event stream:")
self._close()
time.sleep(5)
def run(self):
while True:
self._run()
class Gerrit(object):
log = logging.getLogger("gerrit.Gerrit")
def __init__(self, hostname, username, keyfile=None):
self.username = username
self.hostname = hostname
self.keyfile = keyfile
self.watcher_thread = None
self.event_queue = None
def startWatching(self):
self.event_queue = Queue.Queue()
self.watcher_thread = GerritWatcher(
self,
self.username,
self.hostname,
keyfile=self.keyfile)
self.watcher_thread.start()
def addEvent(self, data):
return self.event_queue.put(data)
def getEvent(self):
return self.event_queue.get()
def review(self, project, change, message, action={}):
cmd = 'gerrit review --project %s --message "%s"' % (
project, message)
for k,v in action.items():
if v == True:
cmd += ' --%s' % k
else:
cmd += ' --%s %s' % (k, v)
cmd += ' %s' % change
out, err = self._ssh(cmd)
return err
def query(self, change):
cmd = 'gerrit query --format json %s"' % (
change)
out, err = self._ssh(cmd)
if not out:
return False
lines = out.split('\n')
if not lines:
return False
data = json.loads(lines[0])
if not data:
return False
self.log.debug("Received data from Gerrit query: \n%s" % (
pprint.pformat(data)))
return data
def _ssh(self, command):
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
client.connect(self.hostname,
username=self.username,
port=29418)
self.log.debug("SSH command:\n%s" % command)
stdin, stdout, stderr = client.exec_command(command)
out = stdout.read()
self.log.debug("SSH received stdout:\n%s" % out)
ret = stdout.channel.recv_exit_status()
self.log.debug("SSH exit status: %s" % ret)
err = stderr.read()
self.log.debug("SSH received stderr:\n%s" % err)
if ret:
raise Exception("Gerrit error executing %s" % command)
return (out, err)
|
Python
| 0.001162
|
@@ -3864,18 +3864,18 @@
if v
-==
+is
True:%0A
|
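Decoded, the fix swaps equality for identity in the review-option loop, so truthy non-boolean values (1, 'yes') are rendered as key/value pairs instead of bare flags:

for k, v in action.items():
    if v is True:          # was: if v == True
        cmd += ' --%s' % k
    else:
        cmd += ' --%s %s' % (k, v)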
bbf8886a2cbf4fa371f0a67157fdd3df3dfa47dd
|
Fix broken MLflow DB README link in CLI docs (#2377)
|
mlflow/db.py
|
mlflow/db.py
|
import click
import mlflow.store.db.utils
@click.group("db")
def commands():
"""
Commands for managing an MLflow tracking database.
"""
pass
@commands.command()
@click.argument("url")
def upgrade(url):
"""
Upgrade the schema of an MLflow tracking database to the latest supported version.
**IMPORTANT**: Schema migrations can be slow and are not guaranteed to be transactional -
**always take a backup of your database before running migrations**. The migrations README,
which is located at
https://github.com/mlflow/mlflow/blob/master/mlflow/store/db_migrations/README, describes
large migrations and includes information about how to estimate their performance and
recover from failures.
"""
if mlflow.store.db.utils._is_initialized_before_mlflow_1(url):
mlflow.store.db.utils._upgrade_db_initialized_before_mlflow_1(url)
mlflow.store.db.utils._upgrade_db(url)
|
Python
| 0
|
@@ -608,16 +608,19 @@
s/README
+.md
, descri
|
f3439c00fbc5e2379150afd777ee29b92bf02b73
|
make executable
|
piopticon.py
|
piopticon.py
|
from picamera.array import PiRGBArray
from picamera import PiCamera
import imutils
import cv2
import sys
import os
import datetime
import time
import json
import argparse
import dropbox
from dropbox.files import WriteMode
from dropbox.exceptions import ApiError, AuthError
from twilio.rest import TwilioRestClient
conf = json.load(open("config.json"))
min_text_seconds = 3600
min_upload_seconds = 3.0
min_motion_frames = 8
camera_warmup_time = 2
delta_thresh = 5
min_area = 5000
parser = argparse.ArgumentParser()
parser.add_argument('-showvideo', action="store_true", default=False)
args = vars(parser.parse_args())
client = TwilioRestClient(conf["twilio_sid"], conf["twilio_token"])
dbx = dropbox.Dropbox(conf["dropbox_token"])
try:
dbx.users_get_current_account()
except AuthError as err:
sys.exit("ERROR: Invalid access token; try re-generating an access token from the app console on the web.")
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 16
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(camera_warmup_time)
avg = None
lastUploaded = datetime.datetime.now()
lastTexted = datetime.datetime.now()
motionCounter = 0
try:
for f in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
frame = f.array
timestamp = datetime.datetime.now()
motion = False
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
if avg is None:
print "Starting background model"
#avg = gray
avg = gray.copy().astype("float")
rawCapture.truncate(0)
continue
cv2.accumulateWeighted(gray, avg, 0.5)
frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
#frameDelta = cv2.absdiff(avg, gray)
thresh = cv2.threshold(frameDelta, delta_thresh, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
(_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in cnts:
if cv2.contourArea(c) < min_area:
continue
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
motion = True
ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
if motion:
if motionCounter >= min_motion_frames:
motionCounter = 0
if (timestamp - lastTexted).seconds >= min_text_seconds:
client.messages.create(
to="4404768415",
from_="+12164506265",
body="Motion Detected"
)
lastTexted = timestamp
if (timestamp - lastUploaded).seconds >= min_upload_seconds:
lastUploaded = timestamp
localName = "{}.jpg".format(timestamp.strftime("%I:%M:%S%p"))
# dbxName = "/"+localName
dbxName = "/{}/{}".format(timestamp.strftime("%Y-%B-%d"), localName)
cv2.imwrite(localName, frame)
with open(localName, 'r') as f:
# We use WriteMode=overwrite to make sure that the settings in the file
# are changed on upload
print("Uploading " + localName + " to Dropbox as " + dbxName + "...")
try:
dbx.files_upload(f, dbxName, mode=WriteMode('overwrite'))
except ApiError as err:
# This checks for the specific error where a user doesn't have
# enough Dropbox space quota to upload this file
if err.error.is_path() and err.error.get_path().error.is_insufficient_space():
sys.exit("ERROR: Cannot back up; insufficient space.")
elif err.user_message_text:
print(err.user_message_text)
sys.exit()
else:
print(err)
sys.exit()
os.remove(localName)
else:
motionCounter += 1
if args['showvideo']:
cv2.imshow("Security Feed", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
rawCapture.truncate(0)
except KeyboardInterrupt:
print "exiting"
rawCapture.truncate(0)
|
Python
| 0.99998
|
@@ -1,20 +1,39 @@
+#!/usr/bin/python%0A%0A
from picamera.array
@@ -152,24 +152,25 @@
import time%0A
+%0A
import json%0A
|
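Decoded, the commit prepends an interpreter line plus a separating blank line (and adds one more blank line before import json), so the script can be invoked directly once it has the execute bit (chmod +x piopticon.py):

#!/usr/bin/python

from picamera.array import PiRGBArray
from picamera import PiCamera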
d5f979236089e7cb3de90b03303e1c3af967331c
|
add UW-Madison, minor formatting
|
uw_si2/rest/rester.py
|
uw_si2/rest/rester.py
|
from __future__ import division, unicode_literals
import six, bson, os
from bson.json_util import dumps, loads
from mpcontribs.rest.rester import MPContribsRester
from mpcontribs.io.core.utils import get_short_object_id
from mpcontribs.io.archieml.mpfile import MPFile
from pandas import Series
class UWSI2Rester(MPContribsRester):
"""UW/SI2-specific convenience functions to interact with MPContribs REST interface"""
z = loads(open(os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'z.json'
), 'r').read())
def get_uwsi2_contributions(self):
"""
- [<host(pretty-formula)>] <mp_cat_id-linked-to-materials-details-page> <cid-linked-to-contribution-details-page>
|- <solute> <D0-value> <Q-value> <toggle-in-graph>
|- ...
- ...
"""
labels = ["Solute element name", "Solute D0 [cm^2/s]", "Solute Q [eV]"]
data = []
for doc in self.query_contributions(
criteria={'project': 'LBNL'},
projection={'_id': 1, 'mp_cat_id': 1, 'content': 1}
):
mpfile = MPFile.from_contribution(doc)
mp_id = mpfile.ids[0]
table = mpfile.tdata[mp_id]['data_supporting'][labels]
table.columns = ['El.', 'D0 [cm^2/s]', 'Q [eV]']
anums = [self.z[el] for el in table['El.']]
table.insert(0, 'Z', Series(anums, index=table.index))
table.sort_values('Z', inplace=True)
table.reset_index(drop=True, inplace=True)
hdata = mpfile.hdata[mp_id]
data.append({
'mp_id': mp_id, 'cid': doc['_id'],
'short_cid': get_short_object_id(doc['_id']),
'formula': hdata['formula'],
'table': table
})
return data
|
Python
| 0.00243
|
@@ -991,14 +991,39 @@
t':
-'LBNL'
+%7B'$in': %5B'LBNL', 'UW-Madison'%5D%7D
%7D,%0A
@@ -1287,25 +1287,24 @@
l.', 'D0 %5Bcm
-%5E
2/s%5D', 'Q %5Be
|
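Decoded, the query criteria widen from a single project string to a MongoDB $in match, and the caret drops out of the D0 column label:

for doc in self.query_contributions(
    criteria={'project': {'$in': ['LBNL', 'UW-Madison']}},
    projection={'_id': 1, 'mp_cat_id': 1, 'content': 1}
):
    # ... as before, except for the column labels:
    table.columns = ['El.', 'D0 [cm2/s]', 'Q [eV]']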
2482cfc3eb18b892e47960321af851d7e8797a74
|
Fix potential KeyError
|
samantha/plugins/twitch_plugin.py
|
samantha/plugins/twitch_plugin.py
|
"""A service to test loading services. It doesn't do anything."""
###############################################################################
#
# TODO: [ ]
#
###############################################################################
# standard library imports
import json
import logging
import time
# related third party imports
import requests
# application specific imports
import samantha.context as context
from samantha.core import subscribe_to
from samantha.plugins.plugin import Plugin
from samantha.tools import eventbuilder
try:
import samantha.variables_private as variables_private
SECRETS = {
"oauth_token": variables_private.twitch_oauth_token,
"client_id": variables_private.twitch_client_id,
"api_version": 5
}
except (ImportError, AttributeError):
variables_private = None
SECRETS = None
__version__ = "1.3.20"
# Initialize the logger
LOGGER = logging.getLogger(__name__)
if variables_private is None:
LOGGER.error("Couldn't access the private variables.")
if SECRETS is None:
LOGGER.error("Couldn't access the API-Key and/or client-ID.")
PLUGIN = Plugin("Twitch", SECRETS is not None, LOGGER, __file__)
STREAM_LIST = context.get_children("media.twitch", default={})
FAILS = 0
# Fail-counter. Used to track consecutive fails of the plugin. Since the list
# of streams is accessed every minute it's not too bad if one of the checks
# completely fails, as long as the following ones succeed again.
# As soon as the failcounter reaches 3 the user will be notified, before that
# he/she doesn't have to be bothered.
@subscribe_to(["system.onstart", "time.schedule.min"])
def check_followed_streams(key, data):
"""Check for new online streams on twitch.tv."""
global FAILS
def _fail(msg):
"""Handle fails of the parent function. """
global FAILS
FAILS += 1 # raise the failcounter
if FAILS <= 3:
LOGGER.warning("Contacting Twitch failed/returned invalid data on "
"the last %d attempts. Current error: %s",
FAILS, msg)
else:
LOGGER.error("Contacting Twitch failed/returned invalid data on "
"the last %d attempts. Current error: %s", FAILS, msg)
return msg
# Make the http-request
url = "https://api.twitch.tv/kraken/streams/followed"
req = None
tries = 0
while tries <= 3 and req is None:
try:
tries += 1
req = requests.get(url, params=SECRETS, timeout=15)
tries = 0
except (requests.exceptions.ConnectionError,
requests.exceptions.SSLError,
requests.exceptions.Timeout) as e:
LOGGER.warning("Connecting to Twitch failed on attempt %d. "
"Retrying in two seconds. Error: %s", tries, e)
time.sleep(2)
if req is None:
LOGGER.warning("Connecting to Twitch failed three times in a row.")
return _fail("Warn: Connecting to Twitch failed three times in a row.")
# Replace null-fields with "null"-strings
text = req.text.replace('null', '"null"')
try:
data = json.loads(text)
except ValueError as e:
# Thrown by json if parsing a string fails due to an invalid format.
LOGGER.warning("The call to Twitch's API returned invalid data. "
"Error: %s Data: %s", e, text)
return _fail("Warn: The call to Twitch's API returned invalid data.")
new_streamlist = {}
# parse the data
if "streams" in data:
# If so, streams are available
data = data["streams"]
FAILS = 0 # reset the failcounter
for item in data:
# Get the account name (unique!) for the current item
channelname = item["channel"]["name"] \
.encode("utf-8").decode("utf-8")
current_game = item["channel"]["game"] \
.encode("utf-8").decode("utf-8")
# save the stream's data in a new list
new_streamlist[channelname] = item
if (channelname not in STREAM_LIST or
STREAM_LIST[channelname] is None):
# The stream came online since the last check
LOGGER.debug(u"'%s' is now online. Playing '%s'",
channelname, current_game)
eventbuilder.eEvent(
sender_id=PLUGIN.name,
keyword="media.twitch.availability.online.{}".format(
channelname),
data=item).trigger()
context.set_property(
"media.twitch.{}".format(channelname), item)
if channelname in STREAM_LIST:
# remove the channel from STREAM_LIST so that it can be
# refilled with the new data
del STREAM_LIST[channelname]
else:
# The channel was already online at the last check
if current_game == STREAM_LIST[channelname]["channel"]["game"]:
# The game hasn't changed
LOGGER.debug("'%s' is still playing '%s'.",
channelname, current_game)
else:
# The game changed
LOGGER.debug("'%s' is now playing '%s'",
channelname, current_game)
eventbuilder.eEvent(
sender_id=PLUGIN.name,
keyword="media.twitch.gamechange.{}".format(
channelname),
data=item).trigger()
context.set_property(
"media.twitch.{}".format(channelname), item)
# remove the channel from STREAM_LIST so that it can be
# refilled with the new data
del STREAM_LIST[channelname]
else:
LOGGER.warning("The data didn't include the 'streams' field.")
return _fail("Warn: The data didn't include the 'streams' field.")
while len(STREAM_LIST) > 0:
# STREAM_LIST now contains only those streams that were online
# during the last check but have gone offline since.
channelname, channeldata = STREAM_LIST.popitem()
if channeldata is not None:
LOGGER.debug("'%s' is now offline.", channelname)
key = "media.twitch.availability.offline.{}".format(channelname)
eventbuilder.eEvent(sender_id=PLUGIN.name,
keyword=key,
data=channeldata).trigger()
context.set_property(
"media.twitch.{}".format(channelname), None)
# update the existing STREAM_LIST with the new streams
for channelname in new_streamlist:
STREAM_LIST[channelname] = new_streamlist[channelname]
return "Streams updated successfully."
|
Python
| 0.998171
|
@@ -877,17 +877,17 @@
= %221.3.2
-0
+1
%22%0A%0A# Ini
@@ -5810,32 +5810,132 @@
nelname), item)%0A
+ if channelname in STREAM_LIST:%0A # refilled with the new data%0A
@@ -5998,49 +5998,8 @@
- # refilled with the new data%0A
|
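Decoded, besides the version bump to 1.3.21 the commit wraps the deletion in the already-online branch in a membership test, which is the potential KeyError the subject refers to; comment placement follows the hunk:

if channelname in STREAM_LIST:
    # remove the channel from STREAM_LIST so that it can be
    # refilled with the new data
    del STREAM_LIST[channelname]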
af54f9666b15cd68e5404b60f495f6d51c1470b1
|
Fix upload_manual_flac command to add its arguments
|
WhatManager2/management/commands/upload_manual_flac.py
|
WhatManager2/management/commands/upload_manual_flac.py
|
#!/usr/bin/env python
from __future__ import unicode_literals
import time
from django.core.management.base import BaseCommand
import requests
from WhatManager2.utils import wm_unicode
from home.models import get_what_client
from what_transcode.tasks import TranscodeSingleJob
def _add_to_wm_transcode(what_id):
print 'Adding {0} to wm'.format(what_id)
post_data = {
'what_id': what_id,
}
response = requests.post('https://karamanolev.com/wm/transcode/request', data=post_data,
auth=('', ''))
response_json = response.json()
if response_json['message'] != 'Request added.':
raise Exception('Cannot add {0} to wm: {1}'.format(what_id, response_json['message']))
def add_to_wm_transcode(what_id):
for i in range(2):
try:
_add_to_wm_transcode(what_id)
return
except Exception:
print 'Error adding to wm, trying again in 2 sec...'
time.sleep(3)
_add_to_wm_transcode(what_id)
def report_progress(msg):
print msg
class Command(BaseCommand):
help = 'Help you create a torrent and add it to WM'
def handle(self, *args, **options):
if len(args) != 1:
print u'Pass only the source directory.'
return 1
source_dir = wm_unicode(args[0])
if source_dir.endswith('/'):
source_dir = source_dir[:-1]
what = get_what_client(lambda: None)
job = TranscodeSingleJob(what, None, report_progress, None, None, source_dir)
job.create_torrent()
raw_input('Please upload the torrent and press enter...')
job.move_torrent_to_dest()
add_to_wm_transcode(job.new_torrent['torrent']['id'])
|
Python
| 0
|
@@ -55,16 +55,33 @@
literals
+%0A%0Aimport requests
%0Aimport
@@ -85,17 +85,16 @@
rt time%0A
-%0A
from dja
@@ -142,25 +142,8 @@
nd%0A%0A
-import requests%0A%0A
from
@@ -180,17 +180,16 @@
unicode%0A
-%0A
from hom
@@ -1152,65 +1152,257 @@
def
-handle(self, *args, **options):%0A if len(args) != 1
+add_arguments(self, parser):%0A parser.add_argument('source_dir', required=True, help='Source directory for the torrent.')%0A%0A def handle(self, *args, **options):%0A source_dir = wm_unicode(options%5B'source_dir'%5D)%0A if not source_dir
:%0A
@@ -1477,48 +1477,8 @@
n 1%0A
- source_dir = wm_unicode(args%5B0%5D)
%0A
|
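Decoded, the positional *args parsing gives way to a Django add_arguments hook. One caveat worth flagging: stock argparse rejects required= on a positional argument, so the call as committed would raise a TypeError; the sketch reproduces the committed text anyway:

def add_arguments(self, parser):
    parser.add_argument('source_dir', required=True,
                        help='Source directory for the torrent.')

def handle(self, *args, **options):
    source_dir = wm_unicode(options['source_dir'])
    if not source_dir:
        print u'Pass only the source directory.'
        return 1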
fdeb06bdf33a55413f1f8f8cd780c84438ad2277
|
add missing import
|
src/zeit/content/cp/browser/blocks/av.py
|
src/zeit/content/cp/browser/blocks/av.py
|
# Copyright (c) 2009 gocept gmbh & co. kg
# See also LICENSE.txt
import zeit.content.cp.interfaces
import zope.app.pagetemplate
import zope.formlib.form
class EditProperties(zope.formlib.form.SubPageEditForm):
template = zope.app.pagetemplate.ViewPageTemplateFile(
'av.edit-properties.pt')
form_fields = zope.formlib.form.Fields(
zeit.content.cp.interfaces.IAVBlock).omit('media_type')
close = False
@property
def form(self):
return super(EditProperties, self).template
@zope.formlib.form.action(_('Apply'))
def handle_edit_action(self, action, data):
self.close = True
# XXX: dear zope.formlib, are you serious?!
return super(EditProperties, self).handle_edit_action.success(data)
|
Python
| 0.000042
|
@@ -59,16 +59,69 @@
SE.txt%0A%0A
+from zeit.content.cp.i18n import MessageFactory as _%0A
import z
|
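Decoded, the one-hunk fix adds the MessageFactory import that defines the _() used in @zope.formlib.form.action(_('Apply')):

from zeit.content.cp.i18n import MessageFactory as _

import zeit.content.cp.interfaces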
db1f0556f72eb84e4273ff8925494de81bf21898
|
rename paths / meta not needed
|
src/learn/dev_ben/generate_training_data.py
|
src/learn/dev_ben/generate_training_data.py
|
import os
import sgf
from time import strftime
from os.path import dirname, abspath
from src.play.model.Board import Board
size = 9
EMPTY_val = 0 # 0.45
BLACK_val = 1 # -1.35
WHITE_val = -1 # 1.05
data_dir = os.path.join(dirname(dirname(dirname(dirname(abspath(__file__))))), 'data')
paths = [
os.path.join(data_dir, 'game_57083.sgf'),
os.path.join(data_dir, 'game_100672.sgf'),
]
training_data_dir = os.path.join(data_dir, 'training_data')
if not os.path.exists(training_data_dir): # create the folder if it does not exist yet
os.makedirs(training_data_dir)
training_data_file = open(
os.path.join(training_data_dir, str(len(paths)) + '_games_' + strftime('%d-%m-%Y_%H-%M-%S') + '.csv'), 'w')
for path in paths:
sgf_file = open(path, 'r')
training_data_file.write(os.path.basename(path) + '\n')
collection = sgf.parse(sgf_file.read())
game_tree = collection.children[0]
meta = game_tree.nodes[0].properties
moves = game_tree.nodes[1:]
# see SGF properties here: www.red-bean.com/sgf/properties.html
board = Board([[EMPTY_val] * size] * size)
training_data_file.write(board.matrix2csv() + '\n')
for move in moves:
keys = move.properties.keys()
if 'B' not in keys and 'W' not in keys: # don't know how to deal with special stuff yet
continue
# can't rely on the order in keys(), apparently must extract it like this
player_color = 'B' if 'B' in move.properties.keys() else 'W'
sgf_move = move.properties[player_color][0]
if len(sgf_move) is 2: # otherwise its a pass
loc = ord(sgf_move[1]) - ord('a'), ord(sgf_move[0]) - ord('a')
player_val = BLACK_val if player_color == 'B' else WHITE_val
opponent_val = WHITE_val if player_color == 'B' else BLACK_val
board.place_stone_and_capture_if_applicable(loc, player_val, opponent_val, EMPTY_val)
training_data_file.write(board.matrix2csv() + '\n')
training_data_file.close()
|
Python
| 0
|
@@ -294,20 +294,24 @@
data')%0D%0A
-path
+sgf_file
s = %5B%0D%0A
@@ -667,20 +667,24 @@
str(len(
-path
+sgf_file
s)) + '_
@@ -754,20 +754,24 @@
path in
-path
+sgf_file
s:%0D%0A
@@ -947,16 +947,51 @@
%5B0%5D%0D%0A
+ moves = game_tree.nodes%5B1:%5D%0D%0A #
meta =
@@ -1025,41 +1025,8 @@
es%0D%0A
- moves = game_tree.nodes%5B1:%5D%0D%0A
|
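Decoded, paths becomes sgf_files throughout, the moves lookup moves up one line, and the unused meta lookup is commented out rather than deleted:

sgf_files = [
    os.path.join(data_dir, 'game_57083.sgf'),
    os.path.join(data_dir, 'game_100672.sgf'),
]
# ... later, per game tree:
moves = game_tree.nodes[1:]
# meta = game_tree.nodes[0].properties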
9e86d12e1135d16b32da5f130e14cfde4ffe9a95
|
Support CloudFlare "email" protection
|
module/plugins/hoster/UpleaCom.py
|
module/plugins/hoster/UpleaCom.py
|
# -*- coding: utf-8 -*-
import re
import urlparse
from module.plugins.internal.XFSHoster import XFSHoster
class UpleaCom(XFSHoster):
__name__ = "UpleaCom"
__type__ = "hoster"
__version__ = "0.17"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?uplea\.com/dl/\w{15}'
__config__ = [("activated" , "bool", "Activated" , True),
("use_premium" , "bool", "Use premium account if available" , True),
("fallback" , "bool", "Fallback to free download if premium fails" , True),
("chk_filesize", "bool", "Check file size" , True),
("max_wait" , "int" , "Reconnect if waiting time is greater than minutes", 10 )]
__description__ = """Uplea.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("Redleon" , None),
("GammaC0de", None)]
PLUGIN_DOMAIN = "uplea.com"
SIZE_REPLACEMENTS = [('ko','KB'), ('mo','MB'), ('go','GB'), ('Ko','KB'), ('Mo','MB'), ('Go','GB')]
NAME_PATTERN = r'<span class="gold-text">(?P<N>.+?)</span>'
SIZE_PATTERN = r'<span class="label label-info agmd">(?P<S>[\d.,]+) (?P<U>[\w^_]+?)</span>'
OFFLINE_PATTERN = r'>You followed an invalid or expired link'
LINK_PATTERN = r'"(https?://\w+\.uplea\.com/anonym/.*?)"'
PREMIUM_ONLY_PATTERN = r'You need to have a Premium subscription to download this file'
WAIT_PATTERN = r'timeText: ?(\d+),'
STEP_PATTERN = r'<a href="(/step/.+)">'
def setup(self):
self.multiDL = False
self.chunk_limit = 1
self.resume_download = True
def handle_free(self, pyfile):
m = re.search(self.STEP_PATTERN, self.data)
if m is None:
self.error(_("STEP_PATTERN not found"))
self.data = self.load(urlparse.urljoin("http://uplea.com/", m.group(1)))
m = re.search(self.WAIT_PATTERN, self.data)
if m:
self.wait(m.group(1), True)
self.retry()
m = re.search(self.LINK_PATTERN, self.data)
if m is None:
self.error(_("LINK_PATTERN not found"))
self.link = m.group(1)
m = re.search(r".ulCounter\({'timer':(\d+)}\)", self.data)
if m:
self.wait(m.group(1))
|
Python
| 0
|
@@ -103,16 +103,206 @@
oster%0A%0A%0A
+def decode_cloudflare_email(value):%0A email = %22%22%0A%0A key = int(value%5B:2%5D, 16)%0A for i in xrange(2, len(value), 2):%0A email += chr(int(value%5Bi:i+2%5D, 16) %5E key)%0A%0A return email%0A%0A%0A
class Up
@@ -402,9 +402,9 @@
%220.1
-7
+8
%22%0A
@@ -1810,16 +1810,174 @@
.+)%22%3E'%0A%0A
+ NAME_REPLACEMENTS = %5B(r'(%3Ca class=%22__cf_email__%22 .+? data-cfemail=%22(%5Cw+?)%22.+)',%0A lambda x: decode_cloudflare_email(x.group(2)))%5D%0A%0A
%0A def
|
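Decoded, the new helper reverses CloudFlare's e-mail obfuscation (the first hex byte is an XOR key applied to every following byte pair), a NAME_REPLACEMENTS rule feeds it the data-cfemail attribute, and the version bumps to 0.18:

def decode_cloudflare_email(value):
    email = ""

    key = int(value[:2], 16)
    for i in xrange(2, len(value), 2):
        email += chr(int(value[i:i+2], 16) ^ key)

    return email


NAME_REPLACEMENTS = [(r'(<a class="__cf_email__" .+? data-cfemail="(\w+?)".+)',
                      lambda x: decode_cloudflare_email(x.group(2)))]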
f9e63022eb975c131bef86a81655885ea0563857
|
Capitalise constants
|
saau/sections/geology/elevation.py
|
saau/sections/geology/elevation.py
|
# geology-elevation1
from os.path import basename
import cartopy.crs as ccrs
from ..image_provider import ImageProvider
from ...utils.download import get_binary
from ...utils.shape import shape_from_zip
url = 'http://www.ga.gov.au/corporate_data/48006/48006_shp.zip'
filename = basename(url)
class ElevationImageProvider(ImageProvider):
def has_required_data(self):
return self.data_dir_exists(filename)
def obtain_data(self):
return get_binary(url, self.data_dir_join(filename))
def build_image(self):
shp = shape_from_zip(self.data_dir_join(filename))
aus_map = self.services.aus_map.get_map()
aus_map.add_geometries(
[rec.geometry for rec in shp.records()],
crs=ccrs.PlateCarree()
)
return aus_map
|
Python
| 0.999886
|
@@ -199,19 +199,19 @@
om_zip%0A%0A
-url
+URL
= 'http
@@ -263,24 +263,24 @@
hp.zip'%0A
-filename
+FILENAME
= basen
@@ -283,19 +283,19 @@
asename(
-url
+URL
)%0A%0A%0Aclas
@@ -405,24 +405,24 @@
_exists(
-filename
+FILENAME
)%0A%0A d
@@ -473,11 +473,11 @@
ary(
-url
+URL
, se
@@ -489,32 +489,32 @@
ta_dir_join(
-filename
+FILENAME
))%0A%0A def
@@ -584,16 +584,16 @@
oin(
-filename
+FILENAME
))%0A%0A
|
327fcfd4c6b0ad10b25c286f271c577afd741099
|
set width for login details to 50 chars.
|
Source/Hg/wb_hg_credential_dialogs.py
|
Source/Hg/wb_hg_credential_dialogs.py
|
'''
====================================================================
Copyright (c) 2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_hg_credential_dialogs.py
'''
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore
import wb_dialog_bases
class WbHgGetLoginDialog(wb_dialog_bases.WbDialog):
def __init__( self, parent, url, realm ):
super().__init__( parent )
self.setWindowTitle( T_('Mercurial Credentials') )
self.username = QtWidgets.QLineEdit( '' )
self.password = QtWidgets.QLineEdit()
self.password.setEchoMode( self.password.Password )
self.username.textChanged.connect( self.nameTextChanged )
self.password.textChanged.connect( self.nameTextChanged )
em = self.fontMetrics().width( 'M' )
self.username.setMinimumWidth( 50*em )
self.addRow( T_('URL'), url )
self.addRow( T_('Realm'), realm )
self.addRow( T_('Username'), self.username )
self.addRow( T_('Password'), self.password )
self.addButtons()
def completeInit( self ):
# set focus
self.username.setFocus()
def nameTextChanged( self, text ):
self.ok_button.setEnabled( self.getUsername() != '' and self.getPassword() != '' )
def getUsername( self ):
return self.username.text().strip()
def getPassword( self ):
return self.password.text().strip()
|
Python
| 0
|
@@ -995,55 +995,8 @@
M' )
-%0A self.username.setMinimumWidth( 50*em )
%0A%0A
@@ -1123,16 +1123,33 @@
username
+, min_width=50*em
)%0A
|
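Decoded, the explicit setMinimumWidth() call disappears and the width travels as a keyword to the dialog base's addRow() instead:

em = self.fontMetrics().width( 'M' )

self.addRow( T_('URL'), url )
self.addRow( T_('Realm'), realm )
self.addRow( T_('Username'), self.username, min_width=50*em )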
bd66185722417cfc24f348b7538e189636c75352
|
Fix full node in VoresourceRendererMixin
|
daiquiri/core/renderers/voresource.py
|
daiquiri/core/renderers/voresource.py
|
from datetime import datetime
from . import XMLRenderer
from .vosi import CapabilitiesRendererMixin, TablesetRendererMixin
class VoresourceRendererMixin(CapabilitiesRendererMixin, TablesetRendererMixin):
def render_voresource(self, metadata):
self.start('ri:Resource', {
'created': self.render_date(metadata.get('created')),
'updated': self.render_date(metadata.get('updated')),
'status': metadata.get('status'),
'xsi:type': metadata.get('type'),
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xmlns:ri': 'http://www.ivoa.net/xml/RegistryInterface/v1.0',
'xmlns:vg': 'http://www.ivoa.net/xml/VORegistry/v1.0',
'xmlns:vr': 'http://www.ivoa.net/xml/VOResource/v1.0',
'xmlns:vs': 'http://www.ivoa.net/xml/VODataService/v1.1',
'xsi:schemaLocation': 'http://www.ivoa.net/xml/RegistryInterface/v1.0 http://www.ivoa.net/xml/VORegistry/v1.0 http://www.ivoa.net/xml/VOResource/v1.0 http://www.ivoa.net/xml/VODataService/v1.1'
})
self.node('title', {}, metadata.get('title'))
self.node('identifier', {}, metadata.get('identifier'))
if metadata.get('short_name'):
self.node('shortName', {}, metadata.get('short_name'))
self.render_curation(metadata.get('curation', {}))
self.render_content(metadata.get('content', {}))
for capability in metadata.get('capabilities', []):
self.render_capability(capability)
tableset = metadata.get('tableset', [])
if tableset:
self.start('tableset')
self.render_tableset(tableset)
self.end('tableset')
rights = metadata.get('rights')
if rights:
self.node('rights', {}, metadata.get('rights'))
full = metadata.get('full')
if full:
self.node('full', {}, metadata.get('full'))
managed_authority = metadata.get('managed_authority')
if managed_authority:
self.node('managedAuthority', {}, managed_authority)
managing_org = metadata.get('managing_org')
if managing_org:
self.node('managingOrg', {}, managing_org)
self.end('ri:Resource')
def render_curation(self, curation_metadata):
self.start('curation')
self.node('publisher', {}, curation_metadata.get('publisher'))
creator = curation_metadata.get('creator')
if creator:
self.start('creator')
self.node('name', {}, creator.get('name'))
self.node('logo', {}, creator.get('logo'))
self.end('creator')
self.node('date', {'role': 'updated'}, self.render_date(curation_metadata.get('date')))
contact = curation_metadata.get('contact')
if contact:
self.start('contact')
self.node('name', {}, contact.get('name'))
self.node('address', {}, contact.get('address'))
self.node('email', {}, contact.get('email'))
self.node('telephone', {}, contact.get('telephone'))
self.end('contact')
self.end('curation')
def render_content(self, content_metadata):
self.start('content')
for subject in content_metadata.get('subjects', []):
self.node('subject', {}, subject)
self.node('description', {}, content_metadata.get('description'))
self.node('referenceURL', {}, content_metadata.get('referenceURL'))
self.node('type', {}, content_metadata.get('type'))
self.end('content')
def render_date(self, date):
return datetime.strptime(date, '%Y-%m-%d').strftime('%Y-%m-%dT%H:%M:%SZ')
class VoresourceRenderer(VoresourceRendererMixin, XMLRenderer):
def render_document(self, data, accepted_media_type=None, renderer_context=None):
self.render_voresource(data)
|
Python
| 0.000001
|
@@ -1826,65 +1826,8 @@
))%0A%0A
- full = metadata.get('full')%0A if full:%0A
@@ -1871,16 +1871,25 @@
t('full'
+, 'false'
))%0A%0A
|
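Decoded, the conditional around the full element goes away; it is now always written, with 'false' as the fallback value:

self.node('full', {}, metadata.get('full', 'false'))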
f6a80e8717e398eb02bd0aeb023f554f1e514363
|
remove group name to group ID conversion
|
oauthenticator/gitlab.py
|
oauthenticator/gitlab.py
|
"""
Custom Authenticator to use GitLab OAuth with JupyterHub
Modified for GitLab by Laszlo Dobos (@dobos)
based on the GitHub plugin by Kyle Kelley (@rgbkrk)
"""
import json
import os
import sys
from tornado.auth import OAuth2Mixin
from tornado import gen, web
import requests
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from jupyterhub.auth import LocalAuthenticator
from traitlets import Set
from .oauth2 import OAuthLoginHandler, OAuthenticator
# Support gitlab.com and gitlab community edition installations
GITLAB_HOST = os.environ.get('GITLAB_HOST') or 'https://gitlab.com'
GITLAB_API = '%s/api/v3' % GITLAB_HOST
def _api_headers(access_token):
return {"Accept": "application/json",
"User-Agent": "JupyterHub",
"Authorization": "token {}".format(access_token)
}
def _get_next_page(response):
# Gitlab uses Link headers for pagination.
# See https://docs.gitlab.com/ee/api/README.html#pagination-link-header
link_header = response.headers.get('Link')
if not link_header:
return
for link in requests.utils.parse_header_links(link_header):
if link.get('rel') == 'next':
return link['url']
# if no "next" page, this is the last one
return None
class GitLabMixin(OAuth2Mixin):
_OAUTH_AUTHORIZE_URL = "%s/oauth/authorize" % GITLAB_HOST
_OAUTH_ACCESS_TOKEN_URL = "%s/oauth/access_token" % GITLAB_HOST
class GitLabLoginHandler(OAuthLoginHandler, GitLabMixin):
pass
class GitLabOAuthenticator(OAuthenticator):
login_service = "GitLab"
client_id_env = 'GITLAB_CLIENT_ID'
client_secret_env = 'GITLAB_CLIENT_SECRET'
login_handler = GitLabLoginHandler
gitlab_group_whitelist = Set(
config=True,
help="Automatically whitelist members of selected groups",
)
@gen.coroutine
def authenticate(self, handler, data=None):
code = handler.get_argument("code", False)
if not code:
raise web.HTTPError(400, "oauth callback made without a token")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
# Exchange the OAuth code for a GitLab Access Token
#
# See: https://github.com/gitlabhq/gitlabhq/blob/master/doc/api/oauth2.md
# GitLab specifies a POST request yet requires URL parameters
params = dict(
client_id=self.client_id,
client_secret=self.client_secret,
code=code,
grant_type="authorization_code",
redirect_uri=self.get_callback_url(handler),
)
validate_server_cert = self.validate_server_cert
url = url_concat("%s/oauth/token" % GITLAB_HOST,
params)
req = HTTPRequest(url,
method="POST",
headers={"Accept": "application/json"},
validate_cert=validate_server_cert,
body='' # Body is required for a POST...
)
resp = yield http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
access_token = resp_json['access_token']
# Determine who the logged in user is
req = HTTPRequest("%s/user" % GITLAB_API,
method="GET",
validate_cert=validate_server_cert,
headers=_api_headers(access_token)
)
resp = yield http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
username = resp_json["username"]
user_id = resp_json["id"]
is_admin = resp_json["is_admin"]
# Check if user is a member of any whitelisted organizations.
# This check is performed here, as it requires `access_token`.
if self.gitlab_group_whitelist:
user_in_group = yield self._check_group_whitelist(
username, user_id, is_admin, access_token)
return username if user_in_group else None
else: # no organization whitelisting
return username
@gen.coroutine
def _check_group_whitelist(self, username, user_id, is_admin, access_token):
http_client = AsyncHTTPClient()
headers = _api_headers(access_token)
if is_admin:
# For admins, /groups returns *all* groups. As a workaround
# we check if we are a member of each group in the whitelist
for group in self.gitlab_group_whitelist:
group_id = _get_group_id(group, headers)
url = "%s/groups/%d/members/%d" % (GITLAB_API, group_id, user_id)
req = HTTPRequest(url, method="GET", headers=headers)
resp = yield http_client.fetch(req)
if resp.code == 200:
return True # user _is_ in group
else:
# For regular users we get all the groups to which they have access
# and check if any of these are in the whitelisted groups
next_page = url_concat("%s/groups" % GITLAB_API,
dict(all_available=True))
while next_page:
req = HTTPRequest(next_page, method="GET", headers=headers)
resp = yield http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
next_page = _get_next_page(resp)
user_groups = set(entry["path"] for entry in resp_json)
# check if any of the organizations seen thus far are in whitelist
if len(self.gitlab_group_whitelist & user_groups) > 0:
return True
return False
class LocalGitLabOAuthenticator(LocalAuthenticator, GitLabOAuthenticator):
"""A version that mixes in local system user creation"""
pass
|
Python
| 0.016446
|
@@ -275,16 +275,54 @@
equests%0A
+from tornado.escape import url_escape%0A
from tor
@@ -4622,16 +4622,32 @@
group in
+ map(url_escape,
self.gi
@@ -4670,67 +4670,11 @@
list
+)
:%0A
- group_id = _get_group_id(group, headers)%0A
@@ -4703,17 +4703,17 @@
groups/%25
-d
+s
/members
@@ -4737,19 +4737,16 @@
I, group
-_id
, user_i
|
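Decoded, group paths are URL-escaped and substituted straight into the members endpoint, so the separate name-to-id lookup (_get_group_id) can be dropped along with its %d placeholder:

from tornado.escape import url_escape

for group in map(url_escape, self.gitlab_group_whitelist):
    url = "%s/groups/%s/members/%d" % (GITLAB_API, group, user_id)
    req = HTTPRequest(url, method="GET", headers=headers)
    resp = yield http_client.fetch(req)
    if resp.code == 200:
        return True  # user _is_ in group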
272e5e79941bbd3293a2285dd35242b5d5f3b4d5
|
remove debug stuff
|
publisher.py
|
publisher.py
|
from wiki import *
from aggregate import *
import logging
class wikipublisher(object):
def __init__(self, dw, export_ns = []):
self.dw = dw
self.export_ns = export_ns
def public_page(self, page, rel_ns = []):
fullname = self.dw.resolve(page, rel_ns)
fullname = fullname.replace(':', '.').strip('.')
if fullname.endswith(".start"):
fullname = fullname[:-6]
fullname = self.dw.resolve(fullname, self.export_ns)
return fullname
def public_file(self, file, rel_ns = []):
return self.public_page(file, rel_ns)
def publish(self, src, dest):
srcns = []
fullsrc = self.dw.resolve(src, [], srcns)
# doc = self.dw.getpage(src, [], srcns)
destns = []
self.dw.resolve(dest, [], destns)
toc = [
' - [[%s]]' % fullsrc
]
doc, chapters = aggregate(self.dw, toc, srcns)
# print(len(doc))
# print(chapters)
newdoc = []
for line in doc:
re1line = wiki.rx_link.sub(lambda m: self.resolve_link(srcns, destns, m), line)
re2line = wiki.rx_image.sub(lambda m: self.resolve_image(srcns, destns, m), re1line)
newdoc.append(re2line)
# DEBUG:
# self.dw.putpage(newdoc, dest, summary='publish page')
def resolve_link(self, srcns, destns, match):
page, section, text = self.dw.parselink(match.group())
if page.startswith('http'):
return self.dw.link(page, section, text)
oldns = []
fullname = self.dw.resolve(page, srcns, oldns)
mappedname = self.public_page(fullname)
# print(page)
# print(fullname)
# print(oldns)
# print(mappedname)
# print(newname)
# print("Resolve link %s -> %s" % (fullname, mappedname))
# TODO: indicate private links
if mappedname is not None:
fullname = self.dw.resolverel(mappedname, destns)
else:
logging.warning("Unresolved link %s" % fullname)
return self.dw.link(fullname, section, text)
def resolve_image(self, srcns, destns, match):
file, caption, params = self.dw.parseimage(match.group())
# print(file, caption, params)
# print("Image %s" % file)
# return match.group()
if file.startswith('http'):
fullname = file
else:
fullname = self.dw.resolve(file, srcns)
# print(fullname)
logging.info("Image %s" % fullname)
mappedname = self.public_file(fullname)
# print(" %s" % mappedname)
if mappedname is not None:
srcinfo = self.dw.fileinfo(fullname)
destinfo = self.dw.fileinfo(mappedname)
# print(srcinfo)
# print(destinfo)
if (srcinfo['size'] != destinfo['size']) or (srcinfo['lastModified'] > destinfo['lastModified']):
logging.info("Copy image ...")
# DEBUG:
# data = self.dw.getfile(fullname)
# if not self.dw.putfile(mappedname, data):
# logging.warning("Copy failed!")
newname = self.dw.resolverel(mappedname, destns)
else:
logging.warning("Unresolved image %s" % fullname)
newname = fullname
image = self.dw.image(newname, caption, params)
# print(image)
return image
class restrictedwikipublisher(wikipublisher):
def __init__(self, dw, public_pages, exceptions = [], export_ns = []):
wikipublisher.__init__(self, dw, export_ns)
self.public_pages = public_pages
self.exceptions = exceptions
def public_page(self, page, rel_ns = []):
fullname = self.dw.resolve(page, rel_ns)
public = False
for rx in self.public_pages:
if rx.match(fullname) is not None:
public = True
break
if not public:
return None
for rx in self.exceptions:
if rx.match(fullname) is not None:
return None
return wikipublisher.public_page(self, fullname)
|
Python
| 0.000001
|
@@ -1107,21 +1107,8 @@
%0A%0A%09%09
-# DEBUG:%0A%09%09#
self
@@ -2582,23 +2582,8 @@
%09%09%09%09
-# DEBUG:%0A%09%09%09%09#
data
@@ -2615,18 +2615,16 @@
me)%0A%09%09%09%09
-#
if not s
@@ -2662,18 +2662,16 @@
):%0A%09%09%09%09%09
-#
logging.
|
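Decoded, the three DEBUG guards come off, so the publisher actually writes the page and copies changed images instead of dry-running; the original file indents with tabs, shown here as spaces:

self.dw.putpage(newdoc, dest, summary='publish page')
# ... and in resolve_image, when the size or mtime differs:
data = self.dw.getfile(fullname)
if not self.dw.putfile(mappedname, data):
    logging.warning("Copy failed!")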
504d484d85ebb577943899b5589eeff9d2a67d5a
|
FIX: fix negative triggers due to old neuromag system
|
mne/event.py
|
mne/event.py
|
"""IO with fif files containing events
"""
# Authors: Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
import numpy as np
from .fiff.constants import FIFF
from .fiff.tree import dir_tree_find
from .fiff.tag import read_tag
from .fiff.open import fiff_open
from .fiff.write import write_int, start_block, start_file, end_block, end_file
from .fiff.pick import pick_channels
def pick_events(events, include=None, exclude=None):
"""Select some events
Parameters
----------
include: int | list | None
An event id to include or a list of them.
If None all events are included.
exclude: int | list | None
An event id to exclude or a list of them.
If None no event is excluded. If include is not None
the exclude parameter is ignored.
Returns
-------
events: array, shape (n_events, 3)
The list of events
"""
if include is not None:
if not isinstance(include, list):
include = [include]
mask = np.zeros(len(events), dtype=np.bool)
for e in include:
mask = np.logical_or(mask, events[:, 2] == e)
events = events[mask]
elif exclude is not None:
if not isinstance(exclude, list):
exclude = [exclude]
mask = np.ones(len(events), dtype=np.bool)
for e in exclude:
mask = np.logical_and(mask, events[:, 2] != e)
events = events[mask]
else:
events = np.copy(events)
if len(events) == 0:
raise RuntimeError("No events found")
return events
def read_events(filename, include=None, exclude=None):
"""Reads events from fif file
Parameters
----------
filename: string
name of the fif file
include: int | list | None
An event id to include or a list of them.
If None all events are included.
exclude: int | list | None
An event id to exclude or a list of them.
If None no event is excluded. If include is not None
the exclude parameter is ignored.
Returns
-------
events: array, shape (n_events, 3)
The list of events
"""
fid, tree, _ = fiff_open(filename)
# Find the desired block
events = dir_tree_find(tree, FIFF.FIFFB_MNE_EVENTS)
if len(events) == 0:
fid.close()
raise ValueError('Could not find event data')
events = events[0]
for d in events['directory']:
kind = d.kind
pos = d.pos
if kind == FIFF.FIFF_MNE_EVENT_LIST:
tag = read_tag(fid, pos)
event_list = tag.data
fid.close()
break
else:
fid.close()
raise ValueError('Could not find any events')
event_list = event_list.reshape(len(event_list) / 3, 3)
event_list = pick_events(event_list, include, exclude)
return event_list
def write_events(filename, event_list):
"""Write events to file
Parameters
----------
filename: string
name of the fif file
events: array, shape (n_events, 3)
The list of events
"""
# Start writing...
fid = start_file(filename)
start_block(fid, FIFF.FIFFB_MNE_EVENTS)
write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, event_list.T)
end_block(fid, FIFF.FIFFB_MNE_EVENTS)
end_file(fid)
def find_events(raw, stim_channel='STI 014'):
"""Find events from raw file
Parameters
----------
raw : Raw object
The raw data
stim_channel : string or list of string
Name of the stim channel or all the stim channels
affected by the trigger.
Returns
-------
events : array
The array of event onsets in time samples.
"""
if not isinstance(stim_channel, list):
stim_channel = [stim_channel]
pick = pick_channels(raw.info['ch_names'], include=stim_channel,
exclude=[])
if len(pick) == 0:
raise ValueError('No stim channel found to extract event triggers.')
data, times = raw[pick, :]
idx = np.where(np.all(np.diff(data, axis=1) > 0, axis=0))[0]
events_id = data[0, idx + 1].astype(np.int)
idx += raw.first_samp + 1
events = np.c_[idx, np.zeros_like(idx), events_id]
return events
def merge_events(events, ids, new_id):
"""Merge a set of events
Parameters
----------
events : array
Events
ids : array of int
The ids of events to merge
new_id : int
The new id
Returns
-------
new_events: array
The new events
"""
events = events.copy()
events_numbers = events[:, 2]
for i in ids:
events_numbers[events_numbers == i] = new_id
return events
|
Python
| 0
|
@@ -181,16 +181,32 @@
lause)%0A%0A
+import warnings%0A
import n
@@ -4104,16 +4104,253 @@
ick, :%5D%0A
+ if np.any(data %3C 0):%0A warnings.warn('Trigger channel contains negative values. '%0A 'Taking absolute value.')%0A data = np.abs(data) # make sure trig channel is positive%0A data = data.astype(np.int)%0A
idx
@@ -4539,16 +4539,110 @@
nts_id%5D%0A
+ print %22%25s events found%22 %25 len(events)%0A print %22Events id: %25s%22 %25 np.unique(events%5B:, 2%5D)%0A
retu
|
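Decoded, the fix rectifies the trigger channel before edge detection, since old Neuromag systems can deliver negative trigger values; a warnings import comes in at the top, and two summary print statements follow the event assembly:

import warnings

data, times = raw[pick, :]
if np.any(data < 0):
    warnings.warn('Trigger channel contains negative values. '
                  'Taking absolute value.')
    data = np.abs(data)  # make sure trig channel is positive
data = data.astype(np.int)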
c44cc51f08aba57703bbc5513c9e5808be78d4b0
|
fix change of API for the method get_from_label_and_partner_field
|
account_statement_completion_label/statement.py
|
account_statement_completion_label/statement.py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# account_statement_completion_label for OpenERP
# Copyright (C) 2013 Akretion (http://www.akretion.com). All Rights Reserved
# @author Benoît GUILLOT <benoit.guillot@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp.osv import fields, orm
from collections import defaultdict
class ErrorTooManyLabel(Exception):
"""
New Exception definition that is raised when more than one label is matched by
the completion rule.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class AccountBankSatement(orm.Model):
"""
We add a basic button and stuff to support the auto-completion
of the bank statement once lines have been imported or manually filled in.
"""
_inherit = "account.bank.statement"
def open_completion_label(self, cr, uid, ids, context=None):
return {
'name': 'Statement Label',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.statement.label',
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': False,
}
class AccountStatementCompletionRule(orm.Model):
_inherit = "account.statement.completion.rule"
def get_from_label_and_partner_field(self, cr, uid, line_id, context=None):
"""
Match the partner and the account based on the name field of the
statement line and the table account.statement.label.
If more than one statement label matches, raise the ErrorTooManyLabel error.
:param int line_id: id of the concerned account.bank.statement.line
:return:
A dict of value that can be passed directly to the write method of
the statement line or {}
{'partner_id': value,
'account_id': value,
...}
"""
st_obj = self.pool.get('account.bank.statement.line')
label_obj = self.pool.get('account.statement.label')
st_line = st_obj.browse(cr, uid, line_id, context=context)
res = {}
# As we have to iterate on each label for each line,
# we memorize the pair to avoid
# to redo computation for each line.
# Following code can be done by a single SQL query
# but this option is not really maintainable
if not context.get('label_memorizer'):
context['label_memorizer'] = defaultdict(list)
label_ids = label_obj.search(cr, uid,
['|',
('profile_id', '=', st_line.statement_id.profile_id.id),
('profile_id', '=', False)],
context=context)
for label in label_obj.browse(cr, uid, label_ids, context=context):
line_ids = st_obj.search(cr, uid,
[('statement_id', '=', st_line.statement_id.id),
('name', 'ilike', label.label),
('already_completed', '=', False)],
context=context)
import pdb;pdb.set_trace()
for line_id in line_ids:
context['label_memorizer'][line_id].append({'partner_id': label.partner_id.id,
'account_id': label.account_id.id})
if st_line['id'] in context['label_memorizer']:
label_info = context['label_memorizer'][st_line['id']]
if len(label_info) > 1:
raise ErrorTooManyLabel(_('Line named "%s" (Ref:%s) was matched by '
'more than one statement label.') %
(st_line['name'], st_line['ref']))
res['partner_id'] = label_info[0]['partner_id']
res['account_id'] = label_info[0]['account_id']
return res
class AccountStatementLabel(orm.Model):
"""Create a new class to map an account statement label to a partner
and a specific account
"""
_name = "account.statement.label"
_description = "Account Statement Label"
_columns = {
'partner_id': fields.many2one('res.partner', 'Partner'),
'label': fields.char('Bank Statement Label', size=100),
'account_id': fields.many2one('account.account', 'Account',
help='Account corresponding to the label '
'for a given partner'),
'company_id': fields.many2one('res.company', 'Company'),
'profile_id': fields.many2one('account.statement.profile',
'Account Profile'),
}
_defaults = {
'company_id': lambda s,cr,uid,c:
s.pool.get('res.company')._company_default_get(cr, uid,
'account.statement.label',
context=c),
}
_sql_constraints = [
('profile_label_unique', 'unique (label, profile_id, company_id)',
         'You cannot have a similar label for the same profile and company'),
]
def save_and_close_label(self, cr, uid, ids, context=None):
return {'type': 'ir.actions.act_window_close'}
|
Python
| 0.000001
|
@@ -2145,39 +2145,39 @@
(self, cr, uid,
+st_
line
-_id
, context=None):
@@ -2731,24 +2731,86 @@
st_obj =
+ self.pool.get('account.bank.statement')%0A st_line_obj =
self.pool.g
@@ -2914,21 +2914,23 @@
st
-_line
+atement
= st_ob
@@ -2947,23 +2947,42 @@
r, uid,
+st_
line
-_id
+%5B'statement_id'%5D%5B0%5D
, contex
@@ -3527,32 +3527,24 @@
e_id', '=',
-st_line.
statement_id
@@ -3540,19 +3540,16 @@
tatement
-_id
.profile
@@ -3792,24 +3792,29 @@
ne_ids = st_
+line_
obj.search(c
@@ -3889,16 +3889,8 @@
=',
-st_line.
stat
@@ -3894,19 +3894,16 @@
tatement
-_id
.id),%0A
@@ -4114,51 +4114,8 @@
xt)%0A
- import pdb;pdb.set_trace()%0A
@@ -6273,8 +6273,9 @@
_close'%7D
+%0A
|
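A minimal sketch of the memoizer pattern used in get_from_label_and_partner_field above, with plain dicts standing in for the ORM records (all data here is invented for illustration; 'ilike' is approximated as a case-insensitive substring test):

from collections import defaultdict

labels = [
    {"label": "EDF", "partner_id": 1, "account_id": 10},
    {"label": "AMAZON", "partner_id": 2, "account_id": 20},
]
lines = [
    {"id": 1, "name": "EDF invoice 42", "ref": "A1"},
    {"id": 2, "name": "Amazon order 7", "ref": "A2"},
]

memo = defaultdict(list)  # line id -> values matched for that line
for lbl in labels:
    for line in lines:
        if lbl["label"].lower() in line["name"].lower():
            memo[line["id"]].append({"partner_id": lbl["partner_id"],
                                     "account_id": lbl["account_id"]})

for line in lines:
    hits = memo[line["id"]]
    if len(hits) > 1:
        raise ValueError('Line named "%s" (Ref:%s) was matched by more than '
                         'one statement label.' % (line["name"], line["ref"]))
    print(line["name"], "->", hits[0] if hits else {})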
fe7d5ec956f0277d0689dec57d9e145fcd19f79f
|
Modify svm
|
mnist_svm.py
|
mnist_svm.py
|
import numpy as np
import matplotlib.pyplot as plt
GRAY_SCALE_RANGE = 255
import pickle
data_filename = 'data_deskewed.pkl'
print('Loading data from file \'' + data_filename + '\' ...')
with open(data_filename, 'rb') as f:
train_labels = pickle.load(f)
train_images = pickle.load(f)
test_labels = pickle.load(f)
test_images = pickle.load(f)
num_pixel = pickle.load(f)
print('Data loading complete.')
train_images = np.array(train_images)
train_images.resize(train_images.size // num_pixel, num_pixel)
test_images = np.array(test_images)
test_images.resize(test_images.size // num_pixel, num_pixel)
test_labels = np.array(test_labels)
train_labels = np.array(train_labels)
## normalization
train_images = train_images / GRAY_SCALE_RANGE
test_images = test_images / GRAY_SCALE_RANGE
from sklearn import svm, metrics
clf = svm.SVC(gamma = 0.001)
clf.fit(train_images, train_labels)
prediction = clf.predict(test_images)
print("Classification report for classifier %s:\n%s\n"
% (clf, metrics.classification_report(test_labels, prediction)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(test_labels, prediction))
|
Python
| 0.000426
|
@@ -834,16 +834,18 @@
etrics%0A%0A
+#
clf = sv
@@ -864,16 +864,49 @@
= 0.001)
+%0Aclf = svm.SVC(kernel = 'linear')
%0A%0Aclf.fi
@@ -919,16 +919,23 @@
n_images
+%5B:1000%5D
, train_
@@ -940,16 +940,23 @@
n_labels
+%5B:1000%5D
)%0A%0Apredi
|
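The change above comments out the RBF classifier, switches to a linear kernel and fits on only the first 1000 samples, presumably to cut training time. A rough equivalent on scikit-learn's bundled digits dataset (a small MNIST stand-in; the dataset and split are our assumptions, not the author's setup):

from sklearn import svm
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split

X, y = load_digits(return_X_y=True)
X = X / 16.0  # digits pixels range over 0..16, so this normalizes as above
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = svm.SVC(kernel='linear')
clf.fit(X_train[:1000], y_train[:1000])  # cap the training set, as in the diff
print("test accuracy:", clf.score(X_test, y_test))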
ec7411f409f07bd04778c9baf509adb10f446f10
|
allow cross origin requests
|
mock/mock.py
|
mock/mock.py
|
import cherrypy
class MockController:
def poi(self, location):
with open("poi.json") as poifile:
return poifile.read()
def faq(self, location):
with open("faq.json") as faqfile:
return faqfile.read()
def phrasebook(self, location):
with open("phrasebook.json") as phrasebookfile:
return phrasebookfile.read()
def setup_routes():
d = cherrypy.dispatch.RoutesDispatcher()
d.connect('mock', '/:action/:location', controller=MockController())
dispatcher = d
return dispatcher
conf = {
'/': {
'request.dispatch': setup_routes()
}
}
if __name__ == '__main__':
app = cherrypy.tree.mount(None, config=conf)
cherrypy.config.update({'server.socket_host': '0.0.0.0'})
cherrypy.quickstart(app)
|
Python
| 0
|
@@ -53,32 +53,103 @@
elf, location):%0A
+ cherrypy.response.headers%5B'Access-Control-Allow-Origin'%5D = '*'%0A
with ope
@@ -226,32 +226,103 @@
elf, location):%0A
+ cherrypy.response.headers%5B'Access-Control-Allow-Origin'%5D = '*'%0A
with ope
@@ -409,32 +409,103 @@
elf, location):%0A
+ cherrypy.response.headers%5B'Access-Control-Allow-Origin'%5D = '*'%0A
with ope
@@ -1003,8 +1003,9 @@
art(app)
+%0A
|
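Rather than repeating the header assignment in every handler, CherryPy's tool mechanism can attach it once. A sketch using the stock cherrypy.Tool API (the tool name 'cors' is our choice, not part of the project above):

import cherrypy

def add_cors_header():
    # Runs before each handler and stamps the CORS header on the response.
    cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'

cherrypy.tools.cors = cherrypy.Tool('before_handler', add_cors_header)

class MockController:
    @cherrypy.tools.cors()
    def poi(self, location):
        with open("poi.json") as poifile:
            return poifile.read()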
fa58cda42afaf1ed80352d9b59cf473a16706436
|
work around, closes #464
|
vent/helpers/paths.py
|
vent/helpers/paths.py
|
import errno
import os
from vent.api.templates import Template
class PathDirs:
""" Global path directories for vent """
def __init__(self,
base_dir=os.path.join(os.path.expanduser("~"), ".vent/"),
plugins_dir="plugins/",
meta_dir=os.path.join(os.path.expanduser("~"), ".vent")):
self.base_dir = base_dir
self.plugins_dir = base_dir + plugins_dir
self.meta_dir = meta_dir
self.init_file = base_dir+"vent.init"
# make sure the paths exists, if not create them
self.ensure_dir(self.base_dir)
self.ensure_dir(self.plugins_dir)
self.ensure_dir(self.meta_dir)
@staticmethod
def ensure_dir(path):
""" Tries to create directory, if fails, checks if path already exists """
try:
os.makedirs(path)
except OSError as e: # pragma: no cover
if e.errno == errno.EEXIST and os.path.isdir(path):
return (True, "exists")
else:
return (False, e)
return (True, path)
@staticmethod
def ensure_file(path):
""" Checks if file exists, if fails, tries to create file """
try:
exists = os.path.isfile(path)
if not exists:
                with open(path, 'w+') as fname:
fname.write("initialized")
return (True, path)
return (True, "exists")
except OSError as e: # pragma: no cover
return (False, e)
def host_config(self):
""" Ensure the host configuration file exists """
default_file_dir = "/tmp/vent_files"
config = Template(template=os.path.join(self.base_dir, "vent.cfg"))
resp = config.section("main")
if resp[0]:
resp = config.option("main", "files")
if not resp[0]:
config.add_option("main", "files", default_file_dir)
self.ensure_dir(default_file_dir)
else:
config.add_option("main", "files", default_file_dir)
self.ensure_dir(default_file_dir)
config.write_config()
return
|
Python
| 0
|
@@ -15,16 +15,32 @@
mport os
+%0Aimport platform
%0A%0Afrom v
@@ -1616,32 +1616,175 @@
file exists %22%22%22%0A
+ if platform.system() == 'Darwin':%0A default_file_dir = os.path.join(os.path.expanduser(%22~%22), %22vent_files%22)%0A else:%0A
default_
|
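The workaround branches on platform.system() so macOS gets a per-user directory instead of /tmp; issue #464 itself is not shown here, so the motivation is inferred from the diff. The pattern in isolation:

import os
import platform

def default_file_dir():
    # platform.system() reports 'Darwin' on macOS; other systems keep /tmp.
    if platform.system() == 'Darwin':
        return os.path.join(os.path.expanduser("~"), "vent_files")
    return "/tmp/vent_files"

print(default_file_dir())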
504f57bcecb2cc35b1efd742e60e75d458f68d4a
|
Remove test block
|
munch.py
|
munch.py
|
import sys
class KaleInterp:
'''
TODO
- Write self.if_call
- solve problems related self.if_call
- specifically, the .next in said call
'''
def __init__(self):
#self.keywords is a dictionary of keywords and function calls
self.keywords = {'write:' : self.write, 'var:' : self.variable,
'if:' : self.if_call, 'input:' : self.input,
'math:' : self.math}
self.kale_variables = {} # holds the variables from the kale program
self.open_file = open(sys.argv[1], encoding='utf-8')
# all variable must be declared above this method call
self.file_reader()
def file_reader(self):
for line in self.open_file:
split_line = line.split() # turns the line into an array for iter
self.read_key_words(split_line)
def read_key_words(self, split_line):
for key in self.keywords: # iterate through self.keywords
            # try statement is to accommodate blank lines in the kale file
try:
if split_line[0] == key: # compare first word to keys
self.keywords[key](split_line) # make appropriate method call
except IndexError:
continue
def write(self, current_line):
buffer_string = '' # declare variable
for index in range(len(current_line)):
try:
# reassign string with words
# if statements puts variables into printed strings
if current_line[index + 1][0] == '_':
buffer_string += str(self.kale_variables[current_line[index + 1]])
else:
buffer_string += current_line[index + 1] + ' '
except IndexError:
break
print(buffer_string)
def variable(self, current_line):
# these assign variable to a dictionary to store them
if current_line[1] == 'bool:':
var_obj = self.bool_obj(current_line)
elif current_line[1] == 'int:':
var_obj = self.int_obj(current_line)
else:
var_obj = self.str_obj(current_line)
self.kale_variables[current_line[2]] = var_obj
# determines and returns the proper python type for each variable
def bool_obj(self, current_line):
if current_line[4] == 'True':
return True
else:
return False
# determines and returns the proper python type for each variable
def int_obj(self, current_line):
return int(current_line[4])
# determines and returns the proper python type for each variable
# gets all of the string
def str_obj(self, current_line):
var_buffer = ''
for line_index in range(len(current_line)):
try:
var_buffer += current_line[line_index + 4] + ' '
except IndexError:
break
return var_buffer
def if_call(self, whole_line):
conditional_statement = [] # this is the conditional to evaluate
result_statement = [] # this holds what the statement does
# result_index = 0
for x in range(len(whole_line)):
if whole_line[x + 1] == '->':
result_index = x + 2 # this is where the product begins
break
conditional_statement.append(whole_line[x + 1])
while result_index < len(whole_line):
result_statement.append(whole_line[result_index])
result_index += 1
# evaluates the statement and acts on it
if self.operation_eval(conditional_statement, True):
self.read_key_words(result_statement)
# the 'apostrophe' argument is because the method is multi use
def operation_eval(self, operation, apostrophe):
# evaluates the operational value and returns a simplified True or False
eval_buffer = ''
for item in operation:
if item[0] == '_':
for var_name in self.kale_variables:
if item == var_name:
eval_buffer += ' ' + str(
self.insert_apostrophe(self.kale_variables[var_name], apostrophe))
break
else:
eval_buffer += ' ' + self.insert_apostrophe(item, apostrophe)
# TEST
eval_buffer = eval_buffer.split()
for character in range(len(eval_buffer)):
if eval_buffer[character] == '/':
eval_buffer[character] == '//'
eval_buffer = ''.join(eval_buffer)
# END TEST
return eval(eval_buffer)
def input(self, split_line):
innit_counter = 5 # this is the index at which the prompt starts
prompt = '' # the variable to hold the prompt
while innit_counter < len(split_line):
prompt += split_line[innit_counter] + ' '
innit_counter += 1
tmp = input(prompt + '\n')
new_variable_line = [split_line[1], split_line[2], split_line[3],
split_line[4], tmp]
self.variable(new_variable_line)
def math(self, split_line):
innit_counter = 5
operation = []
while innit_counter < len(split_line):
operation.append(split_line[innit_counter])
innit_counter += 1
resolved_var = [split_line[1], split_line[2], split_line[3],
split_line[4], self.operation_eval(operation, False)]
self.variable(resolved_var)
# this takes the apostrophe argument because not everything this is called
# on actually needs this stuff
def insert_apostrophe(self, word, apostrophe):
if apostrophe:
try:
int(word)
return word
except ValueError:
if word != True and word != False and word != 'not' and word != 'and' and word != 'or':
return '\'' + word + '\''
else:
return word
KaleInterp()
|
Python
| 0.000001
|
@@ -3622,226 +3622,8 @@
e)%0A%0A
-%0A%0A%0A%09%09# TEST%0A%09%09eval_buffer = eval_buffer.split()%0A%09%09for character in range(len(eval_buffer)):%0A%09%09%09if eval_buffer%5Bcharacter%5D == '/':%0A%09%09%09%09eval_buffer%5Bcharacter%5D == '//'%0A%09%09eval_buffer = ''.join(eval_buffer)%0A%09%09# END TEST%0A%0A%0A%0A%0A
%09%09re
|
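For the record, the deleted TEST block was a no-op: it used '==' (a comparison) where '=' (an assignment) was needed, so '/' was never actually rewritten to '//'. Had integer division been the goal, a working version could have read (our reconstruction, not the author's code):

tokens = "7 / 2".split()
tokens = ['//' if t == '/' else t for t in tokens]
print(eval(' '.join(tokens)))  # prints 3 rather than 3.5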
7f248f252b0a846e39c60d66485f796576b2179e
|
fix doctest
|
aoc2016/day9.py
|
aoc2016/day9.py
|
import re
def parse(lines):
return ''.join([x.strip() for x in lines])
class Marker(object):
def __init__(self, chars, repeats):
self.chars = chars
self.repeats = repeats
@classmethod
def parse(clazz, text):
"""
>>> m, rest = Marker.parse('(10x2)abc')
>>> m.chars
10
>>> m.repeats
2
>>> rest
'abc'
"""
pattern = r"\((\d+)x(\d+)\)"
m = re.match(pattern, text)
if not m:
return None, text
return Marker(int(m.group(1)), int(m.group(2))), text[len(m.group(0)):]
def take(s, n):
return s[:n], s[n:]
def decompress(compressed):
"""
>>> decompress('ADVENT')
'ADVENT'
>>> decompress('A(1x5)BC')
'ABBBBBC'
>>> decompress('(3x3)XYZ')
'XYZXYZXYZ'
"""
result = []
while compressed:
m, compressed = Marker.parse(compressed)
if m is None:
c, compressed = take(compressed, 1)
result.append(c)
else:
s, compressed = take(compressed, m.chars)
result.append(s * m.repeats)
return ''.join(result)
def decompressed_length2(compressed):
"""
>>> decompress2('ADVENT')
'ADVENT'
>>> decompress2('X(8x2)(3x3)ABCY')
'XABCABCABCABCABCABCY'
"""
result = 0
while compressed:
m, compressed = Marker.parse(compressed)
if m is None:
c, compressed = take(compressed, 1)
result += 1
else:
s, compressed = take(compressed, m.chars)
d = decompressed_length2(s)
result += d * m.repeats
return result
def step1(input):
return len(decompress(input))
def step2(input):
return decompressed_length2(input)
|
Python
| 0.000001
|
@@ -1226,16 +1226,25 @@
compress
+ed_length
2('ADVEN
@@ -1247,32 +1247,25 @@
DVENT')%0A
-'ADVENT'
+6
%0A %3E%3E%3E dec
@@ -1271,16 +1271,25 @@
compress
+ed_length
2('X(8x2
@@ -1309,30 +1309,10 @@
-'XABCABCABCABCABCABCY'
+20
%0A
|
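The doctest fix is mechanical but worth spelling out: the examples must call the function that actually exists (decompressed_length2, not decompress2) and must expect its return value, an integer length, rather than the expanded string. A tiny self-checking illustration of the corrected expectations (standalone sketch, not the real recursive implementation):

import doctest

def decompressed_length(s):
    """
    >>> decompressed_length('ADVENT')
    6
    >>> decompressed_length('XABCABCABCABCABCABCY')
    20
    """
    return len(s)

doctest.testmod()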
6b0c9c9a06884535450c81b7da8f22d82e5ffbb0
|
Fix folders initialization in the Referee
|
onitu/referee/referee.py
|
onitu/referee/referee.py
|
import os
import functools
import zmq
from logbook import Logger
from onitu.escalator.client import Escalator, EscalatorClosed
from onitu.utils import get_events_uri
from .cmd import UP, DEL, MOV
class Referee(object):
"""Referee class, receive all events and deal with them.
The events are represented as Redis List 'events' that should be
appended with RPUSH. Each item is the file id (fid) of the file
which triggered the event.
The Referee give orders to the entries via his PUB ZMQ socket,
whose port is stored in the Redis 'referee:publisher' key.
The Plug of each service should subscribe to this port with a PULL
socket and subscribe to all the events starting by their name.
The notifications are sent to the publishers as multipart
messages with three parts :
- The name of the addressee (the channel)
- The name of the service from which the file should be transferred
- The id of the file
"""
def __init__(self, session):
super(Referee, self).__init__()
self.logger = Logger("Referee")
self.context = zmq.Context.instance()
self.escalator = Escalator(session)
self.get_events_uri = functools.partial(get_events_uri, session)
self.services = self.escalator.get('services', default=[])
self.folders = {
key.split(':')[-1]: options
for key, options in self.escalator.range('folders')
}
self.handlers = {
UP: self._handle_update,
DEL: self._handle_deletion,
MOV: self._handle_move,
}
def listen(self):
"""Listen to all the events, and handle them
"""
self.publisher = self.context.socket(zmq.PUB)
self.publisher.bind(self.get_events_uri('referee', 'publisher'))
self.logger.info("Started")
try:
listener = self.context.socket(zmq.PULL)
listener.bind(self.get_events_uri('referee'))
while True:
events = self.escalator.range(prefix='referee:event:')
for key, args in events:
cmd = args[0]
if cmd in self.handlers:
fid = key.split(':')[-1]
self.handlers[cmd](fid, *args[1:])
self.escalator.delete(key)
listener.recv()
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
pass
else:
raise
except EscalatorClosed:
pass
finally:
if listener:
listener.close()
def close(self):
self.escalator.close()
self.publisher.close()
self.context.term()
def _handle_deletion(self, fid, driver):
"""
Notify the owners when a file is deleted
"""
metadata = self.escalator.get('file:{}'.format(fid), default=None)
if not metadata:
return
owners = set(metadata['owners'])
filename = metadata['filename']
self.logger.info("Deletion of '{}' from {}", filename, driver)
if driver in owners:
owners.remove(driver)
self.escalator.delete(u'file:{}:service:{}'.format(fid, driver))
metadata['owners'] = tuple(owners)
self.escalator.put('file:{}'.format(fid), metadata)
if not owners:
self.escalator.delete('file:{}'.format(fid))
return
self.notify(owners, DEL, fid)
def _handle_move(self, old_fid, driver, new_fid):
"""
Notify the owners when a file is moved
"""
metadata = self.escalator.get('file:{}'.format(old_fid), default=None)
if not metadata:
return
owners = set(metadata['owners'])
filename = metadata['filename']
new_metadata = self.escalator.get('file:{}'.format(new_fid))
new_filename = new_metadata['filename']
self.logger.info(
"Moving of '{}' to '{}' from {}", filename, new_filename, driver
)
if driver in owners:
owners.remove(driver)
self.escalator.delete(u'file:{}:service:{}'.format(old_fid, driver))
metadata['owners'] = tuple(owners)
self.escalator.put('file:{}'.format(old_fid), metadata)
if not owners:
self.escalator.delete('file:{}'.format(old_fid))
return
self.notify(owners, MOV, old_fid, new_fid)
def _handle_update(self, fid, driver):
"""Choose who are the entries that are concerned by the event
and send a notification to them.
For the moment all the entries are notified for each event, but
this should change when the rules will be introduced.
"""
metadata = self.escalator.get('file:{}'.format(fid))
owners = set(metadata['owners'])
uptodate = set(metadata['uptodate'])
filename = os.path.join("/", metadata['filename'])
self.logger.info("Update for '{}' from {}", filename, driver)
if driver not in owners:
            self.logger.debug("The file '{}' was not supposed to be on {}, "
"but syncing anyway.", filename, driver)
# We sync each file with every services as we don't handle
# folders yet
should_own = set(self.services)
if should_own != owners:
metadata['owners'] = list(should_own)
self.escalator.put('file:{}'.format(fid), metadata)
assert uptodate
source = next(iter(uptodate))
self.notify(should_own - uptodate, UP, fid, source)
for name in owners.difference(should_own):
self.logger.debug("The file '{}' on {} is no longer under onitu "
"control. should be deleted.", filename, name)
def notify(self, drivers, cmd, fid, *args):
if not drivers:
return
for name in drivers:
self.escalator.put(
u'service:{}:event:{}'.format(name, fid), (cmd, args)
)
self.publisher.send(name.encode('utf-8'))
|
Python
| 0.000001
|
@@ -1367,23 +1367,55 @@
')%5B-1%5D:
+%7B'
options
+': options, 'services': set()%7D
%0A
@@ -1467,17 +1467,17 @@
('folder
-s
+:
')%0A
@@ -1478,24 +1478,278 @@
%0A %7D%0A%0A
+ for service in self.services:%0A folders = self.escalator.get(%0A 'service:%7B%7D:folders'.format(service), default=%5B%5D%0A )%0A%0A for name in folders:%0A self.folders%5Bname%5D%5B'services'%5D.add(service)%0A%0A
self
|
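The fix turns self.folders from a flat name-to-options mapping into name -> {'options': ..., 'services': set()} and then fills each service set from the per-service folder lists. The resulting shape, with invented keys:

folder_rows = [("folder:music", {"mode": "rw"}), ("folder:docs", {"mode": "r"})]
service_folders = {"A": ["music"], "B": ["music", "docs"]}

folders = {
    key.split(':')[-1]: {'options': options, 'services': set()}
    for key, options in folder_rows
}
for service, names in service_folders.items():
    for name in names:
        folders[name]['services'].add(service)

print(sorted(folders["music"]["services"]))  # -> ['A', 'B']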
cef46656955cca0a5b0a83487418cc733a79e52b
|
fix profile url (#849)
|
open_discussions/urls.py
|
open_discussions/urls.py
|
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from rest_framework_jwt.views import refresh_jwt_token
from open_discussions.views import index, saml_metadata
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^status/', include('server_status.urls')),
url(r'', include('authentication.urls')),
url(r'', include('social_django.urls', namespace='social')),
url(r'', include('channels.urls')),
url(r'', include('profiles.urls')),
url(r'', include('notifications.urls')),
url(r'', include('embedly.urls')),
url(r'^api/token/refresh/', refresh_jwt_token),
# React App
url(r'^$', index, name='open_discussions-index'),
url(r'^auth_required/$', index),
url(r'^content_policy/$', index),
url( # so that we can use reverse() to link to this
r'^channel/(?P<channel_name>[A-Za-z0-9_]+)/(?P<post_id>[A-Za-z0-9_]+)/comment/(?P<comment_id>[A-Za-z0-9_]+)/$',
index,
name='channel-post-comment',
),
url( # so that we can use reverse() to link to this
r'^channel/(?P<channel_name>[A-Za-z0-9_]+)/(?P<post_id>[A-Za-z0-9_]+)/$',
index,
name='channel-post',
),
url( # so that we can use reverse() to link to this
r'^channel/(?P<channel_name>[A-Za-z0-9_]+)/$',
index,
name='channel',
),
url(r'^settings/(?P<token>[^/]+)/$', index, name='settings-anon'),
url(r'^channel/', index),
url(r'^manage/', index),
url(r'^create_post/', index),
url(r'^moderation/', index),
url(r'^settings/', index),
url(r'^saml/metadata/', saml_metadata, name='saml-metadata'),
url(r'^profile/(?P<channel_name>[A-Za-z0-9_]+)/', index, name='profile'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar # pylint: disable=wrong-import-position, wrong-import-order
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
|
Python
| 0
|
@@ -2350,33 +2350,16 @@
profile/
-(?P%3Cchannel_name%3E
%5BA-Za-z0
@@ -2363,17 +2363,16 @@
-z0-9_%5D+
-)
/', inde
|
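The regex change drops the named capture group, so the route still matches /profile/<something>/ but reverse('profile') no longer demands a channel_name kwarg. Demonstrated with re on an illustrative path:

import re

old = r'^profile/(?P<channel_name>[A-Za-z0-9_]+)/'
new = r'^profile/[A-Za-z0-9_]+/'

m = re.match(new, 'profile/some_user/')
print(bool(m), m.groupdict())  # -> True {}  (no kwargs required anymore)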
6b8de33dbd50243d566e095005699f0611a38d8b
|
add new fail message during commit
|
netmiko/vyos/vyos_ssh.py
|
netmiko/vyos/vyos_ssh.py
|
from __future__ import print_function
from __future__ import unicode_literals
import time
from netmiko.cisco_base_connection import CiscoSSHConnection
class VyOSSSH(CiscoSSHConnection):
"""Implement methods for interacting with VyOS network devices."""
def session_preparation(self):
"""Prepare the session after the connection has been established."""
self._test_channel_read()
self.set_base_prompt()
self.disable_paging(command="set terminal length 0")
# Clear the read buffer
time.sleep(.3 * self.global_delay_factor)
self.clear_buffer()
def check_enable_mode(self, *args, **kwargs):
"""No enable mode on VyOS."""
pass
def enable(self, *args, **kwargs):
"""No enable mode on VyOS."""
pass
def exit_enable_mode(self, *args, **kwargs):
"""No enable mode on VyOS."""
pass
def check_config_mode(self, check_string='#'):
"""Checks if the device is in configuration mode"""
return super(VyOSSSH, self).check_config_mode(check_string=check_string)
def config_mode(self, config_command='configure', pattern=r'[edit]'):
"""Enter configuration mode."""
return super(VyOSSSH, self).config_mode(config_command=config_command, pattern=pattern)
def exit_config_mode(self, exit_config='exit', pattern=r'exit'):
"""Exit configuration mode"""
output = ""
if self.check_config_mode():
output = self.send_command_timing(exit_config, strip_prompt=False, strip_command=False)
if 'Cannot exit: configuration modified' in output:
output += self.send_command_timing('exit discard', strip_prompt=False,
strip_command=False)
if self.check_config_mode():
raise ValueError("Failed to exit configuration mode")
return output
def commit(self, comment='', delay_factor=.1):
"""
Commit the candidate configuration.
Commit the entered configuration. Raise an error and return the failure
if the commit fails.
default:
command_string = commit
comment:
command_string = commit comment <comment>
"""
delay_factor = self.select_delay_factor(delay_factor)
error_marker = 'Failed to generate committed config'
command_string = 'commit'
if comment:
command_string += ' comment "{}"'.format(comment)
output = self.config_mode()
output += self.send_command_expect(command_string, strip_prompt=False,
strip_command=False, delay_factor=delay_factor)
if error_marker in output:
raise ValueError('Commit failed with following errors:\n\n{}'.format(output))
return output
def set_base_prompt(self, pri_prompt_terminator='$', alt_prompt_terminator='#',
delay_factor=1):
"""Sets self.base_prompt: used as delimiter for stripping of trailing prompt in output."""
prompt = super(VyOSSSH, self).set_base_prompt(pri_prompt_terminator=pri_prompt_terminator,
alt_prompt_terminator=alt_prompt_terminator,
delay_factor=delay_factor)
# Set prompt to user@hostname (remove two additional characters)
self.base_prompt = prompt[:-2].strip()
return self.base_prompt
def send_config_set(self, config_commands=None, exit_config_mode=False, delay_factor=1,
max_loops=150, strip_prompt=False, strip_command=False,
config_mode_command=None):
"""Remain in configuration mode."""
return super(VyOSSSH, self).send_config_set(config_commands=config_commands,
exit_config_mode=exit_config_mode,
delay_factor=delay_factor, max_loops=max_loops,
strip_prompt=strip_prompt,
strip_command=strip_command,
config_mode_command=config_mode_command)
def save_config(self, cmd='', confirm=True, confirm_response=''):
"""Not Implemented"""
raise NotImplementedError
|
Python
| 0
|
@@ -2359,16 +2359,17 @@
arker =
+%5B
'Failed
@@ -2397,16 +2397,34 @@
config'
+, 'Commit failed'%5D
%0A
@@ -2752,16 +2752,41 @@
if
+any(x in output for x in
error_ma
@@ -2789,26 +2789,17 @@
r_marker
- in output
+)
:%0A
|
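The diff generalizes the single error_marker string into a list and swaps the membership test for any(), so further failure phrases can be appended without touching the check. In isolation:

error_markers = ['Failed to generate committed config', 'Commit failed']
output = "Commit failed\n[edit]"
print(any(marker in output for marker in error_markers))  # -> True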
069ef2ae7d43c8afaee66d35ff03b4673c92e42e
|
Use a set to get unique dirs, and sorted to make the output stable
|
mopidy/core/library.py
|
mopidy/core/library.py
|
from __future__ import unicode_literals
import collections
import urlparse
import pykka
class LibraryController(object):
pykka_traversable = True
def __init__(self, backends, core):
self.backends = backends
self.core = core
def _get_backend(self, uri):
uri_scheme = urlparse.urlparse(uri).scheme
return self.backends.with_library.get(uri_scheme, None)
def _get_backends_to_uris(self, uris):
if uris:
backends_to_uris = collections.defaultdict(list)
for uri in uris:
backend = self._get_backend(uri)
if backend is not None:
backends_to_uris[backend].append(uri)
else:
backends_to_uris = dict([
(b, None) for b in self.backends.with_library.values()])
return backends_to_uris
def browse(self, uri):
"""
Browse directories and tracks at the given ``uri``.
``uri`` is a string which represents some directory belonging to a
        backend. To get the initial root directories for backends pass None as
the URI.
Returns a list of :class:`mopidy.models.Ref` objects for the
directories and tracks at the given ``uri``.
The :class:`~mopidy.models.Ref` objects representing tracks keep the
track's original URI. A matching pair of objects can look like this::
Track(uri='dummy:/foo.mp3', name='foo', artists=..., album=...)
Ref.track(uri='dummy:/foo.mp3', name='foo')
The :class:`~mopidy.models.Ref` objects representing directories have
backend specific URIs. These are opaque values, so no one but the
backend that created them should try and derive any meaning from them.
The only valid exception to this is checking the scheme, as it is used
to route browse requests to the correct backend.
For example, the dummy library's ``/bar`` directory could be returned
like this::
Ref.directory(uri='dummy:directory:/bar', name='bar')
:param string uri: URI to browse
:rtype: list of :class:`mopidy.models.Ref`
"""
if uri is None:
backends = self.backends.with_library_browse.values()
root = [b.library.root_directory.get() for b in backends]
return list(collections.OrderedDict.fromkeys(root))
scheme = urlparse.urlparse(uri).scheme
backend = self.backends.with_library_browse.get(scheme)
if not backend:
return []
return backend.library.browse(uri).get()
def find_exact(self, query=None, uris=None, **kwargs):
"""
Search the library for tracks where ``field`` is ``values``.
If the query is empty, and the backend can support it, all available
tracks are returned.
If ``uris`` is given, the search is limited to results from within the
URI roots. For example passing ``uris=['file:']`` will limit the search
to the local backend.
Examples::
# Returns results matching 'a' from any backend
find_exact({'any': ['a']})
find_exact(any=['a'])
# Returns results matching artist 'xyz' from any backend
find_exact({'artist': ['xyz']})
find_exact(artist=['xyz'])
# Returns results matching 'a' and 'b' and artist 'xyz' from any
# backend
find_exact({'any': ['a', 'b'], 'artist': ['xyz']})
find_exact(any=['a', 'b'], artist=['xyz'])
# Returns results matching 'a' if within the given URI roots
# "file:///media/music" and "spotify:"
find_exact(
{'any': ['a']}, uris=['file:///media/music', 'spotify:'])
find_exact(any=['a'], uris=['file:///media/music', 'spotify:'])
:param query: one or more queries to search for
:type query: dict
:param uris: zero or more URI roots to limit the search to
:type uris: list of strings or :class:`None`
:rtype: list of :class:`mopidy.models.SearchResult`
"""
query = query or kwargs
futures = [
backend.library.find_exact(query=query, uris=backend_uris)
for (backend, backend_uris)
in self._get_backends_to_uris(uris).items()]
return [result for result in pykka.get_all(futures) if result]
def lookup(self, uri):
"""
Lookup the given URI.
If the URI expands to multiple tracks, the returned list will contain
them all.
:param uri: track URI
:type uri: string
:rtype: list of :class:`mopidy.models.Track`
"""
backend = self._get_backend(uri)
if backend:
return backend.library.lookup(uri).get()
else:
return []
def refresh(self, uri=None):
"""
Refresh library. Limit to URI and below if an URI is given.
:param uri: directory or track URI
:type uri: string
"""
if uri is not None:
backend = self._get_backend(uri)
if backend:
backend.library.refresh(uri).get()
else:
futures = [b.library.refresh(uri)
for b in self.backends.with_library.values()]
pykka.get_all(futures)
def search(self, query=None, uris=None, **kwargs):
"""
Search the library for tracks where ``field`` contains ``values``.
If the query is empty, and the backend can support it, all available
tracks are returned.
If ``uris`` is given, the search is limited to results from within the
URI roots. For example passing ``uris=['file:']`` will limit the search
to the local backend.
Examples::
# Returns results matching 'a' in any backend
search({'any': ['a']})
search(any=['a'])
# Returns results matching artist 'xyz' in any backend
search({'artist': ['xyz']})
search(artist=['xyz'])
# Returns results matching 'a' and 'b' and artist 'xyz' in any
# backend
search({'any': ['a', 'b'], 'artist': ['xyz']})
search(any=['a', 'b'], artist=['xyz'])
# Returns results matching 'a' if within the given URI roots
# "file:///media/music" and "spotify:"
search({'any': ['a']}, uris=['file:///media/music', 'spotify:'])
search(any=['a'], uris=['file:///media/music', 'spotify:'])
:param query: one or more queries to search for
:type query: dict
:param uris: zero or more URI roots to limit the search to
:type uris: list of strings or :class:`None`
:rtype: list of :class:`mopidy.models.SearchResult`
"""
query = query or kwargs
futures = [
backend.library.search(query=query, uris=backend_uris)
for (backend, backend_uris)
in self._get_backends_to_uris(uris).items()]
return [result for result in pykka.get_all(futures) if result]
|
Python
| 0
|
@@ -53,16 +53,32 @@
ections%0A
+import operator%0A
import u
@@ -2294,16 +2294,23 @@
-root = %5B
+unique_dirs = %7B
b.li
@@ -2353,17 +2353,17 @@
backends
-%5D
+%7D
%0A
@@ -2378,50 +2378,58 @@
urn
-list(collections.OrderedDict.fromkeys(root
+sorted(unique_dirs, key=operator.attrgetter('name'
))%0A%0A
|
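Deduplicating through a set relies on Ref models hashing by value; sorting by name afterwards makes the listing deterministic across runs. A namedtuple reproduces both properties:

import operator
from collections import namedtuple

Ref = namedtuple('Ref', 'uri name')

roots = [Ref('dummy:/', 'dummy'), Ref('file:/', 'local'), Ref('dummy:/', 'dummy')]
unique_dirs = set(roots)  # the duplicate root collapses to one entry
print(sorted(unique_dirs, key=operator.attrgetter('name')))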
eaae2a1e88572e224621e242be1d15e92065f15e
|
Use new extension setup() API
|
mopidy_nad/__init__.py
|
mopidy_nad/__init__.py
|
from __future__ import unicode_literals
import os
import pygst
pygst.require('0.10')
import gst
import gobject
from mopidy import config, ext
__version__ = '1.0.0'
class Extension(ext.Extension):
dist_name = 'Mopidy-NAD'
ext_name = 'nad'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def register_gstreamer_elements(self):
from .mixer import NadMixer
gobject.type_register(NadMixer)
gst.element_register(NadMixer, 'nadmixer', gst.RANK_MARGINAL)
|
Python
| 0
|
@@ -430,40 +430,28 @@
def
-register_gstreamer_elements(self
+setup(self, registry
):%0A
|
daf577f1e4bab13f9d5f2e3fdad8765dbab70dfe
|
refactor settings
|
openstax/settings/dev.py
|
openstax/settings/dev.py
|
from .base import *
DEBUG = True
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# BASE_URL required for notification emails
BASE_URL = 'http://localhost:8000'
try:
from .local import *
except ImportError:
pass
##################################
# ACCOUNTS SETTINGS #
##################################
# Use default login, logout and profile urls
ACC_APP_LOGIN_URL = None
ACC_APP_LOGOUT_URL = None
ACC_APP_PROFILE_URL = None
ACCOUNTS_LOGIN_URL = 'https://accounts-qa.openstax.org/login?'
AUTHORIZATION_URL = 'https://accounts-qa.openstax.org/oauth/authorize'
ACCESS_TOKEN_URL = 'https://accounts-qa.openstax.org/oauth/token'
USER_QUERY = 'https://accounts-qa.openstax.org/api/user?'
SOCIAL_AUTH_OPENSTAX_KEY = '0a3c6b8c21091873805181b4b2a42cdbabeec6f6871332b817f59fac37033537'
SOCIAL_AUTH_OPENSTAX_SECRET = '40035a7f2a7948b33ffce370af3918d692b958a6cc195e8b57b1fbe621a88157'
|
Python
| 0.000002
|
@@ -234,17 +234,16 @@
pass%0A%0A
-%0A
########
@@ -273,20 +273,24 @@
###%0A#
-
+OVERRIDE
ACCOUNT
@@ -302,20 +302,16 @@
TINGS
-
#%0A######
@@ -342,17 +342,17 @@
####%0A%0A#
-U
+u
se defau
@@ -363,17 +363,21 @@
ogin
-,
+g and
logout
and
@@ -376,24 +376,41 @@
out
-and profile urls
+urls,%0A# Needed for selenium test.
%0AACC
@@ -488,456 +488,4 @@
one%0A
-%0AACCOUNTS_LOGIN_URL = 'https://accounts-qa.openstax.org/login?'%0AAUTHORIZATION_URL = 'https://accounts-qa.openstax.org/oauth/authorize'%0AACCESS_TOKEN_URL = 'https://accounts-qa.openstax.org/oauth/token'%0AUSER_QUERY = 'https://accounts-qa.openstax.org/api/user?'%0A%0ASOCIAL_AUTH_OPENSTAX_KEY = '0a3c6b8c21091873805181b4b2a42cdbabeec6f6871332b817f59fac37033537'%0ASOCIAL_AUTH_OPENSTAX_SECRET = '40035a7f2a7948b33ffce370af3918d692b958a6cc195e8b57b1fbe621a88157'%0A%0A
|
9a24356641d5729aa0df347e19cf1cd1f8c979a5
|
Fix SVN updating
|
observatory/dashboard/models/Repository.py
|
observatory/dashboard/models/Repository.py
|
# Copyright (c) 2010, Nate Stedman <natesm@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import os
import settings
import stat
import subprocess
from dashboard.util import format_diff
from django.db import models
from exceptions import Exception
from lib import feedparser, dateutil, pyvcs
from lib.pyvcs.backends import get_backend
from EventSet import EventSet
# a version control repository
class Repository(EventSet):
class Meta:
app_label = 'dashboard'
# web access to the repository
web_url = models.URLField("Repository Web Address", max_length = 128)
# cloned repository fields
clone_url = models.CharField("Repository Clone Address", max_length = 128)
vcs = models.CharField("Version Control System", max_length = 3,
default = 'git',
choices = (('git', 'git'),
('svn', 'Subversion'),
('hg', 'Mercurial'),
('bzr', 'Bazaar')))
# non-cloned repository fields
repo_rss = models.URLField("Repository RSS Feed", max_length = 128)
cmd = models.CharField("Clone Command", max_length = 128)
def fetch(self):
import Commit
events = []
if not self.from_feed:
# this is a cloned repository
fresh_clone = True
# ensure that REPO_ROOT already exists
try:
os.makedirs(settings.REPO_ROOT, 0770)
except OSError as e:
pass
# construct the name of the directory into which to clone the repository
dest_dir = os.path.join(settings.REPO_ROOT, self.project.url_path)
# check if we've already cloned this project
if os.path.isdir(dest_dir):
fresh_clone = False
# clone the repository, or update our copy
clone_repo_function(self.vcs)(self.clone_url, dest_dir, fresh_clone)
# add the commits
backend = get_backend(self.vcs if self.vcs != 'svn' else 'git')
repository = backend.Repository(dest_dir)
# inspect the last five days of commits
for commit in repository.get_recent_commits():
date = commit.time
try:
date = (date - date.utcoffset()).replace(tzinfo=None)
except:
pass
# extract the first line for the title of the commit
try:
commit_title = commit.message.split("\n")[0]
except:
commit_title = commit.message
events.append(self.add_event(Commit.Commit,
title = commit_title,
description = commit.message,
date = date,
author_name = commit.author,
from_feed = False,
extra_args = {
"diff": format_diff(commit.diff),
"repository_id": self.id,
}))
else:
# this is a feed-driven repository
for commit in feedparser.parse(self.repo_rss).entries:
date = dateutil.parser.parse(commit.date)
try:
date = (date - date.utcoffset()).replace(tzinfo=None)
except:
pass
events.append(self.add_event(Commit.Commit,
title = commit.title,
description = commit.description,
date = date,
author_name = commit.author_detail['name'],
from_feed = True,
extra_args = { "repository_id": self.id }
))
# find the new most recent date
dates = [event.date for event in events if event is not None]
dates.append(self.most_recent_date)
self.most_recent_date = max(dates)
self.save()
def clone_cmd(self):
if not self.from_feed:
cmds = { 'git': 'clone', 'svn': 'co', 'hg': 'clone', 'bzr': 'branch' }
return '{0} {1} {2}'.format(self.vcs, cmds[self.vcs], self.clone_url)
else:
return self.cmd
def __unicode__(self):
return self.web_url
def clone_git_repo(clone_url, destination_dir, fresh_clone = False):
if fresh_clone:
clone_cmdline = ["git", "clone", "--mirror", "--bare",
clone_url, destination_dir]
else:
clone_cmdline = ["git", "--git-dir", destination_dir, "fetch"]
clone_subprocess = subprocess.Popen(clone_cmdline)
if clone_subprocess.wait() != 0:
# TODO: handle this better
print "failed to clone from {0}".format(clone_url)
# do something with the repos
def clone_svn_repo(clone_url, destination_dir, fresh_clone = False):
if fresh_clone:
clone_cmdline = ["git", "svn", "clone", clone_url, destination_dir]
else:
clone_cmdline = ["git", "svn", "--git-dir", destination_dir, "fetch"]
if subprocess.call(clone_cmdline) != 0:
print "failed to clone from {0}".format(clone_url)
def clone_repo_function(vcs):
clone_repo_functions = {
'git': clone_git_repo,
'svn': clone_svn_repo
}
if not vcs in clone_repo_functions:
print "don't know how to clone {0}".format(vcs)
return None
return clone_repo_functions[vcs]
|
Python
| 0
|
@@ -5098,32 +5098,155 @@
if fresh_clone:%0A
+ # make the repo's directory%0A try:%0A os.makedirs(destination_dir, 0770)%0A except OSError as e:%0A pass%0A %0A
clone_cmdlin
@@ -5347,38 +5347,8 @@
vn%22,
- %22--git-dir%22, destination_dir,
%22fe
@@ -5390,16 +5390,39 @@
_cmdline
+, cwd = destination_dir
) != 0:%0A
|
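The essence of the SVN fix, as the diff suggests: 'git svn fetch' must run inside the clone (via cwd) rather than be pointed at it with --git-dir, and a fresh clone has to create its destination directory first. Reduced to a sketch:

import subprocess

def update_svn_mirror(destination_dir):
    # Run from within the clone; the previous --git-dir invocation did not
    # update the mirror correctly.
    return subprocess.call(["git", "svn", "fetch"], cwd=destination_dir)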
3a26ad7b5c24e025a92e5ebaa88c19e4b63a33a3
|
Correct trainer_lib_test setup and lower its BUILD deps.
|
tensor2tensor/utils/trainer_lib_test.py
|
tensor2tensor/utils/trainer_lib_test.py
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for trainer_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor import models # pylint: disable=unused-import
from tensor2tensor.data_generators import algorithmic
from tensor2tensor.data_generators import problem as problem_lib
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib
import tensorflow as tf
class TrainerLibTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
algorithmic.TinyAlgo.setup_for_test()
def testExperiment(self):
exp_fn = trainer_lib.create_experiment_fn(
"transformer",
"tiny_algo",
algorithmic.TinyAlgo.data_dir,
train_steps=1,
eval_steps=1,
min_eval_frequency=1,
use_tpu=False)
run_config = trainer_lib.create_run_config(
model_dir=algorithmic.TinyAlgo.data_dir, num_gpus=0,
use_tpu=False)
hparams = registry.hparams("transformer_tiny_tpu")
exp = exp_fn(run_config, hparams)
exp.test()
def testModel(self):
# HParams
hparams = trainer_lib.create_hparams(
"transformer_tiny", data_dir=algorithmic.TinyAlgo.data_dir,
problem_name="tiny_algo")
# Dataset
problem = hparams.problem
dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN,
algorithmic.TinyAlgo.data_dir)
dataset = dataset.repeat(None).padded_batch(10, dataset.output_shapes)
features = dataset.make_one_shot_iterator().get_next()
features = problem_lib.standardize_shapes(features)
# Model
model = registry.model("transformer")(hparams, tf.estimator.ModeKeys.TRAIN)
logits, losses = model(features)
self.assertTrue("training" in losses)
loss = losses["training"]
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
logits_val, loss_val = sess.run([logits, loss])
logits_shape = list(logits_val.shape)
logits_shape[1] = None
self.assertAllEqual(logits_shape, [10, None, 1, 1, 4])
self.assertEqual(loss_val.shape, tuple())
def testMultipleTargetModalities(self):
# HParams
hparams = trainer_lib.create_hparams(
"transformer_tiny", data_dir=algorithmic.TinyAlgo.data_dir,
problem_name="tiny_algo")
tm = hparams.problem.get_hparams().target_modality
hparams.problem.get_hparams().target_modality = {
"targets": tm,
"A": tm,
"B": tm
}
# Dataset
problem = hparams.problem
dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN,
algorithmic.TinyAlgo.data_dir)
dataset = dataset.repeat(None).padded_batch(10, dataset.output_shapes)
features = dataset.make_one_shot_iterator().get_next()
features = problem_lib.standardize_shapes(features)
features["A"] = features["B"] = features["targets"]
# Model
model = registry.model("transformer")(hparams, tf.estimator.ModeKeys.TRAIN)
def body(args, mb=model.body):
out = mb(args)
return {"targets": out, "A": out, "B": out}
model.body = body
logits, losses = model(features)
self.assertTrue("training" in losses)
loss = losses["training"]
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run([logits, loss])
if __name__ == "__main__":
tf.test.main()
|
Python
| 0
|
@@ -742,74 +742,8 @@
on%0A%0A
-from tensor2tensor import models # pylint: disable=unused-import%0A
from
@@ -857,16 +857,94 @@
lem_lib%0A
+from tensor2tensor.models import transformer # pylint: disable=unused-import%0A
from ten
@@ -2770,39 +2770,82 @@
es(self):%0A #
-HParams
+Use existing hparams and override target modality.
%0A hparams = t
@@ -2980,13 +2980,8 @@
%0A
- tm =
hpa
@@ -2988,36 +2988,32 @@
rams.problem
-.get
_hparams
().target_mo
@@ -2992,34 +2992,32 @@
.problem_hparams
-()
.target_modality
@@ -3016,20 +3016,39 @@
modality
+ = %7B
%0A
+ %22targets%22:
hparams
@@ -3059,20 +3059,16 @@
blem
-.get
_hparams
().t
@@ -3063,18 +3063,16 @@
_hparams
-()
.target_
@@ -3071,36 +3071,33 @@
.target_modality
- = %7B
+,
%0A %22target
@@ -3094,20 +3094,51 @@
%22
-targets%22: tm
+A%22: hparams.problem_hparams.target_modality
,%0A
@@ -3148,37 +3148,112 @@
%22
-A%22: tm,
+B%22: hparams.problem_hparams.target_modality
%0A
+%7D%0A
-%22B%22: tm%0A %7D
+hparams.problem._hparams = hparams.problem_hparams
%0A%0A
|
945c93fa91cb7b3b14f002e37e2a8bd2ee915fdd
|
Clean the mako cache between runs, because it breaks theme switching
|
nikola/mako_templates.py
|
nikola/mako_templates.py
|
########################################
# Mako template handlers
########################################
import os
import shutil
from mako import util, lexer
from mako.lookup import TemplateLookup
lookup = None
cache = {}
def get_deps(filename):
text = util.read_file(filename)
lex = lexer.Lexer(text=text, filename=filename)
lex.parse()
deps = []
for n in lex.template.nodes:
if getattr(n, 'keyword', None) == "inherit":
deps.append(n.attributes['file'])
# TODO: include tags are not handled
return deps
def get_template_lookup(directories):
print "Directories:", directories
cache_dir = os.path.join('cache', '.mako.tmp')
if os.path.exists(cache_dir):
shutil.rmtree(cache_dir)
return TemplateLookup(
directories=directories,
module_directory=cache_dir,
output_encoding='utf-8',
)
def render_template(template_name, output_name, context, global_context):
template = lookup.get_template(template_name)
print template.filename
context.update(global_context)
data = template.render_unicode(**context)
if output_name is not None:
try:
os.makedirs(os.path.dirname(output_name))
except:
pass
with open(output_name, 'w+') as output:
output.write(data)
return data
def template_deps(template_name):
# We can cache here because depedencies should
# not change between runs
if cache.get(template_name, None) is None:
template = lookup.get_template(template_name)
dep_filenames = get_deps(template.filename)
deps = [template.filename]
for fname in dep_filenames:
deps += template_deps(fname)
cache[template_name] = tuple(deps)
return list(cache[template_name])
|
Python
| 0
|
@@ -600,46 +600,8 @@
s):%0A
- print %22Directories:%22, directories%0A
@@ -983,36 +983,8 @@
me)%0A
- print template.filename%0A
|
c21d7bee740fe27012d9affed27b6c489e5f6cac
|
add logging types
|
avalanche/evaluation/metric_results.py
|
avalanche/evaluation/metric_results.py
|
################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 30-12-2020 #
# Author(s): Lorenzo Pellegrini #
# E-mail: contact@continualai.org #
# Website: www.continualai.org #
################################################################################
from dataclasses import dataclass
from typing import List, Optional, TYPE_CHECKING, Tuple, Union
from PIL.Image import Image
from matplotlib.figure import Figure
from torch import Tensor
if TYPE_CHECKING:
from .metric_definitions import Metric
MetricResult = Optional[List["MetricValue"]]
@dataclass
class TensorImage:
image: Tensor
def __array__(self):
return self.image.numpy()
MetricType = Union[float, int, Tensor, Image, TensorImage, Figure]
class AlternativeValues:
"""
A container for alternative representations of the same metric value.
"""
def __init__(self, *alternatives: MetricType):
self.alternatives: Tuple[MetricType] = alternatives
def best_supported_value(
self, *supported_types: type
) -> Optional[MetricType]:
"""
Retrieves a supported representation for this metric value.
:param supported_types: A list of supported value types.
:return: The best supported representation. Returns None if no supported
representation is found.
"""
for alternative in self.alternatives:
if isinstance(alternative, supported_types):
return alternative
return None
class MetricValue(object):
"""
The result of a Metric.
A result has a name, a value and a "x" position in which the metric value
should be plotted.
The "value" field can also be an instance of "AlternativeValues", in which
case it means that alternative representations exist for this value. For
instance, the Confusion Matrix can be represented both as a Tensor and as
    an Image. It's up to the Logger, according to its capabilities, to decide
    which representation to use.
"""
def __init__(
self,
origin: "Metric",
name: str,
value: Union[MetricType, AlternativeValues],
x_plot: int,
):
"""
Creates an instance of MetricValue.
:param origin: The originating Metric instance.
:param name: The display name of this value. This value roughly
corresponds to the name of the plot in which the value should
be logged.
:param value: The value of the metric. Can be a scalar value,
            a PIL Image, or a Tensor. If more than one possible representation
            of the same value exists, an instance of :class:`AlternativeValues`
can be passed. For instance, the Confusion Matrix can be represented
both as an Image and a Tensor, in which case an instance of
:class:`AlternativeValues` carrying both the Tensor and the Image
is more appropriate. The Logger instance will then select the most
appropriate way to log the metric according to its capabilities.
:param x_plot: The position of the value. This value roughly corresponds
to the x-axis position of the value in a plot. When logging a
singleton value, pass 0 as a value for this parameter.
"""
self.origin: "Metric" = origin
self.name: str = name
self.value: Union[MetricType, AlternativeValues] = value
self.x_plot: int = x_plot
__all__ = [
"MetricType",
"MetricResult",
"AlternativeValues",
"MetricValue",
"TensorImage",
]
|
Python
| 0.000001
|
@@ -990,16 +990,38 @@
t Tensor
+%0Afrom enum import Enum
%0A%0Aif TYP
@@ -1123,16 +1123,743 @@
ue%22%5D%5D%0A%0A%0A
+class LoggingType(Enum):%0A %22%22%22A type for MetricValues.%0A%0A It can be used by MetricValues to choose how they want to be visualize.%0A For example, a 2D tensor could be a line plot or be used to create a%0A histogram.%0A %22%22%22%0A ANY = 1 # generic type. The logger will use the value type to decide how%0A # to serialize it.%0A IMAGE = 2%0A FIGURE = 3 # Matplotlib figure.%0A HISTOGRAM = 4%0A # you can add others here. All Tensorboard metrics are good candidates:%0A # https://pytorch.org/docs/stable/tensorboard.html%0A # just remember to add explicit support to the loggers once you add them.%0A # If a metric is already printed correctly by the loggers (e.g. scalars)%0A # there is no need to add it here.%0A%0A%0A
@datacla
@@ -3459,16 +3459,69 @@
t: int,%0A
+ logging_type: LoggingType = LoggingType.ANY,%0A
):%0A
@@ -4636,16 +4636,89 @@
ameter.%0A
+ :param logging_type: determines how the metric should be logged.%0A
@@ -4851,24 +4851,24 @@
es%5D = value%0A
-
self
@@ -4889,16 +4889,57 @@
x_plot%0A
+ self.logging_type = logging_type%0A
%0A%0A__all_
|
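The added LoggingType enum lets a metric declare how it wants to be rendered instead of forcing every logger to sniff the value's Python type. A minimal usage sketch (the serializer below is invented, not Avalanche API):

from enum import Enum

class LoggingType(Enum):
    ANY = 1
    IMAGE = 2
    FIGURE = 3
    HISTOGRAM = 4

def serialize(value, logging_type=LoggingType.ANY):
    # A logger can branch on the declared type rather than isinstance checks.
    if logging_type is LoggingType.HISTOGRAM:
        return "histogram(%r)" % (value,)
    return repr(value)

print(serialize([1, 2, 2, 3], LoggingType.HISTOGRAM))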
4912027d6cb0f27c736e46498231595f50a36cd3
|
add cv element
|
mriqc/classifier/cv.py
|
mriqc/classifier/cv.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: oesteban
# @Date: 2015-11-19 16:44:27
# @Last Modified by: oesteban
# @Last Modified time: 2016-05-12 17:46:31
"""
MRIQC Cross-validation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import os.path as op
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
import pandas as pd
from sklearn import svm
def main():
"""Entry point"""
parser = ArgumentParser(description='MRI Quality Control',
formatter_class=RawTextHelpFormatter)
g_input = parser.add_argument_group('Inputs')
g_input.add_argument('-X', '--in-training', action='store',
required=True)
g_input.add_argument('-y', '--in-training-labels', action='store',
required=True)
# g_outputs = parser.add_argument_group('Outputs')
opts = parser.parse_args()
with open(opts.in_training, 'r') as fileX:
X_df = pd.read_csv(fileX).sort_values(by=['subject_id'])
with open(opts.in_training_labels, 'r') as fileY:
y_df = pd.read_csv(fileY).sort_values(by=['subject_id'])
# Remove columns that are not IQMs
    columns = X_df.columns.ravel().tolist()  # ravel() yields an ndarray, which has tolist(), not to_list()
columns.remove('subject_id')
columns.remove('session_id')
columns.remove('run_id')
# Remove failed cases from Y, append new columns to X
y_df = y_df[y_df['subject_id'].isin(X_df.subject_id)]
X_df['site'] = y_df.site.values
X_df['rate'] = y_df.rate.values
# Convert all samples to tuples
X = [tuple(x) for x in X_df[columns].values]
clf = svm.SVC()
clf.fit(X, list(y_df.rate.values))
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -501,16 +501,70 @@
port svm
+%0Afrom sklearn.cross_validation import LeaveOneLabelOut
%0A%0Adef ma
@@ -1602,23 +1602,21 @@
)%5D%0A%09
-X_df%5B'
site
-'%5D
+s
=
+list(
y_df
@@ -1627,16 +1627,17 @@
e.values
+)
%0A%09X_df%5B'
@@ -1746,16 +1746,52 @@
alues%5D%0A%0A
+ lolo = LeaveOneLabelOut(labels)%0A
clf
|
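LeaveOneLabelOut yields leave-one-site-out splits, one fold per scanning site; sklearn.cross_validation has since been removed and the same splitter now lives in sklearn.model_selection as LeaveOneGroupOut. An equivalent under a modern scikit-learn (the site grouping is our assumption about the intended CV scheme):

import numpy as np
from sklearn.model_selection import LeaveOneGroupOut

X = np.arange(12).reshape(6, 2)
y = np.array([0, 1, 0, 1, 0, 1])
sites = np.array(['a', 'a', 'b', 'b', 'c', 'c'])

for train_idx, test_idx in LeaveOneGroupOut().split(X, y, groups=sites):
    print("train sites:", sorted(set(sites[train_idx])),
          "test site:", sorted(set(sites[test_idx])))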
2aeda5c12710e197282f015f7e4b8519f1d8bcc5
|
Update tests.py
|
verification/tests.py
|
verification/tests.py
|
"""
TESTS is a dict with all your tests.
Keys for this will be categories' names.
Each test is a dict with
    "input" -- input data for user function
    "answer" -- your right answer
    "explanation" -- an optional key, used for additional info in animation.
"""
TESTS = {
"Basics": [
{
"input": "$5.34",
"two": "$5.34"
}
]
}
|
Python
| 0.000001
|
@@ -348,21 +348,25 @@
%22
-two%22: %22$5.34%22
+answer%22: lambda:0
%0A
|
1d8909fab3db0b7b1ac0b40fea93e35480581bac
|
test transparent access for nix.S
|
nix/test/test_section.py
|
nix/test/test_section.py
|
# Copyright (c) 2014, German Neuroinformatics Node (G-Node)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
import unittest
from nix import *
import nix
class TestSection(unittest.TestCase):
def setUp(self):
self.file = File.open("unittest.h5", FileMode.Overwrite)
self.section = self.file.create_section("test section", "recordingsession")
self.other = self.file.create_section("other section", "recordingsession")
def tearDown(self):
del self.file.sections[self.section.id]
del self.file.sections[self.other.id]
self.file.close()
def test_section_eq(self):
assert(self.section == self.section)
assert(not self.section == self.other)
assert(not self.section == None)
def test_section_id(self):
assert(self.section.id is not None)
def test_section_name(self):
assert(self.section.name is not None)
def test_section_type(self):
def set_none():
self.section.type = None
assert(self.section.type is not None)
self.assertRaises(Exception, set_none)
self.section.type = "foo type"
assert(self.section.type == "foo type")
def test_section_definition(self):
assert(self.section.definition is None)
self.section.definition = "definition"
assert(self.section.definition == "definition")
self.section.definition = None
assert(self.section.definition is None)
def test_section_mapping(self):
assert(self.section.mapping is None)
self.section.mapping = "mapping"
assert(self.section.mapping == "mapping")
self.section.mapping = None
assert(self.section.mapping is None)
def test_section_repository(self):
assert(self.section.repository is None)
self.section.repository = "repository"
assert(self.section.repository == "repository")
self.section.repository = None
assert(self.section.repository is None)
def test_section_sections(self):
assert(len(self.section.sections) == 0)
child = self.section.create_section("test section", "electrode")
assert(child.parent == self.section)
assert(len(self.section.sections) == 1)
assert(child in self.section.sections)
assert(child.id in self.section.sections)
assert("notexist" not in self.section.sections)
assert(child.id == self.section.sections[0].id)
assert(child.id == self.section.sections[-1].id)
del self.section.sections[0]
assert(len(self.section.sections) == 0)
self.section['easy subsection'] = nix.S('electrode')
assert('easy subsection' in [v.name for k, v in self.section.sections.items()])
assert('easy subsection' in self.section.sections)
assert(self.section['easy subsection'].name == 'easy subsection')
#assert('easy subsection' in self.section)
def test_section_find_sections(self):
for i in range(2): self.section.create_section("level1-p0-s" + str(i), "dummy")
for i in range(2): self.section.sections[0].create_section("level2-p1-s" + str(i), "dummy")
for i in range(2): self.section.sections[1].create_section("level2-p2-s" + str(i), "dummy")
for i in range(2): self.section.sections[0].sections[0].create_section("level3-p1-s" + str(i), "dummy")
assert(len(self.section.find_sections()) == 9)
assert(len(self.section.find_sections(limit=1)) == 3)
assert(len(self.section.find_sections(filtr=lambda x : "level2-p1-s" in x.name)) == 2)
assert(len(self.section.find_sections(filtr=lambda x : "level2-p1-s" in x.name, limit=1)) == 0)
assert(len(self.section.find_related()) == 3)
assert(len(self.section.sections[0].find_related()) == 5)
def test_section_properties(self):
assert(len(self.section) == 0)
prop = self.section.create_property("test prop", DataType.String)
assert(len(self.section) == 1)
for p in self.section:
assert(p in self.section)
assert(self.section.has_property_by_name("test prop"))
assert(not self.section.has_property_by_name("notexist"))
assert(self.section.get_property_by_name("test prop") is not None)
assert(self.section.get_property_by_name("notexist") is None)
assert(len(self.section.inherited_properties()) == 1)
assert(prop in self.section)
assert(prop.id in self.section)
assert(prop.name in self.section)
assert("notexist" not in self.section)
props = dict(self.section.items())
assert(props["test prop"] == prop)
assert(prop.id == self.section.props[0].id)
assert(prop.id == self.section.props[-1].id)
#easy prop creation
self.section['ep_str'] = 'str'
self.section['ep_int'] = 23
self.section['ep_float'] = 42.0
self.section['ep_list'] = [1, 2, 3]
self.section['ep_val'] = Value(1.0)
self.section['ep_val'] = 2.0
res = [x in self.section for x in ['ep_str', 'ep_int', 'ep_float']]
assert(all(res))
assert(self.section['ep_str'] == 'str')
assert(self.section['ep_int'] == 23)
assert(self.section['ep_float'] == 42.0)
assert(self.section['ep_list'] == [1, 2, 3])
def create_hetero_section():
self.section['ep_ex'] = [1, 1.0]
self.assertRaises(ValueError, create_hetero_section)
sections = [x.id for x in self.section]
for x in sections:
del self.section[x]
assert(len(self.section) == 0)
|
Python
| 0
|
@@ -2846,16 +2846,194 @@
trode')%0A
+ subject = self.section%5B'subject'%5D = nix.S('subject')%0A %0A assert(self.section%5B'subject'%5D == subject)%0A assert(self.section%5B'subject'%5D.id == subject.id)%0A
|
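Decoded from the %-encoded hunk above, the commit appends a subsection test that exercises dict-style assignment through the nix.S helper. The hunk's context line ("trode')") matches two call sites in test_section_sections, so the placement shown here -- right after the easy-subsection assignment -- is a best guess:

        self.section['easy subsection'] = nix.S('electrode')
        subject = self.section['subject'] = nix.S('subject')

        assert(self.section['subject'] == subject)
        assert(self.section['subject'].id == subject.id)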
decab6827b5dacc21f0263af7e5d895e5a737726
|
Update tests.py
|
verification/tests.py
|
verification/tests.py
|
"""
TESTS is a dict with all your tests.
Keys for this will be categories' names.
Each test is dict with
"input" -- input data for user function
"answer" -- your right answer
"explanation" -- not necessary key, it's using for additional info in animation.
"""
TESTS = {
"Basics": [
{
"input": "$5.34",
"answer": "$5.34"
},
{
"input": "$5,34",
"answer": "$5.34"
},
{
"input": "$222,100,455.34",
"answer": "$222,100,455.34"
},
{
"input": "$222.100.455,34",
"answer": "$222,100,455.34"
},
{
"input": "$222,100,455",
"answer": "$222,100,455"
},
{
"input": "$222.100.455",
"answer": "$222,100,455"
}
],
"Extra": [
{
"input": "$4,13 + $5,24 = $9,37",
"answer": "$4.13 + $5.24 = $9.37"
},
{
"input": "$4,13 + $1.005,24 = $1.009,37",
"answer": "$4.13 + $1,005.24 = $1,009.37"
},
{
"input": "$8.000 - $8.000 = $0",
"answer": "$8,000 - $8,000 = $0"
},
{
"input": "$4.545,45 is less than $5,454.54.",
"answer": "$4,545.45 is less than $5,454.54."
},
{
"input": "$4,545.45 is less than $5.454,54.",
"answer": "$4,545.45 is less than $5,454.54."
},
{
"input": "Our movie tickets cost $12,20.",
"answer": "Our movie tickets cost $12.20."
},
{
"input": "127.255.255.255",
"answer": "127.255.255.255"
},
{
"input": ("Clayton Kershaw $31.000.000\n"
"Zack Greinker $27.000.000\n"
"Adrian Gonzalez $21.857.143\n"),
"answer": ("Clayton Kershaw $31,000,000\n"
"Zack Greinker $27,000,000\n"
"Adrian Gonzalez $21,857,143\n")
}
]
}
|
Python
| 0.000001
|
@@ -1835,25 +1835,24 @@
Zack Greinke
-r
$27.000.0
@@ -1999,17 +1999,16 @@
Greinke
-r
$27,0
|
0626c8db3f2287d78c467c194e01cf004f0c7e78
|
Convert simple-mapped results back to Series.
|
pandas/util/map.py
|
pandas/util/map.py
|
import numpy as np
from pandas import _tseries as lib
from pandas import notnull, Series
from functools import wraps
class repeat(object):
def __init__(self, obj):
self.obj = obj
def __getitem__(self, i):
return self.obj
class azip(object):
def __init__(self, *args):
self.cols = []
for a in args:
if np.isscalar(a):
self.cols.append(repeat(a))
else:
self.cols.append(a)
def __getitem__(self, i):
return [col[i] for col in self.cols]
def map_iter_args(arr, f, otherargs, n_otherargs, required, n_results):
'''
Substitute for np.vectorize with pandas-friendly dtype inference
Parameters
----------
arr : ndarray
f : function
Returns
-------
mapped : ndarray
'''
n = len(arr)
result = np.empty((n, n_results), dtype=object)
for i, val in enumerate(arr):
args = otherargs[i]
if notnull(val) and all(notnull(args[r]) for r in required):
result[i] = f(val, *args)
else:
result[i] = [np.nan] * n_results
return [lib.maybe_convert_objects(col, try_float=0) for col in result.T]
def auto_map(arr, f, otherargs, n_results=1, required='all'):
if all(np.isscalar(a) for a in otherargs):
return lib.map_infer(arr, lambda v: f(v, *otherargs))
n_otherargs = len(otherargs)
if required == 'all':
required = list(range(n_otherargs))
res = map_iter_args(arr, f, azip(*otherargs), n_otherargs, required, n_results)
res = [Series(col, index=arr.index, copy=False) for col in res]
if n_results == 1:
return res[0]
return res
def mapwrap(f, n_results_default=1, required='all'):
@wraps(f)
def wrapped(arr, otherargs=(), n_results=None):
n_results = n_results or n_results_default
return auto_map(arr, f, otherargs, n_results, required)
return wrapped
|
Python
| 0.000129
|
@@ -1320,20 +1320,19 @@
re
-turn
+s =
lib.map
@@ -1367,24 +1367,80 @@
otherargs))%0A
+ return Series(res, index=arr.index, copy=False)%0A
%0A n_o
@@ -1836,16 +1836,17 @@
ed(arr,
+*
otherarg
@@ -1850,11 +1850,8 @@
args
-=()
, n_
|
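Decoded, the hunks do two things: the scalar fast path of auto_map now wraps the raw ndarray from lib.map_infer back into a Series (the stated point of the commit), and mapwrap's inner function takes its extra arguments variadically. A sketch of the patched code; note that as decoded, n_results becomes a keyword-only parameter, which requires Python 3 syntax:

def auto_map(arr, f, otherargs, n_results=1, required='all'):
    if all(np.isscalar(a) for a in otherargs):
        res = lib.map_infer(arr, lambda v: f(v, *otherargs))
        return Series(res, index=arr.index, copy=False)
    # ... non-scalar branch unchanged (falls through to map_iter_args) ...

def mapwrap(f, n_results_default=1, required='all'):
    @wraps(f)
    def wrapped(arr, *otherargs, n_results=None):
        n_results = n_results or n_results_default
        return auto_map(arr, f, otherargs, n_results, required)
    return wrapped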
42f74f304d0ac404f17d6489033b6140816cb194
|
Implement Stonesplinter Trogg, Burly Rockjaw Trogg, Ship's Cannon
|
fireplace/cards/gvg/neutral_common.py
|
fireplace/cards/gvg/neutral_common.py
|
from ..utils import *
##
# Minions
# Explosive Sheep
class GVG_076:
def deathrattle(self):
for target in self.game.board:
self.hit(target, 2)
# Clockwork Gnome
class GVG_082:
deathrattle = giveSparePart
# Micro Machine
class GVG_103:
def TURN_BEGIN(self, player):
# That card ID is not a mistake
self.buff(self, "GVG_076a")
# Pistons
class GVG_076a:
Atk = 1
|
Python
| 0
|
@@ -31,16 +31,618 @@
inions%0A%0A
+# Stonesplinter Trogg%0Aclass GVG_067:%0A%09def CARD_PLAYED(self, player, card):%0A%09%09if player is not self.controller and card.type == CardType.SPELL:%0A%09%09%09self.buff(%22GVG_067a%22)%0A%0Aclass GVG_067a:%0A%09Atk = 1%0A%0A%0A# Burly Rockjaw Trogg%0Aclass GVG_068:%0A%09def CARD_PLAYED(self, player, card):%0A%09%09if player is not self.controller and card.type == CardType.SPELL:%0A%09%09%09self.buff(%22GVG_068a%22)%0A%0Aclass GVG_068a:%0A%09Atk = 2%0A%0A%0A# Ship's Cannon%0Aclass GVG_075:%0A%09def OWN_MINION_SUMMONED(self, minion):%0A%09%09if minion.race == Race.PIRATE:%0A%09%09%09targets = self.controller.getTargets(TARGET_ENEMY_CHARACTERS)%0A%09%09%09self.hit(random.choice(targets), 2)%0A%0A%0A
# Explos
|
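The %-encoded payload decodes to three small event handlers. CardType, Race, TARGET_ENEMY_CHARACTERS and random are assumed to come in via the module's wildcard utils import:

# Stonesplinter Trogg
class GVG_067:
    def CARD_PLAYED(self, player, card):
        if player is not self.controller and card.type == CardType.SPELL:
            self.buff("GVG_067a")

class GVG_067a:
    Atk = 1

# Burly Rockjaw Trogg
class GVG_068:
    def CARD_PLAYED(self, player, card):
        if player is not self.controller and card.type == CardType.SPELL:
            self.buff("GVG_068a")

class GVG_068a:
    Atk = 2

# Ship's Cannon
class GVG_075:
    def OWN_MINION_SUMMONED(self, minion):
        if minion.race == Race.PIRATE:
            targets = self.controller.getTargets(TARGET_ENEMY_CHARACTERS)
            self.hit(random.choice(targets), 2)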
0bd2fffcab47c79999e5bf20b881a69193855bd9
|
Fix install script
|
dstat_plugins/__init__.py
|
dstat_plugins/__init__.py
|
import shutil
import sys
import pkg_resources as pr
def install():
destdir = sys.argv[1]
datadir = pr.resource_filename(__name__, 'plugins/dstat_mysql5_innodb.py')
shutil.copytree(datadir, destdir)
|
Python
| 0.000001
|
@@ -1,16 +1,28 @@
+import glob%0A
import shutil%0Aim
@@ -29,16 +29,41 @@
port sys
+%0Aimport os%0Aimport os.path
%0A%0Aimport
@@ -164,52 +164,350 @@
ame(
-__name__, 'plugins/dstat_mysql5_innodb.py')%0A
+'dstat_plugins', 'plugins')%0A try:%0A os.makedirs(destdir)%0A except OSError:%0A if not os.path.isdir(destdir):%0A sys.stderr.write(%22%7B%7D could not be created and does not %22%0A %22exist.%5Cn%22.format(destdir))%0A sys.exit(1)%0A for plugin in glob.glob(os.path.join(datadir, 'dstat_*')):%0A
@@ -521,20 +521,15 @@
copy
-tree(datadir
+(plugin
, de
|
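Applying the hunks, the fixed installer resolves the plugins directory instead of a single hard-coded file, creates the destination if needed, and copies each dstat_* plugin file individually (shutil.copytree fails when the destination already exists, which is what broke the original):

import glob
import shutil
import sys
import os
import os.path

import pkg_resources as pr

def install():
    destdir = sys.argv[1]
    datadir = pr.resource_filename('dstat_plugins', 'plugins')
    try:
        os.makedirs(destdir)
    except OSError:
        if not os.path.isdir(destdir):
            sys.stderr.write("{} could not be created and does not "
                             "exist.\n".format(destdir))
            sys.exit(1)
    for plugin in glob.glob(os.path.join(datadir, 'dstat_*')):
        shutil.copy(plugin, destdir)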
2cc505d3a3c54f3ce1e91941a905c6a298a46d05
|
Fix classifiers.
|
narcissus.hub/setup.py
|
narcissus.hub/setup.py
|
# This file is part of Narcissus
# Copyright (C) 2011-2013 Ralph Bean
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup, find_packages
import sys
f = open('README.rst')
long_description = f.read().strip()
f.close()
setup(
name='narcissus.hub',
version='0.9.0.1',
description='Hub components for Narcissus, realtime log visualization',
long_description=long_description,
license="AGPLv3+",
author='Ralph Bean',
author_email='rbean@redhat.com',
url='http://narcissus.ws/',
install_requires=[
"moksha.hub",
"pygeoip",
"geojson",
],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
namespace_packages=['narcissus'],
classifiers=[
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
"Topic :: Scientific/Engineering :: Visualization"
"Topic :: System :: Logging"
"Topic :: System :: Monitoring",
"Intended Audience :: System Administrators",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
],
entry_points={
'moksha.stream' : (
## Enable this to *test* narcissus. It produces random ips.
#'random_lol = narcissus.hub.producers:RandomIPProducer',
# We used to keep these in an rrd database. That was too heavy.
#'series_pro = narcissus.hub.consumers:TimeSeriesProducer',
),
'moksha.consumer': (
'raw_ip = narcissus.hub.consumers:RawIPConsumer',
'httpdlight = narcissus.hub.consumers:HttpLightConsumer',
'latlon2geo = narcissus.hub.consumers:LatLon2GeoJsonConsumer',
# We used to keep these in an rrd database. That was too heavy.
#'series_con = narcissus.hub.consumers:TimeSeriesConsumer',
),
},
)
|
Python
| 0.000007
|
@@ -1515,16 +1515,17 @@
ization%22
+,
%0A
@@ -1553,16 +1553,17 @@
Logging%22
+,
%0A
|
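The two added commas matter because adjacent string literals concatenate at compile time in Python, so the original classifiers list silently fused three entries into one bogus string. A minimal, self-contained demonstration of the failure mode:

classifiers = [
    "Topic :: Scientific/Engineering :: Visualization"
    "Topic :: System :: Logging"
    "Topic :: System :: Monitoring",
]
# One element, not three -- the literals were joined into a single string:
print(len(classifiers))  # -> 1
print(classifiers[0])    # -> '...VisualizationTopic :: System :: Logging...'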
624599bc0172e9166536abfc6be254b5117ac64c
|
Add error handling in plugin installation process
|
nailgun/nailgun/plugin/process.py
|
nailgun/nailgun/plugin/process.py
|
# -*- coding: utf-8 -*-
import traceback
import time
from multiprocessing import Queue, Process
from nailgun.task.helpers import TaskHelper
from nailgun.logger import logger
from nailgun.db import make_session
import nailgun.plugin.manager
PLUGIN_PROCESSING_QUEUE = None
def get_queue():
global PLUGIN_PROCESSING_QUEUE
if not PLUGIN_PROCESSING_QUEUE:
PLUGIN_PROCESSING_QUEUE = Queue()
return PLUGIN_PROCESSING_QUEUE
class PluginProcessor(Process):
def __init__(self):
Process.__init__(self)
self.db = make_session()
self.plugin_manager = nailgun.plugin.manager.PluginManager(self.db)
self.queue = get_queue()
def run(self):
while True:
try:
task_uuid = self.queue.get()
self.plugin_manager.process(task_uuid)
except Exception as exc:
# TaskHelper.set_error(task_uuid, exc)
logger.error(traceback.format_exc())
time.sleep(2)
|
Python
| 0
|
@@ -90,16 +90,82 @@
Process%0A
+from sqlalchemy import update%0A%0Afrom nailgun.api.models import Task
%0Afrom na
@@ -764,16 +764,45 @@
e True:%0A
+ task_uuid = None%0A
@@ -967,20 +967,46 @@
-# TaskHelper
+if task_uuid:%0A self
.set
@@ -1111,8 +1111,203 @@
leep(2)%0A
+%0A def set_error(self, task_uuid, msg):%0A self.db.query(Task).filter_by(uuid=task_uuid).update(%7B%0A 'status': 'error',%0A 'progress': 100,%0A 'msg': str(msg)%7D)%0A
|
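Decoded, the patch initializes task_uuid before the try block so the except clause can reference it safely, records the error on the task when a uuid is known, and adds a set_error helper. New imports (the sqlalchemy.update import appears unused by the decoded hunks):

from sqlalchemy import update

from nailgun.api.models import Task

And inside PluginProcessor:

    def run(self):
        while True:
            task_uuid = None
            try:
                task_uuid = self.queue.get()
                self.plugin_manager.process(task_uuid)
            except Exception as exc:
                if task_uuid:
                    self.set_error(task_uuid, exc)
                logger.error(traceback.format_exc())
                time.sleep(2)

    def set_error(self, task_uuid, msg):
        self.db.query(Task).filter_by(uuid=task_uuid).update({
            'status': 'error',
            'progress': 100,
            'msg': str(msg)})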
9a2b4cfe9b1c683671cb3b4951aa014f3f49d45c
|
put back parsing the english wiki
|
parseWikt.py
|
parseWikt.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.ElementTree
import xml.dom.minidom
import re
import cPickle as pickle
from mwlib.uparser import parseString
from mwlib.xhtmlwriter import MWXHTMLWriter
import xml.etree.ElementTree as ET
import bz2
import wiktionaryGet
def parseBGwikt():
wiktionaryGet.getWiktionaries(['bg'])
fh = bz2.BZ2File("bgwiktionary-latest-pages-meta-current.xml.bz2")
articles = {}
types = {}
debug = True
if debug:
try:
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed()
except:
from IPython import embed
ipshell = embed
vizhRE = re.compile("#виж", re.UNICODE)
vizhCutRE = re.compile("#виж \[\[(.*)\]\]", re.UNICODE)
tipRE = re.compile("<title>Уикиречник:Български/Типове думи", re.UNICODE)
tipCutRE = re.compile("Уикиречник:Български/Типове думи/([0-9].*)", re.UNICODE)
linkCutRE = re.compile("\[\[(.*)\]\]", re.UNICODE)
keep = False
read = False
while 1:
line = fh.readline()
if not line:
break
if line == " <page>\n":
article = "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>"
read = True
elif line == " </page>\n":
read = False
if keep:
keep = False
article += line
root = xml.dom.minidom.parseString(article)
if len(root.getElementsByTagName("text")[0].childNodes) > 0:
title = root.getElementsByTagName("title")[0].firstChild.data
text = root.getElementsByTagName("text")[0].firstChild.data
if vizhCutRE.search(text.encode('utf-8')):
articles[title] = vizhCutRE.search(text.encode('utf-8')).group(1).decode('utf-8')
elif tipCutRE.search(title.encode('utf-8')):
articles[tipCutRE.search(title.encode('utf-8')).group(1).decode('utf-8')] = text
elif title.encode('utf-8') == "Уикиречник:Български/Типове думи":
findNumberLink = re.compile("\[\[/(.*?)/\]\]")
for line in text.split('\n'):
if line.startswith('== '):
generalCategory = line[3:-3]
subCategory = ''
subSubCategory = ''
if line.startswith('=== '):
subCategory = line[4:-4]
subSubCategory = ''
if line.startswith('==== '):
subSubCategory = line[5:-5]
if line.startswith('['):
for number in findNumberLink.finditer(line):
types[number.group(1)] = (generalCategory, subCategory, subSubCategory, )
if read:
if vizhRE.search(line) or tipRE.search(line):
keep = True
article += line
wordLists = sorted([ key for key in articles.keys() if key.startswith(tuple([str(x) for x in range(0,10)])) ])
wordType = {}
for wordList in wordLists:
if wordList.count("/") > 0:
generalType = wordList[:wordList.index("/")]
else:
generalType = wordList
for line in articles[wordList].split("\n"):
if line.startswith("["):
wordType[linkCutRE.search(line.encode('utf-8')).group(1).decode('utf-8')] = generalType
del articles[wordList]
bgWiktBG = bz2.BZ2File("bgWiktBG.pickle.bz2",'wb')
pickle.dump((articles, wordType, types), bgWiktBG, pickle.HIGHEST_PROTOCOL)
bgWiktBG.close()
def parseENwikt():
wiktionaryGet.getWiktionaries(['en'])
fh = bz2.BZ2File("enwiktionary-latest-pages-meta-current.xml.bz2")
bg_en = {}
en_bg = {}
debug = False
if debug:
try:
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed()
except:
from IPython import embed
ipshell = embed
cyrlRE = re.compile(ur'[\u0400-\u04FF\u0500-\u052F]', re.UNICODE)
bulRE = re.compile("[bB]ulgarian", re.UNICODE)
bulgarianSingle = re.compile("\* [bB]ulgarian", re.UNICODE)
bulgarianSectionStart = re.compile("^==Bulgarian==$", re.UNICODE)
bulgarianSectionEnd = re.compile("^==[A-Za-z-]+==$", re.UNICODE)
keep = False
read = False
w = MWXHTMLWriter()
while 1:
line = fh.readline()
if not line:
break
if line == " <page>\n":
article = "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>"
read = True
elif line == " </page>\n":
read = False
if keep:
keep = False
article += line
root = xml.dom.minidom.parseString(article)
if len(root.getElementsByTagName("text")[0].childNodes) > 0:
title = root.getElementsByTagName("title")[0].firstChild.data
text = root.getElementsByTagName("text")[0].firstChild.data
newText = ""
Bulg = False
for line in text.split('\n'):
if bulgarianSectionStart.search(line):
Bulg = True
elif bulgarianSectionEnd.search(line):
Bulg = False
if Bulg == True:
newText += line + '\n'
elif bulgarianSingle.search(line):
newText += line + '\n'
if newText is not "":
p = parseString(title,newText)
if cyrlRE.search(title):
if debug:
print "bg_en = " + newText.encode('utf-8')
ipshell()
bg_en[title] = ''.join(ET.tostring(w.write(p),encoding="utf-8",method="html").split('\n'))
else:
if debug:
print "en_bg = " + newText.encode('utf-8')
ipshell()
en_bg[title] = ''.join(ET.tostring(w.write(p),encoding="utf-8",method="html").split('\n'))
if read:
if bulRE.search(line):
keep = True
article += line
enWiktBG = bz2.BZ2File("enWiktBG.pickle.bz2",'wb')
pickle.dump((bg_en,en_bg), enWiktBG, pickle.HIGHEST_PROTOCOL)
enWiktBG.close()
if __name__ == '__main__':
#parseENwikt()
parseBGwikt()
|
Python
| 0.000004
|
@@ -6798,17 +6798,16 @@
_':%0A
-#
parseENw
|
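The one-character hunk removes the comment marker, so the entry point runs both passes again:

if __name__ == '__main__':
    parseENwikt()
    parseBGwikt()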
13ae2f3a50fb75555e763f7c41be9519d6d711e4
|
Use the right var name
|
js/loopjsfunfuzz.py
|
js/loopjsfunfuzz.py
|
#!/usr/bin/env python
from __future__ import with_statement
import os
import random
import shutil
import subprocess
import sys
import time
from optparse import OptionParser
import compareJIT
import jsInteresting
import pinpoint
import shellFlags
p0 = os.path.dirname(os.path.abspath(__file__))
interestingpy = os.path.abspath(os.path.join(p0, 'jsInteresting.py'))
p1 = os.path.abspath(os.path.join(p0, os.pardir, 'util'))
sys.path.append(p1)
from subprocesses import createWtmpDir
from fileManipulation import fuzzSplice, linesWith, writeLinesToFile
import lithOps
def parseOpts(args):
parser = OptionParser()
parser.disable_interspersed_args()
parser.add_option("--comparejit",
action = "store_true", dest = "useCompareJIT",
default = False,
help = "After running the fuzzer, run the FCM lines against the engine " + \
"in two configurations and compare the output.")
parser.add_option("--random-flags",
action = "store_true", dest = "randomFlags",
default = False,
help = "Pass a random set of flags (-m, -j, etc) to the js engine")
parser.add_option("--fuzzjs",
action = "store", dest = "fuzzjs",
default = os.path.join(p0, "jsfunfuzz.js"),
help = "Which fuzzer to run (e.g. jsfunfuzz.js)")
parser.add_option("--repo",
action = "store", dest = "repo",
default = os.path.expanduser("~/trees/mozilla-central/"),
help = "The hg repository (e.g. ~/trees/mozilla-central/), for bisection")
parser.add_option("--valgrind",
action = "store_true", dest = "valgrind",
default = False,
help = "use valgrind with a reasonable set of options")
options, args = parser.parse_args(args)
if options.valgrind and options.useCompareJIT:
print "Note: When running comparejit, the --valgrind option will be ignored"
options.timeout = int(args[0])
options.knownPath = os.path.expanduser(args[1])
options.jsEngine = args[2]
options.engineFlags = args[3:]
return options
def showtail(filename):
# FIXME: Get jsfunfuzz to output start & end of interesting result boundaries instead of this.
cmd = []
cmd.extend(['tail', '-n', '20'])
cmd.append(filename)
print ' '.join(cmd)
print
subprocess.check_call(cmd)
print
print
def many_timed_runs(targetTime, args):
options = parseOpts(args)
engineFlags = options.engineFlags # engineFlags is overwritten later if --random-flags is set.
wtmpDir = createWtmpDir(os.getcwdu())
startTime = time.time()
iteration = 0
while True:
if targetTime and time.time() > startTime + targetTime:
print "Out of time!"
if len(os.listdir(wtmpDirNum)) == 0:
os.rmdir(wtmpDirNum)
return (lithOps.HAPPY, None)
# Construct command needed to loop jsfunfuzz fuzzing.
jsInterestingArgs = []
jsInterestingArgs.append('--timeout=' + str(options.timeout))
if options.valgrind:
jsInterestingArgs.append('--valgrind')
jsInterestingArgs.append(options.knownPath)
jsInterestingArgs.append(options.jsEngine)
if options.randomFlags:
engineFlags = shellFlags.randomFlagSet(options.jsEngine)
jsInterestingArgs.extend(engineFlags)
jsInterestingArgs.extend(['-e', 'maxRunTime=' + str(options.timeout*(1000/2))])
jsInterestingArgs.extend(['-f', options.fuzzjs])
jsunhappyOptions = jsInteresting.parseOptions(jsInterestingArgs)
iteration += 1
logPrefix = wtmpDir + os.sep + "w" + str(iteration)
level = jsInteresting.jsfunfuzzLevel(jsunhappyOptions, logPrefix)
if level != jsInteresting.JS_FINE:
showtail(logPrefix + "-out.txt")
showtail(logPrefix + "-err.txt")
# splice jsfunfuzz.js with `grep FRC wN-out`
filenameToReduce = logPrefix + "-reduced.js"
[before, after] = fuzzSplice(options.fuzzjs)
with open(logPrefix + '-out.txt', 'rb') as f:
newfileLines = before + linesWith(f.readlines(), "FRC") + after
writeLinesToFile(newfileLines, logPrefix + "-orig.js")
writeLinesToFile(newfileLines, filenameToReduce)
# Run Lithium and autobisect (make a reduced testcase and find a regression window)
itest = [interestingpy]
if options.valgrind:
itest.append("--valgrind")
itest.append("--minlevel=" + str(level))
itest.append("--timeout=" + str(options.timeout))
itest.append(options.knownPath)
alsoRunChar = (level > jsInteresting.JS_DECIDED_TO_EXIT)
alsoReduceEntireFile = (level > jsInteresting.JS_OVERALL_MISMATCH)
(lithResult, lithDetails) = pinpoint.pinpoint(itest, logPrefix, options.jsEngine, engineFlags, filenameToReduce,
options.repo, targetTime, alsoRunChar=alsoRunChar,
alsoReduceEntireFile=alsoReduceEntireFile)
if targetTime:
return (lithResult, lithDetails)
else:
shellIsDeterministic = os.path.join('build', 'dist', 'js') not in options.jsEngine # bug 751700
flagsAreDeterministic = "--dump-bytecode" not in engineFlags and '-D' not in engineFlags
if options.useCompareJIT and level == jsInteresting.JS_FINE and \
shellIsDeterministic and flagsAreDeterministic:
with open(logPrefix + '-out.txt', 'rb') as f:
jitcomparelines = linesWith(f.readlines(), "FCM") + \
["try{print(uneval(this));}catch(e){}"]
jitcomparefilename = logPrefix + "-cj-in.js"
writeLinesToFile(jitcomparelines, jitcomparefilename)
(lithResult, lithDetails) = compareJIT.compareJIT(options.jsEngine, engineFlags, jitcomparefilename,
logPrefix + "-cj", options.knownPath, options.repo,
options.timeout, targetTime)
if lithResult == lithOps.HAPPY:
os.remove(jitcomparefilename)
if targetTime and lithResult != lithOps.HAPPY:
jsInteresting.deleteLogs(logPrefix)
return (lithResult, lithDetails)
jsInteresting.deleteLogs(logPrefix)
if __name__ == "__main__":
many_timed_runs(None, sys.argv[1:])
|
Python
| 0.016055
|
@@ -2968,19 +2968,16 @@
(wtmpDir
-Num
)) == 0:
@@ -3013,11 +3013,8 @@
pDir
-Num
)%0A
|
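Only wtmpDir is ever bound (by createWtmpDir at the top of many_timed_runs); wtmpDirNum would have raised a NameError the first time the out-of-time branch ran. The patched branch reads:

        if targetTime and time.time() > startTime + targetTime:
            print "Out of time!"
            if len(os.listdir(wtmpDir)) == 0:
                os.rmdir(wtmpDir)
            return (lithOps.HAPPY, None)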
49ab81275b0e29281703257000c62a54f9627df8
|
fix property usage
|
polyjit/buildbot/builders/slurm.py
|
polyjit/buildbot/builders/slurm.py
|
import sys
from polyjit.buildbot.builders import register
from polyjit.buildbot import slaves
from polyjit.buildbot.utils import (builder, define, git, cmd, trigger, ip,
mkdir, s_sbranch, s_force, s_trigger,
hash_download_from_master, clean_unpack)
from polyjit.buildbot.repos import make_cb, codebases
from buildbot.plugins import util
from buildbot.changes import filter
codebase = make_cb(['benchbuild'])
P = util.Property
BuildFactory = util.BuildFactory
def has_munged(host):
if "has_munged" in host["properties"]:
return host["properties"]["has_munged"]
return False
accepted_builders = slaves.get_hostlist(slaves.infosun, predicate=has_munged)
# yapf: disable
def configure(c):
llvm_dl = hash_download_from_master("public_html/llvm.tar.gz",
"llvm.tar.gz", "llvm")
polyjit_dl = hash_download_from_master("public_html/polyjit.tar.gz",
"polyjit.tar.gz", "polyjit")
steps = [
# trigger(schedulerNames=['trigger-build-llvm', 'trigger-build-jit']),
define("scratch", ip("/scratch/pjtest/%(prop:buildnumber)s/"))
]
steps.extend(llvm_dl)
steps.extend(clean_unpack("llvm.tar.gz", "llvm"))
steps.extend(polyjit_dl)
steps.extend(clean_unpack("polyjit.tar.gz", "polyjit"))
steps.extend([
define("BENCHBUILD_ROOT", ip("%(prop:builddir)s/build/benchbuild/")),
git('benchbuild', 'develop', codebases, workdir=P("BENCHBUILD_ROOT")),
])
steps.extend([
define('benchbuild', ip('%(prop:scratch)s/env/bin/benchbuild')),
define('llvm', ip('%(prop:scratch)s/llvm')),
define('polyjit', ip('%(prop:scratch)s/polyjit')),
mkdir(P("scratch")),
cmd('virtualenv', '-ppython3', ip('%(prop:scratch)s/env/')),
cmd(ip('%(prop:scratch)s/env/bin/pip3'), 'install', '--upgrade', '.',
workdir='build/benchbuild'),
cmd("rsync", "-var", "./", P("scratch")),
cmd(P('benchbuild'), 'bootstrap', env={
'BB_ENV_COMPILER_PATH': ip('%(prop:llvm)s/bin'),
'BB_ENV_COMPILER_LD_LIBRARY_PATH':
ip('%(prop:llvm)s/lib:%(prop:polyjit)s/lib'),
'BB_ENV_LOOKUP_PATH':
ip('%(prop:llvm)s/bin:%(prop:polyjit)s/bin'),
'BB_ENV_LOOKUP_LD_LIBRARY_PATH':
ip('%(prop:llvm)s/lib:%(prop:polyjit)s/lib'),
'BB_LLVM_DIR': ip('%(prop:scratch)s/llvm'),
'BB_LIKWID_PREFIX': '/usr',
'BB_PAPI_INCLUDE': '/usr/include',
'BB_PAPI_LIBRARY': '/usr/lib',
'BB_SRC_DIR': ip('%(prop:scratch)s/benchbuild'),
'BB_UNIONFS_ENABLE': 'false'
},
workdir=P('%(prop:scratch)s')),
])
c['builders'].append(builder("build-slurm-set", None, accepted_builders,
factory=BuildFactory(steps)))
# yapf: enable
def schedule(c):
c['schedulers'].extend([
s_sbranch("build-slurm-set-sched", codebase, ["build-slurm-set"],
change_filter=filter.ChangeFilter(branch_re='next|develop'),
treeStableTimer=2 * 60),
s_force("force-build-slurm-set", codebase, ["build-slurm-set"]),
s_trigger("trigger-slurm-set", codebase, ['build-slurm-set'])
])
register(sys.modules[__name__])
|
Python
| 0.000002
|
@@ -2839,32 +2839,23 @@
kdir=P('
-%25(prop:
scratch
-)s
')),%0A
|
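The fix reflects how buildbot's util.Property works: it takes a property name, not an interpolation pattern, so the original value could never resolve. Pattern syntax belongs to the module's ip helper (presumably wrapping util.Interpolate). Decoded:

# before: Property() handed a pattern it cannot resolve
workdir=P('%(prop:scratch)s')
# after: reference the property by name
workdir=P('scratch')
# (string interpolation would instead use ip('%(prop:scratch)s'))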
3a910621b36f0555b4a16f22582313333e162093
|
Check for icons when displaying thumbnails
|
paw/admin.py
|
paw/admin.py
|
from django.contrib import admin
from paw.models import TextLink, IconLink, IconFolder, Page, PageTextLink, PageIconDisplay, IconFolderIcon, EntryPoint
from adminsortable.admin import NonSortableParentAdmin, SortableStackedInline, SortableTabularInline, SortableAdmin
class PageTextLinkInline(SortableStackedInline):
model = PageTextLink
extra = 1
class PageIconDisplayInline(SortableTabularInline):
model = PageIconDisplay
extra = 1
fields = ['icon', 'icon_thumbnail']
readonly_fields = ['icon_thumbnail']
def icon_thumbnail(self, o):
return '<img src="{url:}" />'.format(url=o.icon.display_icon.thumbnail['50x50'].url)
icon_thumbnail.short_description = 'Thumbnail'
icon_thumbnail.allow_tags = True
class IconFolderIconInline(SortableTabularInline):
model = IconFolderIcon
extra = 1
def icon_thumbnail(self, o):
return '<img src="{url:}" />'.format(url=o.icon.display_icon.thumbnail['50x50'].url)
icon_thumbnail.short_description = 'Thumbnail'
icon_thumbnail.allow_tags = True
fields = ['icon', 'icon_thumbnail']
readonly_fields = ['icon_thumbnail']
class PageAdmin(NonSortableParentAdmin):
inlines = [PageIconDisplayInline, PageTextLinkInline]
prepopulated_fields = {"slug": ("title",)}
class IconFolderAdmin(NonSortableParentAdmin):
inlines = [IconFolderIconInline]
list_display = ['title', 'admin_icon']
readonly_fields = ['form_icon']
def admin_icon(self, o):
return '<img src="{url:}" />'.format(url=o.display_icon.thumbnail['50x50'].url)
def form_icon(self, o):
return '<img src="{url:}" />'.format(url=o.display_icon.thumbnail['100x100'].url)
admin_icon.short_description = 'Icon'
admin_icon.allow_tags = True
form_icon.short_description = 'Thumbnail'
form_icon.allow_tags = True
fields = ['display_icon', 'form_icon', 'title', 'internal_description']
class IconLinkAdmin(admin.ModelAdmin):
list_display=['title', 'admin_icon']
fields = ['display_icon', 'form_icon', 'title', 'internal_description', 'url', 'check_url', 'start_hidden', 'mac_pc_only']
readonly_fields = ['form_icon']
def admin_icon(self, o):
return '<img src="{url:}" />'.format(url=o.display_icon.thumbnail['50x50'].url)
def form_icon(self, o):
return '<img src="{url:}" />'.format(url=o.display_icon.thumbnail['100x100'].url)
admin_icon.short_description = 'Icon'
admin_icon.allow_tags = True
form_icon.short_description = 'Thumbnail'
form_icon.allow_tags = True
class EntryPointAdmin(admin.ModelAdmin):
list_display=['__str__', 'page']
# Register your models here.
admin.site.register(TextLink)
admin.site.register(Page, PageAdmin)
admin.site.register(IconLink, IconLinkAdmin)
admin.site.register(IconFolder, IconFolderAdmin)
admin.site.register(EntryPoint, EntryPointAdmin)
|
Python
| 0
|
@@ -547,24 +547,52 @@
l(self, o):%0A
+ if o.icon.display_icon:%0A
return
@@ -874,32 +874,62 @@
bnail(self, o):%0A
+ if o.icon.display_icon:%0A
return '%3Cimg
@@ -1500,32 +1500,57 @@
_icon(self, o):%0A
+ if o.display_icon:%0A
return '%3Cimg
@@ -1638,32 +1638,57 @@
_icon(self, o):%0A
+ if o.display_icon:%0A
return '%3Cimg
@@ -2250,32 +2250,57 @@
_icon(self, o):%0A
+ if o.display_icon:%0A
return '%3Cimg
@@ -2392,24 +2392,49 @@
n(self, o):%0A
+ if o.display_icon:%0A
return '
|
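Every thumbnail/icon helper gains the same guard, so objects without an uploaded icon no longer crash on .thumbnail; when the icon is missing, the method now implicitly returns None. For example, PageIconDisplayInline.icon_thumbnail becomes:

    def icon_thumbnail(self, o):
        if o.icon.display_icon:
            return '<img src="{url:}" />'.format(url=o.icon.display_icon.thumbnail['50x50'].url)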
00556c84e23dd86eb4ca08ba4c6238425a3eba7e
|
Create Preparation model
|
project_fish/whats_fresh/models.py
|
project_fish/whats_fresh/models.py
|
from django.contrib.gis.db import models
import os
from phonenumber_field.modelfields import PhoneNumberField
class Image(models.Model):
"""
The Image model holds an image and related data.
The Created and Modified time fields are created automatically by
Django when the object is created or modified, and can not be altered.
This model uses Django's built-ins for holding the image location and
data in the database, as well as for keeping created and modified
timestamps.
"""
image = models.ImageField(upload_to='%Y/%m/%d')
caption = models.TextField()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Vendor(models.Model):
"""
The Vendor model holds the information for a vendor, including the
    geographic location as a pair of latitudinal/longitudinal coordinates,
a street address, and an optional text description of their location
(in case the address/coordinates are of, say, a dock instead of a shop).
"""
pass
class Product(models.Model):
"""
The Product model holds the information for a product, including the
origin, season, market price, and availability.
In addition, it holds a foreign key to the image and story related to the
product.
"""
name = models.TextField()
variety = models.TextField()
alt_name = models.TextField()
description = models.TextField()
origin = models.TextField()
season = models.TextField()
available = models.NullBooleanField()
market_price = models.TextField()
link = models.URLField()
image_id = models.ForeignKey('Image')
story_id = models.ForeignKey('Story')
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Story(models.Model):
pass
class Preparation(models.Model):
"""
The Preparation model contains possible preparations of product, to be
associated many-to-many with product (a product can have one or more
preparations, preparations apply to many products). Preparations may be
things like 'frozen', 'dried', 'fresh', 'live', etc, to be defined by
Sea Grant data input.
"""
pass
|
Python
| 0
|
@@ -2218,25 +2218,124 @@
input.%0A %22%22%22%0A
-pass
+name = models.TextField()%0A description = models.TextField()%0A additional_info = models.TextField()
%0A
|
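Decoded, the placeholder body is replaced with three text fields:

class Preparation(models.Model):
    """ ... (docstring unchanged) ... """
    name = models.TextField()
    description = models.TextField()
    additional_info = models.TextField()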
6c89b28d63df06020805ef8d16e9a084037cecf7
|
fix egg-path bug
|
projects/imbu/engine/fluent_api.py
|
projects/imbu/engine/fluent_api.py
|
# ----------------------------------------------------------------------
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Implements Imbu's web API.
"""
import simplejson as json
import logging
import os
import pkg_resources
import web
from htmresearch.frameworks.nlp.imbu import ImbuModels
from htmresearch.frameworks.nlp.model_factory import ClassificationModelTypes
g_log = logging.getLogger(__name__)
# No training in Imbu web app, user must specify loadPath
if "IMBU_LOAD_PATH_PREFIX" in os.environ:
_IMBU_LOAD_PATH_PREFIX = os.environ["IMBU_LOAD_PATH_PREFIX"]
else:
raise KeyError("Required IMBU_LOAD_PATH_PREFIX missing from environment")
g_imbus = {} # Global ImbuModels cache
g_models = {} # Global NLP model cache
for datasetName in os.listdir(_IMBU_LOAD_PATH_PREFIX):
datasetPath = os.path.join(_IMBU_LOAD_PATH_PREFIX, datasetName)
if os.path.isdir(datasetPath):
# Create an imbu instance for each dataset
imbu = ImbuModels(
cacheRoot=os.environ.get("MODEL_CACHE_DIR", os.getcwd()),
modelSimilarityMetric=None,
dataPath=os.path.join(datasetPath, "data.csv"),
retina=os.environ["IMBU_RETINA_ID"],
apiKey=os.environ["CORTICAL_API_KEY"]
)
g_imbus.update(((datasetName, imbu),))
# Init the dict for this dataset's models
g_models[datasetName] = {}
def addStandardHeaders(contentType="application/json; charset=UTF-8"):
"""
Add Standard HTTP Headers ("Content-Type", "Server") to the response.
Here is an example of the headers added by this method using the default
values::
Content-Type: application/json; charset=UTF-8
Server: Imbu x.y.z
:param content_type: The value for the "Content-Type" header.
(default "application/json; charset=UTF-8")
"""
web.header("Server", "Imbu 1.0.0", True)
web.header("Content-Type", contentType, True)
def addCORSHeaders():
"""
Add CORS (http://www.w3.org/TR/cors/) headers
"""
web.header("Access-Control-Allow-Origin", "*", True)
web.header("Access-Control-Allow-Headers",
"accept, access-control-allow-origin, content-type", True)
web.header("Access-Control-Allow-Credentials", "true", True)
web.header("Access-Control-Allow-Methods", "POST", True)
class FluentWrapper(object):
def query(self, dataset, model, text):
"""
Queries the model (which is specific to this dataset) and returns an ordered
list of matching samples.
:param str dataset: Dataset name, specifying the ImbuModels instance to use.
Possible values correspond to data dirs in _IMBU_LOAD_PATH_PREFIX.
:param str model: Name of the model to use. Possible values are mapped to
classes in the NLP model factory.
:param str text: The text to match.
:returns: a sequence of matching samples.
::
[
{"0": {"text": "sampleText", "scores": [0.75, ...]},
...
]
"""
global g_imbus
global g_models
if model not in g_models[dataset]:
loadPath = os.path.join(_IMBU_LOAD_PATH_PREFIX, dataset, model)
g_models[dataset][model] = g_imbus[dataset].createModel(
model, str(loadPath), None)
if text:
_, sortedIds, sortedDistances = g_imbus[dataset].query(
g_models[dataset][model], text)
return g_imbus[dataset].formatResults(model, text, sortedDistances, sortedIds)
else:
return []
class DefaultHandler(object):
def GET(self, *args): # pylint: disable=R0201,C0103
addStandardHeaders("text/html; charset=UTF-8")
return "<html><body><h1>Welcome to Nupic Fluent</h1></body></html>"
class DatasetsHandler(object):
"""Handles Dataset requests"""
def GET(self, *args):
"""Use '/fluent/datasets' to get list of available datasets"""
addStandardHeaders()
addCORSHeaders()
return json.dumps(g_imbus.keys())
class FluentAPIHandler(object):
"""Handles API requests"""
def OPTIONS(self, modelName=ImbuModels.defaultModelType): # pylint: disable=R0201,C0103
addStandardHeaders()
addCORSHeaders()
def GET(self, *args):
""" GET global ready status. Returns "true" when all models have been
created and are ready for queries.
"""
addStandardHeaders()
addCORSHeaders()
return json.dumps(True)
def POST(self,
modelName=ImbuModels.defaultModelType,
dataset=ImbuModels.defaultDataset): # pylint: disable=R0201,C0103
addStandardHeaders()
addCORSHeaders()
response = {}
data = web.data()
if data:
if isinstance(data, basestring):
response = g_fluent.query(dataset, modelName, data)
else:
raise web.badrequest("Invalid Data. Query data must be a string")
if len(response) == 0:
# No data, just return all samples
# See "ImbuModels.formatResults" for expected format
for item in g_imbus[dataset].dataDict.items():
response[item[0]] = {"text": item[1][0], "scores": [0]}
return json.dumps(response)
urls = (
"", "DefaultHandler",
"/", "DefaultHandler",
"/fluent", "FluentAPIHandler",
"/fluent/datasets", "DatasetsHandler",
"/fluent/(.*)/(.*)", "FluentAPIHandler",
"/fluent/(.*)", "FluentAPIHandler"
)
app = web.application(urls, globals())
# Create Imbu model runner
g_fluent = FluentWrapper()
# Required by uWSGI per WSGI spec
application = app.wsgifunc()
|
Python
| 0.000015
|
@@ -1691,16 +1691,45 @@
setPath)
+ and %22egg%22 not in datasetPath
:%0A #
|
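The patched guard skips any dataset path containing "egg" -- presumably .egg or .egg-info directories that packaging tools drop under the load-path prefix and that os.listdir would otherwise pick up as datasets:

for datasetName in os.listdir(_IMBU_LOAD_PATH_PREFIX):
    datasetPath = os.path.join(_IMBU_LOAD_PATH_PREFIX, datasetName)
    if os.path.isdir(datasetPath) and "egg" not in datasetPath:
        # ... create an ImbuModels instance per dataset, as before ...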
5d9f83c06e3418cbb4bd5314136bd4700d7e26c3
|
Remove print statement
|
paasta_tools/cli/cmds/performance_check.py
|
paasta_tools/cli/cmds/performance_check.py
|
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from service_configuration_lib import read_extra_service_information
from paasta_tools.cli.utils import validate_service_name
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import timeout
def add_subparser(subparsers):
list_parser = subparsers.add_parser(
'performance-check',
description='Performs a performance check',
help='Performs a performance check',
)
list_parser.add_argument(
'-s', '--service',
help='Name of service for which you wish to check. Leading "services-", as included in a '
'Jenkins job name, will be stripped.',
)
list_parser.add_argument(
'-d', '--soa-dir',
dest='soa_dir',
metavar='SOA_DIR',
default=DEFAULT_SOA_DIR,
help='Define a different soa config directory',
)
list_parser.set_defaults(command=perform_performance_check)
def load_performance_check_config(service, soa_dir):
return read_extra_service_information(
service_name=service,
extra_info='performance-check',
soa_dir=soa_dir,
)
def submit_performance_check_job(service, soa_dir):
performance_check_config = load_performance_check_config(service, soa_dir)
if not performance_check_config:
print "No performance-check.yaml. Skipping performance-check."
return
endpoint = performance_check_config.pop('endpoint')
r = requests.post(
url=endpoint,
params=performance_check_config,
)
r.raise_for_status()
print "Posted a submission to the PaaSTA performance-check service."
print "Endpoint: {}".format(endpoint)
print "Parameters: {}".format(performance_check_config)
@timeout()
def perform_performance_check(args):
service = args.service
if service.startswith('services-'):
service = service.split('services-', 1)[1]
validate_service_name(service, args.soa_dir)
print service
try:
submit_performance_check_job(
service=service,
soa_dir=args.soa_dir,
)
except Exception as e:
print "Something went wrong with the performance check. Safely bailing. No need to panic."
print "Here was the error:"
print str(e)
|
Python
| 0.007015
|
@@ -2535,25 +2535,8 @@
ir)%0A
- print service
%0A
|
2b9f6a4c1dfdef393588ac5c430735528d276a50
|
Add neat headers with column title
|
habiter/tui.py
|
habiter/tui.py
|
import itertools
__author__ = 'moskupols'
from habiter import habit_api, models
from habiter.settings import user_id, api_key, ACCEL_QUIT, ACCEL_TOGGLE_LIST_MODE
import urwid
class UserInfoBar(urwid.Text):
def __init__(self, user):
super().__init__(user.name, align=urwid.CENTER, wrap=urwid.CLIP)
class StatusBar(urwid.Text):
def __init__(self, markup='initial status'):
super().__init__(markup, wrap=urwid.CLIP)
class TaskWidgetMixin:
def __init__(self, task, *args, **kwargs):
super().__init__(*args, **kwargs)
self.task = task
class HabitWidget(TaskWidgetMixin, urwid.SelectableIcon):
def __init__(self, habit):
super().__init__(habit, text=habit.text)
self.habit = habit
class DailyWidget(TaskWidgetMixin, urwid.CheckBox):
def __init__(self, daily):
super().__init__(daily, label=daily.text, state=daily.completed)
self.daily = daily
class TodoWidget(TaskWidgetMixin, urwid.CheckBox):
def __init__(self, todo):
super().__init__(todo, label=todo.text, state=todo.completed)
self.todo = todo
class RewardWidget(TaskWidgetMixin, urwid.Button):
def __init__(self, reward):
super().__init__(reward, label=reward.text)
self.reward = reward
class TaskListView(urwid.ListBox):
no_filter = (lambda wid: True, 'all')
def __init__(self, task_wids, wid_filters=(no_filter,)):
super().__init__(urwid.SimpleFocusListWalker([]))
self.all_task_wids = task_wids
self.filters_ring = itertools.cycle(wid_filters)
self.switch_to_next_filter()
def update_view(self, task_wids, wid_filter):
self.body.clear()
self.body.extend([wid for wid in task_wids if wid_filter(wid)])
def switch_to_next_filter(self):
self.update_view(self.all_task_wids, next(self.filters_ring)[0])
def keypress(self, size, key):
if key in ACCEL_TOGGLE_LIST_MODE:
self.switch_to_next_filter()
else:
return super().keypress(size, key)
class HabitListView(TaskListView):
def __init__(self, tasks):
super().__init__([HabitWidget(task) for task in tasks])
class DailyListView(TaskListView):
def __init__(self, tasks):
super().__init__(
[DailyWidget(task) for task in tasks],
(TaskListView.no_filter,
(lambda wid: not wid.get_state(), 'due'),
(lambda wid: wid.get_state(), 'checked')
)
)
class TodoListView(TaskListView):
def __init__(self, todos):
super().__init__(
[TodoWidget(todo) for todo in todos if not todo.completed],
((lambda wid: not wid.get_state(), 'due'),
(lambda wid: wid.get_state(), 'done'))
)
class RewardListView(TaskListView):
def __init__(self, tasks):
super().__init__([RewardWidget(task) for task in tasks])
class TasksView(urwid.Columns):
def __init__(self, user):
self.user = user
lists = (
HabitListView(user.habits),
DailyListView(user.dailies),
TodoListView(user.todos),
RewardListView(user.rewards)
)
self.habit_list, self.daily_list, self.todo_list, self.reward_list = lists
super().__init__(lists, dividechars=3, min_width=20)
class MainFrame(urwid.Frame):
def __init__(self, user):
super().__init__(header=UserInfoBar(user), body=TasksView(user), footer=StatusBar())
self.user = user
def keypress(self, size, key):
if key in ACCEL_QUIT:
raise urwid.ExitMainLoop()
return super().keypress(size, key)
def run():
api = habit_api.HabitAPI(user_id, api_key)
user = models.User(api)
# user = Mock()
# user.name = 'mocked name'
main = MainFrame(user)
loop = urwid.MainLoop(main, handle_mouse=False)
loop.run()
|
Python
| 0
|
@@ -1302,15 +1302,13 @@
wid.
-ListBox
+Frame
):%0A
@@ -1367,24 +1367,31 @@
init__(self,
+ title,
task_wids,
@@ -1430,56 +1430,278 @@
s
-uper().__init__(urwid.SimpleFocusListWalker(%5B%5D))
+elf.header = urwid.Pile(%5B%0A urwid.Text(('bold', title), align=urwid.CENTER),%0A urwid.Divider('-'),%0A %5D)%0A self.list_box = urwid.ListBox(urwid.SimpleFocusListWalker(%5B%5D))%0A super().__init__(header=self.header, body=self.list_box)%0A
%0A
@@ -1898,46 +1898,27 @@
elf.
-body.clear()%0A self.body.extend(
+list_box.body%5B:%5D =
%5Bwid
@@ -1958,17 +1958,16 @@
er(wid)%5D
-)
%0A%0A de
@@ -2343,16 +2343,26 @@
_init__(
+'Habits',
%5BHabitWi
@@ -2478,32 +2478,55 @@
per().__init__(%0A
+ 'Dailies',%0A
%5BDai
@@ -2816,32 +2816,54 @@
per().__init__(%0A
+ %22To-dos%22,%0A
%5BTod
@@ -3023,16 +3023,30 @@
'done')
+%0A
)%0A
@@ -3110,32 +3110,32 @@
_(self, tasks):%0A
-
super().
@@ -3143,16 +3143,27 @@
_init__(
+'Rewards',
%5BRewardW
|
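The large payload decodes to a switch from urwid.ListBox to urwid.Frame so each column carries a bold, centered title header; the four subclasses now pass 'Habits', 'Dailies', "To-dos" and 'Rewards'. A sketch of the patched base class:

class TaskListView(urwid.Frame):
    no_filter = (lambda wid: True, 'all')

    def __init__(self, title, task_wids, wid_filters=(no_filter,)):
        self.header = urwid.Pile([
            urwid.Text(('bold', title), align=urwid.CENTER),
            urwid.Divider('-'),
        ])
        self.list_box = urwid.ListBox(urwid.SimpleFocusListWalker([]))
        super().__init__(header=self.header, body=self.list_box)
        self.all_task_wids = task_wids
        self.filters_ring = itertools.cycle(wid_filters)
        self.switch_to_next_filter()

    def update_view(self, task_wids, wid_filter):
        # Slice assignment keeps the focus walker instance (and its
        # signal wiring) alive instead of clearing and re-extending it.
        self.list_box.body[:] = [wid for wid in task_wids if wid_filter(wid)]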
0de2aace2a493d0d760b1ceec3b67f5a6c3f86e6
|
fix exception 'TypeError: a float is required'
|
vmchecker/coursedb.py
|
vmchecker/coursedb.py
|
#!/usr/bin/env python
"""Manage the course database"""
from __future__ import with_statement
import sqlite3
from contextlib import contextmanager, closing
class CourseDb(object):
"""A class to encapsulate the logic behind updates and querries of
the course's db"""
def __init__(self, db_cursor):
self.db_cursor = db_cursor
def create_tables(self):
"""Create the tables needed for vmchecker"""
self.db_cursor.executescript("""
CREATE TABLE assignments (id INTEGER PRIMARY KEY, name TEXT);
CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT);
CREATE TABLE grades (assignment_id INTEGER,
user_id INTEGER,
grade TEXT,
mtime TIMESTAMP NOT NULL,
PRIMARY KEY(assignment_id, user_id));""")
def add_assignment(self, assignment):
"""Creates an id of the homework and returns it."""
self.db_cursor.execute('INSERT INTO assignments (name) values (?)',
(assignment,))
self.db_cursor.execute('SELECT last_insert_rowid()')
assignment_id, = self.db_cursor.fetchone()
return assignment_id
def get_assignment_id(self, assignment):
"""Returns the id of the assignment"""
self.db_cursor.execute('SELECT id FROM assignments WHERE name=?',
(assignment,))
result = self.db_cursor.fetchone()
if result is None:
return self.add_assignment(assignment)
return result[0]
def add_user(self, user):
"""Creates an id of the user and returns it."""
self.db_cursor.execute('INSERT INTO users (name) values (?)', (user,))
self.db_cursor.execute('SELECT last_insert_rowid()')
user_id, = self.db_cursor.fetchone()
return user_id
def get_user_id(self, user):
"""Returns the id of the user"""
self.db_cursor.execute('SELECT id FROM users WHERE name=?', (user,))
result = self.db_cursor.fetchone()
if result is None:
return self.add_user(user)
return result[0]
def get_grade_mtime(self, assignment_id, user_id):
"""Returns the mtime of a grade"""
self.db_cursor.execute('SELECT mtime FROM grades '
'WHERE assignment_id = ? and user_id = ?',
(assignment_id, user_id))
result = self.db_cursor.fetchone()
if result is not None:
return result[0]
def save_grade(self, assignment_id, user_id, grade, mtime):
"""Save the grade into the database
If the grade identified by (assignment_id, user_id)
exists then update the DB, else inserts a new entry.
"""
self.db_cursor.execute('INSERT OR REPLACE INTO grades '
'(grade, mtime, assignment_id, user_id) '
'VALUES (?, ?, ?, ?) ',
(grade, mtime, assignment_id, user_id))
@contextmanager
def opening_course_db(db_file, isolation_level=None):
"""Context manager ensuring that the database resources are
propperly closed upon either success or exception.
On success the latest changes must be commited, while on failure
they must be rolled back.
"""
db_conn = sqlite3.connect(db_file, isolation_level)
try:
with closing(db_conn.cursor()) as db_cursor:
course_db = CourseDb(db_cursor)
yield course_db
except:
db_conn.rollback()
raise
else:
db_conn.commit()
finally:
db_conn.close()
def create_db_tables(db_file):
"""Create vmchecker's tables inside the given db_file"""
with opening_course_db(db_file) as course_db:
course_db.create_tables()
|
Python
| 0
|
@@ -3469,32 +3469,48 @@
onnect(db_file,
+isolation_level=
isolation_level)
|
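The bug is positional: sqlite3.connect's second parameter is timeout, which must be a float, so passing isolation_level (default None) positionally raises exactly the reported "TypeError: a float is required". The fix names the argument:

# signature: sqlite3.connect(database[, timeout, detect_types,
#                            isolation_level, ...])
db_conn = sqlite3.connect(db_file, isolation_level=isolation_level)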
c0259abdd1b34cd195e3f1ffcb7fb5479d76a0fe
|
bump version to 1.0.0
|
vncdotool/__init__.py
|
vncdotool/__init__.py
|
__version__ = "1.0.0dev"
|
Python
| 0
|
@@ -17,9 +17,6 @@
.0.0
-dev
%22%0A
|
4e6207361d7ef08a20e343cb5dab500c2c9cdf28
|
Update AppleApple!.py
|
AppleApple!/AppleApple!.py
|
AppleApple!/AppleApple!.py
|
import pygame, random
from pygame.locals import *
costPerTree = 0
class Item(object):
def __init__(self, itemName, isMaterial, isFood, isWeapon, isCraftable, cost, recipe=()):
self.name = str(itemName)
self.isMaterial = isMaterial
self.isFood = isFood
self.isWeapon = isWeapon
self.Craftable = isCraftable
self.cost = cost
self.recipe = recipe
class Weapon(Item):
def __init__(self, itemName, harm, cost, recipe):
super(Weapon, self).__init__(itemName, False, False, True, True, cost, recipe) # all weapons are craftable
self.harm = harm
class Food(Item):
def __init__(self, itemName, fullness, craftable, cost, isPotion=False, potionType=None, useDegree=None):
super(Food,self).__init__(itemName, False, True, False, craftable, cost)
self.fullness = fullness
self.isPotion = isPotion
self.type = potionType
self.degree = useDegree
class Material(Item):
def __init__(self, itemName, isCraftable, cost, recipe=()):
super(Material,self).__init__(itemName, True, False, False, isCraftable, cost, recipe)
class Mob(object):
def __init__(self, name, blood, damage, trophies):
self.blood = blood
self.damage = damage
self.trophie = trophies
self.name = name
# class Tool(Item):
# def __init__(self,):
# Coming "soon"!!
def pickApple(appleTree):
tuple = (False, False, True)
doExtra = random.choice(tuple)
if doExtra:
applePerTree = 5
else:
applePerTree = 3
return appleTree * applePerTree
def buyJustice(money, thing):
if thing.cost > money:
return False
else:
return True
def plantTreeJustice(num, apple):
if num*costPerTree < apple:
return True
else:
return False
# materials
wood = Material('wood', False, 3)
stick = Material('stick', True, 1, (wood,))
rock = Material('rock', False, 2)
copper = Material('copper ingot', True, 5, (rock, rock))
iron = Material('iron ingot', True, 12, (copper, copper, copper))
gold = Material('gold ingot', True, 27, (iron, iron, iron, iron))
diamond = Material('diamond!', True, 58, (gold, gold, gold, gold, gold))
# foods
flesh = Food('flesh', 2, False, 2)
# mobs
zombie = Mob('zombie', 20, 1, (flesh,))
tree = Mob('tree', 10, 0.5, (wood, stick))
stone = Mob('stone', 30, 0.5, (rock,))
# weapons
wooden_sword = Weapon('wooden sword', 2, 5, (wood, wood, stick))
stone_sword = Weapon('stone sword', 6, 12, (rock, rock, stick))
iron_sword = Weapon('iron sword', 18, 26, (iron, iron, stick))
golden_sword = Weapon('golden sword', 54, 54, (gold, gold, stick))
diamond_sword = Weapon('diamond sword', 162, 110, (diamond, diamond, stick))
better_wooden_sword = Weapon('better wooden sword', 10, 10, (wooden_sword, wooden_sword)) # and so on...
placeToMobs = {'forest':(tree,tree,tree,tree,tree)}
|
Python
| 0.000001
|
@@ -1985,24 +1985,26 @@
per ingot',
+!%0A
True, 5, (ro
@@ -2259,17 +2259,124 @@
False,
-2
+1)%0Aberry = Food('blue berry',5,False,2)%0Aegg = Food('egg',%0Acake = Food('cake',20,True,10,(egg,egg,milk,flour)
)%0A# mobs
|
aec24db030e92ff8ede3a79aab28b36b6e74f356
|
Update table_generator marshmallow config
|
great_expectations/datasource/generator/table_generator.py
|
great_expectations/datasource/generator/table_generator.py
|
import os
import logging
from string import Template
from marshmallow import Schema, fields, post_load, ValidationError
from .batch_generator import BatchGenerator
from great_expectations.exceptions import BatchKwargsError, GreatExpectationsError
from great_expectations.datasource.types import SqlAlchemyDatasourceTableBatchKwargs
logger = logging.getLogger(__name__)
try:
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.engine import reflection
except ImportError:
sqlalchemy = None
create_engine = None
reflection = None
logger.debug("Unable to import sqlalchemy.")
class AssetConfigurationSchema(Schema):
table = fields.Str()
schema = fields.Str()
@post_load
def make_asset_configuration(self, data):
return AssetConfiguration(**data)
class AssetConfiguration(object):
def __init__(self, table, schema=None):
self.__table = table
self.__schema = schema
@property
def table(self):
return self.__table
@property
def schema(self):
return self.__schema
assetConfigurationSchema = AssetConfigurationSchema()
class TableGenerator(BatchGenerator):
"""Provide access to already materialized tables or views in a database.
TableGenerator can be used to define specific data asset names that take and substitute parameters,
for example to support referring to the same data asset but with different schemas depending on provided
batch_kwargs.
The python template language is used to substitute table name portions. For example, consider the
following configurations::
my_generator:
class_name: TableGenerator
assets:
my_table:
schema: $schema
table: my_table
In that case, the asset my_datasource/my_generator/my_asset will refer to a table called my_table in a schema
defined in batch_kwargs.
"""
def __init__(self, name="default", datasource=None, assets={}):
super(TableGenerator, self).__init__(name=name, datasource=datasource)
try:
self._assets = {
asset_name: assetConfigurationSchema.load(asset_config) for
(asset_name, asset_config) in assets.items()
}
except ValidationError as err:
raise GreatExpectationsError("Unable to load asset configuration in TableGenerator '%s': "
"validation error: %s." % (name, str(err)))
if datasource is not None:
self.engine = datasource.engine
try:
self.inspector = sqlalchemy.inspect(self.engine)
except sqlalchemy.exc.OperationalError:
logger.warning("Unable to create inspector from engine in generator '%s'" % name)
self.inspector = None
def _get_iterator(self, generator_asset, **kwargs):
# First, we check if we have a configured asset
if generator_asset in self._assets:
asset_config = self._assets[generator_asset]
try:
table_name = Template(asset_config.table).substitute(kwargs)
schema_name = None
if asset_config.schema is not None:
schema_name = Template(asset_config.schema).substitute(kwargs)
except KeyError:
raise BatchKwargsError("Unable to generate batch kwargs for asset '" + generator_asset + "': "
"missing template key",
{"generator_asset": generator_asset,
"table_template": asset_config.table,
"schema_template": asset_config.schema}
)
return iter([
SqlAlchemyDatasourceTableBatchKwargs(
table=table_name,
schema=schema_name
)
])
# If this is not a manually configured asset, we fall back to inspection of the database
elif self.engine is not None and self.inspector is not None:
split_generator_asset = generator_asset.split(".")
if len(split_generator_asset) == 2:
schema_name = split_generator_asset[0]
table_name = split_generator_asset[1]
elif len(split_generator_asset) == 1:
schema_name = self.inspector.default_schema_name
table_name = split_generator_asset[0]
else:
raise ValueError("Table name must be of shape '[SCHEMA.]TABLE'. Passed: " + split_generator_asset)
tables = self.inspector.get_table_names(schema=schema_name)
tables.extend(self.inspector.get_view_names(schema=schema_name))
if table_name in tables:
return iter([
SqlAlchemyDatasourceTableBatchKwargs(
table=table_name,
schema=schema_name,
)
])
def get_available_data_asset_names(self):
defined_assets = list(self._assets.keys())
tables = []
if self.engine is not None and self.inspector is not None:
for schema_name in self.inspector.get_schema_names():
known_information_schemas = [
"INFORMATION_SCHEMA", # snowflake, mssql, mysql, oracle
"information_schema", # postgres, redshift
]
known_system_tables = [
"sqlite_master" # sqlite
]
if schema_name in known_information_schemas:
continue
tables.extend(
[table_name if self.inspector.default_schema_name == schema_name else
schema_name + "." + table_name
for table_name in self.inspector.get_table_names(schema=schema_name)
if table_name not in known_system_tables
]
)
tables.extend(
[table_name if self.inspector.default_schema_name == schema_name else
schema_name + "." + table_name
for table_name in self.inspector.get_view_names(schema=schema_name)
if table_name not in known_system_tables
]
)
return set(defined_assets + tables)
def build_batch_kwargs_from_partition(self, generator_asset, partition_id=None, batch_kwargs=None, **kwargs):
all_the_kwargs = batch_kwargs.copy()
all_the_kwargs.update(kwargs)
return next(self._get_iterator(generator_asset, partition_id=partition_id, **all_the_kwargs))
def get_available_partition_ids(self, generator_asset):
raise BatchKwargsError("TableGenerator cannot identify partitions, however any existing table may"
"already be referenced by accessing a generator_asset with the name of the "
"table or of the form SCHEMA.TABLE", {})
|
Python
| 0.000001
|
@@ -726,16 +726,33 @@
ost_load
+(pass_many=False)
%0A def
@@ -787,16 +787,26 @@
lf, data
+, **kwargs
):%0A
@@ -2029,18 +2029,20 @@
assets=
-%7B%7D
+None
):%0A
@@ -2115,16 +2115,63 @@
source)%0A
+ if not assets:%0A assets = %7B%7D%0A
|
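Decoded, the changes track marshmallow 3's hook signature -- post_load hooks are invoked with extra keyword arguments such as many and partial, so the hook must accept **kwargs -- and replace the mutable default for assets. The first method lives on AssetConfigurationSchema, the __init__ on TableGenerator:

    @post_load(pass_many=False)
    def make_asset_configuration(self, data, **kwargs):
        return AssetConfiguration(**data)

    def __init__(self, name="default", datasource=None, assets=None):
        super(TableGenerator, self).__init__(name=name, datasource=datasource)
        if not assets:
            assets = {}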
7d1463fc732cdc6aef3299c6d2bbe916418e6d6e
|
Add full_name field to API
|
hkisaml/api.py
|
hkisaml/api.py
|
from django.contrib.auth.models import User
from rest_framework import permissions, routers, serializers, generics, mixins
from oauth2_provider.ext.rest_framework import TokenHasReadWriteScope
class UserSerializer(serializers.ModelSerializer):
def to_representation(self, obj):
ret = super(UserSerializer, self).to_representation(obj)
if hasattr(obj, 'profile'):
ret['department_name'] = obj.profile.department_name
return ret
class Meta:
fields = [
'last_login', 'username', 'email', 'date_joined',
'first_name', 'last_name'
]
model = User
# ViewSets define the view behavior.
class UserView(generics.RetrieveAPIView,
mixins.RetrieveModelMixin):
def get_queryset(self):
user = self.request.user
if user.is_superuser:
return self.queryset
else:
return self.queryset.filter(id=user.id)
def get_object(self):
username = self.kwargs.get('username', None)
if username:
qs = self.get_queryset()
obj = generics.get_object_or_404(qs, username=username)
else:
obj = self.request.user
return obj
permission_classes = [permissions.IsAuthenticated, TokenHasReadWriteScope]
queryset = User.objects.all()
serializer_class = UserSerializer
#router = routers.DefaultRouter()
#router.register(r'users', UserViewSet)
|
Python
| 0.000001
|
@@ -80,17 +80,8 @@
ons,
- routers,
ser
@@ -434,16 +434,134 @@
nt_name%0A
+ if obj.first_name and obj.last_name:%0A ret%5B'full_name'%5D = '%25s %25s' %25 (obj.first_name, obj.last_name)%0A
|
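Besides dropping the unused routers import, the serializer now exposes a computed full_name whenever both name parts are present:

    def to_representation(self, obj):
        ret = super(UserSerializer, self).to_representation(obj)
        if hasattr(obj, 'profile'):
            ret['department_name'] = obj.profile.department_name
        if obj.first_name and obj.last_name:
            ret['full_name'] = '%s %s' % (obj.first_name, obj.last_name)
        return ret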
119cd82b6454a8b2ac2177d31105561a991e1ef2
|
Add arg to hide diff
|
push/edit.py
|
push/edit.py
|
# -*- coding: utf-8 -*-
import argparse
import os
from config import cfg # pylint: disable=E0611,W0614
from func import file_get_contents
os.environ['TZ'] = 'UTC'
parser = argparse.ArgumentParser()
parser.add_argument('--auto', action='store_true')
parser.set_defaults(auto=False)
args = parser.parse_args()
print(args)
print('===== project =====')
project = None
while project is None:
for key, val in enumerate(cfg['project'], 1):
print('\t', key, val)
project = input('select a project:')
try:
project = int(project)
project = list(cfg['project'].values())[project - 1]
break
except Exception as e:
print(e)
project = None
print('project', project)
print()
print('===== web =====')
web = None
while web is None:
for key, val in enumerate(project['web'], 1):
print('\t', key, val)
web = input('select a web:')
try:
web = int(web)
webname = project['web'][web - 1]
web = cfg['web'][webname]
break
except Exception as e:
print(e)
web = None
print('web', web)
print()
print('===== source =====')
source = None
while source is None:
for key, val in enumerate(project['source'], 1):
print('\t', key, val)
source = input('select a source:')
try:
source = int(source)
source = cfg['source'][project['source'][source - 1]]
break
except Exception as e:
print(e)
source = None
print('source', source)
print()
print('===== target =====')
target = None
while target is None:
for key, val in enumerate(project['target'], 1):
print('\t', key, val)
target = input('select a target:')
try:
target = int(target)
target = cfg['target'][project['target'][target - 1]]
break
except Exception as e:
print(e)
target = None
print('target', target)
print()
print('===== files =====')
files = {}
while len(files) == 0:
cnt = 0
for fromname in project['files']:
cnt += 1
print('\t', cnt, '\t', fromname, '\t', project['files'][fromname])
temp = input('select a files:')
idxs = []
try:
for idx in temp.split():
idx = int(idx)
idxs.append(idx)
except Exception as e:
print(e)
continue
if any([idx < 0 for idx in idxs]):
for fromname in project['files']:
files[fromname] = project['files'][fromname]
try:
for idx in temp.split():
idx = int(idx)
if idx > 0:
files[list(project['files'].keys())[idx - 1]] = list(project['files'].values())[idx - 1]
else:
del files[list(project['files'].keys())[(-idx) - 1]]
break
except Exception as e:
print(e)
files = {}
if len(files) == 0:
for fromname in project['files']:
files[fromname] = project['files'][fromname]
print('files', files)
print()
summary = project['summary']
print('summary:', summary)
temp = input('new summary:').strip()
if temp != '':
summary = temp
print('summary:', summary)
print()
os.environ['PYWIKIBOT_DIR'] = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'user-config',
webname
)
import pywikibot
site = pywikibot.Site()
site.login()
for fromname in files:
toname = files[fromname]
fromname = source + fromname
toname = target + toname
print(fromname, '->', toname)
try:
text = file_get_contents(fromname)
except Exception as e:
print(e)
continue
page = pywikibot.Page(site, toname)
if page.text == '':
print('New page')
elif page.text == text.rstrip():
print('Nothing changed. Skipped.')
continue
else:
pywikibot.showDiff(page.text, text)
if args.auto:
save = 'yes'
else:
save = input('Save?')
if save.lower() in ['', 'y', 'yes']:
page.text = text
page.save(summary=summary, minor=web['minor'], botflag=web['bot'], nocreate=web['nocreate'])
|
Python
| 0.000001
|
@@ -246,16 +246,70 @@
_true')%0A
+parser.add_argument('--no-diff', action='store_true')%0A
parser.s
@@ -330,16 +330,31 @@
to=False
+, no_diff=False
)%0Aargs =
@@ -3821,32 +3821,65 @@
tinue%0A else:%0A
+ if not args.no_diff:%0A
pywikibo
|
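Decoded, the three hunks register a `--no-diff` flag, default it to False, and gate the `showDiff` call. argparse maps `--no-diff` to `args.no_diff` automatically; a reconstruction of the changed lines:
parser.add_argument('--no-diff', action='store_true')
parser.set_defaults(auto=False, no_diff=False)
# ... selection prompts unchanged ...
    else:
        if not args.no_diff:
            pywikibot.showDiff(page.text, text)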
7fb0e28ad6ef1190e61fc38bfb19744739b2e096
|
Remove unused deps from admin view
|
scoring_engine/web/views/admin.py
|
scoring_engine/web/views/admin.py
|
from flask import Blueprint, flash, redirect, render_template, request, url_for,
from flask_login import current_user, login_required
from operator import itemgetter
from scoring_engine.models.user import User
from scoring_engine.models.team import Team
mod = Blueprint('admin', __name__)
@mod.route('/admin')
@mod.route('/admin/status')
@login_required
def status():
if current_user.is_white_team:
return render_template('admin/status.html')
else:
return redirect(url_for('auth.unauthorized'))
@mod.route('/admin/manage')
@login_required
def manage():
if current_user.is_white_team:
users = User.query.with_entities(User.id, User.username).all()
teams = Team.query.with_entities(Team.id, Team.name).all()
return render_template('admin/manage.html', users=sorted(users, key=itemgetter(0)), teams=teams)
else:
return redirect(url_for('auth.unauthorized'))
@mod.route('/admin/stats')
@login_required
def stats():
if current_user.is_white_team:
return render_template('admin/stats.html')
else:
return redirect(url_for('auth.unauthorized'))
|
Python
| 0
|
@@ -25,15 +25,8 @@
int,
- flash,
red
@@ -52,25 +52,16 @@
ate,
- request,
url_for
,%0Afr
@@ -56,17 +56,16 @@
url_for
-,
%0Afrom fl
|
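Decoded, the hunks prune `flash` and `request` and also remove the stray trailing comma after `url_for` (which made the old import line a syntax error, since it runs straight into the next `from` statement). The patched import reads:
from flask import Blueprint, redirect, render_template, url_for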
a900501804a5a07ed9cea77d5d5348be5e100d67
|
Use Acapela TTS if available
|
src/robots/actions/speech.py
|
src/robots/actions/speech.py
|
# coding=utf-8
import logging; logger = logging.getLogger("robot." + __name__)
logger.setLevel(logging.DEBUG)
from robots.action import *
@action
def say(robot, msg):
""" Says loudly the message.
Speech synthesis relies on the ROS wrapper around Festival.
:param msg: a text to say.
"""
def execute(robot):
logger.info("Robot says: " + msg)
if robot.hasROS():
import roslib; roslib.load_manifest('sound_play')
import rospy, os, sys
from sound_play.msg import SoundRequest
from sound_play.libsoundplay import SoundClient
soundhandle = SoundClient()
soundhandle.say(msg)
return (True, None)
elif robot.hasmodule("textos"):
return robot.execute([
genom_request(
"textos",
"Say",
[msg],
wait_for_completion = False if callback else True,
callback = callback)])
else:
logger.warning("No ROS, no textos module: can not do speech synthesis.")
return (True, None)
return [python_request(execute)]
|
Python
| 0
|
@@ -161,16 +161,49 @@
bot, msg
+, callback = None, feedback =None
):%0A %22
@@ -240,66 +240,192 @@
S
-peech synthesis relies on the ROS wrapper around Festival.
+everal TTS systems are tested:%0A - first, try the Acapela TTS (through the acapela-ros Genom module)%0A - then the ROS 'sound_play' node%0A - eventually, the Genom 'textos' module%0A
%0A
@@ -653,60 +653,8 @@
sys%0A
- from sound_play.msg import SoundRequest%0A
@@ -1243,16 +1243,954 @@
None)%0A%0A
+ if robot.hasROS():%0A import rosnode%0A nodes = rosnode.get_node_names()%0A if %22/acapela%22 in nodes:%0A import actionlib%0A from acapela.msg import SayGoal, SayAction%0A%0A # use Acapela TTS%0A client = actionlib.SimpleActionClient('/acapela/Say', SayAction)%0A%0A ok = client.wait_for_server()%0A if not ok:%0A print(%22Could not connect to the Acapela ROS action server! Aborting action%22)%0A return%0A%0A # Creates a goal to send to the action server. %0A goal = SayGoal()%0A goal.message = msg%0A %0A%0A return %5Bros_request(client, %0A goal, %0A wait_for_completion = False if callback else True,%0A callback = callback,%0A feedback=feedback%0A )%5D # Return a non-blocking action. Useful to be able to cancel it later!%0A%0A%0A
retu
|
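Decoded, the commit widens the signature to `say(robot, msg, callback=None, feedback=None)` (the old body already referenced an undefined `callback`), rewrites the docstring to list the fallback order (Acapela, then the ROS sound_play node, then the Genom textos module), and inserts an Acapela branch ahead of the others. A reconstruction of that inserted branch; `ros_request` is a helper from the surrounding robots framework:
if robot.hasROS():
    import rosnode
    nodes = rosnode.get_node_names()
    if "/acapela" in nodes:
        import actionlib
        from acapela.msg import SayGoal, SayAction

        # Drive the Acapela TTS through its ROS action server
        client = actionlib.SimpleActionClient('/acapela/Say', SayAction)
        ok = client.wait_for_server()
        if not ok:
            print("Could not connect to the Acapela ROS action server! Aborting action")
            return

        goal = SayGoal()
        goal.message = msg

        # Non-blocking when a callback is given, so the action can be cancelled later
        return [ros_request(client, goal,
                            wait_for_completion=False if callback else True,
                            callback=callback,
                            feedback=feedback)]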
b00ae9a1023bb649171776f9cfdbf8675621272d
|
Use of `@api.multi`.
|
base_custom_info/models/custom_info.py
|
base_custom_info/models/custom_info.py
|
# -*- coding: utf-8 -*-
# © 2015 Antiun Ingeniería S.L. - Sergio Teruel
# © 2015 Antiun Ingeniería S.L. - Carlos Dauden
# © 2015 Antiun Ingeniería S.L. - Jairo Llopis
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import api, fields, models
class CustomInfoTemplate(models.Model):
"""Defines custom properties expected for a given database object."""
_name = "custom.info.template"
_description = "Custom information template"
_sql_constraints = [
("name_model",
"UNIQUE (name, model_id)",
"Another template with that name exists for that model."),
]
name = fields.Char(translate=True)
model_id = fields.Many2one(comodel_name='ir.model', string='Model')
info_ids = fields.One2many(
comodel_name='custom.info.property',
inverse_name='template_id',
string='Properties')
class CustomInfoProperty(models.Model):
"""Name of the custom information property."""
_name = "custom.info.property"
_description = "Custom information property"
_sql_constraints = [
("name_template",
"UNIQUE (name, template_id)",
"Another property with that name exists for that template."),
]
name = fields.Char(translate=True)
template_id = fields.Many2one(
comodel_name='custom.info.template',
string='Template')
info_value_ids = fields.One2many(
comodel_name="custom.info.value",
inverse_name="property_id",
string="Property Values")
class CustomInfoValue(models.Model):
_name = "custom.info.value"
_description = "Custom information value"
_rec_name = 'value'
_sql_constraints = [
("property_model_res",
"UNIQUE (property_id, model, res_id)",
"Another property with that name exists for that resource."),
]
model_id = fields.Many2one("ir.model", "Model", required=True)
res_id = fields.Integer("Resource ID", index=True, required=True)
property_id = fields.Many2one(
comodel_name='custom.info.property',
required=True,
string='Property')
name = fields.Char(related='property_id.name')
value = fields.Char(translate=True)
class CustomInfo(models.AbstractModel):
_name = "custom.info"
_description = "Inheritable abstract model to add custom info in any model"
custom_info_template_id = fields.Many2one(
comodel_name='custom.info.template',
string='Custom Information Template')
custom_info_ids = fields.One2many(
comodel_name='custom.info.value',
inverse_name='res_id',
domain=lambda self: [
("model_id", "=",
self.env["ir.model"].search([("model", "=", self._name)]).id)],
auto_join=True,
string='Custom Properties')
@api.onchange('custom_info_template_id')
def _onchange_custom_info_template_id(self):
if not self.custom_info_template_id:
self.custom_info_ids = False
else:
info_list = self.custom_info_ids.mapped('property_id')
for info_name in self.custom_info_template_id.info_ids:
if info_name not in info_list:
self.custom_info_ids |= self.custom_info_ids.new({
'model': self._name,
'property_id': info_name.id,
})
@api.multi
def unlink(self):
info_values = self.mapped('custom_info_ids')
res = super(CustomInfo, self).unlink()
if res:
info_values.unlink()
return res
|
Python
| 0
|
@@ -2797,16 +2797,31 @@
ties')%0A%0A
+ @api.multi%0A
@api
|
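Decoded, the single hunk just stacks `@api.multi` above the existing onchange handler, so the decorated method now begins:
@api.multi
@api.onchange('custom_info_template_id')
def _onchange_custom_info_template_id(self):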
fb236790b7569eef764ecf3a23776f37345d0f89
|
remove some verbosity
|
GestureAgents/Agent.py
|
GestureAgents/Agent.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import Reactor
from Events import Event
from Policy import PolicyRuleset
class Agent:
"""This class represents something that generates Events.
When receiving an event with an Agent associated,
call acquire to show your interest. If you are an
application, you may call complete directly.
If you are a recognizer call complete when you have
fully recognized a gesture. For symbolic gestures
this means at the end of the recognition. For continuous
gestures, you may call complete directly instead of
acquire, as you know that anything is your gesture.
Whenever you feel that you don't need this agent
anymore, call discard AS EARLY AS POSSIBLE.
Recognizers must call these through their own helpers
in the class Recognizer.
"""
#Policy on whether a confirmed recognizer can be failed by a new recognizer confirming
completion_policy = PolicyRuleset()
#Policy on whether one gesture can be confirmed while another can be acquired (for instance when a continuous gesture can
#finish another gesture to complete).
compatibility_policy = PolicyRuleset()
def __init__(self,eventnames,creator):
"""eventnames is a list of names that will become member events.
creator is a class or instance with newAgent event to be called when recycling this agent."""
self.recognizers_acquired = []
self.recognizer_complete = None
self.events = {}
self.owners = []
self.newAgent = creator.newAgent
#is this agent having a confirmed recognizer?
self.completed = False
#is this agent recycled?
self.recycled = False
self.finished = False
for ename in list(eventnames)+["finishAgent"]:
self.events[ename]=Event()
setattr(self,ename,self.events[ename])
def acquire(self,Recognizer):
"The recognizer is interested on this agent"
#can we acquire even if there is someone confirmed?
if self.completed and self.compatibility_policy.result(self.recognizer_complete,Recognizer) != True:
return False
else:
self.recognizers_acquired.append(Recognizer)
return True
def finish(self):
"The owner of the event will not generate more events"
self.finishAgent(self)
def discard(self,Recognizer):
"""The recognizer is no longer interested in this agent.
This should occur after acquiring the agent. If it happens
after confirming, the agent will be recycled."""
if Recognizer == self.recognizer_complete:
print "DISCARD"
import traceback
#traceback.print_stack()
self.recognizer_complete = None
if self.completed and not self.finished:
self.completed = False
self.recycled = True
print "Recycling!:",type(Recognizer)
self.newAgent(self)
#print "WARNING: discarding a confirmed recognizer. That shouldn't happen"
elif Recognizer in self.recognizers_acquired:
self.recognizers_acquired.remove(Recognizer)
if self._can_confirm():
self.recognizer_complete.confirm(self)
self.completed = True
def _can_confirm(self):
"Decides if self.recognizer_complete can be confirmed"
if not self.recognizer_complete: return False
if self.completed: return False
if not self.recognizers_acquired: return True
for r in self.recognizers_acquired:
if self.compatibility_policy.result(self.recognizer_complete,r) != True \
and self.compatibility_policy.result(r, self.recognizer_complete) != True:
return False
return True
def _complete(self,Recognizer):
assert(Recognizer is not self.recognizer_complete)
# According to the policy we choose the best Recognizer
print "CCC", self, type(Recognizer), type(self.recognizer_complete)
if self.completion_policy.result(self.recognizer_complete,Recognizer) == False:
#Policy doesn't accept change
Recognizer.safe_fail()
return
elif self.recognizer_complete:
self.recognizer_complete.safe_fail("Displaced by another recognizer: "+str(Recognizer))
self.recognizer_complete = None
self.completed = False
self.recognizer_complete = Recognizer
if Recognizer in self.recognizers_acquired:
self.recognizers_acquired.remove(Recognizer)
#According to the policy we remove acquisitions
if self._can_confirm():
self.recognizer_complete.confirm(self)
self.completed = True
def complete(self,Recognizer):
assert(Recognizer in self.recognizers_acquired)
Reactor.run_after(lambda Recognizer=Recognizer, self=self: self._complete(Recognizer) )
def is_someone_subscribed(self):
for ename,event in self.events.iteritems():
if event.registered:
return True
return False
def fail(self):
"The Recognizer owner of this agent fails before really existing, so All the recognizers based on it must fail"
if self.finished: return
for r in self._get_recognizers_subscribed():
r.safe_fail()
def _get_recognizers_subscribed(self):
from Recognizer import Recognizer
return [r for r in set([r[1] for ename,event in self.events.iteritems() for r in event.registered]) if isinstance(r,Recognizer)]
def fail_all_others(self,winner):
Reactor.run_after(lambda winner=winner,self=self: self._fail_all_others(winner))
def _fail_all_others(self,winner):
#assert(self.recognizer_complete is winner) we are all consenting adults here
target = type(winner)
#print "fail_all_others :",winner,"wants to fail",target
for r in list(self.recognizers_acquired):
if type(r) == target and r is not winner:
#print "fail_all_others by",winner,":", r, "is target"
r.safe_fail(cause="Fail all others by %s"%str(winner))
else:
#print "fail_all_others :", r, "is not target"
pass
#default policies
@Agent.completion_policy.rule(-100)
def _accept_if_none(recognizer1,recognizer2):
"Accept First"
if recognizer1 == None:
return True
@Agent.completion_policy.rule(-99)
def _accept_if_compatible(recognizer1,recognizer2):
"Use compatibility_policy to accept completion one over another"
if Agent.compatibility_policy.result(recognizer1,recognizer2) == True:
return True
@Agent.compatibility_policy.rule(100)
def _never_accept(recognizer_confirmed,recognizer_acquiring):
"Never accept acquire when confirmed"
return False
|
Python
| 0.999731
|
@@ -4091,24 +4091,25 @@
zer%0A
+#
print %22CCC%22,
|
74101a9f24b218c036cf32c540cfb911e601080b
|
fix freeze of ppo2 (#849)
|
baselines/common/mpi_adam_optimizer.py
|
baselines/common/mpi_adam_optimizer.py
|
import numpy as np
import tensorflow as tf
from mpi4py import MPI
class MpiAdamOptimizer(tf.train.AdamOptimizer):
"""Adam optimizer that averages gradients across mpi processes."""
def __init__(self, comm, **kwargs):
self.comm = comm
tf.train.AdamOptimizer.__init__(self, **kwargs)
def compute_gradients(self, loss, var_list, **kwargs):
grads_and_vars = tf.train.AdamOptimizer.compute_gradients(self, loss, var_list, **kwargs)
grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
flat_grad = tf.concat([tf.reshape(g, (-1,)) for g, v in grads_and_vars], axis=0)
shapes = [v.shape.as_list() for g, v in grads_and_vars]
sizes = [int(np.prod(s)) for s in shapes]
num_tasks = self.comm.Get_size()
buf = np.zeros(sum(sizes), np.float32)
sess = tf.get_default_session()
assert sess is not None
countholder = [0] # Counts how many times _collect_grads has been called
stat = tf.reduce_sum(grads_and_vars[0][1]) # sum of first variable
def _collect_grads(flat_grad):
self.comm.Allreduce(flat_grad, buf, op=MPI.SUM)
np.divide(buf, float(num_tasks), out=buf)
if countholder[0] % 100 == 0:
check_synced(sess, self.comm, stat)
countholder[0] += 1
return buf
avg_flat_grad = tf.py_func(_collect_grads, [flat_grad], tf.float32)
avg_flat_grad.set_shape(flat_grad.shape)
avg_grads = tf.split(avg_flat_grad, sizes, axis=0)
avg_grads_and_vars = [(tf.reshape(g, v.shape), v)
for g, (_, v) in zip(avg_grads, grads_and_vars)]
return avg_grads_and_vars
def check_synced(sess, comm, tfstat):
"""
Check that 'tfstat' evaluates to the same thing on every MPI worker
"""
localval = sess.run(tfstat)
vals = comm.gather(localval)
if comm.rank == 0:
assert all(val==vals[0] for val in vals[1:])
|
Python
| 0
|
@@ -45,25 +45,169 @@
rom
-mpi4py import MPI
+baselines.common import tf_util as U%0Afrom baselines.common.tests.test_with_mpi import with_mpi%0Atry:%0A from mpi4py import MPI%0Aexcept ImportError:%0A MPI = None
%0A%0Acl
@@ -882,17 +882,16 @@
shapes%5D%0A
-%0A
@@ -973,81 +973,8 @@
t32)
-%0A%0A sess = tf.get_default_session()%0A assert sess is not None
%0A
@@ -1162,16 +1162,25 @@
lat_grad
+, np_stat
):%0A
@@ -1359,20 +1359,23 @@
_synced(
-sess
+np_stat
, self.c
@@ -1381,14 +1381,8 @@
comm
-, stat
)%0A
@@ -1496,16 +1496,22 @@
lat_grad
+, stat
%5D, tf.fl
@@ -1808,254 +1808,1443 @@
ced(
-sess, comm, tfstat):%0A %22%22%22%0A Check that 'tfstat' evalu
+localval, comm=None):%0A %22%22%22%0A It's common to forget to initialize your variables to the same values, or%0A (less commonly) if you upd
ate
-s
t
-o the same thing on every MPI worker%0A %22%22%22%0A localval = sess.run(tfstat)%0A vals = comm.gather(localval)%0A if comm.rank == 0:%0A assert all(val==vals%5B0%5D for val in vals%5B1:%5D)
+hem in some other way than adam, to get them out of sync.%0A This function checks that variables on all MPI workers are the same, and raises%0A an AssertionError otherwise%0A%0A Arguments:%0A comm: MPI communicator%0A localval: list of local variables (list of variables on current worker to be compared with the other workers)%0A %22%22%22%0A comm = comm or MPI.COMM_WORLD%0A vals = comm.gather(localval)%0A if comm.rank == 0:%0A assert all(val==vals%5B0%5D for val in vals%5B1:%5D)%0A%0A%0A@with_mpi(timeout=5)%0Adef test_nonfreeze():%0A np.random.seed(0)%0A tf.set_random_seed(0)%0A%0A a = tf.Variable(np.random.randn(3).astype('float32'))%0A b = tf.Variable(np.random.randn(2,5).astype('float32'))%0A loss = tf.reduce_sum(tf.square(a)) + tf.reduce_sum(tf.sin(b))%0A%0A stepsize = 1e-2%0A # for some reason the session config with inter_op_parallelism_threads was causing%0A # nested sess.run calls to freeze%0A config = tf.ConfigProto(inter_op_parallelism_threads=1)%0A sess = U.get_session(config=config)%0A update_op = MpiAdamOptimizer(comm=MPI.COMM_WORLD, learning_rate=stepsize).minimize(loss)%0A sess.run(tf.global_variables_initializer())%0A losslist_ref = %5B%5D%0A for i in range(100):%0A l,_ = sess.run(%5Bloss, update_op%5D)%0A print(i, l)%0A losslist_ref.append(l)%0A
%0A
|
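Decoded, the fix stops calling `sess.run` from inside the gradient callback (the nested run was the freeze): the sync statistic is now fed through the py_func itself via `tf.py_func(_collect_grads, [flat_grad, stat], tf.float32)`, the MPI import is made optional, and `check_synced` becomes a standalone numpy-level helper. A reconstruction of the new helper:
try:
    from mpi4py import MPI
except ImportError:
    MPI = None

def check_synced(localval, comm=None):
    # Gathers a local value from every MPI worker and asserts they all match,
    # catching variables that were initialized (or updated) out of sync.
    comm = comm or MPI.COMM_WORLD
    vals = comm.gather(localval)
    if comm.rank == 0:
        assert all(val == vals[0] for val in vals[1:])
The appended `test_nonfreeze` regression test pins `tf.ConfigProto(inter_op_parallelism_threads=1)`, the session setting that used to make the nested runs deadlock.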
a2982804011e808bd8bf8d9781d9b7bb20328ddc
|
remove import test line
|
noteorganiser/tests/test_utils.py
|
noteorganiser/tests/test_utils.py
|
"""tests for utilities"""
import os
import shutil
import datetime
from PySide import QtGui
from PySide import QtCore
import test
#utils to test
from ..utils import fuzzySearch
from .custom_fixtures import parent
def test_fuzzySearch():
### these should return True
#starts with the searchstring
assert fuzzySearch('g', 'git got gut')
#starts with the (longer) searchstring
assert fuzzySearch('git', 'git got gut')
#searchstring not at the start
assert fuzzySearch('got', 'git got gut')
#multiple substrings (separated by a space) found somewhere in the string
assert fuzzySearch('gi go', 'git got gut')
#empty string
assert fuzzySearch('', 'git got gut')
#strange whitespace
assert fuzzySearch('gi go', 'git got gut')
assert fuzzySearch('gi go', 'git got gut')
### these should return False
#searchstring not found
assert not fuzzySearch('bot', 'git got gut')
#searchstring not found
assert not fuzzySearch('gran', 'this is a great neat thing')
|
Python
| 0.000001
|
@@ -114,20 +114,8 @@
Core
-%0Aimport test
%0A%0A#u
|
6e42e355d6ae60f115c9027ff6fcb17814b346c2
|
use mah special charm helpers
|
hooks/setup.py
|
hooks/setup.py
|
import subprocess
def pre_install():
"""
Do any setup required before the install hook.
"""
install_charmhelpers()
def install_charmhelpers():
"""
Install the charmhelpers library, if not present.
"""
try:
import charmhelpers # noqa
except ImportError:
subprocess.check_call(['apt-get', 'install', '-y', 'python-pip'])
subprocess.check_call(['pip', 'install', 'charmhelpers'])
|
Python
| 0
|
@@ -419,16 +419,73 @@
tall', '
+-e', 'git+https://github.com/whitmo/charmhelpers.git#egg=
charmhel
|
a02624cdbacd666d4e0cdba6230e2ee67837f874
|
add AsText to __all__ list
|
geoalchemy2/functions.py
|
geoalchemy2/functions.py
|
from sqlalchemy.sql import functions
from . import types
__all__ = [
'GenericFunction', 'GeometryType', 'Buffer'
]
class GenericFunction(functions.GenericFunction):
def __init__(self, *args, **kwargs):
expr = kwargs.pop('expr', None)
if expr is not None:
args = (expr,) + args
functions.GenericFunction.__init__(self, *args, **kwargs)
# Functions are classified as in the PostGIS doc.
# <http://www.postgis.org/documentation/manual-svn/reference.html>
#
# Geometry Accessors
#
class GeometryType(GenericFunction):
name = 'ST_GeometryType'
#
# Geometry Outputs
#
class AsText(GenericFunction):
name = 'ST_AsText'
#
# Geometry Processing
#
class Buffer(GenericFunction):
name = 'ST_Buffer'
type = types.Geometry
|
Python
| 0.000861
|
@@ -90,16 +90,24 @@
nction',
+%0A
'Geomet
@@ -114,16 +114,42 @@
ryType',
+%0A 'AsText',%0A
'Buffer
|
9fb1c2781582e52c6618b61d4a8a60c3363ee711
|
bump controller API to v1.1
|
api/__init__.py
|
api/__init__.py
|
"""
The **api** Django app presents a RESTful web API for interacting with the **deis** system.
"""
__version__ = '1.0.0'
|
Python
| 0.000001
|
@@ -111,13 +111,13 @@
__ = '1.
-0
+1
.0'%0A
|
60e60c9d7c5551701eafbfe15dd3931d45b594b6
|
Handle Accept headers
|
api/__init__.py
|
api/__init__.py
|
# try and keep Flask imports to a minimum, going to refactor later to use
# just werkzeug, for now, prototype speed is king
from flask import Flask, request
import yaml
import os
import re
from datetime import datetime
from api.config import config, ConfigException
import api.repo
import api.utils
app = Flask(__name__)
app.config.from_object('api.config')
@app.route('/schema', defaults={'path': ''}, methods=['GET'])
@app.route('/schema/<path:path>', methods=['GET'])
def get_schemas(path):
# TODO serve schemas
return utils.json_response({'win': 'scheme away'}, 200)
@app.route('/', defaults={'path': ''}, methods=['GET'])
@app.route('/<path:path>', methods=['GET'])
def get_data(path):
"""Handle all GET requests to the api"""
metadata = {}
file_path = os.path.join('data', path)
latest_version = repo.get_latest_commit()
# check if the request specified a version via header
accept_pattern = re.compile('application/(.+)\+json')
match = accept_pattern.match(request.headers['Accept'])
if match is not None:
cid = match.group(1)
try:
version = repo.get_commit(cid)
except KeyError as e:
return utils.err(406)
else:
version = latest_version
if repo.path_files(file_path + config['DATA_FILE_EXT'], version.id) is None:
# .yml file doesn't exist, check if path matches a directory
f_list = repo.path_files(file_path, version.id)
if f_list is None:
return utils.err(404)
data = utils.file_list_to_links(f_list, request.host_url, 'data/')
metadata['data_type'] = 'directory listing'
else:
raw = repo.file_contents(file_path + config['DATA_FILE_EXT'], version.id)
data = yaml.load(raw)
data = utils.refs_to_links(data, request.host_url)
metadata['data_type'] = 'file content'
metadata['version'] = {
'id': str(version.id),
'date': datetime.fromtimestamp(version.commit_time).isoformat()
}
if version.id != latest_version.id:
metadata['latest_version'] = {
'id': str(latest_version.id),
'date': datetime.fromtimestamp(latest_version.commit_time).isoformat()
}
ret_obj = {
'data': data,
'metadata': metadata
}
return utils.json_response(ret_obj)
|
Python
| 0
|
@@ -1049,18 +1049,248 @@
is
-not
None:%0A
+ match = request.accept_mimetypes.best_match(%5B'application/json'%5D)%0A if match is None:%0A return utils.err(406)%0A else:%0A if match.group(2) is None:%0A version = latest_version%0A else:%0A
@@ -1315,9 +1315,9 @@
oup(
-1
+2
)%0A
@@ -1322,16 +1322,20 @@
+
+
try:%0A
@@ -1335,32 +1335,36 @@
ry:%0A
+
+
version = repo.g
@@ -1390,23 +1390,28 @@
+
except
+(
KeyError
as
@@ -1410,15 +1410,32 @@
rror
+, ValueError)
as e:%0A
+
@@ -1467,51 +1467,8 @@
406)
-%0A else:%0A version = latest_version
%0A%0A
|
a652e43ca73eacda7e42e27afb0d91d75000b4df
|
Fix typing errors
|
gerber_to_scad/vector.py
|
gerber_to_scad/vector.py
|
# Basic vector maths class
import math
class V(object):
def __init__(self, x=0, y=0):
self.x = float(x)
self.y = float(y)
def __unicode__(self):
return "(%s, %s)" % (self.x, self.y)
__repr__ = __unicode__
@classmethod
def from_tuple(cls, coordinates):
x, y = coordinates
return V(x, y)
def as_tuple(self):
return (self.x, self.y)
@classmethod
def intersection(cls, o1, d1, o2, d2):
""" Find intersection of two vectors, if any """
try:
l2 = ((o2.x - o1.x) * d1.y / d1.x - o2.y + o1.y) / (d2.y - d2.x * d1.y / d1.x)
return o2 + d2 * l2
except ZeroDivisionError:
return None
@classmethod
def point_line_projection(cls, v1, v2, p, limit_to_segment=False):
""" Returns the projection of the point p on the line defined
by the two endpoints v1 and v2
"""
d = v2 - v1
l2 = d.abs_sq()
# If v1 and v2 are equal, simply return v1 (the line direction is undefined)
if l2 == 0:
return v1
# Get the projection factor
a = ((p - v1) * d) / l2
# Limit the projection to be limited to stay between v1 and v2, if requested
if limit_to_segment:
if a < 0:
return v1
if a > 1:
return v2
return v1 + d * a
def abs_sq(self):
""" Square of absolute value of vector self """
return abs(self.x * self.x + self.y * self.y)
def consume_tuple(self, other):
if isinstance(other, tuple) or isinstance(other, list):
return V(other[0], other[1])
return other
def cross(self, other):
""" cross product """
return V(self.x * other.y - other.x * self.y)
def rotate(self, theta, as_degrees=False):
""" Adapted from https://gist.github.com/mcleonard/5351452.
Rotate this vector by theta in degrees.
"""
if as_degrees:
theta = math.radians(theta)
dc, ds = math.cos(theta), math.sin(theta)
x, y = dc*self.x - ds*self.y, ds*self.x + dc*self.y
return V(x, y)
def __abs__(self):
return math.sqrt(self.abs_sq())
def __cmp__(self, other):
other = self.consume_tuple(other)
if self.x == other.x and self.y == other.y:
return 0
if self.abs() < other.abs():
return -1
return 1
def __nonzero__(self):
if self.x or self.y:
return True
return False
def __neg__(self):
return V(-self.x, -self.y)
def __add__(self, other):
other = self.consume_tuple(other)
return V(self.x + other.x, self.y + other.y)
def __sub__(self, other):
other = self.consume_tuple(other)
return V(self.x - other.x, self.y - other.y)
def __mul__(self, other):
other = self.consume_tuple(other)
if isinstance(other, V):
return (self.x * other.x + self.y * other.y)
return V(other * self.x, other * self.y)
def __div__(self, other):
if not other:
raise Exception("Division by zero")
other = float(other)
return V(self.x / other, self.y / other)
__truediv__ = __div__
|
Python
| 0.00611
|
@@ -206,24 +206,25 @@
.x, self.y)%0A
+%0A
__repr__
@@ -473,17 +473,16 @@
%22%22%22
-
Find int
@@ -513,17 +513,16 @@
, if any
-
%22%22%22%0A
@@ -594,16 +594,33 @@
1.y) / (
+%0A
d2.y - d
@@ -636,16 +636,29 @@
y / d1.x
+%0A
)%0A
@@ -841,17 +841,16 @@
%22%22%22
-
Returns
@@ -899,20 +899,16 @@
defined%0A
-
@@ -1463,17 +1463,16 @@
%22%22%22
-
Square o
@@ -1502,17 +1502,16 @@
tor self
-
%22%22%22%0A
@@ -1763,17 +1763,16 @@
%22%22%22
-
cross pr
@@ -1776,17 +1776,16 @@
product
-
%22%22%22%0A
@@ -1893,17 +1893,16 @@
%22%22%22
-
Adapted
@@ -1949,20 +1949,16 @@
351452.%0A
-
@@ -2140,17 +2140,19 @@
, y = dc
-*
+ *
self.x -
@@ -2154,17 +2154,19 @@
f.x - ds
-*
+ *
self.y,
@@ -2167,17 +2167,19 @@
lf.y, ds
-*
+ *
self.x +
@@ -2181,17 +2181,19 @@
f.x + dc
-*
+ *
self.y%0A
@@ -2439,31 +2439,29 @@
if
-self.abs() %3C other.abs(
+abs(self) %3C abs(other
):%0A
@@ -2932,32 +2932,39 @@
l__(self, other)
+ -%3E %22V%22
:%0A other
@@ -3039,25 +3039,24 @@
return
-(
self.x * oth
@@ -3078,17 +3078,16 @@
other.y
-)
%0A
@@ -3303,24 +3303,25 @@
.y / other)%0A
+%0A
__truedi
|
ab3f331246e844812fd91b51908a0d0972a9793f
|
improve run_bin (#885)
|
gfauto/gfauto/run_bin.py
|
gfauto/gfauto/run_bin.py
|
# -*- coding: utf-8 -*-
# Copyright 2019 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs a binary from the given binary name and settings file."""
import argparse
import subprocess
import sys
from pathlib import Path
from typing import List
from gfauto import binaries_util, settings_util
def main() -> int:
parser = argparse.ArgumentParser(
description="Runs a binary given the binary name and settings.json file."
)
parser.add_argument(
"--settings",
help="Path to the settings JSON file for this instance.",
default=str(settings_util.DEFAULT_SETTINGS_FILE_PATH),
)
parser.add_argument(
"binary_name",
help="The name of the binary to run. E.g. spirv-opt, glslangValidator",
type=str,
)
parser.add_argument(
"arguments",
metavar="arguments",
type=str,
nargs="*",
help="The arguments to pass to the binary",
)
parsed_args = parser.parse_args(sys.argv[1:])
# Args.
settings_path: Path = Path(parsed_args.settings)
binary_name: str = parsed_args.binary_name
arguments: List[str] = parsed_args.arguments
settings = settings_util.read_or_create(settings_path)
binary_manager = binaries_util.get_default_binary_manager(settings=settings)
cmd = [str(binary_manager.get_binary_path_by_name(binary_name).path)]
cmd.extend(arguments)
return subprocess.run(cmd, check=False).returncode
if __name__ == "__main__":
sys.exit(main())
|
Python
| 0
|
@@ -826,16 +826,49 @@
gs_util%0A
+from gfauto.gflogging import log%0A
%0A%0Adef ma
@@ -998,16 +998,80 @@
on file.
+ %22%0A %22Use -- to separate args to run_bin and your binary.
%22%0A )%0A
@@ -1791,16 +1791,29 @@
uments%0A%0A
+ try:%0A
sett
@@ -1863,16 +1863,205 @@
s_path)%0A
+ except settings_util.NoSettingsFile:%0A log(f%22Settings file %7Bstr(settings_path)%7D was created for you; using this.%22)%0A settings = settings_util.read_or_create(settings_path)%0A%0A
bina
|
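Decoded, the commit extends the help text ("Use -- to separate args to run_bin and your binary."), imports `log` from `gfauto.gflogging`, and wraps the settings read so a missing file is created, logged, and then re-read:
try:
    settings = settings_util.read_or_create(settings_path)
except settings_util.NoSettingsFile:
    log(f"Settings file {str(settings_path)} was created for you; using this.")
    settings = settings_util.read_or_create(settings_path)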
15f22d7c0ac9ddce6cb14cb0cbb35c4d630605d2
|
Remove period so input corresponds to output.
|
api/ud_helper.py
|
api/ud_helper.py
|
import re
from ufal.udpipe import Model, Pipeline, ProcessingError
class Parser:
MODELS = {
"swe": "data/swedish-ud-2.0-170801.udpipe",
}
def __init__(self, language):
model_path = self.MODELS.get(language, None)
if not model_path:
raise ParserException("Cannot find model for language '%s'" % language)
model = Model.load(model_path)
if not model:
raise ParserException("Cannot load model from file '%s'\n" % model_path)
self.model = model
def parse(self, text):
text = text.strip()
last_character = text.strip()[-1]
if re.match(r"\w", last_character, flags=re.UNICODE):
text += "."
pipeline = Pipeline(
self.model,
"tokenize",
Pipeline.DEFAULT,
Pipeline.DEFAULT,
"conllu"
)
error = ProcessingError()
processed = pipeline.process(text, error)
if error.occurred():
raise ParserException(error.message)
return processed
class ParserException(Exception):
pass
|
Python
| 0.999999
|
@@ -575,24 +575,128 @@
xt.strip()%0A%0A
+ # Adding a period improves detection on especially short sentences%0A period_added = False%0A
last
@@ -725,16 +725,16 @@
p()%5B-1%5D%0A
-
@@ -810,16 +810,48 @@
t += %22.%22
+%0A period_added = True
%0A%0A
@@ -1126,16 +1126,16 @@
rred():%0A
-
@@ -1172,24 +1172,199 @@
r.message)%0A%0A
+ # Remove the period to make sure input corresponds to output%0A if period_added:%0A processed = %22%5Cn%22.join(processed.rstrip().split(%22%5Cn%22)%5B:-1%5D) + %22%5Cn%5Cn%22%0A%0A
retu
|
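Decoded, the parser now remembers whether it appended the period and strips the corresponding final token line from the CoNLL-U output, so the output rows match the user's input exactly:
period_added = False
if re.match(r"\w", last_character, flags=re.UNICODE):
    text += "."
    period_added = True
# ... pipeline runs as before ...
if period_added:
    # CoNLL-U is line-oriented; the appended period is the last token line
    processed = "\n".join(processed.rstrip().split("\n")[:-1]) + "\n\n"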
9a1d7bf0ce54c8a405282686ae8307335321d1d0
|
allow environment to supply paths to dumpbin and nm
|
waflib/extras/syms.py
|
waflib/extras/syms.py
|
#! /usr/bin/env python
# encoding: utf-8
"""
this tool supports the export_symbols_regex to export the symbols in a shared library.
by default, all symbols are exported by gcc, and nothing by msvc.
to use the tool, do something like:
def build(ctx):
ctx(features='c cshlib syms', source='a.c b.c', export_symbols_regex='mylib_.*', target='testlib')
only the symbols starting with 'mylib_' will be exported.
"""
import re
from waflib.Context import STDOUT
from waflib.Task import Task
from waflib.Errors import WafError
from waflib.TaskGen import feature, after_method
class gen_sym(Task):
def run(self):
obj = self.inputs[0]
if 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME):
re_nm = re.compile(r'External\s+\|\s+_(' + self.generator.export_symbols_regex + r')\b')
cmd = ['dumpbin', '/symbols', obj.abspath()]
else:
if self.env.DEST_BINFMT == 'pe': #gcc uses nm, and has a preceding _ on windows
re_nm = re.compile(r'T\s+_(' + self.generator.export_symbols_regex + r')\b')
else:
re_nm = re.compile(r'T\s+(' + self.generator.export_symbols_regex + r')\b')
cmd = ['nm', '-g', obj.abspath()]
syms = re_nm.findall(self.generator.bld.cmd_and_log(cmd, quiet=STDOUT))
self.outputs[0].write('%r' % syms)
class compile_sym(Task):
def run(self):
syms = {}
for x in self.inputs:
slist = eval(x.read())
for s in slist:
syms[s] = 1
lsyms = syms.keys()
lsyms.sort()
if self.env.DEST_BINFMT == 'pe':
self.outputs[0].write('EXPORTS\n' + '\n'.join(lsyms))
elif self.env.DEST_BINFMT == 'elf':
self.outputs[0].write('{ global:\n' + ';\n'.join(lsyms) + ";\nlocal: *; };\n")
else:
raise WafError('NotImplemented')
@feature('syms')
@after_method('process_source', 'process_use', 'apply_link', 'process_uselib_local')
def do_the_symbol_stuff(self):
ins = [x.outputs[0] for x in self.compiled_tasks]
self.gen_sym_tasks = [self.create_task('gen_sym', x, x.change_ext('.%d.sym' % self.idx)) for x in ins]
tsk = self.create_task('compile_sym',
[x.outputs[0] for x in self.gen_sym_tasks],
self.path.find_or_declare(getattr(self, 'sym_filename', self.target + '.def')))
self.link_task.set_run_after(tsk)
self.link_task.dep_nodes = [tsk.outputs[0]]
if 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME):
self.link_task.env.append_value('LINKFLAGS', ['/def:' + tsk.outputs[0].bldpath()])
elif self.env.DEST_BINFMT == 'pe': #gcc on windows takes *.def as an additional input
self.link_task.inputs.append(tsk.outputs[0])
elif self.env.DEST_BINFMT == 'elf':
self.link_task.env.append_value('LINKFLAGS', ['-Wl,-version-script', '-Wl,' + tsk.outputs[0].bldpath()])
else:
raise WafError('NotImplemented')
|
Python
| 0
|
@@ -409,16 +409,26 @@
d.%0A%22%22%22%0A%0A
+import os%0A
import r
@@ -429,16 +429,16 @@
port re%0A
-
from waf
@@ -637,16 +637,26 @@
puts%5B0%5D%0A
+%09%09kw = %7B%7D%0A
%09%09if 'ms
@@ -785,32 +785,103 @@
regex + r')%5Cb')%0A
+%09%09%09if 'DUMPBIN' in self.env:%0A%09%09%09%09cmd = %5Bself.env%5B'DUMPBIN'%5D%5D%0A%09%09%09else:%0A%09
%09%09%09cmd = %5B'dumpb
@@ -883,18 +883,29 @@
dumpbin'
-,
+%5D%0A%09%09%09cmd += %5B
'/symbol
@@ -923,16 +923,234 @@
path()%5D%0A
+%0A%09%09%09# Dumpbin requires custom environment sniffed out by msvc.py earlier%0A%09%09%09if self.env%5B'PATH'%5D:%0A%09%09%09%09env = dict(self.env.env or os.environ)%0A%09%09%09%09env.update(PATH = os.pathsep.join(self.env%5B'PATH'%5D))%0A%09%09%09%09kw%5B'env'%5D = env%0A%0A
%09%09else:%0A
@@ -1409,21 +1409,91 @@
%0A%09%09%09
-cmd = %5B'nm',
+if 'NM' in self.env:%0A%09%09%09%09cmd = %5Bself.env%5B'NM'%5D%5D%0A%09%09%09else:%0A%09%09%09%09cmd = 'nm'%0A%09%09%09cmd += %5B
'-g'
@@ -1580,16 +1580,22 @@
t=STDOUT
+, **kw
))%0A%09%09sel
|
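Decoded, the task now honors `DUMPBIN`/`NM` overrides from the configured environment and, for dumpbin, forwards the msvc-sniffed `PATH` to `cmd_and_log` via a `kw['env']` dict. A reconstruction of the selection logic; note the raw hunk actually writes `cmd = 'nm'` without brackets in the fallback branch, which would raise TypeError on the following `cmd += [...]`, so `['nm']` below is an assumed correction:
kw = {}
if 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME):
    cmd = [self.env['DUMPBIN']] if 'DUMPBIN' in self.env else ['dumpbin']
    cmd += ['/symbols', obj.abspath()]
    # dumpbin requires the custom environment sniffed out by msvc.py earlier
    if self.env['PATH']:
        env = dict(self.env.env or os.environ)
        env.update(PATH=os.pathsep.join(self.env['PATH']))
        kw['env'] = env
else:
    cmd = [self.env['NM']] if 'NM' in self.env else ['nm']
    cmd += ['-g', obj.abspath()]
syms = re_nm.findall(self.generator.bld.cmd_and_log(cmd, quiet=STDOUT, **kw))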
ccef871b45f78845a12c3209b463e861244a107e
|
Fix the moin parser.
|
external/moin-parser.py
|
external/moin-parser.py
|
# -*- coding: utf-8 -*-
"""
The Pygments MoinMoin Parser
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is a MoinMoin parser plugin that renders source code to HTML via
Pygments; you need Pygments 0.7 or newer for this parser to work.
To use it, set the options below to match your setup and put this file in
the data/plugin/parser subdirectory of your Moin instance, and give it the
name that the parser directive should have. For example, if you name the
file ``code.py``, you can get a highlighted Python code sample with this
Wiki markup::
{{{
#!code python
[...]
}}}
Additionally, if you set ATTACHMENTS below to True, Pygments will also be
called for all attachments for whose filenames there is no other parser
registered.
You are responsible for including CSS rules that will map the Pygments CSS
classes to colors. You can output a stylesheet file with `pygmentize`, put
it into the `htdocs` directory of your Moin instance and then include it in
the `stylesheets` configuration option in the Moin config, e.g.::
stylesheets = [('screen', '/htdocs/pygments.css')]
If you do not want to do that and are willing to accept larger HTML
output, you can set the INLINESTYLES option below to True.
:copyright: 2007 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
# Options
# ~~~~~~~
# Set to True if you want to highlight attachments, in addition to
# {{{ }}} blocks.
ATTACHMENTS = True
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
import sys
from pygments import highlight
from pygments.lexers import get_lexer_by_name, get_lexer_for_filename, TextLexer
from pygments.formatters import HtmlFormatter
from pygments.util import ObjectNotFound
# wrap lines in <span>s so that the Moin-generated line numbers work
class MoinHtmlFormatter(HtmlFormatter):
def wrap(self, source, outfile):
for line in source:
yield 1, '<span class="line">' + line[1] + '</span>'
htmlformatter = MoinHtmlFormatter(noclasses=INLINESTYLES)
textlexer = TextLexer()
codeid = [0]
class Parser:
"""
MoinMoin Pygments parser.
"""
if ATTACHMENTS:
extensions = '*'
else:
extensions = []
Dependencies = []
def __init__(self, raw, request, **kw):
self.raw = raw
self.req = request
if "format_args" in kw:
# called from a {{{ }}} block
try:
self.lexer = get_lexer_by_name(kw['format_args'].strip())
except ObjectNotFound:
self.lexer = textlexer
return
if "filename" in kw:
# called for an attachment
filename = kw['filename']
else:
# called for an attachment by an older moin
# HACK: find out the filename by peeking into the execution
# frame which might not always work
try:
frame = sys._getframe(1)
filename = frame.f_locals['filename']
except:
filename = 'x.txt'
try:
self.lexer = get_lexer_for_filename(filename)
except ObjectNotFound:
self.lexer = textlexer
def format(self, formatter):
codeid[0] += 1
id = "pygments_%s" % codeid[0]
w = self.req.write
w(formatter.code_area(1, id, start=1, step=1))
w(formatter.rawHTML(highlight(self.raw, self.lexer, htmlformatter)))
w(formatter.code_area(0, id))
|
Python
| 0.000001
|
@@ -1800,22 +1800,21 @@
import
-Object
+Class
NotFound
@@ -2586,38 +2586,37 @@
except
-Object
+Class
NotFound:%0A
@@ -3226,14 +3226,13 @@
ept
-Object
+Class
NotF
|
dffcfa42fbf4f200a22b739a0cd24f36317b054c
|
Fix so that /api/login/ follow the specified api documentation.
|
api/userview.py
|
api/userview.py
|
from flask import abort, request, jsonify, make_response, session
from datetime import datetime, timedelta
from api import app
from api.user import *
@require_csrf_token
@app.route('/api/signup/', methods = ['POST'])
def api_user_signup():
generate_csrf_token(session)
status = {}
httpcode = 200
if 'email' in request.json and 'password' in request.json:
if register_user(request.json['email'], request.json['password']):
status['code'] = 0
status['message'] = 'Success'
else:
status['code'] = 1
status['message'] = 'Could not register user, maybe user already exists?'
else:
status['code'] = 2
status['message'] = 'Missing paramter(s)'
httpcode = 400
return make_response(jsonify({ 'csrf_token': session['csrf'], 'status': status }), httpcode)
@require_csrf_token
@app.route('/api/login/', methods = ['POST'])
def api_user_login():
if 'email' in request.json and 'password' in request.json:
id = check_user_credentials(request.json['email'], request.json['password'])
if id is not None:
session = app.open_session(request)
session['id'] = id
session['loggedin'] = True
response = make_response(jsonify({ 'status': 'OK', 'message': 'User logged in successfully'}), 200)
app.save_session(session, response)
else:
response = make_response(jsonify({ 'status': 'FAIL', 'message': 'Email and password combination did not match'}), 200)
return response
return make_response(jsonify({ 'status': 'BAD REQUEST', 'message': 'Missing parameters'}), 400)
@require_csrf_token
@require_authentication
@app.route('/api/logout/', methods = ['POST'])
def api_user_logout():
session.destroy()
response = make_response(jsonify({ 'status': 'OK', 'message': 'User logged out successfully'}), 200)
return response
@app.route('/api/')
def api_root():
generate_csrf_token(session)
status = {'code': 0, 'message': 'Sucess'}
response = make_response(jsonify({'csrf_token': session['csrf'], 'status': status}), 200)
return response
|
Python
| 0
|
@@ -940,24 +940,93 @@
er_login():%0A
+ generate_csrf_token(session)%0A status = %7B%7D%0A httpcode = 200%0A%0A
if 'emai
@@ -1192,56 +1192,8 @@
ne:%0A
- session = app.open_session(request)%0A
@@ -1274,236 +1274,133 @@
-response = make_response(jsonify(%7B 'status': 'OK', 'message': 'User logged in successfully'%7D), 200)%0A app.save_session(session, response)%0A else:%0A response = make_response(jsonify(%7B 'status': 'FAIL',
+status%5B'code'%5D = 0%0A status%5B'message'%5D = 'Success'%0A else:%0A status%5B'code'%5D = 4%0A status%5B
'mes
@@ -1404,17 +1404,19 @@
message'
-:
+%5D =
'Email
@@ -1458,139 +1458,215 @@
tch'
-%7D), 200)%0A return response%0A return make_response(jsonify(%7B 'status': 'BAD REQUEST', 'message': 'Missing parameters'%7D), 400
+%0A else:%0A status%5B'code'%5D = 2%0A status%5B'message'%5D = 'Missing paramter(s)'%0A httpcode = 400%0A%0A return make_response(jsonify(%7B 'csrf_token': session%5B'csrf'%5D, 'status': status %7D), httpcode
)%0A%0A@
|
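Decoded, the login view is reshaped to mirror the signup view's envelope, always returning a `csrf_token` plus a `status` code/message pair instead of ad-hoc OK/FAIL bodies (a reconstruction; the 'paramter(s)' spelling is carried over from the signup view):
def api_user_login():
    generate_csrf_token(session)
    status = {}
    httpcode = 200

    if 'email' in request.json and 'password' in request.json:
        id = check_user_credentials(request.json['email'], request.json['password'])
        if id is not None:
            session['id'] = id
            session['loggedin'] = True
            status['code'] = 0
            status['message'] = 'Success'
        else:
            status['code'] = 4
            status['message'] = 'Email and password combination did not match'
    else:
        status['code'] = 2
        status['message'] = 'Missing paramter(s)'
        httpcode = 400

    return make_response(jsonify({ 'csrf_token': session['csrf'], 'status': status }), httpcode)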
f8111a0219eb4a5bd4fdc47ddffa2a77b51c9a10
|
Make code compatible with Python 3
|
extract_id_title_url.py
|
extract_id_title_url.py
|
#!/usr/bin/env python2.7
# encoding: utf-8
'''
extract_id_title.py
Created by Hallvord R. M. Steen on 2014-10-25.
Modified by Karl
Mozilla Public License, version 2.0
see LICENSE
Dumps data from webcompat.com bug tracker
by default creates one CSV file (webcompatdata.csv)
and one JSON file (webcompatdata-bzlike.json)
the JSON file uses many of the field names Bugzilla uses in its export,
so the output from this script can be used where Bugzilla data is expected
'''
import json
import re
import socket
import sys
import urllib2
# Config
URL_REPO = "https://api.github.com/repos/webcompat/web-bugs"
VERBOSE = True
# Seconds. Loading searches can be slow
socket.setdefaulttimeout(240)
def get_remote_file(url, req_json=False):
print('Getting ' + url)
req = urllib2.Request(url)
req.add_header('User-agent', 'AreWeCompatibleYetBot')
if req_json:
req.add_header('Accept', 'application/vnd.github.v3+json')
bzresponse = urllib2.urlopen(req, timeout=240)
return {"headers": bzresponse.info(),
"data": json.loads(bzresponse.read().decode('utf8'))}
def extract_url(issue_body):
'''Extract the URL for an issue from WebCompat.
URL in webcompat.com bugs follow this pattern:
**URL**: https://example.com/foobar
'''
url_pattern = re.compile('\*\*URL\*\*\: (.*)\n')
url_match = re.search(url_pattern, issue_body)
if url_match:
url = url_match.group(1).strip()
if not url.startswith(('http://', 'https://')):
url = "http://%s" % url
else:
url = ""
return url.encode('utf-8')
def extract_data(json_data, results_csv, results_bzlike):
resolution_labels = ["duplicate", "invalid", "wontfix", "fixed",
"worksforme"]
whiteboard_labels = ["needsinfo", "contactready", "sitewait",
"needscontact", "needsdiagnosis"]
for issue in json_data["data"]:
# Extracting data
body = issue["body"]
url = extract_url(body)
bug_id = issue["number"]
link = 'https://webcompat.com/issues/%s' % bug_id
issue_title = issue["title"].encode('utf-8').strip()
if VERBOSE:
print('Issue %s: %s' % (bug_id, issue_title))
creation_time = issue['created_at'].encode('utf-8')
last_change_time = issue['updated_at'].encode('utf-8')
issue_state = issue['state'].encode('utf-8')
cf_last_resolved = issue['closed_at']
if issue_state == 'open':
status = 'OPEN'
else:
status = 'RESOLVED'
# Extracting the labels
labels_list = [label['name'] for label in issue['labels']]
# areWEcompatibleyet is only about mozilla bugs
if any(('firefox' in label or 'mozilla' in label) for label in labels_list):
# Defining the OS
if any(['mobile' in label for label in labels_list]):
op_sys = 'Gonk (Firefox OS)'
elif any(['android' in label for label in labels_list]):
op_sys = 'Android'
else:
op_sys = ''
# Did the bug had a resolution?
resolution = ''
resolution_set = set(labels_list).intersection(resolution_labels)
if resolution_set:
resolution = resolution_set.pop().upper()
# Gathering Whiteboard keys
whiteboard = ''.join(['[%s] ' % label for label in labels_list
if label in whiteboard_labels])
# creating CSV file
if issue_title:
results_csv.append("%i\t%s\t%s\t%s" % (
bug_id, issue_title, url, link))
# Creating dictionary
bzlike = {"id": bug_id,
"summary": issue_title,
"url": url,
"whiteboard": whiteboard,
"op_sys": op_sys,
"creation_time": creation_time,
"last_change_time": last_change_time,
"status": status,
"cf_last_resolved": cf_last_resolved,
"resolution": resolution,
"body": body
}
results_bzlike.append(bzlike)
def extract_next_link(link_hdr):
'''Given a HTTP Link header, extract the "next" link.
Link header has the pattern:
'<https://example.com/foobar?page=2>; rel="next",
<https://example.com/foobar?page=100>; rel="last"'
We need:
https://example.com/foobar?page=2
When no more "next", we return an empty string.
'''
next_link = ''
links = link_hdr.split(',')
for link in links:
link_only, rel = link.split(';')
if 'next' in rel:
next_link = link_only.strip(' <>')
break
return next_link
def get_webcompat_data(url_repo=URL_REPO):
'''Extract Issues data from github repo.
Start with the first page and follow hypermedia links to explore the rest.
'''
next_link = '%s/issues?per_page=100&page=1' % (url_repo)
results = []
bzresults = []
while next_link:
response_data = get_remote_file(next_link, True)
extract_data(response_data, results, bzresults)
next_link = extract_next_link(response_data["headers"]["link"])
return results, {"bugs": bzresults}
def main():
results, bzresults = get_webcompat_data(URL_REPO)
# webcompatdata.csv
with open('webcompatdata.csv', 'w') as f:
f.write("\n".join(results).encode('utf8'))
f.write('\n')
print("Wrote {} items to webcompatdata.csv ".format(len(results)))
# webcompatdata-bzlike.json
with open('webcompatdata-bzlike.json', 'w') as f:
f.write(json.dumps(bzresults, indent=4).encode('utf8'))
print("Wrote {} items to webcompatdata-bzlike.json".format(
len(bzresults['bugs'])))
if __name__ == "__main__":
sys.exit(main())
|
Python
| 0.00021
|
@@ -517,22 +517,121 @@
sys%0A
-import urllib2
+try:%0A from urllib.request import urlopen, Request%0Aexcept ImportError:%0A from urllib2 import urlopen, Request
%0A%0A#
@@ -865,24 +865,16 @@
req =
-urllib2.
Request(
@@ -1016,24 +1016,24 @@
b.v3+json')%0A
+
bzrespon
@@ -1041,16 +1041,8 @@
e =
-urllib2.
urlo
|
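Decoded, the compatibility shim is the standard try/except import dance; call sites then drop the `urllib2.` prefix:
try:
    from urllib.request import urlopen, Request
except ImportError:
    from urllib2 import urlopen, Request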
b76b67cf72e001dd4b41677d0dc48c90d4602000
|
Add some comments
|
app/app/views.py
|
app/app/views.py
|
import os
import sys
sys.path.append('landsat-util/landsat')
import subprocess
import boto
from boto.s3.key import Key
from pyramid.view import view_config
from homura import download
from image import Process
from pyramid.httpexceptions import HTTPFound
from shutil import rmtree
from models import Rendered_Model
import os.path
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
# path of world(geodata) file
direc_world = '{}/world.tfw'.format(os.getcwd())
def delete_directory(direc):
# delete files
try:
if os.path.exists(direc):
rmtree(direc)
except OSError:
pass
# raise Exception('error deleting files')
def process_image(direc, scene, root, path, row, b1, b2, b3):
'''Method to process image'''
direc_scene = '{direc}/{scene}'.format(direc=direc, scene=scene)
direc_scene_scene = '{direc}/{sc}/{sc}'.format(direc=direc, sc=scene)
band_list = [b1, b2, b3]
o_list = []
# Builds a string pointing towards the AWS Landsat datasets
for b in band_list:
o_list.append('{root}{path}/{row}/{scene}/{scene}_B{band}.TIF.ovr'.
format(root=root, path=path, row=row, scene=scene, band=b))
# Create a subdirectory
if not os.path.exists(direc_scene):
os.makedirs(direc_scene)
try:
# Download previews from AWS
for i in o_list:
download(url=i, path=direc_scene)
except:
out = u'https://raw.githubusercontent.com/recombinators/little-worker/master/failimages/faileddownload.png'
# return out
raise Exception('Download failed')
print 'done downloading previews from aws'
# Apply the stripped world file to the band previews.
for b in band_list:
file_name = '{}/B{}-geo.TIF'.format(direc_scene, b)
subprocess.call(['geotifcp', '-e', direc_world,
'{direc}/{scene}_B{band}.TIF.ovr'.format(
direc=direc_scene, scene=scene, band=b),
file_name])
print 'done applying world file to previews'
# Resize each band
# subprocess.call(['mkdir', direc + '/ready'])
for b in band_list:
file_name = '{}/B{}-geo.TIF'.format(direc_scene, b)
file_name2 = '{}_B{}.TIF'.format(direc_scene_scene, b)
subprocess.call(['gdal_translate', '-outsize', '15%', '15%',
file_name, file_name2])
if not os.path.exists(file_name2):
out = u'https://raw.githubusercontent.com/recombinators/little-worker/master/failimages/badmagicnumber.png'
# return out
raise Exception('Bad magic number')
print 'done resizing 3 images'
# Call landsat-util to merge images
t = direc + '/' + scene
try:
processor = Process(t, [b1, b2, b3], direc, verbose=True)
processor.run(pansharpen=False)
except:
out = u'https://raw.githubusercontent.com/recombinators/little-worker/master/failimages/processfailed.png'
# return out
raise Exception('Processing/landsat-util failed')
# Convert black to transparent and save as PNG
file_in = '{}_bands_{}{}{}.TIF'.format(direc_scene_scene, b1, b2, b3)
subprocess.call(['convert', '-transparent', 'black',
file_in, direc_scene + '/final.png'])
# check if final.png exists
if not os.path.isfile('{}/final.png'.format(direc_scene)):
out = u'https://raw.githubusercontent.com/recombinators/little-worker/master/failimages/finalpngnotcomposed.png'
# return out
raise Exception('Final.png not rendered')
# upload to s3
try:
conne = boto.connect_s3(aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
b = conne.get_bucket('landsatproject')
k = Key(b)
k.key = scene + b1 + b2 + b3 + '.png'
k.set_contents_from_filename(direc_scene + '/final.png')
k.get_contents_to_filename(direc_scene + '/final.png')
hello = b.get_key(scene + b1 + b2 + b3 + '.png')
# make public
hello.set_canned_acl('public-read')
out = hello.generate_url(0, query_auth=False, force_http=True)
except:
out = u'https://raw.githubusercontent.com/recombinators/little-worker/master/failimages/connectiontoS3failed.png'
# return out
raise Exception('S3 upload failed')
# store url in db
Rendered_Model.update_p_url(scene, b1, b2, b3, out)
# delete files
delete_directory(direc)
return out
@view_config(route_name='home', renderer='json')
def my_view(request):
"""A view for rendering or retreiving an image on demand."""
direc = os.getcwd() + '/scenes'
scene = request.matchdict['id']
root = 'http://landsat-pds.s3.amazonaws.com/L8/'
path = scene[3:6]
row = scene[6:9]
b1 = request.matchdict['b1']
b2 = request.matchdict['b2']
b3 = request.matchdict['b3']
# Check if image already exists.
out = Rendered_Model.preview_available(scene, b1, b2, b3)
if not out:
try:
out = process_image(direc, scene, root, path, row, b1, b2, b3)
except:
# delete files
delete_directory(direc)
out = "https://s3-us-west-2.amazonaws.com/landsat-pds/L8/{path}/{row}/{scene}/{scene}_thumb_large.jpg".format(
path=path, row=row, scene=scene)
# out = u'https://raw.githubusercontent.com/recombinators/little-worker/master/failimages/errordeletingfiles.png'
return HTTPFound(location=out)
|
Python
| 0
|
@@ -807,16 +807,24 @@
process
+preview
image'''
@@ -5231,32 +5231,105 @@
except:%0A
+ # If error in processing image, render aws image as default.%0A
# de
|
bfb4ba8cb863d80cdd558ebad25f630fef5dc190
|
Stop to use the __future__ module.
|
oslo_middleware/debug.py
|
oslo_middleware/debug.py
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Debug middleware"""
from __future__ import print_function
import sys
import webob.dec
from oslo_middleware import base
class Debug(base.ConfigurableMiddleware):
"""Helper class that returns debug information.
Can be inserted into any WSGI application chain to get information about
the request and response.
"""
@webob.dec.wsgify
def __call__(self, req):
print(("*" * 40) + " REQUEST ENVIRON")
for key, value in req.environ.items():
print(key, "=", value)
print()
resp = req.get_response(self.application)
print(("*" * 40) + " RESPONSE HEADERS")
for (key, value) in resp.headers.items():
print(key, "=", value)
print()
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""Prints the contents of a wrapper string iterator when iterated."""
print(("*" * 40) + " BODY")
for part in app_iter:
sys.stdout.write(part)
sys.stdout.flush()
yield part
print()
|
Python
| 0.999929
|
@@ -658,47 +658,8 @@
%22%22%0A%0A
-from __future__ import print_function%0A%0A
impo
|
2e8a2d2ac8b90a0806bea90c25d9b06ce8cc3a96
|
check roi for each layer
|
dicom_tools/myroi2roi.py
|
dicom_tools/myroi2roi.py
|
import numpy as np
from skimage.measure import grid_points_in_poly
from dicom_tools.roiFileHandler import roiFileHandler
def myroi2roi(myrois, shape, verbose=False):
if verbose:
print("myroi2roi: called \n")
outroi = np.full(shape,False,dtype=bool)
if len(myrois) != len(outroi):
print("error: len rois = ",len(rois)," but len dicom=",len(outroi))
for myroi, layer in zip(myrois,outroi):
layer = grid_points_in_poly(layer.shape, myroi['points'])
if verbose:
print("myroi2roi: returning \n")
return outroi
|
Python
| 0
|
@@ -408,32 +408,66 @@
myrois,outroi):%0A
+ if not myroi is None:%0A
layer =
|
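Decoded, the commit only adds a None guard inside the loop. A sketch with one hedged deviation: rebinding the loop variable with `layer = grid_points_in_poly(...)` never writes back into `outroi`, so the sketch uses an in-place `layer[:] = ...` (an assumption about intent, not what the patch writes):
for myroi, layer in zip(myrois, outroi):
    if myroi is not None:  # guard added by the commit (written there as 'if not myroi is None:')
        layer[:] = grid_points_in_poly(layer.shape, myroi['points'])  # '[:]' assumed; plain '=' would not touch outroi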
d671acb2c8a381fa49e98c50d967122738ebbd7b
|
Remove extra slash problem
|
app/comicbook.py
|
app/comicbook.py
|
import os
import sys
from natsort import natsorted
class comicbook(object):
filelist = []
def __init__(self, name, filename=None):
self.name = name
self.path = name + "/"
self.localpath = "res/" + name + "/"
self.filename = filename
self.generate_filelist()
if self.filename is None or not self.filename:
self.filename = self.filelist[0]
self.path.replace('//','/')
def generate_filelist(self):
x, self.dirlist, self.filelist = os.walk(self.localpath).next()
#Filter out system files
self.filelist = [ v for v in self.filelist if not v.startswith('.') ]
self.filelist = [ v for v in self.filelist if not v.startswith('thumbs.db') ]
self.filelist = [ v for v in self.filelist if not v.startswith('desktop.ini') ]
self.filelist = [ v for v in self.filelist if not v.endswith('.txt') ]
self.filelist = [ v for v in self.filelist if not v.startswith('README') ]
def thumbnail_path(self):
try:
return self.filelist[0]
except IndexError:
return None
def thumbnail_mimetype(self):
return 'image/jpeg'
def get_prev_image(self):
try:
idx = self.current_image() - 1
if idx >= 0:
return os.path.join('/', self.name, self.filelist[idx])
else:
return os.path.join('/', self.name)
except IndexError:
print "get_prev_image - IndexError"
return os.path.join('/', self.name)
def get_next_image(self):
try:
idx = self.current_image() + 1
if idx < len(self.filelist):
return os.path.join('/', self.name, self.filelist[idx])
else:
return os.path.join('/', self.name, '..')
return
except IndexError:
return os.path.join('/', self.name, '..')
def get_image(self):
try:
return os.path.join('/', self.name, self.filename, 'img')
except IndexError:
return None
def current_image(self):
return self.filelist.index(self.filename)
|
Python
| 0.000009
|
@@ -179,16 +179,17 @@
.path =
+(
name + %22
@@ -182,32 +182,51 @@
th = (name + %22/%22
+).replace('//','/')
%0A self.lo
@@ -424,52 +424,8 @@
t%5B0%5D
- %0A self.path.replace('//','/')
%0A%0A
|
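The fix works because str.replace returns a new string rather than mutating in place, so the original self.path.replace('//', '/') silently discarded its result. Normalizing once at construction keeps the cleaned value:

name = "comics/"                        # caller already supplied a trailing slash
path = (name + "/").replace('//', '/')  # replace() returns a new string; keep it
assert path == "comics/"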
0b3de872d0078ef7f32fc64ed6b110f65c7f3983
|
Add Release#upload_asset to create asset files
|
github3/repos/release.py
|
github3/repos/release.py
|
import json
from github3.decorators import requires_auth
from github3.models import GitHubCore
from uritemplate import URITemplate
class Release(GitHubCore):
"""The :class:`Release <Release>` object.
It holds the information GitHub returns about a release from a
:class:`Repository <github3.repos.repo.Repository>`.
"""
CUSTOM_HEADERS = {'Accept': 'application/vnd.github.manifold-preview'}
def __init__(self, release, session=None):
super(Release, self).__init__(release, session)
#: URL for uploaded assets
self.assets_url = release.get('assets_url')
#: Body of the release (the description)
self.body = release.get('body')
#: Date the release was created
self.created_at = self._strptime(release.get('created_at'))
        #: Boolean whether the release is a draft
self.draft = release.get('draft')
#: HTML URL of the release
self.html_url = release.get('html_url')
#: GitHub id
self.id = release.get('id')
#: Name given to the release
self.name = release.get('name')
        #: Boolean whether the release is a prerelease
self.prerelease = release.get('prerelease')
#: Date the release was published
self.published_at = release.get('published_at')
#: Name of the tag
self.tag_name = release.get('tag_name')
#: "Commit" that this release targets
self.target_commitish = release.get('target_commitish')
upload_url = release.get('upload_url')
#: URITemplate to upload an asset with
self.upload_urlt = URITemplate(upload_url) if upload_url else None
def __repr__(self):
return '<Release [{0}]>'.format(self.name)
def asset(self, id):
"""Returns a single Asset.
:param int id: (required), id of the asset
:returns: :class:`Asset <Asset>`
"""
data = None
if int(id) > 0:
url = self._build_url(str(id), base_url=self._api)
data = self._json(self._get(url, headers=Release.CUSTOM_HEADERS),
200)
return Asset(data, self) if data else None
@requires_auth
def delete(self):
"""Users with push access to the repository can delete a release.
:returns: True if successful; False if not successful
"""
url = self._api
return self._boolean(
self._delete(url, headers=Release.CUSTOM_HEADERS),
204,
404
)
@requires_auth
def edit(self, tag_name=None, target_commitish=None, name=None, body=None,
draft=None, prerelease=None):
"""Users with push access to the repository can edit a release.
If the edit is successful, this object will update itself.
:param str tag_name: (optional), Name of the tag to use
:param str target_commitish: (optional), The "commitish" value that
determines where the Git tag is created from. Defaults to the
repository's default branch.
:param str name: (optional), Name of the release
:param str body: (optional), Description of the release
:param boolean draft: (optional), True => Release is a draft
:param boolean prerelease: (optional), True => Release is a prerelease
:returns: True if successful; False if not successful
"""
url = self._api
data = {
'tag_name': tag_name,
'target_commitish': target_commitish,
'name': name,
'body': body,
'draft': draft,
'prerelease': prerelease,
}
self._remove_none(data)
r = self._session.patch(
url, data=json.dumps(data), headers=Release.CUSTOM_HEADERS
)
successful = self._boolean(r, 200, 404)
if successful:
# If the edit was successful, let's update the object.
self.__init__(r.json())
return successful
def iter_assets(self, number=-1, etag=None):
"""Iterate over the assets available for this release.
:param int number: (optional), Number of assets to return
:param str etag: (optional), last ETag header sent
:returns: generator of :class:`Asset <Asset>` objects
"""
        url = self._build_url('assets', base_url=self._api)
return self._iter(number, url, Asset, etag=etag)
class Asset(GitHubCore):
def __init__(self, asset, session=None):
super(Asset, self).__init__(asset, session)
#: Content-Type provided when the asset was created
self.content_type = asset.get('content_type')
#: Date the asset was created
self.created_at = self._strptime(asset.get('created_at'))
#: Number of times the asset was downloaded
self.download_count = asset.get('download_count')
#: GitHub id of the asset
self.id = asset.get('id')
#: Short description of the asset
self.label = asset.get('label')
#: Name of the asset
self.name = asset.get('name')
#: Size of the asset
self.size = asset.get('size')
#: State of the asset, e.g., "uploaded"
self.state = asset.get('state')
#: Date the asset was updated
self.updated_at = self._strptime(asset.get('updated_at'))
|
Python
| 0
|
@@ -4440,16 +4440,751 @@
=etag)%0A%0A
+ @requires_auth%0A def upload_asset(self, content_type, name, asset):%0A %22%22%22Upload an asset to this release.%0A%0A All parameters are required.%0A%0A :param str content_type: The content type of the asset. Wikipedia has%0A a list of common media types%0A :param str name: The name of the file%0A :param asset: The file or bytes object to upload.%0A :returns: :class:%60Asset %3CAsset%3E%60%0A %22%22%22%0A headers = Release.CUSTOM_HEADERS.copy()%0A headers.update(%7B'Content-Type': content_type%7D)%0A url = self.upload_urlt.expand(%7B'name': name%7D)%0A data = self._json(%0A self._post(url, data=asset, headers=headers),%0A 201%0A )%0A return Asset(data, self)%0A%0A
%0Aclass A
|
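A hedged sketch of the upload_asset method the diff introduces. The token, repository and file names are placeholders, and iter_releases is assumed to be the paginator this era of github3.py exposed:

import github3

gh = github3.login(token='<token>')              # placeholder credentials
repo = gh.repository('someowner', 'somerepo')    # placeholder repository
release = next(repo.iter_releases())             # pick any existing release
with open('dist/app.tar.gz', 'rb') as fd:
    asset = release.upload_asset('application/gzip', 'app.tar.gz', fd)
print(asset.name, asset.state)                   # e.g. app.tar.gz uploaded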
dd583dce4124bea13a1f0bd1d037acda0344c2eb
|
Remove old db-related code
|
ppbot.py
|
ppbot.py
|
"""ppbot.py
A modular Python bot that uses (or will eventually use) PostgreSQL as a data source.
TODO: Lots
"""
import traceback
from optparse import OptionParser
import irc.client
import gevent
from gevent import monkey; monkey.patch_all()
from gevent import wsgi
from handlers.modulehandler import ModuleHandler
from handlers.eventhandler import EventHandler
from models.configuration import Configuration
from models.network import Network
from models.server import Server
from models.channel import Channel
from http.core import app as httpcore
from db import Db
from config import BotConfig
class ppbot(object):
def __init__ (self):
"""Create an IRC object and do some initializations.
Need to set handlers for events that may occur so that modules will be able to
use them.
"""
self.ircloop_timeout = 0.5
self.irc = irc.client.IRC()
self.servers = []
        # initialize the database
self.engine = Db.engine
self.session = Db.session
self.session.expire_on_commit = False
# load configuration
self.config = Configuration()
self.config.session_start()
# initialize the module handler
self.module_handler = ModuleHandler(self.servers, self.irc, httpcore=httpcore)
# initialize the event handler
self.event_handler = EventHandler(self.servers)
self.event_handler.module_handler = self.module_handler
# send all events to the event handler dispatcher
self.irc.add_global_handler('all_events', self.event_handler.dispatcher)
# load the default modules and auto-run modules
self.load_modules()
def connect(self):
""" Create a server object, connect and join the channel. """
networks = Network().val()
for network in networks:
# connect to the server
server = self.irc.server()
self.servers.append(server)
server.server_config = network
# TODO: should be using a queue for the servers to go through,
# since it could be possible to have more than one server to
# try to connect to.
server_config = network.servers[0]
try:
server.connect(server_config.address, server_config.port, server_config.nickname, server_config.password, ircname=server_config.realname)
except irclib.ServerConnectionError, e:
print "<<Error>> Couldn't connect to %s:%s" % (server_config.address, server_config.port)
# jump into an infinite loop
jobs = [gevent.spawn(self.irc.process_forever)]
gevent.joinall(jobs)
def _run(self):
self.irc.process_once(self.ircloop_timeout)
gevent.sleep(self.ircloop_timeout)
def load_modules(self):
""" for now we will manually load modules, but this will eventually
call the database for what modules to auto-load
"""
core = self.module_handler.load('Core')
core.module_handler = self.module_handler
self.module_handler.load('Irc')
self.module_handler.load('Weather')
self.module_handler.load('Stock')
self.module_handler.load('Urldupe')
self.module_handler.load('Urbandictionary')
self.module_handler.load('Rottentomatoes')
self.module_handler.load('Karmamod')
self.module_handler.load('Yelp')
self.module_handler.load('Quotemod')
self.module_handler.load('Search')
self.module_handler.load('Isup')
self.module_handler.load('Notemod')
self.module_handler.load('Urlparser')
self.module_handler.load('Github')
if __name__ == "__main__":
parser = OptionParser()
parser.add_option('-i', '--init-db', dest='initdb', action='store_true',
help='Initialize the database.')
# parser.add_option('-c', '--config', dest='config_file', action='store',
# type='string', default='ppbot.cfg', help='Initialize the database.')
(options, args) = parser.parse_args()
if options.initdb:
Db().init_db()
irc.DEBUG = settings.DEBUG
server = wsgi.WSGIServer(('', 8088), httpcore)
bot = ppbot()
gevent.joinall([gevent.spawn(bot.connect), gevent.spawn(server.serve_forever)])
|
Python
| 0.000069
|
@@ -351,16 +351,17 @@
andler%0A%0A
+#
from mod
@@ -399,16 +399,17 @@
uration%0A
+#
from mod
@@ -435,16 +435,17 @@
Network%0A
+#
from mod
@@ -469,16 +469,17 @@
Server%0A
+#
from mod
@@ -564,40 +564,27 @@
ort
-Db%0A%0Afrom config import BotConfig
+db%0Aimport settings%0A
%0A%0Acl
@@ -910,154 +910,8 @@
%5B%5D%0A%0A
- # initialize the databse%0A self.engine = Db.engine%0A self.session = Db.session%0A self.session.expire_on_commit = False%0A%0A
@@ -935,32 +935,33 @@
uration%0A
+#
self.config = Co
@@ -978,24 +978,25 @@
n()%0A
+#
self.config.
@@ -2273,11 +2273,8 @@
irc
-lib
.Ser
|
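Aside from the removed database setup, the bot's core concurrency pattern is worth isolating: monkey-patch first, then run the IRC loop and the WSGI server as cooperating greenlets. A minimal stand-alone sketch with trivial stand-in loops:

from gevent import monkey; monkey.patch_all()  # patch before other imports

import gevent

def irc_loop():
    while True:
        gevent.sleep(0.5)   # stand-in for irc.process_once(timeout)

def web_loop():
    while True:
        gevent.sleep(1.0)   # stand-in for wsgi.WSGIServer.serve_forever()

# joinall blocks until both greenlets finish (here: forever, like the bot).
gevent.joinall([gevent.spawn(irc_loop), gevent.spawn(web_loop)])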
29754db3313c6a41a2d12677afd2f4abd55284f4
|
Clarify use of imperial.enable()
|
astropy/units/imperial.py
|
astropy/units/imperial.py
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines colloquially used Imperial units. By default, they
are not enabled. To enable them, do::
>>> from astropy.units import imperial
>>> imperial.enable() # doctest: +SKIP
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .core import UnitBase, def_unit
from . import si
_ns = globals()
###########################################################################
# LENGTH
def_unit(['inch'], 2.54 * si.cm, namespace=_ns,
doc="International inch")
def_unit(['ft', 'foot'], 12 * inch, namespace=_ns,
doc="International foot")
def_unit(['yd', 'yard'], 3 * ft, namespace=_ns,
doc="International yard")
def_unit(['mi', 'mile'], 5280 * ft, namespace=_ns,
doc="International mile")
def_unit(['mil', 'thou'], 0.001 * inch, namespace=_ns,
doc="Thousandth of an inch")
def_unit(['nmi', 'nauticalmile', 'NM'], 1852 * si.m, namespace=_ns,
doc="Nautical mile")
def_unit(['fur', 'furlong'], 660 * ft, namespace=_ns,
doc="Furlong")
###########################################################################
# AREAS
def_unit(['ac', 'acre'], 43560 * ft ** 2, namespace=_ns,
doc="International acre")
###########################################################################
# VOLUMES
def_unit(['gallon'], si.liter / 0.264172052, namespace=_ns,
doc="U.S. liquid gallon")
def_unit(['quart'], gallon / 4, namespace=_ns,
doc="U.S. liquid quart")
def_unit(['pint'], quart / 2, namespace=_ns,
doc="U.S. liquid pint")
def_unit(['cup'], pint / 2, namespace=_ns,
doc="U.S. customary cup")
def_unit(['foz', 'fluid_oz', 'fluid_ounce'], cup / 8, namespace=_ns,
doc="U.S. fluid ounce")
def_unit(['tbsp', 'tablespoon'], foz / 2, namespace=_ns,
doc="U.S. customary tablespoon")
def_unit(['tsp', 'teaspoon'], tbsp / 3, namespace=_ns,
doc="U.S. customary teaspoon")
###########################################################################
# MASS
def_unit(['oz', 'ounce'], 28.349523125 * si.g, namespace=_ns,
doc="International avoirdupois ounce: mass")
def_unit(['lb', 'lbm', 'pound'], 16 * oz, namespace=_ns,
doc="International avoirdupois pound: mass")
def_unit(['st', 'stone'], 14 * lb, namespace=_ns,
doc="International avoirdupois stone: mass")
def_unit(['ton'], 2000 * lb, namespace=_ns,
doc="International avoirdupois ton: mass")
def_unit(['slug'], 32.174049 * lb, namespace=_ns,
doc="slug: mass")
###########################################################################
# SPEED
def_unit(['kn', 'kt', 'knot', 'NMPH'], nmi / si.h, namespace=_ns,
doc="nautical unit of speed: 1 nmi per hour")
###########################################################################
# FORCE
def_unit('lbf', slug * ft * si.s**-2, namespace=_ns,
doc="Pound: force")
def_unit(['kip', 'kilopound'], 1000 * lbf, namespace=_ns,
doc="Kilopound: force")
##########################################################################
# ENERGY
def_unit(['BTU', 'btu'], 1.05505585 * si.kJ, namespace=_ns,
doc="British thermal unit")
def_unit(['cal', 'calorie'], 4.184 * si.J, namespace=_ns,
doc="Thermochemical calorie: pre-SI metric unit of energy")
def_unit(['kcal', 'Cal', 'Calorie', 'kilocal', 'kilocalorie'],
1000 * cal, namespace=_ns,
doc="Calorie: colloquial definition of Calorie")
##########################################################################
# PRESSURE
def_unit('psi', lbf * inch ** -2, namespace=_ns,
doc="Pound per square inch: pressure")
###########################################################################
# POWER
# Imperial units
def_unit(['hp', 'horsepower'], si.W / 0.00134102209, namespace=_ns,
doc="Electrical horsepower")
###########################################################################
# TEMPERATURE
def_unit(['deg_F', 'Fahrenheit'], namespace=_ns, doc='Degrees Fahrenheit',
format={'latex': r'{}^{\circ}F', 'unicode': '°F'})
###########################################################################
# CLEANUP
del UnitBase
del def_unit
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
def enable():
"""
Enable Imperial units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`.
This may be used with the ``with`` statement to enable Imperial
units only temporarily.
"""
# Local import to avoid cyclical import
from .core import add_enabled_units
# Local import to avoid polluting namespace
import inspect
return add_enabled_units(inspect.getmodule(enable))
|
Python
| 0.000041
|
@@ -144,59 +144,129 @@
its.
- By default, they%0Aare not enabled. To enable them
+%0A%0ATo include them in %60~astropy.units.UnitBase.compose%60 and the results of%0A%60~astropy.units.UnitBase.find_equivalent_units%60
, do
@@ -277,20 +277,22 @@
%3E%3E%3E
-from
+import
astropy
@@ -302,23 +302,12 @@
its
-import imperial
+as u
%0A
@@ -311,16 +311,18 @@
%3E%3E%3E
+u.
imperial
|
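Usage matching the clarified docstring; note that imperial units convert fine without enable() — enabling only adds them to the results of compose() and find_equivalent_units():

import astropy.units as u

print((1 * u.m).to(u.imperial.ft))   # conversion works without enable()

with u.imperial.enable():            # temporarily list imperial units too
    print(u.m.find_equivalent_units())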
ba5edd102ddd53f2e95da8b673bf14bdd72dc012
|
Add quotes around user-provided values
|
pw_cli/py/pw_cli/argument_types.py
|
pw_cli/py/pw_cli/argument_types.py
|
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Defines argument types for use with argparse."""
import argparse
import logging
from pathlib import Path
def directory(arg: str) -> Path:
path = Path(arg)
if path.is_dir():
return path.resolve()
raise argparse.ArgumentTypeError(f'{path} is not a directory')
def log_level(arg: str) -> int:
try:
return getattr(logging, arg.upper())
except AttributeError:
raise argparse.ArgumentTypeError(
f'{arg.upper()} is not a valid log level')
|
Python
| 0.000001
|
@@ -838,14 +838,16 @@
r(f'
+%22
%7Bpath%7D
+%22
is
@@ -1035,16 +1035,17 @@
f'
+%22
%7Barg.upp
@@ -1049,16 +1049,17 @@
upper()%7D
+%22
is not
|
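The added quotes make malformed values visible in error messages (an empty or whitespace-only argument now shows up as "" rather than vanishing). A hedged sketch wiring both types into a parser; the program name and flags are illustrative, not part of Pigweed's CLI:

import argparse
import logging

from pw_cli.argument_types import directory, log_level

parser = argparse.ArgumentParser(prog='example')
parser.add_argument('--out', type=directory, help='must name an existing directory')
parser.add_argument('--log-level', type=log_level, default=logging.INFO)

args = parser.parse_args(['--out', '.', '--log-level', 'debug'])
print(args.out, args.log_level)  # resolved absolute Path and logging.DEBUG (10)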