Dataset schema (each record below lists these columns in order, with cells separated by `|` lines):

| column | type | range |
|---|---|---|
| commit | string | 40 chars |
| subject | string | 1–3.25k chars |
| old_file | string | 4–311 chars |
| new_file | string | 4–311 chars |
| old_contents | string | 0–26.3k chars |
| lang | string | 3 classes |
| proba | float64 | 0–1 |
| diff | string | 0–7.82k chars |
1363c12251cb6aaad37f2b3be6890f70e7f80a66
|
Fix invalid syntax
|
location_field/widgets.py
|
location_field/widgets.py
|
from django.conf import settings
from django.forms import widgets
from django.utils.safestring import mark_safe
GOOGLE_MAPS_V3_APIKEY = getattr(settings, 'GOOGLE_MAPS_V3_APIKEY', None)
GOOGLE_API_JS = '//maps.google.com/maps/api/js?sensor=false'
if GOOGLE_MAPS_V3_APIKEY:
GOOGLE_API_JS = '{0}&key={0}'.format(GOOGLE_API_JS, GOOGLE_MAPS_V3_APIKEY))
class LocationWidget(widgets.TextInput):
def __init__(self, attrs=None, based_fields=None, zoom=None, suffix='', **kwargs):
self.based_fields = based_fields
self.zoom = zoom
self.suffix = suffix
super(LocationWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
if value is not None:
if isinstance(value, basestring):
lat, lng = value.split(',')
else:
lng = value.x
lat = value.y
value = '%s,%s' % (
float(lat),
float(lng),
)
else:
value = ''
if '-' not in name:
prefix = ''
else:
prefix = name[:name.rindex('-') + 1]
based_fields = ','.join(
map(lambda f: '#id_' + prefix + f.name, self.based_fields))
attrs = attrs or {}
attrs['data-location-widget'] = name
attrs['data-based-fields'] = based_fields
attrs['data-zoom'] = self.zoom
attrs['data-suffix'] = self.suffix
attrs['data-map'] = '#map_' + name
text_input = super(LocationWidget, self).render(name, value, attrs)
map_div = u'''
<div style="margin:4px 0 0 0">
<label></label>
<div id="map_%(name)s" style="width: 500px; height: 250px"></div>
</div>
'''
return mark_safe(text_input + map_div % {'name': name})
class Media:
# Use schemaless URL so it works with both http and https websites
js = (
GOOGLE_API_JS,
settings.STATIC_URL + 'location_field/js/form.js',
)
|
Python
| 0.999586
|
@@ -349,17 +349,16 @@
 _APIKEY)
-)
 
 
 class
|
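Decoded, the hunk above only deletes the stray closing parenthesis named in the commit subject. A sketch of the patched lines, reusing the names from old_contents (note the format string still references `{0}` twice, exactly as in the source, so the key argument would still not be interpolated):

```python
GOOGLE_MAPS_V3_APIKEY = getattr(settings, 'GOOGLE_MAPS_V3_APIKEY', None)
GOOGLE_API_JS = '//maps.google.com/maps/api/js?sensor=false'
if GOOGLE_MAPS_V3_APIKEY:
    # unbalanced ')' removed by the commit; the '{0}' duplication is left as recorded
    GOOGLE_API_JS = '{0}&key={0}'.format(GOOGLE_API_JS, GOOGLE_MAPS_V3_APIKEY)
```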
b177629c1869fe707ec69ade0927bf9769e1cbe6
|
Freeze sql parse to 0.2.4
|
djongo/__init__.py
|
djongo/__init__.py
|
__version__ = '1.2.31'
|
Python
| 0.999989
|
@@ -14,11 +14,11 @@
 = '1.2.3
-1
+2
 '
|
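Applied to the one-line old_contents, the hunk is a plain version bump; the patched djongo/__init__.py would read:

```python
__version__ = '1.2.32'
```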
30c8e4d7a1e6e237772aa89256b83ec37a015803
|
increment version
|
terminalone/metadata.py
|
terminalone/metadata.py
|
# -*- coding: utf-8 -*-
__name__ = 'TerminalOne'
__author__ = 'MediaMath'
__copyright__ = 'Copyright 2015, MediaMath'
__license__ = 'Apache License, Version 2.0'
__version__ = '1.9.8'
__maintainer__ = 'MediaMath Developer Relations'
__email__ = 'developers@mediamath.com'
__status__ = 'Stable'
__url__ = 'http://www.mediamath.com'
__description__ = "A package for interacting with MediaMath's TerminalOne API."
|
Python
| 0.000004
|
@@ -175,17 +175,17 @@
 = '1.9.
-8
+9
 '
 __main
|
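Same pattern here: the hunk bumps the patch version, so the patched metadata line would read:

```python
__version__ = '1.9.9'
```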
c8445a938d9bd9512b8af40ac8e9465a3ab9f04d
|
Fix exception handling error.
|
d2to1/core.py
|
d2to1/core.py
|
import os
import sys
import warnings
from distutils.core import Distribution as _Distribution
from distutils.errors import DistutilsFileError, DistutilsSetupError
from setuptools.dist import _get_unpatched
from .extern import six
from .util import DefaultGetDict, IgnoreDict, cfg_to_args
_Distribution = _get_unpatched(_Distribution)
def d2to1(dist, attr, value):
"""Implements the actual d2to1 setup() keyword. When used, this should be
the only keyword in your setup() aside from `setup_requires`.
If given as a string, the value of d2to1 is assumed to be the relative path
to the setup.cfg file to use. Otherwise, if it evaluates to true, it
simply assumes that d2to1 should be used, and the default 'setup.cfg' is
used.
This works by reading the setup.cfg file, parsing out the supported
metadata and command options, and using them to rebuild the
`DistributionMetadata` object and set the newly added command options.
The reason for doing things this way is that a custom `Distribution` class
will not play nicely with setup_requires; however, this implementation may
not work well with distributions that do use a `Distribution` subclass.
"""
if not value:
return
if isinstance(value, six.string_types):
path = os.path.abspath(value)
else:
path = os.path.abspath('setup.cfg')
if not os.path.exists(path):
raise DistutilsFileError(
'The setup.cfg file %s does not exist.' % path)
# Converts the setup.cfg file to setup() arguments
try:
attrs = cfg_to_args(path)
except:
e = sys.exc_info()[1]
raise DistutilsSetupError(
'Error parsing %s: %s: %s' % (path, e.__class__.__name__,
six.u(e)))
# Repeat some of the Distribution initialization code with the newly
# provided attrs
if attrs:
# Skips 'options' and 'licence' support which are rarely used; may add
# back in later if demanded
for key, val in six.iteritems(attrs):
if hasattr(dist.metadata, 'set_' + key):
getattr(dist.metadata, 'set_' + key)(val)
elif hasattr(dist.metadata, key):
setattr(dist.metadata, key, val)
elif hasattr(dist, key):
setattr(dist, key, val)
else:
msg = 'Unknown distribution option: %s' % repr(key)
warnings.warn(msg)
# Re-finalize the underlying Distribution
_Distribution.finalize_options(dist)
# This bit comes out of distribute/setuptools
if isinstance(dist.metadata.version, six.integer_types + (float,)):
# Some people apparently take "version number" too literally :)
dist.metadata.version = str(dist.metadata.version)
# This bit of hackery is necessary so that the Distribution will ignore
# normally unsupported command options (namely pre-hooks and post-hooks).
# dist.command_options is normally a dict mapping command names to dicts of
# their options. Now it will be a defaultdict that returns IgnoreDicts for
# each command's options so we can pass through the unsupported options
ignore = ['pre_hook.*', 'post_hook.*']
dist.command_options = DefaultGetDict(lambda: IgnoreDict(ignore))
|
Python
| 0.000011
|
@@ -1796,17 +1796,22 @@
 six.u(
-e
+str(e)
 )))
 
|
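Decoded, the hunk wraps the caught exception in str() before handing it to six.u(), which expects text rather than an exception object. A sketch of the patched except block from old_contents:

```python
try:
    attrs = cfg_to_args(path)
except:
    e = sys.exc_info()[1]
    raise DistutilsSetupError(
        'Error parsing %s: %s: %s' % (path, e.__class__.__name__,
                                      six.u(str(e))))
```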
1fca3a48b0617b19554ab55c54db322090a69c3d
|
Add with statement tests
|
magic/tests/test_magic.py
|
magic/tests/test_magic.py
|
import unittest
import magic
import magic.flags
class MagicTestCase(unittest.TestCase):
def setUp(self):
self.magic = magic.Magic()
def test_get_version(self):
self.assertTrue(isinstance(self.magic.version, int))
def test_from_buffer(self):
mimetype = self.magic.from_buffer("ehlo")
self.assertEqual(mimetype, "ASCII text, with no line terminators")
def test_from_file(self):
mimetype = self.magic.from_file("/etc/passwd")
self.assertEqual(mimetype, "ASCII text")
def test_set_flags(self):
mimetype = self.magic.from_file("/etc/passwd")
self.assertEqual(mimetype, "ASCII text")
self.magic.set_flags(magic.flags.MAGIC_MIME_TYPE)
mimetype = self.magic.from_file("/etc/passwd")
self.assertEqual(mimetype, "text/plain")
|
Python
| 0.000004
|
@@ -493,24 +493,190 @@
 CII text")
 
+    def test_with(self):
+        with magic.Magic(mimetype=True) as m:
+            mimetype = self.magic.from_file("/etc/passwd")
+            self.assertEqual(mimetype, "text/plain")
+
     def test_s
|
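Decoded, the hunk inserts one new test before test_set_flags, with indentation reconstructed (the dataset collapsed leading spaces). A sketch of the added method as the diff records it; note it opens magic.Magic(mimetype=True) as m but then still calls self.magic, so m goes unused in the recorded patch:

```python
def test_with(self):
    with magic.Magic(mimetype=True) as m:
        mimetype = self.magic.from_file("/etc/passwd")
        self.assertEqual(mimetype, "text/plain")
```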
436aa56758403b96aa4c0038db6d2a24047cfa16
|
fix bug
|
monthertree/monthertree/wsgi.py
|
monthertree/monthertree/wsgi.py
|
"""
WSGI config for monthertree project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
import sys
sys.path.append('/home/jinxp/Documents/shell/mothertree/monthertree/')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "monthertree.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
Python
| 0.000001
|
@@ -245,77 +245,77 @@
 sys
-sys.path.append('/home/jinxp/Documents/shell/mothertree/monthertree/'
+from django.conf import settings
+sys.path.append(settings.PROJECT_DIR
 )
 os
|
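Reassembled, the patch swaps the hard-coded absolute path for a settings lookup. A sketch of the patched wsgi.py, assuming the project's settings module defines PROJECT_DIR (the diff references it but its definition is not shown):

```python
import os
import sys

from django.conf import settings
sys.path.append(settings.PROJECT_DIR)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "monthertree.settings")

from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
```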
3d7e7c53c5c2809df74a8f8d61a7f929e02a1ce6
|
Require libunwind
|
var/spack/packages/gperftools/package.py
|
var/spack/packages/gperftools/package.py
|
##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gperftools(Package):
"""Google's fast malloc/free implementation, especially for multi-threaded applications.
Contains tcmalloc, heap-checker, heap-profiler, and cpu-profiler."""
homepage = "https://code.google.com/p/gperftools"
url = "https://googledrive.com/host/0B6NtGsLhIcf7MWxMMF9JdTN3UVk/gperftools-2.3.tar.gz"
version('2.4', '2171cea3bbe053036fb5d5d25176a160', url="https://github.com/gperftools/gperftools/releases/download/gperftools-2.4/gperftools-2.4.tar.gz")
version('2.3', 'f54dd119f0e46ac1f13264f8d97adf90', url="https://googledrive.com/host/0B6NtGsLhIcf7MWxMMF9JdTN3UVk/gperftools-2.3.tar.gz")
def install(self, spec, prefix):
configure("--prefix=" + prefix)
make()
make("install")
|
Python
| 0.998829
|
@@ -1871,16 +1871,45 @@
 r.gz")
 
+    depends_on("libunwind")
+
     def
|
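The hunk adds a single Spack dependency declaration between the version() lines and install(). A sketch of the patched class tail:

```python
class Gperftools(Package):
    # ...docstring, homepage, url and version() lines as in old_contents...
    depends_on("libunwind")

    def install(self, spec, prefix):
        configure("--prefix=" + prefix)
        make()
        make("install")
```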
9a35e57dbde9360409ea883b6426dc1a355e8d16
|
Fix Reverb signature for distributed SAC nightly.
|
tf_agents/experimental/distributed/examples/sac/sac_reverb_server.py
|
tf_agents/experimental/distributed/examples/sac/sac_reverb_server.py
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Main binary to launch a stand alone Reverb RB server.
See README for launch instructions.
"""
import os
from absl import app
from absl import flags
from absl import logging
import reverb
import tensorflow.compat.v2 as tf
from tf_agents.experimental.distributed import reverb_variable_container
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.specs import tensor_spec
from tf_agents.train import learner
from tf_agents.train.utils import train_utils
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_integer('min_table_size_before_sampling', 1,
'Minimum number of elements in table before sampling.')
flags.DEFINE_integer('replay_buffer_capacity', 1000000,
'Capacity of the replay buffer table.')
flags.DEFINE_integer('port', None, 'Port to start the server on.')
flags.DEFINE_integer(
'samples_per_insert', None,
'Samples per insert limit. To use this option, ensure that '
'min_table_size_before_sampling >= 2 * max(1.0, samples_per_insert)')
FLAGS = flags.FLAGS
# Ratio for samples per insert rate limiting tolerance
_SAMPLES_PER_INSERT_TOLERANCE_RATIO = 0.1
def main(_):
logging.set_verbosity(logging.INFO)
# Wait for the collect policy to become available, then load it.
collect_policy_dir = os.path.join(FLAGS.root_dir,
learner.POLICY_SAVED_MODEL_DIR,
learner.COLLECT_POLICY_SAVED_MODEL_DIR)
collect_policy = train_utils.wait_for_policy(
collect_policy_dir, load_specs_from_pbtxt=True)
samples_per_insert = FLAGS.samples_per_insert
min_table_size_before_sampling = FLAGS.min_table_size_before_sampling
# Create the signature for the variable container holding the policy weights.
train_step = train_utils.create_train_step()
variables = {
reverb_variable_container.POLICY_KEY: collect_policy.variables(),
reverb_variable_container.TRAIN_STEP_KEY: train_step
}
variable_container_signature = tf.nest.map_structure(
lambda variable: tf.TensorSpec(variable.shape, dtype=variable.dtype),
variables)
logging.info('Signature of variables: \n%s', variable_container_signature)
# Create the signature for the replay buffer holding observed experience.
replay_buffer_signature = tensor_spec.from_spec(
collect_policy.collect_data_spec)
logging.info('Signature of experience: \n%s', replay_buffer_signature)
if samples_per_insert is not None:
# Use SamplesPerInsertRatio limiter
samples_per_insert_tolerance = (
_SAMPLES_PER_INSERT_TOLERANCE_RATIO * samples_per_insert)
error_buffer = min_table_size_before_sampling * samples_per_insert_tolerance
experience_rate_limiter = reverb.rate_limiters.SampleToInsertRatio(
min_size_to_sample=min_table_size_before_sampling,
samples_per_insert=samples_per_insert,
error_buffer=error_buffer)
else:
# Use MinSize limiter
experience_rate_limiter = reverb.rate_limiters.MinSize(
min_table_size_before_sampling)
# Create and start the replay buffer and variable container server.
server = reverb.Server(
tables=[
reverb.Table( # Replay buffer storing experience.
name=reverb_replay_buffer.DEFAULT_TABLE,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
rate_limiter=experience_rate_limiter,
max_size=FLAGS.replay_buffer_capacity,
max_times_sampled=0,
signature=replay_buffer_signature,
),
reverb.Table( # Variable container storing policy parameters.
name=reverb_variable_container.DEFAULT_TABLE,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
rate_limiter=reverb.rate_limiters.MinSize(1),
max_size=1,
max_times_sampled=0,
signature=variable_container_signature,
),
],
port=FLAGS.port)
server.wait()
if __name__ == '__main__':
flags.mark_flags_as_required(['root_dir', 'port'])
app.run(main)
|
Python
| 0.000002
|
@@ -3095,16 +3095,95 @@
 a_spec)
+  replay_buffer_signature = tensor_spec.add_outer_dim(replay_buffer_signature)
 loggin
|
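Decoded, the fix adds an outer (batch) dimension to the replay-buffer signature right after it is built, which is what the commit subject means by fixing the Reverb signature. Sketch of the patched lines in main():

```python
replay_buffer_signature = tensor_spec.from_spec(
    collect_policy.collect_data_spec)
replay_buffer_signature = tensor_spec.add_outer_dim(replay_buffer_signature)
logging.info('Signature of experience: \n%s', replay_buffer_signature)
```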
6d8dbb6621da2ddfffd58303131eb6cda345e37c
|
Make person experience the default tab for ZA
|
pombola/south_africa/urls.py
|
pombola/south_africa/urls.py
|
from django.conf.urls import patterns, include, url
from pombola.south_africa.views import LatLonDetailView,SAPlaceDetailSub
urlpatterns = patterns('pombola.south_africa.views',
url(r'^place/latlon/(?P<lat>[0-9\.-]+),(?P<lon>[0-9\.-]+)/', LatLonDetailView.as_view(), name='latlon'),
url(r'^place/(?P<slug>[-\w]+)/places/', SAPlaceDetailSub.as_view(), {'sub_page': 'places'}, name='place_places'),
)
|
Python
| 0.000001
|
@@ -46,16 +46,63 @@
 e, url
 
+from pombola.core.views import PersonDetailSub
 from pom
@@ -447,10 +447,122 @@
 aces'),
+    url(r'^person/(?P<slug>[-\w]+)/$', PersonDetailSub.as_view(), { 'sub_page': 'experience' }, name='person'),
 )
|
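Reassembled, the two hunks import PersonDetailSub and add a person route whose default sub-page is 'experience'. Sketch of the patched urls.py:

```python
from django.conf.urls import patterns, include, url

from pombola.core.views import PersonDetailSub
from pombola.south_africa.views import LatLonDetailView, SAPlaceDetailSub

urlpatterns = patterns('pombola.south_africa.views',
    url(r'^place/latlon/(?P<lat>[0-9\.-]+),(?P<lon>[0-9\.-]+)/', LatLonDetailView.as_view(), name='latlon'),
    url(r'^place/(?P<slug>[-\w]+)/places/', SAPlaceDetailSub.as_view(), {'sub_page': 'places'}, name='place_places'),
    url(r'^person/(?P<slug>[-\w]+)/$', PersonDetailSub.as_view(), {'sub_page': 'experience'}, name='person'),
)
```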
80618891a11c90bdfc763d9a00e9bdcf2e9302fd
|
Create resources on deploy
|
noopy/project_template/deploy.py
|
noopy/project_template/deploy.py
|
#!/usr/bin/python
import glob
import importlib
import os
import sys
import zipfile
from StringIO import StringIO
import boto3
import noopy
from noopy.endpoint import Endpoint
from noopy.utils import to_pascal_case
import settings
def main():
target_dir = 'src'
zip_bytes = make_zip(target_dir)
for endpoint in settings.ENDPOINTS:
importlib.import_module('src.{}'.format(endpoint))
for func in Endpoint.endpoints.values():
create_lambda_function(zip_bytes, func)
def make_zip(target_dir):
f = StringIO()
zip_file = zipfile.ZipFile(f, 'w')
file_names = glob.glob('{}/*.py'.format(target_dir))
if not file_names:
sys.stderr.write('There is no python file in src directory')
sys.exit(1)
for file_name in file_names:
zip_file.write(file_name, os.path.split(file_name)[1])
noopy_parent = os.path.split(noopy.__path__[0])[0]
for root, _, file_names in os.walk(noopy.__path__[0]):
for file_name in file_names:
full_path = os.path.join(root, file_name)
local_path = full_path[len(noopy_parent):]
zip_file.write(full_path, local_path)
zip_file.close()
f.seek(0)
bytes_ = f.read()
f.close()
return bytes_
def create_lambda_function(zip_bytes, func):
lambda_settings = settings.LAMBDA
client = boto3.client('lambda')
function_prefix = 'arn:aws:lambda:{}:{}:{}'.format(
client._client_config.region_name,
settings.ACCOUNT_ID,
lambda_settings['Prefix']
)
func_module = os.path.split(func.func_code.co_filename)[1].split('.')[0]
print client.create_function(
FunctionName='{}{}'.format(function_prefix, to_pascal_case(func.func_name)),
Runtime='python2.7',
Role=lambda_settings['Role'],
Handler='{}.{}'.format(func_module, func.func_name),
Code={
'ZipFile': zip_bytes
}
)
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -169,16 +169,61 @@
 ndpoint
+from noopy.endpoint.resource import Resource
 from noo
@@ -253,16 +253,16 @@
 al_case
-
 
 import
@@ -539,16 +539,17 @@
 func)
 
 
+
 def make
@@ -2009,16 +2009,16 @@
 }
-
 )
 
 
 if _
@@ -2013,16 +2013,1449 @@
 )
 
 
+class ApiGatewayDeployer(object):
+    def __init__(self):
+        self.client = boto3.client('apigateway')
+        apis = self.client.get_rest_apis()['items']
+        filtered_apis = [api for api in apis if api['name'] == settings.PROJECT_NAME]
+        if filtered_apis:
+            self.api_id = filtered_apis[0]['id']
+        else:
+            self.api_id = self.client.create_rest_api(name=settings.PROJECT_NAME)['id']
+
+    def prepare_resources(self):
+        aws_resources = self.client.get_resources(restApiId=self.api_id, limit=500)['items']
+        aws_resource_by_path = dict((r['path'], r) for r in aws_resources)
+        for path, noopy_resource in Resource.resources.iteritems():
+            aws_resource = aws_resource_by_path.get(path)
+            if aws_resource:
+                noopy_resource.id = aws_resource['id']
+
+        self.create_omitted_resources(aws_resource_by_path.keys(), Resource.resources['/'])
+
+    def create_omitted_resources(self, exist_path, parent):
+        for child in parent.children:
+            if child.path not in exist_path:
+                created = self.client.create_resource(
+                    restApiId=self.api_id,
+                    parentId=parent.id,
+                    pathPart=child.path.split('/')[-1]
+                )
+                child.id = created['id']
+                if child.children:
+                    self.create_omitted_resources(exist_path, child)
+
+
 if __nam
|
76289f734f622227c44487d8f44879e078dbdcb3
|
Improve gzweb launcher
|
src/deedee_tutorials/src/deedee_tutorials/launcher.py
|
src/deedee_tutorials/src/deedee_tutorials/launcher.py
|
#! /usr/bin/env python
import rospy
import subprocess
class MainLauncher:
''' Node spawning the environment with respect to the global configs
'''
def __init__(self):
rospy.init_node("middleware_spawner")
rospy.sleep(0.5)
# Configs
self.configs = {"robot_name": "deedee",
"sim_plant": "true",
"autonomous": "true"}
self.cmd = ""
self.retrieve_config()
self.build_cmd()
self.spawn()
def retrieve_config(self):
for setting in self.configs.keys():
self.configs[setting] = rospy.get_param("/{}".format(setting))
def build_cmd(self):
self.cmd = "roslaunch deedee_tutorials follow_waypoints.launch"
for setting in self.configs.keys():
self.cmd += " {}:={}".format(setting, self.configs[setting])
def spawn(self):
subprocess.call(self.cmd, shell=True)
class GzwebManager:
''' Node spawning the environment with respect to the global configs
'''
def __init__(self):
rospy.init_node("gzweb_manager")
rospy.sleep(0.5)
# Configs
self.configs = {"gzweb_enable": "true",
"gzweb_path": ""}
self.retrieve_config()
self.cmd = "{}/start_gzweb.sh".format(self.configs["gzweb_path"])
if self.configs["gzweb_enable"]:
subprocess.call("{}/start_gzweb.sh".format(self.configs["gzweb_path"]),
shell=True)
rospy.on_shutdown(self.shutdown_hook)
rospy.spin()
def retrieve_config(self):
gzweb_params = rospy.get_param("gzweb")
for setting in self.configs.keys():
self.configs[setting] = gzweb_params[setting]
def shutdown_hook(self):
print "Stopping webserver!"
subprocess.call("{}/stop_gzweb.sh".format(self.configs["gzweb_path"]),
shell=True)
|
Python
| 0
|
@@ -1152,32 +1152,71 @@
 trieve_config()
+        if self.configs["gzweb_enable"]:
 self.cmd = "
@@ -1273,45 +1273,8 @@
 "])
-        if self.configs["gzweb_enable"]:
@@ -1381,17 +1381,18 @@
 l=True)
-
+  
 rosp
@@ -1425,16 +1425,18 @@
 n_hook)
+  
 rosp
|
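One plausible reassembly of these hunks: the guard moves above the command construction, and the whitespace-only hunks indent the shutdown hook and spin so everything runs only when gzweb is enabled. A hedged sketch of the patched __init__ (indentation inferred from the hunk sizes, not shown verbatim in the diff):

```python
def __init__(self):
    rospy.init_node("gzweb_manager")
    rospy.sleep(0.5)
    # Configs
    self.configs = {"gzweb_enable": "true",
                    "gzweb_path": ""}
    self.retrieve_config()
    if self.configs["gzweb_enable"]:
        self.cmd = "{}/start_gzweb.sh".format(self.configs["gzweb_path"])
        subprocess.call("{}/start_gzweb.sh".format(self.configs["gzweb_path"]),
                        shell=True)
        rospy.on_shutdown(self.shutdown_hook)
        rospy.spin()
```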
61c6f174b1e406955c3e881217ff863d6ff6c3ce
|
Fix validate/sanitize functions for click
|
pathvalidate/click.py
|
pathvalidate/click.py
|
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import click
from ._common import PathType
from ._file import sanitize_filename, sanitize_filepath, validate_filename, validate_filepath
from .error import ValidationError
def validate_filename_arg(ctx, param, value) -> None:
if not value:
return
try:
validate_filename(value)
except ValidationError as e:
raise click.BadParameter(str(e))
def validate_filepath_arg(ctx, param, value) -> None:
if not value:
return
try:
validate_filepath(value)
except ValidationError as e:
raise click.BadParameter(str(e))
def sanitize_filename_arg(ctx, param, value) -> PathType:
if not value:
return ""
return sanitize_filename(value)
def sanitize_filepath_arg(ctx, param, value) -> PathType:
if not value:
return ""
return sanitize_filepath(value)
def filename(ctx, param, value):
# Deprecated
if not value:
return None
try:
validate_filename(value)
except ValidationError as e:
raise click.BadParameter(e)
return sanitize_filename(value)
def filepath(ctx, param, value):
# Deprecated
if not value:
return None
try:
validate_filepath(value)
except ValidationError as e:
raise click.BadParameter(e)
return sanitize_filepath(value)
|
Python
| 0
|
@@ -282,36 +282,35 @@
 aram, value) ->
-None
+str
 :
     if not val
@@ -319,32 +319,35 @@
 :
         return
+ ""
 
 
     try:
@@ -445,24 +445,42 @@
 er(str(e))
 
+    return value
+
 
 def validat
@@ -516,20 +516,19 @@
 lue) ->
-None
+str
 :
     if
@@ -553,16 +553,19 @@
 return
+ ""
 
 
     tr
@@ -675,16 +675,34 @@
 tr(e))
 
+    return value
+
 
 def san
|
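Decoded, the fix gives the validators a str return type and makes every path return a value (empty string for falsy input, the validated value otherwise) instead of falling off the end with None. Sketch of one patched validator; validate_filepath_arg gets the same treatment:

```python
def validate_filename_arg(ctx, param, value) -> str:
    if not value:
        return ""

    try:
        validate_filename(value)
    except ValidationError as e:
        raise click.BadParameter(str(e))

    return value
```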
b65283984b1be7e8bb88d3281bb3654a3dd12233
|
Make sure test setup is run for subdirectories
|
nova/tests/scheduler/__init__.py
|
nova/tests/scheduler/__init__.py
|
Python
| 0.000001
|
@@ -0,0 +1,776 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Openstack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work
+from nova.tests import *
|
|
fd74c411fefac7a633627fd3eb5a3c194e6e2b1c
|
add never_cache for people_admin views
|
people_admin/views.py
|
people_admin/views.py
|
from django.shortcuts import render, get_object_or_404
from django.db.models import Count
from openstates.data.models import LegislativeSession, Person
from utils.common import abbr_to_jid, sessions_with_bills, states
from people_admin.models import UnmatchedName, NameStatus
from django.views.decorators.http import require_http_methods
from django.contrib.auth.decorators import user_passes_test
from django.http import JsonResponse
import json
def person_data(person):
""" similar to utils.people.person_as_dict but customized for editable fields """
return {
"id": person.id,
"name": person.name,
"title": person.current_role["title"],
"district": person.current_role["district"],
"party": person.primary_party,
"image": person.image,
}
@user_passes_test(lambda u: u.is_staff)
def jurisdiction_list(request):
state_people_data = {}
unmatched_by_state = dict(
UnmatchedName.objects.filter(status="U")
.values_list("session__jurisdiction__name")
.annotate(number=Count("id"))
)
for state in states:
state_people_data[state.abbr.lower()] = {
"state": state.name,
"unmatched": unmatched_by_state.get(state.name, 0),
}
return render(
request,
"people_admin/jurisdiction_list.html",
{"state_people_data": state_people_data},
)
@user_passes_test(lambda u: u.is_staff)
def people_list(request, state):
jid = abbr_to_jid(state)
current_people = [
person_data(p)
for p in Person.objects.filter(
current_jurisdiction_id=jid, current_role__isnull=False
).order_by("family_name", "name")
]
context = {
"current_people": current_people,
}
return render(request, "people_admin/person_list.html", {"context": context})
@user_passes_test(lambda u: u.is_staff)
def people_matcher(request, state, session=None):
jid = abbr_to_jid(state)
all_sessions = sessions_with_bills(jid)
if all_sessions:
session = all_sessions[0]
else:
session = get_object_or_404(
LegislativeSession, identifier=session, jurisdiction_id=jid
)
unmatched = UnmatchedName.objects.filter(session_id=session, status="U").order_by(
"-sponsorships_count"
)
state_sponsors = Person.objects.filter(current_jurisdiction_id=jid)
unmatched_total = unmatched.count()
context = {
"state": state,
"session": session,
"all_sessions": all_sessions,
"unmatched": unmatched,
"state_sponsors": state_sponsors,
"unmatched_total": unmatched_total,
}
return render(request, "people_admin/people_matcher.html", context)
@user_passes_test(lambda u: u.is_staff)
@require_http_methods(["POST"])
def apply_match(request):
form_data = json.load(request)["match_data"]
button = form_data["button"]
match_id = form_data["matchedId"]
unmatched_id = form_data["unmatchedId"]
unmatched_name = get_object_or_404(UnmatchedName, pk=unmatched_id)
if button == "Match":
unmatched_name.matched_person_id = match_id
unmatched_name.status = NameStatus.MATCHED_PERSON
elif button == "Source Error":
unmatched_name.status = NameStatus.SOURCE_ERROR
elif button == "Ignore":
unmatched_name.status = NameStatus.IGNORED
else:
unmatched_name.status = NameStatus.UNMATCHED
unmatched_name.save()
return JsonResponse({"status": "success"})
|
Python
| 0
|
@@ -331,16 +331,29 @@
 _methods
+, never_cache
 
 from dj
@@ -1409,24 +1409,37 @@
 a},
 )
 
 
+@never_cache
 @user_passes
@@ -1876,24 +1876,37 @@
 context})
 
 
+@never_cache
 @user_passes
|
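Decoded, the hunks append never_cache to the existing django.views.decorators.http import and decorate jurisdiction_list and people_list. One caveat: in Django as released, never_cache lives in django.views.decorators.cache, so a variant importing it from there is sketched below (the cache import path is my correction; the decorators match the diff):

```python
from django.views.decorators.http import require_http_methods
from django.views.decorators.cache import never_cache  # the diff appends it to the http import


@never_cache
@user_passes_test(lambda u: u.is_staff)
def jurisdiction_list(request):
    ...
```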
573a774e8c8bb30d15659e678c83be86e3cc82a7
|
Fix user_login method of google provider
|
pgoapi/auth_google.py
|
pgoapi/auth_google.py
|
"""
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
from __future__ import absolute_import
import six
import logging
from pgoapi.auth import Auth
from pgoapi.exceptions import AuthException
from gpsoauth import perform_master_login, perform_oauth
class AuthGoogle(Auth):
GOOGLE_LOGIN_ANDROID_ID = '9774d56d682e549c'
GOOGLE_LOGIN_SERVICE= 'audience:server:client_id:848232511240-7so421jotr2609rmqakceuu1luuq0ptb.apps.googleusercontent.com'
GOOGLE_LOGIN_APP = 'com.nianticlabs.pokemongo'
GOOGLE_LOGIN_CLIENT_SIG = '321187995bc7cdc2b5fc91b11a96e2baa8602c62'
def __init__(self):
Auth.__init__(self)
self._auth_provider = 'google'
self._refresh_token = None
def set_proxy(self, proxy_config):
self._session.proxies = proxy_config
def user_login(self, username, password):
self.log.info('Google User Login for: {}'.format(username))
if not isinstance(username, six.string_types) or not isinstance(password, six.string_types):
raise AuthException("Username/password not correctly specified")
user_login = perform_master_login(username, password, self.GOOGLE_LOGIN_ANDROID_ID)
try:
refresh_token = user_login.get('Token', None)
except ConnectionError as e:
raise AuthException("Caught ConnectionError: %s", e)
if refresh_token is not None:
self._refresh_token = refresh_token
self.log.info('Google User Login successful.')
else:
self._refresh_token = None
raise AuthException("Invalid Google Username/password")
self.get_access_token()
def set_refresh_token(self, refresh_token):
self.log.info('Google Refresh Token provided by user')
self._refresh_token = refresh_token
def get_access_token(self, force_refresh = False):
token_validity = self.check_access_token()
if token_validity is True and force_refresh is False:
self.log.debug('Using cached Google Access Token')
return self._access_token
else:
if force_refresh:
self.log.info('Forced request of Google Access Token!')
else:
self.log.info('Request Google Access Token...')
token_data = perform_oauth(None, self._refresh_token, self.GOOGLE_LOGIN_ANDROID_ID, self.GOOGLE_LOGIN_SERVICE, self.GOOGLE_LOGIN_APP,
self.GOOGLE_LOGIN_CLIENT_SIG)
access_token = token_data.get('Auth', None)
if access_token is not None:
self._access_token = access_token
self._access_token_expiry = int(token_data.get('Expiry', 0))
self._login = True
self.log.info('Google Access Token successfully received.')
self.log.debug('Google Access Token: %s...', self._access_token[:25])
return self._access_token
else:
self._access_token = None
self._login = False
raise AuthException("Could not receive a Google Access Token")
|
Python
| 0.999997
|
@@ -2739,24 +2739,51 @@
 cess_token()
+        return self._login
 
 
     def se
|
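The hunk simply makes user_login report whether authentication succeeded. Sketch of the patched method tail (everything above the last two lines is unchanged from old_contents):

```python
def user_login(self, username, password):
    # ...master login and refresh-token handling as in old_contents...
    self.get_access_token()
    return self._login
```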
aeb69479a6bf5492411e82bbcb77331daa8da819
|
add a test to test the monitor
|
tests/test_bzoing.py
|
tests/test_bzoing.py
|
"""
test_bzoing
----------------------------------
Tests for `bzoing` module.
"""
import unittest
from bzoing.tasks import Bzoinq, Monitor
import time
class TestTasksAndMonitor(unittest.TestCase):
def test_creating_task(self):
a = Bzoinq()
a.create_task()
self.assertTrue(len(a.task_list) == 1)
def test_delete_task(self):
a = Bzoinq()
a.create_task()
the_id = a.task_list[0].id
a.remove_task(the_id)
self.assertTrue(len(a.task_list) == 0)
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000001
|
@@ -148,16 +148,17 @@
 t time
 
+
 class Te
@@ -513,16 +513,473 @@
 == 0)
 
+    def test_monitor(self):
+        import datetime
+        a = Bzoinq()
+        b = Monitor(a)
+        b.start()
+        first_time = datetime.datetime.now() + datetime.timedelta(seconds=10)
+        a.create_task("My test task", alarm=first_time)
+        # sleep a bit to see if alarm works
+        time.sleep(15)
+        # check if the task was removed from the task list
+        self.assertTrue(len(a.task_list) == 0)
+        # kill Monitor
+        b.stop()
+
 
 
 if __n
|
ef4a020098bbd81339b36bd41493d2261a97aedd
|
FIX a MAJOR bug where inference evaluation considers 1 groundtruth caption only
|
keras_image_captioning/inference.py
|
keras_image_captioning/inference.py
|
import fire
import heapq
import numpy as np
import os
from collections import namedtuple
from keras.engine.training import GeneratorEnqueuer
from time import sleep
from .config import FileConfigBuilder, active_config
from .dataset_providers import DatasetProvider
from .io_utils import logging, write_yaml_file
from .metrics import BLEU, CIDEr, ROUGE
from .models import ImageCaptioningModel
class BasicInference(object):
_MAX_Q_SIZE = 10
_WORKERS = 1
_WAIT_TIME = 0.01
def __init__(self, keras_model, dataset_provider):
self._model = keras_model
self._dataset_provider = dataset_provider
self._preprocessor = dataset_provider.caption_preprocessor
self._metrics = [BLEU(4), CIDEr(), ROUGE()]
def predict_training_set(self, include_datum=True):
return self._predict(self._dataset_provider.training_set,
self._dataset_provider.training_steps,
include_datum)
def predict_validation_set(self, include_datum=True):
return self._predict(self._dataset_provider.validation_set,
self._dataset_provider.validation_steps,
include_datum)
def predict_testing_set(self, include_datum=True):
return self._predict(self._dataset_provider.testing_set,
self._dataset_provider.testing_steps,
include_datum)
def evaluate_training_set(self, include_prediction=False):
return self._evaluate(self.predict_training_set(include_datum=True),
include_prediction=include_prediction)
def evaluate_validation_set(self, include_prediction=False):
return self._evaluate(self.predict_validation_set(include_datum=True),
include_prediction=include_prediction)
def evaluate_testing_set(self, include_prediction=False):
return self._evaluate(self.predict_testing_set(include_datum=True),
include_prediction=include_prediction)
def _predict(self,
data_generator_function,
steps_per_epoch,
include_datum=True):
data_generator = data_generator_function(include_datum=True)
enqueuer = GeneratorEnqueuer(data_generator, pickle_safe=False)
enqueuer.start(workers=self._WORKERS, max_q_size=self._MAX_Q_SIZE)
caption_results = []
datum_results = []
for _ in range(steps_per_epoch):
generator_output = None
while enqueuer.is_running():
if not enqueuer.queue.empty():
generator_output = enqueuer.queue.get()
break
else:
sleep(self._WAIT_TIME)
X, y, datum_batch = generator_output
captions_pred_str = self._predict_batch(X, y)
caption_results += captions_pred_str
datum_results += datum_batch
enqueuer.stop()
if include_datum:
return zip(caption_results, datum_results)
else:
return caption_results
def _predict_batch(self, X, y):
captions_pred = self._model.predict_on_batch(X)
captions_pred_str = self._preprocessor.decode_captions(
captions_output=captions_pred,
captions_output_expected=y)
return captions_pred_str
def _evaluate(self, caption_datum_pairs, include_prediction=False):
id_to_prediction = {}
id_to_references = {}
for caption_pred, datum in caption_datum_pairs:
img_id = datum.img_filename
caption_expected = self._preprocessor.normalize_captions(
[datum.caption_txt])
id_to_prediction[img_id] = caption_pred
id_to_references[img_id] = caption_expected
metrics = {}
for metric in self._metrics:
metric_name_to_value = metric.calculate(id_to_prediction,
id_to_references)
metrics.update(metric_name_to_value)
return (metrics, id_to_prediction) if include_prediction else metrics
class BeamSearchInference(BasicInference):
def __init__(self,
keras_model,
dataset_provider,
beam_size=3,
max_caption_length=20):
super(BeamSearchInference, self).__init__(keras_model,
dataset_provider)
if beam_size > 1:
raise NotImplementedError('Beam search with beam_size > 1 is not '
'implemented yet!')
self._beam_size = beam_size
self._max_caption_length = max_caption_length
def _predict_batch(self, X, y):
imgs_input, _ = X
batch_size = imgs_input.shape[0]
captions_input = np.full((batch_size, 1),
self._preprocessor.EOS_TOKEN_LABEL_ENCODED)
captions_result = [None] * batch_size
for _ in range(self._max_caption_length):
captions_pred = self._model.predict_on_batch([imgs_input,
captions_input])
captions_pred_str = self._preprocessor.decode_captions(
captions_output=captions_pred)
for i, caption in enumerate(captions_pred_str):
if caption.endswith(' ' + self._preprocessor.EOS_TOKEN):
if captions_result[i] is None:
captions_result[i] = caption
# If all reach <eos>
if all(x is not None for x in captions_result):
break
encoded = self._preprocessor.encode_captions(captions_pred_str)
captions_input, _ = self._preprocessor.preprocess_batch(encoded)
# For captions that don't reach <eos> until the max caption length
for i, caption in enumerate(captions_pred_str):
if captions_result[i] is None:
captions_result[i] = caption
return captions_result
class NLargest(object):
def __init__(self, n):
self._n = n
self._heap = []
@property
def size(self):
return len(self._heap)
def add(self, item):
if len(self._heap) < self._n:
heapq.heappush(self._heap, item)
else:
heapq.heappushpop(self._heap, item)
def n_largest(self, sort=False):
return sorted(self._heap, reverse=True) if sort else self._heap
# score should precede sentence so Caption is compared with score first
Caption = namedtuple('Caption', 'score sentence')
def main(training_dir, method='beam_search', beam_size=3):
if method != 'beam_search':
raise NotImplementedError('inference method = {} is not implemented '
'yet!'.format(method))
hyperparam_path = os.path.join(training_dir, 'hyperparams-config.yaml')
model_weights_path = os.path.join(training_dir, 'model-weights.hdf5')
logging('Loading hyperparams config..')
config = FileConfigBuilder(hyperparam_path).build_config()
active_config(config)
model = ImageCaptioningModel()
logging('Building model..')
model.build()
keras_model = model.keras_model
logging('Loading model weights..')
keras_model.load_weights(model_weights_path)
dataset_provider = DatasetProvider()
inference = BeamSearchInference(keras_model, dataset_provider,
beam_size=beam_size)
logging('Evaluating validation set..')
metrics, predictions = inference.evaluate_validation_set(
include_prediction=True)
logging('Writing result to files..')
metrics_path = os.path.join(training_dir, 'validation-metrics.yaml')
predictions_path = os.path.join(training_dir, 'validation-predictions.yaml')
write_yaml_file(metrics, metrics_path)
write_yaml_file(predictions, predictions_path)
logging('Done!')
if __name__ == '__main__':
fire.Fire(main)
|
Python
| 0
|
@@ -3863,27 +3863,30 @@
-[
 datum.
+all_
 caption
+s
 _txt
-]
 )
|
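Decoded, the one-line fix replaces the single-caption list [datum.caption_txt] with the datum's full caption list, so evaluation scores each prediction against every groundtruth caption. Sketch of the patched lines inside _evaluate (all_captions_txt is the attribute named by the diff):

```python
caption_expected = self._preprocessor.normalize_captions(
    datum.all_captions_txt)
```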
7ce0bff2ca63c830e148385b4cce231152843d58
|
change bug of readMsg
|
storm-core/src/multilang/py/storm.py
|
storm-core/src/multilang/py/storm.py
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import traceback
from collections import deque
try:
import simplejson as json
except ImportError:
import json
json_encode = lambda x: json.dumps(x)
json_decode = lambda x: json.loads(x)
#reads lines and reconstructs newlines appropriately
def readMsg():
msg = ""
while True:
line = sys.stdin.readline()[0:-1]
if line == "end":
break
msg = msg + line + "\n"
return json_decode(msg[0:-1])
MODE = None
ANCHOR_TUPLE = None
#queue up commands we read while trying to read taskids
pending_commands = deque()
def readTaskIds():
if pending_taskids:
return pending_taskids.popleft()
else:
msg = readMsg()
while type(msg) is not list:
pending_commands.append(msg)
msg = readMsg()
return msg
#queue up taskids we read while trying to read commands/tuples
pending_taskids = deque()
def readCommand():
if pending_commands:
return pending_commands.popleft()
else:
msg = readMsg()
while type(msg) is list:
pending_taskids.append(msg)
msg = readMsg()
return msg
def readTuple():
cmd = readCommand()
return Tuple(cmd["id"], cmd["comp"], cmd["stream"], cmd["task"], cmd["tuple"])
def sendMsgToParent(msg):
print json_encode(msg)
print "end"
sys.stdout.flush()
def sync():
sendMsgToParent({'command':'sync'})
def sendpid(heartbeatdir):
pid = os.getpid()
sendMsgToParent({'pid':pid})
open(heartbeatdir + "/" + str(pid), "w").close()
def emit(*args, **kwargs):
__emit(*args, **kwargs)
return readTaskIds()
def emitDirect(task, *args, **kwargs):
kwargs["directTask"] = task
__emit(*args, **kwargs)
def __emit(*args, **kwargs):
global MODE
if MODE == Bolt:
emitBolt(*args, **kwargs)
elif MODE == Spout:
emitSpout(*args, **kwargs)
def emitBolt(tup, stream=None, anchors = [], directTask=None):
global ANCHOR_TUPLE
if ANCHOR_TUPLE is not None:
anchors = [ANCHOR_TUPLE]
m = {"command": "emit"}
if stream is not None:
m["stream"] = stream
m["anchors"] = map(lambda a: a.id, anchors)
if directTask is not None:
m["task"] = directTask
m["tuple"] = tup
sendMsgToParent(m)
def emitSpout(tup, stream=None, id=None, directTask=None):
m = {"command": "emit"}
if id is not None:
m["id"] = id
if stream is not None:
m["stream"] = stream
if directTask is not None:
m["task"] = directTask
m["tuple"] = tup
sendMsgToParent(m)
def ack(tup):
sendMsgToParent({"command": "ack", "id": tup.id})
def fail(tup):
sendMsgToParent({"command": "fail", "id": tup.id})
def reportError(msg):
sendMsgToParent({"command": "error", "msg": msg})
def log(msg):
sendMsgToParent({"command": "log", "msg": msg})
def initComponent():
setupInfo = readMsg()
sendpid(setupInfo['pidDir'])
return [setupInfo['conf'], setupInfo['context']]
class Tuple(object):
def __init__(self, id, component, stream, task, values):
self.id = id
self.component = component
self.stream = stream
self.task = task
self.values = values
def __repr__(self):
return '<%s%s>' % (
self.__class__.__name__,
''.join(' %s=%r' % (k, self.__dict__[k]) for k in sorted(self.__dict__.keys())))
class Bolt(object):
def initialize(self, stormconf, context):
pass
def process(self, tuple):
pass
def run(self):
global MODE
MODE = Bolt
conf, context = initComponent()
try:
self.initialize(conf, context)
while True:
tup = readTuple()
self.process(tup)
except Exception, e:
reportError(traceback.format_exc(e))
class BasicBolt(object):
def initialize(self, stormconf, context):
pass
def process(self, tuple):
pass
def run(self):
global MODE
MODE = Bolt
global ANCHOR_TUPLE
conf, context = initComponent()
try:
self.initialize(conf, context)
while True:
tup = readTuple()
ANCHOR_TUPLE = tup
self.process(tup)
ack(tup)
except Exception, e:
reportError(traceback.format_exc(e))
class Spout(object):
def initialize(self, conf, context):
pass
def ack(self, id):
pass
def fail(self, id):
pass
def nextTuple(self):
pass
def run(self):
global MODE
MODE = Spout
conf, context = initComponent()
try:
self.initialize(conf, context)
while True:
msg = readCommand()
if msg["command"] == "next":
self.nextTuple()
if msg["command"] == "ack":
self.ack(msg["id"])
if msg["command"] == "fail":
self.fail(msg["id"])
sync()
except Exception, e:
reportError(traceback.format_exc(e))
|
Python
| 0
|
@@ -1149,22 +1149,88 @@
 adline()
-[0:-1]
+        if not line:
+            raise Exception('Read EOF from stdin')
 
@@ -1237,16 +1237,22 @@
 if line
+[0:-1]
 == "end
@@ -1300,15 +1300,8 @@
 line
- + "\n"
 
|
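Reassembled, the patched readMsg keeps readline's trailing newline, raises on EOF (an empty read), and only strips the newline when testing for the end marker, which makes the explicit + "\n" append unnecessary. Sketch:

```python
def readMsg():
    msg = ""
    while True:
        line = sys.stdin.readline()
        if not line:
            raise Exception('Read EOF from stdin')
        if line[0:-1] == "end":
            break
        msg = msg + line
    return json_decode(msg[0:-1])
```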
148826f75072576d7f0d0f206e3d1dba34688720
|
Refactor getLongestWord to simplify maximum collection and reduce number of conditionals
|
stream_processor/stream_processor.py
|
stream_processor/stream_processor.py
|
'''
Created on Aug 7, 2017
@author: alkaitz
'''
import heapq
'''
You have a function that will be called with a stream of strings.
Every time you receive a new word, you should return the length of the longest
word that you have received that has appeared in the stream only once. Ex:
f("Yes") -> 3
f("No") -> 3
f("Yes") -> 2
'''
working_set = []
heapq.heapify(working_set)
repeated = set()
def process(str):
includeWord(str)
return getLongestWord(str)
'''
Structure will be sorted by negative numbers to transform it from a min heap to a max heap.
Storing the tuple, to provide right sorting.
None returned if data set is empty (all received words have appeared more than once)
'''
def includeWord(str):
if str not in repeated:
lenPlusStringTuple = (-len(str),str)
if lenPlusStringTuple not in working_set:
heapq.heappush(working_set, lenPlusStringTuple)
else:
working_set.remove(lenPlusStringTuple)
repeated.add(str)
def getLongestWord(str):
(length, _) = (working_set[0]) if working_set else (None, None)
return -length if length else None
if __name__ == '__main__':
assert(process("Hello") == 5)
assert(process("Hello") == None)
assert(process("Hello") == None)
assert(process("Hallo") == 5)
assert(process("Bye") == 5)
assert(process("By") == 5)
assert(process("B") == 5)
assert(process("Hallo") == 3)
assert(process("By") == 3)
assert(process("Bye") == 1)
print "Successful"
|
Python
| 0
|
@@ -477,19 +477,16 @@
 estWord(
-str
 )
 
 '''
@@ -1046,82 +1046,83 @@
 ord(
-str):
-    (length, _) = (working_set[0]) if working_set else (None, None)
+):
+    if len(working_set) > 0:
+        (length, _) = (working_set[0])
@@ -1139,23 +1139,19 @@
 ngth
- if length else
+
+    return
 Non
|
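Reassembled, the refactor drops the unused str parameter and the conditional tuple assignment; the maximum is collected inside the guard and None falls out as the default. Sketch of the patched function:

```python
def getLongestWord():
    if len(working_set) > 0:
        (length, _) = (working_set[0])
        return -length
    return None
```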
6d267faaf9d18e58b24cf93906961b152ef0fcb7
|
build vehicle list based on if make is provided
|
src/vehicles/views.py
|
src/vehicles/views.py
|
from django.shortcuts import render, render_to_response, RequestContext
# import the custom context processor
from vehicles.context_processor import global_context_processor
from vehicles.models import Vehicle, Category
def home_page(request):
return render_to_response("home_page.html", locals(),
context_instance=RequestContext(request, processors=[global_context_processor]))
def category_page(request, slug):
# check if make parameter is passed into the url
vehicle_make = request.GET.get('make', None)
# get category by slug
category = Category.objects.get_category_by_slug(slug)
# get all the vehicles by the category and make (if provided)
vehicles_list = None
if vehicle_make is not None:
vehicles_list = Vehicle.objects.get_vehicles_by_category_and_make(
category, vehicle_make)
else:
vehicles_list = Vehicle.objects.get_vehicles_by_category(category)
return render_to_response("home_page.html", locals(),
context_instance=RequestContext(request, processors=[global_context_processor]))
def get_makes_in_category(category):
makes_in_category = []
# get all the vehicle objects by category
vehicles_in_category = Vehicle.objects.get_vehicles_by_category(category=category)
for vehicle in vehicles_in_category:
makes_in_category.append(vehicle.make)
# remove duplicate makes from the list
makes_in_category = list(set(makes_in_category))
makes_in_category = sorted(makes_in_category, key=lambda x:x.v_make)
return makes_in_category
|
Python
| 0.000001
|
@@ -204,16 +204,29 @@
 Vehicle,
+ VehicleMake,
 Categor
@@ -227,16 +227,16 @@
 ategory
-
 
 
 def ho
@@ -461,16 +461,21 @@
 if make
+slug
 paramete
@@ -515,16 +515,21 @@
 cle_make
+_slug
 = reque
@@ -749,16 +749,21 @@
 cle_make
+_slug
 is not
@@ -768,16 +768,114 @@
 t None:
+        # get make by slug
+        make = VehicleMake.objects.get_make_by_slug(vehicle_make_slug)
@@ -963,24 +963,16 @@
 tegory,
-vehicle_
 make)
@@ -1054,24 +1054,28 @@
 tegory)
+
 
     return
|
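Reassembled, category_page now treats the make query parameter as a slug and resolves it to a VehicleMake before filtering. A sketch of the patched view (VehicleMake and get_make_by_slug come from the diff; the comment wording follows the decoded fragments):

```python
def category_page(request, slug):
    # check if make slug parameter is passed into the url
    vehicle_make_slug = request.GET.get('make', None)
    # get category by slug
    category = Category.objects.get_category_by_slug(slug)
    # get all the vehicles by the category and make (if provided)
    vehicles_list = None
    if vehicle_make_slug is not None:
        # get make by slug
        make = VehicleMake.objects.get_make_by_slug(vehicle_make_slug)
        vehicles_list = Vehicle.objects.get_vehicles_by_category_and_make(
            category, make)
    else:
        vehicles_list = Vehicle.objects.get_vehicles_by_category(category)

    return render_to_response("home_page.html", locals(),
        context_instance=RequestContext(request, processors=[global_context_processor]))
```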
39ab86b500cc28420aa0062395adc9e6ddf2017c
|
allow reading from multiple configuration files
|
src/vsphere/config.py
|
src/vsphere/config.py
|
from ConfigParser import ConfigParser
class EsxConfig:
def __init__(self):
parser = ConfigParser()
parser.read("vsphere.conf")
self.vs_host = parser.get('server', 'host')
self.vs_user = parser.get('server', 'user')
self.vs_password = parser.get('server', 'password')
self.vs_dc = parser.get('server', 'dc')
|
Python
| 0
|
@@ -1,8 +1,39 @@
+import sys
+from os import path
 from Con
@@ -67,118 +67,850 @@
 er
 
-class EsxConfig:
-    def __init__(self):
-        parser = ConfigParser()
-        parser.read("vsphere.conf")
-
+VSPHERE_CFG_FILE = "vsphere.conf"
+
+unix_platforms = [
+    "darwin",
+    "Linux"
+]
+
+class EsxConfig:
+    def __init__(self):
+        ok = False
+
+        # specific configuration
+        local_cfg = VSPHERE_CFG_FILE
+
+        # user-global configuration
+        user_cfg = ""
+        if sys.platform in unix_platforms:
+            user_cfg = path.join(path.expanduser("~"), '.{0}'.format(VSPHERE_CFG_FILE))
+
+        # system-wide configuration
+        system_cfg = ""
+        if sys.platform in unix_platforms:
+            system_cfg = path.join(path.expanduser("/etc/vsphere"), VSPHERE_CFG_FILE)
+
+        files = [ local_cfg, user_cfg, system_cfg ]
+
+        for f in files:
+            if path.exists(f):
+                parser = ConfigParser()
+                parser.read(f)
+                ok = True
+                break
+
+        if ok:
@@ -957,32 +957,36 @@
 'host')
+    
 self.vs_user = p
@@ -1013,16 +1013,20 @@
 'user')
+    
@@ -1077,16 +1077,20 @@
 sword')
+    
|
1c3d92dc1161fd9441275a3be3e5a0c0e351e876
|
Add --process-dependency-links support to pip wheel
|
pip/commands/wheel.py
|
pip/commands/wheel.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
from pip.basecommand import Command
from pip.index import PackageFinder
from pip.log import logger
from pip.exceptions import CommandError, PreviousBuildDirError
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.util import normalize_path
from pip.wheel import WheelBuilder, wheel_setuptools_support
from pip import cmdoptions
DEFAULT_WHEEL_DIR = os.path.join(normalize_path(os.curdir), 'wheelhouse')
class WheelCommand(Command):
"""
Build Wheel archives for your requirements and dependencies.
Wheel is a built-package format, and offers the advantage of not recompiling your software during every install.
For more details, see the wheel docs: http://wheel.readthedocs.org/en/latest.
Requirements: setuptools>=0.8, and wheel.
'pip wheel' uses the bdist_wheel setuptools extension from the wheel package to build individual wheels.
"""
name = 'wheel'
usage = """
%prog [options] <requirement specifier> ...
%prog [options] -r <requirements file> ...
%prog [options] <vcs project url> ...
%prog [options] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Build wheels from your requirements.'
def __init__(self, *args, **kw):
super(WheelCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-w', '--wheel-dir',
dest='wheel_dir',
metavar='dir',
default=DEFAULT_WHEEL_DIR,
help="Build wheels into <dir>, where the default is '<cwd>/wheelhouse'.")
cmd_opts.add_option(cmdoptions.use_wheel.make())
cmd_opts.add_option(
'--build-option',
dest='build_options',
metavar='options',
action='append',
help="Extra arguments to be supplied to 'setup.py bdist_wheel'.")
cmd_opts.add_option(cmdoptions.requirements.make())
cmd_opts.add_option(cmdoptions.download_cache.make())
cmd_opts.add_option(cmdoptions.no_deps.make())
cmd_opts.add_option(cmdoptions.build_dir.make())
cmd_opts.add_option(
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the 'bdist_wheel' command.")
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, pip only finds stable versions.")
cmd_opts.add_option(cmdoptions.no_clean.make())
index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
# confirm requirements
try:
import wheel.bdist_wheel
except ImportError:
raise CommandError("'pip wheel' requires the 'wheel' package. To fix this, run: pip install wheel")
if not wheel_setuptools_support():
raise CommandError("'pip wheel' requires setuptools>=0.8. To fix this, run: pip install --upgrade setuptools")
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.notify('Ignoring indexes: %s' % ','.join(index_urls))
index_urls = []
if options.use_mirrors:
logger.deprecated("1.7",
"--use-mirrors has been deprecated and will be removed"
" in the future. Explicit uses of --index-url and/or "
"--extra-index-url is suggested.")
if options.mirrors:
logger.deprecated("1.7",
"--mirrors has been deprecated and will be removed in "
" the future. Explicit uses of --index-url and/or "
"--extra-index-url is suggested.")
index_urls += options.mirrors
session = self._build_session(options)
finder = PackageFinder(find_links=options.find_links,
index_urls=index_urls,
use_wheel=options.use_wheel,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
allow_all_prereleases=options.pre,
session=session,
)
options.build_dir = os.path.abspath(options.build_dir)
requirement_set = RequirementSet(
build_dir=options.build_dir,
src_dir=None,
download_dir=None,
download_cache=options.download_cache,
ignore_dependencies=options.ignore_dependencies,
ignore_installed=True,
session=session,
)
#parse args and/or requirements files
for name in args:
if name.endswith(".whl"):
logger.notify("ignoring %s" % name)
continue
requirement_set.add_requirement(
InstallRequirement.from_line(name, None))
for filename in options.requirements:
for req in parse_requirements(filename, finder=finder, options=options, session=session):
if req.editable or (req.name is None and req.url.endswith(".whl")):
logger.notify("ignoring %s" % req.url)
continue
requirement_set.add_requirement(req)
#fail if no requirements
if not requirement_set.has_requirements:
opts = {'name': self.name}
msg = ('You must give at least one requirement '
'to %(name)s (see "pip help %(name)s")' % opts)
logger.error(msg)
return
try:
#build wheels
wb = WheelBuilder(
requirement_set,
finder,
options.wheel_dir,
build_options = options.build_options or [],
global_options = options.global_options or []
)
wb.build()
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
if not options.no_clean:
requirement_set.cleanup_files()
|
Python
| 0
|
@@ -4710,16 +4710,139 @@
 ns.pre,
+                               process_dependency_links=
+                                   options.process_dependency_links,
|
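The hunk threads one more flag into the PackageFinder constructor. Sketch of the patched call (argument alignment approximated; the diff's own line break before options.process_dependency_links is preserved):

```python
finder = PackageFinder(find_links=options.find_links,
                       index_urls=index_urls,
                       use_wheel=options.use_wheel,
                       allow_external=options.allow_external,
                       allow_unverified=options.allow_unverified,
                       allow_all_external=options.allow_all_external,
                       allow_all_prereleases=options.pre,
                       process_dependency_links=
                           options.process_dependency_links,
                       session=session,
                       )
```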
7cd4bf13b2db52fa3870110ab36e892c693a2a02
|
version bump
|
pipenv/__version__.py
|
pipenv/__version__.py
|
# ___ ( ) ___ ___ __
# // ) ) / / // ) ) //___) ) // ) ) || / /
# //___/ / / / //___/ / // // / / || / /
# // / / // ((____ // / / ||/ /
__version__ = '11.9.0'
|
Python
| 0
|
@@ -211,7 +211,7 @@
 1.9.
-0
+1
 '
|
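Another plain version bump; the patched pipenv/__version__.py line would read:

```python
__version__ = '11.9.1'
```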
eb34a93c9c49727489726ae52978efa8898b2654
|
Update ap_anpa.py
|
superdesk/io/feed_parsers/ap_anpa.py
|
superdesk/io/feed_parsers/ap_anpa.py
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from .anpa import ANPAFeedParser
from superdesk.io import register_feed_parser
from superdesk.io.iptc import subject_codes
from superdesk.locators.locators import find_cities
from apps.archive.common import format_dateline_to_locmmmddsrc
from superdesk.utc import get_date
from bs4 import BeautifulSoup
import logging
logger = logging.getLogger("AP_ANPAFeedParser")
class AP_ANPAFeedParser(ANPAFeedParser):
"""
Feed parser for AP supplied ANPA, maps category codes and maps the prefix on some sluglines to subject codes
"""
NAME = 'ap_anpa1312'
slug_code_map = { # Basketball
'BKC-': '15008000', 'BKL-': '15008000', 'BKN-': '15008000',
'BKW-': '15008000', 'BKO-': '15008000',
# Baseball
'BBA-': '15007000', 'BBC-': '15007000', 'BBM-': '15007000', 'BBN-': '15007000',
'BBO-': '15007000', 'BBY-': '15007000',
# Boxing
'BOX-': '15014000',
# Motor racing
'CAR-': '15039000',
# Cycling
'CYC-': '15019000',
# American Football
'FBL-': '15003000', 'FBN-': '15003000', 'FBO-': '15003000',
# Figure skating
'FIG-': '15025000',
# Golf
'GLF-': '15027000',
# Ice Hockey
'HKN-': '15031000', 'HKO-': '15031000',
# Horse Racing
'RAC-': '15030000',
# Soccer
'SOC-': '15054000',
# Tennis
'TEN-': '15065000',
# Cricket
'CRI-': '15017000',
# Rugby league
'RGL-': '15048000'}
def parse(self, file_path, provider=None):
item = super().parse(file_path, provider)
self.ap_derive_dateline(item)
self.map_category_codes(item)
self.map_sluglines_to_subjects(item)
return item
def map_category_codes(self, item):
"""
Map the category code that has been received to a more palatable value
:param item:
:return:
"""
for category in item.get('anpa_category', []):
if category.get('qcode').lower() in ('a', 'p', 'w', 'n'):
category['qcode'] = 'i'
elif category.get('qcode').lower() == 'r':
category['qcode'] = 'v'
elif category.get('qcode').lower() == 'z':
category['qcode'] = 's'
def map_sluglines_to_subjects(self, item):
"""
        The first few characters of the slugline may match AP supplemental categories;
        this is used to set the subject code.
:param item:
:return:
"""
if len(item.get('slugline','')) > 4:
qcode = self.slug_code_map.get(item['slugline'][:4])
if qcode:
try:
item['subject'] = []
item['subject'].append({'qcode': qcode, 'name': subject_codes[qcode]})
except KeyError:
logger.debug("Subject code '%s' not found" % qcode)
def ap_derive_dateline(self, item):
"""
        This function looks for a dateline in the article body and uses that.
:param item:
:return: item populated with a dateline
"""
try:
html = item.get('body_html')
if html:
soup = BeautifulSoup(html, "html.parser")
pars = soup.findAll('p')
if len(pars) >= 2:
first = pars[0].get_text()
city, source, the_rest = first.partition(' (AP) _ ')
if source:
# sometimes the city is followed by a comma and either a date or a state
city = city.split(',')[0]
if any(char.isdigit() for char in city):
return
cities = find_cities()
located = [c for c in cities if c['city'].lower() == city.lower()]
item.setdefault('dateline', {})
item['dateline']['located'] = located[0] if len(located) > 0 else {'city_code': city,
'city': city,
'tz': 'UTC',
'dateline': 'city'}
item['dateline']['source'] = item.get('original_source', 'AP')
item['dateline']['text'] = format_dateline_to_locmmmddsrc(item['dateline']['located'],
get_date(item['firstcreated']),
source=item.get('original_source',
'AP'))
return item
        except Exception:
logging.exception('AP dateline extraction exception')
register_feed_parser(AP_ANPAFeedParser.NAME, AP_ANPAFeedParser())
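# Illustrative sketch (not part of the parser module): how the
# slug_code_map prefix lookup above resolves a subject. Item content
# below is made up.
#
#   parser = AP_ANPAFeedParser()
#   item = {'slugline': 'BKN-Knicks-Celtics'}
#   parser.map_sluglines_to_subjects(item)
#   # item['subject'] now holds qcode '15008000' (basketball) paired
#   # with the matching name from subject_codes.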
|
Python
| 0.000001
|
@@ -3303,16 +3303,17 @@
ugline',
+
'')) %3E 4
|
01e907635a44fdc1802ca8942daa86b43573a247
|
Use tell/seek to allow the user to read from the database inside a get_all_keys loop, rather than constructing a temporary list
|
starbound/btreedb5.py
|
starbound/btreedb5.py
|
# -*- coding: utf-8 -*-
import binascii
import io
import struct
from starbound import sbon
# Override range with xrange when running Python 2.x.
try:
range = xrange
except NameError:
pass
HEADER = '>8si16si?ixxxxii?ixxxxii?445x'
HEADER_SIZE = struct.calcsize(HEADER)
# Constants for the different block types.
FREE = b'FF'
INDEX = b'II'
LEAF = b'LL'
class BTreeDB5(object):
def __init__(self, stream):
self.stream = stream
def get(self, key):
if not hasattr(self, 'key_size'):
self.read_header()
assert len(key) == self.key_size, 'Invalid key length'
# Traverse the B-tree until we reach a leaf.
offset = HEADER_SIZE + self.block_size * self.root_block
entry_size = self.key_size + 4
s = self.stream
while True:
s.seek(offset)
block_type = s.read(2)
if block_type != INDEX:
break
# Read the index header and scan for the closest key.
lo, (_, hi, block) = 0, struct.unpack('>Bii', s.read(9))
offset += 11
while lo < hi:
mid = (lo + hi) // 2
s.seek(offset + entry_size * mid)
if key < s.read(self.key_size):
hi = mid
else:
lo = mid + 1
if lo > 0:
s.seek(offset + entry_size * (lo - 1) + self.key_size)
block, = struct.unpack('>i', s.read(4))
offset = HEADER_SIZE + self.block_size * block
assert block_type == LEAF, 'Did not reach a leaf'
# Scan leaves for the key, then read the data.
reader = LeafReader(self)
num_keys, = struct.unpack('>i', reader.read(4))
for i in range(num_keys):
cur_key = reader.read(self.key_size)
length = sbon.read_varint(reader)
if key == cur_key:
return reader.read(length)
reader.seek(length, 1)
# None of the keys in the leaf node matched.
raise KeyError(binascii.hexlify(key))
def get_all_keys(self, start=None):
"""
        A generator which yields all valid keys starting at the
given `start` offset. If `start` is `None`, we will start from
the root of the tree.
"""
s = self.stream
if not start:
start = HEADER_SIZE + self.block_size * self.root_block
s.seek(start)
block_type = s.read(2)
if block_type == LEAF:
reader = LeafReader(self)
num_keys = struct.unpack('>i', reader.read(4))[0]
node_keys = []
for i in range(num_keys):
cur_key = reader.read(self.key_size)
node_keys.append(cur_key)
length = sbon.read_varint(reader)
reader.seek(length, 1)
# We're yielding here rather than in the loop because LeafReader
# can only seek relatively, at the moment, and this way the user
# can `get` a node while looping without changing the stream
# position (and thus screwing up the loop)
for key in node_keys:
yield key
elif block_type == INDEX:
(_, num_keys, first_child) = struct.unpack('>Bii', s.read(9))
children = [first_child]
for i in range(num_keys):
new_key = s.read(self.key_size)
next_child = struct.unpack('>i', s.read(4))[0]
children.append(next_child)
for child_loc in children:
for key in self.get_all_keys(HEADER_SIZE + self.block_size * child_loc):
yield key
elif block_type == FREE:
pass
else:
raise Exception('Unhandled block type: {}'.format(block_type))
def read_header(self):
self.stream.seek(0)
data = struct.unpack(HEADER, self.stream.read(HEADER_SIZE))
assert data[0] == b'BTreeDB5', 'Invalid header'
self.block_size = data[1]
self.name = data[2].rstrip(b'\0').decode('utf-8')
self.key_size = data[3]
self.use_other_root = data[4]
self.free_block_1 = data[5]
self.free_block_1_end = data[6]
self.root_block_1 = data[7]
self.root_block_1_is_leaf = data[8]
self.free_block_2 = data[9]
self.free_block_2_end = data[10]
self.root_block_2 = data[11]
self.root_block_2_is_leaf = data[12]
@property
def root_block(self):
return self.root_block_2 if self.use_other_root else self.root_block_1
@property
def root_block_is_leaf(self):
if self.use_other_root:
return self.root_block_2_is_leaf
else:
return self.root_block_1_is_leaf
def swap_root(self):
self.use_other_root = not self.use_other_root
class LeafReader(object):
def __init__(self, db):
# The stream offset must be right after an "LL" marker.
self.db = db
self.offset = 2
def read(self, size=-1):
if size < 0:
            raise NotImplementedError('Can only read specific amount')
with io.BytesIO() as data:
for length in self._traverse(size):
data.write(self.db.stream.read(length))
return data.getvalue()
def seek(self, offset, whence=0):
if whence != 1 or offset < 0:
            raise NotImplementedError('Can only seek forward relatively')
for length in self._traverse(offset):
self.db.stream.seek(length, 1)
def _traverse(self, length):
block_end = self.db.block_size - 4
while True:
if self.offset + length <= block_end:
yield length
self.offset += length
break
delta = block_end - self.offset
yield delta
block, = struct.unpack('>i', self.db.stream.read(4))
assert block >= 0, 'Could not traverse to next block'
self.db.stream.seek(HEADER_SIZE + self.db.block_size * block)
assert self.db.stream.read(2) == LEAF, 'Did not reach a leaf'
self.offset = 2
length -= delta
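# Minimal usage sketch (the file name is hypothetical; the stream must
# stay open while keys are consumed). The tell/seek added in this commit
# is what makes calling get() from inside the key loop safe:
#
#   with open('universe.chunks', 'rb') as fh:
#       db = BTreeDB5(fh)
#       db.read_header()
#       for key in db.get_all_keys():
#           print(binascii.hexlify(key), len(db.get(key)))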
|
Python
| 0
|
@@ -2614,35 +2614,8 @@
%5B0%5D%0A
- node_keys = %5B%5D%0A
@@ -2721,464 +2721,311 @@
-node_keys.append(cur_key)%0A length = sbon.read_varint(reader)%0A reader.seek(length, 1)%0A # We're yielding here rather than in the loop because LeafReader%0A # can only seek relatively, at the moment, and this way the user%0A # can %60get%60 a node while looping without changing the stream%0A # position (and thus screwing up the loop)%0A for key in node_keys:%0A yield key
+# We do a tell/seek here so that the user can read from%0A # the file while this loop is still being run%0A cur_pos = s.tell()%0A yield cur_key%0A s.seek(cur_pos)%0A length = sbon.read_varint(reader)%0A reader.seek(length, 1)
%0A
|
f9c93d60a18df83205289e46fe5f406e51aa2527
|
Version 0.3.5
|
starlette/__init__.py
|
starlette/__init__.py
|
__version__ = "0.3.4"
|
Python
| 0.000001
|
@@ -16,7 +16,7 @@
0.3.
-4
+5
%22%0A
|
170a50eeca4249a488cc9d0c69876c5f2708b743
|
use two-tail for testing significance and right_tail for redundancy checking
|
stats/significance.py
|
stats/significance.py
|
'''
Significance testing methods.
@author: anze.vavpetic@ijs.si
'''
from fisher import pvalue
def is_redundant(rule, new_rule):
'''
Computes the redundancy coefficient of a new rule compared to its
immediate generalization.
Rules with a coeff > 1 are deemed non-redundant.
'''
return fisher(new_rule) > fisher(rule)
def fisher(rule):
'''
Fisher's p-value for one rule.
'''
N = float(len(rule.kb.examples))
nX = float(rule.coverage)
nY = rule.kb.distribution[rule.target]
nXY = rule.distribution[rule.target]
nXnotY = nX - nXY
nnotXY = nY - nXY
    nnotXnotY = N - nXY - nXnotY - nnotXY  # the four cells must sum to N
return pvalue(nXY, nXnotY, nnotXY, nnotXnotY).right_tail
def apply_fisher(ruleset):
'''
Fisher's exact test to test rule significance.
'''
for rule in ruleset:
rule.pval = fisher(rule)
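# Worked illustration with made-up counts: fisher() above fills a 2x2
# contingency table for N=100 examples, rule coverage nX=30, class size
# nY=40 and overlap nXY=20, giving cells (20, 10, 20, 50):
#
#   p = pvalue(20, 10, 20, 50)  # nXY, nXnotY, nnotXY, nnotXnotY
#   p.right_tail                # one-sided, used for redundancy checking
#   p.two_tail                  # two-sided, used for significance testing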
|
Python
| 0
|
@@ -305,16 +305,17 @@
return
+_
fisher(n
@@ -322,19 +322,31 @@
ew_rule)
+.right_tail
%3E
+_
fisher(r
@@ -349,23 +349,139 @@
er(rule)
-%0A%0A%0Adef
+.right_tail%0A%0A%0Adef fisher(rule):%0A '''%0A Fisher's p-value for one rule.%0A '''%0A return _fisher(rule).two_tail%0A%0Adef _
fisher(r
@@ -817,28 +817,16 @@
otXnotY)
-.right_tail%0A
%0A%0Adef ap
|
8907993e48a59ce39dab1cdb359e287f527b7642
|
Add --verbose parameter
|
stbt_control_relay.py
|
stbt_control_relay.py
|
#!/usr/bin/python
"""
Allows using any of the stbt remote control backends remotely using the lirc
protocol.
Presents the same socket protocol as lircd but sending keypresses using any of
stbt's controls. This allows for example controlling a roku over its HTTP
interface from some software that only speaks lirc.
Example usage:
$ stbt control-relay file:example
Listens on `/var/run/lirc/lircd` for lirc clients. Keypresses sent will be
written to the file example. So
$ irsend SEND_ONCE stbt KEY_UP
Will write the text "KEY_UP" to the file `example`.
$ stbt control-relay --input=lircd:lircd.sock \\
roku:192.168.1.13 samsung:192.168.1.14
Listens on lircd.sock and will forward keypresses to the roku at 192.168.1.13
using its HTTP protocol and to the Samsung TV at 192.168.1.14 using its TCP
protocol. So
$ irsend -d lircd.sock SEND_ONCE stbt KEY_OK
Will press KEY_OK on both the Samsung and the roku devices simultaneously.
"""
import argparse
import signal
import sys
from _stbt.control import MultiRemote, uri_to_remote, uri_to_remote_recorder
def main(argv):
parser = argparse.ArgumentParser(
epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"--input", default="lircd", help="""The source of remote control
presses. Values are the same as stbt record's --control-recorder.""")
parser.add_argument("output", nargs="+", help="""One or more remote control
configurations. Values are the same as stbt run's --control.""")
args = parser.parse_args(argv[1:])
signal.signal(signal.SIGTERM, lambda _signo, _stack_frame: sys.exit(0))
r = MultiRemote(uri_to_remote(x) for x in args.output)
listener = uri_to_remote_recorder(args.input)
for key in listener:
sys.stderr.write("Received %s\n" % key)
try:
r.press(key)
except Exception as e: # pylint: disable=broad-except
sys.stderr.write("Error pressing key %r: %s\n" % (key, e))
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
Python
| 0
|
@@ -1082,16 +1082,80 @@
ecorder%0A
+from _stbt.logging import argparser_add_verbose_argument, debug%0A
%0A%0Adef ma
@@ -1605,24 +1605,67 @@
ontrol.%22%22%22)%0A
+ argparser_add_verbose_argument(parser)%0A
args = p
@@ -1911,32 +1911,21 @@
-sys.stderr.write
+debug
(%22Receiv
@@ -1929,18 +1929,16 @@
eived %25s
-%5Cn
%22 %25 key)
|
84ee720fd2d8403de5f49c54fc41bfcb67a78f78
|
Add missing vat alias for Turkey
|
stdnum/tr/__init__.py
|
stdnum/tr/__init__.py
|
# __init__.py - collection of Turkish numbers
# coding: utf-8
#
# Copyright (C) 2016 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""Collection of Turkish numbers."""
|
Python
| 0.000355
|
@@ -861,8 +861,55 @@
ers.%22%22%22%0A
+from stdnum.tr import vkn as vat # noqa: F401%0A
|
f32a5e24b39b00b1a74e31560fd22fad4a50a45f
|
Update utils.py
|
sublime_jedi/utils.py
|
sublime_jedi/utils.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import subprocess
import json
import threading
import warnings
from functools import partial
from collections import defaultdict
from uuid import uuid1
try:
from Queue import Queue
except ImportError:
from queue import Queue
import sublime
from .console_logging import getLogger
from .settings import get_settings_param
logger = getLogger(__name__)
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PY3 = sys.version_info[0] == 3
DAEMONS = defaultdict(dict) # per window
def run_in_active_view(window_id, callback, response):
for window in sublime.windows():
if window.id() == window_id:
callback(window.active_view(), response)
break
class BaseThread(threading.Thread):
def __init__(self, fd, window_id, waiting, lock):
self.fd = fd
self.done = False
self.waiting = waiting
self.wait_lock = lock
self.window_id = window_id
super(BaseThread, self).__init__()
self.daemon = True
self.start()
class ThreadReader(BaseThread):
def run(self):
while not self.done:
line = self.fd.readline()
if line:
data = None
try:
data = json.loads(line.strip())
except ValueError:
if not isinstance(data, dict):
logger.exception(
"Non JSON data from daemon: {0}".format(line)
)
else:
self.call_callback(data)
def call_callback(self, data):
"""
Call callback for response data
:type data: dict
"""
if 'logging' in data:
getattr(logger, data['logging'])(data['content'])
return
with self.wait_lock:
callback = self.waiting.pop(data['uuid'], None)
if callback is not None:
delayed_callback = partial(
run_in_active_view,
self.window_id,
callback,
data[data['type']]
)
sublime.set_timeout(delayed_callback, 0)
class ThreadWriter(BaseThread, Queue):
def __init__(self, *args, **kwargs):
Queue.__init__(self)
super(ThreadWriter, self).__init__(*args, **kwargs)
def run(self):
while not self.done:
request_data = self.get()
if not request_data:
continue
callback, data = request_data
with self.wait_lock:
self.waiting[data['uuid']] = callback
if not isinstance(data, str):
data = json.dumps(data)
self.fd.write(data)
if not data.endswith('\n'):
self.fd.write('\n')
self.fd.flush()
class Daemon(object):
def __init__(self, view):
window_id = view.window().id()
self.waiting = dict()
self.wlock = threading.RLock()
self.process = self._start_process(get_settings(view))
self.stdin = ThreadWriter(self.process.stdin, window_id,
self.waiting, self.wlock)
self.stdout = ThreadReader(self.process.stdout, window_id,
self.waiting, self.wlock)
self.stderr = ThreadReader(self.process.stderr, window_id,
self.waiting, self.wlock)
def _start_process(self, settings):
options = {
'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'universal_newlines': True,
'cwd': CUR_DIR,
'bufsize': -1,
}
# hide "cmd" window in Windows
if sys.platform == "win32":
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
options['startupinfo'] = startupinfo
command = [
settings['python_interpreter'],
'-B', 'daemon.py',
'-p', settings['project_name']
]
for folder in settings['extra_packages']:
command.extend(['-e', folder])
command.extend(['-f', settings['complete_funcargs']])
logger.debug(
'Daemon process starting with parameters: {0} {1}'
.format(command, options)
)
try:
return subprocess.Popen(command, **options)
except OSError:
logger.error(
'Daemon process failed with next parameters: {0} {1}'
.format(command, options)
)
raise
def request(self, view, request_type, callback, location=None):
"""
Send request to daemon process
:type view: sublime.View
:type request_type: str
        :type callback: callable
:type location: type of (int, int) or None
"""
logger.info('Sending request to daemon for "{0}"'.format(request_type))
if location is None:
location = view.sel()[0].begin()
current_line, current_column = view.rowcol(location)
source = view.substr(sublime.Region(0, view.size()))
if PY3:
uuid = uuid1().hex
else:
uuid = uuid1().get_hex()
data = {
'source': source,
'line': current_line + 1,
'offset': current_column,
'filename': view.file_name() or '',
'type': request_type,
'uuid': uuid,
}
self.stdin.put_nowait((callback, data))
def ask_daemon(view, callback, ask_type, location=None):
"""
Daemon request shortcut
:type view: sublime.View
    :type callback: callable
:type ask_type: str
:type location: type of (int, int) or None
"""
window_id = view.window().id()
if window_id not in DAEMONS:
DAEMONS[window_id] = Daemon(view)
DAEMONS[window_id].request(view, ask_type, callback, location)
def get_settings(view):
"""
get settings for daemon
:type view: sublime.View
:rtype: dict
"""
python_interpreter = get_settings_param(view, 'python_interpreter_path')
if not python_interpreter:
python_interpreter = get_settings_param(view, 'python_interpreter',
'python')
else:
warnings.warn('`python_interpreter_path` parameter is deprecated.'
'Please, use `python_interpreter` instead.',
DeprecationWarning)
extra_packages = get_settings_param(view, 'python_package_paths', [])
complete_funcargs = get_settings_param(view,
'auto_complete_function_params',
'all')
first_folder = ''
if view.window().folders():
first_folder = os.path.split(view.window().folders()[0])[-1]
project_name = get_settings_param(view, 'project_name', first_folder)
return {
'python_interpreter': python_interpreter,
'extra_packages': extra_packages,
'project_name': project_name,
'complete_funcargs': complete_funcargs
}
def is_python_scope(view, location):
""" (View, Point) -> bool
Get if this is a python source scope (not a string and not a comment)
"""
return view.match_selector(location, "source.python - string - comment")
def to_relative_path(path):
"""
    Trim project root paths from **path** passed as argument
    If no folders are opened, the path will be returned unchanged
"""
folders = sublime.active_window().folders()
for folder in folders:
# close path with separator
if folder[-1] != os.path.sep:
folder += os.path.sep
if path.startswith(folder):
return path.replace(folder, '')
return path
|
Python
| 0.000001
|
@@ -6643,16 +6643,234 @@
rning)%0A%0A
+ if python_interpreter.startswith('$project_path'):%0A proejct_dir = os.path.dirname(view.window().project_file_name())%0A python_interpreter = python_interpreter.replace('$project_path', proejct_dir, 1)%0A%0A
extr
|
3c2a8aee155913e63bf36dec7a7afd42e2a810e3
|
Change buffer overflow exception message.
|
subsevenzip/buffer.py
|
subsevenzip/buffer.py
|
# Copyright (c) 2015, Daniel Svensson <dsvensson@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted, provided that the
# above copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
import io
import struct
class BoundedBufferedReader(io.BufferedReader):
def __init__(self, stream, limit):
io.BufferedReader.__init__(self, stream)
self._limit = limit
def read(self, size):
position = super().tell()
if (position + size) > self._limit:
return super().read(self._limit - position)
return super().read(size)
def close(self):
# Should not close parent file descriptor
pass
class ReadBuffer(object):
def __init__(self, fd):
assert isinstance(fd, io.IOBase)
self._fd = fd
self._pos = 0
self._limit = None
def _read(self, func):
try:
return func()
finally:
self._check_limit()
def _check_limit(self):
if self._limit is None:
return
if self._fd.tell() > self._limit:
raise IOError("ReadBuffer limit breached! (limit: %d, position: %d)" % (self._limit, self._fd.tell()))
def set_limit(self, limit, absolute=False):
if absolute:
self._limit = limit
else:
self._limit = self._fd.tell() + limit
def unset_limit(self):
self._limit = None
def get_uint8(self):
return self._read(lambda: struct.unpack("<B", self._fd.read(1))[0])
def get_uint16(self):
return self._read(lambda: struct.unpack("<H", self._fd.read(2))[0])
def get_uint32(self):
return self._read(lambda: struct.unpack("<I", self._fd.read(4))[0])
def get_uint64(self):
return self._read(lambda: struct.unpack("<Q", self._fd.read(8))[0])
def get_varint(self):
first_byte = self.get_uint8()
mask = 0x80
value = 0
for x in range(8):
if (first_byte & mask) == 0:
return value | ((first_byte & (mask - 1)) << 8 * x)
value |= self.get_uint8() << 8 * x
mask >>= 1
return value
def get_bits(self, count):
value = 0
mask = 0
cache = 0
for x in range(count):
if not mask:
mask = 0x80
cache = self.get_uint8()
value |= int((cache & mask) != 0) << x
mask >>= 1
return value
def get_all_or_bits(self, count):
all_defined = self.get_uint8()
if all_defined:
return (1 << count) - 1
return self.get_bits(count)
def get_utf16_le(self):
buffer = bytearray([])
while True:
value1 = self.get_uint8()
value2 = self.get_uint8()
if value1 == 0 and value2 == 0:
return buffer.decode("utf-16-le")
buffer.append(value1)
buffer.append(value2)
raise IOError("UTF-16 string was not \\0 terminated")
def get_bytes(self, length):
return self._read(lambda: self._fd.read(length))
def seek(self, offset, whence=io.SEEK_CUR):
self._fd.seek(offset, whence)
self._check_limit()
def tell(self):
return self._fd.tell()
def get_sub_stream(self, length):
return BoundedBufferedReader(self._fd, self._fd.tell() + length)
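# Worked example of the byte layout get_varint decodes (illustrative):
# a first byte of 0x80 signals that one extra byte follows and carries
# the low 8 bits, so the byte pair 80 FF decodes to 255.
#
#   buf = ReadBuffer(io.BytesIO(b'\x80\xff'))
#   assert buf.get_varint() == 255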
|
Python
| 0
|
@@ -1681,45 +1681,16 @@
or(%22
-ReadBuffer limit breached! (limit: %25d
+Overflow
, po
@@ -1700,30 +1700,26 @@
ion:
-
%25d
-)%22 %25 (self._limit,
+ %3E limit:%25d%22 %25 (
self
@@ -1729,16 +1729,29 @@
d.tell()
+, self._limit
))%0A%0A
|
e0bd89115c4d103334fe0c751cdbc96a9f005ba6
|
version up
|
substance/_version.py
|
substance/_version.py
|
__version__ = '1.1.beta.3'
|
Python
| 0.998795
|
@@ -21,7 +21,7 @@
eta.
-3
+4
'%0A
|
f02eb748d33b621368198c10a965b27ee31effca
|
update tutorial section link
|
swagger/yamlscript.py
|
swagger/yamlscript.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# This script, when run, parses the file "swagger.yaml" and strips it down to only
# include those paths and methods specified in the included variable.
#
# As of now, it is called with every "jekyll build" - see jekyll-freme/_plugins/jekyll-pages-directory.rb
# line: "exec(python swagger/yamlscript.py)"
#
# To be able to import yaml, on linux, run "sudo pip install PyYAML"
#
# Author: Jonathan Sauder (jonathan_paul.sauder@dfki.de)
#
def main():
import yaml,os,sys
try:
with open(os.path.dirname(__file__)+"/swagger.yaml","r") as f:
full=yaml.safe_load(f.read())
except IOError:
raise Exception("\n\tException Handled in /swagger/yamlscript.py:"+ os.path.dirname(__file__)+"/swagger.yaml could not be found. The generation of a simple API-Doc was skipped")
sys.exit(1)
except yaml.scanner.ScannerError:
raise Exception("\n\tException Handled in /swagger/yamlscript.py: The YAML File at "+ os.path.dirname(__file__)+"/swagger.yaml is invalid! The generation of a simple API-Doc was skipped")
sys.exit(1)
included_paths={
"/e-entity/freme-ner/documents": ["post"],
"/e-entity/dbpedia-spotlight/documents": ["post"],
"/e-publishing/html": ["post"],
"/e-link/documents/": ["post"],
"/e-translation/tilde": ["post"],
"/e-terminology/tilde": ["post"],
"/e-link/explore": ["post"]
}
for path in full["paths"].keys():
if path not in included_paths:
del full["paths"][path]
else:
for method in included_paths[path]:
if method not in full["paths"][path].keys():
del full["paths"][path][method]
# else:
# full["paths"][path][method]['tags']=["Enrichment Endpoints"]
full["tags"]=[x for x in full["tags"] if x["name"]!="General Information"]
full['info']['description']="This section only covers the most important endpoints of FREME: the enrichment endpoints.<br><br> The endpoints can be used to access FREME e-Services via common HTTP requests.<br><br> A full documentation of all e-Service endpoints, including all parameters, is provided <a href=\"full.html\">here</a>. For usage examples, see the <a href=\"../Tutorials/overview.html\">tutorial section</a>."
with open(os.path.dirname(__file__)+"/simple.yaml",'w') as f:
f.write(yaml.dump(full))
return 0
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -2116,17 +2116,17 @@
ef=%5C%22../
-T
+t
utorials
|
86b698a228ddf1309e8f2006726724af05c5fca1
|
bump version
|
symposion/__init__.py
|
symposion/__init__.py
|
__version__ = "1.0b1.dev11"
|
Python
| 0
|
@@ -18,11 +18,11 @@
0b1.dev1
-1
+2
%22%0A
|
e9a4157ac30d41ab23ac1de344045886a5c4fa02
|
refactor for consistency
|
libtaxii/__init__.py
|
libtaxii/__init__.py
|
# Copyright (c) 2017, The MITRE Corporation
# For license information, see the LICENSE.txt file
"""
The main libtaxii module
"""
import six
from six.moves import urllib
import libtaxii.messages_10 as tm10
import libtaxii.messages_11 as tm11
import libtaxii.clients as tc
from .constants import *
import cgi
from .version import __version__ # noqa
def get_message_from_http_response(http_response, in_response_to):
"""Create a TAXII message from an HTTPResponse object.
This function parses the :py:class:`httplib.HTTPResponse` by reading the
X-TAXII-Content-Type HTTP header to determine if the message binding is
supported. If the X-TAXII-Content-Type header is present and the value
indicates a supported Message Binding, this function will attempt to parse
the HTTP Response body.
If the X-TAXII-Content-Type header is not present, this function will
attempt to build a Failure Status Message per the HTTP Binding 1.0
specification.
If the X-TAXII-Content-Type header is present and indicates an unsupported
Message Binding, this function will raise a ValueError.
Args:
http_response (httplib.HTTPResponse): the HTTP response to
parse
in_response_to (str): the default value for in_response_to
"""
if isinstance(http_response, six.moves.http_client.HTTPResponse):
return get_message_from_httplib_http_response(http_response, in_response_to)
elif isinstance(http_response, urllib.error.HTTPError):
return get_message_from_urllib2_httperror(http_response, in_response_to)
elif isinstance(http_response, urllib.response.addinfourl):
return get_message_from_urllib_addinfourl(http_response, in_response_to)
else:
raise ValueError('Unsupported response type: %s.' % http_response.__class__.__name__)
def get_message_from_urllib2_httperror(http_response, in_response_to):
""" This function should not be called by libtaxii users directly. """
info = http_response.info()
if hasattr(info, 'getheader'):
taxii_content_type = info.getheader('X-TAXII-Content-Type')
_, params = cgi.parse_header(info.getheader('Content-Type'))
else:
taxii_content_type = info.get('X-TAXII-Content-Type')
_, params = cgi.parse_header(info.get('Content-Type'))
encoding = params.get('charset', 'utf-8')
response_message = http_response.read()
if taxii_content_type is None:
if isinstance(response_message, six.binary_type):
response_message = response_message.decode(encoding, 'replace')
m = str(http_response) + '\r\n' + str(http_response.info()) + '\r\n' + response_message
return tm11.StatusMessage(message_id='0', in_response_to=in_response_to, status_type=ST_FAILURE, message=m)
elif taxii_content_type == VID_TAXII_XML_10: # It's a TAXII XML 1.0 message
return tm10.get_message_from_xml(response_message, encoding)
elif taxii_content_type == VID_TAXII_XML_11: # It's a TAXII XML 1.1 message
return tm11.get_message_from_xml(response_message, encoding)
elif taxii_content_type == VID_CERT_EU_JSON_10:
return tm10.get_message_from_json(response_message, encoding)
else:
raise ValueError('Unsupported X-TAXII-Content-Type: %s' % taxii_content_type)
def get_message_from_urllib_addinfourl(http_response, in_response_to):
""" This function should not be called by libtaxii users directly. """
info = http_response.info()
if hasattr(info, 'getheader'):
taxii_content_type = info.getheader('X-TAXII-Content-Type')
_, params = cgi.parse_header(info.getheader('Content-Type'))
else:
taxii_content_type = info.get('X-TAXII-Content-Type')
_, params = cgi.parse_header(info.get('Content-Type'))
encoding = params.get('charset', 'utf-8')
response_message = six.ensure_text(http_response.read(), errors='replace')
if taxii_content_type is None: # Treat it as a Failure Status Message, per the spec
message = []
header_dict = six.iteritems(http_response.info().dict)
for k, v in header_dict:
message.append(k + ': ' + v + '\r\n')
message.append('\r\n')
message.append(response_message)
m = ''.join(message)
return tm11.StatusMessage(message_id='0', in_response_to=in_response_to, status_type=ST_FAILURE, message=m)
elif taxii_content_type == VID_TAXII_XML_10: # It's a TAXII XML 1.0 message
return tm10.get_message_from_xml(response_message, encoding)
elif taxii_content_type == VID_TAXII_XML_11: # It's a TAXII XML 1.1 message
return tm11.get_message_from_xml(response_message, encoding)
elif taxii_content_type == VID_CERT_EU_JSON_10:
return tm10.get_message_from_json(response_message, encoding)
else:
raise ValueError('Unsupported X-TAXII-Content-Type: %s' % taxii_content_type)
def get_message_from_httplib_http_response(http_response, in_response_to):
""" This function should not be called by libtaxii users directly. """
if hasattr(http_response, 'getheader'):
taxii_content_type = http_response.getheader('X-TAXII-Content-Type')
_, params = cgi.parse_header(http_response.getheader('Content-Type'))
else:
taxii_content_type = http_response.get('X-TAXII-Content-Type')
_, params = cgi.parse_header(http_response.get('Content-Type'))
encoding = params.get('charset', 'utf-8')
response_message = six.ensure_text(http_response.read(), errors='replace')
if taxii_content_type is None: # Treat it as a Failure Status Message, per the spec
message = []
header_tuples = http_response.getheaders()
for k, v in header_tuples:
message.append(k + ': ' + v + '\r\n')
message.append('\r\n')
message.append(response_message)
m = ''.join(message)
return tm11.StatusMessage(message_id='0', in_response_to=in_response_to, status_type=ST_FAILURE, message=m)
elif taxii_content_type == VID_TAXII_XML_10: # It's a TAXII XML 1.0 message
return tm10.get_message_from_xml(response_message, encoding)
elif taxii_content_type == VID_TAXII_XML_11: # It's a TAXII XML 1.1 message
return tm11.get_message_from_xml(response_message, encoding)
else:
raise ValueError('Unsupported X-TAXII-Content-Type: %s' % taxii_content_type)
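# Hypothetical usage sketch (the endpoint URL is made up); both success
# and error responses can be handed to the dispatcher above:
#
#   try:
#       resp = urllib.request.urlopen('https://taxii.example.com/poll')
#   except urllib.error.HTTPError as err:
#       resp = err
#   msg = get_message_from_http_response(resp, in_response_to='0')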
|
Python
| 0.000011
|
@@ -2385,24 +2385,40 @@
e_message =
+six.ensure_text(
http_respons
@@ -2425,16 +2425,35 @@
e.read()
+, errors='replace')
%0A%0A if
@@ -2485,142 +2485,8 @@
ne:%0A
- if isinstance(response_message, six.binary_type):%0A response_message = response_message.decode(encoding, 'replace')%0A
|
18d551d2495fc122edb142e416a06ce4129da1f7
|
Update urls.py
|
life3/config/urls.py
|
life3/config/urls.py
|
"""life3.0 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.conf.urls import include
from life3.dashboard import views as dashboard_view
urlpatterns = [
url(r'^$/', dashboard_view.api_home),
url(r'^dashboard/api/', include('life3.dashboard.urls')),
url(r'^login/', include('life3.login.urls')),
]
|
Python
| 0.000002
|
@@ -785,17 +785,16 @@
url(r'%5E$
-/
', dashb
|
ac3a9211725a0538c8c8f7899d86e4e22ceebb71
|
Update binary_search.py
|
aids/sorting_and_searching/binary_search.py
|
aids/sorting_and_searching/binary_search.py
|
'''
In this module, we implement binary search in Python both
recrusively and iteratively
Assumption: Array is sorted
Time complexity: O(log n)
'''
def binary_search_recursive(arr, left, right, value):
'''
Recursive implementation of binary search of a sorted array
    Return index of the value if found, else return None
'''
if arr and left <= right:
middle = (left + right) / 2
if arr[middle] == value:
return middle
if arr[middle] > value:
return binary_search_recursive(arr, left, middle - 1, value)
return binary_search_recursive(arr, middle + 1, right, value)
return None
def binary_search_iterative(arr, left, right, value):
'''
Iterative implementation of binary search of a sorted array
    Return index of the value if found, else return None
'''
if arr:
while left <= right:
middle = (left + right) / 2 # left + (right - left) / 2
if arr[middle] == value:
return middle
elif arr[middle] > value:
right = middle - 1
else:
left = middle + 1
return None
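# Quick sanity check (illustrative; the module assumes Python 2 integer
# division -- under Python 3 the midpoint would need // instead of /):
#
#   data = [1, 3, 5, 7, 9, 11]
#   binary_search_recursive(data, 0, len(data) - 1, 7)   # -> 3
#   binary_search_iterative(data, 0, len(data) - 1, 4)   # -> None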
|
Python
| 0.000002
|
@@ -376,37 +376,44 @@
middle =
-(
left +
+(
right
+ - left
) / 2%0A
@@ -918,29 +918,8 @@
le =
- (left + right) / 2 #
lef
|
1433106d2e36a08f79b4b2c67e07c1fdd361bda6
|
fix MAINTENANCE_MODE logic
|
electionleaflets/urls.py
|
electionleaflets/urls.py
|
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.views.generic import TemplateView
admin.autodiscover()
from leaflets.feeds import *
from core.views import HomeView, MaintenanceView
if getattr(settings, 'MAINTENANCE_MODE', None):
urlpatterns = patterns(
'',
url(r'.*', MaintenanceView.as_view(), name='maintenance_view'),
)
else:
urlpatterns = patterns(
'',
url(r'^$', HomeView.as_view(), name='home'),
url(r'^leaflets', include('leaflets.urls')),
url(r'^parties', include('parties.urls')),
url(r'^constituencies', include('constituencies.urls')),
url(r'^analysis', include('analysis.urls')),
url(r'^tags', include('tags.urls')),
url(r'^categories', include('categories.urls')),
url(r'^api/', include('api.urls')),
# Feeds
url(r'^feeds/latest/$', LatestLeafletsFeed(), name='latest_feed'),
# url(r'^feeds/party/(?P<party_slug>[\w_\-\.]+)/$', PartyFeed(), name='party_feed'),
# url(r'^feeds/attacking/(?P<party_slug>[\w_\-\.]+)/$', AttackingPartyFeed(), name='attacking_party_feed'),
url(r'^feeds/constituency/(?P<cons_slug>[\w_\-\.]+)/$', ConstituencyFeed(), name='constituency_feed'),
url(r'^feeds/category/(?P<cat_slug>[\w_\-\.]+)/$', CategoryFeed(), name='category_feed'),
url(r'^feeds/tag/(?P<tag_slug>[\w_\-\.]+)/$', TagFeed(), name='tag_feed'),
# Individual urls
url(r'^about/$', TemplateView.as_view(template_name='core/about.html'), name='about'),
url(r'^report/(?P<id>\d+)/sent/$', TemplateView.as_view(template_name='core/report_sent.html'), name='report_abuse_sent'),
url(r'^report/(?P<id>\d+)/$', 'core.views.report_abuse', name='report_abuse'),
# Administration URLS
(r'^admin/', include(admin.site.urls)),
url(r'^accounts/', include('allauth.urls')),
)
urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
Python
| 0.000011
|
@@ -306,11 +306,26 @@
iew%0A
-%0Aif
+MAINTENANCE_MODE =
get
@@ -363,13 +363,34 @@
E',
-None)
+False)%0Aif MAINTENANCE_MODE
:%0A
|
5605dd1f37f91d0fa627d49332f5550c90e6d2e7
|
Check child is element before inspecting name
|
mammoth/docx/xmlparser.py
|
mammoth/docx/xmlparser.py
|
import collections
import xml.sax
XmlElementBase = collections.namedtuple("XmlElement", ["name", "attributes", "children"])
class XmlElement(XmlElementBase):
def find_child_or_null(self, name):
return self.find_child(name) or _null_xml_element
def find_child(self, name):
for child in self.children:
if child.name == name:
return child
def find_children(self, name):
return XmlElementList(filter(lambda child: child.name == name, self.children))
class XmlElementList(object):
def __init__(self, elements):
self._elements = elements
def __iter__(self):
return iter(self._elements)
def find_children(self, name):
children = []
for element in self._elements:
for child in element.find_children(name):
children.append(child)
return XmlElementList(children)
class NullXmlElement(object):
attributes = {}
def find_child_or_null(self, name):
return self
def find_child(self, name):
return None
_null_xml_element = NullXmlElement()
XmlText = collections.namedtuple("XmlText", ["value"])
def element(name, attributes=None, children=None):
return XmlElement(name, attributes or {}, children or [])
text = XmlText
class node_types(object):
element = 1
text = 3
XmlElement.node_type = node_types.element
XmlText.node_type = node_types.text
def parse_xml(fileobj, namespace_mapping=None):
if namespace_mapping is None:
namespace_prefixes = {}
else:
namespace_prefixes = dict((uri, prefix) for prefix, uri in namespace_mapping)
handler = Handler(namespace_prefixes)
parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_namespaces, True)
parser.setContentHandler(handler)
parser.parse(fileobj)
return handler.root()
class Handler(xml.sax.handler.ContentHandler):
def __init__(self, namespace_prefixes):
self._namespace_prefixes = namespace_prefixes
self._element_stack = [RootElement()]
self._character_buffer = []
def root(self):
return self._element_stack[0].children[0]
def startElementNS(self, name, qname, attrs):
self._flush_character_buffer()
attributes = dict((self._read_name(key), value) for key, value in attrs.items())
element = XmlElement(self._read_name(name), attributes, [])
self._element_stack[-1].children.append(element)
self._element_stack.append(element)
def endElementNS(self, name, qname):
self._flush_character_buffer()
self._element_stack.pop()
def characters(self, content):
self._character_buffer.append(content)
def _flush_character_buffer(self):
if self._character_buffer:
text = "".join(self._character_buffer)
self._element_stack[-1].children.append(XmlText(text))
self._character_buffer = []
def _read_name(self, name):
uri, local_name = name
if uri is None:
return local_name
else:
prefix = self._namespace_prefixes.get(uri)
if prefix is None:
return "{%s}%s" % (uri, local_name)
else:
return "%s:%s" % (prefix, local_name)
class RootElement(object):
def __init__(self):
self.children = []
|
Python
| 0.000001
|
@@ -474,16 +474,29 @@
(filter(
+%0A
lambda c
@@ -512,20 +512,74 @@
ld.n
-ame == name,
+ode_type == node_types.element and child.name == name,%0A
sel
@@ -588,16 +588,25 @@
children
+%0A
))%0A%0A%0Acla
|
bc9bbe0075f8a6571179e2310a9cfeaff89652b2
|
Remove unused argument
|
modules/pipeunion.py
|
modules/pipeunion.py
|
# pipeunion.py
#
from pipe2py import util
def pipe_union(context, _INPUT, conf, **kwargs):
    """This operator merges up to 5 sources together.
Keyword arguments:
context -- pipeline context
_INPUT -- source generator
kwargs -- _OTHER1 - another source generator
_OTHER2 etc.
conf:
Yields (_OUTPUT):
union of all source items
"""
#TODO the multiple sources should be pulled in parallel
# check David Beazely for suggestions (co-routines with queues?)
# or maybe use multiprocessing and Queues (perhaps over multiple servers too)
#Single thread and sequential pulling will do for now...
for item in _INPUT:
if item == True: #i.e. this is being fed forever, i.e. not a real source so just use _OTHERs
break
yield item
for other in kwargs:
if other.startswith('_OTHER'):
for item in kwargs[other]:
yield item
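# Compositional sketch (feeds are made up; note that an item equal to
# True acts as a stop marker for the primary input):
#
#   merged = pipe_union(None, iter(['a', 'b']), None, _OTHER1=iter(['c']))
#   list(merged)   # -> ['a', 'b', 'c']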
|
Python
| 0.000009
|
@@ -72,14 +72,8 @@
PUT,
- conf,
**k
@@ -300,18 +300,8 @@
tc.%0A
- conf:%0A
|
3053c57a67c4dfb5e20bb93d6a586c7acf84275e
|
Prepare release v1.3.5.
|
monitoring/nagios/__init__.py
|
monitoring/nagios/__init__.py
|
import monitoring.nagios.logger
__version__ = '1.3.2'
|
Python
| 0
|
@@ -49,7 +49,7 @@
1.3.
-2
+5
'%0A
|
cf07c34fe3a3d7b8767e50e77e609253dd177cff
|
Use isoformat date RFC 3339
|
moulinette/utils/serialize.py
|
moulinette/utils/serialize.py
|
import logging
from json.encoder import JSONEncoder
import datetime
logger = logging.getLogger('moulinette.utils.serialize')
# JSON utilities -------------------------------------------------------
class JSONExtendedEncoder(JSONEncoder):
"""Extended JSON encoder
Extend default JSON encoder to recognize more types and classes. It
will never raise if the object can't be encoded and return its repr
instead.
The following objects and types are supported:
- set: converted into list
"""
def default(self, o):
"""Return a serializable object"""
# Convert compatible containers into list
if isinstance(o, set) or (
hasattr(o, '__iter__') and hasattr(o, 'next')):
return list(o)
# Convert compatible containers into list
if isinstance(o, datetime.datetime) or isinstance(o, datetime.date):
return str(o)
# Return the repr for object that json can't encode
logger.warning('cannot properly encode in JSON the object %s, '
'returned repr is: %r', type(o), o)
return repr(o)
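# Quick illustration (made-up payload):
#
#   import json
#   json.dumps({'tags': {'a'}, 'at': datetime.date(2020, 1, 2)},
#              cls=JSONExtendedEncoder)
#   # -> '{"tags": ["a"], "at": "2020-01-02"}'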
|
Python
| 0
|
@@ -914,13 +914,20 @@
urn
-str(o
+o.isoformat(
)%0A%0A
|
161ea323d9d1bff81ccec5ab2a7ac60dea52ca27
|
Undo color change to lineEdit part
|
ComboBox.py
|
ComboBox.py
|
# CheckableComboBox
# Lærke Roager Christensen
# 6/30/22
#
# This is a costume ComboBox where you can select multiple items on a list.
# Used in the Settings Tool.
# The code is found on: https://gis.stackexchange.com/questions/350148/qcombobox-multiple-selection-pyqt5
# with some modifications made by Lærke.
from PyQt5.QtWidgets import QComboBox, QStyledItemDelegate, QApplication
from PyQt5.QtGui import QPalette, QStandardItem, QFontMetrics
from PyQt5.QtCore import QEvent, Qt
class CheckableComboBox(QComboBox):
# Subclass Delegate to increase item height
class Delegate(QStyledItemDelegate):
def sizeHint(self, option, index):
size = super().sizeHint(option, index)
size.setHeight(29)
size.setWidth(436)
return size
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Make the combo editable to set a custom text, but readonly
self.setEditable(True)
self.lineEdit().setReadOnly(True)
# Make the lineedit the same color as QPushButton
palette = QApplication.palette()
palette.setBrush(QPalette.Base, palette.button())
self.lineEdit().setPalette(palette)
# Use custom delegate
self.setItemDelegate(CheckableComboBox.Delegate())
# Update the text when an item is toggled
self.model().dataChanged.connect(self.updateText)
# Hide and show popup when clicking the line edit
self.lineEdit().installEventFilter(self)
self.closeOnLineEditClick = False
# Prevent popup from closing when clicking on an item
self.view().viewport().installEventFilter(self)
def resizeEvent(self, event):
# Recompute text to elide as needed
self.updateText()
super().resizeEvent(event)
def eventFilter(self, object, event):
if object == self.lineEdit():
if event.type() == QEvent.MouseButtonRelease:
if self.closeOnLineEditClick:
self.hidePopup()
else:
self.showPopup()
return True
return False
if object == self.view().viewport():
if event.type() == QEvent.MouseButtonRelease:
index = self.view().indexAt(event.pos())
item = self.model().item(index.row())
if item.checkState() == Qt.Checked:
item.setCheckState(Qt.Unchecked)
else:
item.setCheckState(Qt.Checked)
return True
return False
def showPopup(self):
super().showPopup()
# When the popup is displayed, a click on the lineedit should close it
self.closeOnLineEditClick = True
def hidePopup(self):
super().hidePopup()
# Used to prevent immediate reopening when clicking on the lineEdit
self.startTimer(100)
# Refresh the display text when closing
self.updateText()
def timerEvent(self, event):
# After timeout, kill timer, and reenable click on line edit
self.killTimer(event.timerId())
self.closeOnLineEditClick = False
def updateText(self):
texts = []
for i in range(self.model().rowCount()):
if self.model().item(i).checkState() == Qt.Checked:
texts.append(self.model().item(i).text())
text = ", ".join(texts)
# Compute elided text (with "...")
metrics = QFontMetrics(self.lineEdit().font())
elidedText = metrics.elidedText(text, Qt.ElideRight, self.lineEdit().width())
self.lineEdit().setText(elidedText)
def check(self, text):
for i in range(self.model().rowCount()):
if self.model().item(i).text() == text:
self.model().item(i).setCheckState(Qt.Checked)
def addItem(self, text, data=None):
item = QStandardItem()
item.setText(text)
if data is None:
item.setData(text)
else:
item.setData(data)
item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsUserCheckable)
item.setData(Qt.Unchecked, Qt.CheckStateRole)
self.model().appendRow(item)
def addItems(self, texts, datalist=None):
texts.sort()
for i, text in enumerate(texts):
try:
data = datalist[i]
except (TypeError, IndexError):
data = None
self.addItem(text, data)
def currentData(self):
# Return the list of selected items data
res = []
for i in range(self.model().rowCount()):
if self.model().item(i).checkState() == Qt.Checked:
res.append(self.model().item(i).text())
return res
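# Minimal usage sketch (illustrative):
#
#   app = QApplication([])
#   combo = CheckableComboBox()
#   combo.addItems(['blue', 'green', 'red'])
#   combo.check('green')
#   combo.currentData()   # -> ['green']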
|
Python
| 0
|
@@ -1,9 +1,13 @@
#
+%0A#
Checkab
@@ -18,16 +18,20 @@
mboBox%0A#
+%0A#
L%C3%A6rke R
@@ -49,16 +49,18 @@
tensen%0A#
+
6/30/22
@@ -63,16 +63,102 @@
0/22%0A#%0A#
+ Version 3.5.1 - 7/1/22 - Ron Lockwood%0A# Undo color change to lineEdit part%0A#%0A#
This is
@@ -165,14 +165,13 @@
a c
-ostume
+ustom
Com
@@ -224,16 +224,18 @@
list.%0A#
+
Used in
@@ -255,16 +255,18 @@
Tool.%0A#
+
The cod
@@ -363,16 +363,18 @@
-pyqt5%0A#
+
with so
@@ -1114,209 +1114,8 @@
rue)
-%0A # Make the lineedit the same color as QPushButton%0A palette = QApplication.palette()%0A palette.setBrush(QPalette.Base, palette.button())%0A self.lineEdit().setPalette(palette)
%0A%0A
|
a1f2e3a7d32687c8495bf36491a11b885ebe2dee
|
append to mc log instead of overwrite
|
mpfmc/commands/mc.py
|
mpfmc/commands/mc.py
|
"""Starts the MPF media controller."""
import argparse
import logging
import os
import socket
import sys
import threading
from datetime import datetime
import time
import errno
# Note, other imports are done deeper in this file, which we need to do there
# since Kivy does so much with singletons and we don't want MPF to import
# them when it reads this command
class Command(object):
# pylint: disable-msg=too-many-locals
def __init__(self, mpf_path, machine_path, args):
# undo all of Kivy's built-in logging so we can do it our way
os.environ['KIVY_NO_FILELOG'] = '1'
os.environ['KIVY_NO_CONSOLELOG'] = '1'
from kivy.logger import Logger
for handler in Logger.handlers:
Logger.removeHandler(handler)
sys.stderr = sys.__stderr__
# Need to have these in here because we don't want them to load when
# the module is loaded as an mpf.command
import mpfmc
from mpf.core.utility_functions import Util
from mpfmc.core.config_processor import ConfigProcessor
from mpfmc.core.utils import set_machine_path, load_machine_config
del mpf_path
parser = argparse.ArgumentParser(description='Starts the MPF Media Controller')
parser.add_argument("-b",
action="store_false", dest="bcp", default=True,
help="Do not set up the BCP server threads")
parser.add_argument("-c",
action="store", dest="configfile",
default="config", metavar='config_file(s)',
help="The name of a config file to load. Default is "
"config.yaml. Multiple files can be used via a comma-"
"separated list (no spaces between)")
parser.add_argument("-C",
action="store", dest="mcconfigfile",
default="mcconfig.yaml",
metavar='config_file',
help="The MPF framework default config file. Default is "
"<mpf-mc install folder>/mcconfig.yaml")
parser.add_argument("-f",
action="store_true", dest="force_assets_load", default=False,
help="Load all assets upon startup. Useful for "
"ensuring all assets are set up properly "
"during development.")
parser.add_argument("-l",
action="store", dest="logfile",
metavar='file_name',
default=os.path.join("logs", datetime.now().strftime(
"%Y-%m-%d-%H-%M-%S-mc-" + socket.gethostname() +
".log")),
help="The name (and path) of the log file")
parser.add_argument("-p",
action="store_true", dest="pause", default=False,
help="Pause the terminal window on exit. Useful "
"when launching in a separate window so you can "
"see any errors before the window closes.")
parser.add_argument("-v",
action="store_const", dest="loglevel", const=logging.DEBUG,
default=logging.INFO, help="Enables verbose logging to the"
" log file")
parser.add_argument("-V",
action="store_true", dest="consoleloglevel",
default=logging.INFO,
                        help="Enables verbose logging to the console. Do NOT use on "
"Windows platforms")
# The following are just included for full compatibility with mpf.py
# which is needed when using "mpf both".
parser.add_argument("-a",
action="store_const", dest="force_platform",
const='no_load_cache', help=argparse.SUPPRESS)
parser.add_argument("-A",
action="store_const", dest="force_platform",
const='create_config_cache', help=argparse.SUPPRESS)
parser.add_argument("-x",
action="store_const", dest="force_platform",
const='virtual', help=argparse.SUPPRESS)
parser.add_argument("-X",
action="store_const", dest="force_platform",
const='smart_virtual', help=argparse.SUPPRESS)
args = parser.parse_args(args)
args.configfile = Util.string_to_list(args.configfile)
# Configure logging. Creates a logfile and logs to the console.
# Formatting options are documented here:
# https://docs.python.org/2.7/library/logging.html#logrecord-attributes
try:
os.makedirs(os.path.join(machine_path, 'logs'))
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
logging.basicConfig(level=args.loglevel,
format='%(asctime)s : %(levelname)s : %(name)s : '
'%(message)s',
filename=os.path.join(machine_path, args.logfile),
filemode='w')
# define a Handler which writes INFO messages or higher to the
# sys.stderr
console = logging.StreamHandler()
console.setLevel(args.consoleloglevel)
# set a format which is simpler for console use
formatter = logging.Formatter('%(levelname)s : %(name)s : %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
mpf_config = ConfigProcessor.load_config_file(os.path.join(
mpfmc.__path__[0], args.mcconfigfile), 'machine')
machine_path = set_machine_path(machine_path,
mpf_config['mpf-mc']['paths'][
'machine_files'])
mpf_config = load_machine_config(args.configfile, machine_path,
mpf_config['mpf-mc']['paths'][
'config'], mpf_config)
self.preprocess_config(mpf_config)
from mpfmc.core.mc import MpfMc
logging.info("Loading MPF-MC controller")
thread_stopper = threading.Event()
try:
MpfMc(options=vars(args), config=mpf_config,
machine_path=machine_path,
thread_stopper=thread_stopper).run()
logging.info("MC run loop ended.")
except Exception as e:
logging.exception(str(e))
logging.info("Stopping child threads... (%s remaining)", len(threading.enumerate()) - 1)
thread_stopper.set()
while len(threading.enumerate()) > 1:
time.sleep(.1)
logging.info("All child threads stopped.")
logging.shutdown()
if args.pause:
input('Press ENTER to continue...')
sys.exit()
def preprocess_config(self, config):
from kivy.config import Config
kivy_config = config['kivy_config']
try:
kivy_config['graphics'].update(config['window'])
except KeyError:
pass
if ('top' in kivy_config['graphics'] and
'left' in kivy_config['graphics']):
kivy_config['graphics']['position'] = 'custom'
for section, settings in kivy_config.items():
for k, v in settings.items():
try:
if k in Config[section]:
Config.set(section, k, v)
except KeyError:
continue
try: # config not validated yet, so we use try
if config['window']['exit_on_escape']:
Config.set('kivy', 'exit_on_escape', '1')
except KeyError:
pass
Config.set('graphics', 'maxfps', int(config['mpf-mc']['fps']))
def get_command():
return 'mc', Command
|
Python
| 0.000001
|
@@ -5440,48 +5440,25 @@
ame=
-os.path.join(machine_path, args.logfile)
+full_logfile_path
,%0A
@@ -5493,17 +5493,17 @@
lemode='
-w
+a
')%0A%0A
|
12549cf9b7bc3c2a7baa5aacde91749bd8f2b94d
|
print crc as well
|
mppsolar/__init__.py
|
mppsolar/__init__.py
|
# !/usr/bin/python3
import logging
from argparse import ArgumentParser
from .version import __version__ # noqa: F401
# import mppcommands
from .mpputils import mppUtils
log = logging.getLogger('MPP-Solar')
# setup logging (DEBUG, INFO, WARNING, ERROR, CRITICAL)
# ch = logging.StreamHandler()
# create formatter and add it to the handlers
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(filename)s - %(levelname)s - %(message)s')
# ch.setFormatter(formatter)
# add the handlers to logger
# log.addHandler(ch)
# set default log levels
log.setLevel(logging.WARNING)
# ch.setLevel(logging.WARNING)
logging.basicConfig()
def main():
parser = ArgumentParser(description='MPP Solar Command Utility, version: {}'.format(__version__))
parser.add_argument('-c', '--command', help='Command to run', default='QID')
parser.add_argument('-D', '--enableDebug', action='store_true', help='Enable Debug and above (i.e. all) messages')
parser.add_argument('-I', '--enableInfo', action='store_true', help='Enable Info and above level messages')
parser.add_argument('-d', '--device', type=str, help='Serial (or USB) device to communicate with, defaults to /dev/ttyUSB0', default='/dev/ttyUSB0')
parser.add_argument('-b', '--baud', type=int, help='Baud rate for serial communications, defaults to 2400', default=2400)
parser.add_argument('-M', '--model', type=str, help='Specifies the inverter model to select commands for, defaults to "standard", currently supports LV5048', default='standard')
parser.add_argument('-l', '--listknown', action='store_true', help='List known commands')
parser.add_argument('-s', '--getStatus', action='store_true', help='Get Inverter Status')
parser.add_argument('-t', '--getSettings', action='store_true', help='Get Inverter Settings')
parser.add_argument('-R', '--showraw', action='store_true', help='Display the raw results')
parser.add_argument('-p', '--printcrc', action='store_true', help='Display the command and crc and nothing else')
args = parser.parse_args()
# Turn on debug if needed
if(args.enableDebug):
log.setLevel(logging.DEBUG)
# ch.setLevel(logging.DEBUG)
elif(args.enableInfo):
log.setLevel(logging.INFO)
# ch.setLevel(logging.INFO)
log.info('command %s', args.command)
log.info('Serial device used: %s, baud rate: %d', args.device, args.baud)
# mp = mppcommands.mppCommands(args.device, args.baud)
mp = mppUtils(args.device, args.baud, args.model)
if(args.printcrc):
# print("{0:#x}".format(100))
_command = mp.getFullCommand(args.command)
if _command:
print ('{}'.format(_command.byte_command))
else:
[crca, crcb] = mppcommand.crc(args.command) # noqa: F821
print("{0} {1:#x} {2:#x}".format(args.command, crca, crcb))
elif(args.listknown):
for line in mp.getKnownCommands():
print(line)
elif(args.getStatus):
fullStatus = mp.getFullStatus()
print("================ Status ==================")
print("{:<30}\t{:<15} {}".format('Parameter', 'Value', 'Unit'))
for key in sorted(fullStatus):
print("{:<30}\t{:<15} {}".format(key, fullStatus[key]['value'], fullStatus[key]['unit']))
elif(args.getSettings):
settings = mp.getSettings()
print("================ Settings ==================")
print("{:<30}\t{:<10}\t{:<10} {}".format('Parameter', 'Default', 'Current', 'Unit'))
for key in sorted(settings):
print("{:<30}\t{:<10}\t{:<10} {}".format(key, settings[key]['default'],
settings[key]['value'],
settings[key]['unit']))
else:
# TODO: check if command is valid
# maybe check if query or setter and ...
if(args.showraw):
print(mp.getResponse(args.command))
else:
results = mp.getResponseDict(args.command)
for key in sorted(results):
print("{:<30}\t{:<15} {}".format(key, results[key][0], results[key][1]))
|
Python
| 0.000002
|
@@ -2696,34 +2696,16 @@
mmand))%0A
- else:%0A
@@ -2762,20 +2762,16 @@
a: F821%0A
-
|
5d8b2224bf2864ad7e4bacb0624542dec8549b57
|
add mpf-mc entry points in machine test
|
mpf/tests/MpfMachineTestCase.py
|
mpf/tests/MpfMachineTestCase.py
|
from mpf.tests.MpfTestCase import MpfTestCase
class MpfMachineTestCase(MpfTestCase):
def __init__(self, methodName='runTest'):
super().__init__(methodName)
# only disable bcp. everything else should run
self.machine_config_patches = dict()
self.machine_config_patches['bcp'] = []
# increase test expected duration
self.expected_duration = 5.0
def getConfigFile(self):
return "config.yaml"
def getMachinePath(self):
return ""
def getAbsoluteMachinePath(self):
# do not use path relative to MPF folder
return self.getMachinePath()
def get_platform(self):
return 'smart_virtual'
|
Python
| 0
|
@@ -1,8 +1,70 @@
+import inspect%0Afrom mpf.core.machine import MachineController%0A
from mpf
@@ -456,16 +456,701 @@
= 5.0%0A%0A
+ @staticmethod%0A def _load_mc_players(cls):%0A mc_players = %7B%0A %22sound_player%22: %22mpfmc.config_players.sound_player%22,%0A %22widget_player%22: %22mpfmc.config_players.widget_player%22,%0A %22slide_player%22: %22mpfmc.config_players.slide_player%22%0A %7D%0A%0A for name, module in mc_players.items():%0A imported_module = inspect.importlib.import_module(module)%0A setattr(cls, '%7B%7D_player'.format(name),%0A imported_module.player_cls(cls))%0A%0A def setUp(self):%0A MachineController._register_plugin_config_players = self._load_mc_players%0A super().setUp()%0A%0A def get_enable_plugins(self):%0A return True%0A%0A
def
|
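Decoded from the %0A-escaped hunk above, the block added to MpfMachineTestCase reads roughly as follows (the unchanged class body is elided). It monkey-patches MachineController so the three mpf-mc config players get registered before each test; note that the added code reaches importlib through inspect.importlib, which only resolves because inspect happens to import importlib internally:

import inspect
from mpf.core.machine import MachineController

    @staticmethod
    def _load_mc_players(cls):
        mc_players = {
            "sound_player": "mpfmc.config_players.sound_player",
            "widget_player": "mpfmc.config_players.widget_player",
            "slide_player": "mpfmc.config_players.slide_player"
        }

        for name, module in mc_players.items():
            imported_module = inspect.importlib.import_module(module)
            setattr(cls, '{}_player'.format(name),
                    imported_module.player_cls(cls))

    def setUp(self):
        MachineController._register_plugin_config_players = self._load_mc_players
        super().setUp()

    def get_enable_plugins(self):
        return True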
04745c9c4074ee44e2cfd7ef5fecae1eb796b109
|
Fix now_utc() to return aware datetime
|
mycroft/util/time.py
|
mycroft/util/time.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
from dateutil.tz import gettz, tzlocal
def default_timezone():
""" Get the default timezone
Based on user location settings location.timezone.code or
the default system value if no setting exists.
Returns:
(datetime.tzinfo): Definition of the default timezone
"""
try:
        # Obtain from user's configured settings

# location.timezone.code (e.g. "America/Chicago")
# location.timezone.name (e.g. "Central Standard Time")
# location.timezone.offset (e.g. -21600000)
from mycroft.configuration import Configuration
config = Configuration.get()
code = config["location"]["timezone"]["code"]
return gettz(code)
except Exception:
# Just go with system default timezone
return tzlocal()
def now_utc():
""" Retrieve the current time in UTC
Returns:
(datetime): The current time in Universal Time, aka GMT
"""
return datetime.utcnow()
def now_local(tz=None):
""" Retrieve the current time
Args:
tz (datetime.tzinfo, optional): Timezone, default to user's settings
Returns:
(datetime): The current time
"""
if not tz:
tz = default_timezone()
return datetime.now(tz)
def to_utc(dt):
""" Convert a datetime with timezone info to a UTC datetime
Args:
dt (datetime): A datetime (presumably in some local zone)
Returns:
(datetime): time converted to UTC
"""
tzUTC = gettz("UTC")
if dt.tzinfo:
return dt.astimezone(tzUTC)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tzUTC)
def to_local(dt):
""" Convert a datetime to the user's local timezone
Args:
dt (datetime): A datetime (if no timezone, defaults to UTC)
Returns:
(datetime): time converted to the local timezone
"""
tz = default_timezone()
if dt.tzinfo:
return dt.astimezone(tz)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tz)
|
Python
| 0
|
@@ -1581,32 +1581,39 @@
%22%22%22%0A return
+to_utc(
datetime.utcnow(
@@ -1613,16 +1613,17 @@
utcnow()
+)
%0A%0A%0Adef n
|
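The whole fix, decoded from the hunks above, is one wrapped call: now_utc() routes the naive datetime.utcnow() through the module's own to_utc(), so callers receive a timezone-aware value (tzinfo set to gettz("UTC")) instead of a naive one, and arithmetic against other aware datetimes stops raising TypeError. Reconstructed patched function:

def now_utc():
    """ Retrieve the current time in UTC

    Returns:
        (datetime): The current time in Universal Time, aka GMT
    """
    return to_utc(datetime.utcnow())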
e4ccfdb49951ed9c4073ba389421d89fea273288
|
make test more robust
|
mpfmc/tests/MpfSlideTestCase.py
|
mpfmc/tests/MpfSlideTestCase.py
|
from mpf.tests.MpfTestCase import MpfTestCase
class MpfSlideTestCase(MpfTestCase):
def assertSlideOnTop(self, slide_name, target="default"):
self.assertEqual(slide_name, self.mc.targets[target].current_slide.name)
def assertTextOnTopSlide(self, text, target="default"):
self.assertTextInSlide(text, self.mc.targets[target].current_slide.name)
def assertTextNotOnTopSlide(self, text, target="default"):
self.assertTextNotInSlide(text, self.mc.targets[target].current_slide.name)
def assertSlideActive(self, slide_name):
self.assertIn(slide_name, self.mc.active_slides, "Slide {} is not active.".format(slide_name))
def assertSlideNotActive(self, slide_name):
self.assertNotIn(slide_name, self.mc.active_slides, "Slide {} is active but should not.".format(slide_name))
def assertTextInSlide(self, text, slide_name):
self.assertSlideActive(slide_name)
self.assertIn(text, [x.text for x in self.mc.active_slides[slide_name].children[0].children],
"Text {} not found in slide {}.".format(text, slide_name))
def assertTextNotInSlide(self, text, slide_name):
self.assertSlideActive(slide_name)
self.assertNotIn(text, [x.text for x in self.mc.active_slides[slide_name].children[0].children],
"Text {} found in slide {} but should not be there.".format(text, slide_name))
|
Python
| 0.000329
|
@@ -820,32 +820,345 @@
t(slide_name))%0A%0A
+ def _get_texts_from_slide(self, slide):%0A texts = %5B%5D%0A for children in slide.children:%0A if children.children:%0A texts.extend(self._get_texts_from_slide(children))%0A if hasattr(children, %22text%22):%0A texts.append(children.text)%0A%0A return texts%0A%0A
def assertTe
@@ -1263,33 +1263,43 @@
n(text,
-%5Bx.text for x in
+self._get_texts_from_slide(
self.mc.
@@ -1323,38 +1323,17 @@
de_name%5D
-.children%5B0%5D.children%5D
+)
,%0A
@@ -1540,25 +1540,35 @@
xt,
-%5Bx.text for x in
+self._get_texts_from_slide(
self
@@ -1600,30 +1600,9 @@
ame%5D
-.children%5B0%5D.children%5D
+)
,%0A
|
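Decoded from the hunks above, the robustness fix swaps the hard-coded children[0].children lookup for a recursive walk, so text widgets nested at any depth are found:

    def _get_texts_from_slide(self, slide):
        texts = []
        for children in slide.children:
            if children.children:
                texts.extend(self._get_texts_from_slide(children))
            if hasattr(children, "text"):
                texts.append(children.text)

        return texts

and the assertions become, roughly:

        self.assertIn(text, self._get_texts_from_slide(self.mc.active_slides[slide_name]),
                      "Text {} not found in slide {}.".format(text, slide_name))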
102f7f844d30d30eff98836a93b3f62e5bc8eb68
|
Fix tests
|
nodeconductor/billing/tests/test_invoices.py
|
nodeconductor/billing/tests/test_invoices.py
|
from mock import patch, Mock
from datetime import datetime
from django.core.management import call_command, CommandError
from django.test import TestCase
from nodeconductor.billing.tasks import create_invoices
from nodeconductor.core.utils import datetime_to_timestamp
from nodeconductor.iaas.tests.factories import CloudProjectMembershipFactory
from nodeconductor.structure.tests.factories import CustomerFactory, ProjectFactory
class CreateInvoicesCommandTest(TestCase):
def setUp(self):
self.customer = CustomerFactory(billing_backend_id='billing_backend_id')
def test_create_invoices_command_fail_with_invalid_month(self):
with self.assertRaisesMessage(CommandError, 'Year and month should be valid values.'):
call_command('createinvoices', '2015', '22')
def test_create_invoices_command_fail_with_invalid_year(self):
with self.assertRaisesMessage(CommandError, 'Year and month should be valid values.'):
call_command('createinvoices', '2015abc', '03')
def test_create_invoices_command_fail_with_more_than_three_arguments(self):
with self.assertRaisesMessage(CommandError, 'Only two or zero arguments can be provided.'):
call_command('createinvoices', '2015', '12', 'invalid')
def test_create_invoices_command_fail_with_customer_without_billing_backend(self):
self.customer.billing_backend_id = ''
self.customer.save()
with self.assertRaisesMessage(CommandError, 'Selected customer does not have billing backend id'):
call_command('createinvoices', customer_uuid=self.customer.uuid.hex)
def test_create_invoices_command_succeeds_without_arguments(self):
with patch('nodeconductor.billing.tasks.create_invoices.delay') as mocked_task:
call_command('createinvoices')
self.assertTrue(mocked_task.called)
def test_create_invoices_command_succeeds_with_one_valid_argument(self):
with patch('nodeconductor.billing.tasks.create_invoices.delay') as mocked_task:
call_command('createinvoices', customer_uuid=self.customer.uuid.hex)
self.assertTrue(mocked_task.called)
def test_create_invoices_command_succeeds_with_two_valid_arguments(self):
with patch('nodeconductor.billing.tasks.create_invoices.delay') as mocked_task:
call_command('createinvoices', '2015', '12')
self.assertTrue(mocked_task.called)
def test_create_invoices_command_succeeds_with_three_valid_arguments(self):
with patch('nodeconductor.billing.tasks.create_invoices.delay') as mocked_task:
call_command('createinvoices', '2015', '12', customer_uuid=self.customer.uuid.hex)
mocked_task.assert_called_once()
@patch('nodeconductor.iaas.backend.openstack.OpenStackBackend')
@patch('nodeconductor.structure.models.BillingBackend')
class CreateInvoicesTaskTest(TestCase):
def setUp(self):
self.customer = CustomerFactory()
project = ProjectFactory(customer=self.customer)
self.cpm = CloudProjectMembershipFactory(project=project)
self.nc_settings = {
'BILLING': {
'openstack': {
'invoice_meters': {
'cpu': ('CPU', 'cpu_hours', 'hours'),
'memory': ('Memory', 'ram_gb', 'GB'),
}
}
}
}
def test_create_invoices_with_invalid_customer_uuid_raises_exception(self, mocked_billing, mocked_openstack):
with patch('nodeconductor.billing.tasks.logger') as mocked_logger:
invalid_uuid = 'abc123'
start_date = datetime(day=1, month=3, year=2015)
end_date = datetime(day=31, month=3, year=2015)
create_invoices(invalid_uuid, start_date, end_date)
mocked_logger.exception.assert_called_with('Customer with uuid %s does not exist.', invalid_uuid)
self.assertFalse(mocked_openstack.called)
self.assertFalse(mocked_billing.called)
@patch('nodeconductor.billing.tasks.generate_usage_pdf')
def test_create_invoices_with_valid_uuid_succeeds(self, mocked_pdf_generator, mocked_billing, mocked_openstack):
mocked_openstack().get_nova_usage = Mock(return_value={
'disk': 1.0,
'memory': 1.0,
'cpu': 1.0,
'servers': 1.0}
)
mocked_billing.api.create_invoice = Mock()
start_date = datetime(day=1, month=3, year=2015)
end_date = datetime(day=31, month=3, year=2015)
with self.settings(NODECONDUCTOR=self.nc_settings):
create_invoices(str(self.customer.uuid.hex), datetime_to_timestamp(start_date),
datetime_to_timestamp(end_date))
self.assertTrue(mocked_openstack().get_nova_usage.called)
self.assertTrue(mocked_billing().api.create_invoice.called)
self.assertTrue(mocked_pdf_generator.called)
class CreateSampleDateTest(TestCase):
def setUp(self):
self.customer = CustomerFactory()
def test_create_sample_billing_data_fails(self):
call_command('createsamplebillingdata')
|
Python
| 0
|
@@ -4415,20 +4415,16 @@
billing.
-api.
create_i
@@ -4887,12 +4887,8 @@
g().
-api.
crea
|
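Both hunks above delete nothing but the string .api.: after the patch the test stubs and asserts create_invoice directly on the billing-backend mock. A two-line sketch of the patched lines:

        mocked_billing.create_invoice = Mock()
        ...
        self.assertTrue(mocked_billing().create_invoice.called)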
d6c9ac218ca2b4fb3c730b13de6d8079448a9825
|
return current timestamp for chapter up-/download
|
mygpo/api/advanced/episode.py
|
mygpo/api/advanced/episode.py
|
#
# This file is part of my.gpodder.org.
#
# my.gpodder.org is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# my.gpodder.org is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with my.gpodder.org. If not, see <http://www.gnu.org/licenses/>.
#
from mygpo.api.basic_auth import require_valid_user, check_username
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, Http404, HttpResponseNotAllowed
from mygpo.api.httpresponse import JsonResponse
from mygpo.exceptions import ParameterMissing
from django.shortcuts import get_object_or_404
from mygpo.api.sanitizing import sanitize_url
from mygpo.api.models import Device, Podcast, Episode
from mygpo.api.models.episodes import Chapter
from django.utils.translation import ugettext as _
from datetime import datetime, timedelta
from mygpo.log import log
from mygpo.utils import parse_time
import dateutil.parser
from django.views.decorators.csrf import csrf_exempt
try:
#try to import the JSON module (if we are on Python 2.6)
import json
# Python 2.5 seems to have a different json module
if not 'dumps' in dir(json):
raise ImportError
except ImportError:
# No JSON module available - fallback to simplejson (Python < 2.6)
print "No JSON module available - fallback to simplejson (Python < 2.6)"
import simplejson as json
@csrf_exempt
@require_valid_user
@check_username
def chapters(request, username):
if request.method == 'POST':
req = json.loads(request.raw_post_data)
if not 'podcast' in req:
return HttpResponseBadRequest('Podcast URL missing')
if not 'episode' in req:
return HttpResponseBadRequest('Episode URL missing')
podcast_url = req.get('podcast', '')
episode_url = req.get('episode', '')
update_urls = []
# podcast sanitizing
s_podcast_url = sanitize_url(podcast_url)
if s_podcast_url != podcast_url:
req['podcast'] = s_podcast_url
update_urls.append((podcast_url, s_podcast_url))
# episode sanitizing
s_episode_url = sanitize_url(episode_url, podcast=False, episode=True)
if s_episode_url != episode_url:
req['episode'] = s_episode_url
update_urls.append((episode_url, s_episode_url))
if (s_podcast_url != '') and (s_episode_url != ''):
try:
update_chapters(req, request.user)
except ParameterMissing, e:
return HttpResponseBadRequest(e)
return JsonResponse({'update_url': update_url})
elif request.method == 'GET':
if not 'podcast' in request.GET:
return HttpResponseBadRequest('podcast URL missing')
if not 'episode' in request.GET:
return HttpResponseBadRequest('Episode URL missing')
podcast_url = request.GET['podcast']
episode_url = request.GET['episode']
        since_ = request.GET.get('since', None)
since = datetime.fromtimestamp(float(since_)) if since_ else None
podcast = Podcast.objects.get(url=sanitize_url(podcast_url))
episode = Episode.objects.get(url=sanitize_url(episode_url, podcast=False, episode=True), podcast=podcast)
chapter_q = Chapter.objects.filter(user=request.user, episode=episode).order_by('start')
if since:
chapter_q = chapter_q.filter(timestamp__gt=since)
chapters = []
for c in chapter_q:
chapters.append({
'start': c.start,
'end': c.end,
'label': c.label,
'advertisement': c.advertisement,
'timestamp': c.created,
'device': c.device.uid
})
return JsonResponse(chapters)
else:
return HttpResponseNotAllowed(['GET', 'POST'])
def update_chapters(req, user):
podcast, c = Podcast.objects.get_or_create(url=req['podcast'])
episode, c = Episode.objects.get_or_create(url=req['episode'], podcast=podcast)
device = None
if 'device' in req:
device, c = Device.objects.get_or_create(user=user, uid=req['device'], defaults = {'type': 'other', 'name': _('New Device')})
timestamp = dateutil.parser.parse(req['timestamp']) if 'timestamp' in req else datetime.now()
for c in req.get('chapters_add', []):
if not 'start' in c:
raise ParameterMissing('start parameter missing')
start = parse_time(c['start'])
if not 'end' in c:
raise ParameterMissing('end parameter missing')
end = parse_time(c['end'])
label = c.get('label', '')
adv = c.get('advertisement', False)
Chapter.objects.create(
user=user,
episode=episode,
device=device,
created=timestamp,
start=start,
end=end,
label=label,
advertisement=adv)
for c in req.get('chapters_remove', []):
if not 'start' in c:
raise ParameterMissing('start parameter missing')
start = parse_time(c['start'])
if not 'end' in c:
raise ParameterMissing('end parameter missing')
end = parse_time(c['end'])
Chapter.objects.filter(
user=user,
episode=episode,
start=start,
end=end).delete()
|
Python
| 0.003125
|
@@ -1880,16 +1880,83 @@
rname):%0A
+%0A now = datetime.now()%0A now_ = int(mktime(now.timetuple()))%0A%0A
if r
@@ -3069,16 +3069,29 @@
sponse(%7B
+%0A
'update_
@@ -3106,16 +3106,60 @@
date_url
+,%0A 'timestamp': now_%0A
%7D)%0A%0A
@@ -4337,24 +4337,95 @@
esponse(
-chapters
+%7B%0A 'chapters': chapters,%0A 'timestamp': now_%0A %7D
)%0A%0A e
|
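Decoded, the three hunks above capture one timestamp per request and echo it in both JSON responses. A sketch follows; mktime appears in no visible hunk, so it presumably comes from the time module elsewhere in the file, and note that the POST branch still returns update_url even though the list built above it is named update_urls (the patch leaves that untouched):

def chapters(request, username):

    now = datetime.now()
    now_ = int(mktime(now.timetuple()))

    ...
        return JsonResponse({
            'update_url': update_url,
            'timestamp': now_
        })
    ...
        return JsonResponse({
            'chapters': chapters,
            'timestamp': now_
            })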
6ad60176892df0eabb7faf96277c792c742fc9f0
|
simplify some codes in _build_request
|
mechanicalsoup/browser.py
|
mechanicalsoup/browser.py
|
import requests
import bs4
from six.moves import urllib
from six import string_types
from .form import Form
class Browser:
def __init__(self, session=None, soup_config=None):
self.session = session or requests.Session()
self.soup_config = soup_config or dict()
@staticmethod
def add_soup(response, soup_config):
if "text/html" in response.headers.get("Content-Type", ""):
response.soup = bs4.BeautifulSoup(
response.content, **soup_config)
def request(self, *args, **kwargs):
response = self.session.request(*args, **kwargs)
Browser.add_soup(response, self.soup_config)
return response
def get(self, *args, **kwargs):
response = self.session.get(*args, **kwargs)
Browser.add_soup(response, self.soup_config)
return response
def post(self, *args, **kwargs):
response = self.session.post(*args, **kwargs)
Browser.add_soup(response, self.soup_config)
return response
def _build_request(self, form, url=None, **kwargs):
method = form["method"]
action = form["action"]
url = urllib.parse.urljoin(url, action)
# read http://www.w3.org/TR/html5/forms.html
data = kwargs.get("data") or dict()
files = kwargs.get("files") or dict()
for input in form.select("input"):
name = input.get("name")
# web browsers use empty string for inputs with missing values
value = input.get("value", "")
if not name:
continue
if input.get("type") in ("radio", "checkbox") and "checked" not in input.attrs:
continue
if input.get("type") == "checkbox":
if not name in data:
data[name] = list()
data[name].append(value)
elif input.get("type") == "file":
# read http://www.cs.tut.fi/~jkorpela/forms/file.html
# in web browsers, file upload only happens if the form"s (or
# submit button"s) enctype attribute is set to
# "multipart/form-data". we don"t care, simplify.
if not value:
continue
if isinstance(value, string_types):
value = open(value, "rb")
files[name] = value
else:
data[name] = value
for textarea in form.select("textarea"):
name = textarea.get("name")
if not name:
continue
data[name] = textarea.text
for select in form.select("select"):
name = select.get("name")
if not name:
continue
for i, option in enumerate(select.select("option")):
if i == 0 or "selected" in option.attrs:
data[name] = option.get("value", "")
return requests.Request(method, url, data=data, files=files, **kwargs)
def _prepare_request(self, form, url=None, **kwargs):
request = self._build_request(form, url, **kwargs)
return self.session.prepare_request(request)
def submit(self, form, url=None, **kwargs):
if isinstance(form, Form):
form = form.form
request = self._prepare_request(form, url, **kwargs)
response = self.session.send(request)
Browser.add_soup(response, self.soup_config)
return response
|
Python
| 0.000117
|
@@ -1632,16 +1632,38 @@
eckbox%22)
+ %5C%0A
and %22ch
@@ -1783,95 +1783,33 @@
-if not name in data:%0A data%5Bname%5D = list()%0A data%5Bname%5D
+data.setdefault(name, %5B%5D)
.app
|
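The second hunk above carries the whole simplification: the three-line "create the list if missing, then append" dance collapses into one dict.setdefault call, which returns the existing value or inserts and returns the default (the first hunk merely re-wraps the radio/checkbox condition with a backslash continuation). Patched branch:

            if input.get("type") == "checkbox":
                data.setdefault(name, []).append(value)

A self-contained illustration of the idiom:

d = {}
d.setdefault("name", []).append("a")   # key missing: inserts [] and appends to it
d.setdefault("name", []).append("b")   # key present: default is ignored
assert d == {"name": ["a", "b"]}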
5e6bbccc844c64628f5dcd2c9ca19d6f00f1b795
|
Fix tabs vs spaces from mergetool
|
mythril/laser/ethereum/svm.py
|
mythril/laser/ethereum/svm.py
|
from z3 import BitVec
import logging
from mythril.laser.ethereum.state import GlobalState, Environment, CalldataType, Account
from mythril.laser.ethereum.instructions import Instruction
from mythril.laser.ethereum.cfg import NodeFlags, Node, Edge, JumpType
from mythril.laser.ethereum.strategy.basic import DepthFirstSearchStrategy
from datetime import datetime, timedelta
from functools import reduce
TT256 = 2 ** 256
TT256M1 = 2 ** 256 - 1
class SVMError(Exception):
pass
'''
Main symbolic execution engine.
'''
class LaserEVM:
"""
Laser EVM class
"""
def __init__(self, accounts, dynamic_loader=None, max_depth=float('inf'), execution_timeout=60, strategy=DepthFirstSearchStrategy):
self.instructions_covered = []
self.accounts = accounts
self.nodes = {}
self.edges = []
self.total_states = 0
self.dynamic_loader = dynamic_loader
self.work_list = []
self.strategy = strategy(self.work_list, max_depth)
self.max_depth = max_depth
self.execution_timeout = execution_timeout
self.time = None
self.pre_hooks = {}
self.post_hooks = {}
logging.info("LASER EVM initialized with dynamic loader: " + str(dynamic_loader))
def sym_exec(self, main_address):
logging.debug("Starting LASER execution")
self.time = datetime.now()
# Initialize the execution environment
environment = Environment(
self.accounts[main_address],
BitVec("caller", 256),
[],
BitVec("gasprice", 256),
BitVec("callvalue", 256),
BitVec("origin", 256),
calldata_type=CalldataType.SYMBOLIC,
)
self.instructions_covered = [False for _ in environment.code.instruction_list]
initial_node = Node(environment.active_account.contract_name)
self.nodes[initial_node.uid] = initial_node
global_state = GlobalState(self.accounts, environment, initial_node)
initial_node.states.append(global_state)
# Empty the work_list before starting an execution
self.work_list.append(global_state)
self._sym_exec()
logging.info("Execution complete")
logging.info("Achieved {0:.3g}% coverage".format(self.coverage))
logging.info("%d nodes, %d edges, %d total states", len(self.nodes), len(self.edges), self.total_states)
def _sym_exec(self):
for global_state in self.strategy:
if self.execution_timeout:
if self.time + timedelta(seconds=self.execution_timeout) <= datetime.now():
return
try:
new_states, op_code = self.execute_state(global_state)
except NotImplementedError:
logging.debug("Encountered unimplemented instruction")
continue
self.manage_cfg(op_code, new_states)
self.work_list += new_states
self.total_states += len(new_states)
def execute_state(self, global_state):
instructions = global_state.environment.code.instruction_list
op_code = instructions[global_state.mstate.pc]['opcode']
self.instructions_covered[global_state.mstate.pc] = True
self._execute_pre_hook(op_code, global_state)
new_global_states = Instruction(op_code, self.dynamic_loader).evaluate(global_state)
self._execute_post_hook(op_code, new_global_states)
return new_global_states, op_code
def manage_cfg(self, opcode, new_states):
if opcode == "JUMP":
assert len(new_states) <= 1
for state in new_states:
self._new_node_state(state)
elif opcode == "JUMPI":
for state in new_states:
self._new_node_state(state, JumpType.CONDITIONAL, state.mstate.constraints[-1])
elif opcode in ("CALL", 'CALLCODE', 'DELEGATECALL', 'STATICCALL'):
assert len(new_states) <= 1
for state in new_states:
self._new_node_state(state, JumpType.CALL)
elif opcode == "RETURN":
for state in new_states:
self._new_node_state(state, JumpType.RETURN)
for state in new_states:
state.node.states.append(state)
def _new_node_state(self, state, edge_type=JumpType.UNCONDITIONAL, condition=None):
new_node = Node(state.environment.active_account.contract_name)
old_node = state.node
state.node = new_node
new_node.constraints = state.mstate.constraints
self.nodes[new_node.uid] = new_node
self.edges.append(Edge(old_node.uid, new_node.uid, edge_type=edge_type, condition=condition))
if edge_type == JumpType.RETURN:
new_node.flags |= NodeFlags.CALL_RETURN
elif edge_type == JumpType.CALL:
if 'retval' in str(state.mstate.stack[-1]):
new_node.flags |= NodeFlags.CALL_RETURN
else:
new_node.flags |= NodeFlags.FUNC_ENTRY
address = state.environment.code.instruction_list[state.mstate.pc - 1]['address']
environment = state.environment
disassembly = environment.code
if address in state.environment.code.addr_to_func:
# Enter a new function
environment.active_function_name = disassembly.addr_to_func[address]
new_node.flags |= NodeFlags.FUNC_ENTRY
logging.info("- Entering function " + environment.active_account.contract_name + ":" + new_node.function_name)
elif address == 0:
environment.active_function_name = "fallback"
new_node.function_name = environment.active_function_name
@property
def coverage(self):
return reduce(lambda sum_, val: sum_ + 1 if val else sum_, self.instructions_covered) / float(
len(self.instructions_covered)) * 100
def _execute_pre_hook(self, op_code, global_state):
if op_code not in self.pre_hooks.keys():
return
for hook in self.pre_hooks[op_code]:
hook(global_state)
def _execute_post_hook(self, op_code, global_states):
if op_code not in self.post_hooks.keys():
return
for hook in self.post_hooks[op_code]:
for global_state in global_states:
hook(global_state)
def hook(self, op_code):
def hook_decorator(function):
if op_code not in self.pre_hooks.keys():
self.pre_hooks[op_code] = []
self.pre_hooks[op_code].append(function)
return function
return hook_decorator
def post_hook(self, op_code):
def hook_decorator(function):
if op_code not in self.post_hooks.keys():
self.post_hooks[op_code] = []
self.post_hooks[op_code].append(function)
return function
return hook_decorator
|
Python
| 0
|
@@ -566,24 +566,25 @@
ass%0A %22%22%22%0A
+%0A
def __in
@@ -670,16 +670,33 @@
eout=60,
+%0A
strateg
@@ -1100,9 +1100,16 @@
out%0A
-%09
+
self
@@ -5137,24 +5137,16 @@
dress'%5D%0A
-
%0A
@@ -5470,16 +5470,33 @@
ng.info(
+%0A
%22- Enter
@@ -6626,32 +6626,33 @@
return function%0A
+%0A
return h
@@ -6916,24 +6916,25 @@
rn function%0A
+%0A
retu
|
8a006ecff95e7699a4ca65f2af5ff566648c3a0d
|
Add norhh suggestion #1
|
mythril/analysis/modules/dos.py
|
mythril/analysis/modules/dos.py
|
"""This module contains the detection code SWC-128 - DOS with block gas limit."""
import logging
from typing import Dict, cast, List
from mythril.analysis.swc_data import DOS_WITH_BLOCK_GAS_LIMIT
from mythril.analysis.report import Issue
from mythril.analysis.modules.base import DetectionModule
from mythril.laser.ethereum.state.global_state import GlobalState
from mythril.laser.ethereum.state.annotation import StateAnnotation
from mythril.laser.ethereum import util
log = logging.getLogger(__name__)
class LoopAnnotation(StateAnnotation):
def __init__(self, loop_start: int, loop_end: int) -> None:
self.loop_start = loop_start
self.loop_end = loop_end
def contains(self, address: int) -> bool:
return self.loop_start < address < self.loop_end
class DOS(DetectionModule):
"""This module consists of a makeshift loop detector that annotates the state with
a list of byte ranges likely to be loops. If a CALL or SSTORE detection is found in
one of the ranges it creates a low-severity issue. This is not super precise but
good enough to identify places that warrant a closer look. Checking the loop condition
would be a possible improvement.
"""
def __init__(self) -> None:
""""""
super().__init__(
name="DOS",
swc_id=DOS_WITH_BLOCK_GAS_LIMIT,
description="Check for DOS",
entrypoint="callback",
pre_hooks=["JUMPI", "CALL", "SSTORE"],
)
"""Keeps track of how often jump destinations are reached."""
self._jumpdest_count = {} # type: Dict[object, dict]
def _execute(self, state: GlobalState) -> None:
"""
:param state:
:return:
"""
self._issues.extend(self._analyze_states(state))
def _analyze_states(self, state: GlobalState) -> List[Issue]:
"""
:param state: the current state
:return: returns the issues for that corresponding state
"""
opcode = state.get_current_instruction()["opcode"]
address = state.get_current_instruction()["address"]
if opcode == "JUMPI":
target = util.get_concrete_int(state.mstate.stack[-1])
transaction = state.current_transaction
if state.current_transaction in self._jumpdest_count:
try:
self._jumpdest_count[transaction][target] += 1
if self._jumpdest_count[transaction][target] == 3:
annotation = (
LoopAnnotation(address, target)
if target > address
else LoopAnnotation(target, address)
)
state.annotate(annotation)
except KeyError:
self._jumpdest_count[transaction][target] = 0
else:
self._jumpdest_count[transaction] = {}
self._jumpdest_count[transaction][target] = 0
else:
annotations = cast(
List[LoopAnnotation], list(state.get_annotations(LoopAnnotation))
)
for annotation in annotations:
if annotation.contains(address):
operation = (
"A storage modification"
if opcode == "SSTORE"
else "An external call"
)
description_head = (
"Potential denial-of-service if block gas limit is reached."
)
description_tail = "{} is executed in a loop.".format(operation)
issue = Issue(
contract=state.environment.active_account.contract_name,
function_name=state.environment.active_function_name,
address=annotation.loop_start,
swc_id=DOS_WITH_BLOCK_GAS_LIMIT,
bytecode=state.environment.code.bytecode,
title="Potential denial-of-service if block gas limit is reached",
severity="Low",
description_head=description_head,
description_tail=description_tail,
gas_used=(state.mstate.min_gas_used, state.mstate.max_gas_used),
)
return [issue]
return []
detector = DOS()
|
Python
| 0
|
@@ -191,16 +191,82 @@
S_LIMIT%0A
+from mythril.laser.ethereum.strategy.custom import JUMPDEST_LIMIT%0A
from myt
@@ -2556,9 +2556,26 @@
==
-3
+JUMPDEST_LIMIT - 1
:%0A%0A
|
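Decoded, the suggestion replaces the magic visit count 3 with a constant imported from the search strategy, presumably so the loop heuristic and the strategy's own jumpdest limit cannot drift apart:

from mythril.laser.ethereum.strategy.custom import JUMPDEST_LIMIT

                    self._jumpdest_count[transaction][target] += 1
                    if self._jumpdest_count[transaction][target] == JUMPDEST_LIMIT - 1: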
3207e36984e845496dc910ad32e4f5e3ba628836
|
Fix PEP8 issues.
|
doc/source/conf.py
|
doc/source/conf.py
|
# -*- coding: utf-8 -*-
#
# Swiftclient documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 17 02:17:37 2012.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
sys.path.insert(0, ROOT)
import swiftclient
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Swiftclient'
copyright = u'2012 OpenStack, LLC.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from swiftclient.version import version_info as swiftclient_version
# The full version, including alpha/beta/rc tags.
release = swiftclient_version.version_string()
# The short X.Y version.
version = swiftclient_version.canonical_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'SwiftClientwebdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
('index', 'SwiftClient.tex', u'SwiftClient Documentation',
u'OpenStack, LLC.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
Python
| 0.000009
|
@@ -427,17 +427,23 @@
port sys
-,
+%0Aimport
os%0A%0A# I
|
be4d150accd90ec856513576664c90d85d5012c7
|
add unit to ProcessTypeResourceType admin list_display
|
valuenetwork/valueaccounting/admin.py
|
valuenetwork/valueaccounting/admin.py
|
from django.contrib import admin
from valuenetwork.valueaccounting.models import *
from valuenetwork.valueaccounting.actions import export_as_csv
admin.site.add_action(export_as_csv, 'export_selected objects')
admin.site.register(Unit)
admin.site.register(AgentType)
#admin.site.register(Stage)
class HelpAdmin(admin.ModelAdmin):
list_display = ('page',)
admin.site.register(Help, HelpAdmin)
class FacetValueInline(admin.TabularInline):
model = FacetValue
class FacetAdmin(admin.ModelAdmin):
list_display = ('name', 'value_list')
inlines = [ FacetValueInline, ]
admin.site.register(Facet, FacetAdmin)
class PatternFacetInline(admin.TabularInline):
model = PatternFacetValue
fields = ('event_type', 'facet_value')
class PatternUseCaseInline(admin.TabularInline):
model = PatternUseCase
extra = 1
class ProcessPatternAdmin(admin.ModelAdmin):
list_display = ('name',)
inlines = [ PatternFacetInline, PatternUseCaseInline]
admin.site.register(ProcessPattern, ProcessPatternAdmin)
class ResourceRelationshipAdmin(admin.ModelAdmin):
list_display = ('name', 'inverse_name', 'related_to', 'direction', 'materiality', 'event_type', 'unit')
list_filter = ['materiality', 'related_to', 'direction']
list_editable = ['event_type',]
admin.site.register(ResourceRelationship, ResourceRelationshipAdmin)
class AgentUserInline(admin.TabularInline):
model = AgentUser
class EconomicAgentAdmin(admin.ModelAdmin):
list_display = ('nick', 'name', 'agent_type', 'url', 'address', 'email', 'created_date')
list_filter = ['agent_type',]
search_fields = ['name', 'address']
inlines = [ AgentUserInline, ]
admin.site.register(EconomicAgent, EconomicAgentAdmin)
class ResourceTypeFacetInline(admin.TabularInline):
model = ResourceTypeFacetValue
class EconomicResourceTypeAdmin(admin.ModelAdmin):
list_display = ('label', 'name', 'category', 'rate', 'unit', 'unit_of_use', 'description', 'facet_list')
list_filter = ['facets__facet_value']
search_fields = ['name',]
list_editable = ['unit', 'unit_of_use']
inlines = [ ResourceTypeFacetInline, ]
admin.site.register(EconomicResourceType, EconomicResourceTypeAdmin)
class AgentResourceTypeAdmin(admin.ModelAdmin):
list_display = ('agent', 'resource_type', 'score', 'event_type', 'relationship')
list_filter = ['event_type', 'agent', 'resource_type']
admin.site.register(AgentResourceType, AgentResourceTypeAdmin)
class ProcessTypeResourceTypeAdmin(admin.ModelAdmin):
list_display = ('process_type', 'resource_type', 'event_type')
list_filter = ['event_type', 'process_type', 'resource_type']
search_fields = ['process_type__name','resource_type__name',]
admin.site.register(ProcessTypeResourceType, ProcessTypeResourceTypeAdmin)
class ProcessTypeResourceTypeInline(admin.TabularInline):
model = ProcessTypeResourceType
class ProcessTypeAdmin(admin.ModelAdmin):
list_display = ('name', 'project' )
list_filter = ['project',]
search_fields = ['name',]
inlines = [ ProcessTypeResourceTypeInline, ]
admin.site.register(ProcessType, ProcessTypeAdmin)
class EventTypeAdmin(admin.ModelAdmin):
list_display = ('name', 'label', 'inverse_label', 'related_to', 'relationship', 'resource_effect', 'unit_type' )
list_filter = ['resource_effect', 'related_to', 'relationship',]
list_editable = ['label', 'inverse_label', 'related_to', 'relationship']
admin.site.register(EventType, EventTypeAdmin)
class EconomicResourceAdmin(admin.ModelAdmin):
list_display = ('id', 'identifier', 'resource_type', 'quantity', 'unit_of_quantity', 'quality', 'notes', 'owner', 'custodian')
list_filter = ['resource_type', 'author', 'owner']
search_fields = ['identifier', 'resource_type__name']
date_hierarchy = 'created_date'
admin.site.register(EconomicResource, EconomicResourceAdmin)
class CommitmentInline(admin.TabularInline):
model = Commitment
class OrderItemInline(admin.TabularInline):
model = Commitment
fk_name = 'order'
fields = ('event_type', 'due_date', 'resource_type', 'quantity', 'unit_of_quantity', 'process')
class OrderAdmin(admin.ModelAdmin):
list_display = ('id', 'provider', 'receiver', 'description','due_date' )
inlines = [ OrderItemInline, ]
admin.site.register(Order, OrderAdmin)
class ProcessAdmin(admin.ModelAdmin):
date_hierarchy = 'start_date'
list_display = ('name', 'start_date', 'end_date', 'finished', 'process_type', 'project', 'owner', 'managed_by')
list_filter = ['process_type', 'finished', 'owner', 'managed_by']
search_fields = ['name', 'process_type__name', 'owner__name', 'managed_by__name']
inlines = [ CommitmentInline, ]
admin.site.register(Process, ProcessAdmin)
class ProjectAdmin(admin.ModelAdmin):
list_display = ('name', 'parent')
list_filter = ['parent',]
search_fields = ['name',]
admin.site.register(Project, ProjectAdmin)
class CommitmentAdmin(admin.ModelAdmin):
date_hierarchy = 'due_date'
list_display = ('resource_type', 'quantity', 'unit_of_quantity', 'event_type', 'due_date', 'from_agent', 'process', 'project', 'order', 'independent_demand',
'description', 'quality')
list_filter = ['independent_demand', 'event_type', 'resource_type', 'from_agent', 'project']
search_fields = ['event_type__name', 'from_agent__name', 'to_agent__name', 'resource_type__name']
admin.site.register(Commitment, CommitmentAdmin)
class EconomicEventAdmin(admin.ModelAdmin):
date_hierarchy = 'event_date'
list_display = ('event_type', 'event_date', 'from_agent', 'project',
'resource_type', 'quantity', 'unit_of_quantity', 'description', 'url', 'quality')
list_filter = ['event_type', 'project', 'resource_type', 'from_agent', ]
search_fields = ['event_type__name', 'from_agent__name', 'to_agent__name', 'resource_type__name']
list_editable = ['event_date',]
admin.site.register(EconomicEvent, EconomicEventAdmin)
class CompensationAdmin(admin.ModelAdmin):
list_display = ('initiating_event', 'compensating_event', 'compensation_date', 'compensating_value')
search_fields = ['initiating_event__from_agent__name', 'initiating_event__to_agent__name']
admin.site.register(Compensation, CompensationAdmin)
|
Python
| 0
|
@@ -2603,16 +2603,24 @@
nt_type'
+, 'unit'
)%0A li
|
39a23d06cc09a9dbf0802740aaca8854bfd64b04
|
Add check for directory access rights in LocalStorage
|
onitu/drivers/local_storage/local_storage.py
|
onitu/drivers/local_storage/local_storage.py
|
from path import path
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from onitu.api import Plug
plug = Plug()
# Ignore the next Watchdog event concerning those files
events_to_ignore = set()
# Store the mtime of the last write of each transfered file
last_mtime = {}
root = None
@plug.handler()
def get_chunk(filename, offset, size):
filename = root.joinpath(filename)
try:
with open(filename, 'rb') as f:
f.seek(offset)
return f.read(size)
except IOError as e:
plug.logger.warn("Error getting file `{}`: {}", filename, e)
@plug.handler()
def start_upload(metadata):
filename = root.joinpath(metadata.filename)
# We ignore the next Watchdog events concerning this file
events_to_ignore.add(metadata.filename)
try:
if not filename.exists():
filename.dirname().makedirs_p()
filename.open('wb').close()
except IOError as e:
plug.logger.warn("Error creating file `{}`: {}", filename, e)
@plug.handler()
def end_upload(metadata):
filename = root.joinpath(metadata.filename)
# this is to make sure that no further event concerning
# this set of writes will be propagated to the Referee
last_mtime[metadata.filename] = filename.mtime
metadata.revision = filename.mtime
metadata.write_revision()
if metadata.filename in events_to_ignore:
events_to_ignore.remove(metadata.filename)
@plug.handler()
def upload_chunk(filename, offset, chunk):
abs_path = root.joinpath(filename)
# We make sure events are ignored for this file
events_to_ignore.add(filename)
try:
# We should not append the file but seek to the right
# position.
# However, the behavior of `offset` isn't well defined
with open(abs_path, 'ab') as f:
f.write(chunk)
except IOError as e:
plug.logger.warn("Error writting file `{}`: {}", filename, e)
def check_changes():
for abs_path in root.walkfiles():
filename = abs_path.relpath(root).normpath()
metadata = plug.get_metadata(filename)
revision = metadata.revision
revision = float(revision) if revision else .0
if abs_path.mtime > revision:
update_file(metadata, abs_path)
def update_file(metadata, path):
if metadata.filename in events_to_ignore:
return
if metadata.filename in last_mtime:
if last_mtime[metadata.filename] >= path.mtime:
# We're about to send an event for a file that hasn't changed
# since the last upload, we stop here
return
else:
del last_mtime[metadata.filename]
metadata.size = path.size
metadata.revision = path.mtime
plug.update_file(metadata)
class EventHandler(FileSystemEventHandler):
def on_moved(self, event):
def handle_move(event):
if event.is_directory:
return
#if event.src_path:
#self._handle_deletion(event.src_path.decode())
self._handle_update(event.dest_path.decode())
handle_move(event)
if event.is_directory:
for subevent in event.sub_moved_events():
handle_move(subevent)
def on_modified(self, event):
if event.is_directory:
return
self._handle_update(event.src_path.decode())
def _handle_update(self, abs_path):
abs_path = path(abs_path)
filename = root.relpathto(abs_path)
metadata = plug.get_metadata(filename)
update_file(metadata, abs_path)
def start(*args, **kwargs):
plug.start(*args, **kwargs)
global root
root = path(plug.options['root'])
observer = Observer()
observer.schedule(EventHandler(), path=root, recursive=True)
observer.start()
check_changes()
plug.wait()
|
Python
| 0
|
@@ -1,12 +1,23 @@
+import os%0A%0A
from path im
@@ -3753,16 +3753,139 @@
oot'%5D)%0A%0A
+ if not root.access(os.W_OK %7C os.R_OK):%0A plug.logger.error(%22Can't access directory %60%7B%7D%60.%22, root)%0A return%0A%0A
obse
|
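Decoded, the guard added to start() bails out with a logged error when the configured root is not both readable and writable (root.access() is path.py's wrapper around os.access, hence the new import os):

import os

def start(*args, **kwargs):
    plug.start(*args, **kwargs)

    global root
    root = path(plug.options['root'])

    if not root.access(os.W_OK | os.R_OK):
        plug.logger.error("Can't access directory `{}`.", root)
        return

    observer = Observer()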
2d95b9a4b6d87e9f630c59995403988dee390c20
|
Fix simple typo: utilty -> utility (#5182)
|
doc/sphinx_util.py
|
doc/sphinx_util.py
|
# -*- coding: utf-8 -*-
"""Helper utilty function for customization."""
import sys
import os
import docutils
import subprocess
READTHEDOCS_BUILD = (os.environ.get('READTHEDOCS', None) is not None)
if not os.path.exists('web-data'):
subprocess.call('rm -rf web-data;' +
'git clone https://github.com/dmlc/web-data', shell = True)
else:
subprocess.call('cd web-data; git pull', shell=True)
sys.stderr.write('READTHEDOCS=%s\n' % (READTHEDOCS_BUILD))
|
Python
| 0.999997
|
@@ -31,16 +31,17 @@
per util
+i
ty funct
|
d806cc19e058ad63c6be47d8e616b0c869549db7
|
FIX remote does not have test file wired...
|
sklearn/decomposition/tests/test_spectra_embedding.py
|
sklearn/decomposition/tests/test_spectra_embedding.py
|
import numpy as np
from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn.decomposition.spectra_embedding import SpectralEmbedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.pipeline import Pipeline
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans, SpectralClustering
S = np.array([[1, 5, 2, 1, 0, 0, 0],
[5, 1, 3, 1, 0, 0, 0],
[2, 3, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 2, 1, 1],
[0, 0, 0, 2, 2, 3, 2],
[0, 0, 0, 1, 3, 1, 4],
[0, 0, 0, 1, 2, 4, 1],
])
def test_spectra_embedding_precomputed_graph(seed=36):
"""Test spectral embedding with precomputed kernel"""
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=3, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=3, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
se_knn = SpectralEmbedding(n_components=3, affinity="nearest_neighbors",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
embed_knn = se_knn.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(np.abs(embed_precomp), np.abs(embed_rbf), 0)
def test_spectra_embedding_knn_graph(seed=36):
"""Test spectral embedding with knn graph"""
def test_spectra_embedding_callable_graph(seed=36):
"""Test spectral embedding with knn graph"""
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=3,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=3, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
def test_pipline_spectra_clustering():
"""Test using pipline to do spectral clustering"""
spectral_clustering = Pipeline([
('se', SpectralEmbedding()),
('km', KMeans()),
])
for n_cluster in range(1, 5):
n_cluster = 3
spectral_clustering.set_params(km__n_clusters=n_cluster)
spectral_clustering.set_params(se__n_components=n_cluster)
spectral_clustering.set_params(se__gamma=1.0)
spectral_clustering.fit(S)
SC = SpectralClustering(n_clusters=n_cluster)
SC.fit(S)
assert_array_almost_equal(
normalized_mutual_info_score(
spectral_clustering.steps[1][1].labels_,
SC.labels_), 0.0, 0)
|
Python
| 0
|
@@ -1,24 +1,4 @@
-import numpy as np%0A%0A
from
@@ -101,16 +101,35 @@
_matrix%0A
+import numpy as np%0A
from num
|
8784162eb60cd23bbbe669c698e9406d43c1a7ff
|
Explicitly set allow_empty = True
|
nextcloudappstore/core/views.py
|
nextcloudappstore/core/views.py
|
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from nextcloudappstore.core.models import App, Category
from django.http import Http404
from django.db.models import Q
class AppDetailView(DetailView):
model = App
template_name = 'app/detail.html'
slug_field = 'id'
slug_url_kwarg = 'id'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['categories'] = Category.objects.all()
return context
class CategoryAppListView(ListView):
model = App
template_name = 'app/list.html'
def get_queryset(self):
category_id = self.kwargs['id']
queryset = super().get_queryset()
if category_id:
queryset = queryset.filter(categories__id=category_id)
if self.has_search_terms():
query = None
for term in self.get_search_terms():
q = Q(translations__name__contains=term) | \
Q(translations__description__contains=term)
if query is None:
query = q
else:
query = query | q
queryset = queryset.filter(query)
# Remove duplicates that for some reason sometimes occur
queryset = list(set(queryset))
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['categories'] = Category.objects.all()
category_id = self.kwargs['id']
if category_id:
context['current_category'] = Category.objects.get(id=category_id)
if self.has_search_terms():
context['search_query'] = self.request.GET['search']
context['search_terms'] = self.get_search_terms()
return context
def has_search_terms(self):
return ('search' in self.request.GET) \
and self.request.GET['search'].strip()
def get_search_terms(self):
return self.request.GET['search'].strip().split()
|
Python
| 0.999992
|
@@ -611,16 +611,39 @@
st.html'
+%0A allow_empty = True
%0A%0A de
|
22f9ff98e048f47493394570b519d179657d9427
|
Add `--host/port` options
|
skylines/commands/tracking/generate_through_daemon.py
|
skylines/commands/tracking/generate_through_daemon.py
|
from __future__ import print_function
from flask_script import Command, Option
import sys
import socket
import struct
from skylines.model import User
from skylines.tracking.server import (
datetime,
FLAG_LOCATION,
FLAG_ALTITUDE,
TrackingFix,
MAGIC,
TYPE_FIX,
set_crc,
)
from math import sin
from random import randint
from time import sleep
class GenerateThroughDaemon(Command):
""" Generate fake live tracks for debugging on daemon """
UDP_IP = "127.0.0.1"
UDP_PORT = 5597
ADDRESS = (UDP_IP, UDP_PORT)
option_list = (Option("user_id", type=int, help="a user ID"),)
def run(self, user_id):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
user = User.get(user_id)
if not user:
print('User with id "{}" not found.'.format(user_id))
sys.exit(1)
start_time = datetime.utcnow()
i = randint(0, 100)
_time = (
start_time.hour * 60 * 60 * 1000
+ start_time.minute * 60 * 1000
+ start_time.second * 1000
)
_longitude = randint(6500, 7500) / 1000.0
_latitude = randint(50500, 51500) / 1000.0
_altitude = 500
while True:
longitude = sin(i / 73.0) * 0.001 + _longitude
latitude = sin(i / 50.0) * 0.004 + _latitude
altitude = sin(i / 20.0) * 300 + _altitude
flags = FLAG_LOCATION | FLAG_ALTITUDE
fix = TrackingFix()
fix.pilot_id = user.id
fix.set_location(longitude, latitude)
fix.altitude = altitude
data = struct.pack(
"!IHHQIIiiIHHHhhH",
MAGIC,
0,
TYPE_FIX,
user.tracking_key,
flags,
_time,
int(latitude * 1000000),
int(longitude * 1000000),
0,
0,
0,
0,
int(altitude),
0,
0,
)
data = set_crc(data)
sock.sendto(data, self.ADDRESS)
print(".", end="")
sys.stdout.flush()
sleep(1)
i += 1
_time += 1000
|
Python
| 0.000002
|
@@ -475,102 +475,139 @@
-UDP_IP = %22127.0.0.1%22%0A UDP_PORT =
+option_list = (%0A Option(%22--host%22, type=str, default=%22127.0.0.1%22),%0A Option(%22--port%22, type=int, default=
5597
+),
%0A
-ADDRESS = (UDP_IP, UDP_PORT)%0A%0A option_list = (
+
Opti
@@ -648,16 +648,21 @@
er ID%22),
+%0A
)%0A%0A d
@@ -681,16 +681,26 @@
user_id
+, **kwargs
):%0A
@@ -2176,20 +2176,48 @@
ta,
-self.ADDRESS
+(kwargs.get(%22host%22), kwargs.get(%22port%22))
)%0A%0A
|
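Decoded, the two class constants become flask_script Options carrying the old values as defaults, and run() reads them back out of **kwargs when it builds the UDP destination (sketch; unchanged body elided):

    option_list = (
        Option("--host", type=str, default="127.0.0.1"),
        Option("--port", type=int, default=5597),
        Option("user_id", type=int, help="a user ID"),
    )

    def run(self, user_id, **kwargs):
        ...
            sock.sendto(data, (kwargs.get("host"), kwargs.get("port")))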
62f3a1ce0e2af511e897ac300e3ab32f4bf14463
|
Fix docs
|
src/pybel/struct/filters/node_predicates/modifications.py
|
src/pybel/struct/filters/node_predicates/modifications.py
|
# -*- coding: utf-8 -*-
"""Predicates for checking nodes' variants."""
from typing import Tuple, Type, Union
from .utils import node_predicate
from ..typing import NodePredicate
from ....dsl import BaseEntity, CentralDogma, Fragment, GeneModification, Hgvs, ProteinModification, Variant
__all__ = [
'has_variant',
'has_protein_modification',
'has_gene_modification',
'has_fragment',
'has_hgvs',
]
@node_predicate
def has_variant(node: BaseEntity) -> bool:
"""Return true if the node has any variants."""
return isinstance(node, CentralDogma) and node.variants
def _variant_checker(variant_cls: Union[Type[Variant], Tuple[Type[Variant], ...]]) -> NodePredicate:
@node_predicate
def _node_has_variant(node: BaseEntity) -> bool:
"""Return true if the node has at least one of the given variant."""
return isinstance(node, CentralDogma) and node.variants and any(
isinstance(variant, variant_cls)
for variant in node.variants
)
return _node_has_variant
has_protein_modification = _variant_checker(ProteinModification)
has_gene_modification = _variant_checker(GeneModification)
has_hgvs = _variant_checker(Hgvs)
has_fragment = _variant_checker(Fragment)
|
Python
| 0.000003
|
@@ -66,16 +66,44 @@
ts.%22%22%22%0A%0A
+from functools import wraps%0A
from typ
@@ -742,21 +742,145 @@
ate%0A
+@wraps(node_has_variant)%0A def _rv(node: BaseEntity):%0A return node_has_variant(node, variant_cls)%0A%0A return _rv%0A%0A%0A
def
-_
node_has
@@ -896,32 +896,45 @@
node: BaseEntity
+, variant_cls
) -%3E bool:%0A
@@ -928,20 +928,16 @@
%3E bool:%0A
-
%22%22%22R
@@ -1001,20 +1001,16 @@
ant.%22%22%22%0A
-
retu
@@ -1078,20 +1078,16 @@
-
isinstan
@@ -1111,20 +1111,16 @@
nt_cls)%0A
-
@@ -1156,43 +1156,9 @@
- )%0A%0A return _node_has_variant
+)
%0A%0A%0Ah
|
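Decoded, the "Fix docs" refactor hoists the predicate body out to a module-level node_has_variant and has _variant_checker return a thin functools.wraps-decorated closure, so each generated predicate (has_hgvs, has_fragment, and so on) carries node_has_variant's docstring instead of an anonymous one. A reconstruction, with decorator placement inferred from the hunk:

from functools import wraps

def _variant_checker(variant_cls: Union[Type[Variant], Tuple[Type[Variant], ...]]) -> NodePredicate:
    @node_predicate
    @wraps(node_has_variant)
    def _rv(node: BaseEntity):
        return node_has_variant(node, variant_cls)

    return _rv


def node_has_variant(node: BaseEntity, variant_cls) -> bool:
    """Return true if the node has at least one of the given variant."""
    return isinstance(node, CentralDogma) and node.variants and any(
        isinstance(variant, variant_cls)
        for variant in node.variants
    )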
65b658d9bb1b9220cfd15724692517c14f5e2cbc
|
Send more information
|
openprescribing/frontend/signals/handlers.py
|
openprescribing/frontend/signals/handlers.py
|
import logging
from allauth.account.signals import user_logged_in
from anymail.signals import tracking
from requests_futures.sessions import FuturesSession
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings
from common.utils import google_user_id
from frontend.models import Profile
logger = logging.getLogger(__name__)
@receiver(post_save, sender=User)
def handle_user_save(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(user_logged_in, sender=User)
def handle_user_logged_in(sender, request, user, **kwargs):
user.searchbookmark_set.update(approved=True)
user.orgbookmark_set.update(approved=True)
def send_ga_event(event):
user = User.objects.filter(email=event.recipient)
if user:
user = user[0]
session = FuturesSession()
payload = {
'v': 1,
'tid': settings.GOOGLE_TRACKING_ID,
'cid': google_user_id(user),
't': 'event',
'ec': 'email',
'ea': event.event_type,
'ua': event.user_agent,
'cm': 'email',
}
if event.esp_event:
payload['dt'] = event.esp_event['subject']
payload['cn'] = event.esp_event['campaign_name']
payload['cs'] = event.esp_event['campaign_source']
payload['dp'] = "/email/%s/%s/%s/%s" % (
event.esp_event['campaign_name'],
event.esp_event['campaign_source'],
event.esp_event['user_id'],
event.event_type
)
else:
logger.warn("No esp_event found for event: %s" % event.__dict__)
logger.info("Sending mail event data Analytics: %s" % payload)
session.post(
'https://www.google-analytics.com/collect', data=payload)
else:
logger.warn("Could not find receipient %s" % event.recipient)
@receiver(tracking)
def handle_anymail_webhook(sender, event, esp_name, **kwargs):
logger.debug("Received webhook from %s: %s" % (esp_name))
send_ga_event(event)
|
Python
| 0
|
@@ -2157,16 +2157,32 @@
esp_name
+, event.__dict__
))%0A s
|
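The one-argument addition decoded from the hunk matters more than it looks: "%s: %s" % (esp_name) feeds a single value to two placeholders (parentheses alone do not make a tuple), so the original line raises TypeError: not enough arguments for format string on every webhook. The patched line:

    logger.debug("Received webhook from %s: %s" % (esp_name, event.__dict__))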
4f6ce9c774a0e5a577a54634a80cb2795f085ac0
|
update exit code correctly.
|
vdt/versionplugin/debianize/shared.py
|
vdt/versionplugin/debianize/shared.py
|
import argparse
import logging
import shutil
import subprocess
import tarfile
from os.path import join, basename, dirname
from glob import glob
from pip.commands.download import DownloadCommand
from vdt.version.utils import empty_directory
from vdt.version.utils import change_directory
log = logging.getLogger(__name__)
pre_remove_script = join(dirname(__file__), 'files/preremove.sh')
class FileFilter(object):
def __init__(self, include, exclude):
self.include = include
self.exclude = exclude
def __repr__(self):
return "<FileFilter exclude=%s, include=%s>" % (self.exclude ,self.include)
def is_filtered(self, path):
filtered = False
if self.exclude:
filtered = any(pattern in path for pattern in self.exclude)
if self.include:
filtered = not any(pattern in path for pattern in self.include)
return filtered
def parse_version_extra_args(version_args):
p = argparse.ArgumentParser(description="Package python packages with debianize.sh.")
p.add_argument('--include','-i', action='append', help="Using this flag makes following dependencies explicit. It will only build dependencies listed in install_requires that match the regex specified after -i. Use -i multiple times to specify multiple packages")
p.add_argument('--exclude', '-I', action='append', help="Using this flag, packages can be excluded from being built; dependencies matching the regex will not be built")
p.add_argument('--maintainer', help="The maintainer of the package", default="nobody@example.com")
p.add_argument('--pre-remove-script', default=pre_remove_script)
p.add_argument('--fpm-bin', default='fpm')
p.add_argument('--python-install-lib', default='/usr/lib/python2.7/dist-packages/')
p.add_argument('--target', '-t', default='deb', help='the type of package you want to create (deb, rpm, solaris, etc)')
args, extra_args = p.parse_known_args(version_args)
return args, extra_args
def build_from_python_source_with_fpm(fpm_bin, target, maintainer, pre_remove_script,
python_install_lib, target_path=None, version=None, *extra_args):
with change_directory(target_path):
try:
cmd = (
fpm_bin,
'-s', 'python',
'-t', target,
'--maintainer=%s' % maintainer,
'--exclude=*.pyc',
'--exclude=*.pyo',
'--depends=python',
'--category=python',
'--before-remove=%s' % pre_remove_script,
'--template-scripts',
'--python-install-lib=%s' % python_install_lib
) + (tuple() if version is None else ('--version=%s' % version,)) + extra_args + ("setup.py",)
log.debug("Running command %s" % " ".join(cmd))
log.debug(subprocess.check_output(cmd))
except subprocess.CalledProcessError as e:
log.error("failed to build with fpm status code %s\n%s" % (
e.returncode, e.output
))
class PackageBuilder(object):
"""
This class builds a package from a python egg, including its
dependencies.
It has all kinds of hooks that can be overridden.
"""
def __init__(self, version, args, extra_args, directory):
self.version = version
self.args = args
self.extra_args = extra_args
self.directory = directory
self.exit_code = 0
def build_package(self, version, args, extra_args):
# build current directory, which is a python egg
build_from_python_source_with_fpm(
args.fpm_bin,
args.target,
args.maintainer,
args.pre_remove_script,
args.python_install_lib,
version=version,
*extra_args
)
def download_dependencies(self, install_dir, deb_dir):
downloader = DownloadCommand(False)
downloader.main([
'--no-binary=:all:',
'--dest=%s' % install_dir,
deb_dir
])
return glob(join(install_dir, '*.tar.gz'))
def build_dependency(self, args, extra_args, path, package_dir, deb_dir):
with tarfile.open(path) as tar:
tar.extractall(package_dir)
package_name = basename(path)[:-7]
target_path = join(package_dir, package_name)
build_from_python_source_with_fpm(
args.fpm_bin,
args.target,
args.maintainer,
args.pre_remove_script,
args.python_install_lib,
target_path=target_path,
*extra_args
)
for deb in glob(join(target_path, '*.deb')):
try:
shutil.move(deb, deb_dir)
except shutil.Error:
self.exit_code = 5
log.error("%s allready exists" % package_name)
def build_dependencies(self, version, args, extra_args, deb_dir):
# let's download all the dependencies in a temporary directory
with empty_directory() as install_dir:
# some packages might not be needed, so construct the filter.
file_filter = FileFilter(args.include, args.exclude)
# process all the downloaded packages with fpm
downloaded_packages = self.download_dependencies(install_dir, deb_dir)
for download in downloaded_packages:
if file_filter.is_filtered(download):
log.info("skipping %s because it is filtered out by %s" % (
basename(download), file_filter
))
else:
with empty_directory(install_dir) as package_dir:
self.build_dependency(self.args, self.extra_args, download, package_dir, deb_dir)
def build_package_and_dependencies(self):
self.build_package(self.version, self.args, self.extra_args)
self.build_dependencies(self.version, self.args, self.extra_args, self.directory)
|
Python
| 0
|
@@ -2893,24 +2893,45 @@
utput(cmd))%0A
+ return 0%0A
exce
@@ -2969,16 +2969,16 @@
r as e:%0A
-
@@ -3095,16 +3095,37 @@
))%0A
+ return 1%0A
%0A%0Aclass
@@ -3528,16 +3528,121 @@
de = 0%0A%0A
+ def update_exit_code(self, code):%0A if self.exit_code == 0:%0A self.exit_code = code%0A%0A
def
@@ -3749,24 +3749,29 @@
egg%0A
+ ex =
build_from_
@@ -4005,24 +4005,58 @@
gs%0A )
+%0A self.update_exit_code(ex)
%0A%0A def do
@@ -4615,16 +4615,21 @@
+ ex =
build_f
@@ -4912,17 +4912,54 @@
)%0A
+ self.update_exit_code(ex)
%0A
-
@@ -5128,32 +5128,39 @@
self.
+update_
exit_code = 5%0A
@@ -5156,12 +5156,11 @@
code
- = 5
+(5)
%0A
|
09cb8a0fbb10f14d6622bbeed815e025e4eb1751
|
Update newServer.py
|
Server/newServer.py
|
Server/newServer.py
|
__author__ = 'masudurrahman'
import sys
import os
from twisted.protocols import ftp
from twisted.protocols.ftp import FTPFactory, FTPAnonymousShell, FTPRealm, FTP, FTPShell, IFTPShell
from twisted.cred.portal import Portal
from twisted.cred import checkers
from twisted.cred.checkers import AllowAnonymousAccess, FilePasswordDB
from twisted.internet import reactor
from twisted.python import log
from twisted.internet.defer import succeed, failure
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
# def opsCall(obj):
# print "Processing", obj.fObj.name
# return "Completed"
# class MyFTPRealm(FTPRealm):
# def __init__(self, anonymousRoot):
# self.anonymousRoot = filepath.FilePath(anonymousRoot)
# def requestAvatar(self, avatarId, mind, *interfaces):
# for iface in interfaces:
# if iface is IFTPShell:
# if avatarId is checkers.ANONYMOUS:
# avatar = FTPAnonymousShell(self.anonymousRoot)
# else:
# avatar = FTPShell(filepath.FilePath("/home/") + avatarId)
# return (IFTPShell, avatar,
# getattr(avatar, 'logout', lambda: None))
# raise NotImplementedError("Only IFTPShell interface is supported by this realm")
if __name__ == "__main__":
# Try#1
# p = Portal(MyFTPRealm('./'),[AllowAnonymousAccess(), FilePasswordDB("pass.dat")])
# Try#2
# p = Portal(MyFTPRealm('/no_anon_access/', userHome="/tmp/", callback=opsCall),[FilePasswordDB("pass.dat", ":", 0, 0, True, None, False)])
# Try#3
checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
check.addUser("guest", "password")
realm = FTPRealm()
p = portal.Portal(realm, [checker])
f = ftp.FTPFactory(p)
f.welcomeMessage = "CS3240 Team 4 Project"
log.startLogging(sys.stdout)
reactor.listenTCP(21, f)
reactor.run()
# PASSWORD = ''
# users = {
# os.environ['USER']: PASSWORD
# }
# p = Portal(FTPRealm('./', userHome='/Users'),
# ( AllowAnonymousAccess(),
# InMemoryDB(**users),)
# )
# f = FTPFactory(p)
# reactor.listenTCP(21, f)
# reactor.run()
|
Python
| 0.000001
|
@@ -1691,16 +1691,18 @@
check
+er
.addUser
@@ -1744,16 +1744,39 @@
TPRealm(
+'./', userHome='/Users'
)%0A p
@@ -1781,15 +1781,8 @@
p =
-portal.
Port
|
082d1dde70dba7549d14ea13c3400cdbe8e582ec
|
Fix since outer_if_first went away. OUTER JOINs are now the default.
|
mkt/translations/query.py
|
mkt/translations/query.py
|
import itertools
from django.conf import settings
from django.db import models
from django.utils import translation as translation_utils
from mkt.webapps import query
def order_by_translation(qs, fieldname):
"""
Order the QuerySet by the translated field, honoring the current and
fallback locales. Returns a new QuerySet.
The model being sorted needs a get_fallback() classmethod that describes
the fallback locale. get_fallback() can return a string or a Field.
"""
if fieldname.startswith('-'):
desc = True
fieldname = fieldname[1:]
else:
desc = False
qs = qs.all()
model = qs.model
field = model._meta.get_field(fieldname)
# connection is a tuple (lhs, table, join_cols)
connection = (model._meta.db_table, field.rel.to._meta.db_table,
field.rel.field_name)
# Doing the manual joins is flying under Django's radar, so we need to make
# sure the initial alias (the main table) is set up.
if not qs.query.tables:
qs.query.get_initial_alias()
# Force two new (reuse is an empty set) LEFT OUTER JOINs against the
# translation table, without reusing any aliases. We'll hook up the
# language fallbacks later.
qs.query = qs.query.clone(TranslationQuery)
t1 = qs.query.join(connection, join_field=field,
outer_if_first=True, reuse=set())
t2 = qs.query.join(connection, join_field=field,
outer_if_first=True, reuse=set())
qs.query.translation_aliases = {field: (t1, t2)}
f1, f2 = '%s.`localized_string`' % t1, '%s.`localized_string`' % t2
name = 'translated_%s' % field.column
ifnull = 'IFNULL(%s, %s)' % (f1, f2)
prefix = '-' if desc else ''
return qs.extra(select={name: ifnull},
where=['(%s IS NOT NULL OR %s IS NOT NULL)' % (f1, f2)],
order_by=[prefix + name])
class TranslationQuery(query.IndexQuery):
"""
Overrides sql.Query to hit our special compiler that knows how to JOIN
translations.
"""
def clone(self, klass=None, **kwargs):
# Maintain translation_aliases across clones.
c = super(TranslationQuery, self).clone(klass, **kwargs)
c.translation_aliases = self.translation_aliases
return c
def get_compiler(self, using=None, connection=None):
# Call super to figure out using and connection.
c = super(TranslationQuery, self).get_compiler(using, connection)
return SQLCompiler(self, c.connection, c.using)
class SQLCompiler(query.IndexCompiler):
"""Overrides get_from_clause to LEFT JOIN translations with a locale."""
def get_from_clause(self):
# Temporarily remove translation tables from query.tables so Django
# doesn't create joins against them.
old_tables = list(self.query.tables)
for table in itertools.chain(*self.query.translation_aliases.values()):
self.query.tables.remove(table)
joins, params = super(SQLCompiler, self).get_from_clause()
# fallback could be a string locale or a model field.
params.append(translation_utils.get_language())
if hasattr(self.query.model, 'get_fallback'):
fallback = self.query.model.get_fallback()
else:
fallback = settings.LANGUAGE_CODE
if not isinstance(fallback, models.Field):
params.append(fallback)
# Add our locale-aware joins. We're not respecting the table ordering
# Django had in query.tables, but that seems to be ok.
for field, aliases in self.query.translation_aliases.items():
t1, t2 = aliases
joins.append(self.join_with_locale(t1))
joins.append(self.join_with_locale(t2, fallback))
self.query.tables = old_tables
return joins, params
def join_with_locale(self, alias, fallback=None):
# This is all lifted from the real sql.compiler.get_from_clause(),
# except for the extra AND clause. Fun project: fix Django to use Q
# objects here instead of a bunch of strings.
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
mapping = self.query.alias_map[alias]
# name, alias, join_type, lhs, lhs_col, col, nullable = mapping
name, alias, join_type, lhs, join_cols, _, join_field = mapping
lhs_col = join_field.column
rhs_col = join_cols
alias_str = '' if alias == name else (' %s' % alias)
if isinstance(fallback, models.Field):
fallback_str = '%s.%s' % (qn(self.query.model._meta.db_table),
qn(fallback.column))
else:
fallback_str = '%s'
return ('%s %s%s ON (%s.%s = %s.%s AND %s.%s = %s)' %
(join_type, qn(name), alias_str,
qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col),
qn(alias), qn('locale'), fallback_str))
|
Python
| 0
|
@@ -1343,52 +1343,8 @@
eld,
-%0A outer_if_first=True,
reu
@@ -1409,52 +1409,8 @@
eld,
-%0A outer_if_first=True,
reu
|
df5fc7af67aed3aa2d2aeea4cef03d8dd790f1a4
|
Fix ios enable password regex in terminal plugin (#35741)
|
lib/ansible/plugins/terminal/ios.py
|
lib/ansible/plugins/terminal/ios.py
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import re
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n][\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Error"),
# re.compile(br"^% \w+", re.M),
re.compile(br"% ?Bad secret"),
re.compile(br"[\r\n%] Bad passwords"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"[^\r\n]+ not found"),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"Bad mask", re.I),
re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I),
re.compile(br"[%\S] ?Error: ?[\s]+", re.I),
re.compile(br"[%\S] ?Informational: ?[\s]+", re.I)
]
def on_open_shell(self):
try:
for cmd in (b'terminal length 0', b'terminal width 512'):
self._exec_cli_command(cmd)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_become(self, passwd=None):
if self._get_prompt().endswith(b'#'):
return
cmd = {u'command': u'enable'}
if passwd:
# Note: python-3.5 cannot combine u"" and r"" together. Thus make
# an r string and use to_text to ensure it's text on both py2 and py3.
cmd[u'prompt'] = to_text(r"[\r\n]password: $", errors='surrogate_or_strict')
cmd[u'answer'] = passwd
cmd[u'prompt_retry_check'] = True
try:
self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
prompt = self._get_prompt()
if prompt is None or not prompt.endswith(b'#'):
raise AnsibleConnectionFailure('failed to elevate privilege to enable mode still at prompt [%s]' % prompt)
except AnsibleConnectionFailure as e:
prompt = self._get_prompt()
raise AnsibleConnectionFailure('unable to elevate privilege to enable mode, at prompt [%s] with error: %s' % (prompt, e.message))
def on_unbecome(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
return
if b'(config' in prompt:
self._exec_cli_command(b'end')
self._exec_cli_command(b'disable')
elif prompt.endswith(b'#'):
self._exec_cli_command(b'disable')
|
Python
| 0
|
@@ -2425,16 +2425,17 @@
ssword:
+?
$%22, erro
|
b2fce504fd0b1bd72a0d383f8ea207d20d068729
|
debug stmts
|
api/src/api/v1/handlers/statsHandler.py
|
api/src/api/v1/handlers/statsHandler.py
|
from piston.handler import BaseHandler
import json
from piston.utils import rc
from google.protobuf.message import DecodeError
import log_analytics_proto
from src.api.v1.settings import apiLogger
import apiHelper
import traceback
# import worker classes/functions
from src.api.workers import dbWorker
from src.utils import utils
from src.utils import threadCache
import base64
class StatsHandler( BaseHandler ):
def read(self, request):
apiLogger.debug("Begin stats get handler")
if request.content_type == "application/octet-stream":
pass
else:
return apiHelper.badRequest(code = 101, detail = "Invalid content-type " + str(request.content_type))
req_enc_str = request.GET.get('req', '')
request_data = base64.b64decode(req_enc_str)
req_obj = log_analytics_proto.ReqMsg()
try:
req_obj.ParseFromString(request_data)
except DecodeError, e:
return apiHelper.badRequest(code = 102, detail = "Failed to decode protobuf. " + e)
if req_obj.req_id is None or \
req_obj.req_type is None:
return apiHelper.badRequest(code = 103, detail = "Null value specified for REQUIRED fields")
tcObj = apiHelper.init(reqId = req_obj.req_id)
req_id = req_obj.req_id
if req_obj.req_type == 1:
# default scale is 2 - daily
req_payload = log_analytics_proto.ReqPayloadStruct()
try:
req_payload = req_obj.req_payload
except DecodeError, e:
return apiHelper.badRequest(code = 104, detail = "Failed to decode protobuf. " + e)
scale = req_payload.scale if req_payload.scale is not None else 2
hostname = req_payload.hostname
time_from = req_payload.time_from
# default time_to is current time
time_to = req_payload.time_to if req_payload.time_to is not None else apiHelper.getCurrentTime()
retval, err_msg = utils.validateInput(hostname = hostname, \
scale = scale, \
time_from = time_from, \
time_to = time_to)
if retval is False:
return apiHelper.badRequest(code = 105, detail = err_msg)
resp_dict, retval, err_msg = dbWorker.getResponse(hostname = hostname, scale = scale, time_from = time_from, time_to = time_to)
if retval is False:
return apiHelper.badRequest(code = 106, detail = err_msg)
#resp_dict = {}
#resp_dict = apiHelper.getTestRespDict()
resp_obj, retval, err_msg = apiHelper.constructRespObj(resp_dict = resp_dict,
req_id = req_id)
if retval is False:
return apiHelper.badRequest(code = 107, detail = err_msg)
# get last visitors
elif req_obj.req_type == 2:
visitors_count = req_payload.visitors_count if req_payload.visitors_count is not None else 10
resp_dict, retval, err_msg = dbWorker.getResponse2(hostname = hostname, visitors_count = visitors_count)
if retval is False:
return apiHelper.badRequest(code = 108, detail = err_msg)
resp_obj, retval, err_msg = apiHelper.constructRespObj(resp_dict = resp_dict,
req_id = req_id)
if retval is False:
return apiHelper.badRequest(code = 109, detail = err_msg)
else:
return apiHelper.badRequest(code = 111, detail = "Invalid request type!!")
resp = rc.ALL_OK
resp['Content-Type'] = 'application/octet-stream'
resp.content = resp_obj.SerializeToString()
return resp
def create(self, request):
return apiHelper.badRequest(code = 201, detail = "POST method not allowed")
def update(self, request):
return apiHelper.badRequest(code = 301, detail = "PUT method not allowed")
def delete(self, request):
return apiHelper.badRequest(code = 401, detail = "DELETE method not allowed")
|
Python
| 0.000001
|
@@ -1328,92 +1328,8 @@
%0A %0A
- if req_obj.req_type == 1:%0A %0A # default scale is 2 - daily%0A
@@ -1381,26 +1381,24 @@
oadStruct()%0A
-
try:
@@ -1404,26 +1404,24 @@
:%0A
-
req_payload
@@ -1446,26 +1446,24 @@
oad%0A
-
-
except Decod
@@ -1465,34 +1465,32 @@
DecodeError, e:%0A
-
return
@@ -1567,28 +1567,102 @@
%22 + e)%0A
-
+%0A if req_obj.req_type == 1:%0A %0A # default scale is 2 - daily
%0A
|
25e71a56d48e5bdc4d73522333196d69d735707a
|
Update the PCA10056 example to use new pin naming
|
ports/nrf/boards/pca10056/examples/buttons.py
|
ports/nrf/boards/pca10056/examples/buttons.py
|
import board
import digitalio
import gamepad
import time
pad = gamepad.GamePad(
digitalio.DigitalInOut(board.PA11),
digitalio.DigitalInOut(board.PA12),
digitalio.DigitalInOut(board.PA24),
digitalio.DigitalInOut(board.PA25),
)
prev_buttons = 0
while True:
buttons = pad.get_pressed()
if buttons != prev_buttons:
for i in range(0, 4):
bit = (1 << i)
if (buttons & bit) != (prev_buttons & bit):
print('Button %d %s' % (i + 1, 'pressed' if buttons & bit else 'released'))
prev_buttons = buttons
time.sleep(0.1)
|
Python
| 0
|
@@ -108,17 +108,18 @@
(board.P
-A
+0_
11),%0A
@@ -149,17 +149,18 @@
(board.P
-A
+0_
12),%0A
@@ -190,17 +190,18 @@
(board.P
-A
+0_
24),%0A
@@ -235,9 +235,10 @@
rd.P
-A
+0_
25),
|
3de29a3fdd17beece1fbe26c4f578cd854d16d0d
|
Fix bug introduced in update_from_old_problemformat.py
|
problemtools/update_from_old_problemformat.py
|
problemtools/update_from_old_problemformat.py
|
# -*- coding: utf-8 -*-
import argparse
import glob
import os.path
import yaml
def update(problemdir):
probyaml = os.path.join(problemdir, 'problem.yaml')
if not os.path.isfile(probyaml):
raise Exception('Could not find %s' % probyaml)
config = yaml.safe_load('%s' % open(probyaml, 'r').read())
stmts = glob.glob(os.path.join(problemdir, 'problem_statement/problem.tex'))
stmts.extend(glob.glob(os.path.join(problemdir, 'problem_statement/problem.[a-z][a-z].tex')))
yaml_changed = False
if 'name' in config:
print('Move problem name "%s" to these problem statement files: %s' % (config['name'], stmts))
for f in stmts:
stmt = open(f, 'r').read()
if stmt.find('\\problemname{') != -1:
print(' Statement %s already has a problemname, skipping' % f)
continue
newstmt = '\\problemname{%s}\n\n%s' % (config['name'], stmt)
open(f, 'w').write(newstmt)
del config['name']
yaml_changed = True
if 'validator' in config:
validator_flags = config['validator'].split()
validation = 'default'
if validator_flags[0] == 'custom':
validation = 'custom'
validator_flags = validator_flags[1:]
validator_flags = ' '.join(validator_flags)
print('Old validator option exists, moving to validation: %s, validator_flags: %s' % (validation, validator_flags))
config['validation'] = validation
if validator_flags != '':
config['validator_flags'] = validator_flags
del config['validator']
yaml_changed = True
if yaml_changed:
open(probyaml, 'w').write(yaml.dump(config, default_flow_style=False, allow_unicode=True))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('problemdir', nargs='+')
options = parser.parse_args()
for problemdir in options:
try:
print('Updating %s' % problemdir)
update(problemdir)
except Exception as e:
print('Update FAILED: %s' % e)
|
Python
| 0
|
@@ -1946,16 +1946,27 @@
options
+.problemdir
:%0A
|
a3bb1ff203789b6547e241f2ba0108e89bd1aefe
|
Remove mystery import
|
profile_collection/startup/80-areadetector.py
|
profile_collection/startup/80-areadetector.py
|
from ophyd.controls.area_detector import (AreaDetectorFileStoreHDF5,
AreaDetectorFileStoreTIFF,
AreaDetectorFileStoreTIFFSquashing)
from shutter import sh1
shctl1 = EpicsSignal('XF:28IDC-ES:1{Det:PE1}cam1:ShutterMode', name='shctl1')
pe1 = AreaDetectorFileStoreTIFFSquashing(
'XF:28IDC-ES:1{Det:PE1}',
name='pe1',
stats=[],
ioc_file_path = 'G:/pe1_data',
file_path = '/home/xf28id1/pe1_data',
shutter=shctl1,
shutter_val=(1, 0)
)
# Dan and Sanjit commented this out in June.
#shctl2 = EpicsSignal('XF:28IDC-ES:1{Det:PE2}cam1:ShutterMode', name='shctl2')
#pe2 = AreaDetectorFileStoreTIFFSquashing(
# 'XF:28IDC-ES:1{Det:PE2}',
# name='pe2',
# stats=[],
# ioc_file_path = 'G:/pe2_data',
# file_path = '/home/xf28id1/pe2_data',
# shutter=shctl2,
# shutter_val=(1,0))
|
Python
| 0
|
@@ -210,16 +210,18 @@
shing)%0A%0A
+#
from shu
|
37062a5695eea63726630e98019c85f7985306a2
|
Use a clearer attribute name
|
dataobject.py
|
dataobject.py
|
import logging
import remoteobjects.fields
all_classes = {}
def find_by_name(name):
"""Finds and returns the DataObject subclass with the given name.
Parameter `name` should be a full dotted module and class name.
"""
return all_classes[name]
class DataObjectMetaclass(type):
def __new__(cls, name, bases, attrs):
fields = {}
new_fields = {}
# Inherit all the parent DataObject classes' fields.
for base in bases:
if isinstance(base, DataObjectMetaclass):
fields.update(base.fields)
# Move all the class's attributes that are Fields to the fields set.
for attrname, field in attrs.items():
if isinstance(field, remoteobjects.fields.Field):
new_fields[attrname] = field
del attrs[attrname]
elif attrname in fields:
# Throw out any parent fields that the subclass defined as
# something other than a Field.
del fields[attrname]
fields.update(new_fields)
attrs['fields'] = fields
obj_cls = super(DataObjectMetaclass, cls).__new__(cls, name, bases, attrs)
# Register the new class so Object fields can forward-reference it.
all_classes['.'.join((obj_cls.__module__, name))] = obj_cls
# Tell this class's fields what this class is, so they can find their
# forward references later.
for field in new_fields.values():
field.of_cls = obj_cls
return obj_cls
class DataObject(object):
"""An object that can be decoded from or encoded as a dictionary, suitable
for serializing to or deserializing from JSON.
DataObject subclasses should be declared with their different data
attributes defined as instances of fields from the `remoteobjects.fields`
module. For example:
>>> from remoteobjects import DataObject, fields
>>> class Asset(DataObject):
... name = fields.Something()
... updated = fields.Datetime()
... author = fields.Object('Author')
...
A DataObject's fields then provide the coding between live DataObject
instances and dictionaries.
"""
__metaclass__ = DataObjectMetaclass
def __init__(self, **kwargs):
self._id = None
self.__dict__.update(kwargs)
def to_dict(self):
"""Encodes the DataObject to a dictionary."""
try:
# TODO: this shallow copy only prevents sticky modification of the
# dict's contents, not the contents' contents.
data = dict(self._dict)
except AttributeError:
data = {}
for field_name, field in self.fields.iteritems():
field.encode_into(self, data, field_name=field_name)
return data
@classmethod
def from_dict(cls, data):
"""Decodes a dictionary into an instance of the DataObject class."""
self = cls()
self.update_from_dict(data)
return self
def update_from_dict(self, data):
"""Adds the content of a dictionary to this DataObject.
Parameter `data` is the dictionary from which to update the object.
Use this only when receiving newly updated or partial content for a
DataObject; that is, when the data is from the outside data source and
needs to be decoded through the object's fields. Data from "inside" should
be added to an object manually by setting the object's attributes.
Data that constitutes a new object should be turned into another
object with `from_dict()`.
"""
# Remember this extra data, so we can play it back later.
if not hasattr(self, '_dict'):
self._dict = {}
self._dict.update(data)
for field_name, field in self.fields.iteritems():
field.decode_into(data, self, field_name=field_name)
|
Python
| 0.005027
|
@@ -2609,20 +2609,28 @@
t(self._
-dict
+originaldata
)%0A
@@ -3728,20 +3728,28 @@
self, '_
-dict
+originaldata
'):%0A
@@ -3762,20 +3762,28 @@
self._
-dict
+originaldata
= %7B%7D%0A
@@ -3794,20 +3794,28 @@
self._
-dict
+originaldata
.update(
|
9bd5b66a50def87de2b8a37ba452ee4efc8a17b7
|
add docstring for update_average
|
web/aliendb/apps/analytics/helpers.py
|
web/aliendb/apps/analytics/helpers.py
|
def update_average(field, value, tracked):
return (value + field * tracked) / (1 + tracked)
|
Python
| 0
|
@@ -38,9 +38,320 @@
ked)
-:
+ -%3E float:%0A %22%22%22Updates a previously calculated average with a new value.%0A%0A Args:%0A field: the current average;%0A value: the new value to include in the average;%0A tracked: the number of elements used to form the _original_ average;%0A%0A Returns:%0A float: the updated average%0A %22%22%22
%0A
@@ -400,12 +400,8 @@
racked)%0A
-
|
4839c43db77a88a872db07ab99be0fdd29bb24fc
|
Remove blank from 'irrelevant' preferable tendency
|
LandPortalEntities/lpentities/indicator.py
|
LandPortalEntities/lpentities/indicator.py
|
'''
Created on 19/12/2013
@author: Nacho
'''
from lpentities.measurement_unit import MeasurementUnit
class Indicator(object):
"""
classdocs
"""
#Simulated Enum Values
INCREASE = "increase"
DECREASE = "decrease"
IRRELEVANT = "irrelevant "
#Possible topics
_topics_set = ['CLIMATE_CHANGE', 'GEOGRAPH_SOCIO', 'LAND_USE', 'LAND_GENDER', 'LAND_TENURE', 'FSECURITY_HUNGER', 'TEMP_TOPIC']
def __init__(self, chain_for_id, int_for_id, name_en=None, name_es=None,
name_fr=None, description_en=None, description_es=None,
description_fr=None, dataset=None, measurement_unit=None,
topic=None, preferable_tendency=None):
"""
Constructor
"""
self.name_en = name_en
self.name_es = name_es
self.name_fr = name_fr
self.description_en = description_en
self.description_es = description_es
self.description_fr = description_fr
self.dataset = dataset
self._measurement_unit = measurement_unit
self._topic = topic
self._preferable_tendency = preferable_tendency
self.indicator_id = self._generate_id(chain_for_id, int_for_id)
def __get_measurement_unit(self):
return self._measurement_unit
def __set_measurement_unit(self, measurement_unit):
if isinstance(measurement_unit, MeasurementUnit):
self._measurement_unit = measurement_unit
else:
raise ValueError("Expected Measurement object in Indicator")
measurement_unit = property(fget=__get_measurement_unit, fset=__set_measurement_unit, doc="MeasurementUnit of the indicator")
def __get_topic(self):
return self._topic
def __set_topic(self, topic):
if topic.upper() in self._topics_set:
self._topic = topic
else:
raise ValueError("Provided topic not in the specified list")
topic = property(fget=__get_topic, fset=__set_topic, doc="Topic of the indicator")
def __get_preferable_tendency(self):
return self._preferable_tendency
def __set_preferable_tendency(self, preferable_tendency):
if preferable_tendency == self.DECREASE or preferable_tendency == self.INCREASE or preferable_tendency == self.IRRELEVANT:
self._preferable_tendency = preferable_tendency
else:
raise ValueError("Provided tendency not in the specified list")
preferable_tendency = property(fget=__get_preferable_tendency, fset=__set_preferable_tendency, doc="Preferable tendency of the indicator")
@staticmethod
def _generate_id(chain_for_id, int_for_id):
return "IND" + chain_for_id.upper() + str(int_for_id).upper()
|
Python
| 0.000236
|
@@ -261,17 +261,16 @@
relevant
-
%22%0A%0A #
|
48cf73c12f1c586d5ce71fd872f9054b4209d13b
|
adds missing colon
|
GPIOTest.py
|
GPIOTest.py
|
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(2, GPIO.OUT)
GPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)
status = not GPIO.input(2)
autoswitch=GPIO.input(4)
onswitch=GPIO.input(17)
GPIO.output(2, status)
if status:
print('Auto status = Off')
else:
print('Auto status = On')
if autoswitch:
print('Auto LED = Off')
else:
print('Auto LED = Blue')
if not onswitch
print ('Status LED = Green')
elif autoswitch:
if status:
print ('Status LED = Red')
else:
print ('Status LED = Green')
else:
print ('Status LED = Red')
#GPIO.cleanup()
|
Python
| 0.998656
|
@@ -465,16 +465,17 @@
onswitch
+:
%0A%09print
|
aaac2228119bf965183d30ebf9d4b8cb13699fd8
|
fix tkinter for python 3
|
GroupEng.py
|
GroupEng.py
|
#!/usr/bin/python
# Copyright 2011, Thomas G. Dimiduk
#
# This file is part of GroupEng.
#
# GroupEng is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GroupEng is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with GroupEng. If not, see <http://www.gnu.org/licenses/>.
"""
External GroupEng Application. Handles user invocation and marshalls things for
use by the rest of GroupEng
.. moduleauthor:: Thomas G. Dimiduk tgd8@cornell.edu
"""
import sys
import os.path
import os
from src import controller
if len(sys.argv) > 1:
try:
debug = os.environ['DEBUG'].lower() == 'true'
except KeyError:
debug = False
if debug:
status, outdir = controller.run(sys.argv[1])
if not status:
print('Could not completely meet all rules')
else:
try:
status, outdir = controller.run(sys.argv[1])
if not status:
print('Could not completely meet all rules')
except Exception as e:
print(e)
else:
# import gui stuff only if we are going to use it
from Tkinter import *
from tkFileDialog import askopenfilename
from tkMessageBox import showerror, showinfo
path = askopenfilename()
d, f = os.path.split(path)
os.chdir(d)
try:
status, outdir = controller.run(f)
except Exception as e:
showerror('GroupEng Error', '{0}'.format(e))
if status:
showinfo("GroupEng", "GroupEng Run Succesful\n Output in: {0}".format(outdir))
else:
showinfo("GroupEng", "GroupEng Ran Correctly but not all rules could be met\n"
"Output in: {0}".format(outdir))
|
Python
| 0.000069
|
@@ -1513,16 +1513,83 @@
use it%0A
+ try:%0A from tkinter import *%0A except ImportError:%0A
from
|
3e7d433c193bd2e35b2c760297d81973f56b3eec
|
Fix test cases
|
node/floor_divide.py
|
node/floor_divide.py
|
#!/usr/bin/env python
from nodes import Node
import math
class FloorDiv(Node):
char = "f"
args = 2
results = 1
@Node.test_func([3,2], [1])
@Node.test_func([6,-3], [-2])
def func(self, a:Node.number,b:Node.number):
"""a/b. Rounds down, returns an int."""
return a//b
@Node.test_func(["test", "e"], [["t", "e", "st"]])
def partition(self, string:str, sep:str):
"""Split the string at the first occurrence of sep,
return a 3-list containing the part before the separator,
the separator itself, and the part after the separator.
If the separator is not found,
return a 3-list containing the string itself,
followed by two empty strings."""
return [list(string.partition(sep))]
@Node.test_func(["134", 1], [["134"]])
@Node.test_func(["1234", 2], [["12", "34"]])
@Node.test_func(["1234", 3], [["1", "2", "34"]])
@Node.test_func([[4,8,15,16,23,42], 5], [[[4],[8],[15],[16],[23,42]]])
@Node.test_func(["123456789", 5], [['1', '2', '3', '4', '56789']])
@Node.test_func([[4,8,15,16,23,42], 7], [[[],[],[],[],[],[],[4,8,15,16,23,42]]])
def chunk(self, inp:Node.indexable, num:int):
"""Return inp seperated into num groups"""
rtn = []
size = len(inp)//num
try:
for i in range(0, num*size, size):
rtn.append(inp[i:i+size])
except ValueError:
for i in range(num): rtn.append([])
i = 0
if len(rtn) != num:
rtn.append(inp[i+size:])
else:
rtn[-1] += inp[i+size:]
return [rtn]
@Node.test_func([[4, 4, 2, 2, 9, 9], [0, -2, 0, 7, 0]], [[[4,4],[2,2],[9,9]]])
def split_at(self, inp:Node.sequence, splits:Node.sequence):
"""Split inp at truthy values in splits"""
rtn = [[]]
for i, do_split in zip(inp, splits+[0]):
if do_split: rtn.append([])
rtn[-1].append(i)
return [rtn]
|
Python
| 0.000241
|
@@ -1674,19 +1674,19 @@
%5B%5B%5B4
-,4
%5D,%5B
-2
+4
,2%5D,%5B
+2,
9,9%5D
|
a9c9cbac36568676be194024f6f660e4fc3f03b6
|
Add old list to applist migration
|
src/yunohost/data_migrations/0010_migrate_to_apps_json.py
|
src/yunohost/data_migrations/0010_migrate_to_apps_json.py
|
import os
from moulinette.utils.log import getActionLogger
from yunohost.app import app_fetchlist, app_removelist, _read_appslist_list, APPSLISTS_JSON
from yunohost.tools import Migration
logger = getActionLogger('yunohost.migration')
BASE_CONF_PATH = '/home/yunohost.conf'
BACKUP_CONF_DIR = os.path.join(BASE_CONF_PATH, 'backup')
APPSLISTS_BACKUP = os.path.join(BACKUP_CONF_DIR, "appslist_before_migration_to_unified_list.json")
class MyMigration(Migration):
"Migrate from official.json to apps.json"
def migrate(self):
# Backup current app list json
os.system("cp %s %s" % (APPSLISTS_JSON, APPSLISTS_BACKUP))
# Remove all the deprecated lists
lists_to_remove = [
"https://app.yunohost.org/official.json",
"https://app.yunohost.org/community.json",
"https://labriqueinter.net/apps/labriqueinternet.json"
]
appslists = _read_appslist_list()
for appslist, infos in appslists.items():
if infos["url"] in lists_to_remove:
app_removelist(name=appslist)
# Replace by apps.json list
app_fetchlist(name="yunohost",
url="https://app.yunohost.org/apps.json")
def backward(self):
if os.path.exists(APPSLISTS_BACKUP):
os.system("cp %s %s" % (APPSLISTS_BACKUP, APPSLISTS_JSON))
|
Python
| 0
|
@@ -707,16 +707,122 @@
ove = %5B%0A
+ %22http://app.yunohost.org/list.json%22, # Old list on old installs, alias to official.json%0A
|
9316ec9f2246ac14176d9bf9d27287dfccedb3f3
|
Update to 0.3.0
|
azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/version.py
|
azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/version.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
VERSION = "0.2.0"
|
Python
| 0
|
@@ -337,8 +337,9 @@
%220.
-2
+3
.0%22
+%0A
|
8c89a0d52c43f96d9673b8b84786a7185ddc3f6f
|
Bump WireCloud version
|
src/wirecloud/platform/__init__.py
|
src/wirecloud/platform/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2014 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
__version_info__ = (0, 7, 0)
__version__ = '.'.join(map(str, __version_info__)) + 'b1'
|
Python
| 0
|
@@ -856,11 +856,11 @@
_)) + 'b
-1
+2
'%0A
|
a4dcfff5214e3956f45bf0ef853dea871dbd8da9
|
Fix for missing 'methods' section
|
delphi-395.py
|
delphi-395.py
|
import argparse
import os
import re
from uuid import uuid4
from jinja2 import Environment, PackageLoader
from yaml import load, Loader
def get_config():
parser = argparse.ArgumentParser(description='Construct Delphi classes from a YAML template.')
parser.add_argument('source_files', metavar='SOURCE', type=argparse.FileType('r'), nargs='+',
help='A YAML template file from which Delphi source will be generated')
return parser.parse_args()
def split_on_space_underscore_and_upper(name):
return ((re.sub(r"([A-Z])", r" \1", name)).replace('_', ' ')).split()
def augment_name(target):
if 'name' in target:
split_name = split_on_space_underscore_and_upper(target['name'])
target['name_camelcase'] = split_name[0] + ''.join(x.title() for x in split_name[1:])
target['name_titlecase'] = ''.join(x.title() for x in split_name)
target['name_snakecase'] = '_'.join(x.lower() for x in split_name)
def augment_names(data):
if 'name' in data['type'] and data['type']['name'] is not None:
augment_name(data['type'])
if 'requirements' in data['type'] and data['type']['requirements'] is not None:
for requirement in data['type']['requirements']:
augment_name(requirement)
if 'variables' in data['type'] and data['type']['variables'] is not None:
for variable in data['type']['variables']:
augment_name(variable)
def augment_uuids(data):
data['type']['uuid'] = str(uuid4()).upper()
def create_method_definition(method, class_title):
first, rest = method.split('\n', 1)
match = re.match(r"(function\s+|procedure\s+)(.*)", first)
return {
'definition': first,
'body': "{0}T{1}.{2}\n{3}".format(match.group(1), class_title, match.group(2), rest)
}
def augment_methods(data):
data['type']['methods'] = [create_method_definition(method, data['type']['name_titlecase']) for method in data['type']['methods']
if 'name_titlecase' in data['type'] and
data['type']['name_titlecase'] is not None and
'methods' in data['type'] and
data['type']['methods'] is not None]
def augment_data(data):
if 'type' in data:
augment_uuids(data)
augment_names(data)
augment_methods(data)
def expand_template(config, template, template_file_name, type_data):
with open(template_file_name % type_data, 'w') as dest_file:
dest_file.write(template.render(type_data))
def expand_templates(config, env, source_file):
data = load(source_file, Loader=Loader)
augment_data(data)
for template_file_name in os.listdir('templates'):
if template_file_name[-4:] == '.pas':
augment_uuids(data)
expand_template(config, env.get_template(template_file_name), template_file_name, data['type'])
def main():
config = get_config()
env = Environment(lstrip_blocks=True, trim_blocks=True, loader=PackageLoader('delphi-395', 'templates'))
for source_file in config.source_files:
expand_templates(config, env, source_file)
if __name__ == "__main__":
main()
|
Python
| 0.001131
|
@@ -1831,32 +1831,110 @@
_methods(data):%0A
+ if 'methods' in data%5B'type'%5D and data%5B'type'%5D%5B'methods'%5D is not None:%0A
data%5B'type'%5D
@@ -2078,24 +2078,28 @@
+
if 'name_tit
@@ -2149,32 +2149,36 @@
+
+
data%5B'type'%5D%5B'na
@@ -2239,16 +2239,20 @@
+
'methods
@@ -2265,32 +2265,36 @@
ata%5B'type'%5D and%0A
+
|
36bfa8f556941848eb1a809d48aae1aa43f23c3f
|
Add option to choose if we keep the <none> images
|
di-cleaner.py
|
di-cleaner.py
|
#!/usr/bin/env python
import argparse
import atexit
import logging
import sys
from pprint import pformat
DEFAULT_DOCKER_BASE_URL = 'unix://var/run/docker.sock'
HELP_DOCKER_BASE_URL = ('Refers to the protocol+hostname+port where the '
'Docker server is hosted. Defaults to %s') % DEFAULT_DOCKER_BASE_URL
DEFAULT_DOCKER_API_VERSION = 'auto'
HELP_DOCKER_API_VERSION = ('The version of the API the client will use. '
'Defaults to use the API version provided by the server')
DEFAULT_DOCKER_HTTP_TIMEOUT = 5
HELP_DOCKER_HTTP_TIMEOUT = ('The HTTP request timeout, in seconds. '
'Defaults to %d secs') % DEFAULT_DOCKER_HTTP_TIMEOUT
DEFAULT_IMAGES_TO_KEEP = 2
HELP_IMAGES_TO_KEEP = ('How many docker images to keep. '
'Defaults to %d images') % DEFAULT_IMAGES_TO_KEEP
def _exit():
logging.shutdown()
def debug_var(name, var):
logging.debug('Var %s has: %s' % (name, pformat(var)))
def setup_parser(parser):
parser.add_argument('--debug', help='debug mode', action='store_true')
parser.add_argument('--base-url', help=HELP_DOCKER_BASE_URL, default=DEFAULT_DOCKER_BASE_URL)
parser.add_argument('--api-version', help=HELP_DOCKER_API_VERSION, default=DEFAULT_DOCKER_API_VERSION)
parser.add_argument('--http-timeout', help=HELP_DOCKER_HTTP_TIMEOUT, default=DEFAULT_DOCKER_HTTP_TIMEOUT, type=int)
parser.add_argument('--images-to-keep', help=HELP_IMAGES_TO_KEEP, default=DEFAULT_IMAGES_TO_KEEP, type=int)
return parser
def validate_args(args):
if args.http_timeout < 0:
sys.stderr.write('HTTP timeout should be 0 or bigger\n')
if args.images_to_keep < 0:
sys.stderr.write('Images to keep should be 0 or bigger\n')
sys.exit(1)
def main():
atexit.register(func=_exit)
parser = setup_parser(argparse.ArgumentParser(description='Clean old docker images'))
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
debug_var(name='args', var=args)
validate_args(args)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -774,16 +774,62 @@
_TO_KEEP
+%0AHELP_KEEP_NONE_IMAGES = 'Keep %3Cnone%3E images'
%0A%0Adef _e
@@ -1479,24 +1479,119 @@
, type=int)%0A
+ parser.add_argument('--keep-none-images', help=HELP_KEEP_NONE_IMAGES, action='store_true')%0A
return p
|
edf099ca644aae12daef65ff65744d99fcd3a634
|
Remove function we won't actually use.
|
st2common/st2common/util/compat.py
|
st2common/st2common/util/compat.py
|
# -*- coding: utf-8 -*-
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import six
__all__ = [
'to_unicode',
'to_ascii',
'add_st2actions_pythonrunner_to_sys_path'
]
def to_unicode(value):
"""
Ensure that the provided text value is represented as unicode.
:param value: Value to convert.
:type value: ``str`` or ``unicode``
:rtype: ``unicode``
"""
if not isinstance(value, six.string_types):
raise ValueError('Value "%s" must be a string.' % (value))
if not isinstance(value, six.text_type):
value = six.u(value)
return value
def to_ascii(value):
"""
Function which encodes the provided bytes / string to ASCII encoding ignoring any errors
which could come up when trying to encode a non-ascii value.
"""
return value.decode('ascii', errors='ignore')
def add_st2actions_pythonrunner_to_sys_path():
"""
Function which adds "st2common.runners.pythonrunner" to sys.path and redirects it to
"st2common.runners.base_action".
First path was deprecated a long time ago, but some modules still rely on on it. This
is to be used in places where "st2common" is used as a standalone package without access to
st2actions (e.g. serverless).
"""
import st2common.runners.base_action
sys.modules['st2actions'] = {}
sys.modules['st2actions.runners'] = {}
sys.modules['st2actions.runners.pythonrunner'] = st2common.runners.base_action
return sys.modules
|
Python
| 0
|
@@ -801,20 +801,8 @@
e.%0A%0A
-import sys%0A%0A
impo
@@ -808,16 +808,16 @@
ort six%0A
+
%0A%0A__all_
@@ -860,54 +860,8 @@
i',%0A
- 'add_st2actions_pythonrunner_to_sys_path'%0A
%5D%0A%0A%0A
@@ -1469,24 +1469,24 @@
ue.%0A %22%22%22%0A
+
return v
@@ -1527,643 +1527,4 @@
e')%0A
-%0A%0Adef add_st2actions_pythonrunner_to_sys_path():%0A %22%22%22%0A Function which adds %22st2common.runners.pythonrunner%22 to sys.path and redirects it to%0A %22st2common.runners.base_action%22.%0A%0A First path was deprecated a long time ago, but some modules still rely on on it. This%0A is to be used in places where %22st2common%22 is used as a standalone package without access to%0A st2actions (e.g. serverless).%0A %22%22%22%0A import st2common.runners.base_action%0A%0A sys.modules%5B'st2actions'%5D = %7B%7D%0A sys.modules%5B'st2actions.runners'%5D = %7B%7D%0A sys.modules%5B'st2actions.runners.pythonrunner'%5D = st2common.runners.base_action%0A%0A return sys.modules%0A
|
a187bd1f89d40d4274f884bba567a2f6be160dcd
|
Remove unintended changes from reverthousekeeping command
|
cla_backend/apps/cla_butler/management/commands/reverthousekeeping.py
|
cla_backend/apps/cla_butler/management/commands/reverthousekeeping.py
|
# coding=utf-8
import os
import logging
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django.core.management.base import BaseCommand
from cla_butler.qs_to_file import QuerysetToFile
from cla_eventlog.models import Log
from cla_provider.models import Feedback
from complaints.models import Complaint
from diagnosis.models import DiagnosisTraversal
from legalaid.models import (
Case,
EligibilityCheck,
CaseNotesHistory,
Person,
Income,
Savings,
Deductions,
PersonalDetails,
ThirdPartyDetails,
AdaptationDetails,
CaseKnowledgebaseAssignment,
EODDetails,
EODDetailsCategory,
Property,
)
from timer.models import Timer
MODELS = [
Deductions,
Income,
Savings,
Person,
AdaptationDetails,
PersonalDetails,
ThirdPartyDetails,
EligibilityCheck,
Property,
DiagnosisTraversal,
Case,
EODDetails,
EODDetailsCategory,
Complaint,
CaseKnowledgebaseAssignment,
Timer,
Feedback,
CaseNotesHistory,
Log,
LogEntry,
]
logger = logging.getLogger("django")
class Command(BaseCommand):
help = "Attempts to re-load data that was deleted in the housekeeping"
def add_arguments(self, parser):
parser.add_argument("directory", nargs=1)
def handle(self, *args, **options):
logger.info("Running monitor_multiple_outcome_codes cron job")
path = os.path.join(settings.TEMP_DIR, args[0])
filewriter = QuerysetToFile(path)
for model in MODELS:
self.stdout.write(model.__name__)
filewriter.load(model)
|
Python
| 0
|
@@ -22,22 +22,8 @@
os%0A
-import logging
%0Afro
@@ -1060,46 +1060,8 @@
%0A%5D%0A%0A
-logger = logging.getLogger(%22django%22)%0A%0A
%0Acla
@@ -1294,79 +1294,8 @@
s):%0A
- logger.info(%22Running monitor_multiple_outcome_codes cron job%22)%0A
|
5ffef1beb126fed15851ddc30ea9fca7edbca017
|
Remove debug code
|
app/soc/modules/gsoc/views/student_forms.py
|
app/soc/modules/gsoc/views/student_forms.py
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the GSoC student forms.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from google.appengine.api import users
from google.appengine.ext import blobstore
from google.appengine.ext import db
from django.forms import fields
from django.core.urlresolvers import reverse
from django.conf.urls.defaults import url
from django.utils import simplejson
from soc.logic import cleaning
from soc.logic import dicts
from soc.views import forms
from soc.models.user import User
from soc.modules.gsoc.models.organization import GSoCOrganization
from soc.modules.gsoc.models.profile import GSoCProfile
from soc.modules.gsoc.models.profile import GSoCStudentInfo
from soc.modules.gsoc.views.base import RequestHandler
from soc.modules.gsoc.views.base_templates import LoggedInMsg
from soc.modules.gsoc.views.helper import url_patterns
class TaxForm(forms.ModelForm):
"""Django form for the student tax form.
"""
class Meta:
model = GSoCStudentInfo
css_prefix = 'student_form'
fields = ['tax_form']
widgets = {}
tax_form = fields.FileField(label='Upload new tax form', required=False)
def __init__(self, data, *args, **kwargs):
super(TaxForm, self).__init__(*args, **kwargs)
self.data = data
def clean_tax_form(self):
uploads = self.data.request.file_uploads
return uploads[0] if uploads else None
class TaxFormPage(RequestHandler):
"""View for the participant profile.
"""
def djangoURLPatterns(self):
return [
url(r'^gsoc/student_forms/tax/%s$' % url_patterns.PROGRAM,
self, name='gsoc_tax_forms'),
]
def checkAccess(self):
self.check.isProfileActive()
def templatePath(self):
return 'v2/modules/gsoc/student_forms/tax.html'
def context(self):
tax_form = TaxForm(self.data, self.data.POST or None,
instance=self.data.student_info)
return {
'page_name': 'Tax form',
'forms': [tax_form],
'error': bool(tax_form.errors),
}
def validate(self):
tax_form = TaxForm(self.data, self.data.POST,
instance=self.data.student_info)
if not tax_form.is_valid():
import logging
logging.warning("Sad")
return False
tax_form.save()
def json(self):
url = self.redirect.program().urlOf('gsoc_tax_forms')
upload_url = blobstore.create_upload_url(url)
self.response.write(upload_url)
def post(self):
validated = self.validate()
self.redirect.program().to('gsoc_tax_forms', validated=validated)
|
Python
| 0.000299
|
@@ -2776,58 +2776,8 @@
():%0A
- import logging%0A logging.warning(%22Sad%22)%0A
|
123401cb6ed88b77d9a584eea8f2de75e518e5da
|
remove try except when hintsvm is not installed
|
libact/query_strategies/__init__.py
|
libact/query_strategies/__init__.py
|
"""
Concrete query strategy classes.
"""
import logging
logger = logging.getLogger(__name__)
from .active_learning_by_learning import ActiveLearningByLearning
try:
from .hintsvm import HintSVM
except ImportError:
logger.warn('HintSVM library not found, not importing.')
from .uncertainty_sampling import UncertaintySampling
from .query_by_committee import QueryByCommittee
from .quire import QUIRE
from .random_sampling import RandomSampling
from .variance_reduction import VarianceReduction
|
Python
| 0
|
@@ -157,17 +157,8 @@
ing%0A
-try:%0A
from
@@ -186,89 +186,8 @@
SVM%0A
-except ImportError:%0A logger.warn('HintSVM library not found, not importing.')%0A
from
|
0e56ed6234e1f28b0aac2e22063bb39faab1d54c
|
use '!XyZZy!' as value to be substituted in metric name
|
librato_python_web/tools/compose.py
|
librato_python_web/tools/compose.py
|
# Copyright (c) 2015. Librato, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Librato, Inc. nor the names of project contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL LIBRATO, INC. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Helper methods to model Librato composite query dsl
METRIC_PREFIX = "XyZZy"
DUMMY_PREFIX = "DUMMY-PREFIX"
DEFAULT_PERIOD = 60
def s_(metric, source="{}-*".format(DUMMY_PREFIX), period=DEFAULT_PERIOD, function="mean"):
return 's("{}.{}", "{}", {{period: "{}", function: "{}"}})'.format(METRIC_PREFIX, metric, source, period, function)
def timeshift_(shift, series):
return 'timeshift("{}", {})'.format(shift, series)
def sum_(*args):
return 'sum([{}])'.format(', '.join(args))
def subtract_(series1, series2):
return 'subtract([{}, {}])'.format(series1, series2)
def multiply_(*args):
return 'multiply([{}])'.format(', '.join(args))
def divide_(series1, series2):
return 'divide([{}, {}])'.format(series1, series2)
def scale_(series, factor):
return 'scale({}, {{factor: "{}"}})'.format(series, factor)
def derive_(series, detect_reset="true"):
return 'derive({}, {{detect_reset: "{}"}})'.format(series, detect_reset)
|
Python
| 0.000231
|
@@ -1610,13 +1610,15 @@
= %22
+!
XyZZy
+!
%22%0ADU
|
a31a8aa7d5ef0fb742f909c09c340c3f54104833
|
clean up comments
|
linkedin/spiders/linkedin_spider.py
|
linkedin/spiders/linkedin_spider.py
|
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from linkedin.items import LinkedinItem
# compile to C using Cython if processing speed becomes a constraint
class LinkedinSpider(CrawlSpider):
"""
Define the crawler's start urls, set its follow rules, parse HTML
and assign values to an item. Processing occurs in ../pipelines.py
"""
name = "linkedin"
allowed_domains = ["linkedin.com"]
# TODO: uncomment following lines for full spidering
'''
centilist_one = (i for i in xrange(1,100))
centilist_two = (i for i in xrange(1,100))
centilist_three = (i for i in xrange(1,100))
start_urls = ["http://www.linkedin.com/directory/people-%s-%d-%d-%d"
% (alphanum, num_one, num_two, num_three)
for alphanum in "abcdefghijklmnopqrstuvwxyz"
for num_one in centilist_one
for num_two in centilist_two
for num_three in centilist_three
]
'''
# temporary start url, remove for production
start_urls = ["http://www.linkedin.com/directory/people-a-23-23-2"]
# TODO: allow /in/name urls too? (LinkedIn custom URLs)
rules = (Rule(SgmlLinkExtractor(allow=('\/pub\/.+', ))
, callback='parse_item'),
)
def parse_item(self, response):
if response:
hxs = HtmlXPathSelector(response)
item = LinkedinItem()
# TODO: update this xpath to include class id
# is this the best way to check that I'm scraping the right page
item['full_name'] = hxs.select('//span/span/text()').extract()
if not item['full_name']:
# recursively parse list of duplicate profiles
# NOTE: Results page only displays 25 of possibly many more names;
# LinkedIn requests authentication to see the rest. Need to resolve
# TODO: add error checking here to ensure I'm getting the right links
multi_profile_urls = hxs.select('//*[@id="result-set"]/li/h2/strong/ \
a/@href').extract()
for profile_url in multi_profile_urls:
yield Request(profile_url, callback=self.parse_item)
else:
# handle cleaning in pipeline
item['first_name'] = item['full_name'][0]
item['last_name'] = item['full_name'][2]
item['full_name'] = hxs.select('//span/span/text()').extract()
item['headline_title'] = hxs.select('//*[@id="member-1"]/p/text() \
').extract()
item['locality'] = hxs.select('//*[@id="headline"]/dd[1]/span/text() \
').extract()
item['industry'] = hxs.select('//*[@id="headline"]/dd[2]/text() \
').extract()
item['current_roles'] = hxs.select('//*[@id="overview"]/dd[1]/ul/li/ \
text()').extract()
# TODO: dynamically check for header of field, assign to object
# via variable
if hxs.select('//*[@id="overview"]/dt[2]/text()\
').extract() == [u' \n Education\n ']:
item['education_institutions'] = hxs.select('//*[@id="overview"]/\
dd[2]/ul/li/text()').extract()
# for debugging
print item
|
Python
| 0
|
@@ -1519,60 +1519,8 @@
m()%0A
- # TODO: update this xpath to include class id%0A
@@ -1585,16 +1585,17 @@
ght page
+?
%0A i
@@ -1619,32 +1619,46 @@
= hxs.select('//
+*%5B@id=%22name%22%5D/
span/span/text()
@@ -1908,16 +1908,60 @@
resolve%0A
+ # Fake account and log-in?%0A %0A
@@ -2030,16 +2030,56 @@
t links%0A
+ # and links from %22next%3E%3E%22 pages%0A
@@ -2347,35 +2347,51 @@
#
-handle cleaning in pipeline
+add meta fields (date crawled/updated, etc)
%0A
@@ -2483,17 +2483,17 @@
_name'%5D%5B
-2
+1
%5D%0A
@@ -2528,16 +2528,30 @@
lect('//
+*%5B@id=%22name%22%5D/
span/spa
|
957e1c2ec602d4ec6aa990cdce4196083f0e5a2d
|
Fix buggy import setup for embedded bot tests.
|
zerver/tests/test_embedded_bot_system.py
|
zerver/tests/test_embedded_bot_system.py
|
# -*- coding: utf-8 -*-
from unittest.mock import patch
from typing import Any, Dict, Tuple, Text, Optional
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import UserProfile, Recipient, get_display_recipient
class TestEmbeddedBotMessaging(ZulipTestCase):
def setUp(self):
# type: () -> None
self.user_profile = self.example_user("othello")
self.bot_profile = self.create_test_bot('embedded-bot@zulip.testserver', self.user_profile, 'Embedded bot',
'embedded', UserProfile.EMBEDDED_BOT, service_name='helloworld')
def test_pm_to_embedded_bot(self):
# type: () -> None
self.send_personal_message(self.user_profile.email, self.bot_profile.email,
content="help")
last_message = self.get_last_message()
self.assertEqual(last_message.content, "beep boop")
self.assertEqual(last_message.sender_id, self.bot_profile.id)
display_recipient = get_display_recipient(last_message.recipient)
# The next two lines error on mypy because the display_recipient is of type Union[Text, List[Dict[str, Any]]].
# In this case, we know that display_recipient will be of type List[Dict[str, Any]].
# Otherwise this test will error, which is wanted behavior anyway.
self.assert_length(display_recipient, 1) # type: ignore
self.assertEqual(display_recipient[0]['email'], self.user_profile.email) # type: ignore
def test_stream_message_to_embedded_bot(self):
# type: () -> None
self.send_stream_message(self.user_profile.email, "Denmark",
content="@**{}** foo".format(self.bot_profile.full_name),
topic_name="bar")
last_message = self.get_last_message()
self.assertEqual(last_message.content, "beep boop")
self.assertEqual(last_message.sender_id, self.bot_profile.id)
self.assertEqual(last_message.subject, "bar")
display_recipient = get_display_recipient(last_message.recipient)
self.assertEqual(display_recipient, "Denmark")
def test_stream_message_not_to_embedded_bot(self):
# type: () -> None
self.send_stream_message(self.user_profile.email, "Denmark",
content="foo", topic_name="bar")
last_message = self.get_last_message()
self.assertEqual(last_message.content, "foo")
class TestEmbeddedBotFailures(ZulipTestCase):
@patch("logging.error")
def test_invalid_embedded_bot_service(self, logging_error_mock):
# type: (mock.Mock) -> None
user_profile = self.example_user("othello")
bot_profile = self.create_test_bot('embedded-bot@zulip.testserver', user_profile, 'Embedded bot',
'embedded', UserProfile.EMBEDDED_BOT, service_name='nonexistent_service')
mention_bot_message = "@**{}** foo".format(bot_profile.full_name)
self.send_stream_message(user_profile.email, "Denmark",
content=mention_bot_message,
topic_name="bar")
last_message = self.get_last_message()
self.assertEqual(last_message.content, mention_bot_message)
|
Python
| 0
|
@@ -35,21 +35,16 @@
test
-.mock
import
patc
@@ -39,21 +39,20 @@
import
-patch
+mock
%0Afrom ty
@@ -2525,16 +2525,21 @@
):%0A @
+mock.
patch(%22l
|
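The record's type comment refers to mock.Mock while only patch was imported, so the name mock was never bound, which is plausibly the "buggy import setup" the subject refers to. The diff imports the module and qualifies the decorator. A minimal sketch of the working style (the patch target and test name are illustrative):

from unittest import mock

@mock.patch('logging.error')
def test_error_is_patched(logging_error_mock):
    import logging
    logging.error('boom')
    logging_error_mock.assert_called_once_with('boom')

test_error_is_patched()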
e4aa61b523dc613ff51c6f2192dabfe6ac28edba
|
update tests
|
custom/enikshay/integrations/ninetyninedots/tests/test_integration.py
|
custom/enikshay/integrations/ninetyninedots/tests/test_integration.py
|
from datetime import datetime
import pytz
from django.test import SimpleTestCase, TestCase
from django.utils.dateparse import parse_datetime
from corehq.form_processor.tests.utils import run_with_all_backends, FormProcessorTestUtils
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from custom.enikshay.integrations.ninetyninedots.views import (
validate_adherence_values,
validate_beneficiary_id
)
from custom.enikshay.case_utils import get_open_episode_case_from_person
from custom.enikshay.integrations.ninetyninedots.utils import (
create_adherence_cases,
update_adherence_confidence_level,
update_default_confidence_level,
)
from custom.enikshay.integrations.ninetyninedots.exceptions import AdherenceException
from custom.enikshay.tests.utils import ENikshayCaseStructureMixin
class Receiver99DotsTests(SimpleTestCase):
def test_validate_patient_adherence_data(self):
with self.assertRaises(AdherenceException) as e:
validate_beneficiary_id(None)
self.assertEqual(e.message, "Beneficiary ID is null")
with self.assertRaises(AdherenceException) as e:
validate_adherence_values(u'123')
self.assertEqual(e.message, "Adherences invalid")
class NinetyNineDotsCaseTests(ENikshayCaseStructureMixin, TestCase):
@classmethod
def setUpClass(cls):
super(NinetyNineDotsCaseTests, cls).setUpClass()
FormProcessorTestUtils.delete_all_cases()
def tearDown(self):
FormProcessorTestUtils.delete_all_cases()
@run_with_all_backends
def test_create_adherence_cases(self):
self.create_case_structure()
case_accessor = CaseAccessors(self.domain)
adherence_values = [
{
"timestamp": "2009-03-05T01:00:01-05:00",
"numberFromWhichPatientDialled": "+910123456789",
"sharedNumber": False,
},
{
"timestamp": "2016-03-05T02:00:01-05:00",
"numberFromWhichPatientDialled": "+910123456787",
"sharedNumber": True,
}
]
create_adherence_cases(self.domain, 'person', adherence_values, adherence_source="99DOTS")
potential_adherence_cases = case_accessor.get_reverse_indexed_cases(['episode'])
adherence_cases = [case for case in potential_adherence_cases if case.type == 'adherence']
self.assertEqual(len(adherence_cases), 2)
adherence_times = [case.dynamic_case_properties().get('adherence_date')
for case in adherence_cases]
self.assertItemsEqual(
[parse_datetime(adherence_time) for adherence_time in adherence_times],
[parse_datetime(adherence_value['timestamp']) for adherence_value in adherence_values]
)
for adherence_case in adherence_cases:
self.assertEqual(
adherence_case.dynamic_case_properties().get('adherence_confidence'),
'high'
)
@run_with_all_backends
def test_update_adherence_confidence(self):
self.create_case_structure()
case_accessor = CaseAccessors(self.domain)
adherence_dates = [
datetime(2005, 7, 10),
datetime(2016, 8, 10),
datetime(2016, 8, 11),
]
adherence_cases = self.create_adherence_cases(adherence_dates)
update_adherence_confidence_level(
self.domain,
self.person_id,
datetime(2016, 8, 10, tzinfo=pytz.UTC),
datetime(2016, 8, 11, tzinfo=pytz.UTC),
"new_confidence_level",
)
adherence_case_ids = [adherence_date.strftime("%Y-%m-%d") for adherence_date in adherence_dates]
adherence_cases = {case.case_id: case for case in case_accessor.get_cases(adherence_case_ids)}
self.assertEqual(
adherence_cases[adherence_case_ids[0]].dynamic_case_properties()['adherence_confidence'],
'medium',
)
self.assertEqual(
adherence_cases[adherence_case_ids[1]].dynamic_case_properties()['adherence_confidence'],
'new_confidence_level',
)
self.assertEqual(
adherence_cases[adherence_case_ids[2]].dynamic_case_properties()['adherence_confidence'],
'new_confidence_level',
)
@run_with_all_backends
def test_update_default_confidence_level(self):
self.create_case_structure()
confidence_level = "new_confidence_level"
update_default_confidence_level(self.domain, self.person_id, confidence_level)
episode = get_open_episode_case_from_person(self.domain, self.person_id)
self.assertEqual(episode.dynamic_case_properties().get('default_adherence_confidence'), confidence_level)
|
Python
| 0.000001
|
@@ -88,57 +88,8 @@
ase%0A
-from django.utils.dateparse import parse_datetime
%0A%0Afr
@@ -2050,16 +2050,228 @@
: True,%0A
+ %7D,%0A %7B%0A %22timestamp%22: %222016-03-05T19:00:01-05:00%22, # next day in india%0A %22numberFromWhichPatientDialled%22: %22+910123456787%22,%0A %22sharedNumber%22: True,%0A
@@ -2620,17 +2620,17 @@
cases),
-2
+3
)%0A
@@ -2641,19 +2641,19 @@
herence_
-tim
+dat
es = %5Bca
@@ -2807,78 +2807,23 @@
-%5Bparse_datetime(adherence_time) for adherence_time in adherence_tim
+adherence_dat
es
-%5D
,%0A
@@ -2837,92 +2837,48 @@
%5B
-parse_datetime(adherence_value%5B'timestamp'%5D) for adherence_value in adherence_values
+'2009-03-05', '2016-03-05', '2016-03-06'
%5D%0A
|
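The updated test covers a timezone day boundary: the added timestamp 2016-03-05T19:00:01-05:00 already falls on 2016-03-06 in India, which is why the expected adherence dates become three distinct days ('2009-03-05', '2016-03-05', '2016-03-06'). A short sketch of that conversion, using the pytz dependency the record already imports:

from datetime import datetime, timedelta, timezone
import pytz

# The timestamp added by the diff, at fixed offset UTC-05:00.
ts = datetime(2016, 3, 5, 19, 0, 1, tzinfo=timezone(timedelta(hours=-5)))
ist = pytz.timezone('Asia/Kolkata')      # UTC+05:30
print(ts.astimezone(ist).date())         # 2016-03-06 -- the "next day in india"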
58f9acb9a75cceba2be708347dac55f28794553f
|
Fix indentation
|
qiita_pet/handlers/study_handlers/artifact.py
|
qiita_pet/handlers/study_handlers/artifact.py
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from tornado.web import authenticated
from qiita_db.util import get_files_from_uploads_folders
from qiita_pet.handlers.util import to_int
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_pet.handlers.api_proxy import (
artifact_graph_get_req, artifact_types_get_req, data_types_get_req,
ena_ontology_get_req, prep_template_post_req, artifact_post_req,
artifact_status_put_req, artifact_get_req, artifact_delete_req)
from qiita_core.util import execute_as_transaction
from qiita_core.qiita_settings import qiita_config
class ArtifactGraphAJAX(BaseHandler):
@authenticated
def get(self):
direction = self.get_argument('direction')
artifact = to_int(self.get_argument('artifact_id'))
self.write(artifact_graph_get_req(artifact, direction,
self.current_user.id))
class NewArtifactHandler(BaseHandler):
@authenticated
def get(self, study_id):
prep_files = [f for _, f in get_files_from_uploads_folders(study_id)
if f.endswith(('txt', 'tsv'))]
artifact_types = artifact_types_get_req()['types']
data_types = sorted(data_types_get_req()['data_types'])
ontology = ena_ontology_get_req()
self.render("study_ajax/add_prep_artifact.html", prep_files=prep_files,
artifact_types=artifact_types, data_types=data_types,
ontology=ontology, study_id=study_id)
@authenticated
@execute_as_transaction
def post(self, study_id):
study_id = int(study_id)
name = self.get_argument('name')
data_type = self.get_argument('data-type')
ena_ontology = self.get_argument('ena-ontology', None)
user_ontology = self.get_argument('user-ontology', None)
new_ontology = self.get_argument('new-ontology', None)
artifact_type = self.get_argument('type')
prep_file = self.get_argument('prep-file')
# Remove known columns, leaving just file types and files
files = self.request.arguments
for arg in ['name', 'data-type', 'ena-ontology', 'user-ontology',
'new-ontology', 'type', 'prep-file']:
files.pop(arg, None)
prep = prep_template_post_req(study_id, self.current_user.id,
prep_file, data_type, ena_ontology,
user_ontology, new_ontology)
if prep['status'] == 'error':
self.write(prep)
return
artifact = artifact_post_req(
self.current_user.id, files, artifact_type, name, prep['id'])
if artifact['status'] == 'success' and prep['status'] != 'warning':
self.write({'status': 'success',
'message': 'Artifact created successfully'})
else:
self.write(prep)
class ArtifactAJAX(BaseHandler):
def get(self):
artifact_id = to_int(self.get_argument('artifact_id'))
name = artifact_get_req(self.current_user.id, artifact_id)['name']
self.write(name)
def post(self):
artifact_id = to_int(self.get_argument('artifact_id'))
self.write(artifact_delete_req(artifact_id, self.current_user.id))
class ArtifactAdminAJAX(BaseHandler):
def get(self):
artifact_id = to_int(self.get_argument('artifact_id'))
info = artifact_get_req(self.current_user.id, artifact_id)
status = info['visibility']
buttons = []
btn_base = ('<button onclick="set_admin_visibility(\'%s\', {0})" '
'class="btn btn-primary">%s</button>').format(artifact_id)
if qiita_config.require_approval:
if status == 'sandbox':
# The request approval button only appears if the processed
                # data is sandboxed and the qiita_config specifies that the
# approval should be requested
buttons.append(
btn_base % ('awaiting_approval', 'Request approval'))
elif self.current_user.level == 'admin' and \
status == 'awaiting_approval':
# The approve processed data button only appears if the user is
# an admin, the processed data is waiting to be approved and
# the qiita config requires processed data approval
buttons.append(btn_base % ('private', 'Approve artifact'))
if status == 'private':
# The make public button only appears if the status is private
buttons.append(btn_base % ('public', 'Make public'))
# The revert to sandbox button only appears if the processed data is
# not sandboxed or public
if status not in {'sandbox', 'public'}:
buttons.append(btn_base % ('sandbox', 'Revert to sandbox'))
# Add EBI and VAMPS submission buttons if allowed
if not info['ebi_run_accessions'] and info['can_submit_ebi']:
buttons.append('<a class="btn btn-primary glyphicon '
'glyphicon-export" href="/ebi_submission/{{ppd_id}}'
'" style="word-spacing: -10px;"> Submit to EBI</a>')
if not info['is_submitted_vamps'] and \
info['can_submit_vamps']:
buttons.append('<a class="btn btn-primary glyphicon '
'glyphicon-export" href="/vamps/{{ppd_id}}" '
'style="word-spacing: -10px;"> Submit to VAMPS</a>')
# Add delete button if in sandbox status
if status == 'sandbox':
buttons = ['<button class="btn btn-danger" '
'onclick="delete_artifact(%d)">Delete Artifact</button>'
% (artifact_id)]
self.write(' '.join(buttons))
def post(self):
visibility = self.get_argument('visibility')
artifact_id = int(self.get_argument('artifact_id'))
response = artifact_status_put_req(artifact_id, self.current_user.id,
visibility)
self.write(response)
|
Python
| 0.017244
|
@@ -3097,36 +3097,32 @@
g':%0A
-
self.write(%7B'sta
@@ -3138,20 +3138,16 @@
ccess',%0A
-
|
65751b4243b3baeb47af36a68a167394cb7d292e
|
fix for python 2.7 not liking yield from
|
Python/Product/TestAdapter/testlauncher.py
|
Python/Product/TestAdapter/testlauncher.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import io
import os
import sys
import traceback
def main():
cwd, testRunner, secret, port, debugger_search_path, mixed_mode, coverage_file, test_file, args = parse_argv()
sys.path[0] = os.getcwd()
os.chdir(cwd)
load_debugger(secret, port, debugger_search_path, mixed_mode)
run(testRunner, coverage_file, test_file, args)
def parse_argv():
"""Parses arguments for use with the test launcher.
Arguments are:
1. Working directory.
2. Test runner, `pytest` or `nose`
3. debugSecret
4. debugPort
5. Debugger search path
6. Mixed-mode debugging (non-empty string to enable, empty string to disable)
7. Enable code coverage and specify filename
8. TestFile, with a list of testIds to run
9. Rest of the arguments are passed into the test runner.
"""
return (sys.argv[1], sys.argv[2], sys.argv[3], int(sys.argv[4]), sys.argv[5], sys.argv[6], sys.argv[7], sys.argv[8], sys.argv[9:])
def load_debugger(secret, port, debugger_search_path, mixed_mode):
# Load the debugger package
try:
if debugger_search_path:
sys.path.append(debugger_search_path)
if secret and port:
# Start tests with legacy debugger
import ptvsd
from ptvsd.debugger import DONT_DEBUG, DEBUG_ENTRYPOINTS, get_code
from ptvsd import enable_attach, wait_for_attach
DEBUG_ENTRYPOINTS.add(get_code(main))
enable_attach(secret, ('127.0.0.1', port), redirect_output = True)
wait_for_attach()
elif port:
# Start tests with new debugger
from ptvsd import enable_attach, wait_for_attach
enable_attach(('127.0.0.1', port), redirect_output = True)
wait_for_attach()
elif mixed_mode:
# For mixed-mode attach, there's no ptvsd and hence no wait_for_attach(),
# so we have to use Win32 API in a loop to do the same thing.
from time import sleep
from ctypes import windll, c_char
while True:
if windll.kernel32.IsDebuggerPresent() != 0:
break
sleep(0.1)
try:
debugger_helper = windll['Microsoft.PythonTools.Debugger.Helper.x86.dll']
except WindowsError:
debugger_helper = windll['Microsoft.PythonTools.Debugger.Helper.x64.dll']
isTracing = c_char.in_dll(debugger_helper, "isTracing")
while True:
if isTracing.value != 0:
break
sleep(0.1)
except:
traceback.print_exc()
print('''
Internal error detected. Please copy the above traceback and report at
https://github.com/Microsoft/vscode-python/issues/new
Press Enter to close. . .''')
try:
raw_input()
except NameError:
input()
sys.exit(1)
def run(testRunner, coverage_file, test_file, args):
"""Runs the test
testRunner -- test runner to be used `pytest` or `nose`
args -- arguments passed into the test runner
"""
if test_file and os.path.exists(test_file):
with io.open(test_file, 'r', encoding='utf-8') as tests:
args.extend(t.strip() for t in tests)
cov = None
try:
if coverage_file:
try:
import coverage
cov = coverage.coverage(coverage_file)
cov.load()
cov.start()
except:
pass
if testRunner == 'pytest':
import pytest
patch_translate_non_printable()
_plugin = TestCollector()
pytest.main(args, [_plugin])
else:
import nose
nose.run(argv=args)
sys.exit(0)
finally:
pass
if cov is not None:
cov.stop()
cov.save()
cov.xml_report(outfile = coverage_file + '.xml', omit=__file__)
#note: this must match adapter\pytest\_discovery.py
def patch_translate_non_printable():
import _pytest.compat
translate_non_printable = getattr(_pytest.compat, "_translate_non_printable")
if translate_non_printable:
def _translate_non_printable_patched(s):
s = translate_non_printable(s)
s = s.replace(':', '/:') # pytest testcase not found error and VS TestExplorer FQN parsing
s = s.replace('.', '_') # VS TestExplorer FQN parsing
s = s.replace('\n', '/n') # pytest testcase not found error
s = s.replace('\\', '/') # pytest testcase not found error, fixes cases (actual backslash followed by n)
s = s.replace('\r', '/r') # pytest testcase not found error
return s
_pytest.compat._translate_non_printable = _translate_non_printable_patched
else:
print("ERROR: failed to patch pytest, _pytest.compat._translate_non_printable")
class TestCollector(object):
"""This is a pytest plugin that prevents notfound errors from ending execution of tests."""
def __init__(self, tests=None):
pass
#Pytest Hook
def pytest_collectstart(self, collector):
self.patch_collect_test_notfound(collector)
def patch_collect_test_notfound(self, collector):
originalCollect = getattr(collector, "collect")
if not originalCollect:
print("ERROR: failed to patch pytest, collector.collect")
pass
# Fix for RunAll in VS, when a single parameterized test isn't found
# Wrap the actual collect() call and clear any _notfound errors to prevent exceptions which skips remaining tests to run
# We still print the same errors to the user
def collectwapper():
yield from originalCollect()
notfound = getattr(collector, '_notfound', [])
if notfound:
for arg, exc in notfound:
line = "(no name {!r} in any of {!r})".format(arg, exc.args[0])
print("ERROR: not found: {}\n{}".format(arg, line))
#clear errors
collector._notfound = []
collector.collect = collectwapper
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -5887,18 +5887,19 @@
-yield from
+for item in
ori
@@ -5912,16 +5912,56 @@
ollect()
+:%0A yield item%0A
%0A
|
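yield from is a SyntaxError on Python 2.7, so the fix unrolls the delegation into an explicit loop. A minimal sketch of the equivalence (generator names are illustrative):

def items():
    yield 1
    yield 2

# Python 3 only:  yield from items()
# Python 2.7-compatible form, as in the diff:
def wrapper():
    for item in items():
        yield item

print(list(wrapper()))   # [1, 2]

Note the loop form only forwards iteration; unlike yield from it does not delegate send()/throw() to the sub-generator, which is fine here because collectwapper() is only ever iterated.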
f379d8ce256159a4fc7ce58abf87c609a4a0c3ab
|
rename present() to _present(), indicating private
|
AlphaTwirl/EventReader/ProgressMonitor.py
|
AlphaTwirl/EventReader/ProgressMonitor.py
|
# Tai Sakuma <sakuma@fnal.gov>
import multiprocessing
import time
from ProgressReport import ProgressReport
##____________________________________________________________________________||
class ProgressReporter(object):
def __init__(self, queue, pernevents = 1000):
self.queue = queue
self.pernevents = pernevents
self.lastReportTime = time.time()
def report(self, event, component):
if not self.needToReport(event, component): return
done = event.iEvent + 1
report = ProgressReport(name = component.name, done = done, total = event.nEvents)
self.queue.put(report)
self.lastReportTime = time.time()
def needToReport(self, event, component):
iEvent = event.iEvent + 1 # add 1 because event.iEvent starts from 0
if time.time() - self.lastReportTime > 0.02: return True
if iEvent % self.pernevents == 0: return True
if iEvent == event.nEvents: return True
return False
##____________________________________________________________________________||
class Queue(object):
def __init__(self, presentation):
self.presentation = presentation
def put(self, report):
self.presentation.present(report)
##____________________________________________________________________________||
class ProgressMonitor(object):
def __init__(self, presentation):
self.queue = Queue(presentation = presentation)
def monitor(self): pass
def createReporter(self):
reporter = ProgressReporter(self.queue)
return reporter
##____________________________________________________________________________||
class MPProgressMonitor(object):
def __init__(self, presentation):
self.queue = multiprocessing.Queue()
self._presentation = presentation
self.lastTime = time.time()
def monitor(self):
if time.time() - self.lastTime < 0.1: return
self.lastTime = time.time()
self.present()
def last(self):
self.present()
def present(self):
while not self.queue.empty():
report = self.queue.get()
self._presentation.present(report)
def createReporter(self):
return ProgressReporter(self.queue)
##____________________________________________________________________________||
|
Python
| 0.003887
|
@@ -1966,32 +1966,33 @@
()%0A self.
+_
present()%0A%0A d
@@ -2015,24 +2015,25 @@
self.
+_
present()%0A%0A
@@ -2039,16 +2039,17 @@
def
+_
present(
|
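The rename adds nothing but a leading underscore: Python has no enforced privacy, so _present() merely signals that it is an internal helper of MPProgressMonitor, while monitor() and last() remain the public entry points. A minimal sketch of the convention (names are illustrative):

class Monitor(object):
    def poll(self):        # public API
        self._drain()

    def _drain(self):      # leading underscore: internal detail, also
        pass               # skipped by 'from module import *'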
afa8fc0e5d9b38e3e65ff70ef5375dad881272ff
|
Use C3DFileAdapter read extension in Python to select different ForceLocations for expressing force-plate data in the read-in forces table.
|
Bindings/Python/tests/test_DataAdapter.py
|
Bindings/Python/tests/test_DataAdapter.py
|
"""
Test DataAdapter interface.
"""
import os, unittest
import opensim as osim
test_dir = os.path.join(os.path.dirname(os.path.abspath(osim.__file__)),
'tests')
class TestDataAdapter(unittest.TestCase):
def test_TRCFileAdapter(self):
adapter = osim.TRCFileAdapter()
table = adapter.read(os.path.join(test_dir,
'futureOrientationInverseKinematics.trc'))
assert table.getNumRows() == 1202
assert table.getNumColumns() == 2
table = adapter.read(os.path.join(test_dir, 'dataWithNaNsOfDifferentCases.trc'))
assert table.getNumRows() == 5
assert table.getNumColumns() == 14
def test_STOFileAdapter(self):
adapter = osim.STOFileAdapter()
table = adapter.read(os.path.join(test_dir, 'subject02_grf_HiFreq.mot'))
assert table.getNumRows() == 439
assert table.getNumColumns() == 18
table = adapter.read(os.path.join(test_dir,
'std_subject01_walk1_ik.mot'))
assert table.getNumRows() == 73
assert table.getNumColumns() == 23
def test_C3DFileAdapter(self):
try:
adapter = osim.C3DFileAdapter()
except AttributeError:
# C3D support not available. OpenSim was not compiled with BTK.
return
tables = adapter.read(os.path.join(test_dir, 'walking2.c3d'))
markers = tables['markers']
forces = tables['forces']
assert markers.getNumRows() == 1249
assert markers.getNumColumns() == 44
assert forces.getNumRows() == 9992
assert forces.getNumColumns() == 6
tables = adapter.read(os.path.join(test_dir, 'walking5.c3d'))
# Marker data read from C3D.
markers = tables['markers']
assert markers.getNumRows() == 1103
assert markers.getNumColumns() == 40
assert markers.getTableMetaDataString('DataRate') == '250.000000'
assert markers.getTableMetaDataString('Units') == 'mm'
# Flatten marker data.
markersFlat = markers.flatten()
assert markersFlat.getNumRows() == 1103
assert markersFlat.getNumColumns() == 40 * 3
# Make sure flattenned marker data is writable/readable to/from file.
markersFilename = 'markers.sto'
stoAdapter = osim.STOFileAdapter()
stoAdapter.write(markersFlat, markersFilename)
markersDouble = stoAdapter.read(markersFilename)
assert markersDouble.getNumRows() == 1103
assert markersDouble.getNumColumns() == 40 * 3
# Forces data read from C3d.
forces = tables['forces']
assert forces.getNumRows() == 8824
assert forces.getNumColumns() == 6
assert forces.getTableMetaDataString('DataRate') == '2000.000000'
assert forces.getTableMetaDataVectorUnsigned('Types') == (2, 2)
fpCalMats = forces.getTableMetaDataVectorMatrix("CalibrationMatrices")
assert len(fpCalMats) == 2
assert fpCalMats[0].nrow() == 6
assert fpCalMats[0].ncol() == 6
assert fpCalMats[1].nrow() == 6
assert fpCalMats[1].ncol() == 6
fpCorners = forces.getTableMetaDataVectorMatrix("Corners")
assert len(fpCorners) == 2
assert fpCorners[0].nrow() == 3
assert fpCorners[0].ncol() == 4
assert fpCorners[1].nrow() == 3
assert fpCorners[1].ncol() == 4
fpOrigins = forces.getTableMetaDataVectorMatrix("Origins")
assert len(fpOrigins) == 2
assert fpOrigins[0].nrow() == 3
assert fpOrigins[0].ncol() == 1
assert fpOrigins[1].nrow() == 3
assert fpOrigins[1].ncol() == 1
assert forces.getDependentsMetaDataString('units') == ('N', 'mm', 'Nmm',
'N', 'mm', 'Nmm')
# Flatten forces data.
forcesFlat = forces.flatten()
assert forcesFlat.getNumRows() == 8824
assert forcesFlat.getNumColumns() == 6 * 3
# Make sure flattenned forces data is writable/readable to/from file.
forcesFilename = 'forces.sto'
stoAdapter.write(forcesFlat, forcesFilename)
forcesDouble = stoAdapter.read(forcesFilename)
assert forcesDouble.getNumRows() == 8824
assert forcesDouble.getNumColumns() == 6 * 3
# Clean up.
os.remove(markersFilename)
os.remove(forcesFilename)
|
Python
| 0
|
@@ -1428,16 +1428,19 @@
g2.c3d')
+, 0
)%0A
@@ -1764,16 +1764,19 @@
g5.c3d')
+, 1
)%0A%0A
|
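The diff appends an integer to each C3DFileAdapter.read() call, which per the subject selects the ForceLocation used to express force-plate data in the returned forces table. A hedged sketch of the call shape; the meaning of the integer values is an assumption based on the subject line, not confirmed API documentation:

import opensim as osim

adapter = osim.C3DFileAdapter()
# Second argument presumably picks the ForceLocation; 0 and 1 mirror the
# values the diff adds, but their exact enum meanings are an assumption.
tables = adapter.read('walking5.c3d', 1)
forces = tables['forces']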
68fe7ecadeda267b5645fd804bb7bbf29afa3667
|
add docstring
|
corehq/apps/cleanup/management/commands/delete_es_docs_in_domain.py
|
corehq/apps/cleanup/management/commands/delete_es_docs_in_domain.py
|
from django.core.management import BaseCommand, CommandError
from corehq.apps.domain.models import Domain
from corehq.apps.es import AppES, CaseES, CaseSearchES, FormES, GroupES, UserES
from corehq.apps.es.registry import registry_entry
from corehq.apps.es.transient_util import doc_adapter_from_info
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def handle(self, domain, **options):
domain_obj = Domain.get_by_name(domain)
if domain_obj and not domain_obj.doc_type.endswith('-Deleted'):
raise CommandError(
f"{domain} has not been deleted. This command is intended for use on deleted domains only."
)
for hqESQuery in [AppES, CaseES, CaseSearchES, FormES, GroupES, UserES]:
doc_ids = hqESQuery().domain(domain).source(['_id']).run().hits
doc_ids = [doc['_id'] for doc in doc_ids]
if not doc_ids:
continue
adapter = doc_adapter_from_info(registry_entry(hqESQuery.index))
adapter.bulk_delete(doc_ids)
|
Python
| 0.000005
|
@@ -324,16 +324,141 @@
ommand):
+%0A %22%22%22%0A Intended for use in the event that a domain has been deleted, but ES docs have not been fully cleaned up%0A %22%22%22
%0A%0A de
|
50e729173ba49f0c95ae98266caca16b119f481a
|
add 3.5.0 (#4375)
|
var/spack/repos/builtin/packages/arpack-ng/package.py
|
var/spack/repos/builtin/packages/arpack-ng/package.py
|
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class ArpackNg(Package):
"""ARPACK-NG is a collection of Fortran77 subroutines designed to solve large
scale eigenvalue problems.
Important Features:
* Reverse Communication Interface.
* Single and Double Precision Real Arithmetic Versions for Symmetric,
Non-symmetric, Standard or Generalized Problems.
* Single and Double Precision Complex Arithmetic Versions for Standard or
Generalized Problems.
* Routines for Banded Matrices - Standard or Generalized Problems.
* Routines for The Singular Value Decomposition.
* Example driver routines that may be used as templates to implement
numerous Shift-Invert strategies for all problem types, data types and
precision.
This project is a joint project between Debian, Octave and Scilab in order
to provide a common and maintained version of arpack.
Indeed, no single release has been published by Rice university for the
last few years and since many software (Octave, Scilab, R, Matlab...)
forked it and implemented their own modifications, arpack-ng aims to tackle
this by providing a common repository and maintained versions.
arpack-ng is replacing arpack almost everywhere.
"""
homepage = 'https://github.com/opencollab/arpack-ng'
url = 'https://github.com/opencollab/arpack-ng/archive/3.3.0.tar.gz'
version('3.4.0', 'ae9ca13f2143a7ea280cb0e2fd4bfae4')
version('3.3.0', 'ed3648a23f0a868a43ef44c97a21bad5')
variant('shared', default=True,
description='Enables the build of shared libraries')
variant('mpi', default=True, description='Activates MPI support')
# The function pdlamch10 does not set the return variable.
# This is fixed upstream
# see https://github.com/opencollab/arpack-ng/issues/34
patch('pdlamch10.patch', when='@3.3.0')
patch('make_install.patch', when='@3.4.0')
patch('parpack_cmake.patch', when='@3.4.0')
depends_on('blas')
depends_on('lapack')
depends_on('automake', when='@3.3.0', type='build')
depends_on('autoconf', when='@3.3.0', type='build')
depends_on('libtool@2.4.2:', when='@3.3.0', type='build')
depends_on('cmake@2.8.6:', when='@3.4.0:', type='build')
depends_on('mpi', when='+mpi')
@property
def libs(self):
# TODO: do we need spec['arpack-ng:parallel'].libs ?
# query_parameters = self.spec.last_query.extra_parameters
libraries = ['libarpack']
if '+mpi' in self.spec:
libraries = ['libparpack'] + libraries
return find_libraries(
libraries, root=self.prefix, shared=True, recurse=True
)
@when('@3.4.0:')
def install(self, spec, prefix):
options = ['-DEXAMPLES=ON']
options.extend(std_cmake_args)
options.append('-DCMAKE_INSTALL_NAME_DIR:PATH=%s/lib' % prefix)
# Make sure we use Spack's blas/lapack:
lapack_libs = spec['lapack'].libs.joined(';')
blas_libs = spec['blas'].libs.joined(';')
options.extend([
'-DLAPACK_FOUND=true',
'-DLAPACK_INCLUDE_DIRS={0}'.format(spec['lapack'].prefix.include),
'-DLAPACK_LIBRARIES={0}'.format(lapack_libs),
'-DBLAS_FOUND=true',
'-DBLAS_INCLUDE_DIRS={0}'.format(spec['blas'].prefix.include),
'-DBLAS_LIBRARIES={0}'.format(blas_libs)
])
if '+mpi' in spec:
options.append('-DMPI=ON')
# TODO: -DINTERFACE64=ON
if '+shared' in spec:
options.append('-DBUILD_SHARED_LIBS=ON')
cmake('.', *options)
make()
if self.run_tests:
make('test')
make('install')
@when('@3.3.0') # noqa
def install(self, spec, prefix):
# Apparently autotools are not bootstrapped
which('libtoolize')()
bootstrap = Executable('./bootstrap')
options = ['--prefix=%s' % prefix]
if '+mpi' in spec:
options.extend([
'--enable-mpi',
'F77=%s' % spec['mpi'].mpif77
])
options.extend([
'--with-blas={0}'.format(spec['blas'].libs.ld_flags),
'--with-lapack={0}'.format(spec['lapack'].libs.ld_flags)
])
if '+shared' not in spec:
options.append('--enable-shared=no')
bootstrap()
configure(*options)
make()
if self.run_tests:
make('check')
make('install')
|
Python
| 0.000001
|
@@ -2596,16 +2596,73 @@
ar.gz'%0A%0A
+ version('3.5.0', '9762c9ae6d739a9e040f8201b1578874')%0A
vers
|
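Adding a release to a Spack package is a single version() directive pairing the version string with a checksum of the tarball; Spack substitutes the version into the url template to fetch it. A minimal sketch of the pattern (package name and hashes are illustrative):

from spack import *

class Example(Package):
    homepage = 'https://example.org'
    url      = 'https://example.org/archive/1.0.0.tar.gz'

    # Newest first, matching the records here. Older Spack accepted a bare
    # md5 positionally (as the arpack-ng record does); sha256= is the
    # keyword form used by the py-jinja2 record below.
    version('1.1.0', sha256='e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
    version('1.0.0', 'd41d8cd98f00b204e9800998ecf8427e')

The same one-line pattern explains the py-jinja2 diff in the next record.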
bff3a087ec70ab07fe163394826a41c33f6bc38f
|
Add extra version of py-jinja2 (#14989)
|
var/spack/repos/builtin/packages/py-jinja2/package.py
|
var/spack/repos/builtin/packages/py-jinja2/package.py
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJinja2(PythonPackage):
"""Jinja2 is a template engine written in pure Python. It provides
a Django inspired non-XML syntax but supports inline expressions
and an optional sandboxed environment."""
homepage = "https://palletsprojects.com/p/jinja/"
url = "https://pypi.io/packages/source/J/Jinja2/Jinja2-2.10.3.tar.gz"
import_modules = ['jinja2']
version('2.10.3', sha256='9fe95f19286cfefaa917656583d020be14e7859c6b0252588391e47db34527de')
version('2.10', sha256='f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4')
version('2.9.6', sha256='ddaa01a212cd6d641401cb01b605f4a4d9f37bfc93043d7f760ec70fb99ff9ff')
version('2.8', sha256='bc1ff2ff88dbfacefde4ddde471d1417d3b304e8df103a7a9437d47269201bf4')
version('2.7.3', sha256='2e24ac5d004db5714976a04ac0e80c6df6e47e98c354cb2c0d82f8879d4f8fdb')
version('2.7.2', sha256='310a35fbccac3af13ebf927297f871ac656b9da1d248b1fe6765affa71b53235')
version('2.7.1', sha256='5cc0a087a81dca1c08368482fb7a92fe2bdd8cfbb22bc0fccfe6c85affb04c8b')
version('2.7', sha256='474f1518d189ae7e318b139fecc1d30b943f124448cfa0f09582ca23e069fa4d')
depends_on('py-setuptools', type='build')
depends_on('py-markupsafe@0.23:', type=('build', 'run'))
depends_on('py-babel@0.8:', type=('build', 'run')) # optional, required for i18n
|
Python
| 0
|
@@ -695,16 +695,113 @@
527de')%0A
+ version('2.10.1', sha256='065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013')%0A
vers
|
45390a88b94aad6d7f04424bf8e1e9b8bbbe424b
|
Test new helper get_schema_or_template.
|
boardinghouse/tests/test_schema_creation.py
|
boardinghouse/tests/test_schema_creation.py
|
from django.test import TestCase
from django.db import connection
from django import forms
from ..models import Schema, template_schema
from ..schema import get_schema, activate_schema, deactivate_schema
SCHEMA_QUERY = "SELECT schema_name FROM information_schema.schemata WHERE schema_name = %s"
TABLE_QUERY = "SELECT table_name FROM information_schema.tables WHERE table_schema = %s AND table_name = %s"
class TestPostgresSchemaCreation(TestCase):
def test_schema_table_is_in_public(self):
deactivate_schema()
cursor = connection.cursor()
table_name = Schema._meta.db_table
cursor.execute(TABLE_QUERY, ['public', table_name])
data = cursor.fetchone()
self.assertEquals((table_name,), data)
cursor.close()
def test_template_schema_is_created(self):
cursor = connection.cursor()
cursor.execute(SCHEMA_QUERY, ['__template__'])
data = cursor.fetchone()
self.assertEquals(('__template__',), data)
cursor.close()
def test_schema_object_creation_creates_schema(self):
Schema.objects.create(name="Test Schema", schema="test_schema")
cursor = connection.cursor()
cursor.execute(SCHEMA_QUERY, ['test_schema'])
data = cursor.fetchone()
self.assertEquals(('test_schema',), data)
cursor.close()
def test_schema_object_creation_does_not_leak_between_tests(self):
cursor = connection.cursor()
cursor.execute(SCHEMA_QUERY, ['test_schema'])
data = cursor.fetchone()
self.assertEquals(None, data)
cursor.close()
def test_schema_creation_clones_template(self):
template_schema.activate()
cursor = connection.cursor()
cursor.execute("CREATE TABLE foo (id SERIAL NOT NULL PRIMARY KEY)")
dup = Schema.objects.create(name="Duplicate", schema='duplicate')
cursor.execute(TABLE_QUERY, ['duplicate', 'foo'])
data = cursor.fetchone()
self.assertEquals(('foo',), data)
cursor.close()
def test_bulk_create_creates_schemata(self):
schemata = ['first', 'second', 'third']
created = Schema.objects.bulk_create([
Schema(name=x, schema=x) for x in schemata
])
cursor = connection.cursor()
for schema in schemata:
activate_schema(schema)
cursor.execute(SCHEMA_QUERY, [schema])
data = cursor.fetchone()
self.assertEquals((schema,), data)
cursor.close()
def test_mass_create(self):
Schema.objects.mass_create('a','b','c')
self.assertEquals(
['a','b','c'],
list(Schema.objects.values_list('schema', flat=True))
)
class TestSchemaClassValidationLogic(TestCase):
def test_ensure_schema_model_is_not_schema_aware(self):
self.assertFalse(Schema._is_schema_aware)
self.assertFalse(Schema()._is_schema_aware)
def test_schema_schema_validation_rejects_invalid_chars(self):
self.assertRaises(forms.ValidationError, Schema.objects.create, schema='_foo', name="1")
self.assertRaises(forms.ValidationError, Schema.objects.create, schema='-foo', name="2")
self.assertRaises(forms.ValidationError, Schema.objects.create, schema='a'*37, name="3")
self.assertRaises(forms.ValidationError, Schema.objects.create, schema='foo1', name="4")
self.assertRaises(forms.ValidationError, Schema.objects.create, schema='Foo', name="5")
def test_schema_validation_allows_valid_chars(self):
Schema.objects.create(schema='foo', name="Foo 1")
Schema.objects.create(schema='a'*36, name="Foo 2")
Schema.objects.create(schema='foo_bar', name="Foo 3")
def test_schema_rejects_duplicate_values(self):
Schema.objects.create(schema='foo', name="Foo")
self.assertRaises(forms.ValidationError, Schema.objects.create, schema='foo_bar', name="Foo")
self.assertRaises(forms.ValidationError, Schema.objects.create, schema='foo', name="Foo 2")
def test_schema_rejects_schema_change(self):
schema = Schema.objects.create(schema='foo', name="Foo")
schema.name = "Bar"
schema.save()
schema.schema = 'bar'
self.assertRaises(forms.ValidationError, schema.save)
class TestGetSearchPath(TestCase):
def test_default_search_path(self):
self.assertEquals(None, get_schema())
def test_activate_schema_sets_search_path(self):
schema = Schema.objects.create(name='a', schema='a')
schema.activate()
self.assertEquals(schema, get_schema())
def test_deactivate_schema_resets_search_path(self):
schema = Schema.objects.create(name='a', schema='a')
schema.activate()
schema.deactivate()
self.assertEquals(None, get_schema())
|
Python
| 0
|
@@ -151,16 +151,22 @@
import
+(%0A
get_sche
@@ -168,16 +168,40 @@
_schema,
+ get_schema_or_template,
activat
@@ -227,16 +227,18 @@
e_schema
+%0A)
%0A%0ASCHEMA
@@ -4869,28 +4869,409 @@
rtEquals(None, get_schema())
+%0A %0A def test_get_schema_or_template_helper(self):%0A schema = Schema.objects.create(name='a', schema='a')%0A self.assertEquals('__template__', get_schema_or_template())%0A %0A schema.activate()%0A self.assertEquals('a', get_schema_or_template())%0A %0A schema.deactivate()%0A self.assertEquals('__template__', get_schema_or_template())
|
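The new assertions pin down the helper's contract: with no schema active, get_schema_or_template() falls back to '__template__'; with one active, it returns the schema name string (note that get_schema() itself returns the Schema object in these tests). A minimal sketch of an implementation satisfying the assertions, inferred from the test rather than taken from the boardinghouse source:

def get_schema_or_template():
    schema = get_schema()                 # helper already imported by the test
    return schema.schema if schema else '__template__'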
1fc208322381438a9bbcb9debcdc0f53a5c52932
|
Create W_layer_1 and b_layer_1 with shape
|
TFBoost/TFModels.py
|
TFBoost/TFModels.py
|
"""
Author: @gabvaztor
StartDate: 04/03/2017
This file contains samples and overrides deep learning algorithms.
"""
"""
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# IMPORTS
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
"""
'''LOCAL IMPORTS
'''
import UsefulTools.UtilsFunctions as uf
from TFBoost.TFEncoder import Dictionary as dict
''' TensorFlow: https://www.tensorflow.org/
To upgrade TensorFlow to last version:
*CPU: pip3 install --upgrade tensorflow
*GPU: pip3 install --upgrade tensorflow-gpu
'''
import tensorflow as tf
print("TensorFlow: " + tf.__version__)
''' Numpy is an extension to the Python programming language, adding support for large,
multi-dimensional arrays and matrices, along with a large library of high-level
mathematical functions to operate on these arrays.
It is mandatory to install 'Numpy+MKL' before scipy.
Install 'Numpy+MKL' from here: http://www.lfd.uci.edu/~gohlke/pythonlibs/#numpy
http://www.numpy.org/
https://en.wikipedia.org/wiki/NumPy '''
import numpy as np
'''
# You need to install the 64bit version of Scipy, at least on Windows.
# It is mandatory to install 'Numpy+MKL' before scipy.
# http://www.lfd.uci.edu/~gohlke/pythonlibs/#numpy
# We can find scipi in the url: http://www.lfd.uci.edu/~gohlke/pythonlibs/#scipy'''
import scipy.io as sio
''' Matlab URL: http://matplotlib.org/users/installing.html'''
import matplotlib.pyplot as plt
''' TFLearn library. License MIT.
Git Clone : https://github.com/tflearn/tflearn.git
To install: pip install tflearn'''
import tflearn
"""
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# ---- GLOBAL VARIABLES ----
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
"""
class Models():
# TODO Docs
"""
This class
"""
# TODO Implement deep learning algorithms
# TODO Use tflearn to use basics algorithms
def lineal_model_basic_with_gradient_descent(self, input, test, input_labels, test_labels,number_of_inputs,number_of_classes,
learning_rate = 0.001,trains = 100, type = None ,validation = None,
validation_labels = None, deviation = None):
"""
This method doesn't do softmax.
:param input: Input data
:param validation: Validation data
:param test: Test data
:param type: Type of data (float32, float16, ...)
:param trains: Number of trains for epoch
:param number_of_inputs: Represents the number of records in input data
:param number_of_classes: Represents the number of labels in data
:param deviation: Number of the deviation for the weights and bias
:return:
"""
# TODO Do general
x = tf.placeholder(shape=[None,number_of_classes])
y_ = tf.placeholder([None, number_of_classes])
W = tf.Variable(tf.zeros([number_of_inputs, number_of_classes]))
b = tf.Variable(tf.zeros([number_of_classes]))
y = tf.matmul(x, W) + b
cross_entropy_lineal = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y,y_))
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = cross_entropy_lineal.minimize(cross_entropy_lineal)
# TODO Error
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# TODO Train for epoch and training number
for i in range(trains):
pass
def convolution_model(self, input, test, input_labels, test_labels,number_of_inputs,number_of_classes,
learning_rate = 0.001,trains = 100, type = None ,validation = None,
validation_labels = None, deviation = None):
"""
:return:
"""
x = tf.placeholder(shape=[None,number_of_classes])
y_ = tf.placeholder([None, number_of_classes])
# TODO Create an simple but generic convolutional model to analyce sets.
def weight_variable(self,shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
|
Python
| 0.000074
|
@@ -4224,32 +4224,205 @@
urn:%0A %22%22%22
+%0A # TODO Create an simple but generic convolutional model to analyce sets.%0A # TODO Define firstLabelNeurons%0A firstLabelNeurons = 8 # First label neurons
%0A%0A x = tf
@@ -4522,18 +4522,16 @@
sses%5D)%0A%0A
-%0A%0A
@@ -4541,73 +4541,138 @@
ODO
-Create an simple but generic convolutional model to analyce sets.
+Define shape%0A W_layer_1 = self.weight_variable(%5B5, 5, 1, firstLabelNeurons%5D)%0A b_layer_1 = self.bias_variable(%5B32%5D)%0A%0A
%0A%0A
@@ -4822,16 +4822,21 @@
ariable(
+self,
shape):%0A
|
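The diff instantiates the first convolutional layer's parameters: a 5x5 kernel over 1 input channel producing firstLabelNeurons feature maps. Note the diff pairs weight_variable([5, 5, 1, firstLabelNeurons]) with bias_variable([32]) even though firstLabelNeurons is 8; the sketch below keeps the shapes aligned, since each output channel needs exactly one bias. A minimal TF 1.x-style sketch matching the record's API:

import tensorflow as tf   # TF 1.x API, as used in the record

def weight_variable(shape):
    # Small truncated-normal noise breaks symmetry between units.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

first_label_neurons = 8
# [filter_height, filter_width, in_channels, out_channels]
W_layer_1 = weight_variable([5, 5, 1, first_label_neurons])
b_layer_1 = bias_variable([first_label_neurons])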
2b8716f5a1f0e1f147b6bbda3e45e4abec59811d
|
fix TB in indexing debug toolbar
|
abilian/services/indexing/debug_toolbar.py
|
abilian/services/indexing/debug_toolbar.py
|
# coding=utf-8
"""
"""
from __future__ import absolute_import
from flask import current_app
from flask_debugtoolbar.panels import DebugPanel
from abilian.core.util import fqcn
from abilian.i18n import _
from abilian.web.action import actions
class IndexedTermsDebugPanel(DebugPanel):
"""
A panel to display term values found in index for "current" object
FIXME: this notion of "current" object should formalized in
abilian.app.Application
"""
name = 'IndexedTerms'
@property
def current_obj(self):
return actions.context.get('object')
@property
def has_content(self):
obj = self.current_obj
return (obj is not None
and hasattr(obj, 'object_type')
and hasattr(obj, 'id')
and obj.id is not None)
def nav_title(self):
return _('Indexed Terms')
def nav_subtitle(self):
"""Subtitle showing until title in toolbar"""
obj = self.current_obj
if not obj:
return _(u'No current object')
try:
return u'{}(id={})'.format(obj.__class__.__name__, obj.id)
except:
return u''
def title(self):
return _('Indexed Terms')
def url(self):
return ''
def content(self):
obj = self.current_obj
svc = current_app.services['indexing']
index = svc.app_state.indexes['default']
schema = index.schema
context = self.context.copy()
context['schema'] = schema
context['sorted_fields'] = sorted(schema.names())
adapter = svc.adapted.get(fqcn(obj.__class__))
if adapter and adapter.indexable:
doc = context['current_document'] = svc.get_document(obj, adapter)
indexed = {}
for name, field in schema.items():
value = doc.get(name)
indexed[name] = None
if value and field.format:
indexed[name] = list(field.process_text(value))
context['current_indexed'] = indexed
context['current_keys'] = sorted(set(doc) | set(indexed))
with index.searcher() as search:
document = search.document(object_key=obj.object_key)
sorted_keys = sorted(document) if document is not None else None
context.update({
'document': document,
'sorted_keys': sorted_keys,
})
jinja_env = current_app.jinja_env
jinja_env.filters.update(self.jinja_env.filters)
template = jinja_env.get_or_select_template(
'debug_panels/indexing_panel.html'
)
return template.render(context)
|
Python
| 0
|
@@ -1748,16 +1748,35 @@
lue and
+field.analyzer and
field.fo
|
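The one-line guard prevents the traceback: Whoosh's FieldType.process_text() raises on fields that have no analyzer (e.g. stored-only fields), so the panel now checks field.analyzer before tokenizing. A minimal sketch of the guarded loop, assuming a Whoosh schema like the one the indexing service uses:

from whoosh.fields import Schema, TEXT, STORED

schema = Schema(body=TEXT(), raw=STORED())    # STORED has no analyzer
doc = {'body': u'some indexed text', 'raw': u'opaque blob'}

indexed = {}
for name, field in schema.items():
    value = doc.get(name)
    indexed[name] = None
    # Guard on the analyzer, as in the diff: process_text() would raise
    # for analyzer-less fields such as STORED.
    if value and getattr(field, 'analyzer', None) and field.format:
        indexed[name] = list(field.process_text(value))
print(indexed)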
9e6c05a4f5e460d13558d99de3e97e1c31af2d2e
|
fix bug where only the first cookie gets sent. closes #1.
|
livetest/__init__.py
|
livetest/__init__.py
|
"""
LiveTest - Like WebTest, but on a live site.
Setup an app to test against with just a hostname:
>>> import livetest
>>> app = livetest.TestApp('www.google.com')
Make requests just like WebTest:
>>> resp = app.get('/')
Grab forms:
>>> resp.forms # doctest: +ELLIPSIS
{0: <webtest.Form object at 0x...>}
>>> form = resp.forms[0]
>>> form.fields # doctest: +ELLIPSIS
{'btnI': [<webtest.Submit object at 0x...>], 'btnG': [<webtest.Submit object at 0x...>], 'q': [<webtest.Text object at 0x...>], 'source': [<webtest.Hidden object at 0x...>], 'hl': [<webtest.Hidden object at 0x...>], 'ie': [<webtest.Hidden object at 0x...>]}
Submit forms:
>>> form['q'] = 'python testing'
>>> resp = form.submit()
Test stuff in the response:
>>> resp.mustcontain('Agile', 'unittest', 'PyUnit')
>>> resp.status
'200 OK'
"""
__author__ = 'scott@crookedmedia.com'
import sys
import webtest
import httplib
import urlparse
from webtest import BaseCookie, CookieError
conn_classes = {'http': httplib.HTTPConnection,
'https': httplib.HTTPSConnection}
class TestApp(webtest.TestApp):
def _load_conn(self, scheme):
if scheme in conn_classes:
self.conn[scheme] = conn_classes[scheme](self.host)
else:
raise ValueError("Scheme '%s' is not supported." % scheme)
def __init__(self, host, scheme='http'):
self.host = host
self.conn = {}
self._load_conn(scheme)
self.extra_environ = {}
self.reset()
def _do_httplib_request(self, req):
"Convert WebOb Request to httplib request."
headers = dict((name, val) for name, val in req.headers.iteritems()
if name != 'Host')
if req.scheme not in self.conn:
self._load_conn(req.scheme)
conn = self.conn[req.scheme]
conn.request(req.method, req.path_qs, req.body, headers)
webresp = conn.getresponse()
res = webtest.TestResponse()
res.status = '%s %s' % (webresp.status, webresp.reason)
res.body = webresp.read()
res.headerlist = webresp.getheaders()
res.errors = ''
return res
def do_request(self, req, status, expect_errors):
"""
Override webtest.TestApp's method so that we do real HTTP requests
instead of WSGI calls.
"""
headers = {}
if self.cookies:
c = BaseCookie()
for name, value in self.cookies.items():
c[name] = value
req.headers['Cookie'] = str(c).split(': ', 1)[1]
res = self._do_httplib_request(req)
# Set these attributes for consistency with webtest.
res.request = req
res.test_app = self
if not expect_errors:
self._check_status(res.status_int, res)
self._check_errors(res)
res.cookies_set = {}
for header in res.headers.getall('set-cookie'):
try:
c = BaseCookie(header)
except CookieError, e:
raise CookieError(
"Could not parse cookie header %r: %s" % (header, e))
for key, morsel in c.items():
self.cookies[key] = morsel.value
res.cookies_set[key] = morsel.value
return res
def goto(self, href, method='get', **args):
"""
Monkeypatch the TestResponse.goto method so that it doesn't wipe out the
scheme and host.
"""
scheme, host, path, query, fragment = urlparse.urlsplit(href)
# We
fragment = ''
href = urlparse.urlunsplit((scheme, host, path, query, fragment))
href = urlparse.urljoin(self.request.url, href)
method = method.lower()
assert method in ('get', 'post'), (
'Only "get" or "post" are allowed for method (you gave %r)'
% method)
if method == 'get':
method = self.test_app.get
else:
method = self.test_app.post
return method(href, **args)
webtest.TestResponse.goto = goto
|
Python
| 0
|
@@ -2499,56 +2499,111 @@
-req.headers%5B'Cookie'%5D = str(c).split(': ', 1)%5B1%5D
+hc = '; '.join(%5B'='.join(%5Bm.key, m.value%5D) for m in c.values()%5D)%0A req.headers%5B'Cookie'%5D = hc
%0A%0A
|
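The root cause: str(BaseCookie) renders one 'Set-Cookie: name=value' line per morsel, joined with CRLF, so the old split(': ', 1)[1] kept only the first pair intact and leaked 'Set-Cookie:' fragments into the header. The fix assembles the request-side Cookie header directly. A minimal Python 2 sketch (the record imports the same BaseCookie class via webtest):

from Cookie import BaseCookie   # Python 2 stdlib

c = BaseCookie()
c['a'] = '1'
c['b'] = '2'

print(str(c))
# Set-Cookie: a=1
# Set-Cookie: b=2   <- split(': ', 1)[1] mangles everything after 'a=1'

header = '; '.join('='.join([m.key, m.value]) for m in c.values())
print(header)                    # a=1; b=2 -- a single valid Cookie header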