| diff (string, lengths 139–3.65k) | message (string, lengths 8–627) | diff_languages (string, 1 class: py) |
|---|---|---|
diff --git a/spinoff/actor/remoting.py b/spinoff/actor/remoting.py
index <HASH>..<HASH> 100644
--- a/spinoff/actor/remoting.py
+++ b/spinoff/actor/remoting.py
@@ -184,11 +184,11 @@ class Hub(Logging):
conn.state = 'reverse-radiosilence' if msg == PING else 'visible'
if prevstate != conn.state:
self.dbg("%s went %s => %s" % (sender_addr, prevstate, conn.state))
- if prevstate != 'visible' and conn.state == 'visible':
- while conn.queue:
- (ref, queued_msg), _ = conn.queue.popleft()
- assert ref.uri.root.url == sender_addr
- self.outgoing.sendMsg((sender_addr, dumps((ref.uri.path, queued_msg), protocol=2)))
+ if conn.state == 'visible':
+ while conn.queue:
+ (ref, queued_msg), _ = conn.queue.popleft()
+ assert ref.uri.root.url == sender_addr
+ self.outgoing.sendMsg((sender_addr, dumps((ref.uri.path, queued_msg), protocol=2)))
def _connect(self, addr, conn):
assert _valid_addr(addr)
|
Simplification/optimisation in Hub._got_message
|
py
|
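A minimal sketch of the pattern this change keeps: drain a connection's send queue whenever the peer is visible. When the queue is already empty the loop is a no-op, which is why the `prevstate` transition guard could be dropped. Names below are illustrative stand-ins, not the spinoff API.

```python
from collections import deque

# stand-ins for the Hub's per-connection queue and state
queue = deque([(("ref-a", "hello"), None), (("ref-b", "world"), None)])
state = "visible"

# flush queued messages whenever the peer is visible; an empty queue
# makes this a no-op, so no state-transition check is needed
if state == "visible":
    while queue:
        (ref, queued_msg), _ = queue.popleft()
        print("sending", queued_msg, "to", ref)
```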
diff --git a/lambda_uploader/uploader.py b/lambda_uploader/uploader.py
index <HASH>..<HASH> 100644
--- a/lambda_uploader/uploader.py
+++ b/lambda_uploader/uploader.py
@@ -33,7 +33,6 @@ def upload_package(pkg, config):
existing_function = False
LOG.debug("function not found creating new function")
- response = ''
if existing_function:
LOG.debug('running update_function_code')
response = client.update_function_code(
@@ -41,6 +40,17 @@ def upload_package(pkg, config):
ZipFile=zip_file,
Publish=config.publish,
)
+ LOG.debug("AWS update_function_code response: %s" % response)
+ LOG.debug('running update_function_configuration')
+ response = client.update_function_configuration(
+ FunctionName=config.name,
+ Handler=config.handler,
+ Role=config.role,
+ Description=config.description,
+ Timeout=config.timeout,
+ MemorySize=config.memory,
+ )
+ LOG.debug("AWS update_function_configuration response: %s" % response)
else:
LOG.debug('running create_function_code')
response = client.create_function(
@@ -54,5 +64,5 @@ def upload_package(pkg, config):
MemorySize=config.memory,
Publish=config.publish,
)
+ LOG.debug("AWS create_function response: %s" % response)
- LOG.debug("AWS create_function response: %s" % response)
|
Fixed issue with updating lambda configurations - When updating existing lambda functions, only code was updated. Changed to call `update-function-configuration` and update the configuration.
|
py
|
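A minimal sketch of the update path the patch introduces, using boto3's Lambda client. The function name, package path, and role ARN are placeholders.

```python
import boto3

client = boto3.client("lambda")

with open("package.zip", "rb") as f:  # placeholder package path
    zip_bytes = f.read()

# update the code first, then push the configuration so changes to
# handler/role/timeout/memory take effect on existing functions too
response = client.update_function_code(
    FunctionName="my-function",  # placeholder name
    ZipFile=zip_bytes,
    Publish=True,
)
response = client.update_function_configuration(
    FunctionName="my-function",
    Handler="main.handler",
    Role="arn:aws:iam::123456789012:role/lambda-role",  # placeholder ARN
    Timeout=30,
    MemorySize=128,
)
```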
diff --git a/pebble/pebble.py b/pebble/pebble.py
index <HASH>..<HASH> 100644
--- a/pebble/pebble.py
+++ b/pebble/pebble.py
@@ -17,7 +17,13 @@
from uuid import uuid4
+from inspect import isclass
+from itertools import count
from threading import Condition, Lock
+try: # Python 2
+ from Queue import Queue
+except: # Python 3
+ from queue import Queue
class PebbleError(Exception):
@@ -148,3 +154,19 @@ class Task(object):
self._ready = True
self._results = results
self._task_ready.notify_all()
+
+
+class PoolContext(object):
+ def __init__(self, state, workers, task_limit, queue, queueargs):
+ self.state = state
+ self.workers = workers
+ self.pool = []
+ self.limit = task_limit
+ self.counter = count()
+ if queue is not None:
+ if isclass(queue):
+ self.queue = queue(*queueargs)
+ else:
+ raise ValueError("Queue must be Class")
+ else:
+ self.queue = Queue()
|
added PoolContext object to share state within pool's threads
|
py
|
diff --git a/microcosm_flask/conventions/encoding.py b/microcosm_flask/conventions/encoding.py
index <HASH>..<HASH> 100644
--- a/microcosm_flask/conventions/encoding.py
+++ b/microcosm_flask/conventions/encoding.py
@@ -64,8 +64,8 @@ def merge_data(path_data, request_data):
Path data wins.
"""
- merged = request_data.copy()
- merged.update(path_data)
+ merged = request_data.copy() if request_data else {}
+ merged.update(path_data or {})
return merged
|
Account for empty content (e.g. on PATCH with no args)
|
py
|
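A quick illustration of the None-safe behaviour the patch adds (e.g. a PATCH request with an empty body):

```python
def merge_data(path_data, request_data):
    """Merge path and request data; path data wins."""
    merged = request_data.copy() if request_data else {}
    merged.update(path_data or {})
    return merged

assert merge_data({"id": 1}, None) == {"id": 1}
assert merge_data({"id": 1}, {"id": 2, "x": 3}) == {"id": 1, "x": 3}
```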
diff --git a/simple_rest/utils/serializers.py b/simple_rest/utils/serializers.py
index <HASH>..<HASH> 100644
--- a/simple_rest/utils/serializers.py
+++ b/simple_rest/utils/serializers.py
@@ -42,7 +42,11 @@ def to_json(content, indent=None):
json_serializer = serializers.get_serializer('json')()
serialized_content = json_serializer.serialize(content, ensure_ascii=False, indent=indent)
else:
- serialized_content = json.dumps(content, cls=DecimalEncoder, ensure_ascii=False, indent=indent)
+ try:
+ serialized_content = json.dumps(content, cls=DecimalEncoder, ensure_ascii=False, indent=indent)
+ except TypeError:
+ # Fix for Django 1.5
+ serialized_content = json.dumps(content, ensure_ascii=False, indent=indent)
return serialized_content
|
fix for Django <I> json serialization
|
py
|
diff --git a/ipwhois/asn.py b/ipwhois/asn.py
index <HASH>..<HASH> 100644
--- a/ipwhois/asn.py
+++ b/ipwhois/asn.py
@@ -600,7 +600,7 @@ class ASNOrigin:
net_start (:obj:`int`): The starting point of the network (if
parsing multiple networks). Defaults to None.
net_end (:obj:`int`): The ending point of the network (if parsing
- multiple ks). Defaults to None.
+ multiple networks). Defaults to None.
field_list (:obj:`list`): If provided, a list of fields to parse:
['description', 'maintainer', 'updated', 'source']
If None, defaults to all fields.
|
docstring typo fix (#<I>)
|
py
|
diff --git a/openquake/hazardlib/pmf.py b/openquake/hazardlib/pmf.py
index <HASH>..<HASH> 100644
--- a/openquake/hazardlib/pmf.py
+++ b/openquake/hazardlib/pmf.py
@@ -68,6 +68,17 @@ class PMF(object):
:returns:
Samples from PMF as a list
"""
+ return [pair[1] for pair in self.sample_pairs(number_samples)]
+
+ def sample_pairs(self, number_samples):
+ """
+ Produces a list of samples from the probability mass function.
+
+ :param int data:
+ Number of samples
+ :returns:
+ Samples from PMF as a list of pairs
+ """
probs = np.cumsum([val[0] for val in self.data])
sampler = np.random.uniform(0., 1., number_samples)
- return [self.data[ival][1] for ival in np.searchsorted(probs, sampler)]
+ return [self.data[ival] for ival in np.searchsorted(probs, sampler)]
|
Added a method PMF.sample_pairs
|
py
|
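The sampling trick behind both methods: build the cumulative distribution with `np.cumsum`, draw uniforms, and map each draw to a bin with `np.searchsorted`. A standalone sketch with made-up data:

```python
import numpy as np

data = [(0.2, "a"), (0.5, "b"), (0.3, "c")]  # (probability, value) pairs

probs = np.cumsum([p for p, _ in data])   # [0.2, 0.7, 1.0]
draws = np.random.uniform(0.0, 1.0, 5)
pairs = [data[i] for i in np.searchsorted(probs, draws)]
values = [pair[1] for pair in pairs]      # what sample() would return
```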
diff --git a/emoticons/__init__.py b/emoticons/__init__.py
index <HASH>..<HASH> 100644
--- a/emoticons/__init__.py
+++ b/emoticons/__init__.py
@@ -1,5 +1,5 @@
"""django-emoticons"""
-__version__ = '1.0'
+__version__ = '1.0.1'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
|
Bumping to version <I>
|
py
|
diff --git a/tests/api/test_people.py b/tests/api/test_people.py
index <HASH>..<HASH> 100644
--- a/tests/api/test_people.py
+++ b/tests/api/test_people.py
@@ -116,8 +116,16 @@ class PeopleManager(object):
return iter(self.list)
def __del__(self):
- for person in self.test_people.values():
- delete_person(self._api, person)
+ # TODO: Enable test account clean-up.
+ # Licensed privileges aren't taking effect for accounts that have
+ # just been created and this is causing some tests to fail.
+ # I am temporarily disabling test account clean-up to enable the
+ # accounts (with their privileges) to persist. It would be good to
+ # find a way around this.
+
+ # for person in self.test_people.values():
+ # delete_person(self._api, person)
+ pass
@pytest.fixture(scope="session")
|
Disable test account clean-up. Licensed privileges aren't taking effect for accounts that have just been created and this is causing some tests to fail. I am temporarily disabling test account clean-up to enable the accounts (with their privileges) to persist. It would be good to find a way around this.
|
py
|
diff --git a/pyquil/paulis.py b/pyquil/paulis.py
index <HASH>..<HASH> 100644
--- a/pyquil/paulis.py
+++ b/pyquil/paulis.py
@@ -22,8 +22,7 @@ from itertools import product
import numpy as np
import copy
from .quil import Program
-from .gates import H, RZ, RX, CNOT, X, PHASE
-from . import quilbase as pqb
+from .gates import H, RZ, RX, CNOT, X, PHASE, STANDARD_GATES
from numbers import Number
from collections import Sequence
import warnings
@@ -125,6 +124,10 @@ class PauliTerm(object):
return new_term
+ @property
+ def program(self):
+ return Program([STANDARD_GATES[gate](q) for q, gate in self])
+
def get_qubits(self):
"""Gets all the qubits that this PauliTerm operates on.
"""
@@ -576,6 +579,17 @@ class PauliSum(object):
return coalesce(like_terms)
+ def get_programs(self):
+ """
+ Get a Pyquil Program corresponding to each term in the PauliSum and a coefficient
+ for each program
+
+ :return: (programs, coefficients)
+ """
+ programs = [term.program for term in self.terms]
+ coefficients = np.array([term.coefficient for term in self.terms])
+ return programs, coefficients
+
def check_commutation(pauli_list, pauli_two):
"""
|
PauliSum helper functions for computing expectation (#<I>) * Pauli convenience * Fancy qvm expectation * Revert "Fancy qvm expectation" This reverts commit b4cd<I>ca<I>adf<I>fd5c<I>e5c<I>a<I>.
|
py
|
diff --git a/alerta/exceptions.py b/alerta/exceptions.py
index <HASH>..<HASH> 100644
--- a/alerta/exceptions.py
+++ b/alerta/exceptions.py
@@ -1,7 +1,7 @@
import traceback
-from flask import jsonify
+from flask import current_app, jsonify
class AlertaException(IOError):
@@ -49,15 +49,6 @@ class ExceptionHandlers(object):
app.register_error_handler(Exception, handle_exception)
-def handle_api_error(error):
- return jsonify({
- 'status': 'error',
- 'message': error.message,
- 'code': error.code,
- 'errors': error.errors
- }), error.code
-
-
def handle_http_error(error):
return jsonify({
'status': 'error',
@@ -69,7 +60,17 @@ def handle_http_error(error):
}), error.code
+def handle_api_error(error):
+ return jsonify({
+ 'status': 'error',
+ 'message': error.message,
+ 'code': error.code,
+ 'errors': error.errors
+ }), error.code
+
+
def handle_exception(error):
+ current_app.logger.exception(error)
return jsonify({
'status': 'error',
'message': str(error),
|
Log application exception tracebacks (#<I>)
|
py
|
diff --git a/pipeinspector/app.py b/pipeinspector/app.py
index <HASH>..<HASH> 100644
--- a/pipeinspector/app.py
+++ b/pipeinspector/app.py
@@ -51,6 +51,8 @@ def get_pump(input_file):
pump = EvtPump(filename=input_file, cache_enabled=True)
elif extension == 'dat':
pump = DAQPump(filename=input_file)
+ elif extension == 'dqd':
+ pump = CLBPump(filename=input_file)
elif extension == 'root':
pump = AanetPump(filename=input_file)
else:
|
Adds pump for CLB file extension 'dqd'
|
py
|
diff --git a/emirdrp/store.py b/emirdrp/store.py
index <HASH>..<HASH> 100644
--- a/emirdrp/store.py
+++ b/emirdrp/store.py
@@ -29,7 +29,7 @@ from numina.store import dump, load
from .products import ChannelLevelStatistics
from .products import LinesCatalog
from .products import SlitsCatalog
-from emirdrp.wavecal.slitlet import Slitlet
+from numina.array.wavecal.slitlet import Slitlet
_logger = logging.getLogger('emirdrp.store')
|
Fix import of slitlet, now in numina
|
py
|
diff --git a/lib/shopify/base.py b/lib/shopify/base.py
index <HASH>..<HASH> 100644
--- a/lib/shopify/base.py
+++ b/lib/shopify/base.py
@@ -1,4 +1,3 @@
-import pyactiveresource.util
import pyactiveresource.connection
from pyactiveresource.activeresource import ActiveResource, ResourceMeta
import shopify.yamlobjects
@@ -103,10 +102,6 @@ class ShopifyResource(ActiveResource, mixins.Countable):
def _load_attributes_from_response(self, response):
self._update(self.__class__.format.decode(response.body))
- def encode(self, options):
- # pyactiveresource (version 1.0.1) doesn't support encoding to_json
- return pyactiveresource.util.to_xml(options)
-
def __get_primary_key(self):
return self._primary_key
|
Remove ShopifyResource.encode which is no longer needed. This should still be provided in pyactiveresource to support json encoding.
|
py
|
diff --git a/tests/test_functions.py b/tests/test_functions.py
index <HASH>..<HASH> 100644
--- a/tests/test_functions.py
+++ b/tests/test_functions.py
@@ -27,7 +27,7 @@ class Test_IncludeStatement(TestCase):
def test_includeStatement(self):
stream = tokenize(self.sql)
- includeStatement = IncludeStatement('tests/files')
+ includeStatement = IncludeStatement('tests/files', raiseexceptions=True)
stream = includeStatement.process(None, stream)
stream = compact(stream)
|
Fixed test_includeStatement
|
py
|
diff --git a/tests/metadata/test_genconverter.py b/tests/metadata/test_genconverter.py
index <HASH>..<HASH> 100644
--- a/tests/metadata/test_genconverter.py
+++ b/tests/metadata/test_genconverter.py
@@ -15,7 +15,6 @@ from . import nested_typed_classes, simple_typed_attrs, simple_typed_classes
unstructure_strats = sampled_from(list(UnstructureStrategy))
-@settings(max_examples=10000)
@given(simple_typed_classes(), unstructure_strats)
def test_simple_roundtrip(cls_and_vals, strat):
"""
|
Tone down GenConverter tests
|
py
|
diff --git a/pyipmi/ipmitool.py b/pyipmi/ipmitool.py
index <HASH>..<HASH> 100755
--- a/pyipmi/ipmitool.py
+++ b/pyipmi/ipmitool.py
@@ -534,7 +534,7 @@ COMMANDS = (
Command('chassis power cycle',
lambda i, a: i.chassis_control_power_cycle()),
Command('chassis power reset',
- lambda i, a: i.chassis_control_power_hard_reset()),
+ lambda i, a: i.chassis_control_hard_reset()),
Command('chassis power diag',
lambda i, a: i.chassis_control_power_diagnostic_interrupt()),
Command('chassis power soft',
|
Fix ipmitool command chassis power reset. Use the correct API function.
|
py
|
diff --git a/tests/test_journals.py b/tests/test_journals.py
index <HASH>..<HASH> 100644
--- a/tests/test_journals.py
+++ b/tests/test_journals.py
@@ -583,3 +583,20 @@ def test_book_series_from_double_980__a():
assert validate(result['book_series'], subschema) is None
assert expected == result['book_series']
+
+
+def test_deleted_from_980__a():
+ schema = load_schema('journals')
+ subschema = schema['properties']['deleted']
+
+ snippet = (
+ '<datafield tag="980" ind1=" " ind2=" ">'
+ ' <subfield code="a">DELETED</subfield>'
+ '</datafield>'
+ ) # synthetic data
+
+ expected = True
+ result = journals.do(create_record(snippet))
+
+ assert validate(result['deleted'], subschema) is None
+ assert expected == result['deleted']
|
tests: add test for deleted in journals
|
py
|
diff --git a/wfdb/plot/plot.py b/wfdb/plot/plot.py
index <HASH>..<HASH> 100644
--- a/wfdb/plot/plot.py
+++ b/wfdb/plot/plot.py
@@ -286,7 +286,7 @@ def plot_items(
)
if ecg_grids:
- plot_ecg_grids(
+ _plot_ecg_grids(
ecg_grids,
fs,
sig_units,
@@ -625,7 +625,7 @@ def _plot_annotation(
)
-def plot_ecg_grids(ecg_grids, fs, units, time_units, axes, sampling_freq=None):
+def _plot_ecg_grids(ecg_grids, fs, units, time_units, axes, sampling_freq=None):
"""
Add ECG grids to the axes.
|
wfdb.plot.plot: rename plot_ecg_grids to _plot_ecg_grids. This function is not in the wfdb module, is not listed in the documentation, and shouldn't be used by applications directly.
|
py
|
diff --git a/tests/test_select_model.py b/tests/test_select_model.py
index <HASH>..<HASH> 100644
--- a/tests/test_select_model.py
+++ b/tests/test_select_model.py
@@ -68,8 +68,9 @@ def test_top():
def definition(io):
# Instance declaration of adder, definition will be selected
# later
- adder0 = DeclareAdder(4)(name="adder0")
- adder1 = DeclareAdder(4)(name="adder1")
+ Adder4 = DeclareAdder(4)
+ adder0 = Adder4(name="adder0")
+ adder1 = Adder4(name="adder1")
# Random logic with the two adders
O, COUT = adder0(io)
O, COUT = adder1(O, O, COUT)
|
Reuse declaration to make it more clear
|
py
|
diff --git a/pysat/_meta.py b/pysat/_meta.py
index <HASH>..<HASH> 100644
--- a/pysat/_meta.py
+++ b/pysat/_meta.py
@@ -1,3 +1,9 @@
+#!/usr/bin/env python
+# Full license can be found in License.md
+# Full author list can be found in .zenodo.json file
+# DOI:10.5281/zenodo.1199703
+# ----------------------------------------------------------------------------
+
from copy import deepcopy as deepcopy
import os
import warnings
@@ -153,6 +159,10 @@ class Meta(object):
if self._export_nan is None:
self._export_nan = []
+ for lvals in labels.values():
+ if lvals[0] not in self._export_nan and lvals[1] == float:
+ self._export_nan.append(lvals[0])
+
# Set the labels
self.labels = MetaLabels(metadata=self, **labels)
@@ -192,6 +202,7 @@ class Meta(object):
nvar = len([kk for kk in self.keys()])
out_str = ''.join(['Meta(metadata=', self._data.__repr__(),
', labels=', self.labels.__repr__(),
+ 'export_nan=', self._export_nan.__repr__(),
') -> {:d} Variables'.format(nvar)])
return out_str
|
ENH: export_nan update Updated export_nan to default to allowing NaN in all floating labels.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -13,6 +13,8 @@ setup(
include_package_data = True,
install_requires = [
'Django >= 1.6',
+ # Form helper.
+ 'django-crispy-forms >= 1.4',
# Needed for address field.
'django-countries >= 2.0',
],
|
Added django-crispy-forms dependency.
|
py
|
diff --git a/bottom/event.py b/bottom/event.py
index <HASH>..<HASH> 100644
--- a/bottom/event.py
+++ b/bottom/event.py
@@ -106,10 +106,10 @@ def validate_func(event, func, parameters):
def partial_bind(func):
+ sig = inspect.signature(func)
# Wrap non-coroutines so we can always `yield from func(**kw)`
if not asyncio.iscoroutinefunction(func):
func = asyncio.coroutine(func)
- sig = inspect.signature(func)
base = {}
for key, param in sig.parameters.items():
default = param.default
|
calculate signature before wrapping in coro. Fixes #9
|
py
|
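Why the signature has to be captured first: a wrapper that takes `*args, **kwargs` without copying metadata hides the original parameters from `inspect.signature`. A minimal demonstration of that failure mode (generic code, not bottom's actual wrapper):

```python
import inspect

def handler(nick, message, target=None):
    pass

def wrap(func):
    # stand-in for a wrapper that does not copy function metadata
    def inner(*args, **kwargs):
        return func(*args, **kwargs)
    return inner

print(inspect.signature(handler))        # (nick, message, target=None)
print(inspect.signature(wrap(handler)))  # (*args, **kwargs): parameter info lost
```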
diff --git a/tests/cli/test_init_missing_libraries.py b/tests/cli/test_init_missing_libraries.py
index <HASH>..<HASH> 100644
--- a/tests/cli/test_init_missing_libraries.py
+++ b/tests/cli/test_init_missing_libraries.py
@@ -180,7 +180,6 @@ def test_cli_init_spark_without_library_installed_instructs_user(
great_expectations/
.gitignore
great_expectations.yml
- datasources/
expectations/
notebooks/
pandas/
|
* bugfix in init_missing_libraries due to datasources folder
|
py
|
diff --git a/emma2/coordinates/tica.py b/emma2/coordinates/tica.py
index <HASH>..<HASH> 100644
--- a/emma2/coordinates/tica.py
+++ b/emma2/coordinates/tica.py
@@ -1,13 +1,23 @@
+# -*- coding: utf-8 -*-
+
r"""
====
TICA
====
-TODO: describe usage here
+performs this algo [Ref]_.
+
+.. TODO: describe method. See http://msmbuilder.org/theory/tICA.html
+
+
+.. date: Created on 19.11.2013
+
+.. moduleauthor:: Fabian Paul <fabian.paul@mpikg.mpg.de>, marscher
-Created on 19.11.2013
-@author: Fabian Paul <fabian.paul@mpikg.mpg.de>
-@author: marscher
+.. [Ref] Identification of slow molecular order parameters for Markov model construction
+ Pérez-Hernández, Guillermo and Paul, Fabian and Giorgino, Toni and De Fabritiis,
+ Gianni and Noé, Frank, The Journal of Chemical Physics, 139, 015102 (2013),
+ DOI:http://dx.doi.org/10.1063/1.4811489
"""
@@ -17,10 +27,8 @@ import numpy
import warnings
__docformat__ = "restructuredtext en"
-__all__ = ['correlation', 'log_loop', 'rename', 'Amuse']
+__all__ = ['correlation', 'Amuse']
-''' import correlation covariance C extension module '''
-from . import cocovar
from emma2.util.log import getLogger
log = getLogger()
|
[tica] added some docstrings
|
py
|
diff --git a/asphalt/core/component.py b/asphalt/core/component.py
index <HASH>..<HASH> 100644
--- a/asphalt/core/component.py
+++ b/asphalt/core/component.py
@@ -47,12 +47,12 @@ class ContainerComponent(Component):
:vartype child_components: Dict[str, Component]
:ivar component_configs: dictionary of component alias ⭢ externally provided component
configuration
- :vartype component_configs: Dict[str, Dict[str, Any]]
+ :vartype component_configs: Dict[str, Optional[Dict[str, Any]]]
"""
__slots__ = 'child_components', 'component_configs'
- def __init__(self, components: Dict[str, Dict[str, Any]] = None):
+ def __init__(self, components: Dict[str, Optional[Dict[str, Any]]] = None):
assert check_argument_types()
self.child_components = OrderedDict()
self.component_configs = components or {}
|
Allow None in the values of component configurations. These will be replaced by empty dictionaries.
|
py
|
diff --git a/billy/web/public/views/legislators.py b/billy/web/public/views/legislators.py
index <HASH>..<HASH> 100644
--- a/billy/web/public/views/legislators.py
+++ b/billy/web/public/views/legislators.py
@@ -29,8 +29,10 @@ def legislators(request, abbr):
chamber = request.GET.get('chamber', 'both')
if chamber in ('upper', 'lower'):
spec['chamber'] = chamber
+ chamber_title = meta['%s_chamber_title' % chamber] + 's'
else:
chamber = 'both'
+ chamber_title = 'Legislators'
fields = mongo_fields('leg_id', 'full_name', 'photo_url', 'district',
'party', 'first_name', 'last_name', 'chamber',
@@ -69,6 +71,7 @@ def legislators(request, abbr):
return render(request, templatename('legislators'),
dict(metadata=meta,
chamber=chamber,
+ chamber_title=chamber_title,
chamber_select_form=chamber_select_form,
chamber_select_template=templatename('chamber_select_form'),
chamber_select_collection='legislators',
|
add chamber_title to legislators view
|
py
|
diff --git a/djangocms_spa/views.py b/djangocms_spa/views.py
index <HASH>..<HASH> 100644
--- a/djangocms_spa/views.py
+++ b/djangocms_spa/views.py
@@ -85,16 +85,8 @@ class SpaApiView(APIView):
@cache_view
def dispatch(self, request, **kwargs):
# Take the language from the URL kwarg and set it as request language
- self.set_language(kwargs, request)
return super(SpaApiView, self).dispatch(request, **kwargs)
- def set_language(self, kwargs, request):
- if hasattr(request, "LANGUAGE_CODE"):
- language_code = request.LANGUAGE_CODE
- else:
- language_code = kwargs.pop('language_code')
- available_languages = {language[0] for language in settings.LANGUAGES}
- request.LANGUAGE_CODE = language_code if language_code in available_languages else settings.LANGUAGES[0][0]
def get(self, *args, **kwargs):
data = {
|
[language_activation] Remove language activation; it is already handled by the Locale Middleware
|
py
|
diff --git a/xdata-web.py b/xdata-web.py
index <HASH>..<HASH> 100644
--- a/xdata-web.py
+++ b/xdata-web.py
@@ -9,7 +9,7 @@ class Server(object):
<h1>XDATA Web (running over CherryPy)</h1>"""
@cherrypy.expose
- def app(self, module, *pargs, **kwargs):
+ def service(self, module, *pargs, **kwargs):
# TODO(choudhury): This method should attempt to load the named module, then invoke it
# with the given arguments. However, if the named module is "config" or
# something similar, the method should instead launch a special "config"
|
changed name of service path from 'app' to 'service'
|
py
|
diff --git a/dataviews/ndmapping.py b/dataviews/ndmapping.py
index <HASH>..<HASH> 100644
--- a/dataviews/ndmapping.py
+++ b/dataviews/ndmapping.py
@@ -487,24 +487,7 @@ class NdIndexableMapping(param.Parameterized, Dimensional):
def __iter__(self):
- return self
-
- def next(self): # For Python 2 and 3 compatibility
- return self.__next__()
-
- def __next__(self):
- """
- Implements the iterable interface, returning values unlike a standard
- dictionary.
- """
- if self._next_ind < len(self.keys()):
- val = list(self.values())[self._next_ind]
- self._next_ind += 1
- return val
- else:
- self._next_ind = 0
- raise StopIteration
-
+ return iter(self.values())
def __contains__(self, key):
if self.ndims == 1:
|
Simplified the iterator interface for NdIndexableMapping
|
py
|
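The replacement leans on the fact that `iter()` over the values view already implements the whole iterator protocol, so the hand-rolled `next`/`__next__` bookkeeping can go. A toy mapping showing the same idiom:

```python
class ValueMapping(dict):
    def __iter__(self):
        # iterate over values, unlike a standard dictionary
        return iter(self.values())

m = ValueMapping(a=1, b=2)
assert list(m) == [1, 2]
```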
diff --git a/pyannote/metrics/diarization.py b/pyannote/metrics/diarization.py
index <HASH>..<HASH> 100755
--- a/pyannote/metrics/diarization.py
+++ b/pyannote/metrics/diarization.py
@@ -91,6 +91,8 @@ class DiarizationErrorRate(IdentificationErrorRate):
return self._mapper(hypothesis, reference)
def _get_details(self, reference, hypothesis, **kwargs):
+ reference = reference.anonymize_labels(generator='string')
+ hypothesis = hypothesis.anonymize_labels(generator='int')
mapping = self.optimal_mapping(reference, hypothesis)
return super(DiarizationErrorRate, self)\
._get_details(reference, hypothesis % mapping)
|
fix: anonymize annotations before computing diarization error rate
|
py
|
diff --git a/MAVProxy/modules/mavproxy_link.py b/MAVProxy/modules/mavproxy_link.py
index <HASH>..<HASH> 100644
--- a/MAVProxy/modules/mavproxy_link.py
+++ b/MAVProxy/modules/mavproxy_link.py
@@ -35,6 +35,7 @@ preferred_ports = [
'*mRo*',
'*FMU*',
'*Swift-Flyer*',
+ '*Serial*',
]
class LinkModule(mp_module.MPModule):
|
Link: Add *serial* to preferred ports
|
py
|
diff --git a/stripe/__init__.py b/stripe/__init__.py
index <HASH>..<HASH> 100644
--- a/stripe/__init__.py
+++ b/stripe/__init__.py
@@ -274,11 +274,17 @@ class APIRequestor(object):
try:
func = getattr(requests, meth)
result = func(abs_url, headers=headers, data=data)
+
+ # This causes the content to actually be read, which could cause
+ # e.g. a socket timeout. TODO: The other fetch methods probably
+ # are succeptible to the same and should be updated.
+ content = result.content
+ status_code = result.status_code
except Exception, e:
# Would catch just requests.exceptions.RequestException, but can
# also raise ValueError, RuntimeError, etc.
self.handle_requests_error(e)
- return result.content, result.status_code
+ return content, status_code
def handle_requests_error(self, e):
if isinstance(e, requests.exceptions.RequestException):
|
Handle socket timeouts in requests library
|
py
|
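The point of the change: the response body may not be consumed until `.content` is accessed, so that access can itself raise (e.g. a socket timeout) and must sit inside the `try`. A minimal sketch of the pattern, not Stripe's actual client code:

```python
import requests

def fetch(url):
    try:
        result = requests.get(url, timeout=5)
        # accessing .content forces the body to actually be read, so a
        # socket timeout surfaces here, inside the try block
        content = result.content
        status_code = result.status_code
    except Exception as e:
        raise RuntimeError("request failed: %s" % e)
    return content, status_code
```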
diff --git a/dev/ci.py b/dev/ci.py
index <HASH>..<HASH> 100644
--- a/dev/ci.py
+++ b/dev/ci.py
@@ -3,11 +3,12 @@ from __future__ import unicode_literals, division, absolute_import, print_functi
import sys
-from .tests import run as run_tests
if sys.version_info >= (2, 7):
from .lint import run as run_lint
if sys.version_info < (3, 0) or sys.version_info >= (3, 3):
from .coverage import run as run_coverage
+else:
+ from .tests import run as run_tests
def run():
@@ -24,12 +25,14 @@ def run():
lint_result = run_lint()
else:
lint_result = True
- print('\nRunning tests')
- sys.stdout.flush()
- tests_result = run_tests()
if sys.version_info < (3, 0) or sys.version_info >= (3, 3):
- print('\nRunning coverage.py')
- run_coverage(write_xml=True)
+ print('\nRunning tests (via coverage.py)')
+ sys.stdout.flush()
+ tests_result = run_coverage(write_xml=True)
+ else:
+ print('\nRunning tests')
+ sys.stdout.flush()
+ tests_result = run_tests()
return lint_result and tests_result
|
Ensure CI coverage reports include initial import
|
py
|
diff --git a/vyper/parser/parser.py b/vyper/parser/parser.py
index <HASH>..<HASH> 100644
--- a/vyper/parser/parser.py
+++ b/vyper/parser/parser.py
@@ -378,6 +378,8 @@ def parse_func(code, sigs, origcode, global_ctx, _vars=None):
custom_structs=global_ctx._structs,
constants=global_ctx._constants
)
+ # Validate return statements.
+ sig.validate_return_statement_balance()
# Get base args for function.
total_default_args = len(code.args.defaults)
base_args = sig.args[:-total_default_args] if total_default_args > 0 else sig.args
@@ -689,12 +691,6 @@ def parse_func(code, sigs, origcode, global_ctx, _vars=None):
] + nonreentrant_post + stop_func
], typ=None, pos=getpos(code))
- # Check for at leasts one return statement if necessary.
- if context.return_type and context.function_return_count == 0:
- raise FunctionDeclarationException(
- "Missing return statement in function '%s' " % sig.name, code
- )
-
o.context = context
o.total_gas = o.gas + calc_mem_gas(
o.context.memory_allocator.get_next_memory_position()
|
Remove old missing statement check, use unbalanced check instead.
|
py
|
diff --git a/watson_developer_cloud/speech_to_text_v1.py b/watson_developer_cloud/speech_to_text_v1.py
index <HASH>..<HASH> 100644
--- a/watson_developer_cloud/speech_to_text_v1.py
+++ b/watson_developer_cloud/speech_to_text_v1.py
@@ -54,11 +54,8 @@ class SpeechToTextV1(WatsonService):
'interim_results': interim_results,
'profanity_filter': profanity_filter,
'smart_formatting': smart_formatting,
- 'speaker_labels': speaker_labels}
-
- if (params['customization_id'] is not None and
- customization_weight is not None):
- params['customization_weight'] = customization_weight
+ 'speaker_labels': speaker_labels,
+ 'customization_weight': customization_weight}
return self.request(method='POST', url='/v1/recognize',
headers=headers,
|
:art: remove redundant is None checks
|
py
|
diff --git a/fsps/fsps.py b/fsps/fsps.py
index <HASH>..<HASH> 100644
--- a/fsps/fsps.py
+++ b/fsps/fsps.py
@@ -6,7 +6,11 @@ from __future__ import (division, print_function, absolute_import,
__all__ = ["StellarPopulation"]
-from ._fsps import driver
+try:
+ from ._fsps import driver
+ driver = driver
+except ImportError:
+ driver = None
# Hard-set FSPS parameters.
@@ -289,6 +293,9 @@ class StellarPopulation(object):
def __init__(self, compute_vega_mags=True, redshift_colors=False,
**kwargs):
+ if driver is None:
+ raise ImportError("Can't import fsps._fsps")
+
# Set up the parameters to their default values.
self.params = ParameterSet(
dust_type=0,
|
deal with Fortran import error for the sake of readthedocs
|
py
|
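The pattern in general form: swallow the ImportError at module import time (so documentation builds that lack the compiled extension can still import the package) and re-raise it at first use. The module name below is illustrative.

```python
try:
    from mypkg._native import driver  # compiled extension, may be absent
except ImportError:
    driver = None

class StellarPopulation:
    def __init__(self):
        # fail only when the extension is actually needed
        if driver is None:
            raise ImportError("Can't import the native driver extension")
```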
diff --git a/pygame_vkeyboard/vkeyboard.py b/pygame_vkeyboard/vkeyboard.py
index <HASH>..<HASH> 100644
--- a/pygame_vkeyboard/vkeyboard.py
+++ b/pygame_vkeyboard/vkeyboard.py
@@ -81,8 +81,10 @@ class VKeyboardRenderer(object):
:param key: Target key to be drawn.
"""
pygame.draw.rect(surface, self.key_background_color[key.state], key.position + (key.size, key.size))
- # TODO : Center key into cell.
- return surface.blit(self.font.render(key.value, 1, self.text_color[key.state], None), key.position)
+ size = self.font.size(key.value)
+ x = key.position[0] + ((key.size - size[0]) / 2)
+ y = key.position[1] + ((key.size - size[1]) / 2)
+ return surface.blit(self.font.render(key.value, 1, self.text_color[key.state], None), (x, y))
""" Default style implementation. """
VKeyboardRenderer.DEFAULT = VKeyboardRenderer(
|
Finalizes key text centering.
|
py
|
diff --git a/salt/fileclient.py b/salt/fileclient.py
index <HASH>..<HASH> 100644
--- a/salt/fileclient.py
+++ b/salt/fileclient.py
@@ -359,15 +359,18 @@ class Client(object):
if senv:
saltenv = senv
+ escape = '|' if path.startswith('|') else ''
+
+ # also strip escape character '|'
localsfilesdest = os.path.join(
- self.opts['cachedir'], 'localfiles', path.lstrip('/'))
+ self.opts['cachedir'], 'localfiles', path.lstrip('|/'))
filesdest = os.path.join(
- self.opts['cachedir'], 'files', saltenv, path)
+ self.opts['cachedir'], 'files', saltenv, path.lstrip('|'))
if os.path.exists(filesdest):
- return filesdest
+ return u'{0}{1}'.format(escape, filesdest)
elif os.path.exists(localsfilesdest):
- return localsfilesdest
+ return u'{0}{1}'.format(escape, localsfilesdest)
return ''
|
also handle salt://| escaping in fileclient
|
py
|
diff --git a/src/main/python/rlbot/agents/rlbot_runnable.py b/src/main/python/rlbot/agents/rlbot_runnable.py
index <HASH>..<HASH> 100644
--- a/src/main/python/rlbot/agents/rlbot_runnable.py
+++ b/src/main/python/rlbot/agents/rlbot_runnable.py
@@ -81,6 +81,7 @@ class RLBotRunnable:
details_config.add_value('fun_fact', str, description="Fun fact about the bot")
details_config.add_value('github', str, description="Link to github repository")
details_config.add_value('language', str, description="Programming language")
+ details_config.add_value('tags', str, description="Comma separated list of tags, used by RLBotGUI")
cls.create_agent_configurations(config)
|
Add tags field to details config (#<I>)
|
py
|
diff --git a/kerncraft/models/benchmark.py b/kerncraft/models/benchmark.py
index <HASH>..<HASH> 100644
--- a/kerncraft/models/benchmark.py
+++ b/kerncraft/models/benchmark.py
@@ -306,7 +306,7 @@ class Benchmark(PerformanceModel):
# Determine base runtime with 10 iterations
runtime = 0.0
time_per_repetition = 0.2 / 10.0
- repetitions = 10
+ repetitions = 1
mem_results = {}
while runtime < 0.15:
|
Benchmark mode now starts with <I> iterations (#<I>)
|
py
|
diff --git a/salt/utils/psutil_compat.py b/salt/utils/psutil_compat.py
index <HASH>..<HASH> 100644
--- a/salt/utils/psutil_compat.py
+++ b/salt/utils/psutil_compat.py
@@ -16,7 +16,7 @@ from __future__ import absolute_import
import psutil
if psutil.version_info >= (2, 0):
- from psutil import * # pylint: disable=wildcard-import
+ from psutil import * # pylint: disable=wildcard-import,unused-wildcard-import
else:
# Import hack to work around bugs in old psutil's
# Psuedo "from psutil import *"
|
Make PyLint a little happier
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -213,7 +213,7 @@ else:
print('Generating ffconfig.h')
-with open(join('ffpyplayer', 'includes', 'ffconfig.h'), 'wb') as f:
+with open(join('ffpyplayer', 'includes', 'ffconfig.h'), 'w') as f:
f.write('''
#ifndef _FFCONFIG_H
#define _FFCONFIG_H
@@ -247,7 +247,7 @@ with open(join('ffpyplayer', 'includes', 'ffconfig.h'), 'wb') as f:
''')
print('Generating ffconfig.pxi')
-with open(join('ffpyplayer', 'includes', 'ffconfig.pxi'), 'wb') as f:
+with open(join('ffpyplayer', 'includes', 'ffconfig.pxi'), 'w') as f:
for k, v in c_options.items():
f.write('DEF %s = %d\n' % (k.upper(), int(v)))
|
Fix py3 writing to file.
|
py
|
diff --git a/treebeard/mp_tree.py b/treebeard/mp_tree.py
index <HASH>..<HASH> 100644
--- a/treebeard/mp_tree.py
+++ b/treebeard/mp_tree.py
@@ -1052,9 +1052,11 @@ class MP_Node(Node):
"""
parentpath = cls._get_basepath(path, depth - 1)
key = cls._int2str(newstep)
- return '%s%s%s' % (parentpath,
- '0' * (cls.steplen - len(key)),
- key)
+ return '{}{}{}'.format(
+ parentpath,
+ cls.alphabet[0] * (cls.steplen - len(key)),
+ key
+ )
def _inc_path(self):
""":returns: The path of the next sibling of a given node path."""
@@ -1062,9 +1064,11 @@ class MP_Node(Node):
key = self._int2str(newpos)
if len(key) > self.steplen:
raise PathOverflow(_("Path Overflow from: '%s'" % (self.path, )))
- return '%s%s%s' % (self.path[:-self.steplen],
- '0' * (self.steplen - len(key)),
- key)
+ return '{}{}{}'.format(
+ self.path[:-self.steplen],
+ self.alphabet[0] * (self.steplen - len(key)),
+ key
+ )
def _get_lastpos_in_path(self):
""":returns: The integer value of the last step in a path."""
|
Fixed MP trees when using alphabets that don't start with a '0'.
|
py
|
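The bug in plain terms: a materialized-path step is an integer rendered in a custom alphabet, and the zero digit of that alphabet is `alphabet[0]`, not the character `'0'`. A self-contained sketch of the encoding and padding (simplified, not treebeard's exact helpers):

```python
def int2str(value, alphabet):
    """Render a non-negative integer in the base defined by the alphabet."""
    base = len(alphabet)
    key = ""
    while True:
        value, rem = divmod(value, base)
        key = alphabet[rem] + key
        if value == 0:
            return key

alphabet = "abcdefghij"  # a base-10 alphabet that does not start with '0'
steplen = 4
key = int2str(42, alphabet)                      # 'ec'
step = alphabet[0] * (steplen - len(key)) + key  # 'aaec', correctly zero-padded
```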
diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py
index <HASH>..<HASH> 100644
--- a/tests/test_ec2/test_route_tables.py
+++ b/tests/test_ec2/test_route_tables.py
@@ -163,7 +163,6 @@ def test_route_table_associations():
# Refresh
route_table = conn.get_all_route_tables(route_table.id)[0]
- print route_table.__dict__
route_table.associations.should.have.length_of(0)
# Associate
|
Route Tables: Added support for associate/disassociate subnets. (removed wayward print)
|
py
|
diff --git a/py/testdir_multi_jvm/test_rf_big_rand_tree_fvec.py b/py/testdir_multi_jvm/test_rf_big_rand_tree_fvec.py
index <HASH>..<HASH> 100644
--- a/py/testdir_multi_jvm/test_rf_big_rand_tree_fvec.py
+++ b/py/testdir_multi_jvm/test_rf_big_rand_tree_fvec.py
@@ -73,7 +73,7 @@ class Basic(unittest.TestCase):
kwargs = {'ntrees': 3, 'max_depth': 20, 'seed': seed}
start = time.time()
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=90)
- h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=200, pollTimeoutSecs=180, **kwargs)
+ h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=600, pollTimeoutSecs=180, **kwargs)
print "trial #", trial, "rowCount:", rowCount, "colCount:", colCount, "RF end on ", csvFilename, \
'took', time.time() - start, 'seconds'
|
Test ran on my laptop successfully. Increase timeout from <I> to <I> seconds anyway.
|
py
|
diff --git a/treeherder/model/derived/jobs.py b/treeherder/model/derived/jobs.py
index <HASH>..<HASH> 100644
--- a/treeherder/model/derived/jobs.py
+++ b/treeherder/model/derived/jobs.py
@@ -2829,7 +2829,7 @@ into chunks of chunk_size size. Returns the number of result sets deleted"""
if rs['state'] == 'completed':
resultset_status_dict[rs['result']] = int(rs['total']) - rs['num_coalesced']
else:
- resultset_status_dict[rs['result']] = rs['state']
+ resultset_status_dict[rs['state']] = int(rs['total'])
resultset_status_dict['coalesced'] = num_coalesced
return resultset_status_dict
|
Bug <I> - Fix count of uncompleted jobs in resultset-count embed
|
py
|
diff --git a/pysat/tests/test_instrument.py b/pysat/tests/test_instrument.py
index <HASH>..<HASH> 100644
--- a/pysat/tests/test_instrument.py
+++ b/pysat/tests/test_instrument.py
@@ -206,6 +206,17 @@ class TestBasics():
self.testInst.load(fname=self.ref_time.strftime('%Y-%m-%d.nofile'))
assert self.testInst.index[0] == self.ref_time
+ def test_filenames_load(self):
+ """Test if files are loadable by filenames, relative to
+ top_data_dir/platform/name/tag"""
+ fname2 = self.ref_time + pds.DateOffset(days=1)
+ fname2 = fname2.strftime('%Y-%m-%d.nofile')
+ self.testInst.load(fname=self.ref_time.strftime('%Y-%m-%d.nofile'),
+ fname2=fname2)
+ assert self.testInst.index[0] == self.ref_time
+ assert self.testInst.index[-1] >= self.ref_time + pds.DateOffset(days=1)
+ assert self.testInst.index[-1] <= self.ref_time + pds.DateOffset(days=2)
+
def test_next_filename_load_default(self):
"""Test next day is being loaded (checking object date)."""
self.testInst.load(fname=self.ref_time.strftime('%Y-%m-%d.nofile'))
|
TST: added test for fname and fname2 in .load
|
py
|
diff --git a/pysswords/__main__.py b/pysswords/__main__.py
index <HASH>..<HASH> 100644
--- a/pysswords/__main__.py
+++ b/pysswords/__main__.py
@@ -91,7 +91,7 @@ def list_credentials(database, query=None, show_password=False):
table = []
for credential in database.credentials:
row = [
- colorama.Fore.GREEN + credential.name + colorama.Fore.RESET,
+ colorama.Fore.YELLOW + credential.name + colorama.Fore.RESET,
credential.login,
"..." if not show_password else database.gpg.decrypt(
credential.password,
|
Use yellow for listing credentials as a table
|
py
|
diff --git a/angr/analyses/decompiler/region_identifier.py b/angr/analyses/decompiler/region_identifier.py
index <HASH>..<HASH> 100644
--- a/angr/analyses/decompiler/region_identifier.py
+++ b/angr/analyses/decompiler/region_identifier.py
@@ -71,12 +71,16 @@ class RegionIdentifier(Analysis):
self.region = self._make_regions(graph)
- @staticmethod
- def _get_start_node(graph):
+ def _get_start_node(self, graph):
try:
return next(n for n in graph.nodes() if graph.in_degree(n) == 0)
except StopIteration:
- return None
+ pass
+
+ try:
+ return next(n for n in graph.nodes() if n.addr == self.function.addr)
+ except StopIteration:
+ raise RuntimeError("Cannot find the start node from the graph!")
def _test_reducibility(self):
|
RegionIdentifier: Do not crash if the beginning node has a self loop. (#<I>)
|
py
|
diff --git a/tests/conftest.py b/tests/conftest.py
index <HASH>..<HASH> 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,4 +1,3 @@
-from unittest.mock import MagicMock
import pytest
from rocketchat_API.rocketchat import RocketChat
@@ -13,7 +12,8 @@ def rocket():
@pytest.fixture(scope="session")
def create_user(rocket):
def _create_user(name="user1", password="password", email="email@domain.com"):
- user = MagicMock()
+ # create empty object, because Mock not included to python2
+ user = type('test', (object,), {})()
user.name = name
user.password = password
|
Remove Mock and create "empty" object on the fly
|
py
|
diff --git a/doctr/travis.py b/doctr/travis.py
index <HASH>..<HASH> 100644
--- a/doctr/travis.py
+++ b/doctr/travis.py
@@ -166,7 +166,8 @@ def setup_GitHub_push(deploy_repo, auth_type='deploy_key', full_key_path='github
remotes = subprocess.check_output(['git', 'remote']).decode('utf-8').split('\n')
if 'doctr_remote' in remotes:
- print("doctr_remote already exists")
+ print("doctr_remote already exists, removing")
+ run(['git', 'remote', 'remove', 'doctr_remote'])
else:
print("Adding doctr remote")
if auth_type == 'token':
|
Remove the doctr_remote if it already exists. I don't know if this will fix the problem, but we need to do this anyway in case a second run has a separate deploy repo.
|
py
|
diff --git a/pypeerassets/voting.py b/pypeerassets/voting.py
index <HASH>..<HASH> 100644
--- a/pypeerassets/voting.py
+++ b/pypeerassets/voting.py
@@ -17,14 +17,14 @@ def deck_vote_tag(deck):
class Vote:
def __init__(self, version: int, description: str, count_mode: str,
- choices=[], metainfo=None):
+ choices=[], vote_metainfo=None):
'''initialize vote object'''
self.version = version
self.description = description
self.choices = choices
self.count_mode = count_mode
- self.metainfo = metainfo
+ self.vote_metainfo = vote_metainfo
@property
def vote_info_to_protobuf(self):
@@ -36,10 +36,10 @@ class Vote:
vote.count_mode = vote.MODE.Value(self.count_mode)
vote.choices.extend(self.choices)
- if not isinstance(self.asset_specific_data, bytes):
- vote.metainfo = self.metainfo.encode()
+ if not isinstance(self.vote_metainfo, bytes):
+ vote.vote_metainfo = self.vote_metainfo.encode()
else:
- vote.metainfo = self.metainfo
+ vote.vote_metainfo = self.vote_metainfo
proto = vote.SerializeToString()
|
voting: rename metainfo to vote_metainfo
|
py
|
diff --git a/flask_passwordless/login_url.py b/flask_passwordless/login_url.py
index <HASH>..<HASH> 100644
--- a/flask_passwordless/login_url.py
+++ b/flask_passwordless/login_url.py
@@ -21,7 +21,7 @@ class PlainLoginURL(LoginURL):
from flask import url_for
return "".join([
url_for('authenticate', _external=True),
- "?token={}&uid={}".format(token, userid)
+ "?token=" + token + "&uid=" + userid
])
def parse(self, request):
|
use string concat since on our py<I> machines, .format fails despite "New in version <I>"
|
py
|
diff --git a/foolbox/ext/native/attacks/newtonfool.py b/foolbox/ext/native/attacks/newtonfool.py
index <HASH>..<HASH> 100644
--- a/foolbox/ext/native/attacks/newtonfool.py
+++ b/foolbox/ext/native/attacks/newtonfool.py
@@ -52,6 +52,7 @@ class NewtonFoolAttack(MinimizationAttack):
x_l2_norm = flatten(x.square()).sum(1)
def loss_fun(x):
+ # TODO: this is wrong!
logits = model(x)
scores = ep.softmax(logits)
pred = scores.argmax(-1)
|
added TODO because the newtonfool loss function is wrong
|
py
|
diff --git a/luigi/worker.py b/luigi/worker.py
index <HASH>..<HASH> 100644
--- a/luigi/worker.py
+++ b/luigi/worker.py
@@ -778,16 +778,16 @@ class Worker(object):
def _run_task(self, task_id):
task = self._scheduled_tasks[task_id]
- p = self._create_task_process(task)
+ task_process = self._create_task_process(task)
- self._running_tasks[task_id] = p
+ self._running_tasks[task_id] = task_process
- if p.random_seed:
+ if task_process.random_seed:
with fork_lock:
- p.start()
+ task_process.start()
else:
# Run in the same process
- p.run()
+ task_process.run()
def _create_task_process(self, task):
def update_tracking_url(tracking_url):
|
Uses task_process instead of p as a variable name in _run_task
|
py
|
diff --git a/src/unity/python/turicreate/toolkits/object_detector/_sframe_loader.py b/src/unity/python/turicreate/toolkits/object_detector/_sframe_loader.py
index <HASH>..<HASH> 100644
--- a/src/unity/python/turicreate/toolkits/object_detector/_sframe_loader.py
+++ b/src/unity/python/turicreate/toolkits/object_detector/_sframe_loader.py
@@ -17,16 +17,8 @@ _TMP_COL_RANDOM_ORDER = '_random_order'
def _convert_image_to_raw(image):
- FORMAT_RAW = 2
- if image._format_enum == FORMAT_RAW:
- return image
- else:
- return _tc.Image(_image_data=image.pixel_data.tobytes(),
- _width=image.width,
- _height=image.height,
- _channels=image.channels,
- _format_enum=FORMAT_RAW,
- _image_data_size=image.width * image.height * image.channels)
+ # Decode image and make sure it has 3 channels
+ return _tc.image_analysis.resize(image, image.width, image.height, 3, decode=True)
class SFrameDetectionIter(_mx.io.DataIter):
|
Handle RGBA (4 channels) and L (1 channel) images in detector (#<I>) This relies on the `resize` function to make these conversions. This function is really fast when there is no resizing or channel change, although a bit slower than the previous decoding method in speed tests. Fixes #<I>
|
py
|
diff --git a/tests/__init__.py b/tests/__init__.py
index <HASH>..<HASH> 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -281,6 +281,10 @@ class BaseGeneralInterfaceTest(StubbedClientTest):
def test_returns_future_with_meta(self):
self._setup_default_stubbed_responses()
future = self.method(**self.create_call_kwargs())
+ # The result is called so we ensure that the entire process executes
+ # before we try to clean up resources in the tearDown.
+ future.result()
+
# Assert the return value is a future with metadata associated to it.
self.assertIsInstance(future, TransferFuture)
self.assertIsInstance(future.meta, TransferMeta)
@@ -289,6 +293,10 @@ class BaseGeneralInterfaceTest(StubbedClientTest):
self._setup_default_stubbed_responses()
call_kwargs = self.create_call_kwargs()
future = self.method(**call_kwargs)
+ # The result is called so we ensure that the entire process executes
+ # before we try to clean up resources in the tearDown.
+ future.result()
+
# Assert that there are call args associated to the metadata
self.assertIsInstance(future.meta.call_args, CallArgs)
# Assert that all of the arguments passed to the method exist and
|
Fix issue in tests. The issue is that since we did not call result on the future, the process may still be going during the tearDown, and as a result on systems like Windows we may be trying to remove the temporary file for downloads while the transfer manager is still acting on it, which causes issues when two processes are operating on the same file.
|
py
|
diff --git a/tests/baseline_data.py b/tests/baseline_data.py
index <HASH>..<HASH> 100644
--- a/tests/baseline_data.py
+++ b/tests/baseline_data.py
@@ -1,15 +1,14 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-# Copyright (c) 2014-18 Richard Hull and contributors
+# Copyright (c) 2014-2020 Richard Hull and contributors
# See LICENSE.rst for details.
"""
Collection of datasets to prevent regression bugs from creeping in.
"""
-import io
import json
-import os.path
+from pathlib import Path
def primitives(device, draw):
@@ -33,7 +32,13 @@ def primitives(device, draw):
def get_json_data(fname):
- dirname = os.path.abspath(os.path.dirname(__file__))
- fpath = os.path.join(dirname, 'reference', 'data', fname + '.json')
- with io.open(fpath) as f:
+ """
+ Load JSON reference data.
+
+ :param fname: Filename without extension.
+ :type fname: str
+ """
+ base_dir = Path(__file__).resolve().parent
+ fpath = base_dir.joinpath('reference', 'data', fname + '.json')
+ with fpath.open() as f:
return json.load(f)
|
tests: use pathlib (#<I>)
|
py
|
diff --git a/src/jukeboxmaya/__init__.py b/src/jukeboxmaya/__init__.py
index <HASH>..<HASH> 100644
--- a/src/jukeboxmaya/__init__.py
+++ b/src/jukeboxmaya/__init__.py
@@ -1,3 +1,3 @@
__author__ = 'David Zuber'
__email__ = 'zuber.david@gmx.de'
-__version__ = '0.1.0'
+__version__ = '1.0.0'
|
Bumped version number to <I>
|
py
|
diff --git a/pyathenajdbc/cursor.py b/pyathenajdbc/cursor.py
index <HASH>..<HASH> 100644
--- a/pyathenajdbc/cursor.py
+++ b/pyathenajdbc/cursor.py
@@ -162,7 +162,7 @@ class Cursor(object):
return None
self._rownumber += 1
return tuple([
- self._converter.convert(column.getSQLColumnType(), row)
+ self._converter.convert(column.getSQLColumnType(), row.getVarCharValue())
for column, row in zip(self._columns(), self._rows())
])
|
Fix to support JDBC driver <I>
|
py
|
diff --git a/py/test/rsession/box.py b/py/test/rsession/box.py
index <HASH>..<HASH> 100644
--- a/py/test/rsession/box.py
+++ b/py/test/rsession/box.py
@@ -7,6 +7,7 @@ import py
import os
import sys
import marshal
+from py.__.test import config as pytestconfig
PYTESTSTDOUT = "pyteststdout"
PYTESTSTDERR = "pyteststderr"
@@ -30,6 +31,7 @@ class FileBox(object):
self.kwargs = kwargs
def run(self, continuation=False):
+ # XXX we should not use py.test.ensuretemp here
tempdir = py.test.ensuretemp("box%d" % self.count)
self.count += 1
self.tempdir = tempdir
@@ -78,6 +80,11 @@ class FileBox(object):
try:
if nice_level:
os.nice(nice_level)
+ # with fork() we have duplicated py.test's basetemp
+ # directory so we unset it manually here.
+ # this may be expensive for some test setups,
+ # but that is what you get with boxing.
+ pytestconfig.basetemp = None
retval = self.fun(*self.args, **self.kwargs)
retvalf.write(marshal.dumps(retval))
finally:
|
[svn r<I>] have each boxed test run use its own tempdir --HG-- branch : trunk
|
py
|
diff --git a/netsnmptestenv.py b/netsnmptestenv.py
index <HASH>..<HASH> 100644
--- a/netsnmptestenv.py
+++ b/netsnmptestenv.py
@@ -58,12 +58,7 @@ class netsnmpTestEnv(object):
subprocess.check_call(cmd, shell=True)
def shutdown(self):
- # Check for existance of snmpd's PID file
- if os.access(self.pidfile, os.R_OK):
- # Read the PID
- with open(self.pidfile, "r") as f:
- pid = int(f.read())
-
+ def kill_process(pid):
# First we ask it nicely to quit. If after a second it hasn't, we
# will kill it the hard way.
try:
@@ -78,6 +73,15 @@ class netsnmpTestEnv(object):
except OSError as e:
pass
+ # Check for existance of snmpd's PID file
+ if os.access(self.pidfile, os.R_OK):
+ # Read the PID
+ with open(self.pidfile, "r") as f:
+ pid = int(f.read())
+
+ # And kill it
+ kill_process(pid)
+
# Recursively remove the temporary directory
if os.access(self.tmpdir, os.R_OK):
shutil.rmtree(self.tmpdir)
|
netsnmptestenv: Move shutdown()'s process killing into its own function. Sooner or later our net-snmp test environment will consist of more than snmpd (e.g. snmptrapd), so it makes sense to generalize the process killing.
|
py
|
diff --git a/test/test_hosts.py b/test/test_hosts.py
index <HASH>..<HASH> 100644
--- a/test/test_hosts.py
+++ b/test/test_hosts.py
@@ -153,11 +153,11 @@ class TestHost(ShinkenTest):
def test_hostgroup(self):
- hg = self.sched.hostgroups.find_by_name("hostgroup_01")
+ hg = self.conf.hostgroups.find_by_name("hostgroup_01")
self.assertIsNot(hg, None)
- h = self.sched.hosts.find_by_name('test_host_0')
+ h = self.conf.hosts.find_by_name('test_host_0')
self.assertIn(h, hg.members)
- self.assertIn(hg, h.hostgroups)
+ self.assertIn(hg.get_name(), [hg.get_name() for hg in h.hostgroups])
def test_childs(self):
|
Fix: bad hostgroup test that was looking for hard objects
|
py
|
diff --git a/source/rafcon/gui/models/state_machine.py b/source/rafcon/gui/models/state_machine.py
index <HASH>..<HASH> 100644
--- a/source/rafcon/gui/models/state_machine.py
+++ b/source/rafcon/gui/models/state_machine.py
@@ -294,7 +294,6 @@ class StateMachineModel(ModelMT, Hashable):
if 'before' in info:
self._send_root_state_notification(model, prop_name, info)
- self.change_root_state_type.__func__.suppressed_notification_parameters = None
else:
# Do not forward the notification yet, but store its parameters locally at the function
# The function helpers.state.change_state_type will forward the notification after some preparation
|
refactor(StateMachineModel): remove obsolete line
|
py
|
diff --git a/lifxlan/unpack.py b/lifxlan/unpack.py
index <HASH>..<HASH> 100644
--- a/lifxlan/unpack.py
+++ b/lifxlan/unpack.py
@@ -171,8 +171,9 @@ def unpack_lifx_message(packed_message):
message = LightGet(target_addr, source_id, seq_num, {}, ack_requested, response_requested)
elif message_type == MSG_IDS[LightSetColor]:
- color = struct.unpack("<" + ("H"*4), payload_str[0:8])
- duration = struct.unpack("<I", payload_str[8:12])[0]
+ reserved = struct.unpack("<B", payload_str[0:1])[0]
+ color = struct.unpack("<" + ("H"*4), payload_str[1:9])
+ duration = struct.unpack("<I", payload_str[9:13])[0]
payload = {"color": color, "duration": duration}
message = LightSetColor(target_addr, source_id, seq_num, payload, ack_requested, response_requested)
|
Fixed unpack bug for LightSetColor message API spec and message packing routine both include a reserved 8-bit unsigned field. Unpack did not account for this.
|
py
|
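The fix shifts every offset by one byte to account for the reserved uint8 that precedes the HSBK color. A round-trip sketch with `struct` (field values are arbitrary):

```python
import struct

# reserved uint8, HSBK color (4 x uint16), duration uint32, little-endian
packed = struct.pack("<B4HI", 0, 21845, 65535, 65535, 3500, 1024)

reserved = struct.unpack("<B", packed[0:1])[0]
color = struct.unpack("<4H", packed[1:9])        # (21845, 65535, 65535, 3500)
duration = struct.unpack("<I", packed[9:13])[0]  # 1024
assert (reserved, duration) == (0, 1024)
```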
diff --git a/ripe/atlas/sagan/helpers/abuf.py b/ripe/atlas/sagan/helpers/abuf.py
index <HASH>..<HASH> 100644
--- a/ripe/atlas/sagan/helpers/abuf.py
+++ b/ripe/atlas/sagan/helpers/abuf.py
@@ -290,12 +290,14 @@ class AbufParser(object):
edns0 = {
'UDPsize': res[1],
'ExtendedReturnCode': res[2] >> 24,
- 'Version': (res[2] and 0x0f00) >> 16,
- 'Z': (res[2] and 0x00ff),
+ 'Version': (res[2] & 0x00ff0000) >> 16,
+ 'Z': (res[2] & 0x007fff),
'Type': 'OPT',
'Option': [],
'Name': name,
}
+ if res[2] & 0x8000:
+ edns0['DO']= True
o = 0
while o < len(rdata):
|
Fixed some bugs in EDNS0 parsing and extract DO flag.
|
py
|
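The underlying bug: `and` is a boolean operator, so `res and mask` returns `mask` whenever `res` is non-zero; bit extraction needs `&`. A small demonstration with an EDNS0-style 32-bit TTL field (the value is chosen to set version 1 and the DO bit):

```python
ttl = 0x00018000  # extended-rcode=0, version=1, DO=1, Z=0

wrong_version = (ttl and 0x0f00) >> 16  # 'and' yields 0x0f00, so this is always 0
version = (ttl & 0x00ff0000) >> 16      # 1: masks out the version byte
do_flag = bool(ttl & 0x8000)            # True: the DNSSEC OK bit is set
assert (wrong_version, version, do_flag) == (0, 1, True)
```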
diff --git a/pelix/shell/remote.py b/pelix/shell/remote.py
index <HASH>..<HASH> 100644
--- a/pelix/shell/remote.py
+++ b/pelix/shell/remote.py
@@ -200,9 +200,12 @@ class RemoteConsole(socketserver.StreamRequestHandler):
_logger.info("RemoteConsole client gone: [%s]:%d",
self.client_address[0], self.client_address[1])
- # Be polite
- self.send("\nSession closed. Good bye.\n")
- self.finish()
+ try:
+ # Be polite
+ self.send("\nSession closed. Good bye.\n")
+ self.finish()
+ except IOError as ex:
+ _logger.warning("Error cleaning up connection: %s", ex)
# ------------------------------------------------------------------------------
|
Remote Shell: Log the connection ending error ... instead of letting it be shown on output
|
py
|
diff --git a/hug/test.py b/hug/test.py
index <HASH>..<HASH> 100644
--- a/hug/test.py
+++ b/hug/test.py
@@ -87,8 +87,8 @@ def cli(method, *kargs, **arguments):
try:
method.cli()
- except Exception:
- pass
+ except Exception as e:
+ to_return = (e, )
method.cli.output = old_output
sys.argv = old_sys_argv
|
Improve how hug clis are tested
|
py
|
diff --git a/src/streamlink/plugins/bfmtv.py b/src/streamlink/plugins/bfmtv.py
index <HASH>..<HASH> 100644
--- a/src/streamlink/plugins/bfmtv.py
+++ b/src/streamlink/plugins/bfmtv.py
@@ -8,7 +8,7 @@ from streamlink.stream import HLSStream
class BFMTV(Plugin):
_url_re = re.compile(r'https://.+\.(?:bfmtv|01net)\.com')
_brightcove_video_re = re.compile(r'data-holder="video(?P<video_id>[0-9]+)" data-account="(?P<account_id>[0-9]+)"')
- _brightcove_video_alt_re = re.compile(r'data-account="(?P<account_id>[0-9]+).*?data-video-id="(?P<video_id>[0-9]+)"')
+ _brightcove_video_alt_re = re.compile(r'data-account="(?P<account_id>[0-9]+).*?data-video-id="(?P<video_id>[0-9]+)"', re.DOTALL)
_embed_video_url_re = re.compile(r"\$YOPLAYER\('liveStitching', {.+?file: '(?P<video_url>[^\"]+?)'.+?}\);", re.DOTALL)
@classmethod
|
[plugins.bfmtv] Fix player regex
|
py
|
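Why `re.DOTALL` matters here: without it, `.` does not match newlines, so the `.*?` between the two attributes fails as soon as they sit on different lines of the page. For example:

```python
import re

html = 'data-account="123"\n ... data-video-id="456"'
plain = re.compile(r'data-account="(\d+)".*?data-video-id="(\d+)"')
dotall = re.compile(r'data-account="(\d+)".*?data-video-id="(\d+)"', re.DOTALL)

assert plain.search(html) is None  # '.' stops at the newline
assert dotall.search(html).groups() == ("123", "456")
```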
diff --git a/src/numdifftools/info.py b/src/numdifftools/info.py
index <HASH>..<HASH> 100644
--- a/src/numdifftools/info.py
+++ b/src/numdifftools/info.py
@@ -54,7 +54,7 @@ Visualize high order derivatives of the tanh function
... y = df(x)
... h = plt.plot(x, y/np.abs(y).max())
- plt.show()
+ >>> plt.show() # doctest + SKIP
.. image:: https://raw.githubusercontent.com/pbrod/numdifftools/master/examples/fun.png
:target: https://github.com/pbrod/numdifftools/blob/master/examples/fun.py
|
Added "# doctest + SKIP" to doctest string in info.py
|
py
|
diff --git a/vk/groups.py b/vk/groups.py
index <HASH>..<HASH> 100644
--- a/vk/groups.py
+++ b/vk/groups.py
@@ -44,10 +44,10 @@ class Group(VKBase):
:param: sort {id_asc, id_desc, time_asc, time_desc} string
Docs: https://vk.com/dev/groups.getMembers
"""
- return self._session.fetch_items("groups.getMembers", User.from_json, 100, group_id=self.id, sort=sort, fields=User.__slots__ + User.USER_FIELDS)
+ return self._session.fetch_items("groups.getMembers", User.from_json, 1000, group_id=self.id, sort=sort, fields=User.__slots__ + User.USER_FIELDS)
def get_members_only_id(self):
- return self._session.fetch_items("groups.getMembers", lambda _, y: y, 100, group_id=self.id)
+ return self._session.fetch_items("groups.getMembers", lambda _, y: y, 1000, group_id=self.id)
def get_members_count(self):
response = self._session.fetch("groups.getById", group_ids=self.id, fields="members_count")
|
`Group.get_members` and `Group.get_members_only_id` now request <I> users at a time instead of <I>
|
py
|
diff --git a/generic_positions/templatetags/position_tags.py b/generic_positions/templatetags/position_tags.py
index <HASH>..<HASH> 100644
--- a/generic_positions/templatetags/position_tags.py
+++ b/generic_positions/templatetags/position_tags.py
@@ -19,6 +19,9 @@ def order_by_position(qs, reverse=False):
position = 'position'
if reverse:
position = '-' + position
+ # Check that every item has a valid position item
+ for obj in qs:
+ ObjectPosition.objects.get_or_create(content_object=obj)
# Get content type of first queryset item
c_type = ContentType.objects.get_for_model(qs[0])
return [
|
fixed order by position tag if items have no position object
|
py
|
diff --git a/rest_api/api.py b/rest_api/api.py
index <HASH>..<HASH> 100644
--- a/rest_api/api.py
+++ b/rest_api/api.py
@@ -183,6 +183,7 @@ def biopax_process_pc_neighborhood():
### PYSB ###
@route('/assemblers/pysb', method='POST')
+@allow_cors
def assemble_pysb():
"""Assemble INDRA Statements and return PySB model string."""
response = request.body.read().decode('utf-8')
|
Allow CORS on PySB assembler
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@ with open('README.txt') as file:
long_description = file.read()
setup(name="ped_parser",
- version="0.2.6",
+ version="0.3",
author="Mans Magnusson",
author_email="mans.magnusson@scilifelab.se",
license='BSD',
@@ -20,7 +20,8 @@ setup(name="ped_parser",
# scripts=[''],
classifiers = [
"Programming Language :: Python",
- "Development Status :: 3 - Alpha",
+ "Programming Language :: Python :: 3",
+ "Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS :: MacOS X",
"Intended Audience :: Science/Research",
|
Bumped version and added classifiers
|
py
|
diff --git a/pook/mock_engine.py b/pook/mock_engine.py
index <HASH>..<HASH> 100644
--- a/pook/mock_engine.py
+++ b/pook/mock_engine.py
@@ -3,16 +3,27 @@ from .interceptors import interceptors
class MockEngine(object):
"""
- MockEngine implements the built-in `pook` mock engine based on HTTP
+ ``MockEngine`` represents the low-level mocking engine abstraction
+ layer between ``pook`` and the underlying mocking mechanism
+ responsible for intercepting and triggering outgoing HTTP traffic within
+ the Python runtime.
+
+ ``MockEngine`` implements the built-in `pook` mock engine based on HTTP
interceptors strategy.
+
+ Developers can implement and plug in their own ``MockEngine`` in order
+ to fit custom mocking logic needs.
+
+ You can see a custom ``MockEngine`` implementation here:
+ http://bit.ly/2EymMro
- Mock engines must implementent the following methods:
+ Custom mock engines must implement at least the following methods:
- `engine.__init__(self, engine)`
- `engine.activate(self)`
- `engine.disable(self)`
- Mock engines can optionally implement the follow methods:
+ Custom mock engines can optionally implement the following methods:
- `engine.add_interceptors(self, *interceptors)`
- `engine.flush_interceptors(self)`
|
refactor(mock_engine): improve docstring
|
py
|
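Following the contract the docstring spells out, a bare-bones custom engine could look like the sketch below; the method bodies are placeholders for whatever interception mechanism the engine wraps:

class NoopMockEngine(object):
    """Minimal custom engine satisfying pook's required interface."""

    def __init__(self, engine):
        self.engine = engine        # back-reference to pook's core engine
        self.interceptors = []

    def activate(self):
        pass                        # hook the underlying HTTP layer here

    def disable(self):
        pass                        # undo whatever activate() installed

    # optional extension points
    def add_interceptors(self, *interceptors):
        self.interceptors.extend(interceptors)

    def flush_interceptors(self):
        self.interceptors = []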
diff --git a/etrago/appl.py b/etrago/appl.py
index <HASH>..<HASH> 100755
--- a/etrago/appl.py
+++ b/etrago/appl.py
@@ -360,6 +360,7 @@ def run_etrago(args, json_path):
etrago.network.storage_units.capital_cost.fillna(0., inplace=True)
etrago.network.storage_units.p_nom_max.fillna(np.inf, inplace=True)
etrago.network.storage_units.standing_loss.fillna(0., inplace=True)
+ etrago.network.storage_units.lifetime = np.inf
etrago.network.lines.v_ang_min.fillna(0., inplace=True)
etrago.network.links.terrain_factor.fillna(1., inplace=True)
etrago.network.lines.v_ang_max.fillna(1., inplace=True)
|
set storage_units.lifetime to np.inf
|
py
|
diff --git a/varlink/__init__.py b/varlink/__init__.py
index <HASH>..<HASH> 100644
--- a/varlink/__init__.py
+++ b/varlink/__init__.py
@@ -697,7 +697,7 @@ class Scanner:
self.patterns = {
'interface-name': re.compile(r'[a-z]+(\.[a-z0-9][a-z0-9-]*)+'),
'member-name': re.compile(r'\b[A-Z][A-Za-z0-9_]*\b', re.ASCII),
- 'identifier': re.compile(r'\b[A-Za-z0-9_]+\b', re.ASCII),
+ 'identifier': re.compile(r'\b[a-z][A-Za-z0-9_]*\b', re.ASCII),
}
self.string = string
|
identifiers have to start with lowercase
|
py
|
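The behavioral difference between the two patterns, checked on a few made-up tokens:

import re

old = re.compile(r'\b[A-Za-z0-9_]+\b', re.ASCII)
new = re.compile(r'\b[a-z][A-Za-z0-9_]*\b', re.ASCII)

for token in ('fieldName', 'Type', '_private', '9lives'):
    print(token, bool(old.fullmatch(token)), bool(new.fullmatch(token)))
# fieldName True True
# Type      True False  -- uppercase start now rejected
# _private  True False  -- leading underscore rejected
# 9lives    True False  -- leading digit rejected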
diff --git a/src/adafruit_blinka/board/jetson_tx2.py b/src/adafruit_blinka/board/jetson_tx2.py
index <HASH>..<HASH> 100644
--- a/src/adafruit_blinka/board/jetson_tx2.py
+++ b/src/adafruit_blinka/board/jetson_tx2.py
@@ -10,6 +10,7 @@ SCL_1 = pin.SCL_1
D4 = pin.J04
D5 = pin.J06
D6 = pin.AA02
+D7 = pin.N03
D8 = pin.N06
D9 = pin.N04
D10 = pin.N05
|
Jetson TX2: Add missing D7 pin definition
|
py
|
diff --git a/acestream/request.py b/acestream/request.py
index <HASH>..<HASH> 100644
--- a/acestream/request.py
+++ b/acestream/request.py
@@ -92,7 +92,7 @@ class Request(object):
def _parse_json(self, string):
try:
- return json.loads(str(string))
+ return json.loads(string)
except (IOError, ValueError):
return {}
|
request: fix json parsing
|
py
|
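The likely failure mode being fixed: on Python 3 the body can arrive as bytes, and wrapping it in str() produces the literal "b'...'" text, which is not valid JSON; json.loads accepts bytes directly on Python 3.6+. An illustration:

import json

raw = b'{"result": "ok"}'
print(str(raw))              # b'{"result": "ok"}' -- corrupted by str()
try:
    json.loads(str(raw))
except ValueError as err:
    print('broken:', err)
print(json.loads(raw))       # {'result': 'ok'} -- pass the payload straight in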
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -51,5 +51,20 @@ setup(
include_package_data=True,
long_description="""\
Python client for kubernetes http://kubernetes.io/
- """
+ """,
+ classifiers=[
+ "Development Status :: 4 - Alpha",
+ "Environment :: Kubernetes",
+ "Topic :: Utilities",
+ "Intended Audience :: Developers",
+ "Intended Audience :: Information Technology",
+ "License :: OSI Approved :: Apache Software License",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 2",
+ "Programming Language :: Python :: 2.7",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.4",
+ "Programming Language :: Python :: 3.5",
+ ],
)
|
Add classifiers to setup.py for better metadata/information on PyPI
|
py
|
diff --git a/vb_suite/test_perf.py b/vb_suite/test_perf.py
index <HASH>..<HASH> 100755
--- a/vb_suite/test_perf.py
+++ b/vb_suite/test_perf.py
@@ -54,7 +54,6 @@ parser.add_argument('-o', '--output',
metavar="<file>",
dest='log_file',
help='path of file in which to save the report (default: vb_suite.log).')
-args = parser.parse_args()
def get_results_df(db,rev):
from pandas import DataFrame
@@ -207,6 +206,7 @@ def _parse_commit_log(repo_path):
if __name__ == '__main__':
+ args = parser.parse_args()
if not args.auto and not args.base_commit and not args.target_commit:
parser.print_help()
else:
|
CLN: modify test_perf to allow import as a module
|
py
|
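The underlying pattern: parser.parse_args() at module level reads sys.argv on import, which breaks importing the file as a library (and most test runners). Deferring it to the entry point, distilled:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', dest='log_file',
                    help='path of file in which to save the report')

def run(args):
    print('would write report to', args.log_file)

if __name__ == '__main__':
    # parse only when executed as a script, never on import
    run(parser.parse_args())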
diff --git a/multiqc/modules/quast/quast.py b/multiqc/modules/quast/quast.py
index <HASH>..<HASH> 100644
--- a/multiqc/modules/quast/quast.py
+++ b/multiqc/modules/quast/quast.py
@@ -54,11 +54,13 @@ class MultiqcModule(BaseMultiqcModule):
'content': self.quast_contigs_barplot()
})
# Number of genes plot
- self.sections.append({
- 'name': 'Number of Predicted Genes',
- 'anchor': 'quast-genes',
- 'content': self.quast_predicted_genes_barplot()
- })
+ genes_plot = self.quast_predicted_genes_barplot()
+ if genes_plot is not None:
+ self.sections.append({
+ 'name': 'Number of Predicted Genes',
+ 'anchor': 'quast-genes',
+ 'content': genes_plot
+ })
def parse_quast_log(self, f):
lines = f['f'].splitlines()
|
Quast: Don't add genes_predicted section if no plot is created. See #<I>
|
py
|
diff --git a/intranet/apps/users/models.py b/intranet/apps/users/models.py
index <HASH>..<HASH> 100644
--- a/intranet/apps/users/models.py
+++ b/intranet/apps/users/models.py
@@ -350,7 +350,7 @@ class User(AbstractBaseUser):
data = results[0][1]['jpegPhoto'][0]
else:
data = None
- except ldap.NO_SUCH_OBJECT:
+ except (ldap.NO_SUCH_OBJECT, KeyError):
data = None
cache.set(key, data, settings.CACHE_AGE['ldap_permissions'])
|
Allow missing ldap image data
|
py
|
diff --git a/scripts/experiments/run_experiments.py b/scripts/experiments/run_experiments.py
index <HASH>..<HASH> 100644
--- a/scripts/experiments/run_experiments.py
+++ b/scripts/experiments/run_experiments.py
@@ -62,7 +62,7 @@ class DPExpParams(experiment_runner.JavaExpParams):
def get_java_args(self, eprunner):
# Allot the available memory to the JVM, ILP solver, and ZIMPL
total_work_mem_megs = eprunner.work_mem_megs
- if (eprunner.queue != None):
+ if (self.get("parser").startswith("ilp-")):
zimpl_mem = int(total_work_mem_megs * 0.5)
else:
zimpl_mem = 0
|
Refining when Zimpl memory is accounted for
|
py
|
diff --git a/paypal/pro/helpers.py b/paypal/pro/helpers.py
index <HASH>..<HASH> 100644
--- a/paypal/pro/helpers.py
+++ b/paypal/pro/helpers.py
@@ -212,6 +212,22 @@ class PayPalWPP(object):
def refundTransaction(self, params):
raise NotImplementedError
+ def doReferenceTransaction(self, params):
+ """
+ Process a payment from a buyer's account, identified by a previous
+ transaction.
+ The `paymentaction` param defaults to "Sale", but may also contain the
+ values "Authorization" or "Order".
+ """
+ defaults = {"method": "DoReferenceTransaction",
+ "paymentaction": "Sale"}
+ required = ["referenceid", "amt"]
+
+ nvp_obj = self._fetch(params, required, defaults)
+ if nvp_obj.flag:
+ raise PayPalFailure(nvp_obj.flag_info)
+ return nvp_obj
+
def _is_recurring(self, params):
"""Returns True if the item passed is a recurring transaction."""
return 'billingfrequency' in params
|
Added a method to send a DoReferenceTransaction request
|
py
|
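A hedged usage sketch for the new method (the reference ID and amount are placeholders, and the wrapper construction is assumed from the class's usual usage rather than shown in this diff):

from paypal.pro.helpers import PayPalWPP, PayPalFailure  # import path assumed

wpp = PayPalWPP(request)  # assumed setup; request is the current HttpRequest
params = {
    'referenceid': '9XY12345AB6789012',  # ID of an earlier transaction
    'amt': '25.00',
    # 'paymentaction' defaults to 'Sale'; 'Authorization'/'Order' also allowed
}
try:
    nvp = wpp.doReferenceTransaction(params)
    print('charged OK:', nvp)
except PayPalFailure as err:
    print('PayPal rejected the charge:', err)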
diff --git a/playhouse/db_url.py b/playhouse/db_url.py
index <HASH>..<HASH> 100644
--- a/playhouse/db_url.py
+++ b/playhouse/db_url.py
@@ -8,7 +8,10 @@ from playhouse.pool import PooledMySQLDatabase
from playhouse.pool import PooledPostgresqlDatabase
from playhouse.pool import PooledSqliteDatabase
from playhouse.pool import PooledSqliteExtDatabase
-from playhouse.sqlite_ext import SqliteExtDatabase
+try:
+ from playhouse.sqlite_ext import SqliteExtDatabase
+except ImportError:
+ SqliteExtDatabase = None
schemes = {
|
Fix dependency on sqlite_ext in db_url.
|
py
|
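The same optional-dependency idiom in isolation; anything that registers the class can then treat None as "extension unavailable" (a sketch of the idea, not db_url's exact registry logic):

from peewee import SqliteDatabase

try:
    from playhouse.sqlite_ext import SqliteExtDatabase
except ImportError:          # C extension missing; degrade gracefully
    SqliteExtDatabase = None

schemes = {'sqlite': SqliteDatabase, 'sqlite+ext': SqliteExtDatabase}

def db_class_for(scheme):
    cls = schemes.get(scheme)
    if cls is None:
        raise RuntimeError('scheme %r needs an extra that is not installed' % scheme)
    return cls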
diff --git a/quail/load.py b/quail/load.py
index <HASH>..<HASH> 100644
--- a/quail/load.py
+++ b/quail/load.py
@@ -1,11 +1,8 @@
from __future__ import division
from sqlalchemy import create_engine, MetaData, Table
import json
-import math
import re
import csv
-from itertools import izip_longest
-from collections import Counter
import pandas as pd
import numpy as np
from .egg import Egg
|
cleaned up reqs in load function
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@ from setuptools import find_packages, setup
setup(
name='ActionCableZwei',
- version='0.1.7',
+ version='0.1.7.1',
license='MIT',
description='Action Cable Client for Python 3',
author='Tobias Feistmantl',
|
Update version number to <I>
|
py
|
diff --git a/vdw/conf/global_settings.py b/vdw/conf/global_settings.py
index <HASH>..<HASH> 100644
--- a/vdw/conf/global_settings.py
+++ b/vdw/conf/global_settings.py
@@ -42,6 +42,7 @@ INSTALLED_APPS = (
'avocado.export',
'modeltree',
'reversion',
+ 'sts',
)
|
Add sts to INSTALLED_APPS in global_settings
|
py
|
diff --git a/phonopy/unfolding/__init__.py b/phonopy/unfolding/__init__.py
index <HASH>..<HASH> 100644
--- a/phonopy/unfolding/__init__.py
+++ b/phonopy/unfolding/__init__.py
@@ -37,7 +37,10 @@ from phonopy.harmonic.dynmat_to_fc import get_commensurate_points
from phonopy.structure.atoms import Atoms
from phonopy.structure.cells import get_supercell
-class Unfolding:
+from future.utils import implements_iterator
+
+@implements_iterator
+class Unfolding(object):
def __init__(self,
phonon,
supercell_matrix,
@@ -80,8 +83,6 @@ class Unfolding:
self._q_index += 1
return self._weights[self._q_index - 1]
- next=__next__
-
def prepare(self):
self._comm_points = get_commensurate_points(self._supercell_matrix)
self._set_translations()
|
Correct Py2/Py3 iterator in unfolding.
|
py
|
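Background on the decorator: Python 2's iterator protocol calls next() while Python 3 calls __next__(), and future's implements_iterator aliases the former to the latter so the class body only defines __next__. A minimal sketch:

from future.utils import implements_iterator

@implements_iterator
class Countdown(object):
    """Iterate n, n-1, ..., 1 on both Python 2 and 3."""

    def __init__(self, n):
        self._n = n

    def __iter__(self):
        return self

    def __next__(self):   # decorator adds a Py2-compatible .next alias
        if self._n <= 0:
            raise StopIteration
        self._n -= 1
        return self._n + 1

print(list(Countdown(3)))  # [3, 2, 1]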
diff --git a/test/test_sblgnt.py b/test/test_sblgnt.py
index <HASH>..<HASH> 100644
--- a/test/test_sblgnt.py
+++ b/test/test_sblgnt.py
@@ -34,7 +34,7 @@ def test_text():
else:
assert n['left'] is not None and n['right'] is not None
assert text is not None
- hits = text.find('ἐν')
+ hits = text.find(u'ἐν')
assert hits is not None and len(hits) == 8
def __get_testfiles():
|
added unicode flag for test string (needed again since Python <I>)
|
py
|
diff --git a/ipyrad/analysis/bpp.py b/ipyrad/analysis/bpp.py
index <HASH>..<HASH> 100644
--- a/ipyrad/analysis/bpp.py
+++ b/ipyrad/analysis/bpp.py
@@ -1547,7 +1547,7 @@ class Bpp(object):
# do not allow any tips in node_dists:
for nidx in node_dists:
- if node in ttre.idx_dict[nidx].is_leaf():
+ if ttre.idx_dict[nidx].is_leaf():
raise IPyradError(
"error in node_dists: cannot plot div time for tip nodes")
|
node_dists check for tips in bpp plot
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -49,6 +49,7 @@ def test_packages():
line.strip() for line in open(test_reqs).readlines()
if not line.startswith("#")
]
+ return tests_require
setup(name='httpretty',
version=version,
@@ -57,5 +58,9 @@ setup(name='httpretty',
author_email='gabriel@nacaolivre.org',
url='http://github.com/gabrielfalcao/httpretty',
packages=get_packages(),
- tests_require=test_packages()
+ tests_require=test_packages(),
+ license='MIT',
+ classifiers=["Intended Audience :: Developers",
+ "License :: OSI Approved :: MIT License",
+ "Topic :: Software Development :: Testing"],
)
|
Clarify project license and other setup fixes. It is important that the license is correctly set and attributed; also add a couple of other classifiers and small fixes while there.
|
py
|
diff --git a/thinc/extra/load_nlp.py b/thinc/extra/load_nlp.py
index <HASH>..<HASH> 100644
--- a/thinc/extra/load_nlp.py
+++ b/thinc/extra/load_nlp.py
@@ -22,6 +22,6 @@ def get_vectors(ops, lang):
vectors = numpy.zeros((nV, nM), dtype='float32')
for lex in nlp.vocab:
if lex.has_vector:
- vectors[lex.rank] = lex.vector / lex.vector_norm
+ vectors[lex.rank] = lex.vector / (lex.vector_norm+1e-8)
VECTORS[key] = ops.asarray(vectors)
return VECTORS[key]
|
Fix divide by zero error in vectors loading
|
py
|
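The fix in isolation: a lexeme with no vector has vector_norm == 0, so the bare division produced NaNs; adding a tiny epsilon keeps the division finite while leaving genuine vectors essentially unchanged. A NumPy-only sketch:

import numpy as np

def safe_unit(vec, eps=1e-8):
    """Scale a vector to (approximately) unit norm, tolerating all-zero input."""
    return vec / (np.linalg.norm(vec) + eps)

print(safe_unit(np.zeros(4)))            # [0. 0. 0. 0.] rather than nan
print(safe_unit(np.array([3.0, 4.0])))   # ~[0.6 0.8]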
diff --git a/sendgrid/sendgrid.py b/sendgrid/sendgrid.py
index <HASH>..<HASH> 100644
--- a/sendgrid/sendgrid.py
+++ b/sendgrid/sendgrid.py
@@ -103,6 +103,7 @@ class SendGridClient(object):
data = urlencode(self._build_body(message), True).encode('utf-8')
req = urllib_request.Request(self.mail_url, data)
req.add_header('User-Agent', self.useragent)
+ req.add_header('Accept', '*/*')
if self.username is None:
# Using API key
|
Fix cause of HTTP <I> responses
|
py
|
diff --git a/hedgehog/protocol/messages/vision.py b/hedgehog/protocol/messages/vision.py
index <HASH>..<HASH> 100644
--- a/hedgehog/protocol/messages/vision.py
+++ b/hedgehog/protocol/messages/vision.py
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional, Sequence, Tuple, Union
+from typing import Any, Dict, Optional, Set, Tuple, Union
from dataclasses import dataclass
from . import RequestMsg, ReplyMsg, Message, SimpleMessage
@@ -141,7 +141,7 @@ class UpdateChannelAction(Message):
@protobuf.message(vision_pb2.VisionChannelMessage, 'vision_channel_message', fields=('keys',))
@dataclass(frozen=True, repr=False)
class DeleteChannelAction(Message):
- keys: Sequence[str]
+ keys: Set[str]
def __post_init__(self):
# <default GSL customizable: DeleteChannelAction-init-validation>
@@ -161,7 +161,7 @@ class DeleteChannelAction(Message):
@protobuf.message(vision_pb2.VisionChannelMessage, 'vision_channel_message', fields=('keys',))
@dataclass(frozen=True, repr=False)
class ChannelRequest(Message):
- keys: Sequence[str]
+ keys: Set[str]
def __post_init__(self):
# <default GSL customizable: ChannelRequest-init-validation>
|
change keys type declaration to Set[str]
|
py
|
diff --git a/barf/barf/barf.py b/barf/barf/barf.py
index <HASH>..<HASH> 100644
--- a/barf/barf/barf.py
+++ b/barf/barf/barf.py
@@ -145,6 +145,7 @@ class BARF(object):
self.smt_translator = SmtTranslator(self.smt_solver, self.arch_info.address_size)
self.ir_emulator.set_arch_registers(self.arch_info.registers_gp_all)
+ self.ir_emulator.set_arch_flags(self.arch_info.registers_flags)
self.ir_emulator.set_arch_registers_size(self.arch_info.registers_size)
self.ir_emulator.set_arch_alias_mapper(self.arch_info.alias_mapper)
|
Fix missing emulator native flags setup.
|
py
|
diff --git a/python/ray/tests/test_placement_group_3.py b/python/ray/tests/test_placement_group_3.py
index <HASH>..<HASH> 100644
--- a/python/ray/tests/test_placement_group_3.py
+++ b/python/ray/tests/test_placement_group_3.py
@@ -700,5 +700,16 @@ def test_placement_group_local_resource_view(monkeypatch, ray_start_cluster):
ray.get(trainer.train.remote())
+def test_fractional_resources_handle_correct(ray_start_cluster):
+ cluster = ray_start_cluster
+ cluster.add_node(num_cpus=1000)
+ ray.init(address=cluster.address)
+
+ bundles = [{"CPU": 0.01} for _ in range(5)]
+ pg = placement_group(bundles, strategy="SPREAD")
+
+ ray.get(pg.ready(), timeout=10)
+
+
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
|
[Placement Group] [Test] Add fractional resources test for placement group (#<I>)

* add fractional resources test
* lint
|
py
|
diff --git a/src/urh/dev/VirtualDevice.py b/src/urh/dev/VirtualDevice.py
index <HASH>..<HASH> 100644
--- a/src/urh/dev/VirtualDevice.py
+++ b/src/urh/dev/VirtualDevice.py
@@ -6,6 +6,7 @@ from PyQt5.QtCore import pyqtSignal, QObject
from urh.dev import config
from urh.dev.BackendHandler import Backends, BackendHandler
+from urh.dev.native.AirSpy import AirSpy
from urh.dev.native.Device import Device
from urh.plugins.NetworkSDRInterface.NetworkSDRInterfacePlugin import NetworkSDRInterfacePlugin
from urh.util.Logger import logger
@@ -92,6 +93,8 @@ class VirtualDevice(QObject):
elif name == "limesdr":
from urh.dev.native.LimeSDR import LimeSDR
self.__dev = LimeSDR(freq, gain, sample_rate, bandwidth, gain, is_ringbuffer=is_ringbuffer)
+ elif name.startswith("airspy"):
+ self.__dev = AirSpy(freq, sample_rate, bandwidth, gain, if_gain, baseband_gain, is_ringbuffer=is_ringbuffer)
else:
raise NotImplementedError("Native Backend for {0} not yet implemented".format(name))
|
consider airspy in virtual device
|
py
|