diff
stringlengths 139
3.65k
| message
stringlengths 8
627
| diff_languages
stringclasses 1
value |
|---|---|---|
diff --git a/kitnirc/client.py b/kitnirc/client.py
index <HASH>..<HASH> 100644
--- a/kitnirc/client.py
+++ b/kitnirc/client.py
@@ -401,7 +401,7 @@ class Client(object):
def part(self, target, message=None):
"""Part a channel."""
- if target not in self.server.channels:
+ if str(target) not in self.server.channels:
_log.warning("Ignoring request to part channel '%s' because we "
"are not in that channel.", target)
return
@@ -437,7 +437,7 @@ class Client(object):
(Values for modes which do not take arguments are ignored.)
"""
- if channel not in self.server.channels:
+ if str(channel) not in self.server.channels:
_log.warning("Ignoring request to set modes in channel '%s' "
"because we are not in that channel.", channel)
return
|
String-ify before checking .channels membership. This allows passing in `Channel` objects to work properly.
|
py
|
diff --git a/src/views.py b/src/views.py
index <HASH>..<HASH> 100644
--- a/src/views.py
+++ b/src/views.py
@@ -2221,10 +2221,11 @@ class GenList(GenBase, ListView):
# Add adapted columns
a['columns']=[]
for column in context['columns']:
- # Repair the name
- column['name']=gettext(column['name'])
- # Save the column
- a['columns'].append(column)
+ if column['name']:
+ # Repair the name
+ column['name']=gettext(column['name'])
+ # Save the column
+ a['columns'].append(column)
# Remember ordering
ordering={}
|
Columns without label are allowed in the lists
|
py
|
diff --git a/nyamuk/nyamuk.py b/nyamuk/nyamuk.py
index <HASH>..<HASH> 100644
--- a/nyamuk/nyamuk.py
+++ b/nyamuk/nyamuk.py
@@ -12,6 +12,7 @@ import nyamuk_net
class Nyamuk(base_nyamuk.BaseNyamuk):
def __init__(self, id):
base_nyamuk.BaseNyamuk.__init__(self, id)
+ self.in_pub_msg = [] #incoming publish message
def loop(self, timeout = 1):
rlist = [self.sock]
@@ -296,11 +297,14 @@ class Nyamuk(base_nyamuk.BaseNyamuk):
message.timestamp = time.time()
qos = message.msg.qos
+
if qos == 0:
if self.on_message is not None:
self.in_callback = True
self.on_message(self, message.msg)
self.in_callback = False
+ else:
+ self.in_pub_msg.append(message.msg)
return MV.ERR_SUCCESS
elif qos == 1 or qos == 2:
|
add incoming publish message to its list if there is no on_message callback
|
py
|
diff --git a/salt/utils/templates.py b/salt/utils/templates.py
index <HASH>..<HASH> 100644
--- a/salt/utils/templates.py
+++ b/salt/utils/templates.py
@@ -20,7 +20,9 @@ import jinja2.ext
# Import salt libs
import salt.utils
-from salt.exceptions import SaltRenderError
+from salt.exceptions import (
+ SaltRenderError, CommandExecutionError, SaltInvocationError
+)
from salt.utils.jinja import ensure_sequence_filter
from salt.utils.jinja import SaltCacheLoader as JinjaSaltCacheLoader
from salt.utils.jinja import SerializerExtension as JinjaSerializerExtension
@@ -285,7 +287,17 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None):
exc, out),
line,
tmplstr)
- except Exception, exc:
+ except (SaltInvocationError, CommandExecutionError) as exc:
+ trace = traceback.extract_tb(sys.exc_info()[2])
+ line, out = _get_jinja_error(trace, context=unicode_context)
+ if not line:
+ tmplstr = ''
+ raise SaltRenderError(
+ 'Problem running salt function in Jinja template: {0}{1}'.format(
+ exc, out),
+ line,
+ tmplstr)
+ except Exception as exc:
tracestr = traceback.format_exc()
trace = traceback.extract_tb(sys.exc_info()[2])
line, out = _get_jinja_error(trace, context=unicode_context)
|
Improve jinja error reporting Salt execution functions routinely raise SaltInvocationError and CommandExecutionError exceptions. This commit catches them in the jinja renderer and provides a nicely-formatted error message, preventing the traceback from being in the render error that is printed to the CLI.
|
py
|
diff --git a/pyte/screens.py b/pyte/screens.py
index <HASH>..<HASH> 100644
--- a/pyte/screens.py
+++ b/pyte/screens.py
@@ -124,6 +124,10 @@ class Screen(list):
1-indexed**, so, for instance ``ESC [ 10;10 f`` really means
-- move cursor to position (9, 9) in the display matrix.
+ .. versionchanged:: 0.4.7
+
+ :data:`~pyte.modes.LNM` is reset by default.
+
.. seealso::
`Standard ECMA-48, Section 6.1.1 \
|
Added a note on `pyte.modes.LNM` to `Screen` docstring
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,8 @@
import os
import sys
+# Temporary patch for issue reported here:
+# https://groups.google.com/forum/#!topic/nose-users/fnJ-kAUbYHQ
+import multiprocessing # TODO: Remove when Travis-CI updates 2.7 to 2.7.4+
__DIR__ = os.path.abspath(os.path.dirname(__file__))
import codecs
from setuptools import setup
|
Temporary patch for <I> multiprocessing bug
|
py
|
diff --git a/openquake/calculators/event_based_risk.py b/openquake/calculators/event_based_risk.py
index <HASH>..<HASH> 100644
--- a/openquake/calculators/event_based_risk.py
+++ b/openquake/calculators/event_based_risk.py
@@ -160,7 +160,7 @@ class EbrCalculator(base.RiskCalculator):
is_stochastic = True
precalc = 'event_based'
accept_precalc = ['event_based', 'event_based_risk', 'ucerf_hazard',
- 'ebrisk']
+ 'ebrisk', 'event_based_advanced']
def pre_execute(self):
oq = self.oqparam
|
Add the new calculator as an accepted precalculator for event_based_risk
|
py
|
diff --git a/satpy/dependency_tree.py b/satpy/dependency_tree.py
index <HASH>..<HASH> 100644
--- a/satpy/dependency_tree.py
+++ b/satpy/dependency_tree.py
@@ -185,9 +185,9 @@ class DependencyTree(Tree):
"""Update 'name' property of a node and any related metadata."""
old_name = node.name
assert old_name in self._all_nodes
+ del self._all_nodes[old_name]
node.update_name(new_name)
self._all_nodes[new_name] = node
- del self._all_nodes[old_name]
def populate_with_keys(self, dataset_keys: set, query=None):
"""Populate the dependency tree.
|
Fix dependency tree node name when the new name is the same as old
|
py
|
diff --git a/tests/unit/modules/pillar_test.py b/tests/unit/modules/pillar_test.py
index <HASH>..<HASH> 100644
--- a/tests/unit/modules/pillar_test.py
+++ b/tests/unit/modules/pillar_test.py
@@ -16,6 +16,7 @@ from salttesting.mock import (
ensure_in_syspath('../../')
# Import Salt libs
+import salt.ext.six as six
from salt.utils.odict import OrderedDict
from salt.modules import pillar as pillarmod
@@ -52,7 +53,10 @@ class PillarModuleTestCase(TestCase):
@skipIf(NO_MOCK, NO_MOCK_REASON)
@patch('salt.modules.pillar.items', MagicMock(return_value=pillar_value_1))
def test_ls(self):
- self.assertEqual(pillarmod.ls(), ['a', 'b'])
+ if six.PY3:
+ self.assertCountEqual(pillarmod.ls(), ['a', 'b'])
+ else:
+ self.assertEqual(pillarmod.ls(), ['a', 'b'])
# gracinet: not sure this is really useful, but other test modules have this as well
|
Use assertCountEqual instead of assertEqual for lists in Py3
|
py
|
diff --git a/proso_concepts/admin.py b/proso_concepts/admin.py
index <HASH>..<HASH> 100644
--- a/proso_concepts/admin.py
+++ b/proso_concepts/admin.py
@@ -18,5 +18,5 @@ class ConceptAdmin(admin.ModelAdmin):
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
- list_display = ('type', 'value')
- search_fields = ('type', 'value')
+ list_display = ('type', 'value', 'lang')
+ search_fields = ('type', 'value', 'lang')
|
concepts - update Tags in admin
|
py
|
diff --git a/simple_settings/special_settings.py b/simple_settings/special_settings.py
index <HASH>..<HASH> 100644
--- a/simple_settings/special_settings.py
+++ b/simple_settings/special_settings.py
@@ -23,7 +23,8 @@ def override_settings_by_env(settings_dict):
if not settings_dict[SPECIAL_SETTINGS_KEY]['OVERRIDE_BY_ENV']:
return
for key, value in settings_dict.items():
- settings_dict[key] = os.environ.get(key, value)
+ if key != SPECIAL_SETTINGS_KEY:
+ settings_dict[key] = os.environ.get(key, value)
SPECIAL_SETTINGS_MAPPING = {
|
Refs #<I> - Do not update special settings key from ENV
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -14,9 +14,10 @@ setup(name='python-for-android',
entry_points={
'console_scripts': [
'python-for-android = pythonforandroid.toolchain:ToolchainCL',
+ 'p4a = pythonforandroid.toolchain:ToolchainCL',
],
'distutils.commands': [
- 'apktest = pythonforandroid.bdist_apk:BdistAPK',
+ 'bdist_apk = pythonforandroid.bdist_apk:BdistAPK',
],
},
)
|
Added p4a console_script in setup.py
|
py
|
diff --git a/runcommands/args.py b/runcommands/args.py
index <HASH>..<HASH> 100644
--- a/runcommands/args.py
+++ b/runcommands/args.py
@@ -190,7 +190,9 @@ class Arg:
container = tuple
if type is None:
- if container is None:
+ if isinstance(choices, builtins.type) and issubclass(choices, Enum):
+ type = choices
+ elif container is None:
if default not in (None, parameter.empty):
type = default.__class__
else:
|
Allow arg choices to be specified as an enum This seems more intuitive than using `type` and allows a `type` to be specified along with `choices` when `choices` is an enum (maybe an unlikely need, but who knows).
|
py
|
diff --git a/sentinelhub/constants.py b/sentinelhub/constants.py
index <HASH>..<HASH> 100644
--- a/sentinelhub/constants.py
+++ b/sentinelhub/constants.py
@@ -494,6 +494,23 @@ class MimeType(Enum):
return self.value
return mimetypes.types_map['.' + self.value]
+ def get_sample_type(self):
+ """ Returns sampleType used in Sentinel-Hub evalscripts.
+
+ :return: sampleType
+ :rtype: str
+ :raises: ValueError
+ """
+ try:
+ return {
+ MimeType.TIFF: 'INT16',
+ MimeType.TIFF_d8: 'INT8',
+ MimeType.TIFF_d16: 'INT16',
+ MimeType.TIFF_d32f: 'FLOAT32'
+ }[self]
+ except IndexError:
+ raise ValueError('Type {} is not supported by this method'.format(self))
+
def get_expected_max_value(self):
""" Returns max value of image `MimeType` format and raises an error if it is not an image format
|
Function to return sample type used in Sentinel-Hub evalscripts.
|
py
|
diff --git a/examples/server/sanic/app.py b/examples/server/sanic/app.py
index <HASH>..<HASH> 100644
--- a/examples/server/sanic/app.py
+++ b/examples/server/sanic/app.py
@@ -4,7 +4,7 @@ from sanic.response import html
import socketio
sio = socketio.AsyncServer(async_mode='sanic')
-app = Sanic()
+app = Sanic(name='sanic_application')
sio.attach(app)
|
Add application name to Sanic example (#<I>)
|
py
|
diff --git a/tests/test_trie.py b/tests/test_trie.py
index <HASH>..<HASH> 100644
--- a/tests/test_trie.py
+++ b/tests/test_trie.py
@@ -69,9 +69,3 @@ def test_hash_backoff(tree):
assert tree(1, 5) == 3
assert tree(1, 10) == 4
assert tree.longest_node == 11
-
-
-def test_fmt_line():
- assert _fmt_line(15, [10]) == b'15\t10\n'
- assert _fmt_line(15, [10, 1]) == b'15\t10\t1\n'
- assert _fmt_line(5, [20, 2]) == b'5\t20\t2\n'
|
* Remove serialization test, as now use ujson
|
py
|
diff --git a/openfisca_core/populations/group_population.py b/openfisca_core/populations/group_population.py
index <HASH>..<HASH> 100644
--- a/openfisca_core/populations/group_population.py
+++ b/openfisca_core/populations/group_population.py
@@ -267,7 +267,6 @@ class GroupPopulation(Population):
# The map is needed b/c the order of the nth persons of each household in the persons vector is not necessarily the same than the household order.
result[nb_persons_per_entity > n] = array[members_map][positions[members_map] == n]
- # Preserve Enum dtype
if isinstance(array, EnumArray):
result = EnumArray(result, array.possible_values)
|
Update openfisca_core/populations/group_population.py
|
py
|
diff --git a/scout/server/blueprints/panels/views.py b/scout/server/blueprints/panels/views.py
index <HASH>..<HASH> 100644
--- a/scout/server/blueprints/panels/views.py
+++ b/scout/server/blueprints/panels/views.py
@@ -95,9 +95,11 @@ def gene_edit(panel_id, hgnc_id):
panel_gene = controllers.existing_gene(store, panel_obj, hgnc_id)
form = PanelGeneForm()
- transcript_choices = [(transcript['refseq_id'], transcript['refseq_id'])
- for transcript in hgnc_gene['transcripts']
- if transcript.get('refseq_id')]
+ transcript_choices = []
+ for transcript in hgnc_gene['transcripts']:
+ if transcript.get('refseq_ids'):
+ for refseq_id in transcript['refseq_ids']:
+ transcript_choices.append((refseq_id, refseq_id))
form.disease_associated_transcripts.choices = transcript_choices
if form.validate_on_submit():
|
fix issue with adding transcripts to panel genes
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,9 @@ import setuptools
setup(
name='amptrac',
version='0.0',
- description='',
+ url='https://github.com/tomprince/amptrac',
+ description="Client for twisted's amp interface to trac",
+ license='MIT',
author='Tom Prince',
author_email='tom.prince@ualberta.net',
packages=['amptrac', 'amptrac.scripts', 'amptrac.test'],
|
Update license in setup.py.
|
py
|
diff --git a/intranet/apps/printing/views.py b/intranet/apps/printing/views.py
index <HASH>..<HASH> 100644
--- a/intranet/apps/printing/views.py
+++ b/intranet/apps/printing/views.py
@@ -175,6 +175,7 @@ def check_page_range(page_range, max_pages):
pages += range_high - range_low + 1
else:
+ single_range = int(single_range)
if single_range <= 0 or single_range > max_pages: # check in page range
return False
|
fix(printing): convert page to int
|
py
|
diff --git a/tests/test_file.py b/tests/test_file.py
index <HASH>..<HASH> 100644
--- a/tests/test_file.py
+++ b/tests/test_file.py
@@ -58,8 +58,12 @@ def test_ini_to_dict():
@patch("runez.open", side_effect=Exception)
@patch("os.path.exists", return_value=True)
@patch("os.path.isfile", return_value=True)
+@patch("os.path.getsize", return_value=10)
def test_failure(*_):
with runez.CaptureOutput() as logged:
+ assert runez.readlines("bar") is None
+ assert not logged
+
assert runez.write("bar", "some content", fatal=False)
assert "Can't write" in logged.pop()
|
Restored readlines() failure test
|
py
|
diff --git a/gwpy/timeseries/core.py b/gwpy/timeseries/core.py
index <HASH>..<HASH> 100644
--- a/gwpy/timeseries/core.py
+++ b/gwpy/timeseries/core.py
@@ -408,7 +408,7 @@ class TimeSeries(Series):
stepseries *= win
# calculated FFT, weight, and stack
fft_ = stepseries.fft(nfft=nfft) * scaling
- ffts[i] = fft_.data
+ ffts.data[i,:] = fft_.data
idx += (nfft - noverlap)
mean = ffts.mean(0)
mean.name = self.name
|
TimeSeries.average_fft: fixed bug in data setting
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -6,8 +6,6 @@ import sys
from setuptools import find_packages, setup
-# from m2r import convert
-
# Package meta-data.
NAME = 'hatesonar'
DESCRIPTION = 'Hate Speech Detection Library for Python'
@@ -35,6 +33,7 @@ setup(
version='0.0.5',
description=DESCRIPTION,
long_description=long_description,
+ long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
url=URL,
|
Update setup.py to use markdown as is
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -39,7 +39,6 @@ setup(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
- 'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
|
Removed tag for Python <I> (#<I>) Python <I> could not be supported since neither mypy nor pylint support Python <I> anymore. Python <I> reached end-of-life in <I>, so more dependencies are probably affected as well. This change simply removes the corresponding tag in the distribution. The continuous integration only tested for >= <I> Python versions anyhow.
|
py
|
diff --git a/salt/states/boto_elasticache.py b/salt/states/boto_elasticache.py
index <HASH>..<HASH> 100644
--- a/salt/states/boto_elasticache.py
+++ b/salt/states/boto_elasticache.py
@@ -212,7 +212,12 @@ def present(
if not security_group_ids:
security_group_ids = []
_security_group_ids = __salt__['boto_secgroup.convert_to_group_ids'](
- cache_security_group_names, vpc_id, region, key, keyid, profile
+ groups=cache_security_group_names,
+ vpc_id=vpc_id,
+ region=region,
+ key=key,
+ keyid=keyid,
+ profile=profile
)
security_group_ids.extend(_security_group_ids)
cache_security_group_names = None
|
call to boto_secgroup.convert_to_group_ids should be by keyword args, the earlier code would end up with incorrect parameters being used with region being mapped to vpc_name.
|
py
|
diff --git a/delocate/tools.py b/delocate/tools.py
index <HASH>..<HASH> 100644
--- a/delocate/tools.py
+++ b/delocate/tools.py
@@ -375,7 +375,7 @@ def dir2zip(in_dir, zip_fname):
info = zipfile.ZipInfo(in_fname)
info.filename = relpath(in_fname, in_dir)
# See https://stackoverflow.com/questions/434641/how-do-i-set-permissions-attributes-on-a-file-in-a-zip-file-using-pythons-zip/48435482#48435482
- info.external_attr = chmod_perms(in_fname) << 16
+ info.external_attr = (chmod_perms(in_fname) | stat.S_IFREG) << 16
with open_readable(in_fname, 'rb') as fobj:
contents = fobj.read()
z.writestr(info, contents, zipfile.ZIP_DEFLATED)
|
set S_IFREG (regular file) in ZipInfo.external_attr Fixes <URL> in the ZipInfo 'external_attr'. If we don't do that, then upon installing the wheel with pip, any files that had executable flags (e.g. embedded executables) in the wheel will lose them. Pip restores the execute permissions when uncompressing the wheel only if stat.S_ISREG(mode) <URL>
|
py
|
diff --git a/splunklib/client.py b/splunklib/client.py
index <HASH>..<HASH> 100644
--- a/splunklib/client.py
+++ b/splunklib/client.py
@@ -1872,7 +1872,7 @@ class StoragePasswords(Collection):
name = UrlEncoded(realm, encode_slash=True) + ":" + UrlEncoded(username, encode_slash=True)
# Append the : expected at the end of the name
- if name[-1] is not ":":
+ if name[-1] != ":":
name = name + ":"
return Collection.delete(self, name)
|
Fix SyntaxWarning over comparison of literals using is
|
py
|
diff --git a/spyderlib/widgets/externalshell/sitecustomize.py b/spyderlib/widgets/externalshell/sitecustomize.py
index <HASH>..<HASH> 100644
--- a/spyderlib/widgets/externalshell/sitecustomize.py
+++ b/spyderlib/widgets/externalshell/sitecustomize.py
@@ -324,18 +324,17 @@ if matplotlib is not None:
if mpl_ion.lower() == "true":
matplotlib.rcParams['interactive'] = True
- # Setting the user defined backend
- matplotlib.use(mpl_backend)
-
- # Setting the right input hook according to mpl_backend,
- # but only for our Python consoles
- # IMPORTANT NOTE: Don't try to abstract the steps to set a PyOS
- # input hook callback in a function. It will *crash* the
- # interpreter!!
if os.environ.get("IPYTHON_KERNEL", "").lower() != "true":
import ctypes
from spyderlib.widgets.externalshell import inputhooks
+ # Setting the user defined backend
+ matplotlib.use(mpl_backend)
+
+ # Setting the right input hook according to mpl_backend,
+ # IMPORTANT NOTE: Don't try to abstract the steps to set a PyOS
+ # input hook callback in a function. It will *crash* the
+ # interpreter!!
if mpl_backend == "Qt4Agg" and os.name == 'nt' and \
monitor is not None:
# Removing PyQt4 input hook which is not working well on
|
sitecustomize: Only set the mpl backend for our Python consoles
|
py
|
diff --git a/tests/tests.py b/tests/tests.py
index <HASH>..<HASH> 100644
--- a/tests/tests.py
+++ b/tests/tests.py
@@ -990,7 +990,7 @@ class TestAssets(unittest.TestCase):
pass
def test_assets_set_asset(self):
- assets_set_asset = self.rocket.assets_set_asset(asset_name='logo', file='logo.png').json()
+ assets_set_asset = self.rocket.assets_set_asset(asset_name='logo', file='tests/logo.png').json()
self.assertTrue(assets_set_asset.get('success'))
def test_assets_unset_asset(self):
|
Use absolute filename for the logo file
|
py
|
diff --git a/testing/wd_wrapper.py b/testing/wd_wrapper.py
index <HASH>..<HASH> 100644
--- a/testing/wd_wrapper.py
+++ b/testing/wd_wrapper.py
@@ -25,7 +25,7 @@ class WorkDir:
return do(cmd, self.cwd)
- def write(self, name: str, content: "str | bytes", /, **kw: object) -> Path:
+ def write(self, name: str, content: "str | bytes", **kw: object) -> Path:
path = self.cwd / name
if kw:
assert isinstance(content, str)
|
restore python <I> support
|
py
|
diff --git a/python/ray/tests/test_client.py b/python/ray/tests/test_client.py
index <HASH>..<HASH> 100644
--- a/python/ray/tests/test_client.py
+++ b/python/ray/tests/test_client.py
@@ -1,3 +1,4 @@
+import os
import pytest
import time
import sys
@@ -417,15 +418,16 @@ def test_basic_named_actor(ray_start_regular_shared):
def test_error_serialization(ray_start_regular_shared):
"""Test that errors will be serialized properly."""
- with pytest.raises(PermissionError):
+ fake_path = os.path.join(os.path.dirname(__file__), "not_a_real_file")
+ with pytest.raises(FileNotFoundError):
with ray_start_client_server() as ray:
@ray.remote
def g():
- with open("/dev/asdf", "w") as f:
- f.write("HI")
+ with open(fake_path, "r") as f:
+ f.read()
- # Raises a PermissionError
+ # Raises a FileNotFoundError
ray.get(g.remote())
|
[Client] Test Serialization in a platform independent way. (#<I>)
|
py
|
diff --git a/src/ocrmypdf/_weave.py b/src/ocrmypdf/_weave.py
index <HASH>..<HASH> 100644
--- a/src/ocrmypdf/_weave.py
+++ b/src/ocrmypdf/_weave.py
@@ -21,6 +21,7 @@ from itertools import groupby
import pikepdf
from .helpers import flatten_groups, page_number
+from .exec import tesseract
def _update_page_resources(*, page, font, font_key, procset):
@@ -54,6 +55,16 @@ def _weave_layers_graft(
pdf_text = pikepdf.open(text)
pdf_text_contents = pdf_text.pages[0].Contents.read_bytes()
+ if not tesseract.has_textonly_pdf():
+ # If we don't have textonly_pdf, edit the stream to delete the
+ # instruction to draw the image Tesseract generated, which we do not
+ # use.
+ stream = bytearray(pdf_text_contents)
+ pattern = b'/Im1 Do'
+ idx = stream.find(pattern)
+ stream[idx:(idx + len(pattern))] = b' ' * len(pattern)
+ pdf_text_contents = bytes(stream)
+
base_page = pdf_base.pages.p(page_num)
# The text page always will be oriented up by this stage but the original
|
weave: if we don't have textonly_pdf, delete instruction to draw image
|
py
|
diff --git a/bcbio/variation/genotype.py b/bcbio/variation/genotype.py
index <HASH>..<HASH> 100644
--- a/bcbio/variation/genotype.py
+++ b/bcbio/variation/genotype.py
@@ -510,6 +510,7 @@ def variantcall_sample(data, region=None, out_file=None):
"cortex": cortex.run_cortex,
"samtools": samtools.run_samtools,
"varscan": varscan.run_varscan,
+ "varscan-paired": varscan.run_varscan_paired,
"mutect": mutect.mutect_caller}
sam_ref = data["sam_ref"]
config = data["config"]
|
Add the paired VarScan call to the available callers
|
py
|
diff --git a/nodeshot/conf/project_template/project_name/settings.py b/nodeshot/conf/project_template/project_name/settings.py
index <HASH>..<HASH> 100644
--- a/nodeshot/conf/project_template/project_name/settings.py
+++ b/nodeshot/conf/project_template/project_name/settings.py
@@ -43,6 +43,8 @@ DATABASES = {
#}
}
+POSTGIS_VERSION = (2, 1)
+
# sentry integration
#RAVEN_CONFIG = {
# 'dsn': 'https://<api-public-key>:<api-secret-key>@<sentry.host>/<id>?timeout=5&verify_ssl=0',
|
Added POSTGIS_VERSION to project_template settings.py
|
py
|
diff --git a/pybliometrics/scopus/abstract_retrieval.py b/pybliometrics/scopus/abstract_retrieval.py
index <HASH>..<HASH> 100644
--- a/pybliometrics/scopus/abstract_retrieval.py
+++ b/pybliometrics/scopus/abstract_retrieval.py
@@ -168,7 +168,7 @@ class AbstractRetrieval(Retrieval):
return ((start['@year'], start['@month'], start['@day']),
(end['@year'], end['@month'], end['@day']))
else:
- return ((None, None, None), (None, None, None))
+ return None
@property
def conflocation(self):
|
Return None as confdate instead of list with tuples with Nones
|
py
|
diff --git a/openquake/calculators/base.py b/openquake/calculators/base.py
index <HASH>..<HASH> 100644
--- a/openquake/calculators/base.py
+++ b/openquake/calculators/base.py
@@ -435,10 +435,7 @@ class HazardCalculator(BaseCalculator):
else: # we are in a basic calculator
self.read_inputs()
if hasattr(self, 'sitecol'):
- if 'scenario' in self.oqparam.calculation_mode:
- self.datastore['sitecol'] = self.sitecol
- else:
- self.datastore['sitecol'] = self.sitecol.complete
+ self.datastore['sitecol'] = self.sitecol.complete
self.param = {} # used in the risk calculators
if 'gmfs' in self.oqparam.inputs:
save_gmfs(self)
|
Fix an extract bug discovered by Catalina [skip hazardlib]
|
py
|
diff --git a/LiSE/LiSE/allegedb/__init__.py b/LiSE/LiSE/allegedb/__init__.py
index <HASH>..<HASH> 100644
--- a/LiSE/LiSE/allegedb/__init__.py
+++ b/LiSE/LiSE/allegedb/__init__.py
@@ -980,9 +980,7 @@ class ORM(object):
windows.append((branch0, turn_from, tick_from, turn0, tick0))
break
else:
- assert not windows
- assert branch_from == branch_to
- return [(branch_from, turn_from, tick_from, turn_to, tick_to)]
+ raise HistoryError("Couldn't build sensible loading windows")
return windows
@world_locked
|
Fail loudly if _build_loading_windows' assumptions are violated
|
py
|
diff --git a/src/ossos/core/setup.py b/src/ossos/core/setup.py
index <HASH>..<HASH> 100644
--- a/src/ossos/core/setup.py
+++ b/src/ossos/core/setup.py
@@ -5,7 +5,6 @@ from setuptools import setup, find_packages
dependencies = ['requests >= 2.7',
'astropy >= 4.0',
'vos >= 3.0',
- 'ephem',
'numpy >= 1.6.1',
'matplotlib',
'd2to1 >= 0.2.10',
|
remove ephem dependency as not needed for pipeline and trouble...
|
py
|
diff --git a/glue/ligolw/metaio.py b/glue/ligolw/metaio.py
index <HASH>..<HASH> 100644
--- a/glue/ligolw/metaio.py
+++ b/glue/ligolw/metaio.py
@@ -1,3 +1,4 @@
+import numarray
import re
import sys
from xml import sax
@@ -124,6 +125,9 @@ class Table(ligolw.Table):
raise ligolw.ElementError, "Stream name %s does not match Table name %s" % (child.getAttribute("Name"), self.getAttribute("Name"))
ligolw.Table.appendChild(self, child)
+ def getColumnArray(self, colname):
+ return numarray.asarray([getattr(row, colname) for row in self.rows])
+
class LIGOLWContentHandler(ligolw.LIGOLWContentHandler):
"""
|
Add a method to extract a column of numeric data as a numarray array.
|
py
|
diff --git a/salt/grains/core.py b/salt/grains/core.py
index <HASH>..<HASH> 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -1260,6 +1260,13 @@ def os_data():
grains['osfullname'] = \
grains.get('lsb_distrib_id', osname).strip()
if 'osrelease' not in grains:
+ # NOTE: This is a workaround for CentOS 7 os-release bug
+ # https://bugs.centos.org/view.php?id=8359
+ # /etc/os-release contains no minor distro release number so we fall back to parse
+ # /etc/centos-release file instead.
+ # Commit introducing this comment should be reverted after the upstream bug is released.
+ if 'CentOS Linux 7' in grains.get('lsb_distrib_codename', ''):
+ grains.pop('lsb_distrib_release', None)
grains['osrelease'] = \
grains.get('lsb_distrib_release', osrelease).strip()
grains['oscodename'] = grains.get('lsb_distrib_codename',
|
Added temporary workaround for CentOS 7 os-release id bug.
|
py
|
diff --git a/connector/setup.py b/connector/setup.py
index <HASH>..<HASH> 100755
--- a/connector/setup.py
+++ b/connector/setup.py
@@ -182,7 +182,7 @@ setup(
'paramiko >= 1.15.1',
'lxml >= 3.3.0',
'ncclient >= 0.6.6',
- 'grpcio <= 1.36.1',
+ 'grpcio <= 1.28.1',
'cisco-gnmi >= 1.0.13, < 2.0.0',
],
|
revert grpcio to <I>
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -13,11 +13,13 @@ The Scientific PYthon Development EnviRonment
from distutils.core import setup
from distutils.command.build import build
-from sphinx import setup_command
+from distutils.command.install_data import install_data
import os
import os.path as osp
+import subprocess
import sys
+from sphinx import setup_command
def get_package_data(name, extlist):
"""Return data files for package *name* with extensions in *extlist*"""
@@ -65,7 +67,18 @@ class MyBuildDoc(setup_command.BuildDoc):
sys.path.pop(0)
-cmdclass = {'build': MyBuild, 'build_doc': MyBuildDoc}
+class MyInstallData(install_data):
+ def run(self):
+ install_data.run(self)
+ if sys.platform.startswith('linux'):
+ try:
+ subprocess.call(['update-desktop-database'])
+ except:
+ print >>sys.stderr, "ERROR: unable to update desktop database"
+
+
+cmdclass = {'build': MyBuild, 'build_doc': MyBuildDoc,
+ 'install_data': MyInstallData}
NAME = 'spyder'
|
setup.py: Run update-desktop-database after installing our desktop file on Linux - This will inform the OS that Spyder can open python files right away after installation.
|
py
|
diff --git a/tldap/backend/base.py b/tldap/backend/base.py
index <HASH>..<HASH> 100644
--- a/tldap/backend/base.py
+++ b/tldap/backend/base.py
@@ -41,9 +41,6 @@ class LDAPbase(object):
self.settings_dict = settings_dict
self._obj = None
- self._reconnect()
- assert self._obj is not None
-
#########################
# Connection Management #
#########################
@@ -119,6 +116,10 @@ class LDAPbase(object):
assert self._obj is not None
def _do_with_retry(self, fn):
+ if self._obj is None:
+ self._reconnect()
+ assert self._obj is not None
+
try:
return fn(self._obj)
except ldap3.core.exceptions.LDAPSessionTerminatedByServer:
|
Don't connect to LDAP until we need to. Preserve compatibility with previous versions of TLDAP. Change-Id: I<I>de<I>a9f9a2fde<I>f<I>efdb<I>b<I>fc3fc2
|
py
|
diff --git a/exist/__init__.py b/exist/__init__.py
index <HASH>..<HASH> 100644
--- a/exist/__init__.py
+++ b/exist/__init__.py
@@ -21,5 +21,5 @@ __author__ = 'Matt McDougall'
__author_email__ = 'matt@moatmedia.com.au'
__copyright__ = 'Copyright 2015 MoatMedia'
__license__ = 'Apache 2.0'
-__version__ = '0.1.9'
+__version__ = '0.1.9.1'
__release__ = __version__
|
Minor version update due to duplicate on pypi
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@ setup(
classifiers=(
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
),
)
|
Updated classifiers in setup.py [ci skip]
|
py
|
diff --git a/danceschool/stats/stats.py b/danceschool/stats/stats.py
index <HASH>..<HASH> 100644
--- a/danceschool/stats/stats.py
+++ b/danceschool/stats/stats.py
@@ -211,12 +211,12 @@ def getClassTypeMonthlyData(year=None, series=None, typeLimit=None):
def ClassTypeMonthlyJSON(request):
try:
year = int(request.GET.get('year'))
- except ValueError:
+ except (ValueError, TypeError):
year = None
try:
typeLimit = int(request.GET.get('typeLimit'))
- except ValueError:
+ except (ValueError, TypeError):
typeLimit = None
series = request.GET.get('series')
@@ -523,8 +523,7 @@ def MonthlyPerformanceCSV(request):
def getLocationPerformance(startDate=None,endDate=None):
- # This time filter ensures that only non-special Series are included.
- timeFilters = {'event__series__special': False}
+ timeFilters = {}
if startDate:
timeFilters['event__startTime__gte'] = startDate
|
Fixed broken stats graphs arising from DB migration.
|
py
|
diff --git a/tests/testapp/tests/test_core_settings_based_registry.py b/tests/testapp/tests/test_core_settings_based_registry.py
index <HASH>..<HASH> 100644
--- a/tests/testapp/tests/test_core_settings_based_registry.py
+++ b/tests/testapp/tests/test_core_settings_based_registry.py
@@ -64,3 +64,7 @@ def test_assert_configured_global_limits_registry(settings):
assert len(core.limits_registry.defaults) == 2
assert core.limits_registry.name2cls['QueryBatchLimit'] == QueryBatchLimit
assert core.limits_registry.name2cls['TimeLimit'] == TimeLimit
+
+
+def test_all_known_limits_are_present_in_the_gobal_registry(limit_cls):
+ assert limit_cls in list(core.limits_registry.name2cls.values())
|
regression test to make sure all known limits are in the registry
|
py
|
diff --git a/hbmqtt/plugins/manager.py b/hbmqtt/plugins/manager.py
index <HASH>..<HASH> 100644
--- a/hbmqtt/plugins/manager.py
+++ b/hbmqtt/plugins/manager.py
@@ -140,7 +140,7 @@ class PluginManager:
def clean_fired_events(future):
try:
self._fired_events.remove(task)
- except KeyError:
+ except ValueError:
pass
task.add_done_callback(clean_fired_events)
|
Fix delicious exception so it is properly eaten
|
py
|
diff --git a/dvc/main.py b/dvc/main.py
index <HASH>..<HASH> 100644
--- a/dvc/main.py
+++ b/dvc/main.py
@@ -55,13 +55,12 @@ def main(argv=None):
except DvcParserError:
ret = 254
except UnicodeError:
- if is_py2:
- logger.exception(
- "unicode is not fully supported in DVC for Python 2.\n"
- "Python 2.7 will not be maintained past 2020.\n"
- "Please upgrade to Python 3 to work correctly with unicode.\n"
- "Exception"
- )
+ if not is_py2:
+ raise
+ logger.exception(
+ "unicode is not supported in DVC for Python 2"
+ " (end-of-life January 1, 2020), please upgrade to Python 3"
+ )
ret = 255
except Exception: # pylint: disable=broad-except
logger.exception("unexpected error")
|
main: make sure to raise UinocedeError for py3
|
py
|
diff --git a/zhaquirks/philips/zllextendedcolorlight.py b/zhaquirks/philips/zllextendedcolorlight.py
index <HASH>..<HASH> 100644
--- a/zhaquirks/philips/zllextendedcolorlight.py
+++ b/zhaquirks/philips/zllextendedcolorlight.py
@@ -49,6 +49,8 @@ class ZLLExtendedColorLight(CustomDevice):
(PHILIPS, "LCT021"),
(PHILIPS, "LCT024"),
(PHILIPS, "LLC020"),
+ (PHILIPS, "LCF002"),
+ (PHILIPS, "LCS001"),
],
ENDPOINTS: {
11: {
|
Support default power on state for Hue Lily and Hue Calla (#<I>)
|
py
|
diff --git a/km3pipe/pumps/daq.py b/km3pipe/pumps/daq.py
index <HASH>..<HASH> 100644
--- a/km3pipe/pumps/daq.py
+++ b/km3pipe/pumps/daq.py
@@ -249,6 +249,9 @@ class DAQEvent(object):
self.trigger_mask = unpack('<Q', file_obj.read(8))[0]
self.overlays = unpack('<i', file_obj.read(4))[0]
+ # TODO: This is needed but not documented in Wiki!
+ self.what_is_this = unpack('<i', file_obj.read(4))[0]
+
self.n_triggered_hits = unpack('<i', file_obj.read(4))[0]
self.triggered_hits = []
self._parse_triggered_hits(file_obj)
@@ -261,7 +264,9 @@ class DAQEvent(object):
"""Parse and store triggered hits."""
for _ in range(self.n_triggered_hits):
dom_id, pmt_id, tdc_time, tot = unpack('<ibib', file_obj.read(10))
- self.triggered_hits.append((dom_id, pmt_id, tdc_time, tot))
+ trigger_mask = unpack('<Q', file_obj.read(8))
+ self.triggered_hits.append((dom_id, pmt_id, tdc_time, tot,
+ trigger_mask))
def _parse_snapshot_hits(self, file_obj):
"""Parse and store snapshot hits."""
|
Fixes JDAQEvent parsing, since dataformat description in the Wiki is not correct
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -36,10 +36,12 @@ exec(compile(open(salt_version).read(), salt_version, 'exec'))
class TestCommand(Command):
description = 'Run tests'
- user_options = []
+ user_options = [
+ ('runtests-opts=', 'R', 'Command line options to pass to runtests.py')
+ ]
def initialize_options(self):
- pass
+ self.runtests_opts = None
def finalize_options(self):
pass
@@ -49,7 +51,10 @@ class TestCommand(Command):
self.run_command('build')
build_cmd = self.get_finalized_command('build_ext')
runner = os.path.abspath('tests/runtests.py')
- test_cmd = 'python %s' % runner
+ test_cmd = 'python {0}'.format(runner)
+ if self.runtests_opts:
+ test_cmd += ' {0}'.format(self.runtests_opts)
+
print("running test")
test_process = Popen(
test_cmd, shell=True,
|
Allow passing options to `tests/runtests.py` from `python setup.py test`.
|
py
|
diff --git a/theanets/layers.py b/theanets/layers.py
index <HASH>..<HASH> 100644
--- a/theanets/layers.py
+++ b/theanets/layers.py
@@ -871,7 +871,9 @@ class LSTM(RNN):
fn=fn,
sequences=inputs,
non_sequences=self.weights + self.biases,
- outputs_info=[self.zeros('h'), self.zeros('c')])
+ outputs_info=[self.zeros('h'), self.zeros('c')],
+ go_backwards=self.kwargs.get('direction', '').lower().startswith('back'),
+ )
return outputs[0], updates
|
Include directionality in LSTM layer!
|
py
|
diff --git a/creep/src/definition.py b/creep/src/definition.py
index <HASH>..<HASH> 100755
--- a/creep/src/definition.py
+++ b/creep/src/definition.py
@@ -82,7 +82,9 @@ class Definition:
actions.extend (actions_append)
cancels.extend (cancels_append)
else:
- logger.debug ('Command \'link\' on file \'{0}\' returned non-zero code.'.format (path))
+ logger.warning ('Command \'link\' on file \'{0}\' returned non-zero code.'.format (path))
+
+ type = Action.ERR
# Build output file using processing command if any
if modifier.modify is not None:
@@ -92,7 +94,7 @@ class Definition:
with open (os.path.join (work, path_new), 'wb') as file:
file.write (out)
else:
- logger.debug ('Command \'modify\' on file \'{0}\' returned non-zero code.'.format (path))
+ logger.warning ('Command \'modify\' on file \'{0}\' returned non-zero code.'.format (path))
type = Action.ERR
|
Log failed modifiers and linkers as warning instead of debug.
|
py
|
diff --git a/sllurp/llrp.py b/sllurp/llrp.py
index <HASH>..<HASH> 100644
--- a/sllurp/llrp.py
+++ b/sllurp/llrp.py
@@ -694,10 +694,6 @@ class LLRPClient (LineReceiver):
def pause (self, duration_seconds=0):
"""Pause an inventory operation for a set amount of time."""
- if self.state != LLRPClient.STATE_INVENTORYING:
- logger.debug('cannot pause() if not inventorying; ignoring')
- return None
-
logger.info('pausing for {} seconds'.format(duration_seconds))
rospec = self.getROSpec()['ROSpec']
|
pause() should always be save; remove check that fails on borked reader
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -27,5 +27,7 @@ topydo is a todo list application using the todo.txt format. It is heavily inspi
* Maintain dependencies between todo items;
* Allow todos to recur;
* Some conveniences when adding new items (e.g. adding creation date and use relative dates)
-"""
+""",
+
+ test_suite = "test",
)
|
Add test_suite variable for test directory.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -2,6 +2,8 @@
import os
from setuptools import setup, find_packages
+from itertools import chain
+from glob import glob
import cookielaw
@@ -19,6 +21,12 @@ CLASSIFIERS = [
'Topic :: Internet :: WWW/HTTP :: Session',
]
+package_data_globs = (
+ 'templates/cookielaw/*.html',
+ 'static/cookielaw/*/*',
+ 'locale/*/*/*'
+)
+
setup(
author='Piotr Kilczuk',
author_email='piotr@tymaszweb.pl',
@@ -38,6 +46,7 @@ setup(
'selenium>=2.32.0',
],
packages=find_packages(),
+ package_data={'cookielaw': list(chain(*map(glob, paths)))},
include_package_data=False,
zip_safe = False,
test_suite = 'runtests.main',
|
Adding package_data to setup.py to ensure non-python files are installed
|
py
|
diff --git a/gitlab_runner/tests/conftest.py b/gitlab_runner/tests/conftest.py
index <HASH>..<HASH> 100644
--- a/gitlab_runner/tests/conftest.py
+++ b/gitlab_runner/tests/conftest.py
@@ -13,6 +13,7 @@ from .common import (
CONFIG,
GITLAB_LOCAL_MASTER_PORT,
GITLAB_LOCAL_RUNNER_PORT,
+ GITLAB_MASTER_URL,
GITLAB_RUNNER_URL,
GITLAB_TEST_TOKEN,
HERE,
@@ -39,10 +40,12 @@ def dd_environment():
compose_file=compose_file,
env_vars=env,
conditions=[
- CheckDockerLogs(
- compose_file, ['Gitlab is up!', 'Configuration loaded', 'Metrics server listening'], wait=5
- ),
+ CheckDockerLogs(compose_file, patterns='Gitlab is up!', wait=5),
+ CheckDockerLogs(compose_file, patterns='Configuration loaded', wait=5),
+ CheckDockerLogs(compose_file, patterns='Metrics server listening', wait=5),
CheckEndpoints(GITLAB_RUNNER_URL, attempts=180),
+ CheckEndpoints('{}/ci'.format(GITLAB_MASTER_URL), attempts=90),
],
+ attempts=2,
):
yield CONFIG, E2E_METADATA
|
gitlab runner better conditions (#<I>) * Add attempts * Check for every log line, check for both enpoints
|
py
|
diff --git a/bitfield/tests/tests.py b/bitfield/tests/tests.py
index <HASH>..<HASH> 100644
--- a/bitfield/tests/tests.py
+++ b/bitfield/tests/tests.py
@@ -104,8 +104,8 @@ class BitTest(TestCase):
def test_comparison(self):
self.assertEqual(Bit(0), Bit(0))
- self.assertNotEquals(Bit(1), Bit(0))
- self.assertNotEquals(Bit(0, 0), Bit(0, 1))
+ self.assertNotEqual(Bit(1), Bit(0))
+ self.assertNotEqual(Bit(0, 0), Bit(0, 1))
self.assertEqual(Bit(0, 1), Bit(0, 1))
self.assertEqual(Bit(0), 1)
|
tests: Use assertNotEqual instead of deprecated assertNotEquals.
|
py
|
diff --git a/gcloud/storage/test_connection.py b/gcloud/storage/test_connection.py
index <HASH>..<HASH> 100644
--- a/gcloud/storage/test_connection.py
+++ b/gcloud/storage/test_connection.py
@@ -584,11 +584,11 @@ class TestConnection(unittest2.TestCase):
self.assertEqual(netloc, 'api.example.com')
self.assertEqual(path, RESOURCE)
params = urlparse.parse_qs(qs)
- self.assertEqual(params,
- {'Signature': [SIGNED],
- 'Expires': ['1000'],
- 'GoogleAccessId': [_Credentials.service_account_name],
- })
+ self.assertEqual(len(params), 3)
+ self.assertEqual(params['Signature'], [SIGNED])
+ self.assertEqual(params['Expires'], ['1000'])
+ self.assertEqual(params['GoogleAccessId'],
+ [_Credentials.service_account_name])
self.assertEqual(frag, '')
|
Dead chickens for pep8 E<I>.
|
py
|
diff --git a/pyglibc/__init__.py b/pyglibc/__init__.py
index <HASH>..<HASH> 100644
--- a/pyglibc/__init__.py
+++ b/pyglibc/__init__.py
@@ -41,4 +41,5 @@ __all__ = [
'select',
'selectors',
'signalfd',
+ 'subreaper',
]
|
Add subreaper to __all__
|
py
|
diff --git a/pytestsalt/fixtures/daemons.py b/pytestsalt/fixtures/daemons.py
index <HASH>..<HASH> 100644
--- a/pytestsalt/fixtures/daemons.py
+++ b/pytestsalt/fixtures/daemons.py
@@ -525,7 +525,7 @@ class SaltScriptBase(object):
@property
def log_prefix(self):
- return '[pytest-{0}]'.format(self.config['pytest_log_port'])
+ return '[pytest-{0}]'.format(self.config['pytest_port'])
@property
def io_loop(self):
|
Fix the port source on the daemons fixtures side
|
py
|
diff --git a/polymodels/fields.py b/polymodels/fields.py
index <HASH>..<HASH> 100644
--- a/polymodels/fields.py
+++ b/polymodels/fields.py
@@ -152,4 +152,5 @@ class PolymorphicTypeField(ForeignKey):
kwargs.pop(kwarg)
if self.overriden_default:
kwargs.pop('default')
+ kwargs.pop('limit_choices_to', None)
return name, path, args, kwargs
|
Account for the late limit_choices_to deconstruction in <I>.
|
py
|
diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py
index <HASH>..<HASH> 100644
--- a/salt/states/saltmod.py
+++ b/salt/states/saltmod.py
@@ -79,6 +79,9 @@ def state(
ssh
Set to `True` to use the ssh client instaed of the standard salt client
+ roster
+ In the event of using salt-ssh, a roster system can be set
+
fail_minions
An optional list of targeted minions where failure is an option
'''
|
Add roster doc to salt.state runner
|
py
|
diff --git a/json5/lib.py b/json5/lib.py
index <HASH>..<HASH> 100644
--- a/json5/lib.py
+++ b/json5/lib.py
@@ -172,9 +172,12 @@ def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
should produce exactly the same output as ``json.dumps(obj, fp).``
"""
- fp.write(str(dumps(obj, skipkeys, ensure_ascii, check_circular,
- allow_nan, indent, separators, default, sort_keys,
- quote_keys, trailing_commas, allow_duplicate_keys)))
+ fp.write(str(dumps(obj=obj, skipkeys=skipkeys, ensure_ascii=ensure_ascii,
+ check_circular=check_circular, allow_nan=allow_nan,
+ cls=cls, indent=indent, separators=separators,
+ default=default, sort_keys=sort_keys,
+ quote_keys=quote_keys, trailing_commas=trailing_commas,
+ allow_duplicate_keys=allow_duplicate_keys)))
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
|
dump() properly passes args to dumps() Fix a bug (caused by a missing cls) that caused dump() to pass arguments to dumps() incorrectly.
|
py
|
diff --git a/analytical/tests/test_tag_clickmap.py b/analytical/tests/test_tag_clickmap.py
index <HASH>..<HASH> 100644
--- a/analytical/tests/test_tag_clickmap.py
+++ b/analytical/tests/test_tag_clickmap.py
@@ -2,9 +2,6 @@
Tests for the Clickmap template tags and filters.
"""
-import re
-
-from django.contrib.auth.models import User, AnonymousUser
from django.http import HttpRequest
from django.template import Context
@@ -20,7 +17,7 @@ class ClickyTagTestCase(TagTestCase):
"""
def test_tag(self):
- r = self.render_tag('clicjmap', 'clickmap')
+ r = self.render_tag('clickmap', 'clickmap')
self.assertTrue("tracker: '12345', version:'2'};" in r, r)
def test_node(self):
@@ -33,7 +30,7 @@ class ClickyTagTestCase(TagTestCase):
@override_settings(CLICKMAP_TRACKER_ID='abc')
def test_wrong_site_id(self):
- self.assertRaises(AnalyticalException, ClickyNode)
+ self.assertRaises(AnalyticalException, ClickmapNode)
@override_settings(ANALYTICAL_INTERNAL_IPS=['1.1.1.1'])
def test_render_internal_ip(self):
|
fixed various typos, this would of failed previously
|
py
|
diff --git a/src/python/pants/engine/native.py b/src/python/pants/engine/native.py
index <HASH>..<HASH> 100644
--- a/src/python/pants/engine/native.py
+++ b/src/python/pants/engine/native.py
@@ -808,7 +808,7 @@ class Native(object):
return self.gc(scheduler, self.lib.scheduler_destroy)
def set_panic_handler(self):
- if os.getenv("RUST_BACKTRACE", "0") != "1":
+ if os.getenv("RUST_BACKTRACE", "0") == "0":
# The panic handler hides a lot of rust tracing which may be useful.
# Don't activate it when the user explicitly asks for rust backtraces.
self.lib.set_panic_handler()
|
Make RUST_BACKTRACE sniffing less specific (#<I>) We set it to 'all' now on CI, so otherwise lose information
|
py
|
diff --git a/flask_permissions/models.py b/flask_permissions/models.py
index <HASH>..<HASH> 100644
--- a/flask_permissions/models.py
+++ b/flask_permissions/models.py
@@ -37,6 +37,22 @@ class Role(db.Model):
def __init__(self, name):
self.name = name.lower()
+ def add_abilities(*abilities):
+ for ability in abilities:
+ existing_ability = Ability.query.filter_by(
+ name=ability).first()
+ if not existing_ability:
+ existing_ability = Ability(ability)
+ db.session.add(existing_ability)
+ db.session.commit()
+ self.abilities.append(existing_ability)
+
+ def remove_abilities(*abilities):
+ for ability in abilities:
+ existing_ability = Role.query.filter_by(name=ability).first()
+ if existing_ability and existing_ability in self.abilities:
+ self.abilities.remove(existing_ability)
+
def __repr__(self):
return '<Role {}>'.format(self.name)
|
Adds add and remove methods for abilities
|
py
|
diff --git a/multiqc/modules/somalier/somalier.py b/multiqc/modules/somalier/somalier.py
index <HASH>..<HASH> 100644
--- a/multiqc/modules/somalier/somalier.py
+++ b/multiqc/modules/somalier/somalier.py
@@ -545,9 +545,13 @@ class MultiqcModule(BaseMultiqcModule):
for s_name, d in self.somalier_data.items():
if "X_depth_mean" in d and "original_pedigree_sex" in d:
+ if d["gt_depth_mean"] == 0:
+ y = 0
+ else:
+ y = 2 * d["X_depth_mean"] / d["gt_depth_mean"]
data[s_name] = {
"x": (random.random() - 0.5) * 0.1 + sex_index.get(d["original_pedigree_sex"], 2),
- "y": 2 * d["X_depth_mean"] / d["gt_depth_mean"],
+ "y": y,
}
if len(data) > 0:
|
Somalier: division by zero in sex ploidy plot
|
py
|
diff --git a/zipline/protocol.py b/zipline/protocol.py
index <HASH>..<HASH> 100644
--- a/zipline/protocol.py
+++ b/zipline/protocol.py
@@ -144,6 +144,13 @@ class BarData(object):
else:
return name in self.__dict__
+ def has_key(self, name):
+ """
+ DEPRECATED: __contains__ is preferred, but this method is for
+ compatibility with existing algorithms.
+ """
+ return name in self
+
def __setitem__(self, name, value):
self._data[name] = value
|
MAINT: Add a has_key method to BarData for compatibility. BarData should, at least for the time being, be compatible with existing algorithms that had worked against the prior usage of an ndict as data, which provided `has_key`. Of note, the Python language has deprecated `has_key` in favor of using `in` and `__contains__`.
|
py
|
diff --git a/phypno/widgets/overview.py b/phypno/widgets/overview.py
index <HASH>..<HASH> 100644
--- a/phypno/widgets/overview.py
+++ b/phypno/widgets/overview.py
@@ -177,8 +177,8 @@ class Overview(QGraphicsView):
stamps = _make_timestamps(self.start_time, self.minimum, self.maximum,
self.parent.value('timestamp_steps'))
- for stamp, xpos in stamps.items():
+ for stamp, xpos in zip(*stamps):
text = self.scene.addSimpleText(stamp)
text.setFlag(QGraphicsItem.ItemIgnoresTransformations)
@@ -438,11 +438,11 @@ def _make_timestamps(start_time, minimum, maximum, steps):
first_stamp = ceil(d0.total_seconds() / steps) * steps
last_stamp = ceil(d1.total_seconds() / steps) * steps
- stamps = {}
+ stamp_label = []
+ stamp_time = []
for stamp in range(first_stamp, last_stamp, steps):
stamp_as_datetime = t0_midnight + timedelta(seconds=stamp)
- key = stamp_as_datetime.strftime('%H:%M')
- value = stamp - d0.total_seconds()
- stamps[key] = value
+ stamp_label.append(stamp_as_datetime.strftime('%H:%M'))
+ stamp_time.append(stamp - d0.total_seconds())
- return stamps
+ return stamp_label, stamp_time
|
use two lists, not dict for time stamps in overview
|
py
|
diff --git a/grammpy/StringGrammar.py b/grammpy/StringGrammar.py
index <HASH>..<HASH> 100644
--- a/grammpy/StringGrammar.py
+++ b/grammpy/StringGrammar.py
@@ -24,7 +24,7 @@ class StringGrammar(Grammar):
return super().add_term(StringGrammar.__to_string_arr(term))
def term(self, term=None):
- return super().term(StringGrammar.__to_string_arr(term))
+ return self.get_term(term)
def get_term(self, term=None):
res = super().get_term(StringGrammar.__to_string_arr(term))
|
Fix return of Terminal instance when term method accept string
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,8 @@ setup(
'PyYAML>=3.11',
'retrying>=1.3.3',
'six>=1.10.0',
- 'pytz>=2016.10'
+ 'pytz>=2016.10',
+ 'future>=0.16.0'
],
setup_requires=[
'pytest-runner'
|
add the future package as a dependency
|
py
|
diff --git a/pysparkling/rdd.py b/pysparkling/rdd.py
index <HASH>..<HASH> 100644
--- a/pysparkling/rdd.py
+++ b/pysparkling/rdd.py
@@ -146,7 +146,7 @@ class RDD(object):
>>> r = Context().parallelize(
... [('a', 1), ('b', 2), ('a', 3), ('c', 4)]
... ).aggregateByKey(0, seqOp, combOp)
- ... (r['a'], r['b'])
+ >>> (r['a'], r['b'])
(4, 2)
"""
|
revert last change to doctest of aggregateByKey
|
py
|
diff --git a/autofit/core/phase.py b/autofit/core/phase.py
index <HASH>..<HASH> 100644
--- a/autofit/core/phase.py
+++ b/autofit/core/phase.py
@@ -40,8 +40,8 @@ class AbstractPhase(object):
phase_name: str
The name of this phase
"""
- self.optimizer = optimizer_class(name=phase_name)
self.phase_name = phase_name or make_name(self.__class__)
+ self.optimizer = optimizer_class(name=self.phase_name)
self.auto_link_priors = auto_link_priors
@property
|
creating default phase name prior to passing argument to optimizer class constructor
|
py
|
diff --git a/tests/test_datalad.py b/tests/test_datalad.py
index <HASH>..<HASH> 100644
--- a/tests/test_datalad.py
+++ b/tests/test_datalad.py
@@ -27,3 +27,5 @@ def test_commit_file(annex_path, new_dataset):
with open(file_path, 'w') as fd:
fd.write("""GPL""")
commit_files.run(annex_path, ds_id, ['LICENSE'])
+ dataset = Dataset(str(annex_path.join(ds_id)))
+ assert not dataset.repo.is_dirty()
|
Fix missing assert for test_commit_file.
|
py
|
diff --git a/tests/unit/modules/win_repo_test.py b/tests/unit/modules/win_repo_test.py
index <HASH>..<HASH> 100644
--- a/tests/unit/modules/win_repo_test.py
+++ b/tests/unit/modules/win_repo_test.py
@@ -24,6 +24,7 @@ ensure_in_syspath('../../')
from salt.modules import win_repo
win_repo.__opts__ = {}
+win_repo.__salt__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
@@ -31,6 +32,8 @@ class WinRepoTestCase(TestCase):
'''
Test cases for salt.modules.win_repo
'''
+
+ @patch('salt.loader.render', MagicMock(return_valu=''))
def test_genrepo(self):
'''
Test to generate win_repo_cachefile
|
Mock __salt__ and call to salt.loader.render to fix win_repo test
|
py
|
diff --git a/eco/__init__.py b/eco/__init__.py
index <HASH>..<HASH> 100644
--- a/eco/__init__.py
+++ b/eco/__init__.py
@@ -32,7 +32,7 @@ class Source(object):
@property
def combined_contents(self):
- return ";\n".join([coffeescript._default_compiler_script(), self.contents])
+ return ";\n".join([coffeescript.get_compiler_script(), self.contents])
@property
def version(self):
|
Update eco/__init__.py Hi, I've noticed that with the latest version of <URL>) Thank you
|
py
|
diff --git a/django_fsm/tests.py b/django_fsm/tests.py
index <HASH>..<HASH> 100644
--- a/django_fsm/tests.py
+++ b/django_fsm/tests.py
@@ -118,7 +118,7 @@ class DocumentTest(TestCase):
class BlogPostStatus(models.Model):
- name = models.CharField(max_length=3, unique=True)
+ name = models.CharField(max_length=10, unique=True)
objects = models.Manager()
@transition(source='new', target='published')
|
change length CharField BlogPostStatus.name to accommodate all the possible states in the tests At least PostgreSQL complains loudly when a value doesn't fit into the varchar.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -52,7 +52,7 @@ if sys.platform == 'darwin':
setup(name = "paramiko",
- version = "1.11.0",
+ version = "1.10.3",
description = "SSH2 protocol library",
author = "Jeff Forcier",
author_email = "jeff@bitprophet.org",
|
Not sure how this got updated :(
|
py
|
diff --git a/great_expectations/render/renderer/column_section_renderer.py b/great_expectations/render/renderer/column_section_renderer.py
index <HASH>..<HASH> 100644
--- a/great_expectations/render/renderer/column_section_renderer.py
+++ b/great_expectations/render/renderer/column_section_renderer.py
@@ -375,10 +375,12 @@ class ProfilingResultsColumnSectionRenderer(ColumnSectionRenderer):
if any(len(value) > 80 for value in values):
content_block_type = "bullet_list"
+ content_block_class = RenderedBulletListContent
else:
content_block_type = "value_list"
+ content_block_class = ValueListContent
- new_block = ValueListContent(**{
+ new_block = content_block_class(**{
"content_block_type": content_block_type,
"header":
{
|
Set content block class dynamically (#<I>)
|
py
|
diff --git a/src/infi/docopt_completion/common.py b/src/infi/docopt_completion/common.py
index <HASH>..<HASH> 100644
--- a/src/infi/docopt_completion/common.py
+++ b/src/infi/docopt_completion/common.py
@@ -67,7 +67,7 @@ def build_command_tree(pattern, cmd_params):
Recursively fill in a command tree in CommandParams (see CommandParams documentation) according to a
docopt-parsed "pattern" object
"""
- from docopt import Either, Optional, OneOrMore, Required, Option, Command, Argument
+ from docopt import (Either, Optional, OneOrMore, Required, Option, Command, Argument)
if type(pattern) in [Either, Optional, OneOrMore]:
for child in pattern.children:
build_command_tree(child, cmd_params)
@@ -90,7 +90,7 @@ def parse_params(cmd):
# this function creates a parameter tree for the target docopt tool.
# a parameter tree is a CommandParams instance, see the documentation of the class
# this function also returns a second parameter, which is a dictionary of option->option help string
- from docopt import parse_defaults, parse_pattern, formal_usage, printable_usage
+ from docopt import (parse_defaults, parse_pattern, formal_usage, printable_usage)
usage = get_usage(cmd)
options = parse_defaults(usage)
pattern = parse_pattern(formal_usage(printable_usage(usage)), options)
|
Group long imports in parens
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,14 @@
#from distutils.core import setup
+import io
+
from setuptools import setup
+
+def read(path):
+ with io.open(path, mode="r", encoding="utf-8") as fd:
+ return fd.read()
+
+
setup(
name = 'EbookLib',
version = '0.16',
@@ -10,7 +18,7 @@ setup(
url = 'https://github.com/aerkalov/ebooklib',
license = 'GNU Affero General Public License',
description = 'Ebook library which can handle EPUB2/EPUB3 and Kindle format',
- long_description = open('README.md').read(),
+ long_description = read('README.md'),
keywords = ['ebook', 'epub', 'kindle'],
classifiers = [
"Development Status :: 4 - Beta",
|
Fixed #<I> - Read the ``README.md`` file using utf-8 encoding. (cherry picked from commit <I>)
|
py
|
diff --git a/pvlib/pvl_tools.py b/pvlib/pvl_tools.py
index <HASH>..<HASH> 100755
--- a/pvlib/pvl_tools.py
+++ b/pvlib/pvl_tools.py
@@ -180,10 +180,10 @@ class Parse(): #parse complex logic
except:
pvl_logger.warning('Optional value "'+arg+'" not input'"")
continue
- if not(eval(lambdastring)(kwargs[arg])): #check its logical constraint
+ if not(eval(lambdastring)(kwargs[arg][~np.isnan(kwargs[arg])])): #check its logical constraint
raise Exception('Error: Optional input "'+arg+'" fails on logical test "'+ re.findall(reg,string)[0]+'"')
#check all other contraints
- elif not(eval(lambdastring)(kwargs[arg]).all()):
+ elif not(eval(lambdastring)(kwargs[arg][~np.isnan(kwargs[arg])]).all()): #ignore NAN entries
raise Exception('Error: Numeric input "'+arg+' " fails on logical test " '+ re.findall(reg,string)[0]+'"')
#Check if any string logicals are bypassed due to poor formatting
|
Modified lambda execution to ignore NAN entries
|
py
|
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index <HASH>..<HASH> 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -223,7 +223,6 @@ class Specs(SpecSet):
journal_since_boot = RegistryPoint(filterable=True)
katello_service_status = RegistryPoint(filterable=True)
kdump_conf = RegistryPoint()
- kdump = RegistryPoint()
kerberos_kdc_log = RegistryPoint(filterable=True)
kexec_crash_loaded = RegistryPoint()
kexec_crash_size = RegistryPoint()
|
Delete kdump from specs/init (#<I>)
|
py
|
diff --git a/gitlab/__init__.py b/gitlab/__init__.py
index <HASH>..<HASH> 100644
--- a/gitlab/__init__.py
+++ b/gitlab/__init__.py
@@ -30,7 +30,7 @@ from gitlab.exceptions import * # noqa
from gitlab import utils # noqa
__title__ = "python-gitlab"
-__version__ = "2.0.1"
+__version__ = "2.1.0"
__author__ = "Gauvain Pocentek"
__email__ = "gauvainpocentek@gmail.com"
__license__ = "LGPL3"
|
chore: bump to <I> There are a few more features in there
|
py
|
diff --git a/rest_framework_nested/routers.py b/rest_framework_nested/routers.py
index <HASH>..<HASH> 100644
--- a/rest_framework_nested/routers.py
+++ b/rest_framework_nested/routers.py
@@ -52,7 +52,7 @@ class NestedSimpleRouter(SimpleRouter):
self.nest_prefix = kwargs.pop('lookup', 'nested_%i' % self.nest_count) + '_'
super(NestedSimpleRouter, self).__init__(*args, **kwargs)
- parent_registry = filter(lambda registered: registered[0] == self.parent_prefix, self.parent_router.registry)
+ parent_registry = [registered for registered in self.parent_router.registry if registered[0] == self.parent_prefix]
try:
parent_registry = parent_registry[0]
parent_prefix, parent_viewset, parent_basename = parent_registry
|
Replaced filter call with a list comprehension for py3k support
|
py
|
diff --git a/src/masonite/authorization/Gate.py b/src/masonite/authorization/Gate.py
index <HASH>..<HASH> 100644
--- a/src/masonite/authorization/Gate.py
+++ b/src/masonite/authorization/Gate.py
@@ -33,16 +33,14 @@ class Gate:
self.policies[model_class] = policy_class
return self
- def get_policy_for(self, instance):
- from masoniteorm import Model
-
- if isinstance(instance, Model):
- policy = self.policies.get(instance.__class__, None)
- elif isclass(instance):
- policy = self.policies.get(instance, None)
- elif isinstance(instance, str):
+ def get_policy_for(self, instance_or_class):
+ if isinstance(instance_or_class, str):
# TODO: load model from str, get class and get policies
policy = None
+ elif isclass(instance_or_class):
+ policy = self.policies.get(instance_or_class, None)
+ else:
+ policy = self.policies.get(instance_or_class.__class__, None)
if policy:
return policy()
else:
|
rewrite objects policy to remove orm dependency
|
py
|
diff --git a/lhc/io/sequence/sequence_file.py b/lhc/io/sequence/sequence_file.py
index <HASH>..<HASH> 100644
--- a/lhc/io/sequence/sequence_file.py
+++ b/lhc/io/sequence/sequence_file.py
@@ -10,13 +10,13 @@ class SequenceFile:
REGISTERED_EXTENSIONS = {}
REGISTERED_FORMATS = {} # type: Dict[str, ClassVar['SequenceFile']]
- def __init__(self, file: str, mode: str = 'r', encoding: str = 'utf-8'):
+ def __init__(self, filename: str = None, mode: str = 'r', encoding: str = 'utf-8'):
self.generator = None
if 'r' in mode or 'w' in mode:
- self.generator = open_file(file, mode, encoding)
+ self.generator = open_file(filename, mode, encoding)
self.file = self.generator.__enter__()
elif mode == 'q':
- self.file = pysam.FastaFile(file)
+ self.file = pysam.FastaFile(filename)
else:
raise ValueError('Unrecognised open mode: {}'.format(mode))
self.mode = mode
|
allow sequence filenames to be None
|
py
|
diff --git a/metpy/calc/thermo.py b/metpy/calc/thermo.py
index <HASH>..<HASH> 100644
--- a/metpy/calc/thermo.py
+++ b/metpy/calc/thermo.py
@@ -135,7 +135,7 @@ def moist_lapse(pressure, temperature):
def dt(t, p):
t = units.Quantity(t, temperature.units)
p = units.Quantity(p, pressure.units)
- rs = mixing_ratio(saturation_vapor_pressure(t), p)
+ rs = saturation_mixing_ratio(p, t)
frac = ((Rd * t + Lv * rs) /
(Cp_d + (Lv * Lv * rs * epsilon / (Rd * t * t)))).to('kelvin')
return frac / p
@@ -425,7 +425,7 @@ def saturation_mixing_ratio(tot_press, temperature):
Survey. 73.
'''
- return 0.622 * saturation_vapor_pressure(temperature) / tot_press
+ return mixing_ratio(saturation_vapor_pressure(temperature), tot_press)
@exporter.export
@@ -447,6 +447,10 @@ def equivalent_potential_temperature(pressure, temperature):
array_like
The corresponding equivalent potential temperature of the parcel
+ Notes
+ -----
+ .. math:: \Theta_e = \Theta e^\frac{L_v r_s}{C_{pd} T}
+
References
----------
.. [5] Hobbs, Peter V. and Wallace, John M., 1977: Atmospheric Science, an Introductory
|
Better implementation for sat mixing ratio Changed the implementation for saturation mixing ratio, added a formula in the notes section of the doc string to equiv pot temp, and included sat mixing ratio in moist lapse function
|
py
|
diff --git a/torchvision/datasets/cifar.py b/torchvision/datasets/cifar.py
index <HASH>..<HASH> 100644
--- a/torchvision/datasets/cifar.py
+++ b/torchvision/datasets/cifar.py
@@ -21,7 +21,7 @@ class CIFAR10(data.Dataset):
``cifar-10-batches-py`` exists or will be saved to if download is set to True.
train (bool, optional): If True, creates dataset from training set, otherwise
creates from test set.
- transform (callable, optional): A function/transform that takes in an PIL image
+ transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
|
remove extra space (#<I>)
|
py
|
diff --git a/src/quart/app.py b/src/quart/app.py
index <HASH>..<HASH> 100644
--- a/src/quart/app.py
+++ b/src/quart/app.py
@@ -1780,8 +1780,6 @@ class Quart(Scaffold):
self.log_exception(sys.exc_info())
async def shutdown(self) -> None:
- await asyncio.gather(*self.background_tasks)
-
try:
async with self.app_context():
for func in self.after_serving_funcs:
@@ -1797,6 +1795,8 @@ class Quart(Scaffold):
await got_serving_exception.send(self, exception=error)
self.log_exception(sys.exc_info())
+ await asyncio.gather(*self.background_tasks)
+
def _cancel_all_tasks(loop: asyncio.AbstractEventLoop) -> None:
tasks = [task for task in asyncio.all_tasks(loop) if not task.done()]
|
Bugfix await background task shutdown after shutdown funcs This allows the after-serving shutdown functions to stop or otherwise trigger the end of the background tasks.
|
py
|
diff --git a/ratcave/console_scripts/arena_scanner.py b/ratcave/console_scripts/arena_scanner.py
index <HASH>..<HASH> 100644
--- a/ratcave/console_scripts/arena_scanner.py
+++ b/ratcave/console_scripts/arena_scanner.py
@@ -42,7 +42,6 @@ def scan(tracker, rigid_body_name, pointwidth=.06, pointspeed=3.):
rigid body data into a pickled file."""
from ratcave import graphics
- from psychopy import event
# Initialize Calibration Point Grid.
wavefront_reader = graphics.WavefrontReader(ratcave.graphics.resources.obj_primitives)
@@ -58,7 +57,7 @@ def scan(tracker, rigid_body_name, pointwidth=.06, pointspeed=3.):
# Main Loop
old_frame, clock, points, body_markers = tracker.iFrame, countdown_timer(3.), [], []
- while ('escape' not in event.getKeys()) and clock.next() > 0:
+ while clock.next() > 0:
# Update Calibration Grid
scene.camera.position[:2] = (pointwidth * np.sin(clock.next() * pointspeed)), (pointwidth * np.cos(clock.next() * pointspeed))
|
removed key detection in arena_scan, because it's so fast, and it removes the last psychopy dependency.
|
py
|
diff --git a/blocking.py b/blocking.py
index <HASH>..<HASH> 100644
--- a/blocking.py
+++ b/blocking.py
@@ -22,7 +22,8 @@ def predicateCoverage(pairs, predicates) :
# page 102 of Bilenko
def trainBlocking(training_pairs, predicates, data_model, eta, epsilon) :
- training_distinct, training_dupes = training_pairs
+ training_distinct = training_pairs[0][:]
+ training_dupes = training_pairs[1][:]
n_training_dupes = len(training_dupes)
n_training_distinct = len(training_distinct)
sample_size = n_training_dupes + n_training_distinct
|
fixed important bug that was emptying out the training set of duplicates
|
py
|
diff --git a/codespell.py b/codespell.py
index <HASH>..<HASH> 100755
--- a/codespell.py
+++ b/codespell.py
@@ -193,10 +193,6 @@ def parse_options(args):
parser.add_option('-d', '--disable-colors',
action = 'store_true', default = False,
help = 'Disable colors even when printing to terminal')
- parser.add_option('-r', '-R',
- action = 'store_true', default = False,
- dest = 'recursive',
- help = 'parse directories recursively')
parser.add_option('-w', '--write-changes',
action = 'store_true', default = False,
help = 'write changes in place if possible')
@@ -495,9 +491,6 @@ def main(*args):
if ishidden(filename):
continue
- if not options.recursive and os.path.isdir(filename):
- continue
-
if os.path.isdir(filename):
for root, dirs, files in os.walk(filename):
i = 0
|
Remove '-r' recursive flag If user specifies directory then it means user wants to check it recursively.
|
py
|
diff --git a/aiobotocore/paginate.py b/aiobotocore/paginate.py
index <HASH>..<HASH> 100644
--- a/aiobotocore/paginate.py
+++ b/aiobotocore/paginate.py
@@ -22,6 +22,10 @@ class AioPageIterator(PageIterator):
self._current_kwargs = self._op_kwargs
self._previous_next_token = None
self._next_token = dict((key, None) for key in self._input_token)
+
+ if self._starting_token is not None:
+ next_token = self._parse_starting_token()[0]
+
# The number of items from result_key we've seen so far.
self._total_items = 0
self._first_request = True
|
port paginator fix from botocore
|
py
|
diff --git a/gitenberg/book.py b/gitenberg/book.py
index <HASH>..<HASH> 100644
--- a/gitenberg/book.py
+++ b/gitenberg/book.py
@@ -73,7 +73,10 @@ class Book():
path = os.path.join(self.library_path, name)
if os.path.exists(path):
self.local_path = path
- else:
+
+ def make_local_path(self):
+ path = os.path.join(self.library_path, self.book_id)
+ if not os.path.exists(path):
try:
os.makedirs(path)
self.local_path = path
@@ -82,7 +85,6 @@ class Book():
finally: # weird try-except-finally, I know
os.chmod(path, 0o777)
-
def parse_book_metadata(self, rdf_library=None):
# cloned repo
if self.local_repo and self.local_repo.metadata_file:
@@ -120,6 +122,7 @@ class Book():
def fetch(self):
""" just pull files from PG
"""
+ self.make_local_path()
fetcher = BookFetcher(self)
fetcher.fetch()
|
fix to fetch broke clone
|
py
|
diff --git a/hdate/date.py b/hdate/date.py
index <HASH>..<HASH> 100644
--- a/hdate/date.py
+++ b/hdate/date.py
@@ -71,7 +71,7 @@ class HDate(object): # pylint: disable=useless-object-inheritance
def __repr__(self):
"""Return a representation of HDate for programmatic use."""
return ("<HDate(gdate='{}', diaspora='{}', hebrew='{}')>".format(
- self.gdate, self.diaspora, self.hebrew))
+ repr(self.gdate), self.diaspora, self.hebrew))
@property
def hdate(self):
|
When printing the HDate representation, return the gdate `repr`
|
py
|
diff --git a/fedmsg/commands/relay.py b/fedmsg/commands/relay.py
index <HASH>..<HASH> 100644
--- a/fedmsg/commands/relay.py
+++ b/fedmsg/commands/relay.py
@@ -68,6 +68,8 @@ class RelayCommand(BaseCommand):
options=self.config,
# Only run this *one* consumer
consumers=[RelayConsumer],
+ # And no producers.
+ producers=[],
# Tell moksha to quiet its logging.
framework=False,
)
|
Fedmsg-relay shouldn't run producers. If another package happens to be installed in the system, running `fedmsg-relay` shouldn't inadvertently start that thing.
|
py
|
diff --git a/openfisca_core/columns.py b/openfisca_core/columns.py
index <HASH>..<HASH> 100644
--- a/openfisca_core/columns.py
+++ b/openfisca_core/columns.py
@@ -224,13 +224,17 @@ class EnumCol(IntCol):
def json_to_python(self):
enum = self.enum
if enum is None:
- return super(EnumCol, self).json_to_python
+ return conv.pipe(
+ conv.test_isinstance((basestring, int)),
+ conv.anything_to_int,
+ conv.default(self._default),
+ )
# This converters accepts either an item number or an item name.
index_by_slug = self.index_by_slug
if index_by_slug is None:
self.index_by_slug = index_by_slug = dict(
(strings.slugify(name), index)
- for index, name in sorted(enum._vars.iteritems() if enum is not None else ())
+ for index, name in sorted(enum._vars.iteritems())
)
return conv.pipe(
conv.test_isinstance((basestring, int)),
|
Accept string indexes when enumeration column has no enumeration.
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.