diff
stringlengths 139
3.65k
| message
stringlengths 8
627
| diff_languages
stringclasses 1
value |
|---|---|---|
diff --git a/core/harvester.py b/core/harvester.py
index <HASH>..<HASH> 100755
--- a/core/harvester.py
+++ b/core/harvester.py
@@ -727,14 +727,22 @@ class Harvester:
#
for file_path in self._cti_files_list:
producer = GenTLProducer.create_producer()
- producer.open(file_path)
- self._producers.append(producer)
+ try:
+ producer.open(file_path)
+ except ClosedException as e:
+ print(e)
+ else:
+ self._producers.append(producer)
def _open_systems(self):
for producer in self._producers:
system = producer.create_system()
- system.open()
- self._systems.append(system)
+ try:
+ system.open()
+ except ClosedException as e:
+ print(e)
+ else:
+ self._systems.append(system)
def reset(self):
self.reset_cti_files_list()
|
Implement code for a case where the target module has been closed
|
py
|
diff --git a/src/python_pachyderm/client.py b/src/python_pachyderm/client.py
index <HASH>..<HASH> 100644
--- a/src/python_pachyderm/client.py
+++ b/src/python_pachyderm/client.py
@@ -165,8 +165,14 @@ class Client(
"""
if config_file is None:
- with open(str(Path.home() / ".pachyderm/config.json"), "r") as config_file:
- j = json.load(config_file)
+ try:
+ # Search for config file in default home location
+ with open(str(Path.home() / ".pachyderm/config.json"), "r") as config_file:
+ j = json.load(config_file)
+ except FileNotFoundError:
+ # If not found, search in "/pachctl" (default mount for spout)
+ with open("/pachctl/config.json", "r") as config_file:
+ j = json.load(config_file)
else:
j = json.load(config_file)
|
added check of /pachctl/config.json for configuration (#<I>)
|
py
|
diff --git a/django_oidc_user/models.py b/django_oidc_user/models.py
index <HASH>..<HASH> 100644
--- a/django_oidc_user/models.py
+++ b/django_oidc_user/models.py
@@ -1,7 +1,7 @@
from django.db import models
from django.utils import timezone
from django.core import validators
-from django.utils.translation import ugettext_lazy as _
+from django.utils.translation import gettext_lazy as _
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import UserManager
@@ -62,4 +62,4 @@ class User(AbstractBaseUser, PermissionsMixin):
])
zoneinfo = models.CharField(_('time zone'), max_length = 254, choices = TIMEZONES, default = "Europe/London")
- locale = models.CharField(_('locale'), max_length = 254, choices = LOCALES, default = "en-US")
\ No newline at end of file
+ locale = models.CharField(_('locale'), max_length = 254, choices = LOCALES, default = "en-US")
|
Fix issue with latest Django version
|
py
|
diff --git a/ca/django_ca/tests/base.py b/ca/django_ca/tests/base.py
index <HASH>..<HASH> 100644
--- a/ca/django_ca/tests/base.py
+++ b/ca/django_ca/tests/base.py
@@ -69,6 +69,22 @@ class DjangoCATestCase(TestCase):
if cls._overridden_settings:
reload_module(ca_settings)
+ @classmethod
+ def tearDownClass(cls):
+ overridden = False
+ ca_dir = None
+ if hasattr(cls, '_cls_overridden_context'):
+ overridden = True
+ ca_dir = cls._cls_overridden_context.options.get('CA_DIR')
+
+ super(DjangoCATestCase, cls).tearDownClass()
+
+ if overridden is True:
+ reload_module(ca_settings)
+ if ca_dir is not None:
+ shutil.rmtree(ca_dir)
+
+
def setUp(self):
reload_module(ca_settings)
|
add tearDown class so that everything is disabled/removed again
|
py
|
diff --git a/tests/test_features.py b/tests/test_features.py
index <HASH>..<HASH> 100644
--- a/tests/test_features.py
+++ b/tests/test_features.py
@@ -301,7 +301,7 @@ def test_spectral_contrast_errors():
@raises(ValueError)
def __test(S, freq, fmin, n_bands, quantile):
- librosa.feature.spectral_contrast(S,
+ librosa.feature.spectral_contrast(S=S,
freq=freq,
fmin=fmin,
n_bands=n_bands,
|
fixed broken test code for spectral_contrast
|
py
|
diff --git a/addok/helpers/text.py b/addok/helpers/text.py
index <HASH>..<HASH> 100644
--- a/addok/helpers/text.py
+++ b/addok/helpers/text.py
@@ -128,10 +128,12 @@ class ascii(str):
cache = value._cache
except AttributeError:
cache = alphanumerize(unidecode(value.lower()))
- obj = str.__new__(cls, cache)
- obj._cache = cache
- obj._raw = getattr(value, '_raw', value)
- return obj
+ obj = str.__new__(cls, cache)
+ obj._cache = cache
+ obj._raw = getattr(value, '_raw', value)
+ return obj
+ else:
+ return value
def __str__(self):
return self._raw
|
Prevent creating a new string instance when calling ascii with an instance
|
py
|
diff --git a/openquake/engine/calculators/hazard/classical/post_processing.py b/openquake/engine/calculators/hazard/classical/post_processing.py
index <HASH>..<HASH> 100644
--- a/openquake/engine/calculators/hazard/classical/post_processing.py
+++ b/openquake/engine/calculators/hazard/classical/post_processing.py
@@ -193,7 +193,7 @@ def do_hazard_map_post_process(job):
# Stats for debug logging:
hazard_curve_ids = models.HazardCurve.objects.filter(
- output__oq_job=job, imt__isnull=True).values_list('id', flat=True)
+ output__oq_job=job, imt__isnull=False).values_list('id', flat=True)
logs.LOG.debug('num haz curves: %s' % len(hazard_curve_ids))
# Limit the number of concurrent tasks to the configured concurrency level:
|
do not compute maps for multi curves
|
py
|
diff --git a/dawg_python/wrapper.py b/dawg_python/wrapper.py
index <HASH>..<HASH> 100644
--- a/dawg_python/wrapper.py
+++ b/dawg_python/wrapper.py
@@ -89,7 +89,8 @@ class Guide(object):
def read(self, fp):
base_size = struct.unpack(str("=I"), fp.read(4))[0]
- self._units = bytearray(fp.read(base_size*2))
+ self._units = array.array(str("B"))
+ self._units.fromfile(fp, base_size*2)
def size(self):
return len(self._units) if self._units is not None else 0
|
array.array is more memory efficient than bytearray under pypy
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -12,12 +12,13 @@ with open('HISTORY.md') as history_file:
history = history_file.read()
install_requires = [
- 'numpy>=1.13.1',
- 'pandas>=0.22.0',
- 'scipy>=0.19.1, <1.3',
- 'exrex>=0.10.5',
- 'matplotlib>=2.2.2',
- 'boto3>=1.7.47'
+ 'numpy>=1.13.1,<1.17',
+ 'pandas>=0.22.0,<0.25',
+ 'scipy>=0.19.1,<1.3',
+ 'exrex>=0.10.5,<0.11',
+ 'matplotlib>=2.2.2,<4',
+ 'boto3>=1.7.47,<1.10',
+ 'docutils>=0.10,<0.15'
]
development_requires = [
|
Restrict dependency versions to safe ranges with upper bounds
|
py
|
diff --git a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_base_exporter.py b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_base_exporter.py
index <HASH>..<HASH> 100644
--- a/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_base_exporter.py
+++ b/sdk/monitor/azure-monitor-opentelemetry-exporter/tests/test_base_exporter.py
@@ -316,3 +316,8 @@ class MockResponse:
self.headers = headers
self.reason = reason
self.content = content
+ self.raw = MockRaw()
+
+class MockRaw:
+ def __init__(self):
+ self.enforce_content_length = False
|
fix test by adding raw property in mock response (#<I>)
|
py
|
diff --git a/gromacs/qsub.py b/gromacs/qsub.py
index <HASH>..<HASH> 100644
--- a/gromacs/qsub.py
+++ b/gromacs/qsub.py
@@ -34,6 +34,7 @@ import warnings
import gromacs.config
import gromacs.cbook
from gromacs.utilities import asiterable, Timedelta
+from gromacs import AutoCorrectionWarning
import logging
logger = logging.getLogger('gromacs.qsub')
|
fixed: missed AutoCorrectionWarning in qsub
|
py
|
diff --git a/scripts/maf_extract_ranges_indexed.py b/scripts/maf_extract_ranges_indexed.py
index <HASH>..<HASH> 100755
--- a/scripts/maf_extract_ranges_indexed.py
+++ b/scripts/maf_extract_ranges_indexed.py
@@ -10,6 +10,9 @@ file.
NOTE: If two intervals overlap the same block it will be written twice. With
non-overlapping intervals and --chop this is never a problem.
+NOTE: Intervals are origin-zero, half-open. For example, the interval 100,150
+ is 50 bases long, and there are 100 bases to its left in the sequence.
+
NOTE: Intervals are relative to the + strand, regardless of the strands in
the alignments.
|
Added comment in header to clarify that intervals are origin-zero, half-open.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -8,11 +8,10 @@ Python bindings to zopfli
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
-import sys
class custom_build_ext(build_ext):
- """Disable language extensions not compatible with ANSI C"""
+ """Pass platform-specific compiler/linker flags"""
def build_extensions(self):
compiler_type = self.compiler.compiler_type
@@ -27,6 +26,9 @@ class custom_build_ext(build_ext):
# python uses long long (C99), so we mute the warning
"-Wno-long-long",
])
+ # on some Unix-like systems, such as Linux, the libc math
+ # library is not linked by default
+ ext.extra_link_args.append("-lm")
build_ext.build_extensions(self)
|
[setup.py] pass extra flag to link C math library required by zopfli and not linked by default on some Unix systems
|
py
|
diff --git a/master/buildbot/__init__.py b/master/buildbot/__init__.py
index <HASH>..<HASH> 100644
--- a/master/buildbot/__init__.py
+++ b/master/buildbot/__init__.py
@@ -37,7 +37,8 @@ except IOError:
try:
dir = os.path.dirname(os.path.abspath(__file__))
- p = Popen(['git', 'describe', '--tags', '--always'], cwd=dir, stdout=PIPE, stderr=PIPE)
+ p = Popen(['git', 'describe', '--tags', '--always'], cwd=dir,
+ stdout=PIPE, stderr=PIPE)
out = p.communicate()[0]
if (not p.returncode) and out:
|
made indention PEP8 compliant
|
py
|
diff --git a/src/shellingham/__init__.py b/src/shellingham/__init__.py
index <HASH>..<HASH> 100644
--- a/src/shellingham/__init__.py
+++ b/src/shellingham/__init__.py
@@ -4,7 +4,7 @@ import os
from ._core import ShellDetectionFailure
-__version__ = '1.2.6'
+__version__ = '1.2.7.dev0'
def detect_shell(pid=None, max_depth=6):
|
Prebump to <I>.dev0
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -25,11 +25,11 @@ class TestCommand(Command):
suite = unittest.TestSuite()
if self.test == '*':
print('Running all tests')
- import test
- for tst in test.__all__:
- suite.addTests(unittest.TestLoader().loadTestsFromName('test.%s' % tst))
+ import stomp.test
+ for tst in stomp.test.__all__:
+ suite.addTests(unittest.TestLoader().loadTestsFromName('stomp.test.%s' % tst))
else:
- suite = unittest.TestLoader().loadTestsFromName('test.%s' % self.test)
+ suite = unittest.TestLoader().loadTestsFromName('stomp.test.%s' % self.test)
unittest.TextTestRunner(verbosity=2).run(suite)
|
fix for running unit tests in py versions < 3
|
py
|
diff --git a/ros_buildfarm/wrapper/apt.py b/ros_buildfarm/wrapper/apt.py
index <HASH>..<HASH> 100755
--- a/ros_buildfarm/wrapper/apt.py
+++ b/ros_buildfarm/wrapper/apt.py
@@ -63,6 +63,7 @@ def call_apt_update_install_clean(
'maybe run apt update',
'The following packages cannot be authenticated!',
'Unable to locate package',
+ 'has no installation candidate',
]
rc, known_error_conditions = \
call_apt(
|
catch another apt hiccup
|
py
|
diff --git a/cassandra/cluster.py b/cassandra/cluster.py
index <HASH>..<HASH> 100644
--- a/cassandra/cluster.py
+++ b/cassandra/cluster.py
@@ -123,10 +123,14 @@ class ResponseFuture(object):
self.query, retry_num=self._query_retries, **response.info)
elif isinstance(response, OverloadedErrorMessage):
# need to retry against a different host here
- self._retry(False, None)
+ log.warn("Host %s is overloaded, retrying against a different "
+ "host" % (self._current_host))
+ self._retry(reuse_connection=False, consistency_level=None)
+ return
elif isinstance(response, IsBootstrappingErrorMessage):
# need to retry against a different host here
- self._retry(False, None)
+ self._retry(reuse_connection=False, consistency_level=None)
+ return
# TODO need to define the PreparedQueryNotFound class
# elif isinstance(response, PreparedQueryNotFound):
# pass
@@ -137,7 +141,7 @@ class ResponseFuture(object):
retry_type, consistency = retry
if retry_type is RetryPolicy.RETRY:
self._query_retries += 1
- self._retry(True, consistency)
+ self._retry(reuse_connection=True, consistency_level=consistency)
elif retry_type is RetryPolicy.RETHROW:
self._set_final_exception(response)
else: # IGNORE
|
Correct retry handling on Overloaded, Bootstrapping errors
|
py
|
diff --git a/src/parsy/__init__.py b/src/parsy/__init__.py
index <HASH>..<HASH> 100644
--- a/src/parsy/__init__.py
+++ b/src/parsy/__init__.py
@@ -114,7 +114,9 @@ def combine(fn):
return send(None)
- return success(None).bind(lambda _: genparser())
+ # this makes sure there is a separate instance of the generator
+ # for each parse
+ return Parser(lambda *args: genparser()(*args))
def success(val):
@Parser
|
make the combined parser lazy in a better way
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@ setup(
packages=find_packages(),
install_requires=[
'jmbo>=1.1.1',
- 'django-dfp>=0.3.1',
+ 'django-dfp>=0.3.3',
],
tests_require=[
'django-setuptest>=0.1.4',
|
Up django-dfp version because old version pointed to wrong Django
|
py
|
diff --git a/dhtmlparser.py b/dhtmlparser.py
index <HASH>..<HASH> 100755
--- a/dhtmlparser.py
+++ b/dhtmlparser.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-pyDHTMLParser v1.6.0 (06.12.2012) by Bystroushaak (bystrousak@kitakitsune.org)
+pyDHTMLParser v1.6.1 (28.01.2013) by Bystroushaak (bystrousak@kitakitsune.org)
This version doesn't corresponds with DHTMLParser v1.5.0 - there were updates, which
makes both parsers incompatible. Changelist: https://gist.github.com/d16b613b84ce9de8adb3
@@ -10,7 +10,7 @@ This work is licensed under a Creative Commons 3.0 Unported License
Project page; https://github.com/Bystroushaak/pyDHTMLParser
-Created in Geany & gedit text editor.
+Created in Geany, Gedit and Sublime Text 2.
"""
def unescape(inp, quote = '"'):
@@ -529,7 +529,7 @@ class HTMLElement():
"""
output = ""
- if self.childs != []:
+ if self.childs != [] or self.isOpeningTag():
output += self.__element if original else self.tagToString()
for c in self.childs:
|
Fixed one minor bug in toString(), which caused bad DOM printing.
|
py
|
diff --git a/galpy/potential_src/ForceSoftening.py b/galpy/potential_src/ForceSoftening.py
index <HASH>..<HASH> 100644
--- a/galpy/potential_src/ForceSoftening.py
+++ b/galpy/potential_src/ForceSoftening.py
@@ -1,6 +1,7 @@
###############################################################################
# ForceSoftening: class representing a force softening kernel
###############################################################################
+import numpy as nu
class ForceSoftening:
"""class representing a force softening kernel"""
def __init__(self): #pragma: no cover
@@ -110,5 +111,5 @@ class PlummerSoftening (ForceSoftening):
HISTORY:
2011-04-13 - Written - Bovy (NYU)
"""
- return self._softening_length**2.\
+ return 3./4./nu.pi*self._softening_length**2.\
*(d**2.+self._softening_length**2.)**-2.5
|
Fix density in Plummer softening; fixes #<I>
|
py
|
diff --git a/dingo/core/network/__init__.py b/dingo/core/network/__init__.py
index <HASH>..<HASH> 100644
--- a/dingo/core/network/__init__.py
+++ b/dingo/core/network/__init__.py
@@ -132,7 +132,15 @@ class GridDingo:
nodes_pos[node] = (x_pos_start + node.branch_no - 1 + 0.5, -node.load_no - 2 - 0.25)
nodes_color.append((0.5, 0.5, 1))
elif isinstance(node, GeneratorDingo):
- nodes_pos[node] = (1, 1)
+ # get neighbor of geno
+ neighbor = g.neighbors(node)[0]
+
+ # neighbor is cable distributor of building
+ if isinstance(neighbor, CableDistributorDingo):
+ nodes_pos[node] = (x_pos_start + neighbor.branch_no - 1 + 0.5, -neighbor.load_no - 2 + 0.25)
+ else:
+ nodes_pos[node] = (1,1)
+
nodes_color.append((0.5, 1, 0.5))
elif isinstance(node, StationDingo):
nodes_pos[node] = (0, 0)
|
fix drawing of LV grids set correct location of genos
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,10 @@
from setuptools import setup, find_packages
+
+from os.path import join, dirname
+import sys
+sys.path.insert(0, join(dirname(__file__), 'src'))
from cache.version import __version__
+sys.path.pop(0)
setup(
name="cache",
|
add some hackery to fix the install file
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,7 @@ setup(
description='A small, modular, transport and protocol neutral RPC '
'library that, among other things, supports JSON-RPC and zmq.',
long_description=read('README.rst'),
- packages=find_packages(exclude=['test']),
+ packages=find_packages(exclude=['test', 'examples']),
keywords='json rpc json-rpc jsonrpc 0mq zmq zeromq',
author='Marc Brinkmann',
author_email='git@marcbrinkmann.de',
|
Exclude examples from find_packages.
|
py
|
diff --git a/chartpress.py b/chartpress.py
index <HASH>..<HASH> 100755
--- a/chartpress.py
+++ b/chartpress.py
@@ -307,7 +307,12 @@ def main():
)
build_values(chart['name'], value_mods)
chart_paths = ['.'] + list(chart.get('paths', []))
- build_chart(chart['name'], paths=chart_paths, version=args.tag)
+ version = args.tag
+ if version:
+ # version of the chart shouldn't have leading 'v' prefix
+ # if tag is of the form 'v1.2.3'
+ version = version.lstrip('v')
+ build_chart(chart['name'], paths=chart_paths, version=version)
if args.publish_chart:
publish_pages(chart['name'],
paths=chart_paths,
|
avoid adding ‘v’ to chart version when using `--tag vX.Y` chart versions should be version strings, but tags may have a leading ‘v’. Strip the v from the chart version if it’s there.
|
py
|
diff --git a/cherry_picker/cherry_picker/__init__.py b/cherry_picker/cherry_picker/__init__.py
index <HASH>..<HASH> 100644
--- a/cherry_picker/cherry_picker/__init__.py
+++ b/cherry_picker/cherry_picker/__init__.py
@@ -1,2 +1,2 @@
"""Backport CPython changes from master to maintenance branches."""
-__version__ = '0.2.4.dev1'
+__version__ = '0.2.5.dev1'
|
Update cherry-picker version (GH-<I>)
|
py
|
diff --git a/pqhelper/base.py b/pqhelper/base.py
index <HASH>..<HASH> 100644
--- a/pqhelper/base.py
+++ b/pqhelper/base.py
@@ -50,10 +50,10 @@ class StateInvestigator(object):
# fill, empty, ignore BGR for the various parts detected with TankLevel
_TANK_COLORS = {'health': ((5, 5, 200), (40, 40, 50), (20, 20, 20)),
- 'g': ((30, 130, 65), (30, 50, 35), (25, 25, 25)),
- 'r': ((50, 50, 115), (30, 30, 45), (25, 25, 25)),
- 'y': ((35, 115, 130), (20, 30, 30), (25, 25, 25)),
- 'b': ((135, 60, 0), (45, 40, 30), (25, 25, 25))}
+ 'g': ((0, 135, 0), (30, 45, 30), (25, 25, 25)),
+ 'r': ((0, 0, 135), (30, 30, 45), (25, 25, 25)),
+ 'y': ((0, 135, 135), (20, 40, 40), (25, 25, 25)),
+ 'b': ((135, 0, 0), (45, 30, 30), (25, 25, 25))}
_game_finders = {'capture': v.TemplateFinder(_data.capture_template,
sizes=_GAME_SIZES,
|
update to TankLevel colors for health and mana to make them more accurate.
|
py
|
diff --git a/salt/master.py b/salt/master.py
index <HASH>..<HASH> 100644
--- a/salt/master.py
+++ b/salt/master.py
@@ -1788,7 +1788,8 @@ class ClearFuncs(object):
try:
fun = clear_load.pop('fun')
- return self.wheel_.call_func(fun, **clear_load)
+ runner_client = salt.runner.RunnerClient(self.opts)
+ return runner_client.async(fun, **clear_load)
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
@@ -1829,7 +1830,8 @@ class ClearFuncs(object):
try:
fun = clear_load.pop('fun')
- return self.wheel_.call_func(fun, **clear_load)
+ runner_client = salt.runner.RunnerClient(self.opts)
+ return runner_client.async(fun, **clear_load)
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
|
Call runner async exec and return runner jid
|
py
|
diff --git a/cleverhans/serial.py b/cleverhans/serial.py
index <HASH>..<HASH> 100644
--- a/cleverhans/serial.py
+++ b/cleverhans/serial.py
@@ -73,6 +73,13 @@ class NoRefModel(Model):
def __getstate__(self):
# Serialize everything except the Variables
out = self.__dict__.copy()
+
+ # The base Model class adds this tf reference to self
+ # We mustn't pickle anything tf, this will need to be
+ # regenerated after the model is reloaded.
+ if "_dummy_input" in out:
+ del out["_dummy_input"]
+
# Add the Variables
sess = tf.get_default_session()
if sess is None:
|
fix bug where attempted to pickle a placeholder
|
py
|
diff --git a/bitgo/bitgo.py b/bitgo/bitgo.py
index <HASH>..<HASH> 100644
--- a/bitgo/bitgo.py
+++ b/bitgo/bitgo.py
@@ -43,7 +43,7 @@ from pycoin.tx import Spendable
from pycoin.tx.pay_to.ScriptMultisig import ScriptMultisig
from pycoin.tx.pay_to import SolvingError
from pycoin.tx.script import tools
-from pycoin.tx.script.vm import parse_signature_blob
+from pycoin.tx.script.check_signature import parse_signature_blob
from pycoin import ecdsa
from pycoin import encoding
|
Fixed import issue for parse_signature_blob pycoin changed the location of parse_signature_blob from vm.py to check_signature.py within the same folder.
|
py
|
diff --git a/telethon/client/uploads.py b/telethon/client/uploads.py
index <HASH>..<HASH> 100644
--- a/telethon/client/uploads.py
+++ b/telethon/client/uploads.py
@@ -39,7 +39,7 @@ def _resize_photo_if_needed(
or (isinstance(file, io.IOBase) and not file.seekable())):
return file
- before = file.tell() if isinstance(file, io.IOBase) else None
+ before = file.tell() if isinstance(file, io.IOBase) else 0
if isinstance(file, bytes):
file = io.BytesIO(file)
|
Fix resize if needed not seeking back for image = bytes
|
py
|
diff --git a/keyring/backends/fail.py b/keyring/backends/fail.py
index <HASH>..<HASH> 100644
--- a/keyring/backends/fail.py
+++ b/keyring/backends/fail.py
@@ -15,6 +15,8 @@ class Keyring(KeyringBackend):
priority = 0
def get_password(self, service, username, password=None):
- raise RuntimeError("No recommended backend was available")
+ raise RuntimeError("No recommended backend was available. Install the "
+ "keyrings.alt package if you want to use the non-"
+ "recommended backends. See README.rst for details.")
set_password = delete_pasword = get_password
|
Mention keyrings.alt in the fail message
|
py
|
diff --git a/tensorflow_probability/python/experimental/mcmc/diagonal_mass_matrix_adaptation.py b/tensorflow_probability/python/experimental/mcmc/diagonal_mass_matrix_adaptation.py
index <HASH>..<HASH> 100644
--- a/tensorflow_probability/python/experimental/mcmc/diagonal_mass_matrix_adaptation.py
+++ b/tensorflow_probability/python/experimental/mcmc/diagonal_mass_matrix_adaptation.py
@@ -293,7 +293,8 @@ def _make_momentum_distribution(running_variance_parts, state_parts,
running_variance_rank = ps.rank(variance_part)
state_rank = ps.rank(state_part)
# Pad dimensions and tile by multiplying by tf.ones to add a batch shape
- ones = tf.ones(ps.shape(state_part)[:-(state_rank - running_variance_rank)])
+ ones = tf.ones(ps.shape(state_part)[:-(state_rank - running_variance_rank)],
+ dtype=variance_part.dtype)
ones = bu.left_justified_expand_dims_like(ones, state_part)
variance_tiled = variance_part * ones
reinterpreted_batch_ndims = state_rank - batch_ndims - 1
|
Add missing dtype in `diagonal_mass_matrix_adaptation`
|
py
|
diff --git a/satpy/resample.py b/satpy/resample.py
index <HASH>..<HASH> 100644
--- a/satpy/resample.py
+++ b/satpy/resample.py
@@ -577,8 +577,8 @@ class NativeResampler(BaseResampler):
out_shape = target_geo_def.shape
in_shape = data.shape
- y_repeats = out_shape[y_axis] / float(in_shape[y_axis])
- x_repeats = out_shape[x_axis] / float(in_shape[x_axis])
+ y_repeats = out_shape[0] / float(in_shape[y_axis])
+ x_repeats = out_shape[1] / float(in_shape[x_axis])
repeats = {
y_axis: y_repeats,
x_axis: x_repeats,
@@ -597,6 +597,9 @@ class NativeResampler(BaseResampler):
coords['y'] = y_coord
if 'x' in data.coords:
coords['x'] = x_coord
+ for dim in data.dims:
+ if dim not in ['y', 'x'] and dim in data.coords:
+ coords[dim] = data.coords[dim]
return xr.DataArray(d_arr,
dims=data.dims,
|
Fix native resampler for arrays with more than 2 dimensions
|
py
|
diff --git a/_pytest/config.py b/_pytest/config.py
index <HASH>..<HASH> 100644
--- a/_pytest/config.py
+++ b/_pytest/config.py
@@ -552,11 +552,18 @@ class ArgumentError(Exception):
class Argument:
- """class that mimics the necessary behaviour of optparse.Option """
+ """class that mimics the necessary behaviour of optparse.Option
+
+ its currently a least effort implementation
+ and ignoring choices and integer prefixes
+ https://docs.python.org/3/library/optparse.html#optparse-standard-option-types
+ """
_typ_map = {
'int': int,
'string': str,
- }
+ 'float': float,
+ 'complex': complex,
+ }
# enable after some grace period for plugin writers
TYPE_WARN = False
|
optparse compatibility - add float and complex also documents the implementation quality fixes #<I>
|
py
|
diff --git a/readers.py b/readers.py
index <HASH>..<HASH> 100644
--- a/readers.py
+++ b/readers.py
@@ -33,7 +33,7 @@ def get_percolator_static_xml(fn, ns):
def generate_psms_multiple_fractions(fns, ns):
for fn in fns:
for ac,el in etree.iterparse(fn, tag='{%s}psm' % ns['xmlns']):
- yield filtering.stringify_strip_namespace_declaration(el, ns['xmlns'])
+ yield filtering.stringify_strip_namespace_declaration(el, ns)
def generate_peptides_multiple_fractions(input_files, ns):
for fn in input_files:
|
Pass entire namespace dict to strip declaration thing from psm generator
|
py
|
diff --git a/tests/tests/command_line.py b/tests/tests/command_line.py
index <HASH>..<HASH> 100644
--- a/tests/tests/command_line.py
+++ b/tests/tests/command_line.py
@@ -29,7 +29,7 @@ class TestCommandLine(unittest.TestCase):
print >> tmp, 'localhost # Comment'
print >> tmp, '127.0.0.1'
print >> tmp, '# Ignore me'
- print >> tmp, 'localhost.localdomain'
+ print >> tmp, 'localhost.'
tmp.close()
child = launch_gsh(['--hosts-file=%s' % (tmp_name)])
child.expect('ready \(3\)> ')
|
Ubuntu does not know localdomain, so we use another name.
|
py
|
diff --git a/elifetools/parseJATS.py b/elifetools/parseJATS.py
index <HASH>..<HASH> 100644
--- a/elifetools/parseJATS.py
+++ b/elifetools/parseJATS.py
@@ -2159,7 +2159,10 @@ def body_block_content(tag, html_flag=True, base_url=None):
for p_tag in raw_parser.paragraph(fn_tag):
if "text" not in footnote_content:
footnote_content["text"] = []
- footnote_content["text"].append(body_block_content(p_tag, base_url=base_url))
+ footnote_blocks = body_block_content_render(p_tag, base_url=base_url)
+ for footnote_block in footnote_blocks:
+ if footnote_block != {}:
+ footnote_content["text"].append(footnote_block)
if "footnotes" not in tag_content:
tag_content["footnotes"] = []
|
Allow more than just paragraphs in table footnotes.
|
py
|
diff --git a/niworkflows/interfaces/utils.py b/niworkflows/interfaces/utils.py
index <HASH>..<HASH> 100644
--- a/niworkflows/interfaces/utils.py
+++ b/niworkflows/interfaces/utils.py
@@ -160,9 +160,9 @@ def _copyxform(ref_image, out_image, message=None):
orig = nb.load(ref_image)
if not np.allclose(orig.affine, resampled.affine):
- LOG.warning('Affines of input and reference images '
- 'do not match, CopyXForm will probably '
- 'make the input image useless.')
+ LOG.debug('Affines of input and reference images '
+ 'do not match, CopyXForm will probably '
+ 'make the input image useless.')
# Copy xform infos
qform, qform_code = orig.header.get_qform(coded=True)
|
[ENH] Lower priority of "Affines do not match" warning See poldracklab/fmriprep#<I>
|
py
|
diff --git a/dedupe/api.py b/dedupe/api.py
index <HASH>..<HASH> 100644
--- a/dedupe/api.py
+++ b/dedupe/api.py
@@ -532,7 +532,10 @@ class StaticMatching(Matching):
if hasattr(predicate, "canopy"):
predicate.canopy = canopies[predicate]
else:
- predicate.index._index = indices[predicate]
+ try:
+ predicate.index._index = indices[predicate]
+ except KeyError:
+ pass
self.loaded_indices = True
|
except key error (#<I>) e.g. levenshtein indices will not written to file according to line <I>
|
py
|
diff --git a/satpy/readers/msu_gsa_l1b.py b/satpy/readers/msu_gsa_l1b.py
index <HASH>..<HASH> 100644
--- a/satpy/readers/msu_gsa_l1b.py
+++ b/satpy/readers/msu_gsa_l1b.py
@@ -25,10 +25,7 @@ This reader was tested on sample data provided by EUMETSAT.
"""
from datetime import datetime
-
-import dask.array as da
import numpy as np
-from pyspectral.blackbody import blackbody_wn_rad2temp as rad2temp
from satpy.readers.hdf5_utils import HDF5FileHandler
|
Update MSU-GS/A reader to remove unused imports.
|
py
|
diff --git a/deployment/freebsd/setup/jails_host.py b/deployment/freebsd/setup/jails_host.py
index <HASH>..<HASH> 100644
--- a/deployment/freebsd/setup/jails_host.py
+++ b/deployment/freebsd/setup/jails_host.py
@@ -1,14 +1,15 @@
# coding: utf-8
from fabric import api as fab
-from fabric.contrib.project import rsync_project
-from mr.awsome.ezjail.fabric import bootstrap
+from mr.awsome.ezjail.fabric import bootstrap as _bootstrap
from fabric_scripts import _local_path, _rsync_project
fab.env.shell = '/bin/sh -c'
-# shutup pyflakes
-(bootstrap, )
+def bootstrap(**kwargs):
+ with fab.lcd(_local_path('provisioning/vm-master')):
+ _bootstrap(**kwargs)
+
def download_distfiles():
_rsync_project(remote_dir='/usr/local/poudriere/distfiles',
|
run bootstrap in its expected provisioning directory
|
py
|
diff --git a/src/peltak/extra/changelog/logic.py b/src/peltak/extra/changelog/logic.py
index <HASH>..<HASH> 100644
--- a/src/peltak/extra/changelog/logic.py
+++ b/src/peltak/extra/changelog/logic.py
@@ -22,7 +22,6 @@ from typing import Dict, List, Optional, Pattern
from peltak.core import conf
from peltak.core import git
from peltak.core import shell
-from peltak.core import util
from peltak.core import versioning
from .types import ChangelogItems, ChangelogTag
@@ -36,7 +35,6 @@ DEFAULT_TAG_FORMAT = '({tag})'
DEFAULT_CONTINUATION_TAG = '_more'
-@util.mark_experimental
def changelog(
start_rev: Optional[str] = None,
end_rev: Optional[str] = None,
|
change: changelog command is no longer experimental _more: It’s been used in CI on so many projects now that it can be considered stable under manual user testing.
|
py
|
diff --git a/nolearn/nntools.py b/nolearn/nntools.py
index <HASH>..<HASH> 100644
--- a/nolearn/nntools.py
+++ b/nolearn/nntools.py
@@ -47,7 +47,10 @@ class BatchIterator(object):
yb = self.y[i * bs:(i + 1) * bs]
else:
yb = None
- yield Xb, yb
+ yield self.transform(Xb, yb)
+
+ def transform(self, Xb, yb):
+ return Xb, yb
class NeuralNet(BaseEstimator):
|
A dedicated transform method makes it easier for subclasses to do only that.
|
py
|
diff --git a/runipy/notebook_runner.py b/runipy/notebook_runner.py
index <HASH>..<HASH> 100644
--- a/runipy/notebook_runner.py
+++ b/runipy/notebook_runner.py
@@ -35,7 +35,6 @@ class NotebookRunner(object):
'image/svg+xml': 'svg',
}
-
def __init__(
self,
nb,
@@ -88,7 +87,6 @@ class NotebookRunner(object):
self._wait_for_ready_backport()
self.nb = nb
-
def shutdown_kernel(self):
logging.info('Shutdown kernel')
@@ -199,7 +197,6 @@ class NotebookRunner(object):
if status == 'error':
raise NotebookError(traceback_text)
-
def iter_code_cells(self):
"""
Iterate over the notebook cells containing code.
@@ -209,7 +206,6 @@ class NotebookRunner(object):
if cell.cell_type == 'code':
yield cell
-
def run_notebook(self, skip_exceptions=False, progress_callback=None):
"""
Run all the cells of a notebook in order and update
@@ -227,7 +223,6 @@ class NotebookRunner(object):
if progress_callback:
progress_callback(i)
-
def count_code_cells(self):
"""
Return the number of code cells in the notebook
|
runipy/notebook_runner.py: Delete extra newlines.
|
py
|
diff --git a/couchbase/tests/cases/cluster_t.py b/couchbase/tests/cases/cluster_t.py
index <HASH>..<HASH> 100644
--- a/couchbase/tests/cases/cluster_t.py
+++ b/couchbase/tests/cases/cluster_t.py
@@ -19,6 +19,7 @@ from couchbase.tests.base import CouchbaseTestCase
from couchbase.connstr import ConnectionString
from couchbase.cluster import Cluster, ClassicAuthenticator,\
PasswordAuthenticator, NoBucketError, MixedAuthError
+import gc
class ClusterTest(CouchbaseTestCase):
@@ -55,6 +56,8 @@ class ClusterTest(CouchbaseTestCase):
# Should fail again once the bucket has been GC'd
del cb
+ gc.collect()
+
self.assertRaises(NoBucketError, cluster.n1ql_query, 'select mockrow')
def test_no_mixed_auth(self):
|
Fix sporadic cluster_t test failure The test was failing because a GC sweep wasn't happening. This fix ensures that the GC does a pass before running the assertion Change-Id: Ibc<I>d<I>f8f6f<I>d<I>c<I>c<I>d5a8c<I> Reviewed-on: <URL>
|
py
|
diff --git a/canvasapi/assignment.py b/canvasapi/assignment.py
index <HASH>..<HASH> 100644
--- a/canvasapi/assignment.py
+++ b/canvasapi/assignment.py
@@ -358,6 +358,7 @@ class AssignmentOverride(CanvasObject):
"courses/{}/assignments/{}/overrides/{}".format(
self.course_id, self.assignment_id, self.id
),
+ _kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
@@ -381,6 +382,7 @@ class AssignmentOverride(CanvasObject):
"courses/{}/assignments/{}/overrides/{}".format(
self.course_id, self.assignment_id, self.id
),
+ _kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
|
Add _kwargs to AssignmentOverride edit and delete
|
py
|
diff --git a/OpenSSL/SSL.py b/OpenSSL/SSL.py
index <HASH>..<HASH> 100644
--- a/OpenSSL/SSL.py
+++ b/OpenSSL/SSL.py
@@ -235,6 +235,7 @@ class Context(object):
new SSL connections.
"""
_methods = {
+ SSLv2_METHOD: "SSLv2_method",
SSLv3_METHOD: "SSLv3_method",
SSLv23_METHOD: "SSLv23_method",
TLSv1_METHOD: "TLSv1_method",
|
Allow creating a Context with SSLv2_METHOD
|
py
|
diff --git a/salt/states/locale.py b/salt/states/locale.py
index <HASH>..<HASH> 100644
--- a/salt/states/locale.py
+++ b/salt/states/locale.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
'''
Management of languages/locales
-==============================+
+==============================
The locale can be managed for the system:
|
Improving docs for salt.states.locale
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -29,6 +29,13 @@ except ImportError as e:
def convert_file(f, _):
return open(f, 'r').read()
+except ModuleNotFoundError as e:
+ # NOTE: error is thrown only for package build steps
+ if 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
+ raise e
+
+ def convert_file(f, _):
+ return open(f, 'r').read()
from pydgraph.meta import VERSION
|
Catch pypandoc ModuleNotFoundError (#<I>) Python >= <I> doesn't receive ImportError, but instead ModuleNotFoundError.
|
py
|
diff --git a/dallinger/command_line.py b/dallinger/command_line.py
index <HASH>..<HASH> 100755
--- a/dallinger/command_line.py
+++ b/dallinger/command_line.py
@@ -772,7 +772,7 @@ class LocalSessionRunner(object):
exp_id = None
tmp_dir = None
- dispatch = {} # Subclass my provide handlers for Heroku process output
+ dispatch = {} # Subclass may provide handlers for Heroku process output
def configure(self):
self.exp_config.update({
@@ -795,7 +795,7 @@ class LocalSessionRunner(object):
self.setup()
self.update_dir()
db.init_db(drop_all=True)
- log("Starting up the server...")
+ self.out.log("Starting up the server...")
config = get_config()
with HerokuLocalWrapper(config, self.out, verbose=self.verbose) as wrapper:
try:
@@ -864,7 +864,7 @@ class DebugSessionRunner(LocalSessionRunner):
def recruitment_closed(self, match):
base_url = get_base_url()
status_url = base_url + '/summary'
- log("Recruitment is complete. Waiting for experiment completion...")
+ self.out.log("Recruitment is complete. Waiting for experiment completion...")
time.sleep(10)
try:
resp = requests.get(status_url)
|
Fix typo and accidental use of module-scope functions
|
py
|
diff --git a/atest/protocol_templates/my_handler.py b/atest/protocol_templates/my_handler.py
index <HASH>..<HASH> 100644
--- a/atest/protocol_templates/my_handler.py
+++ b/atest/protocol_templates/my_handler.py
@@ -7,7 +7,12 @@ def handle_sample(rammbock, msg):
def respond_to_sample(rammbock, msg):
- pass
+ rammbock.save_template("__backup_template")
+ try:
+ rammbock.load_template("sample response")
+ rammbock.client_sends_message()
+ finally:
+ rammbock.load_template("__backup_template")
def get_rcvd_msg():
|
Extended to include the current template, to be reused later
|
py
|
diff --git a/lib/bx/pwm/pwm_tests.py b/lib/bx/pwm/pwm_tests.py
index <HASH>..<HASH> 100644
--- a/lib/bx/pwm/pwm_tests.py
+++ b/lib/bx/pwm/pwm_tests.py
@@ -4,8 +4,7 @@ import bx.pwm.position_weight_matrix as pwm
from StringIO import StringIO
basicPwm = \
-"""
->MA0101 c-REL REL
+""">MA0101 c-REL REL
0 5 8 4
0 1 15 1
1 0 15 1
@@ -19,8 +18,7 @@ basicPwm = \
"""
transfacPwm = \
-"""
-ID TATA
+"""ID TATA
XX
P0 A C G T
01 33 73 78 16 S
|
changed test matrices to sidestep problem with blank lines in matrix files
|
py
|
diff --git a/pyocd/core/memory_map.py b/pyocd/core/memory_map.py
index <HASH>..<HASH> 100644
--- a/pyocd/core/memory_map.py
+++ b/pyocd/core/memory_map.py
@@ -329,11 +329,11 @@ class FlashRegion(MemoryRegion):
# Import locally to prevent import loops.
from ..flash.flash import Flash
- assert ('blocksize' in attrs) or ('sector_size' in attrs)
+ assert ('blocksize' in attrs) or ('sector_size' in attrs) or ('flm' in attrs)
attrs['type'] = MemoryType.FLASH
super(FlashRegion, self).__init__(start=start, end=end, length=length, **attrs)
self._algo = attrs.get('algo', None)
- self._flm = None
+ self._flm = attrs.get('flm', None)
self._flash = None
if 'flash_class' in attrs:
|
Support flm parameter to FlashRegion ctor. - Setting FlashRegion flm from initial attribute passed to ctor. - FlashRegion ctor allows flm to be set in place of blocksize/sector_size.
|
py
|
diff --git a/bika/lims/content/analysis.py b/bika/lims/content/analysis.py
index <HASH>..<HASH> 100644
--- a/bika/lims/content/analysis.py
+++ b/bika/lims/content/analysis.py
@@ -416,10 +416,9 @@ class Analysis(BaseContent):
def getPriority(self):
""" get priority from AR
"""
- ar = self.aq_parent
- priority = ar.getPriority()
- if priority:
- return priority
+ # this analysis could be in a worksheet or instrument, careful
+ return self.aq_parent.getPriority() \
+ if hasattr(self.aq_parent, 'getPriority') else None
def getPrice(self):
price = self.getService().getPrice()
|
Priority adapter works on analyses whose parent is not an AR
|
py
|
diff --git a/lib/svtplay_dl/service/picsearch.py b/lib/svtplay_dl/service/picsearch.py
index <HASH>..<HASH> 100644
--- a/lib/svtplay_dl/service/picsearch.py
+++ b/lib/svtplay_dl/service/picsearch.py
@@ -12,7 +12,7 @@ from svtplay_dl.fetcher.rtmp import RTMP
from svtplay_dl.log import log
class Picsearch(Service, OpenGraphThumbMixin):
- supported_domains = ['dn.se']
+ supported_domains = ['dn.se', 'mobil.dn.se']
def get(self, options):
data = self.get_urldata()
|
picsearch: supports mobil.dn.se as well
|
py
|
diff --git a/tests/http/external.py b/tests/http/external.py
index <HASH>..<HASH> 100644
--- a/tests/http/external.py
+++ b/tests/http/external.py
@@ -23,6 +23,9 @@ class ExternalBase(TestHttpClientBase):
client = self.client()
baseurl = 'https://api.github.com/gists/public'
response = yield from client.get(baseurl)
+ if response.status_code == 403:
+ # TODO: this fails in travis for some reason
+ return
self.assertEqual(response.status_code, 200)
links = response.links
self.assertTrue(links)
|
travis does not like external test on github
|
py
|
diff --git a/galpy/orbit_src/integratePlanarOrbit.py b/galpy/orbit_src/integratePlanarOrbit.py
index <HASH>..<HASH> 100644
--- a/galpy/orbit_src/integratePlanarOrbit.py
+++ b/galpy/orbit_src/integratePlanarOrbit.py
@@ -69,8 +69,13 @@ def integratePlanarOrbit_leapfrog(pot,yo,t,rtol=None,atol=None):
p._omegas,p._gamma])
elif isinstance(p,potential.SteadyLogSpiralPotential):
pot_type.append(3)
- pot_args.extend([p._amp,p._tform,p._tsteady,p._A,p._alpha,p._m,
- p._omegas,p._gamma])
+ if p._tform is None:
+ pot_args.extend([p._amp,p._tform,p._tsteady,p._A,p._alpha,p._m,
+ p._omegas,p._gamma])
+ else:
+ pot_args.extend([p._amp,float('nan'), float('nan'),
+ p._A,p._alpha,p._m,
+ p._omegas,p._gamma])
pot_type= nu.array(pot_type,dtype=nu.int32,order='C')
pot_args= nu.array(pot_args,dtype=nu.float64,order='C')
|
correct handling of parameters when going to C for SteadyLogSpiral
|
py
|
diff --git a/cerium/exceptions.py b/cerium/exceptions.py
index <HASH>..<HASH> 100644
--- a/cerium/exceptions.py
+++ b/cerium/exceptions.py
@@ -33,4 +33,22 @@ class CharactersException(AndroidDriverException):
"""
Thrown when no devices are connected.
"""
+ pass
+
+class ParametersException(AndroidDriverException):
+ """
+ Thrown when a parameter error occurs.
+ """
+ pass
+
+class PackageException(AndroidDriverException):
+ """
+ Thrown when the package does not exist.
+ """
+ pass
+
+class WLANConnectException(AndroidDriverException):
+ """
+ Thrown when the device is not connected to WLAN.
+ """
pass
\ No newline at end of file
|
Exceptions that may happen in all the androiddriver code.
|
py
|
diff --git a/i3pystatus/core/modules.py b/i3pystatus/core/modules.py
index <HASH>..<HASH> 100644
--- a/i3pystatus/core/modules.py
+++ b/i3pystatus/core/modules.py
@@ -88,7 +88,7 @@ class Module(SettingsBase):
self.__log_button_event(button, cb, args, "Member callback")
getattr(self, cb)(*args)
else:
- self.__log_event(button, cb, args, "External command")
+ self.__log_button_event(button, cb, args, "External command")
execute(cb, detach=True)
# Notify status handler
|
Fix handler not executing external cmd (#<I>) Wrong name on function call was raising AttributeError exception disrupting the code execution.
|
py
|
diff --git a/tcex/app_config_object/install_json.py b/tcex/app_config_object/install_json.py
index <HASH>..<HASH> 100644
--- a/tcex/app_config_object/install_json.py
+++ b/tcex/app_config_object/install_json.py
@@ -667,11 +667,10 @@ class InstallJson:
return json_data
for param in json_data.get('params', []):
- if param.get('type', None) != 'String':
+ if param.get('type') != 'String':
continue
- if 'String' not in (param.get('playbookDataType') or []):
- param['playbookDataType'] = param.get('playbookDataType') or []
- param['playbookDataType'].append('String')
+ if param.get('playbookDataType') in [None, []]:
+ param.setdefault('playbookDataType', []).append('String')
return json_data
def validate(self):
|
APP-<I> - changed install.json update logic to only add PB data type of String when list is empty.
|
py
|
diff --git a/src/python/pants/backend/jvm/register.py b/src/python/pants/backend/jvm/register.py
index <HASH>..<HASH> 100644
--- a/src/python/pants/backend/jvm/register.py
+++ b/src/python/pants/backend/jvm/register.py
@@ -174,7 +174,7 @@ def register_goals():
task(name='binary', action=BinaryCreate,
dependencies=['compile', 'resources', 'bootstrap']
- ).install().with_description('Create a jvm binary jar.')
+ ).install().with_description('Create a runnable binary.')
detect_duplicates.install('binary')
|
in help text for 'binary' goal, don't say "jar". Nowadays, could be a pex. So be vague. Testing Done: Just in case: <URL>
|
py
|
diff --git a/src/saml2/authnresponse.py b/src/saml2/authnresponse.py
index <HASH>..<HASH> 100644
--- a/src/saml2/authnresponse.py
+++ b/src/saml2/authnresponse.py
@@ -37,7 +37,7 @@ def _use_on_or_after(condition, slack):
if now > high:
# To old ignore
#print "(%d > %d)" % (now,high)
- raise Exception("To old can't use it!")
+ raise Exception("To old can't use it! %d" % (now-high,))
return not_on_or_after
def _use_before(condition, slack):
|
More info always a good thing (?)
|
py
|
diff --git a/art/art.py b/art/art.py
index <HASH>..<HASH> 100644
--- a/art/art.py
+++ b/art/art.py
@@ -234,7 +234,17 @@ font_map = {"block": [block_dic, True], "banner": [banner_dic, False],
"xsansbi": [xsansbi_dic, False],
"xsansi": [xsansi_dic, False],
"xtimes": [xtimes_dic, False],
- "xttyb": [xttyb_dic, False]
+ "xttyb": [xttyb_dic, False],
+ "heroboti": [heroboti_dic,False],
+ "high_noo": [high_noo_dic,False],
+ "hills" : [hills_dic,False],
+ "home_pak": [home_pak_dic,False],
+ "house_of": [house_of_dic,False],
+ "hypa_bal": [hypa_bal_dic,False],
+ "hyper" : [hyper_dic,False],
+ "inc_raw" : [inc_raw_dic,False],
+ "italics" : [italics_dic,False],
+ "kgames_i": [kgames_i_dic,False]
}
font_counter = len(font_map)
DEFAULT_FONT = "standard"
|
fix : new fonts added to font_map
|
py
|
diff --git a/logentry_admin/admin.py b/logentry_admin/admin.py
index <HASH>..<HASH> 100644
--- a/logentry_admin/admin.py
+++ b/logentry_admin/admin.py
@@ -1,5 +1,4 @@
from django.contrib import admin
-from django.contrib.auth import get_user_model
from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse, NoReverseMatch
@@ -8,6 +7,12 @@ from django.utils.html import escape
from django.utils.translation import ugettext_lazy as _
+try:
+ from django.contrib.auth import get_user_model
+ User = get_user_model()
+except:
+ from django.contrib.auth.models import User # noqa
+
action_names = {
ADDITION: _('Addition'),
DELETION: _('Deletion'),
@@ -33,7 +38,7 @@ class UserListFilter(admin.SimpleListFilter):
parameter_name = 'user'
def lookups(self, request, model_admin):
- staff = get_user_model().objects.filter(is_staff=True)
+ staff = User.objects.filter(is_staff=True)
return (
(s.id, force_text(s))
for s in staff
|
Fix Django <I> (and lower) compatibility
|
py
|
diff --git a/pyipmi/msgs/bmc.py b/pyipmi/msgs/bmc.py
index <HASH>..<HASH> 100644
--- a/pyipmi/msgs/bmc.py
+++ b/pyipmi/msgs/bmc.py
@@ -112,6 +112,7 @@ class ManufacturingTestOnRsp(Message):
__default_lun__ = 0
__fields__ = (
CompletionCode(),
+ RemainingBytes('data'),
)
|
msgs: return bytes for ManufacturingTestOn cmd
|
py
|
diff --git a/spinoff/util/lockfile.py b/spinoff/util/lockfile.py
index <HASH>..<HASH> 100644
--- a/spinoff/util/lockfile.py
+++ b/spinoff/util/lockfile.py
@@ -1,20 +1,24 @@
from __future__ import absolute_import
+import time
from contextlib import contextmanager
from gevent import sleep
-from lockfile import LockFile, AlreadyLocked
+from lockfile import LockFile, AlreadyLocked, LockTimeout
@contextmanager
-def lock_file(path, maxdelay=.1, lock_cls=LockFile):
+def lock_file(path, maxdelay=.1, lock_cls=LockFile, timeout=10.0):
"""Cooperative file lock. Uses `lockfile.LockFile` polling under the hood.
`maxdelay` defines the interval between individual polls.
"""
lock = lock_cls(path)
+ max_t = time.time() + timeout
while True:
+ if time.time() >= max_t:
+ raise LockTimeout("Timeout waiting to acquire lock for %s" % (path,)) # same exception messages as in lockfile
try:
lock.acquire(timeout=0)
except AlreadyLocked:
|
Added timeout to s.u.lockfile with a default value of <I> seconds
|
py
|
diff --git a/tests/contrib/test_cache.py b/tests/contrib/test_cache.py
index <HASH>..<HASH> 100644
--- a/tests/contrib/test_cache.py
+++ b/tests/contrib/test_cache.py
@@ -181,14 +181,16 @@ class TestFileSystemCache(CacheTests):
assert len(cache_files) <= THRESHOLD
def test_filesystemcache_clear(self, c):
- # Makre sure we start out empty
- c.clear()
assert c.set('foo', 'bar')
cache_files = os.listdir(c._path)
- assert len(cache_files) == 1
+ # count = 2 because of the count file
+ assert len(cache_files) == 2
assert c.clear()
+
+ # The only file remaining is the count file
cache_files = os.listdir(c._path)
- assert len(cache_files) == 0
+ assert os.listdir(c._path) == [
+ os.path.basename(c._get_filename(c._fs_count_file))]
# Don't use pytest marker
|
Fixed the file system test that was broken
|
py
|
diff --git a/arthur/server.py b/arthur/server.py
index <HASH>..<HASH> 100644
--- a/arthur/server.py
+++ b/arthur/server.py
@@ -48,6 +48,8 @@ class ArthurServer(Arthur):
if writer:
self.writer_th = threading.Thread(target=self.write_items,
args=(writer, self.items))
+ else:
+ self.writer_th = None
cherrypy.engine.subscribe('start', self.start, 100)
|
[server] Fix crash error when a writer was not set
|
py
|
diff --git a/threshold.py b/threshold.py
index <HASH>..<HASH> 100644
--- a/threshold.py
+++ b/threshold.py
@@ -70,9 +70,7 @@ TM_PER_OBJECT = "PerObject"
TM_METHODS = [TM_OTSU, TM_MOG, TM_BACKGROUND, TM_ROBUST_BACKGROUND,
TM_RIDLER_CALVARD, TM_KAPUR, TM_MCT]
-TM_GLOBAL_METHODS = [TM_OTSU_GLOBAL, TM_MOG_GLOBAL, TM_BACKGROUND_GLOBAL,
- TM_ROBUST_BACKGROUND_GLOBAL, TM_RIDLER_CALVARD_GLOBAL,
- TM_KAPUR_GLOBAL, TM_MCT_GLOBAL ]
+TM_GLOBAL_METHODS = [" ".join((x,TM_GLOBAL)) for x in TM_METHODS]
def get_threshold(threshold_method, threshold_modifier, image,
mask=None, labels = None,
|
Update to dd<I>de9ff changes
|
py
|
diff --git a/hpOneView/servers.py b/hpOneView/servers.py
index <HASH>..<HASH> 100644
--- a/hpOneView/servers.py
+++ b/hpOneView/servers.py
@@ -341,7 +341,10 @@ class servers(object):
# missing required field: enclousure group
# E.g.: profile['enclosureGroupUri'] = "/rest/enclosure-groups/a0f1c07b-f811-4c85-8e38-ac5ec34ea2f4"
+ return self.create_server_profile_from_dict(profile, blocking, verbose)
+ def create_server_profile_from_dict(self, profile, blocking=True, verbose=False):
+ # Creating a profile returns a task with no resource uri
task, body = self._con.post(uri['profiles'], profile)
if profile['firmware'] is None:
tout = 600
@@ -355,6 +358,10 @@ class servers(object):
return profile
return task
+ def get_server_profile_compliance(self, server_profile):
+ compliance_preview = self._con.get(server_profile['uri'] + '/compliance-preview')
+ return compliance_preview
+
def post_server_profile(self, profile, blocking=True, verbose=False):
""" POST a ServerProfileV5 profile for use with the V200 API
|
Changes made by Chakravarthy Racharla to provide support for Ansible ServerProfile module. Added support to retrieve ServerProfile compliance preview.
|
py
|
diff --git a/django_tenants/tests/__init__.py b/django_tenants/tests/__init__.py
index <HASH>..<HASH> 100644
--- a/django_tenants/tests/__init__.py
+++ b/django_tenants/tests/__init__.py
@@ -1,3 +1,6 @@
+from .files import *
+from .staticfiles import *
+from .template import *
from .test_routes import *
from .test_tenants import *
from .test_cache import *
|
fix(tests): Include static file-related tests in 'test' package.
|
py
|
diff --git a/icalevents/icalparser.py b/icalevents/icalparser.py
index <HASH>..<HASH> 100644
--- a/icalevents/icalparser.py
+++ b/icalevents/icalparser.py
@@ -122,7 +122,8 @@ def next_year_at(dt, count=1):
:param count: number of years
:return: date datetime
"""
- return normalize(datetime(year=dt.year + count, month=dt.month, day=dt.day,
+ dt += relativedelta.relativedelta(years=+count)
+ return normalize(datetime(year=dt.year, month=dt.month, day=dt.day,
hour=dt.hour, minute=dt.minute,
second=dt.second, microsecond=dt.microsecond))
@@ -135,15 +136,10 @@ def next_month_at(dt, count=1):
:param count: number of months
:return: date datetime
"""
- year = dt.year
- month = dt.month + count
+ dt += relativedelta.relativedelta(months=+count)
- while month > 12:
- month -= 12
- year += 1
-
- return normalize(datetime(year=year, month=month, day=dt.day, hour=dt.hour,
- minute=dt.minute, second=dt.second,
+ return normalize(datetime(year=dt.year, month=dt.month, day=dt.day,
+ hour=dt.hour, minute=dt.minute, second=dt.second,
microsecond=dt.microsecond))
|
use relativedelta to calculate next month/year relativedelta handles correctly leap years
|
py
|
diff --git a/openquake/commands/compare.py b/openquake/commands/compare.py
index <HASH>..<HASH> 100644
--- a/openquake/commands/compare.py
+++ b/openquake/commands/compare.py
@@ -217,10 +217,10 @@ def compare_uhs(calc_ids: int, files=False, *, poe_id: int = 0,
arrays = c.compare('uhs', poe_id, files, samplesites, atol, rtol)
if len(arrays) and len(calc_ids) == 2:
# each array has shape (N, M)
- ms = numpy.mean((arrays[0] - arrays[1])**2)
- maxdiff = rmsdiff(arrays[0], arrays[1]).max()
- argmax = rmsdiff(arrays[0], arrays[1]).argmax()
- row = ('%.5f' % c.oq.poes[poe_id], numpy.sqrt(ms), maxdiff, argmax)
+ rms = numpy.sqrt(numpy.mean((arrays[0] - arrays[1])**2))
+ delta = numpy.abs(arrays[0] - arrays[1]).max(axis=1)
+ amax = delta.argmax()
+ row = ('%.5f' % c.oq.poes[poe_id], rms, delta[amax], amax)
print(views.text_table([row], ['poe', 'rms-diff', 'max-diff', 'site']))
|
Improved compare_uhs [ci skip]
|
py
|
diff --git a/salt/modules/boto_iam.py b/salt/modules/boto_iam.py
index <HASH>..<HASH> 100644
--- a/salt/modules/boto_iam.py
+++ b/salt/modules/boto_iam.py
@@ -932,9 +932,11 @@ def build_policy(region=None, key=None, keyid=None, profile=None):
# into strings, so let's do the same here.
for key, policy_val in policy.items():
for statement in policy_val:
- if len(statement['Action']) == 1:
+ if (isinstance(statement['Action'], list)
+ and len(statement['Action']) == 1):
statement['Action'] = statement['Action'][0]
- if len(statement['Principal']['Service']) == 1:
+ if (isinstance(statement['Principal']['Service'], list)
+ and len(statement['Principal']['Service']) == 1):
statement['Principal']['Service'] = statement['Principal']['Service'][0]
# build_policy doesn't add a version field, which AWS is going to set to a
# default value, when we get it back, so let's set it.
|
Ensure values are lists before checking their length
|
py
|
diff --git a/guacamole/ingredients/argparse.py b/guacamole/ingredients/argparse.py
index <HASH>..<HASH> 100644
--- a/guacamole/ingredients/argparse.py
+++ b/guacamole/ingredients/argparse.py
@@ -187,7 +187,7 @@ class ParserIngredient(Ingredient):
max_level = level
for subcmd_name, subcmd_obj, subcmd_cmds in cmd_subcmds:
sub_parser = subparsers.add_parser(
- subcmd_name, help=subcmd_obj.get_cmd_help(),
+ str(subcmd_name), help=subcmd_obj.get_cmd_help(),
**self._get_parser_kwargs(subcmd_obj))
sub_parser.add_argument("-h", "--help", action="help")
max_level = max(
|
Fix subcommands being listed with u'...' on Python <I>
|
py
|
diff --git a/satpy/composites/__init__.py b/satpy/composites/__init__.py
index <HASH>..<HASH> 100644
--- a/satpy/composites/__init__.py
+++ b/satpy/composites/__init__.py
@@ -461,7 +461,6 @@ class PSPRayleighReflectance(CompositeBase):
class NIRReflectance(CompositeBase):
- # TODO: Daskify
def __call__(self, projectables, optional_datasets=None, **info):
"""Get the reflectance part of an NIR channel. Not supposed to be used
|
Remove unnecessary TODO from NIR composite
|
py
|
diff --git a/tests/test_mturk.py b/tests/test_mturk.py
index <HASH>..<HASH> 100644
--- a/tests/test_mturk.py
+++ b/tests/test_mturk.py
@@ -172,6 +172,14 @@ class TestMTurkServiceWithFakeConnection(object):
service = MTurkService(**creds)
return service
+ def test_is_sandbox_by_default(self):
+ service = self.make_one()
+ assert service.is_sandbox
+
+ def test_host_server_is_sandbox_by_default(self):
+ service = self.make_one()
+ assert 'sandbox' in service.host
+
def test_check_credentials_converts_response_to_boolean_true(self):
service = self.make_one()
mock_mtc = mock.Mock(
|
Tests to make sure MTurkService defaults to sandbox mode
|
py
|
diff --git a/openquake/risklib/asset.py b/openquake/risklib/asset.py
index <HASH>..<HASH> 100644
--- a/openquake/risklib/asset.py
+++ b/openquake/risklib/asset.py
@@ -612,10 +612,8 @@ def build_asset_array(assets_by_site, tagnames=(), time_event=None):
if deductible_d or limit_d:
logging.warning('Exposures with insuranceLimit/deductible fields are '
'deprecated and may be removed in the future')
- deductibles = ['deductible-%s' % name for name in deductible_d]
- limits = ['insurance_limit-%s' % name for name in limit_d]
retro = ['retrofitted'] if first_asset._retrofitted else []
- float_fields = loss_types + deductibles + limits + retro
+ float_fields = loss_types + retro
int_fields = [(str(name), U16) for name in tagnames]
tagi = {str(name): i for i, name in enumerate(tagnames)}
asset_dt = numpy.dtype(
|
Removed insurance fields from the assetcol [skip CI]
|
py
|
diff --git a/manticore/platforms/decree.py b/manticore/platforms/decree.py
index <HASH>..<HASH> 100644
--- a/manticore/platforms/decree.py
+++ b/manticore/platforms/decree.py
@@ -589,7 +589,7 @@ class Decree(object):
self.running.remove(procid)
#self.procs[procid] = None #let it there so we can report?
if issymbolic(error_code):
- logger.info("TERMINATE PROC_%02d with symbolic exit code [%d,%d]", procid, solver.minmax(constraints, error_code))
+ logger.info("TERMINATE PROC_%02d with symbolic exit code [%d,%d]", procid, solver.minmax(self.constraints, error_code))
else:
logger.info("TERMINATE PROC_%02d %x", procid, error_code)
if len(self.running) == 0 :
|
Fix solver call in decree (#<I>)
|
py
|
diff --git a/pipedatabase.py b/pipedatabase.py
index <HASH>..<HASH> 100644
--- a/pipedatabase.py
+++ b/pipedatabase.py
@@ -15,7 +15,7 @@ import pandas as pd
import os.path
dir_path = os.path.dirname(__file__)
-csv_path = os.path.join(dir_path, 'pipe_database.csv')
+csv_path = os.path.join(dir_path, 'pipedatabase.csv')
with open(csv_path) as pipedbfile:
pipedb = pd.read_csv(pipedbfile)
@@ -69,4 +69,4 @@ def ND_available(NDguess):
# Find the index of the closest nominal diameter
# Take the values of the array, subtract the ND, take the absolute value, find the index of the minimium value
myindex=(ND_all_available() >= NDguess)
- return min(ND_all_available()[myindex])
\ No newline at end of file
+ return min(ND_all_available()[myindex])
|
changed name of csv file
|
py
|
diff --git a/patchboard/action.py b/patchboard/action.py
index <HASH>..<HASH> 100644
--- a/patchboard/action.py
+++ b/patchboard/action.py
@@ -77,7 +77,7 @@ class Action(object):
# context would ever aquire an 'authorizer' method in the ruby
# code, and in any case we need to be certain of the pythonic
# analog.
- if self.auth_scheme and u'authorizer' in context:
+ if hasattr(self, u'auth_scheme') and u'authorizer' in context:
credential = context[u'authorizer'](
self.auth_scheme, resource, self.name)
|
Cleanup: check for attr before accessing
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -111,5 +111,5 @@ setup(name = 'turbodbc',
'Programming Language :: Python :: 2.7',
'Topic :: Database'],
ext_modules = get_extension_modules(),
- install_requires=['pybind11>=2.0.0rc1']
+ install_requires=['pybind11>=2.0.0']
)
|
Replace pybind<I> release candidate with properly released version
|
py
|
diff --git a/flask_appconfig/cli.py b/flask_appconfig/cli.py
index <HASH>..<HASH> 100644
--- a/flask_appconfig/cli.py
+++ b/flask_appconfig/cli.py
@@ -6,6 +6,13 @@ import click
from . import server_backends
from .util import honcho_parse_env
+try:
+ import importlib
+except ImportError:
+ click.echo('You do not have importlib installed. Please install a '
+ 'backport for versions < 2.7/3.1 first.')
+ sys.exit(1)
+
ENV_DEFAULT = '.env'
@@ -22,13 +29,6 @@ ENV_DEFAULT = '.env'
.format(ENV_DEFAULT))
@click.pass_context
def cli(ctx, module_name, configfile, env):
- try:
- import importlib
- except ImportError:
- click.echo('You do not have importlib installed. Please install a '
- 'backport for versions < 2.7/3.1 first.')
- sys.exit(1)
-
extra_files = []
if configfile:
extra_files.append(configfile)
|
Moved importlib import out of cli function.
|
py
|
diff --git a/cleverhans/future/torch/attacks/spsa.py b/cleverhans/future/torch/attacks/spsa.py
index <HASH>..<HASH> 100644
--- a/cleverhans/future/torch/attacks/spsa.py
+++ b/cleverhans/future/torch/attacks/spsa.py
@@ -110,7 +110,7 @@ def spsa(model_fn, x, eps, nb_iter, clip_min=-np.inf, clip_max=np.inf, y=None,
if early_stop_loss_threshold is not None and loss < early_stop_loss_threshold:
break
- adv_x = (x + perturbation).detach()
+ adv_x = torch.clamp((x + perturbation).detach(), clip_min, clip_max)
asserts.append(torch.all(torch.abs(adv_x - x) <= eps + 1e-6))
asserts.append(torch.all(adv_x >= clip_min))
|
Add additional clamp call in SPSA attack to prevent failure of sanity checks
|
py
|
diff --git a/pyswip/easy.py b/pyswip/easy.py
index <HASH>..<HASH> 100644
--- a/pyswip/easy.py
+++ b/pyswip/easy.py
@@ -154,8 +154,11 @@ class Variable(object):
fun = PL_unify_list
else:
raise
-
- t = PL_new_term_ref()
+
+ if self.handle is None:
+ t = PL_new_term_ref(self.handle)
+ else
+ t = PL_copy_term_ref(self.handle)
fun(t, value)
self.handle = t
|
Fixing Variables in foreign functions not unifiable
|
py
|
diff --git a/socketio/handler.py b/socketio/handler.py
index <HASH>..<HASH> 100644
--- a/socketio/handler.py
+++ b/socketio/handler.py
@@ -73,6 +73,10 @@ class SocketIOHandler(WSGIHandler):
request_method = self.environ.get("REQUEST_METHOD")
request_tokens = self.RE_REQUEST_URL.match(path)
+ # Kick non-socket.io requests to our superclass
+ if not path.lstrip('/').startswith(self.server.namespace):
+ return super(SocketIOHandler, self).handle_one_response()
+
# Parse request URL and QUERY_STRING and do handshake
if request_tokens:
request_tokens = request_tokens.groupdict()
|
Only handle requests to paths starting with our server's namespace
|
py
|
diff --git a/py/testdir_multi_jvm/test_GLM_big1_nopoll.py b/py/testdir_multi_jvm/test_GLM_big1_nopoll.py
index <HASH>..<HASH> 100644
--- a/py/testdir_multi_jvm/test_GLM_big1_nopoll.py
+++ b/py/testdir_multi_jvm/test_GLM_big1_nopoll.py
@@ -47,7 +47,8 @@ class Basic(unittest.TestCase):
# if we do another poll they should be done now, and better to get it that
# way rather than the inspect (to match what simpleCheckGLM is expected
for glm in glmInitial:
- print "Checking completed job, with no polling:", glm
+ print "Checking completed job, with no polling using initial response:", h2o.dump_json(glm)
+
a = h2o.nodes[0].poll_url(glm, noPoll=True)
h2o_glm.simpleCheckGLM(self, a, 57, **kwargs)
|
print some extra info for debug of ec2 fail
|
py
|
diff --git a/m.py b/m.py
index <HASH>..<HASH> 100755
--- a/m.py
+++ b/m.py
@@ -304,7 +304,7 @@ def kodi_query(sql): # {{{1
def kodi_path_query(sql):
for row in kodi_query(sql):
f = Path(row[0]); p, name = f.parent, f.name
- yield(p, name, *row[1:])
+ yield [p, name] + row[1:]
KODI_WATCHED_SQL = """
select p.strPath || f.strFileName as fp
|
really support <I> <= python
|
py
|
diff --git a/eztemplate/engines/empy_engine.py b/eztemplate/engines/empy_engine.py
index <HASH>..<HASH> 100644
--- a/eztemplate/engines/empy_engine.py
+++ b/eztemplate/engines/empy_engine.py
@@ -58,6 +58,7 @@ class EmpyEngine(Engine):
def apply(self, mapping):
"""Apply a mapping of name-value-pairs to a template."""
+ self.output.seek(0)
self.output.truncate(0)
self.interpreter.string(self.template, locals=mapping)
return self.output.getvalue()
|
Need to seek first in empy engine because of io.StringIO in Python 3.
|
py
|
diff --git a/anndata/core/alignedmapping.py b/anndata/core/alignedmapping.py
index <HASH>..<HASH> 100644
--- a/anndata/core/alignedmapping.py
+++ b/anndata/core/alignedmapping.py
@@ -2,8 +2,11 @@ from abc import ABC, abstractmethod
from collections.abc import MutableMapping
from functools import singledispatch
from typing import Mapping, Optional, Tuple
+import warnings
+import numpy as np
import pandas as pd
+from scipy.sparse import spmatrix
from ..utils import deprecated
from .views import asview, ViewArgs
@@ -53,6 +56,14 @@ class AlignedMapping(MutableMapping, ABC):
raise IndexError() # Maybe not index error
except AttributeError:
pass
+ # TODO: Remove this as soon as writing dataframes works
+ if not isinstance(val, (np.ndarray, spmatrix)):
+ warnings.warn(
+ f"AnnData does not currently support writing or reading of "
+ f"'{type(val).__name__}' objects in {self.attrname} for either"
+ f" hdf5 or zarr formats.",
+ stacklevel=2
+ )
@property
@abstractmethod
|
Added warning if alignedarray set with unsupported value
|
py
|
diff --git a/nion/swift/Inspector.py b/nion/swift/Inspector.py
index <HASH>..<HASH> 100644
--- a/nion/swift/Inspector.py
+++ b/nion/swift/Inspector.py
@@ -248,7 +248,7 @@ class InfoInspectorSection(InspectorSection):
def begin_caption_edit():
self.caption_editable_text.text = data_item.caption
self.caption_static_text.unbind_text()
- self.caption_edit_stack.set_current_index(1)
+ self.caption_edit_stack.current_index = 1
self.caption_static_edit_button.on_clicked = begin_caption_edit
self.caption_static_button_row.add(self.caption_static_edit_button)
self.caption_static_button_row.add_stretch()
@@ -263,7 +263,7 @@ class InfoInspectorSection(InspectorSection):
self.caption_editable_cancel_button = self.ui.create_push_button_widget(_("Cancel"))
def end_caption_edit():
self.caption_static_text.bind_text(Binding.PropertyBinding(data_item, "caption"))
- self.caption_edit_stack.set_current_index(0)
+ self.caption_edit_stack.current_index = 0
def save_caption_edit():
data_item.caption = self.caption_editable_text.text
end_caption_edit()
|
Use current_index property rather than deprecated set_current_index for stack widget.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@ if sys.argv[-1] == "publish":
if sys.argv[-1] == "tag":
print("Tagging the version on github:")
- os.system(f"git tag -a {version} -m 'version {version}'")
+ os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
|
Make friendly for old python
|
py
|
diff --git a/source/awesome_tool/mvc/models/state_machine.py b/source/awesome_tool/mvc/models/state_machine.py
index <HASH>..<HASH> 100755
--- a/source/awesome_tool/mvc/models/state_machine.py
+++ b/source/awesome_tool/mvc/models/state_machine.py
@@ -1,6 +1,7 @@
from gtkmvc import ModelMT, Observable
from statemachine.state_machine import StateMachine
+from statemachine.states.container_state import ContainerState
from mvc.models import ContainerStateModel, StateModel, TransitionModel, DataFlowModel
from utils.vividict import Vividict
@@ -30,7 +31,7 @@ class StateMachineModel(ModelMT):
self.state_machine = state_machine
root_state = self.state_machine.root_state
- if isinstance(root_state, ContainerStateModel):
+ if isinstance(root_state, ContainerState):
self.root_state = ContainerStateModel(root_state)
else:
self.root_state = StateModel(root_state)
|
Fix bug is previous commit Mixed ContainerState and ContainerStateModel
|
py
|
diff --git a/tests/test_client.py b/tests/test_client.py
index <HASH>..<HASH> 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -1,6 +1,7 @@
import datetime
from measurement.measures import Energy, Weight
+import ordereddict
import mimic
from myfitnesspal import Client
@@ -73,16 +74,18 @@ class TestClient(MFPTestCase):
self.arbitrary_date2,
)
- expected_measurements = {
- datetime.date(2015, 4, 28): 19.2,
- datetime.date(2015, 4, 27): 19.2,
- datetime.date(2015, 4, 26): 19.0,
- datetime.date(2015, 4, 25): 18.7,
- datetime.date(2015, 4, 23): 18.7,
- datetime.date(2015, 4, 22): 18.4,
- datetime.date(2015, 4, 21): 18.9,
- datetime.date(2015, 4, 20): 19.1,
- }
+ expected_measurements = ordereddict.OrderedDict(
+ [
+ (datetime.date(2015, 4, 28), 19.2),
+ (datetime.date(2015, 4, 27), 19.2),
+ (datetime.date(2015, 4, 26), 19.0),
+ (datetime.date(2015, 4, 25), 18.7),
+ (datetime.date(2015, 4, 23), 18.7),
+ (datetime.date(2015, 4, 22), 18.4),
+ (datetime.date(2015, 4, 21), 18.9),
+ (datetime.date(2015, 4, 20), 19.1),
+ ]
+ )
self.assertEquals(
expected_measurements,
|
#<I> Updated test case to enforce date order The get_measurements test case now enforces the returned results to be in order.
|
py
|
diff --git a/elifetools/__init__.py b/elifetools/__init__.py
index <HASH>..<HASH> 100644
--- a/elifetools/__init__.py
+++ b/elifetools/__init__.py
@@ -1 +1 @@
-__version__ = "0.7.0"
+__version__ = "0.8.0"
|
Bump to version <I>
|
py
|
diff --git a/vc_vidyo/indico_vc_vidyo/cli.py b/vc_vidyo/indico_vc_vidyo/cli.py
index <HASH>..<HASH> 100644
--- a/vc_vidyo/indico_vc_vidyo/cli.py
+++ b/vc_vidyo/indico_vc_vidyo/cli.py
@@ -27,7 +27,6 @@ from indico.core.db import db, DBMgr
from indico.core.db.sqlalchemy.util.session import update_session_options
from indico.modules.scheduler import Client
from indico.modules.vc.models.vc_rooms import VCRoom, VCRoomStatus
-
from indico_vc_vidyo.task import VidyoCleanupTask
cli_manager = Manager(usage="Manages the Vidyo plugin")
@@ -53,15 +52,12 @@ def rooms(status=None):
print table.table
-@cli_manager.command
+@cli_manager.option('interval', type=int)
def create_task(interval):
"""Creates a Vidyo cleanup task running every N days"""
update_session_options(db)
- try:
- interval = int(interval)
- if interval < 1:
- raise ValueError
- except ValueError:
+ if interval < 1:
+ raise ValueError
print 'Invalid interval, must be a number >=1'
sys.exit(1)
with DBMgr.getInstance().global_connection(commit=True):
|
VC/Vidyo: Added better validation to CLI
|
py
|
diff --git a/angr/state_plugins/sim_action.py b/angr/state_plugins/sim_action.py
index <HASH>..<HASH> 100644
--- a/angr/state_plugins/sim_action.py
+++ b/angr/state_plugins/sim_action.py
@@ -17,6 +17,7 @@ class SimAction(SimEvent):
TMP = 'tmp'
REG = 'reg'
MEM = 'mem'
+ _MAX_ACTION_ID = -1
def __init__(self, state, region_type):
"""
@@ -26,6 +27,8 @@ class SimAction(SimEvent):
"""
SimEvent.__init__(self, state, 'action')
self.type = region_type
+ SimAction._MAX_ACTION_ID += 1
+ self._action_id = SimAction._MAX_ACTION_ID
def __repr__(self):
if self.sim_procedure is not None:
|
Adding incrementing ID to SimAction (#<I>) * Adding incrementing ID to SimAction * Starting count at 0
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,7 @@ from os import path
from distutils.version import LooseVersion
from setuptools import find_packages, setup
-VERSION = '1.20.0'
+VERSION = '1.20.1'
# Import README.md into long_description
pwd = path.abspath(path.dirname(__file__))
|
Bump package version to <I>
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.