Dataset columns:
diff: stringlengths (139 to 3.65k)
message: stringlengths (8 to 627)
diff_languages: stringclasses (1 value)
diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py index <HASH>..<HASH> 100644 --- a/salt/modules/kubernetes.py +++ b/salt/modules/kubernetes.py @@ -41,7 +41,6 @@ import logging import yaml import tempfile import signal -from sys import platform from time import sleep from contextlib import contextmanager
Removed unused sys import
py
diff --git a/werkzeug/local.py b/werkzeug/local.py index <HASH>..<HASH> 100644 --- a/werkzeug/local.py +++ b/werkzeug/local.py @@ -87,7 +87,7 @@ from werkzeug._internal import _patch_wrapper if get_current_greenlet is int: get_ident = get_current_thread else: - get_ident = lambda: hash((get_current_thread(), get_current_greenlet())) + get_ident = lambda: (get_current_thread(), get_current_greenlet()) class Local(object):
The combined ident for greenlets and threads is now a tuple, so there is recovery from hash collisions.
py
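A minimal sketch (not the werkzeug implementation; names are hypothetical) of why the tuple ident above is safer than a combined hash: dict lookups compare keys for equality, so tuple keys stay exact even when two distinct (thread, greenlet) pairs happen to share a hash value, whereas using the hash itself as the key conflates them.

```python
# Hypothetical context-local storage keyed by the full ident tuple.
storage = {}

def set_local(thread_id, greenlet_id, value):
    # hash((a, b)) can collide for distinct pairs, but dict lookup also
    # compares keys for equality, so tuple keys can never be conflated.
    storage[(thread_id, greenlet_id)] = value

def get_local(thread_id, greenlet_id):
    return storage.get((thread_id, greenlet_id))

set_local(1, 10, "request-a")
set_local(2, 20, "request-b")
assert get_local(1, 10) == "request-a"
```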
diff --git a/libre/apps/data_drivers/query.py b/libre/apps/data_drivers/query.py index <HASH>..<HASH> 100644 --- a/libre/apps/data_drivers/query.py +++ b/libre/apps/data_drivers/query.py @@ -77,7 +77,7 @@ class Query(): else: query_results = set(filter_results) - logger.debug('query_results: %s' % query_results) + #logger.debug('query_results: %s' % query_results) self.get_data(query_results) self.process_groups() self.process_aggregates()
Disable logging of all query result ids
py
diff --git a/ses_mailer.py b/ses_mailer.py index <HASH>..<HASH> 100644 --- a/ses_mailer.py +++ b/ses_mailer.py @@ -1,12 +1,12 @@ """ -SES Mailer +SES-Mailer A wrapper around boto ses to send email via AWS SES """ import boto -__NAME__ = "SESMailer" +__NAME__ = "SES-Mailer" __version__ = "0.1" __license__ = "MIT" __author__ = "Mardix"
Rename to SES-Mailer
py
diff --git a/spyder/utils/programs.py b/spyder/utils/programs.py index <HASH>..<HASH> 100644 --- a/spyder/utils/programs.py +++ b/spyder/utils/programs.py @@ -519,19 +519,8 @@ def open_files_with_application(app_path, fnames): """ return_codes = {} - # Add quotes as needed - if sys.platform != 'darwin': - new_fnames = [] - for fname in fnames: - if ' ' in fname: - if '"' in fname: - fname = '"{}"'.format(fname) - else: - fname = "'{}'".format(fname) - new_fnames.append(fname) - else: - new_fnames.append(fname) - fnames = new_fnames + if os.name == 'nt': + fnames = [fname.replace('\\', '/') for fname in fnames] if sys.platform == 'darwin': if not (app_path.endswith('.app') and os.path.isdir(app_path)):
Fix opening files with spaces
py
diff --git a/bpybuild/win_utils.py b/bpybuild/win_utils.py index <HASH>..<HASH> 100644 --- a/bpybuild/win_utils.py +++ b/bpybuild/win_utils.py @@ -403,8 +403,15 @@ def get_all_vc_dev_tools() -> Dict[float, Set[str]]: ALL_VC_DEV_TOOLS = get_all_vc_dev_tools() -VS_LIBS = (f"lib/win{PLATFORM}_vc12" if - VS_VERSION == 2013 else f"lib/win{PLATFORM}_vc14") +# 32bit windows uses the windows path +def compute_svn_path(): + if PLATFORM == 32: + return "windows" + else: + return "win64" + +VS_LIBS = (f"lib/{compute_svn_path()}_vc12" if + VS_VERSION == 2013 else f"lib/{compute_svn_path()}_vc14") BLENDER_SVN_REPO_URL = (f"https://svn.blender.org/svnroot/bf-blender/trunk/" f"{VS_LIBS}")
Fixed SVN repo for <I>-bit installation
py
diff --git a/doc/en/example/nonpython/conftest.py b/doc/en/example/nonpython/conftest.py index <HASH>..<HASH> 100644 --- a/doc/en/example/nonpython/conftest.py +++ b/doc/en/example/nonpython/conftest.py @@ -2,9 +2,9 @@ import pytest -def pytest_collect_file(parent, fspath): - if fspath.suffix == ".yaml" and fspath.name.startswith("test"): - return YamlFile.from_parent(parent, path=fspath) +def pytest_collect_file(parent, file_path): + if file_path.suffix == ".yaml" and file_path.name.startswith("test"): + return YamlFile.from_parent(parent, path=file_path) class YamlFile(pytest.File):
Fix hook param name in nonpython example (#<I>) Follow up to #<I>
py
diff --git a/raiden/network/resolver/client.py b/raiden/network/resolver/client.py index <HASH>..<HASH> 100644 --- a/raiden/network/resolver/client.py +++ b/raiden/network/resolver/client.py @@ -58,8 +58,7 @@ def reveal_secret_with_resolver( if secret_request_event.expiration < current_state.block_number: log.debug( - "Stopped using resolver, transfer expired", - resolver_endpoint=resolver_endpoint + "Stopped using resolver, transfer expired", resolver_endpoint=resolver_endpoint ) return False @@ -80,8 +79,7 @@ def reveal_secret_with_resolver( gevent.sleep(5) log.debug( - "Got secret from resolver, dispatching secret reveal", - resolver_endpoint=resolver_endpoint + "Got secret from resolver, dispatching secret reveal", resolver_endpoint=resolver_endpoint ) state_change = ReceiveSecretReveal( sender=secret_request_event.recipient,
Fix resolver lint issues
py
diff --git a/indra/explanation/model_checker.py b/indra/explanation/model_checker.py index <HASH>..<HASH> 100644 --- a/indra/explanation/model_checker.py +++ b/indra/explanation/model_checker.py @@ -82,11 +82,7 @@ class PathResult(object): The maximum length of specific paths to return. path_metrics : list[:py:class:`indra.explanation.model_checker.PathMetric`] A list of PathMetric objects, each describing the results of a simple - path search (path existence). PathMetric attributes: - *source_node* - The source node of the path - *target_node* - The target node of the path - *polarity* - The overall polarity of the path - *length* - The length of the path + path search (path existence). paths : list[list[tuple[str, int]]] A list of paths obtained from path finding. Each path is a list of tuples (which are edges in the path), with the first element of the
Removed list of PathMetric attributes, since that is already documented
py
diff --git a/indra/databases/chembl_client.py b/indra/databases/chembl_client.py index <HASH>..<HASH> 100644 --- a/indra/databases/chembl_client.py +++ b/indra/databases/chembl_client.py @@ -1,6 +1,5 @@ from __future__ import absolute_import, print_function, unicode_literals from builtins import dict, str -import json import logging import requests from sympy.physics import units @@ -51,7 +50,6 @@ def get_drug_inhibition_stmts(drug): ---------- drug : Agent Agent representing drug with MESH or CHEBI grounding - Returns ------- stmts : list of INDRA statements @@ -154,6 +152,7 @@ def query_target(target_chembl_id): def activities_by_target(activities): """Get back lists of activities in a dict keyed by ChEMBL target id + Parameters ---------- activities : dict @@ -176,6 +175,7 @@ def activities_by_target(activities): def get_protein_targets_only(target_chembl_ids): """Given list of ChEMBL target ids, return dict of only SINGLE PROTEIN targ + Parameters ---------- target_chembl_ids : list
Docstring edits, remove json import
py
diff --git a/salt/utils/jinja.py b/salt/utils/jinja.py index <HASH>..<HASH> 100644 --- a/salt/utils/jinja.py +++ b/salt/utils/jinja.py @@ -648,11 +648,11 @@ class SerializerExtension(Extension, object): .. code-block:: jinja - escape_regex = {{ 'https://example.com?foo=bar%20baz' | escape_regex }} + regex_escape = {{ 'https://example.com?foo=bar%20baz' | regex_escape }} will be rendered as:: - escape_regex = https\\:\\/\\/example\\.com\\?foo\\=bar\\%20baz + regex_escape = https\\:\\/\\/example\\.com\\?foo\\=bar\\%20baz ** Set Theory Filters **
Update jinja.py: the name of the jinja filter was wrong in the documentation
py
diff --git a/kmip/services/server/server.py b/kmip/services/server/server.py index <HASH>..<HASH> 100644 --- a/kmip/services/server/server.py +++ b/kmip/services/server/server.py @@ -367,12 +367,12 @@ def build_argument_parser(): "--hostname", action="store", type="str", - default="127.0.0.1", + default=None, dest="hostname", help=( "The host address the server will be bound to. A string " "representing either a hostname in Internet domain notation or " - "an IPv4 address. Defaults to '127.0.0.1'." + "an IPv4 address. Defaults to None." ), ) parser.add_option( @@ -380,12 +380,12 @@ def build_argument_parser(): "--port", action="store", type="int", - default=5696, + default=None, dest="port", help=( "The port number the server will be bound to. An integer " "representing a port number. Recommended to be 5696 according to " - "the KMIP specification. Defaults to 5696." + "the KMIP specification. Defaults to None." ), ) parser.add_option( @@ -429,11 +429,12 @@ def build_argument_parser(): "--auth_suite", action="store", type="str", - default="Basic", + default=None, dest="auth_suite", help=( "A string representing the type of authentication suite to use " - "when establishing TLS connections. Defaults to 'Basic'." + "when establishing TLS connections. Options include 'Basic' and " + "'TLS1.2'. Defaults to None." ), ) parser.add_option(
Fixes configuration settings override by default script values This change fixes a bug with parameter handling when invoking the server's main routine. The default values provided for the parameters would override any configuration file settings if the main routine was used to run the server. To fix this, the default values were removed, requiring the user to explicitly specify parameter values if they want them to override the configuration settings. The parameter doc strings have been updated to match these changes. Fixes #<I>
py
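A tiny sketch of the precedence rule the fix above establishes (the config dict and helper function are hypothetical): with argument defaults set to None, a config-file value wins unless the user explicitly passes the option on the command line.

```python
# Hypothetical settings loaded from a configuration file.
config_file = {"hostname": "0.0.0.0", "port": 5696}

def effective_setting(name, cli_value):
    # A CLI value only takes precedence when explicitly provided;
    # a default of None means "fall through to the config file".
    return cli_value if cli_value is not None else config_file.get(name)

assert effective_setting("hostname", None) == "0.0.0.0"       # config wins
assert effective_setting("hostname", "1.2.3.4") == "1.2.3.4"  # explicit CLI wins
```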
diff --git a/salt/returners/mongo_return.py b/salt/returners/mongo_return.py index <HASH>..<HASH> 100644 --- a/salt/returners/mongo_return.py +++ b/salt/returners/mongo_return.py @@ -4,8 +4,11 @@ This is a VERY simple example for pushing data to a redis server and is not necessarily intended as a usable interface. ''' +import logging import pymongo +log = logging.getLogger(__name__) + __opts__ = { 'mongo.host': 'salt', 'mongo.port': 27017, @@ -28,5 +31,5 @@ def returner(ret): back[key.replace('.', '-')] = ret['return'][key] else: back = ret['return'] - print back + log.debug( back ) col.insert({ret['jid']: back})
log the returner value to the debug logger rather than printing it to stdout
py
diff --git a/roller.py b/roller.py index <HASH>..<HASH> 100755 --- a/roller.py +++ b/roller.py @@ -387,6 +387,8 @@ class Kernel(object): counter += 1 progress_bar(counter, cap) time.sleep(1) + while make_process.poll() is None: + time.sleep(1) if make_process.returncode != 0: print('Failed to make kernel') raise SystemExit
attempt to properly wait for make to finish
py
diff --git a/src/python/test/test_dxclient.py b/src/python/test/test_dxclient.py index <HASH>..<HASH> 100755 --- a/src/python/test/test_dxclient.py +++ b/src/python/test/test_dxclient.py @@ -515,6 +515,8 @@ class TestDXClient(DXTestCase): except: print("*** TODO: FIXME: Unable to verify that grandchild subprocess inherited session") + @unittest.skipUnless(dxpy.APISERVER_HOST.endswith('api.dnanexus.com'), + 'Skipping test that requires production authserver configuration') def test_dx_ssh_config(self): wd = tempfile.mkdtemp() dx_ssh_config = pexpect.spawn("dx ssh-config", env=overrideEnvironment(HOME=wd))
Only test dx ssh-config on staging and production
py
diff --git a/pysc2/run_configs/platforms.py b/pysc2/run_configs/platforms.py index <HASH>..<HASH> 100644 --- a/pysc2/run_configs/platforms.py +++ b/pysc2/run_configs/platforms.py @@ -37,6 +37,9 @@ VERSIONS = {ver.game_version: ver for ver in [ lib.Version("3.19.0", 58400, "2B06AEE58017A7DF2A3D452D733F1019", None), lib.Version("3.19.1", 58400, "D9B568472880CC4719D1B698C0D86984", None), lib.Version("4.0.0", 59587, "9B4FD995C61664831192B7DA46F8C1A1", None), + lib.Version("4.0.2", 59587, "B43D9EE00A363DAFAD46914E3E4AF362", None), + lib.Version("4.1.0", 60196, "1B8ACAB0C663D5510941A9871B3E9FBE", None), + lib.Version("4.1.1", 60321, "5C021D8A549F4A776EE9E9C1748FFBBC", None), ]} flags.DEFINE_enum("sc2_version", None, sorted(VERSIONS.keys()),
Add a few more known versions. PiperOrigin-RevId: <I>
py
diff --git a/cr8/clients.py b/cr8/clients.py index <HASH>..<HASH> 100644 --- a/cr8/clients.py +++ b/cr8/clients.py @@ -125,4 +125,5 @@ class HttpClient: def client(hosts, concurrency=25): + hosts = hosts or 'localhost:4200' return HttpClient(_to_http_hosts(hosts), conn_pool_limit=concurrency)
Use localhost:<I> as the default for --hosts
py
diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index <HASH>..<HASH> 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -37,7 +37,7 @@ class KernelTests(unittest.TestCase): k1 = GPy.kern.rbf(1) + GPy.kern.bias(1) k2 = GPy.kern.coregionalise(2,1) - k = k1.prod_orthogonal(k2) + k = k1.prod(k2,tensor=True) m = GPy.models.GP_regression(X,Y,kernel=k) self.assertTrue(m.checkgrad())
changed prod_orthogonal in tests
py
diff --git a/tests/fields/test_base.py b/tests/fields/test_base.py index <HASH>..<HASH> 100644 --- a/tests/fields/test_base.py +++ b/tests/fields/test_base.py @@ -80,3 +80,22 @@ class TestBaseField(TestCase): self.assertEqual(self.repeated_msg.a[0], 'testing') self.assertEqual(self.repeated_msg.a[1], 'middle testing') self.assertEqual(self.repeated_msg.a[2], 'testing1') + + def test_concurrent_len_in_different_instances(self): + class TestMsg(Message): + a = StringField(field_number=1, repeated=True) + + msg_a = TestMsg() + msg_a.a.append('a') + msg_a.a.append('b') + self.assertEqual(len(msg_a.a), 2) + + msg_b = TestMsg() + msg_b.a.append('a') + self.assertEqual(len(msg_b.a), 1) + + msg_a_field = msg_a.a + msg_b_field = msg_b.a + + self.assertEqual(len(msg_a_field), 2) + self.assertEqual(len(msg_b_field), 1)
Added test for #PB3-9
py
diff --git a/pyclustering/cluster/tests/unit/ut_birch.py b/pyclustering/cluster/tests/unit/ut_birch.py index <HASH>..<HASH> 100755 --- a/pyclustering/cluster/tests/unit/ut_birch.py +++ b/pyclustering/cluster/tests/unit/ut_birch.py @@ -50,7 +50,14 @@ class BirchUnitTest(unittest.TestCase): birch_instance.process() clusters = birch_instance.get_clusters() + cf_clusters = birch_instance.get_cf_cluster() + cf_entries = birch_instance.get_cf_entries() + self.assertEqual(birch_instance.get_cluster_encoding(), type_encoding.CLUSTER_INDEX_LIST_SEPARATION) + self.assertEqual(number_clusters, len(clusters)) + self.assertEqual(number_clusters, len(cf_clusters)) + self.assertGreater(len(cf_entries), 0) + self.assertLessEqual(len(cf_entries), entry_size_limit) obtained_cluster_sizes = [len(cluster) for cluster in clusters]
#<I>: [pyclustering.cluster.birch] unit-tests are added.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -28,12 +28,12 @@ setup( packages=find_packages(exclude="example_project"), zip_safe=False, install_requires=[ - 'django', 'django-paging>=0.2.2', 'django-indexer==0.2.1', 'uuid', ], tests_require=[ + 'django', 'django-celery', # also requires the disqus fork of haystack ],
only require django for tests
py
diff --git a/integration_tests/web/test_async_web_client.py b/integration_tests/web/test_async_web_client.py index <HASH>..<HASH> 100644 --- a/integration_tests/web/test_async_web_client.py +++ b/integration_tests/web/test_async_web_client.py @@ -123,7 +123,7 @@ class TestAsyncWebClient(unittest.TestCase): channels=self.channel_id, title="Good Old Slack Logo", filename="slack_logo.png", file=file) self.assertIsNotNone(upload) - deletion = client.files_delete(file=upload["file"]["id"]) + deletion = await client.files_delete(file=upload["file"]["id"]) self.assertIsNotNone(deletion) @async_test @@ -140,7 +140,7 @@ class TestAsyncWebClient(unittest.TestCase): ) self.assertIsNotNone(upload) - deletion = client.files_delete( + deletion = await client.files_delete( token=self.bot_token, file=upload["file"]["id"], )
Fix never-awaited coro in integration tests
py
diff --git a/pyes/utils/__init__.py b/pyes/utils/__init__.py index <HASH>..<HASH> 100644 --- a/pyes/utils/__init__.py +++ b/pyes/utils/__init__.py @@ -1,12 +1,16 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import import base64 -from urllib import quote +from urllib import quote as _quote import array import uuid __all__ = ['clean_string', "ESRange", "ESRangeOp", "string_b64encode", "string_b64decode", "make_path", "make_id"] +def quote(value): + value = value.encode('utf8', errors='ignore') if isinstance(value, unicode) else str(value) + return _quote(value, safe='') + def make_id(value): """ Build a string id from a value @@ -24,7 +28,7 @@ def make_path(*path_components): """ Smash together the path components. Empty components will be ignored. """ - path_components = [quote(str(component), "") for component in path_components if component] + path_components = [quote(component) for component in path_components if component] path = '/'.join(path_components) if not path.startswith('/'): path = '/' + path
Fixed a bug when `id` is a unicode instance
py
diff --git a/dynaphopy/__init__.py b/dynaphopy/__init__.py index <HASH>..<HASH> 100644 --- a/dynaphopy/__init__.py +++ b/dynaphopy/__init__.py @@ -775,10 +775,12 @@ class Calculation: integration = np.trapz(power_spectrum_dos, x=frequency_range)/(self.dynamic.structure.get_number_of_atoms()* self.dynamic.structure.get_number_of_dimensions()) - if normalize_dos and self.parameters.project_on_atom < -1: - power_spectrum_dos /=integration + if normalize_dos: + power_spectrum_dos *=integration integration = 1.0 - + if self.parameters.project_on_atom > -1: + power_spectrum_dos *=self.dynamic.structure.get_number_of_primitive_atoms() + integration /=self.dynamic.structure.get_number_of_primitive_atoms() free_energy = thm.get_free_energy(temperature, frequency_range, power_spectrum_dos) entropy = thm.get_entropy(temperature, frequency_range, power_spectrum_dos)
added partial power spectrum calculation and thermal properties
py
diff --git a/raiden/utils/typing.py b/raiden/utils/typing.py index <HASH>..<HASH> 100644 --- a/raiden/utils/typing.py +++ b/raiden/utils/typing.py @@ -43,9 +43,6 @@ BalanceHash = NewType("BalanceHash", T_BalanceHash) T_BlockGasLimit = int BlockGasLimit = NewType("BlockGasLimit", T_BlockGasLimit) -T_BlockGasPrice = int -BlockGasPrice = NewType("BlockGasPrice", T_BlockGasPrice) - T_BlockHash = bytes BlockHash = NewType("BlockHash", T_BlockHash)
Removed unused type. BlockGasPrice is not being used; it may be added back in the future once the JSON RPC client is properly typed
py
diff --git a/google_compute_engine/constants.py b/google_compute_engine/constants.py index <HASH>..<HASH> 100644 --- a/google_compute_engine/constants.py +++ b/google_compute_engine/constants.py @@ -18,7 +18,6 @@ import platform OSLOGIN_CONTROL_SCRIPT = 'google_oslogin_control' -OSLOGIN_NSS_CACHE = '/etc/oslogin_passwd.cache' OSLOGIN_NSS_CACHE_SCRIPT = 'google_oslogin_nss_cache' if platform.system() == 'FreeBSD': @@ -26,13 +25,16 @@ if platform.system() == 'FreeBSD': BOTOCONFDIR = '/usr/local' SYSCONFDIR = '/usr/local/etc' LOCALSTATEDIR = '/var/spool' + OSLOGIN_NSS_CACHE = '/usr/local/etc/oslogin_passwd.cache' elif platform.system() == 'OpenBSD': LOCALBASE = '/usr/local' BOTOCONFDIR = '' SYSCONFDIR = '/usr/local/etc' LOCALSTATEDIR = '/var/spool' + OSLOGIN_NSS_CACHE = '/usr/local/etc/oslogin_passwd.cache' else: LOCALBASE = '' BOTOCONFDIR = '' SYSCONFDIR = '/etc/default' LOCALSTATEDIR = '/var' + OSLOGIN_NSS_CACHE = '/etc/oslogin_passwd.cache'
Fix OSLOGIN_NSS_CACHE for FreeBSD (#<I>) Port-specific files are located in /usr/local/etc/ on FreeBSD. This avoids having a specific patch in the FreeBSD package, making it easier to maintain.
py
diff --git a/xclim/indices/_threshold.py b/xclim/indices/_threshold.py index <HASH>..<HASH> 100644 --- a/xclim/indices/_threshold.py +++ b/xclim/indices/_threshold.py @@ -74,7 +74,7 @@ __all__ = [ "sea_ice_area", "sea_ice_extent", "windy_days", - "jetstream", + "jetstream_metric_woolings", ] @@ -2061,7 +2061,7 @@ def windy_days( @declare_units(ua="[speed]") -def jetstream( +def jetstream_metric_woolings( ua: xarray.DataArray, ) -> xarray.DataArray: """Strength and latitude of jetstream. @@ -2071,7 +2071,7 @@ def jetstream( Parameters ---------- ua : xarray.DataArray - Wind velocity at between 750 and 950 hPa. + u-component wind velocity at between 750 and 950 hPa. Returns -------
rename jetstream metric and update docs
py
diff --git a/gwpy/timeseries/io/losc.py b/gwpy/timeseries/io/losc.py index <HASH>..<HASH> 100644 --- a/gwpy/timeseries/io/losc.py +++ b/gwpy/timeseries/io/losc.py @@ -263,10 +263,12 @@ def _fetch_losc_data_file(url, *args, **kwargs): with get_readable_fileobj(url, cache=cache, show_progress=verbose) as rem: if verbose: - print('Reading data... ') + print('Reading data...', end=' ') try: series = cls.read(rem, *args, **kwargs) except Exception as exc: + if verbose: + print('') exc.args = ("Failed to read LOSC data from %r: %s" % (url, str(exc)),) raise @@ -283,6 +285,8 @@ def _fetch_losc_data_file(url, *args, **kwargs): except (TypeError, ValueError): # don't care, bad LOSC pass + if verbose: + print('[Done]') return series
timeseries.io.losc: improved verbose output of FOD
py
diff --git a/pyrogram/session/session.py b/pyrogram/session/session.py index <HASH>..<HASH> 100644 --- a/pyrogram/session/session.py +++ b/pyrogram/session/session.py @@ -145,7 +145,7 @@ class Session: self.ping_thread.start() log.info("Connection inited: Layer {}".format(layer)) - except (OSError, TimeoutError): + except (OSError, TimeoutError, Error): self.stop() else: break
Catch RPCError in InitConnection
py
diff --git a/esgfpid/rabbit/asynchronous/thread_builder.py b/esgfpid/rabbit/asynchronous/thread_builder.py index <HASH>..<HASH> 100644 --- a/esgfpid/rabbit/asynchronous/thread_builder.py +++ b/esgfpid/rabbit/asynchronous/thread_builder.py @@ -129,8 +129,20 @@ class ConnectionBuilder(object): # This catches any error during connection startup and during the entire # time the ioloop runs, blocks and waits for events. logerror(LOGGER, 'Unexpected error during event listener\'s lifetime: %s: %s', e.__class__.__name__, e.message) - self.statemachine.set_to_permanently_unavailable() # to make sure no more messages are accepted, and gentle-finish won't wait... - self.thread._connection.ioloop.start() # to be able to listen to finish events from main thread! + + # As we will try to reconnect, set state to waiting to connect. + # If reconnection fails, it will be set to permanently unavailable. + self.statemachine.set_to_waiting_to_be_available() + + # In case this error is reached, it seems that no callback + # was called that handles the problem. Let's try to reconnect + # somewhere else. + errorname = 'Unexpected error ('+str(e.__class__.__name__)+': '+str(e.message)+')' + self.on_connection_error(self.thread._connection, errorname) + + # We start the ioloop, so it can handle the reconnection events, + # or also receive events from the publisher in the meantime. + self.thread._connection.ioloop.start() else: # I'm quite sure that this cannot happen, as the connection object
Added manual reconnection upon unexpected errors during ioloop.
py
diff --git a/pefile.py b/pefile.py index <HASH>..<HASH> 100644 --- a/pefile.py +++ b/pefile.py @@ -4984,6 +4984,7 @@ class PE(object): if hasattr(self, 'FileInfo'): fileinfo_list = list() + version_info_list.append(fileinfo_list) for entry in self.FileInfo[idx]: fileinfo_list.append(entry.dump_dict())
Update pefile.py: append fileinfo_list to version_info_list
py
diff --git a/viper/functions.py b/viper/functions.py index <HASH>..<HASH> 100644 --- a/viper/functions.py +++ b/viper/functions.py @@ -597,7 +597,7 @@ def _RLPlist(expr, args, kwargs, context): ['seq', ['with', '_sub', variable_pointer, ['pop', ['call', - 10000 + 500 * len(_format) + 10 * len(args), + 1500 + 400 * len(_format) + 10 * len(args), LLLnode.from_list(RLP_DECODER_ADDRESS, annotation='RLP decoder'), 0, ['add', '_sub', 32],
Improve rlp decoding gas estimation
py
diff --git a/htmresearch/frameworks/pytorch/mnist_sparse_experiment.py b/htmresearch/frameworks/pytorch/mnist_sparse_experiment.py index <HASH>..<HASH> 100644 --- a/htmresearch/frameworks/pytorch/mnist_sparse_experiment.py +++ b/htmresearch/frameworks/pytorch/mnist_sparse_experiment.py @@ -132,7 +132,7 @@ class MNISTSparseExperiment(PyExperimentSuite): ret.update(self.runNoiseTests(params)) print("Noise test results: totalCorrect=", ret["totalCorrect"], "Test error=", ret["testerror"], ", entropy=", ret["entropy"]) - if ret["totalCorrect"] > 93000: + if ret["totalCorrect"] > 100000 and ret["testerror"] > 98.3: print("*******") print(params) else:
Updated excitement factor in experiment printouts
py
diff --git a/raven/contrib/flask/__init__.py b/raven/contrib/flask/__init__.py index <HASH>..<HASH> 100644 --- a/raven/contrib/flask/__init__.py +++ b/raven/contrib/flask/__init__.py @@ -8,6 +8,8 @@ raven.contrib.flask from __future__ import absolute_import +import os + from flask import request from flask.signals import got_request_exception from raven.conf import setup_logging @@ -59,7 +61,7 @@ class Sentry(object): secret_key=app.config.get('SENTRY_SECRET_KEY'), project=app.config.get('SENTRY_PROJECT'), site=app.config.get('SENTRY_SITE_NAME'), - dsn=self.dsn or app.config.get('SENTRY_DSN'), + dsn=self.dsn or app.config.get('SENTRY_DSN') or os.environ.get('SENTRY_DSN'), ) return self._client
Ensure we look up SENTRY_DSN from the environment in the Flask client if it's not set
py
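The lookup chain above in miniature, as a hedged sketch (the function name is illustrative): an explicit value wins, then the app config, then the process environment.

```python
import os

def resolve_dsn(explicit_dsn, app_config):
    # First non-empty source wins: constructor arg, Flask config, environment.
    return (explicit_dsn
            or app_config.get("SENTRY_DSN")
            or os.environ.get("SENTRY_DSN"))

assert resolve_dsn("dsn://explicit", {"SENTRY_DSN": "dsn://config"}) == "dsn://explicit"
assert resolve_dsn(None, {"SENTRY_DSN": "dsn://config"}) == "dsn://config"
```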
diff --git a/pytestsalt/fixtures/daemons.py b/pytestsalt/fixtures/daemons.py index <HASH>..<HASH> 100644 --- a/pytestsalt/fixtures/daemons.py +++ b/pytestsalt/fixtures/daemons.py @@ -658,7 +658,7 @@ class SaltDaemonScriptBase(SaltScriptBase): if self._connectable.is_set(): return True try: - return self.io_loop.run_sync(self._wait_until_running, timeout=timeout) + return self.io_loop.run_sync(self._wait_until_running, timeout=timeout+1) except ioloop.TimeoutError: return False @@ -667,6 +667,7 @@ class SaltDaemonScriptBase(SaltScriptBase): ''' The actual, coroutine aware, call to wait for the daemon to start listening ''' + yield gen.sleep(1) check_ports = self.get_check_ports() log.debug( '%s [%s] Checking the following ports to assure running status: %s', @@ -694,7 +695,7 @@ class SaltDaemonScriptBase(SaltScriptBase): sock.shutdown(socket.SHUT_RDWR) sock.close() del sock - yield gen.sleep(0.125) + yield gen.sleep(0.5) # A final sleep to allow the ioloop to do other things yield gen.sleep(0.125) log.debug('%s [%s] All ports checked. Running!', self.log_prefix, self.cli_display_name)
Be slower when trying to connect to the engine port
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -29,16 +29,15 @@ except ImportError: sys.exit(1) setup( - name='autokey-py3', + name='autokey', version=R.VERSION, - description='Python 3 port of AutoKey.', + description='AutoKey (Python 3)', author=R.AUTHOR, author_email=R.AUTHOR_EMAIL, maintainer=R.MAINTAINER, maintainer_email=R.MAINTAINER_EMAIL, url='https://github.com/autokey-py3/autokey-py3', license='GPLv3', - install_requires=['dbus-python', 'pyinotify', 'python-xlib', 'typing'], packages=['autokey', 'autokey.gtkui', 'autokey.qtui'], package_dir={'': 'lib'}, package_data={'autokey.qtui': ['data/*'], @@ -79,7 +78,6 @@ setup( 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', 'Natural Language :: English', 'Operating System :: POSIX :: Linux', - 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', ], )
Update setup.py. Remove the install_requires= line, as it was causing problems for the package-installed version. Rename the module to just "autokey". Depend explicitly on Python <I> (due to the 'typing' module)
py
diff --git a/GaugeController.py b/GaugeController.py index <HASH>..<HASH> 100644 --- a/GaugeController.py +++ b/GaugeController.py @@ -40,6 +40,7 @@ class AGC100: # below self.ser = serial.Serial(port = port, timeout = timeout) + # checks the controller's response for errors def checkAcknowledgement(self, response): if (response == NAK + CR + LF): message = 'Serial communication returned negative acknowledge (NAK). ' \ @@ -50,6 +51,7 @@ class AGC100: ''.format(repr(response)) raise IOError(message) + # returns the pressure currently read by the controller def getPressure(self): assert self.ser.isOpen() @@ -70,6 +72,7 @@ class AGC100: self.ser.write(CR + LF) return pressure + # returns the model of the gauge connected to the controller def getGaugeModel(self): assert self.ser.isOpen() @@ -84,6 +87,7 @@ class AGC100: return model + # returns the units of the pressure measurement def getUnits(self): assert self.ser.isOpen() @@ -98,6 +102,7 @@ class AGC100: return unit + # closes the connection with the controller def closeConnection(self): self.ser.close()
Started on the piezo controller, added a script to scan for serial ports, added function comments to GaugeController
py
diff --git a/openquake/supervising/supervisor.py b/openquake/supervising/supervisor.py index <HASH>..<HASH> 100644 --- a/openquake/supervising/supervisor.py +++ b/openquake/supervising/supervisor.py @@ -306,6 +306,9 @@ class SupervisorLogMessageConsumer(logs.AMQPLogSource): if failures: message = "job terminated with failures: %s" % failures else: + # Don't check for failed nodes if distribution is disabled. + # In this case, we don't expect any nodes to be present, and + # thus, there are none that can fail. if not openquake.no_distribute(): failed_nodes = abort_due_to_failed_nodes(self.job_id) if failed_nodes:
supervising/supervisor: Added a comment explaining why the failed node check is skipped in certain cases.
py
diff --git a/hearthstone/enums.py b/hearthstone/enums.py index <HASH>..<HASH> 100644 --- a/hearthstone/enums.py +++ b/hearthstone/enums.py @@ -2,7 +2,6 @@ from enum import IntEnum class GameTag(IntEnum): - IGNORE_DAMAGE = 1 TAG_SCRIPT_DATA_NUM_1 = 2 TAG_SCRIPT_DATA_NUM_2 = 3 TAG_SCRIPT_DATA_ENT_1 = 4 @@ -165,7 +164,6 @@ class GameTag(IntEnum): ADJACENT_BUFF = 350 FORCED_PLAY = 352 LOW_HEALTH_THRESHOLD = 353 - IGNORE_DAMAGE_OFF = 354 SPELLPOWER_DOUBLE = 356 HEALING_DOUBLE = 357 NUM_OPTIONS_PLAYED_THIS_TURN = 358 @@ -255,8 +253,10 @@ class GameTag(IntEnum): OVERKILL = 380 # Deleted + IGNORE_DAMAGE = 1 COPY_DEATHRATTLE_INDEX = 56 DIVINE_SHIELD_READY = 314 + IGNORE_DAMAGE_OFF = 354 NUM_OPTIONS = 359 # Missing, only present in logs
enums: Move GameTags around
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ setup( 'virtualchain==0.0.6', 'kademlia==0.5', 'keychain==0.1.4', - 'blockstack-client==0.0.12.1' + 'blockstack==0.0.12.1' ], classifiers=[ 'Intended Audience :: Developers',
blockstack, not blockstack-client
py
diff --git a/pysat/instruments/superdarn_grdex.py b/pysat/instruments/superdarn_grdex.py index <HASH>..<HASH> 100644 --- a/pysat/instruments/superdarn_grdex.py +++ b/pysat/instruments/superdarn_grdex.py @@ -187,7 +187,7 @@ def clean(self): def download(date_array, tag, data_path, user=None, password=None): """ - download IVM data consistent with pysat + download SuperDARN data from VT organized for loading by pysat """
Corrected docstring for download method
py
diff --git a/client/speaker.py b/client/speaker.py index <HASH>..<HASH> 100644 --- a/client/speaker.py +++ b/client/speaker.py @@ -38,7 +38,7 @@ class AbstractSpeaker(object): @classmethod @abstractmethod def is_available(cls): - return True + return (find_executable('aplay') is not None) def __init__(self): self._logger = logging.getLogger(__name__)
Check for `aplay` in AbstractSpeaker (because of af<I>dc5)
py
diff --git a/query.py b/query.py index <HASH>..<HASH> 100755 --- a/query.py +++ b/query.py @@ -76,6 +76,8 @@ def merged_output(queries, toJson=False, jsonIndent=0): for query in queries: results += auto(query) results = utils.filter_duplicate_dicts(results) + # Sort posts from newest to oldest, like a booru's results would show. + results = sorted(results, key=lambda post: post["id"], reverse=True) if toJson: return json.dumps(results, ensure_ascii=False, indent=jsonIndent)
query: Sort posts from newest to oldest. This matches the normal behavior when searching on a booru, and fixes the ordering after filter functions left everything in random order.
py
diff --git a/bcbio/structural/cn_mops.py b/bcbio/structural/cn_mops.py index <HASH>..<HASH> 100644 --- a/bcbio/structural/cn_mops.py +++ b/bcbio/structural/cn_mops.py @@ -196,7 +196,7 @@ ctrl_count <- getReadCountsFromBAM(c("{ctrl_file}"), sampleNames=c("{ctrl_name}" refSeqName="{chrom}", parallel={num_cores}, WL=width(case_count)[[1]]) prep_counts <- referencecn.mops(case_count, ctrl_count, parallel={num_cores}) -cnv_out <- calcFractionalCopyNumbers(prep_counts) +cnv_out <- calcIntegerCopyNumbers(prep_counts) """ _population_prep_targeted = """ @@ -222,5 +222,5 @@ ctrl_count <- getSegmentReadCountsFromBAM(c("{ctrl_file}"), GR=my_gr, sampleNames=c("{case_name}"), mode="{pairmode}", parallel={num_cores}) prep_counts <- referencecn.mops(case_count, ctrl_count, parallel={num_cores}) -cnv_out <- calcFractionalCopyNumbers(prep_counts) +cnv_out <- calcIntegerCopyNumbers(prep_counts) """
Update cn_mops.py Proposing these changes as I've seen errors coming from the (experimental) ``calcFractionalCopyNumbers`` function but same analyses finish fine using ``calcIntegerCopyNumbers``.
py
diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index <HASH>..<HASH> 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -2582,7 +2582,8 @@ def mod_repo(repo, basedir=None, **kwargs): # Build a list of keys to be deleted todelete = [] - for key in repo_opts: + # list() of keys because the dict could be shrinking in the for loop. + for key in list(repo_opts): if repo_opts[key] != 0 and not repo_opts[key]: del repo_opts[key] todelete.append(key)
fix bug where the dict shrinks while being iterated in a for loop
py
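The pattern behind the fix above, isolated into a runnable sketch: deleting keys from a dict while iterating over the dict itself raises RuntimeError on Python 3, so the loop iterates over a list() snapshot of the keys instead.

```python
repo_opts = {"a": 0, "b": "", "c": "keep"}

todelete = []
for key in list(repo_opts):  # list() snapshots the keys up front
    if repo_opts[key] != 0 and not repo_opts[key]:
        # Safe: we mutate the dict, not the snapshot we iterate over.
        del repo_opts[key]
        todelete.append(key)

assert repo_opts == {"a": 0, "c": "keep"}
assert todelete == ["b"]
```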
diff --git a/vultr/vultr.py b/vultr/vultr.py index <HASH>..<HASH> 100644 --- a/vultr/vultr.py +++ b/vultr/vultr.py @@ -566,7 +566,7 @@ class Vultr(object): """ return self.request('/v1/backup/list') - def server_list(self, subid): + def server_list(self, subid=None): """ /v1/server/list GET - account
Subid is not required for server_list
py
diff --git a/ipyrad/assemble/clustmap.py b/ipyrad/assemble/clustmap.py index <HASH>..<HASH> 100644 --- a/ipyrad/assemble/clustmap.py +++ b/ipyrad/assemble/clustmap.py @@ -1976,7 +1976,15 @@ def mapping_reads(data, sample, nthreads, altref=False): proc4 = sps.Popen(cmd4, stderr=sps.STDOUT, stdout=sps.PIPE) error4 = proc4.communicate()[0] if proc4.returncode: - raise IPyradError(error4) + # https://github.com/dereneaton/ipyrad/issues/435 + # If the bamfile has very large chromosomes (>500Mb) then the .bai + # index format can't handle it. Try again with the .csi format `-c`. + if "hts_index_check_range" in error4: + cmd4 = [ip.bins.samtools, "index", "-c", bamout] + proc4 = sps.Popen(cmd4, stderr=sps.STDOUT, stdout=sps.PIPE) + error4 = proc4.communicate()[0] + if proc4.returncode: + raise IPyradError(error4) # Running cmd5 writes to either edits/sname-refmap_derep.fa for SE # or it makes edits/sname-tmp-umap{12}.fastq for paired data, which
clustmap: Handle bam files with very large chromosomes (#<I>)
py
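A hedged sketch of the fallback above (paths and error handling simplified): if the default BAI index fails because a chromosome exceeds its addressable range, retry with samtools' CSI index format via the -c flag.

```python
import subprocess

def index_bam(bamfile):
    # Try the default .bai index first.
    proc = subprocess.run(["samtools", "index", bamfile],
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    if proc.returncode and b"hts_index_check_range" in proc.stdout:
        # Chromosome too large for .bai; fall back to the .csi format.
        proc = subprocess.run(["samtools", "index", "-c", bamfile],
                              stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    if proc.returncode:
        raise RuntimeError(proc.stdout.decode())
```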
diff --git a/plaso/engine/path_helper.py b/plaso/engine/path_helper.py index <HASH>..<HASH> 100644 --- a/plaso/engine/path_helper.py +++ b/plaso/engine/path_helper.py @@ -11,7 +11,7 @@ from plaso.engine import logger class PathHelper(object): """Class that implements the path helper.""" - _NON_PRINTABLE_CHARACTERS = list(range(0, 0x20)) + [0x7f] + _NON_PRINTABLE_CHARACTERS = list(range(0, 0x20)) + list(range(0x7f, 0xa0)) _ESCAPE_CHARACTERS = str.maketrans({ value: '\\x{0:02x}'.format(value) for value in _NON_PRINTABLE_CHARACTERS})
Added U<I>-U<I>f to set of non-printable characters (#<I>)
py
diff --git a/github_release.py b/github_release.py index <HASH>..<HASH> 100755 --- a/github_release.py +++ b/github_release.py @@ -157,23 +157,26 @@ gh_release_delete.description = { } -def gh_release_publish(repo_name, tag_name): - patch_release(repo_name, tag_name, draft=False) +def gh_release_publish(repo_name, tag_name, prerelease=False): + patch_release(repo_name, tag_name, draft=False, prerelease=prerelease) gh_release_publish.description = { "help": "Publish a release setting draft to 'False'", - "params": ["repo_name", "tag_name"] + "params": ["repo_name", "tag_name", "prerelease"], + "optional_params": {"prerelease": bool} } -def gh_release_unpublish(repo_name, tag_name): - patch_release(repo_name, tag_name, draft=True) +def gh_release_unpublish(repo_name, tag_name, prerelease=False): + draft = not prerelease + patch_release(repo_name, tag_name, draft=draft, prerelease=prerelease) gh_release_unpublish.description = { "help": "Unpublish a release setting draft to 'True'", - "params": ["repo_name", "tag_name"] + "params": ["repo_name", "tag_name", "prerelease"], + "optional_params": {"prerelease": bool} }
gh_release_publish/unpublish: Support additional parameter "--prerelease"
py
diff --git a/src/fuzzfetch/fetch.py b/src/fuzzfetch/fetch.py index <HASH>..<HASH> 100644 --- a/src/fuzzfetch/fetch.py +++ b/src/fuzzfetch/fetch.py @@ -428,6 +428,9 @@ class FetcherArgs(object): @type args: list @param args: a list of arguments """ + if hasattr(super(FetcherArgs, self), 'sanity_check'): + super(FetcherArgs, self).sanity_check(args) + if self.build_is_ns: # this is a custom build # ensure conflicting options are not set
Call sanity_check on parent where applicable
py
diff --git a/bots/redisbot.py b/bots/redisbot.py index <HASH>..<HASH> 100644 --- a/bots/redisbot.py +++ b/bots/redisbot.py @@ -59,10 +59,6 @@ class MarkovBot(IRCBot): # split the key on the separator to extract the words words = key.split(self.separator) - # if we've hit a stop-word, break here - if words[0] == self.stop_word: - break - # add the word to the list of words in our generated message gen_words.append(words[0])
Removing unneeded check for stopword
py
diff --git a/tests/test_django.py b/tests/test_django.py index <HASH>..<HASH> 100644 --- a/tests/test_django.py +++ b/tests/test_django.py @@ -12,7 +12,6 @@ from django import test as django_test from django.conf import settings from django.db.models import signals from django.test import utils as django_test_utils -from django.test.runner import DiscoverRunner as DjangoTestSuiteRunner import factory.django @@ -34,18 +33,12 @@ test_state = {} def setUpModule(): django_test_utils.setup_test_environment() - runner = DjangoTestSuiteRunner() - runner_state = runner.setup_databases() - test_state.update({ - 'runner': runner, - 'runner_state': runner_state, - }) + runner_state = django_test_utils.setup_databases(verbosity=0, interactive=False) + test_state['runner_state'] = runner_state def tearDownModule(): - runner = test_state['runner'] - runner_state = test_state['runner_state'] - runner.teardown_databases(runner_state) + django_test_utils.teardown_databases(test_state['runner_state'], verbosity=0) django_test_utils.teardown_test_environment()
Use django.test.utils to initialize test database Removes the need to import and instantiate DiscoverRunner. This also allows passing verbosity=0 when creating the database to squelch some noise during tests: Creating test database for alias 'default'... Creating test database for alias 'replica'... Available since Django <I>.
py
diff --git a/steam/items.py b/steam/items.py index <HASH>..<HASH> 100644 --- a/steam/items.py +++ b/steam/items.py @@ -834,9 +834,9 @@ class backpack(base.json_request): raise BackpackError("Steam failed to return inventory data") if status == 8: - raise BackpackError("Bad SteamID64 given") + raise base.user.ProfileError("Bad SteamID64 given") elif status == 15: - raise BackpackError("Profile set to private") + raise base.user.ProfileError("Profile set to private") elif status != 1: raise BackpackError("Unknown error")
Raise ProfileError instead of BackpackError for profile-related status codes
py
diff --git a/great_expectations/data_context/store/html_site_store.py b/great_expectations/data_context/store/html_site_store.py index <HASH>..<HASH> 100644 --- a/great_expectations/data_context/store/html_site_store.py +++ b/great_expectations/data_context/store/html_site_store.py @@ -143,7 +143,7 @@ class HtmlSiteStore(object): ].set(key.resource_identifier.to_tuple(), serialized_value, content_encoding='utf-8', content_type='text/html; charset=utf-8') - def get_url_for_resource(self, only_if_exists=True, resource_identifier=None): + def get_url_for_resource(self, resource_identifier=None, only_if_exists=True): """ Return the URL of the HTML document that renders a resource (e.g., an expectation suite or a validation result).
ayirp/suiterename: move only_if_exists later in the parameter order
py
diff --git a/citrination_client/base/response_handling.py b/citrination_client/base/response_handling.py index <HASH>..<HASH> 100644 --- a/citrination_client/base/response_handling.py +++ b/citrination_client/base/response_handling.py @@ -63,7 +63,8 @@ def _check_response_for_timeout(response): def _check_response_for_missing_resource(response): if response.status_code == 404: - raise ResourceNotFoundException() + msg = _get_message(response) or "Resource not found" + raise ResourceNotFoundException(msg) def _check_response_for_payload_too_large(response): if response.status_code == 413: @@ -84,3 +85,9 @@ def _check_response_for_version_mismatch(response): return response except (ValueError, KeyError): return response + +def _get_message(response): + try: + return response.json().get("message", None) + except Exception: + return None
Attempt to propagate messages from <I>-level responses. Oftentimes, the Citrination API returns useful messages with <I>-level responses. Pycc by default just swallows them and shows a standard 'Resource not found' message. This change will pull the message out of <I>-level responses if possible, and then fall back to the default message.
py
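The fallback logic described above, reduced to a sketch (names are illustrative): prefer the server-supplied message and fall back to a generic one when the body is not JSON or lacks a "message" key.

```python
def error_message(response, default="Resource not found"):
    try:
        # response.json() raises if the body is not valid JSON.
        return response.json().get("message") or default
    except Exception:
        return default
```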
diff --git a/spyder/plugins/editor/widgets/tests/test_warnings.py b/spyder/plugins/editor/widgets/tests/test_warnings.py index <HASH>..<HASH> 100644 --- a/spyder/plugins/editor/widgets/tests/test_warnings.py +++ b/spyder/plugins/editor/widgets/tests/test_warnings.py @@ -26,7 +26,6 @@ TEXT = ("def some_function():\n" # D100, D103: Missing docstring @pytest.mark.slow @pytest.mark.second -@pytest.mark.xfail def test_ignore_warnings(qtbot, lsp_codeeditor): """Test that the editor is ignoring some warnings.""" editor, manager = lsp_codeeditor
Editor: Remove xfail from test to ignore warnings
py
diff --git a/python/dllib/src/setup.py b/python/dllib/src/setup.py index <HASH>..<HASH> 100755 --- a/python/dllib/src/setup.py +++ b/python/dllib/src/setup.py @@ -98,7 +98,7 @@ def setup_package(): scripts=scripts, install_requires=[ 'numpy>=1.19.5', 'pyspark==2.4.6', 'conda-pack==0.3.1', - 'six>=1.10.0', 'bigdl-core==2.1.0b20220321'], + 'six>=1.10.0', 'bigdl-core=='+VERSION], dependency_links=['https://d3kbcqa49mib13.cloudfront.net/spark-2.0.0-bin-hadoop2.7.tgz'], include_package_data=True, package_data={"bigdl.share.dllib": ['lib/bigdl-dllib*.jar', 'conf/*',
change bigdl-core version to nightly (#<I>)
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ setup(name='astral', author_email='python@sffjunkie.co.uk', url="http://www.sffjunkie.co.uk/python-astral.html", license='Apache-2.0', - package_dir={'': 'src'}, + package_dir={'': 'astral'}, py_modules=['astral'] )
Changed to match new source directory.
py
diff --git a/usb1.py b/usb1.py index <HASH>..<HASH> 100644 --- a/usb1.py +++ b/usb1.py @@ -1,12 +1,12 @@ """ Pythonic wrapper for libusb-1.0. -The first thing you must do is to get an "USB context". To do so, create a -LibUSBContext instance. +The first thing you must do is to get an "USB context". To do so, create an +USBContext instance. Then, you can use it to browse available USB devices and open the one you want to talk to. At this point, you should have a USBDeviceHandle instance (as returned by -LibUSBContext or USBDevice instances), and you can start exchanging with the +USBContext or USBDevice instances), and you can start exchanging with the device. Features:
Do not mention deprecated LibUSBContext in documentation.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -94,6 +94,7 @@ REQUIRES = [ 'pillow >=4.0', 'packaging >=16.8', 'tornado >=5', + 'typing_extensions >=3.7.4', ] # if this is just conda-build skimming information, skip all this actual work
Add missing typing_extensions dependency (#<I>)
py
diff --git a/cumulusci/salesforce_api/metadata.py b/cumulusci/salesforce_api/metadata.py index <HASH>..<HASH> 100644 --- a/cumulusci/salesforce_api/metadata.py +++ b/cumulusci/salesforce_api/metadata.py @@ -74,22 +74,8 @@ class BaseMetadataApiCall(object): raise MetadataApiError(response.text, response) def _build_endpoint_url(self): - # Parse org id from id which ends in /ORGID/USERID org_id = self.task.org_config.org_id - # If "My Domain" is configured in the org, the instance_url needs to be - # parsed differently instance_url = self.task.org_config.instance_url - if instance_url.find(".my.salesforce.com") != -1: - # Parse instance_url with My Domain configured - # URL will be in the format - # https://name--name.na11.my.salesforce.com and should be - # https://na11.salesforce.com - instance_url = re.sub( - r"https://.*\.(\w+)\.my\.salesforce\.com", - r"https://\1.salesforce.com", - instance_url, - ) - # Build the endpoint url from the instance_url endpoint = f"{instance_url}/services/Soap/m/{self.api_version}/{org_id}" return endpoint
Use the instance_url as is; don't adjust (fixes MDAPI with enhanced domains)
py
diff --git a/mythril/analysis/solver.py b/mythril/analysis/solver.py index <HASH>..<HASH> 100644 --- a/mythril/analysis/solver.py +++ b/mythril/analysis/solver.py @@ -145,9 +145,7 @@ def _replace_with_actual_sha( s_index = len(code.bytecode) + 2 else: s_index = 10 - for i in range(s_index, len(tx["input"]), 64): - # We can do this because the data is aligned as blocks of 32 bytes - # For more info https://solidity.readthedocs.io/en/v0.5.12/abi-spec.html, + for i in range(s_index, len(tx["input"])): data_slice = tx["input"][i : i + 64] if hash_matcher not in data_slice or len(data_slice) != 64: continue
Iterate over single indices rather than chunks of <I>
py
diff --git a/alot/ui.py b/alot/ui.py index <HASH>..<HASH> 100644 --- a/alot/ui.py +++ b/alot/ui.py @@ -3,6 +3,7 @@ # For further details see the COPYING file import urwid import logging +import signal from twisted.internet import reactor, defer from settings import settings @@ -68,6 +69,8 @@ class UI(object): mainframe = urwid.Frame(urwid.SolidFill()) self.root_widget = urwid.AttrMap(mainframe, global_att) + signal.signal(signal.SIGUSR1, self.handle_signal) + # set up main loop self.mainloop = urwid.MainLoop(self.root_widget, handle_mouse=False, @@ -637,3 +640,7 @@ class UI(object): d.addCallback(call_apply) d.addCallback(call_posthook) return d + def handle_signal(self, signum, frame): + self.current_buffer.rebuild() + self.update() +
refresh current buffer on SIGUSR1
py
diff --git a/nsim/nsim.py b/nsim/nsim.py index <HASH>..<HASH> 100644 --- a/nsim/nsim.py +++ b/nsim/nsim.py @@ -593,6 +593,8 @@ class DistTimeseries(distob.DistArray): except SimValueError: pass return distob.DistArray(results, new_distaxis) + elif all(isinstance(r, numbers.Number) for r in results): + return np.array(results) else: return results # list if hasattr(f, '__name__'):
fix DistTimeseries.__distob_vectorize__() in case where result is a list of numbers
py
diff --git a/andes/models/group.py b/andes/models/group.py index <HASH>..<HASH> 100644 --- a/andes/models/group.py +++ b/andes/models/group.py @@ -238,6 +238,7 @@ class GroupBase: if isinstance(value, (float, str, int)): value = [value] * len(idx) + idx, _ = self._vectorize_idx(idx) models = self.idx2model(idx) for i, idx in enumerate(idx): @@ -331,8 +332,8 @@ class GroupBase: self._check_src(src) self._check_idx(idx) - idx, _ = self._vectorize_idx(idx) + idx, _ = self._vectorize_idx(idx) models = self.idx2model(idx, allow_none=True) ret = [None] * len(models)
Bug fix for not vectorizing idx before calling `idx2model`.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -50,7 +50,7 @@ setup( license="Apache", packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), install_requires=[ - "torch>1.3.1,<=1.5.0", + "torch>1.3.1,<1.6.0", "jsonnet>=0.10.0 ; sys.platform != 'win32'", "overrides==2.8.0", "nltk",
tweak torch requirement (#<I>) * tweak torch requirement * allow patches
py
diff --git a/models/rasmachine/rasmachine.py b/models/rasmachine/rasmachine.py index <HASH>..<HASH> 100644 --- a/models/rasmachine/rasmachine.py +++ b/models/rasmachine/rasmachine.py @@ -15,9 +15,8 @@ import datetime import argparse import gmail_client import twitter_client -import ndex.client -import ndex.networkn from indra.sources import reach +from indra.databases import ndex_client import indra.tools.assemble_corpus as ac from indra.tools.gene_network import GeneNetwork from indra.literature import pubmed_client, get_full_text, elsevier_client @@ -283,7 +282,7 @@ def upload_new_ndex(model_path, new_stmts, ndex_cred): with open(cx_name, 'wb') as fh: fh.write(cx_str.encode('utf-8')) network_id = ndex_cred['network'] - ndex_client.update_ndex_network(cx_str, network_id, ndex_cred) + ndex_client.update_network(cx_str, network_id, ndex_cred) def make_date_str():
Fixes to rasmachine imports
py
diff --git a/spout.py b/spout.py index <HASH>..<HASH> 100644 --- a/spout.py +++ b/spout.py @@ -6,9 +6,7 @@ from __future__ import absolute_import, print_function, unicode_literals import logging -from six import PY3 - -from .component import Component, Tuple +from .component import Component log = logging.getLogger(__name__) @@ -26,7 +24,7 @@ class Spout(Component): the main run loop. A good place to initialize connections to data sources. - :param storm_conf: the Storm configuration for this Bolt. This is the + :param storm_conf: the Storm configuration for this Spout. This is the configuration provided to the topology, merged in with cluster configuration on the worker node. :type storm_conf: dict
Remove a bunch of unnecessary imports
py
diff --git a/treeherder/etl/tasks/tbpl_tasks.py b/treeherder/etl/tasks/tbpl_tasks.py index <HASH>..<HASH> 100644 --- a/treeherder/etl/tasks/tbpl_tasks.py +++ b/treeherder/etl/tasks/tbpl_tasks.py @@ -20,7 +20,9 @@ def submit_star_comment(project, job_id, bug_id, submit_timestamp, who): req.generate_request_body() req.send_request() except Exception, e: - submit_star_comment.retry(exc=e) + # Initially retry after 1 minute, then for each subsequent retry + # lengthen the retry time by another minute. + submit_star_comment.retry(exc=e, countdown=(1 + submit_star_comment.request.retries) * 60) # this exception will be raised once the number of retries # exceeds max_retries raise @@ -37,7 +39,9 @@ def submit_bug_comment(project, job_id, bug_id): req.generate_request_body() req.send_request() except Exception, e: - submit_bug_comment.retry(exc=e) + # Initially retry after 1 minute, then for each subsequent retry + # lengthen the retry time by another minute. + submit_bug_comment.retry(exc=e, countdown=(1 + submit_bug_comment.request.retries) * 60) # this exception will be raised once the number of retries # exceeds max_retries raise
Bug <I> - Adjust delay before retrying failure classification tasks. With this change, the first retry happens after 1 minute, and each subsequent retry lengthens the delay by a further minute.
py
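The back-off schedule above in isolation (the helper is hypothetical; in the real task the counter comes from the task's request.retries): the countdown grows by one minute per attempt.

```python
def retry_countdown(retries_so_far):
    # 0 retries -> 60s, 1 retry -> 120s, 2 retries -> 180s, ...
    return (1 + retries_so_far) * 60

assert [retry_countdown(n) for n in range(3)] == [60, 120, 180]
```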
diff --git a/pandas/io/html.py b/pandas/io/html.py index <HASH>..<HASH> 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -103,7 +103,7 @@ def _get_skiprows(skiprows): elif skiprows is None: return 0 raise TypeError( - "%r is not a valid type for skipping rows" % type(skiprows).__name__ + f"'{type(skiprows).__name__}' is not a valid type for skipping rows" ) @@ -133,7 +133,7 @@ def _read(obj): except (TypeError, ValueError): pass else: - raise TypeError("Cannot read object of type %r" % type(obj).__name__) + raise TypeError(f"Cannot read object of type '{type(obj).__name__}'") return text
STY: fstrings, repr (#<I>)
py
diff --git a/src/flapjack/testing.py b/src/flapjack/testing.py index <HASH>..<HASH> 100644 --- a/src/flapjack/testing.py +++ b/src/flapjack/testing.py @@ -39,7 +39,7 @@ class FlapjackUnitTest(unittest.TestCase): self.c = FlapjackRESTClient() #commonly-used functions for tests - def assertValidXMLResponse(self, response, content_type='application/json'): + def assertValidXMLResponse(self, response, content_type='application/xml'): failtext = "" try: etree.fromstring(response.content)
fixed a bug. XML is not json, silly me
py
diff --git a/tests/test_filters.py b/tests/test_filters.py index <HASH>..<HASH> 100644 --- a/tests/test_filters.py +++ b/tests/test_filters.py @@ -99,7 +99,7 @@ class TestMongoFilters(unittest.TestCase): @defer.inlineCallbacks def test_Explain(self): result = yield self.coll.find({}, filter=qf.explain()) - self.assertTrue("executionStats" in result[0]) + self.assertTrue("executionStats" in result[0] or "nscanned" in result[0]) @defer.inlineCallbacks def test_FilterMerge(self):
Fixed explain test for MongoDB 2.x
py
diff --git a/openquake/baselib/performance.py b/openquake/baselib/performance.py index <HASH>..<HASH> 100644 --- a/openquake/baselib/performance.py +++ b/openquake/baselib/performance.py @@ -227,7 +227,6 @@ class Monitor(object): """ t = (name, self.task_no, self.weight, self.duration, len(res.pik), mem_gb) - print(t) data = numpy.array([t], task_info_dt) hdf5.extend(h5['task_info'], data) h5['task_info'].flush() # notify the reader
Removed debugging print [ci skip]
py
diff --git a/tesseract.py b/tesseract.py index <HASH>..<HASH> 100755 --- a/tesseract.py +++ b/tesseract.py @@ -63,12 +63,12 @@ def run_tesseract(input_filename, output_filename_base, lang=None, boxes=False): command = [tesseract_cmd, input_filename, output_filename_base] - if boxes: - command += ['batch.nochop', 'makebox'] - if lang is not None: command += ['-l', lang] + if boxes: + command += ['batch.nochop', 'makebox'] + proc = subprocess.Popen(command, stderr=subprocess.PIPE) return (proc.wait(), proc.stderr.read()) @@ -120,7 +120,10 @@ def image_to_string(image, lang=None, boxes=False): input_file_name = '%s.bmp' % tempnam() output_file_name_base = tempnam() - output_file_name = '%s.txt' % output_file_name_base + if not boxes: + output_file_name = '%s.txt' % output_file_name_base + else: + output_file_name = '%s.box' % output_file_name_base try: image.save(input_file_name) status, error_string = run_tesseract(input_file_name,
Fix boxes support (tested with Tesseract <I>)
py
diff --git a/gwpy/cli/spectrogram.py b/gwpy/cli/spectrogram.py index <HASH>..<HASH> 100644 --- a/gwpy/cli/spectrogram.py +++ b/gwpy/cli/spectrogram.py @@ -91,7 +91,7 @@ class Spectrogram(CliProduct): # based on the number of FFT calculation choose between # high time resolution and high SNR - snr_nfft = self.dur / (secpfft * stride) + snr_nfft = self.dur / stride_sec if (snr_nfft > 512): specgram = self.timeseries[0].spectrogram(stride_sec, fftlength=secpfft, overlap=ovlp_sec)
spectrogram.py: fix problem with calculation of snr_nfft
py
diff --git a/flask_socketio/__init__.py b/flask_socketio/__init__.py index <HASH>..<HASH> 100644 --- a/flask_socketio/__init__.py +++ b/flask_socketio/__init__.py @@ -36,6 +36,7 @@ class _SocketIOMiddleware(socketio.Middleware): socketio_path) def __call__(self, environ, start_response): + environ = environ.copy() environ['flask.app'] = self.flask_app return super(_SocketIOMiddleware, self).__call__(environ, start_response)
make a copy of the environ dict
py
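A small sketch of the WSGI hygiene the one-line fix above applies (app and key names are hypothetical): middleware that annotates environ should mutate a private copy, since other layers may still hold a reference to the original dict.

```python
def inject_marker(app):
    def wrapped(environ, start_response):
        environ = environ.copy()  # do not mutate the caller's dict
        environ["example.app_marker"] = "middleware-was-here"
        return app(environ, start_response)
    return wrapped
```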
diff --git a/hadoopy/runner.py b/hadoopy/runner.py index <HASH>..<HASH> 100644 --- a/hadoopy/runner.py +++ b/hadoopy/runner.py @@ -17,7 +17,7 @@ def _find_hstreaming(): search_root = os.environ['HADOOP_HOME'] except KeyError: search_root = '/' - cmd = 'find %s -name hadoop*streaming.jar' % (search_root) + cmd = 'find %s -name hadoop*streaming*.jar' % (search_root) p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) return p.communicate()[0].split('\n')[0]
Updated runner to look for alternative names
py
diff --git a/yubico/yubikey_usb_hid.py b/yubico/yubikey_usb_hid.py index <HASH>..<HASH> 100644 --- a/yubico/yubikey_usb_hid.py +++ b/yubico/yubikey_usb_hid.py @@ -150,9 +150,15 @@ class YubiKeyHIDDevice(object): # make sure we have a fresh pgm_seq value self.status() self._debug("Programmed slot %i, sequence %i -> %i\n" % (slot, old_pgm_seq, self._status.pgm_seq)) - if self._status.pgm_seq != old_pgm_seq + 1: - raise YubiKeyUSBHIDError('YubiKey programming failed (seq %i not increased (%i))' % \ - (old_pgm_seq, self._status.pgm_seq)) + + if slot in [SLOT.CONFIG, SLOT.CONFIG2] or old_pgm_seq != 0: + if self._status.pgm_seq == old_pgm_seq + 1: + return + elif self._status.pgm_seq == 1: + return + + raise YubiKeyUSBHIDError('YubiKey programming failed (seq %i not increased (%i))' % \ + (old_pgm_seq, self._status.pgm_seq)) def _read_response(self, may_block=False): """ Wait for a response to become available, and read it. """
Correctly handle cases where programming counter shouldn't increase.
py
diff --git a/ftr/config.py b/ftr/config.py index <HASH>..<HASH> 100644 --- a/ftr/config.py +++ b/ftr/config.py @@ -29,6 +29,8 @@ import os import re import logging +LOGGER = logging.getLogger(__name__) + try: from ordered_set import OrderedSet @@ -41,7 +43,9 @@ except ImportError: try: from cacheops import cached -except ImportError: +except Exception, e: + LOGGER.warning(u'Cacheops seems not installed or not importable ' + u'(exception was: %s). Running without cache.', e) from functools import wraps def cached(*a, **kw): @@ -54,7 +58,6 @@ except ImportError: return wrapper return decorator -LOGGER = logging.getLogger(__name__) # defaults to 3 days of caching for website configuration CACHE_TIMEOUT = int(os.environ.get('PYTHON_FTR_CACHE_TIMEOUT', 345600))
Avoid edge-case import failure where cacheops is installed but not usable.
py
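A Python 3 rendering of the fallback above (the project itself is Python 2, hence the "except Exception, e" syntax in the diff): when the real decorator cannot be imported, a pass-through stand-in keeps the rest of the module working, just without caching.

from functools import wraps

try:
    from cacheops import cached  # optional dependency
except Exception:
    def cached(*cache_args, **cache_kwargs):
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                return func(*args, **kwargs)  # no caching, call straight through
            return wrapper
        return decorator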
diff --git a/post_office/utils.py b/post_office/utils.py index <HASH>..<HASH> 100644 --- a/post_office/utils.py +++ b/post_office/utils.py @@ -141,7 +141,7 @@ def create_attachments(attachment_files): attachments.append(attachment) - if opened_file: + if opened_file is not None: opened_file.close() return attachments
More verbose check for opened_file
py
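Why the explicit None check is safer, in runnable form: truthiness consults __bool__ or __len__, so a real-but-falsy file-like object would skip close() under the old check. EmptyBuffer is a hypothetical stand-in, not a post_office class.

class EmptyBuffer:
    def __len__(self):
        return 0          # len() == 0 makes the object falsy
    def close(self):
        print('closed')

opened_file = EmptyBuffer()
if opened_file:                # False: close() would be skipped
    opened_file.close()
if opened_file is not None:    # True: close() runs as intended
    opened_file.close()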
diff --git a/expfactory/cli/install.py b/expfactory/cli/install.py index <HASH>..<HASH> 100644 --- a/expfactory/cli/install.py +++ b/expfactory/cli/install.py @@ -73,9 +73,18 @@ def main(args,parser,subparser): dest = "%s/%s" %(folder,exp_id) bot.log("Installing %s to %s" %(exp_id, dest)) - - # Are we in a Container? + + # Building container + in_container = False if os.environ.get('SINGULARITY_IMAGE') is not None: + in_container = True + + # Running, live container + elif os.environ.get('SINGULARITY_CONTAINER') is not None: + in_container = True + + if in_container is True: + bot.log("Preparing experiment routes...") template = get_template('experiments/template.py') template = sub_template(template, '{{ exp_id }}', exp_id)
expfactory/cli/install.py: detect both build-time and running Singularity containers
py
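The two-variable detection above can be folded into one helper; this is just an equivalent compact sketch, not expfactory code:

import os

def in_singularity_container():
    # True for build-time images (SINGULARITY_IMAGE) and for
    # running instances (SINGULARITY_CONTAINER) alike.
    return any(os.environ.get(var) is not None
               for var in ('SINGULARITY_IMAGE', 'SINGULARITY_CONTAINER'))

print(in_singularity_container())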
diff --git a/saltapi/netapi/rest_cherrypy/app.py b/saltapi/netapi/rest_cherrypy/app.py index <HASH>..<HASH> 100644 --- a/saltapi/netapi/rest_cherrypy/app.py +++ b/saltapi/netapi/rest_cherrypy/app.py @@ -343,6 +343,12 @@ def hypermedia_in(): 'text/yaml': yaml_processor, } + # Do not process the body for POST requests that have specified no content + # or have not specified Content-Length + if (cherrypy.request.method.upper() == 'POST' + and cherrypy.request.headers.get('Content-Length', 0) == 0): + cherrypy.request.process_request_body = False + cherrypy.request.body.processors.clear() cherrypy.request.body.default_proc = cherrypy.HTTPError( 406, 'Content type not supported')
Do not process the request body if Content-Length is empty or missing
py
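The guard as a standalone function, assuming a plain dict of headers. One deliberate deviation, flagged because the diff compares the raw header value against the integer default: the sketch normalizes through int() so an explicit Content-Length of "0" is also caught.

def should_skip_body(method, headers):
    content_length = int(headers.get('Content-Length', 0) or 0)
    return method.upper() == 'POST' and content_length == 0

print(should_skip_body('POST', {}))                        # True: header absent
print(should_skip_body('POST', {'Content-Length': '0'}))   # True: declared empty
print(should_skip_body('POST', {'Content-Length': '42'}))  # False: has a payload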
diff --git a/progressbar/__about__.py b/progressbar/__about__.py index <HASH>..<HASH> 100644 --- a/progressbar/__about__.py +++ b/progressbar/__about__.py @@ -19,7 +19,7 @@ A Python Progressbar library to provide visual (yet text based) progress to long running operations. '''.strip().split()) __email__ = 'wolph@wol.ph' -__version__ = '3.46.1' +__version__ = '3.50.0' __license__ = 'BSD' __copyright__ = 'Copyright 2015 Rick van Hattem (Wolph)' __url__ = 'https://github.com/WoLpH/python-progressbar'
added marker/fill wrapper support to make coloring easier and improved ANSI (color) shell detection support
py
diff --git a/command/build_scripts.py b/command/build_scripts.py index <HASH>..<HASH> 100644 --- a/command/build_scripts.py +++ b/command/build_scripts.py @@ -12,7 +12,7 @@ from distutils.dep_util import newer from distutils.util import convert_path # check if Python is called on the first line with this expression -first_line_re = re.compile(r'^#!.*python(\s+.*)?') +first_line_re = re.compile(r'^#!.*python(\s+.*)?$') class build_scripts (Command):
[Bug #<I>] Tighten the pattern for the first line, so we don't adjust it when a versioned interpreter is supplied (#!.../python2 ...)
py
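The effect of the trailing $ demonstrated directly: without the anchor, the match may end right after "python", so "#!/usr/bin/python2" matches and gets rewritten; with the anchor, the stray "2" makes the match fail and the versioned interpreter is left alone.

import re

loose = re.compile(r'^#!.*python(\s+.*)?')
tight = re.compile(r'^#!.*python(\s+.*)?$')

line = '#!/usr/bin/python2'
print(bool(loose.match(line)))                     # True: would be wrongly adjusted
print(bool(tight.match(line)))                     # False: left untouched
print(bool(tight.match('#!/usr/bin/python -u')))   # True: plain interpreter still matches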
diff --git a/gns3server/web/route.py b/gns3server/web/route.py index <HASH>..<HASH> 100644 --- a/gns3server/web/route.py +++ b/gns3server/web/route.py @@ -15,11 +15,13 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. +import sys import json import jsonschema import asyncio import aiohttp import logging +import traceback log = logging.getLogger(__name__) @@ -110,9 +112,18 @@ class Route(object): response.set_status(e.status) response.json({"message": e.text, "status": e.status}) except VMError as e: + log.error("VM error detected: {type}".format(type=type(e)), exc_info=1) response = Response(route=route) response.set_status(500) response.json({"message": str(e), "status": 500}) + except Exception as e: + log.error("Uncaught exception detected: {type}".format(type=type(e)), exc_info=1) + response = Response(route=route) + response.set_status(500) + exc_type, exc_value, exc_tb = sys.exc_info() + lines = traceback.format_exception(exc_type, exc_value, exc_tb) + tb = "".join(lines) + response.json({"message": tb, "status": 500}) return response cls._routes.append((method, cls._path, control_schema))
Add traceback info when catching an exception to help with debugging.
py
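The capture-and-format idiom from the new handler, runnable on its own:

import sys
import traceback

def format_current_exception():
    # Grab the active exception and render the full traceback to a string,
    # the same shape the handler puts into its JSON error body.
    exc_type, exc_value, exc_tb = sys.exc_info()
    return ''.join(traceback.format_exception(exc_type, exc_value, exc_tb))

try:
    1 / 0
except ZeroDivisionError:
    print(format_current_exception())  # full "Traceback (most recent call last)" text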
diff --git a/salt/state.py b/salt/state.py index <HASH>..<HASH> 100644 --- a/salt/state.py +++ b/salt/state.py @@ -109,6 +109,24 @@ class State(object): if not data.has_key(aspec[0][ind]): errors.append('Missing paramater ' + aspec[0][ind]\ + ' for state ' + full) + # If this chunk has a recursive require, then it will cause a + # recursive loop when executing, check for it + reqdec = '' + if 'require' in data: + reqdec = 'require' + if 'watch' in data: + reqdec = 'watch' + if reqdec: + for req in data[reqdec]: + print req.keys() + if data['state'] == req.keys()[0]: + if data['name'] == req[req.keys()[0]]: + err = ('Recursive require detected in SLS {0} for' + ' require {1} in ID {2}').format( + data['__sls__'], + req, + data['__id__']) + errors.append(err) return errors def verify_high(self, high):
Add check for recursive requisite declarations
py
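A Python 3 sketch of the self-require check (the diff itself is Python 2 and still carries a leftover debug print). A chunk is flagged when a require or watch entry points back at the chunk's own state/name pair; the chunk dict below is a simplified model of Salt's lowstate, not the real structure.

def find_recursive_requires(chunk):
    errors = []
    for reqdec in ('require', 'watch'):
        for req in chunk.get(reqdec, []):
            state, name = next(iter(req.items()))
            if chunk['state'] == state and chunk['name'] == name:
                errors.append('Recursive {0} in ID {1}: {2}'.format(
                    reqdec, chunk['__id__'], req))
    return errors

chunk = {'state': 'file', 'name': '/etc/motd', '__id__': 'motd',
         'require': [{'file': '/etc/motd'}]}
print(find_recursive_requires(chunk))  # flags the self-reference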
diff --git a/tools/mpy-tool.py b/tools/mpy-tool.py index <HASH>..<HASH> 100755 --- a/tools/mpy-tool.py +++ b/tools/mpy-tool.py @@ -75,9 +75,9 @@ for n in qstrutil.static_qstr_list: global_qstrs.append(QStrType(n)) class QStrWindow: - def __init__(self, size_log2): + def __init__(self, size): self.window = [] - self.size = 1 << size_log2 + self.size = size def push(self, val): self.window = [val] + self.window[:self.size - 1] @@ -633,7 +633,6 @@ def read_qstr_and_pack(f, bytecode, qstr_win): bytecode.append(qst >> 8) def read_bytecode(file, bytecode, qstr_win): - QSTR_LAST_STATIC = len(qstrutil.static_qstr_list) while not bytecode.is_full(): op = read_byte(file, bytecode) f, sz = mp_opcode_format(bytecode.buf, bytecode.idx - 1, False)
tools/mpy-tool.py: Fix init of QStrWindow, and remove unused variable. The qstr window size is not log-2 encoded, it's just the actual number (but in mpy-tool.py this didn't lead to an error because the size is just used to truncate the window so it doesn't grow arbitrarily large in memory). Addresses issue #<I>.
py
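The off-by-log2 in two lines, plus the corrected constructor as it now stands in the diff:

size = 8
print(1 << size)  # 256: the window the old code allocated
print(size)       # 8: the window that was actually requested

class QStrWindow:
    def __init__(self, size):
        self.window = []
        self.size = size  # a direct count now, not a log2 exponent

    def push(self, val):
        self.window = [val] + self.window[:self.size - 1]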
diff --git a/evaluators/tempo_eval.py b/evaluators/tempo_eval.py index <HASH>..<HASH> 100755 --- a/evaluators/tempo_eval.py +++ b/evaluators/tempo_eval.py @@ -6,6 +6,7 @@ Usage: ./tempo_eval.py REFERENCE.TXT ESTIMATED.TXT ''' +from __future__ import print_function import argparse import sys @@ -57,10 +58,11 @@ if __name__ == '__main__': scores = mir_eval.tempo.evaluate(reference_tempi, reference_weight, estimated_tempi) - print "{} vs. {}".format(os.path.basename(parameters['reference_file']), - os.path.basename(parameters['estimated_file'])) + print("{} vs. {}".format(os.path.basename(parameters['reference_file']), + os.path.basename(parameters['estimated_file']))) + eval_utilities.print_evaluation(scores) if parameters['output_file']: - print 'Saving results to: ', parameters['output_file'] + print('Saving results to: {}'.format(parameters['output_file'])) eval_utilities.save_results(scores, parameters['output_file'])
py3ified tempo evaluator #<I>
py
diff --git a/mrcfile/version.py b/mrcfile/version.py index <HASH>..<HASH> 100644 --- a/mrcfile/version.py +++ b/mrcfile/version.py @@ -5,4 +5,4 @@ no dependencies. """ -__version__ = '0.1.1' +__version__ = '0.2.0'
Bump minor version to reflect numpy version dependency change
py
diff --git a/tests/run_tests.py b/tests/run_tests.py index <HASH>..<HASH> 100755 --- a/tests/run_tests.py +++ b/tests/run_tests.py @@ -11,14 +11,15 @@ import unittest import ptpython.completer import ptpython.filters #import ptpython.ipython -import ptpython.layout -import ptpython.python_input -import ptpython.style -import ptpython.validator import ptpython.eventloop +import ptpython.history_browser import ptpython.key_bindings +import ptpython.layout +import ptpython.python_input import ptpython.repl +import ptpython.style import ptpython.utils +import ptpython.validator if __name__ == '__main__':
Added history_browser to tests.
py
diff --git a/blargg/models.py b/blargg/models.py index <HASH>..<HASH> 100644 --- a/blargg/models.py +++ b/blargg/models.py @@ -1,8 +1,8 @@ try: from docutils.core import publish_parts as docutils_publish assert docutils_publish # placate flake8 -except ImportError: - docutils_publish = None +except ImportError: # pragma: no cover + docutils_publish = None # pragma: no cover from django.contrib.auth.models import User from django.contrib.sites.models import Site @@ -125,7 +125,6 @@ class Entry(models.Model): self.rendered_content = doc_parts['fragment'] elif self.content_format == "md": raise NotImplementedError # TODO: run thru markdown! - self.rendered_content = self.raw_content def _set_published(self): """Set the fields that need to be set in order for this thing to
Omit coverage on import errors; remove code that can't get executed
py
diff --git a/axes/handlers/proxy.py b/axes/handlers/proxy.py index <HASH>..<HASH> 100644 --- a/axes/handlers/proxy.py +++ b/axes/handlers/proxy.py @@ -119,5 +119,6 @@ class AxesProxyHandler(AbstractAxesHandler, AxesBaseHandler): return cls.get_implementation().post_save_access_attempt(instance, **kwargs) @classmethod + @toggleable def post_delete_access_attempt(cls, instance, **kwargs): return cls.get_implementation().post_delete_access_attempt(instance, **kwargs)
Add @toggleable decorator to signal handler. Conform to the toggleable logic for the AccessAttempt event handling.
py
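What a toggleable decorator plausibly looks like, as a hypothetical minimal sketch: django-axes' real implementation reads a setting, so the module-level flag below is only a stand-in for it.

from functools import wraps

HANDLING_ENABLED = True  # stand-in for the library's enable/disable setting

def toggleable(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        if HANDLING_ENABLED:
            return func(*args, **kwargs)
        return None  # handler silently skipped when toggled off
    return wrapper

@toggleable
def post_delete_access_attempt(instance, **kwargs):
    print('cleaning up after', instance)

post_delete_access_attempt('attempt-1')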
diff --git a/python_modules/dagster/dagster/daemon/sensor.py b/python_modules/dagster/dagster/daemon/sensor.py index <HASH>..<HASH> 100644 --- a/python_modules/dagster/dagster/daemon/sensor.py +++ b/python_modules/dagster/dagster/daemon/sensor.py @@ -275,10 +275,6 @@ def _evaluate_sensor( if sensor_runtime_data.pipeline_run_reactions: for pipeline_run_reaction in sensor_runtime_data.pipeline_run_reactions: origin_run_id = pipeline_run_reaction.pipeline_run.run_id - message = ( - f'Sensor "{external_sensor.name}" processed failure of run {origin_run_id}.' - ) - if pipeline_run_reaction.error: context.logger.error( f"Got a reaction request for run {origin_run_id} but execution errorred: {pipeline_run_reaction.error}" @@ -290,6 +286,10 @@ def _evaluate_sensor( ) else: # log to the original pipeline run + message = ( + f'Sensor "{external_sensor.name}" acted on run status ' + f"{pipeline_run_reaction.pipeline_run.status.value} of run {origin_run_id}." + ) instance.report_engine_event( message=message, pipeline_run=pipeline_run_reaction.pipeline_run )
fix log message from run status sensor (#<I>)
py
diff --git a/sllurp/llrp.py b/sllurp/llrp.py index <HASH>..<HASH> 100644 --- a/sllurp/llrp.py +++ b/sllurp/llrp.py @@ -39,6 +39,7 @@ class LLRPMessage: self.msgbytes = copy.copy(msgbytes) if not msgdict: self.remainder = self.deserialize() + self.peername = None def serialize (self): if self.msgdict is None: @@ -135,6 +136,7 @@ class LLRPClient (Protocol): self.start_inventory = start_inventory self.disconnect_when_done = disconnect_when_done self.standalone = standalone + self.peername = None def readerEventCallback (self, llrpMsg): """Function to handle ReaderEventNotification messages from the reader.""" @@ -152,9 +154,11 @@ class LLRPClient (Protocol): def connectionMade(self): logger.debug('socket connected') self.transport.setTcpKeepAlive(True) + self.peername = self.transport.getHandle().getpeername() def connectionLost(self, reason): logger.debug('reader disconnected: {}'.format(reason)) + self.peername = None self.state = LLRPClient.STATE_DISCONNECTED if self.standalone: try: @@ -297,6 +301,7 @@ class LLRPClient (Protocol): if run_callbacks: for fn in self.eventCallbacks[msgName]: + lmsg.peername = self.peername fn(lmsg) if bail:
record peer name (hostname of reader) in LLRPMessage
py
diff --git a/debreach/middleware.py b/debreach/middleware.py index <HASH>..<HASH> 100644 --- a/debreach/middleware.py +++ b/debreach/middleware.py @@ -83,4 +83,6 @@ class RandomCommentMiddleware(MiddlewareMixin): response.content = '{0}{1}'.format( force_text(response.content), comment) response._random_comment_applied = True + if response.has_header('Content-Length'): + response['Content-Length'] = str(len(response.content)) return response
Ensure we update content length in random comment middleware if it was set
py
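The invariant the middleware now preserves, shown with a plain dict standing in for Django's response object: once the body grows, a previously set Content-Length must be recomputed or clients may truncate the response at the old length.

def append_random_comment(response, comment):
    response['body'] = response['body'] + comment
    if 'Content-Length' in response['headers']:
        # Keep the declared length in step with the mutated body.
        response['headers']['Content-Length'] = str(len(response['body']))
    return response

resp = {'body': '<html></html>', 'headers': {'Content-Length': '13'}}
resp = append_random_comment(resp, '<!-- 4f2a9c -->')
print(resp['headers']['Content-Length'])  # 28, matching the new body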
diff --git a/nailgun/entities.py b/nailgun/entities.py index <HASH>..<HASH> 100644 --- a/nailgun/entities.py +++ b/nailgun/entities.py @@ -1772,12 +1772,14 @@ class ContentViewVersion( def __init__(self, server_config=None, **kwargs): self._fields = { 'content_view': entity_fields.OneToOneField(ContentView), - 'environment': entity_fields.OneToManyField(Environment), + 'environments': entity_fields.OneToManyField(LifecycleEnvironment), 'major': entity_fields.IntegerField(), 'minor': entity_fields.IntegerField(), 'package_count': entity_fields.IntegerField(), 'puppet_module': entity_fields.OneToManyField(PuppetModule), 'version': entity_fields.StringField(), + 'repositories': entity_fields.OneToManyField(Repository), + 'description': entity_fields.StringField(), } self._meta = { 'api_path': 'katello/api/v2/content_view_versions',
Add more attrs to ContentViewVersion. This commit adds repositories & description fields to ContentViewVersion. It also fixes an issue with a CVV environment, as it should have been LifecycleEnvironment.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -17,6 +17,7 @@ from setuptools import Extension, setup extra_includes = [] extra_library_dirs = [] qpdf_source_tree = environ.get('QPDF_SOURCE_TREE', '') +qpdf_build_libdir = environ.get('QPDF_BUILD_LIBDIR', '') # If CFLAGS is defined, disable any efforts to shim the build, because # the caller is probably a maintainer and knows what they are doing. @@ -26,7 +27,17 @@ if not cflags_defined: if qpdf_source_tree: # Point this to qpdf source tree built with shared libraries extra_includes.append(join(qpdf_source_tree, 'include')) - extra_library_dirs.append(join(qpdf_source_tree, 'libqpdf/build/.libs')) + if not qpdf_build_libdir: + # Pre-cmake qpdf build + old_libdir = join(qpdf_source_tree, 'libqpdf/build/.libs') + if exists(old_libdir): + qpdf_build_libdir = old_libdir + if not qpdf_build_libdir: + raise Exception( + 'Please set QPDF_BUILD_LIBDIR to the directory' + ' containing your libqpdf.so built from' + ' $QPDF_SOURCE_TREE') + extra_library_dirs.append(join(qpdf_build_libdir)) if 'bsd' in sys.platform: extra_includes.append('/usr/local/include')
Support QPDF_BUILD_LIBDIR in addition to QPDF_SOURCE_TREE. This is necessary to test with qpdf built with cmake. In a cmake-based build, the location of the libraries is relative to the build directory, which is determined by the user running the build, so we can't figure it out directly from $QPDF_SOURCE_TREE anymore.
py
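The resolution order as a standalone helper: explicit override first, then the legacy pre-cmake location, then a loud failure. Paths and variable names follow the diff; the function itself is illustrative.

from os import environ
from os.path import exists, join

def resolve_libdir(source_tree):
    libdir = environ.get('QPDF_BUILD_LIBDIR', '')
    if not libdir:
        legacy = join(source_tree, 'libqpdf/build/.libs')  # pre-cmake layout
        if exists(legacy):
            libdir = legacy
    if not libdir:
        raise RuntimeError('set QPDF_BUILD_LIBDIR to the directory '
                           'containing libqpdf built from the source tree')
    return libdir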
diff --git a/nodeconductor/structure/views.py b/nodeconductor/structure/views.py index <HASH>..<HASH> 100644 --- a/nodeconductor/structure/views.py +++ b/nodeconductor/structure/views.py @@ -321,6 +321,8 @@ class CustomerPermissionViewSet(rf_mixins.CreateModelMixin, group__customerrole__customer__roles__role_type=models.CustomerRole.OWNER) | Q(group__customerrole__customer__projects__roles__permission_group__user=self.request.user) + | + Q(group__customerrole__customer__project_groups__roles__permission_group__user=self.request.user) ).distinct() return queryset
filter by group manager in customer view set (NC-<I>)
py
diff --git a/tilematrix/io.py b/tilematrix/io.py index <HASH>..<HASH> 100644 --- a/tilematrix/io.py +++ b/tilematrix/io.py @@ -73,7 +73,7 @@ def read_vector_window( 'geometry': mapping(geom) } - yield feature + yield feature def read_raster_window( input_file,
fix bug where only one feature was returned by read_vector_window
py
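The entire bug is indentation, which a minimal generator pair makes visible: a yield after the loop fires once, a yield inside it fires per feature.

def read_features_buggy(features):
    for feature in features:
        processed = feature.upper()
    yield processed            # runs once, after the loop

def read_features_fixed(features):
    for feature in features:
        processed = feature.upper()
        yield processed        # runs once per feature

print(list(read_features_buggy(['a', 'b', 'c'])))  # ['C']
print(list(read_features_fixed(['a', 'b', 'c'])))  # ['A', 'B', 'C']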
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -17,7 +17,6 @@ setup(name='andes', 'matplotlib', 'progressbar2', 'python_utils', - 'sympy', ], packages=[ 'andes',
removed sympy from required package list
py