diff
stringlengths
139
3.65k
message
stringlengths
8
627
diff_languages
stringclasses
1 value
diff --git a/code/test_png.py b/code/test_png.py index <HASH>..<HASH> 100644 --- a/code/test_png.py +++ b/code/test_png.py @@ -22,6 +22,7 @@ except ImportError: from StringIO import StringIO as BytesIO import itertools import struct +import sys # http://www.python.org/doc/2.4.4/lib/module-unittest.html import unittest import zlib
import sys so that the warnings about skipping the numpy tests are seen.
py
diff --git a/windpowerlib/tools.py b/windpowerlib/tools.py index <HASH>..<HASH> 100644 --- a/windpowerlib/tools.py +++ b/windpowerlib/tools.py @@ -60,8 +60,9 @@ def linear_interpolation_extrapolation(df, target_height): ... columns=[np.array(['wind_speed', ... 'wind_speed']), ... np.array([10, 80])]) - >>> round(linear_interpolation_extrapolation( - ... weather_df['wind_speed'], 100)[0], 2) + >>> value = linear_interpolation_extrapolation( + ... weather_df['wind_speed'], 100)[0] + >>> round(value * 100) / 100 6.86 """
Change test because python <I> on Travis has problems
py
diff --git a/flubber/loop.py b/flubber/loop.py index <HASH>..<HASH> 100644 --- a/flubber/loop.py +++ b/flubber/loop.py @@ -36,6 +36,7 @@ def get_loop(): class Handler(object): + __slots__ = ('_callback', '_args', '_kwargs', '_cancelled') def __init__(self, callback, args=(), kwargs={}): self._callback = callback @@ -61,6 +62,7 @@ class Handler(object): class Timer(Handler): + __slots__ = ('_timer') def __init__(self, callback, args=(), kwargs={}, timer=None): super(Timer, self).__init__(callback, args, kwargs)
Added __slots__ to Handler and Timer objects
py
diff --git a/gwpy/plotter/segments.py b/gwpy/plotter/segments.py index <HASH>..<HASH> 100644 --- a/gwpy/plotter/segments.py +++ b/gwpy/plotter/segments.py @@ -209,7 +209,7 @@ class SegmentAxes(TimeSeriesAxes): if y is None: y = self.get_next_y() # get flag name - name = kwargs.pop('label', flag.texname) + name = kwargs.pop('label', flag.name) # get epoch try:
SegmentAxes.plot_dqflag: don't use flag.texname the underlying `SegmentAxes.plot_segmentlist` takes care of underscore escaping, we don't need to use `flag.texname` here. probably best to deprecate the use of `DataQualityFlag.texname`
py
diff --git a/treq_kerberos/__init__.py b/treq_kerberos/__init__.py index <HASH>..<HASH> 100644 --- a/treq_kerberos/__init__.py +++ b/treq_kerberos/__init__.py @@ -31,13 +31,13 @@ def get(url, headers={}, **kwargs): if auth.force_preemptive: # Save a round-trip and set the Negotiate header on the first req. headers['Authorization'] = yield negotiate_header(url) - response = yield treq.get(url, headers, **kwargs) + response = yield treq.get(url=url, headers=headers, **kwargs) # Retry if we got a 401 / Negotiate response. if response.code == 401 and isinstance(auth, TreqKerberosAuth): auth_mechs = response.headers.getRawHeaders('WWW-Authenticate') if 'Negotiate' in auth_mechs: headers['Authorization'] = yield negotiate_header(url) - response = yield treq.get(url, headers, **kwargs) + response = yield treq.get(url=url, headers=headers, **kwargs) defer.returnValue(response)
call treq.get() with kwargs When writing tests for this, I found StubTreq cannot handle a mixture of unnamed args and kwargs. Use kwargs universally with treq's get() to make this easier to test.
py
diff --git a/linkcheck/checker/fileurl.py b/linkcheck/checker/fileurl.py index <HASH>..<HASH> 100644 --- a/linkcheck/checker/fileurl.py +++ b/linkcheck/checker/fileurl.py @@ -81,7 +81,9 @@ def is_absolute_path (path): with a drive letter. On all other systems absolute paths start with a slash.""" if os.name == 'nt': - return re.search(r"^[a-zA-Z]:", path) + if re.search(r"^[a-zA-Z]:", path): + return True + path = path.replace("\\", "/") return path.startswith("/")
Treat Windows UNC paths as absolute paths.
py
diff --git a/mistletoe/core_tokens.py b/mistletoe/core_tokens.py index <HASH>..<HASH> 100644 --- a/mistletoe/core_tokens.py +++ b/mistletoe/core_tokens.py @@ -277,7 +277,7 @@ def is_opener(start, end, string): is_right = is_right_delimiter(start, end, string) return (is_left_delimiter(start, end, string) and (not is_right - or (is_right and preceded_by(end, string, punctuation)))) + or (is_right and preceded_by(start, string, punctuation)))) def is_closer(start, end, string):
fixed: preceded_by takes wrong starting index
py
diff --git a/tests/test_timeout_decorator.py b/tests/test_timeout_decorator.py index <HASH>..<HASH> 100644 --- a/tests/test_timeout_decorator.py +++ b/tests/test_timeout_decorator.py @@ -90,6 +90,7 @@ def test_timeout_pickle_error(): def test_timeout_custom_exception_message(): message = "Custom fail message" + @timeout(seconds=1, exception_message=message) def f(): time.sleep(2)
Can set exception message in decorator with Travis CI fixes.
py
diff --git a/orpsoc/core.py b/orpsoc/core.py index <HASH>..<HASH> 100644 --- a/orpsoc/core.py +++ b/orpsoc/core.py @@ -128,8 +128,7 @@ class Core: os.path.join(dst_dir, f)) else: logger.debug(" File " + f + " doesn't exist - may it will be added as patch later?") - print("File " + os.path.join(src_dir, f) + " doesn't exist") - exit(1) + print("File " + os.path.join(src_dir, f) + " doesn't exist - maybe it will be added as patch later?") logger.debug('export() -Done-') def patch(self, dst_dir):
Fix crash when patch adds a file to a core
py
diff --git a/slacksocket/client.py b/slacksocket/client.py index <HASH>..<HASH> 100644 --- a/slacksocket/client.py +++ b/slacksocket/client.py @@ -297,7 +297,7 @@ class SlackSocket(object): if matching: return channel_type, matching - return None + return [None, False] def _lookup_channel_by_name(self, name): """
Return [None, False] instead of None since the caller expects vector This fixes #<I>
py
diff --git a/BDMesh/MeshUniform1D.py b/BDMesh/MeshUniform1D.py index <HASH>..<HASH> 100644 --- a/BDMesh/MeshUniform1D.py +++ b/BDMesh/MeshUniform1D.py @@ -111,10 +111,10 @@ class MeshUniform1D(Mesh1D): step_ratio = max_step / min_step if check_if_integer(step_ratio, 1e-8): shift = (mesh.physical_nodes[:min_size] - self.physical_nodes[:min_size]) / min_step + print('SHIFT: #3.12f, step ratio: %2.12f', (shift[0], step_ratio)) if check_if_integer(shift[0], 1e-6): return True else: - print('SHIFT #3.12f', shift[0]) return False else: print(abs(m.floor(step_ratio) - step_ratio))
correcting check if two uniform meshes are aligned.
py
diff --git a/yellowbrick/classifier/class_prediction_error.py b/yellowbrick/classifier/class_prediction_error.py index <HASH>..<HASH> 100644 --- a/yellowbrick/classifier/class_prediction_error.py +++ b/yellowbrick/classifier/class_prediction_error.py @@ -21,12 +21,17 @@ Shows the balance of classes and their associated predictions. import numpy as np from sklearn.utils.multiclass import unique_labels -from sklearn.metrics._classification import _check_targets from yellowbrick.draw import bar_stack from yellowbrick.classifier.base import ClassificationScoreVisualizer from yellowbrick.exceptions import ModelError, YellowbrickValueError, NotFitted +try: + # See #1124: this allows compatibility for scikit-learn >= 0.20 + from sklearn.metrics._classification import _check_targets +except ImportError: + from sklearn.metrics.classification import _check_targets + ########################################################################## ## Class Prediction Error Chart
Handling scikit-learn's public vs. private API (part 1) (#<I>)
py
diff --git a/aioxmpp/ssl_transport.py b/aioxmpp/ssl_transport.py index <HASH>..<HASH> 100644 --- a/aioxmpp/ssl_transport.py +++ b/aioxmpp/ssl_transport.py @@ -696,9 +696,9 @@ def create_starttls_connection( if all(str(exc) == model for exc in exceptions): raise exceptions[0] - exc = OSError("Multiple exceptions: {}".format( - ", ".join(map(str, exceptions)))) - exc.exceptions = exceptions + exc = errors.MultiOSError( + "could not connect to [{}]:{}".format(host, port), + exceptions) raise exc elif sock is None: raise ValueError("sock must not be None if host and/or port are None")
Use MultiOSError in ssl_transport
py
diff --git a/ixexplorer/ixe_app.py b/ixexplorer/ixe_app.py index <HASH>..<HASH> 100644 --- a/ixexplorer/ixe_app.py +++ b/ixexplorer/ixe_app.py @@ -168,7 +168,7 @@ class IxeSession(IxeObject): self.api.call_rc('ixClearTimeStamp {}'.format(port_list_for_packet_groups)) self.api.call_rc('ixStartPacketGroups {}'.format(port_list_for_packet_groups)) self.api.call_rc('ixStartTransmit {}'.format(port_list)) - time.sleep(2) + time.sleep(0.2) if blocking: self.wait_transmit(*ports) @@ -192,7 +192,7 @@ class IxeSession(IxeObject): port_list = self.set_ports_list(*ports) self.api.call_rc('ixStopTransmit {}'.format(port_list)) - time.sleep(2) + time.sleep(0.2) def wait_transmit(self, *ports): """ Wait for traffic end on ports.
Decreased sleep time in start/stop tx
py
diff --git a/neat/config.py b/neat/config.py index <HASH>..<HASH> 100644 --- a/neat/config.py +++ b/neat/config.py @@ -3,6 +3,7 @@ from __future__ import print_function import os import sys +import warnings try: from configparser import ConfigParser @@ -39,7 +40,7 @@ class ConfigParameter(object): if self.default is None: raise RuntimeError('Missing configuration item: ' + self.name) else: - print("Using default '{!r}' for '{!s}'".format(self.default, self.name), file=sys.stderr) + warnings.warn("Using default {!r} for '{!s}'".format(self.default, self.name), DeprecationWarning) value = self.default try:
Warning re default changed to DeprecationWarning - reduce noise
py
diff --git a/web_pdb/__init__.py b/web_pdb/__init__.py index <HASH>..<HASH> 100644 --- a/web_pdb/__init__.py +++ b/web_pdb/__init__.py @@ -52,6 +52,7 @@ class WebPdb(Pdb): with extra convenience features. """ active_instance = None + null = object() def __init__(self, host='', port=5555, patch_stdstreams=False): """ @@ -104,8 +105,13 @@ class WebPdb(Pdb): i(nspect) object Inspect an object """ - obj = self.curframe.f_locals.get(arg) or self.curframe.f_globals.get(arg) - if obj is not None: + if arg in self.curframe.f_locals: + obj = self.curframe.f_locals[arg] + elif arg in self.curframe.f_globals: + obj = self.curframe.f_globals[arg] + else: + obj = WebPdb.null + if obj is not WebPdb.null: self.console.writeline( '{0} = {1}:\n'.format(arg, type(obj)) )
Fix a bug in inspect command The command returned an error message for None objects.
py
diff --git a/pyout/interface.py b/pyout/interface.py index <HASH>..<HASH> 100644 --- a/pyout/interface.py +++ b/pyout/interface.py @@ -8,9 +8,9 @@ from contextlib import contextmanager from functools import partial import inspect from logging import getLogger -import multiprocessing from multiprocessing.dummy import Pool import sys +import threading from pyout.common import ContentWithSummary from pyout.common import RowNormalizer @@ -311,7 +311,7 @@ class Writer(object): self._pool = Pool(processes=self._max_workers) if self._lock is None: lgr.debug("Initializing lock") - self._lock = multiprocessing.Lock() + self._lock = threading.Lock() for cols, fn in callables: cb_func = partial(callback, self, cols)
interface: Lock with threading.Lock() We'll be switching over to concurrent.futures, so we might as well use what is used in Lib/concurrent/futures/_base.py (though I think using multiprocessing.Lock() would work fine too).
py
diff --git a/python_modules/dagster-graphql/dagster_graphql/implementation/pipeline_run_storage.py b/python_modules/dagster-graphql/dagster_graphql/implementation/pipeline_run_storage.py index <HASH>..<HASH> 100644 --- a/python_modules/dagster-graphql/dagster_graphql/implementation/pipeline_run_storage.py +++ b/python_modules/dagster-graphql/dagster_graphql/implementation/pipeline_run_storage.py @@ -3,7 +3,7 @@ class PipelineRunObservableSubscribe(object): self.instance = instance self.run_id = run_id self.observer = None - self.after_cursor = after_cursor or -1 + self.after_cursor = after_cursor if after_cursor is not None else -1 def __call__(self, observer): self.observer = observer
[events] cursor fix Summary: This was always off but we didnt actually use the cursor until the instance stuff. Test Plan: execute a pipeline - no more double first event Reviewers: #ft, bengotow, prha Reviewed By: #ft, prha Differential Revision: <URL>
py
diff --git a/tests/test_uniform_block.py b/tests/test_uniform_block.py index <HASH>..<HASH> 100644 --- a/tests/test_uniform_block.py +++ b/tests/test_uniform_block.py @@ -77,7 +77,7 @@ class TestCase(unittest.TestCase): self.assertAlmostEqual(b, 3004.0) def test_2(self): - min_offset: int = self.ctx.info['GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT'] + min_offset = self.ctx.info['GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT'] u3_len = (min_offset // 4) + 1 u3_data = list(range(u3_len))
Remove unecessary type hint in uniform_block tests It appears can actually cause problems depending on python version
py
diff --git a/ontquery/terms.py b/ontquery/terms.py index <HASH>..<HASH> 100644 --- a/ontquery/terms.py +++ b/ontquery/terms.py @@ -424,7 +424,9 @@ class OntTerm(OntId): for keyword, value in result.items(): # TODO open vs closed world orig_value = self.orig_kwargs.get(keyword, None) - if orig_value is not None and value is not None and orig_value != value: + empty_iterable = hasattr(orig_value, '__iter__') and not orig_value + if ((orig_value is not None and not empty_iterable) + and value is not None and orig_value != value): if str(orig_value) == value: pass # rdflib.URIRef(a) != Literl(a) != a so we have to convert elif (keyword == 'label' and
OntTerm ignore None and empty iterable when checking for differences
py
diff --git a/chalice/analyzer.py b/chalice/analyzer.py index <HASH>..<HASH> 100644 --- a/chalice/analyzer.py +++ b/chalice/analyzer.py @@ -1,8 +1,11 @@ """Source code analyzer for chalice app.""" import ast +from typing import Dict, Set # noqa + def get_client_calls(source_code): + # type: (str) -> Dict[str, Set[str]] """Return all clients calls made in the application. :returns: A dict of service_name -> set([client calls]). @@ -19,7 +22,7 @@ class AWSOperationTracker(ast.NodeVisitor): def __init__(self): # Mapping of AWS clients created to method # calls used. client_name -> [methods_called] - self.clients = {} + self.clients = {} # type: Dict[str, Set[str]] # These are the names bound in the module # scope for clients that are created. self._client_identifiers = {}
Add type hints to analyzer I didn't bother adding type hints to AST visitor, as that code is likely to change soon.
py
diff --git a/ella/newman/markup/models.py b/ella/newman/markup/models.py index <HASH>..<HASH> 100644 --- a/ella/newman/markup/models.py +++ b/ella/newman/markup/models.py @@ -4,11 +4,11 @@ from django.utils.translation import ugettext_lazy as _ from ella.core.cache.utils import CachedForeignKey, CachedGenericForeignKey -class Processor(models.Model): +class TextProcessor(models.Model): function = models.CharField(max_length=96, unique=True) name = models.CharField(max_length=96, blank=True) - opts = models.CharField(max_length=255, blank=True) + processor_options = models.CharField(max_length=255, blank=True) def __unicode__(self): return self.name @@ -27,10 +27,10 @@ class Processor(models.Model): verbose_name_plural = (_('Text processors')) -class SrcText(models.Model): +class SourceText(models.Model): """Model for source texts.""" - processor = CachedForeignKey(Processor) + processor = CachedForeignKey(TextProcessor) ct = CachedForeignKey(ContentType) obj_id = models.PositiveIntegerField()
markup models renamed. refs #<I>
py
diff --git a/fragments/__init__.py b/fragments/__init__.py index <HASH>..<HASH> 100644 --- a/fragments/__init__.py +++ b/fragments/__init__.py @@ -4,7 +4,7 @@ from __future__ import unicode_literals import os import codecs -__version__ = (1,2,3) +__version__ = (1,2,4) class FragmentsError(Exception): pass
version bump to <I> this is so people can get the fix in 3a5adb<I>b<I>f<I>b<I>ed<I>d<I>bb1e9
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -42,7 +42,7 @@ setup( classifiers=[ "Development Status :: 4 - Beta", "Topic :: Software Development", - "License :: Apache Software License", + "License :: OSI Approved :: Apache Software License", "Operating System :: POSIX :: Linux", "Programming Language :: Python", ],
fix annoying license problem with pypi
py
diff --git a/python/setup.py b/python/setup.py index <HASH>..<HASH> 100644 --- a/python/setup.py +++ b/python/setup.py @@ -196,8 +196,11 @@ if setup_spec.type == SetupType.RAY: "opentelemetry-api==1.1.0", "opentelemetry-sdk==1.1.0", "opentelemetry-exporter-otlp==1.1.0" ], - "cpp": ["ray-cpp==" + setup_spec.version] } + + if os.getenv("RAY_EXTRA_CPP") == "1": + setup_spec.extras["cpp"] = ["ray-cpp==" + setup_spec.version] + if sys.version_info >= (3, 7, 0): setup_spec.extras["k8s"].append("kopf")
Don't add ray-cpp wheel to extras by default (#<I>)
py
diff --git a/command/build_ext.py b/command/build_ext.py index <HASH>..<HASH> 100644 --- a/command/build_ext.py +++ b/command/build_ext.py @@ -117,6 +117,9 @@ class build_ext (Command): if exec_py_include != py_include: self.include_dirs.insert (0, exec_py_include) + if type (self.libs) is StringType: + self.libs = [self.libs] + # XXX how the heck are 'self.define' and 'self.undef' supposed to # be set?
In 'finalize_options()': if 'self.libs' is a string, make it a singleton list.
py
diff --git a/test/test_bsplines.py b/test/test_bsplines.py index <HASH>..<HASH> 100644 --- a/test/test_bsplines.py +++ b/test/test_bsplines.py @@ -21,7 +21,7 @@ def test(): ll = geom.add_line_loop([s1, s2]) geom.add_plane_surface(ll) - ref = 0.9156598733673261 + ref = 0.9156598733673261 if pygmsh.get_gmsh_major_version() < 4 else 0.75 points, cells, _, _, _ = pygmsh.generate_mesh(geom) assert abs(compute_volume(points, cells) - ref) < 1.0e-2 * ref return points, cells
Gmsh 4 does BSpline differently, so test add_bspline accordingly, fixes #<I>
py
diff --git a/spyderlib/utils/module_completion.py b/spyderlib/utils/module_completion.py index <HASH>..<HASH> 100644 --- a/spyderlib/utils/module_completion.py +++ b/spyderlib/utils/module_completion.py @@ -143,8 +143,10 @@ def get_submodules(mod): for sm in submods: sm_name = sm[1] submodules.append(sm_name) - except: + except ImportError: return [] + except: + return [mod] return submodules
utils/module_completion: Make get_submodules return the module under inspection if pkgutil fails - For some modules (e.g. os, os.path) pkgutil can't obtain its submodules but nevertheless they are importable, so they have to be added to the submodules database.
py
diff --git a/xcessiv/views.py b/xcessiv/views.py index <HASH>..<HASH> 100644 --- a/xcessiv/views.py +++ b/xcessiv/views.py @@ -97,6 +97,6 @@ def verify_extraction_test_dataset(path): if xcnb['extraction']['test_dataset']['method'] is None: raise exceptions.UserError('Xcessiv is not configured to use a test dataset') - X_train, y_train = parsers.return_test_data_from_json(xcnb['extraction']) + X_test, y_test = parsers.return_test_data_from_json(xcnb['extraction']) - X_shape, y_shape = X_train \ No newline at end of file + return jsonify(functions.verify_dataset(X_test, y_test))
add view to verify extraction of test dataset
py
diff --git a/ubireader/ubi/display.py b/ubireader/ubi/display.py index <HASH>..<HASH> 100755 --- a/ubireader/ubi/display.py +++ b/ubireader/ubi/display.py @@ -75,13 +75,13 @@ def block(block, tab='\t'): buf += '\n' buf += '\t%sErase Count Header\n' % (tab) buf += '\t%s---------------------\n' % (tab) - ec_hdr(block.ec_hdr, '\t\t%s' % tab) + buf += ec_hdr(block.ec_hdr, '\t\t%s' % tab) if block.vid_hdr and not block.vid_hdr.errors: buf += '\n' - buf += '\t%sVID Header Header\n' % (tab) + buf += '\t%sVID Header\n' % (tab) buf += '\t%s---------------------\n' % (tab) - vid_hdr(block.vid_hdr, '\t\t%s' % tab) + buf += vid_hdr(block.vid_hdr, '\t\t%s' % tab) if block.vtbl_recs: buf += '\n'
Fixed certain ubi block header info not displaying.
py
diff --git a/skillful/__init__.py b/skillful/__init__.py index <HASH>..<HASH> 100644 --- a/skillful/__init__.py +++ b/skillful/__init__.py @@ -2,4 +2,4 @@ from .controller import Skill -__version__ = '0.3.2' +__version__ = '0.3.3'
Bumped to <I>.
py
diff --git a/fluo/middleware/locale.py b/fluo/middleware/locale.py index <HASH>..<HASH> 100644 --- a/fluo/middleware/locale.py +++ b/fluo/middleware/locale.py @@ -27,6 +27,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import re +from django.core.exceptions import ImproperlyConfigured from django.utils.cache import patch_vary_headers from django.utils import translation from django.conf import settings
import ImproperlyConfigured exception in locale middleware
py
diff --git a/bambou/nurest_object.py b/bambou/nurest_object.py index <HASH>..<HASH> 100644 --- a/bambou/nurest_object.py +++ b/bambou/nurest_object.py @@ -678,6 +678,7 @@ class NURESTObject(object): Args: nurest_object: the NURESTObject object to manage method: the HTTP method to use (GET, POST, PUT, DELETE) + async: True or False to make an asynchronous request callback: the callback to call at the end handler: a custom handler to call when complete, before calling the callback @@ -685,6 +686,10 @@ class NURESTObject(object): Returns the object and connection (object, connection) """ + # Force asynchronous request when having a callback + if callback: + async = True + url = None if method == HTTP_METHOD_POST:
Forced async to true when having a callback method
py
diff --git a/curdling/web/__init__.py b/curdling/web/__init__.py index <HASH>..<HASH> 100644 --- a/curdling/web/__init__.py +++ b/curdling/web/__init__.py @@ -1,4 +1,4 @@ -future __from__ import unicode_literals, print_function, absolute_import +from __future__ import unicode_literals, print_function, absolute_import from flask import Flask, render_template, send_file, request, Response from flask import Blueprint, current_app, url_for from gevent.pywsgi import WSGIServer
Fix a mistake caused by lack of sleep
py
diff --git a/datadog_checks_base/datadog_checks/base/checks/win/winpdh.py b/datadog_checks_base/datadog_checks/base/checks/win/winpdh.py index <HASH>..<HASH> 100644 --- a/datadog_checks_base/datadog_checks/base/checks/win/winpdh.py +++ b/datadog_checks_base/datadog_checks/base/checks/win/winpdh.py @@ -130,7 +130,7 @@ class WinPDHCounter(object): # for more detail # create a table of the keys to the counter index, because we want to look up - # by counter name. Some systems may have an odd number of entries, don't + # by counter name. Some systems may have an odd number of entries, don't # accidentaly index at val[len(val] for idx in range(0, len(val) - 1, 2): WinPDHCounter.pdh_counter_dict[val[idx + 1]].append(val[idx])
Fix failing style checks (#<I>)
py
diff --git a/python/ccxt/base/exchange.py b/python/ccxt/base/exchange.py index <HASH>..<HASH> 100644 --- a/python/ccxt/base/exchange.py +++ b/python/ccxt/base/exchange.py @@ -1058,7 +1058,9 @@ class Exchange(object): def parse_timeframe(self, timeframe): amount = int(timeframe[0:-1]) unit = timeframe[-1] - if 'M' in unit: + if 'y' in unit: + scale = 60 * 60 * 24 * 365 + elif 'M' in unit: scale = 60 * 60 * 24 * 30 elif 'w' in unit: scale = 60 * 60 * 24 * 7
added support for "y" to parse_timeframe in py #<I>
py
diff --git a/pyensembl/gene.py b/pyensembl/gene.py index <HASH>..<HASH> 100644 --- a/pyensembl/gene.py +++ b/pyensembl/gene.py @@ -55,7 +55,7 @@ class Gene(Locus): WHERE gene_id = ? AND feature = 'transcript' """ - cursor = db.execute(transcript_ids_query, [self.id]) + cursor = self.db.execute(transcript_ids_query, [self.id]) results = cursor.fetchall() # We're doing a SQL query for each transcript ID to fetch @@ -76,6 +76,6 @@ class Gene(Locus): for exon in transcript.exons: if exon.id not in exons_dict: exons_dict[exon.id] = exon - self._exons = list(exons.values()) + self._exons = list(exons_dict.values()) return self._exons
fix exons and transcripts properties on Gene
py
diff --git a/salt/modules/network.py b/salt/modules/network.py index <HASH>..<HASH> 100644 --- a/salt/modules/network.py +++ b/salt/modules/network.py @@ -635,10 +635,12 @@ def connect(host, port=None, proto=None, timeout=5): if not host: ret['result'] = False ret['comment'] = 'Required argument, host, is missing.' + return ret if not port: ret['result'] = False ret['comment'] = 'Required argument, port, is missing.' + return ret if salt.utils.validate.net.ipv4_addr(host) or salt.utils.validate.net.ipv6_addr(host): address = host @@ -650,6 +652,7 @@ def connect(host, port=None, proto=None, timeout=5): __proto = socket.SOL_UDP else: __proto = socket.SOL_TCP + proto = 'tcp' (family, socktype,
minor fix, if protocol is not specified then default to TCP but ensure proto variable is set to tcp for return data.
py
diff --git a/txkoji/__init__.py b/txkoji/__init__.py index <HASH>..<HASH> 100644 --- a/txkoji/__init__.py +++ b/txkoji/__init__.py @@ -172,11 +172,13 @@ class Connection(object): :param build_id: ``int``, for example 12345 :returns: deferred that when fired returns a Build (Munch, dict-like) - object representing this Koji build. + object representing this Koji build, or None if no build was + found. """ buildinfo = yield self.call('getBuild', build_id, **kwargs) build = Build.fromDict(buildinfo) - build.connection = self + if build: + build.connection = self defer.returnValue(build) @defer.inlineCallbacks @@ -210,12 +212,14 @@ class Connection(object): :param task_id: ``int``, for example 12345 :returns: deferred that when fired returns a Task (Munch, dict-like) - object representing this Koji task. + object representing this Koji task, or none if no task was + found. """ kwargs['request'] = True taskinfo = yield self.call('getTaskInfo', task_id, **kwargs) task = Task.fromDict(taskinfo) - task.connection = self + if task: + task.connection = self defer.returnValue(task) @defer.inlineCallbacks
handle missing builds/tasks Prior to this change, we would crash when setting ".connection" for NoneType. Clarify that these methods may return None if the task or build was not present on the hub.
py
diff --git a/tornado/ioloop.py b/tornado/ioloop.py index <HASH>..<HASH> 100644 --- a/tornado/ioloop.py +++ b/tornado/ioloop.py @@ -912,12 +912,12 @@ class PollIOLoop(IOLoop): # The check doesn't need to be guarded by the callback lock, # since the GIL makes all access to it atomic, and it can # only ever transition to True - if self._closing: - raise RuntimeError("IOLoop is closing") if thread.get_ident() != self._thread_ident: # If we're not on the IOLoop's thread, we need to synchronize # with other threads, or waking logic will induce a race. with self._callback_lock: + if self._closing: + raise RuntimeError("IOLoop is closing") list_empty = not self._callbacks self._callbacks.append(functools.partial( stack_context.wrap(callback), *args, **kwargs)) @@ -929,6 +929,8 @@ class PollIOLoop(IOLoop): # relatively expensive, so we try to avoid it when we can. self._waker.wake() else: + if self._closing: + raise RuntimeError("IOLoop is closing") # If we're on the IOLoop's thread, we don't need the lock, # since we don't need to wake anyone, just add the callback. # Blindly insert into self._callbacks.
Move closed check inside the callback_lock
py
diff --git a/gridmap/job.py b/gridmap/job.py index <HASH>..<HASH> 100644 --- a/gridmap/job.py +++ b/gridmap/job.py @@ -301,7 +301,12 @@ def _append_job_to_session(session, job, uniq_id, job_num, temp_dir='/scratch/', # Create temp directory if necessary if not os.path.exists(temp_dir): - os.makedirs(temp_dir) + try: + os.makedirs(temp_dir) + except OSError: + logging.warning(("Failed to create temporary directory " + + "{0}. Your jobs may not start " + + "correctly.").format(temp_dir)) jobid = session.runJob(jt)
Add warning if you can't create temporary directory instead of just dying.
py
diff --git a/plenum/cli/cli.py b/plenum/cli/cli.py index <HASH>..<HASH> 100644 --- a/plenum/cli/cli.py +++ b/plenum/cli/cli.py @@ -10,6 +10,10 @@ import shutil from jsonpickle import json + +# Do not remove this import +# import plenum.cli.ensure_logging_not_setup + from prompt_toolkit.utils import is_windows, is_conemu_ansi import pyorient from ledger.compact_merkle_tree import CompactMerkleTree @@ -105,6 +109,7 @@ class Cli: def __init__(self, looper, basedirpath, nodeReg, cliNodeReg, output=None, debug=False, logFileName=None): self.curClientPort = None + logging.root.handlers = [] logging.root.addHandler(CliHandler(self.out)) # self.cleanUp() self.looper = looper
[#<I>] commented unwanted code, and did one minor change to make it working
py
diff --git a/plexapi/base.py b/plexapi/base.py index <HASH>..<HASH> 100644 --- a/plexapi/base.py +++ b/plexapi/base.py @@ -429,6 +429,26 @@ class PlexPartialObject(PlexObject): """ return self._server.history(maxresults=maxresults, mindate=mindate, ratingKey=self.ratingKey) + def posters(self): + """ Returns list of available poster objects. :class:`~plexapi.media.Poster`. """ + + return self.fetchItems('%s/posters' % self.key) + + def uploadPoster(self, url=None, filepath=None): + """ Upload poster from url or filepath. :class:`~plexapi.media.Poster` to :class:`~plexapi.video.Video`. """ + if url: + key = '%s/posters?url=%s' % (self.key, quote_plus(url)) + self._server.query(key, method=self._server._session.post) + elif filepath: + key = '%s/posters?' % self.key + data = open(filepath, 'rb').read() + self._server.query(key, method=self._server._session.post, data=data) + + def setPoster(self, poster): + key = poster._initpath[:-1] + data = '%s?url=%s' % (key, quote_plus(poster.ratingKey)) + self._server.query(data, method=self._server._session.put) + # The photo tag cant be built atm. TODO # def arts(self): # part = '%s/arts' % self.key
moved poster related methods to base PlexPartialObject class
py
diff --git a/keanu-python/tests/test_coal_mining.py b/keanu-python/tests/test_coal_mining.py index <HASH>..<HASH> 100644 --- a/keanu-python/tests/test_coal_mining.py +++ b/keanu-python/tests/test_coal_mining.py @@ -15,7 +15,7 @@ def test_coalmining() -> None: model.disasters.observe(coal_mining.training_data()) net = BayesNet(model.switchpoint.get_connected_graph()) - samples = sample(net=net, sample_from=net.get_latent_vertices(), draws=700, drop=100, down_sample_interval=5) + samples = sample(net=net, sample_from=net.get_latent_vertices(), draws=1000, drop=100, down_sample_interval=5) vertex_samples: List[numpy_types] = samples["switchpoint"] vertex_samples_primitive: List[List[primitive_types]] = list(map(
Further increase in sample count for coal mining
py
diff --git a/hagelslag/processing/TrackProcessing.py b/hagelslag/processing/TrackProcessing.py index <HASH>..<HASH> 100644 --- a/hagelslag/processing/TrackProcessing.py +++ b/hagelslag/processing/TrackProcessing.py @@ -149,6 +149,9 @@ class TrackProcessor(object): dims = model_obj.timesteps[-1].shape if h > 0: model_obj.estimate_motion(hour, self.model_grid.data[h-1], dims[1], dims[0]) + del scaled_data + del model_data + del hour_labels tracked_model_objects.extend(track_storms(model_objects, self.hours, self.object_matcher.cost_function_components, self.object_matcher.max_values, @@ -214,6 +217,9 @@ class TrackProcessor(object): if h > 0: dims = model_objects[-1][-1].timesteps[0].shape model_objects[-1][-1].estimate_motion(hour, self.model_grid.data[h-1], dims[1], dims[0]) + del hour_labels + del scaled_data + del model_data for h, hour in enumerate(self.hours): past_time_objs = [] for obj in tracked_model_objects:
Added NCARSTORM model grid type and associated config files.
py
diff --git a/kconfiglib.py b/kconfiglib.py index <HASH>..<HASH> 100644 --- a/kconfiglib.py +++ b/kconfiglib.py @@ -5900,7 +5900,7 @@ def expr_str(expr, sc_expr_str_fn=standard_sc_expr_str): # # Relation operands are always symbols (quoted strings are constant # symbols) - return "{} {} {}".format(sc_expr_str_fn(expr[1]), _REL_TO_STR[expr[0]], + return "{} {} {}".format(sc_expr_str_fn(expr[1]), REL_TO_STR[expr[0]], sc_expr_str_fn(expr[2])) @@ -6779,7 +6779,7 @@ LESS_EQUAL = _T_LESS_EQUAL GREATER = _T_GREATER GREATER_EQUAL = _T_GREATER_EQUAL -_REL_TO_STR = { +REL_TO_STR = { EQUAL: "=", UNEQUAL: "!=", LESS: "<",
Make REL_TO_STR public Generally useful, and not likely to need any internal hackery.
py
diff --git a/tests/test_utilities.py b/tests/test_utilities.py index <HASH>..<HASH> 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -449,9 +449,9 @@ def test_cells_dict_utils(): cells_arr = np.array([3, 0, 1, 2, 3, 3, 4, 5]) cells_types = np.array([vtk.VTK_TRIANGLE] * 2) - assert np.all( - cells.generate_cell_offsets(cells_arr, cells_types) - == cells.generate_cell_offsets(cells_arr, cells_types) + assert np.array_equal( + cells.generate_cell_offsets(cells_arr, cells_types), + cells.generate_cell_offsets_loop(cells_arr, cells_types), ) # Non-integer type
Fix generate_cell_offsets test (#<I>) to make it check at least anything (shortcut method generate_cell_offsets returns the same offsets as the explicit generate_cell_offsets_loop method.
py
diff --git a/airflow/contrib/operators/mysql_to_gcs.py b/airflow/contrib/operators/mysql_to_gcs.py index <HASH>..<HASH> 100644 --- a/airflow/contrib/operators/mysql_to_gcs.py +++ b/airflow/contrib/operators/mysql_to_gcs.py @@ -210,6 +210,7 @@ class MySqlToGoogleCloudStorageOperator(BaseOperator): FIELD_TYPE.TINY: 'INTEGER', FIELD_TYPE.BIT: 'INTEGER', FIELD_TYPE.DATETIME: 'TIMESTAMP', + FIELD_TYPE.DATE: 'TIMESTAMP', FIELD_TYPE.DECIMAL: 'FLOAT', FIELD_TYPE.NEWDECIMAL: 'FLOAT', FIELD_TYPE.DOUBLE: 'FLOAT',
[AIRFLOW-<I>] Add mapping for date type to mysql_to_gcs operator Closes #<I> from mikeghen/bug/airflow-<I>
py
diff --git a/tcex/testing/test_case_service_common.py b/tcex/testing/test_case_service_common.py index <HASH>..<HASH> 100644 --- a/tcex/testing/test_case_service_common.py +++ b/tcex/testing/test_case_service_common.py @@ -180,7 +180,7 @@ class TestCaseServiceCommon(TestCasePlaybookCommon): body = json.dumps(body) body = self.redis_client.hset( - request_key, 'request.body', base64.b64encode(json.dumps(body).encode('utf-8')) + request_key, 'request.body', base64.b64encode(body.encode('utf-8')) ) event = { 'command': 'WebhookEvent',
+ fixed test method for serialization of body
py
diff --git a/src/toil/cwl/cwltoil.py b/src/toil/cwl/cwltoil.py index <HASH>..<HASH> 100755 --- a/src/toil/cwl/cwltoil.py +++ b/src/toil/cwl/cwltoil.py @@ -267,6 +267,8 @@ def writeFile(writeFunc, index, existing, x): def uploadFile(uploadfunc, fileindex, existing, uf, skip_broken=False): cwllogger.warn("upload %s", uf) + if uf["location"].startswith("toilfs:") or uf["location"].startswith("_:"): + return if uf["location"] in fileindex: cwllogger.warn("ZZZZZZ found in index %s", fileindex[uf["location"]]) uf["location"] = fileindex[uf["location"]]
Don't try to upload filestore references or file literals.
py
diff --git a/src/onelogin/saml2/authn_request.py b/src/onelogin/saml2/authn_request.py index <HASH>..<HASH> 100644 --- a/src/onelogin/saml2/authn_request.py +++ b/src/onelogin/saml2/authn_request.py @@ -108,7 +108,7 @@ class OneLogin_Saml2_Authn_Request(object): attr_consuming_service_str = '' if 'attributeConsumingService' in sp_data and sp_data['attributeConsumingService']: - attr_consuming_service_str = "\n AttributeConsumingServiceIndex=\"1\"" + attr_consuming_service_str = "\n AttributeConsumingServiceIndex=\"%s\"" % sp_data['attributeConsumingService'].get('index', '1') request = OneLogin_Saml2_Templates.AUTHN_REQUEST % \ {
Add the ability to set AttributeConsumingServiceIndex in the authn request.
py
diff --git a/udiskie/config.py b/udiskie/config.py index <HASH>..<HASH> 100644 --- a/udiskie/config.py +++ b/udiskie/config.py @@ -204,7 +204,7 @@ class Config(object): :param ConfigParser data: config file accessor """ - self._data = data + self._data = data or {} @classmethod def default_pathes(cls):
Fix crash on startup if config file is empty Resolves #<I>
py
diff --git a/tweepy/streaming.py b/tweepy/streaming.py index <HASH>..<HASH> 100644 --- a/tweepy/streaming.py +++ b/tweepy/streaming.py @@ -281,7 +281,6 @@ class Stream(object): self._start(async) def sample(self, async=False): - self.session.params = {'delimited': 'length'} if self.running: raise TweepError('Stream object already connected!') self.url = '/%s/statuses/sample.json?delimited=length' % STREAM_VERSION
Remove delimited=length from sample stream This causes Twitter to reject the request, for some reason.
py
diff --git a/telemetry/telemetry/core/platform/profiler/trace_profiler.py b/telemetry/telemetry/core/platform/profiler/trace_profiler.py index <HASH>..<HASH> 100644 --- a/telemetry/telemetry/core/platform/profiler/trace_profiler.py +++ b/telemetry/telemetry/core/platform/profiler/trace_profiler.py @@ -14,7 +14,11 @@ class TraceProfiler(profiler.Profiler): super(TraceProfiler, self).__init__( browser_backend, platform_backend, output_path, state) assert self._browser_backend.supports_tracing - self._browser_backend.StartTracing(categories, timeout=10) + # We always want flow events when tracing via telemetry. + categories_with_flow = 'disabled-by-default-toplevel.flow' + if categories: + categories_with_flow = ',%s' % categories + self._browser_backend.StartTracing(categories_with_flow, timeout=10) @classmethod def name(cls):
[telemetry] Enable flow events by default in telemetry when tracing. BUG= Review URL: <URL>
py
diff --git a/api/python/quilt3/session.py b/api/python/quilt3/session.py index <HASH>..<HASH> 100644 --- a/api/python/quilt3/session.py +++ b/api/python/quilt3/session.py @@ -77,7 +77,7 @@ def _update_auth(refresh_token, timeout=None): def _handle_response(resp, **kwargs): if resp.status_code == requests.codes.unauthorized: raise QuiltException( - "Authentication failed. Run `quilt login` again." + "Authentication failed. Run `quilt3 login` again." ) elif not resp.ok: try:
Update <I> login message to point to the latest CLI (#<I>)
py
diff --git a/auto_ml/utils_models.py b/auto_ml/utils_models.py index <HASH>..<HASH> 100644 --- a/auto_ml/utils_models.py +++ b/auto_ml/utils_models.py @@ -1,3 +1,5 @@ +import dill + from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, ExtraTreesRegressor, AdaBoostRegressor, GradientBoostingRegressor, GradientBoostingClassifier, ExtraTreesClassifier, AdaBoostClassifier from sklearn.linear_model import RandomizedLasso, RandomizedLogisticRegression, RANSACRegressor, LinearRegression, Ridge, Lasso, ElasticNet, LassoLars, OrthogonalMatchingPursuit, BayesianRidge, ARDRegression, SGDRegressor, PassiveAggressiveRegressor, LogisticRegression, RidgeClassifier, SGDClassifier, Perceptron, PassiveAggressiveClassifier @@ -316,3 +318,18 @@ def get_search_params(model_name): return grid_search_params[model_name] +def load_keras_model(file_name): + from keras.models import load_model + + with open(file_name, 'rb') as read_file: + base_pipeline = dill.load(read_file) + + keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5' + + keras_model = load_model(keras_file_name) + + base_pipeline.named_steps['final_model'].model = keras_model + + return base_pipeline + +
adds load_keras_model function, a single one-word difference from our normal models to load in a trained model
py
diff --git a/dvc/__init__.py b/dvc/__init__.py index <HASH>..<HASH> 100644 --- a/dvc/__init__.py +++ b/dvc/__init__.py @@ -7,7 +7,7 @@ import os import warnings -VERSION_BASE = '0.18.2' +VERSION_BASE = '0.18.3' __version__ = VERSION_BASE PACKAGEPATH = os.path.abspath(os.path.dirname(__file__))
dvc: bump to <I>
py
diff --git a/everest/tests/test_visitors.py b/everest/tests/test_visitors.py index <HASH>..<HASH> 100644 --- a/everest/tests/test_visitors.py +++ b/everest/tests/test_visitors.py @@ -16,6 +16,7 @@ from everest.testing import Pep8CompliantTestCase from sqlalchemy.engine import create_engine import sqlalchemy as sa import sqlalchemy.orm as orm +from everest.orm import Session __docformat__ = 'reStructuredText en' __all__ = ['CompositeCqlFilterSpecificationVisitorTestCase', @@ -490,6 +491,10 @@ class SqlOrderSpecificationVisitorTestCase(OrderVisitorTestCase): self.assert_equal(str(expr), str(expected_expr)) finally: Person.name.asc = old_asc + # Make sure the correct ORDER BY clause is generated. + q = Session.query(Person).order_by(expr) # pylint: disable=E1101 + q_str = str(q.statement) + self.assert_not_equal(q_str.find("ORDER BY %s" % expr), -1) class CqlOrderSpecificationVisitorTestCase(OrderVisitorTestCase):
Added coverage for a SQL order clause generation corner case.
py
diff --git a/punch/vcs_repositories/git_flow_repo.py b/punch/vcs_repositories/git_flow_repo.py index <HASH>..<HASH> 100644 --- a/punch/vcs_repositories/git_flow_repo.py +++ b/punch/vcs_repositories/git_flow_repo.py @@ -59,7 +59,7 @@ class GitFlowRepo(gr.GitRepo): self._run(command_line) - self._run(self.commands + ["release", "finish", "-m", branch]) + self._run(self.commands + ["release", "finish", self.config_obj['new_version'], "-m", branch]) def post_finish_release(self): pass
Fixed finish_release() for git-flow adapter to support older GitFlow versions that require the release name
py
diff --git a/spyderlib/spyder.py b/spyderlib/spyder.py index <HASH>..<HASH> 100644 --- a/spyderlib/spyder.py +++ b/spyderlib/spyder.py @@ -1178,7 +1178,10 @@ class MainWindow(QMainWindow): if plugin is not None: plugin.dockwidget.raise_() self.extconsole.setMinimumHeight(250) - for toolbar in (self.source_toolbar, self.edit_toolbar): + hidden_toolbars = [self.source_toolbar, self.edit_toolbar] + if sys.platform.startswith('linux'): + hidden_toolbars.append(self.search_toolbar) + for toolbar in hidden_toolbars: toolbar.close() for plugin in (self.projectexplorer, self.outlineexplorer): plugin.dockwidget.close()
Hide "Search toolbar" in Linux because it robs space to the working dir widget
py
diff --git a/openid/sreg.py b/openid/sreg.py index <HASH>..<HASH> 100644 --- a/openid/sreg.py +++ b/openid/sreg.py @@ -37,6 +37,12 @@ OpenID providers. from openid.extension import Extension +try: + basestring +except NameError: + # For Python 2.2 + basestring = (str, unicode) + __all__ = [ 'SRegRequest', 'SRegResponse',
[project @ Added basestring Python <I> compatibility]
py
diff --git a/holoviews/plotting/mpl/plot.py b/holoviews/plotting/mpl/plot.py index <HASH>..<HASH> 100644 --- a/holoviews/plotting/mpl/plot.py +++ b/holoviews/plotting/mpl/plot.py @@ -487,7 +487,6 @@ class GridPlot(CompositePlot): layout_axis.spines['bottom' if self.xaxis == 'top' else 'top'].set_visible(False) axis = layout_axis - disabled_spines = [] if self.xaxis is not None: axis.xaxis.set_ticks_position(self.xaxis) axis.xaxis.set_label_position(self.xaxis)
Removed unused variable in GridPlot
py
diff --git a/docker/client.py b/docker/client.py index <HASH>..<HASH> 100644 --- a/docker/client.py +++ b/docker/client.py @@ -307,7 +307,9 @@ class Client(clientbase.ClientBase): def exec_inspect(self, exec_id): if utils.compare_version('1.16', self._version) < 0: - raise errors.InvalidVersion('Exec is not supported in API < 1.16') + raise errors.InvalidVersion( + 'exec_inspect is not supported in API < 1.16' + ) if isinstance(exec_id, dict): exec_id = exec_id.get('Id') res = self._get(self._url("/exec/{0}/json".format(exec_id)))
Update error message to state that exec_inspect is not supported
py
diff --git a/bcbio/variation/validate.py b/bcbio/variation/validate.py index <HASH>..<HASH> 100644 --- a/bcbio/variation/validate.py +++ b/bcbio/variation/validate.py @@ -116,10 +116,12 @@ def _create_validate_config_file(vrn_file, rm_file, rm_interval_file, rm_genome, base_dir, data): config_dir = utils.safe_makedir(os.path.join(base_dir, "config")) config_file = os.path.join(config_dir, "validate.yaml") - with open(config_file, "w") as out_handle: - out = _create_validate_config(vrn_file, rm_file, rm_interval_file, rm_genome, - base_dir, data) - yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) + if not utils.file_uptodate(config_file, vrn_file): + with file_transaction(data, config_file) as tx_config_file: + with open(tx_config_file, "w") as out_handle: + out = _create_validate_config(vrn_file, rm_file, rm_interval_file, rm_genome, + base_dir, data) + yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) return config_file def _create_validate_config(vrn_file, rm_file, rm_interval_file, rm_genome,
Improved re-run speeds with validation by skipping creating configuration file if up to date
py
diff --git a/flask_images/core.py b/flask_images/core.py index <HASH>..<HASH> 100755 --- a/flask_images/core.py +++ b/flask_images/core.py @@ -180,7 +180,7 @@ class Images(object): public_kwargs = ( (LONG_TO_SHORT.get(k, k), v) for k, v in kwargs.iteritems() - if not k.startswith('_') + if v is not None and not k.startswith('_') ) query = urlencode(sorted(public_kwargs), True) signer = Signer(current_app.secret_key) @@ -204,6 +204,7 @@ class Images(object): return url def find_img(self, local_path): + local_path = os.path.normpath(local_path.lstrip('/')) for path_base in current_app.config['IMAGES_PATH']: path = os.path.join(current_app.root_path, path_base, local_path) if os.path.exists(path):
Don't stick `None` values into the URL
py
diff --git a/hardware/opentrons_hardware/hardware_control/constants.py b/hardware/opentrons_hardware/hardware_control/constants.py index <HASH>..<HASH> 100644 --- a/hardware/opentrons_hardware/hardware_control/constants.py +++ b/hardware/opentrons_hardware/hardware_control/constants.py @@ -2,5 +2,5 @@ from typing_extensions import Final -interrupts_per_sec: Final = 170000 +interrupts_per_sec: Final = 100000 """The number of motor interrupts per second."""
refactor(CAN): <I>khz is the frequency of the interrupt. And the frequency of the interrupt is <I>khz. (#<I>)
py
diff --git a/satpy/readers/hrit_goes.py b/satpy/readers/hrit_goes.py index <HASH>..<HASH> 100644 --- a/satpy/readers/hrit_goes.py +++ b/satpy/readers/hrit_goes.py @@ -34,6 +34,7 @@ from datetime import datetime, timedelta import numpy as np import xarray as xr +import dask.array as da from pyresample import geometry @@ -425,6 +426,7 @@ class HRITGOESFileHandler(HRITFileHandler): """Calibrate *data*.""" idx = self.mda['calibration_parameters']['indices'] val = self.mda['calibration_parameters']['values'] + data.data = da.where(data.data == 0, np.nan, data.data) ddata = data.data.map_blocks(lambda block: np.interp(block, idx, val), dtype=val.dtype) res = xr.DataArray(ddata, dims=data.dims, attrs=data.attrs,
Add masking to loading calibrated GOES HRIT data
py
diff --git a/plex_metadata/core/defaults.py b/plex_metadata/core/defaults.py index <HASH>..<HASH> 100644 --- a/plex_metadata/core/defaults.py +++ b/plex_metadata/core/defaults.py @@ -1,4 +1,4 @@ -DEFAULT_TYPES = ['movie', 'show', 'episode'] +DEFAULT_TYPES = ['movie', 'show', 'season', 'episode'] DEFAULT_GUID_MAP = { # Multi
Added 'season' to DEFAULT_TYPES
py
diff --git a/simuvex/engines/vex/irop.py b/simuvex/engines/vex/irop.py index <HASH>..<HASH> 100644 --- a/simuvex/engines/vex/irop.py +++ b/simuvex/engines/vex/irop.py @@ -76,6 +76,10 @@ classified = set() unclassified = set() unsupported = set() explicit_attrs = { + 'Iop_64x4toV256': { + '_generic_name': '64x4', + '_to_size': 256, + }, 'Iop_Yl2xF64': { '_generic_name': 'Yl2x', '_to_size': 64, @@ -766,6 +770,9 @@ class SimIROp(object): rounded_bv = claripy.fpToSBV(rm, args[1].raw_to_fp(), args[1].length) return claripy.fpToFP(claripy.fp.RM_RNE, rounded_bv, claripy.fp.FSort.from_size(args[1].length)) + def _op_Iop_64x4toV256(self, args) : + return self._op_concat(args) + #def _op_Iop_Yl2xF64(self, args): # rm = self._translate_rm(args[0]) # arg2_bv = args[2].to_bv()
explicitly added Iop_<I>x4toV<I> unop
py
diff --git a/SoftLayer/CLI/autoscale/create.py b/SoftLayer/CLI/autoscale/create.py index <HASH>..<HASH> 100644 --- a/SoftLayer/CLI/autoscale/create.py +++ b/SoftLayer/CLI/autoscale/create.py @@ -2,7 +2,6 @@ # :license: MIT, see LICENSE for more details. import click -from SoftLayer import utils import SoftLayer from SoftLayer.CLI import environment @@ -67,11 +66,11 @@ def cli(env, **args): ] policy_template = { 'name': args['policy_name'], - 'policies': scale_actions + 'scaleActions': scale_actions } policies = [] - + policies.append(policy_template) block = [] number_disk = 0 for guest_disk in args['disk']: @@ -113,7 +112,7 @@ def cli(env, **args): 'balancedTerminationFlag': False, 'virtualGuestMemberTemplate': virt_template, 'virtualGuestMemberCount': 0, - 'policies': policies.append(utils.clean_dict(policy_template)), + 'policies': policies, 'terminationPolicyId': args['termination_policy'] }
Policy is not added when an AutoScale Group is created
py
diff --git a/testing/test_assertrewrite.py b/testing/test_assertrewrite.py index <HASH>..<HASH> 100644 --- a/testing/test_assertrewrite.py +++ b/testing/test_assertrewrite.py @@ -1232,6 +1232,9 @@ class TestIssue2121: result.stdout.fnmatch_lines(["*E*assert (1 + 1) == 3"]) +@pytest.mark.skipif( + sys.maxsize <= (2 ** 31 - 1), reason="Causes OverflowError on 32bit systems" +) @pytest.mark.parametrize("offset", [-1, +1]) def test_source_mtime_long_long(testdir, offset): """Support modification dates after 2038 in rewritten files (#4903).
Skip test_source_mtime_long_long on <I>bit and lower platforms
py
diff --git a/configure.py b/configure.py index <HASH>..<HASH> 100755 --- a/configure.py +++ b/configure.py @@ -246,6 +246,8 @@ n.comment('Main executable is library plus main() function.') objs = cxx('ninja') ninja = n.build(binary('ninja'), 'link', objs, implicit=ninja_lib, variables=[('libs', libs)]) +if ninja != 'ninja': + n.build('ninja', 'phony', ninja) n.newline() all_targets += ninja @@ -297,11 +299,11 @@ if platform != 'mingw' and platform != 'windows': ninja_test = n.build(binary('ninja_test'), 'link', objs, implicit=ninja_lib, variables=[('ldflags', test_ldflags), ('libs', test_libs)]) +if ninja_test != 'ninja_test': + n.build('ninja_test', 'phony', ninja_test) n.newline() all_targets += ninja_test -if platform == 'windows': - n.build('ninja_test', 'phony', binary('ninja_test')) n.comment('Perftest executable.') objs = cxx('parser_perftest')
Provide 'ninja' as alias for 'ninja.exe' too. Tidy up.
py
diff --git a/dark/features.py b/dark/features.py index <HASH>..<HASH> 100644 --- a/dark/features.py +++ b/dark/features.py @@ -37,7 +37,7 @@ def getFeatures(fig, record, minX, maxX): totalSubfeatures = 0 if record: for feature in record.features: - if feature.type in ('CDS', 'rRNA'): + if feature.type in ('CDS', 'mat_peptide', 'rRNA'): toPlot.append(feature) totalSubfeatures += len(feature.sub_features) @@ -106,11 +106,16 @@ def addFeatures(fig, record, minX, maxX, offsetAdjuster): 'end': end, 'start': start, }) - frame = start % 3 - fig.plot([start, end], [frame, frame], color=colors[index], - linewidth=2) gene = feature.qualifiers.get('gene', ['<no gene>'])[0] product = feature.qualifiers.get('product', ['<no product>'])[0] + frame = start % 3 + # If we have a polyprotein, shift it up slightly so we can see + # its components below it. + if product.lower().find('polyprotein') > -1: + y = frame + 0.2 + else: + y = frame + fig.plot([start, end], [y, y], color=colors[index], linewidth=2) labels.append('%d-%d: %s (%s)' % (start, end, gene, product)) for subfeature in feature.sub_features: index += 1
Adjust feature display to show mat_peptides and move polypeptides up a little.
py
diff --git a/demag_gui.py b/demag_gui.py index <HASH>..<HASH> 100755 --- a/demag_gui.py +++ b/demag_gui.py @@ -77,7 +77,6 @@ import copy from copy import deepcopy - matplotlib.rc('xtick', labelsize=10) matplotlib.rc('ytick', labelsize=10) matplotlib.rc('axes', labelsize=8) @@ -118,6 +117,7 @@ class Zeq_GUI(wx.Frame): # wx.Frame.__init__(self, None, wx.ID_ANY, self.title) merge confilct testing wx.Frame.__init__(self, parent, wx.ID_ANY, self.title, name='demag gui') + self.parent = parent self.redo_specimens={} self.currentDirectory = os.getcwd() # get the current working directory @@ -4155,7 +4155,7 @@ class Zeq_GUI(wx.Frame): self.interpertation_editor_open = True self.interpertation_editor.Center() self.interpertation_editor.Show(True) - if sys.platform.startswith('darwin'): + if self.parent==None and sys.platform.startswith('darwin'): TEXT="This is a refresher window for mac os to insure that wx opens the new window" dlg = wx.MessageDialog(self, caption="Open",message=TEXT,style=wx.OK | wx.ICON_INFORMATION | wx.STAY_ON_TOP ) dlg.ShowModal()
demag_gui.py added new instance variable parent to record if the gui was launched stand alone or not and edited a mac compatibility patch
py
diff --git a/openquake/calculators/hazard/classical/core.py b/openquake/calculators/hazard/classical/core.py index <HASH>..<HASH> 100644 --- a/openquake/calculators/hazard/classical/core.py +++ b/openquake/calculators/hazard/classical/core.py @@ -192,7 +192,6 @@ def compute_hazard_curves(job_id, src_ids, lt_rlz_id): logs.LOG.debug('< transaction complete') -@staticmethod def classical_task_arg_gen(hc, job, block_size, progress): """ Loop through realizations and sources to generate a sequence of @@ -273,6 +272,7 @@ class ClassicalHazardCalculator(haz_general.BaseHazardCalculatorNext): hc_prog.imt = imt hc_prog.result_matrix = numpy.zeros((num_points, len(imls))) hc_prog.save() + task_arg_gen = staticmethod(classical_task_arg_gen) def pre_execute(self): """
calcs/hazard/classical/core: Cleaned up the declaration of the classical task arg gen.
py
diff --git a/astroplan/core.py b/astroplan/core.py index <HASH>..<HASH> 100644 --- a/astroplan/core.py +++ b/astroplan/core.py @@ -123,6 +123,8 @@ def get_site_names(): ''' Get list of names of observatories for use with `~astroplan.core.get_site` ''' + if _site_db is None: + _load_sites() return sorted(_site_names) class Observer(object):
Adding forgotten load_site() call in get_site_names
py
diff --git a/eqcorrscan/utils/correlate.py b/eqcorrscan/utils/correlate.py index <HASH>..<HASH> 100644 --- a/eqcorrscan/utils/correlate.py +++ b/eqcorrscan/utils/correlate.py @@ -400,9 +400,10 @@ def fftw_multi_normxcorr(template_array, stream_array, pad_array, seed_ids): n_templates = template_array[seed_ids[0]].shape[0] image_len = stream_array[seed_ids[0]].shape[0] fft_len = next_fast_len(template_len + image_len - 1) - template_array = np.ascontiguousarray(list(template_array.values()), + template_array = np.ascontiguousarray([template_array[x] + for x in seed_ids], dtype=np.float32) - stream_array = np.ascontiguousarray(list(stream_array.values()), + stream_array = np.ascontiguousarray([stream_array[x] for x in seed_ids], dtype=np.float32) cccs = np.empty((n_channels, n_templates, image_len - template_len + 1), np.float32)
Arrays created in same order as seed_ids
py
diff --git a/benchbuild/environments/service_layer/ensure.py b/benchbuild/environments/service_layer/ensure.py index <HASH>..<HASH> 100644 --- a/benchbuild/environments/service_layer/ensure.py +++ b/benchbuild/environments/service_layer/ensure.py @@ -9,7 +9,7 @@ else: class ImageNotFound(Exception): - ... + pass class NamedCommand(Protocol):
NFC: change ellipsis to pass This is just a test to get github to sync PR and branch again.
py
diff --git a/nanomath/nanomath.py b/nanomath/nanomath.py index <HASH>..<HASH> 100644 --- a/nanomath/nanomath.py +++ b/nanomath/nanomath.py @@ -75,8 +75,6 @@ def ave_qual(quals): First convert Phred scores to probabilities, calculate average error probability convert average back to Phred scale - - Return None for ZeroDivisionError """ if quals: return -10 * log(sum([10**(q / -10) for q in quals]) / len(quals), 10)
adapted an incorrect docstring [skip ci]
py
diff --git a/test/test_client.py b/test/test_client.py index <HASH>..<HASH> 100755 --- a/test/test_client.py +++ b/test/test_client.py @@ -45,6 +45,15 @@ class ClientTestIndexing(unittest.TestCase): # softCommit because we don't care about data on disk self.solr.commit(test_config['SOLR_COLLECTION'], openSearcher=True, softCommit=True) + def test_down_solr_exception(self): + # connect to "down" sorl host + s = SolrClient('http://localhost:8999/solr', devel=True) + try: + s.query('test', {}) # making dumb query + raise Exception("If solr is down it should error. Either solr isn't down or code is broken.") + except ConnectionError: + pass + def test_delete_doc_by_id_with_space(self): self.delete_docs() self.solr.index_json(test_config['SOLR_COLLECTION'], json.dumps(
added test to check exception is correctly raised if solr is down
py
diff --git a/geocoder/tamu.py b/geocoder/tamu.py index <HASH>..<HASH> 100644 --- a/geocoder/tamu.py +++ b/geocoder/tamu.py @@ -125,7 +125,12 @@ class Tamu(Base): @property def address(self): return ' '.join([ - self.housenumber, self.street, self.city, self.state, self.postal]) + self.parse.get('Number'), + self.parse.get('Name'), + self.parse.get('Suffix'), + self.parse.get('City'), + self.parse.get('State'), + self.parse.get('Zip')]) @property def city(self):
build address property from tree, not self
py
diff --git a/jsonrpcserver/dispatcher.py b/jsonrpcserver/dispatcher.py index <HASH>..<HASH> 100644 --- a/jsonrpcserver/dispatcher.py +++ b/jsonrpcserver/dispatcher.py @@ -123,10 +123,6 @@ def dispatch(methods, request): response = Request(request).process(methods) except JsonRpcServerError as e: response = ExceptionResponse(e, None) - except Exception as e: # pylint: disable=broad-except - # Log the uncaught exception - logger.exception(e) - response = ExceptionResponse(e, None) http_status = 200 if isinstance(request, list) else response.http_status response_log.info(str(response), extra={ 'http_code': http_status,
Don't return library exceptions to client
py
diff --git a/cwltool/utils.py b/cwltool/utils.py index <HASH>..<HASH> 100644 --- a/cwltool/utils.py +++ b/cwltool/utils.py @@ -57,6 +57,10 @@ def copytree_with_merge(src, dst, symlinks=False, ignore=None): def docker_windows_path_adjust(path): # type: (Text) -> (Text) if path is not None and os.name == 'nt': + sp=path.split(':') + if len(sp)==2: + sp[0]=sp[0].capitalize() # Capitalizing windows Drive letters + path=':'.join(sp) path = path.replace(':', '').replace('\\', '/') return path if path[0] == '/' else '/' + path return path
capitalizing the drive letter when passed to docker path adjust function
py
diff --git a/tests/core/tests/resources_tests.py b/tests/core/tests/resources_tests.py index <HASH>..<HASH> 100644 --- a/tests/core/tests/resources_tests.py +++ b/tests/core/tests/resources_tests.py @@ -806,7 +806,7 @@ class PostgresTests(TransactionTestCase): # We can't check the error message because it's package- and version-dependent -if VERSION >= (1, 8) and 'postgresql' in settings.DATABASES['default']['ENGINE']: +if 'postgresql' in settings.DATABASES['default']['ENGINE']: from django.contrib.postgres.fields import ArrayField from django.db import models
Remove now-redundant check for ><I> in resource tests.
py
diff --git a/patroni/postgresql.py b/patroni/postgresql.py index <HASH>..<HASH> 100644 --- a/patroni/postgresql.py +++ b/patroni/postgresql.py @@ -1560,11 +1560,13 @@ $$""".format(name, ' '.join(options)), name, password, password) if cursor.rowcount != 1: # Either slot doesn't exists or it is still active self._schedule_load_slots = True # schedule load_replication_slots on the next iteration + immediately_reserve = ', true' if self._major_version >= 90600 else '' + # create new slots for slot in slots - set(self._replication_slots): - self._query("""SELECT pg_create_physical_replication_slot(%s) + self._query("""SELECT pg_create_physical_replication_slot(%s{0}) WHERE NOT EXISTS (SELECT 1 FROM pg_replication_slots - WHERE slot_name = %s)""", slot, slot) + WHERE slot_name = %s)""".format(immediately_reserve), slot, slot) self._replication_slots = slots except Exception:
Immediately reserve LSN on upon creation of replication slot (#<I>) This feature is available starting from <I>
py
diff --git a/bitex/api/api.py b/bitex/api/api.py index <HASH>..<HASH> 100644 --- a/bitex/api/api.py +++ b/bitex/api/api.py @@ -70,15 +70,20 @@ class RESTAPI: if authenticate: # Pass all locally vars to sign(); Sorting left to children kwargs['urlpath'] = urlpath kwargs['request_method'] = request_method - url, kwargs = self.sign(endpoint, *args, **kwargs) + url, request_kwargs = self.sign(endpoint, *args, **kwargs) else: url = self.uri + urlpath + request_kwargs = kwargs print(url) - r = request_method(url, timeout=5, **kwargs) + r = request_method(url, timeout=5, **request_kwargs) return r + def auth_query(self, endpoint): + pass + + def public_query(self):
renamed kwargs in auth if/else and later on as well to differentiate better what actually belongs in the kwargs for the request
py
diff --git a/fusesoc/main.py b/fusesoc/main.py index <HASH>..<HASH> 100644 --- a/fusesoc/main.py +++ b/fusesoc/main.py @@ -100,7 +100,8 @@ def init(cm, args): if os.path.exists(config_file): - logger.warning("'{}' already exists".format(config_file)) + logger.warning("'{}' already exists. Aborting".format(config_file)) + exit(1) #TODO. Prepend cores_root to file if it doesn't exist f = open(config_file, 'w+') else: @@ -133,7 +134,7 @@ def init(cm, args): else: location = default_dir if os.path.exists(location): - logger.warning("'{}' already exists".format(location)) + logger.warning("'{}' already exists. This library will not be added to fusesoc.conf".format(location)) #TODO: Prompt for overwrite else: logger.info("Initializing {}".format(name))
Make fusesoc init slightly safer to use
py
diff --git a/datalab/utils/_http.py b/datalab/utils/_http.py index <HASH>..<HASH> 100644 --- a/datalab/utils/_http.py +++ b/datalab/utils/_http.py @@ -44,12 +44,12 @@ class RequestException(Exception): self.message = 'HTTP request failed' # Try extract a message from the body; swallow possible resulting ValueErrors and KeyErrors. try: - error = json.loads(content)['error'] + error = json.loads(content.decode('utf-8'))['error'] if 'errors' in error: error = error['errors'][0] self.message += ': ' + error['message'] except Exception: - lines = content.split('\n') if isinstance(content, basestring) else [] + lines = content.splitlines() if isinstance(content, basestring) else [] if lines: self.message += ': ' + lines[0]
Fix python3 unicode incompatibilities. (#<I>)
py
diff --git a/LiSE/LiSE/xcollections.py b/LiSE/LiSE/xcollections.py index <HASH>..<HASH> 100644 --- a/LiSE/LiSE/xcollections.py +++ b/LiSE/LiSE/xcollections.py @@ -229,7 +229,7 @@ class FunctionStore(Signal): self._ast_idx[name] = len(self._ast.body) self._ast.body.append(expr) locl = {} - exec(compile(mod, '<LiSE>', 'exec'), {}, locl) + exec(compile(mod, self._filename, 'exec'), {}, locl) self._locl.update(locl) self.send(self, attr=name, val=locl[name])
Compile stored functions as if they were in their file Makes it possible to get their source right away. Not sure why...
py
diff --git a/hwt/serializer/systemC/utils.py b/hwt/serializer/systemC/utils.py index <HASH>..<HASH> 100644 --- a/hwt/serializer/systemC/utils.py +++ b/hwt/serializer/systemC/utils.py @@ -4,6 +4,7 @@ from hwt.hdl.statement import HdlStatement from hwt.pyUtils.arrayQuery import arr_any from ipCorePackager.constants import DIRECTION from hdlConvertor.hdlAst._defs import HdlVariableDef +from hwt.synthesizer.param import Param def systemCTypeOfSig(s): @@ -24,7 +25,8 @@ def systemCTypeOfSig(s): return SIGNAL_TYPE.PORT_REG else: raise ValueError(t) - + elif isinstance(s, Param): + return SIGNAL_TYPE.PORT_REG elif s._const or\ arr_any(s.drivers, lambda d: isinstance(d, HdlStatement)
systemCTypeOfSig: support for Param
py
diff --git a/dvc/__init__.py b/dvc/__init__.py index <HASH>..<HASH> 100644 --- a/dvc/__init__.py +++ b/dvc/__init__.py @@ -10,7 +10,7 @@ import os import warnings -VERSION_BASE = "0.25.3" +VERSION_BASE = "0.25.4" __version__ = VERSION_BASE PACKAGEPATH = os.path.abspath(os.path.dirname(__file__))
dvc: bump to <I>
py
diff --git a/urlutils.py b/urlutils.py index <HASH>..<HASH> 100644 --- a/urlutils.py +++ b/urlutils.py @@ -112,7 +112,8 @@ def redirect_to_url(req, url, redirection_type=None): redirection_type = apache.HTTP_MOVED_TEMPORARILY req.err_headers_out["Location"] = url if redirection_type != apache.HTTP_MOVED_PERMANENTLY: - req.err_headers_out["Cache-Control"] = "no-cache" + req.err_headers_out["Cache-Control"] = "no-cache, private, no-store, must-revalidate, post-check=0, pre-check=0" + req.err_headers_out["Pragma"] = "no-cache" if req.headers_out.has_key("Set-Cookie"): req.err_headers_out["Set-Cookie"] = req.headers_out["Set-Cookie"]
Copied Cache-Control from Facebook :-)
py
diff --git a/mot/model_building/evaluation_models.py b/mot/model_building/evaluation_models.py index <HASH>..<HASH> 100644 --- a/mot/model_building/evaluation_models.py +++ b/mot/model_building/evaluation_models.py @@ -168,7 +168,7 @@ class OffsetGaussianEvaluationModel(EvaluationModel): sqrt(pown(''' + eval_fname + '''(data, x, i), 2) + pown(OffsetGaussianNoise_sigma, 2)), 2); } - return (MOT_FLOAT_TYPE) (sum / (pown(OffsetGaussianNoise_sigma, 2))); + return (MOT_FLOAT_TYPE) (sum / (2 * pown(OffsetGaussianNoise_sigma, 2))); } ''' @@ -182,7 +182,7 @@ class OffsetGaussianEvaluationModel(EvaluationModel): sqrt(pown(''' + eval_fname + '''(data, x, i), 2) + pown(OffsetGaussianNoise_sigma, 2)), 2); } - return - sum / (pown(OffsetGaussianNoise_sigma, 2)); + return - sum / (2 * pown(OffsetGaussianNoise_sigma, 2)); } '''
Adds factor 2 to the offset Gaussian noise model
py
diff --git a/tests/test_timestamps.py b/tests/test_timestamps.py index <HASH>..<HASH> 100644 --- a/tests/test_timestamps.py +++ b/tests/test_timestamps.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- +from __future__ import unicode_literals + from datetime import datetime import unittest
unicode literals to support timezone tests under py2
py
diff --git a/tensorflow_probability/python/distributions/distribution_properties_test.py b/tensorflow_probability/python/distributions/distribution_properties_test.py index <HASH>..<HASH> 100644 --- a/tensorflow_probability/python/distributions/distribution_properties_test.py +++ b/tensorflow_probability/python/distributions/distribution_properties_test.py @@ -1367,6 +1367,7 @@ class DistributionsWorkWithAutoVectorizationTest(test_util.TestCase): lambda i: dist.sample(seed=seed), tf.range(num_samples))) hp.note('Drew samples {}'.format(sample)) + tfp_hps.guitar_skip_if_matches('NegativeBinomial', dist_name, 'b/147743999') if dist_name not in LOGPROB_AUTOVECTORIZATION_IS_BROKEN: pfor_lp = tf.vectorized_map(dist.log_prob, tf.convert_to_tensor(sample)) batch_lp = dist.log_prob(sample)
Suppress testVmapNegativeBinomial's log_prob portion in Guitar, due to b/<I>. PiperOrigin-RevId: <I>
py
diff --git a/tests/test_style.py b/tests/test_style.py index <HASH>..<HASH> 100644 --- a/tests/test_style.py +++ b/tests/test_style.py @@ -1,9 +1,12 @@ +import logging import pkg_resources import unittest class CodeStyleTestCase(unittest.TestCase): def test_code_style(self): + logger = logging.getLogger('flake8') + logger.setLevel(logging.ERROR) flake8 = pkg_resources.load_entry_point('flake8', 'console_scripts', 'flake8') try: flake8([])
Decrease noise from code-style test
py
diff --git a/ayrton/__init__.py b/ayrton/__init__.py index <HASH>..<HASH> 100644 --- a/ayrton/__init__.py +++ b/ayrton/__init__.py @@ -343,7 +343,7 @@ class Ayrton (object): def run_tree (self, tree, file_name, argv=None, params=None): - logger.debug2 ('AST: %s', ast.dump (tree)) + logger.debug2 ('AST: %s', ast.dump (tree, True, True)) logger.debug2 ('code: \n%s', pprint (tree)) if params is not None:
[+] more verbose, useful ast dumping.
py
diff --git a/python/pyspark/sql/session.py b/python/pyspark/sql/session.py index <HASH>..<HASH> 100644 --- a/python/pyspark/sql/session.py +++ b/python/pyspark/sql/session.py @@ -43,7 +43,7 @@ def _monkey_patch_RDD(sparkSession): This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)`` :param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns - :param samplingRatio: the sample ratio of rows used for inferring + :param sampleRatio: the sample ratio of rows used for inferring :return: a DataFrame >>> rdd.toDF().collect()
[MINOR][PYTHON] Fix typo in a docstring of RDD.toDF ### What changes were proposed in this pull request? Fixes typo in docstring of `toDF` ### Why are the changes needed? The third argument of `toDF` is actually `sampleRatio`. related discussion: <URL>
py
diff --git a/remi/server.py b/remi/server.py index <HASH>..<HASH> 100644 --- a/remi/server.py +++ b/remi/server.py @@ -707,7 +707,15 @@ function uploadFile(widgetID, eventSuccess, eventFail, eventData, file){ self.client = clients[k] if update_thread is None: - update_thread = _UpdateThread(self.server.update_interval) + # we need to, at least, ping the websockets to keep them alive. we might also ping more frequently if the + # user requested we do so + ping_time = self.server.websocket_timeout_timer_ms / 2000.0 # twice the timeout in ms + if self.server.update_interval is None: + interval = ping_time + else: + interval = min(ping_time, self.server.update_interval) + update_thread = _UpdateThread(interval) + update_event.set() # update now def idle(self): """ Idle function called every UPDATE_INTERVAL before the gui update.
server: ensure clients are always updated REMI works with update_interval=None, but that could cause websocket timeouts if they are not pinged regularly enough. Ensure they are at least pinged at 2x the websocket timeout threshold
py
diff --git a/salt/master.py b/salt/master.py index <HASH>..<HASH> 100644 --- a/salt/master.py +++ b/salt/master.py @@ -379,23 +379,13 @@ class Master(SMaster): :param dict: The salt options ''' - if HAS_ZMQ: - # Warn if ZMQ < 3.2 - try: - zmq_version_info = zmq.zmq_version_info() - except AttributeError: - # PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to - # using zmq.zmq_version() and build a version info tuple. - zmq_version_info = tuple( - [int(x) for x in zmq.zmq_version().split('.')] - ) - if zmq_version_info < (3, 2): - log.warning( - 'You have a version of ZMQ less than ZMQ 3.2! There are ' - 'known connection keep-alive issues with ZMQ < 3.2 which ' - 'may result in loss of contact with minions. Please ' - 'upgrade your ZMQ!' - ) + if zmq and zmq_version_info < (3, 2): + log.warning( + 'You have a version of ZMQ less than ZMQ 3.2! There are ' + 'known connection keep-alive issues with ZMQ < 3.2 which ' + 'may result in loss of contact with minions. Please ' + 'upgrade your ZMQ!' + ) SMaster.__init__(self, opts) def __set_max_open_files(self):
Use ZMQ utility for version check
py