diff
stringlengths
139
3.65k
message
stringlengths
8
627
diff_languages
stringclasses
1 value
diff --git a/threading2/t2_base.py b/threading2/t2_base.py index <HASH>..<HASH> 100644 --- a/threading2/t2_base.py +++ b/threading2/t2_base.py @@ -450,8 +450,8 @@ class SHLock(object): self.is_exclusive -= 1 if not self.is_exclusive: self._exclusive_owner = None - # If there are waiting shared locks, issue them - # all and them wake everyone up. + # If there are waiting shared locks, issue it to them + # all and then wake everyone up. if self._shared_queue: for (thread,waiter) in self._shared_queue: self.is_shared += 1 @@ -504,7 +504,7 @@ class SHLock(object): try: self._shared_queue.append((me,waiter)) if not waiter.wait(timeout=timeout): - self._shared_queue.remove(waiter) + self._shared_queue.remove((me,waiter)) return False assert not self.is_exclusive finally: @@ -529,7 +529,7 @@ class SHLock(object): try: self._exclusive_queue.append((me,waiter)) if not waiter.wait(timeout=timeout): - self._exclusive_queue.remove(waiter) + self._exclusive_queue.remove((me,waiter)) return False finally: self._return_waiter(waiter) @@ -601,4 +601,3 @@ def process_affinity(affinity=None): raise ValueError("unknown cpus: %s" % affinity) return system_affinity() -
SHLock: fix cleanup logic when acquire() times out
py
diff --git a/scripts/gatk_genotyper.py b/scripts/gatk_genotyper.py index <HASH>..<HASH> 100644 --- a/scripts/gatk_genotyper.py +++ b/scripts/gatk_genotyper.py @@ -45,9 +45,6 @@ def main(config_file, ref_file, align_bam, dbsnp=None): def unified_genotyper(picard, align_bam, ref_file, platform, dbsnp=None): """Perform SNP genotyping on the given alignment file. - - XXX Discuss on defaults: - min_confidence 10 """ out_file = "%s-snp.vcf" % os.path.splitext(align_bam)[0] params = ["-T", "UnifiedGenotyper", @@ -60,7 +57,7 @@ def unified_genotyper(picard, align_bam, ref_file, platform, dbsnp=None): "--standard_min_confidence_threshold_for_emitting", "10.0", "--trigger_min_confidence_threshold_for_calling", "10.0", "--trigger_min_confidence_threshold_for_emitting", "10.0", - "--max_reads_at_locus", 10000, + "--downsample_to_coverage", 10000, "--min_base_quality_score", 20, "--platform", platform, "-l", "INFO",
Update coverage sampling argument to match latest version of GATK
py
diff --git a/gcloud/storage/bucket.py b/gcloud/storage/bucket.py index <HASH>..<HASH> 100644 --- a/gcloud/storage/bucket.py +++ b/gcloud/storage/bucket.py @@ -514,6 +514,7 @@ class Bucket(object): self.acl.clear() for entry in result['acl']: self.acl.entity(self.acl.entity_from_dict(entry)) + self.acl.loaded = True return self
Ensure bucket's 'acl' is marked as loaded after 'save_acl'. Incorporates feedback from @dhermes.
py
diff --git a/tests.py b/tests.py index <HASH>..<HASH> 100644 --- a/tests.py +++ b/tests.py @@ -572,10 +572,10 @@ class TWPTestsWithSpans(unittest.TestCase): # Test it! if __name__ == '__main__': - #unittest.main() # only seems to run 1 class? + unittest.main() # only seems to run 1 class? - verbosity = 0 # set to 2 for verbose output - suite = unittest.TestLoader().loadTestsFromTestCase(TWPTestsWithSpans) - unittest.TextTestRunner(verbosity=verbosity).run(suite) - suite = unittest.TestLoader().loadTestsFromTestCase(TWPTests) - unittest.TextTestRunner(verbosity=verbosity).run(suite) + #verbosity = 0 # set to 2 for verbose output + #suite = unittest.TestLoader().loadTestsFromTestCase(TWPTestsWithSpans) + #unittest.TextTestRunner(verbosity=verbosity).run(suite) + #suite = unittest.TestLoader().loadTestsFromTestCase(TWPTests) + #unittest.TextTestRunner(verbosity=verbosity).run(suite)
not sure what happened, unittest.main() does the job now
py
diff --git a/porespy/filters/_size_seq_satn.py b/porespy/filters/_size_seq_satn.py index <HASH>..<HASH> 100644 --- a/porespy/filters/_size_seq_satn.py +++ b/porespy/filters/_size_seq_satn.py @@ -172,21 +172,10 @@ def pc_to_satn(pc, im): wetting phase was displaced. """ - temp = np.copy(pc) - # See if pc has any +/- infs - posinf = temp == np.inf - neginf = temp == -np.inf - vmin = pc[im*~neginf].min() - vmax = pc[im*~posinf].max() - # Deal with negative infinities - if vmin < 0: - temp = temp + im*np.abs(vmin) + 1 # Ensure all a greater than zero - temp[posinf] = vmax*2 - temp[neginf] = vmin/2 - - temp = make_contiguous(temp.astype(int)) - temp[posinf] = -1 - satn = seq_to_satn(seq=temp, im=im) + a = np.digitize(pc, bins=np.unique(pc)) + a[~im] = 0 + a[np.where(pc == np.inf)] = -1 + satn = seq_to_satn(seq=a, im=im) return satn
much simpler pc_to_satn
py
diff --git a/salt/modules/iptables.py b/salt/modules/iptables.py index <HASH>..<HASH> 100644 --- a/salt/modules/iptables.py +++ b/salt/modules/iptables.py @@ -383,7 +383,7 @@ def save(filename=None, family='ipv4'): salt '*' iptables.save /etc/sysconfig/iptables family=ipv6 ''' if _conf() and not filename: - filename = _conf() + filename = _conf(family) parent_dir = os.path.dirname(filename) if not os.path.isdir(parent_dir):
Use family when saving iptables rule. Will save ipv6 rules to proper file.
py
diff --git a/cocaine/proxy/proxy.py b/cocaine/proxy/proxy.py index <HASH>..<HASH> 100644 --- a/cocaine/proxy/proxy.py +++ b/cocaine/proxy/proxy.py @@ -45,7 +45,7 @@ from cocaine.services import Service from cocaine.services import Locator from cocaine.exceptions import ServiceError from cocaine.exceptions import DisconnectionError -from cocaine.detail.service import EmptyResponse +from cocaine.services import EmptyResponse URL_REGEX = re.compile(r"/([^/]*)/([^/?]*)(.*)")
[Proxy] Do not use a detail import path
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -52,6 +52,7 @@ setup( url='https://github.com/JohnVinyard/zounds', author='John Vinyard', author_email='john.vinyard@gmail.com', + description='Zounds is a python library for working with audio', long_description=long_description, download_url=download_url, ext_modules=extension_modules,
Add a description/summary to setup.py
py
diff --git a/atrcopy/ataridos.py b/atrcopy/ataridos.py index <HASH>..<HASH> 100644 --- a/atrcopy/ataridos.py +++ b/atrcopy/ataridos.py @@ -320,7 +320,7 @@ class AtrHeader(BaseHeader): file_format = "ATR" def __init__(self, bytes=None, sector_size=128, initial_sectors=3, create=False): - BaseHeader.__init__(self, sector_size, initial_sectors, 360) + BaseHeader.__init__(self, sector_size, initial_sectors, 360, 1) if create: self.header_offset = 16 self.check_size(0)
Atari DOS disks should start from sector 1
py
diff --git a/lucid/optvis/style.py b/lucid/optvis/style.py index <HASH>..<HASH> 100644 --- a/lucid/optvis/style.py +++ b/lucid/optvis/style.py @@ -27,7 +27,7 @@ class StyleLoss(object): https://arxiv.org/abs/1508.06576 """ - def __init__(self, style_layers, ema_decay=0.0, + def __init__(self, style_layers, ema_decay=None, style_func=gram_style, loss_func=mean_l1_loss): """Initilize style loss. @@ -44,7 +44,7 @@ class StyleLoss(object): outputs of 'style_func'. """ self.input_grams = [style_func(s) for s in style_layers] - if ema_decay: + if ema_decay is not None: ema = tf.train.ExponentialMovingAverage(decay=ema_decay) update_ema_op = ema.apply(self.input_grams) with tf.control_dependencies([update_ema_op]):
StyleLoss to support tensors as decay param.
py
diff --git a/proxmin/nmf.py b/proxmin/nmf.py index <HASH>..<HASH> 100644 --- a/proxmin/nmf.py +++ b/proxmin/nmf.py @@ -35,13 +35,25 @@ def prox_likelihood(X, step, Xs=None, j=None, Y=None, W=None, class Steps_AS: def __init__(self, slack=0.5, Wmax=1): + """Helper class to compute the Lipschitz constants of grad f. + + Because the spectral norm is expensive to compute, it will only update + the step_size if relative changes of L exceed (1-slack)/2. + If not, which is usually the case after only a few iterations, it will + report a previous value for the next several iterations. The stride + beteen updates is set by + stride -> stride * (1-slack)/2 / rel_error + i.e. it increases more strongly if the rel_error is much below the + slack budget. + """ + self.Wmax = Wmax self.slack = slack self.it = 0 N = 2 self.stride = [1] * N self.last = [-1] * N - self.stored = [None] * 2 # last update of L + self.stored = [None] * N # last update of L def __call__(self, j, Xs): if self.it >= self.last[j] + self.stride[j]:
docstring for Steps_AS
py
diff --git a/src/lineage/__init__.py b/src/lineage/__init__.py index <HASH>..<HASH> 100644 --- a/src/lineage/__init__.py +++ b/src/lineage/__init__.py @@ -761,13 +761,17 @@ class Lineage: } # ensure discrepant SNPs are in shared DNA segments - for discrepant_snp in discrepant_snps.copy(): + for discrepant_snp in discrepant_snps: if d["start"] <= df.loc[discrepant_snp].pos <= d["end"]: - discrepant_snps = discrepant_snps.drop(discrepant_snp) discrepant_snps_passed = discrepant_snps_passed.append( df.loc[[discrepant_snp]].index ) + # remove found discrepant SNPs from search on next iteration + discrepant_snps = discrepant_snps.drop( + discrepant_snps_passed, errors="ignore" + ) + shared_dna.append(d) counter += 1 return {"shared_dna": shared_dna, "discrepant_snps": discrepant_snps_passed}
Remove discrepant SNPs from search after loop
py
diff --git a/validator/testcases/content.py b/validator/testcases/content.py index <HASH>..<HASH> 100644 --- a/validator/testcases/content.py +++ b/validator/testcases/content.py @@ -272,7 +272,8 @@ def _process_file(err, xpi_package, name, file_data, name_lower, is_subpackage else PACKAGE_THEME) err.set_tier(1) - supported_versions = err.supported_versions + supported_versions = (err.supported_versions.copy() + if err.supported_versions else None) if is_subpackage: testendpoint_validator.test_inner_package(err, sub_xpi)
Preserve initial supported apps dict for later restoration
py
diff --git a/src/mca.py b/src/mca.py index <HASH>..<HASH> 100644 --- a/src/mca.py +++ b/src/mca.py @@ -59,8 +59,8 @@ class MCA(object): self.P, self.s, self.Q = np.linalg.svd(_mul(self.D_r, Z_c, self.D_c)) if benzecri: - self.E = np.array([(K/(K-1)*(_ - 1/K))**2 - if _ > 1/K else 0 for _ in self.s**2]) + self.E = np.array([(K/(K-1.)*(_ - 1./K))**2 + if _ > 1./K else 0 for _ in self.s**2]) self.inertia = self.E.sum() if benzecri else sum(self.s**2) self.rank = np.argmax((self.E if benzecri else self.s**2) < TOL) self.L = (self.E if benzecri else self.s**2)[:self.rank]
Restore lost commit by jakub
py
diff --git a/shortuuid/django_fields.py b/shortuuid/django_fields.py index <HASH>..<HASH> 100644 --- a/shortuuid/django_fields.py +++ b/shortuuid/django_fields.py @@ -13,7 +13,7 @@ class ShortUUIDField(models.CharField): if "max_length" not in kwargs: # If `max_length` was not specified, set it here. - kwargs["max_length"] = self.length + kwargs["max_length"] = self.length + len(self.prefix) self.alphabet = kwargs.pop("alphabet", None) kwargs["default"] = self._generate_uuid
fix: Correctly account for length when prefix is used (fixes #<I>)
py
diff --git a/tools/c7n_org/c7n_org/cli.py b/tools/c7n_org/c7n_org/cli.py index <HASH>..<HASH> 100644 --- a/tools/c7n_org/c7n_org/cli.py +++ b/tools/c7n_org/c7n_org/cli.py @@ -46,7 +46,7 @@ from c7n_org.utils import environ, account_tags log = logging.getLogger('c7n_org') -WORKER_COUNT = os.environ.get('C7N_ORG_PARALLEL', multiprocessing.cpu_count * 4) +WORKER_COUNT = os.environ.get('C7N_ORG_PARALLEL', multiprocessing.cpu_count() * 4) CONFIG_SCHEMA = {
tools/c7n_org Fixing the WORKER_COUNT (#<I>)
py
diff --git a/tools/python/buildmetadatafromxml.py b/tools/python/buildmetadatafromxml.py index <HASH>..<HASH> 100755 --- a/tools/python/buildmetadatafromxml.py +++ b/tools/python/buildmetadatafromxml.py @@ -364,7 +364,7 @@ class XTerritory(UnicodeMixin): self.o.leading_digits = xterritory.get('leadingDigits', None) self.o.preferred_international_prefix = xterritory.get('preferredInternationalPrefix', None) self.o.national_prefix = xterritory.get('nationalPrefix', None) - self.o.national_prefix_for_parsing = xterritory.get('nationalPrefixForParsing', None) + self.o.national_prefix_for_parsing = _dews_re(xterritory.get('nationalPrefixForParsing', None)) self.o.national_prefix_transform_rule = xterritory.get('nationalPrefixTransformRule', None) if self.o.national_prefix_transform_rule is not None: # Replace '$1' etc with '\1' to match Python regexp group reference format
Remove whitespace in RE for nationalPrefixForParsing. Fixes #<I> (once metadata is rebuilt).
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -35,7 +35,7 @@ setup( version = '.'.join([str(x) for x in __version__]), packages = ['bokeh', 'bokeh.chaco_gg', 'bokeh.server', 'bokeh.server.models', 'bokeh.server.views', - 'bokeh.server.test'], + 'bokeh.server.test', 'bokeh.specialmodels'], package_data = {'bokeh' : package_data_dirs}, author = 'Continuum Analytics', author_email = 'info@continuum.io',
left out new package specialmodels from setup.py
py
diff --git a/aquarius/run.py b/aquarius/run.py index <HASH>..<HASH> 100644 --- a/aquarius/run.py +++ b/aquarius/run.py @@ -1,5 +1,6 @@ from aquarius.myapp import app from aquarius.app.assets import assets +from aquarius.app.musicmap import musicmap from flask_swagger import swagger from flask_swagger_ui import get_swaggerui_blueprint from flask import jsonify @@ -27,6 +28,7 @@ def spec(): swag = swagger(app) swag['info']['version'] = get_version() swag['info']['title'] = "Aquarius" + swag['basePath'] = BaseURLs.BASE_AQUARIUS_URL return jsonify(swag) @@ -43,8 +45,10 @@ swaggerui_blueprint = get_swaggerui_blueprint( # Register blueprint at URL app.register_blueprint(swaggerui_blueprint, url_prefix=BaseURLs.SWAGGER_URL) +app.register_blueprint(musicmap, url_prefix=BaseURLs.ASSETS_URL) app.register_blueprint(assets, url_prefix=BaseURLs.ASSETS_URL) + if __name__ == '__main__': if isinstance(config.aquarius_url.split(':')[-1], int): app.run(host=config.aquarius_url.split(':')[1],
added basePath for swagger
py
diff --git a/PBB_Core.py b/PBB_Core.py index <HASH>..<HASH> 100755 --- a/PBB_Core.py +++ b/PBB_Core.py @@ -65,7 +65,6 @@ class WDItemList(object): class WDItemEngine(object): - create_new_item = False log_file_name = '' def __init__(self, wd_item_id='', item_name='', domain='', data=[], server='www.wikidata.org', @@ -84,6 +83,7 @@ class WDItemEngine(object): self.wd_json_representation = {} self.wd_item_id = wd_item_id self.item_name = item_name + self.create_new_item = False self.domain = domain self.data = data self.server = server
create_new_item is always set to False by default.
py
diff --git a/tests/conftest.py b/tests/conftest.py index <HASH>..<HASH> 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,6 +1,5 @@ import pytest import xenon -import jpype @pytest.fixture(scope='session', autouse=True) @@ -14,5 +13,5 @@ def make_init(): assert xenon.JavaClass.JClassClass is None assert xenon.JavaPackage.JPackageClass is None xenon.init() - assert xenon.JavaClass.JClassClass == jpype.JClass - assert xenon.JavaPackage.JPackageClass == jpype.JPackage + assert xenon.JavaClass.JClassClass is not None + assert xenon.JavaPackage.JPackageClass is not None
Method types did not match on travis...
py
diff --git a/vagrant/__init__.py b/vagrant/__init__.py index <HASH>..<HASH> 100644 --- a/vagrant/__init__.py +++ b/vagrant/__init__.py @@ -838,7 +838,7 @@ class Vagrant: Removes the box matching name and provider. It is an error if no box matches name and provider. """ - self._call_vagrant_command(["box", "remove", name, provider]) + self._call_vagrant_command(["box", "remove", "--force", name, provider]) def plugin_list(self): """
Ensure box remove uses force (#<I>)
py
diff --git a/py/dynesty/utils.py b/py/dynesty/utils.py index <HASH>..<HASH> 100644 --- a/py/dynesty/utils.py +++ b/py/dynesty/utils.py @@ -537,8 +537,8 @@ def jitter_run(res, rstate=None, approx=False): saved_h = zhlnz - logzmax * np.exp(saved_logz - logzmax) # changes in h in each step dh = np.diff(saved_h, prepend=0) - # why ?? - saved_logzvar = np.sum(dh * dlogvol_run) + + saved_logzvar = np.cumsum(dh * dlogvol_run) # Copy results. new_res = Results([item for item in res.items()])
fix the logzerr calculation, (make sure it's an array)
py
diff --git a/tinydb/table.py b/tinydb/table.py index <HASH>..<HASH> 100644 --- a/tinydb/table.py +++ b/tinydb/table.py @@ -590,10 +590,10 @@ class Table: tables = {} try: - table = tables[self.name] + raw_table = tables[self.name] except KeyError: # The table does not exist yet, so it is empty - table = {} + raw_table = {} # Convert the document IDs to the document ID class. # This is required as the rest of TinyDB expects the document IDs @@ -601,7 +601,7 @@ class Table: # might convert dict keys to strings. table = { self.document_id_class(doc_id): doc - for doc_id, doc in table.items() + for doc_id, doc in raw_table.items() } # Perform the table update operation
fix(table): make mypy happy
py
diff --git a/colab/super_archives/search_indexes.py b/colab/super_archives/search_indexes.py index <HASH>..<HASH> 100644 --- a/colab/super_archives/search_indexes.py +++ b/colab/super_archives/search_indexes.py @@ -28,7 +28,7 @@ class ThreadIndex(BaseIndex, indexes.Indexable): latest_message_pk = indexes.IntegerField( model_attr='latest_message__pk', indexed=False ) - score = indexes.IntegerField(model_attr='score') + rating = indexes.IntegerField(model_attr='score') def get_model(self): return Thread
Change score field to fix whoosh indexing
py
diff --git a/djohno/utils.py b/djohno/utils.py index <HASH>..<HASH> 100644 --- a/djohno/utils.py +++ b/djohno/utils.py @@ -14,6 +14,22 @@ def is_pretty_from_address(input): return False +def _get_version_from_app(app): + if hasattr(app, 'get_version'): + get_version = app.get_version + if callable(get_version): + return get_version() + return get_version + + if hasattr(app, 'VERSION'): + return app.VERSION + + if hasattr(app, '__version__'): + return app.__version__ + + return None + + def get_app_versions(): versions = {} @@ -21,17 +37,9 @@ def get_app_versions(): __import__(app) app = sys.modules[app] - if hasattr(app, 'get_version'): - get_version = app.get_version - if callable(get_version): - version = get_version() - else: - version = get_version - elif hasattr(app, 'VERSION'): - version = app.VERSION - elif hasattr(app, '__version__'): - version = app.__version__ - else: + version = _get_version_from_app(app) + + if version is None: continue if isinstance(version, (list, tuple)):
Fixing erroneous missing line in code coverage report I guess this is a bug in coverage.py somewhere, but previously the "continue" for where we couldn't get version info from an app in INSTALLED_APPS was tagged as missing from tests. Shuffling the code around made this go away, and I think makes for more readable code.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -28,7 +28,7 @@ with open('LICENSE.md') as fh: license = fh.read() setup(name='pysasl', - version='0.6.1', + version='0.6.2', author='Ian Good', author_email='icgood@gmail.com', description='Pure Python SASL client and server library.', @@ -39,7 +39,7 @@ setup(name='pysasl', python_requires='~=3.6', include_package_data=True, packages=find_packages(), - install_requiers=['typing-extensions'], + install_requires=['typing-extensions'], extras_require={'passlib': ['passlib']}, entry_points={'pysasl.mechanisms': [ 'crammd5 = pysasl.crammd5:CramMD5Mechanism',
Fix keyword typo in setup.py
py
diff --git a/visidata/_types.py b/visidata/_types.py index <HASH>..<HASH> 100644 --- a/visidata/_types.py +++ b/visidata/_types.py @@ -106,7 +106,7 @@ def currency(*args): class vlen(int): - def __new__(cls, v): + def __new__(cls, v=0): if isinstance(v, (vlen, int, float)): return super(vlen, cls).__new__(cls, v) else:
[vlen] add a minimum value of 0 for vlen() Closes #<I> Numeric binning requires each type to have a default minimum value when calling [type]()
py
diff --git a/dagfactory/utils.py b/dagfactory/utils.py index <HASH>..<HASH> 100644 --- a/dagfactory/utils.py +++ b/dagfactory/utils.py @@ -118,6 +118,8 @@ def get_python_callable(python_callable_name, python_callable_file): :type: callable """ + python_callable_file = os.path.expandvars(python_callable_file) + if not os.path.isabs(python_callable_file): raise Exception("`python_callable_file` must be absolute path")
Add environment variable support (#<I>) Add support for callable file path to use environment variable and not get error on get_python_callable function.
py
diff --git a/ocrd_utils/ocrd_utils/logging.py b/ocrd_utils/ocrd_utils/logging.py index <HASH>..<HASH> 100644 --- a/ocrd_utils/ocrd_utils/logging.py +++ b/ocrd_utils/ocrd_utils/logging.py @@ -97,6 +97,7 @@ def initLogging(): level=logging.INFO, format='%(asctime)s.%(msecs)03d %(levelname)s %(name)s - %(message)s', datefmt='%H:%M:%S') + logging.getLogger('').setLevel(logging.INFO) # logging.getLogger('ocrd.resolver').setLevel(logging.INFO) # logging.getLogger('ocrd.resolver.download_to_directory').setLevel(logging.INFO) # logging.getLogger('ocrd.resolver.add_files_to_mets').setLevel(logging.INFO)
[app][fix] return explict rootLogger level set
py
diff --git a/spyderlib/widgets/shell.py b/spyderlib/widgets/shell.py index <HASH>..<HASH> 100644 --- a/spyderlib/widgets/shell.py +++ b/spyderlib/widgets/shell.py @@ -15,7 +15,8 @@ import sys, os, time import os.path as osp from PyQt4.QtGui import (QMenu, QApplication, QCursor, QToolTip, QKeySequence, - QFileDialog, QMessageBox, QMouseEvent) + QFileDialog, QMessageBox, QMouseEvent, QTextCursor, + QTextCharFormat) from PyQt4.QtCore import Qt, QString, QCoreApplication, SIGNAL, pyqtProperty # For debugging purpose: @@ -94,6 +95,12 @@ class ShellBaseWidget(ConsoleBaseWidget): def set_font(self, font): """Set shell styles font""" self.set_pythonshell_font(font) + cursor = self.textCursor() + cursor.select(QTextCursor.Document) + charformat = QTextCharFormat() + charformat.setFontFamily(font.family()) + charformat.setFontPointSize(font.pointSize()) + cursor.mergeCharFormat(charformat) #------ Context menu
Shell/bugfix: font was not changed in already opened shell
py
diff --git a/riotwatcher/riotwatcher.py b/riotwatcher/riotwatcher.py index <HASH>..<HASH> 100644 --- a/riotwatcher/riotwatcher.py +++ b/riotwatcher/riotwatcher.py @@ -190,6 +190,7 @@ class LoLException(Exception): error_400 = "Bad request" error_401 = "Unauthorized" +error_403 = "Blacklisted key" error_404 = "Game data not found" error_429 = "Too many requests" error_500 = "Internal server error" @@ -201,6 +202,8 @@ def raise_status(response): raise LoLException(error_400, response) elif response.status_code == 401: raise LoLException(error_401, response) + elif response.status_code == 403: + raise LoLException(error_403, response) elif response.status_code == 404: raise LoLException(error_404, response) elif response.status_code == 429:
Add check for <I> response code.
py
diff --git a/check_manifest.py b/check_manifest.py index <HASH>..<HASH> 100755 --- a/check_manifest.py +++ b/check_manifest.py @@ -381,7 +381,7 @@ class Bazaar(VCS): codecs.lookup('oem') except LookupError: pass - else: + else: # pragma: nocover return 'oem' # Based on bzrlib.osutils.get_terminal_encoding() encoding = getattr(sys.stdout, 'encoding', None)
Can't really test this code path on Linux
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ with open('CHANGES.txt') as changes: setup_params = dict( name="hgtools", - version=hgtools.plugins.calculate_version(options=dict(increment='0.0.1')), + version=hgtools.plugins.calculate_version(options=dict(increment='0.1')), author="Jannis Leidel/Jason R. Coombs", author_email="jaraco@jaraco.com", url="https://bitbucket.org/jaraco/hgtools/",
Next release will be <I>
py
diff --git a/luminoso_api/client.py b/luminoso_api/client.py index <HASH>..<HASH> 100644 --- a/luminoso_api/client.py +++ b/luminoso_api/client.py @@ -293,6 +293,7 @@ class LuminosoClient(object): This is only generally useful for specific URLs, such as documentation. """ + params = jsonify_parameters(params) url = ensure_trailing_slash(self.url + path.lstrip('/')) return self._request('get', url, params=params).text
get_raw now jsonifies its parameters. Why did it not before? Who knows, really.
py
diff --git a/pyflare/client.py b/pyflare/client.py index <HASH>..<HASH> 100755 --- a/pyflare/client.py +++ b/pyflare/client.py @@ -70,7 +70,7 @@ class PyflareClient(object): 'z': zone }) has_more = records['response']['recs']['has_more'] - current_count = records['response']['recs']['count'] + current_count += records['response']['recs']['count'] for record in records['response']['recs']['objs']: yield record
accumulate over count, instead of taking it as an absolute offset. records['response']['recs']['count'] is the number of items in the latest batch of records, but it was treated as though it was an absolute offset within the list of all records. This would manifest as an infinite loop whenever you get more than two batches.
py
diff --git a/zipline/algorithm.py b/zipline/algorithm.py index <HASH>..<HASH> 100644 --- a/zipline/algorithm.py +++ b/zipline/algorithm.py @@ -1281,7 +1281,7 @@ class TradingAlgorithm(object): """ days = self.trading_environment.trading_days start_date_loc = days.get_loc(start_date) - sim_end = self.sim_params.period_end + sim_end = self.sim_params.last_close.normalize() end_loc = min(start_date_loc + 252, days.get_loc(sim_end)) end_date = days[end_loc] return self.engine.factor_matrix(
BUG: Use normed last_close instead of period_end. `period_end` can be outside the range of data for which we have dates. `last_close` properly gets pulled back to the last date for which we actually have data. We should consider whether or not we need to be storing period_end at all.
py
diff --git a/src/toil/batchSystems/slurm.py b/src/toil/batchSystems/slurm.py index <HASH>..<HASH> 100644 --- a/src/toil/batchSystems/slurm.py +++ b/src/toil/batchSystems/slurm.py @@ -42,9 +42,9 @@ class SlurmBatchSystem(AbstractGridEngineBatchSystem): currentjobs = dict((str(self.batchJobIDs[x][0]), x) for x in self.runningJobs) # currentjobs is a dictionary that maps a slurm job id (string) to our own internal job id # squeue arguments: - # -h for no header + # --noheader for no header # --format to get jobid i, state %t and time days-hours:minutes:seconds - + lines = subprocess.check_output(['squeue', '--noheader', '--format="%i %t %M"']).split('\n') for line in lines: values = line.split()
Update slurm.py
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -20,8 +20,8 @@ setup( # https://packaging.python.org/en/latest/single_source_version.html version='0.9.0', - description='A fully featured python package for quaternion representation, manipulation, 3D rotation and animation.', - long_description="A fully featured python package for quaternion representation, manipulation, 3D rotation and animation.", + description='A fully featured, pythonic library for representing and using quaternions.', + long_description="A fully featured, pythonic library for quaternion representation, manipulation, 3D animation and geometry.", # The project's main homepage. download_url='https://github.com/KieranWynn/pyquaternion',
Enhanced description for setup.py
py
diff --git a/paypal/standard/ipn/tests/test_ipn.py b/paypal/standard/ipn/tests/test_ipn.py index <HASH>..<HASH> 100644 --- a/paypal/standard/ipn/tests/test_ipn.py +++ b/paypal/standard/ipn/tests/test_ipn.py @@ -120,6 +120,6 @@ class IPNTest(TestCase): self.client.post("/ipn/", IPN_POST_PARAMS) self.client.post("/ipn/", IPN_POST_PARAMS) self.assertEqual(len(PayPalIPN.objects.all()), 2) - ipn_obj = PayPalIPN.objects.order_by('-created_at')[0] + ipn_obj = PayPalIPN.objects.order_by('-created_at', '-pk')[0] self.assertEqual(ipn_obj.flag, True) - self.assertEqual(ipn_obj.flag_info, "Duplicate txn_id. (51403485VH153354B)") \ No newline at end of file + self.assertEqual(ipn_obj.flag_info, "Duplicate txn_id. (51403485VH153354B)")
Making standard.ipn tests pass. On MySQL, ordering by date does not give the desired ordering of getting the most recently inserted row first. We need to order by the primary key too.
py
diff --git a/src/xray/utils.py b/src/xray/utils.py index <HASH>..<HASH> 100644 --- a/src/xray/utils.py +++ b/src/xray/utils.py @@ -287,13 +287,6 @@ def dict_equal(first, second): return False elif v1 != v2: return False - if isinstance(v1, np.ndarray) != isinstance(v2, np.ndarray): - return False # one is an ndarray, other is not - elif (isinstance(v1, np.ndarray) and isinstance(v2, np.ndarray)): - if not np.array_equal(v1, v2): - return False - elif v1 != v2: - return False return True def ordered_dict_intersection(first_dict, second_dict, compat=operator.eq):
Reverted an accidental change to utils.dict_equal
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -52,6 +52,7 @@ setuptools.setup( author='Niklas Rosenstein', author_email='rosensteinniklas@gmail.com', packages=setuptools.find_packages(), + package_data={'nr': ['tools/licenses/*/*']}, install_requires=[], entry_points = { 'console_scripts': [
setup.py: include license data files
py
diff --git a/tests/test_cpuid.py b/tests/test_cpuid.py index <HASH>..<HASH> 100644 --- a/tests/test_cpuid.py +++ b/tests/test_cpuid.py @@ -28,20 +28,24 @@ class MockCPUID(CPUID): b"\x0f\xa2" # cpuid b"\xC3",): # ret return 0x8000001f - # get_info - elif byte_code == \ + + # get_cache + if byte_code == \ (b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006 b"\x0f\xa2" # cpuid b"\x89\xC8" # mov ax,cx b"\xC3",): # ret)) return 0x2006140 - elif byte_code == \ + + # get_info + if byte_code == \ (self._one_eax(), # mov eax,0x1" b"\x0f\xa2" # cpuid b"\xC3",): # ret return 0x800f82 + # get_processor_brand - elif byte_code == \ + if byte_code == \ (b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002 b"\x0f\xa2" # cpuid b"\x89\xC0" # mov ax,ax
Cleanup. Moved each test for CPUID check into own if tree.
py
diff --git a/worker/buildbot_worker/__init__.py b/worker/buildbot_worker/__init__.py index <HASH>..<HASH> 100644 --- a/worker/buildbot_worker/__init__.py +++ b/worker/buildbot_worker/__init__.py @@ -57,7 +57,14 @@ def getVersion(init_file): if (not p.returncode) and out: v = VERSION_MATCH.search(out) if v: - return v.group(1) + version = v.group(1) + # Always return version of type str on Python 2 and 3. + if isinstance(version, str): + # Python 2 + return version + else: + # Python 3 + return version.decode("utf-8") except OSError: pass
Make sure that getVersion() always returns type str. Popen.communicate() returns bytes on Python 3, so we must convert it to str. This fixes "pip install -e worker" on Python 3.
py
diff --git a/sacad/sources/base.py b/sacad/sources/base.py index <HASH>..<HASH> 100644 --- a/sacad/sources/base.py +++ b/sacad/sources/base.py @@ -5,6 +5,7 @@ import logging import operator import os import random +import string import unicodedata import urllib.parse @@ -57,6 +58,8 @@ class CoverSource(metaclass=abc.ABCMeta): async def search(self, album, artist): """ Search for a given album/artist and return an iterable of CoverSourceResult. """ self.logger.debug("Searching with source '%s'..." % (self.__class__.__name__)) + album = __class__.unpunctuate(album) + artist = __class__.unpunctuate(artist) url_data = self.getSearchUrl(album, artist) if isinstance(url_data, tuple): url, post_data = url_data @@ -161,6 +164,11 @@ class CoverSource(metaclass=abc.ABCMeta): """ Replace accentuated chars in string by their non accentuated equivalent. """ return "".join(c for c in unicodedata.normalize("NFKD", s) if not unicodedata.combining(c)) + @staticmethod + def unpunctuate(s): + """ Remove punctuation from string s. """ + return "".join(c for c in s if c not in string.punctuation) + @abc.abstractmethod def getSearchUrl(self, album, artist): """
Remove punctuation when searching Improves results with some sources like Last.fm
py
diff --git a/bika/lims/jsonapi/read.py b/bika/lims/jsonapi/read.py index <HASH>..<HASH> 100644 --- a/bika/lims/jsonapi/read.py +++ b/bika/lims/jsonapi/read.py @@ -117,6 +117,7 @@ def read(context, request): except: val = str(val) obj_data[fieldname] = val + obj_data['path'] = "/".join(obj.getPhysicalPath()) ret['objects'].append(obj_data) return ret
ijsonapi read: Add a path to each returned object.
py
diff --git a/alignak/notification.py b/alignak/notification.py index <HASH>..<HASH> 100644 --- a/alignak/notification.py +++ b/alignak/notification.py @@ -267,8 +267,8 @@ class Notification(Action): data = {'_id': self._id} self.fill_data_brok_from(data, 'full_status') - b = Brok('notification_raise', data) - return b + brok = Brok('notification_raise', data) + return brok def __getstate__(self): """Call by pickle for dataify the comment
Enh: Pylint - C<I> on variable names in notification.py
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -46,5 +46,9 @@ setup( classifiers=['License :: OSI Approved :: Apache Software License', 'Intended Audience :: Developers', 'Operating System :: OS Independent', + 'Programming Language :: Python :: 2.6', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3.3', + 'Programming Language :: Python :: 3.4', 'Topic :: Database'] )
Add explicitly the supported versions for PyPI: <I>, <I>, <I> and <I>
py
diff --git a/digitalocean/baseapi.py b/digitalocean/baseapi.py index <HASH>..<HASH> 100644 --- a/digitalocean/baseapi.py +++ b/digitalocean/baseapi.py @@ -106,7 +106,6 @@ class BaseAPI(object): all_data = data while data.get("links", {}).get("pages", {}).get("next"): url, query = data["links"]["pages"]["next"].split("?", 1) - print(params) # Merge the query parameters for key, value in urlparse.parse_qs(query).items():
Removed debug print statement (fixes #<I>)
py
diff --git a/montblanc/impl/rime/tensorflow/RimeSolver.py b/montblanc/impl/rime/tensorflow/RimeSolver.py index <HASH>..<HASH> 100644 --- a/montblanc/impl/rime/tensorflow/RimeSolver.py +++ b/montblanc/impl/rime/tensorflow/RimeSolver.py @@ -713,8 +713,9 @@ class RimeSolver(MontblancTensorflowSolver): except (KeyboardInterrupt, SystemExit) as e: montblanc.log.exception('Solving interrupted') raise - except: + except Exception: montblanc.log.exception('Solving exception') + raise else: if self._should_trace: self._run_metadata.write(self._iterations)
Reraise exceptions instead of merely logging them (#<I>)
py
diff --git a/backtrader/feeds/csvgeneric.py b/backtrader/feeds/csvgeneric.py index <HASH>..<HASH> 100644 --- a/backtrader/feeds/csvgeneric.py +++ b/backtrader/feeds/csvgeneric.py @@ -87,10 +87,10 @@ class GenericCSVData(feed.CSVDataBase): def start(self): super(GenericCSVData, self).start() + self._dtstr = False if isinstance(self.p.dtformat, string_types): self._dtstr = True elif isinstance(self.p.dtformat, integer_types): - self._dtstr = False idt = int(self.p.dtformat) if idt == 1: self._dtconvert = lambda x: datetime.utcfromtimestamp(int(x))
Ensure a callable is taken in genericcsv
py
diff --git a/montblanc/tests/run_tests.py b/montblanc/tests/run_tests.py index <HASH>..<HASH> 100644 --- a/montblanc/tests/run_tests.py +++ b/montblanc/tests/run_tests.py @@ -68,7 +68,7 @@ def suite(): test_suite.addTest(unittest.makeSuite(TestSolver)) test_suite.addTest(unittest.makeSuite(TestUtils)) # Test recent code first, as it will be more likely to fail - test_suite.addTest(unittest.makeSuite(TestBiroV5)) + #test_suite.addTest(unittest.makeSuite(TestBiroV5)) test_suite.addTest(unittest.makeSuite(TestBiroV4)) test_suite.addTest(unittest.makeSuite(TestBiroV3)) test_suite.addTest(unittest.makeSuite(TestBiroV2))
Disable v5 test for now.
py
diff --git a/tests/test_embedded.py b/tests/test_embedded.py index <HASH>..<HASH> 100644 --- a/tests/test_embedded.py +++ b/tests/test_embedded.py @@ -532,8 +532,6 @@ class TestListEmbeddingIntegration(TestCase): OrderedDict((('name', "Baz"), ('foo', 321))) ] } - import pdb - pdb.set_trace() serializer = ListEmbeddingSerializer(instance, data=data) assert serializer.is_valid(), serializer.errors
Removed leftover import pdb; pdb.set_trace().
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -36,7 +36,8 @@ setup( ], test_suite='nose.collector', url='https://github.com/Nextdoor/nose-leak-detector', - download_url='https://github.com/Nextdoor/nose-leak-detector/archive/0.1.1.tar.gz', + download_url='https://github.com/Nextdoor/nose-leak-detector/archive/0.1.1' + '#egg=nose-leak-detector-0.0.1', include_package_data=True, entry_points=""" [nose.plugins.0.10]
update download url so that pip can parse it
py
diff --git a/benchbuild/source/base.py b/benchbuild/source/base.py index <HASH>..<HASH> 100644 --- a/benchbuild/source/base.py +++ b/benchbuild/source/base.py @@ -154,7 +154,7 @@ class Versioned(Protocol): ... -class FetchableSource(Fetchable, Expandable, Versioned): +class FetchableSource: """ Base class for fetchable sources.
fix(sources): do not use protocol class as ABC This fixes #<I>.
py
diff --git a/beautysh/beautysh.py b/beautysh/beautysh.py index <HASH>..<HASH> 100755 --- a/beautysh/beautysh.py +++ b/beautysh/beautysh.py @@ -232,9 +232,9 @@ class Beautify: if func_decl_style != None: stripped_record = self.change_function_style(stripped_record, func_decl_style) - # an ad-hoc solution for the "else" or "elif" keyword - else_case = (0, -1)[re.search(r'^(else|elif)', - test_record) is not None] + # an ad-hoc solution for the "else" or "elif ... then" keywords + else_case = (0, -1)[re.search(r'^(else|elif\s.*?;\s+?then)', test_record) is not None] + net = inc - outc tab += min(net, 0)
beautysh/beautysh.py: fix outdent issues for elif (#2, #<I>) Make the ad-hoc outdent (added in #2) apply only when `elif` is followed by `then` on the same line. In my testing, this addresses #<I> without breaking the #2 test case. Test case to be added in #<I>
py
diff --git a/microraiden/microraiden/proxy/resources/expensive.py b/microraiden/microraiden/proxy/resources/expensive.py index <HASH>..<HASH> 100644 --- a/microraiden/microraiden/proxy/resources/expensive.py +++ b/microraiden/microraiden/proxy/resources/expensive.py @@ -111,7 +111,7 @@ class Expensive(Resource): def get(self, content): log.info(content) if self.channel_manager.node_online() is False: - return "Ethereum node is not responding", 500 + return "Ethereum node is not responding", 502 try: data = RequestData(request.headers, request.cookies) except ValueError as e:
Changed E<I> to E<I> if ethereum node is not responding
py
diff --git a/cumulusci/tasks/robotframework/debugger/ui.py b/cumulusci/tasks/robotframework/debugger/ui.py index <HASH>..<HASH> 100644 --- a/cumulusci/tasks/robotframework/debugger/ui.py +++ b/cumulusci/tasks/robotframework/debugger/ui.py @@ -19,10 +19,10 @@ class DebuggerCli(cmd.Cmd, object): ) prompt = "rdb> " - def __init__(self, listener): - - # robot redirects sys.stdout, use the original handle - cmd.Cmd.__init__(self, stdout=sys.__stdout__) + # Robot redirects sys.stdout, so use the original handle + # or whatever is passed in. + def __init__(self, listener, stdout=sys.__stdout__): + cmd.Cmd.__init__(self, stdout=stdout) self.listener = listener self.builtin = BuiltIn() @@ -70,10 +70,10 @@ class DebuggerCli(cmd.Cmd, object): self._highlight_element(element) except InvalidSelectorException: - print("invalid locator '{}'".format(arg)) + print("invalid locator '{}'".format(arg), file=self.stdout) except Exception as e: - print(str(e)) + print(str(e), file=self.stdout) def do_pdb(self, arg): """Start pdb
Let caller define stdout (mainly for testing); make sure all print statements write to self.stdout
py
diff --git a/indra/sources/reach/processor.py b/indra/sources/reach/processor.py index <HASH>..<HASH> 100644 --- a/indra/sources/reach/processor.py +++ b/indra/sources/reach/processor.py @@ -203,10 +203,25 @@ class ReachProcessor(object): for a in reg['arguments']: if self._get_arg_type(a) == 'controller': controller = a.get('arg') + controllers = a.get('args') + # There is either a single controller here if controller is not None: controller_agent = \ self._get_agent_from_entity(controller) - break + # Or the controller is a complex + elif controllers is not None and len(controllers) >= 2: + # This is actually a dict and we need to get the + # values + controllers = list(controllers.values()) + controller_agent = \ + self._get_agent_from_entity(controllers[0]) + for controller in controllers[1:]: + controller_bound = \ + self._get_agent_from_entity(controller) + if controller_bound: + bc = BoundCondition(controller_bound, True) + controller_agent.bound_conditions.append(bc) + sentence = reg['verbose-text'] ev = Evidence(source_api='reach', text=sentence,
Extract complex controllers of RegulateAmounts
py
diff --git a/openupgradelib/openupgrade.py b/openupgradelib/openupgrade.py index <HASH>..<HASH> 100644 --- a/openupgradelib/openupgrade.py +++ b/openupgradelib/openupgrade.py @@ -543,16 +543,16 @@ def rename_models(cr, model_spec): cr.execute( 'UPDATE ir_model_data SET name=%s ' "WHERE name=%s AND model = 'ir.model'", - ( - 'model_' + _new, 'model_' + _old, - ) + ('model_' + _new, 'model_' + _old), ) cr.execute( - 'UPDATE ir_model_data SET name=regexp_replace(name, %s, %s) ' - "WHERE name like %s AND model = 'ir.model.fields'", - ( - '^field_' + _old, 'field_' + _new, 'field_' + _old + '_%', - ) + """UPDATE ir_model_data imd + SET name = 'field_' || '%s' || '_' || imf.name + FROM ir_model_fields imf + WHERE imd.model = 'ir.model.fields' + AND imd.name = 'field_' || '%s' || '_' || imf.name + AND imf.model = %s""", + (AsIs(_new), AsIs(_old), old), ) cr.execute('UPDATE ir_attachment SET res_model = %s ' 'WHERE res_model = %s', (new, old,))
[FIX] rename_models: Apply universal query for renaming IMD for fields
py
diff --git a/kafka_scanner/__init__.py b/kafka_scanner/__init__.py index <HASH>..<HASH> 100644 --- a/kafka_scanner/__init__.py +++ b/kafka_scanner/__init__.py @@ -475,10 +475,14 @@ class KafkaScanner(object): self.__issued_batches += 1 messages.append(message) newmark = time.time() + new_batchsize = self.__batchsize if newmark - mark > 180: - self.__batchsize = max(100, self.__batchsize / 2) - elif newmark - mark < 30: - self.__batchsize = min(self.__max_batchsize, self.__batchsize * 2) + new_batchsize = max(100, self.__batchsize / 2) + elif newmark - mark < 10: + new_batchsize = min(self.__max_batchsize, self.__batchsize * 2) + if self.__batchsize != new_batchsize: + self.__batchsize = new_batchsize + log.info("Batchsize adjusted to %d", self.__batchsize) else: break if messages:
reduce limit for increasing batchsize, and log when batchsize is changed
py
diff --git a/openquake/calculators/classical.py b/openquake/calculators/classical.py index <HASH>..<HASH> 100644 --- a/openquake/calculators/classical.py +++ b/openquake/calculators/classical.py @@ -105,7 +105,7 @@ def classical_split_filter(srcs, srcfilter, gsims, params, monitor): if monitor.calc_id and nb > 1: try: logs.dbcmd('log', monitor.calc_id, datetime.utcnow(), 'INFO', - 'classical_split_filter:%d' % monitor.task_no, msg) + 'classical_split_filter#%d' % monitor.task_no, msg) except Exception: # a foreign key error in case of `oq run` is expected print(msg)
[skip CI] Former-commit-id: ace<I>ffe5d0c<I>f<I>e2bc3b<I>fd2cadfb<I>ec
py
diff --git a/pyrogram/__init__.py b/pyrogram/__init__.py index <HASH>..<HASH> 100644 --- a/pyrogram/__init__.py +++ b/pyrogram/__init__.py @@ -16,7 +16,7 @@ # You should have received a copy of the GNU Lesser General Public License # along with Pyrogram. If not, see <http://www.gnu.org/licenses/>. -__version__ = "1.0.6" +__version__ = "1.0.7" __license__ = "GNU Lesser General Public License v3 or later (LGPLv3+)" __copyright__ = "Copyright (C) 2017-2020 Dan <https://github.com/delivrance>"
Update Pyrogram to <I>
py
diff --git a/eqcorrscan/core/match_filter.py b/eqcorrscan/core/match_filter.py index <HASH>..<HASH> 100644 --- a/eqcorrscan/core/match_filter.py +++ b/eqcorrscan/core/match_filter.py @@ -2628,6 +2628,11 @@ class Tribe(object): .. Note:: Methods: `from_contbase`, `from_sfile` and `from_sac` are not supported by Tribe.construct and must use Template.construct. + + .. Note:: + The Method `multi_template_gen` is not supported because the + processing parameters for the stream are not known. Use + `from_meta_file` instead. .. Note:: Templates will be named according to their start-time. """
Update doc-string for tribe.construct Addresses issue #<I>
py
diff --git a/txt2boil/version.py b/txt2boil/version.py index <HASH>..<HASH> 100644 --- a/txt2boil/version.py +++ b/txt2boil/version.py @@ -1 +1 @@ -version = '0.5.6' +version = '0.6'
Bumped version number to <I>
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -44,7 +44,7 @@ with open(os.path.join(REQUIREMENT_DIR, "test_requirements.txt")) as f: setuptools_require = ["setuptools>=38.3.0"] excel_requires = ["excelrd>=2.0.2"] -markdown_requires = ["Markdown>=2.6.6,<3"] +markdown_requires = ["Markdown>=2.6.6,<4"] mediawiki_requires = ["pypandoc"] sqlite_requires = ["SimpleSQLite>=1.1.1,<2"] gs_requires = ["gspread", "oauth2client", "pyOpenSSL"] + sqlite_requires
Loosen version constraints of Markdown package
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ setup(name='playback', author_email='jiasir@icloud.com', url='https://github.com/jiasir/playback/', license='MIT License', - install_requires=['ansible'], + install_requires=['ansible', 'fabric'], packages=find_packages('libs'), package_dir={'': 'libs'}, scripts=[
Using fabric library for simplifying system administration tasks
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -48,7 +48,7 @@ install_requires = [ 'tzlocal~=1.2', ] -if tuple(map(int, setuptools.__version__.split("."))) < (6, 0, 0): +if tuple(map(int, setuptools.__version__.split(".")[:3])) < (6, 0, 0): for i, item in enumerate(install_requires): install_requires[i] = item.replace("~=", ">=")
setup: fix version check for odd versions of setuptools
py
diff --git a/pep517/build.py b/pep517/build.py index <HASH>..<HASH> 100644 --- a/pep517/build.py +++ b/pep517/build.py @@ -19,8 +19,9 @@ def validate_system(system): """ required = {'requires', 'build-backend'} if required > set(system): - missing = required - set(system) - message = "Missing required fields: {missing}".format(**locals()) + message = "Missing required fields: {missing}".format( + missing=required-set(system), + ) raise ValueError(message)
Inline the 'missing' name to avoid locals invocation.
py
diff --git a/bika/lims/utils.py b/bika/lims/utils.py index <HASH>..<HASH> 100644 --- a/bika/lims/utils.py +++ b/bika/lims/utils.py @@ -372,15 +372,19 @@ class bika_browserdata(BrowserView): ## Get partition setup records for this service separate = service.getSeparate() containers = service.getContainer() - if not isinstance(containers, (list, tuple)): + if containers and not isinstance(containers, (list, tuple)): containers = [containers,] + else: + containers = [] containers.sort(lambda a,b:cmp( int(a.getJSCapacity() and a.getJSCapacity().split(" ")[0] or '0'), int(b.getJSCapacity() and b.getJSCapacity().split(" ")[0] or '0') )) preservations = service.getPreservation() - if not isinstance(preservations, (list, tuple)): + if preservations and not isinstance(preservations, (list, tuple)): preservations = [preservations,] + else: + preservations = [] partsetup = service.getPartitionSetup() # Single values become lists here
Further compensate for Partitions and Containers being either singular or plural
py
diff --git a/test/unit/__init__.py b/test/unit/__init__.py index <HASH>..<HASH> 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -1 +1,3 @@ """__init__.py""" +import logging +logging.disable(logging.CRITICAL)
Quiet logging while tests are running
py
diff --git a/bika/lims/content/analysis.py b/bika/lims/content/analysis.py index <HASH>..<HASH> 100644 --- a/bika/lims/content/analysis.py +++ b/bika/lims/content/analysis.py @@ -32,14 +32,6 @@ class Analysis(AbstractRoutineAnalysis): return sample @security.public - def getClientSampleID(self): - """Used to populate catalog values. - """ - sample = self.getSample() - if sample: - return sample.getClientSampleID() - - @security.public def getSiblings(self): """Returns the list of analyses of the Analysis Request to which this analysis belongs to, but with the current analysis excluded
getClientSampleID method removed
py
diff --git a/allianceauth/services/modules/discord/tasks.py b/allianceauth/services/modules/discord/tasks.py index <HASH>..<HASH> 100644 --- a/allianceauth/services/modules/discord/tasks.py +++ b/allianceauth/services/modules/discord/tasks.py @@ -5,7 +5,7 @@ from django.contrib.auth.models import User from django.core.exceptions import ObjectDoesNotExist from allianceauth.notifications import notify from celery import shared_task - +from requests.exceptions import HTTPError from allianceauth.services.hooks import NameFormatter from .manager import DiscordOAuthManager, DiscordApiBackoff from .models import DiscordUser @@ -72,6 +72,15 @@ class DiscordTasks: logger.info("Discord group sync API back off for %s, " "retrying in %s seconds" % (user, bo.retry_after_seconds)) raise task_self.retry(countdown=bo.retry_after_seconds) + except HTTPError as e: + if e.response.status_code == 404: + try: + if e.response.json()['code'] == 10007: + # user has left the server + DiscordTasks.delete_user(user) + return + finally: + raise e except Exception as e: if task_self: logger.exception("Discord group sync failed for %s, retrying in 10 mins" % user)
Delete Discord users if they've left the server. Closes #<I>
py
diff --git a/tests/settings.py b/tests/settings.py index <HASH>..<HASH> 100644 --- a/tests/settings.py +++ b/tests/settings.py @@ -21,7 +21,7 @@ else: # local sqlite database file DATABASES['default'] = { 'ENGINE': 'django.db.backends.sqlite3', - 'NAME': INSTALL_DIR + 'db.sqlite3', + 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), 'USER': '', 'PASSWORD': '', 'HOST': '',
Changes the location of db.sqlite3 for the test On linux based systems, the previous default location for db.sqlite3 needs super-user privileges to write to, which seems unnecessary. Additionally, the error message is not very informative ('unable to open database file')
py
diff --git a/indra/explanation/model_checker.py b/indra/explanation/model_checker.py index <HASH>..<HASH> 100644 --- a/indra/explanation/model_checker.py +++ b/indra/explanation/model_checker.py @@ -739,6 +739,7 @@ class PysbModelChecker(ModelChecker): return self.get_im() def process_statement(self, stmt): + self.get_im() # Check if this is one of the statement types that we can check if not isinstance(stmt, (Modification, RegulateAmount, RegulateActivity, Influence)):
Make sure im is generated before processing stmt
py
diff --git a/thoth/solver/python/python.py b/thoth/solver/python/python.py index <HASH>..<HASH> 100644 --- a/thoth/solver/python/python.py +++ b/thoth/solver/python/python.py @@ -274,9 +274,19 @@ def _do_resolve_index( unresolved.append({"package_name": dependency.name, "version_spec": version_spec, "index": index_url}) else: for version in resolved_versions: - entry = (dependency.name, version) - packages_seen.add(entry) - queue.append(entry) + if not subgraph_check_api or ( + subgraph_check_api and + _should_resolve_subgraph(subgraph_check_api, dependency.name, version, index_url) + ): + entry = (dependency.name, version) + packages_seen.add(entry) + queue.append(entry) + else: + _LOGGER.info( + "Direct dependency %r in version % from %r was already resolved in one " + "of the previous solver runs based on sub-graph check", + dependency.name, version, index_url + ) while queue: package_name, package_version = queue.pop()
Do sub-graph checks also on direct dependencies To optimize solver runs inside the cluster, let's also perform sub-graph checks on resolved direct dependencies.
py
diff --git a/txtorcon/controller.py b/txtorcon/controller.py index <HASH>..<HASH> 100644 --- a/txtorcon/controller.py +++ b/txtorcon/controller.py @@ -42,7 +42,7 @@ from .interface import ITor try: from .controller_py3 import _AsyncOnionAuthContext HAVE_ASYNC = True -except SyntaxError: +except Exception: HAVE_ASYNC = False if sys.platform in ('linux', 'linux2', 'darwin'):
figure out if we have async in an even more robust way
py
diff --git a/meshio/med/_med.py b/meshio/med/_med.py index <HASH>..<HASH> 100644 --- a/meshio/med/_med.py +++ b/meshio/med/_med.py @@ -81,7 +81,7 @@ def read(filename): # Information for point tags point_tags = {} - fas = f["FAS"][mesh_name] + fas = mesh["FAS"] if "FAS" in mesh else f["FAS"][mesh_name] if "NOEUD" in fas: point_tags = _read_families(fas["NOEUD"])
update med: FAS can also be in mesh object
py
diff --git a/firebirdsql/fbcore.py b/firebirdsql/fbcore.py index <HASH>..<HASH> 100644 --- a/firebirdsql/fbcore.py +++ b/firebirdsql/fbcore.py @@ -300,7 +300,7 @@ def parse_xsqlda(buf, connection, stmt_handle): xsqlda = [None] * col_len next_index = parse_select_items(buf[11+l:], xsqlda, connection) while next_index > 0: # more describe vars - self.connection._op_info_sql(stmt_handle, + connection._op_info_sql(stmt_handle, bytes([isc_info_sql_sqlda_start, 2]) + int_to_bytes(next_index, 2) + INFO_SQL_SELECT_DESCRIBE_VARS)
bugfix reported from Ivica Paleka <EMAIL>
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -15,7 +15,7 @@ with open('LICENSE') as fl: setup( name='CurrencyConverter', - version='0.14', + version='0.14.1', author='Alex Prengère', author_email='alexprengere@gmail.com', url='https://github.com/alexprengere/currencyconverter',
Bump to version <I>
py
diff --git a/rnftools/mishmash/wgSim.py b/rnftools/mishmash/wgSim.py index <HASH>..<HASH> 100644 --- a/rnftools/mishmash/wgSim.py +++ b/rnftools/mishmash/wgSim.py @@ -164,7 +164,7 @@ class WgSim(Source): fai=self._fai_fn, genome_id=self.genome_id, wgsim_fastq_1=self._tmp_fq1_fn, - wgsim_fastq_2=self._tmp_fq2_fn if self._reads_in_tuple==1 else None, + wgsim_fastq_2=self._tmp_fq2_fn if self._reads_in_tuple==2 else None, number_of_read_tuples=10**9, allow_unmapped=False, ) @@ -229,8 +229,8 @@ class WgSim(Source): if i%4==0: segments=[] - bases=["",""] - qualities=["",""] + #bases=[] + #qualities=[] m = wgsim_pattern.search(lines[0]) if m is None:
WgSim static method bug corrected
py
diff --git a/pywb/rewrite/test/test_rewrite_live.py b/pywb/rewrite/test/test_rewrite_live.py index <HASH>..<HASH> 100644 --- a/pywb/rewrite/test/test_rewrite_live.py +++ b/pywb/rewrite/test/test_rewrite_live.py @@ -82,6 +82,10 @@ def test_example_domain_specific_3(): # comment out bootloader assert '/* Bootloader.configurePage' in buff +def test_wombat_top(): + status_headers, buff = get_rewritten('https://assets-cdn.github.com/assets/github-0f06d0f46fe7bcfbf31f2380f23aec15ba21b8ec.js', urlrewriter) + + assert 'WB_wombat_top!==window' in buff def test_post(): buff = BytesIO('ABC=DEF')
tests: add test for wombat top
py
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index <HASH>..<HASH> 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -431,7 +431,8 @@ class LinePlot(MPLPlot): else: self.axes[0].legend(loc='best') - condition = (df.index.is_all_dates + condition = (not self.has_ts_index + and df.index.is_all_dates and not self.subplots or (self.subplots and self.sharex))
BUG: don't autofmt_xdate if use special ts formatter
py
diff --git a/clint/textui/__init__.py b/clint/textui/__init__.py index <HASH>..<HASH> 100644 --- a/clint/textui/__init__.py +++ b/clint/textui/__init__.py @@ -7,7 +7,10 @@ clint.textui This module provides the text output helper system. """ - +import sys +if sys.platform.startswith('win'): + from ..packages import colorama + colorama.init() from . import colored from . import progress
Initialise colorama for textui on Windows. Closes gh-<I>
py
diff --git a/click_log/core.py b/click_log/core.py index <HASH>..<HASH> 100644 --- a/click_log/core.py +++ b/click_log/core.py @@ -50,12 +50,13 @@ class ColorFormatter(logging.Formatter): class ClickHandler(logging.Handler): + _use_stderr = True + def emit(self, record): try: msg = self.format(record) level = record.levelname.lower() - err = level in ('warning', 'error', 'exception', 'critical') - click.echo(msg, err=err) + click.echo(msg, err=self._use_stderr) except Exception: self.handleError(record)
don't use stdout, fix #<I>
py
diff --git a/sos/collector/clusters/satellite.py b/sos/collector/clusters/satellite.py index <HASH>..<HASH> 100644 --- a/sos/collector/clusters/satellite.py +++ b/sos/collector/clusters/satellite.py @@ -24,14 +24,15 @@ class satellite(Cluster): return _cmd % quote(_dbcmd % quote(query)) def get_nodes(self): - cmd = self._psql_cmd('select name from smart_proxies') + cmd = self._psql_cmd('copy (select name from smart_proxies) to stdout') res = self.exec_master_cmd(cmd, need_root=True) if res['status'] == 0: - idx = 2 - if 'could not change' in res['stdout']: - idx = 3 - nodes = [n.strip() for n in res['stdout'].splitlines()[idx:-1]] + nodes = [ + n.strip() for n in res['stdout'].splitlines() + if 'could not change directory' not in n + ] return nodes + return [] def set_node_label(self, node): if node.address == self.master.address:
[satellite] Simplify node enumeration Simplifies the node enumeration from the db query using the `copy` function of psql to dump the results to stdout without the DB header/footer.
py
diff --git a/examples/stock/company.v.py b/examples/stock/company.v.py index <HASH>..<HASH> 100644 --- a/examples/stock/company.v.py +++ b/examples/stock/company.v.py @@ -72,7 +72,7 @@ def transfer_stock(receiver: address, transfer_order: currency_value): self.holdings[receiver] += transfer_order # Allows the company to pay someone for services rendered -def give(vendor: address, amount: wei_value): +def pay_bill(vendor: address, amount: wei_value): # Only the company can pay people assert msg.sender == self.company # And only if there's enough to pay them with
Changed 'give' to 'pay_bill'
py
diff --git a/pywbem/cim_operations.py b/pywbem/cim_operations.py index <HASH>..<HASH> 100644 --- a/pywbem/cim_operations.py +++ b/pywbem/cim_operations.py @@ -3451,7 +3451,6 @@ class WBEMConnection(object): self.operation_recorder.stage_pywbem_args( method='CloseEnumeration', context=context, - MaxObjectCount=MaxObjectCount, **extra) try:
Fixed incorrect MaxObjectCount on recorder call for CloseEnumeration.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -49,7 +49,7 @@ with open(os.path.join(REQUIREMENT_DIR, "test_requirements.txt")) as f: SETUPTOOLS_REQUIRES = ["setuptools>=38.3.0"] PYTEST_RUNNER_REQUIRES = ["pytest-runner"] if need_pytest() else [] -dumps_requires = ["pytablewriter>=0.44.0,<1.0.0"] +dumps_requires = ["pytablewriter>=0.45.0,<1.0.0"] tests_requires = frozenset(tests_requires + dumps_requires) setuptools.setup(
Update dumps_requires
py
diff --git a/pythonforandroid/recipes/pymunk/__init__.py b/pythonforandroid/recipes/pymunk/__init__.py index <HASH>..<HASH> 100644 --- a/pythonforandroid/recipes/pymunk/__init__.py +++ b/pythonforandroid/recipes/pymunk/__init__.py @@ -10,7 +10,8 @@ class PymunkRecipe(CompiledComponentsPythonRecipe): def get_recipe_env(self, arch): env = super().get_recipe_env(arch) - env["LDFLAGS"] += " -llog" + env["LDFLAGS"] += " -llog" # Used by Chipmunk cpMessage + env["LDFLAGS"] += " -lm" # For older versions of Android return env
Fix Pymunk crash on older versions of Android Seems to be required to link -lm on at least <I>, but not on <I>
py
diff --git a/src/ossos-pipeline/ossos/fitsviewer/displayable.py b/src/ossos-pipeline/ossos/fitsviewer/displayable.py index <HASH>..<HASH> 100644 --- a/src/ossos-pipeline/ossos/fitsviewer/displayable.py +++ b/src/ossos-pipeline/ossos/fitsviewer/displayable.py @@ -349,7 +349,12 @@ class ErrEllipse(object): self.a = max(a, 10) self.b = max(b, 10) self.pa = pa - self.artist = Ellipse(self.center, self.a, self.b, self.pa, edgecolor='b', linewidth=3, facecolor='#E47833', alpha=0.1) + + angle = 90 - self.pa + + self.artist = Ellipse(self.center, self.a, self.b, angle=angle, + linewidth=3, edgecolor='b', facecolor='#E47833', + alpha=0.1) def add_to_axes(self, axes): self.artist.set_clip_box(axes.bbox)
Adjust the angle of the error ellipse due to difference in coordinate systems.
py
diff --git a/tests/util/test_util.py b/tests/util/test_util.py index <HASH>..<HASH> 100644 --- a/tests/util/test_util.py +++ b/tests/util/test_util.py @@ -178,10 +178,10 @@ def test_deepcopy_mixin(): pass class A: - def __init__(a): - a = a + def __init__(self, a): + self.a = a - def __deepcopy__(memo): + def __deepcopy__(self, memo): raise E class B(DeepcopyMixin, A):
fix incompatible deepcopy usage in test case
py
diff --git a/microsoftbotframework/response.py b/microsoftbotframework/response.py index <HASH>..<HASH> 100644 --- a/microsoftbotframework/response.py +++ b/microsoftbotframework/response.py @@ -143,9 +143,11 @@ class Response: "replyToId": reply_to_id, } - post_response = requests.post(response_url, json=response_json, headers=self.headers) - logger = logging.getLogger(__name__) + logger.info('response_headers: {}'.format(self.headers)) + logger.info('response_json: {}'.format(response_json)) + + post_response = requests.post(response_url, json=response_json, headers=self.headers) if post_response.status_code == 200 or post_response.status_code == 201: logger.info('Successfully posted to Microsoft Bot Connector. {}'.format(post_response.text))
Added logging for response json
py
diff --git a/openquake/nrmllib/hazard/parsers.py b/openquake/nrmllib/hazard/parsers.py index <HASH>..<HASH> 100644 --- a/openquake/nrmllib/hazard/parsers.py +++ b/openquake/nrmllib/hazard/parsers.py @@ -642,6 +642,8 @@ class HazardCurveParser(object): header['sa_damping'] = a.get('saDamping') header['statistics'] = a.get('statistics') header['quantile_value'] = a.get('quantileValue') + header['smlt_path'] = a.get('sourceModelTreePath') + header['gsimlt_path'] = a.get('gsimTreePath') header['imls'] = map(float, element[0].text.split()) yield header elif element.tag == self._CURVE_TAG and event == 'end':
hazard/parsers: Corrected hazard curve parser to extract logic tree metadata as well. Before, it was only designed to work with statistical results.
py
diff --git a/openquake/server/dbserver.py b/openquake/server/dbserver.py index <HASH>..<HASH> 100644 --- a/openquake/server/dbserver.py +++ b/openquake/server/dbserver.py @@ -112,7 +112,6 @@ class DbServer(object): def runserver(dbpathport=None, logfile=DATABASE['LOG'], loglevel='WARN'): - logging.basicConfig(level=getattr(logging, loglevel), filename=logfile) if dbpathport: # assume a string of the form "dbpath:port" dbpath, port = dbpathport.split(':') addr = (DATABASE['HOST'], int(port)) @@ -130,7 +129,8 @@ def runserver(dbpathport=None, logfile=DATABASE['LOG'], loglevel='WARN'): connection.cursor() # bind the db actions.upgrade_db() - # start the server + # configure logging and start the server + logging.basicConfig(level=getattr(logging, loglevel), filename=logfile) DbServer(addr, config.DBS_AUTHKEY).loop() parser = sap.Parser(runserver)
Configured the logger after the creation of the dbserver directory
py
diff --git a/ELiDE/ELiDE/timestream.py b/ELiDE/ELiDE/timestream.py index <HASH>..<HASH> 100644 --- a/ELiDE/ELiDE/timestream.py +++ b/ELiDE/ELiDE/timestream.py @@ -218,6 +218,7 @@ class TimestreamScreen(Screen): branch_lineage['trunk'] = trunk_lineage for row, branch in enumerate(sorted_branches): for turn in col2turn: + branch_split_turns_todo[branch].discard(turn) if branch == 'trunk' and turn == 0: data.append({ 'widget': 'ThornyRectangle', @@ -247,7 +248,6 @@ class TimestreamScreen(Screen): else: data.append({'widget': 'Widget'}) start_turn_branches[turn].discard(branch) - branch_split_turns_todo[branch].discard(turn) branch_split_turns_done[branch].add(turn) Logger.debug(f"Timestream: processed branch {branch}") self.timestream.cols = len(col2turn)
Don't always draw right thorn in timestream
py
diff --git a/py/h2o.py b/py/h2o.py index <HASH>..<HASH> 100644 --- a/py/h2o.py +++ b/py/h2o.py @@ -1370,7 +1370,7 @@ class H2O(object): h2b.browseJsonHistoryAsUrlLastMatch("RFView") return a - def generate_predictions(self, data_key, model_key, destination_key, timeoutSecs=300, print_params=True, **kwargs): + def generate_predictions(self, data_key, model_key, destination_key=None, timeoutSecs=300, print_params=True, **kwargs): params_dict = { 'data_key': data_key, 'model_key': model_key,
the way I had added destination_key param to h2o.generate_predictions() yesterday wasn't right. Probably made a handful of tests that use it with 2 params fail. Fixed it to a better way.
py
diff --git a/bingo/models.py b/bingo/models.py index <HASH>..<HASH> 100644 --- a/bingo/models.py +++ b/bingo/models.py @@ -220,7 +220,7 @@ class BingoBoard(models.Model): BingoField(word=word, board=self, position=None).save() def __unicode__(self): - return _(u"BingoBoard #{0} created by {1} (site {1})").format( + return _(u"BingoBoard #{0} created by {1} (site {2})").format( self.board_id, self.user if self.user else self.ip, self.game.site)
fix in BingoBoard.__unicode__, site showed the ip
py
diff --git a/neurom/features/__init__.py b/neurom/features/__init__.py index <HASH>..<HASH> 100644 --- a/neurom/features/__init__.py +++ b/neurom/features/__init__.py @@ -190,3 +190,31 @@ def feature(shape, namespace: NameSpace, name=None): # These imports are necessary in order to register the features from neurom.features import neurite, morphology, \ population # noqa, pylint: disable=wrong-import-position + + +def _features_catalogue(): + """Returns a string with all the available builtin features.""" + indentation = "\t" + preamble = "\n .. Builtin Features:\n" + + def format_category(category): + separator = "-" * len(category) + return f"\n{indentation}{category}\n{indentation}{separator}" + + def format_features(features): + prefix = f"\n{indentation}* " + return prefix + f"{prefix}".join(sorted(features)) + + return preamble + "".join( + [ + format_category(category) + format_features(features) + "\n" + for category, features in zip( + ("Population", "Morphology", "Neurite"), + (_POPULATION_FEATURES, _MORPHOLOGY_FEATURES, _NEURITE_FEATURES), + ) + ] + ) + + +# Update the get docstring to include all available builtin features +get.__doc__ += _features_catalogue()
Update `neurom.features.get` docstring with available builtin features (#<I>)
py
diff --git a/tests/perf/quick_perf.py b/tests/perf/quick_perf.py index <HASH>..<HASH> 100644 --- a/tests/perf/quick_perf.py +++ b/tests/perf/quick_perf.py @@ -25,7 +25,7 @@ import os, sys, socket, time -from example_test import background, verify, wait_addr, execute, pick_addr, cmdline +from example_test import Proc, pick_addr from subprocess import Popen, PIPE, STDOUT @@ -61,16 +61,16 @@ except: # Use Proton-C reactor-recv as a relatively fast loopback "broker" for these tests -server = cmdline("reactor-recv", "-a", linkaddr, "-c", str(mcount), "-R") +server = Proc(["reactor-recv", "-X", "listening", "-a", linkaddr, "-c", str(mcount), "-R"], ready="listening", + skip_valgrind=True, timeout=300) try: - recv = Popen(server, stdout=NULL, stderr=sys.stderr) - wait_addr(connaddr) start = time.time() - execute(*perf_target) + client = Proc(perf_target, skip_valgrind=True, timeout=300) + print client.wait_exit() + server.wait_exit() end = time.time() - verify(recv) except Exception as e: - if recv: recv.kill() + if server: server.safe_kill() raise Exception("Error running %s: %s", server, e)
PROTON-<I>: Fix quick_perf_xx targets.
py