diff
stringlengths
139
3.65k
message
stringlengths
8
627
diff_languages
stringclasses
1 value
diff --git a/scripts/Catalog.py b/scripts/Catalog.py index <HASH>..<HASH> 100644 --- a/scripts/Catalog.py +++ b/scripts/Catalog.py @@ -6,7 +6,7 @@ from git import Repo from scripts import PATH -from .utils import pbar, tprint +from .utils import get_logger, pbar from .importer.funcs import (get_bibauthor_dict, get_biberror_dict, get_extinctions_dict, get_repos_dict) @@ -22,15 +22,17 @@ class Catalog(): biberror_dict = get_biberror_dict() extinctions_dict = get_extinctions_dict() - def __init__(self, log): + log = get_logger() + + def __init__(self): all_repos = [self.repos_dict[x] for x in self.repos_dict] all_repos = [i for x in all_repos for i in x] for repo in pbar(all_repos): if not os.path.isdir(PATH.ROOT + "/" + repo): try: - log.warning('Cloning "' + repo + - '" (only needs to be done ' + - 'once, may take few minutes per repo).') + self.log.warning('Cloning "' + repo + + '" (only needs to be done ' + + 'once, may take few minutes per repo).') Repo.clone_from("git@github.com:astrocatalogs/" + repo + ".git", PATH.ROOT + "/" + repo)
MAINT: moved logger to Catalog object
py
diff --git a/OpenPNM/Network/__MatFile__.py b/OpenPNM/Network/__MatFile__.py index <HASH>..<HASH> 100644 --- a/OpenPNM/Network/__MatFile__.py +++ b/OpenPNM/Network/__MatFile__.py @@ -194,7 +194,7 @@ class MatFile(GenericNetwork): add_boundaries = False Ps = sp.where([pore not in boundary_pores for pore in self.pores()])[0] Ts = sp.where([throat not in boundary_throats for throat in self.throats()])[0] - geom = OpenPNM.Geometry.GenericGeometry(network=self,pores=Ps,throats=Ts) + geom = OpenPNM.Geometry.GenericGeometry(network=self,pores=Ps,throats=Ts,name='internal') geom['pore.volume'] = sp.ravel(sp.array(self._dictionary['pvolume'][self._pore_map[Ps]],float)) geom['pore.diameter'] = sp.ravel(sp.array(self._dictionary['pdiameter'][self._pore_map[Ps]],float)) geom['throat.diameter'] = sp.ravel(sp.array(self._dictionary['tdiameter'][self._throat_map[Ts]],float))
Correct the MatFile to import the geometries of the extracted PN
py
diff --git a/aiosc.py b/aiosc.py index <HASH>..<HASH> 100644 --- a/aiosc.py +++ b/aiosc.py @@ -44,7 +44,7 @@ def translate_pattern(pattern): c = pattern[i] if c == '/': j = i + 1 - if pattern[j] == '/': + if j < len(pattern) and pattern[j] == '/': result += OSC_ADDR_SLASH_REGEXP + '*\/' i = j else:
Check boundaries when looking ahead for // operator
py
diff --git a/pylibsrtp/__init__.py b/pylibsrtp/__init__.py index <HASH>..<HASH> 100644 --- a/pylibsrtp/__init__.py +++ b/pylibsrtp/__init__.py @@ -52,9 +52,13 @@ class Policy: Policy for an SRTP session. """ + #: Indicates an undefined SSRC type SSRC_UNDEFINED = _lib.ssrc_undefined + #: Indicates a specific SSRC value SSRC_SPECIFIC = _lib.ssrc_specific + #: Indicates any inbound SSRC value SSRC_ANY_INBOUND = _lib.ssrc_any_inbound + #: Indicates any inbound SSRC value SSRC_ANY_OUTBOUND = _lib.ssrc_any_outbound def __init__(self, key=None, ssrc_type=SSRC_UNDEFINED, ssrc_value=0):
[docs] document some Policy constants
py
diff --git a/tests/unit/modules/boto_lambda_test.py b/tests/unit/modules/boto_lambda_test.py index <HASH>..<HASH> 100644 --- a/tests/unit/modules/boto_lambda_test.py +++ b/tests/unit/modules/boto_lambda_test.py @@ -21,10 +21,12 @@ ensure_in_syspath('../../') # Import Salt libs import salt.config +import salt.ext.six as six import salt.loader from salt.modules import boto_lambda from salt.exceptions import SaltInvocationError from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin +import salt.utils # Import 3rd-party libs from tempfile import NamedTemporaryFile @@ -134,7 +136,10 @@ class BotoLambdaTestCaseBase(TestCase): class TempZipFile(object): def __enter__(self): with NamedTemporaryFile(suffix='.zip', prefix='salt_test_', delete=False) as tmp: - tmp.write('###\n') + to_write = '###\n' + if six.PY3: + to_write = salt.utils.to_bytes(to_write) + tmp.write(to_write) self.zipfile = tmp.name return self.zipfile
When writing strings to tmp files, use bytes for Python 3
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -273,6 +273,12 @@ setup( 'data/*/*', ], }, + exclude_package_data={ + 'mimesis': [ + # It's for development. + 'data/locale_template/*' + ], + }, data_files=[ ('', ['LICENSE', 'PYPI_README.rst',
Excluded locale_template from final package
py
diff --git a/dialogs/grid_frame3.py b/dialogs/grid_frame3.py index <HASH>..<HASH> 100755 --- a/dialogs/grid_frame3.py +++ b/dialogs/grid_frame3.py @@ -620,8 +620,7 @@ class GridFrame(wx.Frame): # class GridFrame(wx.ScrolledWindow): if self.remove_cols_mode: self.remove_col_label(event) else: - pass # ** no drop_down_menus working yet - #self.drop_down_menu.on_label_click(event) + self.drop_down_menu.on_label_click(event) else: if event.Col < 0 and self.grid_type != 'age': self.onSelectRow(event)
magic gui: reactivate multiple cell editing
py
diff --git a/pqdict/__init__.py b/pqdict/__init__.py index <HASH>..<HASH> 100644 --- a/pqdict/__init__.py +++ b/pqdict/__init__.py @@ -545,7 +545,7 @@ def nlargest(n, mapping, key=None): it = mapping.iteritems() except AttributeError: it = iter(mapping.items()) - pq = minpq(key=key) + pq = pqdict(key=key, precedes=lt) try: for i in range(n): pq.additem(*next(it))
use pqdict instead of minpq in nlargest
py
diff --git a/webassets_elm/__init__.py b/webassets_elm/__init__.py index <HASH>..<HASH> 100644 --- a/webassets_elm/__init__.py +++ b/webassets_elm/__init__.py @@ -53,7 +53,7 @@ class Elm(ExternalTool): with TemporaryDirectory("w+") as tempd: outf = os.path.join(tempd, "output.js") - write_args = [*args, '--output', outf] + write_args = args + ['--output', outf] self.subprocess(write_args, StringIO(), cwd=source_dir) with open(outf, "r") as inf: shutil.copyfileobj(inf, out)
Keep compatibility with Python versions <I> and <I>
py
diff --git a/tests/search/test_local.py b/tests/search/test_local.py index <HASH>..<HASH> 100644 --- a/tests/search/test_local.py +++ b/tests/search/test_local.py @@ -1,7 +1,8 @@ # coding=utf-8 import unittest from tests.search.dummies import DummyProblem, GOAL, DummyGeneticProblem -from simpleai.search.local import (beam, hill_climbing, +from simpleai.search.local import (beam, beam_best_first, + hill_climbing, hill_climbing_stochastic, simulated_annealing, hill_climbing_random_restarts, genetic) @@ -17,6 +18,10 @@ class TestLocalSearch(unittest.TestCase): result = beam(self.problem) self.assertEquals(result.state, GOAL) + def test_beam_best_first(self): + result = beam_best_first(self.problem) + self.assertEquals(result.state, GOAL) + def test_hill_climbing(self): result = hill_climbing(self.problem) self.assertEquals(result.state, GOAL)
Added test for beam_best_first algorithm
py
diff --git a/zappa/cli.py b/zappa/cli.py index <HASH>..<HASH> 100644 --- a/zappa/cli.py +++ b/zappa/cli.py @@ -1239,7 +1239,7 @@ def handle(): # pragma: no cover cli.remove_uploaded_zip() cli.remove_local_zip() - click.echo("Oh no! An " + click.style("error occured", fg='red', bold=True) + "! :(") + click.echo("Oh no! An " + click.style("error occurred", fg='red', bold=True) + "! :(") click.echo("\n==============\n") import traceback traceback.print_exc()
Fixed minor typo in `zappa/cli.py`
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -26,4 +26,17 @@ setup( include_package_data=True, packages=['kombine'], install_requires=['numpy', 'scipy'], + classifiers=[ + 'Development Status :: 4 - Beta', + 'Programming Language :: Python', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.6', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'License :: OSI Approved :: MIT License', + ], )
setup.py: added classifiers
py
diff --git a/pyt/vulnerabilities.py b/pyt/vulnerabilities.py index <HASH>..<HASH> 100644 --- a/pyt/vulnerabilities.py +++ b/pyt/vulnerabilities.py @@ -74,7 +74,7 @@ def identify_triggers( sources, sinks, lattice, - nosec_lines=set() + nosec_lines ): """Identify sources, sinks and sanitisers in a CFG.
removed set() from nosec_lines
py
diff --git a/zipline/finance/slippage.py b/zipline/finance/slippage.py index <HASH>..<HASH> 100644 --- a/zipline/finance/slippage.py +++ b/zipline/finance/slippage.py @@ -94,9 +94,8 @@ class VolumeShareSlippage(object): desired_order = total_order + open_amount - volume_share = direction * (desired_order) / event.volume - if volume_share > self.volume_limit: - volume_share = self.volume_limit + volume_share = min(direction * (desired_order) / event.volume, + self.volume_limit) simulated_amount = int(volume_share * event.volume * direction) simulated_impact = (volume_share) ** 2 \ * self.price_impact * direction * event.price
Uses `min` function in place of taking the minimum with an if statement.
py
diff --git a/tempora/timing.py b/tempora/timing.py index <HASH>..<HASH> 100644 --- a/tempora/timing.py +++ b/tempora/timing.py @@ -187,15 +187,17 @@ class BackoffDelay: >>> at_least_one = lambda n: max(n, 1) >>> bd = BackoffDelay(delay=0.01, factor=2, limit=at_least_one) - >>> bd() - >>> bd.delay + >>> next(bd) + 0.01 + >>> next(bd) 1 Pass a jitter to add or subtract seconds to the delay. >>> bd = BackoffDelay(jitter=0.01) - >>> bd() - >>> bd.delay + >>> next(bd) + 0 + >>> next(bd) 0.01 Jitter may be a callable. To supply a non-deterministic jitter @@ -204,8 +206,9 @@ class BackoffDelay: >>> import random >>> jitter=functools.partial(random.uniform, -0.5, 0.5) >>> bd = BackoffDelay(jitter=jitter) - >>> bd() - >>> 0 <= bd.delay <= 0.5 + >>> next(bd) + 0 + >>> 0 <= next(bd) <= 0.5 True """
Update tests to work with iterable values rather than invoking the delays.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -24,7 +24,9 @@ setup( install_requires=['numpy', 'pandas', 'requests', 'future'], extras_require={ 'igraph': ['python-igraph'], - 'all': ['python-igraph', 'numexpr', 'Bottleneck'] + 'networkx': ['networkx'], + 'pandas-extra': ['numexpr', 'Bottleneck'], + 'all': ['python-igraph', 'networkx', 'numexpr', 'Bottleneck'] }, license='BSD', classifiers=[
Update setup.py with optional networkx dependency
py
diff --git a/_pytest/main.py b/_pytest/main.py index <HASH>..<HASH> 100644 --- a/_pytest/main.py +++ b/_pytest/main.py @@ -200,10 +200,11 @@ class _CompatProperty(object): if obj is None: return self - warnings.warn( - "usage of {owner!r}.{name} is deprecated, please use pytest.{name} instead".format( - name=self.name, owner=type(owner).__name__), - PendingDeprecationWarning, stacklevel=2) + # TODO: reenable in the features branch + # warnings.warn( + # "usage of {owner!r}.{name} is deprecated, please use pytest.{name} instead".format( + # name=self.name, owner=type(owner).__name__), + # PendingDeprecationWarning, stacklevel=2) return getattr(pytest, self.name) @@ -291,10 +292,10 @@ class Node(object): return getattr(pytest, name) else: cls = getattr(self, name) - - warnings.warn("use of node.%s is deprecated, " - "use pytest_pycollect_makeitem(...) to create custom " - "collection nodes" % name, category=DeprecationWarning) + # TODO: reenable in the features branch + # warnings.warn("use of node.%s is deprecated, " + # "use pytest_pycollect_makeitem(...) to create custom " + # "collection nodes" % name, category=DeprecationWarning) return cls def __repr__(self):
comment out compatproperty deprecations todo: reenable in the features branch
py
diff --git a/glad/generator/generator.py b/glad/generator/generator.py index <HASH>..<HASH> 100644 --- a/glad/generator/generator.py +++ b/glad/generator/generator.py @@ -38,7 +38,7 @@ class Generator(object): extension_names = list(chain.from_iterable(self.spec.extensions[a] for a in self.api)) - e = chain.from_iterable(self.spec.extensions[a] for a in self.api) + e = list(chain.from_iterable(self.spec.extensions[a] for a in self.api)) for ext in extension_names: enforce(ext in e, 'Invalid extension "{}"'.format(ext), ValueError)
fix issue #<I> - order of extension arguments is not important
py
diff --git a/custodian/vasp/handlers.py b/custodian/vasp/handlers.py index <HASH>..<HASH> 100644 --- a/custodian/vasp/handlers.py +++ b/custodian/vasp/handlers.py @@ -90,7 +90,7 @@ class VaspErrorHandler(ErrorHandler): vi["INCAR"].write_file("INCAR") vi["POSCAR"].write_file("POSCAR") vi["KPOINTS"].write_file("KPOINTS") - return {"errors": self.errors, "actions": actions} + return {"errors": list(self.errors), "actions": actions} def __str__(self): return "Vasp error"
Fix non-serializability of sets.
py
diff --git a/tests/integ/test_tf_cifar.py b/tests/integ/test_tf_cifar.py index <HASH>..<HASH> 100644 --- a/tests/integ/test_tf_cifar.py +++ b/tests/integ/test_tf_cifar.py @@ -35,7 +35,7 @@ class PickleSerializer(object): @pytest.mark.continuous_testing def test_cifar(sagemaker_session, tf_full_version): - with timeout(minutes=20): + with timeout(minutes=45): script_path = os.path.join(DATA_DIR, 'cifar_10', 'source') dataset_path = os.path.join(DATA_DIR, 'cifar_10', 'data')
increase timeout in cifar test (#<I>)
py
diff --git a/grimoire_elk/elk/meetup.py b/grimoire_elk/elk/meetup.py index <HASH>..<HASH> 100644 --- a/grimoire_elk/elk/meetup.py +++ b/grimoire_elk/elk/meetup.py @@ -219,7 +219,7 @@ class MeetupEnrich(Enrich): eitem['type'] = "meetup" # time_date is when the meetup will take place, the needed one in this index # created is when the meetup entry was created and it is not the interesting date - eitem.update(self.get_grimoire_fields(time_date, eitem['type'])) + eitem.update(self.get_grimoire_fields(eitem['time_date'], eitem['type'])) if self.sortinghat: eitem.update(self.get_item_sh(event))
[enrich][meetup] Fix typo in grimoire_creation_date compute
py
diff --git a/python/ray/data/dataset.py b/python/ray/data/dataset.py index <HASH>..<HASH> 100644 --- a/python/ray/data/dataset.py +++ b/python/ray/data/dataset.py @@ -2303,6 +2303,7 @@ Dict[str, List[str]]]): The names of the columns ] = None, prefetch_blocks: int = 0, batch_size: int = 1, + drop_last: bool = False, ) -> "tf.data.Dataset": """Return a TF Dataset over this dataset. @@ -2358,6 +2359,10 @@ List[str]]]): The names of the columns to use as the features. Can be a list of prefetch_blocks: The number of blocks to prefetch ahead of the current block during the scan. batch_size: Record batch size. Defaults to 1. + drop_last (bool): Set to True to drop the last incomplete batch, + if the dataset size is not divisible by the batch size. If + False and the size of dataset is not divisible by the batch + size, then the last batch will be smaller. Defaults to False. Returns: A tf.data.Dataset. @@ -2380,6 +2385,7 @@ List[str]]]): The names of the columns to use as the features. Can be a list of prefetch_blocks=prefetch_blocks, batch_size=batch_size, batch_format="pandas", + drop_last=drop_last, ): if label_column: targets = batch.pop(label_column).values
[data] Expose `drop_last` in `to_tf` (#<I>)
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -43,7 +43,7 @@ setup( 'pyOpenSSL>=16.2.0', 'pyyaml>=3.12', 'requests>=2.12.4', - 'responses>=0.5.1', + 'responses>=0.5.1,<0.6.1', # 0.6.1 is broken, but already fixed in master 'six>=1.10.0', 'tables>=3.3.0', 'tqdm>=4.11.2',
Blacklist responses==<I> cause of bugs. (#<I>)
py
diff --git a/qutepart/syntax/parser.py b/qutepart/syntax/parser.py index <HASH>..<HASH> 100644 --- a/qutepart/syntax/parser.py +++ b/qutepart/syntax/parser.py @@ -36,7 +36,10 @@ class ContextStack: """ if len(self._contexts) - 1 < count: _logger.error("#pop value is too big %d", len(self._contexts)) - return ContextStack(self._contexts[:1], self._data[:1]) + if len(self._contexts) > 1: + return ContextStack(self._contexts[:1], self._data[:1]) + else: + return self return ContextStack(self._contexts[:-count], self._data[:-count]) @@ -839,7 +842,7 @@ class Context: contextStack.currentData()) for rule in self.rules: ruleTryMatchResult = rule.tryMatch(textToMatchObject) - if ruleTryMatchResult is not None: + if ruleTryMatchResult is not None: # if something matched _logger.debug('\tmatched rule %s at %d', rule.shortId(), currentColumnIndex)
py-parser: Fix freeze
py
diff --git a/splinter/driver/webdriver/remote.py b/splinter/driver/webdriver/remote.py index <HASH>..<HASH> 100644 --- a/splinter/driver/webdriver/remote.py +++ b/splinter/driver/webdriver/remote.py @@ -20,8 +20,7 @@ class WebDriver(BaseWebDriver): def __init__(self, url=DEFAULT_URL, browser='firefox', wait_time=2, **ability_args): abilities = getattr(DesiredCapabilities, browser.upper(), {}) - for arg in ability_args: - ability_args[arg] = ability_args[arg] + abilities.update(ability_args) self.driver = Remote(url, abilities) self.element_class = WebDriverElement
Fix remote WebDriver to properly pass ability_args to Selenium
py
diff --git a/pymatgen/analysis/elasticity/elastic.py b/pymatgen/analysis/elasticity/elastic.py index <HASH>..<HASH> 100644 --- a/pymatgen/analysis/elasticity/elastic.py +++ b/pymatgen/analysis/elasticity/elastic.py @@ -495,7 +495,7 @@ def toec_fit(strains, stresses, eq_stress = None, zero_crit=1e-10): coef1 = central_diff_weights(len(mstresses), 1) coef2 = central_diff_weights(len(mstresses), 2) if eq_stress is not None: - mstresses[3] = veq_stress + mstresses[len(mstresses) // 2] = veq_stress dsde[:, n] = np.dot(np.transpose(mstresses), coef1) / h d2sde2[:, n] = np.dot(np.transpose(mstresses), coef2) / h**2
fixed bug in TOEC elastic fitting for non-7 stencils
py
diff --git a/TeamComp/main.py b/TeamComp/main.py index <HASH>..<HASH> 100644 --- a/TeamComp/main.py +++ b/TeamComp/main.py @@ -12,7 +12,7 @@ from persist.store import TierStore from persist.config import JSONConfigEncoder, datetime_to_dict from tier import TierSet, TierSeed, update_participants, Tier, Queue, Maps, summoner_names_to_id, leagues_by_summoner_ids -current_state_extension = '.state' +current_state_extension = '.checkpoint' epoch = datetime.datetime.utcfromtimestamp(0) delta_3_hours = datetime.timedelta(hours=3) @@ -166,7 +166,7 @@ def download_from_config(config, config_file, save_state=True): include_timeline, matches_per_time_slice, map_type, queue, ts_end_callback, prints_on, minimum_match_id) -if __name__ == '__main__': +def main(): parser = argparse.ArgumentParser() parser.add_argument('configuration_file',help='The json file to hold the configuration of the download session ' 'you want to start by running this script. Might be a file saved ' @@ -185,3 +185,6 @@ if __name__ == '__main__': json_conf.update(current_state) download_from_config(json_conf, args.configuration_file, not args.no_state) + +if __name__ == '__main__': + main() \ No newline at end of file
Created an explicit main function. Renamed the extension of the state to .checkpoint
py
diff --git a/tests/test_zookeeper.py b/tests/test_zookeeper.py index <HASH>..<HASH> 100644 --- a/tests/test_zookeeper.py +++ b/tests/test_zookeeper.py @@ -56,9 +56,9 @@ class MockKazooClient: func(*args, **kwargs) def get(self, path, watch=None): - if path == '/service/test/no_node': + if path == '/no_node': raise NoNodeError - elif path == '/service/test/other_exception': + elif path == '/other_exception': raise Exception() elif '/members/' in path: return (
Fix zookeeper test coverage
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -from distutils.core import setup +from setuptools import setup # add dependency for python-bs4 @@ -10,5 +10,6 @@ setup( author_email='code@jeffreyforman.net', license='MIT', packages = ['pybindxml'], - url = 'https://github.com/jforman/pybindxml', + url = 'http:://github.com/jforman/pybindxml', + install_requires = ['bs4.BeautifulSoup'], )
convert to setuptools, and add dependency on bs4.beautifulsoup
py
diff --git a/dingo/core/__init__.py b/dingo/core/__init__.py index <HASH>..<HASH> 100644 --- a/dingo/core/__init__.py +++ b/dingo/core/__init__.py @@ -835,7 +835,8 @@ class NetworkDingo: for lv_load_area in grid_district.lv_load_areas(): for lv_grid_district in lv_load_area.lv_grid_districts(): station = lv_grid_district.lv_grid.station() - lv_stations.append((station.geo_data.x, station.geo_data.y)) + if station not in grid_district.mv_grid.graph_isolated_nodes(): + lv_stations.append((station.geo_data.x, station.geo_data.y)) lv_stations_wkb = from_shape(MultiPoint(lv_stations), srid=srid) # add dataset to session
don't export isolated stations to viz
py
diff --git a/tests/test_flask_buzz.py b/tests/test_flask_buzz.py index <HASH>..<HASH> 100644 --- a/tests/test_flask_buzz.py +++ b/tests/test_flask_buzz.py @@ -1,6 +1,7 @@ import flask import flask_buzz import flask_restplus +import http import json import pytest import re @@ -18,7 +19,7 @@ class TestFlaskBuzz: def test_raise(self): with pytest.raises(flask_buzz.FlaskBuzz) as err_info: raise flask_buzz.FlaskBuzz('i failed') - assert 'i failed (400)' in str(err_info.value) + assert 'i failed' in str(err_info.value) def test_jsonify__does_not_strip_headers_if_no_headers_kwarg(self, app): """ @@ -45,7 +46,6 @@ class TestFlaskBuzz: (out, err) = capsys.readouterr() assert stripped('message: basic test') in stripped(out) - assert stripped('status_code: 400') in stripped(out) def test_overloaded_status_code(self, app): """ @@ -87,4 +87,3 @@ class TestFlaskBuzz: (out, err) = capsys.readouterr() assert stripped('message: restplus test') in stripped(out) - assert stripped('status_code: 403') in stripped(out)
Issue #<I>: Fixed broken tests
py
diff --git a/localshop/apps/packages/views.py b/localshop/apps/packages/views.py index <HASH>..<HASH> 100644 --- a/localshop/apps/packages/views.py +++ b/localshop/apps/packages/views.py @@ -1,4 +1,5 @@ import logging +from wsgiref.util import FileWrapper from django.contrib.auth.decorators import login_required, permission_required from django.core.exceptions import ObjectDoesNotExist @@ -167,7 +168,7 @@ def download_file(request, name, pk, filename): return redirect(release_file.url) # TODO: Use sendfile if enabled - response = HttpResponse(release_file.distribution.file, + response = HttpResponse(FileWrapper(release_file.distribution.file), content_type='application/force-download') response['Content-Disposition'] = 'attachment; filename=%s' % ( release_file.filename)
Use wsgiref filewrapper for streaming downloads.
py
diff --git a/spyderlib/plugins/ipythonconsole.py b/spyderlib/plugins/ipythonconsole.py index <HASH>..<HASH> 100644 --- a/spyderlib/plugins/ipythonconsole.py +++ b/spyderlib/plugins/ipythonconsole.py @@ -785,12 +785,6 @@ class IPythonConsole(SpyderPluginWidget): def get_clients(self): """Return clients list""" return [cl for cl in self.clients if isinstance(cl, IPythonClient)] - -# def get_kernels(self): -# """Return IPython kernel widgets list""" -# return [sw for sw in self.shellwidgets -# if isinstance(sw, IPythonKernel)] -# def get_focus_client(self): """Return current client with focus, if any"""
IPython Console: Remove more commented code
py
diff --git a/ndb/model.py b/ndb/model.py index <HASH>..<HASH> 100644 --- a/ndb/model.py +++ b/ndb/model.py @@ -72,6 +72,7 @@ class Model(object): cls = self.__class__ self._key = key self._values = {} + # TODO: Factor out the following loop so Expando can override it. for name, value in kwds.iteritems(): prop = getattr(cls, name) assert isinstance(prop, Property) @@ -588,6 +589,8 @@ class GenericProperty(Property): class Expando(Model): + # TODO: Support Expando(attr1=val1, attr2=val2, ...). + def __getattr__(self, name): prop = self._properties.get(name) if prop is None:
Add a TODO about support for Expando(attr1=val1, attr2=val2, ...).
py
diff --git a/active_link/templatetags/active_link_tags.py b/active_link/templatetags/active_link_tags.py index <HASH>..<HASH> 100644 --- a/active_link/templatetags/active_link_tags.py +++ b/active_link/templatetags/active_link_tags.py @@ -11,12 +11,13 @@ register = template.Library() @register.simple_tag(takes_context=True) -def active_link(context, viewnames, css_class=None, strict=None, *args, **kwargs): +def active_link(context, viewnames, css_class=None, inactive_class='', strict=None, *args, **kwargs): """ Renders the given CSS class if the request path matches the path of the view. :param context: The context where the tag was called. Used to access the request object. :param viewnames: The name of the view or views separated by || (include namespaces if any). :param css_class: The CSS class to render. + :param inactive_class: The CSS class to render if the views is not active. :param strict: If True, the tag will perform an exact match with the request path. :return: """ @@ -48,4 +49,4 @@ def active_link(context, viewnames, css_class=None, strict=None, *args, **kwargs if active: return css_class - return '' + return inactive_class
Add support for specifying inactive class
py
diff --git a/cgutils/cgroup.py b/cgutils/cgroup.py index <HASH>..<HASH> 100644 --- a/cgutils/cgroup.py +++ b/cgutils/cgroup.py @@ -341,6 +341,9 @@ class SubsystemMemory(Subsystem): 'memsw.usage_in_bytes': long, 'stat': SimpleStat, 'numa_stat': NumaStat, + 'kmem.tcp.failcnt': long, + 'kmem.tcp.max_usage_in_bytes': long, + 'kmem.tcp.usage_in_bytes': long, } MAX_ULONGLONG = 2**63-1 CONFIGS = { @@ -351,6 +354,7 @@ class SubsystemMemory(Subsystem): 'soft_limit_in_bytes': MAX_ULONGLONG, 'swappiness': 60, 'use_hierarchy': 0, + 'kmem.tcp.limit_in_bytes': MAX_ULONGLONG, } CONTROLS = { 'force_empty': None,
Support kmem.tcp.* files of memory subsystem
py
diff --git a/test/test_add_command.py b/test/test_add_command.py index <HASH>..<HASH> 100644 --- a/test/test_add_command.py +++ b/test/test_add_command.py @@ -334,6 +334,16 @@ class AddCommandTest(CommandTest): self.assertEqual(self.todolist.todo(1).source(), "New todo") self.assertEqual(self.errors, "") + def test_add_completed(self): + """ Add a command that is completed automatically. """ + command = AddCommand.AddCommand(["x 2015-01-01 Already completed"], + self.todolist, self.out, self.error) + command.execute() + + self.assertEqual(self.output, + "| 1| x 2015-01-01 {} Already completed\n".format(self.today)) + self.assertEqual(self.errors, "") + def test_help(self): command = AddCommand.AddCommand(["help"], self.todolist, self.out, self.error)
Add test to add completed items Inspired by issue #<I>.
py
diff --git a/src/transformers/file_utils.py b/src/transformers/file_utils.py index <HASH>..<HASH> 100644 --- a/src/transformers/file_utils.py +++ b/src/transformers/file_utils.py @@ -827,8 +827,10 @@ def requires_backends(obj, backends): backends = [backends] name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ - if not all(BACKENDS_MAPPING[backend][0]() for backend in backends): - raise ImportError("".join([BACKENDS_MAPPING[backend][1].format(name) for backend in backends])) + checks = (BACKENDS_MAPPING[backend] for backend in backends) + failed = [msg.format(name) for available, msg in checks if not available()] + if failed: + raise ImportError("".join(failed)) class DummyObject(type):
Report only the failed imports in `requires_backends` (#<I>)
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -58,7 +58,8 @@ setup( )], install_requires=['indy-plenum-dev==1.6.652', 'python-dateutil', - 'timeout-decorator==0.4.0'], + 'timeout-decorator==0.4.0', + 'distro==1.3.0'], setup_requires=['pytest-runner'], extras_require={ 'tests': tests_require
INDY-<I>: Add distro to requires
py
diff --git a/teslajsonpy/teslaproxy.py b/teslajsonpy/teslaproxy.py index <HASH>..<HASH> 100644 --- a/teslajsonpy/teslaproxy.py +++ b/teslajsonpy/teslaproxy.py @@ -100,8 +100,9 @@ class TeslaProxy(AuthCaptureProxy): return return_timer_countdown_refresh_html( max(30 * (self.waf_retry - self.waf_limit), 120) if self.waf_retry > self.waf_limit - else random.random() * self.waf_retry + 5, + else random.random() * self.waf_retry + 10, f"Detected Tesla web application firewall block #{self.waf_retry}. Please wait and then reload the page or wait for the auto reload.", + False, ) self.waf_retry = 0 if resp.content_type == "application/json":
fix: increase time for waf retry
py
diff --git a/src/astral/__init__.py b/src/astral/__init__.py index <HASH>..<HASH> 100644 --- a/src/astral/__init__.py +++ b/src/astral/__init__.py @@ -59,12 +59,13 @@ except ImportError: __all__ = [ - "LocationInfo", - "Observer", + "Depression", "SunDirection", - "latlng_to_float", + "Observer", + "LocationInfo", "now", "today", + "dms_to_float", ] __version__ = "2.0-alpha"
Re-ordered functions in __all__
py
diff --git a/dedupe/clustering.py b/dedupe/clustering.py index <HASH>..<HASH> 100644 --- a/dedupe/clustering.py +++ b/dedupe/clustering.py @@ -57,7 +57,7 @@ def connected_components(edgelist: numpy.ndarray, yield from _connected_components(edgelist, max_components) - edgelist._mmap.close() + edgelist._mmap.close() # type: ignore def _connected_components(edgelist: numpy.ndarray,
add back a few necessary type ignores
py
diff --git a/notifications/models.py b/notifications/models.py index <HASH>..<HASH> 100644 --- a/notifications/models.py +++ b/notifications/models.py @@ -23,18 +23,34 @@ class NotificationQuerySet(models.query.QuerySet): "Return only unread items in the current queryset" return self.filter(unread=True) - # Should we return self on these? + def read(self): + "Return only read items in the current queryset" + return self.filter(unread=False) + def mark_all_as_read(self, recipient=None): + """Mark as read any unread messages in the current queryset. + + Optionally, filter these by recipient first. + """ + # We want to filter out read ones, as later we will store + # the time they were marked as read. + qs = self.unread() if recipient: - self.filter(recipient=recipient).update(unread=False) - else: - self.update(unread=False) + qs = qs.filter(recipient=recipient) + + qs.update(unread=False) def mark_all_as_unread(self, recipient=None): + """Mark as unread any read messages in the current queryset. + + Optionally, filter these by recipient first. + """ + qs = self.read() + if recipient: - self.filter(recipient=recipient).update(unread=True) - else: - self.update(unread=True) + qs = qs.filter(recipient=recipient) + + qs.update(unread=True) class Notification(models.Model): """
Document/clean-up query set API.
py
diff --git a/json_schema_validator/tests/test_validator.py b/json_schema_validator/tests/test_validator.py index <HASH>..<HASH> 100644 --- a/json_schema_validator/tests/test_validator.py +++ b/json_schema_validator/tests/test_validator.py @@ -107,7 +107,7 @@ class ValidatorFailureTests(TestWithScenarios, TestCase): 'schema': '{"type": "boolean"}', 'data': '""', 'raises': ValidationError( - "'' does not match type 'boolean'", + "u'' does not match type 'boolean'", "Object has incorrect type (expected boolean)"), 'object_expr': 'object', 'schema_expr': 'schema.type', @@ -170,7 +170,7 @@ class ValidatorFailureTests(TestWithScenarios, TestCase): 'schema': '{"type": "null"}', 'data': '""', 'raises': ValidationError( - "'' does not match type 'null'", + "u'' does not match type 'null'", "Object has incorrect type (expected null)"), 'object_expr': 'object', 'schema_expr': 'schema.type',
Fix test suite. Closes: #9 The schema message needs to use the same type as the exception when comparing the string.
py
diff --git a/test/encrypted_transport.py b/test/encrypted_transport.py index <HASH>..<HASH> 100755 --- a/test/encrypted_transport.py +++ b/test/encrypted_transport.py @@ -336,6 +336,5 @@ class TestSecure(unittest.TestCase): conn.close() - if __name__ == '__main__': utils.main()
Fixing lint warning.
py
diff --git a/txaws/server/tests/test_schema.py b/txaws/server/tests/test_schema.py index <HASH>..<HASH> 100644 --- a/txaws/server/tests/test_schema.py +++ b/txaws/server/tests/test_schema.py @@ -10,7 +10,7 @@ from txaws.server.exception import APIError from txaws.server.schema import ( Arguments, Bool, Date, Enum, Integer, Parameter, RawStr, Schema, Unicode, List, Structure, - InconsistentParameterError, InvalidParameterValueError) + InconsistentParameterError) class ArgumentsTestCase(TestCase): @@ -711,7 +711,8 @@ class SchemaTestCase(TestCase): """ The default of a L{List} can be specified as a list. """ - schema = Schema(List("names", Unicode(), optional=True, default=[u"foo", u"bar"])) + schema = Schema(List("names", Unicode(), optional=True, + default=[u"foo", u"bar"])) arguments, _ = schema.extract({}) self.assertEqual([u"foo", u"bar"], arguments.names)
address jseutter's comment - pep8 and pyflakes fixes in test_schema.py
py
diff --git a/billy/importers/events.py b/billy/importers/events.py index <HASH>..<HASH> 100644 --- a/billy/importers/events.py +++ b/billy/importers/events.py @@ -102,6 +102,7 @@ def import_events(abbr, data_dir, import_actions=False): # also a committee considering a bill from the other chamber, or # something like that. bill['id'] = db_bill['_id'] + bill['bill_id'] = bill_id import_event(data) ensure_indexes()
save fixed bill_id on related bills
py
diff --git a/afsk/ax25.py b/afsk/ax25.py index <HASH>..<HASH> 100644 --- a/afsk/ax25.py +++ b/afsk/ax25.py @@ -254,7 +254,7 @@ def main(arguments=None): digipeaters=args.digipeaters.split(b','), ) - print("Sending packet: '{0}'".format(packet)) + logger.info(r"Sending packet: '{0}'".format(packet)) logger.debug(r"Packet bits:\n{0!r}".format(packet.unparse())) audio = afsk.encode(packet.unparse())
Convert print statement to logging call Fixes broken STDOUT Wave file output.
py
diff --git a/microbot/models/bot.py b/microbot/models/bot.py index <HASH>..<HASH> 100644 --- a/microbot/models/bot.py +++ b/microbot/models/bot.py @@ -319,8 +319,8 @@ class KikBot(IntegrationBot): msg.keyboards.append(SuggestedResponseKeyboard(to=to, responses=keyboard)) try: - logger.debug("Message to send:(%s)" % msg) + logger.debug("Message to send:(%s)" % msg.to_json()) self._bot.send_messages([msg]) - logger.debug("Message sent OK:(%s)" % msg) + logger.debug("Message sent OK:(%s)" % msg.to_json()) except: - logger.error("Error trying to send message:(%s)" % msg) \ No newline at end of file + logger.error("Error trying to send message:(%s)" % msg.to_json()) \ No newline at end of file
enhance logs form kik hook
py
diff --git a/test/test_injection.py b/test/test_injection.py index <HASH>..<HASH> 100644 --- a/test/test_injection.py +++ b/test/test_injection.py @@ -80,7 +80,7 @@ class MyInjection(object): row.taper = self.taper row.numrel_mode_min = 0 row.numrel_mode_max = 0 - row.numrel_data = 0 + row.numrel_data = None row.source = 'ANTANI' class TestInjection(unittest.TestCase):
numrel_data should be set either to None or "", if you DO NOT want to perform numrel injections
py
diff --git a/salt/state.py b/salt/state.py index <HASH>..<HASH> 100644 --- a/salt/state.py +++ b/salt/state.py @@ -549,8 +549,14 @@ class State(object): possible module type, e.g. a python, pyx, or .so. Always refresh if the function is recurse, since that can lay down anything. ''' + if data.get('reload_modules', False) is True: + # User explicitly requests a reload + self.module_refresh() + return + if not ret['changes']: return + if data['state'] == 'file': if data['fun'] == 'managed': if data['name'].endswith( @@ -1226,6 +1232,7 @@ class State(object): data ) ) + if 'provider' in data: self.load_modules(data) cdata = self.format_call(data)
Allow salt modules to be refreshed on demand. As an example for the above feature: ```yaml python-pip: cmd: - run - cwd: / - name: easy_install --script-dir=/usr/bin -U pip virtualenv - reload_modules: true ``` See the last line? If no exception occurs in this state, after it's execution, salt **will** reload it's modules.
py
diff --git a/ggplot/utils/utils.py b/ggplot/utils/utils.py index <HASH>..<HASH> 100644 --- a/ggplot/utils/utils.py +++ b/ggplot/utils/utils.py @@ -504,7 +504,7 @@ def remove_missing(df, na_rm=False, vars=None, name='', finite=False): if finite: lst = [np.inf, -np.inf] - to_replace = dict((v, lst) for v in vars) + to_replace = {v: lst for v in vars} df.replace(to_replace, np.nan, inplace=True) txt = 'non-finite' else: @@ -514,7 +514,7 @@ def remove_missing(df, na_rm=False, vars=None, name='', finite=False): df.reset_index(drop=True, inplace=True) if len(df) < n and not na_rm: msg = '{} : Removed {} rows containing {} values.' - gg_warn(msg.format(name, n-len(df), txt)) + gg_warn(msg.format(name, n-len(df), txt), stacklevel=3) return df
Accurate file & line no. for missing values
py
diff --git a/raven/contrib/django/client.py b/raven/contrib/django/client.py index <HASH>..<HASH> 100644 --- a/raven/contrib/django/client.py +++ b/raven/contrib/django/client.py @@ -64,9 +64,9 @@ class DjangoClient(Client): uri = '%s://%s%s' % (scheme, host, request.path) if request.method != 'GET': - if hasattr(request, 'body'): + try: data = request.body - else: + except: try: data = request.raw_post_data and request.raw_post_data or request.POST except Exception:
Workaround the hasattr behavior change between python 2 and 3.
py
diff --git a/Lib/ufo2fdk/makeotfParts.py b/Lib/ufo2fdk/makeotfParts.py index <HASH>..<HASH> 100644 --- a/Lib/ufo2fdk/makeotfParts.py +++ b/Lib/ufo2fdk/makeotfParts.py @@ -146,11 +146,11 @@ class MakeOTFPartsCompiler(object): if glyphName in self.font and self.font[glyphName].unicode is not None: code = self.font[glyphName].unicode code = "%04X" % code - if len(code) < 4: + if len(code) <= 4: code = "uni%s" % code else: code = "u%s" % code - line = "%s %s uni%s" % (glyphName, glyphName, code) + line = "%s %s %s" % (glyphName, glyphName, code) else: line = "%s %s" % (glyphName, glyphName) lines.append(line)
Correct glyphOrder file generation.
py
diff --git a/tests/micropython/extreme_exc.py b/tests/micropython/extreme_exc.py index <HASH>..<HASH> 100644 --- a/tests/micropython/extreme_exc.py +++ b/tests/micropython/extreme_exc.py @@ -126,8 +126,8 @@ def main(): ) except Exception as er: e = er - lst[0][0] = None - lst = None + while lst: + lst[0], lst = None, lst[0] # unlink lists to free up heap print(repr(e)[:10]) # raise a deep exception with the heap locked
tests/micropython: Fully unlink nested list in extreme exc test. To make sure there are no dangling references to the lists, and the GC can reclaim heap memory.
py
diff --git a/tests/lax_test.py b/tests/lax_test.py index <HASH>..<HASH> 100644 --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -489,6 +489,7 @@ class LaxTest(jtu.JaxTestCase): # TODO(mattjj): test conv_general_dilated against numpy + @jtu.skip_on_devices("gpu") # b/147488740 def testConv0DIsDot(self): rng = jtu.rand_default() def args_maker():
Disable failing GPU test for now pending XLA fix.
py
diff --git a/tests/db_routers_unittest.py b/tests/db_routers_unittest.py index <HASH>..<HASH> 100644 --- a/tests/db_routers_unittest.py +++ b/tests/db_routers_unittest.py @@ -113,7 +113,7 @@ class OQRouterTestCase(unittest.TestCase): For each model in the 'uiapi' schema, test for proper db routing for read operations. ''' - classes = [Upload, Input, OqJob, OqParams, Output, ErrorMsg] + classes = [Upload, Input, InputSet, OqJob, OqParams, Output, ErrorMsg] expected_db = 'reslt_writer' self._db_for_read_helper(classes, expected_db) @@ -123,7 +123,7 @@ class OQRouterTestCase(unittest.TestCase): For each model in the 'uiapi' schema, test for proper db routing for write operations. ''' - classes = [Upload, Input, OqJob, OqParams, ErrorMsg] + classes = [Upload, Input, InputSet, OqJob, OqParams, ErrorMsg] expected_db = 'job_init' self._db_for_write_helper(classes, expected_db)
Added test to verify InputSet is routed as expected. Former-commit-id: c<I>c<I>ab1ed8d<I>d0dbde<I>d<I>eb0e<I>
py
diff --git a/vdom/core.py b/vdom/core.py index <HASH>..<HASH> 100644 --- a/vdom/core.py +++ b/vdom/core.py @@ -202,8 +202,7 @@ class VDOM(object): """ Return inline CSS from CSS key / values """ - return "; ".join(['{}: {}'.format(k, v) - for k, v in convert_style_names(style.items())]) + return "; ".join(['{}: {}'.format(convert_style_key(k), v) for k, v in style.items()]) def _repr_html_(self): """ @@ -267,11 +266,13 @@ upper = re.compile(r'[A-Z]') def _upper_replace(matchobj): return '-' + matchobj.group(0).lower() -def convert_style_names(style): +def convert_style_key(key): """Converts style names from DOM to css styles. + + >>> convert_style_key("backgroundColor") + "background-color" """ - for k, v in style: - yield re.sub(upper, _upper_replace, k), v + return re.sub(upper, _upper_replace, key) def create_component(tag_name, allow_children=True):
Change to create_style_key; apply to key only, add example to docstring
py
diff --git a/examples/educational/coordinate_system.py b/examples/educational/coordinate_system.py index <HASH>..<HASH> 100644 --- a/examples/educational/coordinate_system.py +++ b/examples/educational/coordinate_system.py @@ -101,13 +101,13 @@ coordinate systems: .. note:: - In a two-layer scenario with only one ``depth`` it always assumes a **LHS**, - as it is not possible to detect the direction from only one interface. To - force any of the other system you can define ``+/-np.infty`` for the - down-most interface at the appropriate place: + In a two-layer scenario with only one interface ``depth = z`` it always + assumes a **LHS**, as it is not possible to detect the direction from only + one interface. To force a **RHS** you have to add ``-np.infty`` for the + down-most interface: - - ``0`` or ``[0, np.infty]``: **LHS** (+z down; default) - - ``[0, -np.infty]``: **RHS** (+z up) + - **LHS**: ``depth = z`` (+z down); default, corresponds to ``[z, np.infty]`` + - **RHS**: ``depth = [z, -np.infty]`` (+z up) In this example we first create a sketch of the LHS and RHS for visualization,
Add improved note (#<I>)
py
diff --git a/utilz/setup_utils.py b/utilz/setup_utils.py index <HASH>..<HASH> 100644 --- a/utilz/setup_utils.py +++ b/utilz/setup_utils.py @@ -23,7 +23,7 @@ def init(name, package_name, setup_py_fpath, kwargs=None): sys.exit() if cmd == 'publish': - run_cmdl('git clean -dfx && python setup.py sdist && twine upload dist/*') + run_cmdl('python setup.py sdist && twine upload dist/*') sys.exit() if cmd == 'up':
setup_utils publish: remove clean repo commmand
py
diff --git a/pyforms/gui/Controls/ControlList.py b/pyforms/gui/Controls/ControlList.py index <HASH>..<HASH> 100755 --- a/pyforms/gui/Controls/ControlList.py +++ b/pyforms/gui/Controls/ControlList.py @@ -188,6 +188,14 @@ class ControlList(ControlBase, QWidget): def get_cell(self, column, row): return self.tableWidget.item(row, column) + def set_sorting_enabled(self, value): + """ + Enable or disable columns sorting + + :param bool value: True to enable sorting, False otherwise + """ + self.tableWidget.setSortingEnabled(value) + ########################################################################## ############ EVENTS ###################################################### ##########################################################################
ControlList: adds auxiliary method for enable/disable sorting
py
diff --git a/tests/test_proliphix.py b/tests/test_proliphix.py index <HASH>..<HASH> 100755 --- a/tests/test_proliphix.py +++ b/tests/test_proliphix.py @@ -28,8 +28,11 @@ class TestProliphix(unittest.TestCase): self.assertEqual('4.1.13', px._get_oid('AverageTemp')) self.assertEqual(None, px._get_oid('AverageTemp2')) + # we don't want to actually do the clock drift work during tests + # here. + @mock.patch('proliphix.PDP._clock_drift') @mock.patch('requests.post') - def test_update(self, rp): + def test_update(self, rp, cd): pdp = proliphix.PDP(mock.sentinel.host, mock.sentinel.user, mock.sentinel.passwd)
Fix unit tests now that clock drift is added
py
diff --git a/src/setuptools_scm/version.py b/src/setuptools_scm/version.py index <HASH>..<HASH> 100644 --- a/src/setuptools_scm/version.py +++ b/src/setuptools_scm/version.py @@ -16,7 +16,7 @@ SEMVER_LEN = 3 def _parse_version_tag(tag, config): - tagstring = tag if not isinstance(tag, str) else str(tag) + tagstring = tag if isinstance(tag, str) else str(tag) match = config.tag_regex.match(tagstring) result = None
Fix TypeError with setuptools_scm_git_archive
py
diff --git a/kconfiglib.py b/kconfiglib.py index <HASH>..<HASH> 100644 --- a/kconfiglib.py +++ b/kconfiglib.py @@ -3187,8 +3187,6 @@ def _make_and(e1, e2): return e2 if e2 is None or e2 == "y": return e1 - if e1 == "n" or e2 == "n": - return "n" # Prefer to merge/update argument list if possible instead of creating # a new AND node @@ -3214,8 +3212,6 @@ def _make_or(e1, e2): return "y" if e1 == "n": return e2 - if e2 == "n": - return e1 # Prefer to merge/update argument list if possible instead of creating # a new OR node
Remove two seldom-used expression simplifications. These never trigger for the x<I> Kconfigs, wasting time instead.
py
diff --git a/salt/modules/mount.py b/salt/modules/mount.py index <HASH>..<HASH> 100644 --- a/salt/modules/mount.py +++ b/salt/modules/mount.py @@ -277,6 +277,7 @@ def fstab(config='/etc/fstab'): for line in ifile: try: entry = _fstab_entry.dict_from_line( line, _fstab_entry.compatibility_keys ) + entry[ 'opts' ] = entry[ 'opts' ].split( ',' ) ret[ entry.pop( 'name' ) ] = entry except _fstab_entry.ParseError: pass
Updating modules/mount.py (mount.fstab) to produce an array for opts, rather than the opts string literal. Allows mount to pass unit tests
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -51,7 +51,7 @@ setup( include_package_data=True, install_requires=[ 'django>=1.4.2', - 'pillow<4' if sys.version < (2, 7) else 'pillow', + 'pillow<4' if sys.version_info < (2, 7) else 'pillow', ], cmdclass={'test': DjangoTests}, classifiers=[
Fix version check in setup.py The tests are failing because on Python3 type comparison became stricter. ``sys.version`` is a string, ``sys.version_info`` is a tuple.
py
diff --git a/cloudsmith_cli/core/api/files.py b/cloudsmith_cli/core/api/files.py index <HASH>..<HASH> 100644 --- a/cloudsmith_cli/core/api/files.py +++ b/cloudsmith_cli/core/api/files.py @@ -6,6 +6,7 @@ import os import click import cloudsmith_api import requests +import six from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor from ..utils import calculate_file_md5 @@ -58,7 +59,7 @@ def request_file_upload(owner, repo, filepath, md5_checksum=None): def upload_file(upload_url, upload_fields, filepath, callback=None): """Upload a pre-signed file to Cloudsmith.""" - upload_fields = upload_fields.items() + upload_fields = list(six.iteritems(upload_fields)) upload_fields.append( ('file', (os.path.basename(filepath), click.open_file(filepath, 'rb'))) )
Fix for py3 compatibility in files API code Use six for python3 compatibility (@lskillen)
py
diff --git a/gitlab/v4/objects.py b/gitlab/v4/objects.py index <HASH>..<HASH> 100644 --- a/gitlab/v4/objects.py +++ b/gitlab/v4/objects.py @@ -1020,7 +1020,8 @@ class ProjectTag(ObjectDeleteMixin, RESTObject): GitlabCreateError: If the server fails to create the release GitlabUpdateError: If the server fails to update the release """ - path = '%s/%s/release' % (self.manager.path, self.get_id()) + id = self.get_id().replace('/', '%2F') + path = '%s/%s/release' % (self.manager.path, id) data = {'description': description} if self.release is None: try:
Tags release description: support / in tag names
py
diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py index <HASH>..<HASH> 100644 --- a/salt/daemons/masterapi.py +++ b/salt/daemons/masterapi.py @@ -398,6 +398,9 @@ class RemoteFuncs(object): # If the command will make a recursive publish don't run if re.match('publish.*', load['fun']): return False + # Don't allow pillar or compound matching + if load.get('tgt_type', 'glob') in ('pillar', 'compound'): + return False # Check the permissions for this minion perms = [] for match in self.opts['peer']:
Disable pillar/compound matching for peer interface, master-side
py
diff --git a/tornado/options.py b/tornado/options.py index <HASH>..<HASH> 100644 --- a/tornado/options.py +++ b/tornado/options.py @@ -354,12 +354,17 @@ class _LogFormatter(logging.Formatter): logging.Formatter.__init__(self, *args, **kwargs) self._color = color if color: - # The curses module has some str/bytes confusion in python3. - # Most methods return bytes, but only accept strings. - # The explict calls to unicode() below are harmless in python2, - # but will do the right conversion in python3. - fg_color = unicode(curses.tigetstr("setaf") or - curses.tigetstr("setf") or "", "ascii") + # The curses module has some str/bytes confusion in + # python3. Until version 3.2.3, most methods return + # bytes, but only accept strings. In addition, we want to + # output these strings with the logging module, which + # works with unicode strings. The explicit calls to + # unicode() below are harmless in python2 but will do the + # right conversion in python 3. + fg_color = (curses.tigetstr("setaf") or + curses.tigetstr("setf") or "") + if (3, 0) < sys.version_info < (3, 2, 3): + fg_color = unicode(fg_color, "ascii") self._colors = { logging.DEBUG: unicode(curses.tparm(fg_color, 4), # Blue "ascii"),
Add a version check for the curses unicode hack so it won't break when python <I> or <I> are released. Closes #<I>.
py
diff --git a/airflow/hooks/hive_hooks.py b/airflow/hooks/hive_hooks.py index <HASH>..<HASH> 100644 --- a/airflow/hooks/hive_hooks.py +++ b/airflow/hooks/hive_hooks.py @@ -18,7 +18,7 @@ from airflow.utils import AirflowException from airflow.hooks.base_hook import BaseHook from airflow.utils import TemporaryDirectory from airflow.configuration import conf -import airflow.security.utils +import airflow.security.utils as utils class HiveCliHook(BaseHook): """ @@ -70,9 +70,7 @@ class HiveCliHook(BaseHook): hive_bin = 'beeline' if conf.get('security', 'enabled'): template = conn.extra_dejson.get('principal',"hive/_HOST@EXAMPLE.COM") - template = airflow.security.utils.replace_hostname_pattern( - airflow.security.utils.get_components(template) - ) + template = utils.replace_hostname_pattern(utils.get_components(template)) proxy_user = "" if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
Use short hand for security.utils
py
diff --git a/docs/source/conf.py b/docs/source/conf.py index <HASH>..<HASH> 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -53,7 +53,7 @@ latex_elements = {} latex_documents = [ (master_doc, 'HTTPretty.tex', 'HTTPretty Documentation', - 'Gabriel Falcão', 'manual'), + 'Gabriel Falcao', 'manual'), ] man_pages = [
fix docs/source/conf.py
py
diff --git a/flink-python/pyflink/datastream/tests/test_data_stream.py b/flink-python/pyflink/datastream/tests/test_data_stream.py index <HASH>..<HASH> 100644 --- a/flink-python/pyflink/datastream/tests/test_data_stream.py +++ b/flink-python/pyflink/datastream/tests/test_data_stream.py @@ -1156,6 +1156,14 @@ class ProcessDataStreamTests(DataStreamTests): side_expected = ['0', '1', '2'] self.assert_equals_sorted(side_expected, side_sink.get_results()) + def test_java_list_deserialization(self): + row_type_info = Types.ROW_NAMED(['list'], [Types.LIST(Types.INT())]) + ds = self.env.from_collection([Row(list=[1, 2, 3])], type_info=row_type_info) + ds.map(lambda e: str(e), Types.STRING()).add_sink(self.test_sink) + self.env.execute('test_java_list_deserialization') + expected = ['Row(list=[1, 2, 3])'] + self.assert_equals(self.test_sink.get_results(), expected) + class ProcessDataStreamStreamingTests(DataStreamStreamingTests, ProcessDataStreamTests, PyFlinkStreamingTestCase):
[FLINK-<I>][python] Fix LIST type in Python DataStream API This closes #<I>.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -46,6 +46,7 @@ setup( 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', 'Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Intended Audience :: Developers',
Added python<I> version to supported versions list. All tests passed on this configuration: platform linux -- Python <I>, pytest-<I>, py-<I>, pluggy-<I>
py
diff --git a/torf/_torrent.py b/torf/_torrent.py index <HASH>..<HASH> 100644 --- a/torf/_torrent.py +++ b/torf/_torrent.py @@ -714,7 +714,7 @@ class Torrent(): 3. The number of pieces that have been hashed (:class:`int`) 4. The total number of pieces (:class:`int`) - If `callback` returns anything that is not None, hashing is stopped. + If `callback` returns anything that is not ``None``, hashing is stopped. :raises PathEmptyError: if :attr:`path` contains only empty files/directories
Torrent.generate(): Highlight 'None' properly in docstring
py
diff --git a/spyder_notebook/tests/test_plugin.py b/spyder_notebook/tests/test_plugin.py index <HASH>..<HASH> 100644 --- a/spyder_notebook/tests/test_plugin.py +++ b/spyder_notebook/tests/test_plugin.py @@ -107,7 +107,6 @@ def notebook(qtbot): notebook_plugin = NotebookPlugin(None, testing=True) qtbot.addWidget(notebook_plugin) notebook_plugin.create_new_client() - notebook_plugin.show() return notebook_plugin
Remove .show() from test This causes a segfault when running the test locally and does not seem necessary for testing.
py
diff --git a/vaex/dataset.py b/vaex/dataset.py index <HASH>..<HASH> 100644 --- a/vaex/dataset.py +++ b/vaex/dataset.py @@ -2298,7 +2298,8 @@ class Dataset(object): total_grid[i,j,:,:] = grid[:,None,...] labels["what"] = what_labels else: - total_grid = np.broadcast_to(grid, (1,) * 4 + grid.shape) + dims_left = 6-len(grid.shape) + total_grid = np.broadcast_to(grid, (1,) * dims_left + grid.shape) # visual=dict(x="x", y="y", selection="fade", subspace="facet1", what="facet2",) def _selection_name(name):
grid argument of plot can have multiple dimensions
py
diff --git a/pyrogram/client/storage/file_storage.py b/pyrogram/client/storage/file_storage.py index <HASH>..<HASH> 100644 --- a/pyrogram/client/storage/file_storage.py +++ b/pyrogram/client/storage/file_storage.py @@ -67,6 +67,17 @@ class FileStorage(SQLiteStorage): # noinspection PyTypeChecker self.update_peers(peers.values()) + def update(self): + version = self.version() + + if version == 1: + with self.lock, self.conn: + self.conn.execute("DELETE FROM peers") + + version += 1 + + self.version(version) + def open(self): path = self.database file_exists = path.is_file() @@ -97,6 +108,8 @@ class FileStorage(SQLiteStorage): if not file_exists: self.create() + else: + self.update() with self.conn: try: # Python 3.6.0 (exactly this version) is bugged and won't successfully execute the vacuum
Implement a storage update mechanism (for FileStorage) The idea is pretty simple: get the current database version and for each older version, do what needs to be done in order to get to the next version state. This will make schema changes transparent to the user in case they are needed.
py
diff --git a/subliminal/score.py b/subliminal/score.py index <HASH>..<HASH> 100755 --- a/subliminal/score.py +++ b/subliminal/score.py @@ -44,7 +44,7 @@ movie_scores = {'hash': 119, 'title': 60, 'year': 30, 'release_group': 15, 'format': 7, 'audio_codec': 3, 'resolution': 2, 'video_codec': 2, 'hearing_impaired': 1} #: Equivalent release groups -equivalent_release_groups = ({'LOL', 'DIMENSION'}, {'ASAP', 'IMMERSE', 'FLEET'}) +equivalent_release_groups = ({'LOL', 'DIMENSION'}, {'ASAP', 'IMMERSE', 'FLEET'}, {'AVS', 'SVA'}) def get_equivalent_release_groups(release_group):
Add SVA and AVS to equivalent release groups (#<I>) Additional equivalent release groups. See <URL>
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,6 @@ #!/usr/bin/env python """ - Properties: Fancy properties for Python. + properties: Fancy properties for Python. """ import numpy as np @@ -69,7 +69,7 @@ with open("README.rst") as f: LONG_DESCRIPTION = ''.join(f.readlines()) setup( - name="Properties", + name="properties", version="0.0.1", packages=find_packages(), install_requires=['numpy>=1.7', @@ -78,7 +78,7 @@ setup( ], author="Rowan Cockett", author_email="rowan@3ptscience.com", - description="Properties", + description="properties", long_description=LONG_DESCRIPTION, keywords="property", url="http://steno3d.com/",
lowercase name (Properties --> properties)
py
diff --git a/py3status/modules/volume_status.py b/py3status/modules/volume_status.py index <HASH>..<HASH> 100644 --- a/py3status/modules/volume_status.py +++ b/py3status/modules/volume_status.py @@ -26,6 +26,10 @@ Configuration parameters: (default False) max_volume: Allow the volume to be increased past 100% if available. pactl and pamixer supports this. (default 120) + start_delay: Number of seconds to wait before starting this module. + This allows some systems to start the audio backend before we + try picking it up. + (default 0) thresholds: Threshold for percent volume. (default [(0, 'bad'), (20, 'degraded'), (50, 'good')]) volume_delta: Percentage amount that the volume is increased or @@ -82,6 +86,7 @@ mute import re import math +from time import sleep from py3status.exceptions import CommandError STRING_ERROR = "invalid command `{}`" @@ -324,6 +329,7 @@ class Py3status: format_muted = r"[\?if=is_input 😶|♪]: muted" is_input = False max_volume = 120 + start_delay = 0 thresholds = [(0, "bad"), (20, "degraded"), (50, "good")] volume_delta = 5 @@ -353,6 +359,8 @@ class Py3status: } def post_config_hook(self): + if self.start_delay: + sleep(int(self.start_delay)) if not self.command: commands = ["pamixer", "pactl", "amixer"] # pamixer, pactl requires pulseaudio to work
volume_status module: add start_delay to address issue #<I> (#<I>)
py
diff --git a/owslib/swe/sensor/sml.py b/owslib/swe/sensor/sml.py index <HASH>..<HASH> 100644 --- a/owslib/swe/sensor/sml.py +++ b/owslib/swe/sensor/sml.py @@ -45,12 +45,14 @@ class PropertyGroup(object): self.capabilities = {} for cap in element.findall(nsp('sml:capabilities')): name = testXMLAttribute(cap, "name") - self.capabilities[name] = cap[0] + if name is not None: + self.capabilities[name] = cap[0] self.characteristics = {} for cha in element.findall(nsp('sml:characteristics')): name = testXMLAttribute(cha, "name") - self.characteristics[name] = cha[0] + if name is not None: + self.characteristics[name] = cha[0] def get_capabilities_by_name(self, name): """ @@ -314,4 +316,4 @@ class Link(object): class ArrayLink(object): def __init__(self, element): - raise NotImplementedError("ArrayLink is not implemented in OWSLib (yet)") \ No newline at end of file + raise NotImplementedError("ArrayLink is not implemented in OWSLib (yet)")
Added a bit of safety to SML parsing
py
diff --git a/salt/modules/freebsdpkg.py b/salt/modules/freebsdpkg.py index <HASH>..<HASH> 100644 --- a/salt/modules/freebsdpkg.py +++ b/salt/modules/freebsdpkg.py @@ -465,7 +465,7 @@ def _rehash(): Recomputes internal hash table for the PATH variable. Use whenever a new command is created during the current session. ''' - shell = __salt__['environ.get']('SHELL', output_loglevel='trace') + shell = __salt__['environ.get']('SHELL') if shell.split('/')[-1] in ('csh', 'tcsh'): __salt__['cmd.run']('rehash', output_loglevel='trace')
environ.get has no output_loglevel commit <I>a<I>fa<I>ba<I>e8b<I>fb6bede2e3ea6a<I> changed this call from `cmd.run` to `environ.get`, but `output_loglevel` remained, leading to stacktraces
py
diff --git a/spyder/widgets/tests/test_pathmanager.py b/spyder/widgets/tests/test_pathmanager.py index <HASH>..<HASH> 100644 --- a/spyder/widgets/tests/test_pathmanager.py +++ b/spyder/widgets/tests/test_pathmanager.py @@ -19,6 +19,7 @@ from qtpy.QtCore import Qt # Local imports from spyder.py3compat import PY3 from spyder.widgets import pathmanager as pathmanager_mod +from spyder.utils.programs import is_module_installed @pytest.fixture @@ -62,8 +63,9 @@ def test_check_uncheck_path(qtbot): assert pathmanager.not_active_pathlist == [] -@pytest.mark.skipif(os.name != 'nt', - reason="This feature is not applicable for Unix systems") +@pytest.mark.skipif(os.name != 'nt' or not is_module_installed('win32con'), + reason=("This feature is not applicable for Unix " + "systems and pywin32 is needed")) def test_synchronize_with_PYTHONPATH(qtbot, mocker): pathmanager = setup_pathmanager(qtbot, None, pathlist=['path1', 'path2', 'path3'],
Skip this test if pywin<I> is not present Skip this test if pywin<I> is not present because it's not part of Spyder listed dependencies
py
diff --git a/examples/basic_nest/make_nest.py b/examples/basic_nest/make_nest.py index <HASH>..<HASH> 100755 --- a/examples/basic_nest/make_nest.py +++ b/examples/basic_nest/make_nest.py @@ -1,18 +1,17 @@ #!/usr/bin/env python -import collections +import glob import os import os.path -import sys -from nestly import nestly +from nestly import Nest wd = os.getcwd() input_dir = os.path.join(wd, 'inputs') -ctl = collections.OrderedDict() +nest = Nest() +nest.add_level('strategy', ('exhaustive', 'approximate')) +nest.add_level('run_count', [10**i for i in xrange(3)]) +nest.add_level('input_file', glob.glob(os.path.join(input_dir, 'file*')), + label_func=os.path.basename) -ctl['strategy'] = nestly.repeat_iterable(('exhaustive', 'approximate')) -ctl['run_count'] = nestly.repeat_iterable([10**(i + 1) for i in xrange(3)]) -ctl['input_file'] = lambda x: map(nestly.file_nv, nestly.collect_globs(input_dir, ['file*'])) - -nestly.build(ctl, 'runs') +nest.build('runs')
Update basic_nest for new API
py
diff --git a/nodeshot/open311/views.py b/nodeshot/open311/views.py index <HASH>..<HASH> 100644 --- a/nodeshot/open311/views.py +++ b/nodeshot/open311/views.py @@ -145,7 +145,7 @@ class ServiceRequests(generics.ListCreateAPIView): } def get(self, request, *args, **kwargs): - + """ Retrieve list of service requests """ if 'service_code' not in request.GET.keys(): return Response({ 'detail': _('A service code must be inserted') }, status=404) service_code = request.GET['service_code'] @@ -248,6 +248,8 @@ class ServiceRequests(generics.ListCreateAPIView): return Response(data) def post(self, request, *args, **kwargs): + """ Post a service request ( requires authentication) """ + service_code = request.POST['service_code'] if service_code not in SERVICES.keys(): @@ -319,6 +321,8 @@ service_requests = ServiceRequests.as_view() class ServiceRequest(generics.RetrieveAPIView): + """ Retrieve the details of a service request """ + serializer_class= NodeRequestDetailSerializer def get(self, request, *args, **kwargs): context = self.get_serializer_context()
Updated Open <I> API methods descriptions
py
diff --git a/salt/modules/nova.py b/salt/modules/nova.py index <HASH>..<HASH> 100644 --- a/salt/modules/nova.py +++ b/salt/modules/nova.py @@ -11,6 +11,8 @@ Module for handling openstack nova calls. keystone.password: verybadpass keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' + # Optional + keystone.region_name: 'regionOne' If configuration for multiple openstack accounts is required, they can be set up as different configuration profiles:
Add comment for region_name to docstring
py
diff --git a/pymongo/pool.py b/pymongo/pool.py index <HASH>..<HASH> 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -233,7 +233,7 @@ class BasePool(object): checked_sock = self._check_closed(req_state, pair) if checked_sock != req_state: - self._set_request_state(req_state) + self._set_request_state(checked_sock) return checked_sock @@ -323,6 +323,9 @@ class BasePool(object): """ if time.time() - sock_info.last_checkout > 1: if _closed(sock_info.sock): + # Ensure sock_info doesn't return itself to pool + self.discard_socket(sock_info) + try: return self.connect(pair) except socket.error:
Bugfixes: Pool properly discards sockets if it detects they've died PYTHON-<I>
py
diff --git a/moto/ec2/models.py b/moto/ec2/models.py index <HASH>..<HASH> 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1949,11 +1949,12 @@ class ElasticAddress(object): properties = cloudformation_json.get('Properties') instance_id = None if properties: + domain=properties.get('Domain') eip = ec2_backend.allocate_address( - domain=properties.get('Domain')) + domain=domain if domain else 'standard') instance_id = properties.get('InstanceId') else: - eip = ec2_backend.allocate_address() + eip = ec2_backend.allocate_address(domain='standard') if instance_id: instance = ec2_backend.get_instance_by_id(instance_id)
default eip domain to 'standard' for cloudformation creations
py
diff --git a/trollimage/colormap.py b/trollimage/colormap.py index <HASH>..<HASH> 100644 --- a/trollimage/colormap.py +++ b/trollimage/colormap.py @@ -272,7 +272,7 @@ class Colormap(object): if num_bands1 == num_bands2: return cmap1, cmap2 if 4 in (num_bands1, num_bands2): - return cmap1.to_rgba(), cmap1.to_rgba() + return cmap1.to_rgba(), cmap2.to_rgba() raise ValueError("Can't normalize colors of colormaps. Unexpected " f"number of bands: {num_bands1} and {num_bands2}.")
Fix typo in Colormap color normalization
py
diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py index <HASH>..<HASH> 100644 --- a/salt/modules/aptpkg.py +++ b/salt/modules/aptpkg.py @@ -10,7 +10,6 @@ Support for APT (Advanced Packaging Tool) For repository management, the ``python-apt`` package must be installed. """ - import copy import datetime import fnmatch
Drop Py2 and six on salt/modules/aptpkg.py
py
diff --git a/keyring/tests/test_backend.py b/keyring/tests/test_backend.py index <HASH>..<HASH> 100644 --- a/keyring/tests/test_backend.py +++ b/keyring/tests/test_backend.py @@ -145,11 +145,9 @@ class BackendBasicTests: self.set_password('service1', 'user2', 'password2') cred = keyring.get_credential('service1', None) - assert cred is not None - assert (cred.username, cred.password) in ( + assert cred is None or (cred.username, cred.password) in ( ('user1', 'password1'), ('user2', 'password2'), - (None, None), ) cred = keyring.get_credential('service1', 'user2')
Fix handling of None case on macOS
py
diff --git a/wallace/models.py b/wallace/models.py index <HASH>..<HASH> 100644 --- a/wallace/models.py +++ b/wallace/models.py @@ -106,7 +106,8 @@ class Node(Base): self.transmit(info, vector.destination) def update(self, info): - pass + raise NotImplementedError( + "The update method of node '{}' has not been overridden".format(self)) @hybrid_property def outdegree(self):
Changed node update method: it now raises NotImplementedError
py
diff --git a/zencoder/__init__.py b/zencoder/__init__.py index <HASH>..<HASH> 100644 --- a/zencoder/__init__.py +++ b/zencoder/__init__.py @@ -0,0 +1 @@ +from zencoder import Zencoder
put the Zencoder object at the top level of the module
py
diff --git a/examples/top_artists.py b/examples/top_artists.py index <HASH>..<HASH> 100755 --- a/examples/top_artists.py +++ b/examples/top_artists.py @@ -22,9 +22,9 @@ class Streams(luigi.Task): date = luigi.DateParameter() def run(self): - with open(self.output(), 'w') as output: + with self.output().open('w') as output: for i in xrange(1000): - output.write('{}{}{}\n'.format( + output.write('{} {} {}\n'.format( random.randint(0, 999), random.randint(0, 999), random.randint(0, 999))) @@ -75,7 +75,7 @@ class AggregateArtistsHadoop(luigi.hadoop.JobTask): def mapper(self, line): _, artist, _ = line.strip().split() yield artist, 1 - + def reducer(self, key, values): yield key, sum(values) @@ -105,7 +105,7 @@ class Top10Artists(luigi.Task): out_file.write(out_line + '\n') def _input_iterator(self): - with open(self.input(), 'r') as in_file: + with self.input().open('r') as in_file: for line in in_file: artist, streams = line.strip().split() yield int(streams), artist
Fix the top_artists example
py
diff --git a/splunklib/binding.py b/splunklib/binding.py index <HASH>..<HASH> 100644 --- a/splunklib/binding.py +++ b/splunklib/binding.py @@ -466,7 +466,7 @@ class Context(object): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if self.scheme == "https": sock = ssl.wrap_socket(sock) - sock.connect((self.host, self.port)) + sock.connect((socket.gethostbyname(self.host), self.port)) return sock @_authentication
Changed the socket binding to be more efficient on Windows.
py
diff --git a/holidays.py b/holidays.py index <HASH>..<HASH> 100644 --- a/holidays.py +++ b/holidays.py @@ -340,7 +340,7 @@ class Canada(HolidayBase): self[date(year, 8, 1) + rd(weekday=MO)] = "British Columbia Day" elif self.prov in ('NB') and year >= 1900: # https://en.wikipedia.org/wiki/Civic_Holiday - self[date(year, 8, 1) + rd(weekday=MO)] = "New Bruswick Day" + self[date(year, 8, 1) + rd(weekday=MO)] = "New Brunswick Day" elif self.prov in ('SK') and year >= 1900: # https://en.wikipedia.org/wiki/Civic_Holiday self[date(year, 8, 1) + rd(weekday=MO)] = "Saskatchewan Day"
Fix typo in "New Brunswick Day" holiday name
py
diff --git a/bcbio/structural/lumpy.py b/bcbio/structural/lumpy.py index <HASH>..<HASH> 100644 --- a/bcbio/structural/lumpy.py +++ b/bcbio/structural/lumpy.py @@ -40,7 +40,10 @@ def _run_lumpy(full_bams, sr_bams, disc_bams, work_dir, items): disc_bams = ",".join(disc_bams) exclude = "-x %s" % sv_exclude_bed if sv_exclude_bed else "" ref_file = dd.get_ref_file(items[0]) - cmd = ("speedseq sv -v -B {full_bams} -S {sr_bams} -D {disc_bams} -R {ref_file} " + # use our bcbio python for runs within speedseq + curpython_dir = os.path.dirname(sys.executable) + cmd = ("export PATH={curpython_dir}:$PATH && " + "speedseq sv -v -B {full_bams} -S {sr_bams} -D {disc_bams} -R {ref_file} " "{exclude} -A false -T {tmpdir} -o {out_base}") do.run(cmd.format(**locals()), "speedseq lumpy", items[0]) return out_file, sv_exclude_bed
Ensure speedseq calls to lumpy python scripts use installed bcbio python with dependencies. Fixes #<I>
py
diff --git a/icekit/plugins/image/tests.py b/icekit/plugins/image/tests.py index <HASH>..<HASH> 100644 --- a/icekit/plugins/image/tests.py +++ b/icekit/plugins/image/tests.py @@ -1,3 +1,5 @@ +from unittest import skip + from django.contrib.auth import get_user_model from django.contrib.contenttypes.models import ContentType from django_dynamic_fixture import G @@ -52,6 +54,7 @@ class ImageItem(WebTest): self.image_item_1.caption = test_text self.assertEqual(self.image_item_1.caption, test_text) + @skip("Test fixture doesn't use a real image, so the thumbnailer doesn't like it") def test_render(self): self.page_1.publish() response = self.app.get(self.page_1.publishing_linked.get_absolute_url())
Skip test that fails because the test fixture isn't a real image file.
py
diff --git a/salt/modules/timezone.py b/salt/modules/timezone.py index <HASH>..<HASH> 100644 --- a/salt/modules/timezone.py +++ b/salt/modules/timezone.py @@ -127,8 +127,15 @@ def _get_zone_etc_localtime(): def _get_zone_etc_timezone(): - with salt.utils.fopen('/etc/timezone', 'r') as fp_: - return fp_.read().strip() + tzfile = '/etc/timezone' + try: + with salt.utils.fopen(tzfile, 'r') as fp_: + return fp_.read().strip() + except IOError as exc: + raise CommandExecutionError( + 'Problem reading timezone file {0}: {1}' + .format(tzfile, exc.strerror) + ) def get_zone():
timezone: Don't assume /etc/timezone exists. Sometimes /etc/timezone might not exist.
py