diff
stringlengths
139
3.65k
message
stringlengths
8
627
diff_languages
stringclasses
1 value
diff --git a/irc3/rfc.py b/irc3/rfc.py index <HASH>..<HASH> 100644 --- a/irc3/rfc.py +++ b/irc3/rfc.py @@ -22,7 +22,7 @@ PONG = raw.new( 'PONG', r':(?P<server>\S+) PONG (?P=server) :(?P<data>.*)') -NEW_NICK = raw.new('NEW_NICK', r':(?P<nick>\S+) NICK (?P<new_nick>\S+)') +NEW_NICK = raw.new('NEW_NICK', r':(?P<nick>\S+) NICK :?(?P<new_nick>\S+)') JOIN = raw.new('JOIN', r':(?P<mask>\S+) JOIN (?P<channel>\S+)') PART = raw.new('PART',
Allow optional colon(:) before new nick in NICK This is to allow for networks (such as freenode) that send NICK in the format: :old_nick@host NICK :new_nick
py
diff --git a/phono3py/phonon3/fc3.py b/phono3py/phonon3/fc3.py index <HASH>..<HASH> 100644 --- a/phono3py/phonon3/fc3.py +++ b/phono3py/phonon3/fc3.py @@ -512,7 +512,7 @@ def show_drift_fc3(fc3, "Corresponding python code is not implemented.") raise RuntimeError(text) - text = "max drift of %s: " % name + text = "Max drift of %s: " % name text += "%f (%s%s%s) " % (maxval1, "xyz"[klm1[0]], "xyz"[klm1[1]], "xyz"[klm1[2]]) text += "%f (%s%s%s) " % (maxval2,
Slightly modify the output text for max drift fc
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -35,6 +35,6 @@ setup( version=verstr, py_modules=['git_archive_all'], entry_points={'console_scripts': 'git-archive-all=git_archive_all:main'}, - tests_require=['pytest', 'pytest-cov'], + tests_require=['pytest', 'pytest-cov', 'pycodestyle'], cmdclass={"test": PyTest}, )
Add missing tests dependency pycodestyle.
py
diff --git a/modconf/__init__.py b/modconf/__init__.py index <HASH>..<HASH> 100644 --- a/modconf/__init__.py +++ b/modconf/__init__.py @@ -1,4 +1,4 @@ -__version__ = '0.4b6' +__version__ = '0.4b7' import sys
PKGTOOL change version from <I>b6 to <I>b7
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -23,6 +23,6 @@ setup( install_requires=[ 'six', 'future', - 'enum34', + "enum34 ; python_version<'3.4'", ], )
fix: only require enum<I> in python 2 builds
py
diff --git a/ntlm3/U32.py b/ntlm3/U32.py index <HASH>..<HASH> 100644 --- a/ntlm3/U32.py +++ b/ntlm3/U32.py @@ -28,7 +28,7 @@ class U32: v = 0 def __init__(self, value=0): - if type(value) != int: + if not isinstance(value, (int, long)): value = six.byte2int(value) self.v = C + norm(abs(int(value)))
Recognize long integers as integers. Fixes #6.
py
diff --git a/gprof2dot.py b/gprof2dot.py index <HASH>..<HASH> 100755 --- a/gprof2dot.py +++ b/gprof2dot.py @@ -1330,8 +1330,8 @@ class OprofileParser(LineParser): """ _fields_re = { - 'samples': r'(?P<samples>\d+)', - '%': r'(?P<percentage>\S+)', + 'samples': r'(\d+)', + '%': r'(\S+)', 'linenr info': r'(?P<source>\(no location information\)|\S+:\d+)', 'image name': r'(?P<image>\S+(?:\s\(tgid:[^)]*\))?)', 'app name': r'(?P<application>\S+)', @@ -1439,8 +1439,7 @@ class OprofileParser(LineParser): if not mo: raise ParseError('failed to parse', line) fields = mo.groupdict() - entry.samples = int(fields.get('samples', 0)) - entry.percentage = float(fields.get('percentage', 0.0)) + entry.samples = int(mo.group(1)) if 'source' in fields and fields['source'] != '(no location information)': source = fields['source'] filename, lineno = source.split(':')
Handle oprofile data with multiple events (issue #<I>). All but the first event are ignored for now.
py
diff --git a/nurbs/Curve.py b/nurbs/Curve.py index <HASH>..<HASH> 100644 --- a/nurbs/Curve.py +++ b/nurbs/Curve.py @@ -141,6 +141,18 @@ class Curve(object): # Delete the calculated curve points del self._mCurvePts[:] + def _check_variables(self): + works = True + # Check degree values + if self._mDegree == 0: + works = False + if not self._mCtrlPts: + works = False + if not self._mKnotVector: + works = False + if not works: + raise ValueError("Some required parameters for calculations are not set.") + def read_ctrlpts(self, filename=''): # Clean up the curve and control points lists, if necessary self._reset_curve() @@ -164,6 +176,8 @@ class Curve(object): sys.exit(1) def calculate(self): + # Check all parameters are set before calculations + self._check_variables() # Clean up the curve points, if necessary self._reset_curve() @@ -178,6 +192,8 @@ class Curve(object): self._mCurvePts.append(curvept) def calculatew(self): + # Check all parameters are set before calculations + self._check_variables() # Clean up the curve points, if necessary self._reset_curve()
Added pre-calculation checks to Curve class
py
diff --git a/formly/models.py b/formly/models.py index <HASH>..<HASH> 100644 --- a/formly/models.py +++ b/formly/models.py @@ -376,25 +376,25 @@ FIELD_TYPES = { Field.TEXT_AREA: dict( field_class=forms.CharField, kwargs=dict( - widget=forms.Textarea() + widget=forms.Textarea ) ), Field.RADIO_CHOICES: dict( field_class=forms.ChoiceField, kwargs=dict( - widget=forms.RadioSelect() + widget=forms.RadioSelect ) ), Field.LIKERT_FIELD: dict( field_class=forms.ChoiceField, kwargs=dict( - widget=LikertSelect() + widget=LikertSelect ) ), Field.RATING_FIELD: dict( field_class=forms.ChoiceField, kwargs=dict( - widget=RatingSelect() + widget=RatingSelect ) ), Field.DATE_FIELD: dict( @@ -404,13 +404,13 @@ FIELD_TYPES = { Field.SELECT_FIELD: dict( field_class=forms.ChoiceField, kwargs=dict( - widget=forms.Select() + widget=forms.Select ) ), Field.CHECKBOX_FIELD: dict( field_class=LimitedMultipleChoiceField, kwargs=dict( - widget=forms.CheckboxSelectMultiple() + widget=forms.CheckboxSelectMultiple ) ), Field.BOOLEAN_FIELD: dict(
Replace form field widgets with a class, not class instance
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -56,7 +56,7 @@ setup(name='cwltool', 'shellescape >= 3.4.1, < 3.5', 'schema-salad >= 2.7.20180719125426, < 3', 'mypy-extensions', - 'six >= 1.8.0', + 'six >= 1.9.0', 'psutil', 'prov == 1.5.1', 'bagit >= 1.6.4',
Bump dependency on six from <I> to <I> cwltool itself doesn't depend on six <I>, but prov does, and having matching dependencies helps Pip find a correct set of packages to install (see #<I>).
py
diff --git a/tensorpack/dataflow/common.py b/tensorpack/dataflow/common.py index <HASH>..<HASH> 100644 --- a/tensorpack/dataflow/common.py +++ b/tensorpack/dataflow/common.py @@ -198,10 +198,10 @@ class FixedSizeData(ProxyDataFlow): cnt = 0 while True: try: - dp = self.itr.next() + dp = next(self.itr) except StopIteration: self.itr = self.ds.get_data() - dp = self.itr.next() + dp = next(self.itr) cnt += 1 yield dp
fix <I> (#<I>)
py
diff --git a/tests/tests.py b/tests/tests.py index <HASH>..<HASH> 100644 --- a/tests/tests.py +++ b/tests/tests.py @@ -465,10 +465,6 @@ class MaintenanceModeTestCase(TestCase): response = self.middleware.process_request(request) self.assertEqual(response, None) - settings.MAINTENANCE_MODE_IGNORE_URLS = None - response = self.middleware.process_request(request) - self.assertMaintenanceMode(response) - class LazyUrl: def __init__(self, url): self.url = url @@ -480,6 +476,10 @@ class MaintenanceModeTestCase(TestCase): response = self.middleware.process_request(request) self.assertEqual(response, None) + settings.MAINTENANCE_MODE_IGNORE_URLS = None + response = self.middleware.process_request(request) + self.assertMaintenanceMode(response) + def test_middleware_redirect_url(self): self.__reset_state()
Fix ignore-LazyUrl test (settings weren't being unset)
py
diff --git a/Server/Python/src/dbs/business/DBSBlockInsert.py b/Server/Python/src/dbs/business/DBSBlockInsert.py index <HASH>..<HASH> 100644 --- a/Server/Python/src/dbs/business/DBSBlockInsert.py +++ b/Server/Python/src/dbs/business/DBSBlockInsert.py @@ -387,8 +387,13 @@ class DBSBlockInsert : conn = self.dbi.connection() # First, check and see if the dataset exists. - datasetID = self.datasetid.execute(conn, dataset['dataset']) - dataset['dataset_id'] = datasetID + try: + datasetID = self.datasetid.execute(conn, dataset['dataset']) + dataset['dataset_id'] = datasetID + except KeyError, ex: + dbsExceptionHandler("dbsException-invalid-input", "DBSBlockInsert/InsertDataset: Dataset is required.\ + Exception: %s. troubled dataset are: %s" %(ex.args[0], dataset) ) + if conn:conn.close() if datasetID > 0: # Then we already have a valid dataset. We only need to fill the map (dataset & output module config) # Skip to the END
Catch exception when dataset is missing during dataset insertion. YG From: yuyi <<EMAIL>> git-svn-id: svn+ssh://svn.cern.ch/reps/CMSDMWM/DBS/trunk@<I> <I>e-<I>-<I>b1-a<I>-d<I>a<I>b
py
diff --git a/dbbackup/settings.py b/dbbackup/settings.py index <HASH>..<HASH> 100644 --- a/dbbackup/settings.py +++ b/dbbackup/settings.py @@ -14,7 +14,7 @@ TMP_DIR = getattr(settings, 'DBBACKUP_TMP_DIR', tempfile.gettempdir()) TMP_FILE_MAX_SIZE = getattr(settings, 'DBBACKUP_TMP_FILE_MAX_SIZE', 10 * 1024 * 1024) TMP_FILE_READ_SIZE = getattr(settings, 'DBBACKUP_TMP_FILE_READ_SIZE', 1024 * 1000) -# Days to keep +# Number of old backup files to keep CLEANUP_KEEP = getattr(settings, 'DBBACKUP_CLEANUP_KEEP', 10) CLEANUP_KEEP_MEDIA = getattr(settings, 'DBBACKUP_CLEANUP_KEEP_MEDIA', CLEANUP_KEEP) CLEANUP_KEEP_FILTER = getattr(settings, 'DBBACKUP_CLEANUP_KEEP_FILTER', lambda x: False)
Update settings.py comment (#<I>)
py
diff --git a/tensor2tensor/data_generators/cnn_dailymail.py b/tensor2tensor/data_generators/cnn_dailymail.py index <HASH>..<HASH> 100644 --- a/tensor2tensor/data_generators/cnn_dailymail.py +++ b/tensor2tensor/data_generators/cnn_dailymail.py @@ -74,7 +74,7 @@ def story_generator(tmp_dir): for path in paths: for story_file in tf.gfile.Glob(path + "*"): story = u"" - for line in tf.gfile.Open(story_file): + for line in tf.gfile.Open(story_file, 'rb'): line = unicode(line, "utf-8") if six.PY2 else line.decode("utf-8") story += line yield story
Fixing #<I>: decoding str object instead of bytes (#<I>)
py
diff --git a/ipywidgets/widgets/tests/test_interaction.py b/ipywidgets/widgets/tests/test_interaction.py index <HASH>..<HASH> 100644 --- a/ipywidgets/widgets/tests/test_interaction.py +++ b/ipywidgets/widgets/tests/test_interaction.py @@ -11,9 +11,9 @@ import pytest import ipywidgets as widgets -from traitlets import TraitError +from traitlets import TraitError, Float from ipywidgets import (interact, interact_manual, interactive, - interaction, Output) + interaction, Output, Widget) #----------------------------------------------------------------------------- # Utility stuff @@ -444,6 +444,16 @@ def test_custom_description(): w.value = 'different text' assert d == {'b': 'different text'} +def test_raises_on_non_value_widget(): + """ Test that passing in a non-value widget raises an error """ + + class BadWidget(Widget): + """ A widget that contains a `value` traitlet """ + value = Float() + + with pytest.raises(TypeError, match=".* not a ValueWidget.*"): + interactive(f, b=BadWidget()) + def test_interact_manual_button(): c = interact.options(manual=True).widget(f) w = c.children[0]
Verify that we explicitly disallow non-value widgets in interact
py
diff --git a/waterboy/api/model_config.py b/waterboy/api/model_config.py index <HASH>..<HASH> 100644 --- a/waterboy/api/model_config.py +++ b/waterboy/api/model_config.py @@ -76,6 +76,10 @@ class ModelConfig: """ Return data directory for given dataset """ return self.project_config.project_toplevel_dir(*args) + def openai_dir(self) -> str: + """ Return directory for openai output files for this model """ + return self.project_config.project_output_dir('openai', self.run_name) + #################################################################################################################### # NAME UTILITIES @property
Add OpenAI logging directory to the model config.
py
diff --git a/h2o-py/tests/testdir_algos/gbm/pyunit_imbalanced_gbm.py b/h2o-py/tests/testdir_algos/gbm/pyunit_imbalanced_gbm.py index <HASH>..<HASH> 100644 --- a/h2o-py/tests/testdir_algos/gbm/pyunit_imbalanced_gbm.py +++ b/h2o-py/tests/testdir_algos/gbm/pyunit_imbalanced_gbm.py @@ -40,7 +40,7 @@ def imbalanced_gbm(): print("") print("--------------------") - assert class_6_err_imbalanced >= 0.90*class_6_err_balanced, "balance_classes makes it at least 10% worse!" + assert class_6_err_imbalanced >= 0.90*class_6_err_balanced, "balance_classes makes it at least 10% worse: imbalanced %d, balanced %d" % (class_6_err_imbalanced, class_6_err_balanced)
Slightly better error message (#<I>)
py
diff --git a/salt/spm/__init__.py b/salt/spm/__init__.py index <HASH>..<HASH> 100644 --- a/salt/spm/__init__.py +++ b/salt/spm/__init__.py @@ -221,7 +221,11 @@ class SPMClient(object): digest = '' else: file_hash = hashlib.sha1() - digest = self.pkgfiles['{0}.hash_file'.format(self.files_prov)](out_path, file_hash, self.files_conn) + digest = self.pkgfiles['{0}.hash_file'.format(self.files_prov)]( + os.path.join(out_path, member.name), + file_hash, + self.files_conn + ) self.pkgdb['{0}.register_file'.format(self.db_prov)]( name, member,
Pass full path for digest (SPM)
py
diff --git a/qface/utils.py b/qface/utils.py index <HASH>..<HASH> 100644 --- a/qface/utils.py +++ b/qface/utils.py @@ -1,15 +1,10 @@ -def merge(a, b, path=None): - "merges b into a" - # import pdb; pdb.set_trace() - path = path or [] +def merge(a, b): + "merges b into a recursively if a and b are dicts" for key in b: - if key in a: - if isinstance(a[key], dict) and isinstance(b[key], dict): - merge(a[key], b[key], path + [str(key)]) - else: - a[key] = b[key] + if isinstance(a.get(key), dict) and isinstance(b.get(key), dict): + merge(a[key], b[key]) else: a[key] = b[key] return a
simplified the dict merging. small fix
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -74,7 +74,7 @@ setup( include_package_data=True, install_requires=['sovrin-common-dev==0.2.50', 'anoncreds-dev==0.3.8'], setup_requires=['pytest-runner'], - tests_require=['pytest', 'sovrin-node-dev==0.3.74'], + tests_require=['pytest', 'sovrin-node-dev==0.3.78'], scripts=['scripts/sovrin', 'scripts/change_node_ha', 'scripts/add_new_node', 'scripts/reset_client'], cmdclass={
Updated sovrin-node-dev dependency.
py
diff --git a/holoviews/core/ndmapping.py b/holoviews/core/ndmapping.py index <HASH>..<HASH> 100644 --- a/holoviews/core/ndmapping.py +++ b/holoviews/core/ndmapping.py @@ -495,7 +495,7 @@ class NdMapping(MultiDimensionalMapping): def __getitem__(self, indexslice): """ - Allows slicing operations along the map and data + Allows slicing operations along the key and data dimensions. If no data slice is supplied it will return all data elements, otherwise it will return the requested slice of the data.
Fixed outdated docstring in ndmapping
py
diff --git a/tasklib/backends.py b/tasklib/backends.py index <HASH>..<HASH> 100644 --- a/tasklib/backends.py +++ b/tasklib/backends.py @@ -272,3 +272,5 @@ class TaskWarrior(object): return json.loads(output[0]) + def sync(self): + self.execute_command(['sync'])
TaskWarrior: Implement sync method
py
diff --git a/zounds/spectral/test_frequencydimension.py b/zounds/spectral/test_frequencydimension.py index <HASH>..<HASH> 100644 --- a/zounds/spectral/test_frequencydimension.py +++ b/zounds/spectral/test_frequencydimension.py @@ -43,7 +43,7 @@ class ExplicitFrequencyDimensionTests(unittest2.TestCase): slices1 = [slice(0, 10), slice(10, 100), slice(100, 1000)] dim1 = ExplicitFrequencyDimension(scale1, slices1) - scale2 = GeometricScale(20, 5000, 0.02, 3) + scale2 = GeometricScale(20, 4500, 0.02, 3) slices2 = [slice(0, 10), slice(10, 100), slice(100, 1000)] dim2 = ExplicitFrequencyDimension(scale2, slices2)
Update a test that was seemingly making a bad assertion. How was this ever passing?
py
diff --git a/setuptools_rust/build.py b/setuptools_rust/build.py index <HASH>..<HASH> 100644 --- a/setuptools_rust/build.py +++ b/setuptools_rust/build.py @@ -181,17 +181,19 @@ class build_rust(Command): target_dir) else: if sys.platform == "win32": - wildcard_so = "*.dll" + dylib_ext = "dll" elif sys.platform == "darwin": - wildcard_so = "*.dylib" + dylib_ext = "dylib" else: - wildcard_so = "*.so" + dylib_ext = "so" + wildcard_so = "*{}.{}".format(ext.basename, dylib_ext) try: - dylib_paths.append( - (ext.name, glob.glob( - os.path.join(artifactsdir, wildcard_so))[0])) - except IndexError: + dylib_paths.append(( + ext.name, + next(glob.iglob(os.path.join(artifactsdir, wildcard_so))) + )) + except StopIteration: raise DistutilsExecError( "rust build failed; unable to find any %s in %s" % (wildcard_so, artifactsdir))
Fix wrong dylib being copied from multiple target directory
py
diff --git a/battlenet/things.py b/battlenet/things.py index <HASH>..<HASH> 100644 --- a/battlenet/things.py +++ b/battlenet/things.py @@ -514,7 +514,7 @@ class Build(Thing): self.build = data['build'] self.icon = data.get('icon') - self.name = data['name'] + self.name = data.get('name', '-') self.selected = data.get('selected', False) self.glyphs = { 'prime': [],
Prevent exception with non-existing build
py
diff --git a/pdb.py b/pdb.py index <HASH>..<HASH> 100644 --- a/pdb.py +++ b/pdb.py @@ -731,7 +731,7 @@ except for when using the function decorator. Used via do_list currently only, do_source and do_longlist are overridden. """ - if self.config.highlight and self.config.use_pygments: + if self.config.use_pygments: if lines: lines = self.format_source( "".join(lines) @@ -741,7 +741,7 @@ except for when using the function decorator. else: # Only for Python 2.7, where _print_lines is not used/available. def do_list(self, arg): - if not self.config.highlight or not self.config.use_pygments: + if not self.config.use_pygments: return super(Pdb, self).do_list(arg) oldstdout = self.stdout
Fixup list: pygments can be used without highlight (#<I>)
py
diff --git a/abydos/stemmer.py b/abydos/stemmer.py index <HASH>..<HASH> 100644 --- a/abydos/stemmer.py +++ b/abydos/stemmer.py @@ -150,7 +150,7 @@ def porter(word): word += 'e' # Step 1c - if (word[-1] == 'Y' or word[-1] == 'y') and _sb_has_vowel(word[:-1]): + if word[-1] in set('Yy') and _sb_has_vowel(word[:-1]): word = word[:-1] + 'i' # Step 2 @@ -466,6 +466,12 @@ def porter2(word): elif _sb_short_word(word, _vowels, _codanonvowels): word += 'e' + # Step 1c + if len(word) > 2 and word[-1] in set('Yy') and word[-2] not in _vowels: + word = word[:-1] + 'i' + + + # Change 'y' back to 'Y' if it survived stemming for i in _range(0, len(word)): if word[i] == 'Y':
added Porter2 step 1c small refinement to Porter step 1c
py
diff --git a/pep8radius.py b/pep8radius.py index <HASH>..<HASH> 100644 --- a/pep8radius.py +++ b/pep8radius.py @@ -623,7 +623,7 @@ def which_version_control(): # pragma: no cover # Not supported (yet) raise NotImplementedError("Unknown version control system, " - "or you're in the project directory.") + "or you're not in the project directory.") if __name__ == "__main__": # pragma: no cover
DOC typo in which_version_control, read *not* in project dir
py
diff --git a/commands.py b/commands.py index <HASH>..<HASH> 100755 --- a/commands.py +++ b/commands.py @@ -215,7 +215,7 @@ def release(version=None, date=None, tag_name=None, next_version=None, prepare=T changelog = 'CHANGELOG' # E.g.: __version__ = '1.0.dev0' - version_re = r"^__version__ = '(?P<version>.+)(?P<dev_marker>\.dev\d+)?'$" + version_re = r"^__version__ = '(?P<version>.+?)(?P<dev_marker>\.dev\d+)?'$" # E.g.: ## 1.0.0 - 2017-04-01 changelog_header_re = r'^## (?P<version>.+) - (?P<date>.+)$'
Tweak version regex in release command again Make the main version part non-greedy. Amends b3aa4c<I>cec5eeb<I>d<I>aa<I>c3b<I>d3
py
diff --git a/vertex/q2q.py b/vertex/q2q.py index <HASH>..<HASH> 100644 --- a/vertex/q2q.py +++ b/vertex/q2q.py @@ -441,7 +441,7 @@ class _PTCPConnectionAttemptPress(AbstractConnectionAttempt): if not self.cancelled: self.q2qproto.service.dispatcher.unbindPort(self.newPort) else: - print 'totally wacky, [press] cancelled twice!' + print('totally wacky, [press] cancelled twice!') AbstractConnectionAttempt.cancel(self) class PTCPMethod(TCPMethod):
at least valid 3 syntax, pls
py
diff --git a/gimmemotifs/prediction.py b/gimmemotifs/prediction.py index <HASH>..<HASH> 100644 --- a/gimmemotifs/prediction.py +++ b/gimmemotifs/prediction.py @@ -79,13 +79,15 @@ def pp_predict_motifs(fastafile, analysis="small", organism="hg18", single=False "rn4":"RN", "dm3":"DM", "fr2": "FR", + "danRer6": "DR", "danRer7": "DR", "galGal3": "GG", "ce3": "CE", "anoGam1": "AG", "yeast":"SC", "sacCer2":"SC", - "xenTro2":"XT"} + "xenTro2":"XT", + "xenTro3":"XT"} if weeder_organisms.has_key(organism): weeder_organism = weeder_organisms[organism] else:
Added Weeder organism, fixed bug that caused a crash when specifying an unsupported organism for Weeder
py
diff --git a/newsapi/newsapi_client.py b/newsapi/newsapi_client.py index <HASH>..<HASH> 100644 --- a/newsapi/newsapi_client.py +++ b/newsapi/newsapi_client.py @@ -50,17 +50,17 @@ class NewsApiClient(object): else: raise TypeError('keyword/phrase q param should be a str') - # Sources + # Sources + if (sources is not None) and ((country is not None) or (category is not None)): + raise ValueError('cannot mix country/category param with sources param.') + + # Sources if sources is not None: if type(sources) == str: payload['sources'] = sources else: raise TypeError('sources param should be a str') - # Sources - if (country is not None) and (category is not None): - raise ValueError('cannot mix country/category param with sources param.') - # Language if language is not None: if type(language) == str:
Allowing country + category searches for news. The newsapi allows news queries with both country and category parameters in the payload. However, it doesn't allow country and source or category and source mixed parameters. The update to the code reflects the corrected parameter options.
py
diff --git a/examples/scripts/add-storage-system.py b/examples/scripts/add-storage-system.py index <HASH>..<HASH> 100755 --- a/examples/scripts/add-storage-system.py +++ b/examples/scripts/add-storage-system.py @@ -91,8 +91,11 @@ def add_storage_system(sto, ip, usr, pas, domain, import_pools): conSys['unmanagedDomains'].remove(dom) found = True if not found: - print('Storage Domain ', domain, ' not found. Verify the domain ' - 'exsits on the storage system') + print('Storage Domain "',domain,'" not found. The following domains ' + 'have been found on the storage system') + for dom in reversed(conSys['unmanagedDomains']): + pprint(dom) + sto.remove_storage_system(conSys) sys.exit() if import_pools: found = False
Fix bug where a storage system can be added without a valid storage domain being specified.
py
diff --git a/playhouse/postgres_ext.py b/playhouse/postgres_ext.py index <HASH>..<HASH> 100644 --- a/playhouse/postgres_ext.py +++ b/playhouse/postgres_ext.py @@ -164,7 +164,7 @@ class ArrayField(IndexedFieldMixin, Field): class DateTimeTZField(DateTimeField): - db_field = 'datetime_tz' + db_field = 'timestamptz' class HStoreField(IndexedFieldMixin, Field):
Fixed db_field for DateTimeTz in postgres_ext cause since <I> `datetime_tz` does not supported.
py
diff --git a/src/graphql/pyutils/did_you_mean.py b/src/graphql/pyutils/did_you_mean.py index <HASH>..<HASH> 100644 --- a/src/graphql/pyutils/did_you_mean.py +++ b/src/graphql/pyutils/did_you_mean.py @@ -7,7 +7,7 @@ MAX_LENGTH = 5 def did_you_mean(suggestions: Sequence[str], sub_message: Optional[str] = None) -> str: """Given [ A, B, C ] return ' Did you mean A, B, or C?'""" - if not suggestions: + if not suggestions or not MAX_LENGTH: return "" parts = [" Did you mean "] if sub_message:
Simplify monkey-patching the did_you_mean function
py
diff --git a/test/test_multiple_values_for_tag_attribute.py b/test/test_multiple_values_for_tag_attribute.py index <HASH>..<HASH> 100644 --- a/test/test_multiple_values_for_tag_attribute.py +++ b/test/test_multiple_values_for_tag_attribute.py @@ -16,4 +16,4 @@ def test_parse_tag_attributes(): tag_column = parsed["tag"] eq_(len(tag_column), 1) tags = tag_column[0] - eq_(tags, ['cds_end_NF', 'mRNA_end_NF']) + eq_(tags, 'cds_end_NF,mRNA_end_NF')
updated test to have str vs. list
py
diff --git a/python_modules/libraries/dagster-snowflake/dagster_snowflake/resources.py b/python_modules/libraries/dagster-snowflake/dagster_snowflake/resources.py index <HASH>..<HASH> 100644 --- a/python_modules/libraries/dagster-snowflake/dagster_snowflake/resources.py +++ b/python_modules/libraries/dagster-snowflake/dagster_snowflake/resources.py @@ -3,8 +3,6 @@ import warnings from contextlib import closing, contextmanager from typing import Mapping -import pandas as pd - import dagster._check as check from dagster import resource @@ -121,6 +119,8 @@ class SnowflakeConnection: check.bool_param(fetch_results, "fetch_results") if use_pandas_result: + import pandas as pd + results = pd.DataFrame() else: results = []
gate import of pandas in dagster-snowflake (#<I>)
py
diff --git a/angr/sim_manager.py b/angr/sim_manager.py index <HASH>..<HASH> 100644 --- a/angr/sim_manager.py +++ b/angr/sim_manager.py @@ -14,6 +14,7 @@ from .errors import SimError, SimMergeError from .sim_state import SimState from .state_hierarchy import StateHierarchy from .errors import AngrError, SimUnsatError, SimulationManagerError +from .sim_options import LAZY_SOLVES l = logging.getLogger(name=__name__) @@ -424,11 +425,16 @@ class SimulationManager: 'unsat': successors.unsat_successors, 'unconstrained': successors.unconstrained_successors} - except (SimUnsatError, claripy.UnsatError): + except (SimUnsatError, claripy.UnsatError) as e: + if LAZY_SOLVES not in state.options: + self._errored.append(ErrorRecord(state, e, sys.exc_info()[2])) + stashes = {} + else: + stashes = {'pruned': [state]} + if self._hierarchy: self._hierarchy.unreachable_state(state) self._hierarchy.simplify() - stashes = {'pruned': [state]} except tuple(self._resilience) as e: self._errored.append(ErrorRecord(state, e, sys.exc_info()[2]))
Make state pruning conditional on LAZY_SOLVES (#<I>)
py
diff --git a/intranet/celery.py b/intranet/celery.py index <HASH>..<HASH> 100644 --- a/intranet/celery.py +++ b/intranet/celery.py @@ -1,9 +1,19 @@ import os +import logging from celery import Celery +from celery.signals import after_setup_logger, after_setup_task_logger os.environ.setdefault("DJANGO_SETTINGS_MODULE", "intranet.settings") app = Celery("intranet") app.config_from_object("django.conf:settings", namespace="CELERY") app.autodiscover_tasks() + + +@after_setup_logger.connect +@after_setup_task_logger.connect +def setup_logger(logger, **kwargs): # pylint: disable=unused-argument + from django.conf import settings + + logger.level = getattr(logging, settings.LOG_LEVEL)
refactor(celery): respect settings.LOG_LEVEL
py
diff --git a/ella/core/newman_admin.py b/ella/core/newman_admin.py index <HASH>..<HASH> 100644 --- a/ella/core/newman_admin.py +++ b/ella/core/newman_admin.py @@ -11,7 +11,6 @@ from ella.core.models import Author, Source, Category, Listing, HitCount, Placem from ella.core.models.publishable import Publishable from ella import newman from ella.newman import options, fields -from ella.core.models.main import Related class ListingForm(modelforms.ModelForm): class Meta:
Ambiguous import removed. (made by me during resolving conflicts after pull)
py
diff --git a/formlayout.py b/formlayout.py index <HASH>..<HASH> 100644 --- a/formlayout.py +++ b/formlayout.py @@ -271,7 +271,6 @@ class FileLayout(QHBoxLayout): def setStyleSheet(self, style): self.lineedit.setStyleSheet(style) - self.filebtn.setStyleSheet(style) class RadioLayout(QVBoxLayout):
Don't theme the FileLayout button to avoid ugly rendering
py
diff --git a/salt/utils/network.py b/salt/utils/network.py index <HASH>..<HASH> 100644 --- a/salt/utils/network.py +++ b/salt/utils/network.py @@ -1296,11 +1296,13 @@ def _filter_interfaces(interface=None, interface_data=None): ret = ifaces else: interface = salt.utils.args.split_input(interface) + # pylint: disable=not-an-iterable ret = { k: v for k, v in six.iteritems(ifaces) if any((fnmatch.fnmatch(k, pat) for pat in interface)) } + # pylint: enable=not-an-iterable return ret
Ignore false positive from pylint
py
diff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py index <HASH>..<HASH> 100644 --- a/libraries/botframework-connector/setup.py +++ b/libraries/botframework-connector/setup.py @@ -11,7 +11,7 @@ REQUIRES = [ "requests>=2.23.0,<2.26", "PyJWT>=1.5.3,<2.0.0", "botbuilder-schema==4.15.0", - "msal==1.6.0", + "msal==1.17.0", ] root = os.path.abspath(os.path.dirname(__file__))
Bump msal version (#<I>) * Bump MSAL to <I> * Bump botframework-connector version * Revert "Bump botframework-connector version" This reverts commit f4f2d<I>daaea<I>ca3af<I>a<I>f<I>ffa<I>df9c<I>.
py
diff --git a/salt/client/mixins.py b/salt/client/mixins.py index <HASH>..<HASH> 100644 --- a/salt/client/mixins.py +++ b/salt/client/mixins.py @@ -95,8 +95,7 @@ class SyncClientMixin(object): try: self._verify_fun(fun) - f_call = salt.utils.format_call(self.functions[fun], low) - data['return'] = self.functions[fun](*f_call.get('args', ()), **f_call.get('kwargs', {})) + data['return'] = self.functions[fun](*low.get('args', ()), **low.get('kwargs', {})) data['success'] = True except Exception as exc: data['return'] = 'Exception occurred in {0} {1}: {2}: {3}'.format(
Make the callers of this function parse their args correctly
py
diff --git a/sqlparse/sql.py b/sqlparse/sql.py index <HASH>..<HASH> 100644 --- a/sqlparse/sql.py +++ b/sqlparse/sql.py @@ -418,7 +418,8 @@ class Statement(TokenList): if isinstance(token, (Identifier, IdentifierList)): _, dml_keyword = self.token_next(tidx, skip_ws=True) - if dml_keyword.ttype == T.Keyword.DML: + if dml_keyword is not None \ + and dml_keyword.ttype == T.Keyword.DML: return dml_keyword.normalized # Hmm, probably invalid syntax, so return unknown.
fix "WITH name" case (#<I>) * fix "WITH name" case * fix "WITH name" case (flake8 fix)
py
diff --git a/rw/event.py b/rw/event.py index <HASH>..<HASH> 100644 --- a/rw/event.py +++ b/rw/event.py @@ -64,8 +64,10 @@ class Event(set): # wait for results for func, future in futures: try: - result = yield future - re.append(result) + if not future.done(): + yield future + re.append(future.result()) + except Exception: exceptions.append((func, traceback.format_exc()))
rw.event.Event make sure we are not waiting for already done Future's
py
diff --git a/superset/db_engine_specs/presto.py b/superset/db_engine_specs/presto.py index <HASH>..<HASH> 100644 --- a/superset/db_engine_specs/presto.py +++ b/superset/db_engine_specs/presto.py @@ -730,10 +730,10 @@ class PrestoEngineSpec(BaseEngineSpec): ) if not latest_parts: - latest_parts = tuple([None] * len(col_names)) # type: ignore + latest_parts = tuple([None] * len(col_names)) metadata["partitions"] = { "cols": cols, - "latest": dict(zip(col_names, latest_parts)), # type: ignore + "latest": dict(zip(col_names, latest_parts)), "partitionQuery": pql, } @@ -925,6 +925,7 @@ class PrestoEngineSpec(BaseEngineSpec): return None @classmethod + @cache.memoize(timeout=60) def latest_partition( cls, table_name: str,
feat: improve presto query perf (#<I>)
py
diff --git a/src/authority/views.py b/src/authority/views.py index <HASH>..<HASH> 100644 --- a/src/authority/views.py +++ b/src/authority/views.py @@ -24,15 +24,17 @@ def add_permission(request, app_label, module_name, pk, approved=False, if approved: template_name = 'authority/permission_form.html' view_name = 'authority-add-permission' + user = None else: template_name = 'authority/permission_request_form.html' view_name = 'authority-add-request' + user = request.user if request.method == 'POST': if codename is None: return HttpResponseForbidden(next) form = UserPermissionForm(data=request.POST, obj=obj, approved=approved, - perm=codename, initial=dict(codename=codename)) + perm=codename, initial=dict(codename=codename, user=user)) if form.is_valid(): form.save(request) request.user.message_set.create( @@ -40,7 +42,7 @@ def add_permission(request, app_label, module_name, pk, approved=False, return HttpResponseRedirect(next) else: form = UserPermissionForm(obj=obj, perm=codename, approved=approved, - initial=dict(codename=codename)) + initial=dict(codename=codename, user=user)) context = { 'form': form,
bugfix: Initial user value in the view for permission requests
py
diff --git a/aws_ir_plugins/isolate_host.py b/aws_ir_plugins/isolate_host.py index <HASH>..<HASH> 100644 --- a/aws_ir_plugins/isolate_host.py +++ b/aws_ir_plugins/isolate_host.py @@ -63,9 +63,12 @@ class Plugin(object): self.exists = True security_group_result = self.client.describe_security_groups( DryRun=self.dry_run, - GroupNames=[ - self._generate_security_group_name() - ] + Filters=[{ + 'Name': 'group-name', + 'Values': [ + self._generate_security_group_name(), + ] + }] )['SecurityGroups'][0] return security_group_result['GroupId']
Use filter to find existing groupId - Fixes Issue #<I>
py
diff --git a/ailment/analyses/block_simplifier.py b/ailment/analyses/block_simplifier.py index <HASH>..<HASH> 100644 --- a/ailment/analyses/block_simplifier.py +++ b/ailment/analyses/block_simplifier.py @@ -69,7 +69,7 @@ class BlockSimplifier(Analysis): rd = self.project.analyses.ReachingDefinitions(block=block, track_tmps=True, - observation_points=[ (block.statements[-1].ins_addr, OP_AFTER )] + observation_points=[('insn', block.statements[-1].ins_addr, OP_AFTER)] ) used_tmp_indices = set(rd.one_result.tmp_uses.keys())
Adapt to the latest change in ReachingDefinitionAnalysis observation points in angr. (#<I>)
py
diff --git a/datanommer.models/datanommer/models/__init__.py b/datanommer.models/datanommer/models/__init__.py index <HASH>..<HASH> 100644 --- a/datanommer.models/datanommer/models/__init__.py +++ b/datanommer.models/datanommer/models/__init__.py @@ -271,11 +271,13 @@ class Message(DeclarativeBase, BaseMessage): )) total = query.count() - pages = int(math.ceil(total / float(rows_per_page))) - query = query.order_by(getattr(Message.timestamp, order)()) - query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page) + if rows_per_page is None: + pages = 1 + else: + pages = int(math.ceil(total / float(rows_per_page))) + query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page) if defer: return total, page, query
grep: Permit rows_per_page=None to not limit rows
py
diff --git a/h5netcdf/core.py b/h5netcdf/core.py index <HASH>..<HASH> 100644 --- a/h5netcdf/core.py +++ b/h5netcdf/core.py @@ -562,11 +562,16 @@ class File(Group): def __init__(self, path, mode='a', invalid_netcdf=None, **kwargs): try: - if path.startswith('http'): + if path.startswith(('http', 'hdf5:')): if no_h5pyd: raise RuntimeError('h5pyd package required') + try: + with h5pyd.File(path, 'r') as f: # noqa + pass + self._preexisting_file = True + except OSError: + self._preexisting_file = False self._h5file = h5pyd.File(path, mode, **kwargs) - self._preexisting_file = True if 'r' in mode else False else: self._preexisting_file = os.path.exists(path) self._h5file = h5py.File(path, mode, **kwargs)
"hdf5" as HTTP scheme and better HDF5 resource exists check
py
diff --git a/inbox/client/restful_model_collection.py b/inbox/client/restful_model_collection.py index <HASH>..<HASH> 100644 --- a/inbox/client/restful_model_collection.py +++ b/inbox/client/restful_model_collection.py @@ -84,6 +84,8 @@ class RestfulModelCollection(): to_fetch = min(limit-len(accumulated), CHUNK_SIZE) results = self._get_model_collection(offset + len(accumulated), to_fetch) + results.reverse() # to keep ordering consistent across chunks + # since we access the first item via pop() accumulated.extend(results) # done if more than 'limit' items, less than asked for
Ordering is now consistent across chunks
py
diff --git a/uliweb/utils/generic.py b/uliweb/utils/generic.py index <HASH>..<HASH> 100644 --- a/uliweb/utils/generic.py +++ b/uliweb/utils/generic.py @@ -1207,7 +1207,7 @@ class EditView(AddView): # fields_list.insert(0, d) # fields_name.insert(0, 'id') - data = self.obj.to_dict(fields_name, convert=False).copy() + data = self.obj.to_dict(fields_name).copy() data.update(self.data) #add layout support
Fix EditView get object to dict bug
py
diff --git a/salt/loader/__init__.py b/salt/loader/__init__.py index <HASH>..<HASH> 100644 --- a/salt/loader/__init__.py +++ b/salt/loader/__init__.py @@ -129,7 +129,10 @@ def _module_dirs( if tag is None: tag = ext_type sys_types = os.path.join(base_path or str(SALT_BASE_PATH), int_type or ext_type) - ext_types = os.path.join(opts["extension_modules"], ext_type) + return_types = [sys_types] + if opts.get("extension_modules"): + ext_types = os.path.join(opts["extension_modules"], ext_type) + return_types.insert(0, ext_types) if not sys_types.startswith(SALT_INTERNAL_LOADERS_PATHS): raise RuntimeError( @@ -245,7 +248,7 @@ def _module_dirs( if os.path.isdir(maybe_dir): cli_module_dirs.insert(0, maybe_dir) - return cli_module_dirs + ext_type_types + [ext_types, sys_types] + return cli_module_dirs + ext_type_types + return_types def minion_mods(
Don't expect ``extension_modules`` to be set.
py
diff --git a/tests/test_icecream.py b/tests/test_icecream.py index <HASH>..<HASH> 100644 --- a/tests/test_icecream.py +++ b/tests/test_icecream.py @@ -25,7 +25,7 @@ from icecream import ic, argumentToString, stderrPrint, NoSourceAvailableError TEST_PAIR_DELIMITER = '| ' -MYFILENAME = basename(__file__) +MY_FILENAME = basename(__file__) a = 1 @@ -124,7 +124,7 @@ def lineIsContext(line): return ( int(lineNumber) > 0 and ext in ['.py', '.pyc', '.pyo'] and - name == splitext(MYFILENAME)[0] and + name == splitext(MY_FILENAME)[0] and (function == '<module>' or function.endswith('()')))
rename MYFILENAME to the better MY_FILENAME
py
diff --git a/ocrd_models/ocrd_models/ocrd_mets.py b/ocrd_models/ocrd_models/ocrd_mets.py index <HASH>..<HASH> 100644 --- a/ocrd_models/ocrd_models/ocrd_mets.py +++ b/ocrd_models/ocrd_models/ocrd_mets.py @@ -198,10 +198,8 @@ class OcrdMets(OcrdXmlDocument): el_pagediv.set('ORDER', order) if orderlabel: el_pagediv.set('ORDERLABEL', orderlabel) - el_fptr = el_pagediv.find('mets:fptr[@FILEID="%s"]' % ocrd_file.ID, NS) - if not el_fptr: - el_fptr = ET.SubElement(el_pagediv, TAG_METS_FPTR) - el_fptr.set('FILEID', ocrd_file.ID) + el_fptr = ET.SubElement(el_pagediv, TAG_METS_FPTR) + el_fptr.set('FILEID', ocrd_file.ID) def get_physical_page_for_file(self, ocrd_file): """
mets: remove useless xpath
py
diff --git a/openquake/baselib/workerpool.py b/openquake/baselib/workerpool.py index <HASH>..<HASH> 100644 --- a/openquake/baselib/workerpool.py +++ b/openquake/baselib/workerpool.py @@ -150,9 +150,11 @@ class WorkerPool(object): :param sock: a zeromq.Socket of kind PULL receiving (cmd, args) """ setproctitle('oq-zworker') + taskno = 0 with sock: - for cmd, args, mon in sock: - parallel.safely_call(cmd, args, mon) + for cmd, args, taskno, mon in sock: + parallel.safely_call(cmd, args, taskno, mon) + taskno += 1 def start(self): """
Fix in the workerpool [skip CI]
py
diff --git a/salt/modules/tomcat.py b/salt/modules/tomcat.py index <HASH>..<HASH> 100644 --- a/salt/modules/tomcat.py +++ b/salt/modules/tomcat.py @@ -77,7 +77,7 @@ def signal(signal=None): 'start': 'start', 'stop': 'stop'} - if not valid_signals[signal]: + if signal not in valid_signals: return cmd = '{0}/bin/catalina.sh {1}'.format(
Correct logic error, the if condition produces KeyError instead of verifying if signal is in valid_signals dictionary. modified: tomcat.py
py
diff --git a/galpy/df_src/diskdf.py b/galpy/df_src/diskdf.py index <HASH>..<HASH> 100644 --- a/galpy/df_src/diskdf.py +++ b/galpy/df_src/diskdf.py @@ -41,7 +41,7 @@ from galpy.actionAngle import actionAngleAxi try: sversion=re.split(r'\.',sc.__version__) _SCIPYVERSION=float(sversion[0])+float(sversion[1])/10. -except: +except: #pragma: no cover raise ImportError( "scipy.__version__ not understood, contact galpy developer, send scipy.__version__") _CORRECTIONSDIR=os.path.join(os.path.dirname(os.path.realpath(__file__)),'data') _DEGTORAD= math.pi/180.
do not cover scipy version exception
py
diff --git a/pyradio/main.py b/pyradio/main.py index <HASH>..<HASH> 100644 --- a/pyradio/main.py +++ b/pyradio/main.py @@ -59,13 +59,11 @@ def shell(): writter.writerow(params) sys.exit() - with open(args.stations, 'r') as cfgfile: - stations = [] - for row in csv.reader(cfgfile, skipinitialspace=True): - if not row or row[0].startswith('#'): - continue - name, url = [s.strip() for s in row] - stations.append((name, url)) + with open(args.stations, 'r') as cfgfile: + stations = [] + for row in csv.reader(filter(lambda row: row[0]!='#', cfgfile), skipinitialspace=True): + name, url = [s.strip() for s in row] + stations.append((name, url)) if args.list: for name, url in stations:
Filter comments before passing to csv reader. Fixes #<I>
py
diff --git a/peewee.py b/peewee.py index <HASH>..<HASH> 100644 --- a/peewee.py +++ b/peewee.py @@ -365,11 +365,12 @@ class BaseQuery(object): ) ) - for model in self._where: - for node in self._where[model]: - query, data = self.parse_node(node, model, alias_map) - where_with_alias.append(query) - where_data.extend(data) + for (model, join_type, on) in joins: + if model in self._where: + for node in self._where[model]: + query, data = self.parse_node(node, model, alias_map) + where_with_alias.append(query) + where_data.extend(data) return computed_joins, where_with_alias, where_data, alias_map
Trying to get a bit more deterministic ordering of the where clause
py
diff --git a/module/__init__.py b/module/__init__.py index <HASH>..<HASH> 100644 --- a/module/__init__.py +++ b/module/__init__.py @@ -178,15 +178,6 @@ class ExtensionKey(object): # xpyb doesn't ever set global_id, which seems wrong, but whatever. c_key.global_id = 0 - # This is a little wonky. Because CFFI sees a __del__ on the c_key - # name when the function returns, it may free the underlying memory. - # So, we say that self depends on the return value so the memory is - # frozen until self is deleted. The caller still has to manage the - # lifetime of the return value themselves, but at least this way it - # won't be deleted before they even see it, since we know self is in - # scope (the caller has a reference to it, otherwise they couldn't call - # us) when we return. - cffi_explicit_lifetimes[self] = c_key return c_key class Protobj(object):
don't be so paranoid about lifetimes This causes a resource leak in any case, and is (probably?) not needed. I should be less paranoid :)
py
diff --git a/libre/apps/data_drivers/renderers.py b/libre/apps/data_drivers/renderers.py index <HASH>..<HASH> 100644 --- a/libre/apps/data_drivers/renderers.py +++ b/libre/apps/data_drivers/renderers.py @@ -15,6 +15,9 @@ from icons.models import Icon from .encoders import JSONEncoder +class BoundsError(Exception): + pass + class LeafletRenderer(renderers.TemplateHTMLRenderer): template_name = 'leaflet.html' @@ -107,7 +110,7 @@ class LeafletRenderer(renderers.TemplateHTMLRenderer): # determine where to move the map ourselves try: extra_context['extents'] = self.determine_extents(features) - except StopIteration: + except (StopIteration, BoundsError): pass ret = json.dumps(new_data, cls=self.encoder_class, ensure_ascii=True) @@ -127,9 +130,14 @@ class LeafletRenderer(renderers.TemplateHTMLRenderer): def determine_extents(self, features): bounds_generator = (feature['geometry'].bounds for feature in features) + iterator = iter(bounds_generator) - first_feature_bounds = iterator.next() + try: + first_feature_bounds = iterator.next() + except AttributeError: + # No .bounds property? + raise BoundsError min_x, min_y, max_x, max_y = first_feature_bounds for bounds in bounds_generator:
Fail gracefully when features have no bounds
py
diff --git a/flask_oidc/__init__.py b/flask_oidc/__init__.py index <HASH>..<HASH> 100644 --- a/flask_oidc/__init__.py +++ b/flask_oidc/__init__.py @@ -254,11 +254,15 @@ class OpenIDConnect(object): def _get_cookie_id_token(self): try: - id_token_cookie = request.cookies[current_app.config[ - 'OIDC_ID_TOKEN_COOKIE_NAME']] + id_token_cookie = request.cookies.get(current_app.config[ + 'OIDC_ID_TOKEN_COOKIE_NAME']) + if not id_token_cookie: + # Do not error if we were unable to get the cookie. + # The user can debug this themselves. + return None return self.cookie_serializer.loads(id_token_cookie) - except (KeyError, SignatureExpired): - logger.debug("Missing or invalid ID token cookie", exc_info=True) + except SignatureExpired: + logger.debug("Invalid ID token cookie", exc_info=True) return None def set_cookie_id_token(self, id_token):
Do not complain if the user has no cookie
py
diff --git a/gnupg/test/test_gnupg.py b/gnupg/test/test_gnupg.py index <HASH>..<HASH> 100644 --- a/gnupg/test/test_gnupg.py +++ b/gnupg/test/test_gnupg.py @@ -291,10 +291,10 @@ class GPGTestCase(unittest.TestCase): """Test that 'gpg --version' returns the expected output.""" proc = self.gpg._open_subprocess(['--version']) result = proc.stdout.read(1024) - expected1 = "Supported algorithms:" - expected2 = "Pubkey:" - expected3 = "Cipher:" - expected4 = "Compression:" + expected1 = b"Supported algorithms:" + expected2 = b"Pubkey:" + expected3 = b"Cipher:" + expected4 = b"Compression:" self.assertGreater(result.find(expected1), 0) self.assertGreater(result.find(expected2), 0) self.assertGreater(result.find(expected3), 0)
stdout.read() wants bytes, which failed on Py3 str I don't know how far back the b"byte string" syntax is compatible with this change in Python 2.x, but in <I> it works as it should.
py
diff --git a/mythril/analysis/callgraph.py b/mythril/analysis/callgraph.py index <HASH>..<HASH> 100644 --- a/mythril/analysis/callgraph.py +++ b/mythril/analysis/callgraph.py @@ -3,7 +3,6 @@ graphs.""" import re -# ignore exists due to some problem in the typeshed https://github.com/python/mypy/issues/3589 from jinja2 import Environment, PackageLoader, select_autoescape from z3 import Z3Exception
Remove the type ignore's description comment
py
diff --git a/salt/config.py b/salt/config.py index <HASH>..<HASH> 100644 --- a/salt/config.py +++ b/salt/config.py @@ -2390,8 +2390,16 @@ def is_profile_configured(opts, provider, profile_name): .. versionadded:: Beryllium ''' - required_keys = ['image', 'provider', 'size'] + # Create a list of standard, required dict keys required by all drivers. + required_keys = ['image', 'provider'] alias, driver = provider.split(':') + + # Most drivers require a size to be set, but some do not. + driver_no_size = ['parallels', 'softlayer', 'softlayer_hw'] + + if driver not in driver_no_size: + required_keys.append('size') + provider_key = opts['providers'][alias][driver] profile_key = opts['providers'][alias][driver]['profiles'][profile_name]
Don't require size for all cloud drivers when checking profile configs Fixes #<I>
py
diff --git a/openquake/calculators/base.py b/openquake/calculators/base.py index <HASH>..<HASH> 100644 --- a/openquake/calculators/base.py +++ b/openquake/calculators/base.py @@ -52,7 +52,7 @@ U64 = numpy.uint64 F32 = numpy.float32 TWO16 = 2 ** 16 TWO32 = 2 ** 32 -RUPTURES_PER_BLOCK = 100000 # used in classical_split_filter +RUPTURES_PER_BLOCK = 200000 # used in classical_split_filter class InvalidCalculationID(Exception):
Increased RUPTURES_PER_BLOCK [skip hazardlib]
py
diff --git a/requests_unixsocket/adapters.py b/requests_unixsocket/adapters.py index <HASH>..<HASH> 100644 --- a/requests_unixsocket/adapters.py +++ b/requests_unixsocket/adapters.py @@ -13,6 +13,7 @@ except ImportError: # The following was adapted from some code from docker-py # https://github.com/docker/docker-py/blob/master/docker/unixconn/unixconn.py class UnixHTTPConnection(HTTPConnection): + def __init__(self, unix_socket_url, timeout=60): """Create an HTTP connection to a unix domain socket @@ -33,6 +34,7 @@ class UnixHTTPConnection(HTTPConnection): class UnixHTTPConnectionPool(HTTPConnectionPool): + def __init__(self, socket_path, timeout=60): HTTPConnectionPool.__init__(self, 'localhost', timeout=timeout) self.socket_path = socket_path @@ -43,6 +45,7 @@ class UnixHTTPConnectionPool(HTTPConnectionPool): class UnixAdapter(HTTPAdapter): + def __init__(self, timeout=60): super(UnixAdapter, self).__init__() self.timeout = timeout
Make PEP8 compliant with autopep8.
py
diff --git a/torchvision/models/mobilenetv2.py b/torchvision/models/mobilenetv2.py index <HASH>..<HASH> 100644 --- a/torchvision/models/mobilenetv2.py +++ b/torchvision/models/mobilenetv2.py @@ -46,7 +46,7 @@ class ConvBNActivation(nn.Sequential): norm_layer = nn.BatchNorm2d if activation_layer is None: activation_layer = nn.ReLU6 - super(ConvBNReLU, self).__init__( + super().__init__( nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, dilation=dilation, groups=groups, bias=False), norm_layer(out_planes),
Keep consistency in Classes ConvBNActivation (#<I>) * Keep consistency of ConvBNActivation * Simplify using the Python3 idiom
py
diff --git a/planet/scripts/v1.py b/planet/scripts/v1.py index <HASH>..<HASH> 100644 --- a/planet/scripts/v1.py +++ b/planet/scripts/v1.py @@ -155,7 +155,7 @@ def stats(pretty, **kw): def download(asset_type, dest, limit, search_id, dry_run, **kw): '''Activate and download''' cl = clientv1() - page_size = min(limit, 250) + page_size = min(limit or 250, 250) asset_type = list(chain.from_iterable(asset_type)) if search_id: if dry_run:
prevent error in python3 if limit is None
py
diff --git a/VERSION.py b/VERSION.py index <HASH>..<HASH> 100644 --- a/VERSION.py +++ b/VERSION.py @@ -4,4 +4,5 @@ # This file should define a variable VERSION which we use as the # debugger version number. -VERSION='0.3.10' +# fmt: off +VERSION='0.3.11.dev0' # noqa
Black shouldn't format VERSION.py
py
diff --git a/osmnet/load.py b/osmnet/load.py index <HASH>..<HASH> 100644 --- a/osmnet/load.py +++ b/osmnet/load.py @@ -163,8 +163,7 @@ def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None, for json in response_jsons_list: try: response_jsons.extend(json['elements']) - except Exception: - log('Exception while stitching individual JSON results.') + except KeyError: pass # remove duplicate records resulting from the json stitching @@ -711,8 +710,7 @@ def node_pairs(nodes, ways, waynodes, two_way=True): for tag in config.settings.keep_osm_tags: try: col_dict.update({tag: row[tag]}) - except Exception: - log('Exception while updating dictionary (two-way).') + except KeyError: pass pairs.append(col_dict) @@ -726,9 +724,7 @@ def node_pairs(nodes, ways, waynodes, two_way=True): for tag in config.settings.keep_osm_tags: try: col_dict.update({tag: row[tag]}) - except Exception: - log('Exception while updating dictionary ' - '(one-way).') + except KeyError: pass pairs.append(col_dict)
Remove unnecessary log statements and replace broad Exceptions with KeyErrors where appropriate
py
diff --git a/httpbin/core.py b/httpbin/core.py index <HASH>..<HASH> 100644 --- a/httpbin/core.py +++ b/httpbin/core.py @@ -468,12 +468,16 @@ def drip(): duration = float(args.get('duration', 2)) numbytes = min(int(args.get('numbytes', 10)),(10 * 1024 * 1024)) # set 10MB limit code = int(args.get('code', 200)) - pause = duration / numbytes delay = float(args.get('delay', 0)) if delay > 0: time.sleep(delay) + if numbytes <= 0: + response = Response('number of bytes must be positive', status=400) + return response + + pause = duration / numbytes def generate_bytes(): for i in xrange(numbytes): yield u"*".encode('utf-8')
Fix divide by zero error in drip It occured when 'numbytes' parameter was set to 0. In this case 'drip' should return <I>('Bad request').
py
diff --git a/panoramix/views.py b/panoramix/views.py index <HASH>..<HASH> 100644 --- a/panoramix/views.py +++ b/panoramix/views.py @@ -149,7 +149,7 @@ class TableView(PanoramixModelView, DeleteMixin): add_columns = ['table_name', 'database', 'default_endpoint', 'offset'] edit_columns = [ 'table_name', 'is_featured', 'database', 'description', 'owner', - 'main_dttm_col', 'default_endpoint', 'offset'] + 'main_dttm_col', 'default_endpoint', 'offset'] related_views = [TableColumnInlineView, SqlMetricInlineView] base_order = ('changed_on','desc') description_columns = {
Fixing alignment of a hanging indent for code quality
py
diff --git a/symfit/core/fit.py b/symfit/core/fit.py index <HASH>..<HASH> 100644 --- a/symfit/core/fit.py +++ b/symfit/core/fit.py @@ -1377,7 +1377,7 @@ class Minimize(BaseFit): # s_sq = (infodic['fvec'] ** 2).sum() / (len(self.ydata) - len(popt)) # pcov = cov_x * s_sq if cov_x is not None else None - self.__fit_results = FitResults( + self._fit_results = FitResults( params=self.model.params, popt=ans.x, pcov=None, @@ -1386,10 +1386,10 @@ class Minimize(BaseFit): ier=ans.nit, ) try: - self.__fit_results.r_squared = r_squared(self.model, self.__fit_results, self.data) + self._fit_results.r_squared = r_squared(self.model, self._fit_results, self.data) except ValueError: - self.__fit_results.r_squared = float('nan') - return self.__fit_results + self._fit_results.r_squared = float('nan') + return self._fit_results @property def scipy_constraints(self):
Removed __ elsewhere where it wasn't strictly needed.
py
diff --git a/tests/test_concordance.py b/tests/test_concordance.py index <HASH>..<HASH> 100644 --- a/tests/test_concordance.py +++ b/tests/test_concordance.py @@ -20,17 +20,20 @@ import shutil from pyani import anib, anim, tetra, pyani_files, pyani_config +# Work out where we are. We need to do this to find related data files +# for testing +curdir = os.path.dirname(os.path.abspath(__file__)) # Path to JSpecies output data. This data is pre-prepared. If you replace # the test data with your own data, you will need to replace this file, # or change the file path. -JSPECIES_OUTFILE = 'test_JSpecies/jspecies_results.tab' +JSPECIES_OUTFILE = os.path.join(curdir, './test_JSpecies/jspecies_results.tab') # Path to test input data -INDIRNAME = 'test_ani_data' +INDIRNAME = os.path.join(curdir, 'test_ani_data') # Path to directory for concordance test output -OUTDIRNAME = 'test_concordance' +OUTDIRNAME = os.path.join(curdir, 'test_concordance') # Thresholds for allowable difference TETRA_THRESHOLD = 0.1 @@ -84,6 +87,7 @@ def make_outdir(mode): os.mkdir(outdirname) return outdirname + # Test concordance of this code with JSpecies output def test_anib_concordance(): """Test concordance of ANIb method with JSpecies output.
make file-finding in test_concordance.py general
py
diff --git a/pylint/checkers/strings.py b/pylint/checkers/strings.py index <HASH>..<HASH> 100644 --- a/pylint/checkers/strings.py +++ b/pylint/checkers/strings.py @@ -401,16 +401,13 @@ class StringFormatChecker(BaseChecker): if (isinstance(node.func, astroid.Attribute) and not isinstance(node.func.expr, astroid.Const)): return + if node.starargs or node.kwargs: + return try: strnode = next(func.bound.infer()) except astroid.InferenceError: return - if not isinstance(strnode, astroid.Const): - return - if not isinstance(strnode.value, str): - return - - if node.starargs or node.kwargs: + if not (isinstance(strnode, astroid.Const) and isinstance(strnode.value, str)): return try: call_site = CallSite.from_call(node)
Squash some checks and move some of the around
py
diff --git a/pale/adapters/webapp2.py b/pale/adapters/webapp2.py index <HASH>..<HASH> 100644 --- a/pale/adapters/webapp2.py +++ b/pale/adapters/webapp2.py @@ -15,7 +15,7 @@ def pale_webapp2_request_handler_generator(pale_endpoint): for each pale endpoint. """ logging.info(pale_endpoint._route_name) - def pale_handler(self): + def pale_handler(self, *args, **kwargs): return pale_endpoint._execute(self.request) cls = type(pale_endpoint._route_name, (webapp2.RequestHandler,),
avoid argument error when route args & kwargs are present
py
diff --git a/scrapelib/__init__.py b/scrapelib/__init__.py index <HASH>..<HASH> 100644 --- a/scrapelib/__init__.py +++ b/scrapelib/__init__.py @@ -534,6 +534,17 @@ class Scraper(CachingSession): ) -> CacheResponse: _log.info("{} - {!r}".format(method.upper(), url)) + # allow modification of SSL ciphers list to accommodate misconfigured servers + # for example 'HIGH:!DH:!aNULL' to bypass "dh key too small" error + # https://stackoverflow.com/questions/38015537/python-requests-exceptions-sslerror-dh-key-too-small + if ciphers_list_addition: + requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ciphers_list_addition + try: + requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST += ciphers_list_addition + except AttributeError: + # no pyopenssl support used / needed / available + pass + # apply global timeout if not timeout: timeout = self.timeout
Add request param to allow SSL cipherlist to be amended
py
diff --git a/encrypted_id/__init__.py b/encrypted_id/__init__.py index <HASH>..<HASH> 100644 --- a/encrypted_id/__init__.py +++ b/encrypted_id/__init__.py @@ -44,7 +44,7 @@ def encode(the_id): assert len(message) == 16 cypher = AES.new( - settings.SECRET_KEY[:24], AES.MODE_CBC, + settings.SECRET_KEY[:32], AES.MODE_CBC, settings.SECRET_KEY[-16:] ) @@ -62,7 +62,7 @@ def decode(e): raise EncryptedIDDecodeError() for skey in getattr(settings, "SECRET_KEYS", [settings.SECRET_KEY]): - cypher = AES.new(skey[:24], AES.MODE_CBC, skey[-16:]) + cypher = AES.new(skey[:32], AES.MODE_CBC, skey[-16:]) try: msg = cypher.decrypt(e) except ValueError:
Change key length from <I> to <I> (AES-<I> to AES-<I>)
py
diff --git a/Lib/fontbakery/utils.py b/Lib/fontbakery/utils.py index <HASH>..<HASH> 100644 --- a/Lib/fontbakery/utils.py +++ b/Lib/fontbakery/utils.py @@ -219,7 +219,7 @@ def ttfauto_fpgm_xheight_rounding(fpgm_tbl, which): return (warning, xheight_val) -def assertExists(fb, folderpath, filenames, err_msg, ok_msg): +def assertExists(folderpath, filenames, err_msg, ok_msg): if not isinstance(filenames, list): filenames = [filenames] @@ -229,6 +229,6 @@ def assertExists(fb, folderpath, filenames, err_msg, ok_msg): if os.path.exists(fullpath): missing.append(fullpath) if len(missing) > 0: - fb.error(err_msg.format(", ".join(missing))) + return FAIL, err_msg.format(", ".join(missing)) else: - fb.ok(ok_msg) + return PASS, ok_msg
fix fb.utils.assertExists implementation (issue #<I>)
py
diff --git a/fuzzywuzzy/process.py b/fuzzywuzzy/process.py index <HASH>..<HASH> 100644 --- a/fuzzywuzzy/process.py +++ b/fuzzywuzzy/process.py @@ -92,7 +92,10 @@ def extractWithoutOrder(query, choices, processor=default_processor, scorer=defa pass # If the scorer performs full_ratio with force ascii don't run full_process twice - if scorer in [fuzz.WRatio, fuzz.QRatio] and processor == utils.full_process: + if scorer in [fuzz.WRatio, fuzz.QRatio, + fuzz.token_set_ratio, fuzz.token_sort_ratio, + fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio] \ + and processor == utils.full_process: processor = no_process # If the processor was removed by setting it to None
Add token ratios to the list of scorers that skip running full_process as a processor.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ import subprocess from setuptools import setup, find_packages ##################################### -VERSION = "0.6.0" +VERSION = "0.6.1" ISRELEASED = True if ISRELEASED: __version__ = VERSION
Bump to version <I>
py
diff --git a/marktime.py b/marktime.py index <HASH>..<HASH> 100644 --- a/marktime.py +++ b/marktime.py @@ -2,7 +2,10 @@ import time +# module version +version = '0.1.0' +# global dict where all statictics are stored labels = {}
added version into module & some comments
py
diff --git a/application/briefkasten/commands.py b/application/briefkasten/commands.py index <HASH>..<HASH> 100644 --- a/application/briefkasten/commands.py +++ b/application/briefkasten/commands.py @@ -115,7 +115,7 @@ def worker(root, debug=False): # pragma: no cover else: process_drop(drop) else: - print('Not processing drop with status %d ' % drop.status_int) + print('Not processing drop %s with status %d ' % (drop.drop_id, drop.status_int)) # Wait for directory content to change condition.wait()
also include the drop_id (for easier debugging)
py
diff --git a/test/interactive/browser.py b/test/interactive/browser.py index <HASH>..<HASH> 100644 --- a/test/interactive/browser.py +++ b/test/interactive/browser.py @@ -17,13 +17,14 @@ def main(arguments=None): application = QtGui.QApplication(arguments) browser = riffle.browser.FilesystemBrowser() - browser.setMinimumSize(600, 300) + + screen_size = application.desktop().availableGeometry() + browser.setMinimumSize(screen_size.width() / 2, screen_size.height() / 2) + if browser.exec_(): selected = browser.selected() print('Selected: {0}'.format(selected)) - sys.exit(application.exec_()) - if __name__ == '__main__': raise SystemExit(main())
[#<I>] Fix misleading hang on close of dialog in interactive test. In addition, size dialog based on screen size for easier testing.
py
diff --git a/fusesoc/edatools/verilator.py b/fusesoc/edatools/verilator.py index <HASH>..<HASH> 100644 --- a/fusesoc/edatools/verilator.py +++ b/fusesoc/edatools/verilator.py @@ -83,11 +83,15 @@ class Verilator(Simulator): with open(os.path.join(self.work_root, 'Makefile'), 'w') as makefile: makefile.write(MAKEFILE_TEMPLATE) + if 'verilator_options' in self.tool_options: + verilator_options = ' '.join(self.tool_options['verilator_options']) + else: + verilator_options = '' with open(os.path.join(self.work_root, 'config.mk'), 'w') as config_mk: config_mk.write(CONFIG_MK_TEMPLATE.format( top_module = self.toplevel, vc_file = self.verilator_file, - verilator_options = ' '.join(self.tool_options['verilator_options']))) + verilator_options = verilator_options)) def build_main(self): logger.info("Building simulation model")
Fix verilator crash when verilator_options is not defined
py
diff --git a/tests/test_pwm.py b/tests/test_pwm.py index <HASH>..<HASH> 100644 --- a/tests/test_pwm.py +++ b/tests/test_pwm.py @@ -1,3 +1,4 @@ +import os import sys import periphery from .asserts import AssertRaises @@ -126,6 +127,10 @@ def test_interactive(): if __name__ == "__main__": + if os.environ.get("CI") == "true": + test_arguments() + sys.exit(0) + if len(sys.argv) < 3: print("Usage: python -m tests.test_pwm <PWM channel> <PWM pin number>") print("")
tests/pwm: run arguments test in ci environment
py
diff --git a/openquake/hazardlib/valid.py b/openquake/hazardlib/valid.py index <HASH>..<HASH> 100644 --- a/openquake/hazardlib/valid.py +++ b/openquake/hazardlib/valid.py @@ -48,8 +48,10 @@ def disagg_outputs(value): >>> disagg_outputs('TRT Mag_Dist') ['TRT', 'Mag_Dist'] + >>> disagg_outputs('TRT, Mag_Dist') + ['TRT', 'Mag_Dist'] """ - values = value.split() + values = value.replace(',', ' ').split() for val in values: if val not in disagg_outs: raise ValueError('Invalid disagg output: %s' % val)
Accept commas in disagg_outputs
py
diff --git a/pghoard/object_storage/google.py b/pghoard/object_storage/google.py index <HASH>..<HASH> 100644 --- a/pghoard/object_storage/google.py +++ b/pghoard/object_storage/google.py @@ -78,7 +78,12 @@ class GoogleTransfer(BaseTransfer): def _metadata_for_key(self, key): req = self.gs_objects.get(bucket=self.bucket_name, object=key) - obj = req.execute() + try: + obj = req.execute() + except HttpError as ex: + if ex.resp["status"] == "404": + raise FileNotFoundFromStorageError(key) + raise return obj.get("metadata", {}) def list_path(self, key):
google: raise FileNotFoundFromStorageError for missing key metadata requests
py
diff --git a/tweepy/streaming.py b/tweepy/streaming.py index <HASH>..<HASH> 100644 --- a/tweepy/streaming.py +++ b/tweepy/streaming.py @@ -124,7 +124,6 @@ class Stream: ) except Exception as exc: self.on_exception(exc) - raise finally: self.session.close() self.running = False
Stop reraising exceptions in Stream._connect Resolves #<I>
py
diff --git a/salt/utils/async.py b/salt/utils/async.py index <HASH>..<HASH> 100644 --- a/salt/utils/async.py +++ b/salt/utils/async.py @@ -7,19 +7,8 @@ from __future__ import absolute_import, print_function, unicode_literals import tornado.ioloop import tornado.concurrent -# attempt to use zmq-- if we have it otherwise fallback to tornado loop -try: - import zmq.eventloop.ioloop - # support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x - if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'): - zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop - LOOP_CLASS = zmq.eventloop.ioloop.ZMQIOLoop - HAS_ZMQ = True -except ImportError: - LOOP_CLASS = tornado.ioloop.IOLoop - HAS_ZMQ = False - import contextlib +from salt.utils import zeromq @contextlib.contextmanager @@ -53,7 +42,7 @@ class SyncWrapper(object): if kwargs is None: kwargs = {} - self.io_loop = LOOP_CLASS() + self.io_loop = zeromq.ZMQDefaultLoop() kwargs['io_loop'] = self.io_loop with current_ioloop(self.io_loop):
Use ZMQ importer utility in async
py
diff --git a/geotiepoints/simple_modis_interpolator.py b/geotiepoints/simple_modis_interpolator.py index <HASH>..<HASH> 100644 --- a/geotiepoints/simple_modis_interpolator.py +++ b/geotiepoints/simple_modis_interpolator.py @@ -146,8 +146,8 @@ def interpolate_geolocation_cartesian(lon_array, lat_array, res_factor=4): # Create an array of indexes that we want our result to have x = np.arange(res_factor * num_cols, dtype=np.float32) * (1. / res_factor) # 0.375 for 250m, 0.25 for 500m - y = np.arange(res_factor * ROWS_PER_SCAN, dtype=np.float32) * (1. / res_factor) - ( - res_factor * (1. / 16) + (1. / 8)) + y = np.arange(res_factor * ROWS_PER_SCAN, dtype=np.float32) * \ + (1. / res_factor) - (res_factor * (1. / 16) + (1. / 8)) x, y = np.meshgrid(x, y) coordinates = np.array([y, x]) # Used by map_coordinates, major optimization
Fix flake8 issue in simple modis interpolation
py
diff --git a/salt/states/pkg.py b/salt/states/pkg.py index <HASH>..<HASH> 100644 --- a/salt/states/pkg.py +++ b/salt/states/pkg.py @@ -1152,7 +1152,7 @@ def installed( package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. - This option is currently supported only for YUM/DNF. + Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0
Update apt module regarding upgrades against held packages. Fixes #<I>; related to #<I>.
py
diff --git a/demo/kaggle-higgs/higgs-pred.py b/demo/kaggle-higgs/higgs-pred.py index <HASH>..<HASH> 100755 --- a/demo/kaggle-higgs/higgs-pred.py +++ b/demo/kaggle-higgs/higgs-pred.py @@ -43,7 +43,8 @@ for k, v in res: nhit += 1 else: lb = 'b' - fo.write('%s,%d,%s\n' % ( k, rorder[k], lb ) ) + # change output rank order to follow Kaggle convention + fo.write('%s,%d,%s\n' % ( k, len(rorder)+1-rorder[k], lb ) ) ntot += 1 fo.close()
Change rank order output to follow Kaggle convention
py
diff --git a/engine.py b/engine.py index <HASH>..<HASH> 100644 --- a/engine.py +++ b/engine.py @@ -74,7 +74,7 @@ def apply_template(jj2_template, data, output_file): """ with open(output_file, 'w') as output: rendered_content = jj2_template.render(**data) - output.write(rendered_content) + output.write(rendered_content.encode('utf-8')) class Strategy(object):
bug fix #1: encode utf-8 before saving it
py
diff --git a/wandb/cli.py b/wandb/cli.py index <HASH>..<HASH> 100644 --- a/wandb/cli.py +++ b/wandb/cli.py @@ -43,6 +43,11 @@ def display_error(func): return wrapper +def _require_init(): + if __stage_dir__ is None: + print('Directory not initialized. Please run "wandb init" to get started.') + sys.exit(1) + def editor(content='', marker='# Enter a description, markdown is allowed!\n'): message = click.edit(content + '\n\n' + marker) if message is not None: @@ -360,6 +365,7 @@ RUN_CONTEXT['ignore_unknown_options'] = True help='New files in <run_dir> that match will be saved to wandb. (default: \'*\')') @display_error def run(ctx, program, args, id, dir, glob): + _require_init() env = copy.copy(os.environ) env['WANDB_MODE'] = 'run' if id is None:
Show error message for the run command if "wandb init" hasn't been run. Fixes #<I>.
py