diff
stringlengths
139
3.65k
message
stringlengths
8
627
diff_languages
stringclasses
1 value
diff --git a/holoviews/plotting/bokeh/plot.py b/holoviews/plotting/bokeh/plot.py index <HASH>..<HASH> 100644 --- a/holoviews/plotting/bokeh/plot.py +++ b/holoviews/plotting/bokeh/plot.py @@ -34,7 +34,7 @@ class BokehPlot(DimensionedPlot): share their Bokeh data source allowing for linked brushing and other linked behaviors.""") - title_format = param.String(default="{label} {group} - {dimensions}", doc=""" + title_format = param.String(default="{label} {group} {dimensions}", doc=""" The formatting string for the title of this plot, allows defining a label group separator and dimension labels.""")
Removed dash separator in bokeh titles
py
diff --git a/atlassian/confluence.py b/atlassian/confluence.py index <HASH>..<HASH> 100644 --- a/atlassian/confluence.py +++ b/atlassian/confluence.py @@ -2100,6 +2100,34 @@ class Confluence(AtlassianRestAPI): return response + def get_user_details_by_accountid(self, accountid, expand=None): + """ + Get information about a user through accountid + :param accountid: The account id + :param expand: OPTIONAL expand for get status of user. + Possible param is "status". Results are "Active, Deactivated" + :return: Returns the user details + """ + url = "rest/api/user" + params = {"accountId": accountid} + if expand: + params["expand"] = expand + + try: + response = self.get(url, params=params) + except HTTPError as e: + if e.response.status_code == 403: + raise ApiPermissionError("The calling user does not have permission to view users", reason=e) + if e.response.status_code == 404: + raise ApiNotFoundError( + "The user with the given account does not exist", + reason=e, + ) + + raise + + return response + def get_user_details_by_userkey(self, userkey, expand=None): """ Get information about a user through user key
Adding get user account details by account id method based off username/userkey method (#<I>)
py
diff --git a/pysat/_instrument.py b/pysat/_instrument.py index <HASH>..<HASH> 100644 --- a/pysat/_instrument.py +++ b/pysat/_instrument.py @@ -868,6 +868,7 @@ class Instrument(object): # Check for download flags for tests try: # Used for instruments without download access + # Assume we test download routines regardless of env unless specified otherwise self._test_download = \ inst._test_download[self.sat_id][self.tag] except (AttributeError, KeyError): @@ -875,6 +876,7 @@ class Instrument(object): self._test_download = True try: # Used for tests which require FTP access + # Assume we test download routines on travis unless specified otherwise self._test_download_travis = \ inst._test_download_travis[self.sat_id][self.tag] except (AttributeError, KeyError): @@ -882,6 +884,7 @@ class Instrument(object): self._test_download_travis = True try: # Used for tests which require password access + # Assume password not required unless specified otherwise self._password_req = \ inst._password_req[self.sat_id][self.tag] except (AttributeError, KeyError):
DOC: comments for download flags
py
diff --git a/moztelemetry/spark.py b/moztelemetry/spark.py index <HASH>..<HASH> 100644 --- a/moztelemetry/spark.py +++ b/moztelemetry/spark.py @@ -60,7 +60,7 @@ def _read(filename): key = _bucket.get_key(filename) compressed = key.get_contents_as_string() raw = lzma.decompress(compressed).split("\n")[:-1] - return map(lambda x: x[37:], raw) + return map(lambda x: x.split("\t", 1)[1], raw) def get_pings(sc, appName, channel, version, buildid, submission_date, fraction=1.0): filter = _build_filter(appName, channel, version, buildid, submission_date)
Split incoming lines by the tab, rather than assuming that it's a <I>-byte UUID.
py
diff --git a/testing/run.py b/testing/run.py index <HASH>..<HASH> 100755 --- a/testing/run.py +++ b/testing/run.py @@ -19,9 +19,11 @@ def run_test(t): total += 1 c = glob.glob(t + "/cursor.*")[0] cursorpos = os.path.splitext(c)[1][1:] - outexpected = "" - with open(t + "/out.expected", "r") as f: - outexpected = f.read() + try: + with open(t + "/out.expected", "r") as f: + outexpected = f.read() + except: + outexpected = "To be determined" filename = t + "/test.go" gocode = subprocess.Popen("gocode -in %s autocomplete %s %s" % (filename, filename, cursorpos), shell=True, stdout=subprocess.PIPE)
testing: absence of out.expected is expected ;-D.
py
diff --git a/swiftwind/core/templatetags/banking.py b/swiftwind/core/templatetags/banking.py index <HASH>..<HASH> 100644 --- a/swiftwind/core/templatetags/banking.py +++ b/swiftwind/core/templatetags/banking.py @@ -22,7 +22,7 @@ def currency(value): locale_values = [] for money in value.monies(): locale_value = locale.currency(abs(money.amount), grouping=True, symbol=money.currency.code) - locale_value = locale_value if value >= 0 else "({})".format(locale_value) + locale_value = locale_value if money.amount >= 0 else "({})".format(locale_value) locale_values.append(locale_value) else: locale_value = locale.currency(abs(value), grouping=True)
Fixes for currency rendering (needs reworking down the line)
py
diff --git a/andes/routines/tds.py b/andes/routines/tds.py index <HASH>..<HASH> 100644 --- a/andes/routines/tds.py +++ b/andes/routines/tds.py @@ -149,7 +149,7 @@ class TDS(RoutineBase): # reduce time step for fixed_times events for fixed_t in self.fixed_times: - if (fixed_t > self.t) and (fixed_t < self.t + config.deltat): + if (fixed_t > self.t) and (fixed_t <= self.t + config.deltat): config.deltat = fixed_t - self.t self.switch = True break @@ -240,8 +240,8 @@ class TDS(RoutineBase): config.qrtstart = time() while self.t < config.tf: - self.calc_time_step() self.check_fixed_times() + self.calc_time_step() if self.callpert is not None: self.callpert(self.t, self.system)
check fixed times before calc time step
py
diff --git a/helusers/migrations/0001_add_ad_groups.py b/helusers/migrations/0001_add_ad_groups.py index <HASH>..<HASH> 100644 --- a/helusers/migrations/0001_add_ad_groups.py +++ b/helusers/migrations/0001_add_ad_groups.py @@ -31,7 +31,7 @@ class Migration(migrations.Migration): ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ad_groups', to='auth.Group')), ], options={ - 'verbose_name': 'AD Group Mapping', + 'verbose_name': 'AD group mapping', 'verbose_name_plural': 'AD group mappings' }, ), migrations.AlterUniqueTogether(
Fix migration for model verbose name changes
py
diff --git a/peyotl/nexson_syntax/__init__.py b/peyotl/nexson_syntax/__init__.py index <HASH>..<HASH> 100755 --- a/peyotl/nexson_syntax/__init__.py +++ b/peyotl/nexson_syntax/__init__.py @@ -36,7 +36,8 @@ import codecs import json _CONVERTIBLE_FORMATS = frozenset([DEFAULT_NEXSON_VERSION, - BADGER_FISH_NEXSON_VERSION]) + BADGER_FISH_NEXSON_VERSION, + BY_ID_HONEY_BADGERFISH]) _LOG = get_logger(__name__) def get_ot_study_info_from_nexml(src,
added <I> to list of convertible formats, as that seems to be working
py
diff --git a/pylabcontrol/core/script.py b/pylabcontrol/core/script.py index <HASH>..<HASH> 100644 --- a/pylabcontrol/core/script.py +++ b/pylabcontrol/core/script.py @@ -515,9 +515,9 @@ class Script(QObject): def check_filename(filename): if os.name == 'nt': if builtin_len(filename) >= 256 and not filename[0:4] == '\\\\?\\': - filename = '\\\\?\\' + filename # when using this long filename prefix, we must use only \ slashes as windows handles these differently - filename = filename.replace('/', '\\') + filename = os.path.normpath(filename) + filename = '\\\\?\\' + filename return filename def to_dict(self): @@ -1176,7 +1176,7 @@ class Script(QObject): script_settings = script_information['settings'] if 'filepath' in script_information: script_filepath = str(script_information['filepath']) - module_path, module_file = module_name_from_path(script_filepath, verbose = True) + module_path, module_file = module_name_from_path(script_filepath, verbose = False) if 'package' in script_information: package = script_information['package'] else:
Changed an explicit change of / to \ in filenames on windows to instead be done properly by os.path.normpath
py
diff --git a/src/bidi/chartypes.py b/src/bidi/chartypes.py index <HASH>..<HASH> 100644 --- a/src/bidi/chartypes.py +++ b/src/bidi/chartypes.py @@ -87,6 +87,10 @@ class TextOrdering(object): @property def ordering_type(self): + """Return the type of TextOrdering. One of `"sor"`, `"eor"`, + `"unkonown"`. + + """ if self.next_char: return 'sor'
Added docstring to TextOrdering.ordering_type
py
diff --git a/src/armet/query.py b/src/armet/query.py index <HASH>..<HASH> 100644 --- a/src/armet/query.py +++ b/src/armet/query.py @@ -47,7 +47,10 @@ class QueryList(list): qobjects = (Q(**{key: value}) for value in query.value) # Reduce them all to a single one via 'or'ing them - return reduce(lambda x, y: x | y, qobjects) + q = reduce(lambda x, y: x | y, qobjects) + + # Negate it if neccesary + return (~q) if query.negated else q @property def q(self): @@ -59,14 +62,16 @@ class QueryList(list): # Reduce them to a single q object qobject = reduce(lambda x, y: x & y, qobjects) - # gather all the valid sorting directions - so = ((x.direction, x.django_path).join() for x in self if x.direction) + return qobject - # apply sorting - if so: - qobject = qobject.order_by(*so) + def sort(self, queryset): + """Sorts a queryset based on the query objects within + """ + # Gather all the sorting params + so = ((x.direction, x.django_path).join() for x in self if x.direction) - return qobject + # Apply sorting on the queryset + return queryset.order_by(*so) class Query(object):
Add negation, and move sorting to its own function since you can't sort a Q object
py
diff --git a/githooks/git.py b/githooks/git.py index <HASH>..<HASH> 100644 --- a/githooks/git.py +++ b/githooks/git.py @@ -42,13 +42,13 @@ def description(revision): def author(revision): - # TODO - return "" + log = __execute_command(['git', 'log', revision, '-n', '1']).splitlines() + return log[2][8:] def date(revision): - # TODO - return "" + log = __execute_command(['git', 'log', revision, '-n', '1']).splitlines() + return log[3][8:] def file_names(old_revision, revision):
Implementation of the remaining git functions
py
diff --git a/planet/cli/orders.py b/planet/cli/orders.py index <HASH>..<HASH> 100644 --- a/planet/cli/orders.py +++ b/planet/cli/orders.py @@ -343,8 +343,6 @@ async def request(ctx, raise click.BadParameter(e) tools = [planet.order_request.clip_tool(clip)] - else: - tools = [] if cloudconfig: delivery = planet.order_request.delivery(cloud_config=cloudconfig)
Fixed bug, which would set tools as empty if request.
py
diff --git a/consul/base.py b/consul/base.py index <HASH>..<HASH> 100644 --- a/consul/base.py +++ b/consul/base.py @@ -77,6 +77,21 @@ class Check(object): return {'ttl': ttl} @classmethod + def docker(klass, container_id, shell, script, interval): + """ + Invoke *script* packaged within a running docker container with + *container_id* at a specified specified *interval* on the configured + *shell* using the Docker Exec API + """ + ret = { + 'docker_container_id': container_id, + 'shell': shell, + 'script': script, + 'interval': interval + } + return ret + + @classmethod def _compat( self, script=None,
Add support for Docker health check Relates-to: #<I>
py
diff --git a/nodeconductor/iaas/backend/openstack.py b/nodeconductor/iaas/backend/openstack.py index <HASH>..<HASH> 100644 --- a/nodeconductor/iaas/backend/openstack.py +++ b/nodeconductor/iaas/backend/openstack.py @@ -834,12 +834,6 @@ class OpenStackBackend(OpenStackClient): membership.set_quota_limit('security_group_count', neutron_quotas['security_group']) membership.set_quota_limit('security_group_rule_count', neutron_quotas['security_group_rule']) - # XXX Horrible hack -- to be removed once the Portal has moved to new quotas. NC-421 - membership.project.set_quota_limit('ram', self.get_core_ram_size(nova_quotas.ram)) - membership.project.set_quota_limit('vcpu', nova_quotas.cores) - membership.project.set_quota_limit('max_instances', nova_quotas.instances) - membership.project.set_quota_limit('storage', self.get_core_disk_size(cinder_quotas.gigabytes)) - def pull_resource_quota_usage(self, membership): try: session = self.create_session(membership=membership, dummy=self.dummy)
Remove a hack for setting project level quotas in pull_resource_quota (NC-<I>)
py
diff --git a/gitlint/__init__.py b/gitlint/__init__.py index <HASH>..<HASH> 100644 --- a/gitlint/__init__.py +++ b/gitlint/__init__.py @@ -1 +1 @@ -__version__ = "0.9.0" +__version__ = "0.10.0dev"
Version bump to <I>dev Bumped version to <I>dev.
py
diff --git a/prepro/macrocall.py b/prepro/macrocall.py index <HASH>..<HASH> 100644 --- a/prepro/macrocall.py +++ b/prepro/macrocall.py @@ -54,10 +54,10 @@ class MacroCall(object): # The macro is defined ID = TABLE[self.id] # Get the defined macro - if ID.hasArgs() and self.callargs is None: # If no args passed, returned as is + if ID.hasArgs and self.callargs is None: # If no args passed, returned as is return self.id - if not ID.hasArgs(): # The macro doesn't need args + if not ID.hasArgs: # The macro doesn't need args if self.callargs is None: # If none passed, return the evaluated ID() return ID(TABLE)
Fixed a call to a property
py
diff --git a/salt/modules/rsync.py b/salt/modules/rsync.py index <HASH>..<HASH> 100644 --- a/salt/modules/rsync.py +++ b/salt/modules/rsync.py @@ -161,6 +161,7 @@ def rsync(src, if not src or not dst: raise SaltInvocationError('src and dst cannot be empty') + tmp_src = None if src.startswith('salt://'): _src = src _path = re.sub('salt://', '', _src)
Fixing local variable 'tmp_src' referenced before assignment issue.
py
diff --git a/sorl/thumbnail/admin/current.py b/sorl/thumbnail/admin/current.py index <HASH>..<HASH> 100644 --- a/sorl/thumbnail/admin/current.py +++ b/sorl/thumbnail/admin/current.py @@ -1,9 +1,14 @@ +import logging + from django import forms from django.utils.safestring import mark_safe from sorl.thumbnail.fields import ImageField from sorl.thumbnail.shortcuts import get_thumbnail +logger = logging.getLogger(__name__) + + class AdminImageWidget(forms.ClearableFileInput): """ An ImageField Widget for django.contrib.admin that shows a thumbnailed @@ -18,8 +23,8 @@ class AdminImageWidget(forms.ClearableFileInput): if value and hasattr(value, 'url'): try: mini = get_thumbnail(value, 'x80', upscale=False) - except Exception: - pass + except Exception as e: + logger.warn("Unable to get the thumbnail", exc_info=e) else: output = ( u'<div style="float:left">'
Log the exception if the AdminImageWidget cannot generate the thumbnail instead of silencing it. This case can occur for example if the thumbnail_kvstore table has not bee created.
py
diff --git a/luigi/hdfs.py b/luigi/hdfs.py index <HASH>..<HASH> 100644 --- a/luigi/hdfs.py +++ b/luigi/hdfs.py @@ -67,7 +67,10 @@ def load_hadoop_cmd(): def tmppath(path=None): # No /tmp//tmp/luigi_tmp_testdir sorts of paths just /tmp/luigi_tmp_testdir. - if path is not None and path.startswith(tempfile.gettempdir()): + hdfs_tmp_dir = configuration.get_config().get('core', 'hdfs-tmp-dir', None) + if hdfs_tmp_dir is not None: + base = hdfs_tmp_dir + elif path is not None and path.startswith(tempfile.gettempdir()): base = '' else: base = tempfile.gettempdir()
Update hdfs.py Minimum viable change to allow setting an override to HDFS tmp dir in luigi.
py
diff --git a/setuptools/namespaces.py b/setuptools/namespaces.py index <HASH>..<HASH> 100755 --- a/setuptools/namespaces.py +++ b/setuptools/namespaces.py @@ -83,3 +83,11 @@ class Installer: while parts: yield '.'.join(parts) parts.pop() + + +class DevelopInstaller(Installer): + def _get_root(self): + return repr(str(self.egg_path)) + + def _get_target(self): + return self.egg_link
Create DevelopInstaller, inspired by the code in #<I>.
py
diff --git a/openquake/engine/export/risk.py b/openquake/engine/export/risk.py index <HASH>..<HASH> 100644 --- a/openquake/engine/export/risk.py +++ b/openquake/engine/export/risk.py @@ -274,7 +274,7 @@ def export_event_loss_csv(output, target_dir): output.id)) with open(filepath, 'wb') as csvfile: - writer = csv.writer(csvfile, delimiter='|') + writer = csv.writer(csvfile) writer.writerow(['Rupture', 'Magnitude', 'Aggregate Loss']) for event_loss in models.EventLoss.objects.filter(
export/risk: `event_loss` tables (csv) are now actually comma-delimited (instead of pipe delimited).
py
diff --git a/hpcbench/driver/slurm.py b/hpcbench/driver/slurm.py index <HASH>..<HASH> 100644 --- a/hpcbench/driver/slurm.py +++ b/hpcbench/driver/slurm.py @@ -1,8 +1,10 @@ from collections import Mapping import datetime +import inspect import logging import re import subprocess +import os from os import path as osp import sys @@ -83,10 +85,16 @@ class SbatchDriver(Enumerator): @cached_property def bensh_executable(self): - bensh = osp.realpath(osp.join(osp.dirname(sys.executable), 'ben-sh')) - if osp.exists(bensh): - return bensh - return 'ben-sh' + candidates = [] + main_script = inspect.stack()[-1][1] + if main_script.endswith('ben-sh'): + candidates.append(osp.realpath(osp.join(os.getcwd(), main_script))) + candidates.append(osp.realpath(osp.join(osp.dirname(sys.executable), 'ben-sh'))) + candidates.append('ben-sh') + for candidate in candidates: + if osp.exists(candidate): + break + return candidate @property def default_job_name(self):
sbatch: look for ben-sh script in stack Fixes #<I>
py
diff --git a/bt/core.py b/bt/core.py index <HASH>..<HASH> 100644 --- a/bt/core.py +++ b/bt/core.py @@ -546,8 +546,9 @@ class StrategyBase(Node): if c._issec and not c._needupdate: continue try: - c._weight = c.value / val - except ZeroDivisionError: + with np.errstate(divide='raise', invalid='raise'): + c._weight = c.value / val + except (ZeroDivisionError, FloatingPointError): c._weight = 0.0 # if we have strategy children, we will need to update them in universe @@ -976,7 +977,8 @@ class SecurityBase(Node): if amount == -self._value: q = -self._position else: - q = amount / (self._price * self.multiplier) + with np.errstate(divide='raise', invalid='raise'): + q = amount / (self._price * self.multiplier) if self.integer_positions: if (self._position > 0) or ((self._position == 0) and (amount > 0)): # if we're going long or changing long position
Forcing numpy to raise exception on floating point error and handling it
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -25,7 +25,11 @@ CLASSIFIERS = [ setup( name='seacucumber', version=version_str, - packages=['seacucumber'], + packages=[ + 'seacucumber', + 'seacucumber.management', + 'seacucumber.management.commands', + ], author='Gregory Taylor', author_email='gtaylor@duointeractive.com', url='https://github.com/duointeractive/sea-cucumber/',
Updating setup.py to include management package.
py
diff --git a/datatableview/helpers.py b/datatableview/helpers.py index <HASH>..<HASH> 100644 --- a/datatableview/helpers.py +++ b/datatableview/helpers.py @@ -90,10 +90,11 @@ def itemgetter(k): def attrgetter(attr): def helper(instance, *args, **kwargs): - value = getattr(instance, attr) - - if callable(value): - return value() + value = instance + for bit in attr.split('.'): + value = getattr(value, bit) + if callable(value): + value = value() return value return helper
Allow attrgetter to call intermediate attributes
py
diff --git a/ovp_search/views.py b/ovp_search/views.py index <HASH>..<HASH> 100644 --- a/ovp_search/views.py +++ b/ovp_search/views.py @@ -139,6 +139,7 @@ class ProjectSearchResource(mixins.ListModelMixin, viewsets.GenericViewSet): if address: address = json.loads(address) + address = address[0] if u'address_components' in address: types = []
unpack first address item on ProjectSearch
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -1,8 +1,12 @@ -from distutils.core import setup import sys import platform import os +try: + from setuptools import setup +except ImportError: + from distutils.core import setup + def main(): # Check python version
Improved setup script: support of setuptools.
py
diff --git a/pypiper/manager.py b/pypiper/manager.py index <HASH>..<HASH> 100644 --- a/pypiper/manager.py +++ b/pypiper/manager.py @@ -678,9 +678,9 @@ class PipelineManager(object): and not any([os.path.isfile(l) for l in lock_files]) \ and not newstart: for tgt in target: - if os.path.exists(tgt): print("\nTarget exists: `" + tgt + "`") + if os.path.exists(tgt): print("Target exists: `" + tgt + "`") if self.new_start: - print("New start mode: run anyway") + print("New start mode; run anyway.") # Set the newstart flag so the command will run anyway. # Doing this in here instead of outside the loop allows us # to still report the target existence. @@ -706,8 +706,6 @@ class PipelineManager(object): recover_mode = True # the recovery flag is now spent; remove so we don't accidentally re-recover a failed job os.remove(recover_file) - elif self.new_start: - print("New start mode; overwriting target...") else: # don't overwrite locks self._wait_for_lock(lock_file) # when it's done loop through again to try one more
new start shouldn't ignore locks. new start mode overrides existing targets, but shouldn't override locks... that requires additionally using recover or override mode. otherwise, it gets stuck in an infinite loop because it's not programmed to override locks.
py
diff --git a/.travis/autobrew.py b/.travis/autobrew.py index <HASH>..<HASH> 100644 --- a/.travis/autobrew.py +++ b/.travis/autobrew.py @@ -23,6 +23,10 @@ class Ocrmypdf < Formula depends_on "unpaper" depends_on "qpdf" + # mactex installs its own ghostscript by default which causes problems + # mactex users should use caskroom/cask/mactex-no-ghostscript instead + conflicts_with cask: "caskroom/cask/mactex" + # For Pillow source install depends_on "openjpeg" depends_on "freetype"
autobrew: declare conflict with mactex As reported in <URL>
py
diff --git a/manticore/manticore.py b/manticore/manticore.py index <HASH>..<HASH> 100644 --- a/manticore/manticore.py +++ b/manticore/manticore.py @@ -49,12 +49,10 @@ def makeLinux(program, argv, env, concrete_start = ''): logger.info('Starting with concrete input: {}'.format(concrete_start)) for i, arg in enumerate(argv): - argv[i] = initial_state.symbolicate_buffer(arg, label='ARGV%d' % (i+1), - string=True) + argv[i] = initial_state.symbolicate_buffer(arg, label='ARGV%d' % (i+1)) for i, evar in enumerate(env): - env[i] = initial_state.symbolicate_buffer(evar, label='ENV%d' % (i+1), - string=True) + env[i] = initial_state.symbolicate_buffer(evar, label='ENV%d' % (i+1)) # If any of the arguments or environment refer to symbolic values, re- # initialize the stack
Don't use string=True for symbolic arg/env (#<I>) This very strictly constrains the arg/env to the length of the symbolic string which is too strict for many use cases. For example: manticore ./bin +++++ that symbolic string would only be allowed to be strings of length 5, and no shorter.
py
diff --git a/exchangelib/folders.py b/exchangelib/folders.py index <HASH>..<HASH> 100644 --- a/exchangelib/folders.py +++ b/exchangelib/folders.py @@ -925,14 +925,14 @@ class Root(Folder): if isinstance(f, (ErrorFolderNotFound, ErrorNoPublicFolderReplicaAvailable)): # This is just a distinguished folder the server does not have continue - if isinstance(f, ErrorInvalidOperation) \ + if isinstance(f, ErrorInvalidOperation) and f.value == 'The distinguished folder name is unrecognized.': + # This is just a distinguished folder the server does not have + continue + if isinstance(f, ErrorItemNotFound) \ and f.value == 'The specified object was not found in the store., The process failed ' \ 'to get the correct properties.': # This another way of telling us that this is just a distinguished folder the server does not have continue - if isinstance(f, ErrorItemNotFound) and f.value == 'The distinguished folder name is unrecognized.': - # This is just a distinguished folder the server does not have - continue folders_map[f.folder_id] = f for f in FolderCollection(account=self.account, folders=[self]).find_folders(depth=DEEP): if isinstance(f, Exception):
Exception classes got mixed up. Refs #<I>
py
diff --git a/pupa/cli/commands/update.py b/pupa/cli/commands/update.py index <HASH>..<HASH> 100644 --- a/pupa/cli/commands/update.py +++ b/pupa/cli/commands/update.py @@ -298,7 +298,8 @@ class Command(BaseCommand): } print_report(report) - self.check_session_list(juris) + if 'scrape' in args.actions: + self.check_session_list(juris) try: if 'scrape' in args.actions:
don't scrape if --scrape isn't passed, #<I>
py
diff --git a/src/sos/executor_utils.py b/src/sos/executor_utils.py index <HASH>..<HASH> 100644 --- a/src/sos/executor_utils.py +++ b/src/sos/executor_utils.py @@ -202,13 +202,6 @@ def create_task(global_def, global_vars, task_stmt, task_params): env.sos_dict['_runtime']['run_mode'] = env.config.get( 'run_mode', 'run') env.sos_dict['_runtime']['home_dir'] = os.path.expanduser('~') - if 'workdir' in env.sos_dict['_runtime'] and not os.path.isdir(os.path.expanduser(env.sos_dict['_runtime']['workdir'])): - try: - os.makedirs(os.path.expanduser( - env.sos_dict['_runtime']['workdir'])) - except Exception: - raise RuntimeError( - f'Failed to create workdir {env.sos_dict["_runtime"]["workdir"]}') # NOTE: we do not explicitly include 'step_input', 'step_output', # 'step_depends' and 'CONFIG'
Stop interpreting workdir as local directory
py
diff --git a/libtmux/test.py b/libtmux/test.py index <HASH>..<HASH> 100644 --- a/libtmux/test.py +++ b/libtmux/test.py @@ -2,7 +2,7 @@ import contextlib import logging import os -import tempfile +import random import time import warnings from typing import Callable, Optional @@ -15,7 +15,19 @@ TEST_SESSION_PREFIX = "libtmux_" RETRY_TIMEOUT_SECONDS = int(os.getenv("RETRY_TIMEOUT_SECONDS", 8)) RETRY_INTERVAL_SECONDS = float(os.getenv("RETRY_INTERVAL_SECONDS", 0.05)) -namer = tempfile._RandomNameSequence() + +class RandomStrSequence: + def __init__(self, characters: str = "abcdefghijklmnopqrstuvwxyz0123456789_"): + self.characters: str = characters + + def __iter__(self): + return self + + def __next__(self): + return "".join(random.sample(self.characters, k=8)) + + +namer = RandomStrSequence() current_dir = os.path.abspath(os.path.dirname(__file__)) example_dir = os.path.abspath(os.path.join(current_dir, "..", "examples")) fixtures_dir = os.path.realpath(os.path.join(current_dir, "fixtures"))
chore(test): Replace temp._RandomNameSequence with separate function This was a private API and not mypy compatible (not in typeshed)
py
diff --git a/models/fallahi_eval/run_task5.py b/models/fallahi_eval/run_task5.py index <HASH>..<HASH> 100644 --- a/models/fallahi_eval/run_task5.py +++ b/models/fallahi_eval/run_task5.py @@ -23,20 +23,19 @@ def get_task_5(data, inverse=False): stmts_to_check[cell_line] = {} for drug in drug_names.keys(): stmts_to_check[cell_line][drug] = {} - target_agents = [agent_phos(target, []) for - target in drug_targets[drug]] + drug_agent = Agent(drug, db_refs=drug_grounding[drug]) for dose in drug_doses: if dose < dose_lower_bound: continue values = get_agent_values_for_condition(data, cell_line, drug, time, dose) stmts_to_check[cell_line][drug][dose] = [[], values] - for target, obs in itertools.product(target_agents, obs_agents): + for obs in obs_agents: if (cell_line == 'C32' and not inverse) or \ (cell_line == 'RVH421' and inverse): - st = IncreaseAmount(target, obs) + st = DecreaseAmount(drug_agent, obs) else: - st = DecreaseAmount(target, obs) + st = IncreaseAmount(drug_agent, obs) stmts_to_check[cell_line][drug][dose][0].append(st) return stmts_to_check
Implement drug-target nodes for Task 5
py
diff --git a/transformers/tokenization_utils.py b/transformers/tokenization_utils.py index <HASH>..<HASH> 100644 --- a/transformers/tokenization_utils.py +++ b/transformers/tokenization_utils.py @@ -634,7 +634,7 @@ class PreTrainedTokenizer(object): return result def split_on_tokens(tok_list, text): - if not text: + if not text.strip(): return [] if not tok_list: return self._tokenize(text, **kwargs)
handle string with only whitespaces as empty
py
diff --git a/visidata/path.py b/visidata/path.py index <HASH>..<HASH> 100644 --- a/visidata/path.py +++ b/visidata/path.py @@ -55,6 +55,11 @@ class FileProgress: self.fp.__enter__() return self + def __next__(self): + r = next(self.fp) + self.prog.addProgress(len(r)) + return r + def __iter__(self): if not self.prog: yield from self.fp
[path] auto-Progress for open non-compressed files
py
diff --git a/tests/jenkins.py b/tests/jenkins.py index <HASH>..<HASH> 100644 --- a/tests/jenkins.py +++ b/tests/jenkins.py @@ -91,10 +91,12 @@ def download_unittest_reports(options): print('Downloading remote unittest reports...') sys.stdout.flush() - if os.path.isdir('xml-test-reports'): - shutil.rmtree('xml-test-reports') + workspace = options.workspace + xml_reports_path = os.path.join(workspace, 'xml-test-reports') + if os.path.isdir(xml_reports_path): + shutil.rmtree(xml_reports_path) - os.makedirs('xml-test-reports') + os.makedirs(xml_reports_path) cmds = ( 'salt {0} archive.tar zcvf /tmp/xml-test-reports.tar.gz \'*.xml\' cwd=/tmp/xml-unitests-output/', @@ -105,7 +107,6 @@ def download_unittest_reports(options): ) vm_name = options.download_unittest_reports - workspace = options.workspace for cmd in cmds: cmd = cmd.format(vm_name, workspace) print('Running CMD: {0}'.format(cmd))
Don't ignore the workspace path option
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,8 @@ setup(name="pysnmp-apps", 'pysnmpap.cli.ucd.proto', 'pysnmpap.cli.ucd.carrier', 'pysnmpap.cli.ucd.carrier.udp' ], - data_files = [ ('local/bin', ['apps/pysnmpwalk']), + data_files = [ ('local/bin', ['apps/pysnmpget']), + ('local/bin', ['apps/pysnmpwalk']), ('local/bin', ['apps/pysnmpbulkwalk']), ('local/bin', ['apps/pysnmptrapd']), ], license="BSD"
missing pysnmpget/set committed
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -104,6 +104,8 @@ class Test(TestCommand): check_call(cmd, shell=True) except CalledProcessError as exc: print(exc) + print('Unit tests failed. Fix the error(s) above and try again.') + sys.exit(-1) class TestCoverage(Test): @@ -119,6 +121,8 @@ class TestCoverage(Test): check_call(cmd, shell=True) except CalledProcessError as exc: print(exc) + print('Coverage tests failed. Fix the errors above and try again.') + sys.exit(-1) class CITest(TestCommand):
Fix Travis false positive message (#<I>) Fix Coverage and Test not throwing errors when tests fail.
py
diff --git a/holoviews/core/data.py b/holoviews/core/data.py index <HASH>..<HASH> 100644 --- a/holoviews/core/data.py +++ b/holoviews/core/data.py @@ -314,7 +314,7 @@ class Columns(Element): if not len(dimensions): dimensions = self.dimensions('key', True) if group_type is None: group_type = type(self) - dimensions = [self.get_dimension(d).name for d in dimensions] + dimensions = [self.get_dimension(d, strict=True).name for d in dimensions] invalid_dims = list(set(dimensions) - set(self.dimensions('key', True))) if invalid_dims: raise Exception('Following dimensions could not be found:\n%s.'
Added strict checking of dimensions on Columns.groupby
py
diff --git a/unittests/test_packet_data.py b/unittests/test_packet_data.py index <HASH>..<HASH> 100755 --- a/unittests/test_packet_data.py +++ b/unittests/test_packet_data.py @@ -191,13 +191,6 @@ class DataPacketTestCase(unittest.TestCase): ] - def test_repr(self): - ''' - The representation is str(__dict__) - ''' - message = data.LISPDataPacket() - self.assertEqual(repr(message), str(message.__dict__)) - suite = unittest.TestLoader().loadTestsFromTestCase(DataPacketTestCase)
Remove (now broken) __repr__ test
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,6 @@ from setuptools import setup from os import path -import re +import re PACKAGE_NAME='psaw' HERE = path.abspath(path.dirname(__file__)) @@ -22,6 +22,7 @@ setup(name=PACKAGE_NAME, url='http://github.com/dmarx/psaw', license='Simplified BSD License', install_requires=['requests'], + keywords='reddit api wrapper pushshift', classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console',
Added keywords to setup.py
py
diff --git a/brothon/bro_log_reader.py b/brothon/bro_log_reader.py index <HASH>..<HASH> 100644 --- a/brothon/bro_log_reader.py +++ b/brothon/bro_log_reader.py @@ -42,15 +42,20 @@ class BroLogReader(file_tailer.FileTailer): as a dictionary of {key:value, ...} based on Bro header. """ # Calling the internal _readrows so we can catch issues/log rotations + reconnecting = True while True: # Yield the rows from the internal reader try: for row in self._readrows(): + if reconnecting: + print('Successfully monitoring {:s}...'.format(self._filepath)) + reconnecting = False yield row except IOError: # If the tail option is set then we do a retry (might just be a log rotation) if self._tail: print('Could not open file {:s} Retrying...'.format(self._filepath)) + reconnecting = True time.sleep(5) continue else: @@ -59,6 +64,7 @@ class BroLogReader(file_tailer.FileTailer): # If the tail option is set then we do a retry (might just be a log rotation) if self._tail: print('File closed {:s} Retrying...'.format(self._filepath)) + reconnecting = True time.sleep(5) continue else:
letting the user know that we are successfully monitoring the file
py
diff --git a/pytest-virtualenv/pytest_virtualenv.py b/pytest-virtualenv/pytest_virtualenv.py index <HASH>..<HASH> 100644 --- a/pytest-virtualenv/pytest_virtualenv.py +++ b/pytest-virtualenv/pytest_virtualenv.py @@ -170,7 +170,11 @@ class VirtualEnv(Workspace): """ installed = [p for p in working_set if p.project_name == pkg_name] if not installed or installed[0].location.endswith('.egg'): - installer = str(self.virtualenv / 'bin' / installer) + if sys.platform == 'win32': + # In virtualenv on windows "Scripts" folder is used instead of "bin". + installer = str(self.virtualenv / 'Scripts' / installer + '.exe') + else: + installer = str(self.virtualenv / 'bin' / installer) if not self.debug: installer += ' -q' # Note we're running this as 'python easy_install foobar', instead of 'easy_install foobar'
virtualenv/install_package: Add support for windows This commit introduces a change similar to <I>d3bd8 (fix pytest-virtualenv running on Windows)
py
diff --git a/wandb/sklearn/__init__.py b/wandb/sklearn/__init__.py index <HASH>..<HASH> 100644 --- a/wandb/sklearn/__init__.py +++ b/wandb/sklearn/__init__.py @@ -1093,6 +1093,12 @@ def plot_calibration_curve(clf=None, X=None, y=None, clf_name="Classifier"): random_state=42, ) + # ComplementNB (introduced in 0.20.0) requires non-negative features + if int(sklearn.__version__.split(".")[1]) >= 20 and isinstance( + clf, naive_bayes.ComplementNB + ): + X = X - X.min() + X_train, X_test, y_train, y_test = model_selection.train_test_split( X, y, test_size=0.99, random_state=42 )
[CLI-<I>]: Fixed Bug in plot_calibration_curve for ComplementNB (#<I>) * fixed bug in complementnb * fixed comment * fixed comment
py
diff --git a/tofu/geom/_plot_optics.py b/tofu/geom/_plot_optics.py index <HASH>..<HASH> 100644 --- a/tofu/geom/_plot_optics.py +++ b/tofu/geom/_plot_optics.py @@ -1024,9 +1024,13 @@ def CrystalBragg_plot_johannerror( if err is None: err = 'abs' - if err == 'rel': - err_lamb = 100.*err_lamb / (np.nanmax(lamb) - np.nanmin(lamb)) - err_phi = 100.*err_phi / (np.nanmax(phi) - np.nanmin(phi)) + if 'rel' in err: + if err == 'rel': + err_lamb = 100.*err_lamb / (np.nanmax(lamb) - np.nanmin(lamb)) + err_phi = 100.*err_phi / (np.nanmax(phi) - np.nanmin(phi)) + elif err == 'rel2': + err_lamb = 100.*err_lamb / np.mean(lamb) + err_phi = 100.*err_phi / np.mean(phi) err_lamb_units = '%' err_phi_units = '%' else:
[#<I>] Added err == 'rel2' to _plot_optics.CrystalBragg_plot_johannerror()
py
diff --git a/test/common/procutil.py b/test/common/procutil.py index <HASH>..<HASH> 100644 --- a/test/common/procutil.py +++ b/test/common/procutil.py @@ -45,6 +45,7 @@ class SubProcess(object): self.valgrind_tool = valgrind_tool if valgrind_tool is not None: cmd_line = ["valgrind", + "--log-file=%s" % test_dir.make_file(output_path + ".valgrind"), "--tool=%s" % valgrind_tool, "--error-exitcode=%d" % valgrind_error_code] if valgrind_tool == "memcheck":
Change where valgrind output goes. Valgrind output is now separate from rethinkdb's output so that we can actually see stuff in our test failures.
py
diff --git a/gwpy/plotter/timeseries.py b/gwpy/plotter/timeseries.py index <HASH>..<HASH> 100644 --- a/gwpy/plotter/timeseries.py +++ b/gwpy/plotter/timeseries.py @@ -451,14 +451,12 @@ class TimeSeriesPlot(Plot): "'bottom'.") segax = divider.append_axes(location, height, pad=pad, axes_class=SegmentAxes, sharex=ax) - segax.set_xscale(ax.get_xscale()) + segax.set_xlim(*ax.get_xlim()) + segax.set_xlabel(ax.get_xlabel()) + ax.set_xlabel("") - # plot segments and set axes properties + # plot segments segax.plot(segments, **plotargs) segax.grid(b=False, which='both', axis='y') segax.autoscale(axis='y', tight=True) - # set ticks and label - segax.set_xlabel(ax.get_xlabel()) - ax.set_xlabel("") - segax.set_xlim(*ax.get_xlim()) return segax
TimeSeriesPlot.add_state_segments: simplified segax scaling. The new axes inherit scaling and limits from the original data axes, so we don't need to set_epoch or set_xscale
py
diff --git a/wpull/processor.py b/wpull/processor.py index <HASH>..<HASH> 100644 --- a/wpull/processor.py +++ b/wpull/processor.py @@ -145,7 +145,7 @@ class WebProcessorSession(BaseProcessorSession): url_info = self._next_url_info url_record = self._url_item.url_record - if self._test_url_filter(url_info, url_record): + if self._is_url_filtered(url_info, url_record): return True else: @@ -281,17 +281,26 @@ class WebProcessorSession(BaseProcessorSession): def wait_time(self): return self._waiter.get() - def _test_url_filter(self, url_info, url_record): - results = [] + def _filter_url(self, url_info, url_record): + passed = set() + failed = set() for url_filter in self._url_filters: result = url_filter.test(url_info, url_record) _logger.debug( 'URL Filter test {0} returned {1}'.format(url_filter, result)) - results.append(result) - return all(results) + if result: + passed.add(url_filter) + else: + failed.add(url_filter) + + return passed, failed + + def _is_url_filtered(self, url_info, url_record): + failed = self._filter_url(url_info, url_record)[1] + return len(failed) == 0 def _scrape_document(self, request, response): inline_urls = set()
Splits WebProcessorSession._test_url_filter. Adds _is_url_filtered and _filter_url.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -5,12 +5,12 @@ from setuptools import setup, find_packages setup( name='nautilus', - version='0.4.6', + version='0.4.7', description='A library for creating microservice applications', author='Alec Aivazis', author_email='alec@aivazis.com', url='https://github.com/AlecAivazis/nautilus', - download_url='https://github.com/aaivazis/nautilus/tarball/0.4.5', + download_url='https://github.com/aaivazis/nautilus/tarball/0.4.7', keywords=['microservice', 'tornado', 'graphql'], test_suite='nose2.collector.collector', packages=find_packages(exclude=['example', 'tests']),
package version and download link are now in sync
py
diff --git a/exchangelib/util.py b/exchangelib/util.py index <HASH>..<HASH> 100644 --- a/exchangelib/util.py +++ b/exchangelib/util.py @@ -22,6 +22,7 @@ from oauthlib.oauth2 import TokenExpiredError from pygments import highlight from pygments.formatters.terminal import TerminalFormatter from pygments.lexers.html import XmlLexer +from requests_oauthlib import OAuth2Session from .errors import ( InvalidTypeError, @@ -837,10 +838,12 @@ Response XML: %(xml_response)s""" d_start = time.monotonic() # Always create a dummy response for logging purposes, in case we fail in the following r = DummyResponse(url=url, request_headers=headers) + kwargs = dict(url=url, headers=headers, data=data, allow_redirects=False, timeout=timeout, stream=stream) + if isinstance(session, OAuth2Session): + # Fix token refreshing bug. Reported as https://github.com/requests/requests-oauthlib/issues/498 + kwargs.update(session.auto_refresh_kwargs) try: - r = session.post( - url=url, headers=headers, data=data, allow_redirects=False, timeout=timeout, stream=stream - ) + r = session.post(**kwargs) except TLS_ERRORS as e: # Don't retry on TLS errors. They will most likely be persistent. raise TransportError(str(e))
Add client_id and client_secret explicitly to session.post() to work around token refresh bug (#<I>) Fixes #<I>
py
diff --git a/cherrypy/wsgiserver/ssl_pyopenssl.py b/cherrypy/wsgiserver/ssl_pyopenssl.py index <HASH>..<HASH> 100644 --- a/cherrypy/wsgiserver/ssl_pyopenssl.py +++ b/cherrypy/wsgiserver/ssl_pyopenssl.py @@ -96,17 +96,8 @@ class SSL_fileobject(wsgiserver.CP_fileobject): if time.time() - start > self.ssl_timeout: raise socket.timeout("timed out") - def recv(self, *args, **kwargs): - buf = [] - r = super(SSL_fileobject, self).recv - while True: - data = self._safe_call(True, r, *args, **kwargs) - buf.append(data) - p = self._sock.pending() - if not p: - return "".join(buf) - elif len("".join(buf)) + p >= args[0]: - return "".join(buf) + def recv(self, size): + return self._safe_call(True, super(SSL_fileobject, self).recv, size) def sendall(self, *args, **kwargs): return self._safe_call(False, super(SSL_fileobject, self).sendall,
Simplified SSL_fileobject.recv(). This should be a better resolution for issue #<I>.
py
diff --git a/client/__init__.py b/client/__init__.py index <HASH>..<HASH> 100644 --- a/client/__init__.py +++ b/client/__init__.py @@ -1,4 +1,4 @@ -__version__ = 'v1.3.3' +__version__ = 'v1.3.4' import os import sys
Update ok to <I>
py
diff --git a/uncompyle6/semantics/fragments.py b/uncompyle6/semantics/fragments.py index <HASH>..<HASH> 100644 --- a/uncompyle6/semantics/fragments.py +++ b/uncompyle6/semantics/fragments.py @@ -105,6 +105,9 @@ TABLE_DIRECT_FRAGMENT = { 'pass': ( '%|%rpass\n', ), 'raise_stmt0': ( '%|%rraise\n', ), 'import': ( '%|import %c%x\n', 2, (2, (0, 1)), ), + 'import_cont': ( ', %c%x', (2, 'alias'), (2, (0, 1)), ), + 'import_from': ( '%|from %[2]{pattr}%x import %c\n', + (2, (0, 1)), (3, 'importlist'), ), 'importfrom': ( '%|from %[2]{pattr}%x import %c\n', (2, (0, 1)), 3), # FIXME only in <= 2.4
More complete fragment parsing for imports
py
diff --git a/holoviews/plotting/bokeh/annotation.py b/holoviews/plotting/bokeh/annotation.py index <HASH>..<HASH> 100644 --- a/holoviews/plotting/bokeh/annotation.py +++ b/holoviews/plotting/bokeh/annotation.py @@ -40,8 +40,11 @@ class TextPlot(ElementPlot): def get_batched_data(self, element, ranges=None): data = defaultdict(list) - for key, el in element.data.items(): - eldata, elmapping, style = self.get_data(el, ranges) + zorders = self._updated_zorders(element) + for (key, el), zorder in zip(element.data.items(), zorders): + style = self.lookup_options(element.last, 'style') + style = style.max_cycles(len(self.ordering))[zorder] + eldata, elmapping, style = self.get_data(el, ranges, style) for k, eld in eldata.items(): data[k].extend(eld) return data, elmapping, style
Fixed bug introduced in recent bokeh plots refactor (#<I>)
py
diff --git a/keyboard/keyboard.py b/keyboard/keyboard.py index <HASH>..<HASH> 100644 --- a/keyboard/keyboard.py +++ b/keyboard/keyboard.py @@ -192,9 +192,12 @@ def write(text, delay=0): Delay is a number of seconds to wait between keypresses. """ - for modifier in all_modifiers: - if is_pressed(modifier): - release(modifier) + initial_modifiers = {m for m in all_modifiers if is_pressed(m)} + + # If we were called during a hotkey the user may still be holding the hotkey + # modifier, which will affect the letters typed. + for modifier in initial_modifiers: + release(modifier) for letter in text: try: @@ -219,6 +222,10 @@ def write(text, delay=0): if delay: time.sleep(delay) + # Restore initial state of modifiers. + for modifier in initial_modifiers: + press(modifier) + def send(combination, do_press=True, do_release=True): """ Performs a given hotkey combination.
Restore state of modifiers after writing text
py
diff --git a/forms.py b/forms.py index <HASH>..<HASH> 100644 --- a/forms.py +++ b/forms.py @@ -14,10 +14,15 @@ class RegistrationForm(forms.Form): and that the username is not already taken. """ - username = forms.CharField(max_length=30, widget=forms.TextInput(attrs=attrs_dict)) - email = forms.EmailField(widget=forms.TextInput(attrs=attrs_dict)) - password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict)) - password2 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict)) + username = forms.CharField(max_length=30, + widget=forms.TextInput(attrs=attrs_dict), + label=u'Username') + email = forms.EmailField(widget=forms.TextInput(attrs=attrs_dict), + label=u'Email address') + password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict), + label=u'Password') + password2 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict), + label=u'Password (again, to catch typos)') def clean_username(self): """
Add labels to form fields so that built-in display methods will work nicely
py
diff --git a/tests/organization.py b/tests/organization.py index <HASH>..<HASH> 100644 --- a/tests/organization.py +++ b/tests/organization.py @@ -454,6 +454,11 @@ class organization(Spec): } self._expect_releases(entries, expected, skip_initial=True) + def affects_unreleased_buckets_too(self): + # I.e. there is no unreleased_bugfix vs unreleased_feature, only + # Zuul^Wunreleased + skip() + def does_not_affect_releases_after_1_0(self): # Mixed changelog crossing 1.0 boundary skip() @@ -465,3 +470,7 @@ class organization(Spec): def doesnt_care_if_you_skipped_1_0_entirely(self): # Mixed changelog where 1.0 is totally skipped and one goes to 2.0 skip() + + def is_not_enabled_by_default(self): + # Prove 'regular' behavior pre-1.0 if setting not enabled + skip()
Was missing some more skel tests
py
diff --git a/salt/grains/core.py b/salt/grains/core.py index <HASH>..<HASH> 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -515,8 +515,11 @@ def _virtual(osdata): sysctl = salt.utils.which('sysctl') if osdata['kernel'] in choices: if os.path.isfile('/proc/1/cgroup'): - if ':/lxc/' in salt.utils.fopen('/proc/1/cgroup', 'r').read(): - grains['virtual_subtype'] = 'LXC' + try: + if ':/lxc/' in salt.utils.fopen('/proc/1/cgroup', 'r').read(): + grains['virtual_subtype'] = 'LXC' + except IOError: + pass if isdir('/proc/vz'): if os.path.isfile('/proc/vz/version'): grains['virtual'] = 'openvzhn'
sometimes /proc/1/cgroup is not readable Prevent the code from throwing fatal exceptions when /proc/1/cgroup exists but is not readable (which can happen in VPS systems).
py
diff --git a/sshtunnel.py b/sshtunnel.py index <HASH>..<HASH> 100644 --- a/sshtunnel.py +++ b/sshtunnel.py @@ -610,15 +610,16 @@ class SSHTunnelForwarder(object): .format(ssh_config_file)) if not ssh_password: - ssh_private_key = paramiko.RSAKey.from_private_key_file( - ssh_private_key, - password=ssh_private_key_password - ) if ssh_private_key else None - # Check if a private key was supplied or found in ssh_config if not ssh_private_key: raise ValueError('No password or private key available!') + if isinstance(ssh_private_key, string_types): + ssh_private_key = paramiko.RSAKey.from_private_key_file( + ssh_private_key, + password=ssh_private_key_password + ) + if not ssh_port: ssh_port = 22
Support paramiko PKey as ssh_private_key
py
diff --git a/py/testdir_multi_jvm/test_KMeans_create_frame_fvec.py b/py/testdir_multi_jvm/test_KMeans_create_frame_fvec.py index <HASH>..<HASH> 100644 --- a/py/testdir_multi_jvm/test_KMeans_create_frame_fvec.py +++ b/py/testdir_multi_jvm/test_KMeans_create_frame_fvec.py @@ -15,7 +15,7 @@ def define_create_frame_params(SEED): 'factors': [None, 2, 10], # Factor levels for categorical variables 'integer_fraction': [None, 0.1, 1.0], # Fraction of integer columns (for randomize=true) 'integer_range': [None, 0, 1, 1234567890], # -range to range - 'missing_fraction': [None, 0.1, 1.0], + 'missing_fraction': [None, 0.1], 'response_factors': [None, 1, 2, 10], # Number of factor levels of the first column (1=real, 2=binomial, N=multinomial) } return paramDict
Don't test the case where all columns are missing values in combination with default argument of KMeans to ignore those columns!
py
diff --git a/tacl/corpus.py b/tacl/corpus.py index <HASH>..<HASH> 100644 --- a/tacl/corpus.py +++ b/tacl/corpus.py @@ -51,7 +51,12 @@ class Corpus: filename)) with open(os.path.join(self._path, filename), encoding='utf-8') \ as fh: - content = fh.read() + try: + content = fh.read() + except Exception: + self._logger.error('Failed to read witness text {}'.format( + filename)) + raise return text_class(work, siglum, content, self._tokenizer) def get_witnesses(self, name='*'):
Added useful logging on failure to read a witness file.
py
diff --git a/oauth/salesforce.py b/oauth/salesforce.py index <HASH>..<HASH> 100644 --- a/oauth/salesforce.py +++ b/oauth/salesforce.py @@ -8,7 +8,6 @@ from urlparse import urlparse import webbrowser HTTP_HEADERS = {'Content-Type': 'application/x-www-form-urlencoded'} -HTTP_TIMEOUT_S = 300 class SalesforceOAuth2(object): @@ -88,6 +87,7 @@ class CaptureSalesforceOAuth(object): self.oauth_api = self._get_oauth_api() self.response = None self.scope = scope + self.httpd_timeout = 300 def __call__(self): url = self.oauth_api.get_authorize_url(self.scope) @@ -101,7 +101,7 @@ class CaptureSalesforceOAuth(object): server_address = (url_parts.hostname, url_parts.port) OAuthCallbackHandler.parent = self self.httpd = HTTPServer(server_address, OAuthCallbackHandler) - self.httpd.timeout = HTTP_TIMEOUT_S + self.httpd.timeout = self.httpd_timeout def _get_oauth_api(self): return SalesforceOAuth2(
change httpd timeout to instance var
py
diff --git a/axes/decorators.py b/axes/decorators.py index <HASH>..<HASH> 100644 --- a/axes/decorators.py +++ b/axes/decorators.py @@ -106,8 +106,8 @@ def get_ip_address_from_request(request): if not x_forwarded_for.startswith(PRIVATE_IPS_PREFIX) and is_valid_ip(x_forwarded_for): ip_address = x_forwarded_for.strip() else: - ips = [ip.strip() for ip in x_forwarded_for.split(',')] - for ip in ips: + for ip_raw in x_forwarded_for.split(','): + ip = ip_raw.strip() if ip.startswith(PRIVATE_IPS_PREFIX): continue elif not is_valid_ip(ip):
Iterate over ip addresses only once
py
diff --git a/billing/tests/paylane_tests.py b/billing/tests/paylane_tests.py index <HASH>..<HASH> 100644 --- a/billing/tests/paylane_tests.py +++ b/billing/tests/paylane_tests.py @@ -106,11 +106,6 @@ class PaylaneTestCase(TestCase): self.assertTrue('transaction' in bill1['response']) self.assertTrue('authorization' in bill1['response']) - bill2 = self.merchant.bill_recurring(12.0, bill1['response']['authorization'], 'OK recurring') - self.assertEqual(bill2['status'], 'SUCCESS', unicode(bill2['response'])) - self.assertTrue('transaction' in bill2['response']) - self.assertTrue('authorization' in bill2['response']) - def testRecurringBillingFailWithChargeback(self): credit_card = Visa(first_name='Celso', last_name='Pinto', month=10, year=2020, number='4111111111111111', verification_value=435) options = {}
fix duplicate test case fixes: <PaylaneTransaction: Transaction for ....>, 'error': Error Code: <I> (Multiple same transactions lock triggered. Wait 7 s and try again.). Acquirer Error:
py
diff --git a/isort/isort.py b/isort/isort.py index <HASH>..<HASH> 100644 --- a/isort/isort.py +++ b/isort/isort.py @@ -844,9 +844,9 @@ class SortImports(object): self._in_top_comment = False while not self._at_end(): line = self._get_line() - line = line.replace("from.import", "from . import") + line = line.replace("from.import ", "from . import ") line = line.replace("\t", " ").replace('import*', 'import *') - line = line.replace(" .import", " . import") + line = line.replace(" .import ", " . import ") statement_index = self.index skip_line = self._skip_line(line)
Implement fix for issue #<I>
py
diff --git a/src/streamlink/plugins/periscope.py b/src/streamlink/plugins/periscope.py index <HASH>..<HASH> 100644 --- a/src/streamlink/plugins/periscope.py +++ b/src/streamlink/plugins/periscope.py @@ -10,7 +10,7 @@ STREAM_INFO_URL = "https://api.periscope.tv/api/v2/getAccessPublic" STATUS_GONE = 410 STATUS_UNAVAILABLE = (STATUS_GONE,) -_url_re = re.compile(r"http(s)?://(www\.)?periscope.tv/[^/]+/(?P<broadcast_id>[\w\-\=]+)") +_url_re = re.compile(r"http(s)?://(www\.)?(periscope|pscp)\.tv/[^/]+/(?P<broadcast_id>[\w\-\=]+)") _stream_schema = validate.Schema( validate.any( None,
New Periscope URL #<I> Old Periscope URL is still accessible. Using the new one temporarily (?) during legal action in Turkey. API URL (STREAM_INFO_URL) not interchangeable yet.
py
diff --git a/ppb/systems/clocks.py b/ppb/systems/clocks.py index <HASH>..<HASH> 100644 --- a/ppb/systems/clocks.py +++ b/ppb/systems/clocks.py @@ -1,3 +1,7 @@ +""" +This module performs time keeping of subsystems +""" + import time import ppb
Add module docstrings clocks Change reflected on clocks.py
py
diff --git a/tests/test_buku.py b/tests/test_buku.py index <HASH>..<HASH> 100644 --- a/tests/test_buku.py +++ b/tests/test_buku.py @@ -568,6 +568,17 @@ def test_sigint_handler(capsys): 'serp..1.2.311.06cSKPTLo18', ('xkbcomp alt gr', 0, 0) ], + [ + 'http://www.vim.org/scripts/script.php?script_id=4641', + ( + 'mlessnau_case - "in-case" selection, deletion and substitution ' + 'for underscore, camel, mixed case : vim online', 0, 0 + ) + ], + [ + 'http://www.kadrof.ru/cat_exchange.shtml', + ('Все биржи фриланса и удаленной работы - больше 110 сайтов | Kadrof.ru', 0, 0) + ], ] ) def test_network_handler_with_url(url, exp_res):
new: test: 2 url for testing
py
diff --git a/peep.py b/peep.py index <HASH>..<HASH> 100755 --- a/peep.py +++ b/peep.py @@ -162,7 +162,7 @@ def version_of_download(filename, package_name): # Since we know the project_name, we can strip that off the left, strip # any archive extensions off the right, and take the rest as the # version. - extensions = ['.tar.gz', '.tgz', '.tar', '.zip'] + extensions = ['.tar.bz2', '.tar.gz', '.tgz', '.tar', '.zip'] for ext in extensions: if filename.endswith(ext): filename = filename[:-len(ext)]
Add support for .tar.bz2 archives. Found that at least pytz provides a tar.bz2 archive.
py
diff --git a/validator/sawtooth_validator/networking/interconnect.py b/validator/sawtooth_validator/networking/interconnect.py index <HASH>..<HASH> 100644 --- a/validator/sawtooth_validator/networking/interconnect.py +++ b/validator/sawtooth_validator/networking/interconnect.py @@ -350,7 +350,14 @@ class _SendReceive(object): self._socket.curve_publickey = self._server_public_key self._socket.curve_server = True - self._socket.bind(self._address) + try: + self._socket.bind(self._address) + except zmq.error.ZMQError as e: + raise LocalConfigurationError( + "Can't bind to {}: {}".format(self._address, + str(e))) + else: + LOGGER.info("Listening on %s", self._address) self._dispatcher.add_send_message(self._connection, self.send_message)
Handle ZMQError as LocalConfigurationError Exit the validator if we can't bind to a provided address.
py
diff --git a/angr/analyses/cfg/cfg_fast.py b/angr/analyses/cfg/cfg_fast.py index <HASH>..<HASH> 100644 --- a/angr/analyses/cfg/cfg_fast.py +++ b/angr/analyses/cfg/cfg_fast.py @@ -934,7 +934,17 @@ class CFGFast(ForwardAnalysis, CFGBase): # pylint: disable=abstract-method addr += 1 if sz and is_sz: - l.debug("Got a string of %d chars: [%s]", len(sz), bytes(sz).decode()) + # avoid commonly seen ambiguous cases + if is_arm_arch(self.project.arch): + # little endian + sz_bytes = bytes(sz) + if self.project.arch.memory_endness == Endness.LE: + if b"\x70\x47" in sz_bytes: # bx lr + return 0 + if self.project.arch.memory_endness == Endness.BE: + if b"\x47\x70" in sz_bytes: # bx lr + return 0 + l.debug("Got a string of %d chars", len(sz)) string_length = len(sz) + 1 return string_length
CFGFast: Do not match against bx lr in strings heuristics. (#<I>)
py
diff --git a/ddsc/core/download.py b/ddsc/core/download.py index <HASH>..<HASH> 100644 --- a/ddsc/core/download.py +++ b/ddsc/core/download.py @@ -304,8 +304,11 @@ class ProjectFileDownloader(object): def show_progress_bar(self): downloaded_files, download_percent, total_bytes_downloaded = self.get_downloaded_files_and_percent() - sys.stdout.write("\r{:.0f}% {} - Downloaded {} of {} files".format( - download_percent, humanize_bytes(total_bytes_downloaded), downloaded_files, self.files_to_download) + format_pattern = "\rDownloaded {} ({} of {} files complete) " + sys.stdout.write(format_pattern.format( + humanize_bytes(total_bytes_downloaded).ljust(12), + downloaded_files, + self.files_to_download) ) def get_downloaded_files_and_percent(self):
download progress bar show bytes and files Changes progress bar to show human size of data download and number of complete files. Removes percent since it is not performant to calculate.
py
diff --git a/paramiko/channel.py b/paramiko/channel.py index <HASH>..<HASH> 100644 --- a/paramiko/channel.py +++ b/paramiko/channel.py @@ -889,7 +889,8 @@ class Channel(ClosingContextManager): client, it only makes sense to open this file for reading. For a server, it only makes sense to open this file for writing. - :return: `.ChannelFile` object which can be used for Python file I/O. + :returns: + `.ChannelStderrFile` object which can be used for Python file I/O. .. versionadded:: 1.1 """
Fix old docstring re: returned object class
py
diff --git a/pyemma/msm/analysis/dense/pcca.py b/pyemma/msm/analysis/dense/pcca.py index <HASH>..<HASH> 100644 --- a/pyemma/msm/analysis/dense/pcca.py +++ b/pyemma/msm/analysis/dense/pcca.py @@ -420,5 +420,5 @@ def coarsegrain(P, n): """ M = pcca(P,n) A = np.dot(np.dot(M.T, P), M) - B = np.invert(np.dot(M.T,M)) + B = np.linalg.inv(np.dot(M.T,M)) return np.dot(A,B)
[pcca] fix: call correct function for matrix inversion in coarse_grain.
py
diff --git a/nptdms/tdms.py b/nptdms/tdms.py index <HASH>..<HASH> 100644 --- a/nptdms/tdms.py +++ b/nptdms/tdms.py @@ -7,8 +7,12 @@ from collections import namedtuple try: from collections import OrderedDict except ImportError: - # For Python < 2.7, just use a normal dict - OrderedDict = dict + try: + # ordereddict available on pypi for Python < 2.7 + from ordereddict import OrderedDict + except ImportError: + # Otherwise fall back on normal dict + OrderedDict = dict from copy import copy import numpy as np
Use ordereddict package if it's available on Python < <I> Issue #6
py
diff --git a/smartmin/views.py b/smartmin/views.py index <HASH>..<HASH> 100644 --- a/smartmin/views.py +++ b/smartmin/views.py @@ -6,7 +6,7 @@ from django.views.generic.base import TemplateView, View from django.views.generic import DetailView, ListView import django.forms.models as model_forms from guardian.utils import get_anonymous_user -from django.utils.http import urlquote, urlquote_plus +from django.utils.http import urlquote from django.db.models import Q from django.db import IntegrityError from django.conf import settings @@ -351,7 +351,7 @@ class SmartView(object): for key in self.request.REQUEST.keys(): if key != 'page' and key != 'pjax' and key[0] != '_': for value in self.request.REQUEST.getlist(key): - url_params += "%s=%s&" % (key, urlquote_plus(value)) + url_params += "%s=%s&" % (key, urlquote(value)) elif key == '_order': order_params = "&".join(["%s=%s" % (key, _) for _ in self.request.REQUEST.getlist(key)])
Changed urlquote_plus to urlquote
py
diff --git a/docs/conf.py b/docs/conf.py index <HASH>..<HASH> 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -95,7 +95,8 @@ master_doc = 'index' # General information about the project. project = 'MetPy' # noinspection PyShadowingBuiltins -copyright = ('2019, MetPy Developers. Development supported by National Science Foundation grants ' +copyright = ('2019-2020, MetPy Developers. ' + 'Development supported by National Science Foundation grants ' 'AGS-1344155, OAC-1740315, and AGS-1901712.') # The version info for the project you're documenting, acts as replacement for
Update copyright year to include <I>
py
diff --git a/src/rez/tests/test_context.py b/src/rez/tests/test_context.py index <HASH>..<HASH> 100644 --- a/src/rez/tests/test_context.py +++ b/src/rez/tests/test_context.py @@ -114,7 +114,8 @@ class TestContext(TestBase, TempdirMixin): # check the pkg we contain is in the copied pkg repo variant = r2.resolved_packages[0] - self.assertTrue(variant.root.startswith(packages_path2 + os.path.sep)) + prefix = packages_path2 + os.path.sep + self.assertEqual(variant.root[:len(prefix)], prefix) self._test_execute_command_environ(r2)
debugging failing test on osx
py
diff --git a/uncompyle6/parsers/parse36.py b/uncompyle6/parsers/parse36.py index <HASH>..<HASH> 100644 --- a/uncompyle6/parsers/parse36.py +++ b/uncompyle6/parsers/parse36.py @@ -122,9 +122,12 @@ class Python36Parser(Python35Parser): try_except36 ::= SETUP_EXCEPT returns except_handler36 opt_come_from_except try_except36 ::= SETUP_EXCEPT suite_stmts + try_except36 ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK + except_handler36 opt_come_from_except # 3.6 omits END_FINALLY sometimes except_handler36 ::= COME_FROM_EXCEPT except_stmts + except_handler36 ::= JUMP_FORWARD COME_FROM_EXCEPT except_stmts except_handler ::= jmp_abs COME_FROM_EXCEPT except_stmts stmt ::= tryfinally36 @@ -170,6 +173,7 @@ class Python36Parser(Python35Parser): JUMP_ABSOLUTE END_FINALLY COME_FROM for_block pb_ja else_suite COME_FROM_LOOP + """) self.check_reduce['call_kw'] = 'AST'
Python <I>+ try/else with no trailing END_FINALLY
py
diff --git a/openquake/calculators/disaggregation.py b/openquake/calculators/disaggregation.py index <HASH>..<HASH> 100644 --- a/openquake/calculators/disaggregation.py +++ b/openquake/calculators/disaggregation.py @@ -303,12 +303,13 @@ class DisaggregationCalculator(base.HazardCalculator): self.save_bin_edges() sd = shapedic.copy() sd.pop('trt') + sd.pop('M') nbytes, msg = get_array_nbytes(sd) if nbytes > oq.max_data_transfer: raise ValueError( 'Estimated data transfer too big\n%s > max_data_transfer=%s' % (msg, humansize(oq.max_data_transfer))) - logging.info('Estimated data transfer: %s', msg) + logging.info('Estimated data transfer:\n%s', msg) tot = get_outputs_size(shapedic, oq.disagg_outputs or disagg.pmf_map) logging.info('Total output size: %s', humansize(sum(tot.values()))) self.imldic = {} # sid, rlz, poe, imt -> iml
Improved logging [skip CI]
py
diff --git a/pyana/examples/gp_rdiff.py b/pyana/examples/gp_rdiff.py index <HASH>..<HASH> 100644 --- a/pyana/examples/gp_rdiff.py +++ b/pyana/examples/gp_rdiff.py @@ -204,7 +204,7 @@ def gp_rdiff(version, nomed, noxerr, diffRel, divdNdy): #lines = { ('x=1' if diffRel else 'x=0'): 'lc 0 lw 4 lt 2' }, gpcalls = gpcalls, lmargin = 0.12, bmargin = 0.12, tmargin = 0.9, rmargin = 0.98, - size = '12in,9in', arrow_length = 0.4, arrow_offset = 0.9, + size = '12in,9in', arrow_length = 0.4, ) if nomed or noxerr or version == 'QM12': return 'done'
gp_rdiff: small arrow_offset fix
py
diff --git a/openquake/hazardlib/__init__.py b/openquake/hazardlib/__init__.py index <HASH>..<HASH> 100644 --- a/openquake/hazardlib/__init__.py +++ b/openquake/hazardlib/__init__.py @@ -26,5 +26,5 @@ from openquake.hazardlib import ( tom, near_fault) # the version is managed by packager.sh with a sed -__version__ = '0.21.0' +__version__ = '0.22.0' __version__ += git_suffix(__file__)
update development version to <I>
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -75,6 +75,7 @@ classifiers = [ 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', 'Topic :: System', 'Topic :: Software Development :: Debuggers', ] @@ -100,6 +101,12 @@ class build(_build): _build.run(self) def cythonize(self, extensions): + # Run Cython with -Werror on continuous integration services + # with Python 3.6 or later + if "CI" in os.environ and sys.version_info >= (3, 6): + from Cython.Compiler import Options + Options.warning_errors = True + from Cython.Build.Dependencies import cythonize return cythonize(extensions, build_dir=cythonize_dir,
Run Cython with -Werror on continuous integration services
py
diff --git a/abilian/services/indexing/schema.py b/abilian/services/indexing/schema.py index <HASH>..<HASH> 100644 --- a/abilian/services/indexing/schema.py +++ b/abilian/services/indexing/schema.py @@ -26,4 +26,4 @@ class DefaultSearchSchema(SchemaClass): name = TEXT(stored=True, analyzer=accent_folder) description = TEXT(stored=True, analyzer=accent_folder) - text = TEXT(stored=True, analyzer=accent_folder) + text = TEXT(stored=False, analyzer=accent_folder)
don't store text by default, it's supposed to be the "full text" index
py
diff --git a/tools/merge_flink_pr.py b/tools/merge_flink_pr.py index <HASH>..<HASH> 100755 --- a/tools/merge_flink_pr.py +++ b/tools/merge_flink_pr.py @@ -92,7 +92,7 @@ def continue_maybe(prompt): fail("Okay, exiting") -original_head = run_cmd("git rev-parse HEAD")[:8] +original_head = run_cmd("git rev-parse --abbrev-ref HEAD").rstrip("/\n") def clean_up():
[tools] Add --abbrev-ref to get the right branch name of HEAD Add --abbrev-ref to get the right branch name of HEAD rather than checksum to return back to original branch. Without it will make merge tool to go to unnamed branch. Somehow old PR #<I> could not be reopen so submit new one. Sorry
py
diff --git a/bcbio/cwl/hpc.py b/bcbio/cwl/hpc.py index <HASH>..<HASH> 100644 --- a/bcbio/cwl/hpc.py +++ b/bcbio/cwl/hpc.py @@ -78,7 +78,7 @@ def _args_to_cromwell(args): """Convert input arguments into cromwell inputs for config and command line. """ default_config = {"slurm": {"timelimit": "1-00:00", "account": ""}, - "sge": {"memtype": "mem_type", "pename": "smp"}, + "sge": {"memtype": "mem_free", "pename": "smp"}, "lsf": {"walltime": "24:00", "account": ""}, "htcondor": {}, "torque": {"walltime": "24:00:00", "account": ""},
Cromwell SGE: default to mem_free for memory specification #<I>
py
diff --git a/animal/views.py b/animal/views.py index <HASH>..<HASH> 100644 --- a/animal/views.py +++ b/animal/views.py @@ -126,12 +126,12 @@ def breeding_change(request, breeding_id): """ breeding = Breeding.objects.select_related().get(id=breeding_id) strain = breeding.Strain - PupsFormSet = inlineformset_factory(Breeding, Animal, extra=0, fields=['MouseID', 'Gender','Cage', 'Genotype', 'Death','Cause_of_Death','Born','Rack', 'Rack_Position','Markings']) + PupsFormSet = inlineformset_factory(Breeding, Animal, extra=0, exclude=('Alive','Father', 'Mother', 'CageID', 'Breeding', 'Notes')) if request.method =="POST": formset = PupsFormSet(request.POST, instance=breeding) if formset.is_valid(): formset.save() - return HttpResponseRedirect("/mousedb/breeding/") + return HttpResponseRedirect( breeding.get_absolute_url() ) else: formset = PupsFormSet(instance=breeding,) return render_to_response("breeding_change.html", {"formset":formset, 'breeding':breeding},context_instance=RequestContext(request))
Altered fields displayed in breeding_change view. Also changed redirect upon success
py
diff --git a/git/remote.py b/git/remote.py index <HASH>..<HASH> 100644 --- a/git/remote.py +++ b/git/remote.py @@ -186,7 +186,7 @@ class FetchInfo(object): FAST_FORWARD, ERROR = [1 << x for x in range(8)] # %c %-*s %-*s -> %s (%s) - re_fetch_result = re.compile("^\s*(.) (\[?[\w\s\.]+\]?)\s+(.+) -> ([/\w_\+\.\-#]+)( \(.*\)?$)?") + re_fetch_result = re.compile("^\s*(.) (\[?[\w\s\.$]+\]?)\s+(.+) -> ([/\w_\+\.\-$#]+)( \(.*\)?$)?") _flag_map = {'!': ERROR, '+': FORCED_UPDATE,
Allow "$" sign in fetch output lines
py
diff --git a/pelix/shell/core.py b/pelix/shell/core.py index <HASH>..<HASH> 100644 --- a/pelix/shell/core.py +++ b/pelix/shell/core.py @@ -186,7 +186,7 @@ class IOHandler(object): def prompt(self, prompt=None): """ Reads a line written by the user - + :param prompt: An optional prompt message :return: The read line, after a conversion to str """ @@ -913,7 +913,7 @@ class Shell(object): # Get the documentation string doc = inspect.getdoc(method) or "(Documentation missing)" - return ', '.join(args), ' '.join(doc.split()) + return ' '.join(args), ' '.join(doc.split()) def __print_command_help(self, io_handler, namespace, cmd_name):
Removed the ',' in the list of shell command arguments
py
diff --git a/hydpy/core/timetools.py b/hydpy/core/timetools.py index <HASH>..<HASH> 100644 --- a/hydpy/core/timetools.py +++ b/hydpy/core/timetools.py @@ -2730,8 +2730,10 @@ has already been set to `31`. def __init__(self, value: Union[str, Date] = ''): try: if isinstance(value, Date): + datetime = value.datetime + dict_ = vars(self) for name in self._PROPERTIES.keys(): - self.__dict__[name] = getattr(value, name) + dict_[name] = getattr(datetime, name) else: values = value.split('_') if not values[0].isdigit():
Speed up the initialisation of `TOY` objects based on `Date` objects (module `timetools`).
py
diff --git a/django_extensions/tests/uuid_field.py b/django_extensions/tests/uuid_field.py index <HASH>..<HASH> 100644 --- a/django_extensions/tests/uuid_field.py +++ b/django_extensions/tests/uuid_field.py @@ -48,7 +48,7 @@ class UUIDFieldTest(unittest.TestCase): def testUUIDField_pkAgregateCreate(self): j = TestAgregateModel.objects.create(a=6) self.assertEqual(j.a, 6) - self.assertIsInstance(j.pk, basestring) + self.assertIsInstance(j.pk, six.string_types) self.assertEqual(len(j.pk), 36) def testUUIDFieldManyToManyCreate(self):
Make assertion on pk type compatible with Python 3
py
diff --git a/functional_tests/conftest.py b/functional_tests/conftest.py index <HASH>..<HASH> 100644 --- a/functional_tests/conftest.py +++ b/functional_tests/conftest.py @@ -225,13 +225,17 @@ class Helpers: return newRole def waitOnJobByID(self, jobId): + sleepTime = 0 while True: loggingStuff = self.swimlane_instance.helpers.check_bulk_job_status( jobId) if (True in (ele['status'] == 'completed' for ele in loggingStuff)): break + elif ( sleepTime > 3): + raise Exception("Timedout waiting for the job to complete") else: time.sleep(0.1) + sleepTime += 0.1 def updateApp(self, appID): newapp = self.swimlane_instance.request('get', 'app/%s' % appID).json()
SPT-<I> do not infinite loop. (#<I>)
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -72,7 +72,7 @@ class InstallWithKernelspec(install): setup(name='SAS_kernel', version='2.1.0', - description='A SAS kernel for IPython', + description='A SAS kernel for Jupyter', long_description=open('README.rst', 'rb').read().decode('utf-8'), author='Jared Dean', license='Apache Software License',
inital push of doc build system
py
diff --git a/poet/poet.py b/poet/poet.py index <HASH>..<HASH> 100755 --- a/poet/poet.py +++ b/poet/poet.py @@ -13,7 +13,7 @@ spits out Homebrew resource stanzas. from __future__ import print_function import argparse from collections import OrderedDict -from hashlib import sha1 +from hashlib import sha256 import json import sys import urllib2 @@ -28,7 +28,7 @@ FORMULA_TEMPLATE = Template( """class {{ package.name|capitalize }} < Formula homepage "{{ package.homepage }}" url "{{ package.url }}" - sha1 "{{ package.checksum }}" + sha256 "{{ package.checksum }}" {% if resources %} {% for resource in resources %} @@ -80,8 +80,8 @@ def research_package(name, version=None): if url['packagetype'] == 'sdist': d['url'] = url['url'] f = urllib2.urlopen(url['url']) - d['checksum'] = sha1(f.read()).hexdigest() - d['checksum_type'] = 'sha1' + d['checksum'] = sha256(f.read()).hexdigest() + d['checksum_type'] = 'sha256' f.close() break return d
Replace sha1 with sha<I>
py
diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index <HASH>..<HASH> 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -404,7 +404,7 @@ def _get_yum_config(): conf[opt] = cp.get("main", opt) else: log.warning( - "Could not find [main] section in %s, using internal " "defaults", fn + "Could not find [main] section in %s, using internal defaults", fn ) return conf @@ -1691,9 +1691,7 @@ def install( holds = list_holds(full=False) except SaltInvocationError: holds = [] - log.debug( - "Failed to get holds, versionlock plugin is probably not " "installed" - ) + log.debug("Failed to get holds, versionlock plugin is probably not installed") unhold_prevented = [] @contextlib.contextmanager @@ -2346,7 +2344,7 @@ def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W06 else: ret[target][ "comment" - ] = "Package {} was unable to be " "unheld.".format(target) + ] = "Package {} was unable to be unheld.".format(target) else: ret[target].update(result=True) ret[target]["comment"] = "Package {} is not being held.".format(target)
Clean up some split strings in yumpkg
py
diff --git a/libact/base/interfaces.py b/libact/base/interfaces.py index <HASH>..<HASH> 100644 --- a/libact/base/interfaces.py +++ b/libact/base/interfaces.py @@ -26,13 +26,13 @@ class Model(metaclass=ABCMeta): #TODO: documentation @abstractmethod - def fit(self, dataset): + def fit(self, dataset, *args, **kwargs): pass @abstractmethod - def predict(self, feature): + def predict(self, feature, *args, **kwargs): pass @abstractmethod - def score(self, testing_dataset): + def score(self, testing_dataset, *args, **kwargs): pass
add Model method arguments for passing to sklearn methods
py