diff
stringlengths 139
3.65k
| message
stringlengths 8
627
| diff_languages
stringclasses 1
value |
|---|---|---|
diff --git a/bidict/_common.py b/bidict/_common.py
index <HASH>..<HASH> 100644
--- a/bidict/_common.py
+++ b/bidict/_common.py
@@ -56,14 +56,14 @@ class BidirectionalMapping(Mapping):
Returns True if only its start is not None and False if only its stop
is not None.
"""
- start_missing = slice.start is None
- start_found = not start_missing
- stop_missing = slice.stop is None
- step_found = slice.step is not None
-
- if step_found or start_missing == stop_missing:
- raise TypeError('Slice must specify only either start or stop')
- return start_found
+ if slice.step is not None:
+ raise TypeError('Slice may not specify step')
+ none_start = slice.start is None
+ none_stop = slice.stop is None
+ if none_start == none_stop:
+ raise TypeError('Exactly one of slice start or stop must be None '
+ 'and the other must not be')
+ return not none_start
def __getitem__(self, keyorslice):
"""
|
improve TypeError messages with invalid slices + small refactor
|
py
|
diff --git a/src/pyshark/capture/capture.py b/src/pyshark/capture/capture.py
index <HASH>..<HASH> 100644
--- a/src/pyshark/capture/capture.py
+++ b/src/pyshark/capture/capture.py
@@ -130,7 +130,7 @@ class Capture(object):
try:
self.apply_on_packets(keep_packet, timeout=timeout, packet_count=packet_count)
self.loaded = True
- except asyncio.exceptions.TimeoutError:
+ except (concurrent.futures.TimeoutError, asyncio.exceptions.TimeoutError):
pass
def set_debug(self, set_to=True, log_level=logging.DEBUG):
@@ -410,7 +410,7 @@ class Capture(object):
try:
process.kill()
return await asyncio.wait_for(process.wait(), 1)
- except asyncio.exceptions.TimeoutError:
+ except (concurrent.futures.TimeoutError, asyncio.exceptions.TimeoutError):
self._log.debug("Waiting for process to close failed, may have zombie process.")
except ProcessLookupError:
pass
|
capture `TimeoutError` for both python<I> and below refer to <URL> now correctly waits for cancellation when using an instance of asyncio.Task. Previously, upon reaching timeout, it was cancelled and immediately returned. (Contributed by Elvis Pranskevichus in bpo-<I>.) This change make `TimeoutError` different.
|
py
|
diff --git a/smuggler/views.py b/smuggler/views.py
index <HASH>..<HASH> 100644
--- a/smuggler/views.py
+++ b/smuggler/views.py
@@ -10,6 +10,7 @@ import os
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import CommandError
+from django.core.serializers.base import DeserializationError
from django.db import IntegrityError
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
@@ -112,7 +113,7 @@ def load_data(request):
'file_count': len(data)
}
messages.add_message(request, messages.INFO, user_msg)
- except (IntegrityError, ObjectDoesNotExist), e:
+ except (IntegrityError, ObjectDoesNotExist, DeserializationError), e:
messages.add_message(request, messages.ERROR,
_(u'An exception occurred while loading data: %s')
% unicode(e))
|
Handle DeserializationError in view and show it as a error message
|
py
|
diff --git a/salt/minion.py b/salt/minion.py
index <HASH>..<HASH> 100644
--- a/salt/minion.py
+++ b/salt/minion.py
@@ -599,10 +599,10 @@ class Matcher(object):
if comps[0] not in self.opts['grains']:
log.error('Got unknown grain from master: {0}'.format(comps[0]))
return False
- if isinstance(self.opts['grains'][comps[0], list):
+ if isinstance(self.opts['grains'][comps[0]], list):
# We are matching a single component to a single list member
for member in self.opts['grains'][comps[0]:
- if re.match(comps[1], str(self.opts['grains'][comps[0]])):
+ if re.match(comps[1], str(member)):
return True
return False
return bool(re.match(comps[1], str(self.opts['grains'][comps[0]])))
|
Fix issues in code from previous commit I might need to get some sleep in...
|
py
|
diff --git a/tests/test_convert_img2pdf.py b/tests/test_convert_img2pdf.py
index <HASH>..<HASH> 100644
--- a/tests/test_convert_img2pdf.py
+++ b/tests/test_convert_img2pdf.py
@@ -16,13 +16,14 @@ class TestConvertImg2Pdf(unittest.TestCase):
self.temp = TemporaryDirectory()
def tearDown(self):
+ self.temp.cleanup()
if os.path.exists(self.pdf):
os.remove(self.pdf)
@Timer.decorator
def test_convert_img2pdf(self):
"""Create a 'flattened' pdf file without layers."""
- pdf = IMG2PDF([self.img_path], destination=test_data_dir, tempdir=self.temp.name).save()
+ pdf = IMG2PDF([self.img_path], destination=test_data_dir).save(remove_temps=False)
# Assert pdf file exists
self.assertTrue(os.path.exists(pdf))
|
FIX issues with assertions in test_convert_img2pdf method
|
py
|
diff --git a/troposphere/ask.py b/troposphere/ask.py
index <HASH>..<HASH> 100644
--- a/troposphere/ask.py
+++ b/troposphere/ask.py
@@ -4,17 +4,15 @@
# See LICENSE file for full license.
from . import AWSObject, AWSProperty
-from .validators import json_checker
-class AuthenticationConfiguration(AWSProperty):
+class Overrides(AWSProperty):
props = {
- 'DefaultAttributes': (json_checker, False),
- 'DeviceTemplates': (json_checker, False),
+ 'Manifest': (dict, False),
}
-class SkillPackage(AWSProperty):
+class AuthenticationConfiguration(AWSProperty):
props = {
'ClientId': (basestring, True),
'ClientSecret': (basestring, True),
@@ -22,6 +20,16 @@ class SkillPackage(AWSProperty):
}
+class SkillPackage(AWSProperty):
+ props = {
+ 'Overrides': (Overrides, False),
+ 'S3Bucket': (basestring, True),
+ 'S3BucketRole': (basestring, False),
+ 'S3Key': (basestring, True),
+ 'S3ObjectVersion': (basestring, False),
+ }
+
+
class Skill(AWSObject):
resource_type = "Alexa::ASK::Skill"
|
Update ASK to the latest AWS documentation (#<I>)
|
py
|
diff --git a/djstripe/settings.py b/djstripe/settings.py
index <HASH>..<HASH> 100644
--- a/djstripe/settings.py
+++ b/djstripe/settings.py
@@ -8,16 +8,12 @@
"""
from __future__ import unicode_literals
-import sys
-
from django.apps import apps as django_apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.module_loading import import_string
-PY3 = sys.version > "3"
-
def get_callback_function(setting_name, default=None):
"""
|
Remove unused import/variable in settings.py
|
py
|
diff --git a/tests/test_integration.py b/tests/test_integration.py
index <HASH>..<HASH> 100644
--- a/tests/test_integration.py
+++ b/tests/test_integration.py
@@ -1347,11 +1347,17 @@ def assert_reproducible_build(args):
with temporary_dir() as td:
pex1 = os.path.join(td, '1.pex')
pex2 = os.path.join(td, '2.pex')
+
# Note that we change the `PYTHONHASHSEED` to ensure that there are no issues resulting
# from the random seed, such as data structures, as Tox sets this value by default. See
# https://tox.readthedocs.io/en/latest/example/basic.html#special-handling-of-pythonhashseed.
def create_pex(path, seed):
- run_pex_command(args + ['-o', path, '--no-compile'], env=make_env(PYTHONHASHSEED=seed))
+ result = run_pex_command(
+ args + ['-o', path, '--no-compile'],
+ env=make_env(PYTHONHASHSEED=seed)
+ )
+ result.assert_success()
+
create_pex(pex1, seed=111)
# We sleep to ensure that there is no non-reproducibility from timestamps or
# anything that may depend on the system time. Note that we must sleep for at least
|
Refactor reproducible build tests to assert that the original pex command succeeded (#<I>) If the `run_pex_command` failed, this would not be surfaced until `zipfile.py` would try to read the zipfile and find that the file is not existing. Instead, we should eagerly assert that the command worked and output stdout and stderr if it did not.
|
py
|
diff --git a/mcpack/datapack.py b/mcpack/datapack.py
index <HASH>..<HASH> 100644
--- a/mcpack/datapack.py
+++ b/mcpack/datapack.py
@@ -127,6 +127,7 @@ class LootTable(JsonItem):
pools: list = field(default_factory=list)
type: str = 'generic'
+ functions: Optional[list] = None
@dataclass
|
Add functions to LootTable as Optional[list]
|
py
|
diff --git a/pyrogram/client/methods/chats/get_chat_member.py b/pyrogram/client/methods/chats/get_chat_member.py
index <HASH>..<HASH> 100644
--- a/pyrogram/client/methods/chats/get_chat_member.py
+++ b/pyrogram/client/methods/chats/get_chat_member.py
@@ -55,7 +55,7 @@ class GetChatMember(BaseClient):
)
for member in pyrogram.ChatMembers._parse(self, full_chat).chat_members:
- if member.user.id == user_id.user_id:
+ if member.user.is_self:
return member
else:
raise errors.UserNotParticipant
|
Fix get_chat_member not working when passing "me" in basic groups
|
py
|
diff --git a/iopipe/iopipe.py b/iopipe/iopipe.py
index <HASH>..<HASH> 100644
--- a/iopipe/iopipe.py
+++ b/iopipe/iopipe.py
@@ -50,7 +50,7 @@ class IOpipe(object):
if v in dir(context):
self.report[aws_key][k] = getattr(context, v)
- if context and 'get_remaining_time_in_millis' in context:
+ if context and 'get_remaining_time_in_millis' in dir(context):
try:
self.report['aws']['getRemainingTimeInMillis'] = context.get_remaining_time_in_millis()
except Exception as aws_lambda_err: pass # @TODO handle this more gracefully
|
Fixed bug in method check for lambda context
|
py
|
diff --git a/werkzeug/debug/render.py b/werkzeug/debug/render.py
index <HASH>..<HASH> 100644
--- a/werkzeug/debug/render.py
+++ b/werkzeug/debug/render.py
@@ -11,6 +11,7 @@
import pprint
from os.path import dirname, join
+from werkzeug.utils import escape
from werkzeug.minitmpl import Template
from werkzeug.debug.util import Namespace
@@ -67,7 +68,7 @@ def var_table(var):
if len(line) > 79:
line = line[:79] + '...'
tmp.append(line)
- return '\n'.join(tmp)
+ return escape('\n'.join(tmp))
# dicts
if isinstance(var, dict) or hasattr(var, 'items'):
@@ -77,7 +78,8 @@ def var_table(var):
else:
typ = 'dict'
value.sort()
- value = [(repr(key), safe_pformat(val)) for key, val in value]
+ value = [(escape(repr(key)), safe_pformat(val))
+ for key, val in value]
# lists
elif isinstance(var, list):
@@ -90,7 +92,7 @@ def var_table(var):
# others
else:
typ = 'simple'
- value = repr(var)
+ value = escape(repr(var))
return t_vartable.render(type=typ, value=value)
|
[svn] added escaping for var table --HG-- branch : trunk
|
py
|
diff --git a/valigator/mailutils.py b/valigator/mailutils.py
index <HASH>..<HASH> 100644
--- a/valigator/mailutils.py
+++ b/valigator/mailutils.py
@@ -2,6 +2,11 @@ import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
+"""Mailing utils.
+Use this object to send emails.
+It uses the mail parameters from the configuration file.
+"""
+
class MailUtils(object):
|
Added class documentation for mailutils.
|
py
|
diff --git a/h2o-py/h2o/frame.py b/h2o-py/h2o/frame.py
index <HASH>..<HASH> 100644
--- a/h2o-py/h2o/frame.py
+++ b/h2o-py/h2o/frame.py
@@ -108,9 +108,6 @@ class H2OFrame(Keyed):
self._upload_python_object(python_obj, destination_frame, header, separator,
column_names, column_types, na_strings, skipped_columns)
- def __del__(self):
- h2o.remove(self)
-
@staticmethod
def _expr(expr, cache=None):
# TODO: merge this method with `__init__`
@@ -1851,6 +1848,7 @@ class H2OFrame(Keyed):
splits.append(tmp_slice)
i += 1
+ h2o.remove(tmp_runif)
del tmp_runif
return splits
|
Deleting frame creating temporarily during split locally.
|
py
|
diff --git a/tornado/iostream.py b/tornado/iostream.py
index <HASH>..<HASH> 100644
--- a/tornado/iostream.py
+++ b/tornado/iostream.py
@@ -359,7 +359,7 @@ class IOStream(object):
raise IOError("Stream is closed")
def _add_io_state(self, state):
- if socket is None:
+ if self.socket is None:
# connection has been closed, so there can be no future events
return
if not self._state & state:
|
Fix typo from commit 2b<I>fac Closes #<I>.
|
py
|
diff --git a/integration_test/integration_tests.py b/integration_test/integration_tests.py
index <HASH>..<HASH> 100644
--- a/integration_test/integration_tests.py
+++ b/integration_test/integration_tests.py
@@ -200,7 +200,8 @@ def test_integration():
get_all_templates_for_type(client, EMAIL_TYPE)
get_all_templates_for_type(client, SMS_TYPE)
- get_received_text_messages()
+ if (os.environ['INBOUND_SMS_QUERY_KEY']):
+ get_received_text_messages()
print("notifications-python-client integration tests are successful")
|
Test received messages if INBOUND_SMS_QUERY_KEY set
|
py
|
diff --git a/bbc_tracklist.py b/bbc_tracklist.py
index <HASH>..<HASH> 100755
--- a/bbc_tracklist.py
+++ b/bbc_tracklist.py
@@ -4,7 +4,7 @@
# bbc_radio_tracklisting_downloader: Download radio tracklistings from
# BBC's website and outputs to a text file.
-# Copyright 2013 Steven Maude
+# Copyright 2015 Steven Maude
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
|
It's now <I>, not <I>; bump date
|
py
|
diff --git a/scanpy/preprocessing/_highly_variable_genes.py b/scanpy/preprocessing/_highly_variable_genes.py
index <HASH>..<HASH> 100644
--- a/scanpy/preprocessing/_highly_variable_genes.py
+++ b/scanpy/preprocessing/_highly_variable_genes.py
@@ -256,7 +256,7 @@ def _highly_variable_genes_single_batch(
) / disp_mad_bin[df['mean_bin'].values].values
else:
raise ValueError('`flavor` needs to be "seurat" or "cell_ranger"')
- dispersion_norm = df['dispersions_norm'].values.astype('float32')
+ dispersion_norm = df['dispersions_norm'].values
if n_top_genes is not None:
dispersion_norm = dispersion_norm[~np.isnan(dispersion_norm)]
dispersion_norm[
|
fixes type bug, now tagging exactly n_top_genes (#<I>) Due to type conversion, often <I> genes gets tagged as 'highly_variable' even though n_top_genes is set to <I>. Now, this minor change guarantees that exactly (!) n_top_genes get tagged.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -8,8 +8,22 @@ from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
-
-import fc
+# To scrape version information
+import re
+
+def find_version(file_path):
+ """
+ Scrape version information from specified file path.
+
+ """
+ with open(file_path, 'r') as f:
+ file_contents = f.read()
+ version_match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]",
+ file_contents, re.M)
+ if version_match:
+ return version_match.group(1)
+ else:
+ raise RuntimeError("unable to find version string")
here = path.abspath(path.dirname(__file__))
@@ -23,7 +37,7 @@ setup(
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
- version=fc.__version__,
+ version=find_version(path.join(here, 'fc', '__init__.py')),
description='Flow cytometry library',
long_description=long_description,
|
Updated setup to scrape version from __init__.py Fixes castillohair/fc#<I>.
|
py
|
diff --git a/lib/webcomment_webinterface.py b/lib/webcomment_webinterface.py
index <HASH>..<HASH> 100644
--- a/lib/webcomment_webinterface.py
+++ b/lib/webcomment_webinterface.py
@@ -118,11 +118,15 @@ class WebInterfaceCommentsPages(WebInterfaceDirectory):
uid=uid)
unordered_tabs = get_detailed_page_tabs(get_colID(guess_primary_collection_of_a_record(self.recid)),
- self.recid)
+ self.recid,
+ ln=argd['ln'])
ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in unordered_tabs.iteritems()]
ordered_tabs_id.sort(lambda x,y: cmp(x[1],y[1]))
+ link_ln = ''
+ if argd['ln'] != cdslang:
+ link_ln = '?ln=%s' % argd['ln']
tabs = [(unordered_tabs[tab_id]['label'], \
- '%s/record/%s/%s' % (weburl, self.recid, tab_id), \
+ '%s/record/%s/%s%s' % (weburl, self.recid, tab_id, link_ln), \
tab_id in ['comments', 'reviews'],
unordered_tabs[tab_id]['enabled']) \
for (tab_id, order) in ordered_tabs_id
|
Added 'citations' tab. 'ln' parameter is preserved by tabs.
|
py
|
diff --git a/_pydevd_bundle/pydevd_vars.py b/_pydevd_bundle/pydevd_vars.py
index <HASH>..<HASH> 100644
--- a/_pydevd_bundle/pydevd_vars.py
+++ b/_pydevd_bundle/pydevd_vars.py
@@ -571,9 +571,9 @@ def dataframe_to_xml(df, name, roffset, coffset, rows, cols, format):
# need to precompute column bounds here before slicing!
col_bounds = [None] * cols
for col in range(cols):
- dtype = df.dtypes.iloc[col].kind
+ dtype = df.dtypes.iloc[coffset + col].kind
if dtype in "biufc":
- cvalues = df.iloc[:, col]
+ cvalues = df.iloc[:, coffset + col]
bounds = (cvalues.min(), cvalues.max())
else:
bounds = (0, 0)
|
PY-<I>: Min and max values for columns <I> and above were computed incorrectly sometimes causing exceptions during rendering. (cherry picked from commit <I>f)
|
py
|
diff --git a/rawdisk/util/rawstruct.py b/rawdisk/util/rawstruct.py
index <HASH>..<HASH> 100644
--- a/rawdisk/util/rawstruct.py
+++ b/rawdisk/util/rawstruct.py
@@ -48,7 +48,10 @@ class RawStruct(object):
elif filename is not None:
with open(filename, 'rb') as f:
f.seek(offset)
- self._data = f.read(length)
+ if length is None:
+ self._data = f.read()
+ else:
+ self._data = f.read(length)
@property
def data(self):
|
Bugfix for rawstruct If only filename is supplied, read all file contents
|
py
|
diff --git a/__pkginfo__.py b/__pkginfo__.py
index <HASH>..<HASH> 100644
--- a/__pkginfo__.py
+++ b/__pkginfo__.py
@@ -59,8 +59,8 @@ install_requires = [
"pygments %s" % pygments_version,
"spark_parser >= 1.8.9, <1.9.0",
"tracer >= 0.3.2",
- "uncompyle6 >= 3.6.6",
- "xdis >= 4.5.0, < 4.6.0",
+ "uncompyle6 >= 3.6.7",
+ "xdis >= 4.5.1, < 4.6.0",
]
license = "GPL3"
mailing_list = "python-debugger@googlegroups.com"
|
Bump uncompyle6 version
|
py
|
diff --git a/warrant/__init__.py b/warrant/__init__.py
index <HASH>..<HASH> 100644
--- a/warrant/__init__.py
+++ b/warrant/__init__.py
@@ -241,11 +241,12 @@ class Cognito(object):
"""
self.client = session.client('cognito-idp')
- def check_token(self):
+ def check_token(self, renew=True):
"""
Checks the exp attribute of the access_token and either refreshes
the tokens by calling the renew_access_tokens method or does nothing
- :return: None
+ :param renew: bool indicating whether to refresh on expiration
+ :return: bool indicating whether access_token has expired
"""
if not self.access_token:
raise AttributeError('Access Token Required to Check Token')
@@ -253,9 +254,12 @@ class Cognito(object):
dec_access_token = jwt.get_unverified_claims(self.access_token)
if now > datetime.datetime.fromtimestamp(dec_access_token['exp']):
- self.renew_access_token()
- return True
- return False
+ expired = True
+ if renew:
+ self.renew_access_token()
+ else:
+ expired = False
+ return expired
def register(self, username, password, attr_map=None, **kwargs):
"""
|
Add 'renew' flag to .check_token method This allows users of the warrant client to create different workflows on expired tokens. Function doc comments also updated.
|
py
|
diff --git a/synapse/daemon.py b/synapse/daemon.py
index <HASH>..<HASH> 100644
--- a/synapse/daemon.py
+++ b/synapse/daemon.py
@@ -238,10 +238,10 @@ class Daemon(EventBus,DmonConf):
if item == None:
raise NoSuchObj(name)
- if opts.get('onfini'):
- self.onfini(item.fini)
-
- self.share(asname,item)
+ # keeping as "onfini" for backward compat
+ # FIXME CHANGE WITH MAJOR REV
+ fini = opts.get('onfini',False)
+ self.share(asname,item,fini=fini)
# process a few daemon specific options
for url in conf.get('listen',()):
@@ -500,7 +500,7 @@ class Daemon(EventBus,DmonConf):
'''
return list(self._dmon_links)
- def share(self, name, item):
+ def share(self, name, item, fini=False):
'''
Share an object via the telepath protocol.
@@ -511,3 +511,6 @@ class Daemon(EventBus,DmonConf):
'''
self.shared[name] = item
+ if fini:
+ self.onfini( item.fini )
+
|
updated share() method to allow fini
|
py
|
diff --git a/multiqc/modules/bcl2fastq/bcl2fastq.py b/multiqc/modules/bcl2fastq/bcl2fastq.py
index <HASH>..<HASH> 100644
--- a/multiqc/modules/bcl2fastq/bcl2fastq.py
+++ b/multiqc/modules/bcl2fastq/bcl2fastq.py
@@ -458,4 +458,9 @@ class MultiqcModule(BaseMultiqcModule):
bar_data[barcode] = OrderedDict(
[(pbc, count) for pbc in paste_key]
)
+ bar_data = OrderedDict(sorted(
+ iteritems(bar_data),
+ key=lambda x: sum(x[1].values()),
+ reverse=True
+ ))
return bar_data
|
[MODIF] Sort undetermined index plot
|
py
|
diff --git a/djcelery/management/commands/celeryd_multi.py b/djcelery/management/commands/celeryd_multi.py
index <HASH>..<HASH> 100644
--- a/djcelery/management/commands/celeryd_multi.py
+++ b/djcelery/management/commands/celeryd_multi.py
@@ -17,4 +17,4 @@ class Command(CeleryCommand):
def run_from_argv(self, argv):
argv.append("--cmd=%s celeryd_detach" % (argv[0], ))
- celeryd_multi.MultiTool()(argv[2:])
+ celeryd_multi.MultiTool().execute_from_commandline(argv[2:])
|
Fixed typo in celeryd_multi command
|
py
|
diff --git a/visidata/freeze.py b/visidata/freeze.py
index <HASH>..<HASH> 100644
--- a/visidata/freeze.py
+++ b/visidata/freeze.py
@@ -10,7 +10,16 @@ globalCommand("zg'", "gz'")
def StaticColumn(rows, col):
c = deepcopy(col)
- frozenData = {id(r):col.getValue(r) for r in rows}
+ frozenData = {}
+ @async
+ def _calcRows(sheet):
+ for r in Progress(rows):
+ try:
+ frozenData[id(r)] = col.getValue(r)
+ except Exception as e:
+ frozenData[id(r)] = e
+
+ _calcRows(col.sheet)
c.calcValue=lambda row,d=frozenData: d[id(row)]
c.setter=lambda col,row,val,d=frozenData: setitem(d, id(row), val)
c.name = c.name + '_frozen'
|
[freeze-column] add Progress and catch Exceptions
|
py
|
diff --git a/opf.py b/opf.py
index <HASH>..<HASH> 100644
--- a/opf.py
+++ b/opf.py
@@ -2,11 +2,17 @@ import os
import os.path
import utils
import dublincore
+from xml.dom.minidom import getDOMImplementation
+
+class contentOPF(object):
+ '''A class to represent the OPF document.'''
+
+ def __init__(self):
+ pass
def generateOPF(article, dirname):
'''Creates the content.opf document from an Article instance issued as
input'''
- from xml.dom.minidom import getDOMImplementation
#Initiate a DOMImplementation for the OPF
impl = getDOMImplementation()
|
Beginnings of new contentOPF class
|
py
|
diff --git a/xport.py b/xport.py
index <HASH>..<HASH> 100644
--- a/xport.py
+++ b/xport.py
@@ -265,19 +265,28 @@ def _read_observations(fp, variables):
padding = b' '
sentinel = padding * blocksize
+ # at the end of the file, the last block should be all padding
+ # in Python 3, looping over a bytes object gives integers, not bytes
+ # therefore, instead of ``all(c = padding for c in block)``
+ # we must write ``len(block) == block.count(padding)``
+
count = 0
while True:
block = fp.read(blocksize)
if len(block) < blocksize:
- assert len(block) == 80 - (count * blocksize % 80)
+ if not len(block) == block.count(padding):
+ raise ValueError('Incomplete record, {!r}'.format(block))
+ remainder = count * blocksize % 80
+ if remainder and len(block) != 80 - remainder:
+ raise ValueError('Insufficient padding at end of file')
break
elif block == sentinel:
rest = fp.read()
- if len(rest) == rest.count(padding):
- assert blocksize + len(rest) == 80 - (count * blocksize % 80)
- break
- else:
+ if not len(rest) == rest.count(padding):
raise NotImplementedError('Cannot read multiple members.')
+ if blocksize + len(rest) != 80 - (count * blocksize % 80):
+ raise ValueError('Incorrect padding at end of file')
+ break
count += 1
chunks = [block[v.position : v.position + v.size] for v in variables]
|
Fix check that file end is correctly padded If the number of records times the blocksize is a multiple of <I>, the file can end without any padding. Also, the check that all bytes are padding in the last chunk read deserves a comment explaining the awkwardness to avoid the bytes/int confusion.
|
py
|
diff --git a/src/sos/jupyter/kernel.py b/src/sos/jupyter/kernel.py
index <HASH>..<HASH> 100644
--- a/src/sos/jupyter/kernel.py
+++ b/src/sos/jupyter/kernel.py
@@ -1672,7 +1672,7 @@ Available subkernels:\n{}'''.format(
from sos.utils import pexpect_run
try:
with self.redirect_sos_io():
- pexpect_run(cmd, shell=True, win_width=40 if self.cell_idx < 0 else 80)
+ pexpect_run(cmd, shell=True, win_width=40 if isinstance(self.cell_idx, int) and self.cell_idx < 0 else 80)
except Exception as e:
self.warn(e)
|
Fix window width for unit test for which self.cell_idx is None
|
py
|
diff --git a/eternalegypt/eternalegypt.py b/eternalegypt/eternalegypt.py
index <HASH>..<HASH> 100644
--- a/eternalegypt/eternalegypt.py
+++ b/eternalegypt/eternalegypt.py
@@ -228,7 +228,8 @@ class LB2120:
result.serial_number = data['general']['FSN']
result.usage = data['wwan']['dataUsage']['generic']['dataTransferred']
if 'failover' in data:
- result.upstream = data['failover']['backhaul']
+ if 'backhaul' in data['failover']:
+ result.upstream = data['failover']['backhaul']
result.wire_connected = data['failover']['wanConnected']
result.mobile_connected = (data['wwan']['connection'] == 'Connected')
result.connection_text = data['wwan']['connectionText']
|
Test for existence of backhaul field (#<I>) This is apparently missing with Netgear MR<I> (Nighthawk M1).
|
py
|
diff --git a/cassandra/cluster.py b/cassandra/cluster.py
index <HASH>..<HASH> 100644
--- a/cassandra/cluster.py
+++ b/cassandra/cluster.py
@@ -393,7 +393,9 @@ class Cluster(object):
contact_points = ['127.0.0.1']
"""
- The list of contact points to try connecting for cluster discovery.
+ The list of contact points to try connecting for cluster discovery. A
+ contact point can be a string (ip, hostname) or a
+ :class:`.connection.EndPoint` instance.
Defaults to loopback interface.
|
Minor docs improvement about a cluster contact point
|
py
|
diff --git a/pyemma/_base/parallel.py b/pyemma/_base/parallel.py
index <HASH>..<HASH> 100644
--- a/pyemma/_base/parallel.py
+++ b/pyemma/_base/parallel.py
@@ -68,22 +68,20 @@ class NJobsMixIn(object):
@n_jobs.setter
def n_jobs(self, val):
""" set number of jobs/threads to use via assignment of data.
+
Parameters
----------
val: int or None
a positive int for the number of jobs. Or None to usage all available resources.
- Notes
- -----
+ If set to None, this will use all available CPUs or respect the environment variable "OMP_NUM_THREADS"
+ to obtain a job number.
"""
- from pyemma.util.reflection import get_default_args
- def_args = get_default_args(self.__init__)
-
- # default value from constructor not valid?
- if val is None or def_args['n_jobs'] is None:
+ if val is None:
import psutil
import os
+ # TODO: aint it better to use a distinct variable for this use case eg. PYEMMA_NJOBS in order to avoid multiplying OMP threads with njobs?
omp_threads_from_env = os.getenv('OMP_NUM_THREADS', None)
n_cpus = psutil.cpu_count()
if omp_threads_from_env:
|
[njobs_mixin] fix preference of env over n_jobs=None.
|
py
|
diff --git a/pyt/cfg.py b/pyt/cfg.py
index <HASH>..<HASH> 100644
--- a/pyt/cfg.py
+++ b/pyt/cfg.py
@@ -58,8 +58,10 @@ class Node(object):
class CFG(ast.NodeVisitor):
- nodes = list()
+ def __init__(self):
+ self.nodes = list()
+
def create(self, ast):
'''
Creates a Control Flow Graph.
|
nodes was a class variable DOH!
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@ setup_requires = [
]
install_requires = [
- 'inspire-schemas~=59.0,>=59.0.0',
+ 'inspire-schemas~=60.0,>=60.0.1',
'inspire-utils~=3.0,>=3.0.0',
'pypeg2~=2.0,>=2.15.2',
'python-dateutil~=2.0,>=2.6.1',
|
setup: bump inspire-schemas to <I>.*
|
py
|
diff --git a/djangocms_page_meta/admin.py b/djangocms_page_meta/admin.py
index <HASH>..<HASH> 100644
--- a/djangocms_page_meta/admin.py
+++ b/djangocms_page_meta/admin.py
@@ -3,7 +3,6 @@ from __future__ import absolute_import, print_function, unicode_literals
from cms.admin.pageadmin import PageAdmin
from cms.extensions import PageExtensionAdmin, TitleExtensionAdmin
-from cms.models import Page
from cms.utils import get_language_from_request
from django.conf import settings
from django.contrib import admin
@@ -104,5 +103,3 @@ def get_form(self, request, obj=None, **kwargs):
return form
PageAdmin.get_form = get_form
-
-admin.site.unregister(Page)
|
Missed an old unregister command
|
py
|
diff --git a/lightsteem/helpers/rc.py b/lightsteem/helpers/rc.py
index <HASH>..<HASH> 100644
--- a/lightsteem/helpers/rc.py
+++ b/lightsteem/helpers/rc.py
@@ -22,14 +22,18 @@ class ResourceCredit:
signed_tx_hex = self.client.get_transaction_hex(tx)
tx_size = len(bytes.fromhex(signed_tx_hex))
- chain_props = self.client(
- 'condenser_api').get_dynamic_global_properties()
+ self.client('condenser_api').get_dynamic_global_properties(
+ batch=True)
+ self.client('rc_api').get_resource_params(batch=True)
+ self.client('rc_api').get_resource_pool(batch=True)
+
+ chain_props, resource_params, resource_pool = self.client.\
+ process_batch()
+
+ resource_pool = resource_pool["resource_pool"]
+
total_vesting_shares = int(
Amount(chain_props["total_vesting_shares"]).amount)
- resource_params = self.client(
- 'rc_api').get_resource_params()
- resource_pool = self.client('rc_api').get_resource_pool()[
- "resource_pool"]
rc_regen = total_vesting_shares // (
STEEM_RC_REGEN_TIME // STEEM_BLOCK_INTERVAL)
model = RCModel(resource_params=resource_params,
|
Use batch calls to optimize rc cost calculation
|
py
|
diff --git a/server/system.py b/server/system.py
index <HASH>..<HASH> 100644
--- a/server/system.py
+++ b/server/system.py
@@ -49,6 +49,10 @@ def parseBlastResult(data, session, lineLenght = 60):
row["identity"] = "{0:.2f}".format(float(row["identity"]) / blastResult["BlastOutput2"][0]["report"]["results"]["search"]["query_len"])
row["coverage"] = "{0:.2f}".format( float(row["align_len"]-row["gaps"]) / blastResult["BlastOutput2"][0]["report"]["results"]["search"]["query_len"])
+ if blastResult["BlastOutput2"][0]["report"]["program"] == "blastx":
+ row["identity"] = "{0:.2f}".format( 3 * float(row["identity"]) )
+ row["coverage"] = "{0:.2f}".format( 3 * float(row["coverage"]) )
+
row["qseq"] = splitString(row["qseq"], lineLenght )
row["hseq"] = splitString(row["hseq"], lineLenght )
row["midline"] = [ s.replace(" ", " ") for s in splitString(row["midline"], lineLenght )]
|
Adjusting coverage/identity scores for blastx(multiplying by 3)
|
py
|
diff --git a/vcr/matchers.py b/vcr/matchers.py
index <HASH>..<HASH> 100644
--- a/vcr/matchers.py
+++ b/vcr/matchers.py
@@ -38,16 +38,16 @@ def headers(r1, r2):
return r1.headers == r2.headers
-def _log_matches(matches):
+def _log_matches(r1, r2, matches):
differences = [m for m in matches if not m[0]]
if differences:
log.debug(
- 'Requests differ according to the following matchers: ' +
- str(differences)
+ "Requests {0} and {1} differ according to "
+ "the following matchers: {2}".format(r1, r2, differences)
)
def requests_match(r1, r2, matchers):
matches = [(m(r1, r2), m) for m in matchers]
- _log_matches(matches)
+ _log_matches(r1, r2, matches)
return all([m[0] for m in matches])
|
better logging when matches aren't working.
|
py
|
diff --git a/vlcp/utils/dataobject.py b/vlcp/utils/dataobject.py
index <HASH>..<HASH> 100644
--- a/vlcp/utils/dataobject.py
+++ b/vlcp/utils/dataobject.py
@@ -339,9 +339,11 @@ def dump(obj, attributes = True, _refset = None):
clsname = getattr(cls, '__module__', '<unknown>') + '.' + getattr(cls, '__name__', '<unknown>')
baseresult = {'_type': clsname, '_key': obj.getkey()}
if not attributes:
+ _refset.remove(id(obj))
return baseresult
else:
baseresult.update((k,dump(v, attributes, _refset)) for k,v in vars(obj).items() if k[:1] != '_')
+ _refset.remove(id(obj))
return baseresult
elif isinstance(obj, ReferenceObject):
if obj._ref is not None:
|
bug fix: circle detect is not working correctly
|
py
|
diff --git a/bumpy.py b/bumpy.py
index <HASH>..<HASH> 100644
--- a/bumpy.py
+++ b/bumpy.py
@@ -8,6 +8,7 @@ CONFIG = {
'cli': False,
'abbrev': True,
+ 'suppress': (),
}
LOCALE = {
@@ -80,7 +81,7 @@ class _Task:
return _highlight('[' + self.name + ']', color)
def __print(self, id, *args):
- if ('all' not in self.suppress) and (id not in self.suppress):
+ if ('all' not in self.suppress) and (id not in self.suppress) and ('all' not in CONFIG['suppress']) and (id not in CONFIG['suppress']):
print LOCALE[id].format(*args)
def match(self, name):
|
Add a config option to suppress messages for all commands
|
py
|
diff --git a/doc/conf.py b/doc/conf.py
index <HASH>..<HASH> 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -30,7 +30,7 @@ sys.path.insert(0, os.path.abspath('..'))
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ['sphinx.ext.autodoc']
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
|
Enable Sphinx viewcode extension (links to source code)
|
py
|
diff --git a/ics/event.py b/ics/event.py
index <HASH>..<HASH> 100644
--- a/ics/event.py
+++ b/ics/event.py
@@ -147,7 +147,6 @@ class Event(Component):
| Will return a timedelta object.
| May be set to anything that timedelta() understands.
| May be set with a dict ({"days":2, "hours":6}).
- | If dict is used, both `days` and `hours` are needed.
| If set to a non null value, removes any already
existing end time.
"""
@@ -156,7 +155,7 @@ class Event(Component):
@duration.setter
def duration(self, value):
if type(value) is dict:
- value = timedelta(days=+value['days'], hours=+value['hours'])
+ value = timedelta(**value)
else:
value = timedelta(value)
|
[fix] More efficient code for duration setter
|
py
|
diff --git a/squad/core/migrations/0116_make_group_membership_unique.py b/squad/core/migrations/0116_make_group_membership_unique.py
index <HASH>..<HASH> 100644
--- a/squad/core/migrations/0116_make_group_membership_unique.py
+++ b/squad/core/migrations/0116_make_group_membership_unique.py
@@ -6,6 +6,24 @@ from django.conf import settings
from django.db import migrations
+def remove_duplicate_memberships(apps, schema_editor):
+ GroupMember = apps.get_model('core', 'GroupMember')
+ hierarchy = {'admin': 3, 'submitter': 2, 'member': 1}
+ keep = {}
+ for membership in GroupMember.objects.all().iterator():
+ key = (membership.group, membership.user)
+ if key in keep:
+ current = hierarchy[membership.access]
+ previous = hierarchy[keep[key].access]
+ if current < previous:
+ membership.delete()
+ else:
+ keep[key].delete()
+ keep[key] = membership
+ else:
+ keep[(membership.group, membership.user)] = membership
+
+
class Migration(migrations.Migration):
dependencies = [
@@ -14,6 +32,10 @@ class Migration(migrations.Migration):
]
operations = [
+ migrations.RunPython(
+ remove_duplicate_memberships,
+ reverse_code=migrations.RunPython.noop,
+ ),
migrations.AlterUniqueTogether(
name='groupmember',
unique_together=set([('group', 'user')]),
|
core: remove duplicate memberships before making them unique. The migration will keep the membership with higher access.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -38,11 +38,11 @@ setup(
author='Sepand Haghighi',
author_email='sepand@qpage.ir',
url='https://github.com/sepandhaghighi/art',
- download_url='https://github.com/sepandhaghighi/art/tarball/v1.4',
keywords="ascii art python3 python text font",
project_urls={
'Webpage': 'http://art.shaghighi.ir',
'Source': 'https://github.com/sepandhaghighi/art',
+ 'Tracker': 'https://github.com/sepandhaghighi/art/issues',
},
install_requires=get_requires(),
python_requires='>=2.7',
|
fix: setup file download_url removed
|
py
|
diff --git a/ReText/window.py b/ReText/window.py
index <HASH>..<HASH> 100644
--- a/ReText/window.py
+++ b/ReText/window.py
@@ -87,7 +87,7 @@ class ReTextWindow(QMainWindow):
self.treeView.doubleClicked.connect(self.treeItemSelected)
self.tabWidget = QTabWidget(self.splitter)
self.initTabWidget()
- self.splitter.setSizes([self.width() / 5, self.width() * 4 / 5])
+ self.splitter.setSizes([self.width() // 5, self.width() * 4 // 5])
self.initDirectoryTree(globalSettings.showDirectoryTree, globalSettings.directoryPath)
self.setCentralWidget(self.splitter)
self.tabWidget.currentChanged.connect(self.changeIndex)
|
Fix DeprecationWarning about implicit conversion to integers
|
py
|
diff --git a/cobra/core/Reaction.py b/cobra/core/Reaction.py
index <HASH>..<HASH> 100644
--- a/cobra/core/Reaction.py
+++ b/cobra/core/Reaction.py
@@ -581,6 +581,9 @@ class Reaction(Object):
if metabolite.charge is not None:
reaction_element_dict["charge"] += \
coefficient * metabolite.charge
+ if metabolite.elements is None:
+ raise ValueError("No elements found in metabolite %s"
+ % metabolite.id)
for element, amount in iteritems(metabolite.elements):
reaction_element_dict[element] += coefficient * amount
# filter out 0 values
|
Fixes #<I>, raise a ValueError from check_mass_balance() when Metabolite.elements returns None because there is a problem parsing the formula
|
py
|
diff --git a/py/h2o.py b/py/h2o.py
index <HASH>..<HASH> 100644
--- a/py/h2o.py
+++ b/py/h2o.py
@@ -2342,7 +2342,7 @@ class RemoteHost(object):
except IOError, e:
if e.errno == errno.ENOENT:
sftp.put(f, dest, callback=progress)
- print "\n{0:.3f} seconds".format(time.time() - start)
+ ### print "\n{0:.3f} seconds".format(time.time() - start)
finally:
sftp.close()
self.uploaded[f] = dest
|
don't print ftp times any more during build_cloud
|
py
|
diff --git a/networking_cisco/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py b/networking_cisco/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py
index <HASH>..<HASH> 100644
--- a/networking_cisco/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py
+++ b/networking_cisco/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py
@@ -328,7 +328,7 @@ class CiscoNexusMechanismDriver(api.MechanismDriver):
# Save dynamic switch information
self._switch_state = {}
- self.driver = self._load_nexus_cfg_driver(self._nexus_switches)
+ self.driver = self._load_nexus_cfg_driver()
self._initialize_vpc_alloc_pools()
# This method is only called once regardless of number of
|
Nexus ml2 plugin causing neutron crash at _load_nexus_cfg_driver Last minute change did not make it in which eliminated extra argument getting passed in. Change-Id: I<I>c9c<I>b4dcf<I>a<I>e0c1c4add5fd2b9 Closes-bug: #<I>
|
py
|
diff --git a/indra/tests/test_preassembler.py b/indra/tests/test_preassembler.py
index <HASH>..<HASH> 100644
--- a/indra/tests/test_preassembler.py
+++ b/indra/tests/test_preassembler.py
@@ -687,9 +687,9 @@ def test_influence_duplicate():
pa = Preassembler(hierarchies, [stmt1, stmt2, stmt3])
unique_stmts = pa.combine_duplicates()
assert len(unique_stmts) == 2
- assert len(unique_stmts[0].evidence) == 2
- assert len(unique_stmts[1].evidence) == 1
- sources = [e.source_api for e in unique_stmts[0].evidence]
+ assert len(unique_stmts[1].evidence) == 2
+ assert len(unique_stmts[0].evidence) == 1
+ sources = [e.source_api for e in unique_stmts[1].evidence]
assert set(sources) == set(['eidos1', 'eidos3'])
|
New matches key changes order of combined stmts
|
py
|
diff --git a/habu/lib/http.py b/habu/lib/http.py
index <HASH>..<HASH> 100644
--- a/habu/lib/http.py
+++ b/habu/lib/http.py
@@ -2,7 +2,7 @@
import requests
import urllib3
-urllib3.disable_warnings()
+#urllib3.disable_warnings()
def get_headers(server):
|
commented disable warnings because gives an error
|
py
|
diff --git a/grip/readers.py b/grip/readers.py
index <HASH>..<HASH> 100644
--- a/grip/readers.py
+++ b/grip/readers.py
@@ -222,9 +222,7 @@ class DirectoryReader(ReadmeReader):
Gets whether the specified subpath is a supported binary file.
"""
mimetype = self.mimetype_for(subpath)
- if mimetype and mimetype.startswith('text/'):
- return False
- return True
+ return mimetype and not mimetype.startswith('text/')
def last_updated(self, subpath=None):
"""
|
Cleanup and assume text for missing MIME type
|
py
|
diff --git a/stone/data_type.py b/stone/data_type.py
index <HASH>..<HASH> 100644
--- a/stone/data_type.py
+++ b/stone/data_type.py
@@ -785,6 +785,9 @@ class UserDefined(Composite):
d[key] = inner_d['.tag']
else:
make_compact(inner_d)
+ if isinstance(d[key], list):
+ for item in d[key]:
+ make_compact(item)
for example in examples.values():
if (isinstance(example.value, dict) and
|
include lists in make_compact fixes issue where a list of parameters doesn’t exclude unnecessary lone .tag keys when generating examples
|
py
|
diff --git a/gwpy/io/cache.py b/gwpy/io/cache.py
index <HASH>..<HASH> 100644
--- a/gwpy/io/cache.py
+++ b/gwpy/io/cache.py
@@ -70,7 +70,7 @@ except NameError: # python3.x
# -- cache I/O ----------------------------------------------------------------
def read_cache(cachefile, coltype=LIGOTimeGPS):
- """Read a LAL- for FFL-format cache file as a list of file paths
+ """Read a LAL- or FFL-format cache file as a list of file paths
Parameters
----------
|
gwpy.io: fixed typo in docstring [skip ci] [skip appveyor]
|
py
|
diff --git a/salt/minion.py b/salt/minion.py
index <HASH>..<HASH> 100644
--- a/salt/minion.py
+++ b/salt/minion.py
@@ -876,9 +876,6 @@ class Minion(MinionBase):
errors = functions['_errors']
functions.pop('_errors')
- functions.clear()
- returners.clear()
-
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
|
We aren't doing singleton module loaders, lets not clear what we just made
|
py
|
diff --git a/allegedb/allegedb/cache.py b/allegedb/allegedb/cache.py
index <HASH>..<HASH> 100644
--- a/allegedb/allegedb/cache.py
+++ b/allegedb/allegedb/cache.py
@@ -174,7 +174,7 @@ class Cache(object):
Deeper layers of this cache are keyed by branch, turn, and tick.
"""
- self.keycache = PickyDefaultDict(TurnDict)
+ self.keycache = PickyDefaultDict(SettingsTurnDict)
"""Keys an entity has at a given turn and tick."""
self.branches = StructuredDefaultDict(1, TurnDict)
"""A less structured alternative to ``keys``.
|
Switch to SettingsTurnDict for the keycache. It is no longer necessarily filled forward, and therefore should not prevent skipping about in time.
|
py
|
diff --git a/generator/python/python.babelg.py b/generator/python/python.babelg.py
index <HASH>..<HASH> 100644
--- a/generator/python/python.babelg.py
+++ b/generator/python/python.babelg.py
@@ -324,7 +324,7 @@ class PythonGenerator(CodeGeneratorMonolingual):
class_name = self._class_name_for_data_type(data_type)
self.generate_multiline_list(
[self.lang.format_method(f.name, True)
- for f in data_type.supertype.fields],
+ for f in data_type.supertype.all_fields],
before='super({}, self).__init__'.format(class_name))
# initialize each field
|
Bug fix: Generated Python classes for structs weren't calling super constructor with all fields. Summary: Issue is exposed with three classes that linearly inherit: A -> B -> C C will call B's constructor with only fields pertinent to B, when it should be calling B's constructor with fields pertinent to A and B. Reviewed By: guido
|
py
|
diff --git a/giotto/contrib/static/programs.py b/giotto/contrib/static/programs.py
index <HASH>..<HASH> 100644
--- a/giotto/contrib/static/programs.py
+++ b/giotto/contrib/static/programs.py
@@ -1,6 +1,7 @@
import os
import mimetypes
+from giotto import get_config
from giotto.programs import GiottoProgram
from giotto.views import GiottoView, renders
from giotto.utils import super_accept_to_mimetype
@@ -24,7 +25,7 @@ def StaticServe(base_path):
Meta program for serving any file based on the path
"""
def get_file(path):
- fullpath = base_path + path
+ fullpath = get_config('project_path') + os.path.join(base_path, path)
try:
mime, encoding = mimetypes.guess_type(fullpath)
return open(fullpath, 'rb'), mime or 'application/octet-stream'
@@ -44,7 +45,8 @@ def SingleStaticServe(file_path):
"""
def get_file():
mime, encoding = mimetypes.guess_type(file_path)
- return open(file_path, 'rb'), mime or 'application/octet-stream'
+ fullpath = os.path.join(get_config('project_path'), file_path)
+ return open(fullpath, 'rb'), mime or 'application/octet-stream'
class SingleStaticServe(GiottoProgram):
controllers = ['http-get']
|
changed static serve to implicitly use project_path
|
py
|
diff --git a/panels/_version.py b/panels/_version.py
index <HASH>..<HASH> 100644
--- a/panels/_version.py
+++ b/panels/_version.py
@@ -1,2 +1,2 @@
# Versions compliant with PEP 440 https://www.python.org/dev/peps/pep-0440
-__version__ = "0.0.29"
+__version__ = "0.0.30"
|
Update version number to <I>
|
py
|
diff --git a/app_helper/__init__.py b/app_helper/__init__.py
index <HASH>..<HASH> 100644
--- a/app_helper/__init__.py
+++ b/app_helper/__init__.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
-__version__ = "2.1.1"
+__version__ = "2.1.2.dev1"
__author__ = "Iacopo Spalletti <i.spalletti@nephila.it>"
__all__ = ["runner"]
|
Bump develop version [ci skip]
|
py
|
diff --git a/docs/conf.py b/docs/conf.py
index <HASH>..<HASH> 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -56,6 +56,7 @@ extensions = [
'sphinx.ext.autodoc',
]
+autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
|
Order in autodoc set to 'bysource' This order is more natural than 'alphabetical'
|
py
|
diff --git a/sharepoint/lists/__init__.py b/sharepoint/lists/__init__.py
index <HASH>..<HASH> 100644
--- a/sharepoint/lists/__init__.py
+++ b/sharepoint/lists/__init__.py
@@ -95,7 +95,7 @@ class SharePointList(object):
@property
def row_class(self):
if not hasattr(self, '_row_class'):
- attrs = {'fields': self.fields, 'list': self}
+ attrs = {'fields': self.fields, 'list': self, 'opener': self.opener}
for field in self.fields:
attrs[field.name] = field.descriptor
self._row_class = type('SharePointListRow', (SharePointListRow,), attrs)
|
Added missing opener attribute to row class
|
py
|
diff --git a/jf_agent/gh_download.py b/jf_agent/gh_download.py
index <HASH>..<HASH> 100644
--- a/jf_agent/gh_download.py
+++ b/jf_agent/gh_download.py
@@ -37,9 +37,6 @@ def get_all_users(client, include_orgs):
users = [_normalize_user(user) for org in include_orgs for user in client.get_all_users(org)]
print('✓')
- if not users:
- raise ValueError('No users found. Make sure your token has appropriate access to GitHub.')
-
return users
|
don't throw if no members of an org; we'll create people on demand (#<I>)
|
py
|
diff --git a/Tank/stepper/load_plan.py b/Tank/stepper/load_plan.py
index <HASH>..<HASH> 100644
--- a/Tank/stepper/load_plan.py
+++ b/Tank/stepper/load_plan.py
@@ -39,7 +39,7 @@ class Const(object):
return self.duration / 1000 * self.rps
def get_rps_list(self):
- return [(self.rps, self.duration / 1000)]
+ return [(int(self.rps), self.duration / 1000)]
class Line(object):
|
publish rounded rps in const lp
|
py
|
diff --git a/ariba/clusters.py b/ariba/clusters.py
index <HASH>..<HASH> 100644
--- a/ariba/clusters.py
+++ b/ariba/clusters.py
@@ -1,3 +1,4 @@
+import signal
import os
import copy
import tempfile
@@ -124,6 +125,21 @@ class Clusters:
if self.verbose:
print('Temporary directory:', self.tmp_dir)
+ wanted_signals = [signal.SIGABRT, signal.SIGINT, signal.SIGSEGV, signal.SIGTERM]
+ for s in wanted_signals:
+ signal.signal(s, self._receive_signal)
+
+
+ def _receive_signal(self, signum, stack):
+ print('Signal received:', signum, file=sys.stderr)
+ if os.path.exists(self.tmp_dir):
+ print('... deleting tmp directory', self.tmp_dir, '(unless it already has been)', file=sys.stderr)
+ try:
+ shutil.rmtree(self.tmp_dir)
+ except:
+ pass
+ sys.exit(1)
+
@classmethod
def _load_reference_data_info_file(cls, filename):
|
Delete temp directory if ctrl-c used
|
py
|
diff --git a/unyt/equivalencies.py b/unyt/equivalencies.py
index <HASH>..<HASH> 100644
--- a/unyt/equivalencies.py
+++ b/unyt/equivalencies.py
@@ -38,9 +38,6 @@ class _RegisteredEquivalence(type):
type.__init__(cls, name, b, d)
if hasattr(cls, "type_name"):
equivalence_registry[cls.type_name] = cls
- if hasattr(cls, "alternate_names"):
- for name in cls.alternate_names:
- equivalence_registry[name] = cls
@add_metaclass(_RegisteredEquivalence)
|
delete alternate_names handling from equivalency metaclass, this was removed
|
py
|
diff --git a/diff_cover/git_path.py b/diff_cover/git_path.py
index <HASH>..<HASH> 100644
--- a/diff_cover/git_path.py
+++ b/diff_cover/git_path.py
@@ -28,9 +28,12 @@ class GitPathTool(object):
# and src_path is `diff_cover/violations_reporter.py`
# search for `violations_reporter.py`
root_rel_path = os.path.relpath(self._cwd, self._root)
- if six.PY2:
- git_diff_path = git_diff_path.encode('utf-8')
- return os.path.relpath(git_diff_path, root_rel_path)
+ if isinstance(root_rel_path, six.binary_type):
+ root_rel_path = root_rel_path.decode()
+ rel_path = os.path.relpath(git_diff_path, root_rel_path)
+ if isinstance(rel_path, six.binary_type):
+ rel_path = rel_path.decode()
+ return rel_path
def absolute_path(self, src_path):
"""
|
While the previous way worked, this is more correct. relpath needs to have matching types, and this function assumes it gets a string/unicode. However, relpath will sometimes return a binary type. This happens because sometimes it returns a hardcoded value. See posixpath.py in python<I>. These hardcoded values are not unicode in python2.
|
py
|
diff --git a/registration/backends/default/views.py b/registration/backends/default/views.py
index <HASH>..<HASH> 100644
--- a/registration/backends/default/views.py
+++ b/registration/backends/default/views.py
@@ -6,5 +6,5 @@ warnings.warn(
DeprecationWarning
)
-from registration.backends.model_activation import ActivationView
-from registration.backends.model_activation import RegistrationView
+from registration.backends.model_activation.views import ActivationView
+from registration.backends.model_activation.views import RegistrationView
|
Fix import path to new views location in deprecated 'default' backend views file.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -81,7 +81,7 @@ setup(
maintainer_email='hello@pixelapse.com',
packages=find_packages(),
package_data={'': package_libs},
- python_requires='>=3'
+ python_requires='>=3',
install_requires=['pxprocess', 'pyunicode', 'PyPDF2'],
include_package_data=True,
zip_safe=False,
|
Missing comma on line <I>
|
py
|
diff --git a/LiSE/character.py b/LiSE/character.py
index <HASH>..<HASH> 100644
--- a/LiSE/character.py
+++ b/LiSE/character.py
@@ -333,11 +333,14 @@ class CharacterPlaceMapping(MutableMapping, RuleFollower):
except KeyError:
return False
for (branch, tick) in self.engine._active_branches():
+ if branch not in cache:
+ continue
try:
- return cache[branch][
+ if cache[branch][
window_left(cache[branch].keys(), tick)
- ]
- except (KeyError, ValueError):
+ ]:
+ return not self.engine._is_thing(self.character.name, place)
+ except ValueError:
continue
return False
(branch, tick) = self.engine.time
@@ -408,12 +411,10 @@ class CharacterThingPlaceMapping(GraphNodeMapping, RuleFollower):
def __getitem__(self, k):
"""Return a :class:`Thing` or :class:`Place` as appropriate"""
- if k in self.character.thing:
- return self.character.thing[k]
- elif k in self.character.place:
+ try:
return self.character.place[k]
- else:
- raise KeyError("No such Thing or Place in this Character")
+ except KeyError:
+ return self.character.thing[k]
def __setitem__(self, k, v):
"""Assume you're trying to create a :class:`Place`"""
|
Place mapping throws KeyError when a node exists but is a Thing
|
py
|
diff --git a/bsdploy/fabrics.py b/bsdploy/fabrics.py
index <HASH>..<HASH> 100644
--- a/bsdploy/fabrics.py
+++ b/bsdploy/fabrics.py
@@ -270,10 +270,10 @@ def bootstrap_mfsbsd(**kwargs):
if not yesno("\nContinuing will destroy the existing data on the following devices:\n %s\n\nContinue?" % ' '.join(devices)):
return
- template_context = env.server.config.copy()
- template_context.update(devices=sysctl_devices,
+ template_context = dict(devices=sysctl_devices,
interfaces=real_interfaces,
hostname=env.server.id)
+ template_context.update(env.server.config)
if bootstrap_files['rc.conf'].get('use_jinja'):
from jinja2 import Template
|
allow to override the defaults with values from ploy.conf
|
py
|
diff --git a/packages/sdk/snet/sdk/version.py b/packages/sdk/snet/sdk/version.py
index <HASH>..<HASH> 100644
--- a/packages/sdk/snet/sdk/version.py
+++ b/packages/sdk/snet/sdk/version.py
@@ -1 +1 @@
-__version__ = "0.2.2"
+__version__ = "0.2.3"
|
Updating the SDK version -> <I>
|
py
|
diff --git a/netmiko/cisco/cisco_xr.py b/netmiko/cisco/cisco_xr.py
index <HASH>..<HASH> 100644
--- a/netmiko/cisco/cisco_xr.py
+++ b/netmiko/cisco/cisco_xr.py
@@ -136,6 +136,9 @@ class CiscoXrBase(CiscoBaseConnection):
output += self.read_until_pattern(
pattern=re.escape(exit_config.strip())
)
+ # Read until we detect either an Uncommitted change or the end prompt
+ if not re.search(r"(Uncommitted|#$)", output):
+ output += self.read_until_pattern(pattern=r"(Uncommitted|#$)")
if "Uncommitted changes found" in output:
self.write_channel(self.normalize_cmd("no\n"))
output += self.read_until_pattern(pattern=r"[>#]")
|
Fix XR exit_config_mode issue (#<I>)
|
py
|
diff --git a/mtglib/gatherer_request.py b/mtglib/gatherer_request.py
index <HASH>..<HASH> 100644
--- a/mtglib/gatherer_request.py
+++ b/mtglib/gatherer_request.py
@@ -274,4 +274,4 @@ class SearchRequest(object):
def url(self):
return (base_url +
'&'.join([fl.url_fragment() for fl in self.get_filters()]) +
- self.special_fragment)
\ No newline at end of file
+ self.special_fragment) + '&action=advanced'
\ No newline at end of file
|
add '&action=advanced' needed for listing all cards from a set
|
py
|
diff --git a/tornado/web.py b/tornado/web.py
index <HASH>..<HASH> 100644
--- a/tornado/web.py
+++ b/tornado/web.py
@@ -446,12 +446,14 @@ class RequestHandler(object):
if head_part: html_heads.append(_utf8(head_part))
body_part = module.html_body()
if body_part: html_bodies.append(_utf8(body_part))
+ def is_absolute(path):
+ return any(path.startswith(x) for x in ["/", "http:", "https:"])
if js_files:
# Maintain order of JavaScript files given by modules
paths = []
unique_paths = set()
for path in js_files:
- if not path.startswith("/") and not path.startswith("http:"):
+ if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
@@ -470,7 +472,7 @@ class RequestHandler(object):
paths = []
unique_paths = set()
for path in css_files:
- if not path.startswith("/") and not path.startswith("http:"):
+ if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
|
Support https for UIModule javascript_files and css_files. Closes #<I>.
|
py
|
diff --git a/py/h2o.py b/py/h2o.py
index <HASH>..<HASH> 100644
--- a/py/h2o.py
+++ b/py/h2o.py
@@ -520,7 +520,8 @@ def check_sandbox_for_errors(sandbox_ignore_errors=False):
# don't detect these class loader info messags as errors
#[Loaded java.lang.Error from /usr/lib/jvm/java-7-oracle/jre/lib/rt.jar]
foundBad = regex1.search(line) and not (
- ('error rate' in line) or ('[Loaded ' in line) or ('[WARN]' in line))
+ ('error rate' in line) or ('[Loaded ' in line) or
+ ('[WARN]' in line) or ('CalcSquareErrorsTasks' in line))
if (printing==0 and foundBad):
printing = 1
|
Log line containing a reference to the class CalcSquareErrorsTasks causes a false test failure.
|
py
|
diff --git a/will/backends/pubsub/zeromq_pubsub.py b/will/backends/pubsub/zeromq_pubsub.py
index <HASH>..<HASH> 100644
--- a/will/backends/pubsub/zeromq_pubsub.py
+++ b/will/backends/pubsub/zeromq_pubsub.py
@@ -34,7 +34,7 @@ Examples:
def __init__(self, settings, *args, **kwargs):
self.verify_settings(quiet=True)
- logging.warning(
+ logging.error(
"The ZeroMQ Backend isn't ready for prime-time yet. Please "
"test closely, and report any problems at Will's github page!"
)
|
Explicitly notes that ZeroMQ won't be in <I>.
|
py
|
diff --git a/pymc/sampling.py b/pymc/sampling.py
index <HASH>..<HASH> 100644
--- a/pymc/sampling.py
+++ b/pymc/sampling.py
@@ -2125,18 +2125,18 @@ def draw(
Parameters
----------
- vars
+ vars : Variable or iterable of Variable
A variable or a list of variables for which to draw samples.
- draws : int
- Number of samples needed to draw. Detaults to 500.
- mode
- The mode used by ``aesara.function`` to compile the graph.
- **kwargs
- Keyword arguments for :func:`pymc.aesara.compile_pymc`
+ draws : int, default 1
+ Number of samples needed to draw.
+ mode : str or aesara.compile.mode.Mode, optional
+ The mode used by :func:`aesara.function` to compile the graph.
+ **kwargs : dict, optional
+ Keyword arguments for :func:`pymc.aesara.compile_pymc`.
Returns
-------
- List[np.ndarray]
+ list of ndarray
A list of numpy arrays.
Examples
|
Doc change the default value of pymc.draw from <I> to 1. (#<I>) * changed the default value of draws from <I> to 1 * fixed samplers_draw docstring * fixed samplers_draw docstring
|
py
|
diff --git a/py/selenium/webdriver/firefox/webdriver.py b/py/selenium/webdriver/firefox/webdriver.py
index <HASH>..<HASH> 100644
--- a/py/selenium/webdriver/firefox/webdriver.py
+++ b/py/selenium/webdriver/firefox/webdriver.py
@@ -92,6 +92,7 @@ class WebDriver(RemoteWebDriver):
if "marionette" in self.capabilities and self.capabilities['marionette'] is True:
self.service.stop()
else:
+ self.binary.kill()
try:
shutil.rmtree(self.profile.path)
if self.profile.tempfolder is not None:
|
Kill binary so profile clean up is no longer blocked by file locks
|
py
|
diff --git a/tests/test_gnupg.py b/tests/test_gnupg.py
index <HASH>..<HASH> 100644
--- a/tests/test_gnupg.py
+++ b/tests/test_gnupg.py
@@ -662,10 +662,10 @@ class GPGTestCase(unittest.TestCase):
"""Test encryption of a message string"""
key = self.generate_key("Craig Gentry", "xorr.ox",
passphrase="craiggentry")
- gentry = key.fingerprint
+ gentry = str(key.fingerprint)
key = self.generate_key("Marten van Dijk", "xorr.ox",
passphrase="martenvandijk")
- dijk = key.fingerprint
+ dijk = str(key.fingerprint)
gpg = self.gpg
message = """
In 2010 Riggio and Sicari presented a practical application of homomorphic
@@ -683,9 +683,9 @@ authentication."""
"""Test encryption with latin-1 encoding"""
key = self.generate_key("Craig Gentry", "xorr.ox",
passphrase="craiggentry")
- gentry = key.fingerprint
+ gentry = str(key.fingerprint)
key = self.generate_key("Marten van Dijk", "xorr.ox")
- dijk = key.fingerprint
+ dijk = str(key.fingerprint)
self.gpg.encoding = 'latin-1'
if _util._py3k:
data = 'Hello, André!'
|
Awkwardly, fingerprints need to always be strings. Change unittests to do so.
|
py
|
diff --git a/commitizen/commands/version.py b/commitizen/commands/version.py
index <HASH>..<HASH> 100644
--- a/commitizen/commands/version.py
+++ b/commitizen/commands/version.py
@@ -16,14 +16,14 @@ class Version:
if version:
out.write(f"{version}")
else:
- out.error(f"No project information in this project.")
+ out.error("No project information in this project.")
elif self.parameter.get("verbose"):
out.write(f"Installed Commitizen Version: {__version__}")
version = self.config.settings["version"]
if version:
out.write(f"Project Version: {version}")
else:
- out.error(f"No project information in this project.")
+ out.error("No project information in this project.")
else:
# if no argument is given, show installed commitizen version
out.write(f"{__version__}")
|
style: align with flake8 newest version
|
py
|
diff --git a/tests/gui/test_semantic_data.py b/tests/gui/test_semantic_data.py
index <HASH>..<HASH> 100644
--- a/tests/gui/test_semantic_data.py
+++ b/tests/gui/test_semantic_data.py
@@ -37,7 +37,7 @@ def change_semantic_data_values():
state_machine_model = gui_singleton.state_machine_manager_model.state_machines[state_machine.state_machine_id]
states_editor_controller = gui_singleton.main_window_controller.get_controller("states_editor_ctrl")
- page_info, state_identifier = states_editor_controller.find_page_of_state_m(state_machine_model.root_state)
+ page_info, state_identifier = states_editor_controller.get_page_of_state_m(state_machine_model.root_state)
# print page_info, state_identifier
state_editor_controller = states_editor_controller.tabs[state_identifier]["controller"]
|
fix(semantic_data): Adapt to renamed method
|
py
|
diff --git a/mythril/ether/soliditycontract.py b/mythril/ether/soliditycontract.py
index <HASH>..<HASH> 100644
--- a/mythril/ether/soliditycontract.py
+++ b/mythril/ether/soliditycontract.py
@@ -43,6 +43,7 @@ class SolidityContract(ETHContract):
self.name = name
self.code = contract['bin-runtime']
self.creation_code = contract['bin']
+ srcmap = contract['srcmap-runtime'].split(";")
has_contract = True
break
|
Bugfix: Assign srcmap when contract name is specified explicitly
|
py
|
diff --git a/example.py b/example.py
index <HASH>..<HASH> 100644
--- a/example.py
+++ b/example.py
@@ -1,13 +1,10 @@
-import json
import os
-import asyncio
-from urllib.request import urlopen
import aiohttp
from sirbot import SirBot
-token = os.environ.get('SIRBOT_TOKEN')
+token = os.environ['SIRBOT_TOKEN']
bot = SirBot(token)
@@ -15,14 +12,11 @@ bot = SirBot(token)
# Example quote of the day plugin
async def get_quote_of_the_day():
url = 'http://api.theysaidso.com/qod.json'
- loop = asyncio.get_event_loop()
async with aiohttp.ClientSession() as session:
- response = await session.get(url)
-
- if response.status != 200:
- raise Exception('There was a api error')
-
- quote_r = json.loads(response.read().decode('utf-8'))
+ async with session.get(url) as response:
+ if response.status != 200:
+ raise Exception('Error talking to quote api')
+ quote_r = await response.json()
quote = quote_r['contents']['quotes'][0]['quote']
author = quote_r['contents']['quotes'][0]['author']
|
Clean up some updates to aiohttp from the previous urllib, also removes unused imports
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -12,5 +12,6 @@ if __name__ == '__main__':
author='Rogerio Hilbert Lima',
author_email='rogerhil@gmail.com',
url='https://github.com/rogerhil/thegamesdb',
+ download_url='https://github.com/rogerhil/thegamesdb/tarball/0.2',
packages=find_packages()
)
|
Preparing to register at PyPI
|
py
|
diff --git a/mockito/mockito.py b/mockito/mockito.py
index <HASH>..<HASH> 100644
--- a/mockito/mockito.py
+++ b/mockito/mockito.py
@@ -332,7 +332,14 @@ def unstub(*objs):
def forget_invocations(*objs):
- """Forget all invocations of given objs."""
+ """Forget all invocations of given objs.
+
+ If you already *call* mocks during your setup routine, you can now call
+ ``forget_invocations`` at the end of your setup, and have a clean
+ 'recording' for your actual test code. T.i. you don't have
+ to count the invocations from your setup code anymore when using
+ :func:`verify` afterwards.
+ """
for obj in objs:
theMock = _get_mock_or_raise(obj)
theMock.clear_invocations()
|
Add basic documentation to `forget_invocations`
|
py
|
diff --git a/gsh/control_commands.py b/gsh/control_commands.py
index <HASH>..<HASH> 100644
--- a/gsh/control_commands.py
+++ b/gsh/control_commands.py
@@ -66,7 +66,7 @@ def do_help(command):
max_name_len = max(map(len, names))
for i in xrange(len(names)):
name = names[i]
- txt = (max_name_len - len(name)) * ' ' + ':' + name + ' - '
+ txt = ':' + name + (max_name_len - len(name) + 2) * ' '
doc = get_control_command(name).__doc__
txt += doc.split('\n')[2].strip() + '\n'
console_output(txt)
|
Left justify control command names for better readability
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
-VERSION = '2.0.3'
+VERSION = '2.0.4'
long_description = 'This package contains the tools you need to quickly ' \
'integrate your Python back-end with Yoti, so that your ' \
'users can share their identity details with your ' \
|
[SDK-<I>]: Updated version
|
py
|
diff --git a/pypsa/descriptors.py b/pypsa/descriptors.py
index <HASH>..<HASH> 100644
--- a/pypsa/descriptors.py
+++ b/pypsa/descriptors.py
@@ -284,6 +284,17 @@ def allocate_series_dataframes(network, series):
pnl[attr] = pnl[attr].reindex(columns=df.index,
fill_value=network.components[component]["attrs"].at[attr,"default"])
+def free_output_series_dataframes(network, components=None):
+ if components is None:
+ components = network.all_components
+
+ for component in components:
+ attrs = network.components[component]['attrs']
+ pnl = network.pnl(component)
+
+ for attr in attrs.index[attrs['varying'] & (attrs['status'] == 'Output')]:
+ pnl[attr] = pd.DataFrame(index=network.snapshots, columns=[])
+
def zsum(s, *args, **kwargs):
"""
pandas 0.21.0 changes sum() behavior so that the result of applying sum
|
descriptors: Add function to clear pypsa output series data Meant to be run before solving lopf to temporarily free up data for the solver.
|
py
|
diff --git a/bugzoo/version.py b/bugzoo/version.py
index <HASH>..<HASH> 100644
--- a/bugzoo/version.py
+++ b/bugzoo/version.py
@@ -1 +1 @@
-__version__ = '2.0.8'
+__version__ = '2.0.9'
|
minor version bump to <I>
|
py
|
diff --git a/rflint/__main__.py b/rflint/__main__.py
index <HASH>..<HASH> 100644
--- a/rflint/__main__.py
+++ b/rflint/__main__.py
@@ -8,7 +8,7 @@ def main(args=None):
return result
except Exception, e:
- print str(e)
+ sys.stderr.write(str(e) + "\n")
return 1
if __name__ == "__main__":
|
write caught exceptions to stderr instead of stdout
|
py
|
diff --git a/src/scout_apm/core/config/config.py b/src/scout_apm/core/config/config.py
index <HASH>..<HASH> 100644
--- a/src/scout_apm/core/config/config.py
+++ b/src/scout_apm/core/config/config.py
@@ -23,7 +23,13 @@ class ScoutConfig():
ScoutConfigNull()]
def value(self, key):
- return self.locate_layer_for_key(key).value(key)
+ value = self.locate_layer_for_key(key).value(key)
+ if key in CONVERSIONS:
+ converted_value = CONVERSIONS[key].convert(value)
+ else:
+ converted_value = value
+
+ return converted_value
def locate_layer_for_key(self, key):
for layer in self.layers:
@@ -185,3 +191,14 @@ class ScoutConfigNull():
def value(self, key):
return None
+
+
+class BooleanConversion():
+ @classmethod
+ def convert(cls, value):
+ return value.lower() in ('yes', 'true', 't', '1')
+
+
+CONVERSIONS = {
+ 'monitor': BooleanConversion,
+}
|
Add a BooleanConversion to configs, applied to 'monitor'
|
py
|
diff --git a/dsub/providers/local.py b/dsub/providers/local.py
index <HASH>..<HASH> 100644
--- a/dsub/providers/local.py
+++ b/dsub/providers/local.py
@@ -464,7 +464,7 @@ class LocalJobProvider(base.JobProvider):
with open(os.path.join(task_dir, 'end-time.txt'), 'wt') as f:
f.write(today)
msg = 'Operation canceled at %s\n' % today
- with open(os.path.join(task_dir, 'runner-log.txt'), 'a') as f:
+ with open(os.path.join(task_dir, 'runner-log.txt'), 'at') as f:
f.write(msg)
return (canceled, cancel_errors)
@@ -634,7 +634,7 @@ class LocalJobProvider(base.JobProvider):
def _get_log_detail_from_task_dir(self, task_dir):
try:
with open(os.path.join(task_dir, 'runner-log.txt'), 'r') as f:
- return [line.decode('utf-8') for line in f.read().splitlines()]
+ return [line for line in f.read().splitlines()]
except (IOError, OSError):
return None
|
local: ensure runner-log.txt is written as text. PiperOrigin-RevId: <I>
|
py
|
diff --git a/bibliopixel/animation/tests.py b/bibliopixel/animation/tests.py
index <HASH>..<HASH> 100644
--- a/bibliopixel/animation/tests.py
+++ b/bibliopixel/animation/tests.py
@@ -62,9 +62,12 @@ class MatrixCalibrationTest(BaseMatrixAnim):
class PixelTester(Animation):
""""""
- BRIGHTNESS = 0.5
PAUSE = 10
+ def __init__(self, *args, brightness=1.0, **kwds):
+ self.brightness = brightness
+ super().__init__(*args, **kwds)
+
def pre_run(self):
self.stepper = self.steps()
@@ -77,7 +80,7 @@ class PixelTester(Animation):
def steps(self):
for color in (colors.Red, colors.Green, colors.Blue, colors.Yellow,
colors.Fuchsia, colors.Aqua, colors.White):
- color = tuple(c * self.BRIGHTNESS for c in color)
+ color = tuple(c * self.brightness for c in color)
for i in range(len(self.color_list)):
self.color_list[i] = color
yield
|
Make `brightness` a field of PixelTester
|
py
|
diff --git a/motor/__init__.py b/motor/__init__.py
index <HASH>..<HASH> 100644
--- a/motor/__init__.py
+++ b/motor/__init__.py
@@ -1419,9 +1419,10 @@ class MotorCursor(MotorBase):
self.close()
return False
- if not self.alive:
+ if not self.alive and not self.closed:
# Cursor died because collection was dropped, or we started with
- # the collection empty. Start over soon.
+ # the collection empty, and a callback hasn't closed us since we
+ # started this method. Start over soon.
self.rewind()
self.get_io_loop().add_timeout(
time.time() + 0.5,
|
MotorCursor.tail must check if its callback called MotorCursor.close() on it
|
py
|
diff --git a/testbot.py b/testbot.py
index <HASH>..<HASH> 100755
--- a/testbot.py
+++ b/testbot.py
@@ -84,7 +84,7 @@ class TestBot(SingleServerIRCBot):
voiced.sort()
c.notice(nick, "Voiced: " + string.join(voiced, ", "))
elif cmd == "dcc":
- dcc = self.dcc_listen(nm_to_h(e.source()))
+ dcc = self.dcc_listen()
c.ctcp("DCC", nick, "CHAT chat %s %d" % (
ip_quad_to_numstr(dcc.localaddress),
dcc.localport))
|
Removed address argument in dcc_listen call.
|
py
|
diff --git a/python_utils/data_utils.py b/python_utils/data_utils.py
index <HASH>..<HASH> 100644
--- a/python_utils/data_utils.py
+++ b/python_utils/data_utils.py
@@ -176,13 +176,7 @@ def convert_columns_to_multinomial(T, M_c, multinomial_indices):
def all_continuous_from_file(filename, max_rows=None, gen_seed=0, has_header=True):
header, T = read_csv(filename, has_header=has_header)
T = numpy.array(T, dtype=float).tolist()
- num_rows = len(T)
- if (max_rows is not None) and (num_rows > max_rows):
- # randomly sample max_rows rows
- random_state = numpy.random.RandomState(gen_seed)
- which_rows = random_state.permutation(xrange(num_rows))
- which_rows = which_rows[:max_rows]
- T = [T[which_row] for which_row in which_rows]
+ T = at_most_N_rows(T, N=max_rows, gen_seed=gen_seed)
M_r = gen_M_r_from_T(T)
M_c = gen_M_c_from_T(T)
return T, M_r, M_c, header
|
use at_most_N_rows instead of re-implementing
|
py
|
diff --git a/shap/common.py b/shap/common.py
index <HASH>..<HASH> 100644
--- a/shap/common.py
+++ b/shap/common.py
@@ -267,7 +267,7 @@ def convert_name(ind, shap_values, feature_names):
elif ind == "sum()":
return "sum()"
else:
- print("Could not find feature named: " + ind)
+ raise ValueError("Could not find feature named: " + ind)
return None
else:
return nzinds[0]
|
raise an exception when name to index conversion fails
|
py
|
diff --git a/honcho/command.py b/honcho/command.py
index <HASH>..<HASH> 100644
--- a/honcho/command.py
+++ b/honcho/command.py
@@ -129,8 +129,7 @@ class Honcho(compat.with_metaclass(Commander, object)):
try:
options.func(self, options)
except CommandError as e:
- if e.message:
- log.error(e.message)
+ log.error(str(e))
sys.exit(1)
@arg('task', help='Task to show help for', nargs='?')
|
Python 3: exceptions don't have message attributes
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.