diff
stringlengths 139
3.65k
| message
stringlengths 8
627
| diff_languages
stringclasses 1
value |
|---|---|---|
diff --git a/django_x509/tests/test_cert.py b/django_x509/tests/test_cert.py
index <HASH>..<HASH> 100644
--- a/django_x509/tests/test_cert.py
+++ b/django_x509/tests/test_cert.py
@@ -409,3 +409,21 @@ k9Y1S1C9VB0YsDZTeZUggJNSDN4YrKjIevYZQQIhAOWec6vngM/PlI1adrFndd3d
self._create_cert(serial_number='notIntegers')
except ValidationError as e:
self.assertEqual("Serial number must be an integer", str(e.message_dict['serial_number'][0]))
+
+ def test_serial_number_clash(self):
+ ca = Ca(name='TestSerialClash')
+ ca.certificate = self.import_ca_certificate
+ ca.private_key = self.import_ca_private_key
+ ca.save()
+ cert = self._create_cert(serial_number=123456, ca=ca)
+ cert.full_clean()
+ cert.save()
+ _cert = Cert(name='TestClash',
+ ca=ca,
+ certificate=self.import_certificate,
+ private_key=self.import_private_key)
+ try:
+ _cert.full_clean()
+ except ValidationError as e:
+ self.assertEqual("Certificate with this CA and Serial number already exists.",
+ str(e.message_dict['__all__'][0]))
|
Added test to make sure error is raised for serial number clashes. Fixes and closes #<I>
|
py
|
diff --git a/metal/mmtl/dataset.py b/metal/mmtl/dataset.py
index <HASH>..<HASH> 100644
--- a/metal/mmtl/dataset.py
+++ b/metal/mmtl/dataset.py
@@ -448,14 +448,15 @@ class MNLIDataset(BERTDataset):
"test": -1,
"test_mismatched": -1,
"test_matched": -1,
+ "diagnostic": -1,
}
label_fn, inv_label_fn = get_label_fn(
{"entailment": 1, "contradiction": 2, "neutral": 3}
)
super(MNLIDataset, self).__init__(
tsv_path=tsv_path_for_dataset("MNLI", split),
- sent1_idx=8,
- sent2_idx=9,
+ sent1_idx=8 if split != "diagnostic" else 1,
+ sent2_idx=9 if split != "diagnostic" else 2,
label_idx=gold_cols[split],
skip_rows=1,
bert_model=bert_model,
|
Adding column indices for diagnostic task
|
py
|
diff --git a/phileo/__init__.py b/phileo/__init__.py
index <HASH>..<HASH> 100644
--- a/phileo/__init__.py
+++ b/phileo/__init__.py
@@ -1,2 +1,2 @@
# following PEP 386
-__version__ = "0.6"
+__version__ = "1.0"
|
<I> - for real this time
|
py
|
diff --git a/holoviews/plotting/bokeh/chart.py b/holoviews/plotting/bokeh/chart.py
index <HASH>..<HASH> 100644
--- a/holoviews/plotting/bokeh/chart.py
+++ b/holoviews/plotting/bokeh/chart.py
@@ -110,9 +110,12 @@ class PointPlot(LegendPlot, ColorbarPlot):
style['angle'] = np.deg2rad(style['angle'])
if self.jitter:
- axrange = 'y_range' if self.invert_axes else 'x_range'
- mapping['x'] = jitter(dims[xidx], self.jitter,
- range=self.handles[axrange])
+ if self.invert_axes:
+ mapping['y'] = jitter(dims[yidx], self.jitter,
+ range=self.handles['y_range'])
+ else:
+ mapping['x'] = jitter(dims[xidx], self.jitter,
+ range=self.handles['x_range'])
self._get_hover_data(data, element)
return data, mapping, style
@@ -306,7 +309,7 @@ class CurvePlot(ElementPlot):
style_opts = line_properties
_nonvectorized_styles = line_properties
-
+
_plot_methods = dict(single='line', batched='multi_line')
_batched_style_opts = line_properties
|
Fix jitter when axes inverted (#<I>)
|
py
|
diff --git a/pyiso.py b/pyiso.py
index <HASH>..<HASH> 100644
--- a/pyiso.py
+++ b/pyiso.py
@@ -2841,8 +2841,6 @@ class PyIso(object):
current_extent += -(-child.data_length // self.pvd.log_block_size)
# After we have reshuffled the extents we need to update the ptr
# records.
- # FIXME: we can optimize this by setting the ptr dirrecord at the time
- # we assign the extent.
for ptr in self.pvd.path_table_records:
ptr.update_extent_location_from_dirrecord()
|
Remove a wrong FIXME.
|
py
|
diff --git a/tests/unit/test_object_storage.py b/tests/unit/test_object_storage.py
index <HASH>..<HASH> 100644
--- a/tests/unit/test_object_storage.py
+++ b/tests/unit/test_object_storage.py
@@ -3238,7 +3238,8 @@ class ObjectStorageTest(unittest.TestCase):
fnames = ["test1", "test2", "test3", "fake1", "fake2"]
for fname in fnames:
pth = os.path.join(folder_path, fname)
- open(pth, "w").write(txt)
+ with open(pth, "w") as f:
+ f.write(txt)
mock_listdir.return_value = fnames
clt._sync_folder_to_container(folder_path, cont, prefix, delete,
include_hidden, ignore, ignore_timestamps, object_prefix,
|
Safely open and write to file to fix pypy test errors
|
py
|
diff --git a/examples/python/conditional_gate_example.py b/examples/python/conditional_gate_example.py
index <HASH>..<HASH> 100644
--- a/examples/python/conditional_gate_example.py
+++ b/examples/python/conditional_gate_example.py
@@ -115,21 +115,4 @@ c = backend.get_compiled_circuit(c)
counts = backend.run_circuit(c, 1024).get_counts()
print(counts)
-# Beyond the ability to perform conditional gates, we might want to include more complex classical logic in the form of control flow, including loops, branching code, and jumps. Again, several proposed low-level quantum programming languages have sufficient expressivity to capture these, such as the Quil language.
-#
-# This control flow is hard to represent from within the circuit model, so tket contains the `Program` class, which builds up a flow graph whose basic blocks are individual circuits. Currently, you can add conditional blocks and loops, where the conditions are whether an individual classical bit is 1.
-
-from pytket.program import Program
-
-checked_x_p = Program(2, 1)
-checked_x_p.append_circuit(checked_x)
-
-p = Program(2, 1)
-p.append(checked_x_p)
-p.append_if_else(Bit(0), Program(2, 1), checked_x_p)
-
-p
-
-# Support for compiling and optimising `Program`s and full classical data manipulation will be added in a future version of tket.
-
# Try out mid-circuit measurement and conditional gate support on the `AerBackend` simulator, or ask about accessing the `HoneywellBackend` to try on a hardware device.
|
Remove reference to Program class in manual.
|
py
|
diff --git a/jax/api.py b/jax/api.py
index <HASH>..<HASH> 100644
--- a/jax/api.py
+++ b/jax/api.py
@@ -984,8 +984,7 @@ class CustomTransformsFunction(object):
def __call__(self, *args, **kwargs):
def pv_like(x):
- aval = x.aval if hasattr(x, 'aval') else xla.abstractify(x)
- return pe.PartialVal((aval, core.unit))
+ return pe.PartialVal((batching.get_aval(x), core.unit)) # Use shaped aval
jax_args, in_trees = unzip2(map(pytree_to_jaxtupletree, args))
jax_kwargs, kwargs_tree = pytree_to_jaxtupletree(kwargs)
jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun2(
|
Used shaped aval for custom_transforms jaxpr
|
py
|
diff --git a/gremlin-python/src/main/python/gremlin_python/statics.py b/gremlin-python/src/main/python/gremlin_python/statics.py
index <HASH>..<HASH> 100644
--- a/gremlin-python/src/main/python/gremlin_python/statics.py
+++ b/gremlin-python/src/main/python/gremlin_python/statics.py
@@ -59,7 +59,7 @@ class SingleByte(int):
"""
def __new__(cls, b):
if -128 <= b < 128:
- int.__new__(cls, b)
+ return int.__new__(cls, b)
else:
raise ValueError("value must be between -128 and 127 inclusive")
@@ -69,8 +69,8 @@ class SingleChar(str):
Provides a way to pass a single character via Gremlin.
"""
def __new__(cls, c):
- if len(b) == 1:
- str.__new__(cls, c)
+ if len(c) == 1:
+ return str.__new__(cls, c)
else:
raise ValueError("string must contain a single character")
|
added return in SingleByte and SingleChar & fix if check bug
|
py
|
diff --git a/pypeerassets/protocol.py b/pypeerassets/protocol.py
index <HASH>..<HASH> 100644
--- a/pypeerassets/protocol.py
+++ b/pypeerassets/protocol.py
@@ -325,7 +325,7 @@ class DeckState:
except KeyError:
self.balances[receiver] = amount
- def _sort_cards(self, cards: list) -> None:
+ def _sort_cards(self, cards: list) -> Generator:
'''sort cards by blocknum and blockseq'''
return sorted([card.__dict__ for card in cards],
|
get DeckState::_sort_card annotation right
|
py
|
diff --git a/trionyx/settings.py b/trionyx/settings.py
index <HASH>..<HASH> 100644
--- a/trionyx/settings.py
+++ b/trionyx/settings.py
@@ -107,6 +107,7 @@ SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
LOGIN_EXEMPT_URLS = [
'static',
+ 'api',
]
"""A list of urls that dont require a login"""
|
[BUGFIX] API cookie login was reqeured
|
py
|
diff --git a/holoviews/core/ndmapping.py b/holoviews/core/ndmapping.py
index <HASH>..<HASH> 100644
--- a/holoviews/core/ndmapping.py
+++ b/holoviews/core/ndmapping.py
@@ -514,7 +514,7 @@ class NdMapping(MultiDimensionalMapping):
else:
condition = self._range_condition(dim_ind)
dim_vals = unique_iterator(k[idx] for k in keys)
- expanded.append([k for k in dim_vals if condition(k)][::int(ind.step)])
+ expanded.append(set([k for k in dim_vals if condition(k)][::int(ind.step)]))
else:
expanded.append(ind)
return tuple(expanded)
@@ -549,6 +549,7 @@ class NdMapping(MultiDimensionalMapping):
conditions.append(self._all_condition())
elif isinstance(dim, (list, tuple)):
raise ValueError("Keys may only be selected with sets, not lists or tuples.")
+ else:
conditions.append(self._value_condition(dim))
return conditions
|
Fixed step slicing and string indexing on NdMapping
|
py
|
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
index <HASH>..<HASH> 100644
--- a/tests/integration/__init__.py
+++ b/tests/integration/__init__.py
@@ -145,9 +145,13 @@ class TestDaemon(object):
self.minion_opts = salt.config.minion_config(
os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'minion')
)
+ if sys.version_info < (2, 7):
+ self.minion_opts['multiprocessing'] = False
self.sub_minion_opts = salt.config.minion_config(
os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'sub_minion')
)
+ if sys.version_info < (2, 7):
+ self.sub_minion_opts['multiprocessing'] = False
self.smaster_opts = salt.config.master_config(
os.path.join(
INTEGRATION_TEST_DIR, 'files', 'conf', 'syndic_master'
|
Is this what's needed under python <I> for tests to complete?
|
py
|
diff --git a/dotlink/dotlink.py b/dotlink/dotlink.py
index <HASH>..<HASH> 100755
--- a/dotlink/dotlink.py
+++ b/dotlink/dotlink.py
@@ -234,7 +234,7 @@ class Dotlink(object):
source_path = path.join(self.source, source_path)
target_path = path.join(target_root, target_path)
- if path.isfile(target_path):
+ if path.isfile(target_path) or path.islink(target_path):
self.log.debug('Removing existing file at %s', target_path)
os.unlink(target_path)
|
Fix removal of existing symlinks that point to directories
|
py
|
diff --git a/lhc/binf/feature/seq.py b/lhc/binf/feature/seq.py
index <HASH>..<HASH> 100644
--- a/lhc/binf/feature/seq.py
+++ b/lhc/binf/feature/seq.py
@@ -20,7 +20,11 @@ class NucleotideFrequency(Feature):
class NucleotideSkew(Feature):
def __init__(self):
+ self.depends = [(NucleotideFrequency, 1)]
self.transform = [string.lower]
- def calculate(self, seq):
- pass
+ def calculate(self, frq):
+ res = OrderedDict([
+ ('at', (frq['a'] - frq['t']) / (frq['a'] + frq['t'])),
+ ('gc', (frq['g'] - frq['c']) / (frq['g'] + frq['c']))
+ ])
|
Added at and gc skew to features
|
py
|
diff --git a/lib/webuser.py b/lib/webuser.py
index <HASH>..<HASH> 100644
--- a/lib/webuser.py
+++ b/lib/webuser.py
@@ -400,6 +400,10 @@ def loginUser(req, p_un, p_pw, login_method):
try:
groups = CFG_EXTERNAL_AUTHENTICATION[login_method][0].fetch_user_groups_membership(p_email, p_pw)
# groups is a dictionary {group_name : group_description,}
+ new_groups = {}
+ for key, value in groups.items():
+ new_groups[key + " [" + str(login_method) + "]"] = value
+ groups = new_groups
except AttributeError:
pass
except WebAccessExternalAuthError:
|
Changed the external group names to make clear what login method they belong to.
|
py
|
diff --git a/lib/reda/containers/sEIT.py b/lib/reda/containers/sEIT.py
index <HASH>..<HASH> 100644
--- a/lib/reda/containers/sEIT.py
+++ b/lib/reda/containers/sEIT.py
@@ -4,7 +4,7 @@ This container holds multi-frequency (spectral) imaging data, that is multiple
SIP/EIS spectra for different four-point spreads, usually used for subsequent
tomographic analysis.
"""
-# import functools
+import gc
import os
from numbers import Number
@@ -594,6 +594,7 @@ class sEIT(BaseContainer, sEITImporters):
plot_filename=plot_filename
)
plt.close(spec_fig)
+ gc.collect()
def plot_pseudosections(self, column, filename=None, return_fig=False):
"""Create a multi-plot with one pseudosection for each frequency.
|
[sEIT] try to fix a memory leak when plotting many spectra
|
py
|
diff --git a/grappa/reporters/code.py b/grappa/reporters/code.py
index <HASH>..<HASH> 100644
--- a/grappa/reporters/code.py
+++ b/grappa/reporters/code.py
@@ -37,7 +37,8 @@ class CodeReporter(BaseReporter):
# Context manager based assertions that does not imply new test calls.
CONTEXT_EXPR = re.compile(
- r'[\.](not)?[\_]?(have|has|be|to|that|satisfy|which|include)[\.]')
+ r'[\.](not)?[\_]?(have|has|be|to|that|'
+ r'satisfy|which|include)[\_]?(not)?[\.]')
def match_line(self, line):
return any([
|
refactor(reporter): match additional negation assertions
|
py
|
diff --git a/sentry_sdk/scope.py b/sentry_sdk/scope.py
index <HASH>..<HASH> 100644
--- a/sentry_sdk/scope.py
+++ b/sentry_sdk/scope.py
@@ -241,7 +241,9 @@ class Scope(object):
if self._level is not None:
event["level"] = self._level
- event.setdefault("breadcrumbs", []).extend(self._breadcrumbs)
+ if event.get("type") != "transaction":
+ event.setdefault("breadcrumbs", []).extend(self._breadcrumbs)
+
if event.get("user") is None and self._user is not None:
event["user"] = self._user
|
fix: Remove breadcrumbs from transaction events (#<I>)
|
py
|
diff --git a/safe/engine/interpolation.py b/safe/engine/interpolation.py
index <HASH>..<HASH> 100644
--- a/safe/engine/interpolation.py
+++ b/safe/engine/interpolation.py
@@ -94,7 +94,7 @@ def assign_hazard_values_to_exposure_data(hazard, exposure,
# Make sure attribute name can be stored in a shapefile
if attribute_name is not None and len(attribute_name) > 10:
- msg = ('Specfied attribute name "%s"\
+ msg = ('Specified attribute name "%s"\
has length = %i. '
'To fit into a shapefile it must be at most 10 characters '
'long. How about naming it "%s"?' % (attribute_name,
@@ -402,6 +402,8 @@ def interpolate_raster_vector_points(source, target,
N = len(target)
if attribute_name is None:
attribute_name = source.get_name()
+ # FIXME (Ole): Launder for shape files
+ attribute_name = str(attribute_name[:10])
try:
values = interpolate_raster(longitudes, latitudes, A,
|
Added explicit laundering of attribute_name for shapefiles (grrr)
|
py
|
diff --git a/django_fsm_log/admin.py b/django_fsm_log/admin.py
index <HASH>..<HASH> 100644
--- a/django_fsm_log/admin.py
+++ b/django_fsm_log/admin.py
@@ -11,7 +11,7 @@ class StateLogInline(GenericTabularInline):
model = StateLog
can_delete = False
- def has_add_permission(self, request):
+ def has_add_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
|
admin: StateLogInline: has_add_permission: dj<I> compatibility (#<I>)
|
py
|
diff --git a/py3status/modules/dropboxd_status.py b/py3status/modules/dropboxd_status.py
index <HASH>..<HASH> 100644
--- a/py3status/modules/dropboxd_status.py
+++ b/py3status/modules/dropboxd_status.py
@@ -72,7 +72,7 @@ class Py3status:
raise Exception(STRING_NOT_INSTALLED)
def dropbox(self):
- status = self.py3.command_output('dropbox-cli status').splitlines()[0]
+ status = self.py3.command_output('dropbox-cli status', localized=True).splitlines()[0]
if status == "Dropbox isn't running!":
color = self.py3.COLOR_BAD
|
dropbox_status module: add support for unicode character output from dropbox-cli (#<I>)
|
py
|
diff --git a/kafka/errors.py b/kafka/errors.py
index <HASH>..<HASH> 100644
--- a/kafka/errors.py
+++ b/kafka/errors.py
@@ -468,10 +468,6 @@ class KafkaConnectionError(KafkaError):
invalid_metadata = True
-class ConnectionError(KafkaConnectionError):
- """Deprecated"""
-
-
class ProtocolError(KafkaError):
pass
|
Remove deprecated `ConnectionError` (#<I>) This has been deprecated for a bit in favor of `KafkaConnectionError` because it conflicts with Python's built-in `ConnectionError`. Time to remove it as part of cleaning up our old deprecated code.
|
py
|
diff --git a/pysnmp/carrier/twisted/dgram/udp.py b/pysnmp/carrier/twisted/dgram/udp.py
index <HASH>..<HASH> 100644
--- a/pysnmp/carrier/twisted/dgram/udp.py
+++ b/pysnmp/carrier/twisted/dgram/udp.py
@@ -23,4 +23,9 @@ class UdpTwistedTransport(DgramTwistedTransport):
raise error.CarrierError(sys.exc_info()[1])
return self
+ def closeTransport(self):
+ d = self._lport.stopListening()
+ d and d.addCallback(lambda x: None)
+ DgramTwistedTransport.closeTransport(self)
+
UdpTransport = UdpTwistedTransport
|
overload closeTransport() to close UDP port on transport shutdown
|
py
|
diff --git a/qgrid/grid.py b/qgrid/grid.py
index <HASH>..<HASH> 100644
--- a/qgrid/grid.py
+++ b/qgrid/grid.py
@@ -1411,6 +1411,11 @@ class QgridWidget(widgets.DOMWidget):
elif content['type'] == 'change_viewport':
old_viewport_range = self._viewport_range
self._viewport_range = (content['top'], content['bottom'])
+
+ # if the viewport didn't change, do nothing
+ if old_viewport_range == self._viewport_range:
+ return
+
self._update_table(triggered_by='change_viewport')
self._notify_listeners({
'name': 'viewport_changed',
|
Fix issue where scroll event could be sent repeatedly, causing the grid to flicker and the kernel to remain in use.
|
py
|
diff --git a/blueqat/_version.py b/blueqat/_version.py
index <HASH>..<HASH> 100644
--- a/blueqat/_version.py
+++ b/blueqat/_version.py
@@ -14,4 +14,4 @@
"""The version of blueqat."""
-__version__ = "0.3.16-dev"
+__version__ = "0.3.16"
|
MAINT: Version updated for hotfix release
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -28,7 +28,7 @@ setup(
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
- version='1.7.3',
+ version='1.7.4',
description='Transliteration tools to convert text in one indic script encoding to another',
long_description=long_description,
|
Move out unmaintained modules. Drop python 2 support.
|
py
|
diff --git a/selenic/__init__.py b/selenic/__init__.py
index <HASH>..<HASH> 100644
--- a/selenic/__init__.py
+++ b/selenic/__init__.py
@@ -1,10 +1,20 @@
+from distutils.version import StrictVersion
+
from selenium.webdriver.common.action_chains import ActionChains
import selenium
-from config import *
+from .config import *
+
+sel_ver = StrictVersion(selenium.__version__)
+v2_37_2 = StrictVersion("2.37.2")
+
+if sel_ver > v2_37_2:
+ raise Exception("please ascertain whether the ActionChains.send_keys "
+ "patch is required for Selenium version: " +
+ selenium.__version__)
-if selenium.__version__ == "2.35.0":
- # Work around bug in 2.35.0
+if sel_ver >= StrictVersion("2.35.0") and sel_ver <= v2_37_2:
+ # Work around bug
def send_keys(self, *keys_to_send):
"""
Sends keys to current focused element.
|
Updated the ActionChains.send_keys patch.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -64,7 +64,6 @@ install_requires = [
"Flask-BabelEx>=0.9.2",
"Flask-Breadcrumbs>=0.3.0",
"Flask-Menu>=0.4.0",
- # "invenio-assets>=0.1.0.dev20150000",
]
packages = find_packages()
@@ -124,12 +123,15 @@ setup(
include_package_data=True,
platforms='any',
entry_points={
+ 'invenio_base.apps': [
+ 'invenio_theme = invenio_theme:InvenioTheme'
+ ],
'invenio_assets.bundles': [
'invenio_theme_css = invenio_theme.bundles:css',
'invenio_theme_js = invenio_theme.bundles:js',
],
- "invenio_i18n.translations": [
- "messages = invenio_theme"
+ 'invenio_i18n.translations': [
+ 'messages = invenio_theme'
],
},
extras_require=extras_require,
|
installation: entry points addition * Adds entry points for Invenio-Base to discover the extension.
|
py
|
diff --git a/dingo/tools/geo.py b/dingo/tools/geo.py
index <HASH>..<HASH> 100644
--- a/dingo/tools/geo.py
+++ b/dingo/tools/geo.py
@@ -98,3 +98,24 @@ def calc_geo_dist_matrix_vincenty(nodes_pos):
matrix[i][j] = distance
return matrix
+
+
+def calc_geo_centre_point(node_source, node_target, proj_source, proj_target):
+ """ Calculates the geodesic distance between `node_source` and `node_target` incorporating the detour factor in
+ config_calc.
+ Args:
+ node_source: source node (Dingo object), member of _graph
+ node_target: target node (Dingo object), member of _graph
+
+ Returns:
+ Distance in m
+ """
+
+ branch_shp = transform(proj_source, LineString([node_source.geo_data, node_target.geo_data]))
+
+ distance = vincenty((node_source.geo_data.y, node_source.geo_data.x),
+ (node_target.geo_data.y, node_target.geo_data.x)).m
+
+ centre_point_shp = transform(proj_target, branch_shp.interpolate(distance/2))
+
+ return centre_point_shp
|
add method to calc centre point from two points for determining position of circuit breaker
|
py
|
diff --git a/anytemplate/engines/tenjin.py b/anytemplate/engines/tenjin.py
index <HASH>..<HASH> 100644
--- a/anytemplate/engines/tenjin.py
+++ b/anytemplate/engines/tenjin.py
@@ -9,7 +9,12 @@ from __future__ import absolute_import
import logging
import tenjin # :throw: ImportError
tenjin.set_template_encoding('utf-8') # FIXME
-from tenjin.helpers import *
+
+# TODO: It seems that tenjin forces this to make it work factually.
+from tenjin.helpers import CaptureContext, cache_as, capture_as, \
+ captured_as, echo, echo_cached, escape, fragment_cache, \
+ generate_tostrfunc, html, new_cycle, not_cached, start_capture, \
+ stop_capture, to_str, unquote # flake8: noqa
import anytemplate.compat
import anytemplate.engines.base
|
replace '*' (glob imports) with explicit imports and suppress flake8's warning as an workaround
|
py
|
diff --git a/utils/utils.py b/utils/utils.py
index <HASH>..<HASH> 100644
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -1,6 +1,7 @@
import os
import imp
import json
+import ConfigParser
from xml.dom import minidom
from xml.etree import cElementTree as ctree
@@ -39,3 +40,24 @@ def create_tables_file(path=None):
def get_module(path):
module = imp.load_package('module',path)
return module
+
+def create_config_file():
+ config = ConfigParser.RawConfigParser()
+
+ # default section
+ config.add_section('default')
+ config.set('default','format','json')
+ # json section
+ config.add_section('json')
+ config.set('json', 'diagnostics', 'False')
+ config.set('json', 'debug', 'False')
+ config.set('json', 'jsonCompact', 'False')
+ # xml section
+ config.add_section('xml')
+ config.set('xml', 'diagnostics', 'False')
+ config.set('xml', 'debug', 'False')
+ # oauth section
+ config.add_section('auth')
+ config.set('auth', 'consumer_key', '')
+ config.set('auth', 'consumer_secret', '')
+
|
#9 : function to create config file added
|
py
|
diff --git a/openquake/calculators/classical.py b/openquake/calculators/classical.py
index <HASH>..<HASH> 100644
--- a/openquake/calculators/classical.py
+++ b/openquake/calculators/classical.py
@@ -664,7 +664,8 @@ class ClassicalCalculator(base.HazardCalculator):
slicedic = performance.get_slices(
dstore['_poes/sid'][:] // sites_per_task)
nslices = sum(len(slices) for slices in slicedic.values())
- logging.info('There are %d slices of poes', nslices)
+ logging.info('There are %d slices of poes [%.1f per task]', nslices,
+ nslices / len(slicedic))
allargs = [
(getters.PmapGetter(dstore, ws, slices, oq.imtls, oq.poes),
N, hstats, individual, oq.max_sites_disagg, self.amplifier)
|
Better logging [ci skip]
|
py
|
diff --git a/pypodio2/transport.py b/pypodio2/transport.py
index <HASH>..<HASH> 100644
--- a/pypodio2/transport.py
+++ b/pypodio2/transport.py
@@ -41,7 +41,6 @@ class OAuthAuthorization(object):
def __call__(self):
return self.token
-
class UserAgentHeaders(object):
def __init__(self, base_headers_factory, user_agent):
self.base_headers_factory = base_headers_factory
@@ -112,7 +111,7 @@ class HttpTransport(object):
self._attribute_stack = []
handler = kwargs.get('handler', _handle_response)
- handler(response, data)
+ return handler(response, data)
def _generate_params(self, params):
body = self._params_template % urlencode(params)
|
Transport.__call__ needs to return the results from the handler
|
py
|
diff --git a/kubernetes_asyncio/config/kube_config.py b/kubernetes_asyncio/config/kube_config.py
index <HASH>..<HASH> 100644
--- a/kubernetes_asyncio/config/kube_config.py
+++ b/kubernetes_asyncio/config/kube_config.py
@@ -326,7 +326,7 @@ class KubeConfigLoader(object):
def _load_cluster_info(self):
if 'server' in self._cluster:
- self.host = self._cluster['server']
+ self.host = self._cluster['server'].rstrip('/')
if self.host.startswith("https"):
self.ssl_ca_cert = FileOrData(
self._cluster, 'certificate-authority',
|
fix: trailing slash on kube/config (#<I>)
|
py
|
diff --git a/src/robotsuite/__init__.py b/src/robotsuite/__init__.py
index <HASH>..<HASH> 100644
--- a/src/robotsuite/__init__.py
+++ b/src/robotsuite/__init__.py
@@ -11,6 +11,7 @@ import string
import unicodedata
import robot
+from robot.common.model import _Critical
import unittest2 as unittest
from lxml import etree
@@ -348,8 +349,12 @@ class RobotTestCase(unittest.TestCase):
log='robot_log.html', report='robot_report.html',
critical=self._critical, noncritical=self._noncritical)
- # Raise AssertionError when the test has failed
- assert last_status == 'PASS', last_message
+ # If the test is critical, raise AssertionError when it has failed
+ is_critical = _Critical(tags=self._critical,
+ nons=self._noncritical
+ ).are_critical(self._tags or [])
+ if is_critical:
+ assert last_status == 'PASS', last_message
def RobotTestSuite(*paths, **kw):
|
only raise an AssertionError if the test is critical
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -732,7 +732,7 @@ INSTALL_REQUIREMENTS = [
'sqlalchemy~=1.3',
'sqlalchemy_jsonfield~=0.9',
'tabulate>=0.7.5, <0.9',
- 'tenacity==4.12.0',
+ 'tenacity>=4.12.0, <5.2',
'thrift>=0.9.2',
'typing;python_version<"3.6"',
'typing-extensions>=3.7.4;python_version<"3.8"',
|
Relax requirement to allow latest version of tenacity (#<I>)
|
py
|
diff --git a/scripts/rnaseq2ga.py b/scripts/rnaseq2ga.py
index <HASH>..<HASH> 100644
--- a/scripts/rnaseq2ga.py
+++ b/scripts/rnaseq2ga.py
@@ -144,7 +144,8 @@ class AbstractWriter(object):
featureSet = dataset.getFeatureSets()[0]
featureId = ""
for feature in featureSet.getFeatures(name=featureName):
- featureId = feature[0].id
+ featureId = feature.id
+ break
datafields = (expressionId, rnaQuantificationId, name, featureId,
expressionLevel, isNormalized,
rawCount, score, units, confidenceLow, confidenceHi)
|
Fix a bug in loading RNA feature IDs for prep compliance
|
py
|
diff --git a/spyderlib/restart_app.py b/spyderlib/restart_app.py
index <HASH>..<HASH> 100644
--- a/spyderlib/restart_app.py
+++ b/spyderlib/restart_app.py
@@ -266,7 +266,7 @@ def main():
# Before launching a new Spyder instance we need to make sure that the
# reset subprocess has closed. We wait for a fixed and "reasonable"
# amount of time and check, otherwise an error is launched.
- wait_time = 60 # Seconds
+ wait_time = 20 # Seconds
for counter in range(int(wait_time/SLEEP_TIME)):
if not is_pid_running(pid_reset):
break
|
Reduce max waiting time when resetting
|
py
|
diff --git a/thredds_crawler/__init__.py b/thredds_crawler/__init__.py
index <HASH>..<HASH> 100644
--- a/thredds_crawler/__init__.py
+++ b/thredds_crawler/__init__.py
@@ -1 +1 @@
-__version__ = '0.9'
+__version__ = '1.0-dev'
|
Bump to <I>-dev
|
py
|
diff --git a/cpuinfo.py b/cpuinfo.py
index <HASH>..<HASH> 100644
--- a/cpuinfo.py
+++ b/cpuinfo.py
@@ -920,13 +920,13 @@ def get_cpu_info_from_dmesg():
processor_brand = processor_brand.strip()
# Various fields
- fields = fields.split(' ')
+ fields = fields.strip().split(' ')
vendor_id = None
stepping = None
model = None
family = None
for field in fields:
- name, value = field.split(' = '):
+ name, value = field.split(' = ')
name = name.lower()
if name == 'origin':
vendor_id = value.strip('"')
|
Fixed more bugs that broke dmesg.
|
py
|
diff --git a/paradigm/definitions/unsupported.py b/paradigm/definitions/unsupported.py
index <HASH>..<HASH> 100644
--- a/paradigm/definitions/unsupported.py
+++ b/paradigm/definitions/unsupported.py
@@ -269,6 +269,7 @@ if platform.python_implementation() != 'PyPy':
# not supported by ``typeshed`` package
methods_descriptors.update({_collections_abc.dict_items.isdisjoint,
+ _collections_abc.dict_keys.isdisjoint,
_collections_abc.generator.close,
_collections_abc.generator.send,
_collections_abc.generator.throw,
|
Complete unsupported methods descriptors
|
py
|
diff --git a/qiskit/aqua/algorithms/quantum_algorithm.py b/qiskit/aqua/algorithms/quantum_algorithm.py
index <HASH>..<HASH> 100644
--- a/qiskit/aqua/algorithms/quantum_algorithm.py
+++ b/qiskit/aqua/algorithms/quantum_algorithm.py
@@ -82,7 +82,7 @@ class QuantumAlgorithm(Pluggable):
"""
if not self.configuration.get('classical', False):
if quantum_instance is None:
- AquaError("Quantum device or backend is needed since you are running quanutm algorithm.")
+ AquaError("Quantum device or backend is needed since you are running quantum algorithm.")
if isinstance(quantum_instance, BaseBackend):
quantum_instance = QuantumInstance(quantum_instance)
quantum_instance.set_config(**kwargs)
|
Create QiskitAqua class to contain Aqua properties
|
py
|
diff --git a/kafka/consumer/new.py b/kafka/consumer/new.py
index <HASH>..<HASH> 100644
--- a/kafka/consumer/new.py
+++ b/kafka/consumer/new.py
@@ -172,7 +172,7 @@ class KafkaConsumer(object):
self._msg_iter = self.fetch_messages()
# Check for auto-commit
- if self.should_auto_commit():
+ if self._should_auto_commit():
self.commit()
try:
@@ -220,7 +220,7 @@ class KafkaConsumer(object):
self._offsets.task_done[topic_partition] = offset
- def should_auto_commit(self):
+ def _should_auto_commit(self):
if not self._config['auto_commit_enable']:
return False
|
_should_auto_commit is private
|
py
|
diff --git a/papermill/execute.py b/papermill/execute.py
index <HASH>..<HASH> 100644
--- a/papermill/execute.py
+++ b/papermill/execute.py
@@ -13,7 +13,13 @@ from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
from nbconvert.preprocessors.base import Preprocessor
from six import string_types, integer_types
-from tqdm import tqdm
+# tqdm creates 2 globals lock which raise OSException if the execution
+# environment does not have shared memory for processes, e.g. AWS Lambda
+try:
+ from tqdm import tqdm
+ no_tqdm = False
+except OSError:
+ no_tqdm = True
from .conf import settings
from .exceptions import PapermillException, PapermillExecutionError
@@ -170,7 +176,7 @@ def execute_notebook(notebook,
processor = ExecutePreprocessor(
timeout=None,
kernel_name=kernel_name or nb.metadata.kernelspec.name, )
- processor.progress_bar = progress_bar
+ processor.progress_bar = progress_bar and not no_tqdm
processor.log_output = log_output
processor.preprocess(nb, {})
|
Provides the ability to run in environment execution where multiprocessing locks are not implemented, e.g. AWS Lambda
|
py
|
diff --git a/bebop/query.py b/bebop/query.py
index <HASH>..<HASH> 100644
--- a/bebop/query.py
+++ b/bebop/query.py
@@ -57,7 +57,7 @@ class SolrQuery(object):
return self
def offset(self, offset):
- self.params['offset'] = offset
+ self.params['start'] = offset
return self
def queried_fields(self, *fields):
|
[fix] offset param was wrong, thank you nosetests
|
py
|
diff --git a/h2o-py/h2o/frame.py b/h2o-py/h2o/frame.py
index <HASH>..<HASH> 100644
--- a/h2o-py/h2o/frame.py
+++ b/h2o-py/h2o/frame.py
@@ -4030,8 +4030,9 @@ class H2OFrame(Keyed):
data = pandaF[newX].values
label = pandaF[[yresp]].values
- return xgb.DMatrix(data=csr_matrix(data), label=label) \
- if h2oXGBoostModel._model_json['output']['sparse'] else xgb.DMatrix(data=data, label=label)
+ return xgb.DMatrix(data=csr_matrix(data), label=label, feature_names=newX) \
+ if h2oXGBoostModel._model_json['output']['sparse'] else xgb.DMatrix(data=data,
+ label=label, feature_names=newX)
def pivot(self, index, column, value):
"""
|
Add feature names to DMatrix
|
py
|
diff --git a/tests/test_assess_model_change_watcher.py b/tests/test_assess_model_change_watcher.py
index <HASH>..<HASH> 100644
--- a/tests/test_assess_model_change_watcher.py
+++ b/tests/test_assess_model_change_watcher.py
@@ -1,7 +1,8 @@
"""Tests for assess_model_change_watcher module."""
import logging
-from io import BytesIO
+from io import BytesIO, StringIO, TextIOWrapper
+import sys
from mock import (
Mock,
@@ -31,11 +32,14 @@ class TestParseArgs(TestCase):
self.assertEqual(False, args.debug)
def test_help(self):
- fake_stdout = BytesIO()
+ if isinstance(sys.stdout, TextIOWrapper):
+ fake_stdout = StringIO()
+ else:
+ fake_stdout = BytesIO()
with parse_error(self) as fake_stderr:
with patch("sys.stdout", fake_stdout):
parse_args(["--help"])
- self.assertEqual("", fake_stderr.getvalue())
+ self.assertEqual(b"", fake_stderr.getvalue())
class TestMain(TestCase):
|
Fix stdout mocking.
|
py
|
diff --git a/luminoso_api/client.py b/luminoso_api/client.py
index <HASH>..<HASH> 100644
--- a/luminoso_api/client.py
+++ b/luminoso_api/client.py
@@ -244,9 +244,9 @@ class LuminosoClient(object):
Returns a copy of the client to avoid breaking old code.
"""
if path.startswith('/'):
- self.url = self.root_url + path
+ self.url = ensure_trailing_slash(self.root_url + path)
else:
- self.url = self.url + path
+ self.url = ensure_trailing_slash(self.url + path)
return self
def upload(self, path, docs, **params):
|
Restore ensure_trailing_slash as part of change_path Previously change_path() got ensure_trailing_slash() because it was going through __init__(). It no longer is, so the ensuring function needs to be called directly in change_path().
|
py
|
diff --git a/dss/api/files.py b/dss/api/files.py
index <HASH>..<HASH> 100644
--- a/dss/api/files.py
+++ b/dss/api/files.py
@@ -142,13 +142,18 @@ def put(uuid: str, version: str=None):
)).lower()
# does it exist? if so, we can skip the copy part.
+ do_copy = True
try:
- handle.get_metadata(
- dst_bucket, dst_object_name)
+ if hca_handle.verify_blob_checksum(dst_bucket, dst_object_name, metadata):
+ do_copy = False
except BlobNotFoundError:
- hca_handle.copy_blob_from_staging(
- src_bucket, src_object_name,
- dst_bucket, dst_object_name)
+ pass
+
+ if do_copy:
+ handle.copy(src_bucket, src_object_name, dst_bucket, dst_object_name)
+
+ # verify the copy was done correctly.
+ assert hca_handle.verify_blob_checksum(dst_bucket, dst_object_name, metadata)
# what's the target object name for the file metadata?
metadata_object_name = "files/" + uuid + "." + version
|
Checksum-based mechanism for determining whether we copy. (#<I>) Now the new approach is: 1. Does the checksum on disk match our expectations? If so, skip the copy. 2. Do the copy and verify that the copy was done correctly.
|
py
|
diff --git a/scripts/analyze_quality_recal.py b/scripts/analyze_quality_recal.py
index <HASH>..<HASH> 100644
--- a/scripts/analyze_quality_recal.py
+++ b/scripts/analyze_quality_recal.py
@@ -41,7 +41,7 @@ import pysam
from mako.template import Template
try:
import rpy2.robjects as robjects
-except ImportError:
+except (ImportError, LookupError):
robjects = None
def main(recal_bam, fastq1, fastq2=None, chunk_size=None, input_format=None,
|
Catch errors with problem rpy2 installations to skip plotting step
|
py
|
diff --git a/natsort/__main__.py b/natsort/__main__.py
index <HASH>..<HASH> 100644
--- a/natsort/__main__.py
+++ b/natsort/__main__.py
@@ -78,6 +78,7 @@ def main():
# Make sure the filter range is given properly. Does nothing if no filter
args.filter = check_filter(args.filter)
+ args.reverse_filter = check_filter(args.reverse_filter)
# Remove trailing whitespace from all the entries
entries = [e.strip() for e in args.entries]
|
Added a filter range check to --reverse-filter.
|
py
|
diff --git a/health_monitoring_plugins/check_snmp_ilo4/check_snmp_ilo4.py b/health_monitoring_plugins/check_snmp_ilo4/check_snmp_ilo4.py
index <HASH>..<HASH> 100755
--- a/health_monitoring_plugins/check_snmp_ilo4/check_snmp_ilo4.py
+++ b/health_monitoring_plugins/check_snmp_ilo4/check_snmp_ilo4.py
@@ -406,7 +406,7 @@ if __name__ == '__main__':
# verify that seclevel is correctly used, otherwise there will be an exception
verify_seclevel(seclevel, helper)
- sess = netsnmp.Session(Version=version, DestHost=host, SecLevel = seclevel, SecName = secname, AuthProto = authproto, AuthPass = authpass, PrivProto = privproto, PrivPass = privpass)
+ sess = netsnmp.Session(Version=version, DestHost=host, SecLevel = seclevel, SecName = secname, AuthProto = authproto, AuthPass = authpass, PrivProto = privproto, PrivPass = privpass, Community = community)
# If the --scan option is set, we show all components and end the script
if scan:
|
fixed check_snmp_ilo4 to work with snmpv2 (#<I>) The community string was not being passed as a parameter to the netsnmp.Session constructor
|
py
|
diff --git a/tests/test_record.py b/tests/test_record.py
index <HASH>..<HASH> 100644
--- a/tests/test_record.py
+++ b/tests/test_record.py
@@ -12,6 +12,7 @@ import pickle
import sys
import unittest
+from asyncpg import _testbase as tb
from asyncpg.protocol.protocol import _create_record as Record
@@ -21,7 +22,7 @@ R_AC = collections.OrderedDict([('a', 0), ('c', 1)])
R_ABC = collections.OrderedDict([('a', 0), ('b', 1), ('c', 2)])
-class TestRecord(unittest.TestCase):
+class TestRecord(tb.ConnectedTestCase):
@contextlib.contextmanager
def checkref(self, *objs):
@@ -279,3 +280,11 @@ class TestRecord(unittest.TestCase):
r = Record(R_A, (42,))
with self.assertRaises(Exception):
pickle.dumps(r)
+
+ @unittest.expectedFailure
+ async def test_record_duplicate_colnames(self):
+ """Test that Record handles duplicate column names."""
+ r = await self.con.fetchrow('SELECT 1 as a, 2 as a')
+ self.assertEqual(r['a'], 2)
+ self.assertEqual(r[0], 1)
+ self.assertEqual(repr(r), '<Record a=1 a=2>')
|
Add a (failing) test for duplicate field names handling in Records
|
py
|
diff --git a/telemetry/telemetry/core/platform/power_monitor/ippet_power_monitor.py b/telemetry/telemetry/core/platform/power_monitor/ippet_power_monitor.py
index <HASH>..<HASH> 100644
--- a/telemetry/telemetry/core/platform/power_monitor/ippet_power_monitor.py
+++ b/telemetry/telemetry/core/platform/power_monitor/ippet_power_monitor.py
@@ -106,7 +106,7 @@ class IppetPowerMonitor(power_monitor.PowerMonitor):
self._ippet_port = util.GetUnreservedAvailableLocalPort()
parameters = ['-log_dir', self._output_dir,
'-web_port', str(self._ippet_port),
- '-zip', 'n', '-all_processes', '-l', '0']
+ '-zip', 'n']
self._ippet_handle = self._backend.LaunchApplication(
IppetPath(), parameters, elevate_privilege=True)
|
[telemetry] Limit IPPET process list. This may reduce IPPET overhead and runtime, leading to lower noise and cycle time on the bots. -all_processes means include processes with 0 CPU time, which is useless to us anyway. -l 0 means unlimited number of processes. The default is the top <I> processes by CPU time. BUG=None. TEST=None. Review URL: <URL>
|
py
|
diff --git a/wcmatch/glob.py b/wcmatch/glob.py
index <HASH>..<HASH> 100644
--- a/wcmatch/glob.py
+++ b/wcmatch/glob.py
@@ -293,8 +293,8 @@ class Glob(object):
"""Init the directory walker object."""
self.flags = _flag_transform(flags)
- self.dot = not bool(self.flags & NODOT)
- self.globstar = not bool(self.flags & fnmatch.GLOBSTAR)
+ self.dot = bool(self.flags & NODOT)
+ self.globstar = bool(self.flags & fnmatch.GLOBSTAR)
self.case_sensitive = fnmatch.get_case(self.flags)
self.is_bytes = isinstance(pattern, bytes)
self.pattern = _magicsplit(pattern, self.flags)
|
Enable dot and globstar in glob by default
|
py
|
diff --git a/condoor/version.py b/condoor/version.py
index <HASH>..<HASH> 100644
--- a/condoor/version.py
+++ b/condoor/version.py
@@ -1,3 +1,3 @@
"""Version information."""
-__version__ = '1.0.11'
+__version__ = '1.0.12'
|
Bumping version number to <I>
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,7 @@ from setuptools import find_packages
setup(
name='django-db-pool',
- version='0.0.1',
+ version='0.0.2',
author=u'Greg McGuire',
author_email='gregjmcguire+github@gmail.com',
packages=find_packages(),
@@ -25,7 +25,7 @@ setup(
],
zip_safe=False,
install_requires=[
- "Django == 1.3",
- "psycopg2 >= 2.4",
+ "Django>=1.3.0,<1.4",
+ "psycopg2>=2.4",
],
)
|
Fix incorrect install_requires
|
py
|
diff --git a/cherrypy/_cputil.py b/cherrypy/_cputil.py
index <HASH>..<HASH> 100644
--- a/cherrypy/_cputil.py
+++ b/cherrypy/_cputil.py
@@ -54,7 +54,7 @@ def get_object_trail(objectpath=None):
return objectTrail
-def get_special_attribute(name, alternate_name = None):
+def get_special_attribute(name, old_name = None):
"""Return the special attribute. A special attribute is one that
applies to all of the children from where it is defined, such as
_cp_filters."""
@@ -71,10 +71,13 @@ def get_special_attribute(name, alternate_name = None):
return getattr(obj, name)
try:
- return globals()[name]
+ if old_name:
+ return globals()[old_name]
+ else:
+ return globals()[name]
except KeyError:
- if alternate_name:
- return get_special_attribute(alternate_name)
+ if old_name:
+ return get_special_attribute(name)
msg = "Special attribute %s could not be found" % repr(name)
raise cherrypy.HTTPError(500, msg)
|
Fix for #<I>: Try old name first
|
py
|
diff --git a/salt/modules/iptables.py b/salt/modules/iptables.py
index <HASH>..<HASH> 100644
--- a/salt/modules/iptables.py
+++ b/salt/modules/iptables.py
@@ -314,6 +314,7 @@ def check(table='filter', chain=None, rule=None):
def check_chain(table='filter', chain=None):
'''
+ .. versionadded:: Hydrogen
Check for the existance of a chain in the table
@@ -340,6 +341,7 @@ def check_chain(table='filter', chain=None):
def new_chain(table='filter', chain=None):
'''
+ .. versionadded:: Hydrogen
Create new custom chain to the specified table.
@@ -363,6 +365,7 @@ def new_chain(table='filter', chain=None):
def delete_chain(table='filter', chain=None):
'''
+ .. versionadded:: Hydrogen
Delete custom chain to the specified table.
|
Add versionadded directives to newer iptables functions
|
py
|
diff --git a/tests/example_project/settings/base.py b/tests/example_project/settings/base.py
index <HASH>..<HASH> 100644
--- a/tests/example_project/settings/base.py
+++ b/tests/example_project/settings/base.py
@@ -71,6 +71,8 @@ INSTALLED_APPS = (
'ella.ellaadmin',
'ella.newman',
+ 'ella.newman.markup',
+ 'ella.newman.licenses',
'django.contrib.admin',
)
|
Markup and licenses apps added to INSTALLED_APPS.
|
py
|
diff --git a/salt/proxy/esxi.py b/salt/proxy/esxi.py
index <HASH>..<HASH> 100644
--- a/salt/proxy/esxi.py
+++ b/salt/proxy/esxi.py
@@ -192,9 +192,9 @@ States
------
Associated states are thoroughly documented in
-:doc:`salt.states.vsphere </ref/states/all/salt.states.vsphere>`. Look there
-to find an example structure for Pillar as well as an example ``.sls`` file f
-or standing up an ESXi host from scratch.
+:doc:`salt.states.esxi </ref/states/all/salt.states.esxi>`. Look there
+to find an example structure for Pillar as well as an example ``.sls`` file
+for standing up an ESXi host from scratch.
'''
|
ESXi Proxy minions states are located at salt.states.esxi, not vsphere.
|
py
|
diff --git a/datahandling/datahandling/datahandling.py b/datahandling/datahandling/datahandling.py
index <HASH>..<HASH> 100644
--- a/datahandling/datahandling/datahandling.py
+++ b/datahandling/datahandling/datahandling.py
@@ -487,7 +487,6 @@ def fit_PSD(Data, bandwidth, NMovAve, TrapFreqGuess, AGuess=0.1e10, GammaGuess=4
[AErr, TrappingFrequencyErr, GammaErr]
"""
- print(TrapFreqGuess, AGuess, GammaGuess)
AngFreqs = 2 * _np.pi * Data.freqs
Angbandwidth = 2 * _np.pi * bandwidth
AngTrapFreqGuess = 2 * _np.pi * TrapFreqGuess
|
removed print statement from fit_PSD that I left in from testing
|
py
|
diff --git a/mackup.py b/mackup.py
index <HASH>..<HASH> 100755
--- a/mackup.py
+++ b/mackup.py
@@ -297,6 +297,9 @@ SUPPORTED_APPS = {
'Transmission': [PREFERENCES + 'org.m0k.transmission.plist'],
+ 'Vagrant': ['.vagrant',
+ '.vagrant.d'],
+
'Ventrilo': [PREFERENCES + 'Ventrilo'],
'Vim': ['.gvimrc',
|
Add support for vagrant dotfiles They are some very large files (machines, boxes) which may not be desired in all cases
|
py
|
diff --git a/TableParser.py b/TableParser.py
index <HASH>..<HASH> 100644
--- a/TableParser.py
+++ b/TableParser.py
@@ -47,7 +47,6 @@ class TableParser( htmllib.HTMLParser ):
The parser will accept nested tables as <table> inside <td> elements.
"""
def __init__( self ):
- #raise Exception, 'The table parser doesn\'t work right now'
htmllib.HTMLParser.__init__( self, formatter.NullFormatter() )
self.tables = []
self.currentTable = None
@@ -90,6 +89,9 @@ class TableParser( htmllib.HTMLParser ):
self.currentTable.currentRow = None
def start_td( self, attrs ):
+ if self.currentTable.currentRow is None:
+ # found a <td> tag not preceeded by a <tr> tag, so one is implied.
+ self.start_tr( {} )
self.checkForExtraRows()
attrs = dict( attrs )
try:
|
Removed commented raise Exception in __init__. Added code to start_td to infer a start_tr when no current row exists. ********************** Label: "ICIMOD" User: Jaraco Date: <I>-<I>-<I> Time: <I>:<I> Labeled Label comment: This is the version of the modules as packaged for transfer to ICIMOD.
|
py
|
diff --git a/core/rq-worker/file_watch.py b/core/rq-worker/file_watch.py
index <HASH>..<HASH> 100644
--- a/core/rq-worker/file_watch.py
+++ b/core/rq-worker/file_watch.py
@@ -148,7 +148,7 @@ def template_queue(path, base_dir="/var/lib/docker/data/"):
try:
if container["Status"] == "exited":
c.remove_container(container["Id"])
- elif container["Name"].startswith(os.environ.get('HOSTNAME')):
+ elif container["Id"].startswith(os.environ.get('HOSTNAME')):
# skip this container until the end
this_container = container["Id"]
else:
|
fixing skipped core container from name-based to id-based
|
py
|
diff --git a/transitfeed/trip.py b/transitfeed/trip.py
index <HASH>..<HASH> 100755
--- a/transitfeed/trip.py
+++ b/transitfeed/trip.py
@@ -234,6 +234,10 @@ class Trip(GtfsObjectBase):
'trip_id=? ORDER BY stop_sequence', (self.trip_id,))
stop_times = []
stoptime_class = self.GetGtfsFactory().StopTime
+ if problems is None:
+ # TODO: delete this branch when StopTime.__init__ doesn't need a
+ # ProblemReporter
+ problems = problems_module.default_problem_reporter
for row in cursor.fetchall():
stop = self._schedule.GetStop(row[6])
stop_times.append(stoptime_class(problems=problems,
|
Issue <I>: Crash in kmlwriter Reviewed at <URL>
|
py
|
diff --git a/hgvs/__init__.py b/hgvs/__init__.py
index <HASH>..<HASH> 100644
--- a/hgvs/__init__.py
+++ b/hgvs/__init__.py
@@ -52,14 +52,18 @@ BaseOffsetPosition(base=1582, offset=0, datum=1, uncertain=False)
from __future__ import absolute_import, division, print_function, unicode_literals
+import logging
import pkg_resources
import re
import warnings
from .config import global_config
+logger = logging.getLogger(__name__)
+
_is_released_version = False
+
try:
__version__ = pkg_resources.get_distribution('hgvs').version
if re.match('^\d+\.\d+\.\d+$', __version__) is not None:
@@ -71,6 +75,9 @@ except pkg_resources.DistributionNotFound as e:
# Make sure we're showing DeprecationWarnings
warnings.filterwarnings('default', '', DeprecationWarning)
+logger.info("hgvs " + __version__ + "; released: " + str(_is_released_version))
+
+
## <LICENSE>
## Copyright 2014 HGVS Contributors (https://bitbucket.org/biocommons/hgvs)
##
|
on import of hgvs, emit logging info line w/version
|
py
|
diff --git a/plenum/common/ledger_manager.py b/plenum/common/ledger_manager.py
index <HASH>..<HASH> 100644
--- a/plenum/common/ledger_manager.py
+++ b/plenum/common/ledger_manager.py
@@ -177,9 +177,12 @@ class LedgerManager(HasActionQueue):
catchUpReplies = ledgerInfo.receivedCatchUpReplies
logger.info("{} requesting {} missing transactions after timeout".format(self, num_missing))
+ eligible_nodes = self.nodes_to_request_txns_from
eligible_nodes = [n
- for n in self.nodes_to_request_txns_from
- if n not in self.wait_catchup_rep_from]
+ for n in eligible_nodes
+ if n not in self.wait_catchup_rep_from] \
+ if not self.wait_catchup_rep_from.issuperset(eligible_nodes) \
+ else eligible_nodes
self.wait_catchup_rep_from.clear()
if not eligible_nodes:
|
INDY-<I>: fix a problem in re-asking CatchupRep
|
py
|
diff --git a/django_transfer/__init__.py b/django_transfer/__init__.py
index <HASH>..<HASH> 100644
--- a/django_transfer/__init__.py
+++ b/django_transfer/__init__.py
@@ -82,12 +82,12 @@ def check_acl(path):
if white:
allow = False
for pattern in white:
- if re.fullmatch(pattern, path):
+ if re.match(pattern, path):
allow = True
break
if black:
for pattern in black:
- if re.fullmatch(pattern, path):
+ if re.match(pattern, path):
allow = False
break
return allow
|
re.fullmatch() is Py3 only.
|
py
|
diff --git a/zipline/pipeline/data/dataset.py b/zipline/pipeline/data/dataset.py
index <HASH>..<HASH> 100644
--- a/zipline/pipeline/data/dataset.py
+++ b/zipline/pipeline/data/dataset.py
@@ -33,12 +33,21 @@ class _BoundColumnDescr(object):
"""
Intermediate class that sits on `DataSet` objects and returns memoized
`BoundColumn` objects when requested.
+
+ This exists so that subclasses of DataSets don't share columns with their
+ parent classes.
"""
def __init__(self, dtype, name):
self.dtype = dtype
self.name = name
def __get__(self, instance, owner):
+ """
+ Produce a concrete BoundColumn object when accessed.
+
+ We don't bind to datasets at class creation time so that subclasses of
+ DataSets produce different BoundColumns.
+ """
return BoundColumn(
dtype=self.dtype,
dataset=owner,
|
DOC: Explain why _BoundColumnDescr exists. We don't want to bind to parent DataSets when we bind to names because we want to dynamically create new BoundColumns in subclasses.
|
py
|
diff --git a/parsl/monitoring/monitoring.py b/parsl/monitoring/monitoring.py
index <HASH>..<HASH> 100644
--- a/parsl/monitoring/monitoring.py
+++ b/parsl/monitoring/monitoring.py
@@ -6,6 +6,7 @@ import time
import typeguard
import datetime
import zmq
+from functools import wraps
import queue
from parsl.multiprocessing import ForkProcess, SizedQueue
@@ -324,6 +325,7 @@ class MonitoringHub(RepresentationMixin):
""" Internal
Wrap the Parsl app with a function that will call the monitor function and point it at the correct pid when the task begins.
"""
+ @wraps(f)
def wrapped(*args: List[Any], **kwargs: Dict[str, Any]) -> Any:
# Send first message to monitoring router
send_first_message(try_id,
|
adds wraps to monitoring so functions have their names back (#<I>) Callables lose their name attributes when monitoring is turned on as the wrapper function for monitoring doesn't wrap callables correctly. More details can be found in this issue: <URL>
|
py
|
diff --git a/eliot/_bytesjson.py b/eliot/_bytesjson.py
index <HASH>..<HASH> 100644
--- a/eliot/_bytesjson.py
+++ b/eliot/_bytesjson.py
@@ -24,21 +24,21 @@ _encoder = JSONEncoder()
-def loads(s):
+def _loads(s):
if isinstance(s, bytes):
s = s.decode("utf-8")
return pyjson.loads(s)
-def dumps(obj):
+def _dumps(obj):
return _encoder.encode(obj).encode("utf-8")
if PY2:
# No need for the above on Python 2
- del loads, dumps
loads, dumps = pyjson.loads, pyjson.dumps
-
+else:
+ loads, dumps = _loads, dumps
__all__ = ["loads", "dumps"]
|
Satisfy pyflakes.
|
py
|
diff --git a/atomic_reactor/plugins/pre_flatpak_create_dockerfile.py b/atomic_reactor/plugins/pre_flatpak_create_dockerfile.py
index <HASH>..<HASH> 100644
--- a/atomic_reactor/plugins/pre_flatpak_create_dockerfile.py
+++ b/atomic_reactor/plugins/pre_flatpak_create_dockerfile.py
@@ -37,6 +37,10 @@ LABEL version="{stream}"
LABEL release="{version}"
ADD atomic-reactor-includepkgs /tmp/
+
+RUN mkdir -p /var/tmp/flatpak-build/dev && \
+ for i in null zero random urandom ; do cp -a /dev/$i /var/tmp/flatpak-build/dev ; done
+
RUN cat /tmp/atomic-reactor-includepkgs >> /etc/dnf/dnf.conf && \\
dnf -y --nogpgcheck \\
--disablerepo=* \\
|
flatpak_create_dockerfile: create a skeleton /dev in the installroot rpm scripts that are being run in the installroot to create the Flatpak filesystem may access certain standard files in /dev - create a very small /dev/ to make such scripts work reliably. See, for example: <URL>
|
py
|
diff --git a/backend/pfamserver/services/uniprot_service.py b/backend/pfamserver/services/uniprot_service.py
index <HASH>..<HASH> 100644
--- a/backend/pfamserver/services/uniprot_service.py
+++ b/backend/pfamserver/services/uniprot_service.py
@@ -32,7 +32,6 @@ def uniprot_query_filter(uniprot, query):
query = query.filter(
or_(Uniprot.uniprot_id == uniprot, Uniprot.uniprot_acc == uniprot)
)
-
return query
|
[#<I>] Optimize uniprots description to be a bit faster.
|
py
|
diff --git a/pyghmi/ipmi/oem/lenovo/imm.py b/pyghmi/ipmi/oem/lenovo/imm.py
index <HASH>..<HASH> 100644
--- a/pyghmi/ipmi/oem/lenovo/imm.py
+++ b/pyghmi/ipmi/oem/lenovo/imm.py
@@ -954,6 +954,9 @@ class XCCClient(IMMClient):
if rt['return'] in (657, 659, 656):
raise pygexc.InvalidParameterValue(
'Given location was unreachable by the XCC')
+ if rt['return'] == 32:
+ raise pygexc.InvalidParameterValue(
+ 'XCC does not have required license for operation')
raise Exception('Unhandled return: ' + repr(rt))
rt = self.wc.grab_json_response('/api/providers/rp_vm_remote_mountall',
'{}')
|
Add specific message for missing media license XCC error for license was not handled. Change-Id: I<I>d2c<I>cbbba8ea<I>bf<I>c<I>cd<I>b2
|
py
|
diff --git a/python_modules/dagster/dagster/check/__init__.py b/python_modules/dagster/dagster/check/__init__.py
index <HASH>..<HASH> 100644
--- a/python_modules/dagster/dagster/check/__init__.py
+++ b/python_modules/dagster/dagster/check/__init__.py
@@ -859,3 +859,7 @@ def class_param(obj: Any, param_name: str) -> Union[ParameterCheckError, type]:
f'Param "{param_name}" is not a class. Got {repr(obj)} which is type {type(obj)}.'
)
return obj
+
+
+def assert_never(value: NoReturn) -> NoReturn:
+ failed(f"Unhandled value: {value} ({type(value).__name__})")
|
Add exhaustive check for mypy Summary: Ran into this when refactoring `PartitionSetDefinition`. This idiom appears in <URL>
|
py
|
diff --git a/timepiece/migrations/0007_contact_user.py b/timepiece/migrations/0007_contact_user.py
index <HASH>..<HASH> 100644
--- a/timepiece/migrations/0007_contact_user.py
+++ b/timepiece/migrations/0007_contact_user.py
@@ -8,6 +8,8 @@ class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
+ null_contacts = orm.ProjectRelationship.objects.filter(contact__user__isnull=True)
+ null_contacts.delete()
for project in orm.ProjectRelationship.objects.all():
project.user = project.contact.user
project.save()
|
added a check to delete project relationships without users
|
py
|
diff --git a/python/ray/tune/utils/placement_groups.py b/python/ray/tune/utils/placement_groups.py
index <HASH>..<HASH> 100644
--- a/python/ray/tune/utils/placement_groups.py
+++ b/python/ray/tune/utils/placement_groups.py
@@ -508,6 +508,8 @@ class PlacementGroupManager:
head_bundle = pg.bundle_specs[0].copy()
num_cpus = head_bundle.pop("CPU", 0)
num_gpus = head_bundle.pop("GPU", 0)
+ memory = head_bundle.pop("memory", None)
+ object_store_memory = head_bundle.pop("object_store_memory", None)
# Only custom resources remain in `head_bundle`
resources = head_bundle
@@ -517,6 +519,8 @@ class PlacementGroupManager:
placement_group_capture_child_tasks=True,
num_cpus=num_cpus,
num_gpus=num_gpus,
+ memory=memory,
+ object_store_memory=object_store_memory,
resources=resources,
)
else:
|
[tune] Fix memory resources for head bundle (#<I>) Fixes memory and object_store_memory actor options not being set properly for the Tune trainable.
|
py
|
diff --git a/albumentations/augmentations/transforms.py b/albumentations/augmentations/transforms.py
index <HASH>..<HASH> 100644
--- a/albumentations/augmentations/transforms.py
+++ b/albumentations/augmentations/transforms.py
@@ -435,6 +435,7 @@ class RandomSizedCrop(DualTransform):
Image types:
uint8, float32
"""
+
def __init__(self, min_max_height, height, width, w2h_ratio=1., interpolation=cv2.INTER_LINEAR, p=1.0):
super(RandomSizedCrop, self).__init__(p)
self.height = height
|
blank line after docstring in RandomSizedCrop
|
py
|
diff --git a/sc2/helpers/control_group.py b/sc2/helpers/control_group.py
index <HASH>..<HASH> 100644
--- a/sc2/helpers/control_group.py
+++ b/sc2/helpers/control_group.py
@@ -15,14 +15,14 @@ class ControlGroup(set):
def empty(self):
return self.amount == 0
- def add_unit(self, units):
+ def add_unit(self, unit):
self.add(unit.tag)
def add_units(self, units):
for unit in units:
self.add_unit(unit)
- def remove_unit(self, units):
+ def remove_unit(self, unit):
self.remove(unit.tag)
def remove_units(self, units):
|
Fix typo in control_group.py There was a typo in the add_unit and remove_unit methods
|
py
|
diff --git a/tests/test_ruleset.py b/tests/test_ruleset.py
index <HASH>..<HASH> 100755
--- a/tests/test_ruleset.py
+++ b/tests/test_ruleset.py
@@ -12,13 +12,14 @@ import unittest
from smartfilesorter.ruleset import RuleSet
from smartfilesorter.matchrules.filenamestartswith import FilenameStartsWith
+from smartfilesorter.matchrules.fileextensionis import FileExtensionIs
from smartfilesorter.actionrules.stopprocessing import StopProcessing
class TestRuleSet(unittest.TestCase):
def setUp(self):
# Create mock objects for the plugins
- match_plugins = {'filename-starts-with': FilenameStartsWith}
+ match_plugins = {'filename-starts-with': FilenameStartsWith, 'file-extension-is': FileExtensionIs}
action_plugins = {'stop-processing': StopProcessing}
# Represents a single section of the YAML config file
test_yaml = {'action': 'stop-processing',
|
test_ruleset Fixed bug in test where the test was calling for a plugin that was not being loaded in the mock plugins object
|
py
|
diff --git a/src/calmjs/parse/unparsers/walker.py b/src/calmjs/parse/unparsers/walker.py
index <HASH>..<HASH> 100644
--- a/src/calmjs/parse/unparsers/walker.py
+++ b/src/calmjs/parse/unparsers/walker.py
@@ -208,9 +208,9 @@ class Dispatcher(object):
def token(self, token, node, value):
return self.__token_handler(token, self, node, value)
- def __call__(self, rule):
+ def layout(self, rule):
"""
- This is to find a callable for the particular rule encountered.
+ Get handler for this layout rule.
"""
return self.__layout_handlers.get(rule, NotImplemented)
@@ -350,7 +350,7 @@ def walk(
# first pass: generate both the normalized/finalized lrcs.
for lrc in layout_rule_chunks:
rule_stack.append(lrc.rule)
- handler = dispatcher(tuple(rule_stack))
+ handler = dispatcher.layout(tuple(rule_stack))
if handler is NotImplemented:
# not implemented so we keep going; also add the chunk
# to the stack.
|
Dispatcher call method renamed to layout - Since that is all it does now.
|
py
|
diff --git a/scoped_nodes.py b/scoped_nodes.py
index <HASH>..<HASH> 100644
--- a/scoped_nodes.py
+++ b/scoped_nodes.py
@@ -284,6 +284,8 @@ class Module(LocalsDictNodeNG):
return [self.import_module(name, relative_only=True)]
except AstroidBuildingException:
raise NotFoundError(name)
+ except SyntaxError:
+ raise NotFoundError(name)
except Exception:# XXX pylint tests never pass here; do we need it?
import traceback
traceback.print_exc()
|
Do not print the traceback for SyntaxErrors.
|
py
|
diff --git a/xero/basemanager.py b/xero/basemanager.py
index <HASH>..<HASH> 100644
--- a/xero/basemanager.py
+++ b/xero/basemanager.py
@@ -70,6 +70,9 @@ class BaseManager(object):
'IncludeInEmails',
'SentToContact',
'CanApplyToRevenue',
+ 'IsReconciled',
+ 'EnablePaymentsToAccount',
+ 'ShowInExpenseClaims'
)
DECIMAL_FIELDS = (
'Hours',
|
Extend BOOLEAN_FIELDS tuple Add IsReconciled, EnablePaymentsToAccount and ShowInExpenseClaims to BOOLEAN_FIELDS. This fixes issue #<I>
|
py
|
diff --git a/plotnine/mapping/evaluation.py b/plotnine/mapping/evaluation.py
index <HASH>..<HASH> 100644
--- a/plotnine/mapping/evaluation.py
+++ b/plotnine/mapping/evaluation.py
@@ -42,6 +42,14 @@ class stage:
self.after_scale = after_scale
def __repr__(self):
+ # Shorter representation when the mapping happens at a
+ # single stage
+ if self.after_stat is None and self.after_scale is None:
+ return f'{repr(self.start)}'
+ if self.start is None and self.after_scale is None:
+ return f'after_stat({repr(self.after_stat)})'
+ if self.start is None and self.after_stat is None:
+ return f'after_scale({repr(self.after_scale)})'
return (
f'stage(start={repr(self.start)}, '
f'after_stat={repr(self.after_stat)}, '
|
DOC: Use shorter repr for aes stages if possible The docs get to show `y=after_stat('count')` instead of `y=stage(start=None, after_stat='count', after_scale=None)`!
|
py
|
diff --git a/dipper/utils/GraphUtils.py b/dipper/utils/GraphUtils.py
index <HASH>..<HASH> 100644
--- a/dipper/utils/GraphUtils.py
+++ b/dipper/utils/GraphUtils.py
@@ -63,7 +63,7 @@ class GraphUtils:
def add_property_axioms(graph, properties):
ontology_graph = ConjunctiveGraph()
GH = 'https://raw.githubusercontent.com'
- OBO = 'https://purl.obolibrary.org/obo'
+ OBO = 'http://purl.obolibrary.org/obo'
ontologies = [
OBO + '/sepio.owl',
OBO + '/geno.owl',
|
OBO url https -> http (the former apparently doesn't resolve, causes multiple ingest failures
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -67,8 +67,6 @@ setup(
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.5",
- "Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
|
Remove pypi classifiers for py<I>/<I> (#<I>)
|
py
|
diff --git a/duct.py b/duct.py
index <HASH>..<HASH> 100644
--- a/duct.py
+++ b/duct.py
@@ -398,8 +398,8 @@ class Expression:
def dir(self, path):
r"""Set the working directory for the expression.
- >>> cmd("pwd").dir("/tmp").read()
- '/tmp'
+ >>> cmd("pwd").dir("/").read()
+ '/'
Note that :func:`dir` does *not* affect the meaning of relative exe
paths. For example in the expression ``cmd("./foo.sh").dir("bar")``,
|
avoid using /tmp in tests, because it's a symlink on macOS
|
py
|
diff --git a/ddmrp/models/mrp_bom.py b/ddmrp/models/mrp_bom.py
index <HASH>..<HASH> 100644
--- a/ddmrp/models/mrp_bom.py
+++ b/ddmrp/models/mrp_bom.py
@@ -28,8 +28,10 @@ class MrpBom(models.Model):
)
def _get_search_buffer_domain(self):
- product = self.product_id or \
- self.product_tmpl_id.product_variant_ids[0]
+ product = self.product_id
+ if not product:
+ if self.product_tmpl_id.product_variant_ids:
+ product = self.product_tmpl_id.product_variant_ids[0]
domain = [('product_id', '=', product.id),
('buffer_profile_id', '!=', False)]
if self.location_id:
|
[REF]Check product variant recordset Check product variant recordset, before referencing them.
|
py
|
diff --git a/taskw/warrior.py b/taskw/warrior.py
index <HASH>..<HASH> 100644
--- a/taskw/warrior.py
+++ b/taskw/warrior.py
@@ -629,8 +629,7 @@ class TaskWarriorShellout(TaskWarriorBase):
"""
query_args = taskw.utils.encode_query(filter_dict, self.get_version())
return self._get_task_objects(
- *query_args,
- 'export'
+ *(query_args + ['export'])
)
def get_task(self, **kw):
@@ -668,7 +667,7 @@ class TaskWarriorShellout(TaskWarriorBase):
else:
search = [value]
- task = self._get_task_objects(*search, 'export')
+ task = self._get_task_objects(*(search + ['export']))
if task:
if isinstance(task, list):
|
TaskWarriorShellout: Workaround Python2's lack of support of PEP <I> The additional unpacking generalization is only available in Python<I>+, hence we need to construct a new list before unpacking instead. See: <URL>
|
py
|
diff --git a/chess/svg.py b/chess/svg.py
index <HASH>..<HASH> 100644
--- a/chess/svg.py
+++ b/chess/svg.py
@@ -123,7 +123,7 @@ def board(board=None, squares=None, flipped=False, coordinates=True, lastmove=No
"""
Renders a board with pieces and/or selected squares as an SVG.
- :param board: A :class:`chess.Board` for a chessboard with pieces or
+ :param board: A :class:`chess.BaseBoard` for a chessboard with pieces or
``None`` (the default) for a chessboard without pieces.
:param squares: A :class:`chess.SquareSet` with selected squares.
:param flipped: Pass ``True`` to flip the board.
|
chess.svg.board requires just a BaseBoard (#<I>)
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@ def generate_readme_rst():
subprocess.call(pandoc_cmd)
except (IOError, OSError) as e:
print('Could not run "pandoc". Error: %s' % e, file=sys.stderr)
- sys.exit(1)
+ print('Generating only a stub instead of the real documentation.')
def read_file(filename, alt=None):
|
setup.py: Be more lenient if pandoc can't be found. This was brought to my attention by @victorwestmann. Thanks.
|
py
|
diff --git a/udiskie/cli.py b/udiskie/cli.py
index <HASH>..<HASH> 100644
--- a/udiskie/cli.py
+++ b/udiskie/cli.py
@@ -49,10 +49,9 @@ def get_backend(clsname, version=None):
try:
return udisks1()
except DBusException:
- msg = sys.exc_info()[1].get_dbus_message()
log = logging.getLogger(__name__)
- log.warning('Failed to connect UDisks1 dbus service: %s.\n'
- 'Falling back to UDisks2 [experimental].' % (msg,))
+ log.warning('Failed to connect UDisks1 dbus service.\n'
+ 'Falling back to UDisks2 [experimental].')
return udisks2()
elif version == 1:
return udisks1()
|
Fix incorrect usage of GLib.GError GLib.GError has no method `.get_dbus_message()` like dbus.DBusException had. Instead, it has an attribute `.message`. But this is not very useful for the user anyway in this case, so we just leave it out.
|
py
|
diff --git a/project/apps.py b/project/apps.py
index <HASH>..<HASH> 100644
--- a/project/apps.py
+++ b/project/apps.py
@@ -16,28 +16,3 @@ class Home(project.handler.RequestHandler):
def get(self):
self.render('templates/apps/home.html', title='Hello, World!');
-
-class Foo(project.handler.RequestHandler):
-
- def get(self):
- self.render('templates/apps/home.html', title='Hello, World!');
-
-class Bar(project.handler.RequestHandler):
-
- def get(self):
- self.render('templates/apps/home.html', title='Hello, World!');
-
-class Baz(project.handler.RequestHandler):
-
- def get(self):
- self.render('templates/apps/home.html', title='Hello, World!');
-
-class Spam(project.handler.RequestHandler):
-
- def get(self):
- self.render('templates/apps/home.html', title='Hello, World!');
-
-class Eggs(project.handler.RequestHandler):
-
- def get(self):
- self.render('templates/apps/home.html', title='Hello, World!');
|
Cleaned up how RequestHandlers are parsed from the config, and altered YAML config file format for RequestHandlers to remove the need to manually number handlers.
|
py
|
diff --git a/rqalpha/core/executor.py b/rqalpha/core/executor.py
index <HASH>..<HASH> 100644
--- a/rqalpha/core/executor.py
+++ b/rqalpha/core/executor.py
@@ -56,11 +56,12 @@ class Executor(object):
if self._last_before_trading:
on_settlement()
- self._last_before_trading = e.trading_dt.date()
- update_time(e)
- event_bus.publish_event(PRE_BEFORE_TRADING)
- event_bus.publish_event(Event(EVENT.BEFORE_TRADING, calendar_dt=e.calendar_dt, trading_dt=e.trading_dt))
- event_bus.publish_event(POST_BEFORE_TRADING)
+ if not self._env.config.extra.is_hold:
+ self._last_before_trading = e.trading_dt.date()
+ update_time(e)
+ event_bus.publish_event(PRE_BEFORE_TRADING)
+ event_bus.publish_event(Event(EVENT.BEFORE_TRADING, calendar_dt=e.calendar_dt, trading_dt=e.trading_dt))
+ event_bus.publish_event(POST_BEFORE_TRADING)
PRE_BAR.bar_dict = bar_dict
POST_BAR.bar_dict = bar_dict
|
publish BEFORE_TRADING only when strategy is not holding
|
py
|
diff --git a/hdl_toolkit/synthetisator/rtlLevel/context.py b/hdl_toolkit/synthetisator/rtlLevel/context.py
index <HASH>..<HASH> 100644
--- a/hdl_toolkit/synthetisator/rtlLevel/context.py
+++ b/hdl_toolkit/synthetisator/rtlLevel/context.py
@@ -144,7 +144,7 @@ class Context():
# instanciate subUnits in architecture
for u in self.subUnits:
- arch.componentInstances.append(u.asVHDLComponentInstance(u._name))
+ arch.componentInstances.append(u.asVHDLComponentInstance(u._name + "_inst"))
# add components in architecture
for su in distinctBy(self.subUnits, lambda x: x.name):
|
fix: instance names now carry an `_inst` suffix to prevent name collisions with entities/components
|
py
|
diff --git a/src/wormhole/servers/relay.py b/src/wormhole/servers/relay.py
index <HASH>..<HASH> 100644
--- a/src/wormhole/servers/relay.py
+++ b/src/wormhole/servers/relay.py
@@ -406,7 +406,8 @@ class RelayServer(service.MultiService):
t = internet.TimerService(EXPIRATION_CHECK_PERIOD,
self.relay.prune_old_channels)
t.setServiceParent(self)
- self.transit = Transit()
- self.transit.setServiceParent(self) # for the timer
- self.transport_service = strports.service(transitport, self.transit)
- self.transport_service.setServiceParent(self)
+ if transitport:
+ self.transit = Transit()
+ self.transit.setServiceParent(self) # for the timer
+ self.transport_service = strports.service(transitport, self.transit)
+ self.transport_service.setServiceParent(self)
|
relay: make it possible to omit the Transit server
|
py
|
diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py
index <HASH>..<HASH> 100644
--- a/salt/modules/dockermod.py
+++ b/salt/modules/dockermod.py
@@ -3880,8 +3880,9 @@ def save(name,
saved_path = salt.utils.files.mkstemp()
else:
saved_path = path
-
- cmd = ['docker', 'save', '-o', saved_path, inspect_image(name)['Id']]
+ # use the image name if its valid if not use the image id
+ image_to_save = name if name in inspect_image(name)['RepoTags'] else inspect_image(name)['Id']
+ cmd = ['docker', 'save', '-o', saved_path, image_to_save]
time_started = time.time()
result = __salt__['cmd.run_all'](cmd, python_shell=False)
if result['retcode'] != 0:
|
Have docker.save use the image name when it is valid; otherwise fall back to the image id. Fixes a problem when loading an image that was saved by id. Issue #<I>
|
py
|
diff --git a/tests/providers/test_cloudxns.py b/tests/providers/test_cloudxns.py
index <HASH>..<HASH> 100644
--- a/tests/providers/test_cloudxns.py
+++ b/tests/providers/test_cloudxns.py
@@ -7,10 +7,15 @@ import pytest
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from define_tests.TheTests
-class DnsParkProviderTests(TestCase, IntegrationTests):
+class CloudXNSProviderTests(TestCase, IntegrationTests):
Provider = Provider
provider_name = 'cloudxns'
domain = 'capsulecd.com'
def _filter_post_data_parameters(self):
return ['login_token']
+
+ # TODO: the following skipped suite and fixtures should be enabled
+ @pytest.mark.skip(reason="new test, missing recording")
+ def test_Provider_when_calling_update_record_should_modify_record_name_specified(self):
+ return
\ No newline at end of file
|
fixing misnamed test class, skipping tests without recordings.
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.