diff
stringlengths 139
3.65k
| message
stringlengths 8
627
| diff_languages
stringclasses 1
value |
|---|---|---|
diff --git a/hdbscan/hdbscan_.py b/hdbscan/hdbscan_.py
index <HASH>..<HASH> 100644
--- a/hdbscan/hdbscan_.py
+++ b/hdbscan/hdbscan_.py
@@ -641,7 +641,8 @@ class HDBSCAN (BaseEstimator, ClusterMixin):
core_dist_n_jobs : int, optional (default=4)
Number of parallel jobs to run in core distance computations (if
- supported by the specific algorithm).
+ supported by the specific algorithm). For ``core_dist_n_jobs``
+ below -1, (n_cpus + 1 + core_dist_n_jobs) are used.
allow_single_cluster : bool, optional (default=False)
By default HDBSCAN* will not produce a single cluster, setting this
|
fix for issue #<I> supplying n_jobs as a parameter for robust single linkage.
|
py
|
diff --git a/version.py b/version.py
index <HASH>..<HASH> 100755
--- a/version.py
+++ b/version.py
@@ -1,2 +1,2 @@
#/usr/bin/env python
-VERSION = '1.3.0-s928'
+VERSION = '1.3.0-s934'
|
Ready for release s<I>
|
py
|
diff --git a/SpiffWorkflow/util/deep_merge.py b/SpiffWorkflow/util/deep_merge.py
index <HASH>..<HASH> 100644
--- a/SpiffWorkflow/util/deep_merge.py
+++ b/SpiffWorkflow/util/deep_merge.py
@@ -14,7 +14,7 @@ class DeepMerge(object):
if isinstance(a[key], list) and isinstance(b[key], list):
a[key] = list(set().union(a[key], b[key]))
else:
- a[key] == b[key]
+ a[key] = b[key]
else:
a[key] = b[key]
return a
|
fixed problem where we were doing equivilance test rather than assignment
|
py
|
diff --git a/pylint_plugin_utils/__init__.py b/pylint_plugin_utils/__init__.py
index <HASH>..<HASH> 100644
--- a/pylint_plugin_utils/__init__.py
+++ b/pylint_plugin_utils/__init__.py
@@ -1,8 +1,6 @@
import sys
-try:
- from pylint.exceptions import UnknownMessageError as UnknownMessage
-except ImportError:
- from pylint.exceptions import UnknownMessage
+
+from pylint.exceptions import UnknownMessageError
def get_class(module_name, kls):
@@ -137,7 +135,7 @@ def suppress_message(linter, checker_method, message_id_or_symbol, test_func):
for pylint_message in pylint_messages
for symbol in (pylint_message.msgid, pylint_message.symbol)
if symbol is not None]
- except UnknownMessage:
+ except UnknownMessageError:
# This can happen due to mismatches of pylint versions and plugin expectations of available messages
symbols = [message_id_or_symbol]
|
Cleanup import and drop support for pylint < <I>
|
py
|
diff --git a/tests/ipopo_test.py b/tests/ipopo_test.py
index <HASH>..<HASH> 100644
--- a/tests/ipopo_test.py
+++ b/tests/ipopo_test.py
@@ -190,6 +190,8 @@ class DecoratorsTest(unittest.TestCase):
# Assert it has been validated
ref = context.get_service_reference(svc_spec)
+ self.assertIsNotNone(ref, "No reference found (component not validated)")
+
compo = context.get_service(ref)
self.assertEquals(compo.states, [IPopoEvent.INSTANTIATED,
|
Added a small assertion in ipopo tests
|
py
|
diff --git a/python/perspective/perspective/manager/manager.py b/python/perspective/perspective/manager/manager.py
index <HASH>..<HASH> 100644
--- a/python/perspective/perspective/manager/manager.py
+++ b/python/perspective/perspective/manager/manager.py
@@ -231,8 +231,11 @@ class PerspectiveManager(object):
elif msg["method"] != "delete":
# otherwise parse args as list
result = getattr(table_or_view, msg["method"])(*args)
- if type(result) == bytes:
- # return result to the client without JSON serialization
+ if isinstance(result, bytes) and msg["method"] != "to_csv":
+ # return a binary to the client without JSON serialization,
+ # i.e. when we return an Arrow. If a method is added that
+ # returns a string, this condition needs to be updated as
+ # an Arrow binary is both `str` and `bytes` in Python 2.
self._process_bytes(result, msg, post_callback)
else:
# return the result to the client
|
fix csv serialization issue in python2 jupyter
|
py
|
diff --git a/fireplace/cards/tgt/neutral_common.py b/fireplace/cards/tgt/neutral_common.py
index <HASH>..<HASH> 100644
--- a/fireplace/cards/tgt/neutral_common.py
+++ b/fireplace/cards/tgt/neutral_common.py
@@ -15,7 +15,8 @@ AT_082e = buff(atk=1)
class AT_083:
inspire = Buff(SELF, "AT_083e")
-AT_083e = buff(windfury=True)
+class AT_083e:
+ windfury = SET(1)
# Lance Carrier
|
Change Dragonhawk Rider's buff to set Windfury to 1 This is needed in an int-based Windfury model
|
py
|
diff --git a/easyaudit/apps.py b/easyaudit/apps.py
index <HASH>..<HASH> 100644
--- a/easyaudit/apps.py
+++ b/easyaudit/apps.py
@@ -3,6 +3,7 @@ from django.apps import AppConfig
class EasyAuditConfig(AppConfig):
name = 'easyaudit'
verbose_name = 'Easy Audit Application'
+ default_auto_field = 'django.db.models.AutoField'
def ready(self):
from easyaudit.signals import auth_signals, model_signals, request_signals
\ No newline at end of file
|
fix: migration missing for field ID (#<I>) * fix: migration missing for field ID * add default_auto_field to EasyAuditConfig
|
py
|
diff --git a/setuptools/archive_util.py b/setuptools/archive_util.py
index <HASH>..<HASH> 100755
--- a/setuptools/archive_util.py
+++ b/setuptools/archive_util.py
@@ -8,6 +8,8 @@ import posixpath
import contextlib
from distutils.errors import DistutilsError
+from setuptools.extern import six
+
from pkg_resources import ensure_directory, ContextualZipFile
@@ -164,6 +166,8 @@ def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
if final_dst:
if final_dst.endswith(os.sep):
final_dst = final_dst[:-1]
+ if six.PY2:
+ final_dst = final_dst.encode(tarfile.ENCODING)
try:
# XXX Ugh
tarobj._extract_member(member, final_dst)
|
_extract_member needs final_dst to be a native string. Ref #<I>.
|
py
|
diff --git a/data/migrations/deb/1_3_428_to_1_3_429.py b/data/migrations/deb/1_3_428_to_1_3_429.py
index <HASH>..<HASH> 100644
--- a/data/migrations/deb/1_3_428_to_1_3_429.py
+++ b/data/migrations/deb/1_3_428_to_1_3_429.py
@@ -163,6 +163,8 @@ def migrate_txn_log(db_dir, db_name):
val = ledger_txn_serializer.deserialize(val)
if val.get(TXN_TYPE) == REVOC_REG_DEF:
val = add_tag_into_cred_def_id(val)
+ if val == False:
+ return False
new_val = transform_to_new_format(txn=val, seq_no=int(key))
digest = put_into_seq_no_db(new_val)
# add digest into txn
|
[TAG migration] return if something wrong with cred_def_id replacing
|
py
|
diff --git a/glue/statedb.py b/glue/statedb.py
index <HASH>..<HASH> 100644
--- a/glue/statedb.py
+++ b/glue/statedb.py
@@ -285,7 +285,7 @@ class StateSegmentDatabase:
sql += "start_time,start_time_ns,end_time,end_time_ns,lfn_id)"
sql += "VALUES (?,?,?,?,?,?,?,?)"
- for attempt in range(4):
+ for attempt in range(6):
try:
self.cursor.execute(sql, (self.process_id,segment_id,sv_id,
start_time,start_time_ns,end_time,end_time_ns,self.lfn_id))
@@ -295,7 +295,7 @@ class StateSegmentDatabase:
# catch a deadlock and re-try up to three times
except mx.ODBC.DB2.InternalError, e:
if e[1] == -911 and int(e[0]) == 40001:
- if attempt < 3:
+ if attempt < 5:
time.sleep(random.randrange(0,5,1))
else:
msg = "error inserting segment information : %s" % e
|
try 5 times if we encounter a deadlock
|
py
|
diff --git a/galpy/orbit_src/linearOrbit.py b/galpy/orbit_src/linearOrbit.py
index <HASH>..<HASH> 100644
--- a/galpy/orbit_src/linearOrbit.py
+++ b/galpy/orbit_src/linearOrbit.py
@@ -74,16 +74,15 @@ class linearOrbit(OrbitTop):
HISTORY:
2010-09-15 - Written - Bovy (NYU)
"""
- if not kwargs.has_key('pot') or kwargs['pot'] is None:
+ if not 'pot' in kwargs or kwargs['pot'] is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit or specify pot=")
- if kwargs.has_key('pot') and kwargs['pot'] is None:
+ if 'pot' in kwargs and kwargs['pot'] is None:
kwargs.pop('pot')
else:
- pot= kwargs['pot']
- kwargs.pop('pot')
+ pot= kwargs.pop('pot')
if len(args) > 0:
t= args[0]
else:
|
python3 dictionary compatibility for linearOrbit plus improvements
|
py
|
diff --git a/examples/tests.py b/examples/tests.py
index <HASH>..<HASH> 100644
--- a/examples/tests.py
+++ b/examples/tests.py
@@ -247,6 +247,12 @@ class GroupTestCase(TestCase):
qs = LdapGroup.objects.all()
self.assertEquals(len(qs), 0)
+ def test_bulk_delete_none(self):
+ LdapGroup.objects.none().delete()
+
+ qs = LdapGroup.objects.all()
+ self.assertEquals(len(qs), 3)
+
def test_slice(self):
qs = LdapGroup.objects.order_by('gid')
objs = list(qs)
|
test deletion of .none() does nothing
|
py
|
diff --git a/skink/remote.py b/skink/remote.py
index <HASH>..<HASH> 100644
--- a/skink/remote.py
+++ b/skink/remote.py
@@ -65,6 +65,10 @@ class JSObject(object):
def _eval(self):
return self._page.eval(self._command)
+ def __add__(self, other):
+ assert type(other) == JSObject
+ return JSObject(self._command + ' + ' + other._command, self._page)
+
class RemotePage(object):
|
Added addition between JSObkects
|
py
|
diff --git a/plenum/server/node.py b/plenum/server/node.py
index <HASH>..<HASH> 100644
--- a/plenum/server/node.py
+++ b/plenum/server/node.py
@@ -383,7 +383,12 @@ class Node(HasActionQueue, Motor, Propagator, MessageProcessor, HasFileStorage,
# the batch in which those transactions were included. The txn range is
# exclusive of last seq no so to store txns from 1 to 100 add a range
# of `1:101`
- self.txn_seq_range_to_3phase_key = {} # type: Dict[int, List[IntervalTree, int]]
+ # Second element of list stands for freshness 3pc.
+ # It is set to None by default and when txn batch ordered.
+ # When it is None LedgerStatus takes last from IntervalTree
+ # It is set to 3pc number when freshness or empty batch ordered
+ # When it is set to some number, LedgerStatus takes it
+ self.txn_seq_range_to_3phase_key = {} # type: Dict[int, List[IntervalTree, Tuple[int, int]]]
# Number of rounds of catchup done during a view change.
self.catchup_rounds_without_txns = 0
|
INDY-<I>: Additional comment
|
py
|
diff --git a/spinoff/util/testing/actor.py b/spinoff/util/testing/actor.py
index <HASH>..<HASH> 100644
--- a/spinoff/util/testing/actor.py
+++ b/spinoff/util/testing/actor.py
@@ -164,7 +164,7 @@ def test_errorcollector_can_be_used_with_assert_raises():
@contextmanager
-def expect_failure(exc, message=None, timeout=0.1):
+def expect_failure(exc, message=None, timeout=None):
with assert_raises(exc, message=message, timeout=timeout) as basket:
with ErrorCollector():
yield basket
@@ -182,7 +182,7 @@ class DebugActor(object):
@contextmanager
-def assert_one_event(ev, timeout=0.1):
+def assert_one_event(ev, timeout=None):
result = Events.consume_one(type(ev) if not isinstance(ev, type) else ev)
try:
yield
|
Removed default timeouts on all async testing helpers
|
py
|
diff --git a/rui/rui.py b/rui/rui.py
index <HASH>..<HASH> 100755
--- a/rui/rui.py
+++ b/rui/rui.py
@@ -125,7 +125,10 @@ class World(object):
Gets a specific group
group is the string of a Group
'''
- return self._groups[group]
+ if group in self._groups:
+ return self._groups[group]
+ else:
+ return []
def get_delta(self):
'''
|
Groups will now return an empty list if there is no group with that name
|
py
|
diff --git a/util/plot/save.py b/util/plot/save.py
index <HASH>..<HASH> 100644
--- a/util/plot/save.py
+++ b/util/plot/save.py
@@ -97,7 +97,7 @@ def data(file, data, land_value=np.nan, no_data_value=np.inf, land_brightness=0,
for z in range(z_len):
# append depth to filename
if file_add_depth_info:
- depth_str = str(z + 1).zfill(len(z_len_str)) + '_of_' + z_len_str
+ depth_str = z_len_str + '_' + str(z + 1).zfill(len(z_len_str))
if file_add_time_info:
current_file_with_z = file.format(depth=depth_str, time='{time}')
else:
@@ -108,7 +108,7 @@ def data(file, data, land_value=np.nan, no_data_value=np.inf, land_brightness=0,
for t in range(t_len):
# append time to filename
if file_add_time_info:
- time_str = str(t + 1).zfill(len(t_len_str)) + '_of_' + t_len_str
+ time_str = t_len_str + '_' + str(t + 1).zfill(len(t_len_str))
current_file = current_file_with_z.format(time=time_str)
else:
current_file = current_file_with_z
|
API: util.plot.save: data plot now uses time and depth dimension first in file name
|
py
|
diff --git a/addok/shell.py b/addok/shell.py
index <HASH>..<HASH> 100644
--- a/addok/shell.py
+++ b/addok/shell.py
@@ -415,14 +415,19 @@ class Cmd(cmd.Cmd):
def do_SCRIPT(self, args):
"""Run a Lua script. Takes the raw Redis arguments.
- SCRIPT manual_scan number_of_keys key1 key2… arg1 arg2
+ SCRIPT script_name number_of_keys key1 key2… arg1 arg2
"""
try:
name, keys_count, *args = args.split()
except ValueError:
print(red('Not enough arguments'))
return
- keys_count = int(keys_count)
+ try:
+ keys_count = int(keys_count)
+ except ValueError:
+ print(red('You must pass the number of keys as first argument'))
+ self.do_HELP('SCRIPT')
+ return
keys = args[:keys_count]
args = args[keys_count:]
try:
@@ -433,6 +438,9 @@ class Cmd(cmd.Cmd):
except DB.Error as e:
print(red(e))
return
+ if not isinstance(output, list):
+ # Script may return just an integer.
+ output = [output]
for line in output:
print(white(line))
|
Be more defensive when running script from shell
|
py
|
diff --git a/fermipy/gtanalysis.py b/fermipy/gtanalysis.py
index <HASH>..<HASH> 100644
--- a/fermipy/gtanalysis.py
+++ b/fermipy/gtanalysis.py
@@ -4930,12 +4930,12 @@ class GTBinnedAnalysis(fermipy.config.Configurable):
if scale<1e-20:
self.logger.warning("The expscale parameter was zero, setting it to 1e-8")
scale = 1e-8
- if 'SRCMAP_SCALE' in hdu.header:
- old_scale = hdu.header['SRCMAP_SCALE']
+ if 'EXPSCALE' in hdu.header:
+ old_scale = hdu.header['EXPSCALE']
else:
old_scale = 1.0
hdu.data *= scale/old_scale
- hdu.header['SRCMAP_SCALE'] = scale
+ hdu.header['EXPSCALE'] = scale
srcmap.writeto(self.files['srcmap'], clobber=True)
|
Change name of header keyword for exposure correction.
|
py
|
diff --git a/spyder/plugins/editor/widgets/base.py b/spyder/plugins/editor/widgets/base.py
index <HASH>..<HASH> 100644
--- a/spyder/plugins/editor/widgets/base.py
+++ b/spyder/plugins/editor/widgets/base.py
@@ -65,7 +65,6 @@ class TextEditBaseWidget(QPlainTextEdit, BaseEditMixin):
# Code snippets
self.code_snippets = True
- self.textChanged.connect(self.changed)
self.cursorPositionChanged.connect(self.cursor_position_changed)
self.indent_chars = " "*4
@@ -214,10 +213,6 @@ class TextEditBaseWidget(QPlainTextEdit, BaseEditMixin):
self.extra_selections_dict[key] = []
self.update()
- def changed(self):
- """Emit changed signal"""
- self.modificationChanged.emit(self.document().isModified())
-
def get_visible_block_numbers(self):
"""Get the first and last visible block numbers."""
first = self.firstVisibleBlock().blockNumber()
|
Do not check for change on every keystroke
|
py
|
diff --git a/examples/update/update-campaign.py b/examples/update/update-campaign.py
index <HASH>..<HASH> 100755
--- a/examples/update/update-campaign.py
+++ b/examples/update/update-campaign.py
@@ -8,13 +8,6 @@ import string
import sys
-def _read_file(f):
- fh = open(f)
- d = fh.read()
- fh.close()
- return d
-
-
def _rand_id(N=6):
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(N))
|
Remove unused method in update campaign example
|
py
|
diff --git a/src/python/dxpy/scripts/dx_bed_to_spans.py b/src/python/dxpy/scripts/dx_bed_to_spans.py
index <HASH>..<HASH> 100755
--- a/src/python/dxpy/scripts/dx_bed_to_spans.py
+++ b/src/python/dxpy/scripts/dx_bed_to_spans.py
@@ -595,7 +595,7 @@ def import_BED(**args):
return job_outputs
def main(**args):
- return import_BED(**args)
+ import_BED(**args)
if __name__ == '__main__':
import_BED()
|
Fix exit code of dx-bed-to-spans.
|
py
|
diff --git a/jax/interpreters/parallel.py b/jax/interpreters/parallel.py
index <HASH>..<HASH> 100644
--- a/jax/interpreters/parallel.py
+++ b/jax/interpreters/parallel.py
@@ -291,18 +291,23 @@ def reducer_papply(prim, cprim, name, vals, papply_axes, input_shape, axes):
new_papply_axis = papply_axis - onp.sum(onp.less(other_axes, papply_axis))
return result, new_papply_axis
+
def broadcasting_papply(prim, name, vals, axes, **params):
x, y = vals
xdim, ydim = axes
if xdim is None:
+ assert x.shape[ydim] == 1
+ x = x.reshape(onp.delete(x.shape, ydim))
return prim.bind(x, y, **params), ydim
elif ydim is None:
+ assert y.shape[xdim] == 1
+ y = y.reshape(onp.delete(y.shape, xdim))
return prim.bind(x, y, **params), xdim
elif xdim == ydim:
return prim.bind(x, y, **params), xdim
else:
- x = psplit(x, axis_name, xdim)
+ x = psplit(x, axis_name, ydim)
return prim.bind(x, y, **params), ydim
|
fix shape mismatch in broadcasting parallelization rule Namely, remove the dimension of non-split operands matching the split operand's hidden dimension.
|
py
|
diff --git a/zipline/gens/utils.py b/zipline/gens/utils.py
index <HASH>..<HASH> 100644
--- a/zipline/gens/utils.py
+++ b/zipline/gens/utils.py
@@ -26,16 +26,6 @@ from zipline.protocol import (
)
-def mock_raw_event(sid, dt):
- event = {
- 'sid': sid,
- 'dt': dt,
- 'price': 1.0,
- 'volume': 1
- }
- return event
-
-
def alternate(g1, g2):
"""Specialized version of roundrobin for just 2 generators."""
for e1, e2 in izip_longest(g1, g2):
|
MAINT: Removes unused mock data function from gens.utils This function is unused, and on the general path of moving testing functions out of gens.utils # Please enter the commit message for your changes. Lines starting
|
py
|
diff --git a/anchore/controller.py b/anchore/controller.py
index <HASH>..<HASH> 100644
--- a/anchore/controller.py
+++ b/anchore/controller.py
@@ -53,7 +53,7 @@ class Controller(object):
action = polinput[2]
modparams = ""
if (len(polinput) > 3):
- modparams = polinput[3]
+ modparams = ':'.join(polinput[3:])
if module not in policies:
policies[module] = {}
|
ensure that all params are passed from policy definition
|
py
|
diff --git a/mt940/processors.py b/mt940/processors.py
index <HASH>..<HASH> 100644
--- a/mt940/processors.py
+++ b/mt940/processors.py
@@ -145,7 +145,6 @@ def _parse_mt940_details(detail_str):
tmp[segment_type] = segment if not segment_type else segment[2:]
for key, value in tmp.items():
- print('%s = %s' % (key, value))
if key in DETAIL_KEYS:
result[DETAIL_KEYS[key]] = value
elif key == '33':
|
Removed print command from processor.py.
|
py
|
diff --git a/tests/functional/test_examples.py b/tests/functional/test_examples.py
index <HASH>..<HASH> 100644
--- a/tests/functional/test_examples.py
+++ b/tests/functional/test_examples.py
@@ -20,7 +20,8 @@ def test_examples():
'../../examples/*/*.py')
# Filter out __init__.py
- examples = [f for f in glob.glob(examples_pat) if f != '__init__.py']
+ examples = [f for f in
+ sorted(glob.glob(examples_pat)) if f != '__init__.py']
for e in examples:
print("Running example:", e)
example_dir = os.path.dirname(e)
|
Process examples in deterministic order Sort input file list so that .pyc files in python-textX build in a reproducible way in spite of indeterministic filesystem readdir order and <URL>
|
py
|
diff --git a/addons/after/gemini_viewer_addon/__init__.py b/addons/after/gemini_viewer_addon/__init__.py
index <HASH>..<HASH> 100644
--- a/addons/after/gemini_viewer_addon/__init__.py
+++ b/addons/after/gemini_viewer_addon/__init__.py
@@ -29,6 +29,7 @@ def exec(report: Report, config_dict: dict, output_summary: OutputSummary):
"title": report.title,
"one_host": report.summary.one.host,
"other_host": report.summary.other.host,
+ "paths": set(report.summary.paths),
"same_count": Decimal(report.summary.status.same),
"different_count": Decimal(report.summary.status.different),
"failure_count": Decimal(report.summary.status.failure),
|
:new: Send paths property as a string set (gemini_viewer_addon)
|
py
|
diff --git a/biocommons/seqrepo/seqaliasdb/seqaliasdb.py b/biocommons/seqrepo/seqaliasdb/seqaliasdb.py
index <HASH>..<HASH> 100644
--- a/biocommons/seqrepo/seqaliasdb/seqaliasdb.py
+++ b/biocommons/seqrepo/seqaliasdb/seqaliasdb.py
@@ -55,7 +55,9 @@ class SeqAliasDB(object):
# Public methods
def commit(self):
- self._db.commit()
+ if self._writeable:
+ self._db.commit()
+ pass
def fetch_aliases(self, seq_id, current_only=True):
"""return list of alias annotation records (dicts) for a given seq_id"""
|
make SeqAliasDB.commit() respect to self._writeable
|
py
|
diff --git a/datetime_tz/__init__.py b/datetime_tz/__init__.py
index <HASH>..<HASH> 100644
--- a/datetime_tz/__init__.py
+++ b/datetime_tz/__init__.py
@@ -211,6 +211,9 @@ def _detect_timezone_windows():
kernel32 = ctypes.windll.kernel32
getter = kernel32.GetTimeZoneInformation
getter = getattr(kernel32, 'GetDynamicTimeZoneInformation', getter)
+ # code is for daylight savings: 0 means disabled/not defined, 1 means enabled but inactive, 2 means enabled and active
+ code = getter(ctypes.byref(tzi))
+
win32tz_key_name = tzi.key_name
if not win32tz_key_name:
# we're on Windows before Vista/Server 2008 - need to look up the standard_name in the registry
|
Add back an important call which went missing while moving things around
|
py
|
diff --git a/tests/io/file-with.py b/tests/io/file-with.py
index <HASH>..<HASH> 100644
--- a/tests/io/file-with.py
+++ b/tests/io/file-with.py
@@ -10,3 +10,12 @@ except:
# Note: CPython and us throw different exception trying to read from
# close file.
print("can't read file after with")
+
+
+# Regression test: test that exception in with initialization properly
+# thrown and doesn't crash.
+try:
+ with open('__non_existent', 'r'):
+ pass
+except OSError:
+ print("OSError")
|
tests/file-with: Add testcase which failed for @dpgeorge. Works on x<I>.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
from distutils.core import setup
setup(name='injections',
- version='0.2.1',
+ version='0.2.2',
description='Simple dependency injection library',
author='Paul Colomiets',
author_email='paul@colomiets.name',
|
Version bumped to <I>
|
py
|
diff --git a/horizon/exceptions.py b/horizon/exceptions.py
index <HASH>..<HASH> 100644
--- a/horizon/exceptions.py
+++ b/horizon/exceptions.py
@@ -272,7 +272,8 @@ def handle(request, message=None, redirect=None, ignore=False,
# If the message has a placeholder for the exception, fill it in
elif message and "%(exc)s" in message:
message = encoding.force_unicode(message) % {"exc": log_entry}
- message = encoding.force_unicode(message)
+ if message:
+ message = encoding.force_unicode(message)
if issubclass(exc_type, UNAUTHORIZED):
if ignore:
|
Fixed error message for recoverable exceptions Issue was introduced by <URL> message becomes u'None' and all checks on "if message" pass. This result in popup screen with 'None' text instead of problem description. Change-Id: I7bc2dd<I>f<I>ba2bb<I>a7fadd2c1b<I>b<I>eb<I> Closes-Bug: #<I>
|
py
|
diff --git a/luigi/worker.py b/luigi/worker.py
index <HASH>..<HASH> 100644
--- a/luigi/worker.py
+++ b/luigi/worker.py
@@ -60,6 +60,9 @@ class TaskProcess(multiprocessing.Process):
# Need to have different random seeds if running in separate processes
random.seed((os.getpid(), time.time()))
+ status = FAILED
+ error_message = ''
+ missing = []
try:
# Verify that all the tasks are fulfilled!
missing = [dep.task_id for dep in self.task.deps() if not dep.complete()]
|
Make sure variables exists when put on queue
|
py
|
diff --git a/hwt/simulator/agentBase.py b/hwt/simulator/agentBase.py
index <HASH>..<HASH> 100644
--- a/hwt/simulator/agentBase.py
+++ b/hwt/simulator/agentBase.py
@@ -98,7 +98,7 @@ class SyncAgentBase(AgentWitReset):
SELECTED_EDGE_CALLBACK = OnRisingCallbackLoop
def __init__(self, intf, allowNoReset=False):
- super().__init__(intf, allowNoReset=allowNoReset)
+ super(SyncAgentBase, self).__init__(intf, allowNoReset=allowNoReset)
# resolve clk and rstn
self.clk = self.intf._getAssociatedClk()
|
SyncAgentBase: use super of thic class in constructor
|
py
|
diff --git a/autopep8.py b/autopep8.py
index <HASH>..<HASH> 100755
--- a/autopep8.py
+++ b/autopep8.py
@@ -494,6 +494,9 @@ class FixPEP8(object):
if not target.lstrip().startswith('import'):
return []
+ # pep8 (1.3.1) reports false positive if there is an import statement
+ # followed by a semicolon and some unrelated statement with commas in
+ # it.
if ';' in target:
return []
|
Add comment regarding semicolon in E<I>
|
py
|
diff --git a/tests/basics/special_methods2.py b/tests/basics/special_methods2.py
index <HASH>..<HASH> 100644
--- a/tests/basics/special_methods2.py
+++ b/tests/basics/special_methods2.py
@@ -115,6 +115,13 @@ cud1 / cud2
cud2 // cud1
cud1 += cud2
cud1 -= cud2
+cud1 % 2
+cud1 ** 2
+cud1 | cud2
+cud1 & cud2
+cud1 ^ cud2
+cud1 << 1
+cud1 >> 1
# test that dir() delegates to __dir__ special method
print(dir(cud1))
@@ -127,27 +134,6 @@ print('a' in dir(Cud))
# ne is not supported, !(eq) is called instead
#cud1 != cud2
#
-# binary and is not supported
-# cud1 & cud2
-#
-# binary lshift is not supported
-# cud1<<1
-#
-# modulus is not supported
-# cud1 % 2
-#
-# binary or is not supported
-# cud1 | cud2
-#
-# pow is not supported
-# cud1**2
-#
-# rshift is not suported
-# cud1>>1
-#
-# xor is not supported
-# cud1^cud2
-#
# in the followin test, cpython still calls __eq__
# cud3=cud1
# cud3==cud1
|
tests/basics/special_methods2: Enable some additional tests that work. These special methods are all available if MICROPY_PY_ALL_SPECIAL_METHODS is enabled.
|
py
|
diff --git a/bloop/search.py b/bloop/search.py
index <HASH>..<HASH> 100644
--- a/bloop/search.py
+++ b/bloop/search.py
@@ -420,7 +420,7 @@ class SearchIterator:
self.scanned += response["ScannedCount"]
# Each item is a dict of attributes
- self.buffer.extend(response["Items"])
+ self.buffer.extend(response.get("Items", []))
if self.buffer:
return self.buffer.popleft()
|
SearchIterator.__next__ handles missing 'Items' key from response #<I>
|
py
|
diff --git a/test/test_action.py b/test/test_action.py
index <HASH>..<HASH> 100755
--- a/test/test_action.py
+++ b/test/test_action.py
@@ -188,6 +188,8 @@ class TestAction(ShinkenTest):
if os.name == 'nt':
a.command = r"""python -c 'print "A"*1000000'"""
+ # FROM NOW IT4S FAIL ON WINDOWS :(
+ return
else:
a.command = r"""python -u -c 'print "A"*100000'"""
print "EXECUTE"
|
Fix : test big actions is failed on windows.
|
py
|
diff --git a/graphyte.py b/graphyte.py
index <HASH>..<HASH> 100644
--- a/graphyte.py
+++ b/graphyte.py
@@ -20,7 +20,7 @@ import time
__all__ = ['Sender', 'init', 'send']
-__version__ = '1.4'
+__version__ = '1.5'
default_sender = None
logger = logging.getLogger(__name__)
|
Bump up version number (with tag support)
|
py
|
diff --git a/src/hack/azext_hack/_website_utils.py b/src/hack/azext_hack/_website_utils.py
index <HASH>..<HASH> 100644
--- a/src/hack/azext_hack/_website_utils.py
+++ b/src/hack/azext_hack/_website_utils.py
@@ -113,10 +113,10 @@ class Website:
return self.__deployment_info
def __get_or_create_app_service_plan(self) -> str:
- plans = list_app_service_plans(self.__cmd)
+ plans = list_app_service_plans(self.__cmd, self.name)
for plan in plans:
if plan.sku.family == 'F':
- logger.warning('Using existing free plan...')
+ logger.warning('Using existing free plan: {}'.format(plan.name))
return plan
# Reached here, no free plan found
logger.warning(
@@ -131,7 +131,7 @@ class Website:
# create in the app plans resource group
# TODO: get logger
runtime_setting = _RUNTIME_SETTINGS[self.runtime]
- logger.warning('Creating website...')
+ logger.warning('Creating website: {}'.format(self.name))
webapp = create_webapp(self.__cmd, resource_group_name=app_service_plan.resource_group, name=self.name,
plan=app_service_plan.name, runtime=runtime_setting['name'], deployment_local_git=True)
self.resource_group = app_service_plan.resource_group
|
[Hack] Fix selection logic of app service plan, make logs a bit better (#<I>)
|
py
|
diff --git a/jax/api.py b/jax/api.py
index <HASH>..<HASH> 100644
--- a/jax/api.py
+++ b/jax/api.py
@@ -380,7 +380,8 @@ def grad(fun: Callable, argnums: Union[int, Sequence[int]] = 0,
return grad_f_aux if has_aux else grad_f
def value_and_grad(fun: Callable, argnums: Union[int, Sequence[int]] = 0,
- has_aux: bool = False, holomorphic: bool = False) -> Callable:
+ has_aux: bool = False, holomorphic: bool = False
+ ) -> Callable[..., Tuple[Any, Any]]:
"""Create a function which evaluates both ``fun`` and the gradient of ``fun``.
Args:
|
Make type of value_and_grad slightly more precise. (#<I>)
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -63,7 +63,8 @@ setup(
],
extras_require={
'RANSAC': ['sklearn>=0.19'],
- 'qrandom': ['quantumrandom']
+ 'qrandom': ['quantumrandom'],
+ 'plots': ['matplotlib']
},
cmdclass={
'clean': CleanCommand
|
adds matplotlib as extra dependency
|
py
|
diff --git a/sitecats/utils.py b/sitecats/utils.py
index <HASH>..<HASH> 100644
--- a/sitecats/utils.py
+++ b/sitecats/utils.py
@@ -183,12 +183,12 @@ class Cache(object):
else:
filter_kwargs = {}
child_ids = self.get_child_ids(parent_alias)
- if child_ids:
- filter_kwargs.update({'category_id__in': child_ids})
-
+ if not child_ids:
+ return []
filter_kwargs.update({
'content_type': ContentType.objects.get_for_model(target_object),
- 'object_id': target_object.id
+ 'object_id': target_object.id,
+ 'category_id__in': child_ids
})
# Calculating categories weight too.
|
Fixed rendering category with no subcategories in editor.
|
py
|
diff --git a/ch.py b/ch.py
index <HASH>..<HASH> 100755
--- a/ch.py
+++ b/ch.py
@@ -583,6 +583,9 @@ class Ch(object):
return np.hstack([np.array(lhs.dot(rhs[:,i].todense())) for i in range(rhs.shape[1])])
else:
return lhs.matmat(rhs)
+
+ # TODO: Figure out how/whether to do this.
+ #lhs, rhs = utils.convert_inputs_to_sparse_if_possible(lhs, rhs)
if not sp.issparse(lhs) and sp.issparse(rhs):
return rhs.T.dot(lhs.T).T
@@ -719,6 +722,13 @@ class Ch(object):
else:
result = reduce(lambda x, y: x+y, drs)
+
+ # TODO: figure out how/whether to do this.
+ # if result is not None and not sp.issparse(result):
+ # nonzero = np.count_nonzero(result)
+ # if nonzero > 0 and hasattr(result, 'size') and result.size / nonzero >= 10.0:
+ # #import pdb; pdb.set_trace()
+ # result = sp.csc_matrix(result)
if (result is not None) and (not sp.issparse(result)):
|
ch.py: added placeholders for maybe doing auto-sparsification
|
py
|
diff --git a/wxmplot/config.py b/wxmplot/config.py
index <HASH>..<HASH> 100644
--- a/wxmplot/config.py
+++ b/wxmplot/config.py
@@ -171,6 +171,11 @@ for tname in ('light', 'white-background', 'dark',
if tname.startswith('seaborn-'):
theme.update(matplotlib.style.library['seaborn'])
theme.update(matplotlib.style.library[tname])
+ if tname == 'fivethirtyeight__test': # text sizes are way off the norm
+ theme.update({'legend.fontsize': 10, 'xtick.labelsize': 9,
+ 'ytick.labelsize': 9, 'axes.labelsize': 9,
+ 'axes.titlesize': 13})
+
Themes[tname.lower()] = theme
def bool_ifnotNone(val, default):
|
monkeypatch fivethirtyeight font sizes
|
py
|
diff --git a/src/colab_gitlab/views.py b/src/colab_gitlab/views.py
index <HASH>..<HASH> 100644
--- a/src/colab_gitlab/views.py
+++ b/src/colab_gitlab/views.py
@@ -28,8 +28,9 @@ class GitlabProxyView(ColabProxyView):
self.request = request
if self.verify_forbidden_path(self.request.path, self.request.user):
+ tab = '#gitlab_profile'
path = r'/account/{}/edit'.format(self.request.user)
- return redirect(path)
+ return redirect(path+tab)
return super(GitlabProxyView, self).dispatch(request, *args, **kwargs)
|
Adding redirect with tab #gitlab_profile
|
py
|
diff --git a/src/hamster/widgets/tags.py b/src/hamster/widgets/tags.py
index <HASH>..<HASH> 100644
--- a/src/hamster/widgets/tags.py
+++ b/src/hamster/widgets/tags.py
@@ -136,7 +136,8 @@ class TagsEntry(gtk.Entry):
height = self.tag_box.count_height(w)
- #self.tag_box.modify_bg(gtk.StateType.NORMAL, "#eee") #self.get_style().base[gtk.StateType.NORMAL])
+ _, color = gdk.Color.parse("#000")
+ self.tag_box.modify_bg(gtk.StateType.NORMAL, color) #self.get_style().base[gtk.StateType.NORMAL])
self.scroll_box.set_size_request(w, height)
self.popup.resize(w, height)
|
try to fix background color does nothing.
|
py
|
diff --git a/isochrones/isochrones.py b/isochrones/isochrones.py
index <HASH>..<HASH> 100644
--- a/isochrones/isochrones.py
+++ b/isochrones/isochrones.py
@@ -114,7 +114,7 @@ class Isochrone(object):
self.mag = {band:interpnd(self.tri,mags[band]) for band in self.bands}
- def __call__(self,mass,age,feh,return_df=False):
+ def __call__(self,mass,age,feh,return_df=True):
"""returns properties (or arrays of properties) at given mass, age, feh
Parameters
|
made __call__ return a DataFrame by default (return_df=True)
|
py
|
diff --git a/pyvlx/parameter.py b/pyvlx/parameter.py
index <HASH>..<HASH> 100644
--- a/pyvlx/parameter.py
+++ b/pyvlx/parameter.py
@@ -193,7 +193,8 @@ class Position(Parameter):
def to_percent(raw):
"""Create percent position value out of raw."""
# The first byte has the vlue from 0 to 200. Ignoring the second one.
- return int(raw[0] / 2)
+ # Adding 0.5 allows a slight tolerance for devices (e.g. Velux SML) that do not return exactly 51200 as final position when closed.
+ return int(raw[0] / 2 + 0.5)
def __str__(self):
"""Return string representation of object."""
|
Improve percent calculation in to_percent() for devices that do not return exactly <I> as the final position when closed.
|
py
|
diff --git a/pinax/messages/models.py b/pinax/messages/models.py
index <HASH>..<HASH> 100644
--- a/pinax/messages/models.py
+++ b/pinax/messages/models.py
@@ -1,6 +1,9 @@
+from __future__ import unicode_literals
+
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
+from django.utils.encoding import python_2_unicode_compatible
from django.contrib.auth.models import User
@@ -8,6 +11,7 @@ from .managers import ThreadManager, MessageManager
from .utils import cached_attribute
+@python_2_unicode_compatible
class Thread(models.Model):
subject = models.CharField(max_length=150)
@@ -15,6 +19,12 @@ class Thread(models.Model):
objects = ThreadManager()
+ def __str__(self):
+ return "{}: {}".format(
+ self.subject,
+ ", ".join([user for user in self.users.all()])
+ )
+
def get_absolute_url(self):
return reverse("messages_thread_detail", kwargs={"thread_id": self.pk})
|
Add some basic admin Closes #6
|
py
|
diff --git a/hexify.py b/hexify.py
index <HASH>..<HASH> 100755
--- a/hexify.py
+++ b/hexify.py
@@ -4,10 +4,9 @@ import sys
import os
_HELP_TEXT = """
-Creates hexified versions of micropython scripts.
-Intended for saving files to the local filesystem, _NOT_ the microbit.
-Does not autodetect a microbit.
-Accepts multiple imput scripts and optionally one output directory.
+A simple utility script intended for creating hexified versions of MicroPython
+scripts on the local filesystem _NOT_ the microbit. Does not autodetect a
+microbit. Accepts multiple input scripts and optionally one output directory.
"""
def main(argv=None):
|
Help text edit Attempted to better word the help text.
|
py
|
diff --git a/panoramisk/fast_agi.py b/panoramisk/fast_agi.py
index <HASH>..<HASH> 100644
--- a/panoramisk/fast_agi.py
+++ b/panoramisk/fast_agi.py
@@ -23,7 +23,7 @@ class Request:
return parse_agi_result(response.decode(self.encoding))
-class Application(OrderedDict):
+class Application(dict):
def __init__(self, default_encoding='utf-8', loop=None):
super(Application, self).__init__()
|
Application can be a simple dict, order isn't important
|
py
|
diff --git a/tests/test_osmnx.py b/tests/test_osmnx.py
index <HASH>..<HASH> 100644
--- a/tests/test_osmnx.py
+++ b/tests/test_osmnx.py
@@ -19,7 +19,7 @@ ox.log('test info', level=lg.INFO)
ox.log('test warning', level=lg.WARNING)
ox.log('test error', level=lg.ERROR)
-import httmock, gzip
+import httmock, gzip, io
try:
from urllib.parse import parse_qsl
@@ -34,7 +34,7 @@ def get_mock_response_content(overpass_filename=None):
Nominatim_Searches = {}
# Load all Nominatim searches into a dictionary
- with open(os.path.join(tests_dir, 'nominatim-responses.txt'), 'r', encoding='utf8') as file:
+ with io.open(os.path.join(tests_dir, 'nominatim-responses.txt'), 'r', encoding='utf8') as file:
while True:
try:
key, value, _ = next(file).strip(), next(file), next(file)
|
Switched to io.open for Python <I> encoding support
|
py
|
diff --git a/neo4j/bolt/connection.py b/neo4j/bolt/connection.py
index <HASH>..<HASH> 100644
--- a/neo4j/bolt/connection.py
+++ b/neo4j/bolt/connection.py
@@ -213,7 +213,10 @@ class Connection(object):
self.packer.supports_bytes = self.server.supports_bytes()
def __del__(self):
- self.close()
+ try:
+ self.close()
+ except (AttributeError, TypeError):
+ pass
def append(self, signature, fields=(), response=None):
""" Add a message to the outgoing queue.
|
Ignore errors on Connection.__del__
|
py
|
diff --git a/tests/multi_net/tcp_accept_recv.py b/tests/multi_net/tcp_accept_recv.py
index <HASH>..<HASH> 100644
--- a/tests/multi_net/tcp_accept_recv.py
+++ b/tests/multi_net/tcp_accept_recv.py
@@ -17,7 +17,7 @@ def instance0():
try:
print("recv", s.recv(10)) # should raise Errno 107 ENOTCONN
except OSError as er:
- print(er.errno)
+ print(er.errno in (107, 128))
s.close()
|
tests/multi_net: Fix TCP accept test when using system error numbers. If a port is not using internal error numbers, which match both lwIP and Linux error numbers, ENOTCONN from the standard library's errno.h equals <I>, not <I>.
|
py
|
diff --git a/packages/vaex-astro/vaex/astro/gadget.py b/packages/vaex-astro/vaex/astro/gadget.py
index <HASH>..<HASH> 100644
--- a/packages/vaex-astro/vaex/astro/gadget.py
+++ b/packages/vaex-astro/vaex/astro/gadget.py
@@ -50,6 +50,7 @@ def getinfo(filename, seek=None):
class MemoryMappedGadget(DatasetMemoryMapped):
+ snake_name = 'gadget'
def __init__(self, filename, fs_options={}, fs=None):
super(MemoryMappedGadget, self).__init__(filename)
#h5file = h5py.File(self.filename)
|
🐛 gadget dataset had no snake_name
|
py
|
diff --git a/webexteamssdk/models/mixins/room.py b/webexteamssdk/models/mixins/room.py
index <HASH>..<HASH> 100644
--- a/webexteamssdk/models/mixins/room.py
+++ b/webexteamssdk/models/mixins/room.py
@@ -84,6 +84,11 @@ class RoomBasicPropertiesMixin(object):
return self._json_data.get('creatorId')
@property
+ def ownerId(self):
+ """The ID of the organization which owns this room."""
+ return self._json_data.get('ownerId')
+
+ @property
def created(self):
"""The date and time the room was created."""
created = self._json_data.get('created')
|
Added ownerId to property list - resolves issue CiscoDevNet/webexteamssdk#<I>
|
py
|
diff --git a/bids/layout/layout.py b/bids/layout/layout.py
index <HASH>..<HASH> 100644
--- a/bids/layout/layout.py
+++ b/bids/layout/layout.py
@@ -116,7 +116,13 @@ class BIDSLayout(Layout):
if isinstance(p, six.string_types):
paths[i] = (p, conf_path % 'bids')
elif isinstance(p, tuple):
- doms = [map_conf(d) for d in listify(p[1])]
+ conf_names = listify(p[1])
+ # All 'derivatives' files are also 'bids' files. This is hacky
+ # and should be replaced with something more principled.
+ if 'derivatives' in conf_names:
+ conf_names = list(set(conf_names) | {'bids'})
+ # Map each built-in config name to the JSON file
+ doms = [map_conf(d) for d in conf_names]
paths[i] = (p[0], doms)
# Determine which subdirectories to exclude from indexing
|
make 'bids' config a dependency of 'derivatives'; closes #<I>
|
py
|
diff --git a/src/transformers/models/visual_bert/modeling_visual_bert.py b/src/transformers/models/visual_bert/modeling_visual_bert.py
index <HASH>..<HASH> 100755
--- a/src/transformers/models/visual_bert/modeling_visual_bert.py
+++ b/src/transformers/models/visual_bert/modeling_visual_bert.py
@@ -929,7 +929,7 @@ class VisualBertForPreTraining(VisualBertPreTrainedModel):
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = VisualBertForPreTraining.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
- inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt")
+ inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
visual_embeds = get_visual_embeddings(image).unsqueeze(0)
visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
|
Fix mask token in the example (#<I>) VIsualBert uses bert-base-uncased tokenizer, therefore, instead of {mask}, the mask token should be [MASK]
|
py
|
diff --git a/pysat/_instrument.py b/pysat/_instrument.py
index <HASH>..<HASH> 100644
--- a/pysat/_instrument.py
+++ b/pysat/_instrument.py
@@ -1109,7 +1109,7 @@ class Instrument(object):
# ensure units and name are named consistently in new Meta
# object as specified by user upon Instrument instantiation
- mdata.accept_default_labels(self)
+ mdata.accept_default_labels(self.meta)
bad_datetime = False
except pds.errors.OutOfBoundsDatetime:
bad_datetime = True
@@ -2639,11 +2639,16 @@ class Instrument(object):
if not self.empty:
self.meta = meta
+ # If only some metadata included, define the remaining variables
+ for var in self.variables:
+ case_var = meta.var_case_name(var)
+ if case_var not in self.meta.keys() \
+ and case_var not in self.meta.keys_nD():
+ self.meta[case_var] = {self.labels.name: var}
+
# check if load routine actually returns meta
if self.meta.data.empty:
- self.meta[self.variables] = {self.labels.name: self.variables,
- self.labels.units:
- [''] * len(self.variables)}
+ self.meta[self.variables] = {self.labels.name: self.variables}
# if loading by file set the yr, doy, and date
if not self._load_by_date:
|
ENH: updated meta loading Updated meta loading to load defaults for all variables not explicitly treated by the load routine.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -74,7 +74,7 @@ setup(
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires = [
- 'pandas>=0.18.0'
+ 'pandas>=0.18.0',
'numpy>=1.9.0',
'python-dateutil>=2.4',
'scikit-learn>=0.16.1',
|
fixed issue with dependencies in setup.py
|
py
|
diff --git a/awp/packager.py b/awp/packager.py
index <HASH>..<HASH> 100755
--- a/awp/packager.py
+++ b/awp/packager.py
@@ -9,6 +9,7 @@ import glob
import plistlib
import os
import os.path
+import re
import shutil
from zipfile import ZipFile, ZIP_DEFLATED
@@ -151,8 +152,8 @@ def update_workflow_readme(info, readme_path):
# Set the workflow version to a new version number if one is given
def update_workflow_version(info, new_version_num):
if new_version_num:
- info['version'] = new_version_num
- print('Set version to v{}'.format(new_version_num))
+ info['version'] = re.sub(r'^v', '', new_version_num)
+ print('Set version to v{}'.format(info['version']))
# Write installed workflow subdirectory files to the given zip file
|
Strip superfluous 'v' prefix from version numbers Alfred already prepends a 'v' to the given version number.
|
py
|
diff --git a/serenata_toolbox/chamber_of_deputies/chamber_of_deputies_dataset.py b/serenata_toolbox/chamber_of_deputies/chamber_of_deputies_dataset.py
index <HASH>..<HASH> 100644
--- a/serenata_toolbox/chamber_of_deputies/chamber_of_deputies_dataset.py
+++ b/serenata_toolbox/chamber_of_deputies/chamber_of_deputies_dataset.py
@@ -1,4 +1,5 @@
import os.path
+import csv
from datetime import date
from urllib.request import urlretrieve
from zipfile import ZipFile
@@ -53,10 +54,9 @@ class ChamberOfDeputiesDataset:
.replace('Ano-', 'reimbursements-')
data = pd.read_csv(csv_path,
- error_bad_lines=False, #some old reimbursements are messed up
- warn_bad_lines=False,
encoding='utf-8',
delimiter=";",
+ quoting=csv.QUOTE_NONE,
dtype={'ideDocumento': np.str,
'idecadastro': np.str,
'nuCarteiraParlamentar': np.str,
|
Remove error silencing and specify quotation option Since the Camara's Open Data team fixed most issues with CSV formatting, we are removing the fail-safe options on the CSV reading call. We also now specify that the files have no quotation marks of any kind separating values. This last change avoids silent row dropping when a value has no escape characters on quotation for text values.
|
py
|
diff --git a/src/timelog/middleware.py b/src/timelog/middleware.py
index <HASH>..<HASH> 100644
--- a/src/timelog/middleware.py
+++ b/src/timelog/middleware.py
@@ -20,7 +20,7 @@ class TimeLogMiddleware(object):
sqltime = 0.0
for q in connection.queries:
- sqltime += float(q['time'])
+ sqltime += float(getattr(q, 'time', 0.0))
if hasattr(request, '_start'):
d = {
|
Sometimes, `time` attribute on a query is not set -- don't ask me why, it just seems to happen. If so, keep a default.
|
py
|
diff --git a/gspread/client.py b/gspread/client.py
index <HASH>..<HASH> 100644
--- a/gspread/client.py
+++ b/gspread/client.py
@@ -279,7 +279,7 @@ class Client(object):
try:
r = self.session.put(url, data, headers=headers)
except HTTPError as ex:
- if ex.code == 403:
+ if getattr(ex, 'code', None) == 403:
raise UpdateCellError(ex.message)
else:
raise ex
|
Fixed AttributeError when URLError caught by HTTPError catch block Fixes #<I>
|
py
|
diff --git a/spikeextractors/extractors/tridesclousextractor/tridesclousextractor.py b/spikeextractors/extractors/tridesclousextractor/tridesclousextractor.py
index <HASH>..<HASH> 100644
--- a/spikeextractors/extractors/tridesclousextractor/tridesclousextractor.py
+++ b/spikeextractors/extractors/tridesclousextractor/tridesclousextractor.py
@@ -21,8 +21,7 @@ class TridesclousSortingExtractor(SortingExtractor):
def getUnitSpikeTrain(self, unit_id, start_frame=None, end_frame=None):
spikes = self.dataio.get_spikes(seg_num=0, chan_grp=self.chan_grp, i_start=None, i_stop=None)
spikes = spikes[spikes['cluster_label'] == unit_id]
- spike_indexes = spikes['index']
- spike_times = spikes['index'] / self.dataio.sample_rate
+ spike_times = spikes['index']
if start_frame is not None:
spike_times = spike_times[spike_times >= start_frame]
if end_frame is not None:
|
Fix misunderstanding of the spike "times": the value is in fact an index (frame number), not a time in seconds.
|
py
|
diff --git a/dailymotion.py b/dailymotion.py
index <HASH>..<HASH> 100755
--- a/dailymotion.py
+++ b/dailymotion.py
@@ -9,7 +9,7 @@ import json
from collections import defaultdict
__author__ = 'Samir AMZANI <samir.amzani@gmail.com>'
-__version__ = '0.2.0'
+__version__ = '0.2.1'
__python_version__ = '.'.join([str(i) for i in sys.version_info[:3]])
try:
|
Bumped the revision version to <I>
|
py
|
diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py
index <HASH>..<HASH> 100644
--- a/tests/test_s3/test_s3.py
+++ b/tests/test_s3/test_s3.py
@@ -88,6 +88,22 @@ def test_multipart_upload():
@mock_s3
+def test_multipart_upload_out_of_order():
+ conn = boto.connect_s3('the_key', 'the_secret')
+ bucket = conn.create_bucket("foobar")
+
+ multipart = bucket.initiate_multipart_upload("the-key")
+ # last part, can be less than 5 MB
+ part2 = b'1'
+ multipart.upload_part_from_file(BytesIO(part2), 4)
+ part1 = b'0' * 5242880
+ multipart.upload_part_from_file(BytesIO(part1), 2)
+ multipart.complete_upload()
+ # we should get both parts as the key contents
+ bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + part2)
+
+
+@mock_s3
def test_multipart_upload_with_headers():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
|
add test for uploading parts out of order in S3 multipart upload
|
py
|
diff --git a/psiturk/experiment.py b/psiturk/experiment.py
index <HASH>..<HASH> 100644
--- a/psiturk/experiment.py
+++ b/psiturk/experiment.py
@@ -69,7 +69,6 @@ if 'gunicorn' in os.environ.get('SERVER_SOFTWARE',''):
# Set cache timeout to 10 seconds for static files
app.config.update(SEND_FILE_MAX_AGE_DEFAULT=10)
app.secret_key = CONFIG.get('Server Parameters', 'secret_key')
-app.logger.info("Secret key: " + app.secret_key)
# this checks for templates that are required if you are hosting your own ad.
def check_templates_exist():
|
Remove INFO level logging of secret key
|
py
|
diff --git a/troposphere/amazonmq.py b/troposphere/amazonmq.py
index <HASH>..<HASH> 100644
--- a/troposphere/amazonmq.py
+++ b/troposphere/amazonmq.py
@@ -3,7 +3,7 @@
#
# See LICENSE file for full license.
-from . import AWSObject, AWSProperty
+from . import AWSObject, AWSProperty, Tags
from .validators import boolean, integer
@@ -55,6 +55,7 @@ class Broker(AWSObject):
'PubliclyAccessible': (boolean, True),
'SecurityGroups': ([basestring], False),
'SubnetIds': ([basestring], False),
+ 'Tags': ((Tags, list), False),
}
|
Add Tags property to AWS::AmazonMQ::Broker, per <I> Jan 3 update (#<I>)
|
py
|
diff --git a/plenum/cli/cli.py b/plenum/cli/cli.py
index <HASH>..<HASH> 100644
--- a/plenum/cli/cli.py
+++ b/plenum/cli/cli.py
@@ -106,7 +106,6 @@ class Cli:
def __init__(self, looper, basedirpath, nodeReg, cliNodeReg, output=None,
debug=False, logFileName=None):
self.curClientPort = None
- logging.root.handlers = []
logging.root.addHandler(CliHandler(self.out))
self.looper = looper
self.basedirpath = os.path.expanduser(basedirpath)
|
[#<I>] minor refactoring
|
py
|
diff --git a/python/chronos/src/setup.py b/python/chronos/src/setup.py
index <HASH>..<HASH> 100644
--- a/python/chronos/src/setup.py
+++ b/python/chronos/src/setup.py
@@ -54,7 +54,7 @@ def setup_package():
url='https://github.com/intel-analytics/BigDL',
packages=get_bigdl_packages(),
install_requires=['bigdl-orca=='+VERSION, 'bigdl-nano[pytorch]',
- 'pandas>=1.0.5', 'scikit-learn', 'statsmodels'],
+ 'pandas>=1.0.5, <1.3.0', 'scikit-learn', 'statsmodels'],
extras_require={'all': ['bigdl-orca[automl]=='+VERSION, 'scipy==1.5',
'protobuf==3.12.0', 'tsfresh==0.17.0']},
dependency_links=['https://d3kbcqa49mib13.cloudfront.net/spark-2.0.0-bin-hadoop2.7.tgz'],
|
Add upper version limit for pandas dependency (#<I>) * add upper range for pandas * change to <I>
|
py
|
diff --git a/pcef/core/editor.py b/pcef/core/editor.py
index <HASH>..<HASH> 100644
--- a/pcef/core/editor.py
+++ b/pcef/core/editor.py
@@ -123,6 +123,22 @@ class QCodeEdit(QtGui.QPlainTextEdit):
"""
return self.__blocks
+ @property
+ def style(self):
+ return self.__style
+
+ @style.setter
+ def style(self, value):
+ """
+ Sets the editor style. The valueChanged signal will be emitted with all
+ parameters set to an empty string ("").
+
+ :param value: The new editor style
+ :type value: PropertyRegistry
+ """
+ self.__style = value
+ self.__style.valueChanged.emit("", "", "")
+
def __init__(self, parent=None, createDefaultActions=True):
"""
:param parent: Parent widget
@@ -955,7 +971,7 @@ class QCodeEdit(QtGui.QPlainTextEdit):
"""
Init the style PropertyRegistry
"""
- self.style = PropertyRegistry()
+ self.__style = PropertyRegistry()
self.style.valueChanged.connect(self.__resetPalette)
self.style.addProperty("font", constants.FONT)
self.style.addProperty("fontSize", constants.FONT_SIZE)
|
Make style a property and emit valueChanged with all param set to "" in the setter.
|
py
|
diff --git a/pycm/pycm_obj.py b/pycm/pycm_obj.py
index <HASH>..<HASH> 100644
--- a/pycm/pycm_obj.py
+++ b/pycm/pycm_obj.py
@@ -695,7 +695,7 @@ class ConfusionMatrix():
except Exception:
return "None"
- def weighted_kappa(self,weight):
+ def weighted_kappa(self,weight=None):
"""
Calculate weighted kappa.
@@ -705,8 +705,8 @@ class ConfusionMatrix():
"""
if matrix_check(weight) is False:
warn(WEIGHTED_KAPPA_WARNING, RuntimeWarning)
- return cm.Kappa
+ return self.Kappa
if set(weight.keys()) != set(self.classes):
warn(WEIGHTED_KAPPA_WARNING, RuntimeWarning)
- return cm.Kappa
+ return self.Kappa
return weighted_kappa_calc(self.classes,self.table,self.P,self.TOP,self.POP,weight)
|
fix: minor edit in weighted_kappa method #<I>
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -60,7 +60,6 @@ setup(
namespace_packages=[],
zip_safe=False,
install_requires=[
- 'setuptools',
# -*- Extra requirements: -*-
'gitchangelog',
],
|
fix: pkg: remove setuptools from the requirements list. The requirement is a bit pointless as the setup.py file cannot even be executed and be parsed to find out setuptools is required if setuptools isn't found. This also is seeming to cause this package to break when installing from PyPi.
|
py
|
diff --git a/xml2json/bin/dispatch.py b/xml2json/bin/dispatch.py
index <HASH>..<HASH> 100644
--- a/xml2json/bin/dispatch.py
+++ b/xml2json/bin/dispatch.py
@@ -191,7 +191,13 @@ def create_job(request, *args, **kwargs):
def job_data(request, data_source, *args, **kwargs):
- # Modify the arguments
+ """Handle GET responses from /search/jobs/<sid>/<data_source>.
+
+ """
+ # Splunk currently offers this endpoint as XML or JSON. We need to
+ # get XML back so we can use the same error handlers as elsewhere,
+ # but the request was obviously made in JSON, so we have to set
+ # the output.
mode = request["get"].get("output_mode", "")
request["get"]["output_mode"] = "xml"
if data_source == 'summary':
@@ -226,8 +232,11 @@ def job_data(request, data_source, *args, **kwargs):
def unless_error(ok_handler, request_filter=lambda x: x, path_args=[]):
def f(request, *args, **kwargs):
- request = request_filter(request)
- status, content = forward_request(request)
+ if callable(request_filter):
+ request = request_filter(request)
+ else:
+ for rf in request_filter:
+ request = rf(request)
if status_ok(status):
return status, ok_handler(content, *[args[k] for k in path_args])
else:
|
Made request_filter in unless_error handle multiple filters
|
py
|
diff --git a/sirbot/core.py b/sirbot/core.py
index <HASH>..<HASH> 100644
--- a/sirbot/core.py
+++ b/sirbot/core.py
@@ -100,11 +100,13 @@ class SirBot:
msg_type = 'channel'
if msg_type == 'hello':
- logger.debug('login data ok')
+ logger.info('login data ok')
elif ok:
+ if msg.get('warning'):
+ logger.info('API response: %s, %S', msg.get('warning'), msg)
logger.debug('API response: %s', msg)
- elif not ok:
- logger.debug('API error: %s, %s', msg.get('error'), msg)
+ elif ok is False:
+ logger.info('API error: %s, %s', msg.get('error'), msg)
elif msg_type is None:
logging.debug('Ignoring non event message %s', msg)
return
|
Too many log messages; fix an error with the API response logging
|
py
|
diff --git a/nefertari/view.py b/nefertari/view.py
index <HASH>..<HASH> 100644
--- a/nefertari/view.py
+++ b/nefertari/view.py
@@ -344,9 +344,13 @@ class BaseView(OptionsViewMixin):
class PolymorphicESView(BaseView):
- def index(self, collections):
+ def __init__(self, *args, **kwargs):
+ super(PolymorphicESView, self).__init__(*args, **kwargs)
+ collections = self.request.matchdict['collections']
collections = collections.split('/')[0]
self.Model = dictset({'__name__': collections})
+
+ def index(self, collections):
self._query_params.process_int_param('_limit', 20)
return self.get_collection_es()
|
Set model in init of PolymorphicESView to enable aggregations
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,7 @@ setuptools.setup(
"argcomplete >= 1.8.2, < 2",
"paramiko >= 2.1.1, < 3",
"requests >= 2.12.4, < 3",
- "tweak >= 0.5.1, < 0.6",
+ "tweak >= 0.6.4, < 1",
"keymaker >= 0.3.3, < 1",
"pyyaml >= 3.11, < 4",
"python-dateutil >= 2.5.3, < 3",
|
Bump tweak dependency with upstream fix
|
py
|
diff --git a/salt/pillar/pepa.py b/salt/pillar/pepa.py
index <HASH>..<HASH> 100755
--- a/salt/pillar/pepa.py
+++ b/salt/pillar/pepa.py
@@ -77,7 +77,6 @@ except ImportError:
HAS_OS_PATH = False
-
def __virtual__():
'''
Only return if all the modules are available
@@ -220,7 +219,8 @@ if __name__ == '__main__' and sys.stdout.isatty():
import pygments.lexers
import pygments.formatters
print pygments.highlight(yaml.safe_dump(result), pygments.lexers.YamlLexer(), pygments.formatters.TerminalFormatter())
- except ImportError:
- print yaml.safe_dump(result, indent = 4, default_flow_style = False)
+ except ImportError:
+ print yaml.safe_dump(result, indent=4, default_flow_style=False)
else:
- print yaml.safe_dump(result, indent = 4, default_flow_style = False)
\ No newline at end of file
+ print yaml.safe_dump(result, indent=4, default_flow_style=False)
+
|
Attempt at fixing pylint errors
|
py
|
diff --git a/salt/runners/git_pillar.py b/salt/runners/git_pillar.py
index <HASH>..<HASH> 100644
--- a/salt/runners/git_pillar.py
+++ b/salt/runners/git_pillar.py
@@ -86,7 +86,8 @@ def update(branch=None, repo=None):
else:
pillar = salt.utils.gitfs.GitPillar(__opts__)
pillar.init_remotes(pillar_conf,
- salt.pillar.git_pillar.PER_REMOTE_OVERRIDES)
+ salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
+ salt.pillar.git_pillar.PER_REMOTE_ONLY)
for remote in pillar.remotes:
# Skip this remote if it doesn't match the search criteria
if branch is not None:
|
Add PER_REMOTE_ONLY to init_remotes call in git_pillar runner When the mountpoint feature was added, it added a per-remote-only parameter called `mountpoint`. While this is reflected in salt.pillar.git_pillar, it was not in salt.runners.git_pillar. This corrects that oversight, fixing a traceback when the `git_pillar.update` runner is executed and one or more remotes have a `mountpoint` parameter configured.
|
py
|
diff --git a/synapse/cores/common.py b/synapse/cores/common.py
index <HASH>..<HASH> 100644
--- a/synapse/cores/common.py
+++ b/synapse/cores/common.py
@@ -2975,8 +2975,8 @@ class Cortex(EventBus, DataModel, Runtime, Configable, s_ingest.IngestApi):
The input value, unchanged.
'''
buf = s_common.msgenpack(valu)
- self.savebus.fire('syn:core:blob:set', key=key, valu=buf)
self._setBlobValu(key, buf)
+ self.savebus.fire('syn:core:blob:set', key=key, valu=buf)
return valu
# TODO: Wrap this in a userauth layer
@@ -3009,8 +3009,8 @@ class Cortex(EventBus, DataModel, Runtime, Configable, s_ingest.IngestApi):
'''
if not self.hasBlobValu(key):
raise s_common.NoSuchName(name=key, mesg='Cannot delete key which is not present in the blobstore.')
- self.savebus.fire('syn:core:blob:del', key=key)
buf = self._delBlobValu(key)
+ self.savebus.fire('syn:core:blob:del', key=key)
return s_common.msgunpack(buf)
def _onSetBlobValu(self, mesg):
|
Invert order of savebus.fire() calls for blob store.
|
py
|
diff --git a/pythonforandroid/recipes/pygame/__init__.py b/pythonforandroid/recipes/pygame/__init__.py
index <HASH>..<HASH> 100644
--- a/pythonforandroid/recipes/pygame/__init__.py
+++ b/pythonforandroid/recipes/pygame/__init__.py
@@ -13,7 +13,7 @@ class Pygame2Recipe(CompiledComponentsPythonRecipe):
not part of the build. It's usable, but not complete.
"""
- version = '2.0.1'
+ version = '2.1.0'
url = 'https://github.com/pygame/pygame/archive/{version}.tar.gz'
site_packages_name = 'pygame'
|
Updated version of pygame from <I> to <I> (#<I>) Changed the version of pygame in the recipe from <I> (released in Dec <I>) to the latest release <I> (released in Nov <I>).
|
py
|
diff --git a/werkzeug/wrappers.py b/werkzeug/wrappers.py
index <HASH>..<HASH> 100644
--- a/werkzeug/wrappers.py
+++ b/werkzeug/wrappers.py
@@ -241,7 +241,8 @@ class BaseRequest(object):
def application(cls, f):
"""Decorate a function as responder that accepts the request as first
argument. This works like the :func:`responder` decorator but the
- function is passed the request object as first argument::
+ function is passed the request object as first argument and the
+ request object will be closed automatically::
@Request.application
def my_wsgi_app(request):
@@ -255,7 +256,11 @@ class BaseRequest(object):
#: the request. The return value is then called with the latest
#: two arguments. This makes it possible to use this decorator for
#: both methods and standalone WSGI functions.
- return _patch_wrapper(f, lambda *a: f(*a[:-2]+(cls(a[-2]),))(*a[-2:]))
+ def application(*args):
+ request = cls(args[-2])
+ with request:
+ return f(*args[:-2] + (request,))(*args[-2:])
+ return _patch_wrapper(f, application)
def _get_file_stream(self, total_content_length, content_type, filename=None,
content_length=None):
|
Changed Request.application to automatically close the request object
|
py
|
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index <HASH>..<HASH> 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -208,7 +208,7 @@ def lexsort_indexer(keys, orders=None, na_position="last"):
cat = Categorical(key, ordered=True)
if na_position not in ["last", "first"]:
- raise ValueError(f"invalid na_position: {repr(na_position)}")
+ raise ValueError(f"invalid na_position: {na_position}")
n = len(cat.categories)
codes = cat.codes.copy()
@@ -264,7 +264,7 @@ def nargsort(items, kind="quicksort", ascending: bool = True, na_position="last"
elif na_position == "first":
indexer = np.concatenate([nan_idx, indexer])
else:
- raise ValueError(f"invalid na_position: {repr(na_position)}")
+ raise ValueError(f"invalid na_position: {na_position}")
return indexer
|
f strings in core/sorting (#<I>)
|
py
|
diff --git a/jira/resources.py b/jira/resources.py
index <HASH>..<HASH> 100644
--- a/jira/resources.py
+++ b/jira/resources.py
@@ -1298,10 +1298,6 @@ class RequestType(Resource):
session: ResilientSession,
raw: Dict[str, Any] = None,
):
- if raw:
- self._parse_raw(raw)
- self.raw: Dict[str, Any] = cast(Dict[str, Any], self.raw)
-
Resource.__init__(
self,
"servicedesk/{0}/requesttype",
@@ -1310,6 +1306,10 @@ class RequestType(Resource):
"{server}/rest/servicedeskapi/{path}",
)
+ if raw:
+ self._parse_raw(raw)
+ self.raw: Dict[str, Any] = cast(Dict[str, Any], self.raw)
+
# Utilities
|
fix traceback on RequestType instantiation with raw content (fixes #<I>) (#<I>)
|
py
|
diff --git a/script/upload-node-headers.py b/script/upload-node-headers.py
index <HASH>..<HASH> 100755
--- a/script/upload-node-headers.py
+++ b/script/upload-node-headers.py
@@ -112,7 +112,7 @@ def upload_node(bucket, access_key, secret_key, version):
if TARGET_PLATFORM == 'win32':
# Copy atom.lib to node.lib
node_lib = os.path.join(OUT_DIR, 'node.lib')
- atom_lib = os.path.join(OUT_DIR, 'atom.lib')
+ atom_lib = os.path.join(OUT_DIR, 'node.dll.lib')
shutil.copy2(atom_lib, node_lib)
# Upload the 32bit node.lib.
|
win: Generate node.lib from node.dll
|
py
|
diff --git a/backtrader/indicators/macd.py b/backtrader/indicators/macd.py
index <HASH>..<HASH> 100644
--- a/backtrader/indicators/macd.py
+++ b/backtrader/indicators/macd.py
@@ -46,7 +46,7 @@ class MACD(Indicator):
class MACDHisto(MACD):
lines = ('histo',)
- plotlines = dict(histo=dict(_method='bar', alpha=0.66, width=0.66))
+ plotlines = dict(histo=dict(_method='bar', alpha=0.50, width=0.66))
def __init__(self):
super(MACDHisto, self).__init__()
|
macd changed transparency level of histogram bar
|
py
|
diff --git a/stomp/transport.py b/stomp/transport.py
index <HASH>..<HASH> 100644
--- a/stomp/transport.py
+++ b/stomp/transport.py
@@ -212,10 +212,10 @@ class BaseTransport(stomp.listener.Publisher):
if receipt_value == CMD_DISCONNECT:
self.set_connected(False)
- self.__disconnect_receipt = None
# received a stomp 1.1+ disconnect receipt
if receipt == self.__disconnect_receipt:
self.disconnect_socket()
+ self.__disconnect_receipt = None
elif frame_type == 'connected':
self.set_connected(True)
@@ -224,9 +224,9 @@ class BaseTransport(stomp.listener.Publisher):
self.set_connected(False)
with self.__listeners_change_condition:
- listeners = list(self.listeners.values())
+ listeners = sorted(self.listeners.items())
- for listener in listeners:
+ for (_, listener) in listeners:
if not listener:
continue
@@ -258,9 +258,9 @@ class BaseTransport(stomp.listener.Publisher):
:param Frame frame: the Frame object to transmit
"""
with self.__listeners_change_condition:
- listeners = list(self.listeners.values())
+ listeners = sorted(self.listeners.items())
- for listener in listeners:
+ for (_, listener) in listeners:
if not listener:
continue
try:
|
minor fix for disconnect receipt handling, change listener loops to be sorted
|
py
|
diff --git a/cm-devops/automation/script/module.py b/cm-devops/automation/script/module.py
index <HASH>..<HASH> 100644
--- a/cm-devops/automation/script/module.py
+++ b/cm-devops/automation/script/module.py
@@ -1284,6 +1284,8 @@ def get_script_name(env, path):
return 'run-' + tmp_suff1 + '-' + tmp_suff2 + '.sh'
elif exists(os.path.join(path, 'run-' + tmp_suff1 + '.sh')):
return 'run-' + tmp_suff1 + '.sh'
+ elif exists(os.path.join(path, 'run-' + tmp_suff3 + '.sh')):
+ return 'run-' + tmp_suff3 + '.sh'
else:
return 'run.sh';
|
Added changes to pick run script for a given platform
|
py
|
diff --git a/frisbee/__init__.py b/frisbee/__init__.py
index <HASH>..<HASH> 100644
--- a/frisbee/__init__.py
+++ b/frisbee/__init__.py
@@ -83,7 +83,8 @@ class Frisbee:
try:
task = self._unfullfilled.get_nowait()
except queue.Empty:
- self._log.debug("Queue is empty, QUIT")
+ name = current_process().name
+ self._log.debug("Queue is empty, QUIT: %s" % name)
break
else:
self._log.debug("Job: %s" % str(task))
@@ -154,9 +155,11 @@ class Frisbee:
launch = self.PROCESSES
if greed:
launch = len(jobs)
- for _ in range(launch):
- proc: Process = Process(target=self._job_handler)
+ for idx in range(launch):
+ proc: Process = Process(name="w-%d" % idx,
+ target=self._job_handler)
self._processes.append(proc)
+ self._log.debug("Starting: w-%d" % idx)
proc.start()
for proc in self._processes:
|
Label workers as they are launched and killed
|
py
|
diff --git a/dallinger/experiment_server/experiment_server.py b/dallinger/experiment_server/experiment_server.py
index <HASH>..<HASH> 100644
--- a/dallinger/experiment_server/experiment_server.py
+++ b/dallinger/experiment_server/experiment_server.py
@@ -1300,6 +1300,13 @@ def worker_complete():
assignment_id=participant.assignment_id,
participant_id=participant.id,
)
+ if config.get('recruiter', None) == "bots":
+ # Trigger notification directly
+ # Handled same as debug, but added separetely for clarity
+ _debug_notify(
+ assignment_id=participant.assignment_id,
+ participant_id=participant.id,
+ )
return success_response(field="status",
data=status,
request_type="worker complete")
|
make sure we notify when a job is complete
|
py
|
diff --git a/hitman.py b/hitman.py
index <HASH>..<HASH> 100755
--- a/hitman.py
+++ b/hitman.py
@@ -13,7 +13,6 @@ from subprocess import Popen, PIPE
import six
-import baker
import click
import feedparser
import requests
@@ -409,4 +408,4 @@ def get_settings(all,key):
if __name__ == "__main__":
- cli_base()
+ cli_base() # noqa
|
see if we can fix up our score
|
py
|
diff --git a/dashboard/tests/test_dashboard.py b/dashboard/tests/test_dashboard.py
index <HASH>..<HASH> 100644
--- a/dashboard/tests/test_dashboard.py
+++ b/dashboard/tests/test_dashboard.py
@@ -78,7 +78,9 @@ def test_basic(ray_start_with_dashboard):
dashboard_proc_info = all_processes[ray_constants.PROCESS_TYPE_DASHBOARD][
0]
dashboard_proc = psutil.Process(dashboard_proc_info.process.pid)
- assert dashboard_proc.status() == psutil.STATUS_RUNNING
+ assert dashboard_proc.status() in [
+ psutil.STATUS_RUNNING, psutil.STATUS_SLEEPING
+ ]
raylet_proc_info = all_processes[ray_constants.PROCESS_TYPE_RAYLET][0]
raylet_proc = psutil.Process(raylet_proc_info.process.pid)
|
Fix new dashboard test process check (#<I>)
|
py
|
diff --git a/mouse/_winmouse.py b/mouse/_winmouse.py
index <HASH>..<HASH> 100644
--- a/mouse/_winmouse.py
+++ b/mouse/_winmouse.py
@@ -151,6 +151,7 @@ def listen(queue):
event = ButtonEvent(type, button, t)
if (event.event_type == DOWN) and previous_button_event is not None:
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/gg153548%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
if event.time - previous_button_event.time <= GetDoubleClickTime() / 1000.0:
event = ButtonEvent(DOUBLE, event.button, event.time)
|
Fix double click events not generating on Window (#3)
|
py
|
diff --git a/src/concurrency/fields.py b/src/concurrency/fields.py
index <HASH>..<HASH> 100755
--- a/src/concurrency/fields.py
+++ b/src/concurrency/fields.py
@@ -74,11 +74,10 @@ class VersionField(Field):
db_tablespace=db_tablespace,
db_column=db_column)
- # def deconstruct(self):
- # name, path, args, kwargs = super(VersionField, self).deconstruct()
- # del kwargs["max_length"]
- # kwargs['default'] = 1
- # return name, path, args, kwargs
+ def deconstruct(self):
+ name, path, args, kwargs = super(VersionField, self).deconstruct()
+ kwargs['default'] = 1
+ return name, path, args, kwargs
def get_default(self):
return 0
@@ -110,14 +109,10 @@ class VersionField(Field):
setattr(model_instance, self.attname, int(value))
def pre_save(self, model_instance, add):
- # if conf.PROTOCOL >= 2:
if add:
value = self._get_next_version(model_instance)
self._set_version_value(model_instance, value)
return getattr(model_instance, self.attname)
- # value = self._get_next_version(model_instance)
- # self._set_version_value(model_instance, value)
- # return value
@staticmethod
def _wrap_save(func):
|
re-enable deconstruct() commented by mistake
|
py
|
diff --git a/openquake/calculators/classical.py b/openquake/calculators/classical.py
index <HASH>..<HASH> 100644
--- a/openquake/calculators/classical.py
+++ b/openquake/calculators/classical.py
@@ -292,9 +292,8 @@ class ClassicalCalculator(base.HazardCalculator):
mags_by_trt, self.sitecol.one(), gsims_by_trt, oq)
if hasattr(aw, 'array'):
self.datastore['effect_by_mag_dst_trt'] = aw
- smap = parallel.Starmap(
- self.core_task.__func__, h5=self.datastore.hdf5,
- num_cores=oq.num_cores)
+ smap = parallel.Starmap(classical, h5=self.datastore.hdf5,
+ num_cores=oq.num_cores)
self.submit_tasks(smap)
acc0 = self.acc0() # create the rup/ datasets BEFORE swmr_on()
self.datastore.swmr_on()
|
Better logging [skip CI]
|
py
|
diff --git a/datascience/tables.py b/datascience/tables.py
index <HASH>..<HASH> 100644
--- a/datascience/tables.py
+++ b/datascience/tables.py
@@ -2267,8 +2267,8 @@ class Table(collections.abc.MutableMapping):
if 'height' in options:
height = options.pop('height')
- else:
- height = None
+ elif len(yticks) > 5:
+ height = len(yticks) * 100
if overlay:
fig = go.Figure()
@@ -2282,7 +2282,6 @@ class Table(collections.abc.MutableMapping):
y = yticks,
name = labels[i],
orientation = 'h',
- width = [0.8] * len(yticks),
marker_color = colors[i]))
fig.update_yaxes(title_text = ylabel, type = 'category', dtick = 1, showticklabels = True)
if len(labels) == 1:
@@ -2299,7 +2298,6 @@ class Table(collections.abc.MutableMapping):
y = yticks,
name = labels[i],
orientation = 'h',
- width = [0.8] * len(yticks),
marker_color = colors[i]), row = i + 1, col = 1)
fig.update_yaxes(title_text = ylabel, type = 'category', dtick = 1, showticklabels = True)
fig.show()
|
try modifying height instead of bin width
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.