diff
stringlengths 139
3.65k
| message
stringlengths 8
627
| diff_languages
stringclasses 1
value |
|---|---|---|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@ os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-easy-audit',
- version='1.0',
+ version='1.1',
packages=find_packages(),
include_package_data=True,
license='GPL3',
|
Changed version to <I>
|
py
|
diff --git a/spyder/widgets/sourcecode/codeeditor.py b/spyder/widgets/sourcecode/codeeditor.py
index <HASH>..<HASH> 100644
--- a/spyder/widgets/sourcecode/codeeditor.py
+++ b/spyder/widgets/sourcecode/codeeditor.py
@@ -1214,7 +1214,7 @@ class CodeEditor(TextEditBaseWidget):
"""
self.scrollpastend_enabled = state
self.setCenterOnScroll(state)
- self.update()
+ self.setDocument(self.document())
def resizeEvent(self, event):
"""Reimplemented Qt method to handle p resizing"""
|
Ensure update happens immediately
|
py
|
diff --git a/blockstack/lib/config.py b/blockstack/lib/config.py
index <HASH>..<HASH> 100644
--- a/blockstack/lib/config.py
+++ b/blockstack/lib/config.py
@@ -120,7 +120,7 @@ EPOCH_FEATURE_MULTISIG = "BLOCKSTACK_MULTISIG"
# when epochs end (-1 means "never")
EPOCH_NOW = -1
-EPOCH_1_END_BLOCK = 436660 # F-Day 2016
+EPOCH_1_END_BLOCK = 436650 # F-Day 2016
EPOCH_2_END_BLOCK = EPOCH_NOW
EPOCH_1_NAMESPACE_LIFETIME_MULTIPLIER_id = 1
|
push deadline for hard fork to <I>
|
py
|
diff --git a/saltcloud/clouds/parallels.py b/saltcloud/clouds/parallels.py
index <HASH>..<HASH> 100644
--- a/saltcloud/clouds/parallels.py
+++ b/saltcloud/clouds/parallels.py
@@ -334,7 +334,7 @@ def query(action=None, command=None, args=None, method='GET', data=None):
try:
result = urllib2.urlopen(req)
- log.debug('EC2 Response Status Code: {0}'.format(result.getcode()))
+ log.debug('PARALLELS Response Status Code: {0}'.format(result.getcode()))
if 'content-length' in result.headers:
content = result.read()
@@ -344,7 +344,7 @@ def query(action=None, command=None, args=None, method='GET', data=None):
else:
return {}
except urllib2.URLError as exc:
- log.error('EC2 Response Status Code: {0} {1}'.format(exc.code,
+ log.error('PARALLELS Response Status Code: {0} {1}'.format(exc.code,
exc.msg))
root = ET.fromstring(exc.read())
log.error(_xml_to_dict(root))
|
Should say PARALLELS, not EC2
|
py
|
diff --git a/horizon/exceptions.py b/horizon/exceptions.py
index <HASH>..<HASH> 100644
--- a/horizon/exceptions.py
+++ b/horizon/exceptions.py
@@ -334,14 +334,14 @@ def handle(request, message=None, redirect=None, ignore=False,
log_entry = encoding.force_text(exc_value)
- # We trust messages from our own exceptions
user_message = ""
+ # We trust messages from our own exceptions
if issubclass(exc_type, HorizonException):
- user_message = exc_value
+ user_message = log_entry
# If the message has a placeholder for the exception, fill it in
elif message and "%(exc)s" in message:
user_message = encoding.force_text(message) % {"exc": log_entry}
- if message:
+ elif message:
user_message = encoding.force_text(message)
for exc_handler in HANDLE_EXC_METHODS:
@@ -349,7 +349,7 @@ def handle(request, message=None, redirect=None, ignore=False,
if exc_handler['set_wrap']:
wrap = True
handler = exc_handler['handler']
- ret = handler(request, message, redirect, ignore,
+ ret = handler(request, user_message, redirect, ignore,
exc_handler.get('escalate', escalate),
handled, force_silence, force_log,
log_method, log_entry, log_level)
|
Ensure exc message is included in user_message In exception.handle the user_message was being overridden so failed to include the exception message even if %(exc)s was in the message. This patch ensures the correct user_message is produced by exception.handle() Change-Id: I2bcc<I>a2f8a3b<I>c1acbbf3d3ff1e<I>b<I>e9 Closes-Bug: #<I>
|
py
|
diff --git a/tests/testwidgets.py b/tests/testwidgets.py
index <HASH>..<HASH> 100644
--- a/tests/testwidgets.py
+++ b/tests/testwidgets.py
@@ -63,4 +63,4 @@ class TestWidgets(IPTestCase):
def test_selection_widget_2(self):
html = normalize(SelectionWidget(self.plot2, display_options={'figure_format': 'png'})())
- self.assertEqual(digest_data(html), '7af5adfdf8a30dbf98f699f462e817255f26bd19bb46dcea1626259054719dd4')
+ self.assertEqual(digest_data(html), 'd02b3701c3d90b7f4ad72253aa3225ab7bdd67d0aa6b59077e7f42872aea3c15')
|
Fixed outdated hash in TestWidgets
|
py
|
diff --git a/digitalocean/__init__.py b/digitalocean/__init__.py
index <HASH>..<HASH> 100644
--- a/digitalocean/__init__.py
+++ b/digitalocean/__init__.py
@@ -8,7 +8,7 @@ __license__ = "See: http://creativecommons.org/licenses/by-nd/3.0/ "
__copyright__ = "Copyright (c) 2012, 2013, 2014 Lorenzo Setale"
from .Manager import Manager
-from .Droplet import Droplet
+from .Droplet import Droplet, DropletError, BadKernelObject, BadSSHKeyFormat
from .Region import Region
from .Size import Size
from .Image import Image
@@ -16,4 +16,5 @@ from .Action import Action
from .Domain import Domain
from .Record import Record
from .SSHKey import SSHKey
-from .Kernel import Kernel
\ No newline at end of file
+from .Kernel import Kernel
+from .baseapi import Error, TokenError, DataReadError
|
add new exception classes to package exports
|
py
|
diff --git a/tests/test_cmd2.py b/tests/test_cmd2.py
index <HASH>..<HASH> 100644
--- a/tests/test_cmd2.py
+++ b/tests/test_cmd2.py
@@ -310,6 +310,7 @@ def test_send_to_paste_buffer(base_app):
assert normalize(c) == expected
+
def test_base_timing(base_app, capsys):
out = run_cmd(base_app, 'set timing True')
expected = normalize("""timing - was: False
@@ -321,3 +322,23 @@ now: True
assert out.startswith('Elapsed: 0:00:00')
else:
assert out.startswith('Elapsed: 0:00:00.0')
+
+
+def test_base_debug(base_app, capsys):
+ # Try to load a non-existent file with debug set to False by default
+ run_cmd(base_app, 'load does_not_exist.txt')
+ out, err = capsys.readouterr()
+ assert err.startswith('ERROR')
+
+ # Set debug true
+ out = run_cmd(base_app, 'set debug True')
+ expected = normalize("""
+debug - was: False
+now: True
+""")
+ assert out == expected
+
+ # Verify that we now see the exception traceback
+ run_cmd(base_app, 'load does_not_exist.txt')
+ out, err = capsys.readouterr()
+ assert str(err).startswith('Traceback (most recent call last):')
|
Added unit test of debug output capability
|
py
|
diff --git a/parsl/dataflow/dflow.py b/parsl/dataflow/dflow.py
index <HASH>..<HASH> 100644
--- a/parsl/dataflow/dflow.py
+++ b/parsl/dataflow/dflow.py
@@ -703,7 +703,7 @@ class DataFlowKernel(object):
'memoize': cache,
'callback': None,
'exec_fu': None,
- 'checkpoint': None,
+ 'checkpoint': False,
'fail_count': 0,
'fail_history': [],
'status': States.unsched,
|
Represented task checkpointedness with True/False, not True/None (#<I>)
|
py
|
diff --git a/python/src/nnabla/functions.py b/python/src/nnabla/functions.py
index <HASH>..<HASH> 100644
--- a/python/src/nnabla/functions.py
+++ b/python/src/nnabla/functions.py
@@ -384,3 +384,26 @@ def pow2_quantize(x, sign=True, with_zero=True, n=8, m=1, quantize=True, ste_fin
if not quantize:
return x
return pow2_quantize_base(x, sign, with_zero, n, m, ste_fine_grained, outputs=outputs)
+
+
+def clip_by_value(x, min, max):
+ r"""Clip inputs by values.
+
+ .. math::
+
+ y = \begin{cases}
+ max & (x > max) \\
+ x & (otherwise) \\
+ min & (x < min)
+ \end{cases}.
+
+ Args:
+ x (Variable): An input variable.
+ min (Variable): An min variable by which `x` is clipped.
+ max (Variable): An max variable by which `x` is clipped.
+
+ Returns:
+ ~nnabla.Variable: N-D array.
+
+ """
+ return F.minimum2(F.maximum2(x, min), max)
|
Add clip_by_value as composite function.
|
py
|
diff --git a/kerncraft/models/benchmark.py b/kerncraft/models/benchmark.py
index <HASH>..<HASH> 100644
--- a/kerncraft/models/benchmark.py
+++ b/kerncraft/models/benchmark.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python3
"""Benchmark model and helper functions."""
+import os
import subprocess
from functools import reduce
import operator
@@ -280,11 +281,15 @@ class Benchmark(PerformanceModel):
perf_cmd += cmd
if self.verbose > 1:
print(' '.join(perf_cmd))
+ orig_OMP_NUM_THREADS = os.environ['OMP_NUM_THREADS']
+ os.environ['OMP_NUM_THREADS'] = str(self._args.cores)
try:
output = subprocess.check_output(perf_cmd).decode('utf-8').split('\n')
except subprocess.CalledProcessError as e:
print("Executing benchmark failed: {!s}".format(e), file=sys.stderr)
sys.exit(1)
+ finally:
+ os.environ['OMP_NUM_THREADS'] = orig_OMP_NUM_THREADS
# TODO multicore output is different and needs to be considered here!
results = {}
|
added setting of OMP_NUM_THREADS
|
py
|
diff --git a/sh.py b/sh.py
index <HASH>..<HASH> 100644
--- a/sh.py
+++ b/sh.py
@@ -958,7 +958,7 @@ class RunningCommand(object):
return unicode("")
def __eq__(self, other):
- return unicode(self) == unicode(other)
+ return id(self) == id(other)
__hash__ = None # Avoid DeprecationWarning in Python < 3
def __contains__(self, item):
|
Use id() for equality between running commands Use id() instead of the expensive unicode() that has the side effect of actually running the command if it is not already running, and blocking until the command has finished running.
|
py
|
diff --git a/hgtools/managers/cmd.py b/hgtools/managers/cmd.py
index <HASH>..<HASH> 100644
--- a/hgtools/managers/cmd.py
+++ b/hgtools/managers/cmd.py
@@ -4,6 +4,8 @@ import operator
import itertools
import collections
+import pkg_resources
+
TaggedRevision = collections.namedtuple('TaggedRevision', 'tag revision')
@@ -137,6 +139,14 @@ class Mercurial(Command):
class Git(Command):
exe = 'git'
+ def is_valid(self):
+ return super(Command, self).is_valid() and self.version_suitable()
+
+ def version_suitable(self):
+ req_ver = pkg_resources.parse_version('1.7.10')
+ act_ver = pkg_resources.parse_version(self.version())
+ return act_ver >= req_ver
+
def find_root(self):
try:
return self._invoke('rev-parse', '--top-level').strip()
|
Git command is only suitable if the command supports --points-to. Fixes #<I>.
|
py
|
diff --git a/cqlengine/tests/columns/test_container_columns.py b/cqlengine/tests/columns/test_container_columns.py
index <HASH>..<HASH> 100644
--- a/cqlengine/tests/columns/test_container_columns.py
+++ b/cqlengine/tests/columns/test_container_columns.py
@@ -67,6 +67,16 @@ class TestSetColumn(BaseCassEngTestCase):
m = TestSetModel.get(partition=m.partition)
self.assertNotIn(5, m.int_set)
+ def test_blind_deleting_last_item_should_succeed(self):
+ m = TestSetModel.create()
+ m.int_set.add(5)
+ m.save()
+
+ TestSetModel.objects(partition=m.partition).update(int_set=set())
+
+ m = TestSetModel.get(partition=m.partition)
+ self.assertNotIn(5, m.int_set)
+
def test_empty_set_retrieval(self):
m = TestSetModel.create()
m2 = TestSetModel.get(partition=m.partition)
|
test to show blind update of empty set causes CQLEngineException
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@ from setuptools import setup, find_packages
setup(
name = 'reap',
- version = '0.4-pre',
+ version = '0.4',
description = 'A command line interface for the Harvest time tracking tool.',
author = 'Jake Basile',
author_email = 'jakebasile@me.com',
|
Bumped version to <I>
|
py
|
diff --git a/mtools/mlogfilter/mlogfilter.py b/mtools/mlogfilter/mlogfilter.py
index <HASH>..<HASH> 100755
--- a/mtools/mlogfilter/mlogfilter.py
+++ b/mtools/mlogfilter/mlogfilter.py
@@ -94,7 +94,7 @@ class MLogFilterTool(LogFileTool):
return line[:last_index] + ("").join(splitted)
- def run(self):
+ def run(self, arguments=None):
""" parses the logfile and asks each filter if it accepts the line.
it will only be printed if all filters accept the line.
"""
|
fixed bug from merge, run needs 2 parameters.
|
py
|
diff --git a/ieml/dictionary/script/script.py b/ieml/dictionary/script/script.py
index <HASH>..<HASH> 100644
--- a/ieml/dictionary/script/script.py
+++ b/ieml/dictionary/script/script.py
@@ -77,7 +77,7 @@ class Script(TreeStructure):
def __eq__(self, other):
if isinstance(other, Script):
- return self.__hash__() == other.__hash__()
+ return self._str == other._str
else:
return super().__eq__(other)
|
Make the __eq__ test between morpheme more efficient
|
py
|
diff --git a/yandextank/stepper/main.py b/yandextank/stepper/main.py
index <HASH>..<HASH> 100644
--- a/yandextank/stepper/main.py
+++ b/yandextank/stepper/main.py
@@ -229,21 +229,9 @@ class StepperWrapper(object):
hashed_str += sep + str(self.enum_ammo)
if self.instances_schedule:
hashed_str += sep + str(self.instances)
-
if self.ammo_file:
- if not os.path.exists(self.ammo_file):
- raise RuntimeError(
- "Ammo file not found: %s" % self.ammo_file)
-
- hashed_str += sep + os.path.realpath(self.ammo_file)
- stat = os.stat(self.ammo_file)
- cnt = 0
- for stat_option in stat:
- if cnt == 7: # skip access time
- continue
- cnt += 1
- hashed_str += ";" + str(stat_option)
- hashed_str += ";" + str(os.path.getmtime(self.ammo_file))
+ opener = get_opener(self.ammo_file)
+ hashed_str += sep + opener.hash
else:
if not self.uris:
raise RuntimeError(
|
Update main.py use opener's hash property for stpd file hash
|
py
|
diff --git a/synphot/spectrum.py b/synphot/spectrum.py
index <HASH>..<HASH> 100644
--- a/synphot/spectrum.py
+++ b/synphot/spectrum.py
@@ -597,7 +597,8 @@ class BaseSpectrum(object):
# This logic assumes __call__ never returns mag or count!
if ((isinstance(other.model, Empirical1D) and
other.model.is_tapered() or
- not isinstance(other.model, _CompoundModel)) and
+ not isinstance(other.model,
+ (Empirical1D, _CompoundModel))) and
np.allclose(other(x1[::x1.size-1]).value, 0)):
result = 'full'
|
Fixed check_overlap() logic again.
|
py
|
diff --git a/commands/wai.py b/commands/wai.py
index <HASH>..<HASH> 100755
--- a/commands/wai.py
+++ b/commands/wai.py
@@ -30,4 +30,4 @@ def cmd(send, msg, args):
c = ["broke", "exploded", "corrupted", "melted", "froze", "died", "reset",
"was seen by the godofskies", "burned", "corroded", "reversed polarity",
"was accidentallied", "nuked"]
- send("%s %s %s" % ((choice(a), choice(b), choice(c))))
+ send("because %s %s %s" % ((choice(a), choice(b), choice(c))))
|
if i mess this up i will officially stop work on cslbot.
|
py
|
diff --git a/slackeventsapi/server.py b/slackeventsapi/server.py
index <HASH>..<HASH> 100644
--- a/slackeventsapi/server.py
+++ b/slackeventsapi/server.py
@@ -1,4 +1,4 @@
-from flask import Flask, request, make_response
+from flask import Flask, request, make_response, Blueprint
import json
import platform
import sys
@@ -18,7 +18,7 @@ class SlackServer(Flask):
# If a server is passed in, bind the event handler routes to it,
# otherwise create a new Flask instance.
if server:
- if isinstance(server, Flask):
+ if isinstance(server, Flask) or isinstance(server, Blueprint):
self.bind_route(server)
else:
raise TypeError("Server must be an instance of Flask")
|
Accept usage of Flask Blueprints as app instance
|
py
|
diff --git a/tgext/admin/tgadminconfig.py b/tgext/admin/tgadminconfig.py
index <HASH>..<HASH> 100644
--- a/tgext/admin/tgadminconfig.py
+++ b/tgext/admin/tgadminconfig.py
@@ -40,7 +40,7 @@ class UserControllerConfig(CrudRestControllerConfig):
if not getattr(self, 'table_filler_type', None):
class MyTableFiller(TableFiller):
__entity__ = self.model
- __omit_fields__ = ['_password', password_field]
+ __omit_fields__ = ['_password', password_field, '_groups']
self.table_filler_type = MyTableFiller
if hasattr(TextField, 'req'):
|
Skip _groups field in case of MongoDB
|
py
|
diff --git a/hotdoc/core/project.py b/hotdoc/core/project.py
index <HASH>..<HASH> 100644
--- a/hotdoc/core/project.py
+++ b/hotdoc/core/project.py
@@ -256,9 +256,7 @@ class Project(Configurable):
if not self.project_version:
error('invalid-config', 'No project version was provided')
- self.sanitized_name = '%s-%s' % (re.sub(r'\W+', '-',
- self.project_name),
- self.project_version)
+ self.sanitized_name = '%s' % (re.sub(r'\W+', '-', self.project_name))
# pylint: disable=arguments-differ
def parse_config(self, config, toplevel=False):
|
Remove version in output names It was not really useful as we won't be able to build the documentation of a same lib for two of its versions at the same time because of symbol name uniqueness
|
py
|
diff --git a/pandas/tests/frame/indexing/test_categorical.py b/pandas/tests/frame/indexing/test_categorical.py
index <HASH>..<HASH> 100644
--- a/pandas/tests/frame/indexing/test_categorical.py
+++ b/pandas/tests/frame/indexing/test_categorical.py
@@ -394,3 +394,14 @@ class TestDataFrameIndexingCategorical:
result = df.loc[["a"]].index.levels[0]
tm.assert_index_equal(result, expected)
+
+ def test_categorical_filtering(self):
+ # GH22609 Verify filtering operations on DataFrames with categorical Series
+ df = pd.DataFrame(data=[[0, 0], [1, 1]], columns=["a", "b"])
+ df["b"] = df.b.astype("category")
+
+ result = df.where(df.a > 0)
+ expected = df.copy()
+ expected.loc[0, :] = np.nan
+
+ tm.assert_equal(result, expected)
|
TST: Verify filtering operations on DataFrames with categorical Series (#<I>)
|
py
|
diff --git a/managers/pipeline.py b/managers/pipeline.py
index <HASH>..<HASH> 100644
--- a/managers/pipeline.py
+++ b/managers/pipeline.py
@@ -35,7 +35,7 @@ def request_user(strategy, details, user=None, request=None, is_new=False, uid=N
if user:
return
elif is_new:
- username = details["username"]
+ username = details["username"].replace(".", "_")
try:
ProfileRequest.objects.get(username=username)
|
Don't allow periods in usernames
|
py
|
diff --git a/isso/utils/__init__.py b/isso/utils/__init__.py
index <HASH>..<HASH> 100644
--- a/isso/utils/__init__.py
+++ b/isso/utils/__init__.py
@@ -19,7 +19,7 @@ def anonymize(remote_addr):
and /48 (zero'd).
"""
- if not isinstance(remote_addr, str) and isinstance(remote_addr, str):
+ if isinstance(remote_addr, bytes):
remote_addr = remote_addr.decode('ascii', 'ignore')
try:
ipv4 = ipaddress.IPv4Address(remote_addr)
|
utils: Test remote_addr as bytes, not str This was probably an oversight while porting to py2-only: python2 support was removed in dd<I>aa0cf<I>e<I>dd7b2aff<I>b<I>f<I>f<I>e Checking for not(str) and str at the same time is a no-op. What makes sense is to check whether a byte object can be decoded to unicode.
|
py
|
diff --git a/fusesoc/section.py b/fusesoc/section.py
index <HASH>..<HASH> 100644
--- a/fusesoc/section.py
+++ b/fusesoc/section.py
@@ -349,8 +349,8 @@ Testbench source type : {source_type}
Verilog top module : {top_module}
"""
return s.format(verilator_options=' '.join(self.verilator_options),
- src_files = ' '.join(self.src_files),
- include_files=' '.join(self.include_files),
+ src_files = ' '.join([f.name for f in self.src_files]),
+ include_files=' '.join([f.name for f in self.include_files]),
define_files=' '.join(self.define_files),
libs=' '.join(self.libs),
tb_toplevel=self.tb_toplevel,
|
Fix core-info for verilator sections
|
py
|
diff --git a/system_tests/language.py b/system_tests/language.py
index <HASH>..<HASH> 100644
--- a/system_tests/language.py
+++ b/system_tests/language.py
@@ -74,7 +74,8 @@ class TestLanguage(unittest.TestCase):
# Verify entity 1.
self.assertEqual(entity1.name, self.NAME1)
self.assertEqual(entity1.entity_type, EntityType.PERSON)
- self.assertTrue(0.7 < entity1.salience < 0.8)
+ # NOTE: As of Sept. 8, 2016, entity1.salience == 0.81992269
+ self.assertTrue(0.6 < entity1.salience < 1.0)
self.assertEqual(entity1.mentions, [entity1.name])
self.assertEqual(entity1.wikipedia_url,
'http://en.wikipedia.org/wiki/Caravaggio')
@@ -82,7 +83,8 @@ class TestLanguage(unittest.TestCase):
# Verify entity 2.
self.assertEqual(entity2.name, self.NAME2)
self.assertEqual(entity2.entity_type, EntityType.LOCATION)
- self.assertTrue(0.15 < entity2.salience < 0.25)
+ # NOTE: As of Sept. 8, 2016, entity2.salience == 0.14427181
+ self.assertTrue(0.0 < entity2.salience < 0.4)
self.assertEqual(entity2.mentions, [entity2.name])
self.assertEqual(entity2.wikipedia_url,
'http://en.wikipedia.org/wiki/Italy')
|
Widening the salience range for natural language system tests. Fixes #<I>. However, the test will likely remain flaky if the output is a moving target.
|
py
|
diff --git a/salt/modules/postgres.py b/salt/modules/postgres.py
index <HASH>..<HASH> 100644
--- a/salt/modules/postgres.py
+++ b/salt/modules/postgres.py
@@ -911,6 +911,8 @@ def _role_cmd_args(name,
if sub_cmd.endswith('WITH'):
sub_cmd = sub_cmd.replace(' WITH', '')
if groups:
+ if isinstance(groups, list):
+ groups = ','.join(groups)
for group in groups.split(','):
sub_cmd = '{0}; GRANT "{1}" TO "{2}"'.format(sub_cmd, group, name)
return sub_cmd
|
Handle group lists as well as comma-separated group strings. Fixes #<I>
|
py
|
diff --git a/demo.py b/demo.py
index <HASH>..<HASH> 100644
--- a/demo.py
+++ b/demo.py
@@ -19,7 +19,7 @@ mwoauth = MWOAuth(consumer_key=consumer_key, consumer_secret=consumer_secret)
app.register_blueprint(mwoauth.bp)
@app.route("/")
-def gcu():
+def index():
return "logged in as: " + repr(mwoauth.get_current_user(False)) + "<br>" + \
"<a href=login>login</a> / <a href=logout>logout</a>"
|
rename gcu to index
|
py
|
diff --git a/flask_github.py b/flask_github.py
index <HASH>..<HASH> 100644
--- a/flask_github.py
+++ b/flask_github.py
@@ -224,6 +224,7 @@ class GitHub(object):
kwargs.setdefault('headers', {})
if access_token is None:
access_token = self.get_access_token()
+ kwargs['headers'] = kwargs['headers'].copy()
kwargs['headers'].setdefault('Authorization', 'token %s' % access_token)
if resource.startswith(("http://", "https://")):
|
Copy the headers so the access token doesn't leak
|
py
|
diff --git a/src/datamodel/gfunc.py b/src/datamodel/gfunc.py
index <HASH>..<HASH> 100644
--- a/src/datamodel/gfunc.py
+++ b/src/datamodel/gfunc.py
@@ -350,7 +350,8 @@ class Gfunc(Ugrid):
self.xmin[1], self.xmax[1]]
aspect = self.tsize[1] / self.tsize[0]
dsp_kwargs.update({'interpolation': 'none', 'cmap': gray,
- 'extent': extent, 'aspect': aspect})
+ 'extent': extent, 'aspect': aspect,
+ 'origin': 'lower'})
elif method == 'scatter':
coo_arr = self.coord.asarr()
args_re = [coo_arr[:, 0], coo_arr[:, 1], self.fvals.real]
|
fix y axis swap in imshow method
|
py
|
diff --git a/gwpy/timeseries/statevector.py b/gwpy/timeseries/statevector.py
index <HASH>..<HASH> 100644
--- a/gwpy/timeseries/statevector.py
+++ b/gwpy/timeseries/statevector.py
@@ -334,7 +334,7 @@ class StateVector(TimeSeries):
try:
return self.metadata['bitmask']
except:
- self.bitmask = BitMask()
+ self.bitmask = BitMask([])
return self.bitmask
@bitmask.setter
|
StateVector: handle empty bitmask better
|
py
|
diff --git a/plugins/candela/girder_candela/__init__.py b/plugins/candela/girder_candela/__init__.py
index <HASH>..<HASH> 100644
--- a/plugins/candela/girder_candela/__init__.py
+++ b/plugins/candela/girder_candela/__init__.py
@@ -19,7 +19,7 @@ from girder.plugin import getPlugin, GirderPlugin
class CandelaPlugin(GirderPlugin):
DISPLAY_NAME = 'Candela Visualization'
- NPM_PACKAGE_NAME = '@girder/candela'
+ CLIENT_SOURCE_PATH = 'web_client'
def load(self, info):
getPlugin('item_tasks').load(info)
|
Automatically inspect npm package names for plugins This change removes the NPM_PACKAGE_NAME property on the GirderPlugin class. Instead, plugin developers must explicitly set the path to the web client package and the npm package name will be inferred. Advanced usage (such as fetching the package from npm) is still supported by overriding the `npmPackages` method.
|
py
|
diff --git a/mktplace/mktplace_client.py b/mktplace/mktplace_client.py
index <HASH>..<HASH> 100644
--- a/mktplace/mktplace_client.py
+++ b/mktplace/mktplace_client.py
@@ -100,7 +100,10 @@ class MarketPlaceClient(MarketPlaceCommunication):
self.LastTransaction = None
self.CurrentState = state or MarketPlaceState(self.BaseURL)
- self.CurrentState.fetch()
+
+ # fetch the current state if it has not already be fetched.
+ if 0 == len(self.CurrentState.State.keys()):
+ self.CurrentState.fetch()
self.TokenStore = tokenstore
|
remove duplicate state fetch from mktclient at start up.
|
py
|
diff --git a/docs/conf.py b/docs/conf.py
index <HASH>..<HASH> 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -26,7 +26,7 @@ except ImportError:
__version__ = '0.1.14'
__doc_version__ = '3'
-rst_epilog = ".. |doc_version| replace:: %s" % __doc_version__
+rst_epilog = "\n.. |doc_version| replace:: %s" % __doc_version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
|
updated rst_epilog to include a newline
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup
setup(name='DukeDSClient',
- version='1.0.5',
+ version='2.0.0',
description='Command line tool(ddsclient) to upload/manage projects on the duke-data-service.',
url='https://github.com/Duke-GCB/DukeDSClient',
keywords='duke dds dukedataservice',
|
create version <I> Bumping major version due to removing `--skip-copy` and adding the `--copy` flags for the `deliver` command.
|
py
|
diff --git a/pirate/torrent.py b/pirate/torrent.py
index <HASH>..<HASH> 100644
--- a/pirate/torrent.py
+++ b/pirate/torrent.py
@@ -100,7 +100,10 @@ def parse_page(html):
# parse the rows one by one (skipping headings)
for row in table('tr')[1:]:
# grab info about the row
- id_ = row.find('a', class_='detLink')['href'].split('/')[2]
+ row_link = row.find('a', class_='detLink')
+ if row_link is None:
+ continue
+ id_ = row_link['href'].split('/')[2]
seeds, leechers = [i.text for i in row('td')[-2:]]
magnet = row.find(lambda tag:
tag.name == 'a' and
@@ -207,4 +210,4 @@ def copy_magnets(printer, chosen_links, results):
clipboard_text += magnet + "\n"
printer.print('Copying {:X} to clipboard'.format(info_hash))
- pyperclip.copy(clipboard_text)
\ No newline at end of file
+ pyperclip.copy(clipboard_text)
|
fix for empty href on links that occasionally cause an uncaught exception
|
py
|
diff --git a/pendulum/__init__.py b/pendulum/__init__.py
index <HASH>..<HASH> 100644
--- a/pendulum/__init__.py
+++ b/pendulum/__init__.py
@@ -6,6 +6,11 @@ from .time import Time
from .interval import Interval
from .period import Period
+# Mimicking standard library
+datetime = Pendulum
+date = Date
+time = Time
+
# Constants
from .constants import (
MONDAY, TUESDAY, WEDNESDAY,
|
Adds datetime, date and time to module level to mimic standard library
|
py
|
diff --git a/python/jsbeautifier/__init__.py b/python/jsbeautifier/__init__.py
index <HASH>..<HASH> 100644
--- a/python/jsbeautifier/__init__.py
+++ b/python/jsbeautifier/__init__.py
@@ -232,7 +232,7 @@ def main():
argv = sys.argv[1:]
try:
- opts, args = getopt.getopt(argv, "f:s:c:e:o:rdEPjabkil:xhtvXnCO:w:m:",
+ opts, args = getopt.getopt(argv, "f:s:c:e:o:rdEPjab:kil:xhtvXnCO:w:m:",
['file=', 'indent-size=', 'indent-char=', 'eol=', 'outfile=', 'replace', 'disable-preserve-newlines',
'space-in-paren', 'space-in-empty-paren', 'jslint-happy', 'space-after-anon-function',
'brace-style=', 'indent-level=', 'unescape-strings',
|
The -b option should be followed by an argument
|
py
|
diff --git a/openpnm/materials/BereaCubic.py b/openpnm/materials/BereaCubic.py
index <HASH>..<HASH> 100644
--- a/openpnm/materials/BereaCubic.py
+++ b/openpnm/materials/BereaCubic.py
@@ -36,8 +36,6 @@ class BereaCubic(Project):
Examples
--------
- >>> import openpnm as op
- >>> proj = op.materials.BereaCubic(shape=[8, 9, 10])
"""
def __init__(self, shape, name=None, **kwargs):
|
Deleting docstring example in Berea file since it's being run
|
py
|
diff --git a/python/orca/src/bigdl/orca/common.py b/python/orca/src/bigdl/orca/common.py
index <HASH>..<HASH> 100644
--- a/python/orca/src/bigdl/orca/common.py
+++ b/python/orca/src/bigdl/orca/common.py
@@ -183,7 +183,7 @@ def init_orca_context(cluster_mode="local", cores=2, memory="2g", num_nodes=1,
"but got: %s".format(cluster_mode))
ray_args = {}
for key in ["redis_port", "password", "object_store_memory", "verbose", "env",
- "extra_params", "num_ray_nodes", "ray_node_cpu_cores"]:
+ "extra_params", "num_ray_nodes", "ray_node_cpu_cores", "include_webui"]:
if key in kwargs:
ray_args[key] = kwargs[key]
from zoo.ray import RayContext
|
attempt to fix ray memory (#<I>) * attempt to fix ray memory * exclude webui
|
py
|
diff --git a/documentation_builder/conf.py b/documentation_builder/conf.py
index <HASH>..<HASH> 100644
--- a/documentation_builder/conf.py
+++ b/documentation_builder/conf.py
@@ -51,7 +51,7 @@ MOCK_MODULES = [
'optlang', 'optlang.interface', 'optlang.symbolics',
'optlang.symbolics.core',
'future', 'future.utils',
- 'ruamel.yaml'
+ 'ruamel', 'ruamel.yaml'
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
|
fix: add ruamel back into the mock list (#<I>)
|
py
|
diff --git a/bottle_sqlalchemy.py b/bottle_sqlalchemy.py
index <HASH>..<HASH> 100644
--- a/bottle_sqlalchemy.py
+++ b/bottle_sqlalchemy.py
@@ -120,7 +120,7 @@ class SQLAlchemyPlugin(object):
config = route.config
_callback = route.callback
- g = lambda key, default: config.get(key, default)
+ g = lambda key, default: config.get('sqlalchemy', {}).get(key, default)
# hack to support route based config with `ConfigDict`
if bottle.__version__.startswith('0.12'):
g = lambda key, default: config.get('sqlalchemy.' + key, default)
|
Broke old bottle versions. Fixing that now. :/
|
py
|
diff --git a/tools/harness-thci/OpenThread.py b/tools/harness-thci/OpenThread.py
index <HASH>..<HASH> 100755
--- a/tools/harness-thci/OpenThread.py
+++ b/tools/harness-thci/OpenThread.py
@@ -80,7 +80,8 @@ class OpenThread(IThci):
self.pskc = ModuleHelper.Default_PSKc
self.securityPolicySecs = ModuleHelper.Default_SecurityPolicy
self.activetimestamp = ModuleHelper.Default_ActiveTimestamp
- self.sedPollingRate = ModuleHelper.Default_Harness_SED_Polling_Rate
+ #self.sedPollingRate = ModuleHelper.Default_Harness_SED_Polling_Rate
+ self.sedPollingRate = 3
self.deviceRole = None
self.provisioningUrl = ''
self.logThread = Queue()
|
THCI: update default SED polling rate as 3s (#<I>)
|
py
|
diff --git a/LiSE/LiSE/rule.py b/LiSE/LiSE/rule.py
index <HASH>..<HASH> 100644
--- a/LiSE/LiSE/rule.py
+++ b/LiSE/LiSE/rule.py
@@ -543,10 +543,16 @@ class RuleMapping(MutableMapping):
return 'RuleMapping({})'.format([k for k in self])
def __iter__(self):
- return self.engine.db.active_rules_rulebook(
- self.rulebook.name,
- *self.engine.time
- )
+ cache = self.engine._active_rules_cache[self.rulebook.name]
+ seen = set()
+ for rule in cache:
+ if rule in seen:
+ continue
+ for (branch, tick) in self.engine._active_branches():
+ if branch in cache[rule]:
+ yield cache[rule][branch][tick]
+ seen.add(rule)
+ break
def __len__(self):
n = 0
|
Rewrite RuleMapping.__iter__ to use the cache I can't believe I didn't do this already.
|
py
|
diff --git a/CSHLDAP.py b/CSHLDAP.py
index <HASH>..<HASH> 100755
--- a/CSHLDAP.py
+++ b/CSHLDAP.py
@@ -215,7 +215,7 @@ class Member(object):
return 'rtp' in self.groups
def isBirthday(self):
- if not birthday:
+ if not self.birthday:
return False
birthday = self.birthdate()
today = date.today()
@@ -251,6 +251,13 @@ class Member(object):
return
self.memberDict = self.ldap.member(self.uid)
+ def __str__(self):
+ string = ""
+ for key in self.memberDict.keys():
+ thing = self.__getattr__(key)
+ string += str(key) + ": " + str(thing) + "\n"
+ return string
+
def dateFromLDAPTimestamp(timestamp):
# only check the first 8 characters: YYYYmmdd
numberOfCharacters = len("YYYYmmdd")
|
Added __str__ for members
|
py
|
diff --git a/pyEX/common.py b/pyEX/common.py
index <HASH>..<HASH> 100644
--- a/pyEX/common.py
+++ b/pyEX/common.py
@@ -873,9 +873,9 @@ def _quoteSymbols(symbols):
"""urlquote a potentially comma-separate list of symbols"""
if isinstance(symbols, list) or "," not in symbols:
# comma separated, quote separately
- return ",".join(quote(symbol) for symbol in symbols.split(","))
+ return ",".join(quote(symbol, safe='') for symbol in symbols.split(","))
# not comma separated, just quote
- return quote(symbols)
+ return quote(symbols, safe='')
def _timeseriesWrapper(kwargs, key=True, subkey=True):
|
mark '/' to be quoted
|
py
|
diff --git a/ibis/tests/all/test_join.py b/ibis/tests/all/test_join.py
index <HASH>..<HASH> 100644
--- a/ibis/tests/all/test_join.py
+++ b/ibis/tests/all/test_join.py
@@ -2,7 +2,7 @@ import pandas as pd
import pytest
from pytest import param
-from ibis.tests.backends import Csv, Pandas, PySpark
+from ibis.tests.backends import BigQuery, Csv, Pandas, PySpark
# add here backends that passes join tests
all_db_join_supported = [Pandas, PySpark]
@@ -30,8 +30,9 @@ all_db_join_supported = [Pandas, PySpark]
],
)
@pytest.mark.only_on_backends(all_db_join_supported)
-# Csv is a subclass of Pandas so need to skip it explicited
-@pytest.mark.skip_backends([Csv])
+# Csv is a subclass of Pandas so need to skip it explicitly.
+# BigQuery is also not skipped for unknown reason.
+@pytest.mark.skip_backends([Csv, BigQuery])
@pytest.mark.xfail_unsupported
def test_join_project_left_table(backend, con, batting, awards_players, how):
|
SUPP: Disable BigQuery explicitly in all/test_join.py Strangely, `pytest.mark.only_on_backends` doesn't skip BigQuery, so I added an explicit skip as a workaround.
|
py
|
diff --git a/salt/modules/apt.py b/salt/modules/apt.py
index <HASH>..<HASH> 100644
--- a/salt/modules/apt.py
+++ b/salt/modules/apt.py
@@ -319,8 +319,8 @@ def _get_upgradable():
rexp = re.compile('(?m)^Conf '
'([^ ]+) ' # Package name
'\(([^ ]+) ' # Version
- '([^ ]+) ' # Release
- '\[([^\]]+)\]\)$') # Arch
+ '([^ ]+)' # Release
+ '(?: \[([^\]]+)\])?\)$') # Arch
keys = ['name', 'version', 'release', 'arch']
_get = lambda l, k: l[keys.index(k)]
|
Updated the regex to match when there is no [arch] field. - Thanks @avimar for the suggested regexes - Tested on <I>, <I>
|
py
|
diff --git a/src/holodeck/agents.py b/src/holodeck/agents.py
index <HASH>..<HASH> 100644
--- a/src/holodeck/agents.py
+++ b/src/holodeck/agents.py
@@ -544,6 +544,9 @@ class TurtleAgent(HolodeckAgent):
``[forward_force, rot_force]``
Inherits from :class:`HolodeckAgent`."""
+
+ agent_type = "TurtleAgent"
+
@property
def control_schemes(self):
return [("[forward_force, rot_force]", ContinuousActionSpace([2]))]
@@ -577,7 +580,8 @@ class AgentDefinition:
"UavAgent": UavAgent,
"NavAgent": NavAgent,
"AndroidAgent": AndroidAgent,
- "HandAgent": HandAgent
+ "HandAgent": HandAgent,
+ "TurtleAgent": TurtleAgent
}
def __init__(self, agent_name, agent_type, sensors=None, starting_loc=(0, 0, 0),
|
Allow TurtleAgent to be spawned (#<I>)
|
py
|
diff --git a/umap/umap_.py b/umap/umap_.py
index <HASH>..<HASH> 100644
--- a/umap/umap_.py
+++ b/umap/umap_.py
@@ -1996,10 +1996,12 @@ class UMAP(BaseEstimator):
if self._small_data:
try:
+ # sklearn pairwise_distances fails for callable metric on sparse data
+ _m = self.metric if self._sparse_data else self._input_distance_func
dmat = pairwise_distances(
X,
self._raw_data,
- metric=self._input_distance_func,
+ metric=_m,
**self.metric_kwds
)
except (TypeError, ValueError):
|
fixed pairwise_distances/sparse matrix bug for transform
|
py
|
diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py
index <HASH>..<HASH> 100644
--- a/tests/test_pubsub.py
+++ b/tests/test_pubsub.py
@@ -12,7 +12,7 @@ from redis.exceptions import ConnectionError
from .conftest import _get_client, skip_if_redis_enterprise, skip_if_server_version_lt
-def wait_for_message(pubsub, timeout=0.1, ignore_subscribe_messages=False):
+def wait_for_message(pubsub, timeout=0.5, ignore_subscribe_messages=False):
now = time.time()
timeout = now + timeout
while now < timeout:
|
Increased pubsub's wait_for_messages timeout to prevent flaky tests (#<I>)
|
py
|
diff --git a/safe/impact_functions/inundation/flood_OSM_building_impact.py b/safe/impact_functions/inundation/flood_OSM_building_impact.py
index <HASH>..<HASH> 100644
--- a/safe/impact_functions/inundation/flood_OSM_building_impact.py
+++ b/safe/impact_functions/inundation/flood_OSM_building_impact.py
@@ -151,7 +151,7 @@ class FloodBuildingImpactFunction(FunctionProvider):
# Generate simple impact report
table_body = [question,
TableRow([tr('Building type'),
- tr('Temporarily closed'),
+ tr('Number flooded'),
tr('Total')],
header=True),
TableRow([tr('All'), count, N])]
|
Changed Temporarily closed to Number flooded closing issue #<I>
|
py
|
diff --git a/colorama/tests/ansitowin32_test.py b/colorama/tests/ansitowin32_test.py
index <HASH>..<HASH> 100644
--- a/colorama/tests/ansitowin32_test.py
+++ b/colorama/tests/ansitowin32_test.py
@@ -1,11 +1,5 @@
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
-try:
- # python3
- from io import StringIO
-except ImportError:
- # python2
- import StringIO
-
+from io import StringIO
from unittest import TestCase, main
from mock import Mock, patch
|
Remove unused "except ImportError" The io module is available on all supported Pythons. It was added in version <I>. The fallback is never used. For additional details on the module, see: <URL>
|
py
|
diff --git a/brownant/pipeline/base.py b/brownant/pipeline/base.py
index <HASH>..<HASH> 100644
--- a/brownant/pipeline/base.py
+++ b/brownant/pipeline/base.py
@@ -57,9 +57,9 @@ class PipelineProperty(cached_property):
# optional attrs
else:
self.options[name] = value
- lacked_attrs = self.required_attrs - assigned_attrs
- if lacked_attrs:
- raise TypeError("required attrs %r" % ", ".join(lacked_attrs))
+ missing_attrs = self.required_attrs - assigned_attrs
+ if missing_attrs:
+ raise TypeError("missing %r" % ", ".join(missing_attrs))
self.prepare()
|
refine a variable's name.
|
py
|
diff --git a/src/pybel/io/nodelink.py b/src/pybel/io/nodelink.py
index <HASH>..<HASH> 100644
--- a/src/pybel/io/nodelink.py
+++ b/src/pybel/io/nodelink.py
@@ -98,7 +98,7 @@ def node_link_data(graph: BELGraph) -> Mapping[str, Any]:
'multigraph': True,
'graph': graph.graph.copy(),
'nodes': [
- _augment_node_with_sha512(node)
+ _augment_node(node)
for node in nodes
],
'links': [
@@ -111,12 +111,13 @@ def node_link_data(graph: BELGraph) -> Mapping[str, Any]:
}
-def _augment_node_with_sha512(node: BaseEntity) -> BaseEntity:
+def _augment_node(node: BaseEntity) -> BaseEntity:
"""Add the SHA-512 identifier to a node's dictionary."""
rv = node.copy()
rv['id'] = node.as_sha512()
+ rv['bel'] = node.as_bel()
for m in chain(node.get(MEMBERS, []), node.get(REACTANTS, []), node.get(PRODUCTS, [])):
- m.update(_augment_node_with_sha512(m))
+ m.update(_augment_node(m))
return rv
|
Make sure nodes also get BEL in node-link JSON
|
py
|
diff --git a/tests/schema.py b/tests/schema.py
index <HASH>..<HASH> 100644
--- a/tests/schema.py
+++ b/tests/schema.py
@@ -337,6 +337,8 @@ class TestModelDDL(ModelDatabaseTestCase):
class PG10Identity(TestModel):
id = IdentityField()
data = TextField()
+ class Meta:
+ database = self.database
self.assertCreateTable(PG10Identity, [
('CREATE TABLE "pg10identity" ('
|
Fix failing test related to using wrong db with schema test model.
|
py
|
diff --git a/vprof/runner.py b/vprof/runner.py
index <HASH>..<HASH> 100644
--- a/vprof/runner.py
+++ b/vprof/runner.py
@@ -92,7 +92,7 @@ def run(func, options, args=(), kwargs={}, host='localhost', port=8000): # pyli
result = None
for prof in run_stats:
- if not result:
+ if result is None:
result = run_stats[prof]['result']
del run_stats[prof]['result'] # Don't send result to remote host
|
Any return type for result (#<I>) It is safer to check for None, in case the function returns non-standard python variable (ex numpy of tensorflow) Should fix <URL>
|
py
|
diff --git a/dallinger/recruiters.py b/dallinger/recruiters.py
index <HASH>..<HASH> 100644
--- a/dallinger/recruiters.py
+++ b/dallinger/recruiters.py
@@ -258,7 +258,7 @@ class ProlificRecruiter(object):
"reward": self.config.get(
"prolific:reward"
), # This is the hourly rate, in cents. Prolific uses the currency of your account.
- "status": "PUBLISHED",
+ "status": "ACTIVE",
"total_available_places": n,
}
study_info = self.prolificservice.create_study(**study_request)
|
Prolific studies are "ACTIVE", not "PUBLISHED"
|
py
|
diff --git a/spyderlib/plugins/sphinxify.py b/spyderlib/plugins/sphinxify.py
index <HASH>..<HASH> 100644
--- a/spyderlib/plugins/sphinxify.py
+++ b/spyderlib/plugins/sphinxify.py
@@ -26,6 +26,7 @@ from tempfile import mkdtemp
# Sphinx = None
from sphinx.application import Sphinx
from pygments import plugin
+from docutils.utils import SystemMessage as SystemMessage
try:
from sage.misc.misc import SAGE_DOC
@@ -115,7 +116,12 @@ def sphinxify(docstring, format='html'):
sphinx_app = Sphinx(srcdir, confdir, srcdir, doctreedir, format,
confoverrides, None, None, True)
- sphinx_app.build(None, [rst_name])
+ try:
+ sphinx_app.build(None, [rst_name])
+ except SystemMessage:
+ output = 'It\'s not possible to generate rich text help for this object. \
+ Please see it in plain text'
+ return output
if os.path.exists(output_name):
output = open(output_name, 'r').read()
|
Print a warning message when sphinx can't process a docstring -. Not every package use rst docstrings, so we have to account for these cases.
|
py
|
diff --git a/gluish/database.py b/gluish/database.py
index <HASH>..<HASH> 100644
--- a/gluish/database.py
+++ b/gluish/database.py
@@ -35,14 +35,16 @@ class sqlite3db(object):
(1, "Hello World"))
"""
- def __init__(self, path, copy_on_exit=None):
+ def __init__(self, path, timeout=5.0, detect_types=0, copy_on_exit=None):
self.path = path
self.conn = None
self.cursor = None
+ self.timeout = timeout
+ self.detect_types = detect_types
self.copy_on_exit = copy_on_exit
def __enter__(self):
- self.conn = sqlite3.connect(self.path)
+ self.conn = sqlite3.connect(self.path, timeout=self.timeout, detect_types=self.detect_types)
self.conn.text_factory = str
self.cursor = self.conn.cursor()
return self.cursor
|
pass on timeout and detect_types
|
py
|
diff --git a/scripts/validate_docs_snippet_line_numbers.py b/scripts/validate_docs_snippet_line_numbers.py
index <HASH>..<HASH> 100644
--- a/scripts/validate_docs_snippet_line_numbers.py
+++ b/scripts/validate_docs_snippet_line_numbers.py
@@ -195,6 +195,12 @@ if __name__ == "__main__":
broken_refs: List[DocusaurusRef] = evaluate_snippet_validity(docusaurus_refs)
if broken_refs:
print_diagnostic_report(broken_refs)
- # sys.exit(1) # TODO(cdkini): Enable once all errors are resolved
else:
print("[SUCCESS] All snippets are valid and referenced properly!")
+
+ # Chetan - 20220316 - While this number should be 0, getting the number of warnings down takes time
+ # and effort. In the meanwhile, we want to set an upper bound on warnings to ensure we're not introducing
+ # further regressions. As snippets are validated, developers should update this number.
+ assert (
+ len(broken_refs) <= 542
+ ), "A broken snippet reference was introduced; please resolve the matter before merging."
|
[MAINTENANCE] Set upper bound on number of allowed warnings in snippet validation script
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@ def read(fname):
setup(
name='pipe2py',
- version='0.10.0',
+ version='0.11.0',
description=(
'A project to compile Yahoo! Pipes into Python. '
'The pipe2py package can compile a Yahoo! Pipe into pure Python source'
|
Bump to version <I>
|
py
|
diff --git a/airflow/sensors/sql.py b/airflow/sensors/sql.py
index <HASH>..<HASH> 100644
--- a/airflow/sensors/sql.py
+++ b/airflow/sensors/sql.py
@@ -43,12 +43,12 @@ class SqlSensor(BaseSensorOperator):
:type parameters: dict or iterable
:param success: Success criteria for the sensor is a Callable that takes first_cell
as the only argument, and returns a boolean (optional).
- :type: success: Optional<Callable[[Any], bool]>
+ :type success: Optional<Callable[[Any], bool]>
:param failure: Failure criteria for the sensor is a Callable that takes first_cell
as the only argument and return a boolean (optional).
- :type: failure: Optional<Callable[[Any], bool]>
+ :type failure: Optional<Callable[[Any], bool]>
:param fail_on_empty: Explicitly fail on no rows returned.
- :type: fail_on_empty: bool
+ :type fail_on_empty: bool
"""
template_fields: Iterable[str] = ('sql',)
|
Fix docstring of SqlSensor (#<I>) Fix docstring of SqlSensor by removing `:` from `type:`.
|
py
|
diff --git a/tests/test_pickle_core.py b/tests/test_pickle_core.py
index <HASH>..<HASH> 100644
--- a/tests/test_pickle_core.py
+++ b/tests/test_pickle_core.py
@@ -271,7 +271,7 @@ def _helper_bad_cache_file(sleeptime):
# we want this to succeed at leat once
def test_bad_cache_file():
"""Test pickle core handling of bad cache files."""
- sleeptimes = [0.5, 0.1, 0.2, 0.3, 0.8, 1]
+ sleeptimes = [0.5, 0.1, 0.2, 0.3, 0.8, 1, 2]
sleeptimes = sleeptimes + sleeptimes
for sleeptime in sleeptimes:
if _helper_bad_cache_file(sleeptime):
|
try to fix bad_cache_file test
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -105,7 +105,7 @@ setup(
'jsonschema>=2.4.0',
'lockfile>=0.12.2',
'MarkupSafe>=0.23',
- # 'MySQL-python>=1.2.5',
+ 'MySQL-python>=1.2.5',
'oauth2client>=2.0.0',
'pycrypto>=2.6.1', # required by GCP Ansible Dynamic Inventory (gce.py)
'python-daemon>=2.0.0,<2.1.0',
|
Added MySQL-python module to setup.py.
|
py
|
diff --git a/pymc3/smc/sample_smc.py b/pymc3/smc/sample_smc.py
index <HASH>..<HASH> 100644
--- a/pymc3/smc/sample_smc.py
+++ b/pymc3/smc/sample_smc.py
@@ -180,8 +180,8 @@ def sample_smc(
if chains is None:
chains = max(2, cores)
- elif chains == 1:
- cores = 1
+ else:
+ cores = min(chains, cores)
_log.info(
f"Sampling {chains} chain{'s' if chains > 1 else ''} "
|
Fix sample_smc chains/cores info
|
py
|
diff --git a/src/diamond/handler/Handler.py b/src/diamond/handler/Handler.py
index <HASH>..<HASH> 100644
--- a/src/diamond/handler/Handler.py
+++ b/src/diamond/handler/Handler.py
@@ -29,7 +29,6 @@ class Handler(object):
self.log.debug("Running Handler %s locked" % (self))
self.lock.acquire()
self.process(metric)
- self.lock.release()
except Exception:
self.log.error(traceback.format_exc())
finally:
|
Only release in the finally block, this prevents a double lock release issue
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -50,6 +50,8 @@ setuptools.setup(
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP',
|
Add Python <I> and <I> support to package
|
py
|
diff --git a/meshio/off_io.py b/meshio/off_io.py
index <HASH>..<HASH> 100644
--- a/meshio/off_io.py
+++ b/meshio/off_io.py
@@ -2,7 +2,8 @@
#
"""
I/O for the OFF surface format, cf.
-<https://en.wikipedia.org/wiki/OFF_(file_format)>.
+<https://en.wikipedia.org/wiki/OFF_(file_format)>,
+<http://www.geomview.org/docs/html/OFF.html>.
"""
from itertools import islice
import logging
@@ -79,7 +80,9 @@ def read_buffer(f):
data = stripped.split()
num_points = int(data[0])
- assert num_points == len(data) - 1
+ # Don't be too strict with the len(data) assertions here; the OFF specifications
+ # allows for RGB colors.
+ # assert num_points == len(data) - 1
assert num_points == 3, "Can only handle triangular faces"
data = [int(data[1]), int(data[2]), int(data[3])]
|
be less strict in the OFF assertions Fixes #<I>.
|
py
|
diff --git a/pyAudioAnalysis/ShortTermFeatures.py b/pyAudioAnalysis/ShortTermFeatures.py
index <HASH>..<HASH> 100644
--- a/pyAudioAnalysis/ShortTermFeatures.py
+++ b/pyAudioAnalysis/ShortTermFeatures.py
@@ -396,7 +396,6 @@ def spectrogram(signal, sampling_rate, window, step, plot=False,
signal = (signal - dc_offset) / (maximum - dc_offset)
num_samples = len(signal) # total number of signals
- cur_p = 0
count_fr = 0
num_fft = int(window / 2)
specgram = np.array([], dtype=np.float64)
|
added tqdm in spegram calculation
|
py
|
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index <HASH>..<HASH> 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1348,7 +1348,9 @@ def assert_frame_equal(left, right, check_dtype=True,
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
- If true, ignore the order of rows & columns
+ If True, ignore the order of index & columns.
+ Note: index labels must match their respective rows
+ (same as in columns) - same labels must be with the same data
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message
|
DOC: Clarify check_like behavior in assert_frame_equal (#<I>)
|
py
|
diff --git a/tornado/autoreload.py b/tornado/autoreload.py
index <HASH>..<HASH> 100644
--- a/tornado/autoreload.py
+++ b/tornado/autoreload.py
@@ -233,7 +233,7 @@ def _reload():
os.environ.get("PYTHONPATH", ""))
if not _has_execv:
subprocess.Popen([sys.executable] + argv)
- sys.exit(0)
+ os._exit(0)
else:
try:
os.execv(sys.executable, [sys.executable] + argv)
|
autoreload: Improve autoreload wrapper on windows On platforms without execv(), we must spawn a separate subprocess. This breaks down when an internal autoreload is firing in a wrapped process, since the internal reload raises an exception with the wrapper catches (triggering a second reload and another copy of the process).
|
py
|
diff --git a/zipline/test/test_messaging.py b/zipline/test/test_messaging.py
index <HASH>..<HASH> 100644
--- a/zipline/test/test_messaging.py
+++ b/zipline/test/test_messaging.py
@@ -104,8 +104,6 @@ class SimulatorTestCase(object):
sim.register_controller( con )
sim.register_components([ret1, ret2, client])
- assert False
-
# Simulation
# ----------
sim.simulate()
|
Woops, left pdb assert in.
|
py
|
diff --git a/aegean.py b/aegean.py
index <HASH>..<HASH> 100644
--- a/aegean.py
+++ b/aegean.py
@@ -906,8 +906,11 @@ def find_sources_in_image(filename, hdu_index=0, outfile=None,rms=None, max_summ
# flux values
#the background is taken from background map
- source.background=bkgimg[int(round(dec_pix)),int(round(ra_pix))]
- source.local_rms=rmsimg[int(round(dec_pix)),int(round(ra_pix))]
+ # Clamp the pixel location to the edge of the background map (see Trac #51)
+ x = min(int(round(ra_pix)), bkgimg.shape[1]-1)
+ y = min(int(round(dec_pix)), bkgimg.shape[0]-1)
+ source.background=bkgimg[y,x]
+ source.local_rms=rmsimg[y,x]
source.peak_flux = mp.params[j*6]
source.err_peak_flux = mp.perror[j*6]
|
#<I> Aegean fix crash for sources at edge of image (clamp background pixel location to edge)
|
py
|
diff --git a/configure.py b/configure.py
index <HASH>..<HASH> 100755
--- a/configure.py
+++ b/configure.py
@@ -246,7 +246,7 @@ n.comment('Main executable is library plus main() function.')
objs = cxx('ninja')
ninja = n.build(binary('ninja'), 'link', objs, implicit=ninja_lib,
variables=[('libs', libs)])
-if ninja != 'ninja':
+if 'ninja' not in ninja:
n.build('ninja', 'phony', ninja)
n.newline()
all_targets += ninja
@@ -300,7 +300,7 @@ if platform != 'mingw' and platform != 'windows':
ninja_test = n.build(binary('ninja_test'), 'link', objs, implicit=ninja_lib,
variables=[('ldflags', test_ldflags),
('libs', test_libs)])
-if ninja_test != 'ninja_test':
+if 'ninja_test' not in ninja_test:
n.build('ninja_test', 'phony', ninja_test)
n.newline()
all_targets += ninja_test
|
ninja_syntax.build() returns a list, not a single string. Fixes 'multiple rules' warnings on mac/linux.
|
py
|
diff --git a/vcr/cassette.py b/vcr/cassette.py
index <HASH>..<HASH> 100644
--- a/vcr/cassette.py
+++ b/vcr/cassette.py
@@ -16,11 +16,13 @@ from .util import partition_dict
try:
from asyncio import iscoroutinefunction
- from ._handle_coroutine import handle_coroutine
except ImportError:
def iscoroutinefunction(*args, **kwargs):
return False
+if sys.version_info[:2] >= (3, 5):
+ from ._handle_coroutine import handle_coroutine
+else:
def handle_coroutine(*args, **kwags):
raise NotImplementedError('Not implemented on Python 2')
|
Fix cassette module to work with py<I>
|
py
|
diff --git a/salt/modules/npm.py b/salt/modules/npm.py
index <HASH>..<HASH> 100644
--- a/salt/modules/npm.py
+++ b/salt/modules/npm.py
@@ -76,13 +76,16 @@ def install(pkg=None,
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr'])
+ # npm >1.2.21 is putting the output to stderr even though retcode is 0
+ npm_output = result['stdout'] or result['stderr']
try:
- return json.loads(result['stdout'])
+ return json.loads(npm_output)
except ValueError:
# Not JSON! Try to coax the json out of it!
pass
- lines = result['stdout'].splitlines()
+ lines = npm_output.splitlines()
+ log.error(lines)
# Strip all lines until JSON output starts
while not lines[0].startswith("{") and not lines[0].startswith("["):
@@ -92,7 +95,7 @@ def install(pkg=None,
return json.loads(''.join(lines))
except ValueError:
# Still no JSON!! Return the stdout as a string
- return result['stdout']
+ return npm_output
def uninstall(pkg,
|
Fixed issue with npm.install for npm>=<I>. Closes #<I>
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@ from setuptools import setup
setup(
name='Flask-Security',
- version='1.6.10',
+ version='1.6.9',
url='https://github.com/mattupstate/flask-security',
license='MIT',
author='Matt Wright',
|
Fix version to be able to merge
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@ with open('docido_sdk/__init__.py') as istr:
exec(l)
install_requires = [
- 'elasticsearch>=2.3.0,<5',
+ 'elasticsearch==2.3.0',
'ProxyTypes==0.9',
'pymongo>=2.9.4',
'python-dateutil>=2.5.3',
|
Force ES to <I>
|
py
|
diff --git a/mambuloan.py b/mambuloan.py
index <HASH>..<HASH> 100644
--- a/mambuloan.py
+++ b/mambuloan.py
@@ -280,11 +280,13 @@ class MambuLoan(MambuStruct):
holder.attrs['clients'] = clients
self.attrs['clients'] = loanclients
-
+ self.attrs['holderType'] = "Grupo"
+
else: # "CLIENT"
holder = MambuClient(entid=self['accountHolderKey'],
urlfunc=getclienturl,
**params)
+ self.attrs['holderType'] = "Cliente"
self.attrs['holder'] = holder
|
Add holderType attr on MambuLoan.setHolder to diff between groups and clients
|
py
|
diff --git a/src/main/config/portal/default/redbox/scripts/actions/workflow.py b/src/main/config/portal/default/redbox/scripts/actions/workflow.py
index <HASH>..<HASH> 100644
--- a/src/main/config/portal/default/redbox/scripts/actions/workflow.py
+++ b/src/main/config/portal/default/redbox/scripts/actions/workflow.py
@@ -65,6 +65,14 @@ class WorkflowData(DefaultWorkflowData):
uploadFile = uploadFile.replace("C:\\fakepath\\", "")
fileDetails = self.vc("sessionState").get(uploadFile)
+ if fileDetails is None:
+ uploadFile = uploadFile.rsplit("\\", 1)[-1]
+ fileDetails = self.vc("sessionState").get(uploadFile)
+ if fileDetails is None:
+ print "**** fileDetails is None!!! ***"
+ return self.__toJson({
+ "error": "fileDetails is None (no upload file!)"
+ })
#self.log.debug("fileDetails:%s" % fileDetails)
errorDetails = fileDetails.get("error")
if errorDetails:
|
Fixed attach file error in IE 8
|
py
|
diff --git a/src/mbed_cloud/account_management/account_management.py b/src/mbed_cloud/account_management/account_management.py
index <HASH>..<HASH> 100644
--- a/src/mbed_cloud/account_management/account_management.py
+++ b/src/mbed_cloud/account_management/account_management.py
@@ -598,8 +598,7 @@ class Account(BaseObject):
@property
def sales_contact_email(self):
- """
- Gets the sales_contact_email of this AccountInfo.
+ """Gets the sales_contact_email of this AccountInfo.
Email address of the sales contact.
|
:tshirt: docstring
|
py
|
diff --git a/gooey/python_bindings/gooey_decorator.py b/gooey/python_bindings/gooey_decorator.py
index <HASH>..<HASH> 100644
--- a/gooey/python_bindings/gooey_decorator.py
+++ b/gooey/python_bindings/gooey_decorator.py
@@ -58,7 +58,9 @@ def Gooey(f=None,
build_spec = None
if load_build_config:
try:
- build_spec = json.load(open(load_build_config, "r"))
+ exec_dir = os.path.dirname(sys.argv[0])
+ open_path = os.path.join(exec_dir,load_build_config)
+ build_spec = json.load(open(open_path, "r"))
except Exception as e:
print( 'Exception loading Build Config from {0}: {1}'.format(load_build_config, e))
sys.exit(1)
|
load build config file from directory where the running python script is located
|
py
|
diff --git a/holoviews/plotting/mpl/chart.py b/holoviews/plotting/mpl/chart.py
index <HASH>..<HASH> 100644
--- a/holoviews/plotting/mpl/chart.py
+++ b/holoviews/plotting/mpl/chart.py
@@ -66,7 +66,7 @@ class CurvePlot(ChartPlot):
show_legend = param.Boolean(default=True, doc="""
Whether to show legend for the plot.""")
- style_opts = ['alpha', 'color', 'visible', 'linewidth', 'linestyle', 'marker']
+ style_opts = ['alpha', 'color', 'visible', 'linewidth', 'linestyle', 'marker', 'ms']
_plot_methods = dict(single='plot')
|
Added missing 'ms' style option to matplotlib CurvePlot (#<I>)
|
py
|
diff --git a/nameko/extensions.py b/nameko/extensions.py
index <HASH>..<HASH> 100644
--- a/nameko/extensions.py
+++ b/nameko/extensions.py
@@ -311,6 +311,8 @@ def is_entrypoint(obj):
def iter_extensions(extension):
+ """ Depth-first iterator over sub-extensions on `extension`.
+ """
for _, ext in inspect.getmembers(extension, is_extension):
for item in iter_extensions(ext):
yield item
|
Comment for iter_extensions
|
py
|
diff --git a/fault/verilator_target.py b/fault/verilator_target.py
index <HASH>..<HASH> 100644
--- a/fault/verilator_target.py
+++ b/fault/verilator_target.py
@@ -187,7 +187,7 @@ class VerilatorTarget(VerilogTarget):
def make_print(self, i, action):
name = verilog_name(action.port.name)
- if action.format_str.__contains__("%x"):
+ if action.format_str.__contains__("%x") or action.format_str.__contains__("%d"):
return [f'printf("'
f'{action.format_str}", top->{name});']
else:
|
allowing printing %d or %x
|
py
|
diff --git a/folium/plugins/heat_map_withtime.py b/folium/plugins/heat_map_withtime.py
index <HASH>..<HASH> 100644
--- a/folium/plugins/heat_map_withtime.py
+++ b/folium/plugins/heat_map_withtime.py
@@ -42,7 +42,7 @@ class HeatMapWithTime(JSCSSMixin, Layer):
display_index: default True
Display the index (usually time) in the time control.
index_steps: default 1
- Steps to take in the index dimension between aimation steps.
+ Steps to take in the index dimension between animation steps.
min_speed: default 0.1
Minimum fps speed for animation.
max_speed: default 10
|
docs: fix simple typo, aimation -> animation There is a small typo in folium/plugins/heat_map_withtime.py. Should read `animation` rather than `aimation`.
|
py
|
diff --git a/cachalot/tests/models.py b/cachalot/tests/models.py
index <HASH>..<HASH> 100644
--- a/cachalot/tests/models.py
+++ b/cachalot/tests/models.py
@@ -18,20 +18,13 @@ class Test(Model):
bin = BinaryField(null=True, blank=True)
class Meta(object):
- app_label = 'cachalot'
ordering = ('name',)
class TestParent(Model):
name = CharField(max_length=20)
- class Meta(object):
- app_label = 'cachalot'
-
class TestChild(TestParent):
public = BooleanField(default=False)
permissions = ManyToManyField('auth.Permission', blank=True)
-
- class Meta(object):
- app_label = 'cachalot'
|
Removes the useless explicit app_label since Django <I>.
|
py
|
diff --git a/dispatch/api/serializers.py b/dispatch/api/serializers.py
index <HASH>..<HASH> 100644
--- a/dispatch/api/serializers.py
+++ b/dispatch/api/serializers.py
@@ -53,7 +53,7 @@ class ImageSerializer(serializers.HyperlinkedModelSerializer):
def update(self, instance, validated_data):
# Save properties
- instance.title = validated_data.get('title', instance.title)
+ instance.title = validated_data.get('title', instance.title)
instance.save()
# Save relationships
@@ -213,8 +213,8 @@ class ArticleSerializer(serializers.HyperlinkedModelSerializer):
content = serializers.ReadOnlyField(source='get_json')
content_json = serializers.CharField(write_only=True)
- authors = PersonSerializer(many=True)
- author_ids = serializers.CharField(write_only=True)
+ authors = PersonSerializer(many=True, read_only=True)
+ author_ids = serializers.ListField(write_only=True, child=serializers.IntegerField())
authors_string = serializers.CharField(source='get_author_string',read_only=True)
tags = TagSerializer(many=True, read_only=True)
|
ArticleSerializer: change author_ids from string to list
|
py
|
diff --git a/nfc/npp/server.py b/nfc/npp/server.py
index <HASH>..<HASH> 100644
--- a/nfc/npp/server.py
+++ b/nfc/npp/server.py
@@ -50,6 +50,8 @@ class NPPServer(Thread):
try:
data = nfc.llcp.recv(socket)
+ while nfc.llcp.poll(socket, "recv"):
+ data += nfc.llcp.recv(socket)
if not data:
log.debug("no data")
return # connection closed
|
Reverted patch <I>.
|
py
|
diff --git a/mapchete/mapchete.py b/mapchete/mapchete.py
index <HASH>..<HASH> 100644
--- a/mapchete/mapchete.py
+++ b/mapchete/mapchete.py
@@ -129,12 +129,16 @@ class Mapchete(object):
del self.tile_cache[tile.id]
tile_event.set()
tile_process = None
+ message = None
if result:
if result == "empty":
status = "empty"
+ else:
+ status = "custom"
+ message = result
else:
status = "ok"
- return tile.id, status, None
+ return tile.id, status, message
def get(self, tile, overwrite=False):
"""
@@ -399,6 +403,8 @@ class MapcheteProcess():
pixelbuffer=pixelbuffer
)
else:
+ # if zoom < baselevel, iterate per zoom level until baselevel
+ # and resample tiles using their 4 parents.
return RasterFileTile(
input_file,
self.tile,
|
experimenting with status messages & adding pyramid plan
|
py
|
diff --git a/djstripe/models.py b/djstripe/models.py
index <HASH>..<HASH> 100644
--- a/djstripe/models.py
+++ b/djstripe/models.py
@@ -1741,6 +1741,10 @@ class Source(StripeObject):
)
)
+ source_data = StripeJSONField(help_text=(
+ "The data corresponding to the source type."
+ ))
+
customer = models.ForeignKey(
"Customer", on_delete=models.CASCADE, related_name="sources_v3"
)
@@ -1748,6 +1752,12 @@ class Source(StripeObject):
stripe_class = stripe.Source
stripe_dashboard_item_name = "sources"
+ @classmethod
+ def _manipulate_stripe_object_hook(cls, data):
+ # The source_data dict is an alias of all the source types
+ data["source_data"] = data[data["type"]]
+ return data
+
def _attach_objects_hook(self, cls, data):
customer = cls._stripe_object_to_customer(target_cls=Customer, data=data)
if customer:
|
Add a `source_data` field on Source which aliases the core source field
|
py
|
diff --git a/lib/fileutil.py b/lib/fileutil.py
index <HASH>..<HASH> 100644
--- a/lib/fileutil.py
+++ b/lib/fileutil.py
@@ -126,9 +126,20 @@ def getDate():
def isFits(input):
"""
- Returns a tuple (isfits, fitstype)
+ Always returns a tuple (isfits, fitstype)
isfits - True|False
- fitstype - one of 'waiver', 'mef', 'simple'
+ fitstype - if True, one of 'waiver', 'mef', 'simple'
+ if False, None
+ if None, Exception description
+
+ Input images which do not have a valid FITS filename will automatically
+ result in a return of (False, None).
+
+ In the case that the input has a valid FITS filename but runs into some
+ error upon opening, this routine will return a value of 'isfits = None' along
+ with the Exception strings as the 'fitstype'.
+
+
"""
isfits = False
fitstype = None
@@ -140,8 +151,9 @@ def isFits(input):
if isfits:
try:
f = pyfits.open(input)
- except IOError:
- raise
+ except Exception, e:
+ f.close()
+ return None, str(type(e))+e.args[0]+' for '+input
data0 = f[0].data
if data0 != None:
try:
|
This behavior has been changed for Ticket #<I>. WJH git-svn-id: <URL>
|
py
|
diff --git a/client-python/setup.py b/client-python/setup.py
index <HASH>..<HASH> 100644
--- a/client-python/setup.py
+++ b/client-python/setup.py
@@ -5,16 +5,15 @@ pep420_package_finder = PEP420PackageFinder()
setup(
name='grakn',
packages=pep420_package_finder.find('.', include=['grakn*']),
- version='1.2.4.6',
+ version='1.3.0',
license='Apache-2.0',
description='A Python client for Grakn',
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
author='Grakn Labs',
author_email='community@grakn.ai',
- url='https://github.com/flyingsilverfin/grakn/tree/client-python-dev',
-# download_url='https://github.com/graknlabs/grakn-python/archive/v0.8.1.tar.gz',
+ url='https://github.com/graknlabs/grakn/tree/master/client-python',
keywords=['grakn', 'database', 'graph', 'knowledgebase', 'knowledge-engineering'],
- python_requires='>=3.4.0',
+ python_requires='>=3.6.0',
install_requires=['grpcio', 'protobuf']
)
|
Update version and URL for <I> and required python version
|
py
|
diff --git a/scoped_nodes.py b/scoped_nodes.py
index <HASH>..<HASH> 100644
--- a/scoped_nodes.py
+++ b/scoped_nodes.py
@@ -927,13 +927,15 @@ class Class(StmtMixIn, LocalsDictNodeNG, FilterStmtsMixin):
if not herited and not implements.frame() is self:
return
found = set()
+ missing = False
for iface in unpack_infer(implements):
if iface is YES:
+ missing = True
continue
if not iface in found and handler_func(iface):
found.add(iface)
yield iface
- if not found:
+ if missing:
raise InferenceError()
|
fix Class.interfaces so that no InferenceError raised on empty __implements__ --HG-- branch : stable
|
py
|
diff --git a/crossplane/__init__.py b/crossplane/__init__.py
index <HASH>..<HASH> 100644
--- a/crossplane/__init__.py
+++ b/crossplane/__init__.py
@@ -11,7 +11,7 @@ __title__ = 'crossplane'
__summary__ = 'Reliable and fast NGINX configuration file parser.'
__url__ = 'https://github.com/nginxinc/crossplane'
-__version__ = '0.4.15'
+__version__ = '0.5.0'
__author__ = 'Arie van Luttikhuizen'
__email__ = 'aluttik@gmail.com'
|
Increased version to <I>
|
py
|
diff --git a/simuvex/s_state.py b/simuvex/s_state.py
index <HASH>..<HASH> 100644
--- a/simuvex/s_state.py
+++ b/simuvex/s_state.py
@@ -404,6 +404,7 @@ class SimState(object): # pylint: disable=R0904
strs.append(self.BVV(s))
# end string table with NULL
+ ptrs = ptrs[::-1]
ptrs.append(self.BVV(0, self.arch.bits))
strs = strs[::-1]
@@ -434,6 +435,7 @@ class SimState(object): # pylint: disable=R0904
ptrs.append(p)
ps = []
+ ptrs = ptrs[::-1]
for p in ptrs:
ps += p
if self.arch.memory_endness == "Iend_LE":
|
env pointers were before argv pointers and NULL was at wrong place
|
py
|
diff --git a/examples/Example2_ScheduledReporters.py b/examples/Example2_ScheduledReporters.py
index <HASH>..<HASH> 100644
--- a/examples/Example2_ScheduledReporters.py
+++ b/examples/Example2_ScheduledReporters.py
@@ -50,8 +50,6 @@ results = workspace.getScheduledReporterResults()
print('\t...and put these results into a pandas dataframe: import pandas as pd \n pd.DataFrame(result)')
-resultframe = pandas.DataFrame(results)
-resultframe.columns = ['ticks','stop1','stop2','sheep','wolves']
print(resultframe)
print(workspace.report("ticks"))
print('\n3) Shutdown the server to release compute resources using: nl4py.stopServer()')
|
getScheduledReporterResults now returns a pandas dataframe. Example adjusted accordingly
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.