diff stringlengths 139 3.65k | message stringlengths 8 627 | diff_languages stringclasses 1 value |
|---|---|---|
diff --git a/safe_qgis/tools/shake_grid/shake_grid.py b/safe_qgis/tools/shake_grid/shake_grid.py
index <HASH>..<HASH> 100644
--- a/safe_qgis/tools/shake_grid/shake_grid.py
+++ b/safe_qgis/tools/shake_grid/shake_grid.py
@@ -533,6 +533,11 @@ class ShakeGrid(object):
# Now run GDAL warp scottie...
self._run_command(command)
+ # We will use keywords file name with simple algorithm name since it
+ # will raise an error in windows related to having double colon in path
+ if 'invdist' in algorithm:
+ algorithm = 'invdist'
+
# copy the keywords file from fixtures for this layer
self.create_keyword_file(algorithm) | [Realtime] Use simple keywords name to pass the tests on windows. | py |
diff --git a/a10_neutron_lbaas/logging_client.py b/a10_neutron_lbaas/logging_client.py
index <HASH>..<HASH> 100644
--- a/a10_neutron_lbaas/logging_client.py
+++ b/a10_neutron_lbaas/logging_client.py
@@ -3,6 +3,8 @@ import six.moves.builtins
from oslo_log.helpers import logging as logging
+from a10_neutron_lbaas import appliance_client_base
+
LOG = logging.getLogger(__name__)
@@ -11,9 +13,10 @@ def is_builtin(a):
return False
return type(a).__module__ == six.moves.builtins.__name__
-class LoggingProxy(object):
+
+class LoggingProxy(appliance_client_base.StupidSimpleProxy):
def __init__(self, underlying, path=[]):
- self._underlying = underlying
+ super(LoggingProxy, self).__init__(underlying)
self._path = path
def __getattr__(self, attr): | Implementation of PatientProxy to deal with Invalid Admin Session errors | py |
diff --git a/web3/providers/eth_tester/middleware.py b/web3/providers/eth_tester/middleware.py
index <HASH>..<HASH> 100644
--- a/web3/providers/eth_tester/middleware.py
+++ b/web3/providers/eth_tester/middleware.py
@@ -263,7 +263,7 @@ def guess_from(web3, transaction):
def guess_gas(web3, transaction):
- return web3.eth.estimateGas(transaction)
+ return web3.eth.estimateGas(transaction) * 2
@curry
@@ -281,6 +281,7 @@ fill_default_gas = fill_default('gas', guess_gas)
def default_transaction_fields_middleware(make_request, web3):
def middleware(method, params):
+ # TODO send call to eth-tester without gas, and remove guess_gas entirely
if method == 'eth_call':
filled_transaction = pipe(
params[0], | ugly, temporary patch until gas estimates improve | py |
diff --git a/reana_db/version.py b/reana_db/version.py
index <HASH>..<HASH> 100755
--- a/reana_db/version.py
+++ b/reana_db/version.py
@@ -14,4 +14,4 @@ and parsed by ``setup.py``.
from __future__ import absolute_import, print_function
-__version__ = "0.5.0.dev20190125"
+__version__ = "0.5.0.dev20190213" | release: <I>.de<I> | py |
diff --git a/muffin_redis.py b/muffin_redis.py
index <HASH>..<HASH> 100644
--- a/muffin_redis.py
+++ b/muffin_redis.py
@@ -340,6 +340,14 @@ try:
self._pubsubs.append(fps)
return fps
+ @asyncio.coroutine
+ def pubsub_channels(self):
+ channels = set()
+ for ps in self._pubsubs:
+ for channel in ps.channels:
+ channels.add(channel)
+ return list(channels)
+
class FakePubSub(fakeredis.FakePubSub):
def __getattribute__(self, name):
"""Make a coroutine.""" | Added simulated FakeRedis.pubsub_channels | py |
diff --git a/u2flib_server/__init__.py b/u2flib_server/__init__.py
index <HASH>..<HASH> 100644
--- a/u2flib_server/__init__.py
+++ b/u2flib_server/__init__.py
@@ -25,4 +25,4 @@
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-__version__ = "3.3.0-dev"
+__version__ = "4.0.0-dev" | Increment version to <I> | py |
diff --git a/vasppy/poscar.py b/vasppy/poscar.py
index <HASH>..<HASH> 100644
--- a/vasppy/poscar.py
+++ b/vasppy/poscar.py
@@ -20,7 +20,29 @@ class Poscar:
self.coordinate_type = 'Direct'
self.coordinates = np.array( [ [ 0.0, 0.0, 0.0 ] ] )
self.selective_dynamics = False
-
+
+ def coordinates_by_species( self, species ):
+ return self.coordinates[ self.range_by_species( species ) ]
+
+ def range_by_species( self, species ):
+ i = 0
+ atom_range = {}
+ for a, n in zip( self.atoms, self.atom_numbers ):
+ atom_range[ a ] = range(i, i+n)
+ i += n
+ return atom_range[ species ]
+
+ def atom_number_by_species( self, species ):
+ atom_numbers = { a : n for a, n in zip( self.atoms, self.atom_numbers ) }
+ return atom_numbers[ species ]
+
+ def sorted( self, species ):
+ new_poscar = copy.deepcopy( self )
+ new_poscar.atoms = species
+ new_poscar.atom_numbers = [ self.atom_number_by_species( s ) for s in species ]
+ new_poscar.coordinates = np.concatenate( [ self.coordinates_by_species( s ) for s in species ], axis = 0 )
+ return new_poscar
+
def read_from( self, filename ):
try:
with open( filename ) as f: | Added methods for sorting POSCAR by atomic species | py |
diff --git a/napalm/ios/ios.py b/napalm/ios/ios.py
index <HASH>..<HASH> 100644
--- a/napalm/ios/ios.py
+++ b/napalm/ios/ios.py
@@ -540,9 +540,10 @@ class IOSDriver(NetworkDriver):
elif source_config:
kwargs = dict(ssh_conn=self.device, source_config=source_config, dest_file=dest_file,
direction='put', file_system=file_system)
- enable_scp = True
+ use_scp = True
if self.inline_transfer:
- enable_scp = False
+ use_scp = False
+
with TransferClass(**kwargs) as transfer:
# Check if file already exists and has correct MD5
@@ -553,8 +554,14 @@ class IOSDriver(NetworkDriver):
msg = "Insufficient space available on remote device"
return (False, msg)
- if enable_scp:
- transfer.enable_scp()
+ if use_scp:
+ cmd = 'ip scp server enable'
+ show_cmd = "show running-config | inc {}".format(cmd)
+ output = self.device.send_command_expect(show_cmd)
+ if cmd not in output:
+ msg = "SCP file transfers are not enabled. " \
+ "Configure 'ip scp server enable' on the device."
+ raise CommandErrorException(msg)
# Transfer file
transfer.transfer_file() | throw an error if scp not configured | py |
diff --git a/prefsync.py b/prefsync.py
index <HASH>..<HASH> 100755
--- a/prefsync.py
+++ b/prefsync.py
@@ -48,6 +48,10 @@ def main():
xml = quote(os.path.abspath(os.path.expanduser(args.destination)))
throttleinterval = quote(str(args.throttle_interval))
+ # Make sure the xml file exists, since launchd won't work if it doesn't
+ with open(xml, 'a'):
+ pass
+
with open("binarytoxml.plist") as f:
binarytoxml = f.read() | Make sure the xml file exists | py |
diff --git a/django_auth_adfs/rest_framework.py b/django_auth_adfs/rest_framework.py
index <HASH>..<HASH> 100644
--- a/django_auth_adfs/rest_framework.py
+++ b/django_auth_adfs/rest_framework.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+
import logging
from django.contrib.auth import authenticate | Make sure we don't have a import namespace clash with DRF For python <I> you need to add from __future__ import absolute_import | py |
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,7 @@ from setuptools import find_packages, setup
MIN_PY_VERSION = "3.9"
PACKAGES = find_packages(exclude=["tests", "tests.*"])
-VERSION = "103"
+VERSION = "104"
setup(
name="pydeconz", | Bump to <I> (#<I>) | py |
diff --git a/source/rafcon/gui/models/abstract_state.py b/source/rafcon/gui/models/abstract_state.py
index <HASH>..<HASH> 100644
--- a/source/rafcon/gui/models/abstract_state.py
+++ b/source/rafcon/gui/models/abstract_state.py
@@ -213,9 +213,10 @@ class AbstractStateModel(MetaModel, Hashable):
self.input_data_ports = None
self.output_data_ports = None
self.outcomes = None
- self.meta_signal = None
- self.action_signal = None
- self.destruction_signal = None
+ # History TODO: these are needed by the modification history
+ # self.action_signal = None
+ # self.meta_signal = None
+ # self.destruction_signal = None
self.observe = None
super(AbstractStateModel, self).prepare_destruction() | fix(abstract_state model): fix destructor action_signal is needed by the modification history | py |
diff --git a/python_modules/dagster/dagster/core/telemetry.py b/python_modules/dagster/dagster/core/telemetry.py
index <HASH>..<HASH> 100644
--- a/python_modules/dagster/dagster/core/telemetry.py
+++ b/python_modules/dagster/dagster/core/telemetry.py
@@ -24,6 +24,7 @@ import sys
import time
import uuid
import zlib
+from functools import wraps
from logging.handlers import RotatingFileHandler
import click
@@ -223,6 +224,7 @@ def telemetry_wrapper(f):
)
)
+ @wraps(f)
def wrap(*args, **kwargs):
start_time = datetime.datetime.now()
log_action(action=f.__name__ + '_started', client_time=start_time) | Wrap telemetry decorator Summary: This is required for docstrings to be available on the wrapped functions. Test Plan: Unit Reviewers: catherinewu, sashank Reviewed By: sashank Differential Revision: <URL> | py |
diff --git a/docs/conf.py b/docs/conf.py
index <HASH>..<HASH> 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -30,7 +30,6 @@ extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
- 'sphinx.ext.mathjax',
'sphinx.ext.viewcode'
] | DOC: Removed mathjax from docs. Closes #<I>. | py |
diff --git a/hot_redis.py b/hot_redis.py
index <HASH>..<HASH> 100644
--- a/hot_redis.py
+++ b/hot_redis.py
@@ -110,6 +110,17 @@ class Iterable(Base):
def __iter__(self):
return iter(self.value)
+ def __lt__(self, value):
+ return self.value < self._to_value(value)
+
+ def __le__(self, value):
+ return self.value <= self._to_value(value)
+
+ def __gt__(self, value):
+ return self.value > self._to_value(value)
+
+ def __ge__(self, value):
+ return self.value >= self._to_value(value)
class List(Iterable):
@@ -304,11 +315,13 @@ class Set(Iterable):
raise NotImplemented
def issubset(self, value):
- raise NotImplemented
+ return self <= value
+
def issuperset(self, value):
- raise NotImplemented
+ return self >= value
+
def isdisjoint(self, value):
- raise NotImplemented
+ return not self.intersection(value)
class Dict(Iterable): | Iterable comparison methods and Set counterparts | py |
diff --git a/satpy/dataset/dataid.py b/satpy/dataset/dataid.py
index <HASH>..<HASH> 100644
--- a/satpy/dataset/dataid.py
+++ b/satpy/dataset/dataid.py
@@ -469,7 +469,7 @@ class DataID(dict):
popitem = _immutable
clear = _immutable
update = _immutable # type: ignore
- setdefault = _immutable
+ setdefault = _immutable # type: ignore
def _find_modifiers_key(self):
for key, val in self.items(): | Ignore mypy error on stdlib overloaded method | py |
diff --git a/config/selenium_config.py b/config/selenium_config.py
index <HASH>..<HASH> 100644
--- a/config/selenium_config.py
+++ b/config/selenium_config.py
@@ -17,9 +17,25 @@ dirname = os.path.dirname(os.path.abspath(filename))
# LOGS determines whether Selenium tests will capture logs. Turning it
# on makes the tests much slower.
#
+# False (or anything considered False): no logging.
+#
+# True: turns logging on but **automatically turned off in builders!**
+# (Builders = buildbot, jenkins, etc.)
+#
+# "force": turns logging on, **even when using builders**.
+#
+#
if "LOGS" not in globals():
LOGS = False
+# If we are running in something like Buildbot or Jenkins, we don't
+# want to have the logs be turned on because we forgot to turn them
+# off. So unless LOGS is set to "force", we turn off the logs when
+# running in that environment.
+if LOGS and LOGS != "force" and \
+ (os.environ.get('BUILDBOT') or os.environ.get('JENKINS_HOME')):
+ LOGS = False
+
class Config(selenic.Config): | Added support for LOGS="force". | py |
diff --git a/moto/sqs/models.py b/moto/sqs/models.py
index <HASH>..<HASH> 100644
--- a/moto/sqs/models.py
+++ b/moto/sqs/models.py
@@ -281,7 +281,7 @@ class SQSBackend(BaseBackend):
if len(result) >= count:
break
- if time.time() > polling_end:
+ if result or time.time() > polling_end:
break
return result | Return messages once they are gathered If one or more messages are available, stop waiting and return them. | py |
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@ def readme():
setup(
name="quilt",
- version="2.1.0",
+ version="2.2.0",
packages=find_packages(),
description='Quilt is an open-source data frame registry',
long_description=readme(),
@@ -31,7 +31,7 @@ setup(
author_email='founders@quiltdata.io',
license='LICENSE',
url='https://github.com/quiltdata/quilt',
- download_url='https://github.com/quiltdata/quilt/releases/tag/v2.0.0-alpha',
+ download_url='https://github.com/quiltdata/quilt/releases/tag/v2.2.0-beta',
keywords='quilt quiltdata shareable data dataframe package platform pandas',
install_requires=[
'appdirs>=1.4.0', | Update version (#<I>) * Update version * Update release tag | py |
diff --git a/src/automate/statusobject.py b/src/automate/statusobject.py
index <HASH>..<HASH> 100644
--- a/src/automate/statusobject.py
+++ b/src/automate/statusobject.py
@@ -86,7 +86,7 @@ class StatusObject(AbstractStatusObject, ProgrammableSystemObject, CompareMixin)
history_transpose = Property(transient=True, depends_on='history, _status')
#: Amount of status change events to be stored in history
- history_length = CInt(100)
+ history_length = CInt(1000)
#: How often new values are saved to history, in seconds
history_frequency = CFloat(0) | Change default history length from <I> to <I> | py |
diff --git a/tests_python/test_convert_utilities.py b/tests_python/test_convert_utilities.py
index <HASH>..<HASH> 100644
--- a/tests_python/test_convert_utilities.py
+++ b/tests_python/test_convert_utilities.py
@@ -143,10 +143,10 @@ def test_zip_paths(tmpdir):
for i, zip_basename in enumerate(('MY1.zip', 'my2.egg!')):
zipfile_path = str(tmpdir.join(zip_basename))
- with zipfile.ZipFile(zipfile_path, 'w') as zip_file:
- zip_file.writestr('zipped%s/__init__.py' % (i,), '')
- zip_file.writestr('zipped%s/zipped_contents.py' % (i,), 'def call_in_zip():\n return 1')
- zip_file.close()
+ zip_file = zipfile.ZipFile(zipfile_path, 'w')
+ zip_file.writestr('zipped%s/__init__.py' % (i,), '')
+ zip_file.writestr('zipped%s/zipped_contents.py' % (i,), 'def call_in_zip():\n return 1')
+ zip_file.close()
sys.path.append(zipfile_path)
import importlib | Fix for tests in python <I>. | py |
diff --git a/disqus/templatetags/disqus_tags.py b/disqus/templatetags/disqus_tags.py
index <HASH>..<HASH> 100644
--- a/disqus/templatetags/disqus_tags.py
+++ b/disqus/templatetags/disqus_tags.py
@@ -49,7 +49,7 @@ def disqus_show_comments(shortname=''):
shortname = getattr(settings, 'DISQUS_WEBSITE_SHORTNAME', shortname)
return """
<div id="disqus_thread"></div>
- <script type="text/javascript" src="http://disqus.com/forums/%(shortname)s/embed.js"></script>
+ <script type="text/javascript" async src="http://disqus.com/forums/%(shortname)s/embed.js"></script>
<noscript><p><a href="http://%(shortname)s.disqus.com/?url=ref">View the discussion thread.</a></p></noscript>
<p><a href="http://disqus.com" class="dsq-brlink">blog comments powered by <span class="logo-disqus">Disqus</span></a></p>
""" % dict(shortname=shortname) | asynchronous javascript added for faster loading. Thanks egonSchiele | py |
diff --git a/psamm/commands/fluxcheck.py b/psamm/commands/fluxcheck.py
index <HASH>..<HASH> 100644
--- a/psamm/commands/fluxcheck.py
+++ b/psamm/commands/fluxcheck.py
@@ -102,8 +102,6 @@ class FluxConsistencyCommand(SolverCommandMixin, Command):
for reaction_id, (lo, hi) in fluxanalysis.flux_variability(
self._mm, sorted(self._mm.reactions), {},
tfba=enable_tfba, solver=solver):
- logger.info('Reaction {} is {}, {}'.format(
- reaction_id, lo, hi))
if abs(lo) < epsilon and abs(hi) < epsilon:
inconsistent.add(reaction_id) | fluxcheck: Remove excessive output when using FVA method. | py |
diff --git a/h2o-py/tests/testdir_tree/pyunit_xgboost_tree.py b/h2o-py/tests/testdir_tree/pyunit_xgboost_tree.py
index <HASH>..<HASH> 100644
--- a/h2o-py/tests/testdir_tree/pyunit_xgboost_tree.py
+++ b/h2o-py/tests/testdir_tree/pyunit_xgboost_tree.py
@@ -62,7 +62,7 @@ def xgboost_tree_test():
H2OTree(xgbModel, -1, "NO") # There is only one tree, tree index of 1 points to a second tree
assert False;
except h2o.exceptions.H2OResponseError as e:
- assert e.args[0].dev_msg == "There is no such tree number for given class. Total number of trees is 1."
+ assert e.args[0].dev_msg == "Invalid tree number: -1. Tree number must be >= 0."
# Multinomial model | Fixed assertion in XGBoost tree fetching test | py |
diff --git a/spyder_kernels/utils/nsview.py b/spyder_kernels/utils/nsview.py
index <HASH>..<HASH> 100644
--- a/spyder_kernels/utils/nsview.py
+++ b/spyder_kernels/utils/nsview.py
@@ -533,13 +533,13 @@ def is_known_type(item):
def get_human_readable_type(item):
"""Return human-readable type string of an item"""
if isinstance(item, (ndarray, MaskedArray)):
- return item.dtype.name
+ return u'Array of ' + item.dtype.name
elif isinstance(item, Image):
return "Image"
else:
text = get_type_string(item)
if text is None:
- text = to_text_string('unknown')
+ text = to_text_string('Unknown')
else:
return text[text.find('.')+1:] | Improve type of Numpy arrays | py |
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -48,8 +48,12 @@ setup(
license="BSDv3",
url="https://github.com/hyperspy/start_jupyter_cm",
classifiers=[
- "Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.3",
+ "Programming Language :: Python :: 3.4",
+ "Programming Language :: Python :: 3.5",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
"Development Status :: 4 - Beta",
"Environment :: Console",
"License :: OSI Approved :: BSD License", | Update python minimum requirement to <I> since we use shutil.which. | py |
diff --git a/test/unit/Geometry/SGL10Test.py b/test/unit/Geometry/SGL10Test.py
index <HASH>..<HASH> 100644
--- a/test/unit/Geometry/SGL10Test.py
+++ b/test/unit/Geometry/SGL10Test.py
@@ -1,3 +1,9 @@
+import OpenPNM
+
+
class SGL10Test:
- def test_generate(self):
- pass
+ def setup_class(self):
+ self.net = OpenPNM.Network.Cubic(shape=[3, 3, 3])
+ self.geo = OpenPNM.Geometry.SGL10(network=self.net,
+ pores=self.net.Ps,
+ throats=self.net.Ts) | Testing SGL<I>, just basically running the code. | py |
diff --git a/python/segyio/line.py b/python/segyio/line.py
index <HASH>..<HASH> 100644
--- a/python/segyio/line.py
+++ b/python/segyio/line.py
@@ -14,10 +14,10 @@ def sanitize_slice(s, source):
increasing = step is None or step > 0
if start is None:
- start = source[0] if increasing else source[-1]
+ start = min(source) if increasing else max(source)
if stop is None:
- stop = source[-1] + 1 if increasing else source[0] - 1
+ stop = max(source) + 1 if increasing else min(source) - 1
return slice(start, stop, step) | Use min/max over getitem when sanitising slices By using min/max over source[0]/source[-1], sanitize_slice handles all iterables, and not just lists. | py |
diff --git a/python/gumbo/html5lib_adapter_test.py b/python/gumbo/html5lib_adapter_test.py
index <HASH>..<HASH> 100644
--- a/python/gumbo/html5lib_adapter_test.py
+++ b/python/gumbo/html5lib_adapter_test.py
@@ -135,6 +135,10 @@ class Html5libAdapterTest(unittest.TestCase):
expected = re.compile(r'^(\s*)<(\S+)>', re.M).sub(
r'\1<html \2>', convertExpected(expected, 2))
+ # html5lib doesn't yet support the template tag, but it appears in the
+ # tests with the expectation that the template contents will be under the
+ # word 'contents', so we need to reformat that string a bit.
+ expected = reformatTemplateContents(expected)
error_msg = '\n'.join(['\n\nInput:', input, '\nExpected:', expected,
'\nReceived:', output]) | Handle template contents in html5lib_adapter_tests. | py |
diff --git a/salt/runners/manage.py b/salt/runners/manage.py
index <HASH>..<HASH> 100644
--- a/salt/runners/manage.py
+++ b/salt/runners/manage.py
@@ -261,7 +261,6 @@ def versions():
for minion in sorted(version_status[key]):
ret.setdefault(labels[key], {})[minion] = version_status[key][minion]
-
salt.output.display_output(ret, '', __opts__)
return ret | Remove extra line to fix manage.py pylint error | py |
diff --git a/bika/lims/browser/dashboard/dashboard.py b/bika/lims/browser/dashboard/dashboard.py
index <HASH>..<HASH> 100644
--- a/bika/lims/browser/dashboard/dashboard.py
+++ b/bika/lims/browser/dashboard/dashboard.py
@@ -8,6 +8,7 @@ from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from bika.lims.browser import BrowserView
from bika.lims.catalog import CATALOG_ANALYSIS_REQUEST_LISTING
from bika.lims.catalog import CATALOG_ANALYSIS_LISTING
+from bika.lims.catalog import CATALOG_WORKSHEET_LISTING
from bika.lims import bikaMessageFactory as _
from bika.lims import logger
from calendar import monthrange
@@ -248,7 +249,7 @@ class DashboardView(BrowserView):
WS to be verified, WS with results pending, etc.)
"""
out = []
- bc = getToolByName(self.context, "bika_catalog")
+ bc = getToolByName(self.context, CATALOG_WORKSHEET_LISTING)
query = {'portal_type':"Worksheet",}
filtering_allowed = self.context.bika_setup.getAllowDepartmentFiltering()
if filtering_allowed: | Changed to the new Worksheet Listing Catalog. | py |
diff --git a/semver.py b/semver.py
index <HASH>..<HASH> 100644
--- a/semver.py
+++ b/semver.py
@@ -26,9 +26,15 @@ def parse(version):
def compare(ver1, ver2):
+ def nat_cmp(a, b):
+ a, b = a and str(a) or '', b and str(b) or ''
+ convert = lambda text: int(text) if text.isdigit() else text.lower()
+ alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
+ return cmp(alphanum_key(a), alphanum_key(b))
+
def compare_by_keys(d1, d2, keys):
for key in keys:
- v = cmp(d1.get(key), d2.get(key))
+ v = nat_cmp(d1.get(key), d2.get(key))
if v != 0:
return v
return 0 | Fix rc build comparison via natural cmp helper. | py |
diff --git a/post_office/tests/backends.py b/post_office/tests/backends.py
index <HASH>..<HASH> 100644
--- a/post_office/tests/backends.py
+++ b/post_office/tests/backends.py
@@ -4,7 +4,7 @@ from django.test import TestCase
from django.test.utils import override_settings
from ..models import Email, STATUS, PRIORITY
-from ..utils import get_email_backend
+from ..settings import get_email_backend
class ErrorRaisingBackend(backends.base.BaseEmailBackend): | Changed the import path of one of the test, to explicitly require settings file | py |
diff --git a/tests/test_graphics.py b/tests/test_graphics.py
index <HASH>..<HASH> 100644
--- a/tests/test_graphics.py
+++ b/tests/test_graphics.py
@@ -16,7 +16,7 @@ import shutil
from nose.tools import assert_equal, assert_less, nottest
from pyani import pyani_graphics, pyani_config
-from average_nucleotide_identity import get_labels
+from bin.average_nucleotide_identity import get_labels
# Work out where we are. We need to do this to find related data files
# for testing | fix import from command-line script in tests | py |
diff --git a/sdk/schemaregistry/azure-schemaregistry-avroencoder/samples/async_samples/encode_and_decode_with_message_content_async.py b/sdk/schemaregistry/azure-schemaregistry-avroencoder/samples/async_samples/encode_and_decode_with_message_content_async.py
index <HASH>..<HASH> 100644
--- a/sdk/schemaregistry/azure-schemaregistry-avroencoder/samples/async_samples/encode_and_decode_with_message_content_async.py
+++ b/sdk/schemaregistry/azure-schemaregistry-avroencoder/samples/async_samples/encode_and_decode_with_message_content_async.py
@@ -28,7 +28,8 @@ import asyncio
from azure.identity.aio import ClientSecretCredential
from azure.schemaregistry.aio import SchemaRegistryClient
-from azure.schemaregistry.encoder.avroencoder.aio import AvroEncoder, MessageContent
+from azure.schemaregistry.encoder.avroencoder import MessageContent
+from azure.schemaregistry.encoder.avroencoder.aio import AvroEncoder
from azure.eventhub import EventData
TENANT_ID = os.environ["AZURE_TENANT_ID"] | [SchemaRegistry] avro fix sample (#<I>) | py |
diff --git a/pyads/symbol.py b/pyads/symbol.py
index <HASH>..<HASH> 100644
--- a/pyads/symbol.py
+++ b/pyads/symbol.py
@@ -141,7 +141,7 @@ class AdsSymbol:
# type
self.symbol_type = info.symbol_type # Save the type as string
- def _read_write_check(self) -> None:
+ def _check_for_open_connection(self) -> None:
"""Assert the current object is ready to read from/write to.
This checks only if the Connection is open.
@@ -156,7 +156,7 @@ class AdsSymbol:
The new read value is also saved in the buffer.
"""
- self._read_write_check()
+ self._check_for_open_connection()
self._value = self._plc.read(self.index_group, self.index_offset, self.plc_type)
return self._value
@@ -168,7 +168,7 @@ class AdsSymbol:
:param new_value Value to be written to symbol (if None,
the buffered value is send instead)
"""
- self._read_write_check()
+ self._check_for_open_connection()
if new_value is None:
new_value = self._value # Send buffered value instead
else: | Rename _read_write_check to _check_for_open_connection Just to make the function more descriptive | py |
diff --git a/iyzipay/__init__.py b/iyzipay/__init__.py
index <HASH>..<HASH> 100644
--- a/iyzipay/__init__.py
+++ b/iyzipay/__init__.py
@@ -5,9 +5,9 @@
# Nurettin Bakkal <nurettin.bakkal@iyzico.com>
# Configuration variables
-api_key = 'mrI3mIMuNwGiIxanQslyJBRYa8nYrCU5'
-secret_key = '9lkVluNHBABPw0LIvyn50oYZcrSJ8oNo'
-base_url = 'localhost:8080'
+api_key = '1'
+secret_key = '1'
+base_url = 'localhost'
# Resource
from iyzipay.iyzipay_resource import ( # noqa | ide file added to gitignore | py |
diff --git a/discord/gateway.py b/discord/gateway.py
index <HASH>..<HASH> 100644
--- a/discord/gateway.py
+++ b/discord/gateway.py
@@ -241,7 +241,7 @@ DWS = TypeVar('DWS', bound='DiscordWebSocket')
class DiscordWebSocket:
- """Implements a WebSocket for Discord's gateway v6.
+ """Implements a WebSocket for Discord's gateway v10.
Attributes
----------- | Update gateway version docstring in DiscordWebSocket | py |
diff --git a/tests/integration/modules/test_pkg.py b/tests/integration/modules/test_pkg.py
index <HASH>..<HASH> 100644
--- a/tests/integration/modules/test_pkg.py
+++ b/tests/integration/modules/test_pkg.py
@@ -14,6 +14,7 @@ from tests.support.helpers import (
requires_network,
requires_salt_modules,
requires_system_grains)
+from tests.support.unit import skipIf
# Import Salt libs
from salt.utils import six
@@ -329,6 +330,7 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
self.assertNotEqual(ret, {})
@destructiveTest
+ @skipIf(salt.utils.platform.is_darwin(), 'The jenkins user is equivalent to root on mac, causing the test to be unrunnable')
@requires_salt_modules('pkg.remove', 'pkg.latest_version')
@requires_system_grains
def test_pkg_latest_version(self, grains): | skip pkg test on mac that can't run as root | py |
diff --git a/gwpy/table/gravityspy.py b/gwpy/table/gravityspy.py
index <HASH>..<HASH> 100644
--- a/gwpy/table/gravityspy.py
+++ b/gwpy/table/gravityspy.py
@@ -192,7 +192,8 @@ class GravitySpyTable(EventTable):
@classmethod
def search(cls, uniqueID, howmany=10):
- """If table contains Gravity Spy triggers `EventTable`
+ """perform restful API version of search available here:
+ https://gravityspytools.ciera.northwestern.edu/search/
Parameters
----------
@@ -203,11 +204,11 @@ class GravitySpyTable(EventTable):
howmany : `int`, optional, default: 10
number of similar images you would like
- kwargs:
-
Returns
-------
- Folder containing omega scans sorted by label
+ `GravitySpyTable` containing similar events based on
+ an evaluation of the Euclidean distance of the input image
+ to all other images in some Feature Space
"""
from astropy.utils.data import get_readable_fileobj
import json | Removing some non sensical doc strings that where inherited from a copy and paste. Adding in some useful docstrings | py |
diff --git a/tornado/websocket.py b/tornado/websocket.py
index <HASH>..<HASH> 100644
--- a/tornado/websocket.py
+++ b/tornado/websocket.py
@@ -210,6 +210,7 @@ class WebSocketHandler(tornado.web.RequestHandler):
Once the close handshake is successful the socket will be closed.
"""
self.ws_connection.close()
+ self.ws_connection = None
def allow_draft76(self):
"""Override to enable support for the older "draft76" protocol. | Break reference cycle between WebSocketHandler and WebSocketProtocol on close. This isn't strictly necessary, but it allows the refcounting CPython GC to clean things up without waiting for a full GC. Closes #<I>. | py |
diff --git a/i3situation/plugins/_plugin.py b/i3situation/plugins/_plugin.py
index <HASH>..<HASH> 100644
--- a/i3situation/plugins/_plugin.py
+++ b/i3situation/plugins/_plugin.py
@@ -52,8 +52,8 @@ class Plugin():
doesn't set it.
"""
if event['button'] == 1 and 'button1' in self.options:
- subprocess.call(self.options['button1'])
+ subprocess.call(self.options['button1'].split())
elif event['button'] == 2 and 'button2' in self.options:
- subprocess.call(self.options['button2'])
+ subprocess.call(self.options['button2'].split())
elif event['button'] == 3 and 'button3' in self.options:
- subprocess.call(self.options['button3'])
+ subprocess.call(self.options['button3'].split()) | Fixed a bug that meant multi word commands caused exceptions. | py |
diff --git a/gsh/buffered_dispatcher.py b/gsh/buffered_dispatcher.py
index <HASH>..<HASH> 100644
--- a/gsh/buffered_dispatcher.py
+++ b/gsh/buffered_dispatcher.py
@@ -91,3 +91,4 @@ class buffered_dispatcher(asyncore.file_dispatcher):
(len(self.write_buffer), str(self)),
output=sys.stderr)
raise asyncore.ExitNow(1)
+ self.handle_write() | Try to write the data just dispatched. This fix the 'add' command in the ctrl shell when using Zsh. | py |
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -42,7 +42,7 @@ setup(name='blimpy',
description='Python utilities for Breakthrough Listen SETI observations',
long_description="Python utilities for Breakthrough Listen SETI observations. It includes data handling, formating, dicing and plotting.",
platform=['*nix'],
- license='MIT',
+ license='BSD',
install_requires=install_requires,
extras_require=extras_require,
url='https://github.com/ucberkeleyseti/blimpy', | Update license in setup.py to BSD | py |
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,6 @@ tests_require = [
'invenio-records>=1.0.0b4',
'mock>=1.3.0',
'pydocstyle>=1.0.0',
- 'pytest-cache>=1.0',
'pytest-cov>=1.8.0',
'pytest-pep8>=1.0.6',
'pytest>=2.8.0,!=3.3.0', | installation: removed pytest-cache dependency | py |
diff --git a/rtv/submission_page.py b/rtv/submission_page.py
index <HASH>..<HASH> 100644
--- a/rtv/submission_page.py
+++ b/rtv/submission_page.py
@@ -412,7 +412,7 @@ class SubmissionPage(Page):
attr = self.term.attr('Created')
self.term.add_space(win)
- self.term.add_line(win, '{created_long}{edited}'.format(**data),
+ self.term.add_line(win, '{created_long}{edited_long}'.format(**data),
attr=attr)
row = len(data['split_title']) + 2 | Verbose time stamp on submission page. | py |
diff --git a/pydle/features/rfc1459/client.py b/pydle/features/rfc1459/client.py
index <HASH>..<HASH> 100644
--- a/pydle/features/rfc1459/client.py
+++ b/pydle/features/rfc1459/client.py
@@ -283,6 +283,12 @@ class RFC1459Support(BasicClient):
and yield from this function as follows:
info = yield self.whois('Nick')
"""
+
+ if " " in nickname:
+ fut = Future()
+ fut.set_result(None)
+ return fut
+
if nickname not in self._requests['whois']:
self.rawmsg('WHOIS', nickname)
self._whois_info[nickname] = { | If a nickname contains a space, then the WHOIS result should just be None. Of course, this won't work great on Rizon where the stupi^Wbrilliant network administrators put spaces in their nicknames... | py |
diff --git a/pyxmpp/streamtls.py b/pyxmpp/streamtls.py
index <HASH>..<HASH> 100644
--- a/pyxmpp/streamtls.py
+++ b/pyxmpp/streamtls.py
@@ -149,19 +149,20 @@ class StreamTLSMixIn:
"""Read data pending on the stream socket and pass it to the parser."""
if self.eof:
return
- try:
+ while True:
try:
- r=self.socket.read()
- except TypeError:
- # workarund for M2Crypto 0.13.1 'feature'
- r=self.socket.read(self.socket)
- if r is None:
+ try:
+ r=self.socket.read()
+ except TypeError:
+ # workarund for M2Crypto 0.13.1 'feature'
+ r=self.socket.read(self.socket)
+ if r is None:
+ return
+ except socket.error,e:
+ if e.args[0]!=errno.EINTR:
+ raise
return
- except socket.error,e:
- if e.args[0]!=errno.EINTR:
- raise
- return
- self._feed_reader(r)
+ self._feed_reader(r)
def _read(self):
"""Read data pending on the stream socket and pass it to the parser.""" | - fixed delay on encrypted stream input: now all data is processed when received | py |
diff --git a/tests/test.py b/tests/test.py
index <HASH>..<HASH> 100644
--- a/tests/test.py
+++ b/tests/test.py
@@ -524,7 +524,6 @@ class TestComponentGetterAPI(HedgehogAPITestCase):
)
-@unittest.skip
class TestComponentGetterProcessAPI(HedgehogAPITestCase):
class HedgehogComponentGetterClient(HedgehogComponentGetterMixin, HedgehogClient):
pass | activate TestComponentGetterProcessAPI tests, as they do work | py |
diff --git a/code/macroeco/form_func.py b/code/macroeco/form_func.py
index <HASH>..<HASH> 100644
--- a/code/macroeco/form_func.py
+++ b/code/macroeco/form_func.py
@@ -46,17 +46,15 @@ def get_metadata(asklist, folder_name, dataname):
def get_files(filetype, num, direct):
- '''This functoin gets the .txt files from the
- data directory /archival/BCIS and returns the
- names of the .txt files in the directory. It is
- assumed that the .txt files are BCIS data.
-
+ '''This function gets the filetype files from the
+ data directory /archival/direct and returns the
+ names of the filetype files in the directory.
filetype -- a string specifying the type of the file, i.e. 'csv' or 'txt'
num -- expected number of files of type 'direct_????.filetype'
direct -- the directory within /data/archival/ where the files are.
- example 'BCIS' of 'COCO'
+ example 'BCIS' or 'COCO'
returns:
A list of strings
@@ -188,6 +186,7 @@ def create_intcodes(speclist, unq_specs, unq_ints):
'''
assert len(speclist) > 0, "Species array cannot be empty"
+ speclist = speclist.astype(unq_specs.dtype)
tot_int = np.empty(len(speclist))
for s in xrange(len(unq_specs)):
        check = (unq_specs[s] == speclist) | Checked in KRIL and RMJB datasets Had to make a lot of assumptions about the KRIL dataset that need to be validated. | py |
diff --git a/pipenv/cli/command.py b/pipenv/cli/command.py
index <HASH>..<HASH> 100644
--- a/pipenv/cli/command.py
+++ b/pipenv/cli/command.py
@@ -734,7 +734,7 @@ def scripts(state, args):
for k, v in scripts.items():
rpt += u"{0}\t{1}".format(k, v)
echo(rpt)
- return
+ return 0
if __name__ == "__main__": | Provide a success return value to the `scripts` command. | py |
diff --git a/examples/charts/file/lines.py b/examples/charts/file/lines.py
index <HASH>..<HASH> 100644
--- a/examples/charts/file/lines.py
+++ b/examples/charts/file/lines.py
@@ -18,7 +18,6 @@ df['date'] = pd.date_range('1/1/2015', periods=len(df.index), freq='D')
# default behavior for dataframe input is to plot each numerical column as a line
line = Line(df)
-show(line)
# build the line plots
line0 = Line(df, y=['python', 'pypy', 'jython'], | Only show all line charts at once in example file. | py |
diff --git a/telethon/client/dialogs.py b/telethon/client/dialogs.py
index <HASH>..<HASH> 100644
--- a/telethon/client/dialogs.py
+++ b/telethon/client/dialogs.py
@@ -41,7 +41,7 @@ class _DialogsIter(RequestIter):
messages = {}
for m in r.messages:
- m._finish_init(self, entities, None)
+ m._finish_init(self.client, entities, None)
messages[m.id] = m
for d in r.dialogs:
@@ -56,7 +56,7 @@ class _DialogsIter(RequestIter):
peer_id = utils.get_peer_id(d.peer)
if peer_id not in self.seen:
self.seen.add(peer_id)
- cd = custom.Dialog(self, d, entities, messages)
+ cd = custom.Dialog(self.client, d, entities, messages)
if cd.dialog.pts:
self.client._channel_pts[cd.id] = cd.dialog.pts | Fix DialogsIter not passing the client to the built objects | py |
diff --git a/amo2kinto/synchronize.py b/amo2kinto/synchronize.py
index <HASH>..<HASH> 100644
--- a/amo2kinto/synchronize.py
+++ b/amo2kinto/synchronize.py
@@ -33,6 +33,10 @@ def get_diff(source, dest):
to_check = source_keys - to_create - to_delete
for record_id in to_check:
+ # Make sure to remove properties that are part of kinto
+ # records and not amo records.
+ # Here we will compare the record properties ignoring:
+ # ID, last_modified and enabled.
new = canonical_json(source_dict[record_id])
old = canonical_json(dest_dict[record_id])
if new != old:
@@ -62,7 +66,14 @@ def push_changes(diff, kinto_client, bucket, collection):
record['enabled'] = True
batch.create_record(record)
for record in to_update:
- batch.patch_record(strip_keys(record, ['id']))
+ # Patch the record with the new properties from AMO.
+ # This will override changes that were made before in
+ # Kinto if any. But json2kinto should be used only to
+ # rsync AMO database so it is fine.
+ patch_record = strip_keys(record, ['id'])
+ # Make sure the record is correcly activated.
+ patch_record['enabled'] = True
+ batch.patch_record(patch_record)
if to_create or to_update or to_delete:
logger.info('Trigger the signature.') | Add comments. @leplatrem review. | py |
diff --git a/imhotep/main.py b/imhotep/main.py
index <HASH>..<HASH> 100644
--- a/imhotep/main.py
+++ b/imhotep/main.py
@@ -74,6 +74,7 @@ class RepoManager(object):
log.debug("Cleaning up %s", repo_dir)
run('rm -rf %s' % repo_dir)
+
def run_analysis(repo, filenames=set()):
results = {}
for tool in repo.tools:
@@ -110,6 +111,7 @@ if __name__ == '__main__':
description="Posts static analysis results to github.")
parser.add_argument(
'--config-file',
+ default="imhotep_config.json",
type=str,
help="Configuration file in json.")
parser.add_argument(
@@ -152,9 +154,10 @@ if __name__ == '__main__':
help="Path to directory to cache the repository",
type=str,
required=False)
+
# parse out repo name
args = parser.parse_args()
- config = load_config(file)
+ config = load_config(args.config_file)
if args.commit == "" and args.pr_number == "":
print "You must specify a commit or PR number"
@@ -199,7 +202,6 @@ if __name__ == '__main__':
manager = RepoManager(authenticated=args.authenticated,
cache_directory=cache_directory,
tools=tools,
- repo=repo_name,
executor=run)
try: | defaults for config file location, since it not existing is idempotent. | py |
diff --git a/torchvision/datasets/folder.py b/torchvision/datasets/folder.py
index <HASH>..<HASH> 100644
--- a/torchvision/datasets/folder.py
+++ b/torchvision/datasets/folder.py
@@ -86,8 +86,8 @@ def make_dataset(
continue
for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
for fname in sorted(fnames):
- path = os.path.join(root, fname)
- if is_valid_file(path):
+ if is_valid_file(fname):
+ path = os.path.join(root, fname)
item = path, class_index
instances.append(item) | Faster dataset indexing (#<I>) | py |
diff --git a/tests/test_suite.py b/tests/test_suite.py
index <HASH>..<HASH> 100755
--- a/tests/test_suite.py
+++ b/tests/test_suite.py
@@ -87,3 +87,13 @@ class AdminHoneypotTest(TestCase):
response = self.client.get(url.rstrip('/'), follow=True)
self.assertRedirects(response, redirect_url, status_code=301)
+
+ def test_real_url_leak(self):
+ """
+ A test to make sure the real admin URL isn't leaked in the honeypot
+ login form page.
+ """
+
+ honeypot_html = self.client.get(self.honeypot_url, follow=True).content.decode('utf-8')
+ self.assertNotIn('{0}'.format(self.admin_url), honeypot_html)
+ self.assertNotIn('{0}'.format(self.admin_login_url), honeypot_html) | Add a test to check that the real admin url isn't leaked in the honeypot page. | py |
diff --git a/cobe/brain.py b/cobe/brain.py
index <HASH>..<HASH> 100644
--- a/cobe/brain.py
+++ b/cobe/brain.py
@@ -297,7 +297,7 @@ with its two nodes"""
pass
def _get_reply_key(self, reply):
- return tuple([(edge.prev, edge.next) for edge in reply.edges])
+ return tuple([edge.edge_id for edge in reply.edges])
def _babble(self):
token_ids = [] | Use edge_id in _get_reply_key rather than prev/next ids This is a mild memory improvement. | py |
diff --git a/Geometry/constants.py b/Geometry/constants.py
index <HASH>..<HASH> 100644
--- a/Geometry/constants.py
+++ b/Geometry/constants.py
@@ -4,6 +4,8 @@
from sys import float_info
epsilon = float_info.epsilon
+pi_half = 1.5707963267948966
+two_pi = 6.283185307179586
del(float_info) | two_pi and pi_half seemed useful | py |
diff --git a/helpers/postgresql.py b/helpers/postgresql.py
index <HASH>..<HASH> 100644
--- a/helpers/postgresql.py
+++ b/helpers/postgresql.py
@@ -85,8 +85,12 @@ class Postgresql:
os.fchmod(f.fileno(), 0600)
f.write('{hostname}:{port}:*:{username}:{password}\n'.format(**r))
- return os.system('PGPASSFILE={pgpass} pg_basebackup -R -D {data_dir} --host={hostname} --port={port} -U {username}'.format(
- pgpass=pgpass, data_dir=self.data_dir, **r)) == 0
+ try:
+ os.environ['PGPASSFILE'] = pgpass
+ return os.system('pg_basebackup -R -D {data_dir} --host={hostname} --port={port} -U {username}'.format(
+ data_dir=self.data_dir, **r)) == 0
+ finally:
+ os.environ.pop('PGPASSFILE')
def is_leader(self):
return not self.query('SELECT pg_is_in_recovery()').fetchone()[0] | Set environment variable PGPASSFILE via os.environ before running pg_basebackup and unset it afterwards | py |
diff --git a/mesh_tensorflow/ops.py b/mesh_tensorflow/ops.py
index <HASH>..<HASH> 100644
--- a/mesh_tensorflow/ops.py
+++ b/mesh_tensorflow/ops.py
@@ -1201,7 +1201,9 @@ class MeshImpl(object):
divisor = list_product(self.shape.to_integer_list[mesh_axis + 1:])
modulus = self.shape[mesh_axis].size
def my_fn(pnum):
- return (pnum // divisor) % modulus
+ # TODO(noam): casting to float32 for the floordiv masks a bug.
+ # document and file the bug.
+ return tf.cast((tf.cast(pnum, tf.float32) // divisor), tf.int32) % modulus
return self.slicewise(my_fn, self.laid_out_pnum())
def laid_out_slice_num(self, tensor_shape): | Mask a bug in TPU execution. PiperOrigin-RevId: <I> | py |
diff --git a/scour/scour.py b/scour/scour.py
index <HASH>..<HASH> 100644
--- a/scour/scour.py
+++ b/scour/scour.py
@@ -2913,8 +2913,8 @@ def scourString(in_string, options=None):
return False
else:
for element in doc.getElementsByTagName("*"):
- for attrName in six.iterkeys(element.attributes):
- if attrName.startswith(prefix):
+ for attribute in element.attributes.values():
+ if attribute.name.startswith(prefix):
return False
return True | Compatibility fix for Python <I> (NamedNodeList seems not to have had the iterkeys method back then) | py |
diff --git a/grammpy/IsMethodsRuleExtension.py b/grammpy/IsMethodsRuleExtension.py
index <HASH>..<HASH> 100644
--- a/grammpy/IsMethodsRuleExtension.py
+++ b/grammpy/IsMethodsRuleExtension.py
@@ -13,12 +13,11 @@ from .exceptions import RuleException, UselessEpsilonException, RuleSyntaxExcept
NonterminalDoesNotExistsException
from .Constants import EPS
from .Nonterminal import Nonterminal
-from . import Grammar
class IsMethodsRuleExtension(Rule):
@staticmethod
- def _controlSide(cls, side, grammar: Grammar):
+ def _controlSide(cls, side, grammar):
if not isinstance(side, list):
raise RuleSyntaxException(cls, 'One side of rule is not enclose by list', side)
        if len(side) == 0: | Remove recursive requires between grammar and rule | py |
diff --git a/instalooter/pages.py b/instalooter/pages.py
index <HASH>..<HASH> 100644
--- a/instalooter/pages.py
+++ b/instalooter/pages.py
@@ -139,6 +139,15 @@ class HashtagIterator(PageIterator):
"after": cursor
}
+ def __next__(self):
+ item = super(HashtagIterator, self).__next__()
+ for media in item[self._section_media].get("edges", []):
+ media["node"].setdefault(
+ "__typename",
+ "GraphVideo" if media["node"].get("is_video", False) else "GraphImage"
+ )
+ return item
+
class ProfileIterator(PageIterator):
"""An iterator over the pages of a user profile. | Fix missing `__typename` from hashtag medias | py |
diff --git a/looper/looper.py b/looper/looper.py
index <HASH>..<HASH> 100755
--- a/looper/looper.py
+++ b/looper/looper.py
@@ -440,7 +440,8 @@ class Runner(Executor):
except AttributeError:
skip_reasons.append("Sample has no protocol")
else:
- if protocol not in mapped_protos:
+ if protocol not in mapped_protos and \
+ GENERIC_PROTOCOL_KEY not in mapped_protos:
skip_reasons.append("No pipeline for protocol")
if skip_reasons: | allow samples through if specific protocol isn't mapped but the generic one is | py |
diff --git a/asn1crypto/core.py b/asn1crypto/core.py
index <HASH>..<HASH> 100644
--- a/asn1crypto/core.py
+++ b/asn1crypto/core.py
@@ -15,7 +15,7 @@ from ._int_conversion import int_to_bytes, int_from_bytes
if sys.version_info <= (3,):
str_cls = unicode #pylint: disable=E0602
byte_cls = str
- int_types = (int, long)
+ int_types = (int, long) #pylint: disable=E0602
py2 = True
chr_cls = chr
range = xrange #pylint: disable=E0602,W0622 | Fixed a linting error in core | py |
diff --git a/tests/test_pgextras.py b/tests/test_pgextras.py
index <HASH>..<HASH> 100755
--- a/tests/test_pgextras.py
+++ b/tests/test_pgextras.py
@@ -97,13 +97,10 @@ class TestPgextras(unittest.TestCase):
with PgExtras(dsn=self.dsn) as pg:
if pg.is_pg_at_least_nine_two():
self.create_pg_stat_statement()
-
- results = pg.calls()
-
- if pg.pg_stat_statement():
+ results = pg.calls()
self.assertTrue(len(results), 10)
else:
- self.assertTrue(len(results), 0)
+ self.assertRaises(Exception, pg.calls)
def test_blocking(self):
statement = """ | Bug fix for unit test when running against postgres version less than <I> | py |
diff --git a/tests.py b/tests.py
index <HASH>..<HASH> 100644
--- a/tests.py
+++ b/tests.py
@@ -88,7 +88,7 @@ class WooCommerceTestCase(unittest.TestCase):
with HTTMock(woo_test_mock):
# call requests
- status = self.api.get("products", params={'sku': 10001}).status_code
+ status = self.api.get("products", params={"per_page": 10, "page": 1, "offset": 0}).status_code
self.assertEqual(status, 200)
def test_get_with_requests_kwargs(self): | better example of using params for real-world usage | py |
diff --git a/vaex/file/other.py b/vaex/file/other.py
index <HASH>..<HASH> 100644
--- a/vaex/file/other.py
+++ b/vaex/file/other.py
@@ -992,7 +992,9 @@ class DatasetAstropyTable(DatasetArrays):
if "ucd" in column._meta:
self.ucds[clean_name] = column._meta["ucd"]
if column.unit:
- self.units[clean_name] = column.unit
+ unit = _try_unit(column.unit)
+ if unit:
+ self.units[clean_name] = unit
if column.description:
self.descriptions[clean_name] = column.description
if hasattr(masked_array, "mask"): | fix: converting unit string to units for astropy tables | py |
diff --git a/ropetest/refactor/extracttest.py b/ropetest/refactor/extracttest.py
index <HASH>..<HASH> 100644
--- a/ropetest/refactor/extracttest.py
+++ b/ropetest/refactor/extracttest.py
@@ -1208,6 +1208,7 @@ class ExtractMethodTest(unittest.TestCase):
''')
self.assertEqual(expected, refactored)
+ @testutils.only_for_versions_higher('3.8')
def test_extract_function_with_inline_assignment_in_condition(self):
code = dedent('''\
def foo(a): | fixed version restriction in tests for NamedExpr | py |
diff --git a/draw_test.py b/draw_test.py
index <HASH>..<HASH> 100644
--- a/draw_test.py
+++ b/draw_test.py
@@ -18,12 +18,28 @@ class App(pyxel.App):
self.image.set(0, 0, 16, 16, image_data)
self.bank(0, self.image)
+ self.space = False
+
def update(self):
+ self.space = self.btn(pyxel.KEY_SPACE)
+
if self.btnp(pyxel.KEY_Q):
exit()
def draw(self):
+ if (self.frame_count // 30) % 10 >= 5:
+ self.pal(2, 3)
+ self.pal(7, 1)
+ else:
+ self.pal()
+
self.test_cls(4, 6)
+
+ self.clip()
+ if self.space:
+ self.rectb(31, 31, 168, 118, 14)
+ self.clip(32, 32, 167, 117)
+
self.test_pix(4, 20)
self.test_line(104, 6)
self.test_rect(4, 40) | Added the clip and pal command tests | py |
diff --git a/tests/test_repr.py b/tests/test_repr.py
index <HASH>..<HASH> 100644
--- a/tests/test_repr.py
+++ b/tests/test_repr.py
@@ -4,18 +4,21 @@ import six
class ReprTest(unittest.TestCase):
+ """Checks that the string representations of charts and entries are correct.
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ cls.chart = billboard.ChartData("hot-100", date="2010-01-02")
+
def testReprChart(self):
- """Checks that the string representation of a chart is correct."""
- chart = billboard.ChartData("hot-100", date="1996-08-03")
self.assertEqual(
- repr(chart), "billboard.ChartData('hot-100', date='1996-08-03')"
+ repr(self.chart), "billboard.ChartData('hot-100', date='2010-01-02')"
)
def testReprEntry(self):
- """Checks that the string representation of an entry is correct."""
- chart = billboard.ChartData("hot-100", date="2010-01-02")
self.assertEqual(
- repr(chart[0]),
+ repr(self.chart[0]),
"billboard.ChartEntry(title={!r}, artist={!r})".format(
six.text_type("TiK ToK"), six.text_type("Ke$ha")
), | Merge tests to reduce number of HTTP requests | py |
diff --git a/gns3server/version.py b/gns3server/version.py
index <HASH>..<HASH> 100644
--- a/gns3server/version.py
+++ b/gns3server/version.py
@@ -23,8 +23,8 @@
# or negative for a release candidate or beta (after the base version
# number has been incremented)
-__version__ = "2.2.3"
-__version_info__ = (2, 2, 3, 0)
+__version__ = "2.2.4dev1"
+__version_info__ = (2, 2, 4, 99)
if "dev" in __version__:
try: | Development on <I>dev1 | py |
diff --git a/cirq-google/cirq_google/engine/engine_client.py b/cirq-google/cirq_google/engine/engine_client.py
index <HASH>..<HASH> 100644
--- a/cirq-google/cirq_google/engine/engine_client.py
+++ b/cirq-google/cirq_google/engine/engine_client.py
@@ -922,19 +922,13 @@ def _processor_name_from_ids(project_id: str, processor_id: str) -> str:
def _calibration_name_from_ids(
project_id: str, processor_id: str, calibration_time_seconds: int
) -> str:
- return 'projects/%s/processors/%s/calibrations/%d' % (
- project_id,
- processor_id,
- calibration_time_seconds,
+ return (
+ f'projects/{project_id}/processors/{processor_id}/calibrations/{calibration_time_seconds}'
)
def _reservation_name_from_ids(project_id: str, processor_id: str, reservation_id: str) -> str:
- return 'projects/%s/processors/%s/reservations/%s' % (
- project_id,
- processor_id,
- reservation_id,
- )
+ return f'projects/{project_id}/processors/{processor_id}/reservations/{reservation_id}'
def _ids_from_program_name(program_name: str) -> Tuple[str, str]: | Use f-strings consistently in engine_client.py helpers (#<I>) Review: @wcourtney | py |
diff --git a/webvtt/parsers.py b/webvtt/parsers.py
index <HASH>..<HASH> 100644
--- a/webvtt/parsers.py
+++ b/webvtt/parsers.py
@@ -27,7 +27,7 @@ class TextBasedParser(GenericParser):
encoding = 'utf-8'
with open(file, encoding=encoding) as f:
- lines = [line.rstrip() for line in f.readlines()]
+ lines = [line.rstrip('\n') for line in f.readlines()]
if not lines:
raise MalformedFileError('The file is empty.') | Only strip return when parsing the lines from the captions file | py |
diff --git a/docs/conf.py b/docs/conf.py
index <HASH>..<HASH> 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -17,9 +17,9 @@
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
-# import os
-# import sys
-# sys.path.insert(0, os.path.abspath('.'))
+import os
+import sys
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
# local
# 3rd-party imports | will this fix readthedocs? | py |
diff --git a/mhctools/mhcflurry.py b/mhctools/mhcflurry.py
index <HASH>..<HASH> 100644
--- a/mhctools/mhcflurry.py
+++ b/mhctools/mhcflurry.py
@@ -37,20 +37,21 @@ class MHCflurry(BasePredictor):
self,
alleles,
default_peptide_lengths=[9],
- models_dir=None):
+ predictor=None):
"""
Parameters
-----------
- models_dir : string
- MHCflurry models to load
+ predictor : mhcflurry.Class1AffinityPredictor (optional)
+ MHCflurry predictor to use
"""
BasePredictor.__init__(
self,
alleles=alleles,
default_peptide_lengths=default_peptide_lengths)
- self.predictor = Class1AffinityPredictor.load(
- models_dir=models_dir)
+ if predictor is None:
+ predictor = Class1AffinityPredictor()
+ self.predictor = predictor
def predict_peptides(self, peptides):
binding_predictions = [] | update MHCflurry to take predictor instead of models_dir Enables user to reuse one MHCflurry predictor for queries across different alleles, an important speed optimization | py |
diff --git a/parsl/providers/slurm/slurm.py b/parsl/providers/slurm/slurm.py
index <HASH>..<HASH> 100644
--- a/parsl/providers/slurm/slurm.py
+++ b/parsl/providers/slurm/slurm.py
@@ -255,15 +255,6 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
return rets
- def _test_add_resource(self, job_id):
- self.resources.extend([{'job_id': job_id, 'status': JobStatus(JobState.PENDING), 'size': 1}])
- return True
-
@property
def status_polling_interval(self):
return 60
-
-
-if __name__ == "__main__":
-
- print("None") | Remove unused test code from slurm provider (#<I>) | py |
diff --git a/rootpy/plotting/hist.py b/rootpy/plotting/hist.py
index <HASH>..<HASH> 100644
--- a/rootpy/plotting/hist.py
+++ b/rootpy/plotting/hist.py
@@ -1319,7 +1319,7 @@ class _HistBase(Plottable, NamedObject):
# skip this axis
continue
elif binning is not None:
- if isinstance(binning, list):
+ if hasattr(binning, '__iter__'):
binning = (binning,)
args.extend(binning)
continue | hist: better binning duck typing with __iter__ | py |
diff --git a/curtsies/curtsieskeys.py b/curtsies/curtsieskeys.py
index <HASH>..<HASH> 100644
--- a/curtsies/curtsieskeys.py
+++ b/curtsies/curtsieskeys.py
@@ -104,4 +104,6 @@ CURTSIES_NAMES = dict([
(b"\x1b[OF", u'<END>'), # end (1)
(b"\x1b[OH", u'<HOME>'), # home (7)
+ # reported by cool-RR
+ (b"\x1b[[C", u'<F3>'),
]) | F3 as submitted by cool-RR | py |
diff --git a/dvc/version.py b/dvc/version.py
index <HASH>..<HASH> 100644
--- a/dvc/version.py
+++ b/dvc/version.py
@@ -7,7 +7,7 @@ import os
import subprocess
-_BASE_VERSION = "0.77.3"
+_BASE_VERSION = "0.78.0"
def _generate_version(base_version): | dvc: bump to <I> | py |
diff --git a/src/future/builtins/newround.py b/src/future/builtins/newround.py
index <HASH>..<HASH> 100644
--- a/src/future/builtins/newround.py
+++ b/src/future/builtins/newround.py
@@ -32,10 +32,10 @@ def newround(number, ndigits=None):
exponent = Decimal('10') ** (-ndigits)
- if PYPY:
- # Work around issue #24: round() breaks on PyPy with NumPy's types
- if 'numpy' in repr(type(number)):
- number = float(number)
+ # Work around issue #24: round() breaks on PyPy with NumPy's types
+ # Also breaks on CPython with NumPy's specialized int types like uint64
+ if 'numpy' in repr(type(number)):
+ number = float(number)
if isinstance(number, Decimal):
d = number | Support NumPy's specialized int types in builtins.round | py |
diff --git a/chardet/charsetprober.py b/chardet/charsetprober.py
index <HASH>..<HASH> 100644
--- a/chardet/charsetprober.py
+++ b/chardet/charsetprober.py
@@ -118,6 +118,7 @@ class CharSetProber(object):
prev = 0
for curr in range(len(buf)):
+ # Slice here to get bytes instead of an int with Python 3
buf_char = buf[curr:curr + 1]
# Check if we're coming out of or entering an HTML tag
if buf_char == b'>': | Add comment about why we're slicing in filter_with_english_letters | py |
diff --git a/py3status/module.py b/py3status/module.py
index <HASH>..<HASH> 100644
--- a/py3status/module.py
+++ b/py3status/module.py
@@ -458,6 +458,12 @@ class Module(Thread):
del mod_config[param]
deprecation_log(item)
+ # apply module configuration
+ for config, value in mod_config.items():
+ # names starting with '.' are private
+ if not config.startswith('.'):
+ setattr(self.module_class, config, value)
+
# process any update_config settings
try:
update_config = class_inst.Meta.update_config
@@ -479,12 +485,6 @@ class Module(Thread):
)
mod_config[format_param] = format
- # apply module configuration
- for config, value in mod_config.items():
- # names starting with '.' are private
- if not config.startswith('.'):
- setattr(self.module_class, config, value)
-
# Add the py3 module helper if modules self.py3 is not defined
if not hasattr(self.module_class, 'py3'):
setattr(self.module_class, 'py3', Py3(self)) | fix bug for update_placeholder_format | py |
diff --git a/devassistant/utils.py b/devassistant/utils.py
index <HASH>..<HASH> 100644
--- a/devassistant/utils.py
+++ b/devassistant/utils.py
@@ -3,7 +3,7 @@ try: # ugly hack for using imp instead of importlib on Python <= 2.6
except ImportError:
import imp as importlib
def import_module(name):
- fp, pathname, description = importlib.find_module(name.replace('.', '/')
+ fp, pathname, description = importlib.find_module(name.replace('.', '/'))
return importlib.load_module(name, fp, pathname, description)
importlib.import_module = import_module
del import_module | Argh... now fix it without a typo | py |
diff --git a/superset/views/core.py b/superset/views/core.py
index <HASH>..<HASH> 100755
--- a/superset/views/core.py
+++ b/superset/views/core.py
@@ -1084,7 +1084,10 @@ class Superset(BaseSupersetView):
return json_error_response(utils.error_msg_from_exception(e))
status = 200
- if payload.get('status') == QueryStatus.FAILED:
+ if (
+ payload.get('status') == QueryStatus.FAILED or
+ payload.get('error') is not None
+ ):
status = 400
return json_success(viz_obj.json_dumps(payload), status=status) | [payload] Set status code on error rather than query status | py |
diff --git a/src/feat/database/query.py b/src/feat/database/query.py
index <HASH>..<HASH> 100644
--- a/src/feat/database/query.py
+++ b/src/feat/database/query.py
@@ -39,8 +39,10 @@ class Field(object):
keeps_value = False
- def __init__(self, field, view, id_key='_id', **kwargs):
+ def __init__(self, field, view, id_key='_id', index_field=None,
+ **kwargs):
self.field = field
+ self.index_field = index_field or field
self.view = view
self.keeps_value = kwargs.pop('keeps_value', type(self).keeps_value)
self.id_key = id_key
@@ -68,7 +70,7 @@ class Field(object):
### protected ###
def generate_keys(self, evaluator, value):
- return generate_keys(self.transform, self.field, evaluator, value)
+ return generate_keys(self.transform, self.index_field, evaluator, value)
def parse_view_result(self, rows, tag):
# If the row emitted the link with _id=doc_id this value is used, | Support customizing index name used by the query.Field(). | py |
diff --git a/tests/bagatom/test_getters.py b/tests/bagatom/test_getters.py
index <HASH>..<HASH> 100644
--- a/tests/bagatom/test_getters.py
+++ b/tests/bagatom/test_getters.py
@@ -46,7 +46,7 @@ def people_xml():
"""
-def test_getBagTags_returns_dict(monkeypatch):
+def test_getBagTags_returns_dict():
"""
Check the return value of getBagTags.
"""
@@ -58,7 +58,7 @@ def test_getBagTags_returns_dict(monkeypatch):
assert tags == {'tag': 'tag'}
-def test_getBagTags_open(monkeypatch):
+def test_getBagTags_open():
"""
    Check that getBagTags opens and reads the file contents. | Remove monkeypatch where it is no longer used | py |
diff --git a/tests/test_main.py b/tests/test_main.py
index <HASH>..<HASH> 100755
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -2743,6 +2743,32 @@ def test_illidan():
assert illidan.dead
+def test_illidan_knife_juggler():
+ game = prepare_game()
+ illidan = game.player1.give("EX1_614")
+ illidan.play()
+ juggler = game.player1.give("NEW1_019")
+ juggler.play()
+ assert len(game.player1.field) == 3
+ assert game.player2.hero.health == 30 - 1
+
+
+def test_illidan_full_board():
+ game = prepare_game()
+ illidan = game.player1.give("EX1_614")
+ illidan.play()
+ game.player1.give(THE_COIN).play()
+ game.player1.give(THE_COIN).play()
+ game.player1.give(THE_COIN).play()
+ game.player1.give(THE_COIN).play()
+ game.player1.give(THE_COIN).play()
+ assert len(game.player1.field) == 6
+ juggler = game.player1.give("NEW1_019")
+ juggler.play()
+ assert len(game.player1.field) == 7
+ assert game.player2.hero.health == 30
+
+
def test_leeroy():
game = prepare_game()
leeroy = game.current_player.give("EX1_116") | Add more Illidan/Knife Juggler tests | py |
diff --git a/pyxel/__init__.py b/pyxel/__init__.py
index <HASH>..<HASH> 100644
--- a/pyxel/__init__.py
+++ b/pyxel/__init__.py
@@ -189,6 +189,31 @@ GAMEPAD_2_LEFT: int = _get_constant_number("GAMEPAD_2_LEFT")
#
+# Color class
+#
+class Color:
+ BLACK = 0
+ NAVY = 1
+ PERPLE = 2
+ GREEN = 3
+ BROWN = 4
+ DARKGRAY = 5
+ LIGHTGRAY = 6
+ WHITE = 7
+ RED = 8
+ ORANGE = 9
+ YELLOW = 10
+ LIME = 11
+ CYAN = 12
+ STEELBLUE = 13
+ PINK = 14
+ PEACH = 15
+
+ def __init__(self):
+ pass
+
+
+#
# Image class
#
class Image: | Add Color class (#<I>) To set colors with words | py |
diff --git a/moto/sqs/models.py b/moto/sqs/models.py
index <HASH>..<HASH> 100644
--- a/moto/sqs/models.py
+++ b/moto/sqs/models.py
@@ -8,7 +8,7 @@ from xml.sax.saxutils import escape
import boto.sqs
from moto.core import BaseBackend
-from moto.core.utils import camelcase_to_underscores, get_random_message_id, unix_time_millis
+from moto.core.utils import camelcase_to_underscores, get_random_message_id, unix_time, unix_time_millis
from .utils import generate_receipt_handle
from .exceptions import (
ReceiptHandleIsInvalid,
@@ -115,7 +115,7 @@ class Queue(object):
self.wait_time_seconds = wait_time_seconds or 0
self._messages = []
- now = time.time()
+ now = unix_time()
self.created_timestamp = now
self.delay_seconds = 0
@@ -281,7 +281,7 @@ class SQSBackend(BaseBackend):
queue = self.get_queue(queue_name)
result = []
- polling_end = time.time() + wait_seconds_timeout
+ polling_end = unix_time() + wait_seconds_timeout
# queue.messages only contains visible messages
while True:
@@ -295,7 +295,7 @@ class SQSBackend(BaseBackend):
if len(result) >= count:
break
- if result or time.time() > polling_end:
+ if result or unix_time() > polling_end:
break
return result | sqs: Use unix_time in place of time.time() (#<I>) unix_time() from moto.core.utils is used as the time source through moto, and it is identical to time.time() in output. Hence, using unix_time() since it makes mocking easier during testing (when time is mocked out). | py |
diff --git a/src/vistir/misc.py b/src/vistir/misc.py
index <HASH>..<HASH> 100644
--- a/src/vistir/misc.py
+++ b/src/vistir/misc.py
@@ -383,7 +383,7 @@ def run(
spinner=sp,
combine_stderr=combine_stderr,
start_text=start_text,
- write_to_stdout=True,
+ write_to_stdout=write_to_stdout,
) | Fix write_to_stdout=True | py |
diff --git a/arcrest/admin/cmdline.py b/arcrest/admin/cmdline.py
index <HASH>..<HASH> 100644
--- a/arcrest/admin/cmdline.py
+++ b/arcrest/admin/cmdline.py
@@ -421,7 +421,8 @@ createcacheschemaargs.add_argument('-TO', '--tile-origin',
'Ex: "-20037508.342787 20037508.342787"')
createcacheschemaargs.add_argument('-TF', '--tile-format',
help='Description: Tile format',
- choices=['PNG8', 'PNG24', 'PNG32', 'JPEG', 'MIXED'])
+ choices=['PNG', 'PNG8', 'PNG24', 'PNG32',
+ 'JPEG', 'MIXED'])
createcacheschemaargs.add_argument('-TC', '--tile-compression',
help='Description: Compression (if JPEG or MIXED)',
default=0, | Tweaks to format list in cache | py |
diff --git a/tests/test_blob.py b/tests/test_blob.py
index <HASH>..<HASH> 100644
--- a/tests/test_blob.py
+++ b/tests/test_blob.py
@@ -23,8 +23,8 @@ def test_pack():
x = [1, 2, 3, 4]
assert_array_equal(x, unpack(pack(x)), "List did not pack/unpack correctly")
- x = [1, 2, 3, 4].__iter__()
- assert_array_equal(x, unpack(pack(x)), "Iterator did not pack/unpack correctly")
+ x = [1, 2, 3, 4]
+ assert_array_equal(x, unpack(pack(x.__iter__())), "Iterator did not pack/unpack correctly")
def test_complex(): | Change testing of iterator serialization to BLOB | py |
diff --git a/eventsourcing/__init__.py b/eventsourcing/__init__.py
index <HASH>..<HASH> 100644
--- a/eventsourcing/__init__.py
+++ b/eventsourcing/__init__.py
@@ -238,4 +238,4 @@ Example application
"""
-__version__ = '3.0.0dev'
+__version__ = '3.0.0.dev0' | Changed version number to be a proper version number. | py |
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -68,7 +68,7 @@ if __name__ == '__main__':
zip_safe=False,
extras_require={
'docs': [
- 'sphinx<1.7.0',
+ 'sphinx>=1.7.4',
],
'tests': [
'flake8>=3.5.0', | attempting to fix readthedocs | py |
diff --git a/napalm_logs/server.py b/napalm_logs/server.py
index <HASH>..<HASH> 100644
--- a/napalm_logs/server.py
+++ b/napalm_logs/server.py
@@ -304,6 +304,8 @@ class NapalmLogsServerProc(NapalmLogsProc):
message_key = base64.b64encode(message)
if six.PY3:
message_key = base64.b64encode(bytes(message, 'utf-8')).decode()
+ else:
+ message_key = base64.b64encode(message)
if self._buffer[message_key]:
log.info('"%s" seems to be already buffered, skipping', msg_dict['message'])
napalm_logs_server_skipped_buffered_messages.labels(device_os=dev_os.decode()).inc() | Including an else statement for the default message_key | py |
diff --git a/script/lib/git.py b/script/lib/git.py
index <HASH>..<HASH> 100644
--- a/script/lib/git.py
+++ b/script/lib/git.py
@@ -40,13 +40,17 @@ def get_repo_root(path):
return get_repo_root(parent_path)
-def am(repo, patch_data, threeway=False, directory=None,
+def am(repo, patch_data, threeway=False, directory=None, exclude=None,
committer_name=None, committer_email=None):
args = []
if threeway:
args += ['--3way']
if directory is not None:
args += ['--directory', directory]
+ if exclude is not None:
+ for path_pattern in exclude:
+ args += ['--exclude', path_pattern]
+
root_args = ['-C', repo]
if committer_name is not None:
root_args += ['-c', 'user.name=' + committer_name] | build: optionally exclude some parts of patches from being applied (#<I>) | py |
diff --git a/Lib/fontmake/font_project.py b/Lib/fontmake/font_project.py
index <HASH>..<HASH> 100644
--- a/Lib/fontmake/font_project.py
+++ b/Lib/fontmake/font_project.py
@@ -22,7 +22,6 @@ import plistlib
import re
import tempfile
-from booleanOperations import BooleanOperationManager
from cu2qu.pens import ReverseContourPen
from cu2qu.ufo import font_to_quadratic, fonts_to_quadratic
from defcon import Font
@@ -95,6 +94,7 @@ class FontProject:
@timer()
def remove_overlaps(self, ufos):
"""Remove overlaps in UFOs' glyphs' contours."""
+ from booleanOperations import BooleanOperationManager
manager = BooleanOperationManager()
for ufo in ufos: | font_project: move booleanOperations import inside 'remove_overlap' method If one passes --keep-overlap option, then booleanOperations should not be required by fontmake. | py |
diff --git a/src/cryptojwt/jws.py b/src/cryptojwt/jws.py
index <HASH>..<HASH> 100644
--- a/src/cryptojwt/jws.py
+++ b/src/cryptojwt/jws.py
@@ -540,8 +540,7 @@ class JWS(JWx):
raise UnknownAlgorithm(_alg)
_input = jwt.pack(parts=[self.msg])
- sig = _signer.sign(_input.encode("utf-8"),
- key.get_key(alg=_alg, private=True))
+ sig = _signer.sign(_input.encode("utf-8"), key.get_key(private=True))
logger.debug("Signed message using key with kid=%s" % key.kid)
return ".".join([_input, b64encode_item(sig).decode("utf-8")])
@@ -621,7 +620,7 @@ class JWS(JWx):
try:
verifier.verify(jwt.sign_input(), jwt.signature(),
- key.get_key(alg=_alg, private=False))
+ key.get_key(private=False))
except (BadSignature, IndexError):
pass
else: | Removed alg as parameter from get_key method call. | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.