diff
stringlengths 139
3.65k
| message
stringlengths 8
627
| diff_languages
stringclasses 1
value |
|---|---|---|
diff --git a/telemetry/telemetry/core/browser_finder.py b/telemetry/telemetry/core/browser_finder.py
index <HASH>..<HASH> 100644
--- a/telemetry/telemetry/core/browser_finder.py
+++ b/telemetry/telemetry/core/browser_finder.py
@@ -78,13 +78,13 @@ def FindBrowser(options):
logging.warning('--browser omitted. Using most recent local build: %s' %
default_browser.browser_type)
- # TODO: We should do this even when --browser is specified.
default_browser.UpdateExecutableIfNeeded()
return default_browser
if len(browsers) == 1:
logging.warning('--browser omitted. Using only available browser: %s' %
browsers[0].browser_type)
+ browsers[0].UpdateExecutableIfNeeded()
return browsers[0]
raise BrowserTypeRequiredException(
@@ -99,6 +99,7 @@ def FindBrowser(options):
return x_idx - y_idx
browsers.sort(CompareBrowsersOnTypePriority)
if len(browsers) >= 1:
+ browsers[0].UpdateExecutableIfNeeded()
return browsers[0]
else:
return None
@@ -117,6 +118,7 @@ def FindBrowser(options):
if chosen_browser:
logging.info('Chose browser: %s' % (repr(chosen_browser)))
+ chosen_browser.UpdateExecutableIfNeeded()
return chosen_browser
|
[Telemetry] Always update device apk. This allows the bisect bot to rely on Telemetry to do apk installation of the most recently built local ChromeShell. BUG=<I> Review URL: <URL>
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@ setup(
include_package_data = True,
install_requires = ['httplib2', 'simplejson'],
license='LICENSE.txt',
- url = 'https://github.com/maxeventbrite/zendesk/tree/master',
+ url = 'https://github.com/eventbrite/zendesk',
keywords = 'zendesk api helpdesk',
description = 'Python API Wrapper for Zendesk',
classifiers = [
|
correcting for matching public github repo
|
py
|
diff --git a/src/feat/test/integration/test_agencies_standalone_agency.py b/src/feat/test/integration/test_agencies_standalone_agency.py
index <HASH>..<HASH> 100644
--- a/src/feat/test/integration/test_agencies_standalone_agency.py
+++ b/src/feat/test/integration/test_agencies_standalone_agency.py
@@ -152,15 +152,7 @@ class FullIntegrationTestCase(FullIntegrationTest):
return command, args, env
def wait_for_master_gone(self, timeout=20):
-
- def broker_disconnected():
- return self.agency._broker.state == BrokerRole.disconnected
-
- return self.wait_for(broker_disconnected, timeout)
+ return self.agency._broker.wait_for_state(BrokerRole.disconnected)
def wait_for_master_back(self, timeout=20):
-
- def broker_connected():
- return self.agency._broker.state == BrokerRole.slave
-
- return self.wait_for(broker_connected, timeout)
+ return self.agency._broker.wait_for_state(BrokerRole.slave)
|
Use wait_for_state to avoid races
|
py
|
diff --git a/demo/TestSuites/PlayBack/Selection/TestScript.py b/demo/TestSuites/PlayBack/Selection/TestScript.py
index <HASH>..<HASH> 100644
--- a/demo/TestSuites/PlayBack/Selection/TestScript.py
+++ b/demo/TestSuites/PlayBack/Selection/TestScript.py
@@ -22,7 +22,7 @@ def step1():
@expected Description of the expected result
"""
- javaguiMI.selectTabId("TABBED_PANE", "SELECTION_PANEL")
+ javaguiMI.selectTabId("TABBED_PANE", testData.getValue("TAB_ID"))
subtitler.setSubtitle(testData.getValue("COMMENT"))
component = testData.getValue("COMPONENT_NAME")
value = testData.getIntValue("INDEX")
|
#<I> Fix a regression introduced - wrong tab selection
|
py
|
diff --git a/tests/test_facets.py b/tests/test_facets.py
index <HASH>..<HASH> 100644
--- a/tests/test_facets.py
+++ b/tests/test_facets.py
@@ -119,9 +119,9 @@ class FacetSearchTestCase(ESTestCase):
self.assertEquals(resultset.total, 3)
self.assertEquals(resultset.facets.date_facet.entries, [{u'count': 2, u'time': 1301616000000},
{u'count': 1, u'time': 1304208000000}])
- self.assertEquals(datetime.datetime.fromtimestamp(1301616000000 / 1000.).date(),
+ self.assertEquals(datetime.datetime.utcfromtimestamp(1301616000000 / 1000.).date(),
datetime.date(2011, 04, 01))
- self.assertEquals(datetime.datetime.fromtimestamp(1304208000000 / 1000.).date(),
+ self.assertEquals(datetime.datetime.utcfromtimestamp(1304208000000 / 1000.).date(),
datetime.date(2011, 05, 01))
def test_date_facet_filter(self):
|
Fixed facet test to use UTC timestamp
|
py
|
diff --git a/mistletoe/span_token.py b/mistletoe/span_token.py
index <HASH>..<HASH> 100644
--- a/mistletoe/span_token.py
+++ b/mistletoe/span_token.py
@@ -24,7 +24,7 @@ def tokenize_inner(content):
See also: span_tokenizer.tokenize, block_token.tokenize.
"""
- yield from tokenizer.tokenize(content, _token_types, RawText)
+ return tokenizer.tokenize(content, _token_types, RawText)
def add_token(token_cls):
|
🐎 removed yield from in tokenize_inner
|
py
|
diff --git a/glue/ligolw/lsctables.py b/glue/ligolw/lsctables.py
index <HASH>..<HASH> 100644
--- a/glue/ligolw/lsctables.py
+++ b/glue/ligolw/lsctables.py
@@ -1335,6 +1335,7 @@ class CoincInspiral(object):
def get_ifos(self):
return instrument_set_from_ifos(self.ifos)
+
CoincInspiralTable.RowType = CoincInspiral
|
Put whitespace back in lsctables.py (accidently deleted).
|
py
|
diff --git a/pachyderm/plot.py b/pachyderm/plot.py
index <HASH>..<HASH> 100644
--- a/pachyderm/plot.py
+++ b/pachyderm/plot.py
@@ -9,6 +9,7 @@ import logging
from typing import Optional, Union
import matplotlib
+import matplotlib.axes
import matplotlib.colors
import numpy as np
|
Fix import issues I have no idea why mypy doesn't catch this. I'm guessing because it shows up as Any
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@ requires = [
setup(
name='amaascore',
- version='0.4.0',
+ version='0.4.1',
description='Asset Management as a Service - Core SDK',
license='Apache License 2.0',
url='https://github.com/amaas-fintech/amaas-core-sdk-python',
|
AMAAS-<I> increase sdk version
|
py
|
diff --git a/ipyrad/__init__.py b/ipyrad/__init__.py
index <HASH>..<HASH> 100644
--- a/ipyrad/__init__.py
+++ b/ipyrad/__init__.py
@@ -165,7 +165,11 @@ def _getbins():
_platform = _sys.platform
## get current location
- path = _os.path.abspath(_os.path.dirname(__file__))
+ if 'VIRTUAL_ENV' in _os.environ:
+ ipyrad_path = _os.environ['VIRTUAL_ENV']
+ else:
+ path = _os.path.abspath(_os.path.dirname(__file__))
+ ipyrad_path = _os.path.dirname(path)
## find bin directory
ipyrad_path = _os.path.dirname(path)
|
Add support for finding bins in a virtualenv environment installed with pip
|
py
|
diff --git a/persephone/model.py b/persephone/model.py
index <HASH>..<HASH> 100644
--- a/persephone/model.py
+++ b/persephone/model.py
@@ -177,10 +177,7 @@ class Model:
"""
def __init__(self, exp_dir: Union[Path, str], corpus_reader: CorpusReader) -> None:
- if isinstance(exp_dir, Path):
- self.exp_dir = str(exp_dir) # type: str
- else:
- self.exp_dir = exp_dir # type: str
+ self.exp_dir = str(exp_dir) if isinstance(exp_dir, Path) else exp_dir # type: str
self.corpus_reader = corpus_reader
self.log_softmax = None
self.batch_x = None
|
Hack around mypy isinstance check for type generating error
|
py
|
diff --git a/vaex/dataset.py b/vaex/dataset.py
index <HASH>..<HASH> 100644
--- a/vaex/dataset.py
+++ b/vaex/dataset.py
@@ -1444,7 +1444,7 @@ class Dataset(object):
@docsubst
@stat_1d
- def median_approx(self, expression, percentage=50., binby=[], limits=None, shape=default_shape, percentile_shape=1024*16, percentile_limits="minmax", selection=False, async=False):
+ def median_approx(self, expression, percentage=50., binby=[], limits=None, shape=default_shape, percentile_shape=256, percentile_limits="minmax", selection=False, async=False):
"""Calculate the median , possible on a grid defined by binby
NOTE: this value is approximated by calculating the cumulative distribution on a grid defined by
@@ -2430,7 +2430,7 @@ class Dataset(object):
arguments = groups[1].strip()
if "," in arguments:
arguments = arguments.split(",")
- functions = ["mean", "sum", "std", "var", "correlation", "covar", "min", "max", "median"]
+ functions = ["mean", "sum", "std", "var", "correlation", "covar", "min", "max", "median_approx"]
unit_expression = None
if function in ["mean", "sum", "std", "min", "max", "median"]:
unit_expression = arguments
|
change: median_approx uses less memory by default
|
py
|
diff --git a/fluent_contents/plugins/sharedcontent/templatetags/sharedcontent_tags.py b/fluent_contents/plugins/sharedcontent/templatetags/sharedcontent_tags.py
index <HASH>..<HASH> 100644
--- a/fluent_contents/plugins/sharedcontent/templatetags/sharedcontent_tags.py
+++ b/fluent_contents/plugins/sharedcontent/templatetags/sharedcontent_tags.py
@@ -1,4 +1,5 @@
from django.template import Library, TemplateSyntaxError
+from django.contrib.sites.models import Site
from fluent_contents import rendering
from fluent_contents.plugins.sharedcontent.models import SharedContent
from tag_parser import template_tag
@@ -50,7 +51,8 @@ class SharedContentNode(BaseNode):
# Get the placeholder
try:
- sharedcontent = SharedContent.objects.get(slug=slot)
+ site = Site.objects.get_current()
+ sharedcontent = SharedContent.objects.parent_site(site).get(slug=slot)
except SharedContent.DoesNotExist:
return "<!-- shared content '{0}' does not yet exist -->".format(slot)
|
Fix sharedcontent tag in multisite environments
|
py
|
diff --git a/respite/serializers/base.py b/respite/serializers/base.py
index <HASH>..<HASH> 100644
--- a/respite/serializers/base.py
+++ b/respite/serializers/base.py
@@ -20,7 +20,7 @@ class Serializer(object):
"""
def serialize(anything):
-
+
def serialize_dictionary(dictionary):
data = OrderedDict()
@@ -93,9 +93,15 @@ class Serializer(object):
def serialize_date(datetime):
return datetime.isoformat()
- def serialize_fieldfile(fieldfile):
+ def serialize_field_file(field_file):
try:
- return fieldfile.url
+ return field_file.url
+ except ValueError:
+ return None
+
+ def serialize_image_field_file(image_field_file):
+ try:
+ return image_field_file.url
except ValueError:
return None
@@ -123,8 +129,11 @@ class Serializer(object):
if isinstance(anything, (datetime.date, datetime.datetime)):
return serialize_date(anything)
- if isinstance(anything, django.db.models.fields.files.FieldFile):
- return serialize_fieldfile(anything)
+ if type(anything) is django.db.models.fields.files.FieldFile:
+ return serialize_field_file(anything)
+
+ if type(anything) is django.db.models.fields.files.ImageFieldFile:
+ return serialize_image_field_file(anything)
if anything is None:
return None
|
Add serializer for image_field_files
|
py
|
diff --git a/can/interfaces/usb2can.py b/can/interfaces/usb2can.py
index <HASH>..<HASH> 100644
--- a/can/interfaces/usb2can.py
+++ b/can/interfaces/usb2can.py
@@ -12,6 +12,10 @@ handle = c_long
timeout = c_ulong
filter = c_ulong
+#flags mappings
+IS_ERROR_FRAME = 4
+IS_REMOTE_FRAME = 2
+IS_ID_TYPE = 1
|
Added mappings for the flags
|
py
|
diff --git a/tests/integration/states/file.py b/tests/integration/states/file.py
index <HASH>..<HASH> 100644
--- a/tests/integration/states/file.py
+++ b/tests/integration/states/file.py
@@ -103,3 +103,16 @@ class FileTest(integration.ModuleCase):
result = ret[ret.keys()[0]]['result']
self.assertTrue(result)
+ def test_test_managed(self):
+ '''
+ file.managed test interface
+ '''
+ name = os.path.join(integration.TMP, 'grail_not_scene33')
+ ret = self.run_state(
+ 'file.managed',
+ test=True,
+ name=name,
+ source='salt://grail/scene33')
+ self.assertFalse(os.path.isfile(name))
+ result = ret[ret.keys()[0]]['result']
+ self.assertIsNone(result)
|
Add test interface test for file.managed
|
py
|
diff --git a/pyatv/__main__.py b/pyatv/__main__.py
index <HASH>..<HASH> 100644
--- a/pyatv/__main__.py
+++ b/pyatv/__main__.py
@@ -160,7 +160,7 @@ class GlobalCommands:
if pairing.has_paired:
print('Pairing seems to have succeeded, yey!')
print('You may now use these credentials: {0}'.format(
- pairing.credentials))
+ pairing.service.credentials))
else:
print('Pairing failed!')
|
fixed credentials reference to .service.credentials
|
py
|
diff --git a/kuyruk/worker.py b/kuyruk/worker.py
index <HASH>..<HASH> 100644
--- a/kuyruk/worker.py
+++ b/kuyruk/worker.py
@@ -379,7 +379,7 @@ class Worker:
lost during the execution of the task.
"""
- logger.warning("Catched SIGHUP")
+ logger.debug("Catched SIGHUP")
exc_info = self._heartbeat_exc_info
self._heartbeat_exc_info = None
# Format exception info to see in tools like Sentry.
|
sighup message is debug level
|
py
|
diff --git a/djedi/admin/api.py b/djedi/admin/api.py
index <HASH>..<HASH> 100644
--- a/djedi/admin/api.py
+++ b/djedi/admin/api.py
@@ -195,7 +195,7 @@ class NodeEditor(JSONResponseMixin, DjediContextMixin, APIView):
def post(self, request, uri):
uri = self.decode_uri(uri)
data, meta = self.get_post_data(request)
- meta['author'] = request.user.username
+ meta['author'] = auth.get_username(request.user)
node = cio.set(uri, data, publish=False, **meta)
context = cio.load(node.uri)
|
Remove usages of user.username and try to use user.get_username if available.
|
py
|
diff --git a/ethpm/package.py b/ethpm/package.py
index <HASH>..<HASH> 100644
--- a/ethpm/package.py
+++ b/ethpm/package.py
@@ -222,7 +222,7 @@ class Package(object):
)
for deployment_data in deployments.values()
}
- validate_deployments_tx_receipt(deployments, self.w3)
+ validate_deployments_tx_receipt(deployments, self.w3, allow_missing_data=True)
linked_deployments = get_linked_deployments(deployments)
if linked_deployments:
for deployment_data in linked_deployments.values():
|
Set all_missing_data to default to True in Package.deployments: validate_deployments_tx_receipt
|
py
|
diff --git a/rtv/content.py b/rtv/content.py
index <HASH>..<HASH> 100644
--- a/rtv/content.py
+++ b/rtv/content.py
@@ -338,7 +338,6 @@ class SubredditContent(BaseContent):
"""
Grab the `i`th submission, with the title field formatted to fit inside
of a window of width `n`
-
"""
if index < 0:
|
/top and /new functionality added
|
py
|
diff --git a/bakery/tasks.py b/bakery/tasks.py
index <HASH>..<HASH> 100644
--- a/bakery/tasks.py
+++ b/bakery/tasks.py
@@ -208,11 +208,12 @@ def process_project(login, project_id, conn, log):
# login — user login
# project_id - database project_id
# conn - redis connection
+ state = project_state_get(login, project_id)
log.write('Copy [and Rename] UFOs\n', prefix = 'Header: ')
copy_and_rename_ufos_process(login, project_id, log)
- # autoprocess is set after setup is completed
+ # autoprocess is set after setup is completed once
if state['autoprocess']:
log.write('Build Begins!\n', prefix = 'Header: ')
|
Put state variable back into process_project() 8)
|
py
|
diff --git a/androguard/gui/TextDecorators.py b/androguard/gui/TextDecorators.py
index <HASH>..<HASH> 100644
--- a/androguard/gui/TextDecorators.py
+++ b/androguard/gui/TextDecorators.py
@@ -175,7 +175,7 @@ class HighlightASCII(PageDecorator):
off = self.dataModel.getOffset()
- Match = [(m.start(), m.end()) for m in re.finditer(r'([a-zA-Z0-9\-\\.%*:/? _<>]){4,}', page)]
+ Match = [(m.start(), m.end()) for m in re.finditer(b'([a-zA-Z0-9\\-\\\\.%*:/? _<>]){4,}', page)]
for s, e in Match:
for i in range(e-s):
idx = off + s + i
|
fix regex to work on binary data
|
py
|
diff --git a/tests/conftest.py b/tests/conftest.py
index <HASH>..<HASH> 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -5,6 +5,7 @@ import logging
import os
import sys
+import celery
import psutil
import pytest
import wrapt
@@ -16,6 +17,11 @@ from scout_apm.core.config import SCOUT_PYTHON_VALUES, scout_config
from scout_apm.core.tracked_request import TrackedRequest
from tests.compat import TemporaryDirectory
+# Activate the celery pytest plugin
+# https://docs.celeryproject.org/en/latest/userguide/testing.html#pytest-plugin
+if celery.VERSION >= (5, 0):
+ pytest_plugins = ["celery.contrib.pytest"]
+
# Env variables have precedence over Python configs in ScoutConfig.
# Unset all Scout env variables to prevent interference with tests.
|
Update pytest setup for Celery 5 (#<I>)
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -45,7 +45,7 @@ setup(
install_requires=[
'boto >= 2.32',
- 'httpretty==0.8.6',
+ 'httpretty>=0.8.8',
'bz2file',
'requests',
],
|
Upgraded httpretty version; fixing #<I>
|
py
|
diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py
index <HASH>..<HASH> 100644
--- a/tests/test_route53/test_route53.py
+++ b/tests/test_route53/test_route53.py
@@ -58,6 +58,16 @@ def test_rrset():
changes = ResourceRecordSets(conn, zoneid)
changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A")
+ change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A")
+ change.add_value("5.6.7.8")
+ changes.commit()
+
+ rrsets = conn.get_all_rrsets(zoneid, type="A")
+ rrsets.should.have.length_of(1)
+ rrsets[0].resource_records[0].should.equal('5.6.7.8')
+
+ changes = ResourceRecordSets(conn, zoneid)
+ changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A")
changes.commit()
rrsets = conn.get_all_rrsets(zoneid)
|
Updated test_route<I>.test_rrset() for batch update * This should provide a test for [spulec/moto#<I>]. * Prior to deleting the existing A record, it sends a batch update that includes DELETE and a CREATE, which is what boto performs when updating a ResourceRecord.
|
py
|
diff --git a/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py b/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py
index <HASH>..<HASH> 100644
--- a/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py
+++ b/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py
@@ -33,6 +33,9 @@ class MicrosoftAppCredentials(AppCredentials, ABC):
self.microsoft_app_password = password
self.app = None
+
+ if self.oauth_scope and not self.oauth_scope.endswith("/.default"):
+ self.oauth_scope += "/.default"
self.scopes = [self.oauth_scope]
@staticmethod
|
Adding default scope if missing to make MSAL happy
|
py
|
diff --git a/iota/commands/extended/replay_bundle.py b/iota/commands/extended/replay_bundle.py
index <HASH>..<HASH> 100644
--- a/iota/commands/extended/replay_bundle.py
+++ b/iota/commands/extended/replay_bundle.py
@@ -47,7 +47,7 @@ class ReplayBundleCommand(FilterCommand):
minWeightMagnitude = min_weight_magnitude,
- trytes = bundle.as_tryte_strings(head_to_tail=True),
+ trytes = bundle.as_tryte_strings(),
)
|
Fixes replayed bundle being reversed
|
py
|
diff --git a/scripts/daemon.py b/scripts/daemon.py
index <HASH>..<HASH> 100755
--- a/scripts/daemon.py
+++ b/scripts/daemon.py
@@ -37,6 +37,7 @@ class OpenFlowHandler(BaseRequestHandler):
#TODO: Create method to handle header + raw_message
if header.ofp_type == OFPType.OFPT_HELLO:
reply = self.handle_hello(header)
+ print(header.length.value)
self.request.sendall(reply)
else:
print("Couldn't call handle_hello method")
|
Fixed a minor error in OFP Type IF statement.
|
py
|
diff --git a/loam/manager.py b/loam/manager.py
index <HASH>..<HASH> 100644
--- a/loam/manager.py
+++ b/loam/manager.py
@@ -63,7 +63,7 @@ class Section:
"""
def __init__(self, **options: ConfOpt):
- self._def = {}
+ super().__setattr__("_def", {})
for opt_name, opt_meta in options.items():
if _is_valid(opt_name):
self._def[opt_name] = opt_meta
@@ -93,6 +93,12 @@ class Section:
def __getattr__(self, opt: str) -> Any:
raise error.OptionError(opt)
+ def __setattr__(self, opt: str, val: Any) -> None:
+ if opt in self._def:
+ super().__setattr__(opt, val)
+ else:
+ raise error.OptionError(opt)
+
def __iter__(self) -> Iterator[str]:
return iter(self.def_.keys())
|
Protect Section against illicit setattr
|
py
|
diff --git a/test/test_relative_date.py b/test/test_relative_date.py
index <HASH>..<HASH> 100644
--- a/test/test_relative_date.py
+++ b/test/test_relative_date.py
@@ -29,6 +29,7 @@ class RelativeDateTester(TopydoTest):
self.today = date(2015, 11, 6)
self.tomorrow = date(2015, 11, 7)
self.monday = date(2015, 11, 9)
+ self.friday = date(2015, 11, 13)
def test_zero_days(self):
result = relative_date_to_date('0d')
@@ -142,5 +143,13 @@ class RelativeDateTester(TopydoTest):
result = relative_date_to_date('-0d')
self.assertTrue(result, self.today)
+ def test_weekday_next_week(self):
+ """
+ When entering "Friday" on a Friday, return next week Friday instead of
+ today.
+ """
+ result = relative_date_to_date("fri")
+ self.assertTrue(result, self.friday)
+
if __name__ == '__main__':
unittest.main()
|
Add test to check weekday pattern for today's weekday. This corresponds to the change made in commit <I>.
|
py
|
diff --git a/WrightTools/_dataset.py b/WrightTools/_dataset.py
index <HASH>..<HASH> 100644
--- a/WrightTools/_dataset.py
+++ b/WrightTools/_dataset.py
@@ -117,9 +117,12 @@ class Dataset(h5py.Dataset):
@property
def _leaf(self):
out = self.natural_name
+ if self.size == 1:
+ out += f" = {self.points}"
if self.units is not None:
out += " ({0})".format(self.units)
- out += " {0}".format(self.shape)
+ if self.size != 1:
+ out += " {0}".format(self.shape)
return out
@property
|
print_tree with values for scalars (#<I>)
|
py
|
diff --git a/tests/main_test.py b/tests/main_test.py
index <HASH>..<HASH> 100644
--- a/tests/main_test.py
+++ b/tests/main_test.py
@@ -179,7 +179,7 @@ def run(args=[], agnostic_target=None, use_run_arg=False):
run_src()
if use_run_arg:
- comp_extras(["--run"] + agnostic_args + (["--jobs", "0"] if IPY and not PY2 else []))
+ comp_extras(["--run"] + agnostic_args)
else:
comp_extras(agnostic_args)
run_extras()
|
Removes a potentially unnecessary test option
|
py
|
diff --git a/lenstronomy/LightModel/light_model_base.py b/lenstronomy/LightModel/light_model_base.py
index <HASH>..<HASH> 100644
--- a/lenstronomy/LightModel/light_model_base.py
+++ b/lenstronomy/LightModel/light_model_base.py
@@ -91,6 +91,9 @@ class LightModelBase(object):
elif profile_type == 'STARLETS':
from lenstronomy.LightModel.Profiles.starlets import Starlets
self.func_list.append(Starlets(fast_inverse=True, second_gen=False))
+ elif profile_type == 'STARLETS_GEN2':
+ from lenstronomy.LightModel.Profiles.starlets import Starlets
+ self.func_list.append(Starlets(second_gen=True))
else:
raise ValueError('Warning! No light model of type', profile_type, ' found!')
self._num_func = len(self.func_list)
|
Add support of Starlets second generation as light profile
|
py
|
diff --git a/hedgehog/client/client_registry.py b/hedgehog/client/client_registry.py
index <HASH>..<HASH> 100644
--- a/hedgehog/client/client_registry.py
+++ b/hedgehog/client/client_registry.py
@@ -32,6 +32,13 @@ class _EventHandler(object):
self.backend = backend
self.handler = handler
+ def __enter__(self):
+ self.spawn()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.shutdown()
+
def spawn(self):
self.backend.spawn(self.run, async=True)
@@ -159,10 +166,8 @@ def process_handler(on_stdout, on_stderr, on_exit):
stdout_handler = _EventHandler(backend, handle_stdout_exit())
stderr_handler = _EventHandler(backend, handle_stderr())
- stdout_handler.spawn()
- stderr_handler.spawn()
- try:
+ with stdout_handler, stderr_handler:
# update
update = yield events
while True:
@@ -177,10 +182,6 @@ def process_handler(on_stdout, on_stderr, on_exit):
assert False, update
update = yield
- finally:
- # shutdown
- stdout_handler.shutdown()
- stderr_handler.shutdown()
_IDLE = 0
|
make _EventHandler a context manager
|
py
|
diff --git a/salt/runners/survey.py b/salt/runners/survey.py
index <HASH>..<HASH> 100644
--- a/salt/runners/survey.py
+++ b/salt/runners/survey.py
@@ -152,7 +152,7 @@ def _get_pool_results(*args, **kwargs):
expr_form = kwargs.get('expr_form', 'compound')
if expr_form not in ['compound', 'pcre']:
- expr_form='compound'
+ expr_form = 'compound'
client = salt.client.get_local_client(__opts__['conf_file'])
try:
|
regular expressions in specifiing targets for salt-run survey.diff, hash
|
py
|
diff --git a/src/collectors/filestat/filestat.py b/src/collectors/filestat/filestat.py
index <HASH>..<HASH> 100644
--- a/src/collectors/filestat/filestat.py
+++ b/src/collectors/filestat/filestat.py
@@ -235,7 +235,7 @@ class FilestatCollector(diamond.collector.Collector):
data = self.process_lsof(self.get_userlist(), self.get_typelist())
for ukey in data.iterkeys():
for tkey in data[ukey].iterkeys():
- self.log.info('files.user.%s.%s %s' % (
+ self.log.debug('files.user.%s.%s %s' % (
ukey, tkey, int(data[ukey][tkey])))
self.publish('user.%s.%s' % (ukey, tkey),
int(data[ukey][tkey]))
|
filestat : reduce log at info level
|
py
|
diff --git a/abilian/web/views/object.py b/abilian/web/views/object.py
index <HASH>..<HASH> 100644
--- a/abilian/web/views/object.py
+++ b/abilian/web/views/object.py
@@ -464,6 +464,7 @@ class ObjectDelete(ObjectEdit):
"""
Delete object. Supports DELETE verb.
"""
+ methods = ['POST']
activity_verb = 'delete'
_message_success = _l(u"Entity deleted")
|
ObjectDelete: ensure only POST is allowed
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -27,6 +27,8 @@ setup(
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
+ 'Framework :: Django :: 1.10',
+ 'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
|
Update classifiers in setup.py with Django <I> and <I>
|
py
|
diff --git a/registration/backends/hmac/views.py b/registration/backends/hmac/views.py
index <HASH>..<HASH> 100644
--- a/registration/backends/hmac/views.py
+++ b/registration/backends/hmac/views.py
@@ -148,12 +148,11 @@ class ActivationView(BaseActivationView):
"""
User = get_user_model()
- lookup_kwargs = {
- User.USERNAME_FIELD: username,
- 'is_active': False
- }
try:
- user = User.objects.get(**lookup_kwargs)
+ user = User.objects.get(**{
+ User.USERNAME_FIELD: username,
+ 'is_active': False
+ })
return user
except User.DoesNotExist:
return None
|
Don't need to put that in a variable when it's only used once.
|
py
|
diff --git a/ELiDE/ELiDE/screen.py b/ELiDE/ELiDE/screen.py
index <HASH>..<HASH> 100644
--- a/ELiDE/ELiDE/screen.py
+++ b/ELiDE/ELiDE/screen.py
@@ -156,16 +156,16 @@ class TimePanel(BoxLayout):
self.screen.charmenu._switch_to_menu()
@mainthread
- def _upd_branch_hint(self, *args):
- self.ids.branchfield.hint_text = self.screen.app.branch
+ def _upd_branch_hint(self, app, *args):
+ self.ids.branchfield.hint_text = app.branch
@mainthread
- def _upd_turn_hint(self, *args):
- self.ids.turnfield.hint_text = str(self.screen.app.turn)
+ def _upd_turn_hint(self, app, *args):
+ self.ids.turnfield.hint_text = str(app.turn)
@mainthread
- def _upd_tick_hint(self, *args):
- self.ids.tickfield.hint_text = str(self.screen.app.tick)
+ def _upd_tick_hint(self, app, *args):
+ self.ids.tickfield.hint_text = str(app.tick)
def on_screen(self, *args):
if not all(field in self.ids
|
Fix some tests I really don't think this is going to come up in prod
|
py
|
diff --git a/gandi/cli/core/utils/__init__.py b/gandi/cli/core/utils/__init__.py
index <HASH>..<HASH> 100644
--- a/gandi/cli/core/utils/__init__.py
+++ b/gandi/cli/core/utils/__init__.py
@@ -5,6 +5,7 @@ Also custom exceptions and method to generate a random string.
import time
+import click
class MissingConfiguration(Exception):
@@ -152,7 +153,7 @@ def check_domain_available(ctx, param, domain):
result = gandi.call('domain.available', [domain])
if result[domain] == 'unavailable':
- gandi.echo('%s is not available' % domain)
+ raise click.ClickException('%s is not available' % domain)
return
return domain
|
Exit cleanly if domain is not available
|
py
|
diff --git a/airtest/utils/version.py b/airtest/utils/version.py
index <HASH>..<HASH> 100644
--- a/airtest/utils/version.py
+++ b/airtest/utils/version.py
@@ -1,4 +1,4 @@
-__version__ = "1.1.11"
+__version__ = "1.2.0"
import os
import sys
|
level up to <I> (cherry picked from commit 0dd<I>ae<I>e<I>f<I>a<I>c<I>ab<I>c<I>ed)
|
py
|
diff --git a/coaster/utils/classes.py b/coaster/utils/classes.py
index <HASH>..<HASH> 100644
--- a/coaster/utils/classes.py
+++ b/coaster/utils/classes.py
@@ -223,6 +223,9 @@ class LabeledEnum(six.with_metaclass(_LabeledEnumMeta)):
return list((name, title) for name, title in cls.values())
+_marker = [] # For InspectableSet's default parameter
+
+
class InspectableSet(Set):
"""
Given a set, mimics a read-only dictionary where the items are keys and
@@ -257,9 +260,14 @@ class InspectableSet(Set):
True
>>> 'inspectable' in joinset
True
+ >>> emptyset = InspectableSet()
+ >>> len(emptyset)
+ 0
"""
- def __init__(self, members):
- if not isinstance(members, set):
+ def __init__(self, members=_marker):
+ if members is _marker:
+ members = set()
+ elif not isinstance(members, set):
members = set(members)
object.__setattr__(self, '_members', members)
|
Bugfix: support empty InspectableSet instances.
|
py
|
diff --git a/odl/set/space.py b/odl/set/space.py
index <HASH>..<HASH> 100644
--- a/odl/set/space.py
+++ b/odl/set/space.py
@@ -803,6 +803,16 @@ class LinearSpaceElement(object):
"""Return ``+self``."""
return self.copy()
+ def __cmp__(self, other):
+ """Comparsion not implemented.
+
+ Raises
+ ------
+ TypeError
+ """
+ raise TypeError('unorderable type `{}`'
+ ''.format(self.__class__.__name__))
+
# Metric space method
def __eq__(self, other):
"""Return ``self == other``.
@@ -856,6 +866,9 @@ class LinearSpaceElement(object):
"""Return ``self != other``."""
return not self.__eq__(other)
+ # Disable hash since vectors are mutable
+ __hash__ = None
+
def __str__(self):
"""Return ``str(self)``.
|
MAINT: Disable hashing and comparsion where it shouldnt be possible, see #<I>
|
py
|
diff --git a/gitlab/__init__.py b/gitlab/__init__.py
index <HASH>..<HASH> 100644
--- a/gitlab/__init__.py
+++ b/gitlab/__init__.py
@@ -26,7 +26,7 @@ class Gitlab(object):
self.host = host[:-1]
else:
self.host = host
- if self.host[7:] != 'http://':
+ if self.host[:7] != 'http://':
self.host = 'http://' + self.host
self.projects_url = self.host + "/api/v3/projects"
self.users_url = self.host + "/api/v3/users"
|
fixed the hostname, i was slicing the incorrect part!
|
py
|
diff --git a/oauthlib/oauth1/rfc5849/signature.py b/oauthlib/oauth1/rfc5849/signature.py
index <HASH>..<HASH> 100644
--- a/oauthlib/oauth1/rfc5849/signature.py
+++ b/oauthlib/oauth1/rfc5849/signature.py
@@ -82,6 +82,8 @@ def collect_parameters(uri_query='', body=[], headers=None,
Body parameters must be supplied as a dict, a list of 2-tuples, or a
formencoded query string.
+ Headers must be supplied as a dict.
+
Per `section 3.4.1.3.1`_ of the spec.
.. _`section 3.4.1.3.1`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3.1
|
Document that collect_parameters expects headers as a dict
|
py
|
diff --git a/pycbc/filter/matchedfilter.py b/pycbc/filter/matchedfilter.py
index <HASH>..<HASH> 100644
--- a/pycbc/filter/matchedfilter.py
+++ b/pycbc/filter/matchedfilter.py
@@ -121,8 +121,8 @@ def matchedfilter(template,data,psd=None,low_frequency_cutoff=None,
N = (len(stilde)-1) * 2
kmin,kmax = get_cutoff_indices(low_frequency_cutoff,
- high_frequency_cutoff,stilde.delta_f,N)
-
+ high_frequency_cutoff,stilde.delta_f,N)
+
if (_q is None) or (len(_q) != N) or _q.dtype != data.dtype:
_q = zeros(N,dtype=complex_same_precision_as(data))
else:
@@ -131,7 +131,7 @@ def matchedfilter(template,data,psd=None,low_frequency_cutoff=None,
if (_qtilde is None) or (len(_qtilde) != N) or _qtilde.dtype != data.dtype:
_qtilde = zeros(N,dtype=complex_same_precision_as(data))
else:
- _q.fill(0)
+ _qtilde.fill(0)
#REPLACE with in place operations once they are fixed in PyCUDA
_qtilde[kmin:kmax] = htilde[kmin:kmax].conj() * stilde[kmin:kmax]
|
fixed bug in matchedfilter temporary memory
|
py
|
diff --git a/tests/commands/autoupdate_test.py b/tests/commands/autoupdate_test.py
index <HASH>..<HASH> 100644
--- a/tests/commands/autoupdate_test.py
+++ b/tests/commands/autoupdate_test.py
@@ -114,10 +114,8 @@ def test_autoupdate_out_of_date_repo(
)
write_config('.', config)
- runner = Runner('.', C.CONFIG_FILE)
before = open(C.CONFIG_FILE).read()
- # It will update the repo, because the name matches
- ret = autoupdate(runner, tags_only=False)
+ ret = autoupdate(Runner('.', C.CONFIG_FILE), tags_only=False)
after = open(C.CONFIG_FILE).read()
assert ret == 0
assert before != after
@@ -137,7 +135,6 @@ def test_autoupdate_out_of_date_repo_with_correct_repo_name(
runner = Runner('.', C.CONFIG_FILE)
before = open(C.CONFIG_FILE).read()
repo_name = 'file://{}'.format(out_of_date_repo.path)
- # It will update the repo, because the name matches
ret = autoupdate(runner, tags_only=False, repo=repo_name)
after = open(C.CONFIG_FILE).read()
assert ret == 0
|
Keep original test as is, for real
|
py
|
diff --git a/mythril/support/signatures.py b/mythril/support/signatures.py
index <HASH>..<HASH> 100644
--- a/mythril/support/signatures.py
+++ b/mythril/support/signatures.py
@@ -10,7 +10,7 @@ def add_signatures_from_file(file, sigs={}):
code = f.read()
- funcs = re.findall(r'function[\s]+(.*?\))', code, re.DOTALL)
+ funcs = re.findall(r'function[\s]+(\w+\([^\)]*\))', code, re.DOTALL)
for f in funcs:
|
Change regex to the following: function + minimum 1 whitespace + minimum 1 word letter + ( + * characters that are not ) + )
|
py
|
diff --git a/mtcnn/__init__.py b/mtcnn/__init__.py
index <HASH>..<HASH> 100644
--- a/mtcnn/__init__.py
+++ b/mtcnn/__init__.py
@@ -24,4 +24,4 @@
#SOFTWARE.
__author__ = "Iván de Paz Centeno"
-__version__= "0.0.5"
\ No newline at end of file
+__version__= "0.0.6"
|
Changed to version <I>
|
py
|
diff --git a/src/rez/utils/colorize.py b/src/rez/utils/colorize.py
index <HASH>..<HASH> 100644
--- a/src/rez/utils/colorize.py
+++ b/src/rez/utils/colorize.py
@@ -298,6 +298,7 @@ class Printer(object):
def __call__(self, msg='', style=None):
print >> self.buf, self.get(msg, style)
+ self.buf.flush()
def get(self, msg, style=None):
if style and self.colorize:
|
Call the flush method every time a Printer instance is called (so everytime it prints)
|
py
|
diff --git a/setuptools/tests/test_easy_install.py b/setuptools/tests/test_easy_install.py
index <HASH>..<HASH> 100644
--- a/setuptools/tests/test_easy_install.py
+++ b/setuptools/tests/test_easy_install.py
@@ -170,9 +170,9 @@ class TestEasyInstallTest:
sdist_zip.close()
return str(sdist)
+ @pytest.mark.xfail(reason="#709 and #710")
@pytest.mark.xfail(setuptools.tests.is_ascii,
reason="https://github.com/pypa/setuptools/issues/706")
- @pytest.mark.xfail(reason="#709 and #710")
def test_unicode_filename_in_sdist(self, sdist_unicode, tmpdir, monkeypatch):
"""
The install command should execute correctly even if
|
Change order of xfail, giving the unfiltered one precedence.
|
py
|
diff --git a/blockstore/lib/config.py b/blockstore/lib/config.py
index <HASH>..<HASH> 100644
--- a/blockstore/lib/config.py
+++ b/blockstore/lib/config.py
@@ -84,9 +84,9 @@ DEFAULT_BITCOIND_PASSWD = 'opennamesystem'
""" block indexing configs
"""
-REINDEX_FREQUENCY = 30 # seconds
+REINDEX_FREQUENCY = 60 # seconds
-FIRST_BLOCK_MAINNET = 370607 # 343883
+FIRST_BLOCK_MAINNET = 372693 # 343883
FIRST_BLOCK_MAINNET_TESTSET = FIRST_BLOCK_MAINNET
# FIRST_BLOCK_TESTNET = 343883
FIRST_BLOCK_TESTNET = 529008
|
Bump reindex frequency and start block
|
py
|
diff --git a/ibis/backends/tests/test_temporal.py b/ibis/backends/tests/test_temporal.py
index <HASH>..<HASH> 100644
--- a/ibis/backends/tests/test_temporal.py
+++ b/ibis/backends/tests/test_temporal.py
@@ -1,3 +1,4 @@
+import datetime
import operator
import warnings
from operator import methodcaller
@@ -776,3 +777,26 @@ def test_integer_cast_to_timestamp(backend, alltypes, df):
expected = pd.to_datetime(df.int_col, unit="s").rename(expr.get_name())
result = expr.execute()
backend.assert_series_equal(result, expected)
+
+
+@pytest.mark.broken(
+ ["clickhouse", "impala"],
+ reason=(
+ "Impala returns a string; "
+ "the clickhouse driver returns invalid results for big timestamps"
+ ),
+)
+@pytest.mark.notimpl(
+ ["datafusion", "duckdb"],
+ reason="DataFusion and DuckDB backends assume ns resolution timestamps",
+)
+@pytest.mark.notyet(
+ ["pyspark"],
+ reason="PySpark doesn't handle big timestamps",
+)
+def test_big_timestamp(con):
+ # TODO: test with a timezone
+ value = ibis.timestamp("2419-10-11 10:10:25")
+ result = con.execute(value)
+ expected = datetime.datetime(2419, 10, 11, 10, 10, 25)
+ assert result == expected
|
test(backends): add failing test for big timestamps
|
py
|
diff --git a/tornado/web.py b/tornado/web.py
index <HASH>..<HASH> 100644
--- a/tornado/web.py
+++ b/tornado/web.py
@@ -1810,6 +1810,10 @@ class StaticFileHandler(RequestHandler):
size = self.get_content_size()
if start < 0:
start += size
+ # Note: only return HTTP 206 if less than the entire range has been
+ # requested. Not only is this semantically correct, but Chrome
+ # refuses to play audio if it gets an HTTP 206 in response to
+ # ``Range: bytes=0-``.
if size != (end or size) - (start or 0):
self.set_status(206) # Partial Content
self.set_header("Content-Range",
|
Note about conditional return of HTTP <I>
|
py
|
diff --git a/tests/test_hx.py b/tests/test_hx.py
index <HASH>..<HASH> 100644
--- a/tests/test_hx.py
+++ b/tests/test_hx.py
@@ -918,7 +918,9 @@ def test_NTU_from_P_basic():
# We have to compare the re calculated P1 values, because for many values of NTU1,
# at the initial far guess of 10000 P1 = 1 and at the random NTU1 P1 is also 1
P1_calc = temperature_effectiveness_basic(R1=R1, NTU1=NTU1_calc, subtype='crossflow approximate')
- assert_allclose(P1, P1_calc)
+ # In python 2.6 and 3.3 the solver doesn't converge as well, so we need
+ # to add a little tolerance
+ assert_allclose(P1, P1_calc, rtol=5E-6)
# Crossflow approximate test case
R1 = .1
|
Fix a test tolerance for Python <I> and <I> tests to pass
|
py
|
diff --git a/OpenSSL/test/test_ssl.py b/OpenSSL/test/test_ssl.py
index <HASH>..<HASH> 100644
--- a/OpenSSL/test/test_ssl.py
+++ b/OpenSSL/test/test_ssl.py
@@ -1158,6 +1158,12 @@ class ServerNameCallbackTests(TestCase, _LoopbackMixin):
del callback
context.set_tlsext_servername_callback(replacement)
+
+ # One run of the garbage collector happens to work on CPython. PyPy
+ # doesn't collect the underlying object until a second run for whatever
+ # reason. That's fine, it still demonstrates our code has properly
+ # dropped the reference.
+ collect()
collect()
callback = tracker()
|
Add a second `collect()` call which hopefully will make the test pass on PyPy.
|
py
|
diff --git a/hvac/tests/test_kubernetes_methods.py b/hvac/tests/test_kubernetes_methods.py
index <HASH>..<HASH> 100644
--- a/hvac/tests/test_kubernetes_methods.py
+++ b/hvac/tests/test_kubernetes_methods.py
@@ -252,8 +252,10 @@ class TestKubernetesMethods(TestCase):
'lease_duration': 10000,
'metadata': {
'role': 'custom_role',
- 'service_account_email': 'dev1@project-123456.iam.gserviceaccount.com',
- 'service_account_id': '111111111111111111111'
+ 'service_account_name': 'vault-auth',
+ 'service_account_namespace': 'default',
+ 'service_account_secret_name': 'vault-auth-token-pd21c',
+ 'service_account_uid': 'aa9aa8ff-98d0-11e7-9bb7-0800276d99bf'
},
'policies': [
'default',
|
Fix mock response for Kubernetes auth unit test
|
py
|
diff --git a/tilequeue/command.py b/tilequeue/command.py
index <HASH>..<HASH> 100755
--- a/tilequeue/command.py
+++ b/tilequeue/command.py
@@ -988,7 +988,7 @@ def tilequeue_prune_tiles_of_interest(cfg, peripherals):
cur.execute("""
select x, y, z, tilesize, count(*)
from tile_traffic_v4
- where (date >= dateadd(day, -{days}, current_date))
+ where (date >= dateadd(day, -{days}, getdate()))
and (z between 0 and {max_zoom})
and (x between 0 and pow(2,z)-1)
and (y between 0 and pow(2,z)-1)
|
Use getdate\(\) instead of current_date in RedShift query current_date would return only the date, not the whole datetime. getdate\(\) returns the current datetime.
|
py
|
diff --git a/datatableview/utils.py b/datatableview/utils.py
index <HASH>..<HASH> 100644
--- a/datatableview/utils.py
+++ b/datatableview/utils.py
@@ -15,11 +15,11 @@ MINIMUM_PAGE_LENGTH = 5
DEFAULT_OPTIONS = {
'columns': [], # table headers
- 'ordering': [], # override to Model._meta.ordering
+ 'ordering': None, # override to Model._meta.ordering
# 'filters': {}, # field_name__lookuptype: value
'start_offset': 0, # results to skip ahead
'page_length': 25, # length of a single result page
- 'search': None, # client search string
+ 'search': '', # client search string
'search_fields': [], # extra ORM paths to search; not displayed
'unsortable_columns': [], # table headers not allowed to be sorted
'hidden_columns': [], # table headers to be generated, but hidden by the client
|
Adjust DEFAULT_OPTIONS to reflect normalization algorithm
|
py
|
diff --git a/tests/test_examples.py b/tests/test_examples.py
index <HASH>..<HASH> 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -277,6 +277,17 @@ class TestExamples(unittest.TestCase):
else:
self.fail("Failed while running example number %d" % i)
+ def test_large_db(self):
+ """
+ Generates a large database, then runs query
+ """
+
+ num_facts = 1250000
+ prolog = Prolog()
+ _ = [prolog.assertz(f'p({i})') for i in range(num_facts)]
+
+ results = [r for r in prolog.query('p(I)')]
+ self.assertEqual(len(results), num_facts)
def example_path(path):
import os.path
|
Test added to check large db crash
|
py
|
diff --git a/tests/test_snippets.py b/tests/test_snippets.py
index <HASH>..<HASH> 100644
--- a/tests/test_snippets.py
+++ b/tests/test_snippets.py
@@ -762,6 +762,20 @@ def test_escaped_string():
assert(output(s, schema_name="layer") == exp)
+@pytest.mark.xfail
+def test_filename():
+
+ s = """
+ WEB
+ IMAGEURL "/tmp/"
+ TEMPLATE example3.html
+ END
+ """
+ exp = "WEB IMAGEURL '/tmp/' TEMPLATE 'example3.html' END"
+ print(output(s, schema_name="web"))
+ assert(output(s, schema_name="web") == exp)
+
+
def run_tests():
"""
Need to comment out the following line in C:\VirtualEnvs\mappyfile\Lib\site-packages\pep8.py
@@ -774,6 +788,6 @@ def run_tests():
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
- # test_label()
+ # test_filename()
run_tests()
print("Done!")
|
Failing filename with number, see #<I>
|
py
|
diff --git a/karaage/applications/views/admin.py b/karaage/applications/views/admin.py
index <HASH>..<HASH> 100644
--- a/karaage/applications/views/admin.py
+++ b/karaage/applications/views/admin.py
@@ -82,7 +82,7 @@ def send_invitation(request):
@login_required
def application_list(request):
- apps = Application.objects.select_related()
+ apps = Application.objects.select_related().order_by('-id')
try:
page_no = int(request.GET.get('page', 1))
|
Reverse order of applications in admin site
|
py
|
diff --git a/localshop/packages/models.py b/localshop/packages/models.py
index <HASH>..<HASH> 100644
--- a/localshop/packages/models.py
+++ b/localshop/packages/models.py
@@ -115,4 +115,4 @@ class ReleaseFile(models.Model):
url = reverse('packages:download', kwargs={
'pk': self.pk, 'filename': self.filename
})
- return '%s#md5=%s' % (url, self.digest)
+ return '%s#md5=%s' % (url, self.md5_digest)
|
Fix error after renaming ReleaseFile.digest -> .md5_digest
|
py
|
diff --git a/copy_stream.py b/copy_stream.py
index <HASH>..<HASH> 100755
--- a/copy_stream.py
+++ b/copy_stream.py
@@ -17,9 +17,8 @@ def copy(location, from_stream, to_stream, dry_run=False, verbose=False):
index = json.load(index_file)
from_key = STREAM_KEY_TEMPLATE.format(from_stream)
to_key = STREAM_KEY_TEMPLATE.format(to_stream)
- if to_key in index['index']:
- raise ValueError('{} is already defined in {}'.format(
- to_key, location))
+ if to_key in index['index'] and verbose:
+ print('Redefining {} in index2.json'.print(to_key))
stanza = dict(index['index'][from_key])
index['index'][to_key] = stanza
if verbose:
|
It is not an error to redfine the devel stanza to use the released product file; juju preserves it.
|
py
|
diff --git a/path.py b/path.py
index <HASH>..<HASH> 100644
--- a/path.py
+++ b/path.py
@@ -702,8 +702,10 @@ class path(unicode):
:example:
- >>> for chunk in path("file.txt").chunk(8192):
- ... print(chunk)
+ >>> import hashlib
+ >>> hash = hashlib.md5()
+ >>> for chunk in path("path.py").chunks(8192, mode='rb'):
+ ... hash.update(chunk)
This will read the file by chunks of 8192 bytes.
"""
|
Update doctest in .chunks for correctness and so it actually passes.
|
py
|
diff --git a/lib/search_engine.py b/lib/search_engine.py
index <HASH>..<HASH> 100644
--- a/lib/search_engine.py
+++ b/lib/search_engine.py
@@ -1454,6 +1454,10 @@ def browse_pattern(req, colls, p, f, rg, ln=cdslang):
if not f:
return browse_in_bibwords(req, p, f)
+ ## is p enclosed in quotes? (coming from exact search)
+ if p.startswith('"') and p.endswith('"'):
+ p = p[1:-1]
+
p_orig = p
## okay, "real browse" follows:
browsed_phrases = get_nearest_terms_in_bibxxx(p, f, rg, 1)
|
Updated browse_pattern() in order to discard enclosing double quotes should the search pattern have them around. Useful if the search pattern comes from explicit exact phrase search kind of searches.
|
py
|
diff --git a/peri/states.py b/peri/states.py
index <HASH>..<HASH> 100644
--- a/peri/states.py
+++ b/peri/states.py
@@ -83,7 +83,7 @@ class State(ParameterGroup):
return self.params
def _grad_one_param(self, func, p, dl=1e-3, f0=None, rts=True, **kwargs):
- vals = self.get_values(p)
+ vals = np.array(self.get_values(p))
f0 = util.callif(func(**kwargs)) if f0 is None else f0
self.update(p, vals+dl)
@@ -95,8 +95,8 @@ class State(ParameterGroup):
return (f1 - f0) / dl
def _hess_two_param(self, func, p0, p1, dl=1e-3, f0=None, rts=True, **kwargs):
- vals0 = self.get_values(p0)
- vals1 = self.get_values(p1)
+ vals0 = np.array(self.get_values(p0))
+ vals1 = np.array(self.get_values(p1))
f00 = util.callif(func(**kwargs)) if f0 is None else f0
|
need np arrays for grads, hess
|
py
|
diff --git a/gpiozero/input_devices.py b/gpiozero/input_devices.py
index <HASH>..<HASH> 100644
--- a/gpiozero/input_devices.py
+++ b/gpiozero/input_devices.py
@@ -149,8 +149,8 @@ class WaitableInputDevice(InputDevice):
This can be set to a function which accepts no (mandatory) parameters,
or a Python function which accepts a single mandatory parameter (with
as many optional parameters as you like). If the function accepts a
- single mandatory parameter, the device that activates will be passed as
- that parameter.
+ single mandatory parameter, the device that activated will be passed
+ as that parameter.
Set this property to `None` (the default) to disable the event.
@@ -169,10 +169,10 @@ class WaitableInputDevice(InputDevice):
inactive.
This can be set to a function which accepts no (mandatory) parameters,
- or a Python function which accepts a single mandatory parameter (which
+ or a Python function which accepts a single mandatory parameter (with
as many optional parameters as you like). If the function accepts a
- single mandatory parameter, the device the deactives will be passed as
- that parameter.
+ single mandatory parameter, the device that deactivated will be
+ passed as that parameter.
Set this property to `None` (the default) to disable the event.
|
Fix speling and grandma mistakes Several in WaitableInputDevice's attribute docstrings.
|
py
|
diff --git a/etrago/appl.py b/etrago/appl.py
index <HASH>..<HASH> 100644
--- a/etrago/appl.py
+++ b/etrago/appl.py
@@ -154,6 +154,7 @@ def etrago(args):
minimize_loading : bool
False,
+ ...
k_mean_clustering : bool
False,
|
changed RTD see #<I>
|
py
|
diff --git a/m9dicts/tests/api.py b/m9dicts/tests/api.py
index <HASH>..<HASH> 100644
--- a/m9dicts/tests/api.py
+++ b/m9dicts/tests/api.py
@@ -99,9 +99,9 @@ class Test_20_make(unittest.TestCase):
self.assertTrue(isinstance(md2, MD.UpdateWoReplaceDict))
self.assertTrue(isinstance(md3, MD.UpdateWithMergeListsDict))
- for mdn in (md1, md2, md3):
+ for idx, mdn in enumerate((md1, md2, md3)):
self.assertTrue(isinstance(mdn["b"], type(mdn)),
- "%r (%r)" % (mdn["b"], type(mdn["b"])))
+ "#%d %r (%r)" % (idx, mdn["b"], type(mdn["b"])))
for k in "name a c e f".split():
self.assertTrue(mdn[k] == _CNF_0[k],
"%r vs. %r" % (mdn[k], _CNF_0[k]))
|
refactor: add index for debug
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -23,8 +23,8 @@ import subprocess
# in development set version to none and ...
-PYPI_VERSION = 1.2.0
-
+PYPI_VERSION = "1.2.0
+"
# Return the git revision as a string (from numpy)
def git_version():
|
Update setup.py I am an idiot
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -463,7 +463,7 @@ devel_all = (all_dbs + atlas + aws + azure + celery + cgroups + datadog + devel
sendgrid + sentry + singularity + slack + snowflake + ssh + statsd + tableau +
virtualenv + webhdfs + yandexcloud + zendesk)
-# Snakebite are not Python 3 compatible :'(
+# Snakebite is not Python 3 compatible :'(
if PY3:
devel_ci = [package for package in devel_all if package not in
['snakebite>=2.7.8', 'snakebite[kerberos]>=2.7.8']]
|
Fix grammar in setup.py (#<I>)
|
py
|
diff --git a/bcbio/ngsalign/tophat.py b/bcbio/ngsalign/tophat.py
index <HASH>..<HASH> 100644
--- a/bcbio/ngsalign/tophat.py
+++ b/bcbio/ngsalign/tophat.py
@@ -42,7 +42,8 @@ def align(fastq_file, pair_file, ref_file, out_base, align_dir, config):
with file_transaction([os.path.join(out_dir, f) for f in _out_fnames]):
child = subprocess.check_call(cl)
out_file_final = os.path.join(out_dir, "%s.sam" % out_base)
- os.symlink(out_file, out_file_final)
+ if not os.path.exists(out_file_final):
+ os.symlink(out_file, out_file_final)
return out_file_final
def _estimate_paired_innerdist(fastq_file, pair_file, ref_file, out_base,
|
Avoid symlinking tophat output file if previously exists
|
py
|
diff --git a/mtools/mlaunch/mlaunch.py b/mtools/mlaunch/mlaunch.py
index <HASH>..<HASH> 100755
--- a/mtools/mlaunch/mlaunch.py
+++ b/mtools/mlaunch/mlaunch.py
@@ -288,7 +288,7 @@ class MLaunchTool(BaseCmdLineTool):
t = threading.Thread(target=pingMongoDS, args=(host, 1.0, 30))
t.start()
if self.args['verbose']:
- print "waiting for mongod config server to start up..."
+ print "waiting for mongod config server at %s to start up..."%host
t.join()
print "mongod config server at %s running."%host
@@ -301,7 +301,7 @@ class MLaunchTool(BaseCmdLineTool):
t = threading.Thread(target=pingMongoDS, args=(host, 1.0, 30))
t.start()
if self.args['verbose']:
- print "waiting for mongod to start up..."
+ print "waiting for mongod at %s to start up..."%host
t.join()
print "mongod at %s running."%host
|
mlaunch verbose output prints host:port information for all processes.
|
py
|
diff --git a/patchboard/endpoints.py b/patchboard/endpoints.py
index <HASH>..<HASH> 100644
--- a/patchboard/endpoints.py
+++ b/patchboard/endpoints.py
@@ -34,6 +34,7 @@ class Endpoints(object):
# the endpoint method takes parameters and instantiates a
# resource of the correct class.
# FIXME: this implementation may not be correct
+
def fn(self, params={}):
if isinstance(params, str):
url = params
@@ -41,18 +42,17 @@ class Endpoints(object):
url = mapping.generate_url(params)
return cls(context, {u'url': url})
- endpoint = MethodType(fn, self, type(self))
-
elif mapping.path:
# When a mapping has the 'path' property, all that is needed to
# create a usable resource is the full URL. Thus this endpoint
# method returns an instantiated resource directly.
- endpoint = cls(context, {u'url': mapping.generate_url()})
+ fn = lambda(self): cls(context, {u'url': mapping.generate_url()})
elif mapping.url:
- endpoint = cls(context, {u'url': mapping.url})
+ fn = lambda(self): cls(context, {u'url': mapping.url})
else:
raise PatchboardError(u"Mapping '{0}' is invalid".format(name))
+ endpoint = MethodType(fn, self, type(self))
setattr(self, name, endpoint)
|
Restore (more) correct implementation for endpoint
|
py
|
diff --git a/claripy/ast/bv.py b/claripy/ast/bv.py
index <HASH>..<HASH> 100644
--- a/claripy/ast/bv.py
+++ b/claripy/ast/bv.py
@@ -89,8 +89,10 @@ class BV(Bits):
:param size: the number of bytes to extract
:return: A BV of size ``size * 8``
"""
- pos = self.size() // 8 - 1 - index
- return self[pos * 8 + 7 : (pos - size + 1) * 8]
+ assert size > 0
+ hi = (index + size) * 8 - 1
+ lo = index * 8
+ return self[hi:lo]
def zero_extend(self, n):
"""
|
fixed and simplified incorrect indexing computations
|
py
|
diff --git a/docs/conf.py b/docs/conf.py
index <HASH>..<HASH> 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -119,7 +119,7 @@ try:
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
pass
-except Exception, e:
+except:
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
|
Update docs/conf.py for python 3
|
py
|
diff --git a/salt/runner.py b/salt/runner.py
index <HASH>..<HASH> 100644
--- a/salt/runner.py
+++ b/salt/runner.py
@@ -306,7 +306,7 @@ class Runner(RunnerClient):
is reached.
'''
if timeout is None:
- timeout = self.opts['timeout']
+ timeout = self.opts['timeout'] * 2
timeout_at = time.time() + timeout
last_progress_timestamp = time.time()
@@ -318,11 +318,14 @@ class Runner(RunnerClient):
# we have reached the total timeout
# AND
# have not seen any progress events for the length of the timeout.
- if raw is None or (time.time() > timeout_at and \
+ if raw is None and (time.time() > timeout_at and \
time.time() - last_progress_timestamp > timeout):
# Timeout reached
break
try:
+ # Handle a findjob that might have been kicked off under the covers
+ if raw['data']['fun'] == 'saltutil.findjob':
+ timeout_at = timeout_at + 10
if not raw['tag'].split('/')[1] == 'runner':
continue
elif raw['tag'].split('/')[3] == 'progress':
@@ -331,5 +334,5 @@ class Runner(RunnerClient):
elif raw['tag'].split('/')[3] == 'return':
yield(raw['data']['return'])
break
- except IndexError:
+ except (IndexError, KeyError):
continue
|
Use double the default timeout for runners This isn't a great solution but we need to account for cases where a runner might kick off a findjob and we don't know about it. There is almost certainly a better way to do this but this should suffice for now.
|
py
|
diff --git a/troposphere/autoscaling.py b/troposphere/autoscaling.py
index <HASH>..<HASH> 100644
--- a/troposphere/autoscaling.py
+++ b/troposphere/autoscaling.py
@@ -107,7 +107,7 @@ class AutoScalingGroup(AWSObject):
'HealthCheckGracePeriod': (int, False),
'HealthCheckType': (basestring, False),
'InstanceId': (basestring, False),
- 'LaunchConfigurationName': (basestring, True),
+ 'LaunchConfigurationName': (basestring, False),
'LoadBalancerNames': (list, False),
'MaxSize': (integer, True),
'MetricsCollection': ([MetricsCollection], False),
@@ -135,6 +135,16 @@ class AutoScalingGroup(AWSObject):
"The UpdatePolicy attribute "
"MinInstancesInService must be less than the "
"autoscaling group's MaxSize")
+ launch_config = self.properties.get('LaunchConfigurationName')
+ instance_id = self.properties.get('InstanceId')
+ if launch_config and instance_id:
+ raise ValueError("LaunchConfigurationName and InstanceId "
+ "are mutually exclusive.")
+ if not launch_config and not instance_id:
+ raise ValueError("Must specify either LaunchConfigurationName or "
+ "InstanceId: http://docs.aws.amazon.com/AWSCloud"
+ "Formation/latest/UserGuide/aws-properties-as-gr"
+ "oup.html#cfn-as-group-instanceid")
return True
|
Fixes #<I> Makes InstanceId and LaunchConfigurationName mutually exclusive, and conditionally requires one or the other. <URL>
|
py
|
diff --git a/tests/test_library.py b/tests/test_library.py
index <HASH>..<HASH> 100644
--- a/tests/test_library.py
+++ b/tests/test_library.py
@@ -125,7 +125,6 @@ def test_library_add_edit_delete(plex, movies, photos):
# Create Other Videos library = No external metadata scanning
section_name = "plexapi_test_section"
movie_location = movies.locations[0]
- movie_path = plex.browse(path=movie_location)[0]
photo_location = photos.locations[0]
plex.library.add(
name=section_name,
@@ -173,12 +172,6 @@ def test_library_add_edit_delete(plex, movies, photos):
section.addLocations(photo_location)
section.reload()
assert len(section.locations) == 2
- section.removeLocations(movie_path)
- section.reload()
- assert len(section.locations) == 1
- section.addLocations(movie_path)
- section.reload()
- assert len(section.locations) == 2
section.edit(**{'location': [movie_location]})
section.reload()
assert len(section.locations) == 1
|
removing testing with `Path` object
|
py
|
diff --git a/tests/test_parser.py b/tests/test_parser.py
index <HASH>..<HASH> 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -125,6 +125,11 @@ class WordTokenizeTestCase(unittest.TestCase):
text='one two 3 four',
)) == [('one', ), ('two', ), ('four', )]
+ def test_retains_casing(self):
+ assert list(textparser.word_tokenize(
+ text='Three letter acronym (TLA)',
+ )) == [('Three', ), ('letter', ), ('acronym', ), ('TLA',)]
+
def test_ngrams(self):
assert list(textparser.word_tokenize(
text='foo bar bomb blar',
|
Add illustrative test case
|
py
|
diff --git a/astrobase/plotbase.py b/astrobase/plotbase.py
index <HASH>..<HASH> 100644
--- a/astrobase/plotbase.py
+++ b/astrobase/plotbase.py
@@ -10,6 +10,7 @@ Contains various useful functions for plotting light curves and associated data.
'''
import os
import os.path
+import gzip
try:
import cPickle as pickle
@@ -781,8 +782,13 @@ def make_checkplot(lspinfo,
# get the lspinfo from a pickle file transparently
if isinstance(lspinfo,str) and os.path.exists(lspinfo):
LOGINFO('loading LSP info from pickle %s' % lspinfo)
- with open(lspinfo,'rb') as infd:
- lspinfo = pickle.load(infd)
+
+ if '.gz' in lspinfo:
+ with gzip.open(lspinfo,'rb') as infd:
+ lspinfo = pickle.load(infd)
+ else:
+ with open(lspinfo,'rb') as infd:
+ lspinfo = pickle.load(infd)
# get the things to plot out of the data
if ('periods' in lspinfo and
|
plotbase.make_checkplot: transparently handle gzipped LSP pickles
|
py
|
diff --git a/plenum/server/request_managers/request_manager.py b/plenum/server/request_managers/request_manager.py
index <HASH>..<HASH> 100644
--- a/plenum/server/request_managers/request_manager.py
+++ b/plenum/server/request_managers/request_manager.py
@@ -37,8 +37,8 @@ class RequestManager(AbstractRequestManager):
self._add_handler(typ, handler)
self.txn_types.add(typ)
self.type_to_ledger_id[typ] = ledger_id
- self.ledger_id_to_types.setdefault(ledger_id, [])
- self.ledger_id_to_types[ledger_id].append(typ)
+ self.ledger_id_to_types.setdefault(ledger_id, set())
+ self.ledger_id_to_types[ledger_id].add(typ)
def is_valid_type(self, txn_type):
return txn_type in self.txn_types
|
ST-<I>: Fix for plugins integration
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@ entry_points = {
setup(name='filterbank',
version=version,
description='Python utilities for Breakthrough Listen SETI observations',
- install_requires=['astropy', 'numpy'],
+ install_requires=['astropy', 'numpy', 'cython', 'h5py'],
url='https://github.com/ucberkeleyseti/filterbank',
author='Danny Price',
author_email='dancpr@berkeley.edu',
|
Update setup.py with dependencies
|
py
|
diff --git a/salt/modules/win_servermanager.py b/salt/modules/win_servermanager.py
index <HASH>..<HASH> 100644
--- a/salt/modules/win_servermanager.py
+++ b/salt/modules/win_servermanager.py
@@ -74,14 +74,17 @@ def list_installed():
salt '*' win_servermanager.list_installed
'''
ret = {}
- for line in list_available().splitlines()[2:]:
+ names = _srvmgr('Get-WindowsFeature -erroraction silentlycontinue -warningaction silentlycontinue | Select DisplayName,Name')
+ for line in names.splitlines()[2:]:
splt = line.split()
- if splt[0] == '[X]':
- name = splt.pop(-1)
- splt.pop(0)
- display_name = ' '.join(splt)
- ret[name] = display_name
-
+ name = splt.pop(-1)
+ display_name = ' '.join(splt)
+ ret[name] = display_name
+ state = _srvmgr('Get-WindowsFeature -erroraction silentlycontinue -warningaction silentlycontinue | Select InstallState,Name')
+ for line in state.splitlines()[2:]:
+ splt = line.split()
+ if splt[0] != 'Installed' and splt[1] in ret:
+ del ret[splt[1]]
return ret
|
Fix win_servermanager for services or features with long names that might contain dots
|
py
|
diff --git a/salt/modules/portage_config.py b/salt/modules/portage_config.py
index <HASH>..<HASH> 100644
--- a/salt/modules/portage_config.py
+++ b/salt/modules/portage_config.py
@@ -3,11 +3,10 @@
Configure ``portage(5)``
'''
-from __future__ import absolute_import
-
# Import python libs
import os
import shutil
+from __future__ import absolute_import
# Import salt libs
import salt.utils
@@ -221,7 +220,7 @@ def _merge_flags(*args):
else:
flags[flag] = True
tmp = []
- for k, v in list(flags.items()):
+ for k, v in flags.items():
if v:
tmp.append(k)
else:
|
List call not needed. Changing it back to what it was
|
py
|
diff --git a/bcbio/pipeline/run_info.py b/bcbio/pipeline/run_info.py
index <HASH>..<HASH> 100644
--- a/bcbio/pipeline/run_info.py
+++ b/bcbio/pipeline/run_info.py
@@ -1073,6 +1073,8 @@ def _add_algorithm_defaults(algorithm, analysis, is_cwl):
"ensemble": None,
"exclude_regions": [],
"variant_regions": None,
+ "variantcaller": [],
+ "svcaller": [],
"svvalidate": None,
"svprioritize": None,
"validate": None,
|
CWL: attempt two to provide default svcaller for seq2c
|
py
|
diff --git a/certsuite/gaiautils.py b/certsuite/gaiautils.py
index <HASH>..<HASH> 100644
--- a/certsuite/gaiautils.py
+++ b/certsuite/gaiautils.py
@@ -7,7 +7,7 @@ import os
__all__ = ["Settings", "LockScreen", "Screen"]
-atoms_dir = os.path.join(__file__, os.path.pardir, "atoms")
+atoms_dir = os.path.abspath(os.path.join(__file__, os.path.pardir, "atoms"))
class Settings(object):
|
fixup! Bug <I>: Disable screen lock, display sleep, and switch on screen in super harness
|
py
|
diff --git a/librosa/core/constantq.py b/librosa/core/constantq.py
index <HASH>..<HASH> 100644
--- a/librosa/core/constantq.py
+++ b/librosa/core/constantq.py
@@ -150,9 +150,13 @@ def cqt(y, sr=22050, hop_length=512, fmin=None, n_bins=84,
basis[i] = util.pad_center(basis[i], n_fft)
basis = np.asarray(basis)
- # Conjugate-transpose the basis
+ # FFT the filters
fft_basis = np.fft.fft(basis, n=n_fft, axis=1).conj()
+ # Only need positive frequencies
+ fft_basis = fft_basis[:,:(n_fft/2+1)]
+
+ # Sparsify fft_basis
fft_basis = util.sparsify(fft_basis)
n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
@@ -186,7 +190,7 @@ def cqt(y, sr=22050, hop_length=512, fmin=None, n_bins=84,
# Compute the STFT matrix
D = stft(my_y, n_fft=n_fft, hop_length=my_hop)
- D = np.vstack([D.conj(), D[-2:0:-1]])
+ D = D.conj() # stft returns conjugate for some reason
# And filter response energy
my_cqt = np.abs(fft_basis.dot(D))
|
compute CQT only over positive frequencies for efficiency
|
py
|
diff --git a/pykube/objects.py b/pykube/objects.py
index <HASH>..<HASH> 100644
--- a/pykube/objects.py
+++ b/pykube/objects.py
@@ -29,6 +29,10 @@ class APIObject(object):
def name(self):
return self.obj["metadata"]["name"]
+ @property
+ def annotations(self):
+ return self.obj["metadata"].get("annotations", {})
+
def api_kwargs(self, **kwargs):
kw = {}
collection = kwargs.pop("collection", False)
|
Added APIObject.annotations
|
py
|
diff --git a/internals/states.py b/internals/states.py
index <HASH>..<HASH> 100644
--- a/internals/states.py
+++ b/internals/states.py
@@ -1102,9 +1102,6 @@ class HSMMStatesIntegerNegativeBinomial(_HSMMStatesIntegerNegativeBinomialBase):
self.stateseq = stateseq
self.stateseq_norep, self.durations = util.rle(self.stateseq)
- for state, distn in enumerate(self.model.dur_distns):
- assert np.all(distn.r <= self.durations[:-1][self.stateseq_norep[:-1] == state])
-
#################
# eigen stuff #
#################
|
removed incorrect assert from HSMMIntNegBin
|
py
|
diff --git a/nfc/llcp/tco.py b/nfc/llcp/tco.py
index <HASH>..<HASH> 100644
--- a/nfc/llcp/tco.py
+++ b/nfc/llcp/tco.py
@@ -126,7 +126,8 @@ class TransmissionControlObject(object):
with self.recv_ready:
try: return self.recv_queue.popleft()
except IndexError: self.recv_ready.wait()
- return self.recv_queue.popleft()
+ try: return self.recv_queue.popleft()
+ except IndexError: return None
def close(self):
with self.lock:
|
fix: in LLCP operation, a 'pop from an empty deque' error was raised when closing a TransmissionControlObject while a consumer thread was waiting to receive data
|
py
|
diff --git a/lint.py b/lint.py
index <HASH>..<HASH> 100644
--- a/lint.py
+++ b/lint.py
@@ -780,7 +780,7 @@ them in the generated configuration.'''}),
('generate-man',
{'action' : 'callback', 'callback' : self.cb_generate_manpage,
'group': 'Commands',
- 'help' : "Generate pylint's man page."}),
+ 'help' : "Generate pylint's man page.",'hide': 'True'}),
('errors-only',
{'action' : 'callback', 'callback' : self.cb_debug_mode,
|
[#<I>,#<I>] generate-man option has a new key in its dictionary : 'hide' : True and will not be printed in man or --help
|
py
|
diff --git a/starlette/status.py b/starlette/status.py
index <HASH>..<HASH> 100644
--- a/starlette/status.py
+++ b/starlette/status.py
@@ -4,6 +4,7 @@ See RFC 2616 - https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
And RFC 6585 - https://tools.ietf.org/html/rfc6585
And RFC 4918 - https://tools.ietf.org/html/rfc4918
And RFC 8470 - https://tools.ietf.org/html/rfc8470
+And RFC 2324 - https://tools.ietf.org/html/rfc2324
"""
HTTP_100_CONTINUE = 100
HTTP_101_SWITCHING_PROTOCOLS = 101
@@ -41,6 +42,7 @@ HTTP_414_REQUEST_URI_TOO_LONG = 414
HTTP_415_UNSUPPORTED_MEDIA_TYPE = 415
HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE = 416
HTTP_417_EXPECTATION_FAILED = 417
+HTTP_418_IM_A_TEAPOT = 418
HTTP_422_UNPROCESSABLE_ENTITY = 422
HTTP_423_LOCKED = 423
HTTP_424_FAILED_DEPENDENCY = 424
|
Add <I> I'm A Teapot status code (#<I>)
|
py
|
diff --git a/test_projects/django/testproj/settings.py b/test_projects/django/testproj/settings.py
index <HASH>..<HASH> 100644
--- a/test_projects/django/testproj/settings.py
+++ b/test_projects/django/testproj/settings.py
@@ -69,7 +69,6 @@ CELERY_RESULT_BACKEND = 'cache'
from celery import VERSION
if VERSION[0] < 3:
# Use Django's syntax instead of Celery's, which would be:
- # CELERY_CACHE_BACKEND = 'dummy://'
CELERY_CACHE_BACKEND = 'locmem://'
else:
CELERY_CACHE_BACKEND = 'memory'
|
Removed a commented-out settings line
|
py
|
diff --git a/indra/java_vm.py b/indra/java_vm.py
index <HASH>..<HASH> 100644
--- a/indra/java_vm.py
+++ b/indra/java_vm.py
@@ -2,6 +2,7 @@
to set JVM options while the VM is already running."""
import os
+import warnings
import jnius_config
if '-Xmx4g' not in jnius_config.get_options():
|
Fix missing import in java_vm
|
py
|
diff --git a/pympfr.py b/pympfr.py
index <HASH>..<HASH> 100644
--- a/pympfr.py
+++ b/pympfr.py
@@ -60,10 +60,11 @@ def format_finite(digits, dot_pos):
################################################################################
# Locate and load the library
-#mpfr = ctypes.cdll.LoadLibrary(ctypes.util.find_library('mpfr'))
-
+mpfr_library_name = ctypes.util.find_library('mpfr')
# temporary hack to make this work with mpfr from macports
-mpfr = ctypes.cdll.LoadLibrary('/opt/local/lib/libmpfr.dylib')
+if mpfr_library_name is None:
+ mpfr_library_name = '/opt/local/lib/libmpfr.dylib'
+mpfr = ctypes.cdll.LoadLibrary(mpfr_library_name)
################################################################################
# Platform dependent values
|
Better library location, still a hack
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -10,11 +10,12 @@ setup(
name='simple-ai',
version='0.1',
description=u'An implementation of AI algorithms based on aima-python',
- long_description=__doc__,
+ long_description=open('README.md').read(),
author = u'Juan Pedro Fisanotti',
author_email = 'fisadev@gmail.com',
url='',
- packages=['simple_ai'],
+ packages=['simple_ai', 'simple_ai.tests'],
+ license='LICENSE.txt',
classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
|
Changed description and included tests in the package
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.