diff stringlengths 139 3.65k | message stringlengths 8 627 | diff_languages stringclasses 1 value |
|---|---|---|
diff --git a/rhaptos/cnxmlutils/odt2cnxml.py b/rhaptos/cnxmlutils/odt2cnxml.py
index <HASH>..<HASH> 100644
--- a/rhaptos/cnxmlutils/odt2cnxml.py
+++ b/rhaptos/cnxmlutils/odt2cnxml.py
@@ -144,7 +144,7 @@ def transform(odtfile, debug=False, outputdir=None):
try:
xml = etree.fromstring(etree.tostring(result))
except etree.XMLSyntaxError, e:
- xml = makeXsl('pass1_odt2red-failed.xsl')(xml, message="'%s'" % e.msg)
+ xml = makeXsl('pass1_odt2red-failed.xsl')(xml, message="'%s'" % str(e))
xml = xml.getroot()
return xml | In python<I> apparently etree.XMLSyntaxError.msg is always the empty string | py |
diff --git a/benchexec/tools/veriabs.py b/benchexec/tools/veriabs.py
index <HASH>..<HASH> 100644
--- a/benchexec/tools/veriabs.py
+++ b/benchexec/tools/veriabs.py
@@ -38,15 +38,15 @@ class Tool(benchexec.tools.template.BaseTool):
def executable(self):
return util.find_executable('scripts/veriabs')
-
+
def name(self):
return 'VeriAbs'
-
+
def cmdline(self, executable, options, tasks, propertyfile, rlimits):
if propertyfile:
- options += ['--property-file', propertyfile]
+ options = options + ['--property-file', propertyfile]
return [executable] + options + tasks
-
+
def determine_result(self, returncode, returnsignal, output, isTimeout):
lines = " ".join(output[-10:])
if isTimeout: | Fix bug in tool-info module for Veriabs, it must not change the passed list options. | py |
diff --git a/src/_pytest/main.py b/src/_pytest/main.py
index <HASH>..<HASH> 100644
--- a/src/_pytest/main.py
+++ b/src/_pytest/main.py
@@ -542,7 +542,15 @@ class Session(nodes.FSCollector):
col = root._collectfile(argpath)
if col:
self._node_cache[argpath] = col
- for y in self.matchnodes(col, names):
+ m = self.matchnodes(col, names)
+ # If __init__.py was the only file requested, then the matched node will be
+ # the corresponding Package, and the first yielded item will be the __init__
+ # Module itself, so just use that. If this special case isn't taken, then all
+ # the files in the package will be yielded.
+ if argpath.basename == "__init__.py":
+ yield next(m[0].collect())
+ return
+ for y in m:
yield y
def _collectfile(self, path): | Fix __init__.py as argument also including other package files | py |
diff --git a/scripts/experiments/scrape_expout.py b/scripts/experiments/scrape_expout.py
index <HASH>..<HASH> 100644
--- a/scripts/experiments/scrape_expout.py
+++ b/scripts/experiments/scrape_expout.py
@@ -63,6 +63,7 @@ class DPScraper(Scraper):
exp.update(relaxTime = get_following_literal(stdout_lines, "relaxTime(ms): ", -1))
exp.update(relaxBound = get_following_literal(stdout_lines, "relaxBound: ", -1))
exp.update(relative = get_following_literal(stdout_lines, "relative: ", -1))
+ exp.update(projLogLikelihood = get_following_literal(stdout_lines, "projLogLikelihood: ", -1))
else:
exp.update(trainAccuracy = get_following_literal(stdout_lines, "Accuracy on train: ", -1))
exp.update(trainLogLikelihood = get_following_literal(stdout_lines, "LogLikelihood on train: ", -1)) | scraping projected likelihood git-svn-id: svn+ssh://external.hltcoe.jhu.edu/home/hltcoe/mgormley/public/repos/dep_parse_filtered/trunk@<I> <I>f-cb4b-<I>-8b<I>-c<I>bcb<I> | py |
diff --git a/mapchete/__init__.py b/mapchete/__init__.py
index <HASH>..<HASH> 100644
--- a/mapchete/__init__.py
+++ b/mapchete/__init__.py
@@ -547,12 +547,11 @@ class Mapchete(object):
elif isinstance(process_data, (list, types.GeneratorType)):
return list(process_data)
# for data, metadata tuples
- elif isinstance(process_data, tuple):
- if not all([
- len(process_data) == 2,
- isinstance(process_data[1], dict),
- ]):
- raise MapcheteProcessOutputError("malformed process output")
+ elif all([
+ isinstance(process_data, tuple),
+ len(process_data) == 2,
+ isinstance(process_data[1], dict)
+ ]):
data, metadata = process_data
return self._streamline_output(data), metadata
elif not process_data: | better capture data, metadata tuples | py |
diff --git a/cloudvolume/secrets.py b/cloudvolume/secrets.py
index <HASH>..<HASH> 100644
--- a/cloudvolume/secrets.py
+++ b/cloudvolume/secrets.py
@@ -80,10 +80,11 @@ def google_credentials(bucket = ''):
project_name = json.loads(f.read())['project_id']
break
- GOOGLE_CREDENTIALS_CACHE[bucket] = (project_name, google_credentials)
-
if google_credentials == None:
print(colorize('yellow', 'Using default Google credentials. There is no ~/.cloudvolume/secrets/google-secret.json set.'))
+ else:
+ GOOGLE_CREDENTIALS_CACHE[bucket] = (project_name, google_credentials)
+
return project_name, google_credentials
AWS_CREDENTIALS_CACHE = defaultdict(dict)
@@ -106,7 +107,7 @@ def aws_credentials(bucket = '', service = 'aws'):
if bucket:
paths = [ secretpath('secrets/{}-{}-secret.json'.format(bucket, service)) ] + paths
- aws_credentials = ''
+ aws_credentials = {}
aws_credentials_path = secretpath(default_file_path)
for aws_credentials_path in paths:
if os.path.exists(aws_credentials_path): | misc enhancements (#<I>) | py |
diff --git a/cassandra/cluster.py b/cassandra/cluster.py
index <HASH>..<HASH> 100644
--- a/cassandra/cluster.py
+++ b/cassandra/cluster.py
@@ -837,8 +837,12 @@ class Session(object):
self._load_balancer = cluster.load_balancing_policy
self._metrics = cluster.metrics
+ # create connection pools in parallel
+ futures = []
for host in hosts:
- future = self.add_or_renew_pool(host, is_host_addition=False)
+ futures.append(self.add_or_renew_pool(host, is_host_addition=False))
+
+ for future in futures:
future.result()
def execute(self, query, parameters=None, trace=False): | Create conn pools in parallel when creating sesssions | py |
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -112,11 +112,18 @@ def find_package_data(
out.setdefault(package, []).append(prefix+name)
return out
+try:
+ import pypandoc
+ description = pypandoc.convert('README.md', 'rst')
+except ImportError:
+ description = ''
+
setup(
name="django-graphos",
version=get_version(),
description="Django app to provide a JS agnostic way to work with charts.",
- long_description=read("README.md"),
+ #long_description=read("README.md"),
+ long_description=description,
author="Agiliq",
author_email="hello@agiliq.com",
license="BSD", | Convert md to rst | py |
diff --git a/psyplot/data.py b/psyplot/data.py
index <HASH>..<HASH> 100755
--- a/psyplot/data.py
+++ b/psyplot/data.py
@@ -782,7 +782,10 @@ class CFDecoder(object):
elif nans == 'skip':
dims = [dim for dim in set(var.dims) - set(bounds.dims)]
mask = var.notnull().all(list(dims)) if dims else var.notnull()
- bounds = bounds[mask.values]
+ try:
+ bounds = bounds[mask.values]
+ except IndexError: # 3D bounds
+ bounds = bounds.where(mask)
elif nans == 'only':
dims = [dim for dim in set(var.dims) - set(bounds.dims)]
mask = var.isnull().all(list(dims)) if dims else var.isnull() | Minor bug fix for 3D bounds | py |
diff --git a/openquake/calculators/hazard/classical/core.py b/openquake/calculators/hazard/classical/core.py
index <HASH>..<HASH> 100644
--- a/openquake/calculators/hazard/classical/core.py
+++ b/openquake/calculators/hazard/classical/core.py
@@ -226,7 +226,6 @@ class ClassicalHazardCalculator(haz_general.BaseHazardCalculatorNext):
is_complete=False, lt_realization=lt_rlz).order_by('id')
source_ids = source_progress.values_list('parsed_source_id',
flat=True)
- self.progress['total'] += len(source_ids)
for offset in xrange(0, len(source_ids), block_size):
task_args = (
@@ -260,10 +259,17 @@ class ClassicalHazardCalculator(haz_general.BaseHazardCalculatorNext):
# work is complete.
self.initialize_realizations(
rlz_callbacks=[self.initialize_hazard_curve_progress])
- self.initialize_pr_data()
self.record_init_stats()
+ # Set the progress counters:
+ num_sources = models.SourceProgress.objects.filter(
+ is_complete=False,
+ lt_realization__hazard_calculation=self.hc).count()
+ self.progress['total'] = num_sources
+
+ self.initialize_pr_data()
+
def post_execute(self):
"""
Create the final output records for hazard curves. This is done by | calcs/hazard/classical/core: Cleaned up classical progress counter initialization code, to be more consistent with the approach in disagg. Former-commit-id: fd5be<I>c1b9addc<I>c7c3da6be<I>ce<I>fbb1 | py |
diff --git a/src/jsonlogger.py b/src/jsonlogger.py
index <HASH>..<HASH> 100644
--- a/src/jsonlogger.py
+++ b/src/jsonlogger.py
@@ -2,6 +2,11 @@ import logging
import json
import re
+#Support order in python 2.7 and 3
+try:
+ from collections import OrderedDict
+except:
+ pass
class JsonFormatter(logging.Formatter):
"""A custom formatter to format logging records as json objects"""
@@ -21,8 +26,6 @@ class JsonFormatter(logging.Formatter):
record.asctime = self.formatTime(record, self.datefmt)
try:
- #Support order in python 2.7 and 3
- from collections import OrderedDict
log_record = OrderedDict()
except:
log_record = {} | Took advice to not import during log event | py |
diff --git a/python/ray/worker.py b/python/ray/worker.py
index <HASH>..<HASH> 100644
--- a/python/ray/worker.py
+++ b/python/ray/worker.py
@@ -494,6 +494,12 @@ def get_gpu_ids():
worker = global_worker
worker.check_connected()
+ if worker.mode != WORKER_MODE:
+ logger.warning(
+ "`ray.get_gpu_ids()` will always return the empty list when "
+ "called from the driver. This is because Ray does not manage "
+ "GPU allocations to the driver process.")
+
# TODO(ilr) Handle inserting resources in local mode
all_resource_ids = global_worker.core_worker.resource_ids()
assigned_ids = set() | Add warning if get_gpu_ids() is called on the driver. (#<I>) | py |
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,7 @@ reqs = [str(req.req) for req in install_reqs]
setup(
name="astrocats",
- version="0.1.6",
+ version=version,
author="James Guillochon",
author_email="guillochon@gmail.com",
description=("Package for downloading, analyzing, and constructing open " | BUG: wasn't passing version to setup | py |
diff --git a/holoviews/plotting/plotly/__init__.py b/holoviews/plotting/plotly/__init__.py
index <HASH>..<HASH> 100644
--- a/holoviews/plotting/plotly/__init__.py
+++ b/holoviews/plotting/plotly/__init__.py
@@ -126,6 +126,11 @@ options.Raster = Options('style', cmap=dflt_cmap)
options.QuadMesh = Options('style', cmap=dflt_cmap)
options.HeatMap = Options('style', cmap='RdBu_r')
+# Disable padding for image-like elements
+options.Image = Options("plot", padding=0)
+options.Raster = Options("plot", padding=0)
+options.RGB = Options("plot", padding=0)
+
# 3D
options.Scatter3D = Options('style', color=Cycle(), size=6) | Fix datashader instability by disabling plot padding for Plotly RGB elements. (#<I>) * Disable plot padding for plotly RGB, Raster, and Image elements. | py |
diff --git a/sortedm2m_filter_horizontal_widget/forms.py b/sortedm2m_filter_horizontal_widget/forms.py
index <HASH>..<HASH> 100644
--- a/sortedm2m_filter_horizontal_widget/forms.py
+++ b/sortedm2m_filter_horizontal_widget/forms.py
@@ -7,7 +7,12 @@ from django.db.models.query import QuerySet
from django.utils.encoding import force_text
from django.utils.html import conditional_escape, escape
from django.utils.safestring import mark_safe
-from django.forms.util import flatatt
+
+try:
+ from django.forms.utils import flatatt
+except ImportError:
+ from django.forms.util import flatatt
+
from django.utils.translation import ugettext_lazy as _ | Fix Django <I> warning This patch fix Django <I> warning: lib/python<I>/site-packages/sortedm2m_filter_horizontal_widget/forms.py:<I>: RemovedInDjango<I>Warning: The django.forms.util module has been renamed. Use django.forms.utils instead. from django.forms.util import flatatt | py |
diff --git a/salt/modules/boto_ec2.py b/salt/modules/boto_ec2.py
index <HASH>..<HASH> 100644
--- a/salt/modules/boto_ec2.py
+++ b/salt/modules/boto_ec2.py
@@ -81,9 +81,9 @@ def __virtual__():
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
if not HAS_BOTO:
- return False
+ return (False, "The boto_ec2 module cannot be loaded: boto library not found")
elif _LooseVersion(boto.__version__) < _LooseVersion(required_boto_version):
- return False
+ return (False, "The boto_ec2 module cannot be loaded: boto library verion incorrect ")
return True | Add return messages on boto library load failure lines for library not avalible and wrong version added. | py |
diff --git a/ros_buildfarm/sourcedeb_job.py b/ros_buildfarm/sourcedeb_job.py
index <HASH>..<HASH> 100644
--- a/ros_buildfarm/sourcedeb_job.py
+++ b/ros_buildfarm/sourcedeb_job.py
@@ -116,6 +116,11 @@ def build_sourcedeb(sources_dir, os_name=None, os_code_name=None):
cmd += [
# dpkg-buildpackage args
'-us', '-uc',
+ # set the option for dpkg-source to auto-commit upstream changes
+ # This is needed for Debian increments where the upstream has changed.
+ # It's not the best practice but we have people doing it a bunch.
+ '--hook-source=\'bash -c "echo >> debian/source/options'
+ ' && echo auto-commit >> debian/source/options"\'',
# debuild args for lintian
'--lintian-opts', '--suppress-tags', 'newer-standards-version'] | add support for changing upstream content (#<I>) * add support for changing tarballs. This will auto commit changes to the upstream tarball as diffs for the debian increment. Addresses issue in #<I> * fix spelling * Change to append * making sure there's a new line * wrap | py |
diff --git a/claripy/vsa/strided_interval.py b/claripy/vsa/strided_interval.py
index <HASH>..<HASH> 100644
--- a/claripy/vsa/strided_interval.py
+++ b/claripy/vsa/strided_interval.py
@@ -411,7 +411,12 @@ class StridedInterval(object):
stride = fractions.gcd(self.stride, b.stride)
if overflow:
- return self.top(new_bits)
+ if b.is_integer() and b.lower_bound >> (new_bits - 1) == 1:
+ # Treat it as a minus then
+ operand = - ((0 - b.lower_bound) & ((1 << new_bits) - 1))
+ return self.__add__(operand)
+ else:
+ return self.top(new_bits)
else:
# new_lb = self.lower(new_bits, lb_, stride) if lb_underflow_ else lb_
# new_ub = self.upper(new_bits, ub_, stride) if ub_overflow_ else ub_ | more intelligently performing additions between BIs when there is an overflow. | py |
diff --git a/KCOJ_api/api.py b/KCOJ_api/api.py
index <HASH>..<HASH> 100644
--- a/KCOJ_api/api.py
+++ b/KCOJ_api/api.py
@@ -63,7 +63,7 @@ class KCOJ:
except requests.exceptions.Timeout:
return None
- def get_question(self) -> dict:
+ def get_question(self, number: str=None) -> dict:
"""
取得課程中的所有題目資訊
"""
@@ -79,10 +79,8 @@ class KCOJ:
if tag.find('a') == None:
continue
- # 取得題號
- number = tag.find('a').get_text().strip()
# 儲存題目資訊
- questions[number] = {
+ questions[tag.find('a').get_text().strip()] = {
# 繳交期限
'deadline': tag.find_all('td')[3].get_text().strip(),
# 是否已經過期限
@@ -93,7 +91,10 @@ class KCOJ:
'language': tag.find_all('td')[5].get_text().strip(),
}
# 回傳結果
- return questions
+ if number != None:
+ return questions.get(number)
+ else:
+ return questions
except requests.exceptions.Timeout:
return { | Use get_question(number) to get specific question info | py |
diff --git a/src/pybel/manager/models.py b/src/pybel/manager/models.py
index <HASH>..<HASH> 100644
--- a/src/pybel/manager/models.py
+++ b/src/pybel/manager/models.py
@@ -651,15 +651,15 @@ class Evidence(Base):
def __str__(self):
return '{}:{}'.format(self.citation, self.sha512[:8])
- def to_json(self, include_id=False):
+ def to_json(self, include_id: bool = False):
"""Create a dictionary that is used to recreate the edge data dictionary for a :class:`BELGraph`.
- :param bool include_id: If true, includes the model identifier
+ :param include_id: If true, includes the model identifier
:return: Dictionary containing citation and evidence for a :class:`BELGraph` edge.
:rtype: dict
"""
result = {
- CITATION: self.citation.to_json(),
+ CITATION: self.citation.to_json(include_id=include_id),
EVIDENCE: self.text
} | Propgate inclusion of id's to citation | py |
diff --git a/claripy/frontends/replacement_frontend.py b/claripy/frontends/replacement_frontend.py
index <HASH>..<HASH> 100644
--- a/claripy/frontends/replacement_frontend.py
+++ b/claripy/frontends/replacement_frontend.py
@@ -78,6 +78,14 @@ class ReplacementFrontend(ConstrainedFrontend):
self._replacements[old.cache_key] = new
self._replacement_cache[old.cache_key] = new
+ def remove_replacements(self, old_entries):
+ self._replacements = {k: v for k, v in self._replacements if k not in old_entries}
+ self._replacement_cache = weakref.WeakKeyDictionary(self._replacements)
+
+ def clear_replacements(self):
+ self._replacements = dict()
+ self._replacement_cache = weakref.WeakKeyDictionary(self._replacements)
+
def _replacement(self, old):
if not isinstance(old, Base):
return old | Added methods to safely allow removing replacements | py |
diff --git a/umis/umis.py b/umis/umis.py
index <HASH>..<HASH> 100644
--- a/umis/umis.py
+++ b/umis/umis.py
@@ -101,6 +101,8 @@ def tagcount(genemap, sam, out, output_evidence_table, positional, cb_filter):
if cb_filter:
with open(cb_filter) as fh:
cb_filter = set(cb.strip() for cb in fh)
+ else:
+ cb_filter = type('universe', (object,), {'__contains__' : lambda self, other: True})()
sam_file = Reader(sam)
@@ -114,9 +116,8 @@ def tagcount(genemap, sam, out, output_evidence_table, positional, cb_filter):
CB = match['CB']
MB = match['MB']
- if cb_filter:
- if CB not in cb_filter:
- continue
+ if CB not in cb_filter:
+ continue
if gene_map:
target_name = gene_map[aln.rname] | using universal set type for default cb_filter Because I'm bored | py |
diff --git a/bananas/admin/api/router.py b/bananas/admin/api/router.py
index <HASH>..<HASH> 100644
--- a/bananas/admin/api/router.py
+++ b/bananas/admin/api/router.py
@@ -1,12 +1,14 @@
-from typing import Type
-
-from .mixins import BananasAPI
from .schemas import BananasRouter
__all__ = ["register"]
-def register(view: Type[BananasAPI]):
+def register(view): # Type[BananasAPI]
+ """
+ Register the API view class in the bananas router.
+
+ :param BananasAPI view:
+ """
meta = view.get_admin_meta()
prefix = meta.basename.replace(".", "/")
router.register(prefix, view, meta.basename) | Remove typing for py<I> | py |
diff --git a/instaloader/__main__.py b/instaloader/__main__.py
index <HASH>..<HASH> 100644
--- a/instaloader/__main__.py
+++ b/instaloader/__main__.py
@@ -93,6 +93,7 @@ def _main(instaloader: Instaloader, targetlist: List[str],
latest_stamps = None
if latest_stamps_file is not None:
latest_stamps = LatestStamps(latest_stamps_file)
+ instaloader.context.log(f"Using latest stamps from {latest_stamps_file}.")
# Login, if desired
if username is not None:
if not re.match(r"^[A-Za-z0-9._]+$", username): | Log "Using latest stamps from ..." message Instaloader always logs which files are loaded and considered in the current run (such as session files or resume files). This commit adds a log message "Using latest stamps from ..." when using --latest-stamps, to be consistent. | py |
diff --git a/test/feedforward_test.py b/test/feedforward_test.py
index <HASH>..<HASH> 100644
--- a/test/feedforward_test.py
+++ b/test/feedforward_test.py
@@ -82,9 +82,8 @@ class TestWeightedClassifier(TestClassifier):
def test_score_onelayer(self):
net = self._build(13)
- z = net.score(self.images,
- self.labels,
- np.random.randint(0, 2, size=self.labels.shape))
+ z = net.score(
+ self.images, self.labels, 0.5 * np.ones(self.labels.shape, 'f'))
assert 0 < z < 1 | Make weights for test deterministic. | py |
diff --git a/lib/svtplay/output.py b/lib/svtplay/output.py
index <HASH>..<HASH> 100644
--- a/lib/svtplay/output.py
+++ b/lib/svtplay/output.py
@@ -1,7 +1,30 @@
import sys
+import os
from svtplay.log import log
+progress_stream = sys.stderr
+
+def progress(byte, total, extra = ""):
+ """ Print some info about how much we have downloaded """
+ ratio = float(byte) / total
+ percent = round(ratio*100, 2)
+ tlen = str(len(str(total)))
+ fmt = "Downloaded %"+tlen+"dkB of %dkB bytes (% 3.2f%%)"
+ progresstr = fmt % (byte >> 10, total >> 10, percent)
+
+ columns = int(os.getenv("COLUMNS", "80"))
+ if len(progresstr) < columns - 13:
+ p = int((columns - len(progresstr) - 3) * ratio)
+ q = int((columns - len(progresstr) - 3) * (1 - ratio))
+ progresstr = "[" + ("#" * p) + (" " * q) + "] " + progresstr
+ progress_stream.write(progresstr + ' ' + extra + '\r')
+
+ if byte >= total:
+ progress_stream.write('\n')
+
+ progress_stream.flush()
+
def progressbar(total, pos, msg=""):
"""
Given a total and a progress position, output a progress bar | Break out progress() to svtplay.output module Now it is very clear that progressbar() duplicates an existing function. But obsoleting one or the other is work for the future. | py |
diff --git a/anyconfig/backend/bson.py b/anyconfig/backend/bson.py
index <HASH>..<HASH> 100644
--- a/anyconfig/backend/bson.py
+++ b/anyconfig/backend/bson.py
@@ -68,7 +68,7 @@ class Parser(anyconfig.backend.base.FromStringLoader,
_load_opts = [] if bson.has_c() else ["codec_options"]
_dump_opts = [] if bson.has_c() else ["check_keys", "codec_options"]
_open_flags = ('rb', 'wb')
- _ordered = True
+ _ordered = not bson.has_c()
def _load_options(self, container, **options):
""" | fix: [bson] order of items are not kept if C implemetation is used | py |
diff --git a/tests/ignite/contrib/metrics/regression/test_fractional_bias.py b/tests/ignite/contrib/metrics/regression/test_fractional_bias.py
index <HASH>..<HASH> 100644
--- a/tests/ignite/contrib/metrics/regression/test_fractional_bias.py
+++ b/tests/ignite/contrib/metrics/regression/test_fractional_bias.py
@@ -115,7 +115,7 @@ def test_error_is_not_nan():
assert not (torch.isnan(m._sum_of_errors).any() or torch.isinf(m._sum_of_errors).any()), m._sum_of_errors
-def _test_distrib_compute(device, tol=1e-6):
+def _test_distrib_compute(device, tol=1e-5):
rank = idist.get_rank()
def _test(metric_device):
@@ -149,7 +149,7 @@ def _test_distrib_compute(device, tol=1e-6):
_test(idist.device())
-def _test_distrib_integration(device, tol=1e-6):
+def _test_distrib_integration(device, tol=1e-5):
rank = idist.get_rank()
torch.manual_seed(12) | reduce tolerence (#<I>) (#<I>) | py |
diff --git a/app/preparation/mzidtsv/quant.py b/app/preparation/mzidtsv/quant.py
index <HASH>..<HASH> 100644
--- a/app/preparation/mzidtsv/quant.py
+++ b/app/preparation/mzidtsv/quant.py
@@ -16,6 +16,13 @@ def generate_psms_quanted(quantdbfn, tsvfn, quantheader, oldheader):
yield outpsm
+def get_full_and_quant_headers(oldheader, quantdbfn):
+ quantdb = sqlite.QuantDB(quantdbfn)
+ quantmap = quantdb.get_all_quantmaps()
+ qheader = sorted([x[0] for x in quantmap])
+ return oldheader + qheader, qheader
+
+
def get_quant_header(oldheader, quantdbfn):
quantdb = sqlite.QuantDB(quantdbfn)
quantmap = quantdb.get_all_quantmaps()
@@ -40,4 +47,4 @@ def get_quant_NAs(quantdata, quantheader):
def lookup_quant(spectrafile, psm_scan_nr, quantdb):
"""Outputs dict with keys == quantname, values == quantintensity."""
dbquants = quantdb.lookup_quant(spectrafile, psm_scan_nr)
- return {x[0]: x[1] for x in dbquants}
+ return {x[0]: str(x[1]) for x in dbquants} | Quant prep header maker disappeared. Also return strings to tsv writer | py |
diff --git a/tests/test_Optimiser.py b/tests/test_Optimiser.py
index <HASH>..<HASH> 100644
--- a/tests/test_Optimiser.py
+++ b/tests/test_Optimiser.py
@@ -31,7 +31,7 @@ class OptimiserTest(unittest.TestCase):
self.optimiser.move_nodes(partition, fixed_nodes=fixed_nodes, consider_comms=leidenalg.ALL_NEIGH_COMMS);
self.assertListEqual(
partition.sizes(), [1, 1, 1],
- msg="CPMVertexPartition(resolution_parameter=0.5) of complete graph after move nodes incorrect.");
+ msg="CPMVertexPartition(resolution_parameter=0.1) of one edge plus singleton after move nodes with fixed nodes is incorrect.");
def test_merge_nodes(self):
G = ig.Graph.Full(100);
@@ -71,10 +71,9 @@ class OptimiserTest(unittest.TestCase):
partition = leidenalg.CPMVertexPartition(
G,
resolution_parameter=0.01,
- initial_membership=[2, 1, 2])
+ initial_membership=[2, 1, 0])
# Equivalent to setting initial membership
#partition.set_membership([2, 1, 2])
- print(partition.membership)
opt = leidenalg.Optimiser()
fixed_nodes = [True, False, False]
opt.optimise_partition(partition, fixed_nodes=fixed_nodes) | Changed some text in test, and using a different initial membership. | py |
diff --git a/zipline/pipeline/loaders/blaze.py b/zipline/pipeline/loaders/blaze.py
index <HASH>..<HASH> 100644
--- a/zipline/pipeline/loaders/blaze.py
+++ b/zipline/pipeline/loaders/blaze.py
@@ -39,6 +39,7 @@ valid_deltas_node_types = (
bz.expr.ReLabel,
bz.expr.Symbol,
)
+is_invalid_deltas_node = complement(flip(isinstance, valid_deltas_node_types))
getname = attrgetter('__name__')
@@ -328,10 +329,7 @@ def from_blaze(expr,
"""
deltas = _get_deltas(expr, deltas, no_deltas_rule)
if deltas is not None:
- invalid_nodes = tuple(filter(
- complement(flip(isinstance, valid_deltas_node_types)),
- expr._subterms(),
- ))
+ invalid_nodes = tuple(filter(is_invalid_deltas_node, expr._subterms()))
if invalid_nodes:
raise TypeError(
'expression with deltas may only contain (%s) nodes,' | MAINT: name filter predicate | py |
diff --git a/arctic/_compression.py b/arctic/_compression.py
index <HASH>..<HASH> 100644
--- a/arctic/_compression.py
+++ b/arctic/_compression.py
@@ -1,5 +1,10 @@
from .logging import logger
-import _compress as clz4
+
+try:
+ from . import _compress as clz4
+except ImportError:
+ logger.warn("Couldn't import cython lz4")
+ import lz4 as clz4
USE_LZ4HC = True # switch to use LZ4HC. Default True | Fallback to using the lz4 package if we can't import / build the cythonized lz4 | py |
diff --git a/cosmic_ray/cli.py b/cosmic_ray/cli.py
index <HASH>..<HASH> 100644
--- a/cosmic_ray/cli.py
+++ b/cosmic_ray/cli.py
@@ -99,7 +99,10 @@ options:
def _get_db_name(session_name):
- return '{}.json'.format(session_name)
+ if session_name.endswith('.json'):
+ return session_name
+ else:
+ return '{}.json'.format(session_name)
def handle_init(configuration): | Allow session-name.json to be passed on the command line this makes it easier to use bash tab completion | py |
diff --git a/indra/sources/cwms/processor.py b/indra/sources/cwms/processor.py
index <HASH>..<HASH> 100644
--- a/indra/sources/cwms/processor.py
+++ b/indra/sources/cwms/processor.py
@@ -148,11 +148,14 @@ class CWMSProcessor(object):
if event_id in self.relation_subj_obj_ids:
continue
# Make an Event statement if it is a standalone event
+ ev_type = event_entry.find('type').text
+ polarity = POLARITY_DICT['EVENT'][ev_type]
evidence = self._get_evidence(event_entry, context=None)
event = self._get_event(event_entry, "*[@role=':AFFECTED']",
evidence=[evidence])
if event is None:
continue
+ event.delta['polarity'] = polarity
self.statements.append(event)
self._remove_multi_extraction_artifacts() | Add polarity to Event in CWMS | py |
diff --git a/keyring/backends/kwallet.py b/keyring/backends/kwallet.py
index <HASH>..<HASH> 100644
--- a/keyring/backends/kwallet.py
+++ b/keyring/backends/kwallet.py
@@ -30,7 +30,7 @@ class DBusKeyring(KeyringBackend):
bus.get_object('org.kde.kwalletd5', '/modules/kwalletd5')
except dbus.DBusException:
raise RuntimeError('cannot connect to org.kde.kwalletd5')
- return 5.1
+ return 4.9
def __init__(self, *arg, **kw):
super(DBusKeyring, self).__init__(*arg, **kw) | Make KWallet backend have lower priority than Secret Service backend Secret Service is an XDG standard and thus should be preferred on systems where both protocols are available. | py |
diff --git a/ELiDE/ELiDE/screen.py b/ELiDE/ELiDE/screen.py
index <HASH>..<HASH> 100644
--- a/ELiDE/ELiDE/screen.py
+++ b/ELiDE/ELiDE/screen.py
@@ -294,6 +294,7 @@ class MainScreen(Screen):
if not self.dialog_todo:
return
self._update_dialog(self.dialog_todo.pop(0))
+ self.app.engine.universal['last_result_idx'] += 1
def _update_dialog(self, diargs, **kwargs):
if diargs is None:
@@ -335,7 +336,6 @@ class MainScreen(Screen):
else:
raise TypeError("Don't know how to turn {} into a dialog".format(type(diargs)))
self.ids.dialoglayout.add_widget(dia)
- self.app.engine.universal['last_result_idx'] += 1
def ok(self, cb=None, *args):
self.ids.dialoglayout.clear_widgets() | Don't increment the last_result_idx until you've advanced the dialog | py |
diff --git a/passwords/validators.py b/passwords/validators.py
index <HASH>..<HASH> 100644
--- a/passwords/validators.py
+++ b/passwords/validators.py
@@ -26,6 +26,9 @@ COMMON_SEQUENCES = [
"qwertzuiopü*asdfghjklöä'>yxcvbnm;:_",
"qaywsxedcrfvtgbzhnujmikolp",
]
+DICT_CACHE = []
+DICT_FILESIZE = -1
+DICT_MAX_CACHE = 1000000
# Settings
PASSWORD_MIN_LENGTH = getattr(
@@ -179,6 +182,17 @@ class DictionaryValidator(BaseSimilarityValidator):
threshold=threshold)
def get_dictionary_words(self, dictionary):
+ if DICT_CACHE:
+ return DICT_CACHE
+ if DICT_FILESIZE is -1:
+ f = open(dictionary)
+ f.seek(0,2)
+ DICT_FILESIZE = f.tell()
+ f.close()
+ if DICT_FILESIZE < 1000000:
+ with open(dictionary) as dictionary:
+ DICT_CACHE = [smart_text(x.strip()) for x in dictionary.readlines()]
+ return DICT_CACHE
with open(dictionary) as dictionary:
return [smart_text(x.strip()) for x in dictionary.readlines()] | Add simple dict cacheing for small dict sizes | py |
diff --git a/backend/tests/fixtures/base.py b/backend/tests/fixtures/base.py
index <HASH>..<HASH> 100644
--- a/backend/tests/fixtures/base.py
+++ b/backend/tests/fixtures/base.py
@@ -66,9 +66,8 @@ def client(app):
def testdb(session_app):
"""Establish an application context before running the tests."""
app = session_app
- # app.db.create_all()
+ app.db.create_all()
yield app.db
-
app.db.session.remove()
engine = app.db.get_engine(app)
metadata = app.db.Model.metadata
@@ -83,14 +82,12 @@ def testdb(session_app):
@pytest.fixture(scope="function")
-def db(app):
+def db(app, testdb):
"""
SetUp before each test is run: push a context and use subtransactions.
"""
-
app.db.session.begin(subtransactions=True)
yield app.db
app.db.session.rollback()
- app.db.session.close() | [#<I>] Refactor Model. | py |
diff --git a/galpy/orbit/integrateLinearOrbit.py b/galpy/orbit/integrateLinearOrbit.py
index <HASH>..<HASH> 100644
--- a/galpy/orbit/integrateLinearOrbit.py
+++ b/galpy/orbit/integrateLinearOrbit.py
@@ -25,8 +25,13 @@ if PY3:
else: #pragma: no cover
_ext_suffix= '.so'
for path in sys.path:
+ if not os.path.isdir(path): continue
try:
- _lib = ctypes.CDLL(os.path.join(path,'galpy_integrate_c%s' % _ext_suffix))
+ if sys.platform == 'win32' and sys.version_info >= (3,8): # pragma: no cover
+ # winmode=0x008 is easy-going way to call LoadLibraryExA
+ _lib = ctypes.CDLL(os.path.join(path,'galpy_integrate_c%s' % _ext_suffix),winmode=0x008)
+ else:
+ _lib = ctypes.CDLL(os.path.join(path,'galpy_integrate_c%s' % _ext_suffix))
except OSError as e:
if os.path.exists(os.path.join(path,'galpy_integrate_c%s' % _ext_suffix)): #pragma: no cover
outerr= e | Also use winmode to load DLL in integrateLinearOrbit | py |
diff --git a/flask_appbuilder/tests/test_api.py b/flask_appbuilder/tests/test_api.py
index <HASH>..<HASH> 100644
--- a/flask_appbuilder/tests/test_api.py
+++ b/flask_appbuilder/tests/test_api.py
@@ -2537,7 +2537,7 @@ class APITestCase(FABTestCase):
"""
client = self.app.test_client()
self.browser_login(client, USERNAME_ADMIN, PASSWORD_ADMIN)
- uri = "swaggerview/v1"
+ uri = "swagger/v1"
rv = client.get(uri)
self.assertEqual(rv.status_code, 200) | fix: swagger test (#<I>) | py |
diff --git a/dwave/cloud/testing/mocks.py b/dwave/cloud/testing/mocks.py
index <HASH>..<HASH> 100644
--- a/dwave/cloud/testing/mocks.py
+++ b/dwave/cloud/testing/mocks.py
@@ -78,7 +78,7 @@ def structured_solver_data(id: str = None,
"h_range": [-2.0, 2.0],
"j_range": [-1.0, 1.0],
"num_reads_range": [1, 10000],
- "annealing_time_range": [1, 2000],
+ "annealing_time_range": [1.0, 2000.0],
"extended_j_range": [-2.0, 1.0],
"quota_conversion_rate": 1,
"parameters": { | Update mocks for SAPI timing params type change (#<I>) | py |
diff --git a/pipenv/cli.py b/pipenv/cli.py
index <HASH>..<HASH> 100644
--- a/pipenv/cli.py
+++ b/pipenv/cli.py
@@ -1390,6 +1390,7 @@ def uninstall(
)
)
package_names = project.parsed_pipfile['dev-packages']
+ package_names = package_names.keys()
pipfile_remove = False
else:
puts( | fix: uninstall dev expecting tuple, not dict | py |
diff --git a/subscriptions/tasks.py b/subscriptions/tasks.py
index <HASH>..<HASH> 100644
--- a/subscriptions/tasks.py
+++ b/subscriptions/tasks.py
@@ -580,12 +580,17 @@ class FireWeekEstimateLast(Task):
'*' in schedule.day_of_week):
totals[day] = totals[day] + schedule.total_subs
+ # Django's datetime's weekday method has Monday = 0
+ # whereas the cron format used in the schedules has Sunday = 0
+ sunday = totals.pop(0)
+ totals[7] = sunday
+ totals = {(k-1): v for k, v in totals.items()}
+
today = now()
for dow, total in totals.items():
# Only fire the metric for today or days in the future so that
# estimates for the week don't get updated after the day in
- # question. Django's datetime's weekday method has Monday = 0
- # whereas the cron format used in the schedules has Sunday = 0
+ # question.
if dow >= (today.weekday() - 1):
fire_metric.apply_async(kwargs={
"metric_name": 'subscriptions.send.estimate.%s.last' % dow, | Changed weekday order of estimate metrics | py |
diff --git a/tests/test_meta.py b/tests/test_meta.py
index <HASH>..<HASH> 100644
--- a/tests/test_meta.py
+++ b/tests/test_meta.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals, absolute_import, division
+
import re
import pytest
@@ -13,3 +15,13 @@ def test_meta_for_this_package():
dist = meta.load('.')
assert re.match(r'[\d.]+', dist.version)
assert dist.metadata['Name'] == 'pep517'
+
+
+def test_classic_package(tmpdir):
+ (tmpdir / 'setup.py').write_text(
+ 'from distutils.core import setup; setup(name="foo", version="1.0")',
+ encoding='utf-8',
+ )
+ dist = meta.load(str(tmpdir))
+ assert dist.version == '1.0'
+ assert dist.metadata['Name'] == 'foo' | Add another test demonstrating support for classic packages. | py |
diff --git a/safe/messaging/item/brand.py b/safe/messaging/item/brand.py
index <HASH>..<HASH> 100644
--- a/safe/messaging/item/brand.py
+++ b/safe/messaging/item/brand.py
@@ -23,7 +23,11 @@ from safe.utilities.resources import (
resource_url)
class Brand(Text):
- """A class to model the inasafe brand."""
+ """A class to model the inasafe brand.
+
+ .. versionadded: 3.2
+
+ """
def __init__(self, **kwargs):
"""Creates a brand element. | Annotated brand class to indicate which version it was added in. | py |
diff --git a/salt/states/service.py b/salt/states/service.py
index <HASH>..<HASH> 100644
--- a/salt/states/service.py
+++ b/salt/states/service.py
@@ -93,6 +93,7 @@ def _enable(name, started, result=True, **kwargs):
if __salt__['service.enabled'](name):
# Service is enabled
if started is True:
+ ret['changes'][name] = True
ret['comment'] = ('Service {0} is already enabled,'
' and is running').format(name)
return ret | Change output of states.service to reflect changes If a service is started, but already enabled, changes are now reflected to the user. | py |
diff --git a/lmj/nn/flags.py b/lmj/nn/flags.py
index <HASH>..<HASH> 100644
--- a/lmj/nn/flags.py
+++ b/lmj/nn/flags.py
@@ -84,6 +84,8 @@ g.add_argument('-C', '--cg-batches', type=int, metavar='N',
help='use at most N batches for CG computation')
g.add_argument('--initial-lambda', type=float, default=1., metavar='K',
help='start the HF method with Tikhonov damping of K')
+g.add_argument('--global-backtracking', action='store_true',
+ help='backtrack to lowest cost parameters during CG')
g.add_argument('--preconditioner', action='store_true',
help='precondition the system during CG')
g.add_argument('--save-progress', metavar='FILE', | Add a flag for controlling backtracking strategy during CG in the HF optimizer. | py |
diff --git a/abilian/services/indexing/service.py b/abilian/services/indexing/service.py
index <HASH>..<HASH> 100644
--- a/abilian/services/indexing/service.py
+++ b/abilian/services/indexing/service.py
@@ -303,7 +303,7 @@ def index_update(index, items):
adapted = service.adapted
session = Session(bind=db.session.get_bind(None, None), autocommit=True)
-
+ updated = set()
with AsyncWriter(index) as writer:
for op, cls_name, pk, data in items:
if pk is None:
@@ -319,6 +319,12 @@ def index_update(index, items):
# FIXME: log to sentry?
continue
+ if object_key in updated:
+            # don't add the same document twice in the same transaction. The
+            # writer will not delete previous records, which would end in
+            # duplicate records for the same document.
+ continue
+
if op in ("new", "changed"):
with session.begin():
obj = adapter.retrieve(pk, _session=session, **data)
@@ -335,5 +341,6 @@ def index_update(index, items):
document = adapter.get_document(obj)
writer.add_document(**document)
+ updated.add(object_key)
session.close() | index update: don't add twice the same document during a single transaction | py |
diff --git a/bulbs/content/forms.py b/bulbs/content/forms.py
index <HASH>..<HASH> 100644
--- a/bulbs/content/forms.py
+++ b/bulbs/content/forms.py
@@ -1,19 +1,23 @@
from django import forms
+from django.utils.functional import lazy
from .models import Content
-class DoctypeChoiceField(forms.ChoiceField):
- """Field for choosing amongst the doctypes of a given polymorphic model."""
- def __init__(self, model, exclude_base=False, *args, **kwargs):
+def mapping_type_choices(model, exclude_base=True):
+ def get_choices():
mapping_type_models = model.get_mapping_type_models(exclude_base=exclude_base)
choices = []
for mapping_type_name, content_type in mapping_type_models:
choices.append((mapping_type_name, content_type.name))
- super(DoctypeChoiceField, self).__init__(choices=choices, *args, **kwargs)
+ return choices
+ return get_choices
class ContentDoctypeForm(forms.Form):
"""Form for choosing a Content subclass doctype."""
- doctype = DoctypeChoiceField(Content, exclude_base=True, label='Type')
+ doctype = forms.ChoiceField(
+ choices=lazy(mapping_type_choices(Content, exclude_base=True), list)(),
+ label='Type'
+ ) | Replaced DoctypeChoiceField with a lazily evaluated function | py |
diff --git a/glue/ligolw/lsctables.py b/glue/ligolw/lsctables.py
index <HASH>..<HASH> 100644
--- a/glue/ligolw/lsctables.py
+++ b/glue/ligolw/lsctables.py
@@ -681,7 +681,7 @@ class SnglInspiralTable(table.Table):
"bank_chisq": "real_4",
"bank_chisq_dof": "int_4s",
"cont_chisq": "real_4",
- "cont_chisq_dof": "int_4",
+ "cont_chisq_dof": "int_4s",
"sigmasq": "real_8",
"rsqveto_duration": "real_4",
"Gamma0": "real_4", | I committed a typo that I am fixing there is no int_4 type | py |
diff --git a/tests/test_common.py b/tests/test_common.py
index <HASH>..<HASH> 100644
--- a/tests/test_common.py
+++ b/tests/test_common.py
@@ -31,11 +31,13 @@ class TestClasses(object):
assert _class is not eval(repr(_class))
def test_equality(self, _class, _copy):
- _copy.foo = 'bar'
- assert not _class == _copy
+ copy_ = _copy(_class)
+ copy_.foo = 'bar'
+ assert not _class == copy_
assert not _class == "not a class instance"
def test_inequality(self, _class, _copy):
- _copy.foo = 'bar'
- assert not _class != _copy
+ copy_ = _copy(_class)
+ copy_.foo = 'bar'
+ assert not _class != copy_
assert _class != "not a class instance" | Fix original not passed to deepcopy fixture | py |
diff --git a/tests/build/test_build_request.py b/tests/build/test_build_request.py
index <HASH>..<HASH> 100644
--- a/tests/build/test_build_request.py
+++ b/tests/build/test_build_request.py
@@ -1156,6 +1156,8 @@ class TestBuildRequest(object):
build_request.set_params(**kwargs)
build_json = build_request.render()
+ assert build_json["metadata"]["labels"]["git-branch"] is not None
+
# Verify the triggers are now disabled
assert "triggers" not in build_json["spec"] | tests: make sure that git-branch is not being set to None | py |
diff --git a/src/SALib/sample/saltelli.py b/src/SALib/sample/saltelli.py
index <HASH>..<HASH> 100644
--- a/src/SALib/sample/saltelli.py
+++ b/src/SALib/sample/saltelli.py
@@ -1,5 +1,6 @@
from __future__ import division
+import warnings
import numpy as np
from . import common_args
@@ -28,7 +29,11 @@ def sample(problem, N, calc_second_order=True, seed=None, skip_values=1000):
Calculate second-order sensitivities (default True)
"""
if seed:
- np.random.seed(seed)
+ msg = "The seed value is ignored for the Saltelli sampler\n"
+ msg += "as it uses the (deterministic) Sobol sequence.\n"
+ msg += "Different samples can be obtained by setting the\n"
+ msg += "`skip_values` parameter (defaults to 1000)."
+ warnings.warn(msg)
D = problem['num_vars']
groups = problem.get('groups') | Display warning if seed value is set | py |
diff --git a/yabt/utils.py b/yabt/utils.py
index <HASH>..<HASH> 100644
--- a/yabt/utils.py
+++ b/yabt/utils.py
@@ -79,7 +79,7 @@ def rmtree(path: str):
pass
-def link_func(src: str, dst: str, force: bool=False):
+def link_func(src: str, dst: str, force: bool=True):
if force:
try:
os.remove(dst)
@@ -91,7 +91,7 @@ def link_func(src: str, dst: str, force: bool=False):
pass
-def link_node(abs_src: str, abs_dest: str, force: bool=False):
+def link_node(abs_src: str, abs_dest: str, force: bool=True):
"""Sync source node (file / dir) to destination path using hard links."""
dest_parent_dir = split(abs_dest)[0]
if not isdir(dest_parent_dir): | Allways force hard link. We encountered a bug when a python test depends on a cpp binary, in such cases once the binary was linked into the yabtwork workspace of the py test it will never be updated. After think about it a file can always be deleted in the origin and re-created thus links always should be forced. | py |
diff --git a/script/bootstrap.py b/script/bootstrap.py
index <HASH>..<HASH> 100755
--- a/script/bootstrap.py
+++ b/script/bootstrap.py
@@ -70,10 +70,10 @@ def update_win32_python():
def touch_config_gypi():
config_gypi = os.path.join(SOURCE_ROOT, 'vendor', 'node', 'config.gypi')
- if not os.path.exists(config_gypi):
- with open(config_gypi, 'w+') as f:
- f.truncate(0)
- f.write('{}')
+ with open(config_gypi, 'w+') as f:
+ content = '\n{}'
+ if f.read() != content:
+ f.write(content)
def update_atom_shell(): | Make sure the fake config.gypi can be parsed by node.js. | py |
diff --git a/astroid/bases.py b/astroid/bases.py
index <HASH>..<HASH> 100644
--- a/astroid/bases.py
+++ b/astroid/bases.py
@@ -43,7 +43,7 @@ PROPERTIES = {BUILTINS + '.property', 'abc.abstractproperty'}
POSSIBLE_PROPERTIES = {"cached_property", "cachedproperty",
"lazyproperty", "lazy_property", "reify",
"lazyattribute", "lazy_attribute",
- "LazyProperty", "lazy"}
+ "LazyProperty", "lazy", "cache_readonly"}
def _is_property(meth): | Add cache_readonly to POSSIBLE_PROPERTIES for pandas | py |
diff --git a/satpy/tests/writer_tests/test_ninjotiff.py b/satpy/tests/writer_tests/test_ninjotiff.py
index <HASH>..<HASH> 100644
--- a/satpy/tests/writer_tests/test_ninjotiff.py
+++ b/satpy/tests/writer_tests/test_ninjotiff.py
@@ -37,8 +37,11 @@ class FakeImage:
self.mode = mode
-modules = {'pyninjotiff.ninjotiff': mock.Mock()}
-@mock.patch.dict(sys.modules)
+modules = {'pyninjotiff': mock.Mock(),
+ 'pyninjotiff.ninjotiff': mock.Mock()}
+
+
+@mock.patch.dict(sys.modules, modules)
class TestNinjoTIFFWriter(unittest.TestCase):
"""The ninjo tiff writer tests.""" | Fix import mocking for ninjotiff writer tests | py |
diff --git a/aprslib/inet.py b/aprslib/inet.py
index <HASH>..<HASH> 100644
--- a/aprslib/inet.py
+++ b/aprslib/inet.py
@@ -111,7 +111,7 @@ class IS(object):
while True:
try:
self._connect()
- if self.skip_login:
+ if not self.skip_login:
self._send_login()
break
except (LoginError, ConnectionError): | fix skip_login check being reversed | py |
diff --git a/sacad/recurse.py b/sacad/recurse.py
index <HASH>..<HASH> 100755
--- a/sacad/recurse.py
+++ b/sacad/recurse.py
@@ -222,22 +222,23 @@ def get_covers(work, args):
except Exception as exception:
stats["errors"] += 1
errors.append((path, artist, album, exception))
- if status:
- if args.filename == EMBEDDED_ALBUM_ART_SYMBOL:
- try:
- embed_album_art(cover_filepath, path)
- except Exception as exception:
- stats["errors"] += 1
- errors.append((path, artist, album, exception))
+ else:
+ if status:
+ if args.filename == EMBEDDED_ALBUM_ART_SYMBOL:
+ try:
+ embed_album_art(cover_filepath, path)
+ except Exception as exception:
+ stats["errors"] += 1
+ errors.append((path, artist, album, exception))
+ else:
+ stats["ok"] += 1
+ finally:
+ os.remove(cover_filepath)
else:
stats["ok"] += 1
- finally:
- os.remove(cover_filepath)
else:
- stats["ok"] += 1
- else:
- stats["no result found"] += 1
- not_found.append((path, artist, album))
+ stats["no result found"] += 1
+ not_found.append((path, artist, album))
progress.set_postfix(stats)
progress.update(1) | sacad_r: Fix error reporting | py |
diff --git a/steam/sim.py b/steam/sim.py
index <HASH>..<HASH> 100644
--- a/steam/sim.py
+++ b/steam/sim.py
@@ -201,7 +201,7 @@ class item(base.items.item):
return self.get_name()
def is_untradable(self):
- return bool(not self._item["tradable"])
+ return bool(not self._item.get("tradable"))
def get_quantity(self):
return int(self._item["amount"])
@@ -234,7 +234,7 @@ class item(base.items.item):
return self._item.get("type", "")
def get_image(self, size):
- smallicon = self._item["icon_url"]
+ smallicon = self._item.get("icon_url")
if not smallicon:
return "" | Fix errors from sim items without tradable flags or icons attached | py |
diff --git a/xerox/darwin.py b/xerox/darwin.py
index <HASH>..<HASH> 100644
--- a/xerox/darwin.py
+++ b/xerox/darwin.py
@@ -5,7 +5,6 @@
import subprocess
-import commands
from .base import *
@@ -23,7 +22,7 @@ def copy(string):
def paste():
"""Returns system clipboard contents."""
try:
- return unicode(commands.getoutput('pbpaste'))
+ return unicode(subprocess.check_output('pbpaste'))
except OSError as why:
raise XcodeNotFound | Use `subprocess.check_output` rather than `commands.getoutput`. `commands` is deprecated. | py |
diff --git a/libarchive/constants/archive_entry.py b/libarchive/constants/archive_entry.py
index <HASH>..<HASH> 100644
--- a/libarchive/constants/archive_entry.py
+++ b/libarchive/constants/archive_entry.py
@@ -1,11 +1,11 @@
-AE_IFMT = 0170000
-AE_IFREG = 0100000
-AE_IFLNK = 0120000
-AE_IFSOCK = 0140000
-AE_IFCHR = 0020000
-AE_IFBLK = 0060000
-AE_IFDIR = 0040000
-AE_IFIFO = 0010000
+AE_IFMT = 0o170000
+AE_IFREG = 0o100000
+AE_IFLNK = 0o120000
+AE_IFSOCK = 0o140000
+AE_IFCHR = 0o020000
+AE_IFBLK = 0o060000
+AE_IFDIR = 0o040000
+AE_IFIFO = 0o010000
FILETYPES = {
'IFREG': AE_IFREG, | Added octal specifier to archive entry fields to prevent Python 3 errors. | py |
diff --git a/gifi/queue.py b/gifi/queue.py
index <HASH>..<HASH> 100644
--- a/gifi/queue.py
+++ b/gifi/queue.py
@@ -1,5 +1,6 @@
from command import Command, AggregatedCommand, CommandException
from utils.git_utils import get_repo, check_repo_is_clean
+import feature
def _pop():
@@ -22,8 +23,10 @@ def _pop_finish(repo=None):
def _push():
repo = get_repo()
check_repo_is_clean(repo)
- if repo.head.commit == repo.commit('origin/master'):
- raise CommandException('You are currently at origin/master, there is nothing to push')
+ feature_config = feature.configuration(repo)
+ base = '%s/%s' % (feature_config.working_remote, feature_config.target_branch)
+ if repo.head.commit == repo.commit(base):
+ raise CommandException('You are currently at %s, there is nothing to push' % base)
commit_message = repo.head.commit.message
repo.git.reset('--soft', 'HEAD^')
repo.git.stash('save', _escape_new_lines(commit_message)) | queue: do not push when you are at working_remote/target_branch | py |
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -32,9 +32,11 @@ setup(name='instana',
traces to your Instana dashboard.",
zip_safe=False,
install_requires=['autowrapt>=1.0',
+ 'basictracer>=3.0.0',
+ 'certifi>=2018.4.16',
'fysom>=2.1.2',
'opentracing>=2.0.0',
- 'basictracer>=3.0.0'],
+ 'urllib3>=1.18.1'],
entry_points={
'instana': ['string = instana:load'],
'flask': ['flask = instana.flaskana:hook'],
@@ -49,6 +51,7 @@ traces to your Instana dashboard.",
'flask>=0.12.2',
'lxml>=3.4',
'MySQL-python>=1.2.5;python_version<="2.7"',
+ 'pyOpenSSL>=16.1.0;python_version<="2.7"',
'requests>=2.17.1',
'urllib3[secure]>=1.15',
'spyne>=2.9', | Add dependencies for the REST API client (#<I>) * Add dependencies for the REST API client * Add pyOpenSSL for Python <I>.x | py |
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -72,7 +72,7 @@ setup(
"eth-hash[pycryptodome]>=0.2.0,<1.0.0",
"eth-typing>=2.0.0,<3.0.0",
"eth-utils>=1.4.0,<2.0.0",
- "ethpm==0.1.4a15",
+ "ethpm>=0.1.4a19,<2.0.0",
"hexbytes>=0.1.0,<1.0.0",
"lru-dict>=1.1.6,<2.0.0",
"requests>=2.16.0,<3.0.0", | Update ethpm dep to latest alpha | py |
diff --git a/packages/vaex-core/vaex/expression.py b/packages/vaex-core/vaex/expression.py
index <HASH>..<HASH> 100644
--- a/packages/vaex-core/vaex/expression.py
+++ b/packages/vaex-core/vaex/expression.py
@@ -177,6 +177,10 @@ class Expression(with_metaclass(Meta)):
def __getitem__(self, slice):
return self.ds[slice][self.expression]
+ def __abs__(self):
+ """Returns the absolute value of the expression"""
+ return self.abs()
+
@property
def dt(self):
return DateTime(self) | feat: np.abs(df.x) and abs(df.x) now works, since it calls Expression.__abs__ | py |
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -7,14 +7,14 @@ from setuptools import setup
setup(
name = 'easyhid',
- version = '0.0.0',
+ version = '0.0.1',
description = "A simple interface to the HIDAPI library.",
url = "http://github.com/ahtn/python-easyhid",
author = "jem",
author_email = "jem@seethis.link",
license = 'MIT',
packages = ['easyhid'],
- install_requires = ['cffi', 'ctypes'],
+ install_requires = ['cffi'],
keywords = ['hidapi', 'usb', 'hid'],
zip_safe = False
) | remove ctypes from dependencies | py |
diff --git a/aioxmpp/utils.py b/aioxmpp/utils.py
index <HASH>..<HASH> 100644
--- a/aioxmpp/utils.py
+++ b/aioxmpp/utils.py
@@ -174,7 +174,7 @@ class LazyTask(asyncio.Future):
def __iter__(self):
self.__start_task()
- return super().__iter__()
+ return iter(self.__task)
if hasattr(asyncio.Future, "__await__"):
def __await__(self): | utils: fix python<I> incompatibility I have no idea why the old way doesn’t work though. I presume this is some internals of asyncio which changed. | py |
diff --git a/h5netcdf/tests/test_h5netcdf.py b/h5netcdf/tests/test_h5netcdf.py
index <HASH>..<HASH> 100644
--- a/h5netcdf/tests/test_h5netcdf.py
+++ b/h5netcdf/tests/test_h5netcdf.py
@@ -576,6 +576,9 @@ def test_failed_read_open_and_clean_delete(tmpdir):
def test_create_variable_matching_saved_dimension(tmp_local_or_remote_netcdf):
h5 = get_hdf5_module(tmp_local_or_remote_netcdf)
+ if h5 is not h5py:
+ pytest.xfail('https://github.com/shoyer/h5netcdf/issues/48')
+
with h5netcdf.File(tmp_local_or_remote_netcdf) as f:
f.dimensions['x'] = 2
f.create_variable('y', data=[1, 2], dimensions=('x',)) | xfail failing h5pyd test | py |
diff --git a/openupgradelib/openupgrade_120.py b/openupgradelib/openupgrade_120.py
index <HASH>..<HASH> 100644
--- a/openupgradelib/openupgrade_120.py
+++ b/openupgradelib/openupgrade_120.py
@@ -362,7 +362,7 @@ def _convert_field_bootstrap_3to4_orm(env, model_name, field_name,
update_field_multilang(
records,
field_name,
- lambda old, *a, **k: convert_field_bootstrap_3to4(old),
+ lambda old, *a, **k: convert_string_bootstrap_3to4(old),
) | [FIX] typo in <I> tools Fix typo in openupgrade_<I> in `_convert_field_bootstrap_3to4_orm` method. it have to call string conversion. | py |
diff --git a/docido_sdk/index/processor/es_api.py b/docido_sdk/index/processor/es_api.py
index <HASH>..<HASH> 100644
--- a/docido_sdk/index/processor/es_api.py
+++ b/docido_sdk/index/processor/es_api.py
@@ -148,6 +148,10 @@ class ElasticsearchProcessor(IndexAPIProcessor):
'_id': _id
}
})
+
+ if len(body) == 0:
+ return error_docs
+
params = {
'body': body,
'refresh': True,
@@ -185,6 +189,10 @@ class ElasticsearchProcessor(IndexAPIProcessor):
'_id': _id
}
})
+
+ if len(body) == 0:
+ return error_docs
+
params = {
'body': body,
'refresh': True,
@@ -215,6 +223,10 @@ class ElasticsearchProcessor(IndexAPIProcessor):
}
body.append(action)
body.append(doc)
+
+ if len(body) == 0:
+ return error_docs
+
params = {
'body': body,
'refresh': True, | es_api: don't even talk to the API if we have nothing to say | py |
diff --git a/jsmin/test.py b/jsmin/test.py
index <HASH>..<HASH> 100644
--- a/jsmin/test.py
+++ b/jsmin/test.py
@@ -317,5 +317,10 @@ var foo = "hey";
original = '/(NaN| {2}|^$)/.test(a)&&(a="M 0 0");'
self.assertMinified(original, original) # there should be nothing jsmin can do here
+ def test_space_with_regex_repeats_not_at_start(self):
+ original = 'aaa;/(NaN| {2}|^$)/.test(a)&&(a="M 0 0");'
+ self.assertMinified(original, original) # there should be nothing jsmin can do here
+
+
if __name__ == '__main__':
unittest.main() | Testing issue #<I> also when the regex is not at the start of the script. | py |
diff --git a/tests/test_recommendation_manager.py b/tests/test_recommendation_manager.py
index <HASH>..<HASH> 100644
--- a/tests/test_recommendation_manager.py
+++ b/tests/test_recommendation_manager.py
@@ -58,3 +58,29 @@ def test_recommendation_strategy():
10,
extra_data={'branch': 'linear'})
assert results == EXPECTED_ADDONS
+
+
+def test_recommendation_ensemble():
+    """The recommendation manager supports an ensemble
+    method. We want to verify that at least the dispatch
+    to the stub ensemble recommendation executes correctly.
+ """
+ EXPECTED_ADDONS = [("ensemble_guid1", 0.1),
+ ("ensemble_guid2", 0.2),
+ ("ensemble_guid3", 0.3)]
+
+ # Create a stub ProfileFetcher that always returns the same
+ # client data.
+ class StubFetcher:
+ def get(self, client_id):
+ return {'client_id': '00000'}
+
+ # Configure the recommender so that only the second model
+ # can recommend and return the expected addons.
+
+ # Make sure the recommender returns the expected addons.
+ manager = RecommendationManager(StubFetcher())
+ results = manager.recommend("client-id",
+ 10,
+ extra_data={'branch': 'ensemble'})
+ assert results == EXPECTED_ADDONS | Added a testcase to verify that ensemble recommendations are dispatching correctly. | py |
diff --git a/test/test_cluster_spec.py b/test/test_cluster_spec.py
index <HASH>..<HASH> 100644
--- a/test/test_cluster_spec.py
+++ b/test/test_cluster_spec.py
@@ -118,6 +118,14 @@ class TestAllScenarios(unittest.TestCase):
pass
+def cluster_type_name(cluster_type):
+ return CLUSTER_TYPE._fields[cluster_type]
+
+
+def server_type_name(server_type):
+ return SERVER_TYPE._fields[server_type]
+
+
def check_outcome(self, cluster, outcome):
expected_servers = outcome['servers']
@@ -144,8 +152,8 @@ def check_outcome(self, cluster, outcome):
SERVER_TYPE, expected_server['type'])
self.assertEqual(
- expected_server_type,
- actual_server_description.server_type)
+ server_type_name(expected_server_type),
+ server_type_name(actual_server_description.server_type))
self.assertEqual(
expected_server['setName'],
@@ -153,7 +161,8 @@ def check_outcome(self, cluster, outcome):
self.assertEqual(outcome['setName'], cluster.description.set_name)
expected_cluster_type = getattr(CLUSTER_TYPE, outcome['clusterType'])
- self.assertEqual(expected_cluster_type, cluster.description.cluster_type)
+ self.assertEqual(cluster_type_name(expected_cluster_type),
+ cluster_type_name(cluster.description.cluster_type))
def create_test(scenario_def): | PYTHON-<I> Better test_cluster_spec messages. If a server or cluster is the wrong type at the end of a test, print the type's name, not its number. | py |
diff --git a/ccxt/exchanges.py b/ccxt/exchanges.py
index <HASH>..<HASH> 100644
--- a/ccxt/exchanges.py
+++ b/ccxt/exchanges.py
@@ -116,6 +116,7 @@ import calendar
import datetime
import hashlib
import json
+import math
import sys
import time
import decimal | added missing import math to ./ccxt/exchanges.py | py |
diff --git a/shap/explainers/_partition.py b/shap/explainers/_partition.py
index <HASH>..<HASH> 100644
--- a/shap/explainers/_partition.py
+++ b/shap/explainers/_partition.py
@@ -83,7 +83,7 @@ class Partition(Explainer):
self.input_shape = masker.shape[1:] if hasattr(masker, "shape") and not callable(masker.shape) else None
# self.output_names = output_names
- self.model = lambda x: np.array(model(*x))
+ self.model = lambda *args: np.array(model(*args))
self.elemental_model = model
self.expected_value = None
self._curr_base_value = None | Changed variable naming from x->args and unpacking of args when passed to the model | py |
diff --git a/bika/lims/browser/analysisrequest/view.py b/bika/lims/browser/analysisrequest/view.py
index <HASH>..<HASH> 100644
--- a/bika/lims/browser/analysisrequest/view.py
+++ b/bika/lims/browser/analysisrequest/view.py
@@ -104,6 +104,19 @@ class AnalysisRequestViewView(BrowserView):
{'id': 'retract'},
{'id': 'verify'}]
self.qctable = qcview.contents_table()
+        # If a general retract is done, raise a warning
+ if workflow.getInfoFor(ar, 'review_state') == 'sample_received':
+ allstatus = list()
+ for analysis in ar.getAnalyses():
+ status = workflow.getInfoFor(analysis.getObject(), 'review_state')
+ if status not in ['retracted','to_be_verified','verified']:
+ allstatus = []
+ break
+ else:
+ allstatus.append(status)
+ if len(allstatus) > 0:
+ self.addMessage("General Retract Done", 'warning')
+
        # If it is a retracted AR, show the link to the child AR and show a warning msg
if workflow.getInfoFor(ar, 'review_state') == 'invalid':
childar = hasattr(ar, 'getChildAnalysisRequest') \ | Warning missage when you do a "full" retract on ARview | py |
diff --git a/src/vistir/spin.py b/src/vistir/spin.py
index <HASH>..<HASH> 100644
--- a/src/vistir/spin.py
+++ b/src/vistir/spin.py
@@ -112,6 +112,8 @@ class VistirSpinner(base_obj):
"""
self.handler = handler
+ import colorama
+ colorama.init()
sigmap = {}
if handler:
sigmap.update({ | make sure to init colorama in spinner | py |
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,7 @@ from setuptools import setup
setup(name='dtw',
- version='1.1',
+ version='1.2',
description='Python DTW Module',
author='Pierre Rouanet',
author_email='pierre.rouanet@gmail.com', | Bump to version <I> | py |
diff --git a/arviz/rcparams.py b/arviz/rcparams.py
index <HASH>..<HASH> 100644
--- a/arviz/rcparams.py
+++ b/arviz/rcparams.py
@@ -234,15 +234,19 @@ class rc_context: # pylint: disable=invalid-name
Examples
--------
This allows one to do::
+
with az.rc_context(fname='pystan.rc'):
idata = az.load_arviz_data("radon")
az.plot_posterior(idata, var_names=["gamma"])
+
The plot would have settings from 'screen.rc'
A dictionary can also be passed to the context manager::
+
with az.rc_context(rc={'plot.max_subplots': None}, fname='pystan.rc'):
idata = az.load_arviz_data("radon")
az.plot_posterior(idata, var_names=["gamma"])
+
The 'rc' dictionary takes precedence over the settings loaded from
'fname'. Passing a dictionary only is also valid.
""" | fix code example in rc_context (#<I>) | py |
diff --git a/MAVProxy/mavproxy.py b/MAVProxy/mavproxy.py
index <HASH>..<HASH> 100755
--- a/MAVProxy/mavproxy.py
+++ b/MAVProxy/mavproxy.py
@@ -2278,10 +2278,11 @@ Auto-detected serial ports are:
else:
print("no script %s" % start_script)
- # some core functionality is in modules
- standard_modules = ['log']
- for m in standard_modules:
- process_stdin('module load %s' % m)
+ if not opts.setup:
+ # some core functionality is in modules
+ standard_modules = ['log']
+ for m in standard_modules:
+ process_stdin('module load %s' % m)
if opts.console:
process_stdin('module load console') | don't load log module in setup mode | py |
diff --git a/angr/analyses/decompiler/structured_codegen.py b/angr/analyses/decompiler/structured_codegen.py
index <HASH>..<HASH> 100644
--- a/angr/analyses/decompiler/structured_codegen.py
+++ b/angr/analyses/decompiler/structured_codegen.py
@@ -188,6 +188,8 @@ class CFunction(CConstruct): # pylint:disable=abstract-method
yield " ", None
if variable.name:
yield variable.name, cvariable
+ elif isinstance(variable, SimTemporaryVariable):
+ yield "tmp_%d" % variable.tmp_id, cvariable
else:
yield str(variable), cvariable
yield ";\n", None
@@ -748,6 +750,8 @@ class CVariable(CExpression):
if isinstance(self.variable, SimVariable):
if self.variable.name:
yield self.variable.name, self
+ elif isinstance(self.variable, SimTemporaryVariable):
+ yield "tmp_%d" % self.variable.tmp_id, self
else:
yield str(self.variable), self
elif isinstance(self.variable, CExpression): | StructuredCodeGen: Pretty print temporary variables. (#<I>) | py |
diff --git a/salt/modules/network.py b/salt/modules/network.py
index <HASH>..<HASH> 100644
--- a/salt/modules/network.py
+++ b/salt/modules/network.py
@@ -125,15 +125,15 @@ def subnets():
salt '*' network.subnets
'''
ifaces = interfaces()
- subnets = []
+ subnetworks = []
for ipv4_info in ifaces.values():
for ipv4 in ipv4_info.get('inet', []):
if ipv4['address'] == '127.0.0.1':
continue
network = _calculate_subnet(ipv4['address'], ipv4['netmask'])
- subnets.append(network)
- return subnets
+ subnetworks.append(network)
+ return subnetworks
def in_subnet(cidr): | network mod: rename subnets -> subnetworks, to avoid shadowing func | py |
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -16,9 +16,13 @@ setup(
version='1.0.5',
packages=[ 'ntlm_auth' ],
install_requires=[
- "six",
- "ordereddict ; python_version<'2.7'"
+ 'six'
],
+ extras_require={
+ ':python_version<"2.7"': [
+ 'ordereddict'
+ ]
+ },
author='Jordan Borean',
author_email='jborean93@gmail.com',
url='https://github.com/jborean93/ntlm-auth', | Fix environment markers being stripped when building python wheels (#<I>) | py |
diff --git a/bcbio/structural/manta.py b/bcbio/structural/manta.py
index <HASH>..<HASH> 100644
--- a/bcbio/structural/manta.py
+++ b/bcbio/structural/manta.py
@@ -105,6 +105,9 @@ def _prep_config(items, paired, work_dir):
cmd += ["--exome"]
for region in _maybe_limit_chromosomes(data):
cmd += ["--region", region]
+ resources = config_utils.get_resources("manta", data["config"])
+ if resources.get("options"):
+ cmd += [str(x) for x in resources["options"]]
# If we are removing polyX, avoid calling on small indels which require
# excessively long runtimes on noisy WGS runs
if "polyx" in dd.get_exclude_regions(data): | Manta: configManta command line tweaks with resources/options | py |
diff --git a/tests/pytests/unit/states/test_makeconf.py b/tests/pytests/unit/states/test_makeconf.py
index <HASH>..<HASH> 100644
--- a/tests/pytests/unit/states/test_makeconf.py
+++ b/tests/pytests/unit/states/test_makeconf.py
@@ -28,9 +28,6 @@ def test_present():
assert makeconf.present(name) == ret
-# 'absent' function tests: 1
-
-
def test_absent():
"""
Test to verify that the variable is not in the ``make.conf``. | Move makeconf state tests to pytest | py |
diff --git a/bika/lims/browser/analyses.py b/bika/lims/browser/analyses.py
index <HASH>..<HASH> 100644
--- a/bika/lims/browser/analyses.py
+++ b/bika/lims/browser/analyses.py
@@ -43,7 +43,6 @@ class AnalysesView(BikaListingView):
self.show_column_toggles = False
self.pagesize = 0
self.form_id = 'analyses_form'
- self.categories = []
self.portal = getToolByName(context, 'portal_url').getPortalObject()
self.portal_url = self.portal.absolute_url()
@@ -303,6 +302,7 @@ class AnalysesView(BikaListingView):
self.show_select_column = self.allow_edit
context_active = isActive(self.context)
+ self.categories = []
items = super(AnalysesView, self).folderitems(full_objects = True)
# manually skim retracted analyses from the list | LIMS-<I>: Prevent merging of field/lab categories This was causing empty categories to display, also. | py |
diff --git a/selectable/tests/views.py b/selectable/tests/views.py
index <HASH>..<HASH> 100644
--- a/selectable/tests/views.py
+++ b/selectable/tests/views.py
@@ -1,9 +1,9 @@
from django.http import HttpResponseNotFound, HttpResponseServerError
-def test_404(request):
+def test_404(request, *args, **kwargs):
return HttpResponseNotFound()
-def test_500(request):
+def test_500(request, *args, **kwargs):
return HttpResponseServerError() | <I> added another argument to the error handler views. | py |
diff --git a/salt/version.py b/salt/version.py
index <HASH>..<HASH> 100644
--- a/salt/version.py
+++ b/salt/version.py
@@ -574,7 +574,7 @@ def dependency_information(include_salt_cloud=False):
('msgpack-python', 'msgpack', 'version'),
('msgpack-pure', 'msgpack_pure', 'version'),
('pycrypto', 'Crypto', '__version__'),
- ('pycryptodome', 'Cryptodome', '__version__'),
+ ('pycryptodome', 'Cryptodome', 'version_info'),
('libnacl', 'libnacl', '__version__'),
('PyYAML', 'yaml', '__version__'),
('ioflo', 'ioflo', '__version__'), | fix the cryptodome version lookup for the versions report | py |
diff --git a/tests/unit/utils/test_templates.py b/tests/unit/utils/test_templates.py
index <HASH>..<HASH> 100644
--- a/tests/unit/utils/test_templates.py
+++ b/tests/unit/utils/test_templates.py
@@ -109,7 +109,7 @@ class RenderTestCase(TestCase):
ctx = dict(context)
ctx['var'] = 'OK'
res = salt.utils.templates.render_wempy_tmpl(tmpl, ctx)
- self.assertEqual(res, '<RU>OK</RU>')
+ self.assertEqual(res, 'OK')
### Tests for genshi template (xml-based)
def test_render_genshi_sanity(self): | Re-fixed the copy-pasta error in the wempy_variable test from unit.utils.test_templates so it doesn't check for xml that wasn't defined in the template. | py |
diff --git a/webssh/handler.py b/webssh/handler.py
index <HASH>..<HASH> 100644
--- a/webssh/handler.py
+++ b/webssh/handler.py
@@ -48,6 +48,10 @@ class MixinHandler(object):
raise InvalidValueError('Missing value {}'.format(name))
return value
+ def get_client_addr(self):
+ return self.get_real_client_addr() or self.request.connection.context.\
+ address
+
def get_real_client_addr(self):
ip = self.request.remote_ip
@@ -194,10 +198,6 @@ class IndexHandler(MixinHandler, tornado.web.RequestHandler):
logging.debug(args)
return args
- def get_client_addr(self):
- return self.get_real_client_addr() or self.request.connection.stream.\
- socket.getpeername()
-
def get_default_encoding(self, ssh):
try:
_, stdout, _ = ssh.exec_command('locale charmap')
@@ -277,9 +277,6 @@ class WsockHandler(MixinHandler, tornado.websocket.WebSocketHandler):
self.loop = loop
self.worker_ref = None
- def get_client_addr(self):
- return self.get_real_client_addr() or self.stream.socket.getpeername()
-
def open(self):
self.src_addr = self.get_client_addr()
logging.info('Connected from {}:{}'.format(*self.src_addr)) | Move get_client_addr to MixinHandler | py |
diff --git a/dynesty/nestedsamplers.py b/dynesty/nestedsamplers.py
index <HASH>..<HASH> 100644
--- a/dynesty/nestedsamplers.py
+++ b/dynesty/nestedsamplers.py
@@ -547,6 +547,11 @@ class MultiEllipsoidSampler(Sampler):
self.fmove = self.kwargs.get('fmove', 0.9)
self.max_move = self.kwargs.get('max_move', 100)
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ del state['rstate']
+ return state
+
def update(self, pointvol):
"""Update the bounding ellipsoids using the current set of
live points.""" | for pickling, remove 'rstate' from dictionary | py |
diff --git a/livelossplot/plot_losses.py b/livelossplot/plot_losses.py
index <HASH>..<HASH> 100644
--- a/livelossplot/plot_losses.py
+++ b/livelossplot/plot_losses.py
@@ -1,12 +1,12 @@
import warnings
from typing import Type, Tuple, List, Optional, TypeVar
-
from livelossplot.main_logger import MainLogger
from livelossplot.outputs import BaseOutput, MatplotlibPlot, ExtremaPrinter
BO = TypeVar('BO', bound=BaseOutput)
+
class PlotLosses:
"""
Class collect metrics from the training engine and send it to plugins, when send is called | fix(yapf) misc | py |
diff --git a/ontquery/plugins/services/rdflib.py b/ontquery/plugins/services/rdflib.py
index <HASH>..<HASH> 100644
--- a/ontquery/plugins/services/rdflib.py
+++ b/ontquery/plugins/services/rdflib.py
@@ -130,7 +130,7 @@ class rdflibLocal(OntService): # reccomended for local default implementation
#print(red.format('WARNING:'), 'untranslated predicate', p)
else:
c = pn
- if c in out:
+ if c in out and o not in out[c]:
if not isinstance(out[c], tuple):
out[c] = out.pop(c), o
else: | rdflib service dont add duplicate objects to query result under some circumstances both "asdf" and "asdf"^^xsd:string may be present in a graph, therefore skip cases where the python object is identical | py |
diff --git a/fundamentals/__version__.py b/fundamentals/__version__.py
index <HASH>..<HASH> 100644
--- a/fundamentals/__version__.py
+++ b/fundamentals/__version__.py
@@ -1 +1 @@
-__version__ = '2.1.7'
+__version__ = '2.2.0' | added a dbConn to the directory script runner and a execute_mysql_script function | py |
diff --git a/custodian/vasp/handlers.py b/custodian/vasp/handlers.py
index <HASH>..<HASH> 100644
--- a/custodian/vasp/handlers.py
+++ b/custodian/vasp/handlers.py
@@ -396,8 +396,13 @@ class VaspErrorHandler(ErrorHandler):
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "All"}}})
if "grad_not_orth" in self.errors:
- if vi["INCAR"].get("ISMEAR", 1) < 0:
- actions.append({"dict": "INCAR", "action": {"_set": {"ISMEAR": 0, "SIGMA": 0.05}}})
+ if (
+ (vi["INCAR"].get("Algo", "Normal") == "All"
+ and not vi["INCAR"].get("METAGGA", False)
+ and not vi["INCAR"].get("LHFCALC", False))
+ or vi["INCAR"].get("ALGO", "Normal") == "Damped"
+ ):
+ actions.append({"dict": "INCAR", "action": {"_set": {"Algo": "Normal"}}})
if "zheev" in self.errors:
if vi["INCAR"].get("ALGO", "Fast").lower() != "exact": | More appropriate grad_not_orth fix grad_not_orth is primarily an issue related to how VASP is compiled. Instead of changing ISMEAR, Custodian now checks to see if Algo = All or Algo = Damped is set, in which case Algo = Normal is the new algorithm. Algo = All is *not* modified if METAGGA = True or LHFCALC = True. | py |
diff --git a/src/anyconfig/globals/datatypes.py b/src/anyconfig/globals/datatypes.py
index <HASH>..<HASH> 100644
--- a/src/anyconfig/globals/datatypes.py
+++ b/src/anyconfig/globals/datatypes.py
@@ -27,6 +27,6 @@ class IOInfo(typing.NamedTuple):
extension: str
-IOI_KEYS: typing.Tuple[typing.Optional[typing.Any]] = IOInfo._fields
+IOI_KEYS: typing.Tuple[str, ...] = IOInfo._fields
# vim:sw=4:ts=4:et: | fix: correct an wrong type hint to .globals.IOI_KEYS | py |
diff --git a/test/integration/007_graph_selection_tests/test_graph_selection.py b/test/integration/007_graph_selection_tests/test_graph_selection.py
index <HASH>..<HASH> 100644
--- a/test/integration/007_graph_selection_tests/test_graph_selection.py
+++ b/test/integration/007_graph_selection_tests/test_graph_selection.py
@@ -234,6 +234,6 @@ class TestGraphSelection(DBTIntegrationTest):
user_last_end = users.timing[1]['completed_at']
dep_first_start = dep.timing[0]['started_at']
self.assertTrue(
- user_last_end < dep_first_start,
+ user_last_end <= dep_first_start,
'dependency started before its transitive parent ({} > {})'.format(user_last_end, dep_first_start)
fix a test bug: dbt doing two things in the same second incorrectly failed the test | py |
diff --git a/pinax/invitations/models.py b/pinax/invitations/models.py
index <HASH>..<HASH> 100644
--- a/pinax/invitations/models.py
+++ b/pinax/invitations/models.py
@@ -39,7 +39,7 @@ class JoinInvitation(models.Model):
message = models.TextField(null=True)
sent = models.DateTimeField(default=timezone.now)
status = models.IntegerField(choices=INVITE_STATUS_CHOICES)
- signup_code = models.OneToOneField(SignupCode)
+ signup_code = models.OneToOneField(SignupCode, on_delete=models.CASCADE)
def to_user_email(self):
return self.signup_code.email | Add another `on_delete` | py |
diff --git a/src/hamster/overview_activities.py b/src/hamster/overview_activities.py
index <HASH>..<HASH> 100644
--- a/src/hamster/overview_activities.py
+++ b/src/hamster/overview_activities.py
@@ -140,7 +140,7 @@ class OverviewBox(gtk.VBox):
self.delete_selected()
return True
elif (event.keyval == gtk.keysyms.Insert):
- dialogs.edit.show()
+ self.launch_edit(self.fact_tree.get_selected_fact())
return True
elif event.keyval == gtk.keysyms.c and event.state & gtk.gdk.CONTROL_MASK:
self.copy_selected() | keyboard shortcut points should point to same launch edit action (so that the fact gets passed in and such) | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.