diff
stringlengths 139
3.65k
| message
stringlengths 8
627
| diff_languages
stringclasses 1
value |
|---|---|---|
diff --git a/striplog/utils.py b/striplog/utils.py
index <HASH>..<HASH> 100644
--- a/striplog/utils.py
+++ b/striplog/utils.py
@@ -16,19 +16,22 @@ class CustomFormatter(Formatter):
def __init__(self):
super(CustomFormatter, self).__init__()
- self.last_index = 0
- def get_value(self, key, args, kwargs):
- if key == '':
- key = self.last_index
- self.last_index += 1
- return super(CustomFormatter, self).get_value(key, args, kwargs)
-
- def parse(self, format_string):
- # We'll leave this alone.
- return super(CustomFormatter, self).parse(format_string)
+ def get_field(self, field_name, args, kwargs):
+ """
+ Return an underscore if the attribute is absent.
+ Not all components have the same attributes.
+ """
+ try:
+ s = super(CustomFormatter, self)
+ return s.get_field(field_name, args, kwargs)
+ except KeyError:
+ return ("_", field_name)
def convert_field(self, value, conversion):
+ """
+ Define some extra field conversion functions.
+ """
try: # If the normal behaviour works, do it.
s = super(CustomFormatter, self)
return s.convert_field(value, conversion)
|
cope with missing fields in fmt
|
py
|
diff --git a/tests/test_io.py b/tests/test_io.py
index <HASH>..<HASH> 100644
--- a/tests/test_io.py
+++ b/tests/test_io.py
@@ -223,3 +223,16 @@ def test_save_bytesio(resources, outpdf):
assert bio_value != b''
pdf.save(outpdf, static_id=True)
assert outpdf.read_bytes() == bio_value
+
+
+def test_save_failure(sandwich, outdir):
+ dest = outdir / 'notwritable.pdf'
+
+ # This should work on Windows since Python maps the read-only bit
+ dest.touch(mode=0o444, exist_ok=False)
+ if dest.stat().st_mode & 0o400 != 0o400:
+ pytest.skip("Couldn't create a read-only file")
+
+ # Now try to overwrite
+ with pytest.raises(PermissionError, match="denied"):
+ sandwich.save(dest)
|
tests: confirm we can handle permissions errors By proxy this covers other errors that have to do with opening the destination file.
|
py
|
diff --git a/openquake/calculators/views.py b/openquake/calculators/views.py
index <HASH>..<HASH> 100644
--- a/openquake/calculators/views.py
+++ b/openquake/calculators/views.py
@@ -387,14 +387,20 @@ def view_totlosses(token, dstore):
return rst_table(tot_losses.view(oq.loss_dt()), fmt='%.6E')
-# for event based risk
+# for event based risk and ebrisk
def portfolio_loss(dstore):
array = dstore['losses_by_event'].value
- L, = array.dtype['loss'].shape
- R = dstore['csm_info'].get_num_rlzs()
- data = numpy.zeros((R, L), F32)
- for row in array:
- data[row['rlzi']] += row['loss']
+ if array.dtype.names: # for event based risk
+ L, = array.dtype['loss'].shape
+ R = dstore['csm_info'].get_num_rlzs()
+ data = numpy.zeros((R, L), F32)
+ for row in array:
+ data[row['rlzi']] += row['loss']
+ else: # arrays has shape (E, L)
+ rlzs = dstore['events']['rlz']
+ w = dstore['csm_info/weights']
+ weights = w[w.dtype.names[0]]
+ data = numpy.array([(arr * weights[rlzs]).sum() for arr in array.T])
return data
|
Extended the portfolio_losses view to ebrisk [skip CI] Former-commit-id: <I>fa6fd<I>e6ac<I>fc9dbd4b<I>eb5efd6c<I>e
|
py
|
diff --git a/test/integration/failover_script.py b/test/integration/failover_script.py
index <HASH>..<HASH> 100755
--- a/test/integration/failover_script.py
+++ b/test/integration/failover_script.py
@@ -7,7 +7,7 @@ up_file = "failover_test_elb_up"
down_file = "failover_test_elb_down"
def test(opts, server, repli_server, test_dir):
- time.sleep(10) #sometimes servers seem to take slightly longer for listening on the elb port
+ time.sleep(10) # TODO: why is this necessary?
os.system("%s reset" % opts["failover-script"])
if (os.path.exists(up_file) or os.path.exists(down_file)):
|
(uhm, commited a wrong comment in the test script)
|
py
|
diff --git a/bakery/management/commands/publish.py b/bakery/management/commands/publish.py
index <HASH>..<HASH> 100644
--- a/bakery/management/commands/publish.py
+++ b/bakery/management/commands/publish.py
@@ -89,12 +89,12 @@ in settings.py or provide it with --aws-bucket-name"
# Execute the command
subprocess.call(cmd, shell=True)
- # only gzipping rendered html views for now
+ # only gzipping rendered html views and sitemaps for now
# once we get our collect static routines worked out we should include
# .css and .js too
def sync_gzipped_files(self, options):
- gzip_file_match = getattr(settings, 'GZIP_FILE_MATCH', '*.html')
- cmd = "s3cmd sync --exclude '*.*' --include '%s' --add-header='Content-Encoding: gzip' --acl-public" % gzip_file_match
+ gzip_file_match = getattr(settings, 'GZIP_FILE_MATCH', '*.(html|xml)')
+ cmd = "s3cmd sync --exclude '*.*' --rinclude '%s' --add-header='Content-Encoding: gzip' --acl-public" % gzip_file_match
self.sync(cmd, options)
# The s3cmd basic command, before we append all the options.
|
adding in XML files to the default gzip s3cmd include
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -53,7 +53,7 @@ setup(
'file-contents-sorter = pre_commit_hooks.file_contents_sorter:main',
'fix-encoding-pragma = pre_commit_hooks.fix_encoding_pragma:main',
'forbid-new-submodules = pre_commit_hooks.forbid_new_submodules:main',
- 'mixed-line-ending = pre_commit_hooks.mixed_line_ending:mixed_line_ending',
+ 'mixed-line-ending = pre_commit_hooks.mixed_line_ending:main',
'name-tests-test = pre_commit_hooks.tests_should_end_in_test:validate_files',
'no-commit-to-branch = pre_commit_hooks.no_commit_to_branch:main',
'pretty-format-json = pre_commit_hooks.pretty_format_json:pretty_format_json',
|
Fix mixed-line-endings entrypoint
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@ with open(readme_path, 'r', encoding='utf-8') as fh:
def main():
- install_list = ['pysb>=1.3.0', 'objectpath', 'rdflib',
+ install_list = ['pysb>=1.3.0,<=1.9.1', 'objectpath', 'rdflib',
'requests>=2.11', 'lxml', 'ipython', 'future',
'networkx>=2,<=2.3', 'pandas', 'ndex2==2.0.1', 'jinja2',
'protmapper>=0.0.14']
|
Add upper constraint on PySB version
|
py
|
diff --git a/utils/bugzilla2el.py b/utils/bugzilla2el.py
index <HASH>..<HASH> 100755
--- a/utils/bugzilla2el.py
+++ b/utils/bugzilla2el.py
@@ -59,6 +59,8 @@ def parse_args():
help="Number of XML issues to get per query")
parser.add_argument("--cache", action='store_true',
help="Use perseval cache")
+ parser.add_argument("--debug", action='store_true',
+ help="Increase logging to debug")
args = parser.parse_args()
@@ -222,12 +224,17 @@ class BugzillaElastic(object):
if __name__ == '__main__':
+
+ args = parse_args()
+
app_init = datetime.now()
- # logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
- logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
+
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
+ if args.debug:
+ logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
logging.getLogger("requests").setLevel(logging.WARNING)
- args = parse_args()
+
bugzilla = Bugzilla(args.url, args.nissues, args.detail,
not args.no_history, args.cache)
|
Added option --debug to increase logging.
|
py
|
diff --git a/worker/step_runner.py b/worker/step_runner.py
index <HASH>..<HASH> 100755
--- a/worker/step_runner.py
+++ b/worker/step_runner.py
@@ -339,7 +339,7 @@ class StepRunner:
if self.settings.get('WORKER_LOGFILE') is None:
return logging.StreamHandler()
else:
- if not os.path.exists(os.path.dirname(self.settings['WORKER_LOGFILE']):
+ if not os.path.exists(os.path.dirname(self.settings['WORKER_LOGFILE'])):
os.makedirs(os.path.dirname(self.settings['WORKER_LOGFILE']))
return logging.FileHandler(self.settings['WORKER_LOGFILE'])
|
Fixed bug in merging step_runner.py.
|
py
|
diff --git a/pydot.py b/pydot.py
index <HASH>..<HASH> 100644
--- a/pydot.py
+++ b/pydot.py
@@ -1841,9 +1841,10 @@ class Dot(Graph):
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
except OSError as e:
if e.errno == os.errno.ENOENT:
- raise Exception(
- '"{prog}" not found in path.'.format(
- prog=prog))
+ args = list(e.args)
+ args[1] = '"{prog}" not found in path.'.format(
+ prog=prog)
+ raise OSError(*args)
else:
raise
stdout_data, stderr_data = p.communicate()
|
API: raise `OSError` when a GraphViz executable is not found Python 3 instantiates a subclass of `OSError` that is more specific to the kind of error raised [1]. By raising an error more specific than `Exception`, it can be recognized by packages that use `pydot`. [1] <URL>
|
py
|
diff --git a/pygelf/gelf.py b/pygelf/gelf.py
index <HASH>..<HASH> 100644
--- a/pygelf/gelf.py
+++ b/pygelf/gelf.py
@@ -38,7 +38,7 @@ def make(record, domain, debug, version, additional_fields, include_extra_fields
'host': domain
}
- if record.exc_info is not None:
+ if record.exc_info:
gelf['full_message'] = '\n'.join(traceback.format_exception(*record.exc_info))
elif record.exc_text is not None:
# QueueHandler, if used, formats the record, so that exc_info will always be empty:
|
Handle exc_info that's false but not None Closes #<I>.
|
py
|
diff --git a/python/thunder/clustering/kmeans.py b/python/thunder/clustering/kmeans.py
index <HASH>..<HASH> 100644
--- a/python/thunder/clustering/kmeans.py
+++ b/python/thunder/clustering/kmeans.py
@@ -2,7 +2,7 @@
Classes for KMeans clustering
"""
-from numpy import array, argmin, corrcoef, ndarray
+from numpy import array, argmin, corrcoef, ndarray, asarray, std
from scipy.spatial.distance import cdist
from thunder.rdds import Series
@@ -82,7 +82,8 @@ class KMeansModel(object):
For each data point, gives the similarity to its nearest cluster
"""
- similarity = lambda centers, p: corrcoef(centers[argmin(cdist(centers, array([p])))], p)[0, 1]
+ similarity = lambda centers, p: 0 if std(p) == 0 else \
+ corrcoef(centers[argmin(cdist(centers, array([p])))], p)[0, 1]
out = self.calc(data, similarity)
if isinstance(data, Series):
out._index = 'similarity'
|
Prevent NaNs for series with 0 std
|
py
|
diff --git a/pylint/checkers/format.py b/pylint/checkers/format.py
index <HASH>..<HASH> 100644
--- a/pylint/checkers/format.py
+++ b/pylint/checkers/format.py
@@ -1000,8 +1000,12 @@ class FormatChecker(BaseTokenChecker):
# Don't count excess whitespace in the line length.
line = stripped_line
mobj = OPTION_RGX.search(line)
- if mobj and mobj.group(1).split('=', 1)[0].strip() == 'disable':
- line = line.split('#')[0].rstrip()
+ if mobj:
+ front_of_equal, back_of_equal = mobj.group(1).split('=', 1)
+ if front_of_equal.strip() == 'disable':
+ if 'line-too-long' in [_msg_id.strip() for _msg_id in back_of_equal.split(',')]:
+ return
+ line = line.rsplit('#', 1)[0].rstrip()
if len(line) > max_chars and not ignore_long_line.search(line):
self.add_message('line-too-long', line=i, args=(len(line), max_chars))
|
Modifying the way results of OPTION_RGX.search(line) are exploited
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -77,14 +77,14 @@ PKG_DESCRIBE = "DESCRIPTION.md"
## Directories to ignore in find_packages
EXCLUDES = (
- "tests",
+ "tests", "tests.*",
"bin",
- "docs",
+ "docs", "docs.*",
"fixtures",
"register",
- "notebooks",
- "examples",
- "binder",
+ "notebooks", "notebooks.*",
+ "examples", "examples.*",
+ "binder", "binder.*",
"paper",
)
|
Do not install tests subpackages with pip (#<I>) The new module rules mean that directories without a __init__.py file can be found, which meant that the find_packages method was discovering tests subpackages. By adding the wildcard flag to excludes, we omit these packages from our builds and they will not be installed by pip or setup.py on the next release. Fixes #<I>
|
py
|
diff --git a/py3status/modules/backlight.py b/py3status/modules/backlight.py
index <HASH>..<HASH> 100644
--- a/py3status/modules/backlight.py
+++ b/py3status/modules/backlight.py
@@ -96,8 +96,11 @@ class Py3status:
return brightness * 100 // brightness_max
def backlight(self):
- level = self._get_backlight_level()
- full_text = self.py3.safe_format(self.format, {'level': level})
+ full_text = ""
+ if self.device_path is not None:
+ level = self._get_backlight_level()
+ full_text = self.py3.safe_format(self.format, {'level': level})
+
response = {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': full_text
|
Added check for device presence in backlight.py
|
py
|
diff --git a/tests/test_symbolic_tester.py b/tests/test_symbolic_tester.py
index <HASH>..<HASH> 100644
--- a/tests/test_symbolic_tester.py
+++ b/tests/test_symbolic_tester.py
@@ -53,5 +53,4 @@ def test_tester_magma_internal_signals_verilator(target, solver):
kwargs["magma_opts"] = {"passes": ["rungenerators", "flatten",
"cullgraph"]}
kwargs["solver"] = solver
- _dir = "build"
tester.compile_and_run(target, directory=_dir, **kwargs)
|
Revert to using tempdir for test
|
py
|
diff --git a/src/you_get/common.py b/src/you_get/common.py
index <HASH>..<HASH> 100755
--- a/src/you_get/common.py
+++ b/src/you_get/common.py
@@ -916,7 +916,10 @@ def download_urls(
return
if dry_run:
print_user_agent(faker=faker)
- print('Real URLs:\n%s' % '\n'.join(urls))
+ try:
+ print('Real URLs:\n%s' % '\n'.join(urls))
+ except:
+ print('Real URLs:\n%s' % '\n'.join([j for i in urls for j in i]))
return
if player:
|
[common] download_urls(): fix URL printing for DASH streams
|
py
|
diff --git a/src/chemcoord/cartesian_coordinates/_cartesian_class_get_zmat.py b/src/chemcoord/cartesian_coordinates/_cartesian_class_get_zmat.py
index <HASH>..<HASH> 100644
--- a/src/chemcoord/cartesian_coordinates/_cartesian_class_get_zmat.py
+++ b/src/chemcoord/cartesian_coordinates/_cartesian_class_get_zmat.py
@@ -504,7 +504,6 @@ class CartesianGetZmat(CartesianCore):
if isinstance(c_table, pd.Series):
c_table = pd.DataFrame(c_table).T
else:
- print(c_table)
c_table = np.array(c_table)
if len(c_table.shape) == 1:
c_table = c_table[None, :]
|
MAINT: Removed forgotten debugging print statement
|
py
|
diff --git a/sc2/client.py b/sc2/client.py
index <HASH>..<HASH> 100644
--- a/sc2/client.py
+++ b/sc2/client.py
@@ -169,15 +169,16 @@ class Client(Protocol):
))
return [ActionResult(p.result) for p in result.query.placements]
- async def query_available_abilities(self, unit):
- assert isinstance(unit, Unit)
+ async def query_available_abilities(self, units):
+ if not isinstance(units, list):
+ assert isinstance(units, Unit)
+ units = [units]
+ assert len(units) > 0
result = await self._execute(query=query_pb.RequestQuery(
abilities=[query_pb.RequestQueryAvailableAbilities(
- unit_tag=unit.tag
- )]
+ unit_tag=unit.tag) for unit in units]
))
- return [AbilityId(a.ability_id) for a in result.query.abilities[0].abilities]
-
+ return [[AbilityId(a.ability_id) for a in b.abilities] for b in result.query.abilities]
async def chat_send(self, message, team_only):
ch = ChatChannel.Team if team_only else ChatChannel.Broadcast
|
Change query_available_abilities to allow multiple per call requests
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -29,6 +29,7 @@ install_requires = [
'djangosaml2>=0.11.0,<0.12',
'drf-extensions==0.2.6',
'logan==0.5.9.1',
+ 'ordereddict==1.1',
'python-cinderclient>=1.0.7,<=1.1.1',
'python-glanceclient>=0.12.0,<0.13.0',
'python-keystoneclient>=0.9.0,<0.11.2',
|
ordereddict added to requirements (itacloud-<I>)
|
py
|
diff --git a/green/djangorunner.py b/green/djangorunner.py
index <HASH>..<HASH> 100644
--- a/green/djangorunner.py
+++ b/green/djangorunner.py
@@ -75,7 +75,7 @@ try:
def __init__(self, verbose=1, **kwargs):
- super ().__init__ ()
+ super(DjangoRunner, self).__init__()
self.verbose = verbose
@classmethod
|
[FIX] Fixed super() call for Python 2.x
|
py
|
diff --git a/lib/ansiblereview/version.py b/lib/ansiblereview/version.py
index <HASH>..<HASH> 100644
--- a/lib/ansiblereview/version.py
+++ b/lib/ansiblereview/version.py
@@ -1 +1 @@
-__version__ = '0.4.1'
+__version__ = '0.4.2'
|
Update version to <I> New version for removal of Fabric
|
py
|
diff --git a/pcef/panels/lines.py b/pcef/panels/lines.py
index <HASH>..<HASH> 100644
--- a/pcef/panels/lines.py
+++ b/pcef/panels/lines.py
@@ -84,7 +84,6 @@ class LineNumberPanel(Panel):
normal_font.setBold(False)
bold_font = QFont(normal_font)
bold_font.setBold(True)
- bold_font.setItalic(True)
active = self.editor.codeEdit.textCursor().blockNumber()
for vb in self.editor.codeEdit.visible_blocks:
row = vb.row
|
Removes weird italic highlight (only bold is more than enough)
|
py
|
diff --git a/python/orca/src/bigdl/orca/common.py b/python/orca/src/bigdl/orca/common.py
index <HASH>..<HASH> 100644
--- a/python/orca/src/bigdl/orca/common.py
+++ b/python/orca/src/bigdl/orca/common.py
@@ -120,6 +120,19 @@ class OrcaContextMeta(type):
"shard size should be either None or a positive integer."
cls.__shard_size = value
+ @property
+ def barrier_mode(cls):
+ """
+ Whether to use Spark barrier mode to launch Ray, which is supported in Spark 2.4+ and when
+ dynamic allocation is disabled.
+ Default to be True.
+ """
+ return ZooContext.barrier_mode
+
+ @barrier_mode.setter
+ def barrier_mode(cls, value):
+ ZooContext.barrier_mode = value
+
class OrcaContext(metaclass=OrcaContextMeta):
@staticmethod
|
Add support for non-barrier mode to launch ray (#<I>) * add support for non-barrier mode * fix style * meet review * meet review * move barrier mode to zoocontext * bug fix * modify * update
|
py
|
diff --git a/tests/dtypes_test.py b/tests/dtypes_test.py
index <HASH>..<HASH> 100644
--- a/tests/dtypes_test.py
+++ b/tests/dtypes_test.py
@@ -102,7 +102,7 @@ class DtypesTest(jtu.JaxTestCase):
op = jax.jit(operator.add) if jit else operator.add
for x, y, dtype in testcases:
x, y = (y, x) if swap else (x, y)
- z = x + y
+ z = op(x, y)
self.assertTrue(isinstance(z, jnp.ndarray), msg=(x, y, z))
self.assertEqual(z.dtype, dtypes.canonicalize_dtype(dtype), msg=(x, y, z))
|
fix binary promotion test (#<I>)
|
py
|
diff --git a/src/hamster/edit_activity.py b/src/hamster/edit_activity.py
index <HASH>..<HASH> 100644
--- a/src/hamster/edit_activity.py
+++ b/src/hamster/edit_activity.py
@@ -104,6 +104,7 @@ class CustomFactController(gobject.GObject):
# This signal should be emitted only after a manual modification,
# not at init time when cmdline might not always be fully parsable.
self.cmdline.connect("changed", self.on_cmdline_changed)
+ self.start_time.connect("changed", self.on_start_time_changed)
self.start_date.connect("day-selected", self.on_start_date_changed)
self.end_date.connect("day-selected", self.on_end_date_changed)
self.activity_entry.connect("changed", self.on_activity_changed)
@@ -202,6 +203,18 @@ class CustomFactController(gobject.GObject):
self.validate_fields()
self.update_cmdline()
+ def on_start_time_changed(self, widget):
+ if not self.master_is_cmdline:
+ previous_time = self.fact.start_time.time()
+ new_time = self.start_time.time
+ if new_time:
+ date = self.fact.start_time.date()
+ self.fact.start_time = dt.datetime.combine(date, new_time)
+ else:
+ self.fact.start_time = None
+ self.validate_fields()
+ self.update_cmdline()
+
def on_tags_changed(self, widget):
if not self.master_is_cmdline:
self.fact.tags = self.tags_entry.get_tags()
|
update start_time from gui entry
|
py
|
diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py
index <HASH>..<HASH> 100644
--- a/pandas/tests/reshape/test_melt.py
+++ b/pandas/tests/reshape/test_melt.py
@@ -270,6 +270,16 @@ class TestMelt:
expected.columns = ["klass", "col", "attribute", "value"]
tm.assert_frame_equal(result, expected)
+ def test_preserve_category(self):
+ # GH 15853
+ data = DataFrame({"A": [1, 2], "B": pd.Categorical(["X", "Y"])})
+ result = pd.melt(data, ["B"], ["A"])
+ expected = DataFrame(
+ {"B": pd.Categorical(["X", "Y"]), "variable": ["A", "A"], "value": [1, 2]}
+ )
+
+ tm.assert_frame_equal(result, expected)
+
def test_melt_missing_columns_raises(self):
# GH-23575
# This test is to ensure that pandas raises an error if melting is
|
add test for pd.melt dtypes preservation (#<I>)
|
py
|
diff --git a/holoviews/plotting/mpl/stats.py b/holoviews/plotting/mpl/stats.py
index <HASH>..<HASH> 100644
--- a/holoviews/plotting/mpl/stats.py
+++ b/holoviews/plotting/mpl/stats.py
@@ -197,7 +197,8 @@ class ViolinPlot(BoxPlot):
label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])
else:
label = key
- data.append(group[group.vdims[0]])
+ d = group[group.vdims[0]]
+ data.append(d[np.isfinite(d)])
labels.append(label)
colors.append(elstyle[i].get('facecolors', 'blue'))
style['positions'] = list(range(len(data)))
@@ -208,6 +209,7 @@ class ViolinPlot(BoxPlot):
element = element.aggregate(function=np.mean)
else:
element = element.clone([(element.aggregate(function=np.mean),)])
+
new_style = self._apply_transforms(element, ranges, style)
style = {k: v for k, v in new_style.items()
if k not in ['zorder', 'label']}
|
Fix Violin matplotlib rendering with non-finite values (#<I>)
|
py
|
diff --git a/grimoire_elk/ocean/crates.py b/grimoire_elk/ocean/crates.py
index <HASH>..<HASH> 100644
--- a/grimoire_elk/ocean/crates.py
+++ b/grimoire_elk/ocean/crates.py
@@ -29,3 +29,10 @@ class CratesOcean(ElasticOcean):
"""Confluence Ocean feeder"""
pass
+
+ @classmethod
+ def get_perceval_params_from_url(cls, url):
+ # crates does not need any param
+ params = []
+
+ return params
|
[ocean][crates] Remove all params to be passed to the perceval backend The perceval backend for crates does not need any repository param. But we need to add an empty repository to projects.json so the backend is executed by mordred. Before creating the real perceval backend we need to remove this fake repository param: Contents of projects.json: { "grimoirelab": { "crates": [""] ...
|
py
|
diff --git a/picuplib/upload.py b/picuplib/upload.py
index <HASH>..<HASH> 100644
--- a/picuplib/upload.py
+++ b/picuplib/upload.py
@@ -195,16 +195,28 @@ def upload(apikey, picture, resize=None, rotation='00', noexif=False,
Need to take one argument. you can use the len function to determine \
the body length and call bytes_read().
"""
+
+ if isinstance(picture, str):
+ with open(picture, 'rb') as file_obj:
+ picture_name = picture
+ data = file_obj.read()
+ elif isinstance(picture, (tuple, list)):
+ picture_name = picture[0]
+ data = picture[1]
+ else:
+ raise TypeError("The second argument must be str or list/tuple. "
+ "Please refer to the documentation for details.")
+
+
check_rotation(rotation)
check_resize(resize)
check_callback(callback)
post_data = compose_post(apikey, resize, rotation, noexif)
- with open(picture, 'rb') as file_obj:
- post_data['Datei[]'] = (punify_filename(basename(picture)), file_obj)
+ post_data['Datei[]'] = (punify_filename(basename(picture_name)), data)
- return do_upload(post_data, callback)
+ return do_upload(post_data, callback)
# pylint: enable=too-many-arguments
|
add support for handing over image data directly
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -21,12 +21,23 @@ def delete_old_slimit_files():
os.remove(file_path)
-class new_install(install):
- def __init__(self, *args, **kwargs):
- super(new_install, self).__init__(*args, **kwargs)
- atexit.register(delete_old_slimit_files)
+class PostDevelopCommand(develop):
+ """Post-installation for development mode."""
+ def run(self):
+ def _post_install():
+ delete_old_slimit_files()
+ atexit.register(_post_install)
+ develop.run(self)
+class PostInstallCommand(install):
+ """Post-installation for installation mode."""
+ def run(self):
+ def _post_install():
+ delete_old_slimit_files()
+ atexit.register(_post_install)
+ install.run(self)
+
setup(name='fam',
version='2.0.0',
@@ -48,6 +59,8 @@ setup(name='fam',
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
- cmdclass={'install': new_install}
+ cmdclass={
+ 'develop': PostDevelopCommand,
+ 'install': PostInstallCommand,
+ }
)
-
|
fix for broken slimit/ply again
|
py
|
diff --git a/cookiecutter/config.py b/cookiecutter/config.py
index <HASH>..<HASH> 100644
--- a/cookiecutter/config.py
+++ b/cookiecutter/config.py
@@ -39,7 +39,7 @@ def _expand_path(path):
def get_config(config_path):
- """Retrieve the config from the specified path, returning it as a config dict."""
+ """Retrieve the config from the specified path, returning a config dict."""
if not os.path.exists(config_path):
raise ConfigDoesNotExistException
|
Edit get_config docstring to trim to <I> characters
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -10,19 +10,6 @@ def here(*paths):
return os.path.abspath(os.path.join(os.path.dirname(__file__), *paths))
-try:
- import brotli.brotli
-except ImportError:
- # installing - there is no cffi yet
- ext_modules = []
-else:
- # building bdist - cffi is here!
- ext_modules = [brotli.brotli.ffi.verifier.get_extension()]
- ext_modules[0].include_dirs.extend(
- [here("libbrotli/dec"), here("libbrotli/enc")]
- )
-
-
setup(
name=brotli.__title__,
version=brotli.__version__,
@@ -49,7 +36,6 @@ setup(
],
ext_package="brotli",
- ext_modules=ext_modules,
libraries=[
("libbrotli", {
@@ -93,4 +79,4 @@ setup(
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
]
-)
\ No newline at end of file
+)
|
Remove some ahead-of-time building.
|
py
|
diff --git a/src/flask_assets.py b/src/flask_assets.py
index <HASH>..<HASH> 100644
--- a/src/flask_assets.py
+++ b/src/flask_assets.py
@@ -229,6 +229,9 @@ class FlaskResolver(Resolver):
If app.config("FLASK_ASSETS_USE_S3") exists and is True
then we import the url_for function from flask.ext.s3,
otherwise we import url_for from flask directly.
+
+ If app.config("FLASK_ASSETS_USE_CDN") exists and is True
+ then we import the url_for function from flask.
"""
if ctx.environment._app.config.get("FLASK_ASSETS_USE_S3"):
try:
@@ -236,6 +239,12 @@ class FlaskResolver(Resolver):
except ImportError as e:
print("You must have Flask S3 to use FLASK_ASSETS_USE_S3 option")
raise e
+ elif ctx.environment._app.config.get("FLASK_ASSETS_USE_CDN"):
+ try:
+ from flask.ext.cdn import url_for
+ except ImportError as e:
+ print("You must have Flask CDN to use FLASK_ASSETS_USE_CDN option")
+ raise e
else:
from flask import url_for
|
adding option to use flask-cdn
|
py
|
diff --git a/kerncraft/models/layer_condition.py b/kerncraft/models/layer_condition.py
index <HASH>..<HASH> 100755
--- a/kerncraft/models/layer_condition.py
+++ b/kerncraft/models/layer_condition.py
@@ -175,7 +175,6 @@ class LC(object):
results['dimensions'][dimension]['caches'] = {}
for cl in csim.levels(with_mem=False):
cache_equation = sympy.Eq(cache_requirement_bytes, cl.size())
- print(sympy.LessThan(cache_requirement_bytes, cl.size()))
if len(self.kernel.constants.keys()) <= 1:
inequality = sympy.solve(sympy.LessThan(cache_requirement_bytes, cl.size()),
*self.kernel.constants.keys())
|
removed debug output in LC module
|
py
|
diff --git a/taskrunner/__main__.py b/taskrunner/__main__.py
index <HASH>..<HASH> 100644
--- a/taskrunner/__main__.py
+++ b/taskrunner/__main__.py
@@ -19,7 +19,7 @@ def main(argv=None):
When a task name is encountered in ``argv``, it will be considered
the starting point of the next task *unless* the previous item in
- ``arv`` was an option like ``--xyz`` that expects a value (i.e.,
+ ``argv`` was an option like ``--xyz`` that expects a value (i.e.,
it's not a flag).
To avoid ambiguity when an option value matches a task name, the
|
Fix typo in main() docstring
|
py
|
diff --git a/sos/report/plugins/ssh.py b/sos/report/plugins/ssh.py
index <HASH>..<HASH> 100644
--- a/sos/report/plugins/ssh.py
+++ b/sos/report/plugins/ssh.py
@@ -19,9 +19,28 @@ class Ssh(Plugin, IndependentPlugin):
profiles = ('services', 'security', 'system', 'identity')
def setup(self):
- self.add_copy_spec([
+ sshcfgs = [
"/etc/ssh/ssh_config",
"/etc/ssh/sshd_config"
- ])
+ ]
+
+ # Include main config files
+ self.add_copy_spec(sshcfgs)
+
+ # Read configs for any includes and copy those
+ try:
+ for sshcfg in sshcfgs:
+ with open(sshcfg, 'r') as cfgfile:
+ for line in cfgfile:
+ # skip empty lines and comments
+ if len(line.split()) == 0 or line.startswith('#'):
+ continue
+ # ssh_config keywords are allowed as case-insensitive
+ if line.lower().startswith('include'):
+ confarg = line.split()
+ self.add_copy_spec(confarg[1])
+ except Exception:
+ pass
+
# vim: set et ts=4 sw=4 :
|
[ssh] SSH and SSHD configurations may be fragmented The SSH configurations may have an include directive. This adjustment to the plugin will copy in the main configs and then read those for any include keywords and attempt to copy those as well. Resolves: #<I>
|
py
|
diff --git a/can/io/generic.py b/can/io/generic.py
index <HASH>..<HASH> 100644
--- a/can/io/generic.py
+++ b/can/io/generic.py
@@ -73,7 +73,7 @@ class MessageWriter(BaseIOHandler, can.Listener, metaclass=ABCMeta):
# pylint: disable=abstract-method,too-few-public-methods
class FileIOMessageWriter(MessageWriter, metaclass=ABCMeta):
- """The base class for all writers."""
+ """A specialized base class for all writers with file descriptors."""
file: Union[TextIO, BinaryIO]
|
Update docstring of can.io.generic.FileIOMessageWriter (#<I>)
|
py
|
diff --git a/avatar/admin.py b/avatar/admin.py
index <HASH>..<HASH> 100644
--- a/avatar/admin.py
+++ b/avatar/admin.py
@@ -4,7 +4,7 @@ try:
from django.utils import six
except ImportError:
import six
-from django.utils.translation import ugettext_lazy as _
+from django.utils.translation import gettext_lazy as _
from django.template.loader import render_to_string
from avatar.models import Avatar
|
Django <I>: ugettext_lazy -> gettext_lazy
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -37,5 +37,5 @@ setup(
'wpconfigr'
],
url='https://github.com/cariad/py-wpconfigr',
- version='1.0'
+ version='1.1'
)
|
Increment version to <I>
|
py
|
diff --git a/tools/c7n_trailcreator/c7n_trailcreator/trailcreator.py b/tools/c7n_trailcreator/c7n_trailcreator/trailcreator.py
index <HASH>..<HASH> 100644
--- a/tools/c7n_trailcreator/c7n_trailcreator/trailcreator.py
+++ b/tools/c7n_trailcreator/c7n_trailcreator/trailcreator.py
@@ -763,7 +763,7 @@ def tag(assume, region, db, creator_tag, user_suffix, dryrun,
"""Tag resources with their creator.
"""
trail_db = TrailDB(db)
- load_resources()
+ load_resources(resource_types=('aws.*',))
with temp_dir() as output_dir:
config = ExecConfig.empty(
|
tools/c7n_trailcreator - load only aws provider resource types (#<I>)
|
py
|
diff --git a/compiler/js/code.py b/compiler/js/code.py
index <HASH>..<HASH> 100644
--- a/compiler/js/code.py
+++ b/compiler/js/code.py
@@ -14,7 +14,7 @@ def replace_enums(text, generator, registry):
return text
-id_re = re.compile(r'([_a-z]\w*)\.')
+id_re = re.compile(r'(?<!\.)([_a-z]\w*)\.')
def process(text, generator, registry):
id_set = registry.id_set
used_ids = set()
|
added negative look behind for \. to match only first id used
|
py
|
diff --git a/scout/server/blueprints/cases/views.py b/scout/server/blueprints/cases/views.py
index <HASH>..<HASH> 100644
--- a/scout/server/blueprints/cases/views.py
+++ b/scout/server/blueprints/cases/views.py
@@ -114,7 +114,7 @@ def phenotypes_actions(institute_id, case_name):
store.remove_phenotype(institute_obj, case_obj, user_obj, case_url, hpo_id)
elif action == 'PHENOMIZER':
if len(hpo_ids) == 0:
- hpo_ids = [term['phenotype_id'] for term in case_obj['phenotype_terms']]
+ hpo_ids = [term['phenotype_id'] for term in case_obj.get('phenotype_terms', [])]
username = current_app.config['PHENOMIZER_USERNAME']
password = current_app.config['PHENOMIZER_PASSWORD']
|
fix issue when no phenotype terms exist
|
py
|
diff --git a/dtool_irods/storagebroker.py b/dtool_irods/storagebroker.py
index <HASH>..<HASH> 100644
--- a/dtool_irods/storagebroker.py
+++ b/dtool_irods/storagebroker.py
@@ -233,9 +233,20 @@ class IrodsStorageBroker(object):
fname = generate_identifier(relpath)
dest_path = os.path.join(self._data_abspath, fname)
+ # Put the file into iRODS.
copy_file = CommandWrapper(["iput", "-f", fpath, dest_path])
copy_file()
+ # Add the realpath handle as metadata.
+ add_relpath_metadata = CommandWrapper([
+ "imeta",
+ "add",
+ "-d",
+ dest_path,
+ "handle",
+ relpath])
+ add_relpath_metadata()
+
def iter_item_handles(self):
"""Return iterator over item handles."""
|
Add relpath handle as irods metadata when putting a file
|
py
|
diff --git a/invocations/travis.py b/invocations/travis.py
index <HASH>..<HASH> 100644
--- a/invocations/travis.py
+++ b/invocations/travis.py
@@ -74,24 +74,16 @@ def sudo_coverage(c):
@task
-def test_installation(c, package, sanity=None):
+def test_installation(c, package, sanity):
"""
Test a non-editable pip install of source checkout.
Catches high level setup.py bugs.
- :param str package:
- Package name to uninstall.
-
- :param str sanity:
- Sanity-check command string to run. Optional.
-
- If given, will be appended to ``$VIRTUAL_ENV/bin/`` so it runs in the
- Travis test virtualenv.
+ :param str package: Package name to uninstall.
+ :param str sanity: Sanity-check command string to run.
"""
- c.run("echo echoing VIRTUAL_ENV: $VIRTUAL_ENV")
- pip = "$VIRTUAL_ENV/bin/pip"
- c.run("{0} uninstall -y {1}".format(pip, package))
- c.run("{0} install .".format(pip))
+ c.run("pip uninstall -y {0}".format(package))
+ c.run("pip install .")
if sanity:
- c.run("$VIRTUAL_ENV/bin/{0}".format(sanity))
+ c.run(sanity)
|
Wait am I crazy, we only need explicit venv junk under sudo right?
|
py
|
diff --git a/openquake/baselib/general.py b/openquake/baselib/general.py
index <HASH>..<HASH> 100644
--- a/openquake/baselib/general.py
+++ b/openquake/baselib/general.py
@@ -218,6 +218,8 @@ def split_in_blocks(sequence, hint, weight=lambda item: 1,
return block_splitter(items, math.ceil(total_weight / hint), weight, key)
+# the implementation here is unbelievably ugly; it is a remnant of the
+# past and soon or later will be removed (MS)
def deep_eq(a, b, decimal=7, exclude=None):
"""Deep compare two objects for equality by traversing __dict__ and
__slots__.
@@ -286,6 +288,8 @@ def _deep_eq(a, b, decimal, exclude=None):
assert a.__class__ == b.__class__, (
"%s and %s are different classes") % (a.__class__, b.__class__)
_test_dict(a.__dict__, b.__dict__)
+ elif isinstance(a, numpy.ndarray):
+ assert numpy.array_equal(a, b), '%s and %s are different' % (a, b)
# iterables (not strings)
elif isinstance(a, collections.Iterable) and not isinstance(a, str):
# If there's a generator or another type of iterable, treat it as a
|
Fixed deep_eq in the case of empty arrays
|
py
|
diff --git a/tools/c7n_mailer/c7n_mailer/utils.py b/tools/c7n_mailer/c7n_mailer/utils.py
index <HASH>..<HASH> 100644
--- a/tools/c7n_mailer/c7n_mailer/utils.py
+++ b/tools/c7n_mailer/c7n_mailer/utils.py
@@ -196,9 +196,11 @@ def resource_format(resource, resource_type):
len(resource.get('IpPermissions', ())),
len(resource.get('IpPermissionsEgress', ())))
elif resource_type == 'log-group':
- return "name: %s last_write: %s" % (
- resource['logGroupName'],
- resource['lastWrite'])
+ if 'lastWrite' in resource:
+ return "name: %s last_write: %s" % (
+ resource['logGroupName'],
+ resource['lastWrite'])
+ return "name: %s" % (resource['logGroupName'])
elif resource_type == 'cache-cluster':
return "name: %s created: %s status: %s" % (
resource['CacheClusterId'],
|
tools/c7n_mailer Updating mailer resource_format for log-group (#<I>)
|
py
|
diff --git a/pysat/tests/test_orbits.py b/pysat/tests/test_orbits.py
index <HASH>..<HASH> 100644
--- a/pysat/tests/test_orbits.py
+++ b/pysat/tests/test_orbits.py
@@ -628,7 +628,7 @@ class TestOrbitsGappyData2(TestGeneralOrbitsMLT):
seconds=int(seconds)) -
pds.DateOffset(seconds=20)])
- self.testInst.custom.attach(filter_data2, 'modify', times=times)
+ self.testInst.custom.attach(filter_data2, kwargs={'times': times})
def teardown(self):
"""Runs after every method to clean up previous testing."""
@@ -655,7 +655,7 @@ class TestOrbitsGappyData2Xarray(TestGeneralOrbitsMLT):
seconds=int(seconds)) -
pds.DateOffset(seconds=20)])
- self.testInst.custom.attach(filter_data2, 'modify', times=times)
+ self.testInst.custom.attach(filter_data2, kwargs={'times': times})
def teardown(self):
"""Runs after every method to clean up previous testing."""
|
TST: custom attath input Fixed custom attach input in unit tests.
|
py
|
diff --git a/colorama/win32.py b/colorama/win32.py
index <HASH>..<HASH> 100644
--- a/colorama/win32.py
+++ b/colorama/win32.py
@@ -4,8 +4,12 @@
STDOUT = -11
STDERR = -12
+import ctypes
+from ctypes import LibraryLoader
+
+windll = LibraryLoader(ctypes.WinDLL)
+
try:
- from ctypes import windll
from ctypes import wintypes
except ImportError:
windll = None
|
Fix issue #<I>, incompatible with pyreadline. Use the fix supplied by google code user eryksun, creating our own instance of WinDLL('kernel<I>'), since this seems to make sure we won't be incompatible with any other libraries that also use ctypes.windll. Still haven't tested that this fix is safe on non-win platforms...
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -14,11 +14,13 @@ setup(
'robber.matchers',
],
classifiers=[
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 3',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
- 'Programming Language :: Python',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing'
],
)
|
Adding Python 3 to the pypi classifiers.
|
py
|
diff --git a/flask_security/datastore.py b/flask_security/datastore.py
index <HASH>..<HASH> 100644
--- a/flask_security/datastore.py
+++ b/flask_security/datastore.py
@@ -195,7 +195,7 @@ class SQLAlchemyUserDatastore(SQLAlchemyDatastore, UserDatastore):
def _is_numeric(self, value):
try:
int(value)
- except ValueError:
+ except (TypeError, ValueError):
return False
return True
|
Fail silently for get_user(None) get_user(identifier) checks if the identifier is a number by trying to convert it to int. This works for strings, but in a particular case, when identifier is None, it fails. Checking for both TypeError and ValueError fixes it.
|
py
|
diff --git a/pyecharts/js_extensions.py b/pyecharts/js_extensions.py
index <HASH>..<HASH> 100644
--- a/pyecharts/js_extensions.py
+++ b/pyecharts/js_extensions.py
@@ -14,7 +14,7 @@ OFFICIAL_PLUGINS = [
"echarts_china_cities_pypkg",
"echarts_countries_pypkg"
]
-THIRD_PARTY_PLUGIN_PREFIX = "pyecharts_"
+THIRD_PARTY_PLUGIN_PREFIX = "echarts_"
JS_EXTENSION_REGISTRY = 'registry.json'
REGISTRY_JS_FOLDER = 'JS_FOLDER'
|
:hammer: all javascript extension will need start with echarts_
|
py
|
diff --git a/openquake/calculators/base.py b/openquake/calculators/base.py
index <HASH>..<HASH> 100644
--- a/openquake/calculators/base.py
+++ b/openquake/calculators/base.py
@@ -250,7 +250,10 @@ def queue_next(task_func, task_args):
of the "plumbing" which handles task queuing (such as the various "task
complete" callback functions).
"""
- task_func.apply_async(task_args)
+ if openquake.no_distribute():
+ task_func(*task_args)
+ else:
+ task_func.apply_async(task_args)
def signal_task_complete(**kwargs):
|
calcs/base: `queue_next` now executes tasks as plain functions if OQ_NO_DISTRIBUTE is set.
|
py
|
diff --git a/afns/apps/markdown/templatetags/markdown.py b/afns/apps/markdown/templatetags/markdown.py
index <HASH>..<HASH> 100644
--- a/afns/apps/markdown/templatetags/markdown.py
+++ b/afns/apps/markdown/templatetags/markdown.py
@@ -16,12 +16,4 @@ def markdown(value, arg=''):
raise template.TemplateSyntaxError("Error in 'markdown' filter: The Python markdown library isn't installed.")
return force_unicode(value)
else:
- markdown_vers = getattr(markdown, "version_info", 0)
- if markdown_vers < (2, 1):
- if settings.DEBUG:
- raise template.TemplateSyntaxError(
- "Error in 'markdown' filter: Django does not support versions of the Python markdown library < 2.1.")
- return force_unicode(value)
- else:
- return mark_safe(markdown.markdown(
- force_unicode(value), ['smartypants','onion'], safe_mode=False))
\ No newline at end of file
+ return mark_safe(markdown.markdown(force_unicode(value), ['smartypants','onion'], safe_mode=False))
\ No newline at end of file
|
I trust that I'll have the right markdown lib installed
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -40,7 +40,7 @@ version = imp.load_source('satpy.version', 'satpy/version.py')
BASE_PATH = os.path.sep.join(os.path.dirname(
os.path.realpath(__file__)).split(os.path.sep))
-requires = ['numpy >=1.4.1', 'pyresample', 'trollsift', 'trollimage', 'pykdtree', 'six']
+requires = ['numpy >=1.4.1', 'pillow', 'pyresample', 'trollsift', 'trollimage', 'pykdtree', 'six']
if sys.version < '2.7':
requires.append('ordereddict')
@@ -51,12 +51,6 @@ test_requires = ['behave']
if sys.version < '3.0':
test_requires.append('mock')
-try:
- from PIL import Image
-except ImportError:
- requires.append("pillow")
-
-
def _config_data_files(base_dirs, extensions=(".cfg",)):
"""Find all subdirectory configuration files
|
Update setup.py to always require pillow and not import PIL It seems that in older versions of setuptools (or maybe even easy_install) that importing certain libraries in setup.py causes an infinite loop and eats up memory until it gets killed by the kernel.
|
py
|
diff --git a/ufork/ufork.py b/ufork/ufork.py
index <HASH>..<HASH> 100644
--- a/ufork/ufork.py
+++ b/ufork/ufork.py
@@ -109,10 +109,13 @@ except:
pass #gevent worker not defined
else:
import gevent.pywsgi
+ import gevent.socket
def serve_wsgi_gevent(wsgi, address):
- server = gevent.pywsgi.WSGIServer(address, wsgi)
- server.init_socket()
+ sock = gevent.socket.socket()
+ sock.bind(address)
+ sock.listen(128) #TODO: what value?
+ server = gevent.pywsgi.WSGIServer(sock, wsgi)
def post_fork():
server.start()
pool = ForkPool(post_fork)
|
adding backwards compatibility with gevent <I>
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -67,5 +67,6 @@ setup(
'Topic :: System :: Networking :: Monitoring',
],
test_suite='tests',
- tests_require=['iso8601'],
+ tests_require=['django'],
+ # tests_require=['iso8601'], # FIXME: Requires wq.io==1.0.1
)
|
test with django for now
|
py
|
diff --git a/searx/results.py b/searx/results.py
index <HASH>..<HASH> 100644
--- a/searx/results.py
+++ b/searx/results.py
@@ -138,6 +138,7 @@ class ResultContainer(object):
# if the result has no scheme, use http as default
if not result['parsed_url'].scheme:
result['parsed_url'] = result['parsed_url']._replace(scheme="http")
+ result['url'] = result['parsed_url'].geturl()
result['host'] = result['parsed_url'].netloc
|
Fix results with no scheme Related to #<I>, which was fixed in e3df<I>b but broken in a refactoring (b6c3cb0)
|
py
|
diff --git a/l/project.py b/l/project.py
index <HASH>..<HASH> 100644
--- a/l/project.py
+++ b/l/project.py
@@ -68,8 +68,10 @@ class GitPath(object):
argv = ["git"]
if self._git_dir is not None:
argv.extend(["--git-dir", self._git_dir.path])
- argv.extend(["ls-tree", "--name-only", "HEAD", self.path])
- return subprocess.check_output(argv).splitlines()
+ path = self.path + "/" if self.isdir() else ""
+ argv.extend(["ls-tree", "--name-only", "HEAD", path])
+ listdir = subprocess.check_output(argv).splitlines()
+ return [outputted.rpartition("/")[2] for outputted in listdir]
@implementer(abstract.IFilePath)
|
Really really nasty, but at least seems to work for now.
|
py
|
diff --git a/whois_bridge.py b/whois_bridge.py
index <HASH>..<HASH> 100644
--- a/whois_bridge.py
+++ b/whois_bridge.py
@@ -44,10 +44,6 @@ def init( ):
except NameError:
pass
-def GetHandler( query ):
- if re.search( r'\.ar$', query.lower() ): return doARQuery, ARParser
- if re.search( r'(\.fed\.us|\.gov)$', query.lower() ): return doGovQuery, GovParser
-
class WhoisHandler( object ):
"""WhoisHandler is an abstract class for defining whois interfaces for
web-based nic servers.
@@ -111,6 +107,19 @@ class ArgentinaWhoisHandler( WhoisHandler ):
def end_tr( self ):
self.formatter.add_line_break()
+class CoZaWhoisHandler( WhoisHandler ):
+ services = r'\.co\.za$'
+
+ def LoadHTTP( self ):
+ query = self._query
+ pageURL = 'http://whois.co.za/'
+ form = ParseResponse( urlopen( pageURL ) )[0]
+ form['Domain'] = query[ :query.find( '.' ) ]
+ req = form.click()
+ resp = urlopen( req )
+ self._response = resp.read()
+
+ _parser = HTMLParser
class GovWhoisHandler( WhoisHandler ):
services = r'(\.fed\.us|\.gov)$'
|
Added support for .co.za and removed deprecated GetHandler function.
|
py
|
diff --git a/configargparse.py b/configargparse.py
index <HASH>..<HASH> 100644
--- a/configargparse.py
+++ b/configargparse.py
@@ -241,15 +241,18 @@ class ArgumentParser(argparse.ArgumentParser):
else:
args = list(args)
+ new_args = list()
long_arg_re = re.compile(r"^(?P<key>--[^=]+)=(?P<value>.+)$")
for index, arg in enumerate(args):
match = long_arg_re.match(arg)
if match:
parts = match.groupdict()
if "key" in parts and "value" in parts:
- del args[index]
- args.append(parts["key"])
- args.append(parts["value"])
+ new_args.append(parts["key"])
+ new_args.append(parts["value"])
+ else:
+ new_args.append(arg)
+ args = new_args
for a in self._actions:
a.is_positional_arg = not a.option_strings
|
updated to preserve order of args
|
py
|
diff --git a/searx/engines/yandex.py b/searx/engines/yandex.py
index <HASH>..<HASH> 100644
--- a/searx/engines/yandex.py
+++ b/searx/engines/yandex.py
@@ -20,8 +20,13 @@ categories = ['general']
paging = True
language_support = True # TODO
+default_tld = 'com'
+language_map = {'ru': 'ru',
+ 'ua': 'uk',
+ 'tr': 'com.tr'}
+
# search-url
-base_url = 'https://yandex.ru/'
+base_url = 'https://yandex.{tld}/'
search_url = 'search/?{query}&p={page}'
results_xpath = '//div[@class="serp-item serp-item_plain_yes clearfix i-bem"]'
@@ -31,8 +36,10 @@ content_xpath = './/div[@class="serp-item__text"]//text()'
def request(query, params):
- params['url'] = base_url + search_url.format(page=params['pageno']-1,
- query=urlencode({'text': query}))
+ lang = params['language'].split('_')[0]
+ host = base_url.format(tld=language_map.get(lang) or default_tld)
+ params['url'] = host + search_url.format(page=params['pageno']-1,
+ query=urlencode({'text': query}))
return params
|
[fix] yandex engine language support according to #<I>
|
py
|
diff --git a/ldapcherry/backend/backendLdap.py b/ldapcherry/backend/backendLdap.py
index <HASH>..<HASH> 100644
--- a/ldapcherry/backend/backendLdap.py
+++ b/ldapcherry/backend/backendLdap.py
@@ -311,6 +311,9 @@ class Backend(ldapcherry.backend.Backend):
dn,
ldap.dn.dn2str([[(battr, bcontent, 1)]])
)
+ dn = ldap.dn.dn2str(
+ [[(battr, bcontent, 1)]] + ldap.dn.str2dn(dn)[1:]
+ )
else:
if attr in old_attrs:
if type(old_attrs[attr]) is list:
|
fix rename in modify when renaming an entry, dn must be reset to the correct value for the modifications that come after the rename.
|
py
|
diff --git a/telethon/client/users.py b/telethon/client/users.py
index <HASH>..<HASH> 100644
--- a/telethon/client/users.py
+++ b/telethon/client/users.py
@@ -94,8 +94,10 @@ class UserMethods:
if utils.is_list_like(request):
request = request[request_index]
- self._flood_waited_requests\
- [request.CONSTRUCTOR_ID] = time.time() + e.seconds
+ # SLOW_MODE_WAIT is chat-specific, not request-specific
+ if not isinstance(e, errors.SlowModeWaitError):
+ self._flood_waited_requests\
+ [request.CONSTRUCTOR_ID] = time.time() + e.seconds
# In test servers, FLOOD_WAIT_0 has been observed, and sleeping for
# such a short amount will cause retries very fast leading to issues.
|
Don't cache SLOW_MODE_WAIT in _flood_waited_requests Closes #<I>.
|
py
|
diff --git a/tests/unit/Events/test_BEvents.py b/tests/unit/Events/test_BEvents.py
index <HASH>..<HASH> 100644
--- a/tests/unit/Events/test_BEvents.py
+++ b/tests/unit/Events/test_BEvents.py
@@ -2,7 +2,6 @@ from AlphaTwirl.Events import BEvents
from AlphaTwirl.Events import Branch
import sys
import unittest
-import ROOT
##____________________________________________________________________________||
class MockFile(object):
|
stop importing ROOT to test_BEvents.py
|
py
|
diff --git a/django-openstack/django_openstack/api.py b/django-openstack/django_openstack/api.py
index <HASH>..<HASH> 100644
--- a/django-openstack/django_openstack/api.py
+++ b/django-openstack/django_openstack/api.py
@@ -159,7 +159,7 @@ class Server(APIResourceWrapper):
Preserves the request info so image name can later be retrieved
"""
- _attrs = ['addresses', 'attrs', 'hostId', 'id', 'imageRef', 'links',
+ _attrs = ['addresses', 'attrs', 'hostId', 'id', 'image', 'links',
'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid',
'image_name']
@@ -175,7 +175,7 @@ class Server(APIResourceWrapper):
@property
def image_name(self):
- image = image_get(self.request, self.imageRef)
+ image = image_get(self.request, self.image['id'])
return image.name
def reboot(self, hardness=openstack.compute.servers.REBOOT_HARD):
|
in nova trunk, server.imageRef has been replaced by a server.image object
|
py
|
diff --git a/fedmsg/commands/tweet.py b/fedmsg/commands/tweet.py
index <HASH>..<HASH> 100644
--- a/fedmsg/commands/tweet.py
+++ b/fedmsg/commands/tweet.py
@@ -61,11 +61,16 @@ def tweet(**kw):
settings['api_key'],
)
- for name, ep, topic, message in fedmsg.tail_messages(**kw):
- link = fedmsg.text.msg2link(message, **kw)
- link = bitly.shorten(longUrl=link)['url']
- message = fedmsg.text.msg2subtitle(message, **kw)
- message = (message[:139] + " ")[:139 - len(link)] + link
+ for name, ep, topic, msg in fedmsg.tail_messages(**kw):
+ message = fedmsg.text.msg2subtitle(msg, **kw)
+ link = fedmsg.text.msg2link(msg, **kw)
+
+ if link:
+ link = bitly.shorten(longUrl=link)['url']
+ message = (message[:139] + " ")[:139 - len(link)] + link
+ else:
+ message = message[:140]
+
print("Tweeting %r" % message)
for api in apis:
try:
|
Be more careful with links that are NoneType.
|
py
|
diff --git a/poetry/console/commands/show.py b/poetry/console/commands/show.py
index <HASH>..<HASH> 100644
--- a/poetry/console/commands/show.py
+++ b/poetry/console/commands/show.py
@@ -145,7 +145,7 @@ lists all packages available."""
self.line(line)
def display_package_tree(self, package, installed_repo):
- self.write('<info>{}</info>'.format(package.prett_name))
+ self.write('<info>{}</info>'.format(package.pretty_name))
self.line(' {} {}'.format(package.pretty_version, package.description))
dependencies = package.requires
|
Fix show command with --tree option
|
py
|
diff --git a/tests/test_checks.py b/tests/test_checks.py
index <HASH>..<HASH> 100644
--- a/tests/test_checks.py
+++ b/tests/test_checks.py
@@ -132,7 +132,7 @@ class TestMissingAnyFills:
ds = xr.open_dataset(fn)
miss = checks.missing_any(ds.q_sim, freq='YS')
np.testing.assert_array_equal(miss[:-1], False)
- np.testing.assert_equal(miss[-1], True)
+ np.testing.assert_array_equal(miss[-1], True)
miss = checks.missing_any(ds.q_sim, freq='YS', season='JJA')
np.testing.assert_array_equal(miss, False)
|
revert back test change. miss is 2D, so still an array after indexing
|
py
|
diff --git a/salt/minion.py b/salt/minion.py
index <HASH>..<HASH> 100644
--- a/salt/minion.py
+++ b/salt/minion.py
@@ -809,6 +809,10 @@ class Minion(MinionBase):
elif data and tag:
load['data'] = data
load['tag'] = tag
+ elif not data and tag:
+ load['data'] = {}
+ load['tag'] = tag
+
else:
return
channel = salt.transport.Channel.factory(self.opts)
|
allow the minion to send empty dicts to the master the minion should be able to send empty dicts, because they appear every now and then. for example when pkg.list_upgrades reports that no upgrades are available.
|
py
|
diff --git a/tests/test_transliterate.py b/tests/test_transliterate.py
index <HASH>..<HASH> 100644
--- a/tests/test_transliterate.py
+++ b/tests/test_transliterate.py
@@ -156,6 +156,3 @@ class TestTransliteratePackage(unittest.TestCase):
def test_puan(self):
self.assertEqual(puan("นาริน"), "นิน-รา")
self.assertEqual(puan("นาริน", False), "นินรา")
- self.assertEqual(puan("สวัสดี"), "สะ-หี-ดวัด")
- self.assertEqual(puan("สวัสดี", False), "สะหีดวัด")
- self.assertEqual(puan("สวัสดีครับ"), "สวัสดีครับ")
|
Update test_transliterate.py
|
py
|
diff --git a/Lib/glyphsLib/builder.py b/Lib/glyphsLib/builder.py
index <HASH>..<HASH> 100644
--- a/Lib/glyphsLib/builder.py
+++ b/Lib/glyphsLib/builder.py
@@ -301,15 +301,18 @@ def set_custom_params(ufo, parsed=None, data=None, misc_keys=(), non_info=()):
else:
assert data is None, "Shouldn't provide parsed data and data to parse."
+ fsSelection_flags = {'Use Typo Metrics', 'Has WWS Names'}
for name, value in parsed:
name = normalize_custom_param_name(name)
- # special cases
- if name == 'Has WWS Names':
- try:
- ufo.info.openTypeOS2Selection.append(8)
- except AttributeError:
- ufo.info.openTypeOS2Selection = [8]
+ if name in fsSelection_flags:
+ if value:
+ if ufo.info.openTypeOS2Selection is None:
+ ufo.info.openTypeOS2Selection = []
+ if name == 'Use Typo Metrics':
+ ufo.info.openTypeOS2Selection.append(7)
+ elif name == 'Has WWS Names':
+ ufo.info.openTypeOS2Selection.append(8)
continue
# deal with any Glyphs naming quirks here
|
[builder] set OS/2 flSelection flags 7 and 8 from custom parameters we need to check if the value is True or False, not just whether the custom parameter is there.
|
py
|
diff --git a/spyderlib/plugins/inspector.py b/spyderlib/plugins/inspector.py
index <HASH>..<HASH> 100644
--- a/spyderlib/plugins/inspector.py
+++ b/spyderlib/plugins/inspector.py
@@ -31,9 +31,9 @@ from spyderlib.widgets.externalshell.pythonshell import ExtPythonShellWidget
from spyderlib.plugins import SpyderPluginWidget, PluginConfigPage
try:
+ from sphinx import __version__ as sphinx_version
from spyderlib.utils.inspector.sphinxify import (CSS_PATH, sphinxify,
warning, generate_context)
- from sphinx import __version__ as sphinx_version
except ImportError:
sphinxify = None
|
Object Inspector: Move sphinx version import before sphinxify It should be faster this way to check for sphinx presence
|
py
|
diff --git a/Tree/draw.py b/Tree/draw.py
index <HASH>..<HASH> 100644
--- a/Tree/draw.py
+++ b/Tree/draw.py
@@ -104,5 +104,5 @@ class SvgDrawer(Drawer):
SUPPORTED_CANVAS = {
"PIL.Image": PilDrawer,
- "svgwrite.Drawing": SvgDrawer
+ "svgwrite.drawing": SvgDrawer
}
\ No newline at end of file
|
correct mdoule name of svg drawing
|
py
|
diff --git a/utils/bench_result.py b/utils/bench_result.py
index <HASH>..<HASH> 100644
--- a/utils/bench_result.py
+++ b/utils/bench_result.py
@@ -28,8 +28,8 @@ def generate_results(results_files, type):
max = bm['stats']['max']
avr = bm['stats']['mean']
table.append([meta, commit, rate, min, max, avr])
- sortsecond = lambda val: val[1]
- table.sort(reverse=True, key=sortsecond)
+ sortrate = lambda val: val[2]
+ table.sort(reverse=True, key=sortrate)
return tabulate(table, headers=['platform', 'commit', 'rate (char/mSec)', 'min(sec)', 'max(sec)', 'mean(sec)'],
tablefmt=type)
|
benchmark: result: sort based on rate
|
py
|
diff --git a/salt/modules/apt.py b/salt/modules/apt.py
index <HASH>..<HASH> 100644
--- a/salt/modules/apt.py
+++ b/salt/modules/apt.py
@@ -140,6 +140,8 @@ def install(pkg, refresh=False, repo='', skip_verify=False,
if version:
pkg = "{0}={1}".format(pkg, version)
+ elif 'eq' in kwargs:
+ pkg = "{0}={1}".format(pkg, kwargs['eq'])
cmd = 'apt-get -q -y {confold}{verify}{target} install {pkg}'.format(
confold=' -o DPkg::Options::=--force-confold',
|
Add eq in for apt
|
py
|
diff --git a/tools/pip/setup.py b/tools/pip/setup.py
index <HASH>..<HASH> 100644
--- a/tools/pip/setup.py
+++ b/tools/pip/setup.py
@@ -142,14 +142,14 @@ else:
elif variant.startswith('CU92'):
libraries.append('CUDA-9.2')
-if variant != 'native':
+if variant != 'NATIVE':
libraries.append('MKLDNN')
short_description += ' This version uses {0}.'.format(' and '.join(libraries))
package_data = {'mxnet': [os.path.join('mxnet', os.path.basename(LIB_PATH[0]))],
'dmlc_tracker': []}
-if variant != 'native':
+if variant != 'NATIVE':
shutil.copytree(os.path.join(CURRENT_DIR, 'mxnet-build/3rdparty/mkldnn/build/install/include'),
os.path.join(CURRENT_DIR, 'mxnet/include/mkldnn'))
if platform.system() == 'Linux':
|
fix native cd builds (#<I>)
|
py
|
diff --git a/buildozer/targets/android_new.py b/buildozer/targets/android_new.py
index <HASH>..<HASH> 100644
--- a/buildozer/targets/android_new.py
+++ b/buildozer/targets/android_new.py
@@ -57,9 +57,8 @@ class TargetAndroidNew(TargetAndroid):
elif option == "release":
cmd.append("--release")
continue
- if option in ("--window", ):
- # missing option in sdl2 bootstrap yet
- continue
+ if option == "--window":
+ cmd.append("--window")
elif option == "--sdk":
cmd.append("--android_api")
cmd.extend(values)
|
Added window option for target android_new.
|
py
|
diff --git a/moztelemetry/scalar.py b/moztelemetry/scalar.py
index <HASH>..<HASH> 100644
--- a/moztelemetry/scalar.py
+++ b/moztelemetry/scalar.py
@@ -19,7 +19,6 @@ from parse_scalars import ScalarType
SCALARS_YAML_PATH = '/toolkit/components/telemetry/Scalars.yaml'
REVISIONS = {'nightly': 'https://hg.mozilla.org/mozilla-central/rev/tip',
- 'aurora': 'https://hg.mozilla.org/releases/mozilla-aurora/rev/tip',
'beta': 'https://hg.mozilla.org/releases/mozilla-beta/rev/tip',
'release': 'https://hg.mozilla.org/releases/mozilla-release/rev/tip'}
|
Remove aurora scalar definitions (#<I>)
|
py
|
diff --git a/psamm/gapfill.py b/psamm/gapfill.py
index <HASH>..<HASH> 100644
--- a/psamm/gapfill.py
+++ b/psamm/gapfill.py
@@ -126,7 +126,9 @@ def gapfind(model, solver, epsilon=0.001, v_max=1000):
yield compound
-def gapfill(model, core, blocked, exclude, solver, epsilon=0.001, v_max=1000):
+def gapfill(
+ model, core, blocked, exclude, solver, epsilon=0.001, v_max=1000,
+ weights={}):
"""Find a set of reactions to add such that no compounds are blocked.
Returns two iterators: first an iterator of reactions not in
@@ -165,7 +167,10 @@ def gapfill(model, core, blocked, exclude, solver, epsilon=0.001, v_max=1000):
ym = prob.namespace(model.reactions, types=lp.VariableType.Binary)
yd = prob.namespace(database_reactions, types=lp.VariableType.Binary)
- objective = ym.sum(model.reactions) + yd.sum(database_reactions)
+ objective = ym.expr(
+ (rxnid, weights.get(rxnid, 1)) for rxnid in model.reactions)
+ objective += yd.expr(
+ (rxnid, weights.get(rxnid, 1)) for rxnid in database_reactions)
prob.set_objective(objective)
# Add constraints on all reactions
|
gapfill: Allow weights on added/modified reactions Allows the user to penalize reactions differently when proposing added or modified reactions. Default penalty value is 1.
|
py
|
diff --git a/py/testdir_multi_jvm/test_NN2_mnist_multi.py b/py/testdir_multi_jvm/test_NN2_mnist_multi.py
index <HASH>..<HASH> 100644
--- a/py/testdir_multi_jvm/test_NN2_mnist_multi.py
+++ b/py/testdir_multi_jvm/test_NN2_mnist_multi.py
@@ -57,7 +57,7 @@ class Basic(unittest.TestCase):
'max_w2' : 15,
'initial_weight_distribution' : 'UniformAdaptive',
#'initial_weight_scale' : 0.01,
- 'epochs' : 20.0,
+ 'epochs' : 30.0,
'destination_key' : model_key,
'validation' : validation_key,
}
@@ -71,7 +71,7 @@ class Basic(unittest.TestCase):
#### Now score using the model, and check the validation error
expectedErr = 0.046
- relTol = 0.35 # allow 35% tolerance. kbn
+ relTol = 0.40 # allow 40% tolerance. kbn
predict_key = 'Predict.hex'
kwargs = {
|
Increase epochs to <I> and increase tolerance to <I>
|
py
|
diff --git a/uk_geo_utils/management/commands/import_cleaned_addresses.py b/uk_geo_utils/management/commands/import_cleaned_addresses.py
index <HASH>..<HASH> 100644
--- a/uk_geo_utils/management/commands/import_cleaned_addresses.py
+++ b/uk_geo_utils/management/commands/import_cleaned_addresses.py
@@ -6,6 +6,12 @@ from uk_geo_utils.helpers import get_address_model
class Command(BaseCommand):
+ help = (
+ "Deletes all data in Address model AND any related tables,",
+ "and replaces Address model data with that in the cleaned AddressBase CSVs.",
+ "Data in related tables will need to be imported/rebuilt seperately",
+ )
+
def add_arguments(self, parser):
parser.add_argument(
"cleaned_ab_path",
@@ -38,7 +44,7 @@ class Command(BaseCommand):
cursor = connection.cursor()
self.stdout.write("clearing existing data..")
- cursor.execute("TRUNCATE TABLE %s;" % (self.table_name))
+ cursor.execute("TRUNCATE TABLE %s CASCADE;" % (self.table_name))
self.stdout.write("importing from %s.." % (cleaned_file_path))
cursor.copy_expert(
|
Make import_cleaned_addresses drop related tables This is necessary if your ONSUD_MODEL is related to your ADDRESS_MODEL, as is the case in UK-Polling-Stations. Without this the related table prevents the Address table from being truncated.
|
py
|
diff --git a/src/chemcoord/cartesian_coordinates/_cartesian_class_get_zmat.py b/src/chemcoord/cartesian_coordinates/_cartesian_class_get_zmat.py
index <HASH>..<HASH> 100644
--- a/src/chemcoord/cartesian_coordinates/_cartesian_class_get_zmat.py
+++ b/src/chemcoord/cartesian_coordinates/_cartesian_class_get_zmat.py
@@ -645,13 +645,13 @@ class CartesianGetZmat(CartesianCore):
if as_function:
from chemcoord.cartesian_coordinates.xyz_functions import (
apply_grad_tensor)
- f = partial(apply_grad_tensor,
- construction_table=construction_table, grad_C=grad_C)
+
+ def f(cart_dist):
+ return apply_grad_tensor(grad_C, construction_table, cart_dist)
return f
else:
return grad_C
-
def to_zmat(self, *args, **kwargs):
"""Deprecated, use :meth:`~Cartesian.get_zmat`
"""
|
BUG: Bugfix because of partial argument ordering
|
py
|
diff --git a/pyvista/plotting/plotting.py b/pyvista/plotting/plotting.py
index <HASH>..<HASH> 100644
--- a/pyvista/plotting/plotting.py
+++ b/pyvista/plotting/plotting.py
@@ -587,6 +587,8 @@ class BasePlotter(PickingHelper, WidgetHelper):
"""Set the current interactive render window to isometric view."""
interactor = self.iren.GetInteractorStyle()
renderer = interactor.GetCurrentRenderer()
+ if renderer is None:
+ renderer = self.renderer
renderer.view_isometric()
def update(self, stime=1, force_redraw=True):
|
Fix #<I> (#<I>)
|
py
|
diff --git a/zounds/index/test_hammingdb.py b/zounds/index/test_hammingdb.py
index <HASH>..<HASH> 100644
--- a/zounds/index/test_hammingdb.py
+++ b/zounds/index/test_hammingdb.py
@@ -132,6 +132,7 @@ class HammingDbTests(unittest2.TestCase):
db.append('a' * 8, 'some data')
self.assertEqual(2, len(db))
+ @unittest2.skip('This test fails in CI tests, but it is soon to be removed')
def test_can_search_over_text_documents(self):
db = HammingDb(self._path, code_size=8)
t1 = b'Mary had a little lamb'
|
Ignore test that fails in CI build. These are way out of scope for zounds and should be removed anyway
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -31,12 +31,16 @@ setup(
author_email='',
description='A python package for distribution grid analysis and optimization',
install_requires = [
- 'ding0==0.1.2',
+ 'ding0==0.1.2+git.5d882e80',
'networkx >=1.11',
'shapely >= 1.5.12, <= 1.5.12',
'pandas >=0.20.3, <=0.20.3',
'pypsa >=0.10.0, <=0.10.0'
],
cmdclass={
- 'install': InstallSetup}
+ 'install': InstallSetup},
+ dependency_links=[
+ 'https://github.com/openego/ding0/archive/'\
+ '5d882e804b12f79a4f74c88d26e71faff1929e00.zip'\
+ '#egg=ding0-0.1.2+git.5d882e80ding0==0.1.2+git.5d882e80']
)
|
Links to latest commit in setup.py
|
py
|
diff --git a/cytomine/cytomine_job.py b/cytomine/cytomine_job.py
index <HASH>..<HASH> 100644
--- a/cytomine/cytomine_job.py
+++ b/cytomine/cytomine_job.py
@@ -309,7 +309,7 @@ class CytomineJob(Cytomine):
self.close(value)
return False
- def logger(self, start=0, end=100, period=None):
+ def job_logger(self, start=0, end=100, period=None):
"""Return a logger for the current job."""
return CytomineJobLogger(self, start=start, end=end, period=period)
|
ENH rename job logger getter
|
py
|
diff --git a/tests/test_tools.py b/tests/test_tools.py
index <HASH>..<HASH> 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -345,6 +345,13 @@ class TestTools(unittest.TestCase):
'namespace': 'Microsoft.Authorization',
'subscription': 'fakesub',
}
+ },
+ {
+ 'resource_id': '/subscriptions/mySub/resourceGroups/myRg',
+ 'expected': {
+ 'subscription': 'mySub',
+ 'resource_group': 'myRg'
+ }
}
]
for test in tests:
@@ -455,6 +462,13 @@ class TestTools(unittest.TestCase):
'child_name_1': 'name2',
'child_namespace_2': 'Microsoft.Provider3'
}
+ },
+ {
+ 'resource_id': '/subscriptions/mySub/resourceGroups/myRg',
+ 'id_args': {
+ 'subscription': 'mySub',
+ 'resource_group': 'myRg'
+ }
}
]
for test in tests:
|
added tests for group ids
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@ setup(
'Programming Language :: Python :: 3',
],
install_requires=[
- "autobahn==17.5.1",
+ "autobahn==17.10.1",
],
tests_require=[
]
|
Updated autobahn requirement to match simpl-modelservice
|
py
|
diff --git a/salt/cli/__init__.py b/salt/cli/__init__.py
index <HASH>..<HASH> 100644
--- a/salt/cli/__init__.py
+++ b/salt/cli/__init__.py
@@ -224,11 +224,10 @@ class SaltCMD(parsers.SaltCMDOptionParser):
print_cli('-------------------------------------------')
print_cli('Summary')
print_cli('-------------------------------------------')
- if self.options.verbose:
- print_cli('# of Minions Targeted: {0}'.format(return_counter + not_return_counter))
+ print_cli('# of Minions Targeted: {0}'.format(return_counter + not_return_counter))
print_cli('# of Minions Returned: {0}'.format(return_counter))
+ print_cli('# of Minions Did Not Return: {0}'.format(not_return_counter))
if self.options.verbose:
- print_cli('# of Minions Did Not Return: {0}'.format(not_return_counter))
print_cli('Minions Which Did Not Return: {0}'.format(" ".join(not_return_minions)))
print_cli('-------------------------------------------')
|
Show all but list of unreturned minions with --summary
|
py
|
diff --git a/unit_systems.py b/unit_systems.py
index <HASH>..<HASH> 100644
--- a/unit_systems.py
+++ b/unit_systems.py
@@ -109,10 +109,16 @@ class UnitSystem(object):
repr += " %s: %s\n" % (key, self.units_map[dim])
return repr
-def create_code_unit_system(ds):
- code_unit_system = UnitSystem(str(ds), "code_length", "code_mass", "code_time",
- "code_temperature", registry=ds.unit_registry)
+def create_code_unit_system(ds, current_mks_unit=None):
+ code_unit_system = UnitSystem(
+ str(ds), "code_length", "code_mass", "code_time", "code_temperature",
+ current_mks_unit=current_mks_unit, registry=ds.unit_registry)
code_unit_system["velocity"] = "code_velocity"
+ if current_mks_unit:
+ code_unit_system["magnetic_field_mks"] = "code_magnetic"
+ else:
+ code_unit_system["magnetic_field_cgs"] = "code_magnetic"
+ code_unit_system["pressure"] = "code_pressure"
cgs_unit_system = UnitSystem("cgs", "cm", "g", "s")
cgs_unit_system["energy"] = "erg"
|
Make it possible to create a code unit system with MKS base units --HG-- branch : yt
|
py
|
diff --git a/TwitterSearch/TwitterSearchOrder.py b/TwitterSearch/TwitterSearchOrder.py
index <HASH>..<HASH> 100644
--- a/TwitterSearch/TwitterSearchOrder.py
+++ b/TwitterSearch/TwitterSearchOrder.py
@@ -99,7 +99,7 @@ class TwitterSearchOrder(object):
raise TwitterSearchException('Invalid callback string')
def setUntil(self, date):
- if isinstance(date, date):
+ if isinstance(date, datetime.date):
self.arguments.update( { 'unitl' : '%s' % date.strftime('%Y-%m-%d') } )
else:
raise TwitterSearchException('Not a date object')
|
bugfix: date to datetime.date
|
py
|
diff --git a/libreantdb/test/__init__.py b/libreantdb/test/__init__.py
index <HASH>..<HASH> 100644
--- a/libreantdb/test/__init__.py
+++ b/libreantdb/test/__init__.py
@@ -22,6 +22,6 @@ def cleanall():
if db.es.indices.exists(db.index_name):
db.es.delete_by_query(index=db.index_name,
body={'query': {'match_all': {}}})
- db.es.indices.delete(index=db.index_name)
- db.setup_db()
+ else:
+ db.setup_db()
db.es.indices.refresh(index=db.index_name)
|
[ db_api ] do not recreate index on every test; fixes #<I>
|
py
|
diff --git a/openstack_dashboard/dashboards/settings/user/forms.py b/openstack_dashboard/dashboards/settings/user/forms.py
index <HASH>..<HASH> 100644
--- a/openstack_dashboard/dashboards/settings/user/forms.py
+++ b/openstack_dashboard/dashboards/settings/user/forms.py
@@ -52,7 +52,8 @@ class UserSettingsForm(forms.SelfHandlingForm):
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
# Timezone
- request.session['django_timezone'] = pytz.timezone(data['timezone'])
+ request.session['django_timezone'] = pytz.timezone(
+ data['timezone']).zone
messages.success(request, translation.ugettext("Settings saved."))
|
Store timezone as string. Fixes bug <I>. Change-Id: I8fd<I>fd0c<I>e<I>d8e<I>b<I>c<I>a<I>
|
py
|
diff --git a/hydra_base/lib/objects.py b/hydra_base/lib/objects.py
index <HASH>..<HASH> 100644
--- a/hydra_base/lib/objects.py
+++ b/hydra_base/lib/objects.py
@@ -27,7 +27,7 @@ from datetime import datetime
from ..exceptions import HydraError
from ..util import generate_data_hash
-from ..import config
+from .. import config
import zlib
import pandas as pd
@@ -43,6 +43,9 @@ class JSONObject(dict):
assert isinstance(obj, dict), "JSON string does not evaluate to a dict"
except Exception:
raise ValueError("Unable to read string value. Make sure it's JSON serialisable")
+ elif hasattr(obj_dict, '_asdict') and obj_dict._asdict is not None:
+ #A special case, trying to load a SQLAlchemy object, which is a 'dict' object
+ obj = obj_dict._asdict()
elif hasattr(obj_dict, '__dict__'):
#A special case, trying to load a SQLAlchemy object, which is a 'dict' object
obj = obj_dict.__dict__
|
Add a check for _asdict in objects.py to reflect the situation where a SQLAlchemy object is passed in
|
py
|
diff --git a/tensorflow_probability/substrates/meta/rewrite.py b/tensorflow_probability/substrates/meta/rewrite.py
index <HASH>..<HASH> 100644
--- a/tensorflow_probability/substrates/meta/rewrite.py
+++ b/tensorflow_probability/substrates/meta/rewrite.py
@@ -260,16 +260,17 @@ def main(argv):
substrate = 'jax' if FLAGS.numpy_to_jax else 'numpy'
path = filename.split('/python/')[1]
- header = '\n'.join([
+ footer = '\n'.join([
+ '\n',
'# ' + '@' * 78,
'# This file is auto-generated by substrates/meta/rewrite.py',
'# It will be surfaced by the build system as a symlink at:',
'# `tensorflow_probability/substrates/{}/{}`'.format(substrate, path),
'# For more info, see substrate_runfiles_symlinks in build_defs.bzl',
'# ' + '@' * 78,
- '\n# (This notice adds 10 to line numbering.)\n\n\n'])
+ ])
- print(header + contents, file=open(1, 'w', encoding='utf-8', closefd=False))
+ print(contents + footer, file=open(1, 'w', encoding='utf-8', closefd=False))
if __name__ == '__main__':
|
Move the JAX/NumPy generated files notice to the end. This is to make the line numbers for errors correspond to the original sources. PiperOrigin-RevId: <I>
|
py
|
diff --git a/disqusapi/__init__.py b/disqusapi/__init__.py
index <HASH>..<HASH> 100644
--- a/disqusapi/__init__.py
+++ b/disqusapi/__init__.py
@@ -9,7 +9,7 @@ disqus.trends.listThreads()
try:
__version__ = __import__('pkg_resources') \
.get_distribution('disqusapi').version
-except:
+except Exception: # pragma: no cover
__version__ = 'unknown'
from six.moves import http_client as httplib
|
Don't catch a bare except
|
py
|
diff --git a/unfriendly/__init__.py b/unfriendly/__init__.py
index <HASH>..<HASH> 100644
--- a/unfriendly/__init__.py
+++ b/unfriendly/__init__.py
@@ -1,2 +1,2 @@
-VERSION = (0, 2, 2)
+VERSION = (0, 2, 3)
__version__ = '.'.join([str(x) for x in VERSION])
|
Bumped version -> <I>
|
py
|
diff --git a/tests/test_data_cloud.py b/tests/test_data_cloud.py
index <HASH>..<HASH> 100644
--- a/tests/test_data_cloud.py
+++ b/tests/test_data_cloud.py
@@ -52,7 +52,7 @@ def _should_test_gcp():
try:
check_output(['gcloud', 'auth', 'activate-service-account',
'--key-file', creds])
- except (CalledProcessError, IOError, WinError):
+ except (CalledProcessError, OSError):
return False
return True
|
test: gc: catch OSError
|
py
|
diff --git a/sos/report/plugins/gluster.py b/sos/report/plugins/gluster.py
index <HASH>..<HASH> 100644
--- a/sos/report/plugins/gluster.py
+++ b/sos/report/plugins/gluster.py
@@ -76,9 +76,8 @@ class Gluster(Plugin, RedHatPlugin):
"/var/lib/glusterd/",
# collect nfs-ganesha related configuration
"/run/gluster/shared_storage/nfs-ganesha/",
- # collect status files and public ssh keys
- "/var/lib/glusterd/.keys/",
- "/var/lib/glusterd/glusterfind/"
+ # collect public ssh keys (a_s_c skips implicit hidden files)
+ "/var/lib/glusterd/glusterfind/.keys/",
] + glob.glob('/run/gluster/*tier-dht/*'))
if not self.get_option("all_logs"):
|
[gluster] collect public keys from the right dir Collection of the glusterfind dir is achieved by /var/lib/gluster, so it doesn't need to be collected explicitly. The /var/lib/glusterd/glusterfind/.keys/ subdir is required to be explicitly collected, as add_copy_spec uses glob.glob(), which skips hidden files. Resolves: #<I>
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.