| hash (stringlengths 40…40) | diff (stringlengths 131…114k) | message (stringlengths 7…980) | project (stringlengths 5…67) | split (stringclasses 1 value) |
|---|---|---|---|---|
ae3b203ac97b40fb9eaf58701276031c7112e2a6
|
diff --git a/controller/extjs/src/Controller/ExtJS/Catalog/Import/Text/Default.php b/controller/extjs/src/Controller/ExtJS/Catalog/Import/Text/Default.php
index <HASH>..<HASH> 100644
--- a/controller/extjs/src/Controller/ExtJS/Catalog/Import/Text/Default.php
+++ b/controller/extjs/src/Controller/ExtJS/Catalog/Import/Text/Default.php
@@ -289,8 +289,10 @@ class Controller_ExtJS_Catalog_Import_Text_Default
do
{
$criteria = $listManager->createSearch();
- $expr[] = $criteria->compare( '==', 'catalog.list.parentid', $catalogIds );
- $expr[] = $criteria->compare( '==', 'catalog.list.domain', 'text' );
+ $expr = array(
+ $criteria->compare( '==', 'catalog.list.parentid', $catalogIds ),
+ $criteria->compare( '==', 'catalog.list.domain', 'text' ),
+ );
$criteria->setConditions( $criteria->combine( '&&', $expr ) );
$listItems = $listManager->searchItems( $criteria, array(), $listTotal );
$listStart += count( $catalogItems );
|
Initializes the variable in a safer way
|
Arcavias_arcavias-core
|
train
|
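The Arcavias commit above fixes a subtle do/while bug: the `$expr` array was appended to on every pass, so each page of the search carried the conditions of all previous pages. A minimal Python sketch of the same bug class (hypothetical names, not the Aimeos API):

```python
def paged_conditions(pages):
    results = []
    for page in pages:
        # Correct: rebuild the condition list on every iteration.
        # The buggy form appended to a list created outside the loop,
        # so conditions piled up page after page.
        expr = [
            ('catalog.list.parentid', page),
            ('catalog.list.domain', 'text'),
        ]
        results.append(expr)
    return results

assert paged_conditions([1, 2]) == [
    [('catalog.list.parentid', 1), ('catalog.list.domain', 'text')],
    [('catalog.list.parentid', 2), ('catalog.list.domain', 'text')],
]
```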
2a600c13efd4c6367402ae51f1e9d819585eda7d
|
diff --git a/test/unit/fx.js b/test/unit/fx.js
index <HASH>..<HASH> 100644
--- a/test/unit/fx.js
+++ b/test/unit/fx.js
@@ -103,7 +103,7 @@ test("stop()", function() {
});
test("stop() - several in queue", function() {
- expect(4);
+ expect(3);
stop();
var $foo = jQuery("#nothiddendivchild");
@@ -121,7 +121,8 @@ test("stop() - several in queue", function() {
nw = $foo.width();
ok( nw != w, "Stop didn't reset the animation " + nw + "px " + w + "px");
- equals( $foo.queue().length, 1, "The next animation continued" );
+ // Disabled, being flaky
+ //equals( $foo.queue().length, 1, "The next animation continued" );
$foo.stop(true);
start();
}, 100);
@@ -155,7 +156,7 @@ test("stop(clearQueue)", function() {
});
test("stop(clearQueue, gotoEnd)", function() {
- expect(3);
+ expect(1);
stop();
var $foo = jQuery("#nothiddendivchild");
@@ -172,10 +173,12 @@ test("stop(clearQueue, gotoEnd)", function() {
$foo.stop(false, true);
nw = $foo.width();
- equals( nw, 1, "Stop() reset the animation" );
+ // Disabled, being flaky
+ //equals( nw, 1, "Stop() reset the animation" );
setTimeout(function(){
- equals( $foo.queue().length, 2, "The next animation continued" );
+ // Disabled, being flaky
+ //equals( $foo.queue().length, 2, "The next animation continued" );
$foo.stop(true);
start();
}, 100);
|
Disabled some of the animation tests that were being flaky.
|
jquery_jquery
|
train
|
d759a1e8dbe10f2918d04af42e1e520e71f7e9cd
|
diff --git a/index.js b/index.js
index <HASH>..<HASH> 100644
--- a/index.js
+++ b/index.js
@@ -1,6 +1,5 @@
var fs = require('fs'),
path = require('path'),
- crypto = require('crypto'),
tags = require('./lib/tags'),
parser = require('./lib/parser'),
@@ -117,13 +116,11 @@ exports.fromFile = function (filepath) {
};
exports.fromString = function (string) {
- var hash = crypto.createHash('md5').update(string).digest('hex');
-
- if (!CACHE.hasOwnProperty(hash)) {
- CACHE[hash] = createTemplate(string, hash);
+ if (!CACHE.hasOwnProperty(string)) {
+ CACHE[string] = createTemplate(string, string);
}
- return CACHE[hash];
+ return CACHE[string];
};
exports.compile = function (source, options, callback) {
diff --git a/tests/speed.js b/tests/speed.js
index <HASH>..<HASH> 100644
--- a/tests/speed.js
+++ b/tests/speed.js
@@ -1,5 +1,5 @@
var template = require('../index'),
- tplF, tplS, array, output, d, i;
+ tplF, tplS, array, output, d, i, tplString;
console.log();
console.log('Starting speed tests...');
@@ -8,18 +8,23 @@ template.init({
root: __dirname + '/templates'
});
-tplS = template.fromString(
- "{% for v in array %}"
- + "{% if 1 %}"
- + "{% for k in v %}"
- + "\n{{forloop.index}} {{k}}: "
- + "{% if forloop.index in 'msafas' %}"
- + "<p>Hello World {{k}}{{foo}}{{k}}{{foo}}{{k}}{{foo}}</p>"
- + "{% endif %}"
- + "{% endfor %}"
- + "{% endif %}"
- + "{% endfor %}"
-);
+tplString = "{% for v in array %}"
++ "{% if 1 %}"
++ "{% for k in v %}"
++ "\n{{forloop.index}} {{k}}: "
++ "{% if forloop.index in 'msafas' %}"
++ "<p>Hello World {{k}}{{foo}}{{k}}{{foo}}{{k}}{{foo}}</p>"
++ "{% endif %}"
++ "{% endfor %}"
++ "{% endif %}"
++ "{% endfor %}";
+
+console.time('Compile Template');
+i = 1000;
+while (i--) {
+ tplS = template.fromString(tplString);
+}
+console.timeEnd('Compile Template');
array = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], { af: "s", baz: "d", d: "f" }, "zeus"];
tplF = template.fromFile("include_base.html");
|
Stop using crypto for template cache.
Greatly improves speed when looking up the cache. Hopefully doesn't cause memory issues with the size of keys in the cache object.
|
Thunf_swiger
|
train
|
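The swig change above drops the md5 step and keys the template cache on the source string itself. A rough Python analogue, with a stub compiler standing in for `createTemplate`:

```python
import hashlib

def compile_template(source):
    return ('compiled', source)  # stub standing in for createTemplate

CACHE = {}

def from_string_old(source):
    # Old path: hash first, then look up. The md5 round-trip costs
    # more than the dict lookup it guards.
    key = hashlib.md5(source.encode('utf-8')).hexdigest()
    if key not in CACHE:
        CACHE[key] = compile_template(source)
    return CACHE[key]

def from_string_new(source):
    # New path: the source itself is the key. Lookups get cheaper;
    # the trade-off is that long templates become long dict keys.
    if source not in CACHE:
        CACHE[source] = compile_template(source)
    return CACHE[source]

tpl = from_string_new("{% for v in array %}{{ v }}{% endfor %}")
assert from_string_new("{% for v in array %}{{ v }}{% endfor %}") is tpl
```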
42979530b8ab516cec6bbc4660f65e4d673d4d7e
|
diff --git a/bqplot/overlays.py b/bqplot/overlays.py
index <HASH>..<HASH> 100644
--- a/bqplot/overlays.py
+++ b/bqplot/overlays.py
@@ -343,6 +343,8 @@ class BrushIntervalSelectorOverlay(OneDSelectorOverlay):
This attribute can be used to trigger computationally intensive code
which should be run only on the interval selection being completed as
opposed to code which should be run whenever selected is changing.
+ color: Color or None (default: None)
+ color of the rectangle representing the brush selector
"""
_view_name = Unicode('bqplot.BrushIntervalSelectorOverlay', sync=True)
brushing = Bool(False, sync=True)
@@ -385,6 +387,8 @@ class BrushSelectorOverlay(TwoDSelectorOverlay):
This attribute can be used to trigger computationally intensive code
which should be run only on the interval selection being completed as
opposed to code which should be run whenever selected is changing.
+ color: Color or None (default: None)
+ color of the rectangle representing the brush selector
"""
_view_name = Unicode('bqplot.BrushSelectorOverlay', sync=True)
clear = Bool(False, sync=True)
|
added docs for brush and brush overlay
|
bloomberg_bqplot
|
train
|
1f09a9451aaadc2a182086d68318cd88a3bf7f89
|
diff --git a/sonar-plugin-api/src/main/java/org/sonar/api/server/ws/WebService.java b/sonar-plugin-api/src/main/java/org/sonar/api/server/ws/WebService.java
index <HASH>..<HASH> 100644
--- a/sonar-plugin-api/src/main/java/org/sonar/api/server/ws/WebService.java
+++ b/sonar-plugin-api/src/main/java/org/sonar/api/server/ws/WebService.java
@@ -108,7 +108,7 @@ public interface WebService extends ServerExtension {
*
* @param path the controller path must not start or end with "/". It is recommended to start with "api/"
* and to use lower-case format with underscores, for example "api/coding_rules". Usual actions
- * are "list", "show", "create" and "delete"
+ * are "search", "list", "show", "create" and "delete"
*/
public NewController createController(String path) {
return new NewController(this, path);
|
Micro change in javadoc of WebService
|
SonarSource_sonarqube
|
train
|
e63c7be12265d7c77161ceea8f9947492b720488
|
diff --git a/test/catalog.rb b/test/catalog.rb
index <HASH>..<HASH> 100644
--- a/test/catalog.rb
+++ b/test/catalog.rb
@@ -15,16 +15,15 @@ require 'json'
program :name, "catalog"
program :version, "0.0.1"
+program :description, "Build a PDF catalog, with metadata."
def query_uri verb, prefix, issue, year
+ q = "verb=#{verb}&metadataPrefix=cr_unixml&set=#{prefix}:#{issue}:#{year}"
+ q = CGI.escape q
URI::HTTP.build({
:host => "oai.crossref.org",
:path => "/OAIHandler",
- :query => {
- :verb => verb,
- :metadataPrefix => "cr_unixml",
- :set => "#{prefix}:#{issue}:#{year}"
- }
+ :query => q
})
end
@@ -40,7 +39,7 @@ def get_dois prefix, issue, year
Net::HTTP.start uri.host do |http|
response = http.get uri.request_uri
- if response == 200
+ if response.code.to_i == 200
parse_dois response.body
else
fail "Failed to get metadata. OAI server returned: #{response.code}"
@@ -52,9 +51,9 @@ command :populate do |c|
c.syntax = "catalog populate publisher_prefix:journal_id:year"
c.description = "Add Crossref Metadata to a catalog."
- c.acton do |args, options|
+ c.action do |args, options|
args.each do |limiting_set|
- dois = get_dois limiting_set.split(":")
+ dois = get_dois(*limiting_set.split(":"))
say dois.to_json
end
end
|
catalog.rb: Rig up :populate to output DOIs for an OAI query.
|
CrossRef_pdfextract
|
train
|
976e9d1fe3e0f76f0ff877c9b1ba37c5c7fa9a3d
|
diff --git a/pkg/endpoint/endpoint.go b/pkg/endpoint/endpoint.go
index <HASH>..<HASH> 100644
--- a/pkg/endpoint/endpoint.go
+++ b/pkg/endpoint/endpoint.go
@@ -2232,10 +2232,6 @@ func (e *Endpoint) MapPin() error {
}
err = bpf.ObjPin(mapFd, e.BPFIpvlanMapPath())
- if err != nil {
- unix.Close(mapFd)
- return err
- }
-
- return nil
+ unix.Close(mapFd)
+ return err
}
|
cilium, ipvlan: fix tail call map fd leakage
After pinning we must close the fd we obtained earlier, in the success *and*
the error case.
Fixes: 7bfe<I>f<I> ("cilium, ipvlan: add initial endpoint ipvlan support")
|
cilium_cilium
|
train
|
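The cilium patch above makes the success and error paths identical: the fd is closed either way, and the pin error is returned as-is. A Python sketch of the invariant, with a placeholder for `bpf.ObjPin`:

```python
import os

def obj_pin(fd, path):
    """Placeholder for bpf.ObjPin; raises OSError on failure."""

def pin_and_close(map_fd, pin_path):
    # Whether pinning succeeds or fails, the descriptor we obtained
    # must be closed; the old code leaked it on the success path.
    try:
        obj_pin(map_fd, pin_path)
    finally:
        os.close(map_fd)
```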
d0aff3a1a79ae51b33653a944c4f0fc127dd517f
|
diff --git a/openstack_dashboard/api/cinder.py b/openstack_dashboard/api/cinder.py
index <HASH>..<HASH> 100644
--- a/openstack_dashboard/api/cinder.py
+++ b/openstack_dashboard/api/cinder.py
@@ -65,7 +65,10 @@ def volume_list(request, search_opts=None):
To see all volumes in the cloud as an admin you can pass in a special
search option: {'all_tenants': 1}
"""
- return cinderclient(request).volumes.list(search_opts=search_opts)
+ c_client = cinderclient(request)
+ if c_client is None:
+ return []
+ return c_client.volumes.list(search_opts=search_opts)
def volume_get(request, volume_id):
|
Catch another breakage, when cinder is unavailable
This was discovered during review of the backport that avoids cinder
calls, and should also be applied to the master branch.
Fixes bug <I>
Change-Id: I<I>c7fabd<I>ed<I>fc9f<I>ec<I>b2c<I>dd7c9b9ef
|
openstack_horizon
|
train
|
31aaddfaf6e09f53fb6ff1b6b42b7a43f52e5f22
|
diff --git a/go/client/files.go b/go/client/files.go
index <HASH>..<HASH> 100644
--- a/go/client/files.go
+++ b/go/client/files.go
@@ -82,12 +82,12 @@ func (s *FileSource) Open() error {
}
func (s *FileSource) Close() error {
- if s.file != nil {
- err := s.file.Close()
- s.file = nil
- return err
+ if s.file == nil {
+ return nil
}
- return io.EOF
+ err := s.file.Close()
+ s.file = nil
+ return err
}
func (s *FileSource) Read(p []byte) (n int, err error) {
|
Allow FileSource Close() to be called multiple times without error
|
keybase_client
|
train
|
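The keybase change above makes `Close()` idempotent: a second call is a no-op rather than an `io.EOF` error. The same pattern in Python (hypothetical class, not the keybase API):

```python
class FileSource:
    def __init__(self, path):
        self._file = open(path, 'rb')

    def close(self):
        # First call closes and clears the handle; later calls return
        # quietly instead of reporting an error for an absent file.
        if self._file is None:
            return
        f, self._file = self._file, None
        f.close()
```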
5c92f48508bf95df758b5921b6aef3e56fa51d68
|
diff --git a/lib/squall.rb b/lib/squall.rb
index <HASH>..<HASH> 100644
--- a/lib/squall.rb
+++ b/lib/squall.rb
@@ -7,6 +7,7 @@ module Squall
class NotFound < StandardError;end
class RequestError < StandardError;end
class ServerError < StandardError;end
+ class NoConfig < StandardError;end
autoload :Hypervisor, 'squall/hypervisor'
autoload :Config, 'squall/config'
diff --git a/lib/squall/base.rb b/lib/squall/base.rb
index <HASH>..<HASH> 100644
--- a/lib/squall/base.rb
+++ b/lib/squall/base.rb
@@ -30,6 +30,7 @@ module Squall
end
def request(request_method, path, options = {})
+ check_config
@result = self.class.send(request_method, path, options)
@success = (200..207).include?(@result.code)
case @result.code
@@ -44,6 +45,10 @@ module Squall
end
end
+ def check_config
+ raise NoConfig, "Squall.config must be specified" if Squall.config.empty?
+ end
+
def key_for_class
word = self.class.name.split("::").last.to_s.dup
word.gsub!(/::/, '/')
diff --git a/spec/squall/base_spec.rb b/spec/squall/base_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/squall/base_spec.rb
+++ b/spec/squall/base_spec.rb
@@ -58,6 +58,11 @@ describe Squall::Base do
expect { @base.request(:get, '/422') }.to raise_error(Squall::RequestError)
@base.success.should be_false
end
+
+ it "is a sad panda when the config hasn't been specified" do
+ Squall.reset_config
+ expect { @base.request(:get, '/money') }.to raise_error(Squall::NoConfig, "Squall.config must be specified")
+ end
end
describe "#errors" do
|
Raise an error when trying to make requests without setting up Squall
|
Bweeb_squall
|
train
|
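The squall commit above fails fast: every request first checks that configuration exists and raises a dedicated error otherwise. A compact Python version of the guard (names are illustrative):

```python
class NoConfig(Exception):
    """Raised when a request is attempted before configuration."""

config = {}  # stand-in for Squall.config

def request(method, path, options=None):
    # Check config up front so the caller gets a descriptive error
    # instead of a confusing failure deep inside the HTTP client.
    if not config:
        raise NoConfig("Squall.config must be specified")
    return (method, path, options or {})  # placeholder for the real call
```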
252b49d75f8fb70d29aad3b270ecfe8001a6cbf3
|
diff --git a/pypsa/linopf.py b/pypsa/linopf.py
index <HASH>..<HASH> 100644
--- a/pypsa/linopf.py
+++ b/pypsa/linopf.py
@@ -232,16 +232,18 @@ def define_ramp_limit_constraints(n, sns):
# fix up
gens_i = rup_i & fix_i
- lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]))
- rhs = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom')
- define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', spec='nonext.')
+ if not gens_i.empty:
+ lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]))
+ rhs = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom')
+ define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', spec='nonext.')
# ext up
gens_i = rup_i & ext_i
- limit_pu = n.df(c)['ramp_limit_up'][gens_i]
- p_nom = get_var(n, c, 'p_nom')[gens_i]
- lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (-limit_pu, p_nom))
- define_constraints(n, lhs, '<=', 0, c, 'mu_ramp_limit_up', spec='ext.')
+ if not gens_i.empty:
+ limit_pu = n.df(c)['ramp_limit_up'][gens_i]
+ p_nom = get_var(n, c, 'p_nom')[gens_i]
+ lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (-limit_pu, p_nom))
+ define_constraints(n, lhs, '<=', 0, c, 'mu_ramp_limit_up', spec='ext.')
# com up
gens_i = rup_i & com_i
@@ -257,16 +259,18 @@ def define_ramp_limit_constraints(n, sns):
# fix down
gens_i = rdown_i & fix_i
- lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]))
- rhs = n.df(c).loc[gens_i].eval('-1 * ramp_limit_down * p_nom')
- define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', spec='nonext.')
+ if not gens_i.empty:
+ lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]))
+ rhs = n.df(c).loc[gens_i].eval('-1 * ramp_limit_down * p_nom')
+ define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', spec='nonext.')
# ext down
gens_i = rdown_i & ext_i
- limit_pu = n.df(c)['ramp_limit_down'][gens_i]
- p_nom = get_var(n, c, 'p_nom')[gens_i]
- lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (limit_pu, p_nom))
- define_constraints(n, lhs, '>=', 0, c, 'mu_ramp_limit_down', spec='ext.')
+ if not gens_i.empty:
+ limit_pu = n.df(c)['ramp_limit_down'][gens_i]
+ p_nom = get_var(n, c, 'p_nom')[gens_i]
+ lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (limit_pu, p_nom))
+ define_constraints(n, lhs, '>=', 0, c, 'mu_ramp_limit_down', spec='ext.')
# com down
gens_i = rdown_i & com_i
|
solves #<I> (#<I>)
|
PyPSA_PyPSA
|
train
|
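The PyPSA fix above wraps each constraint block in an emptiness check, so networks with no ramp-limited generators of a given kind skip the block entirely. Sketched with a pandas Index (illustrative, not the linopf API):

```python
import pandas as pd

def define_constraints_if_any(gens_i, build_constraints):
    # Building an expression over zero generators used to fail inside
    # linexpr/eval; an empty index now short-circuits the whole block.
    if gens_i.empty:
        return
    build_constraints(gens_i)

define_constraints_if_any(pd.Index([]), lambda idx: 1 / 0)  # skipped, no error
```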
8e6dc91dc5a2a3c254263d462eeee1c416af1c13
|
diff --git a/bokeh/properties.py b/bokeh/properties.py
index <HASH>..<HASH> 100644
--- a/bokeh/properties.py
+++ b/bokeh/properties.py
@@ -480,7 +480,7 @@ class MetaHasProps(type):
# inside the instance.
subprop = copy(subprop)
if "%s" in prop.help:
- doc = prop.help % subpropname.split('_', 1)[1].replace('_', ' ')
+ doc = prop.help % subpropname.replace('_', ' ')
else:
doc = prop.help
try:
|
don't strip off fill, line, text from Include docs
|
bokeh_bokeh
|
train
|
d446a456e33c2f3e09125e91688f01a2e975f64f
|
diff --git a/models/reference_ext.go b/models/reference_ext.go
index <HASH>..<HASH> 100644
--- a/models/reference_ext.go
+++ b/models/reference_ext.go
@@ -11,7 +11,7 @@ func (r *Reference) UnmarshalJSON(data []byte) (err error) {
ref := reference{}
if err = json.Unmarshal(data, &ref); err == nil {
splitURL := strings.Split(ref.Reference, "/")
- if len(splitURL) >= 3 {
+ if len(splitURL) >= 2 {
ref.ReferencedID = splitURL[len(splitURL)-1]
ref.Type = splitURL[len(splitURL)-2]
}
|
Fix bug that caused parsing out components of references to fail
|
intervention-engine_fhir
|
train
|
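The one-character fix above (`>= 3` to `>= 2`) matters because a relative FHIR reference like `Patient/123` splits into exactly two segments. A quick Python restatement:

```python
def split_reference(reference):
    # 'Patient/123' and 'http://host/fhir/Patient/123' both end in
    # Type/ID; requiring >= 3 segments wrongly rejected the relative form.
    parts = reference.split('/')
    if len(parts) >= 2:
        return parts[-2], parts[-1]
    return None, None

assert split_reference('Patient/123') == ('Patient', '123')
assert split_reference('http://h/fhir/Patient/123') == ('Patient', '123')
assert split_reference('123') == (None, None)
```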
7efd5b17208f2728e6cec438bad6ee2d201b27a2
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -17,8 +17,8 @@
# <http://www.gnu.org/licenses/>.
from datetime import date
+from distutils import log
from distutils.cmd import Command
-from distutils.command.install_data import install_data
from distutils.command.upload import upload
from distutils.errors import DistutilsExecError, DistutilsOptionError
import os
@@ -134,30 +134,50 @@ class upload_and_tag(upload):
cmdclass['upload'] = upload_and_tag
-# modify install_data class to set absolute path in desktop file
-class install_data_and_edit(install_data):
+# add command to create start menu entries
+class install_menu(Command):
+ description = 'install start menu entries'
+ user_options = [
+ ('install-base=', 'd', "base installation directory"),
+ ('script-dir=', 'd', "script installation directory"),
+ ('lib-dir=', 'd', "library installation directory")]
+
+ def initialize_options(self):
+ self.install_base = None
+ self.script_dir = None
+ self.lib_dir = None
+
+ def finalize_options(self):
+ self.set_undefined_options('install',
+ ('install_base', 'install_base'),
+ ('install_scripts', 'script_dir'),
+ ('install_lib', 'lib_dir'))
+
def run(self):
- result = install_data.run(self)
- for path in self.outfiles:
- dir_name, base_name = os.path.split(path)
- if base_name != 'photini.desktop':
- continue
- self.announce('editing ' + path, level=2)
- if self.dry_run:
- continue
- with open(path, 'r') as src:
- lines = list(src.readlines())
- with open(path, 'w') as dst:
- for line in lines:
- if line.startswith('Icon'):
- name, sep, value = line.partition('=')
- value = os.path.normpath(os.path.join(dir_name, value))
- line = name + sep + value
- dst.write(line)
- return result
+ self.outfiles = []
+ if sys.platform.startswith('linux'):
+ desktop_path = os.path.join(
+ self.install_base, 'share/applications/photini.desktop')
+ exec_path = os.path.join(self.script_dir, 'photini')
+ icon_path = os.path.join(
+ self.lib_dir, 'photini/data/icons/48/photini.png')
+ log.info('Installing desktop file %s', desktop_path)
+ if not self.dry_run:
+ with open('src/linux/photini.desktop', 'r') as src:
+ with open(desktop_path, 'w') as dst:
+ for line in src.readlines():
+ if line.startswith('Exec'):
+ line = 'Exec=' + exec_path + ' %F\n'
+ elif line.startswith('Icon'):
+ line = 'Icon=' + icon_path + '\n'
+ dst.write(line)
+ self.outfiles.append(desktop_path)
-if sys.platform.startswith('linux'):
- cmdclass['install_data'] = install_data_and_edit
+ def get_outputs(self):
+ return self.outfiles or []
+
+cmdclass['install_menu'] = install_menu
+install.sub_commands.append(('install_menu', lambda self:True))
# modify install class to install Windows shortcuts
@@ -334,17 +354,6 @@ if babel:
'output_dir' : ('setup.py', 'src/lang/doc'),
}
-data_files = []
-if sys.platform.startswith('linux'):
- # install application menu shortcut
- data_files.append(('share/icons/hicolor/48x48/apps',
- ['src/photini/data/icons/48/photini.png']))
- data_files.append(('share/applications', ['src/linux/photini.desktop']))
- command_options['install'] = {
- 'single_version_externally_managed' : ('setup.py', '1'),
- 'record' : ('setup.py', 'install.txt'),
- }
-
with open('README.rst') as ldf:
long_description = ldf.read()
url = 'https://github.com/jim-easterbrook/Photini'
@@ -378,7 +387,6 @@ setup(name = 'Photini',
'data/*map/script.js', 'data/openstreetmap/*.js',
'data/lang/*.qm'],
},
- data_files = data_files,
cmdclass = cmdclass,
command_options = command_options,
entry_points = {
diff --git a/src/photini/__init__.py b/src/photini/__init__.py
index <HASH>..<HASH> 100644
--- a/src/photini/__init__.py
+++ b/src/photini/__init__.py
@@ -1,4 +1,4 @@
from __future__ import unicode_literals
__version__ = '2020.10.0'
-build = '1504 (b85d5e6)'
+build = '1505 (9228a33)'
|
Add new setup.py command to install menu entries
This is currently only for Linux and doesn't work with 'python3 setup.py
install' - use 'pip3 install .' instead.
|
jim-easterbrook_Photini
|
train
|
08ca7b5f2ae451b0de956c7db25761375515905d
|
diff --git a/server/client.go b/server/client.go
index <HASH>..<HASH> 100644
--- a/server/client.go
+++ b/server/client.go
@@ -2185,6 +2185,11 @@ func (c *client) checkForImportServices(acc *Account, msg []byte) {
}
// FIXME(dlc) - Do L1 cache trick from above.
rr := rm.acc.sl.Match(rm.to)
+ // If we are a route or gateway and this message is flipped to a queue subscriber we
+ // need to handle that since the processMsgResults will want a queue filter.
+ if c.kind == ROUTER || c.kind == GATEWAY && c.pa.queues == nil && len(rr.qsubs) > 0 {
+ c.makeQFilter(rr.qsubs)
+ }
c.processMsgResults(rm.acc, rr, msg, []byte(rm.to), nrr, nil)
// If this is not a gateway connection but gateway is enabled,
// try to send this converted message to all gateways.
diff --git a/server/route.go b/server/route.go
index <HASH>..<HASH> 100644
--- a/server/route.go
+++ b/server/route.go
@@ -311,6 +311,18 @@ func (c *client) processInboundRoutedMsg(msg []byte) {
c.processMsgResults(acc, r, msg, c.pa.subject, c.pa.reply, nil)
}
+// Helper function for routes and gateways to create qfilters need for
+// converted subs from imports, etc.
+func (c *client) makeQFilter(qsubs [][]*subscription) {
+ qs := make([][]byte, 0, len(qsubs))
+ for _, qsub := range qsubs {
+ if len(qsub) > 0 {
+ qs = append(qs, qsub[0].queue)
+ }
+ }
+ c.pa.queues = qs
+}
+
// Lock should be held entering here.
func (c *client) sendConnect(tlsRequired bool) {
var user, pass string
diff --git a/test/new_routes_test.go b/test/new_routes_test.go
index <HASH>..<HASH> 100644
--- a/test/new_routes_test.go
+++ b/test/new_routes_test.go
@@ -1236,6 +1236,79 @@ func TestNewRouteServiceImport(t *testing.T) {
}
}
+func TestNewRouteServiceImportQueueGroups(t *testing.T) {
+ srvA, srvB, optsA, optsB := runServers(t)
+ defer srvA.Shutdown()
+ defer srvB.Shutdown()
+
+ // Do Accounts for the servers.
+ fooA, barA := registerAccounts(t, srvA)
+ fooB, barB := registerAccounts(t, srvB)
+
+ // Add export to both.
+ addServiceExport("test.request", isPublic, fooA, fooB)
+
+ // Add import abilities to server B's bar account from foo.
+ if err := barB.AddServiceImport(fooB, "foo.request", "test.request"); err != nil {
+ t.Fatalf("Error adding service import: %v", err)
+ }
+ // Do same on A.
+ if err := barA.AddServiceImport(fooA, "foo.request", "test.request"); err != nil {
+ t.Fatalf("Error adding service import: %v", err)
+ }
+
+ // clientA will be connected to srvA and be the service endpoint and responder.
+ clientA := createClientConn(t, optsA.Host, optsA.Port)
+ defer clientA.Close()
+
+ sendA, expectA := setupConnWithAccount(t, clientA, "$foo")
+ sendA("SUB test.request QGROUP 1\r\nPING\r\n")
+ expectA(pongRe)
+
+ // Now setup client B on srvB who will do a sub from account $bar
+ // that should map account $foo's foo subject.
+ clientB := createClientConn(t, optsB.Host, optsB.Port)
+ defer clientB.Close()
+
+ sendB, expectB := setupConnWithAccount(t, clientB, "$bar")
+ sendB("SUB reply QGROUP_TOO 1\r\nPING\r\n")
+ expectB(pongRe)
+
+ // Send the request from clientB on foo.request,
+ sendB("PUB foo.request reply 2\r\nhi\r\nPING\r\n")
+ expectB(pongRe)
+
+ expectMsgsA := expectMsgsCommand(t, expectA)
+ expectMsgsB := expectMsgsCommand(t, expectB)
+
+ // Expect the request on A
+ matches := expectMsgsA(1)
+ reply := string(matches[0][replyIndex])
+ checkMsg(t, matches[0], "test.request", "1", reply, "2", "hi")
+ if reply == "reply" {
+ t.Fatalf("Expected randomized reply, but got original")
+ }
+
+ sendA(fmt.Sprintf("PUB %s 2\r\nok\r\nPING\r\n", reply))
+ expectA(pongRe)
+
+ matches = expectMsgsB(1)
+ checkMsg(t, matches[0], "reply", "1", "", "2", "ok")
+
+ if ts := fooA.TotalSubs(); ts != 1 {
+ t.Fatalf("Expected one sub to be left on fooA, but got %d", ts)
+ }
+
+ routez, _ := srvA.Routez(&server.RoutezOptions{Subscriptions: true})
+ r := routez.Routes[0]
+ if r == nil {
+ t.Fatalf("Expected 1 route, got none")
+ }
+ if r.NumSubs != 1 {
+ t.Fatalf("Expected 1 sub in the route connection, got %v", r.NumSubs)
+ }
+}
+
func TestNewRouteServiceImportDanglingRemoteSubs(t *testing.T) {
srvA, srvB, optsA, optsB := runServers(t)
defer srvA.Shutdown()
|
ServiceImports and queue groups
|
nats-io_gnatsd
|
train
|
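The gnatsd change above adds a helper that builds a queue filter from matched queue subscriptions, taking one representative queue name per non-empty group. Roughly, in Python, with dicts standing in for the subscription structs:

```python
def make_qfilter(qsubs):
    # One queue name per non-empty group; mirrors makeQFilter, which
    # feeds processMsgResults when a routed message flips to a qsub.
    return [group[0]['queue'] for group in qsubs if group]

assert make_qfilter([[{'queue': b'QGROUP'}], []]) == [b'QGROUP']
```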
79579cb95cf0ec793569c201562d85ee73350dc8
|
diff --git a/django_uwsgi/views.py b/django_uwsgi/views.py
index <HASH>..<HASH> 100755
--- a/django_uwsgi/views.py
+++ b/django_uwsgi/views.py
@@ -5,7 +5,7 @@ from django.core.urlresolvers import reverse_lazy
from django.views.generic import View, TemplateView
from django.core.exceptions import PermissionDenied
-from django import apps
+from django.conf import settings
from . import uwsgi
@@ -13,7 +13,7 @@ class uWSGIStatus(TemplateView):
"""uWSGI Status View"""
- if apps.is_installed('wagtail.wagtailadmin'):
+ if ('wagtail.wagtailadmin' in settings.INSTALLED_APPS):
template_name = 'uwsgi/wagtail_uwsgi.html'
else:
template_name = 'uwsgi/uwsgi.html'
|
Another way to check if wagtail is installed
|
unbit_django-uwsgi
|
train
|
6ec7c9d9a8a299343b665af6ef2a402a79eb8656
|
diff --git a/examples/bigdata-ping-pong/pong.php b/examples/bigdata-ping-pong/pong.php
index <HASH>..<HASH> 100644
--- a/examples/bigdata-ping-pong/pong.php
+++ b/examples/bigdata-ping-pong/pong.php
@@ -5,13 +5,14 @@ require dirname(dirname(__DIR__)) . '/vendor/autoload.php';
use React\EventLoop\Factory;
use React\Promise\Deferred;
use WyriHaximus\React\ChildProcess\Messenger\Messages\Payload;
+use WyriHaximus\React\ChildProcess\Messenger\Messenger;
$loop = Factory::create();
$recipient = \WyriHaximus\React\ChildProcess\Messenger\Factory::child($loop);
-$recipient->registerRpc('ping', function (Payload $payload, Deferred $deferred) use ($loop) {
- $deferred->resolve([
+$recipient->registerRpc('ping', function (Payload $payload) use ($loop) {
+ return \React\Promise\resolve([
'data' => $payload['data'] . $payload['data'] . $payload['data'] . $payload['data'] . $payload['data'] . $payload['data'] . $payload['data'] . $payload['data'] . $payload['data'] . $payload['data'],
]);
});
diff --git a/examples/ping-pong/pong.php b/examples/ping-pong/pong.php
index <HASH>..<HASH> 100644
--- a/examples/ping-pong/pong.php
+++ b/examples/ping-pong/pong.php
@@ -22,9 +22,9 @@ $recipient->on('message', function (Payload $payload, Messenger $messenger) {
$messenger->getLoop()->stop();
});
});
-$recipient->registerRpc('ping', function (Payload $payload, Deferred $deferred) use ($loop) {
+$recipient->registerRpc('ping', function (Payload $payload) use ($loop) {
sleep(mt_rand(1, 5));
- $deferred->resolve([
+ return \React\Promise\resolve([
'result' => $payload['i'] * $payload['i'] * $payload['i'] * $payload['i'],
]);
});
|
Updated the examples to use return-based promises
|
WyriHaximus_reactphp-child-process-pool
|
train
|
019c81f4723670396f7b8d1a4efb1d9cbe303a5a
|
diff --git a/app/models/bento_search/search_engine.rb b/app/models/bento_search/search_engine.rb
index <HASH>..<HASH> 100644
--- a/app/models/bento_search/search_engine.rb
+++ b/app/models/bento_search/search_engine.rb
@@ -76,7 +76,7 @@ module BentoSearch
def search(*arguments)
start_t = Time.now
- arguments = parse_search_arguments(*arguments)
+ arguments = normalized_search_arguments(*arguments)
results = search_implementation(arguments)
@@ -91,18 +91,9 @@ module BentoSearch
return results
end
- protected
-
- # Extend each result with each specified decorator module
- def decorate(results)
- results.each do |result|
- configuration.item_decorators.each do |decorator|
- result.extend decorator
- end
- end
- end
+
- def parse_search_arguments(*orig_arguments)
+ def normalized_search_arguments(*orig_arguments)
arguments = {}
# Two-arg style to one hash, if present
@@ -159,6 +150,19 @@ module BentoSearch
return arguments
end
+ alias_method :parse_search_arguments, :normalized_search_arguments
+
+ protected
+
+ # Extend each result with each specified decorator module
+ def decorate(results)
+ results.each do |result|
+ configuration.item_decorators.each do |decorator|
+ result.extend decorator
+ end
+ end
+ end
+
module ClassMethods
|
rename parse_search_arguments to normalized_search_arguments, with an alias to the old name. Make it public too; not worth keeping it private, and it's too convenient for testing.
|
jrochkind_bento_search
|
train
|
13384dd8a2c0e6db4c8c3123e914e8059b2f5d9e
|
diff --git a/lib/epub/parser/publication.rb b/lib/epub/parser/publication.rb
index <HASH>..<HASH> 100644
--- a/lib/epub/parser/publication.rb
+++ b/lib/epub/parser/publication.rb
@@ -1,7 +1,6 @@
require 'strscan'
require 'zipruby'
require 'nokogiri'
-require 'addressable/uri'
require 'epub/publication'
require 'epub/constants'
|
Don't load Addressable::URI in the parser. It should be loaded in the model
|
KitaitiMakoto_epub-parser
|
train
|
9d07525a71e7bc12f606d8015d21425c5580e262
|
diff --git a/src/callbacks.js b/src/callbacks.js
index <HASH>..<HASH> 100644
--- a/src/callbacks.js
+++ b/src/callbacks.js
@@ -92,9 +92,10 @@ jQuery.Callbacks = function( options ) {
var start = list.length;
(function add( args ) {
jQuery.each( args, function( _, arg ) {
- if ( jQuery.isFunction( arg ) && ( !options.unique || !self.has( arg ) ) ) {
+ var type = jQuery.type( arg );
+ if ( type === "function" && ( !options.unique || !self.has( arg ) ) ) {
list.push( arg );
- } else if ( arg && arg.length ) {
+ } else if ( arg && arg.length && type !== "string" ) {
// Inspect recursively
add( arg );
}
diff --git a/test/unit/callbacks.js b/test/unit/callbacks.js
index <HASH>..<HASH> 100644
--- a/test/unit/callbacks.js
+++ b/test/unit/callbacks.js
@@ -250,3 +250,12 @@ test( "jQuery.Callbacks.remove - should remove all instances", function() {
ok( true, "end of test" );
}).remove( fn ).fire();
});
+
+test( "jQuery.Callbacks() - adding a string doesn't cause a stack overflow", function() {
+
+ expect( 1 );
+
+ jQuery.Callbacks().add( "hello world" );
+
+ ok( true, "no stack overflow" );
+});
|
Makes sure "adding" a string to a Callbacks object doesn't cause a stack overflow, just ignore the value like <I>.x righfully did. Fixes #<I>. Unit tests added.
|
jquery_jquery
|
train
|
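The root cause in the jQuery fix above: strings are list-like (they have `.length`), so the "has a length, recurse" branch re-entered `add` for every character forever. Python has the same trap, since a one-character string iterates to itself:

```python
def add_callbacks(store, args):
    for arg in args:
        if callable(arg):
            store.append(arg)
        elif not isinstance(arg, str) and hasattr(arg, '__len__'):
            # Inspect nested containers recursively. Without the str
            # exclusion, "hi" -> "h" -> "h" -> ... never terminates.
            add_callbacks(store, arg)
        # bare strings fall through and are ignored, like the fix above

store = []
add_callbacks(store, [print, [len, "hello world"]])
assert store == [print, len]
```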
f7a248b1e55cbaaf9e77973ab70c8504ec339039
|
diff --git a/nodeshot/models.py b/nodeshot/models.py
index <HASH>..<HASH> 100755
--- a/nodeshot/models.py
+++ b/nodeshot/models.py
@@ -451,7 +451,7 @@ class Contact(models.Model):
ip = models.GenericIPAddressField(verbose_name=_('ip address'))
user_agent = models.CharField(max_length=200, blank=True)
http_referer = models.CharField(max_length=200, blank=True)
- accept_language = models.CharField(max_length=30, blank=True)
+ accept_language = models.CharField(max_length=60, blank=True)
date = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
|
Found the bug: 'accept_language': [u'Ensure this value has at most <I> characters (it has <I>).']. Closes issue #<I>
|
ninuxorg_nodeshot
|
train
|
35b111083c9ec8d1b7536d26691d6d8d4eabe9b8
|
diff --git a/governor.py b/governor.py
index <HASH>..<HASH> 100755
--- a/governor.py
+++ b/governor.py
@@ -33,7 +33,7 @@ postgresql = Postgresql(config["postgresql"], aws_host_address)
ha = Ha(postgresql, etcd)
## Start the http_server to serve a simple healthcheck
-http_server = getHTTPServer(postgresql, http_port=8080, listen_address='0.0.0.0')
+http_server = getHTTPServer(postgresql, http_port=8008, listen_address='0.0.0.0')
http_thread = threading.Thread(target=http_server.serve_forever, args=())
http_thread.daemon = True
http_thread.start()
|
Change the governor health check default port to <I> in order to avoid a conflict with etcd proxy.
|
zalando_patroni
|
train
|
be6dcd6fea6cfb1a1508e173f969523df6fcb729
|
diff --git a/client/post-editor/controller.js b/client/post-editor/controller.js
index <HASH>..<HASH> 100644
--- a/client/post-editor/controller.js
+++ b/client/post-editor/controller.js
@@ -158,12 +158,27 @@ function startEditingPostCopy( siteId, postToCopyId, context ) {
} );
context.store.dispatch( editPost( siteId, null, reduxPostAttributes ) );
actions.edit( postAttributes );
- actions.updateMetadata(
- reduce( postToCopy.metadata, ( newMetadata, { key, value } ) => {
- newMetadata[ key ] = value;
- return newMetadata;
- }, {} )
- );
+
+ /**
+ * A post metadata whitelist for Flux's `updateMetadata()` action.
+ *
+ * This is needed because blindly passing all post metadata to `updateMetadata()`
+ * causes unforeseeable issues, such as Publicize not triggering on the copied post.
+ *
+ * @see https://github.com/Automattic/wp-calypso/issues/14840
+ */
+ const metadataWhitelist = [
+ 'geo_latitude',
+ 'geo_longitude',
+ ];
+
+ // Convert the metadata array into a metadata object, needed because `updateMetadata()` expects an object.
+ const metadata = reduce( postToCopy.metadata, ( newMetadata, { key, value } ) => {
+ newMetadata[ key ] = value;
+ return newMetadata;
+ }, {} );
+
+ actions.updateMetadata( pick( metadata, metadataWhitelist ) );
} ).catch( error => {
Dispatcher.handleServerAction( {
type: 'SET_POST_LOADING_ERROR',
|
Copy Post: Add a metadata whitelist (#<I>)
Fixes #<I>
Adds a metadata whitelist in order to accurately control which metadata are to be copied over with the Copy Post feature.
The whitelist currently only contains `geo_latitude` and `geo_longitude`, which can be safely copied.
Most notably excluded from the whitelist are the Publicize-related metadata, that caused the aforementioned issue.
|
Automattic_wp-calypso
|
train
|
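The wp-calypso commit above converts the metadata array to an object and then keeps only whitelisted keys. A Python analogue of the `reduce` + `pick` pair (`_wpas_done_all` below is just a hypothetical Publicize-style key for illustration):

```python
ALLOWED_META = {'geo_latitude', 'geo_longitude'}

def copyable_metadata(metadata):
    # metadata arrives as [{'key': ..., 'value': ...}, ...]; convert to
    # a dict, then keep only the whitelisted keys.
    as_dict = {m['key']: m['value'] for m in metadata}
    return {k: v for k, v in as_dict.items() if k in ALLOWED_META}

meta = [{'key': 'geo_latitude', 'value': '52.5'},
        {'key': '_wpas_done_all', 'value': '1'}]
assert copyable_metadata(meta) == {'geo_latitude': '52.5'}
```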
f0300aaaf05b5fe9bec4a79ff111f9578c03e533
|
diff --git a/internal/driver/webui_test.go b/internal/driver/webui_test.go
index <HASH>..<HASH> 100644
--- a/internal/driver/webui_test.go
+++ b/internal/driver/webui_test.go
@@ -23,6 +23,7 @@ import (
"net/url"
"os/exec"
"regexp"
+ "sync"
"testing"
"github.com/google/pprof/internal/plugin"
@@ -101,6 +102,23 @@ func TestWebInterface(t *testing.T) {
}
}
+ // Also fetch all the test case URLs in parallel to test thread
+ // safety when run under the race detector.
+ var wg sync.WaitGroup
+ for _, c := range testcases {
+ if c.needDot && !haveDot {
+ continue
+ }
+ path := server.URL + c.path
+ for count := 0; count < 2; count++ {
+ wg.Add(1)
+ go func() {
+ http.Get(path)
+ wg.Done()
+ }()
+ }
+ }
+ wg.Wait()
}
// Implement fake object file support.
diff --git a/profile/profile.go b/profile/profile.go
index <HASH>..<HASH> 100644
--- a/profile/profile.go
+++ b/profile/profile.go
@@ -26,6 +26,7 @@ import (
"regexp"
"sort"
"strings"
+ "sync"
"time"
)
@@ -47,6 +48,10 @@ type Profile struct {
PeriodType *ValueType
Period int64
+ // The following fields are modified during encoding and copying,
+ // so are protected by a Mutex.
+ encodeMu sync.Mutex
+
commentX []int64
dropFramesX int64
keepFramesX int64
@@ -296,21 +301,25 @@ func (p *Profile) updateLocationMapping(from, to *Mapping) {
}
}
-// Write writes the profile as a gzip-compressed marshaled protobuf.
-func (p *Profile) Write(w io.Writer) error {
+func serialize(p *Profile) []byte {
+ p.encodeMu.Lock()
p.preEncode()
b := marshal(p)
+ p.encodeMu.Unlock()
+ return b
+}
+
+// Write writes the profile as a gzip-compressed marshaled protobuf.
+func (p *Profile) Write(w io.Writer) error {
zw := gzip.NewWriter(w)
defer zw.Close()
- _, err := zw.Write(b)
+ _, err := zw.Write(serialize(p))
return err
}
// WriteUncompressed writes the profile as a marshaled protobuf.
func (p *Profile) WriteUncompressed(w io.Writer) error {
- p.preEncode()
- b := marshal(p)
- _, err := w.Write(b)
+ _, err := w.Write(serialize(p))
return err
}
@@ -605,11 +614,8 @@ func (m *Mapping) Unsymbolizable() bool {
// Copy makes a fully independent copy of a profile.
func (p *Profile) Copy() *Profile {
- p.preEncode()
- b := marshal(p)
-
pp := &Profile{}
- if err := unmarshal(b, pp); err != nil {
+ if err := unmarshal(serialize(p), pp); err != nil {
panic(err)
}
if err := pp.postDecode(); err != nil {
diff --git a/profile/profile_test.go b/profile/profile_test.go
index <HASH>..<HASH> 100644
--- a/profile/profile_test.go
+++ b/profile/profile_test.go
@@ -21,6 +21,7 @@ import (
"path/filepath"
"regexp"
"strings"
+ "sync"
"testing"
"github.com/google/pprof/internal/proftest"
@@ -694,3 +695,29 @@ func TestSetMain(t *testing.T) {
t.Errorf("got %s for main", testProfile1.Mapping[0].File)
}
}
+
+// parallel runs n copies of fn in parallel.
+func parallel(n int, fn func()) {
+ var wg sync.WaitGroup
+ wg.Add(n)
+ for i := 0; i < n; i++ {
+ go func() {
+ fn()
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+func TestThreadSafety(t *testing.T) {
+ src := testProfile1.Copy()
+ parallel(4, func() { src.Copy() })
+ parallel(4, func() {
+ var b bytes.Buffer
+ src.WriteUncompressed(&b)
+ })
+ parallel(4, func() {
+ var b bytes.Buffer
+ src.Write(&b)
+ })
+}
|
Make Profile copying and encoding thread-safe. (#<I>)
Added some multi-threaded tests that allow the race detector
to detect race conditions in pprof internals.
|
google_pprof
|
train
|
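The pprof change above funnels `Write`, `WriteUncompressed`, and `Copy` through one `serialize` helper and puts the mutating `preEncode`/`marshal` pair behind a mutex. The shape of the fix, sketched in Python with stand-ins for both calls:

```python
import threading

class Profile:
    def __init__(self, samples):
        self.samples = samples
        self._encode_mu = threading.Lock()

    def _serialize(self):
        # preEncode mutates shared state before marshaling, so the
        # whole encode step is guarded by the lock.
        with self._encode_mu:
            self.samples.sort()                 # stand-in for preEncode
            return repr(self.samples).encode()  # stand-in for marshal

    def write(self, fh):
        fh.write(self._serialize())

    def copy(self):
        # Copy also goes through the locked step in the Go code; here
        # a locked snapshot stands in for marshal/unmarshal.
        with self._encode_mu:
            return Profile(list(self.samples))
```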
8d16c579af00788301fbfb7a855026b33ed92cc6
|
diff --git a/lib/__init__.py b/lib/__init__.py
index <HASH>..<HASH> 100644
--- a/lib/__init__.py
+++ b/lib/__init__.py
@@ -64,7 +64,7 @@ try:
except:
__svn_version__ = 'Unable to determine SVN revision'
-__version__ = '4.0.6dev11453'
+__version__ = '4.0.6dev11485'
# End Version Information ---------------------------------------------
# Pointer to the included Python class for WCS-based coordinate transformations
diff --git a/lib/wcs_functions.py b/lib/wcs_functions.py
index <HASH>..<HASH> 100644
--- a/lib/wcs_functions.py
+++ b/lib/wcs_functions.py
@@ -441,16 +441,21 @@ def make_outputwcs(imageObjectList,output,configObj=None):
keyname = 'driz_sep_'
for key in singleParDict:
k = key[len(keyname):]
- single_pars[WCS_USERPARS[k]] = singleParDict[key]
+ if k != 'refimage':
+ single_pars[k] = singleParDict[key]
#single_pars.update(singleParDict)
# Now, account for any user-specified reference image
if singleParDict[keyname+'refimage']:
+ def_wcs = default_wcs.deepcopy()
default_wcs = wcsutil.HSTWCS(singleParDict[keyname+'refimage'])
### Create single_wcs instance based on user parameters
outwcs.single_wcs = mergeWCS(default_wcs,single_pars)
-
+ # restore global default WCS to original value so single_drizzle WCS does not
+ # influence final_drizzle WCS
+ default_wcs = def_wcs.deepcopy()
+
final_step = configObj[util.getSectionName(configObj,7)]
finalParDict = configObj[util.getSectionName(configObj,'7a')].copy()
if final_step['driz_combine'] and finalParDict['final_wcs']:
@@ -458,8 +463,9 @@ def make_outputwcs(imageObjectList,output,configObj=None):
keyname = 'final_'
for key in finalParDict:
k = key[len(keyname):]
- final_pars[WCS_USERPARS[k]] = finalParDict[key]
- final_pars.update(finalParDict)
+ if k != 'refimage':
+ final_pars[k] = finalParDict[key]
+ #final_pars.update(finalParDict)
# Now, account for any user-specified reference image
if finalParDict[keyname+'refimage']:
default_wcs = wcsutil.HSTWCS(finalParDict[keyname+'refimage'])
diff --git a/src/cdrizzlebox.c b/src/cdrizzlebox.c
index <HASH>..<HASH> 100644
--- a/src/cdrizzlebox.c
+++ b/src/cdrizzlebox.c
@@ -711,7 +711,7 @@ do_kernel_gaussian(struct driz_param_t* p, const integer_t j,
ddx = xx - (double)ii;
/* Radial distance */
r2 = ddx*ddx + ddy*ddy;
-
+
/* Weight is a scaled Gaussian function of radial
distance */
dover = p->gaussian.es * exp(-r2 * p->gaussian.efac);
|
Problems with how the refimage and user-specified WCS parameters were applied to create the final output WCS were resolved in betadrizzle.wcs_functions. Potential conflicts between the user-specified WCS for the single drizzle step and the output WCS for the final drizzle step were also cleaned up. This allowed the betadrizzle results to exactly match the coordinate transformation results reported by WTRAXY (and betadrizzle.wcs_functions.WCSMap). WJH
git-svn-id: <URL>
|
spacetelescope_drizzlepac
|
train
|
d714bb9299a996d5b32bc26154e46c5dbfba67f4
|
diff --git a/wakatime/log.py b/wakatime/log.py
index <HASH>..<HASH> 100644
--- a/wakatime/log.py
+++ b/wakatime/log.py
@@ -38,15 +38,18 @@ class CustomEncoder(json.JSONEncoder):
class JsonFormatter(logging.Formatter):
def setup(self, timestamp, isWrite, targetFile, version, plugin):
+ encoding = sys.getfilesystemencoding()
+
self.timestamp = timestamp
self.isWrite = isWrite
- self.targetFile = targetFile
+ self.targetFile = targetFile.decode(encoding, 'ignore')
self.version = version
self.plugin = plugin
def format(self, record):
+ now = self.formatTime(record, self.datefmt).decode('utf-8', 'ignore')
data = OrderedDict([
- ('now', self.formatTime(record, self.datefmt)),
+ ('now', now),
('version', self.version),
('plugin', self.plugin),
('time', self.timestamp),
@@ -76,7 +79,7 @@ def setup_logging(args, version):
logger = logging.getLogger()
set_log_level(logger, args)
if len(logger.handlers) > 0:
- formatter = JsonFormatter(datefmt='%a %b %d %H:%M:%S %Z %Y')
+ formatter = JsonFormatter(datefmt='%Y/%m/%d %H:%M:%S %z')
formatter.setup(
timestamp=args.timestamp,
isWrite=args.isWrite,
@@ -90,7 +93,7 @@ def setup_logging(args, version):
if not logfile:
logfile = '~/.wakatime.log'
handler = logging.FileHandler(os.path.expanduser(logfile))
- formatter = JsonFormatter(datefmt='%a %b %d %H:%M:%S %Z %Y')
+ formatter = JsonFormatter(datefmt='%Y/%m/%d %H:%M:%S %z')
formatter.setup(
timestamp=args.timestamp,
isWrite=args.isWrite,
|
fix character encoding problem with localized datetime in log
|
wakatime_wakatime
|
train
|
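The wakatime fix above decodes byte strings with `errors='ignore'` before they reach the JSON log formatter (this is Python 2 era code; the sketch below uses Python 3 bytes to show the same behaviour):

```python
def safe_decode(raw, encoding='utf-8'):
    # Drop undecodable bytes rather than raising UnicodeDecodeError
    # while formatting a log record.
    return raw.decode(encoding, 'ignore')

assert safe_decode(b'caf\xc3\xa9') == 'café'   # valid UTF-8 survives
assert safe_decode(b'caf\xff') == 'caf'        # bad byte is dropped
```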
fa25d36a0b773f258068120581d536d4c06765c9
|
diff --git a/src/main/java/de/dimaki/refuel/appcast/boundary/AppcastManager.java b/src/main/java/de/dimaki/refuel/appcast/boundary/AppcastManager.java
index <HASH>..<HASH> 100644
--- a/src/main/java/de/dimaki/refuel/appcast/boundary/AppcastManager.java
+++ b/src/main/java/de/dimaki/refuel/appcast/boundary/AppcastManager.java
@@ -31,6 +31,15 @@ import java.nio.channels.ReadableByteChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
+import java.security.GeneralSecurityException;
+import java.security.SecureRandom;
+import java.security.cert.X509Certificate;
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLSession;
+import javax.net.ssl.TrustManager;
+import javax.net.ssl.X509TrustManager;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.bind.Unmarshaller;
@@ -47,6 +56,10 @@ public class AppcastManager {
//Client client;
Unmarshaller unmarshaller;
+ // Trust all certs
+ boolean trustAllCerts = false;
+ // Verify Hostname
+ boolean verifyHostname = true;
public AppcastManager() throws JAXBException {
JAXBContext jc = JAXBContext.newInstance(Appcast.class);
@@ -85,6 +98,19 @@ public class AppcastManager {
}
conn.setConnectTimeout(connectTimeout);
conn.setReadTimeout(readTimeout);
+
+ // init SSL
+ if ((trustAllCerts || !verifyHostname) && conn instanceof HttpsURLConnection) {
+ HttpsURLConnection httpsConn = (HttpsURLConnection)conn;
+ if (trustAllCerts) {
+ SSLContext sslContext = createSslContext();
+ httpsConn.setSSLSocketFactory(sslContext.getSocketFactory());
+ }
+ if (!verifyHostname) {
+ httpsConn.setHostnameVerifier(new TrustAllHostnameVerifier());
+ }
+ conn = httpsConn;
+ }
conn.connect();
appcast = (Appcast)unmarshaller.unmarshal(conn.getInputStream());
} catch (JAXBException jbe) {
@@ -95,6 +121,8 @@ public class AppcastManager {
throw new AppcastException("Unknown Host", url, 404, uhe.getMessage());
} catch (IOException ex) {
throw new AppcastException("Could not establish connection to URL", url, 403, ex.getMessage());
+ } catch (GeneralSecurityException ex) {
+ throw new AppcastException("Could not initialize SSL context", url, 500, ex.getMessage());
}
// Got a valid response
return appcast;
@@ -183,4 +211,58 @@ public class AppcastManager {
return downloaded;
}
+
+ public boolean isTrustAllCerts() {
+ return trustAllCerts;
+ }
+
+ /**
+ * Set option to trust all SSL certificates
+ * @param trustAllCerts true to trust all SSL certificates, false otherwise (default)
+ */
+ public void setTrustAllCerts(boolean trustAllCerts) {
+ this.trustAllCerts = trustAllCerts;
+ }
+
+ public boolean isVerifyHostname() {
+ return verifyHostname;
+ }
+
+ /**
+ * Set option to verify hostname
+ * @param verifyHostname true to verify hostnames in SSL sessions (default), false to disable hostname verification
+ */
+ public void setVerifyHostname(boolean verifyHostname) {
+ this.verifyHostname = verifyHostname;
+ }
+
+ private SSLContext createSslContext() throws GeneralSecurityException {
+ SSLContext sslContext = SSLContext.getInstance("TLS");
+
+ TrustManager[] trustAll = new TrustManager[] {new X509TrustManager() {
+ @Override
+ public java.security.cert.X509Certificate[] getAcceptedIssuers() {
+ return null;
+ }
+ @Override
+ public void checkClientTrusted(X509Certificate[] certs, String authType) {
+ }
+ @Override
+ public void checkServerTrusted(X509Certificate[] certs, String authType) {
+ }
+ }
+ };
+ sslContext.init(null, trustAll, new SecureRandom());
+ return sslContext;
+ }
+
+ /**
+ * Inner class to trust all hostnames
+ */
+ public class TrustAllHostnameVerifier implements HostnameVerifier {
+ @Override
+ public boolean verify(String hostname, SSLSession session) {
+ return true;
+ }
+ }
}
diff --git a/src/test/java/de/dimaki/refuel/appcast/boundary/AppcastManagerTest.java b/src/test/java/de/dimaki/refuel/appcast/boundary/AppcastManagerTest.java
index <HASH>..<HASH> 100644
--- a/src/test/java/de/dimaki/refuel/appcast/boundary/AppcastManagerTest.java
+++ b/src/test/java/de/dimaki/refuel/appcast/boundary/AppcastManagerTest.java
@@ -20,6 +20,7 @@ import java.util.List;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.bind.Unmarshaller;
+import org.junit.Ignore;
import org.junit.Test;
import org.littleshoot.proxy.HttpProxyServer;
import org.littleshoot.proxy.impl.DefaultHttpProxyServer;
@@ -120,9 +121,10 @@ public class AppcastManagerTest {
}
@Test
+ @Ignore
public void testFetchError() {
try {
- manager.fetch(new URL("http://dummy.com"),
+ manager.fetch(new URL("http://httpstat.us/403"),
Proxy.NO_PROXY,
AppcastManager.DEFAULT_CONNECT_TIMEOUT,
AppcastManager.DEFAULT_READ_TIMEOUT);
|
Added SSL trust all certs feature
|
dimaki_refuel
|
train
|
78017ede35abae76bb3741dddd90844747f4316a
|
diff --git a/extension/protobuf/src/main/java/org/openbase/jul/extension/protobuf/processing/ProtoBufJSonProcessor.java b/extension/protobuf/src/main/java/org/openbase/jul/extension/protobuf/processing/ProtoBufJSonProcessor.java
index <HASH>..<HASH> 100644
--- a/extension/protobuf/src/main/java/org/openbase/jul/extension/protobuf/processing/ProtoBufJSonProcessor.java
+++ b/extension/protobuf/src/main/java/org/openbase/jul/extension/protobuf/processing/ProtoBufJSonProcessor.java
@@ -72,7 +72,7 @@ public class ProtoBufJSonProcessor {
throw new CouldNotPerformException("Could not serialize service argument to string!", ex);
}
} else {
- throw new InvalidStateException("Service attribute is not a protobuf message!");
+ throw new InvalidStateException("Service attribute Class["+serviceState.getClass().getSimpleName()+"] not a protobuf message!");
}
return jsonStringRep;
@@ -114,7 +114,7 @@ public class ProtoBufJSonProcessor {
}
/**
- * Deserialise a JSon string representation for an rst value given the class
+ * Deserialise a JSon string representation for a protobuf message given the class
* name for the value or the type if its a primitive.
*
* @param jsonStringRep the string representation of the rst value
@@ -126,11 +126,6 @@ public class ProtoBufJSonProcessor {
*/
public Message deserialize(String jsonStringRep, String serviceStateClassName) throws CouldNotPerformException {
try {
-
- if (!serviceStateClassName.startsWith("org.openbase.type")) {
- throw new NotSupportedException(serviceStateClassName, this, "Service arguments must be a protobuf message but detected type is ["+serviceStateClassName+"]!");
- }
-
try {
Class serviceStateClass = Class.forName(serviceStateClassName);
if (serviceStateClass.isEnum()) {
|
remove openbase type package filter to support any protobuf type deserialization.
|
openbase_jul
|
train
|
98613d63756c5420276b24e6e87c74054bbba700
|
diff --git a/lib/uv-rays/http/parser.rb b/lib/uv-rays/http/parser.rb
index <HASH>..<HASH> 100644
--- a/lib/uv-rays/http/parser.rb
+++ b/lib/uv-rays/http/parser.rb
@@ -95,6 +95,11 @@ module UV
# If chunked we'll buffer streaming data for notification
@chunked = data == 'chunked'
+ end
+
+ current = @headers[@header]
+ if current
+ @headers[@header] = "#{current}, #{data}"
else
@headers[@header] = data
end
@@ -141,7 +146,7 @@ module UV
def eof
return if @request.nil?
- if @headers_complete && @headers[:'Content-Length'].nil?
+ if @headers_complete && (@headers[:'Content-Length'].nil? || @request.method == :head)
on_message_complete(nil)
else
# Reject if this is a partial response
|
(HTTP res parser) fix head requests
HEAD requests now return a response; previously these were being
rejected because they set a content length but returned no content.
Also fixes header handling where a header has multiple values
|
cotag_uv-rays
|
train
|
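Two behaviours from the uv-rays parser fix, restated in Python: repeated header fields fold into one comma-separated value, and a HEAD response is complete even when it advertises a Content-Length.

```python
def add_header(headers, name, value):
    # Repeated fields fold into a single comma-separated value.
    if name in headers:
        headers[name] = f"{headers[name]}, {value}"
    else:
        headers[name] = value

def message_complete(method, headers):
    # A HEAD response may advertise Content-Length yet carries no
    # body, so it must not wait for content that will never arrive.
    return method == 'HEAD' or 'Content-Length' not in headers

h = {}
add_header(h, 'Vary', 'Accept')
add_header(h, 'Vary', 'Cookie')
assert h['Vary'] == 'Accept, Cookie'
assert message_complete('HEAD', {'Content-Length': '42'})
```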
fecb837888f3754a06ad2a33ddcf5ee561ccadaa
|
diff --git a/src/Node/Blockquote.php b/src/Node/Blockquote.php
index <HASH>..<HASH> 100644
--- a/src/Node/Blockquote.php
+++ b/src/Node/Blockquote.php
@@ -32,6 +32,13 @@ class Blockquote extends Block implements NodeInterface, NodeAcceptorInterface
return $paragraph;
}
+ public function acceptHeading(Heading $heading)
+ {
+ $this->addChild($heading);
+
+ return $this;
+ }
+
public function acceptBlockquote(Blockquote $blockquote)
{
$this->merge($blockquote);
|
Make sure headings can be children of blockquotes.
|
fluxbb_commonmark
|
train
|
c4cabdb76b6ab9ecc0244c48bf2c3c6c6646d225
|
diff --git a/go/libkb/test_common.go b/go/libkb/test_common.go
index <HASH>..<HASH> 100644
--- a/go/libkb/test_common.go
+++ b/go/libkb/test_common.go
@@ -20,6 +20,7 @@ import (
"github.com/keybase/client/go/logger"
keybase1 "github.com/keybase/client/go/protocol"
+ "github.com/keybase/gregor"
)
// TestConfig tracks libkb config during a test
@@ -250,6 +251,8 @@ func setupTestContext(tb testing.TB, name string, tcPrev *TestContext) (tc TestC
return
}
+ g.GregorDismisser = &FakeGregorDismisser{}
+
tc.PrevGlobal = G
G = g
tc.G = g
@@ -400,3 +403,14 @@ type TestLoginCancelUI struct {
func (t *TestLoginCancelUI) GetEmailOrUsername(_ context.Context, _ int) (string, error) {
return "", InputCanceledError{}
}
+
+type FakeGregorDismisser struct {
+ dismissedIDs []gregor.MsgID
+}
+
+var _ GregorDismisser = (*FakeGregorDismisser)(nil)
+
+func (f *FakeGregorDismisser) DismissItem(id gregor.MsgID) error {
+ f.dismissedIDs = append(f.dismissedIDs, id)
+ return nil
+}
|
define the FakeGregorDismisser for tests
|
keybase_client
|
train
|
d8c40fc298320cc46f18e9f512f47abcb1a98c0c
|
diff --git a/django_jenkins/tasks/lettuce_tests.py b/django_jenkins/tasks/lettuce_tests.py
index <HASH>..<HASH> 100644
--- a/django_jenkins/tasks/lettuce_tests.py
+++ b/django_jenkins/tasks/lettuce_tests.py
@@ -3,7 +3,7 @@ import os
from optparse import make_option
from django.conf import settings
from django_jenkins.tasks import BaseTask
-from unittest import TestCase
+from django.test import LiveServerTestCase
from lettuce.django import harvest_lettuces
from lettuce import Runner
from lettuce import registry
@@ -52,7 +52,7 @@ class Task(BaseTask):
return suite
-class LettuceTestCase(TestCase):
+class LettuceTestCase(LiveServerTestCase):
def __init__(self, runner, app_module, *args, **kwargs):
super(LettuceTestCase, self).__init__(*args, **kwargs)
self.runner = runner
|
switch from TestCase to LiveServerTestCase in lettuce_tests.py to better support liveserver option
|
kmmbvnr_django-jenkins
|
train
|
d654dbafac6ccfde77ba9611c7c36e09d6c4ca73
|
diff --git a/metrics/cgroups/v1/metric.go b/metrics/cgroups/v1/metric.go
index <HASH>..<HASH> 100644
--- a/metrics/cgroups/v1/metric.go
+++ b/metrics/cgroups/v1/metric.go
@@ -24,6 +24,9 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
+// IDName is the name that is used to identify the id being collected in the metric
+var IDName = "container_id"
+
type value struct {
v float64
l []string
@@ -41,7 +44,7 @@ type metric struct {
func (m *metric) desc(ns *metrics.Namespace) *prometheus.Desc {
// the namespace label is for containerd namespaces
- return ns.NewDesc(m.name, m.help, m.unit, append([]string{"container_id", "namespace"}, m.labels...)...)
+ return ns.NewDesc(m.name, m.help, m.unit, append([]string{IDName, "namespace"}, m.labels...)...)
}
func (m *metric) collect(id, namespace string, stats *v1.Metrics, ns *metrics.Namespace, ch chan<- prometheus.Metric, block bool) {
diff --git a/metrics/cgroups/v2/metric.go b/metrics/cgroups/v2/metric.go
index <HASH>..<HASH> 100644
--- a/metrics/cgroups/v2/metric.go
+++ b/metrics/cgroups/v2/metric.go
@@ -24,6 +24,9 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
+// IDName is the name that is used to identify the id being collected in the metric
+var IDName = "container_id"
+
type value struct {
v float64
l []string
@@ -41,7 +44,7 @@ type metric struct {
func (m *metric) desc(ns *metrics.Namespace) *prometheus.Desc {
// the namespace label is for containerd namespaces
- return ns.NewDesc(m.name, m.help, m.unit, append([]string{"container_id", "namespace"}, m.labels...)...)
+ return ns.NewDesc(m.name, m.help, m.unit, append([]string{IDName, "namespace"}, m.labels...)...)
}
func (m *metric) collect(id, namespace string, stats *v2.Metrics, ns *metrics.Namespace, ch chan<- prometheus.Metric, block bool) {
|
Allow the id for cgroup metrics to be changed
This makes the metrics package more extensible by allowing the default name of
`container_id` to be changed by the package caller.
|
containerd_containerd
|
train
|
b362d2a0b7072eef47a17e854b89617a13f06b81
|
diff --git a/doc/bridge_connection.md b/doc/bridge_connection.md
index <HASH>..<HASH> 100644
--- a/doc/bridge_connection.md
+++ b/doc/bridge_connection.md
@@ -168,4 +168,14 @@ $httpServletRequest = $context->getHttpServletRequest();
// @see http://docs.soluble.io/php-java-bridge/api/index.html?io/soluble/pjb/servlet/PhpJavaServlet.html
$servlet = $context->getServlet();
+
+// $servletContext on Tomcat would be
+// JavaObject: org.apache.catalina.core.ApplicationContextFacade
+$servletContext = $context->getServletContext();
+
+// $servletConfig on Tomcat would be
+// JavaObject: 'org.apache.catalina.core.StandardWrapperFacade
+$servletConfig = $context->getServlet()->getServletConfig();
+
+
```
\ No newline at end of file
diff --git a/test/src/SolubleTest/Japha/Bridge/Driver/DriverContextServletTest.php b/test/src/SolubleTest/Japha/Bridge/Driver/DriverContextServletTest.php
index <HASH>..<HASH> 100644
--- a/test/src/SolubleTest/Japha/Bridge/Driver/DriverContextServletTest.php
+++ b/test/src/SolubleTest/Japha/Bridge/Driver/DriverContextServletTest.php
@@ -66,6 +66,7 @@ class DriverContextServletTest extends \PHPUnit_Framework_TestCase
throw $e;
}
}
+
$this->assertInstanceOf(JavaObject::class, $servletContext);
$className = $this->driver->getClassName($servletContext);
@@ -93,16 +94,38 @@ class DriverContextServletTest extends \PHPUnit_Framework_TestCase
$servletConfig = $servletContext->getServletConfig();
$this->assertInstanceOf(JavaObject::class, $servletConfig);
- /*
- foreach($servletConfig as $cfg) {
-
- }*/
-
// on Tomcat could be : org.apache.catalina.core.StandardWrapperFacade
//$this->assertEquals('org.apache.catalina.core.StandardWrapperFacade', $this->driver->getClassName($servletConfig));
+ $servletContext = $context->getServletContext();
+
$paramNames = $servletContext->getInitParameterNames();
//echo $this->driver->getClassName($paramNames);
$this->assertInstanceOf(JavaObject::class, $paramNames);
}
+
+ public function testGetServletOnTomcat()
+ {
+ $context = $this->driver->getContext();
+ try {
+ $servletContext = $context->getServlet();
+ } catch (JavaException $e) {
+ $msg = $e->getMessage();
+ if ($e->getJavaClassName() == 'java.lang.IllegalStateException' &&
+ preg_match('/PHP not running in a servlet environment/', $msg)) {
+ // Basically mark this test as skipped as the test
+ // was made on the standalone server
+ $this->markTestIncomplete('Retrieval of servlet context is not supported with the standalone server');
+
+ return;
+ } else {
+ throw $e;
+ }
+ }
+
+ $servletConfig = $servletContext->getServletConfig();
+ $this->assertEquals('org.apache.catalina.core.StandardWrapperFacade', $this->driver->getClassName($servletConfig));
+
+ $this->assertEquals('org.apache.catalina.core.ApplicationContextFacade', $this->driver->getClassName($context->getServletContext()));
+ }
}
diff --git a/test/src/SolubleTest/Japha/Bridge/Driver/DriverContextTest.php b/test/src/SolubleTest/Japha/Bridge/Driver/DriverContextTest.php
index <HASH>..<HASH> 100644
--- a/test/src/SolubleTest/Japha/Bridge/Driver/DriverContextTest.php
+++ b/test/src/SolubleTest/Japha/Bridge/Driver/DriverContextTest.php
@@ -118,6 +118,9 @@ class DriverContextTest extends \PHPUnit_Framework_TestCase
'io.soluble.pjb.servlet.RemoteHttpServletResponse',
'php.java.servlet.RemoteServletResponse'
]);
+
+ // @todo future work on session (already committed)
+ //var_dump($context->getAttribute('name'));
}
}
}
|
Prep <I>, more docs
|
belgattitude_soluble-japha
|
train
|
67549727e4294be32b40f807e4f4c86218818a48
|
diff --git a/AlphaTwirl/Events/BEvents.py b/AlphaTwirl/Events/BEvents.py
index <HASH>..<HASH> 100755
--- a/AlphaTwirl/Events/BEvents.py
+++ b/AlphaTwirl/Events/BEvents.py
@@ -2,9 +2,11 @@
from Branch import Branch
from Events import Events
from BranchAddressManager import BranchAddressManager
+from BranchAddressManagerForVector import BranchAddressManagerForVector
##____________________________________________________________________________||
branchAddressManager = BranchAddressManager()
+branchAddressManagerForVector = BranchAddressManagerForVector()
##____________________________________________________________________________||
class BEvents(Events):
@@ -15,10 +17,21 @@ class BEvents(Events):
def __getattr__(self, name):
if name in self.branches: return self.branches[name]
- itsArray, itsCountArray = branchAddressManager.getArrays(self.tree, name)
- if itsArray is None: raise AttributeError("'" + str(self) + "' has no attribute '" + name + "'")
- self.branches[name] = Branch(name, itsArray, itsCountArray)
+ branch = self._buildBranch(self.tree, name)
+ if branch is None: raise AttributeError("'" + str(self) + "' has no attribute '" + name + "'")
+ self.branches[name] = branch
if self.iEvent >= 0: self.tree.GetEntry(self.iEvent)
return self.branches[name]
+ def _buildBranch(self, tree, name):
+ itsArray, itsCountArray = branchAddressManager.getArrays(tree, name)
+ if itsArray is not None:
+ branch = Branch(name, itsArray, itsCountArray)
+ return branch
+ itsVector = branchAddressManagerForVector.getVector(tree, name)
+ if itsVector is not None:
+ return itsVector # this can be used as a branch
+ return None
+
+
##____________________________________________________________________________||
|
use BranchAddressManagerForVector in BEvents
|
alphatwirl_alphatwirl
|
train
|
619c077eed546d866f3da1c2e4653f45049c3ba5
|
diff --git a/logging/logging.go b/logging/logging.go
index <HASH>..<HASH> 100644
--- a/logging/logging.go
+++ b/logging/logging.go
@@ -82,11 +82,12 @@ func (rl *RemoteLog) CatCmd() string {
func (rl *RemoteLog) Open() (reader io.ReadCloser, e error) {
c := rl.Command()
var cmd *exec.Cmd
- if rl.User == "" {
- rl.User = "root"
+ user := rl.User
+ if user == "" {
+ user = "root"
}
if rl.Host != "" {
- cmd = exec.Command("ssh", "-t", "-l", rl.User, rl.Host, c)
+ cmd = exec.Command("ssh", "-t", "-l", user, rl.Host, c)
} else {
cmd = exec.Command("bash", "-c", c)
}
|
keep original user (in case we need it somewhere else)
|
dynport_dgtk
|
train
|
305bfe0a393686e770c5e82cdb5f75e0e438d9af
|
diff --git a/TYPO3.Media/Migrations/Mysql/Version20151216144408.php b/TYPO3.Media/Migrations/Mysql/Version20151216144408.php
index <HASH>..<HASH> 100644
--- a/TYPO3.Media/Migrations/Mysql/Version20151216144408.php
+++ b/TYPO3.Media/Migrations/Mysql/Version20151216144408.php
@@ -17,6 +17,7 @@ class Version20151216144408 extends AbstractMigration
{
$this->abortIf($this->connection->getDatabasePlatform()->getName() != "mysql");
+ $this->addSql("TRUNCATE TABLE typo3_media_domain_model_thumbnail");
$this->addSql("DROP INDEX originalasset_configurationhash ON typo3_media_domain_model_thumbnail");
$this->addSql("CREATE UNIQUE INDEX originalasset_configurationhash ON typo3_media_domain_model_thumbnail (originalasset, configurationhash)");
}
diff --git a/TYPO3.Media/Migrations/Postgresql/Version20151216144435.php b/TYPO3.Media/Migrations/Postgresql/Version20151216144435.php
index <HASH>..<HASH> 100644
--- a/TYPO3.Media/Migrations/Postgresql/Version20151216144435.php
+++ b/TYPO3.Media/Migrations/Postgresql/Version20151216144435.php
@@ -17,6 +17,7 @@ class Version20151216144435 extends AbstractMigration
{
$this->abortIf($this->connection->getDatabasePlatform()->getName() != "postgresql");
+ $this->addSql("TRUNCATE TABLE typo3_media_domain_model_thumbnail");
$this->addSql("DROP INDEX originalasset_configurationhash");
$this->addSql("CREATE UNIQUE INDEX originalasset_configurationhash ON typo3_media_domain_model_thumbnail (originalasset, configurationhash)");
}
|
[BUGFIX] Thumbnail migration should clear thumbnails
To avoid unique constraint errors on applying the new index the
thumbnail table should be cleared beforehand.
Any leftover resources can be cleared by executing::
./flow resource:clean
|
neos_neos-development-collection
|
train
|
b4db87e7771c920d87925f7c42305a575191e879
|
diff --git a/src/index.js b/src/index.js
index <HASH>..<HASH> 100644
--- a/src/index.js
+++ b/src/index.js
@@ -85,5 +85,11 @@ export default function funcHasParam(contents, functionName, paramName, opts) {
return false;
}
- return params.includes(paramName);
+ for (i = 0; i < params.length; i++) {
+ if (params[i] === paramName) {
+ return true;
+ }
+ }
+
+ return false;
}
|
fix(index): remove Array#includes usage
includes isn't on Array.prototype. Not sure how tests were passing.
Packages depending on func-has-param break at Array#includes, so
remove it. Wasn't able to find a Babel stage that supported it.
Consider making a ponyfill for Array#includes.
|
dustinspecker_func-has-param
|
train
|
9eac6543f4ed34df0e756976761d825879322b77
|
diff --git a/lib/puppet/indirector/rest.rb b/lib/puppet/indirector/rest.rb
index <HASH>..<HASH> 100644
--- a/lib/puppet/indirector/rest.rb
+++ b/lib/puppet/indirector/rest.rb
@@ -1,6 +1,7 @@
require 'net/http'
require 'uri'
require 'json'
+require 'semantic_puppet'
require 'puppet/network/http'
require 'puppet/network/http_pool'
@@ -249,7 +250,21 @@ class Puppet::Indirector::REST < Puppet::Indirector::Terminus
# to request.do_request from here, thus if we change what we pass or how we
# get it, we only need to change it here.
def do_request(request)
- request.do_request(self.class.srv_service, self.class.server, self.class.port) { |req| yield(req) }
+ response = request.do_request(self.class.srv_service, self.class.server, self.class.port) { |req| yield(req) }
+
+ handle_response(request, response) if response
+
+ response
+ end
+
+ def handle_response(request, response)
+ server_version = response[Puppet::Network::HTTP::HEADER_PUPPET_VERSION]
+ if server_version &&
+ SemanticPuppet::Version.parse(server_version).major < 5 &&
+ Puppet[:preferred_serialization_format] != 'pson'
+ Puppet.warning("Downgrading to PSON for future requests")
+ Puppet[:preferred_serialization_format] = 'pson'
+ end
end
def validate_key(request)
diff --git a/spec/unit/indirector/rest_spec.rb b/spec/unit/indirector/rest_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/unit/indirector/rest_spec.rb
+++ b/spec/unit/indirector/rest_spec.rb
@@ -11,6 +11,43 @@ HTTP_ERROR_CODES = [300, 400, 500]
# Just one from each category since the code makes no real distinctions
shared_examples_for "a REST terminus method" do |terminus_method|
+ describe "when the server's major version is less than 5" do
+ let(:response) do
+ mock_response(200, 'OK')
+ end
+
+ it "falls back to pson for future requests" do
+ response.stubs(:[]).with(Puppet::Network::HTTP::HEADER_PUPPET_VERSION).returns("4.10.1")
+ terminus.send(terminus_method, request)
+
+ expect(Puppet[:preferred_serialization_format]).to eq("pson")
+ end
+
+ it "doesn't change the serialization format if the X-Puppet-Version header is missing" do
+ response.stubs(:[]).with(Puppet::Network::HTTP::HEADER_PUPPET_VERSION).returns(nil)
+
+ terminus.send(terminus_method, request)
+
+ expect(Puppet[:preferred_serialization_format]).to eq("json")
+ end
+
+ it "doesn't change the serialization format if the server major version is 5" do
+ response.stubs(:[]).with(Puppet::Network::HTTP::HEADER_PUPPET_VERSION).returns("5.0.3")
+
+ terminus.send(terminus_method, request)
+
+ expect(Puppet[:preferred_serialization_format]).to eq("json")
+ end
+
+ it "doesn't change the serialization format if the current format is already pson" do
+ response.stubs(:[]).with(Puppet::Network::HTTP::HEADER_PUPPET_VERSION).returns("4.10.1")
+ Puppet[:preferred_serialization_format] = "pson"
+ terminus.send(terminus_method, request)
+
+ expect(Puppet[:preferred_serialization_format]).to eq("pson")
+ end
+ end
+
HTTP_ERROR_CODES.each do |code|
describe "when the response code is #{code}" do
let(:message) { 'error messaged!!!' }
|
(PUP-<I>) Downgrade to pson for older servers
If we make a REST request, and the server's major version, as indicated
by the X-PUPPET-VERSION header, is less than 5, then fall back to pson
for future requests, and emit a warning:
Warning: Downgrading to PSON for future requests
Because we downgrade the preferred serialization format, we can
successfully send the report later.
Do nothing if we're already configured to use pson or if the header is
missing.
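For readers outside Ruby, a hedged Python sketch of the decision; `maybe_downgrade` and `settings` are made-up names, not Puppet's API:

# Illustrative only: parse the major version from the response header and
# fall back to pson for pre-5 servers that are not already using pson.
def maybe_downgrade(settings, server_version):
    if (server_version
            and int(server_version.split('.')[0]) < 5
            and settings['preferred_serialization_format'] != 'pson'):
        print('Warning: Downgrading to PSON for future requests')
        settings['preferred_serialization_format'] = 'pson'

settings = {'preferred_serialization_format': 'json'}
maybe_downgrade(settings, '4.10.1')  # downgrades and warns
maybe_downgrade(settings, None)      # header missing: does nothing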
|
puppetlabs_puppet
|
train
|
2a1f0ff893f61c8e717cfe0b1dd7d2910b440b54
|
diff --git a/events.go b/events.go
index <HASH>..<HASH> 100644
--- a/events.go
+++ b/events.go
@@ -16,6 +16,7 @@ var eventToInterface = map[string]interface{}{
"CHANNEL_CREATE": ChannelCreate{},
"CHANNEL_UPDATE": ChannelUpdate{},
"CHANNEL_DELETE": ChannelDelete{},
+ "CHANNEL_PINS_UPDATE": ChannelPinsUpdate{},
"GUILD_CREATE": GuildCreate{},
"GUILD_UPDATE": GuildUpdate{},
"GUILD_DELETE": GuildDelete{},
diff --git a/structs.go b/structs.go
index <HASH>..<HASH> 100644
--- a/structs.go
+++ b/structs.go
@@ -464,6 +464,12 @@ type UserGuildSettingsEdit struct {
ChannelOverrides map[string]*UserGuildSettingsChannelOverride `json:"channel_overrides"`
}
+// ChannelPinsUpdate stores data for the channel pins update event
+type ChannelPinsUpdate struct {
+ LastPinTimestamp string `json:"last_pin_timestamp"`
+ ChannelID string `json:"channel_id"`
+}
+
// Constants for the different bit offsets of text channel permissions
const (
PermissionReadMessages = 1 << (iota + 10)
|
Added the CHANNEL_PINS_UPDATE event
|
bwmarrin_discordgo
|
train
|
3138638c50bec03df19cbb036f067dac60d3042d
|
diff --git a/assets/query-monitor.js b/assets/query-monitor.js
index <HASH>..<HASH> 100644
--- a/assets/query-monitor.js
+++ b/assets/query-monitor.js
@@ -118,9 +118,12 @@ jQuery( function($) {
hilite = $(this).attr('data-highlight'),
time = 0;
+ if ( hilite ) {
+ table.find('tr').removeClass('qm-highlight');
+ }
+
if ( $(this).val() !== '' ) {
if ( hilite ) {
- tr.removeClass('qm-highlight');
tr.filter('[data-qm-'+hilite+'*="' + val + '"]').addClass('qm-highlight');
}
tr.not('[data-qm-' + filter + '*="' + val + '"]').addClass('qm-hide-' + filter);
|
Improve highlighting removal when altering filters.
|
johnbillion_query-monitor
|
train
|
6d71384afb7a91429637c0267056ac7a9ccb7983
|
diff --git a/actionview/lib/action_view/helpers/form_tag_helper.rb b/actionview/lib/action_view/helpers/form_tag_helper.rb
index <HASH>..<HASH> 100644
--- a/actionview/lib/action_view/helpers/form_tag_helper.rb
+++ b/actionview/lib/action_view/helpers/form_tag_helper.rb
@@ -735,12 +735,40 @@ module ActionView
# * <tt>:max</tt> - The maximum acceptable value.
# * <tt>:in</tt> - A range specifying the <tt>:min</tt> and
# <tt>:max</tt> values.
+ # * <tt>:within</tt> - Same as <tt>:in</tt>.
# * <tt>:step</tt> - The acceptable value granularity.
# * Otherwise accepts the same options as text_field_tag.
#
# ==== Examples
+ # number_field_tag 'quantity'
+ # # => <input id="quantity" name="quantity" type="number" />
+ #
+ # number_field_tag 'quantity', '1'
+ # # => <input id="quantity" name="quantity" type="number" value="1" />
+ #
+ # number_field_tag 'quantity', nil, class: 'special_input'
+ # # => <input class="special_input" id="quantity" name="quantity" type="number" />
+ #
+ # number_field_tag 'quantity', nil, min: 1
+ # # => <input id="quantity" name="quantity" min="1" type="number" />
+ #
+ # number_field_tag 'quantity', nil, max: 9
+ # # => <input id="quantity" name="quantity" max="9" type="number" />
+ #
# number_field_tag 'quantity', nil, in: 1...10
# # => <input id="quantity" name="quantity" min="1" max="9" type="number" />
+ #
+ # number_field_tag 'quantity', nil, within: 1...10
+ # # => <input id="quantity" name="quantity" min="1" max="9" type="number" />
+ #
+ # number_field_tag 'quantity', nil, min: 1, max: 10
+ # # => <input id="quantity" name="quantity" min="1" max="10" type="number" />
+ #
+ # number_field_tag 'quantity', nil, min: 1, max: 10, step: 2
+ # # => <input id="quantity" name="quantity" min="1" max="10" step="2" type="number" />
+ #
+ # number_field_tag 'quantity', '1', class: 'special_input', disabled: true
+ # # => <input disabled="disabled" class="special_input" id="quantity" name="quantity" type="number" value="1" />
def number_field_tag(name, value = nil, options = {})
options = options.stringify_keys
options["type"] ||= "number"
|
[ci skip] Added example for number_field_tag method
|
rails_rails
|
train
|
b7237e248ebc27a9f55f822eadc49cf2e32220d4
|
diff --git a/pyfas/ppl.py b/pyfas/ppl.py
index <HASH>..<HASH> 100644
--- a/pyfas/ppl.py
+++ b/pyfas/ppl.py
@@ -92,7 +92,7 @@ class Ppl:
except ValueError:
pass
raw_geometry.extend(points)
- if 'CATALOG' in line or 'BRANCH' in line:
+ if ('CATALOG' in line) or ('BRANCH' in line) or ('ANNULUS' in line):
break
xy_geo = raw_geometry
self.geometries[branch] = (xy_geo[:int(len(xy_geo)/2)],
|
Additional criteria to stop reading geom. in ppl
Fixes a bug where an ANNULUS geometry following the BRANCH geometry never hit the break condition, so extra incorrect values were appended to raw_geometry.
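A minimal Python sketch of the widened stop condition (hypothetical helper and input data, not the package's full parser):

def collect_geometry(lines):
    # Stop as soon as any section keyword appears, now including ANNULUS.
    stop_words = ('CATALOG', 'BRANCH', 'ANNULUS')
    raw_geometry = []
    for line in lines:
        if any(word in line for word in stop_words):
            break
        for token in line.split():
            try:
                raw_geometry.append(float(token))
            except ValueError:
                pass  # skip non-numeric tokens, as the original loop does
    return raw_geometry

print(collect_geometry(['1.0 2.0', '3.0 4.0', 'ANNULUS', '9.9']))
# -> [1.0, 2.0, 3.0, 4.0]; before the fix, the 9.9 after ANNULUS leaked in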
|
gpagliuca_pyfas
|
train
|
4fdbfb3f5d4353cfdb7a0d115d8ac63938fc8b3f
|
diff --git a/lib/jars/maven_exec.rb b/lib/jars/maven_exec.rb
index <HASH>..<HASH> 100644
--- a/lib/jars/maven_exec.rb
+++ b/lib/jars/maven_exec.rb
@@ -93,13 +93,20 @@ module Jars
args << '--quiet'
end
- if Jars.maven_user_settings.nil? && (proxy = Gem.configuration[ :proxy ]).is_a?( String )
+ # TODO what todo with https proxy ?
+ # FIX this proxy settings seems not to work
+ if (proxy = Gem.configuration[ :http_proxy ]).is_a?( String )
require 'uri'; uri = URI.parse( proxy )
args << "-DproxySet=true"
args << "-DproxyHost=#{uri.host}"
args << "-DproxyPort=#{uri.port}"
end
+ if Jars.maven_settings
+ args << '-s'
+ args << Jars.maven_settings
+ end
+
args << "-Dmaven.repo.local=#{java.io.File.new( Jars.local_maven_repo ).absolute_path}"
args
|
pass Maven settings file in case it exists
|
mkristian_jar-dependencies
|
train
|
39ccf3d3da9a68136fa27715e7d33cd7f82c1b5f
|
diff --git a/lib/prey/utils/managed_cache.js b/lib/prey/utils/managed_cache.js
index <HASH>..<HASH> 100644
--- a/lib/prey/utils/managed_cache.js
+++ b/lib/prey/utils/managed_cache.js
@@ -122,8 +122,8 @@ var ManagedCache = function() {
Calls back a value.
*/
this.value = function(id,force,callback) {
- if(typeof callback === 'undefined') {
- if (typeof force === 'function') {
+ if(callback === 'undefined') {
+ if (force === 'function') {
callback = force;
force = false;
} else throw new Error("ManagedCache.value: No callback supplied");
@@ -138,8 +138,8 @@ var ManagedCache = function() {
Calls back an entry {time:time,value:value}
*/
this.entry = function(id,force,callback) {
- if(typeof callback === 'undefined') {
- if (typeof force === 'function') {
+ if(callback === 'undefined') {
+ if (force === 'function') {
callback = force;
force = false;
} else throw new Error("ManagedCache.entry: No callback supplied");
@@ -168,6 +168,7 @@ var ManagedCache = function() {
el.freshen = function(cb) { cb(false); };
if (el.freshenIntervalID !== -1) {
clearInterval(el.freshenIntervalID);
+ el.freshenIntervalID = -1;
}
};
@@ -202,7 +203,7 @@ module.exports.stats = function(cache) {
keys:cache.keys(),
hitsById:hitsById,
freshById:freshById
- }
+ };
};
};
|
remove typeof in favor of === (suggested by jslint)
|
prey_prey-node-client
|
train
|
dac0d7150fcc29d48a620681818b803c277f5e3d
|
diff --git a/lib/holepicker/online_database.rb b/lib/holepicker/online_database.rb
index <HASH>..<HASH> 100644
--- a/lib/holepicker/online_database.rb
+++ b/lib/holepicker/online_database.rb
@@ -1,14 +1,13 @@
require 'holepicker/database'
require 'holepicker/logger'
require 'holepicker/utils'
-require 'net/http'
-require 'net/https'
+require 'open-uri'
module HolePicker
class OnlineDatabase < Database
include HasLogger
- URL = 'https://raw.github.com/jsuder/holepicker/master/lib/holepicker/data/data.json'
+ URL = 'https://raw.githubusercontent.com/jsuder/holepicker/master/lib/holepicker/data/data.json'
def self.load
logger.info "Fetching list of vulnerabilities..."
@@ -32,12 +31,7 @@ module HolePicker
private
def self.http_get(url)
- uri = URI(url)
- http = Net::HTTP.new(uri.host, uri.port)
- http.use_ssl = url.start_with?('https')
-
- response = http.get(uri.request_uri)
- response.body
+ open(url).read
end
def check_compatibility
|
fix: new domain for github raw data + follow redirect to avoid further issues
|
mackuba_holepicker
|
train
|
dcbe8d66af81426652973ccf8788ae0b008dda08
|
diff --git a/text_formatter.go b/text_formatter.go
index <HASH>..<HASH> 100644
--- a/text_formatter.go
+++ b/text_formatter.go
@@ -69,7 +69,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
}
b.WriteByte('\n')
- return b.Bytes(), nil
+ return b.Bytes()[1:], nil
}
func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
@@ -85,7 +85,7 @@ func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
levelText := strings.ToUpper(entry.Level.String())[0:4]
- fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m[%04d] %-44s", levelColor, levelText, miniTS(), entry.Message)
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
@@ -111,12 +111,12 @@ func printKeyValue(b *bytes.Buffer, key, value interface{}) {
case error:
value = value.(error).Error()
default:
- fmt.Fprintf(b, "%v=%v ", key, value)
+ fmt.Fprintf(b, " %v=%v", key, value)
}
if needsQuoting(value.(string)) {
- fmt.Fprintf(b, "%v=%s ", key, value)
+ fmt.Fprintf(b, " %v=%s", key, value)
} else {
- fmt.Fprintf(b, "%v=%q ", key, value)
+ fmt.Fprintf(b, " %v=%q", key, value)
}
}
diff --git a/text_formatter_test.go b/text_formatter_test.go
index <HASH>..<HASH> 100644
--- a/text_formatter_test.go
+++ b/text_formatter_test.go
@@ -31,3 +31,30 @@ func TestQuoting(t *testing.T) {
checkQuoting(false, errors.New("invalid"))
checkQuoting(true, errors.New("invalid argument"))
}
+
+func TestTextPrint(t *testing.T) {
+ tf := &TextFormatter{DisableColors: true}
+ byts, _ := tf.Format(&Entry{Message: "msg content"})
+
+ // make sure no leading or trailing spaces
+ if string(byts) !=
+ "time=\"0001-01-01T00:00:00Z\" level=panic msg=\"msg content\"\n" {
+ t.Errorf("not expected: %q", string(byts))
+ }
+}
+
+func TestColorPrint(t *testing.T) {
+ tf := &TextFormatter{ForceColors: true}
+ entry := WithField("testkey", "value")
+ entry.Message = "msg content"
+ byts, _ := tf.Format(entry)
+
+ // make sure no leading or trailing spaces
+ if string(byts) !=
+ "\x1b[31mPANI\x1b[0m[0000] " +
+ // length 44 plus one space
+ "msg content " +
+ "\x1b[31mtestkey\x1b[0m=value\n" {
+ t.Errorf("not expected: %q", string(byts))
+ }
+}
|
make sure no leading or trailing spaces
This changed printColored and printKeyValue to print in the same way,
with a prefix space instead of a trailing space, making the leading
space easy to slice off when returning from Format;
The test cases make sure msg formatting doesn't include leading or
trailing spaces;
Closes #<I>
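The prefix-space trick is language-independent; a rough Python sketch of the same idea (illustrative names, not logrus code):

# Every field is written with a leading space; slicing off the first
# character at the end leaves neither leading nor trailing spaces.
def format_entry(message, fields):
    buf = ' ' + message
    for key, value in fields.items():
        buf += ' %s=%s' % (key, value)
    return buf[1:] + '\n'

assert format_entry('msg content', {'testkey': 'value'}) == \
    'msg content testkey=value\n'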
|
sirupsen_logrus
|
train
|
61c697c50941f6d66ed42399eb9471c4e8c723f0
|
diff --git a/oceandb_elasticsearch_driver/mapping.py b/oceandb_elasticsearch_driver/mapping.py
index <HASH>..<HASH> 100644
--- a/oceandb_elasticsearch_driver/mapping.py
+++ b/oceandb_elasticsearch_driver/mapping.py
@@ -2,7 +2,18 @@
# SPDX-License-Identifier: Apache-2.0
mapping = '''
-{"mappings": {
+{"settings": {
+ "analysis": {
+ "normalizer": {
+ "ocean_normalizer": {
+ "type": "custom",
+ "char_filter": [],
+ "filter": ["lowercase", "asciifolding"]
+ }
+ }
+ }
+ },
+ "mappings": {
"_doc": {
"properties": {
"@context": {
@@ -240,7 +251,8 @@ mapping = '''
"fields": {
"keyword": {
"type": "keyword",
- "ignore_above": 256
+ "ignore_above": 256,
+ "normalizer": "ocean_normalizer"
}
}
},
@@ -249,7 +261,8 @@ mapping = '''
"fields": {
"keyword": {
"type": "keyword",
- "ignore_above": 256
+ "ignore_above": 256,
+ "normalizer": "ocean_normalizer"
}
}
},
@@ -267,7 +280,8 @@ mapping = '''
"fields": {
"keyword": {
"type": "keyword",
- "ignore_above": 256
+ "ignore_above": 256,
+ "normalizer": "ocean_normalizer"
}
}
},
@@ -377,7 +391,8 @@ mapping = '''
"fields": {
"keyword": {
"type": "keyword",
- "ignore_above": 256
+ "ignore_above": 256,
+ "normalizer": "ocean_normalizer"
}
}
},
|
make some indexes case-insensitive
|
oceanprotocol_oceandb-elasticsearch-driver
|
train
|
75a1f88d319c393355fd361ac9b5dcc062094a77
|
diff --git a/test/connection-meta.test.js b/test/connection-meta.test.js
index <HASH>..<HASH> 100644
--- a/test/connection-meta.test.js
+++ b/test/connection-meta.test.js
@@ -169,7 +169,7 @@ describe("connection-meta", function() {
describe("get deleted account", function () {
it("should return deleted account object", function (done) {
var end = new Date();
- var start = new Date(end.getTime() - 2 * 24 * 60 * 60 * 1000); // 2 days before
+ var start = new Date(end.getTime() - 1 * 24 * 60 * 60 * 1000); // 1 day before
conn.sobject('Account').deleted(start, end, function (err, result) {
if (err) { throw err; }
assert.ok(_.isArray(result.deletedRecords));
@@ -183,7 +183,7 @@ describe("connection-meta", function() {
describe("get deleted account with string input", function () {
it("should return deleted account object", function (done) {
var end = new Date();
- var start = new Date(end.getTime() - 2 * 24 * 60 * 60 * 1000); // 2 days before
+ var start = new Date(end.getTime() - 1 * 24 * 60 * 60 * 1000); // 1 day before
conn.sobject('Account').deleted(start.toString(), end.toString(), function (err, result) {
if (err) { throw err; }
assert.ok(_.isArray(result.deletedRecords));
|
reduced replication time window from 2 days to 1 day (for deleted records)
|
jsforce_jsforce
|
train
|
a370a880fddcc939e8f9871bdc34b148803d70ec
|
diff --git a/packages/ra-ui-materialui/src/list/Empty.js b/packages/ra-ui-materialui/src/list/Empty.js
index <HASH>..<HASH> 100644
--- a/packages/ra-ui-materialui/src/list/Empty.js
+++ b/packages/ra-ui-materialui/src/list/Empty.js
@@ -10,8 +10,12 @@ const useStyles = makeStyles(
{
message: {
textAlign: 'center',
- opacity: 0.5,
+ opacity: theme.palette.type === 'light' ? 0.5 : 0.8,
margin: '0 1em',
+ color:
+ theme.palette.type === 'light'
+ ? 'inherit'
+ : theme.palette.text.primary,
},
icon: {
width: '9em',
|
Empty component shows incorrectly in dark theme.
|
marmelab_react-admin
|
train
|
0b20718338d4a4b25d012778c3856c7d5ac838bb
|
diff --git a/samples/replay-api/download_replays.py b/samples/replay-api/download_replays.py
index <HASH>..<HASH> 100755
--- a/samples/replay-api/download_replays.py
+++ b/samples/replay-api/download_replays.py
@@ -141,7 +141,7 @@ def main():
with open(file_path) as fd:
try:
archive = mpyq.MPQArchive(fd).extract()
- except ValueError:
+ except:
found_versions['corrupt'] += 1
os.remove(file_path)
continue
|
Apparently replays can be corrupt in other ways, so catch everything.
|
Blizzard_s2client-proto
|
train
|
e83c4d409bb92044d9dcacb16c73332787ebe1c9
|
diff --git a/classes/PodsAdmin.php b/classes/PodsAdmin.php
index <HASH>..<HASH> 100644
--- a/classes/PodsAdmin.php
+++ b/classes/PodsAdmin.php
@@ -936,6 +936,11 @@ class PodsAdmin {
],
];
+ // Do not show Groups/Fields if in types-only mode.
+ if ( pods_is_types_only() ) {
+ unset( $fields['group_count'], $fields['field_count'] );
+ }
+
if ( $include_row_counts ) {
$fields['row_count'] = [
'label' => __( 'Data Rows', 'pods' ),
@@ -1043,12 +1048,17 @@ class PodsAdmin {
$row = $pod;
}
- $group_count = $pod->count_groups();
- $field_count = $pod->count_fields();
+ $group_count = 0;
+ $field_count = 0;
$row_count = 0;
$row_meta_count = 0;
$podsrel_count = 0;
+ if ( ! pods_is_types_only() ) {
+ $group_count = $pod->count_groups();
+ $field_count = $pod->count_fields();
+ }
+
if ( $include_row_counts ) {
$row_count = $pod->count_rows();
@@ -1062,19 +1072,22 @@ class PodsAdmin {
}
$pod = [
- 'id' => $pod['id'],
- 'label' => pods_v( 'label', $pod ),
- 'name' => pods_v( 'name', $pod ),
- 'object' => pods_v( 'object', $pod, '' ),
- 'type' => $pod_type,
- 'real_type' => $pod_real_type,
- 'storage' => $storage_type_label,
- 'group_count' => number_format_i18n( $group_count ),
- 'field_count' => number_format_i18n( $field_count ),
+ 'id' => $pod['id'],
+ 'label' => pods_v( 'label', $pod ),
+ 'name' => pods_v( 'name', $pod ),
+ 'object' => pods_v( 'object', $pod, '' ),
+ 'type' => $pod_type,
+ 'real_type' => $pod_real_type,
+ 'storage' => $storage_type_label,
];
- $total_groups += $group_count;
- $total_fields += $field_count;
+ if ( ! pods_is_types_only() ) {
+ $pod['group_count'] = number_format_i18n( $group_count );
+ $pod['field_count'] = number_format_i18n( $field_count );
+
+ $total_groups += $group_count;
+ $total_fields += $field_count;
+ }
if ( $include_row_counts ) {
$pod['row_count'] = number_format_i18n( $row_count );
@@ -1109,13 +1122,17 @@ class PodsAdmin {
$total_pods = count( $pod_list );
- $extra_total_text = sprintf(
- ', %1$s %2$s, %3$s %4$s',
- number_format_i18n( $total_groups ),
- _n( 'group', 'groups', $total_groups, 'pods' ),
- number_format_i18n( $total_fields ),
- _n( 'field', 'fields', $total_fields, 'pods' )
- );
+ $extra_total_text = '';
+
+ if ( ! pods_is_types_only() ) {
+ $extra_total_text .= sprintf(
+ ', %1$s %2$s, %3$s %4$s',
+ number_format_i18n( $total_groups ),
+ _n( 'group', 'groups', $total_groups, 'pods' ),
+ number_format_i18n( $total_fields ),
+ _n( 'field', 'fields', $total_fields, 'pods' )
+ );
+ }
if ( $include_row_counts ) {
$extra_total_text .= sprintf(
|
Remove groups/fields counts from Edit Pods screen if types-only mode is on
|
pods-framework_pods
|
train
|
7b9b96de81ef726f845f544023f1e03e6992b8b3
|
diff --git a/code/libraries/koowa/libraries/dispatcher/response/transport/http.php b/code/libraries/koowa/libraries/dispatcher/response/transport/http.php
index <HASH>..<HASH> 100644
--- a/code/libraries/koowa/libraries/dispatcher/response/transport/http.php
+++ b/code/libraries/koowa/libraries/dispatcher/response/transport/http.php
@@ -108,7 +108,7 @@ class KDispatcherResponseTransportHttp extends KDispatcherResponseTransportAbstr
$user_agent = $response->getRequest()->getAgent();
// basename does not work if the string starts with a UTF character
- $filename = ltrim(basename(strtr($response->getStream()->getPath(), array('/' => '/ '))));
+ $filename = ltrim(basename(' '.strtr($response->getStream()->getPath(), array('/' => '/ '))));
// Android cuts file names after #
if (stripos($user_agent, 'Android')) {
diff --git a/code/libraries/koowa/libraries/filter/filename.php b/code/libraries/koowa/libraries/filter/filename.php
index <HASH>..<HASH> 100644
--- a/code/libraries/koowa/libraries/filter/filename.php
+++ b/code/libraries/koowa/libraries/filter/filename.php
@@ -37,6 +37,6 @@ class KFilterFilename extends KFilterAbstract implements KFilterTraversable
public function sanitize($value)
{
// basename does not work if the string starts with a UTF character
- return ltrim(basename(strtr($value, array('/' => '/ '))));
+ return ltrim(basename(' '.strtr($value, array('/' => '/ '))));
}
}
|
basename and pathinfo are not safe for UTF-8 characters
The fix is to rewrite the string so each path segment starts with a space and then to trim the result. For example, "foo/bar" becomes " foo/ bar", basename yields " bar", and trimming gives "bar".
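The transformation can be sanity-checked in Python, whose os.path.basename does not share PHP's UTF-8 quirk; the sketch only mirrors the string rewrite described above:

import os.path

def safe_basename(value):
    # Prepend a space and make every segment start with one, then trim.
    return os.path.basename(' ' + value.replace('/', '/ ')).lstrip()

assert safe_basename('foo/bar') == 'bar'   # " foo/ bar" -> " bar" -> "bar"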
|
timble_kodekit
|
train
|
f40f95b269386b20066b5bee3ea382b3439c23ab
|
diff --git a/src/Composer/DependencyResolver/SolverProblemsException.php b/src/Composer/DependencyResolver/SolverProblemsException.php
index <HASH>..<HASH> 100644
--- a/src/Composer/DependencyResolver/SolverProblemsException.php
+++ b/src/Composer/DependencyResolver/SolverProblemsException.php
@@ -43,7 +43,7 @@ class SolverProblemsException extends \RuntimeException
}
if (strpos($text, 'could not be found') || strpos($text, 'no matching package found')) {
- $text .= "\nPotential causes:\n - A typo in the package name\n - The package is not available in a stable-enough version according to your minimum-stability setting\n see <https://getcomposer.org/doc/04-schema.md#minimum-stability> for more details.\n\nRead <https://getcomposer.org/doc/articles/troubleshooting.md> for further common problems.";
+ $text .= "\nPotential causes:\n - A typo in the package name\n - The package is not available in a stable-enough version according to your minimum-stability setting\n see <https://getcomposer.org/doc/04-schema.md#minimum-stability> for more details.\n - It's a private package and you forgot to add a custom repository to find it\n\nRead <https://getcomposer.org/doc/articles/troubleshooting.md> for further common problems.";
}
if ($hasExtensionProblems) {
|
Add a note that a user could have forgotten to add a custom repo when a package is not found
|
composer_composer
|
train
|
026e54935f4dd0f22eb0ae4a1606d546a6a33641
|
diff --git a/index.js b/index.js
index <HASH>..<HASH> 100644
--- a/index.js
+++ b/index.js
@@ -187,7 +187,9 @@ function Driver (options) {
this._updateBalance()
])
.then(function () {
- if (self._destroyed) return
+ if (self._destroyed) {
+ return Q.reject(new Error('destroyed'))
+ }
self.msgDB.start()
self.txDB.start()
@@ -202,8 +204,6 @@ function Driver (options) {
self._sendTheUnsent()
// self._watchMsgStatuses()
})
-
- this._readyPromise.done()
}
Driver.prototype.ready = function () {
|
if tim is destroyed before it starts, reject ready promise
|
tradle_tim-old-engine
|
train
|
47dc4745cb5ac08ddc76f1b7390474eaea790f71
|
diff --git a/api/base/http.go b/api/base/http.go
index <HASH>..<HASH> 100644
--- a/api/base/http.go
+++ b/api/base/http.go
@@ -29,18 +29,18 @@ type HTTPCaller interface {
// HandleHTTPFailure returns the failure serialized in the response
// body. This function should only be called if the status code is not
// http.StatusOkay.
-func HandleHTTPFailure(resp *http.Response) (*params.Error, error) {
+func HandleHTTPFailure(resp *http.Response) error {
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return nil, errors.Annotate(err, "while reading HTTP response")
+ return errors.Annotate(err, "while reading HTTP response")
}
var failure params.Error
if resp.Header.Get("Content-Type") == "application/json" {
if err := json.Unmarshal(body, &failure); err != nil {
- return nil, errors.Annotate(err, "while unserializing the error")
+ return errors.Annotate(err, "while unserializing the error")
}
} else {
switch resp.StatusCode {
@@ -54,5 +54,5 @@ func HandleHTTPFailure(resp *http.Response) (*params.Error, error) {
failure.Message = string(body)
}
- return &failure, nil
+ return &failure
}
|
Drop a superfluous error return.
|
juju_juju
|
train
|
bbac3772704d5a1a202635811cf0e645f070792e
|
diff --git a/src/Adaptor/CakeFabricateAdaptor.php b/src/Adaptor/CakeFabricateAdaptor.php
index <HASH>..<HASH> 100644
--- a/src/Adaptor/CakeFabricateAdaptor.php
+++ b/src/Adaptor/CakeFabricateAdaptor.php
@@ -7,6 +7,7 @@
*/
namespace CakeFabricate\Adaptor;
+use Cake\ORM\Locator\LocatorAwareTrait;
use Fabricate\Adaptor\AbstractFabricateAdaptor;
use Fabricate\Model\FabricateModel;
@@ -19,6 +20,8 @@ use Cake\ORM\Association;
*/
class CakeFabricateAdaptor extends AbstractFabricateAdaptor
{
+ use LocatorAwareTrait;
+
/**
* Filter primary key option.
* Default setting is false that primary key sets by Fabricate.
@@ -60,13 +63,13 @@ class CakeFabricateAdaptor extends AbstractFabricateAdaptor
public function getModel($modelName)
{
$model = new FabricateModel($modelName);
- $table = TableRegistry::get($modelName);
- $schema = $table->schema();
+ $table = $this->getTableLocator()->get($modelName);
+ $schema = $table->getSchema();
foreach ($schema->columns() as $name) {
if ($this->filterKey($table, $name)) {
continue;
}
- $attrs = $schema->column($name);
+ $attrs = $schema->getColumn($name);
$options = [];
if (array_key_exists('length', $attrs)) {
$options['limit'] = $attrs['length'];
@@ -78,18 +81,18 @@ class CakeFabricateAdaptor extends AbstractFabricateAdaptor
}
foreach ($table->associations()->keys() as $key) {
$association = $table->associations()->get($key);
- $target = $association->target();
+ $target = $association->getTarget();
$className = get_class($target);
- $alias = $target->alias();
+ $alias = $target->getAlias();
switch ($association->type()) {
case Association::ONE_TO_ONE:
- $model->hasOne($alias, $association->foreignKey(), $className);
+ $model->hasOne($alias, $association->getForeignKey(), $className);
break;
case Association::ONE_TO_MANY:
- $model->hasMany($alias, $association->foreignKey(), $className);
+ $model->hasMany($alias, $association->getForeignKey(), $className);
break;
case Association::MANY_TO_ONE:
- $model->belongsTo($alias, $association->foreignKey(), $className);
+ $model->belongsTo($alias, $association->getForeignKey(), $className);
break;
}
}
@@ -101,23 +104,15 @@ class CakeFabricateAdaptor extends AbstractFabricateAdaptor
*/
public function create($modelName, $attributes, $recordCount)
{
- $table = TableRegistry::get($modelName);
+ $table = $this->getTableLocator()->get($modelName);
$entities = $table->newEntities($attributes, [
'validate' => $this->options[self::OPTION_VALIDATE],
'accessibleFields' => ['*' => true]
]);
- $table->connection()->transactional(function () use ($table, $entities) {
- foreach ($entities as $entity) {
- $ret = $table->save($entity, [
- 'checkRules' => $this->options[self::OPTION_CHECK_RULES],
- 'atomic' => false
- ]);
- if (!$ret) {
- return false;
- }
- }
- return true;
- });
+ $table->saveMany($entities, [
+ 'checkRules' => $this->options[self::OPTION_CHECK_RULES],
+ ]);
+
return $entities;
}
@@ -126,7 +121,7 @@ class CakeFabricateAdaptor extends AbstractFabricateAdaptor
*/
public function build($modelName, $data)
{
- $table = TableRegistry::get($modelName);
+ $table = $this->getTableLocator()->get($modelName);
$entity = $table->newEntity($data, [
'validate' => $this->options[self::OPTION_VALIDATE],
'accessibleFields' => ['*' => true]
@@ -145,7 +140,7 @@ class CakeFabricateAdaptor extends AbstractFabricateAdaptor
if (!$this->options[self::OPTION_FILTER_KEY]) {
return false;
}
- $primaryKey = $table->primaryKey();
+ $primaryKey = $table->getPrimaryKey();
if (!is_array($primaryKey)) {
$primaryKey = [$primaryKey];
}
|
Update to CakePHP <I>
|
sizuhiko_cakephp-fabricate-adaptor
|
train
|
d2a2db18f59d9eb7111bad174497601a789cd7c0
|
diff --git a/lib/Skeleton/Core/Util.php b/lib/Skeleton/Core/Util.php
index <HASH>..<HASH> 100644
--- a/lib/Skeleton/Core/Util.php
+++ b/lib/Skeleton/Core/Util.php
@@ -21,7 +21,7 @@ class Util {
*/
public static function rewrite_reverse_html($html) {
$html = preg_replace_callback(
- '@\<([^>]*) (href|src|action)="/([^"]*?)@iU',
+ '@\<([^>]*) (href|src|action)="\/(?!\/)([^"]*?)@iU',
function ($matches) {
if (!isset($matches[3])) {
return $matches[0];
|
Don't rewrite protocol-relative URLs
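The key piece is the negative lookahead; a small Python check of just that fragment of the pattern:

import re

# "/(?!/)" matches a leading slash only when it is not followed by another
# slash, so protocol-relative URLs like "//cdn.example.com/x.js" are skipped.
pattern = re.compile(r'(href|src|action)="/(?!/)')
assert pattern.search('<a href="/page">')               # site-relative: rewritten
assert not pattern.search('<script src="//cdn/x.js">')  # protocol-relative: left alone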
|
tigron_skeleton-core
|
train
|
63b452931c9ccb05afd1a8776ccf21d82de7654e
|
diff --git a/www/src/py_bytes.js b/www/src/py_bytes.js
index <HASH>..<HASH> 100644
--- a/www/src/py_bytes.js
+++ b/www/src/py_bytes.js
@@ -1276,8 +1276,13 @@ function $UnicodeDecodeError(encoding, position){
"' codec can't decode bytes in position " + position)
}
-function _hex(_int){return _int.toString(16)}
-function _int(hex){return parseInt(hex, 16)}
+function _hex(_int){
+ var h = _int.toString(16)
+ return '0x' + '0'.repeat(2 - h.length) + h
+}
+function _int(hex){
+ return parseInt(hex, 16)
+}
function normalise(encoding){
var enc = encoding.toLowerCase()
@@ -1506,7 +1511,15 @@ var decode = $B.decode = function(obj, encoding, errors){
console.log(b, encoding, "error load_decoder", err)
throw _b_.LookupError.$factory("unknown encoding: " + enc)
}
- return to_unicode[enc](obj)[0]
+ var decoded = to_unicode[enc](obj)[0]
+ for(var i = 0, len = decoded.length; i < len; i++){
+ if(decoded.codePointAt(i) == 0xfffe){
+ throw _b_.UnicodeDecodeError.$factory("'charmap' codec " +
+ `can't decode byte ${_hex(b[i])} in position ${i}: ` +
+ "character maps to <undefined>")
+ }
+ }
+ return decoded
}
return s
}
|
Fix bug in bytes.decode(): throw an exception when a key in decoding_table maps to undefined. Related to issue #<I>.
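Conceptually (a Python sketch, not Brython's JavaScript): U+FFFE marks holes in the decoding table, so any decoded character equal to it means the source byte had no mapping:

def check_decoded(raw, decoded, encoding='charmap'):
    for i, ch in enumerate(decoded):
        if ord(ch) == 0xFFFE:
            raise UnicodeDecodeError(encoding, raw, i, i + 1,
                                     'character maps to <undefined>')
    return decoded

check_decoded(b'abc', 'abc')  # fine
# check_decoded(b'\x9d', '\ufffe') raises:
# UnicodeDecodeError: 'charmap' codec can't decode byte ... <undefined>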
|
brython-dev_brython
|
train
|
cfc948469b7363f24ca82cb3407abdf3126a113b
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index <HASH>..<HASH> 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -569,7 +569,7 @@ def test_set_kernel_auto(py_file, tmpdir):
nb = readf(tmp_ipynb)
kernel_name = nb.metadata['kernelspec']['name']
- assert get_kernel_spec(kernel_name).argv[0] == sys.executable
+ assert get_kernel_spec(kernel_name).argv[0] in ['python', sys.executable]
@pytest.mark.parametrize('py_file', list_notebooks('python'))
|
Travis only has the default Python kernel
#<I>
|
mwouts_jupytext
|
train
|
29148edc44dd3fe6f6196892adac6bf3e2ba5e59
|
diff --git a/server/server_test.go b/server/server_test.go
index <HASH>..<HASH> 100644
--- a/server/server_test.go
+++ b/server/server_test.go
@@ -60,59 +60,76 @@ func stackFatalf(t tLogger, f string, args ...interface{}) {
t.Fatalf("%s", strings.Join(lines, "\n"))
}
-// Helper function that fails if number of clients is not as expected
-func checkClients(t tLogger, s *StanServer, expected int) {
- clients := s.clients.GetClients()
- if len(clients) != expected {
- stackFatalf(t, "Incorrect number of clients, expected %v, got %v", expected, len(clients))
+// Helper function that checks that the number returned by function `f`
+// is equal to `expected`, otherwise fails.
+func checkCount(t tLogger, expected int, f func() (string, int)) {
+ if label, count := f(); count != expected {
+ stackFatalf(t, "Incorrect number of %s, expected %v got %v", label, expected, count)
}
}
-// Helper function that waits to get the expected number of clients,
-// fail after a certain timeout.
-func waitForNumClients(t tLogger, s *StanServer, expected int) {
- var clients []*client
+// Helper function that waits that the number returned by function `f`
+// is equal to `expected` for a certain period of time, otherwise fails.
+func waitForCount(t tLogger, expected int, f func() (string, int)) {
ok := false
+ label := ""
+ count := 0
timeout := time.Now().Add(5 * time.Second)
for !ok && time.Now().Before(timeout) {
- clients = s.clients.GetClients()
- if len(clients) != expected {
+ label, count = f()
+ if count != expected {
time.Sleep(10 * time.Millisecond)
continue
}
ok = true
}
if !ok {
- stackFatalf(t, "Timeout waiting to get %v clients, got %v", expected, len(clients))
+ stackFatalf(t, "Timeout waiting to get %v %s, got %v", expected, label, count)
}
}
+// Helper function that fails if number of clients is not as expected
+func checkClients(t tLogger, s *StanServer, expected int) {
+ checkCount(t, expected, func() (string, int) { return getClientsCountFunc(s) })
+}
+
+// Helper function that waits for a while to get the expected number of clients,
+// otherwise fails.
+func waitForNumClients(t tLogger, s *StanServer, expected int) {
+ waitForCount(t, expected, func() (string, int) { return getClientsCountFunc(s) })
+}
+
+// Helper function that returns the number of clients
+func getClientsCountFunc(s *StanServer) (string, int) {
+ // We avoid getting a copy of the clients map here by directly
+ // returning the length of the array.
+ s.clients.RLock()
+ defer s.clients.RUnlock()
+ return "clients", len(s.clients.clients)
+}
+
// Helper function that fails if number of subscriptions is not as expected
func checkSubs(t tLogger, s *StanServer, ID string, expected int) []*subState {
+ // Since we need to return the array and we want the array to match
+ // the expected value, use the "public" API here.
subs := s.clients.GetSubs(ID)
- if len(subs) != expected {
- stackFatalf(t, "Incorrect number of subscriptions, expected %v, got %v", expected, len(subs))
- }
+ checkCount(t, expected, func() (string, int) { return "subscriptions", len(subs) })
return subs
}
-// Helper function that waits to get the expected number of subscriptions,
-// fail after a certain timeout.
+// Helper function that waits for a while to get the expected number of subscriptions,
+// otherwise fails.
func waitForNumSubs(t tLogger, s *StanServer, ID string, expected int) {
- var subs []*subState
- ok := false
- timeout := time.Now().Add(5 * time.Second)
- for !ok && time.Now().Before(timeout) {
- subs = s.clients.GetSubs(ID)
- if len(subs) != expected {
- time.Sleep(10 * time.Millisecond)
- continue
- }
- ok = true
- }
- if !ok {
- stackFatalf(t, "Timeout waiting to get %v subscriptions, got %v", expected, len(subs))
- }
+ waitForCount(t, expected, func() (string, int) {
+ // We avoid getting a copy of the subscriptions array here
+ // by directly returning the length of the array.
+ s.clients.RLock()
+ defer s.clients.RUnlock()
+ c := s.clients.clients[ID]
+ c.RLock()
+ defer c.RUnlock()
+ return "subscriptions", len(c.subs)
+ })
}
func NewDefaultConnection(t tLogger) stan.Conn {
|
Use generic helper functions for checking and waiting on counts
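The same refactoring pattern, sketched in Python with illustrative names: one polling loop parameterized by a counter function that returns (label, count):

import time

def wait_for_count(expected, f, timeout=5.0, interval=0.01):
    deadline = time.time() + timeout
    label, count = f()
    while count != expected and time.time() < deadline:
        time.sleep(interval)
        label, count = f()
    if count != expected:
        raise AssertionError('timeout waiting for %d %s, got %d'
                             % (expected, label, count))

# Both "clients" and "subscriptions" checks reuse the one loop:
clients = {'a': 1, 'b': 2}
wait_for_count(2, lambda: ('clients', len(clients)))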
|
nats-io_nats-streaming-server
|
train
|
78216dae6a7fc75a5c054203f00d8a99fe4a53ec
|
diff --git a/packages/origin.js/src/contract-service.js b/packages/origin.js/src/contract-service.js
index <HASH>..<HASH> 100644
--- a/packages/origin.js/src/contract-service.js
+++ b/packages/origin.js/src/contract-service.js
@@ -1,4 +1,4 @@
-import ListingsRegistryContract from '../../originSmartContracts/build/contracts/ListingsRegistry.json'
+import ListingsRegistryContract from '../../contracts/build/contracts/ListingsRegistry.json'
import bs58 from 'bs58'
class ContractService {
|
Fixed dir name of contracts to `contracts`
|
OriginProtocol_origin-js
|
train
|
d9fa7e2f35a617268f5b6d79f82bbe02c619a9d4
|
diff --git a/tests/PayloadTest.php b/tests/PayloadTest.php
index <HASH>..<HASH> 100644
--- a/tests/PayloadTest.php
+++ b/tests/PayloadTest.php
@@ -125,7 +125,7 @@ class PayloadTest extends AbstractTestCase
{
$values = $this->payload->get(['sub', 'jti']);
- list($sub, $jti) = $values;
+ [$sub, $jti] = $values;
$this->assertInternalType('array', $values);
$this->assertSame($sub, 1);
|
Apply fixes from StyleCI (#<I>)
[ci skip] [skip ci]
|
tymondesigns_jwt-auth
|
train
|
9c4622847a22d3c7fe6cf574af9ee611634404b4
|
diff --git a/items.js b/items.js
index <HASH>..<HASH> 100644
--- a/items.js
+++ b/items.js
@@ -113,19 +113,12 @@ export class InlineStyleItem extends IconItem {
}
apply(pm) {
let sel = pm.selection
- if (this.active(pm)) {
- if (sel.empty)
- pm.setInlineStyle(this.style, false)
- else
- pm.apply(pm.tr.removeStyle(sel.from, sel.to, this.style.type))
- } else if (this.dialog) {
+ if (this.active(pm))
+ pm.setStyle(this.style, false)
+ else if (this.dialog)
return [this.dialog]
- } else {
- if (sel.empty)
- pm.setInlineStyle(this.style, true)
- else
- pm.apply(pm.tr.addStyle(sel.from, sel.to, this.style))
- }
+ else
+ pm.setStyle(this.style, true)
}
}
|
Remove range param from setInlineStyle, rename to setStyle
|
ProseMirror_prosemirror-menu
|
train
|
93b3bf61864c1cd1959a1662a74425bcfedaef95
|
diff --git a/package/lib/connection.js b/package/lib/connection.js
index <HASH>..<HASH> 100644
--- a/package/lib/connection.js
+++ b/package/lib/connection.js
@@ -236,7 +236,7 @@ Connection.prototype._autoreconnect_advance = function () {
self._retry_count += 1;
var res;
- if (self._retry && self._retry_count <= self._max_retries) {
+ if (self._retry && (self._max_retries === -1 || self._retry_count <= self._max_retries)) {
res = {
count: self._retry_count,
delay: self._retry_delay,
|
Allow infinite retries
Allow setting `max_retries` to `-1` to enable infinite reconnection attempts
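The sentinel logic in isolation (a Python sketch, names illustrative):

def should_retry(retry, retry_count, max_retries):
    # -1 means "retry forever"; any other value caps the attempts.
    return retry and (max_retries == -1 or retry_count <= max_retries)

assert should_retry(True, 10000, -1)   # infinite retries enabled
assert not should_retry(True, 16, 15)  # finite cap exhausted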
|
crossbario_autobahn-js
|
train
|
19677803010465a8a6a8d04999065a0c6dacb600
|
diff --git a/ella/newman/media/js/inlines.js b/ella/newman/media/js/inlines.js
index <HASH>..<HASH> 100644
--- a/ella/newman/media/js/inlines.js
+++ b/ella/newman/media/js/inlines.js
@@ -35,9 +35,14 @@
var o = preset.data[i];
if (o.name == 'placement_set-0-listings') desired_no++;
}
+ // add listings if necessary
for (var i = no_items; i < desired_no; i++) {
add_inline($template, i+1);
}
+ // remove listings if necessary
+ for (var i = no_items; i > desired_no; i--) {
+ $('.listing-row.inline-item:last').remove();
+ }
}
$('.change-form:has(.add-listing-button)').bind('preset_load_initiated.listing', add_listings_for_preset);
$(document).bind('content_added', function(evt) {
@@ -203,16 +208,19 @@
}
}
var no_items = $('.gallery-items-sortable input.target_id').length;
- while (no_items < desired_no) {
+ // add gallery items if necessary
+ for (var i = no_items; i < desired_no; i++) {
add_gallery_item({button:0});
- var old_no = no_items;
- no_items = $('.gallery-items-sortable input.target_id').length;
- if (old_no == no_items) {
- ;;; carp('inlines.js: preset_load_initiated handler: failed adding gallery item for preset values');
- show_err(gettext('Error restoring inlines'));
- break;
- }
}
+ // remove gallery items if necessary
+ for (var i = no_items; i > desired_no; i--) {
+ $('.gallery-items-sortable .inline-related:last').remove();
+ }
+ // reset the fields
+ var $rows = $('.gallery-items-sortable .inline-related');
+ $rows.find('input.target_id,input.item-order').val('');
+ $rows.find('img.thumb').attr({src:'', alt:''});
+ $rows.find('h4').remove();
})
// and get their thumbnails
.bind('preset_load_completed', function(evt) {
|
Loading presets with inlines now handles deletion. Fixes #<I>
|
ella_ella
|
train
|
440e5489011089a1df778b762e129e1694551556
|
diff --git a/docs/chart_repository.md b/docs/chart_repository.md
index <HASH>..<HASH> 100644
--- a/docs/chart_repository.md
+++ b/docs/chart_repository.md
@@ -23,7 +23,7 @@ alpine-0.1.0:
name: alpine
url: https://storage.googleapis.com/kubernetes-charts/alpine-0.1.0.tgz
created: 2016-05-26 11:23:44.086354411 +0000 UTC
- checksum: a61575c2d3160e5e39abf2a5ec984d6119404b18
+ digest: sha256:78e9a4282295184e8ce1496d23987993673f38e33e203c8bc18bc838a73e5864
chartfile:
name: alpine
description: Deploy a basic Alpine Linux pod
@@ -33,7 +33,7 @@ redis-2.0.0:
name: redis
url: https://storage.googleapis.com/kubernetes-charts/redis-2.0.0.tgz
created: 2016-05-26 11:23:44.087939192 +0000 UTC
- checksum: 2cea3048cf85d588204e1b1cc0674472b4517919
+ digest: sha256:bde9c2949e64d059c18d8f93566a64dafc6d2e8e259a70322fb804831dfd0b5b
chartfile:
name: redis
description: Port of the replicatedservice template from kubernetes/charts
diff --git a/pkg/repo/index.go b/pkg/repo/index.go
index <HASH>..<HASH> 100644
--- a/pkg/repo/index.go
+++ b/pkg/repo/index.go
@@ -39,7 +39,7 @@ type ChartRef struct {
URL string `yaml:"url"`
Created string `yaml:"created,omitempty"`
Removed bool `yaml:"removed,omitempty"`
- Checksum string `yaml:"checksum,omitempty"`
+ Digest string `yaml:"digest,omitempty"`
Chartfile *chart.Metadata `yaml:"chartfile"`
}
diff --git a/pkg/repo/repo.go b/pkg/repo/repo.go
index <HASH>..<HASH> 100644
--- a/pkg/repo/repo.go
+++ b/pkg/repo/repo.go
@@ -17,9 +17,10 @@ limitations under the License.
package repo // import "k8s.io/helm/pkg/repo"
import (
- "crypto/sha1"
+ "crypto/sha256"
+ "encoding/hex"
"errors"
- "fmt"
+ "io"
"io/ioutil"
"net/url"
"os"
@@ -131,7 +132,7 @@ func (r *ChartRepository) Index() error {
}
chartfile := ch.Metadata
- hash, err := generateChecksum(path)
+ digest, err := generateDigest(path)
if err != nil {
return err
}
@@ -152,7 +153,7 @@ func (r *ChartRepository) Index() error {
url, _ := url.Parse(r.URL)
url.Path = filepath.Join(url.Path, key+".tgz")
- entry := &ChartRef{Chartfile: chartfile, Name: chartfile.Name, URL: url.String(), Created: created, Checksum: hash, Removed: false}
+ entry := &ChartRef{Chartfile: chartfile, Name: chartfile.Name, URL: url.String(), Created: created, Digest: digest, Removed: false}
r.IndexFile.Entries[key] = entry
@@ -170,18 +171,15 @@ func (r *ChartRepository) Index() error {
return r.saveIndexFile()
}
-func generateChecksum(path string) (string, error) {
+func generateDigest(path string) (string, error) {
f, err := os.Open(path)
if err != nil {
return "", err
}
- b, err := ioutil.ReadAll(f)
- if err != nil {
- return "", err
- }
-
- result := sha1.Sum(b)
+ h := sha256.New()
+ io.Copy(h, f)
- return fmt.Sprintf("%x", result), nil
+ digest := h.Sum([]byte{})
+ return "sha256:" + hex.EncodeToString(digest[:]), nil
}
diff --git a/pkg/repo/repo_test.go b/pkg/repo/repo_test.go
index <HASH>..<HASH> 100644
--- a/pkg/repo/repo_test.go
+++ b/pkg/repo/repo_test.go
@@ -85,8 +85,8 @@ func TestIndex(t *testing.T) {
}
timestamps[chartName] = details.Created
- if details.Checksum == "" {
- t.Errorf("Checksum was not set for %s", chartName)
+ if details.Digest == "" {
+ t.Errorf("Digest was not set for %s", chartName)
}
}
|
feat(repo): use OCI-style digest identifiers
Use the same format as the Open Container Initiative for a digest
string. <URL>
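For reference, the same computation sketched in Python (hashlib in place of Go's crypto/sha256):

import hashlib

def generate_digest(path):
    # Stream the chart archive through SHA-256 and prefix the hex digest
    # with the algorithm name, OCI-style: "sha256:<hex>".
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            h.update(chunk)
    return 'sha256:' + h.hexdigest()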
|
helm_helm
|
train
|
1840bb4be8a1356ae092bf4a59ead37f2e8ade8f
|
diff --git a/django_inline_wrestler/static/django_inline_wrestler/css/django-inline-wrestler.css b/django_inline_wrestler/static/django_inline_wrestler/css/django-inline-wrestler.css
index <HASH>..<HASH> 100644
--- a/django_inline_wrestler/static/django_inline_wrestler/css/django-inline-wrestler.css
+++ b/django_inline_wrestler/static/django_inline_wrestler/css/django-inline-wrestler.css
@@ -213,6 +213,9 @@ body.grp-change-list table thead{
body.grp-change-list table tr{
width: 100%;
}
+.django-inline-wrestler .grp-items{
+ position:relative;
+}
.django-inline-wrestler-stacked,
.django-inline-wrestler-stacked .list-item-initialized .form-row,
.django-inline-wrestler-stacked .list-item-initialized fieldset.grp-module{
diff --git a/django_inline_wrestler/static/django_inline_wrestler/js/django-inline-wrestler.js b/django_inline_wrestler/static/django_inline_wrestler/js/django-inline-wrestler.js
index <HASH>..<HASH> 100644
--- a/django_inline_wrestler/static/django_inline_wrestler/js/django-inline-wrestler.js
+++ b/django_inline_wrestler/static/django_inline_wrestler/js/django-inline-wrestler.js
@@ -442,7 +442,7 @@
this._listContainerHeaderHeight = $(this._listContainerHeader).find('.grp-th, th').height();
- var runningY = this.is_changelist? this._listContainerHeaderHeight : 22;
+ var runningY = this.is_changelist? this._listContainerHeaderHeight : this.is_stacked? 0 : 22;
var maxW = 0;
for(var k=0; k<this.list_items.length; k++){
var list_item = this.list_items[k];
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
#this is a test
setup(name = 'django-inline-wrestler',
description = 'Wrestle your django inlines with jQuery and Grappelli.',
- version = '5.3',
+ version = '5.4',
url = 'https://github.com/ninapavlich/django-inline-wrestler',
author = 'Nina Pavlich',
author_email='nina@ninalp.com',
|
minor fix to stacked inline positioning
|
ninapavlich_django-list-wrestler
|
train
|
cbc8423e2bb9ecdc1684539c7065c7f73289ad17
|
diff --git a/tools/merge_flink_pr.py b/tools/merge_flink_pr.py
index <HASH>..<HASH> 100755
--- a/tools/merge_flink_pr.py
+++ b/tools/merge_flink_pr.py
@@ -92,7 +92,7 @@ def continue_maybe(prompt):
fail("Okay, exiting")
-original_head = run_cmd("git rev-parse HEAD")[:8]
+original_head = run_cmd("git rev-parse --abbrev-ref HEAD").rstrip("/\n")
def clean_up():
|
[tools] Add --abbrev-ref to get the right branch name of HEAD
Add --abbrev-ref to get the symbolic branch name of HEAD rather than a commit checksum, so the tool can return to the original branch.
Without it, the merge tool ends up on an unnamed branch.
Somehow the old PR #<I> could not be reopened, so this submits a new one. Sorry
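The difference, sketched in Python with subprocess (mirroring the merge script):

import subprocess

def current_branch():
    # --abbrev-ref turns "git rev-parse HEAD" from a commit hash into the
    # symbolic branch name, so the tool can check the branch out again later.
    out = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
    return out.decode().rstrip('/\n')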
|
apache_flink
|
train
|
938a71c410cdebd8a8e3d98f7907187a4d183e1a
|
diff --git a/mex.go b/mex.go
index <HASH>..<HASH> 100644
--- a/mex.go
+++ b/mex.go
@@ -96,8 +96,9 @@ type messageExchange struct {
mexset *messageExchangeSet
framePool FramePool
- // shutdownAtomic is an atomically updated uint32.
+ // The following are atomically updated uint32.
shutdownAtomic uint32
+ errChNotified uint32
}
// checkError is called before waiting on the mex channels.
@@ -238,11 +239,8 @@ func (mex *messageExchange) shutdown(err error) {
return
}
- // Notify all calls blocked on the mex that it's shut down.
- if err == nil {
+ if atomic.CompareAndSwapUint32(&mex.errChNotified, 0, 1) {
mex.errCh.Notify(errMexShutdown)
- } else {
- mex.errCh.Notify(err)
}
mex.mexset.removeExchange(mex.msgID)
@@ -482,6 +480,14 @@ func (mexset *messageExchangeSet) stopExchanges(err error) {
}
for _, mex := range exchanges {
- mex.shutdown(err)
+ // When there's a connection failure, we want to notify blocked callers that the
+ // call will fail, but we don't want to shutdown the exchange as only the
+ // arg reader/writer should shutdown the exchange. Otherwise, our guarantee
+ // on sendChRefs that there's no references to sendCh is violated since
+ // readers/writers could still have a reference to sendCh even though
+ // we shutdown the exchange and called Done on sendChRefs.
+ if atomic.CompareAndSwapUint32(&mex.errChNotified, 0, 1) {
+ mex.errCh.Notify(err)
+ }
}
}
|
Don't call shutdown on connection failures
Calling shutdown causes data races since the reader/writer goroutine may
still have a reference to the sendCh, but shutdown decrements this
reference. We should instead unblock readers/writers by notifying them
of the error, and they can call shutdown.
|
uber_tchannel-go
|
train
|
762133aea6b5e3f3635d7157cd7a9ff9127342f3
|
diff --git a/js/hollaex.js b/js/hollaex.js
index <HASH>..<HASH> 100644
--- a/js/hollaex.js
+++ b/js/hollaex.js
@@ -1182,19 +1182,20 @@ module.exports = class hollaex extends Exchange {
const defaultExpires = this.safeInteger2 (this.options, 'api-expires', 'expires', parseInt (this.timeout / 1000));
const expires = this.sum (this.seconds (), defaultExpires);
const expiresString = expires.toString ();
- const auth = method + path + expiresString;
- const signature = this.hmac (this.encode (auth), this.encode (this.secret));
+ let auth = method + path + expiresString;
headers = {
'api-key': this.encode (this.apiKey),
- 'api-signature': signature,
'api-expires': expiresString,
};
- if (method !== 'GET') {
+ if (method === 'POST') {
headers['Content-type'] = 'application/json';
if (Object.keys (query).length) {
body = this.json (query);
+ auth += body;
}
}
+ const signature = this.hmac (this.encode (auth), this.encode (this.secret));
+ headers['api-signature'] = signature;
}
return { 'url': url, 'method': method, 'body': body, 'headers': headers };
}
|
hollaex private POST signing fix
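A hedged Python sketch of the corrected prehash, assuming HMAC-SHA256 (ccxt's default for this.hmac); exact JSON serialization may differ from the JS side:

import hashlib, hmac, json

def sign(secret, method, path, expires, payload=None):
    auth = method + path + str(expires)
    if method == 'POST' and payload:
        auth += json.dumps(payload)  # the body is now part of the prehash
    return hmac.new(secret.encode(), auth.encode(), hashlib.sha256).hexdigest()

print(sign('topsecret', 'POST', '/order', 1700000000, {'side': 'buy'}))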
|
ccxt_ccxt
|
train
|
d4488c880e031a951e14bbc064b51841149fa4a3
|
diff --git a/tofu/entrypoints/tofucalc.py b/tofu/entrypoints/tofucalc.py
index <HASH>..<HASH> 100755
--- a/tofu/entrypoints/tofucalc.py
+++ b/tofu/entrypoints/tofucalc.py
@@ -11,12 +11,6 @@ plt.switch_backend('Qt5Agg')
plt.ioff()
-# import parser dict
-sys.path.insert(1, _TOFUPATH)
-from scripts._dparser import _DPARSER
-_ = sys.path.pop(1)
-
-
# tofu
# test if in a tofu git repo
_HERE = os.path.abspath(os.path.dirname(__file__))
@@ -34,6 +28,12 @@ else:
import tofu as tf
+# import parser dict
+sys.path.insert(1, _TOFUPATH)
+from scripts._dparser import _DPARSER
+_ = sys.path.pop(1)
+
+
# tforigin = tf.__file__
# tfversion = tf.__version__
# print(tforigin, tfversion)
diff --git a/tofu/entrypoints/tofuplot.py b/tofu/entrypoints/tofuplot.py
index <HASH>..<HASH> 100755
--- a/tofu/entrypoints/tofuplot.py
+++ b/tofu/entrypoints/tofuplot.py
@@ -11,12 +11,6 @@ plt.switch_backend('Qt5Agg')
plt.ioff()
-# import parser dict
-sys.path.insert(1, _TOFUPATH)
-from scripts._dparser import _DPARSER
-_ = sys.path.pop(1)
-
-
# tofu
# test if in a tofu git repo
_HERE = os.path.abspath(os.path.dirname(__file__))
@@ -34,6 +28,12 @@ else:
import tofu as tf
+# import parser dict
+sys.path.insert(1, _TOFUPATH)
+from scripts._dparser import _DPARSER
+_ = sys.path.pop(1)
+
+
# tforigin = tf.__file__
# tfversion = tf.__version__
# print(tforigin, tfversion)
diff --git a/tofu/version.py b/tofu/version.py
index <HASH>..<HASH> 100644
--- a/tofu/version.py
+++ b/tofu/version.py
@@ -1,2 +1,2 @@
# Do not edit, pipeline versioning governed by git tags!
-__version__ = '1.4.6-18-g0f9e1e25'
+__version__ = '1.4.6-19-g5eed5ca'
|
[Issue<I>] Minor bug fix (_TOFUPATH was used before definition)
|
ToFuProject_tofu
|
train
|
56c4888aeae156f026b01a006c421dff11903903
|
diff --git a/src/scripts/src/choices.js b/src/scripts/src/choices.js
index <HASH>..<HASH> 100644
--- a/src/scripts/src/choices.js
+++ b/src/scripts/src/choices.js
@@ -1896,6 +1896,7 @@ class Choices {
) {
let passedValue = isType('String', value) ? value.trim() : value;
const passedKeyCode = keyCode;
+ const passedCustomProperties = customProperties;
const items = this.store.getItems();
const passedLabel = label || passedValue;
const passedOptionId = parseInt(choiceId, 10) || -1;
@@ -1939,6 +1940,7 @@ class Choices {
id,
value: passedValue,
label: passedLabel,
+ customProperties: passedCustomProperties,
groupValue: group.value,
keyCode: passedKeyCode,
});
@@ -1947,6 +1949,7 @@ class Choices {
id,
value: passedValue,
label: passedLabel,
+ customProperties: passedCustomProperties,
keyCode: passedKeyCode,
});
}
|
Add custom properties to 'addItem' event
|
jshjohnson_Choices
|
train
|
ef27fc6ae8ede540383a86e0c4ce18249db06965
|
diff --git a/grab/spider/base.py b/grab/spider/base.py
index <HASH>..<HASH> 100644
--- a/grab/spider/base.py
+++ b/grab/spider/base.py
@@ -480,9 +480,9 @@ class Spider(SpiderPattern, SpiderStat):
def find_data_handler(self, data):
try:
- handler = getattr(self, 'data_%s' % data.name)
+ handler = getattr(self, 'data_%s' % data.data_name)
except AttributeError:
- raise NoDataHandler('No handler defined for Data %s' % data.name)
+ raise NoDataHandler('No handler defined for Data %s' % data.data_name)
else:
return handler
@@ -503,7 +503,7 @@ class Spider(SpiderPattern, SpiderStat):
try:
handler(**result.storage)
except Exception, ex:
- self.process_handler_error('data_%s' % result.name, ex, task)
+ self.process_handler_error('data_%s' % result.data_name, ex, task)
elif result is None:
pass
else:
diff --git a/grab/spider/data.py b/grab/spider/data.py
index <HASH>..<HASH> 100644
--- a/grab/spider/data.py
+++ b/grab/spider/data.py
@@ -1,8 +1,22 @@
+NULL = object()
+
class Data(object):
"""
Task handlers should return instances of that class.
"""
def __init__(self, data_name, **kwargs):
- self.name = data_name
+ self.data_name = data_name
self.storage = kwargs
+
+ def __getitem__(self, key):
+ return self.storage[key]
+
+ def get(self, key, default=NULL):
+ try:
+ return self.storage[key]
+ except KeyError:
+ if default is NULL:
+ raise
+ else:
+ return default
diff --git a/test/spider_data.py b/test/spider_data.py
index <HASH>..<HASH> 100644
--- a/test/spider_data.py
+++ b/test/spider_data.py
@@ -68,3 +68,14 @@ class TestSpider(TestCase):
bot.add_task(Task('page', url=SERVER.BASE_URL))
bot.run()
self.assertEqual(bot.data_processed, [1, 2, {'bar': 'gaz'}])
+
+ def test_data_object_dict_interface(self):
+ data = Data('person', person={'age': 22})
+ self.assertRaises(KeyError, lambda: data['name'])
+ self.assertEqual(data['person'], {'age': 22})
+
+ def test_data_object_get_method(self):
+ data = Data('person', person={'age': 22})
+ self.assertRaises(KeyError, lambda: data.get('name'))
+ self.assertEqual('foo', data.get('name', 'foo'))
+ self.assertEqual({'age': 22}, data.get('person'))
|
Enhance Data object to support multiple items
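The NULL sentinel in the diff is what lets get() treat "no default supplied" differently from an explicit default of None. A standalone sketch of the pattern:

```python
_MISSING = object()   # unique sentinel: None is a legitimate default value

class Data(object):
    def __init__(self, data_name, **kwargs):
        self.data_name = data_name
        self.storage = kwargs

    def __getitem__(self, key):
        return self.storage[key]

    def get(self, key, default=_MISSING):
        try:
            return self.storage[key]
        except KeyError:
            if default is _MISSING:
                raise               # no default given: propagate KeyError
            return default

data = Data('person', person={'age': 22})
assert data.get('name', 'foo') == 'foo'
assert data.get('name', None) is None   # None works as an explicit default
```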
|
lorien_grab
|
train
|
e9c112e11135888eb0b470f8faa42b076b909aae
|
diff --git a/clearly/expected_state.py b/clearly/expected_state.py
index <HASH>..<HASH> 100644
--- a/clearly/expected_state.py
+++ b/clearly/expected_state.py
@@ -1,8 +1,6 @@
# coding=utf-8
from __future__ import absolute_import, print_function, unicode_literals
-from contextlib import contextmanager
-
from celery import states
@@ -11,36 +9,28 @@ class ExpectedStateHandler(object):
the final state, as celery itself takes into account their precedence.
Flower doesn't care either, as it shows a snapshot at that moment.
- But for Clearly, which shows in real-time was is happening with the tasks,
+ But for Clearly, which shows in real-time what is happening with the tasks,
it was very odd to show one with a RETRY state, before it was even STARTED,
or STARTED before being RECEIVED.
This class fixes that, with a state machine of the expected states, which
dynamically generates the missing states.
"""
- pre = post = None
- def __init__(self, field, expected_path):
- self.field = field
+ def __init__(self, expected_path):
self.expected_path = expected_path # type:ExpectedPath
- @contextmanager
- def track_changes(self, obj):
- self.pre = getattr(obj, self.field)
- yield
- self.post = getattr(obj, self.field)
-
- def states_through(self):
- if self.pre == self.post:
+ def states_through(self, pre, post):
+ if pre == post:
raise StopIteration
pointer = self.expected_path
- expected = self.pre
+ expected = pre
while pointer.name != expected:
pointer = pointer.find(expected)
- expected = self.post
stop = pointer.name
+ expected = post
while True:
pointer = pointer.find(expected)
if pointer.name == stop:
@@ -91,7 +81,7 @@ def setup_task_states():
states.RETRY) \
.to(return_path)
- return ExpectedStateHandler('state', expected_path)
+ return ExpectedStateHandler(expected_path)
def setup_worker_states():
@@ -99,4 +89,4 @@ def setup_worker_states():
# noinspection PyTypeChecker
expected_path.to(True).to(expected_path)
- return ExpectedStateHandler('alive', expected_path)
+ return ExpectedStateHandler(expected_path)
|
refac(expected_state) new stateless implementation
|
rsalmei_clearly
|
train
|
939135b8c2be7661f1363f2640fd6add5cbcc499
|
diff --git a/neural_nets/pycuda_ops/elementwise.py b/neural_nets/pycuda_ops/elementwise.py
index <HASH>..<HASH> 100644
--- a/neural_nets/pycuda_ops/elementwise.py
+++ b/neural_nets/pycuda_ops/elementwise.py
@@ -1,5 +1,6 @@
import numpy as np
from .. import sampler
+from .matrix import extract_columns, insert_columns
from pycuda import gpuarray
from pycuda.elementwise import ElementwiseKernel, get_elwise_kernel
@@ -138,10 +139,16 @@ sample_dropout_mask_kernel = get_elwise_kernel(
""",
"sample_dropout_mask")
-def sample_dropout_mask(x, dropout_probability=.5, stream=None):
+def sample_dropout_mask(x, dropout_probability=.5, columns=None, stream=None):
""" Samples a dropout mask and applies it in place"""
assert x.flags.c_contiguous
+
+ if columns is not None:
+ assert len(columns) == 2
+ x_tmp = x
+ x = extract_columns(x, columns[0], columns[1])
+
shape = x.shape
dropout_mask = sampler.gen_uniform(shape, x.dtype, stream)
@@ -149,6 +156,10 @@ def sample_dropout_mask(x, dropout_probability=.5, stream=None):
dropout_mask._grid, dropout_mask._block, stream,
x.gpudata, dropout_mask.gpudata, np.float32(dropout_probability),
dropout_mask.size)
+
+ if columns is not None:
+ insert_columns(x, x_tmp, columns[0])
+
return dropout_mask
apply_dropout_mask_kernel = get_elwise_kernel(
@@ -156,14 +167,23 @@ apply_dropout_mask_kernel = get_elwise_kernel(
"if (mask[i] == 0.) mat[i] = 0;",
"apply_dropout_mask")
-def apply_dropout_mask(x, mask, stream=None):
+def apply_dropout_mask(x, mask, columns=None, stream=None):
assert x.flags.c_contiguous
+
+ if columns is not None:
+ assert len(columns) == 2
+ x_tmp = x
+ x = extract_columns(x, columns[0], columns[1])
+
assert x.shape == mask.shape
shape = x.shape
apply_dropout_mask_kernel.prepared_async_call(x._grid, x._block, stream,
x.gpudata, mask.gpudata, x.size)
+ if columns is not None:
+ insert_columns(x, x_tmp, columns[0])
+
nan_to_zeros_kernel = ElementwiseKernel("float *mat, float *target",
"target[i] = isnan(mat[i]) ? 0. : mat[i];",
"nan_to_zeros_kernel")
diff --git a/neural_nets/pycuda_ops/matrix.py b/neural_nets/pycuda_ops/matrix.py
index <HASH>..<HASH> 100644
--- a/neural_nets/pycuda_ops/matrix.py
+++ b/neural_nets/pycuda_ops/matrix.py
@@ -148,5 +148,3 @@ def insert_columns(src, dst, offset):
copy.dst_pitch = w_dst * itemsize
copy.height = h_src
copy(aligned=True)
-
-
diff --git a/neural_nets_test.py b/neural_nets_test.py
index <HASH>..<HASH> 100644
--- a/neural_nets_test.py
+++ b/neural_nets_test.py
@@ -98,7 +98,7 @@ class TestSampleDropoutMask(unittest.TestCase):
def test_sample_dropout_mask(self):
for i in range(20):
- height = np.random.randint(100, 1000)
+ height = 1000
width = np.random.randint(500, 10000)
dropout_prob = np.random.rand()
X = sampler.gen_uniform((height, width), np.float32)
@@ -108,5 +108,24 @@ class TestSampleDropoutMask(unittest.TestCase):
self.assertLess(np.abs(dropout_prob - dropout_rate), self.TOL)
self.assertTrue(np.all((X.get() != 0.) == dropout_mask.get()))
+ def test_sample_dropout_mask_columns(self):
+ for i in range(20):
+ height = 10000
+ width = 10000
+ dropout_prob = np.random.rand()
+ X = sampler.gen_uniform((height, width), np.float32)
+
+ start = np.random.randint(0, width - 1000)
+ end = start + 1000
+ columns = (start, end)
+
+ dropout_mask = sample_dropout_mask(X, dropout_prob, columns)
+ dropout_rate = 1. - dropout_mask.get().mean()
+
+ self.assertEqual(dropout_mask.shape, (X.shape[0], end - start))
+ self.assertLess(np.abs(dropout_prob - dropout_rate),
+ self.TOL)
+ self.assertTrue(np.all((X.get()[:,start:end] != 0.) == dropout_mask.get()))
+
if __name__ == '__main__':
unittest.main()
|
implemented dropout on a subset of columns
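The columns argument restricts the mask to a slice: the GPU code extracts the columns, samples the mask there, and inserts the slice back. A hedged NumPy sketch of the same semantics (NumPy slices are views, so the explicit insert_columns write-back of the GPU path is implicit here):

```python
import numpy as np

def sample_dropout_mask(x, dropout_probability=0.5, columns=None, rng=None):
    """Zero out entries of x in place (optionally only columns[0]:columns[1])
    and return the binary keep-mask for the affected region."""
    rng = rng or np.random.default_rng()
    view = x if columns is None else x[:, columns[0]:columns[1]]
    mask = (rng.random(view.shape) > dropout_probability).astype(x.dtype)
    view *= mask        # in-place on the slice mutates x itself
    return mask

x = np.ones((4, 10), dtype=np.float32)
mask = sample_dropout_mask(x, 0.5, columns=(2, 5))
assert mask.shape == (4, 3)
assert np.all(x[:, :2] == 1) and np.all(x[:, 5:] == 1)   # untouched columns
```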
|
hannes-brt_hebel
|
train
|
128d1f2919258ff80a8875ed6a7c6b0f60acf674
|
diff --git a/sinks/gcmautoscaling/driver.go b/sinks/gcmautoscaling/driver.go
index <HASH>..<HASH> 100644
--- a/sinks/gcmautoscaling/driver.go
+++ b/sinks/gcmautoscaling/driver.go
@@ -38,6 +38,10 @@ var (
Key: "compute.googleapis.com/resource_type",
Description: "Resource types for nodes specific for GCE.",
}
+ cpuUsage = "cpu/usage"
+ cpuLimit = "cpu/limit"
+ memUsage = "memory/usage"
+ memLimit = "memory/limit"
)
var autoscalingLabels = []sink_api.LabelDescriptor{
@@ -52,19 +56,19 @@ type utilizationMetric struct {
}
var autoscalingMetrics = map[string]utilizationMetric{
- "cpu/usage": {
+ cpuUsage: {
name: "cpu/node_utilization",
description: "Cpu utilization as a share of node capacity",
},
- "cpu/limit": {
+ cpuLimit: {
name: "cpu/node_reservation",
description: "Share of cpu that is reserved on the node",
},
- "memory/usage": {
+ memUsage: {
name: "memory/node_utilization",
description: "Memory utilization as a share of memory capacity",
},
- "memory/limit": {
+ memLimit: {
name: "memory/node_reservation",
description: "Share of memory that is reserved on the node",
},
@@ -128,7 +132,7 @@ func (self *gcmAutocalingSink) updateMachineCapacityAndReservation(input []sink_
self.memReservation = make(map[hostTime]int64)
for _, entry := range input {
metric := entry.Point
- if metric.Name != "cpu/limit" && metric.Name != "memory/limit" {
+ if metric.Name != cpuLimit && metric.Name != memLimit {
continue
}
host := metric.Labels[sink_api.LabelHostname.Key]
@@ -138,21 +142,23 @@ func (self *gcmAutocalingSink) updateMachineCapacityAndReservation(input []sink_
}
if isNode(metric) {
- if metric.Name == "cpu/limit" {
+ if metric.Name == cpuLimit {
self.cpuCapacity[hostTime{host, metric.End}] = value
- } else if metric.Name == "memory/limit" {
+ } else if metric.Name == memLimit {
self.memCapacity[hostTime{host, metric.End}] = value
}
} else if isPodContainer(metric) {
- if metric.Name == "cpu/limit" {
+ if metric.Name == cpuLimit {
self.cpuReservation[hostTime{host, metric.End}] += value
- } else if metric.Name == "memory/limit" {
+ } else if metric.Name == memLimit {
self.memReservation[hostTime{host, metric.End}] += value
}
}
}
}
+// For the given metric compute minimal set of labels required by autoscaling.
+// See more: https://cloud.google.com/compute/docs/autoscaler/scaling-cloud-monitoring-metrics#custom_metrics_beta
func getLabels(metric *sink_api.Point) map[string]string {
return map[string]string{
LabelHostname.Key: metric.Labels[sink_api.LabelHostname.Key],
@@ -161,31 +167,33 @@ func getLabels(metric *sink_api.Point) map[string]string {
}
}
+// For the given metric compute value of corresponding metric based on
+// the original value and precomputed node stats.
func (self *gcmAutocalingSink) getNewValue(metric *sink_api.Point, ts *gcm.Timeseries) *float64 {
host := metric.Labels[sink_api.LabelHostname.Key]
var val float64
switch metric.Name {
- case "cpu/usage":
+ case cpuUsage:
capacity, ok := self.cpuCapacity[hostTime{host, metric.End}]
if !ok || capacity < 1 || ts.Point.DoubleValue == nil {
return nil
}
val = *ts.Point.DoubleValue / float64(capacity)
- case "cpu/limit":
+ case cpuLimit:
reserved, ok := self.cpuReservation[hostTime{host, metric.End}]
capacity, ok2 := self.cpuCapacity[hostTime{host, metric.End}]
if !ok || !ok2 || capacity < 1 {
return nil
}
val = float64(reserved) / float64(capacity)
- case "memory/usage":
+ case memUsage:
capacity, ok := self.memCapacity[hostTime{host, metric.End}]
if !ok || capacity < 1 || ts.Point.Int64Value == nil {
return nil
}
val = float64(*ts.Point.Int64Value) / float64(capacity)
- case "memory/limit":
+ case memLimit:
reserved, ok := self.memReservation[hostTime{host, metric.End}]
capacity, ok2 := self.memCapacity[hostTime{host, metric.End}]
if !ok || !ok2 || capacity < 1 {
@@ -215,9 +223,9 @@ func (self gcmAutocalingSink) StoreTimeseries(input []sink_api.Timeseries) error
var ts *gcm.Timeseries
var err error
- if metric.Name == "cpu/usage" {
+ if metric.Name == cpuUsage {
ts, err = self.core.GetEquivalentRateMetric(metric)
- } else if metric.Name == "cpu/limit" || metric.Name == "memory/usage" || metric.Name == "memory/limit" {
+ } else if metric.Name == cpuLimit || metric.Name == memUsage || metric.Name == memLimit {
ts, err = self.core.GetMetric(metric)
} else {
continue
|
Added comments, replaced string literals with constants
|
kubernetes-retired_heapster
|
train
|
751b46f117ce8084eef5dca29eec57a6c371db57
|
diff --git a/hazelcast/src/main/java/com/hazelcast/nio/tcp/nonblocking/NonBlockingIOThread.java b/hazelcast/src/main/java/com/hazelcast/nio/tcp/nonblocking/NonBlockingIOThread.java
index <HASH>..<HASH> 100644
--- a/hazelcast/src/main/java/com/hazelcast/nio/tcp/nonblocking/NonBlockingIOThread.java
+++ b/hazelcast/src/main/java/com/hazelcast/nio/tcp/nonblocking/NonBlockingIOThread.java
@@ -41,6 +41,8 @@ public class NonBlockingIOThread extends Thread implements OperationHostileThrea
@Probe(name = "taskQueueSize")
private final Queue<Runnable> taskQueue = new ConcurrentLinkedQueue<Runnable>();
+ @Probe
+ private final SwCounter eventCount = newSwCounter();
private final ILogger logger;
@@ -52,9 +54,6 @@ public class NonBlockingIOThread extends Thread implements OperationHostileThrea
private volatile long lastSelectTimeMs;
- @Probe
- private final SwCounter eventCount = newSwCounter();
-
public NonBlockingIOThread(ThreadGroup threadGroup,
String threadName,
ILogger logger,
diff --git a/hazelcast/src/main/java/com/hazelcast/nio/tcp/nonblocking/NonBlockingReadHandler.java b/hazelcast/src/main/java/com/hazelcast/nio/tcp/nonblocking/NonBlockingReadHandler.java
index <HASH>..<HASH> 100644
--- a/hazelcast/src/main/java/com/hazelcast/nio/tcp/nonblocking/NonBlockingReadHandler.java
+++ b/hazelcast/src/main/java/com/hazelcast/nio/tcp/nonblocking/NonBlockingReadHandler.java
@@ -49,8 +49,8 @@ import static com.hazelcast.util.counters.SwCounter.newSwCounter;
*/
public final class NonBlockingReadHandler extends AbstractSelectionHandler implements ReadHandler {
- private ByteBuffer inputBuffer;
-
+ @Probe(name = "in.eventCount")
+ private final SwCounter eventCount = newSwCounter();
@Probe(name = "in.bytesRead")
private final SwCounter bytesRead = newSwCounter();
@Probe(name = "in.normalPacketsRead")
@@ -60,20 +60,15 @@ public final class NonBlockingReadHandler extends AbstractSelectionHandler imple
private final MetricsRegistry metricRegistry;
private SocketReader socketReader;
-
+ private ByteBuffer inputBuffer;
private volatile long lastReadTime;
- //This field will be incremented by a single thread. It can be read by multiple threads.
- @Probe(name = "in.eventCount")
- private final SwCounter eventCount = newSwCounter();
-
public NonBlockingReadHandler(
TcpIpConnection connection,
NonBlockingIOThread ioThread,
MetricsRegistry metricsRegistry) {
super(connection, ioThread, SelectionKey.OP_READ);
this.ioThread = ioThread;
-
this.metricRegistry = metricsRegistry;
metricRegistry.scanAndRegister(this, "tcp.connection[" + connection.getMetricsId() + "]");
}
diff --git a/hazelcast/src/main/java/com/hazelcast/nio/tcp/nonblocking/NonBlockingWriteHandler.java b/hazelcast/src/main/java/com/hazelcast/nio/tcp/nonblocking/NonBlockingWriteHandler.java
index <HASH>..<HASH> 100644
--- a/hazelcast/src/main/java/com/hazelcast/nio/tcp/nonblocking/NonBlockingWriteHandler.java
+++ b/hazelcast/src/main/java/com/hazelcast/nio/tcp/nonblocking/NonBlockingWriteHandler.java
@@ -56,6 +56,8 @@ public final class NonBlockingWriteHandler extends AbstractSelectionHandler impl
private static final long TIMEOUT = 3;
+ @Probe(name = "out.eventCount")
+ private final SwCounter eventCount = newSwCounter();
@Probe(name = "out.writeQueueSize")
private final Queue<SocketWritable> writeQueue = new ConcurrentLinkedQueue<SocketWritable>();
@Probe(name = "out.priorityWriteQueueSize")
@@ -73,9 +75,6 @@ public final class NonBlockingWriteHandler extends AbstractSelectionHandler impl
private volatile SocketWritable currentPacket;
private SocketWriter socketWriter;
private volatile long lastWriteTime;
- @Probe(name = "out.eventCount")
- //This field will be incremented by a single thread. It can be read by multiple threads.
- private final SwCounter eventCount = newSwCounter();
private boolean shutdown;
// this field will be accessed by the NonBlockingIOThread or
|
Reordered a few probe fields on tcp objects
|
hazelcast_hazelcast
|
train
|
cac05c848de026335ac5d5022070ba53afcc333a
|
diff --git a/src/main/java/com/coveros/selenified/element/WaitFor.java b/src/main/java/com/coveros/selenified/element/WaitFor.java
index <HASH>..<HASH> 100644
--- a/src/main/java/com/coveros/selenified/element/WaitFor.java
+++ b/src/main/java/com/coveros/selenified/element/WaitFor.java
@@ -289,7 +289,8 @@ public class WaitFor {
double start = System.currentTimeMillis();
// wait for up to XX seconds
WebDriverWait wait = new WebDriverWait(element.getDriver(), (long) seconds, defaultPollingInterval);
- wait.until(ExpectedConditions.not(ExpectedConditions.elementToBeClickable(element.defineByElement())));
+ wait.until(ExpectedConditions.or(ExpectedConditions.not(ExpectedConditions.elementToBeClickable(element
+ .defineByElement())), ExpectedConditions.invisibilityOfElementLocated(element.defineByElement())));
double timetook = (System.currentTimeMillis() - start) / 1000;
file.recordAction(action, expected,
WAITED + timetook + SECONDS_FOR + element.prettyOutput() + " to not be enabled", Result.SUCCESS);
|
add expected condition for element no longer visible
|
Coveros_selenified
|
train
|
d0f2e8e3e2f7d59f669f053b0e17b05c6ecd65ee
|
diff --git a/src/main/java/io/katharsis/queryParams/QueryParams.java b/src/main/java/io/katharsis/queryParams/QueryParams.java
index <HASH>..<HASH> 100644
--- a/src/main/java/io/katharsis/queryParams/QueryParams.java
+++ b/src/main/java/io/katharsis/queryParams/QueryParams.java
@@ -316,20 +316,16 @@ public class QueryParams {
}
String resourceType = propertyList.get(0);
-
+ Set<Inclusion> resourceParams;
if (temporaryInclusionsMap.containsKey(resourceType)) {
- Set<Inclusion> resourceParams = temporaryInclusionsMap.get(resourceType);
- resourceParams.add(new Inclusion(entry.getValue()
- .iterator()
- .next()));
- temporaryInclusionsMap.put(resourceType, resourceParams);
+ resourceParams = temporaryInclusionsMap.get(resourceType);
} else {
- Set<Inclusion> resourceParams = new LinkedHashSet<>();
- resourceParams.add(new Inclusion(entry.getValue()
- .iterator()
- .next()));
- temporaryInclusionsMap.put(resourceType, resourceParams);
+ resourceParams = new LinkedHashSet<>();
+ }
+ for(String path : entry.getValue()) {
+ resourceParams.add(new Inclusion(path));
}
+ temporaryInclusionsMap.put(resourceType, resourceParams);
}
Map<String, IncludedRelationsParams> decodedInclusions = new LinkedHashMap<>();
diff --git a/src/main/java/io/katharsis/queryParams/include/Inclusion.java b/src/main/java/io/katharsis/queryParams/include/Inclusion.java
index <HASH>..<HASH> 100644
--- a/src/main/java/io/katharsis/queryParams/include/Inclusion.java
+++ b/src/main/java/io/katharsis/queryParams/include/Inclusion.java
@@ -45,4 +45,11 @@ public class Inclusion {
public int hashCode() {
return path != null ? path.hashCode() : 0;
}
+
+ @Override
+ public String toString() {
+ return "Inclusion{" +
+ "path='" + path + '\'' +
+ '}';
+ }
}
diff --git a/src/test/java/io/katharsis/queryParams/QueryParamsBuilderTest.java b/src/test/java/io/katharsis/queryParams/QueryParamsBuilderTest.java
index <HASH>..<HASH> 100644
--- a/src/test/java/io/katharsis/queryParams/QueryParamsBuilderTest.java
+++ b/src/test/java/io/katharsis/queryParams/QueryParamsBuilderTest.java
@@ -1,13 +1,11 @@
package io.katharsis.queryParams;
import io.katharsis.jackson.exception.ParametersDeserializationException;
+import io.katharsis.queryParams.include.Inclusion;
import org.junit.Before;
import org.junit.Test;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
+import java.util.*;
import static org.assertj.core.api.Assertions.assertThat;
@@ -126,7 +124,7 @@ public class QueryParamsBuilderTest {
}
@Test
- public void onGivenIncludedRelationsBuilderShouldReturnRequestParamsWithIncludedRelations() throws
+ public void onGivenIncludedRelationBuilderShouldReturnRequestParamsWithIncludedRelation() throws
ParametersDeserializationException {
// GIVEN
queryParams.put("include[special-users]", Collections.singleton("friends"));
@@ -147,4 +145,24 @@ public class QueryParamsBuilderTest {
.next()
.getPath()).isEqualTo("friends");
}
+
+ @Test
+ public void onGivenIncludedRelationsBuilderShouldReturnRequestParamsWithIncludedRelations() throws
+ ParametersDeserializationException {
+ // GIVEN
+ queryParams.put("include[special-users]", new HashSet<>(Arrays.asList("friends", "foes")));
+
+ // WHEN
+ QueryParams result = sut.buildQueryParams(queryParams);
+
+ // THEN
+ assertThat(result.getIncludedRelations()
+ .getParams()
+ .get("special-users")).isNotNull();
+
+ assertThat(result.getIncludedRelations()
+ .getParams()
+ .get("special-users")
+ .getParams()).containsExactly(new Inclusion("friends"), new Inclusion("foes"));
+ }
}
|
#<I> allowed multiple inclusions
|
katharsis-project_katharsis-framework
|
train
|
acc396f2677f17452e501e76de24daa529f84fe3
|
diff --git a/src/assertions/index.js b/src/assertions/index.js
index <HASH>..<HASH> 100644
--- a/src/assertions/index.js
+++ b/src/assertions/index.js
@@ -9,32 +9,19 @@ var deepEqual = require('fast-deep-equal/es6');
/**
* Function returning whether the given graphs have the same nodes.
*
- * @param {Graph} G - First graph.
- * @param {Graph} H - Second graph.
+ * @param {boolean} deep - Whether to perform deep comparisons.
+ * @param {Graph} G - First graph.
+ * @param {Graph} H - Second graph.
* @return {boolean}
*/
-function haveSameNodes(G, H) {
- if (G.order !== H.order) return false;
-
- return G.everyNode(function (node) {
- return H.hasNode(node);
- });
-}
-
-/**
- * Function returning whether the given graphs have the same nodes & if these
- * nodes have the same attributes.
- *
- * @param {Graph} G - First graph.
- * @param {Graph} H - Second graph.
- * @return {boolean}
- */
-function haveSameNodesDeep(G, H) {
+function abstractHaveSameNodes(deep, G, H) {
if (G.order !== H.order) return false;
return G.everyNode(function (node, attr) {
if (!H.hasNode(node)) return false;
+ if (!deep) return true;
+
return deepEqual(attr, H.getNodeAttributes(node));
});
}
@@ -42,62 +29,12 @@ function haveSameNodesDeep(G, H) {
/**
* Function returning whether the given graphs are identical.
*
- * @param {Graph} G - First graph.
- * @param {Graph} H - Second graph.
- * @return {boolean}
- */
-function areSameGraphs(G, H) {
- if (G.multi || H.multi)
- throw new Error(
- 'graphology-assertions.areSameGraphs: not implemented for multigraphs yet!'
- );
-
- // If two graphs don't have the same settings they cannot be identical
- if (G.type !== H.type || G.allowSelfLoops !== H.allowSelfLoops) return false;
-
- // If two graphs don't have the same number of typed edges, they cannot be identical
- if (
- G.directedSize !== H.directedSize ||
- G.undirectedSize !== H.undirectedSize
- )
- return false;
-
- // If two graphs don't have the same nodes they cannot be identical
- if (!haveSameNodes(G, H)) return false;
-
- var sameDirectedEdges = false;
- var sameUndirectedEdges = false;
-
- // In the simple case we don't need refining
- sameDirectedEdges = G.everyDirectedEdge(function (_e, _ea, source, target) {
- return H.hasDirectedEdge(source, target);
- });
-
- if (!sameDirectedEdges) return false;
-
- sameUndirectedEdges = G.everyUndirectedEdge(function (
- _e,
- _ea,
- source,
- target
- ) {
- return H.hasUndirectedEdge(source, target);
- });
-
- if (!sameUndirectedEdges) return false;
-
- return true;
-}
-
-/**
- * Function returning whether the given graphs are identical and if their
- * node & edge attributes are identical also.
- *
- * @param {Graph} G - First graph.
- * @param {Graph} H - Second graph.
+ * @param {boolean} deep - Whether to perform deep comparison.
+ * @param {Graph} G - First graph.
+ * @param {Graph} H - Second graph.
* @return {boolean}
*/
-function areSameGraphsDeep(G, H) {
+function abstractAreSameGraphs(deep, G, H) {
if (G.multi || H.multi)
throw new Error(
'graphology-assertions.areSameGraphsDeep: not implemented for multigraphs yet!'
@@ -114,7 +51,7 @@ function areSameGraphsDeep(G, H) {
return false;
// If two graphs don't have the same nodes they cannot be identical
- if (!haveSameNodesDeep(G, H)) return false;
+ if (!abstractHaveSameNodes(deep, G, H)) return false;
var sameDirectedEdges = false;
var sameUndirectedEdges = false;
@@ -123,6 +60,8 @@ function areSameGraphsDeep(G, H) {
sameDirectedEdges = G.everyDirectedEdge(function (_e, _ea, source, target) {
if (!H.hasDirectedEdge(source, target)) return false;
+ if (!deep) return true;
+
return deepEqual(
G.getDirectedEdgeAttributes(source, target),
H.getDirectedEdgeAttributes(source, target)
@@ -139,6 +78,8 @@ function areSameGraphsDeep(G, H) {
) {
if (!H.hasUndirectedEdge(source, target)) return false;
+ if (!deep) return true;
+
return deepEqual(
G.getUndirectedEdgeAttributes(source, target),
H.getUndirectedEdgeAttributes(source, target)
@@ -155,7 +96,7 @@ function areSameGraphsDeep(G, H) {
*/
exports.isGraph = require('graphology-utils/is-graph');
exports.isGraphConstructor = require('graphology-utils/is-graph-constructor');
-exports.haveSameNodes = haveSameNodes;
-exports.haveSameNodesDeep = haveSameNodesDeep;
-exports.areSameGraphs = areSameGraphs;
-exports.areSameGraphsDeep = areSameGraphsDeep;
+exports.haveSameNodes = abstractHaveSameNodes.bind(null, false);
+exports.haveSameNodesDeep = abstractHaveSameNodes.bind(null, true);
+exports.areSameGraphs = abstractAreSameGraphs.bind(null, false);
+exports.areSameGraphsDeep = abstractAreSameGraphs.bind(null, true);
|
[assertions] refactoring through abstractions
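The exported functions are now one abstract implementation specialized on the deep flag via Function.prototype.bind. The same deduplication in Python uses functools.partial; a hedged sketch with plain dicts standing in for graphs:

```python
from functools import partial

def _have_same_nodes(deep, g, h):
    """Shared implementation; `deep` toggles attribute comparison."""
    if len(g) != len(h):
        return False
    for node, attrs in g.items():
        if node not in h:
            return False
        if deep and attrs != h[node]:
            return False
    return True

# Public API: the flag is pre-bound, mirroring bind(null, false/true).
have_same_nodes = partial(_have_same_nodes, False)
have_same_nodes_deep = partial(_have_same_nodes, True)

g = {'a': {'x': 1}, 'b': {}}
h = {'a': {'x': 2}, 'b': {}}
assert have_same_nodes(g, h)            # shallow: node sets match
assert not have_same_nodes_deep(g, h)   # deep: attributes differ
```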
|
graphology_graphology
|
train
|
2ded589254f427317fc13591b9e2b2d44e57c7cd
|
diff --git a/src/views/layout/main.blade.php b/src/views/layout/main.blade.php
index <HASH>..<HASH> 100644
--- a/src/views/layout/main.blade.php
+++ b/src/views/layout/main.blade.php
@@ -2,7 +2,7 @@
<div id="brdmain">
-@if ($errors->has())
+@if (isset($errors) and $errors->has())
<div class="alert alert-danger">
<p>The following errors occured:</p>
<ul>
|
Take care of missing $errors variable.
|
fluxbb_core
|
train
|
8da26f8030d8c670d9939bda79cad530c50fa823
|
diff --git a/gwt-material/src/main/java/gwt/material/design/client/ui/MaterialSwitch.java b/gwt-material/src/main/java/gwt/material/design/client/ui/MaterialSwitch.java
index <HASH>..<HASH> 100644
--- a/gwt-material/src/main/java/gwt/material/design/client/ui/MaterialSwitch.java
+++ b/gwt-material/src/main/java/gwt/material/design/client/ui/MaterialSwitch.java
@@ -74,6 +74,16 @@ public class MaterialSwitch extends ComplexWidget implements HasValue<Boolean>,
add(label);
add(lblError);
lblError.getElement().getStyle().setMarginTop(16, Unit.PX);
+ //register click handler here in order to have it at first position
+ // and therefore it will deal with clicks as first and setup the value
+ // right before others get notified.
+ addClickHandler(new ClickHandler() {
+ @Override
+ public void onClick(ClickEvent event) {
+ setValue(!getValue());
+ event.preventDefault();
+ }
+ });
}
/**
@@ -84,17 +94,10 @@ public class MaterialSwitch extends ComplexWidget implements HasValue<Boolean>,
setValue(value);
}
+
@Override
protected void onLoad() {
super.onLoad();
-
- addClickHandler(new ClickHandler() {
- @Override
- public void onClick(ClickEvent event) {
- setValue(!getValue());
- event.preventDefault();
- }
- });
}
@Override
|
#<I> MaterialSwitch problem with multiple ClickHandler registrations and ClickHandler registration order
Move the registration to the constructor to guarantee it is registered
only once.
This also resolves the problem that the value of the action isn't
right when read in an onClick handler: if the clickHandler is
registered in the constructor, it is registered first and therefore
always called first, which updates the value. So every later
onClick handler in the chain will read the right value.
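The ordering argument is general: whichever handler registers first runs first, so registering the value-toggling handler in the constructor guarantees later handlers observe the new value. A hedged Python sketch (Switch and its handler API are hypothetical):

```python
class Switch:
    def __init__(self):
        self.value = False
        self._handlers = []
        # Registered in the constructor, so it is always FIRST in the
        # chain and flips the value before any user handler runs.
        self._handlers.append(lambda sw: setattr(sw, 'value', not sw.value))

    def add_click_handler(self, handler):
        self._handlers.append(handler)

    def click(self):
        for handler in self._handlers:
            handler(self)

observed = []
sw = Switch()
sw.add_click_handler(lambda s: observed.append(s.value))
sw.click()
assert observed == [True]   # user handler already sees the toggled value
```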
|
GwtMaterialDesign_gwt-material
|
train
|
37ba8f5c990ae27a3f0f11e32f217218e908fc71
|
diff --git a/framework/assets/yii.validation.js b/framework/assets/yii.validation.js
index <HASH>..<HASH> 100644
--- a/framework/assets/yii.validation.js
+++ b/framework/assets/yii.validation.js
@@ -203,16 +203,16 @@ yii.validation = (function ($) {
valid = value !== compareValue;
break;
case '>':
- valid = value > parseFloat(compareValue);
+ valid = parseFloat(value) > parseFloat(compareValue);
break;
case '>=':
- valid = value >= parseFloat(compareValue);
+ valid = parseFloat(value) >= parseFloat(compareValue);
break;
case '<':
- valid = value < parseFloat(compareValue);
+ valid = parseFloat(value) < parseFloat(compareValue);
break;
case '<=':
- valid = value <= parseFloat(compareValue);
+ valid = parseFloat(value) <= parseFloat(compareValue);
break;
default:
valid = false;
|
Fixes #<I>: the value also has to be parsed.
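The root cause is that only compareValue was parsed, so value took part in a lexicographic string comparison. A small Python illustration of why both operands must be coerced:

```python
OPS = {
    '>':  lambda a, b: a > b,
    '>=': lambda a, b: a >= b,
    '<':  lambda a, b: a < b,
    '<=': lambda a, b: a <= b,
}

def compare(value, compare_value, op):
    # Parse BOTH operands, mirroring the fixed validator.
    return OPS[op](float(value), float(compare_value))

assert "10" < "9"                    # lexicographic: wrong for numbers
assert compare("10", "9", '>')       # numeric: correct
```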
|
yiisoft_yii-core
|
train
|
3cb549ac03a678bd3355561b734bf7b22f3b11b1
|
diff --git a/pmagpy_tests/test_imports3.py b/pmagpy_tests/test_imports3.py
index <HASH>..<HASH> 100644
--- a/pmagpy_tests/test_imports3.py
+++ b/pmagpy_tests/test_imports3.py
@@ -187,9 +187,16 @@ class Test_iodp_srm_magic(unittest.TestCase):
pass
def tearDown(self):
- filelist = ['measurements.txt', 'specimens.txt', 'samples.txt', 'sites.txt', 'locations.txt']
+ filelist = ['measurements.txt', 'specimens.txt', 'samples.txt',
+ 'sites.txt', 'locations.txt',
+ 'IODP_LIMS_SRMsection_366_U1494.csv.magic',
+ 'IODP_LIMS_SRMsection_366_U1494_locations.txt',
+ 'IODP_LIMS_SRMsection_366_U1494_samples.txt',
+ 'IODP_LIMS_SRMsection_366_U1494_sites.txt',
+ 'IODP_LIMS_SRMsection_366_U1494_specimens.txt']
+ dir_path = os.path.join(WD, 'data_files', 'UTESTA', 'SRM_data')
#directory = os.path.join(WD)
- pmag.remove_files(filelist, WD)
+ pmag.remove_files(filelist, dir_path)
os.chdir(WD)
def test_iodp_with_no_files(self):
@@ -212,6 +219,7 @@ class Test_iodp_srm_magic(unittest.TestCase):
#dir_path = os.path.join(WD, 'data_files', 'Measurement_Import',
# 'iodp_srm_magic')
dir_path = os.path.join(WD, 'data_files', 'UTESTA', 'SRM_data')
+ options['dir_path'] = dir_path
options['input_dir_path'] = dir_path
options['csv_file'] = 'srmsection-XXX-UTEST-A.csv'
program_ran, outfile = iodp_srm_magic.convert(**options)
diff --git a/programs/conversion_scripts/iodp_srm_magic.py b/programs/conversion_scripts/iodp_srm_magic.py
index <HASH>..<HASH> 100755
--- a/programs/conversion_scripts/iodp_srm_magic.py
+++ b/programs/conversion_scripts/iodp_srm_magic.py
@@ -299,12 +299,11 @@ def convert(**kwargs):
MeasOuts=pmag.measurements_methods3(MeasRecs,noave)
con.add_magic_table_from_data(dtype='measurements', data=MeasOuts)
- con.tables['specimens'].write_magic_file(custom_name=spec_file)
- con.tables['samples'].write_magic_file(custom_name=samp_file)
- con.tables['sites'].write_magic_file(custom_name=site_file)
- con.tables['locations'].write_magic_file(custom_name=loc_file)
- con.tables['measurements'].write_magic_file(custom_name=meas_file)
-
+ con.write_table_to_file('specimens', custom_name=spec_file)
+ con.write_table_to_file('samples', custom_name=spec_file)
+ con.write_table_to_file('sites', custom_name=spec_file)
+ con.write_table_to_file('locations', custom_name=spec_file)
+ con.write_table_to_file('measurements', custom_name=spec_file)
return (True, meas_file)
# helper
|
fix output file writing in iodp_srm_magic
|
PmagPy_PmagPy
|
train
|
6c8e3664d466fa8a3653f70537b7a66c024aed19
|
diff --git a/src/main/java/hex/gbm/DRF.java b/src/main/java/hex/gbm/DRF.java
index <HASH>..<HASH> 100644
--- a/src/main/java/hex/gbm/DRF.java
+++ b/src/main/java/hex/gbm/DRF.java
@@ -221,7 +221,7 @@ public class DRF extends FrameJob {
Log.info(Sys.DRF__,"DRF done in "+t_drf);
// Remove temp vectors; cleanup the Frame
- while( fr.numCols() > ncols )
+ while( fr.numCols() > ncols+1/*Leave response*/ )
UKV.remove(fr.remove(fr.numCols()-1)._key);
remove();
tryComplete();
diff --git a/src/main/java/hex/gbm/GBM.java b/src/main/java/hex/gbm/GBM.java
index <HASH>..<HASH> 100644
--- a/src/main/java/hex/gbm/GBM.java
+++ b/src/main/java/hex/gbm/GBM.java
@@ -161,7 +161,7 @@ public class GBM extends FrameJob {
Log.info(Sys.GBM__,"GBM Modeling done in "+t_gbm);
// Remove temp vectors; cleanup the Frame
- while( fr.numCols() > ncols )
+ while( fr.numCols() > ncols+1/*Leave response*/ )
UKV.remove(fr.remove(fr.numCols()-1)._key);
remove();
tryComplete();
|
Was over-nuking the response col by mistake
|
h2oai_h2o-2
|
train
|
fb2cd8fc3e3a5bb433e88981d0e2b0dc9d00ab2f
|
diff --git a/drivers/bridge/setup_ip_tables.go b/drivers/bridge/setup_ip_tables.go
index <HASH>..<HASH> 100644
--- a/drivers/bridge/setup_ip_tables.go
+++ b/drivers/bridge/setup_ip_tables.go
@@ -138,6 +138,7 @@ func setupIPTablesInternal(bridgeIface string, addr net.Addr, icc, ipmasq, hairp
address = addr.String()
natRule = iptRule{table: iptables.Nat, chain: "POSTROUTING", preArgs: []string{"-t", "nat"}, args: []string{"-s", address, "!", "-o", bridgeIface, "-j", "MASQUERADE"}}
hpNatRule = iptRule{table: iptables.Nat, chain: "POSTROUTING", preArgs: []string{"-t", "nat"}, args: []string{"-m", "addrtype", "--src-type", "LOCAL", "-o", bridgeIface, "-j", "MASQUERADE"}}
+ skipDNAT = iptRule{table: iptables.Nat, chain: DockerChain, preArgs: []string{"-t", "nat"}, args: []string{"-i", bridgeIface, "-j", "RETURN"}}
outRule = iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"}}
inRule = iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-o", bridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"}}
)
@@ -147,6 +148,9 @@ func setupIPTablesInternal(bridgeIface string, addr net.Addr, icc, ipmasq, hairp
if err := programChainRule(natRule, "NAT", enable); err != nil {
return err
}
+ if err := programChainRule(skipDNAT, "SKIP DNAT", enable); err != nil {
+ return err
+ }
}
// In hairpin mode, masquerade traffic from localhost
|
Allow inter-network connectivity via exposed ports
|
docker_libnetwork
|
train
|
fffba071534ca116d040d04753e3da5611c3b1ae
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index <HASH>..<HASH> 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,10 @@
-# master
+# Master
-...
+# Version 0.0.9
+
+### Changed
+
+* Many strategy (persistence): setter `roles=` does not call save anymore, so it does not persist new records!
# Version 0.0.6
diff --git a/lib/simple_roles/many/roles_methods.rb b/lib/simple_roles/many/roles_methods.rb
index <HASH>..<HASH> 100644
--- a/lib/simple_roles/many/roles_methods.rb
+++ b/lib/simple_roles/many/roles_methods.rb
@@ -52,7 +52,6 @@ module SimpleRoles
def has_roles? *rolez
rolez.flatten!
- # rrr roles
rolez.all? do |role|
roles.include? role
end
diff --git a/spec/simple_roles/many_spec.rb b/spec/simple_roles/many_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/simple_roles/many_spec.rb
+++ b/spec/simple_roles/many_spec.rb
@@ -79,6 +79,7 @@ describe SimpleRoles::Many do
it "#roles= should not persist the user when being set" do
unsaved_user = build :user
unsaved_user.roles = [:admin]
+ unsaved_user.roles.should == [:admin]
unsaved_user.should_not be_persisted
end
|
cosmetic preps for <I>
|
stanislaw_simple_roles
|
train
|
857b2d27abe6ebdd42fe0514a60f0f035817dbef
|
diff --git a/openquake/server/db/actions.py b/openquake/server/db/actions.py
index <HASH>..<HASH> 100644
--- a/openquake/server/db/actions.py
+++ b/openquake/server/db/actions.py
@@ -16,7 +16,6 @@
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import os
-import zipfile
import operator
from datetime import datetime
@@ -24,9 +23,7 @@ from django.core import exceptions
from django import db
from openquake.commonlib import datastore, valid
-from openquake.commonlib.export import export
from openquake.server.db import models
-from openquake.engine.export import core
from openquake.server.db.schema.upgrades import upgrader
from openquake.server.db import upgrade_manager
@@ -304,7 +301,7 @@ def log(job_id, timestamp, level, process, message):
"""
db.connection.cursor().execute(
'INSERT INTO log (job_id, timestamp, level, process, message) VALUES'
- '(?, ?, ?, ?, ?)', (job_id, timestamp, level, process, message))
+ '(%s, %s, %s, %s, %s)', (job_id, timestamp, level, process, message))
def get_log(job_id):
|
Compatibility with Django <I>
|
gem_oq-engine
|
train
|
fa552914353002ab2ca72edff3299674444a7f11
|
diff --git a/salt/modules/rbenv.py b/salt/modules/rbenv.py
index <HASH>..<HASH> 100644
--- a/salt/modules/rbenv.py
+++ b/salt/modules/rbenv.py
@@ -178,6 +178,7 @@ def install_ruby(ruby, runas=None):
ret = {}
ret = _rbenv_exec('install', ruby, env=env, runas=runas, ret=ret)
if ret['retcode'] == 0:
+ rehash(runas=runas)
return ret['stderr']
else:
# Cleanup the failed installation so it doesn't list as installed
@@ -268,6 +269,21 @@ def list_(runas=None):
return ret
+def rehash(runas=None):
+ '''
+ Run rbenv rehash to update the installed shims.
+
+ CLI Example:
+
+ .. code-block:: bash
+
+ salt '*' rbenv.rehash
+ '''
+
+ ret = _rbenv_exec('rehash', runas=runas)
+ return True
+
+
def do(cmdline=None, runas=None):
'''
Execute a ruby command with rbenv's shims from the user or the system.
@@ -287,6 +303,7 @@ def do(cmdline=None, runas=None):
)
if result['retcode'] == 0:
+ rehash(runas=runas)
return result['stdout']
else:
return False
|
Add rehash method for rbenv module
- After installing gems or rubies, rbenv rehash should be run
- No harm in running it multiple times or when not strictly needed in
the do method.
|
saltstack_salt
|
train
|
554d714e29d41741b81ad5c1f32b9fb71c1b5433
|
diff --git a/agent_proxy.go b/agent_proxy.go
index <HASH>..<HASH> 100644
--- a/agent_proxy.go
+++ b/agent_proxy.go
@@ -21,6 +21,16 @@ import (
// assert that the HostAgent implements the LoadBalancer interface
var _ LoadBalancer = &HostAgent{}
+type ServiceLogInfo struct {
+ ServiceID string
+ Message string
+}
+
+func (a *HostAgent) SendLogMessage(serviceLogInfo ServiceLogInfo, _ *struct{}) (err error) {
+ glog.Infof("Service: %v message: %v", serviceLogInfo.ServiceID, serviceLogInfo.Message)
+ return nil
+}
+
func (a *HostAgent) GetServiceEndpoints(serviceId string, response *map[string][]*dao.ApplicationEndpoint) (err error) {
controlClient, err := NewControlClient(a.master)
if err != nil {
diff --git a/interfaces.go b/interfaces.go
index <HASH>..<HASH> 100644
--- a/interfaces.go
+++ b/interfaces.go
@@ -77,6 +77,9 @@ type ContainerState struct {
// The API for a service proxy.
type LoadBalancer interface {
+ // SendLogMessage sends a log message from a container to the agent???
+ SendLogMessage(serviceLogInfo ServiceLogInfo, _ *struct{}) error
+
GetServiceEndpoints(serviceId string, endpoints *map[string][]*dao.ApplicationEndpoint) error
// GetProxySnapshotQuiece blocks until there is a snapshot request
diff --git a/lbClient.go b/lbClient.go
index <HASH>..<HASH> 100644
--- a/lbClient.go
+++ b/lbClient.go
@@ -28,6 +28,12 @@ func (a *LBClient) Close() error {
return a.rpcClient.Close()
}
+// SendLogMessage
+func (a *LBClient) SendLogMessage(serviceLogInfo ServiceLogInfo, _ *struct{}) error {
+ glog.V(4).Infof("ControlPlaneAgent.SendLogMessage()")
+ return a.rpcClient.Call("ControlPlaneAgent.SendLogMessage", serviceLogInfo, nil)
+}
+
// GetServiceEndpoints returns a list of endpoints for the given service endpoint request.
func (a *LBClient) GetServiceEndpoints(serviceId string, endpoints *map[string][]*dao.ApplicationEndpoint) error {
glog.V(4).Infof("ControlPlaneAgent.GetServiceEndpoints()")
diff --git a/serviced/proxy.go b/serviced/proxy.go
index <HASH>..<HASH> 100644
--- a/serviced/proxy.go
+++ b/serviced/proxy.go
@@ -55,6 +55,16 @@ func (cli *ServicedCli) CmdProxy(args ...string) error {
cmd.Stdin = os.Stdin
err := cmd.Run()
if err != nil {
+ client, err := serviced.NewLBClient(proxyOptions.servicedEndpoint)
+ message := fmt.Sprintf("Problem running service: %v. Command \"%v\" failed: %v", config.ServiceId, config.Command, err)
+ if err == nil {
+ defer client.Close()
+ glog.Errorf(message)
+ client.SendLogMessage(serviced.ServiceLogInfo{config.ServiceId, message}, nil)
+ } else {
+ glog.Errorf("Failed to create a client to endpoint %s: %s", proxyOptions.servicedEndpoint, err)
+ }
+
glog.Errorf("Problem running service: %v", err)
glog.Flush()
if exiterr, ok := err.(*exec.ExitError); ok && !proxyOptions.autorestart {
@@ -63,6 +73,7 @@ func (cli *ServicedCli) CmdProxy(args ...string) error {
}
}
}
+
if !proxyOptions.autorestart {
break
}
|
Added SendLogMessage method to send logs from the proxy to the master
|
control-center_serviced
|
train
|
8394cb6ccc65ad583f993c83807090006a7cd025
|
diff --git a/gitlab/__init__.py b/gitlab/__init__.py
index <HASH>..<HASH> 100644
--- a/gitlab/__init__.py
+++ b/gitlab/__init__.py
@@ -1792,7 +1792,7 @@ class Gitlab(object):
:param project_id: The ID of a project
:return: list of the labels
"""
- request = requests.get("{0}/{1}/labels".format(self.projects_url, project_id), params=data,
+ request = requests.get("{0}/{1}/labels".format(self.projects_url, project_id),
verify=self.verify_ssl, headers=self.headers)
if request.status_code == 200:
|
Fix a small problem with getlabels
|
pyapi-gitlab_pyapi-gitlab
|
train
|
f9ae1f7deb2abf6cb937de1d3b4da7304a44d7b5
|
diff --git a/simulator/src/main/java/com/hazelcast/simulator/worker/MemberWorker.java b/simulator/src/main/java/com/hazelcast/simulator/worker/MemberWorker.java
index <HASH>..<HASH> 100644
--- a/simulator/src/main/java/com/hazelcast/simulator/worker/MemberWorker.java
+++ b/simulator/src/main/java/com/hazelcast/simulator/worker/MemberWorker.java
@@ -285,7 +285,9 @@ public final class MemberWorker implements Worker {
hazelcastInstance.shutdown();
LOGGER.info("Stopping WorkerPerformanceMonitor");
- workerPerformanceMonitor.shutdown();
+ if (workerPerformanceMonitor != null) {
+ workerPerformanceMonitor.shutdown();
+ }
LOGGER.info("Stopping WorkerConnector...");
workerConnector.shutdown();
|
Fixed NPE in MemberWorker shutdown when running without performance monitoring.
|
hazelcast_hazelcast-simulator
|
train
|
64fae6c5d91fa856c628240e3da9b5874ec60115
|
diff --git a/worker/rsyslog/rsyslog_test.go b/worker/rsyslog/rsyslog_test.go
index <HASH>..<HASH> 100644
--- a/worker/rsyslog/rsyslog_test.go
+++ b/worker/rsyslog/rsyslog_test.go
@@ -66,11 +66,14 @@ func (s *RsyslogSuite) TestTearDown(c *gc.C) {
func (s *RsyslogSuite) TestRsyslogCert(c *gc.C) {
st, m := s.st, s.machine
- worker, err := rsyslog.NewRsyslogConfigWorker(st.Rsyslog(), rsyslog.RsyslogModeAccumulate, m.Tag(), "", nil)
+ err := s.machine.SetAddresses(network.NewAddress("example.com", network.ScopeUnknown))
+ c.Assert(err, gc.IsNil)
+
+ worker, err := rsyslog.NewRsyslogConfigWorker(st.Rsyslog(), rsyslog.RsyslogModeAccumulate, m.Tag(), "", []string{"0.1.2.3"})
c.Assert(err, gc.IsNil)
defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
defer worker.Kill()
- waitForFile(c, filepath.Join(*rsyslog.LogDir, "ca-cert.pem"))
+ waitForFile(c, filepath.Join(*rsyslog.LogDir, "rsyslog-cert.pem"))
rsyslogCertPEM, err := ioutil.ReadFile(filepath.Join(*rsyslog.LogDir, "rsyslog-cert.pem"))
c.Assert(err, gc.IsNil)
@@ -78,7 +81,7 @@ func (s *RsyslogSuite) TestRsyslogCert(c *gc.C) {
cert, err := cert.ParseCert(string(rsyslogCertPEM))
c.Assert(err, gc.IsNil)
- c.Assert(cert.DNSNames, gc.DeepEquals, []string{"*"})
+ c.Assert(cert.DNSNames, gc.DeepEquals, []string{"example.com", "*"})
subject := cert.Subject
c.Assert(subject.CommonName, gc.Equals, "*")
|
rsyslog: exercise rsyslogHosts conditional logic in test
|
juju_juju
|
train
|
8f3f624c22632afde6ae4864b062251dd7323ef7
|
diff --git a/lib/api/npm.js b/lib/api/npm.js
index <HASH>..<HASH> 100644
--- a/lib/api/npm.js
+++ b/lib/api/npm.js
@@ -79,7 +79,7 @@ async function getDependency(name, logger) {
logger.trace({ dependency: dep }, JSON.stringify(dep));
return dep;
} catch (err) {
- logger.warn(`Dependency not found: ${name}`);
+ logger.debug(`Dependency not found: ${name}`);
logger.debug(`err: ${JSON.stringify(err)}`);
return null;
}
|
refactor(npm): Don’t warn twice in logs for dep lookup
Closes #<I>
|
renovatebot_renovate
|
train
|
280f0b46618d201e6e2f89f938c8d6b66dc12eea
|
diff --git a/lib/puppet/parser/scope.rb b/lib/puppet/parser/scope.rb
index <HASH>..<HASH> 100644
--- a/lib/puppet/parser/scope.rb
+++ b/lib/puppet/parser/scope.rb
@@ -519,14 +519,14 @@ class Puppet::Parser::Scope
# of the objects contained in this scope.
def tag(*ary)
ary.each { |tag|
- unless tag =~ /^\w[-\w]+$/
- fail Puppet::ParseError, "Invalid tag %s" % tag.inspect
- end
if tag.nil? or tag == ""
puts caller
Puppet.debug "got told to tag with %s" % tag.inspect
next
end
+ unless tag =~ /^\w[-\w]+$/
+ fail Puppet::ParseError, "Invalid tag %s" % tag.inspect
+ end
tag = tag.to_s
unless @tags.include?(tag)
#Puppet.info "Tagging scope %s with %s" % [self.object_id, tag]
|
Still trying to track down the tagging problem
git-svn-id: <URL>
|
puppetlabs_puppet
|
train
|
b3c9a3c4c549e2b1f8fe3efdc5cd1e02fbde51fe
|
diff --git a/tests/TestCase.php b/tests/TestCase.php
index <HASH>..<HASH> 100644
--- a/tests/TestCase.php
+++ b/tests/TestCase.php
@@ -67,5 +67,7 @@ class TestCase extends \Orchestra\Testbench\TestCase
'database' => ':memory:',
'prefix' => '',
]);
+ //If the database redis has a prefix, Lada fails to flush the cache
+ $app['config']->set('database.redis.options.prefix', false);
}
}
\ No newline at end of file
|
Make all tests pass by removing the redis prefix in the Laravel config
|
spiritix_lada-cache
|
train
|
0359483c694abe2296a10d7ebf6e67b3a16957fc
|
diff --git a/SingularityBase/src/main/java/com/hubspot/mesos/MesosUtils.java b/SingularityBase/src/main/java/com/hubspot/mesos/MesosUtils.java
index <HASH>..<HASH> 100644
--- a/SingularityBase/src/main/java/com/hubspot/mesos/MesosUtils.java
+++ b/SingularityBase/src/main/java/com/hubspot/mesos/MesosUtils.java
@@ -10,7 +10,6 @@ import java.util.Comparator;
import java.util.List;
import java.util.Random;
import java.util.Set;
-import java.util.Iterator;
import com.google.common.collect.Sets;
import org.apache.mesos.Protos.MasterInfo;
@@ -44,8 +43,7 @@ public final class MesosUtils {
private static double getScalar(List<Resource> resources, String name, Optional<String> requiredRole) {
for (Resource r : resources) {
-
- if (r.hasName() && r.getName().equals(name) && r.hasScalar() && isRequiredRole(r, requiredRole)) {
+ if (r.hasName() && r.getName().equals(name) && r.hasScalar() && hasRequiredRole(r, requiredRole)) {
return getScalar(r);
}
}
@@ -53,11 +51,11 @@ public final class MesosUtils {
return 0;
}
- private static Boolean hasRole(Resource r) {
+ private static boolean hasRole(Resource r) {
return r.hasRole() && !r.getRole().equals("*");
}
- private static Boolean isRequiredRole(Resource r, Optional<String> requiredRole) {
+ private static boolean hasRequiredRole(Resource r, Optional<String> requiredRole) {
if (requiredRole.isPresent() && hasRole(r)) {
// required role with a resource with role
@@ -228,20 +226,7 @@ public final class MesosUtils {
return Resource.newBuilder().setName(name).setType(Type.RANGES).setRanges(Ranges.newBuilder().addRange(Range.newBuilder().setBegin(begin).setEnd(end).build()).build()).build();
}
- public static String getRolesInfo(Offer offer) {
- StringBuilder info = new StringBuilder();
- info.append("[");
- for (Iterator<String> itr = getRoles(offer).iterator(); itr.hasNext(); ) {
- info.append(itr.next());
- if (itr.hasNext()) {
- info.append(", ");
- }
- }
- info.append("]");
- return info.toString();
- }
-
- private static Set<String> getRoles(Offer offer) {
+ public static Set<String> getRoles(Offer offer) {
Set<String> roles = Sets.newHashSet();
for (Resource r : offer.getResourcesList()) {
diff --git a/SingularityService/src/main/java/com/hubspot/singularity/mesos/SingularityMesosScheduler.java b/SingularityService/src/main/java/com/hubspot/singularity/mesos/SingularityMesosScheduler.java
index <HASH>..<HASH> 100644
--- a/SingularityService/src/main/java/com/hubspot/singularity/mesos/SingularityMesosScheduler.java
+++ b/SingularityService/src/main/java/com/hubspot/singularity/mesos/SingularityMesosScheduler.java
@@ -120,7 +120,7 @@ public class SingularityMesosScheduler implements Scheduler {
LOG.info("Received {} offer(s)", offers.size());
for (Offer offer : offers) {
- String rolesInfo = MesosUtils.getRolesInfo(offer);
+ String rolesInfo = MesosUtils.getRoles(offer).toString();
LOG.debug("Received offer ID {} with roles {} from {} ({}) for {} cpu(s), {} memory, {} ports, and {} disk", offer.getId().getValue(), rolesInfo, offer.getHostname(), offer.getSlaveId().getValue(), MesosUtils.getNumCpus(offer), MesosUtils.getMemory(offer),
MesosUtils.getNumPorts(offer), MesosUtils.getDisk(offer));
}
@@ -273,9 +273,9 @@ public class SingularityMesosScheduler implements Scheduler {
return Optional.of(task);
} else {
- String rolesInfo = MesosUtils.getRolesInfo(offerHolder.getOffer());
+ String rolesInfo = MesosUtils.getRoles(offerHolder.getOffer()).toString();
LOG.trace("Ignoring offer {} with roles {} on {} for task {}; matched resources: {}, slave match state: {}", offerHolder.getOffer().getId(), rolesInfo, offerHolder.getOffer().getHostname(), taskRequest
- .getPendingTask().getPendingTaskId(), matchesResources, slaveMatchState);
+ .getPendingTask().getPendingTaskId(), matchesResources, slaveMatchState);
}
}
|
Minor cosmetic changes based on comments
|
HubSpot_Singularity
|
train
|
93fa5717d31506fb7cfeb34f12176d0482033585
|
diff --git a/nabu/provider/CNabuProviderFactory.php b/nabu/provider/CNabuProviderFactory.php
index <HASH>..<HASH> 100644
--- a/nabu/provider/CNabuProviderFactory.php
+++ b/nabu/provider/CNabuProviderFactory.php
@@ -68,7 +68,8 @@ class CNabuProviderFactory extends CNabuObject implements INabuSingleton
$this->nb_manager_list = new CNabuProviderManagerList();
$this->nb_interface_list = array(
self::INTERFACE_MESSAGING_SERVICE => new CNabuProviderInterfaceDescriptorList(),
- self::INTERFACE_MESSAGING_TEMPLATE_RENDER => new CNabuProviderInterfaceDescriptorList()
+ self::INTERFACE_MESSAGING_TEMPLATE_RENDER => new CNabuProviderInterfaceDescriptorList(),
+ self::INTERFACE_RENDER => new CNabuProviderInterfaceDescriptorList()
);
}
|
add PDF as Interface collection to Providers Factory
|
nabu-3_core
|
train
|
c1d195bf5a8d5fee5a324cc01177243edc1eb13f
|
diff --git a/src/Import_Command.php b/src/Import_Command.php
index <HASH>..<HASH> 100644
--- a/src/Import_Command.php
+++ b/src/Import_Command.php
@@ -5,7 +5,7 @@ class Import_Command extends WP_CLI_Command {
var $processed_posts = array();
/**
- * Import content from a WXR file.
+ * Imports content from a given WXR file.
*
* Provides a command line interface to the WordPress Importer plugin, for
* performing data migrations.
@@ -87,7 +87,7 @@ class Import_Command extends WP_CLI_Command {
}
/**
- * Import a WXR file
+ * Imports a WXR file.
*/
private function import_wxr( $file, $args ) {
@@ -174,7 +174,7 @@ class Import_Command extends WP_CLI_Command {
}
/**
- * Useful verbosity filters for the WXR importer
+ * Defines useful verbosity filters for the WXR importer.
*/
private function add_wxr_filters() {
@@ -241,7 +241,7 @@ class Import_Command extends WP_CLI_Command {
}
/**
- * Is the requested importer available?
+ * Determines whether the requested importer is available.
*/
private function is_importer_available() {
require_once ABSPATH . 'wp-admin/includes/plugin.php';
@@ -261,7 +261,7 @@ class Import_Command extends WP_CLI_Command {
}
/**
- * Process how the authors should be mapped
+ * Processes how the authors should be mapped
*
* @param string $authors_arg The `--author` argument originally passed to command
* @param array $author_data An array of WP_User-esque author objects
@@ -292,7 +292,7 @@ class Import_Command extends WP_CLI_Command {
}
/**
- * Read an author mapping file
+ * Reads an author mapping file.
*/
private function read_author_mapping_file( $file ) {
$author_mapping = array();
@@ -308,7 +308,7 @@ class Import_Command extends WP_CLI_Command {
}
/**
- * Create an author mapping file, based on provided author data
+ * Creates an author mapping file, based on provided author data.
*
* @return WP_Error The file was just now created, so some action needs to be taken
*/
@@ -331,7 +331,7 @@ class Import_Command extends WP_CLI_Command {
}
/**
- * Create users if they don't exist, and build an author mapping file
+ * Creates users if they don't exist, and build an author mapping file.
*/
private function create_authors_for_mapping( $author_data ) {
@@ -377,7 +377,7 @@ class Import_Command extends WP_CLI_Command {
}
/**
- * Suggest a blog user based on the levenshtein distance
+ * Suggests a blog user based on the levenshtein distance.
*/
private function suggest_user( $author_user_login, $author_user_email = '' ) {
|
Convert import command help summary (and other method DocBlock summaries) to use third-person singular verbs.
|
wp-cli_import-command
|
train
|