hash
stringlengths 40
40
| diff
stringlengths 131
114k
| message
stringlengths 7
980
| project
stringlengths 5
67
| split
stringclasses 1
value |
|---|---|---|---|---|
3b4683a9cdae61c8120bf36719c1ae6d0403a35e
|
diff --git a/lib/mongoid/slug/criterion_optional_extensions.rb b/lib/mongoid/slug/criterion_optional_extensions.rb
index <HASH>..<HASH> 100644
--- a/lib/mongoid/slug/criterion_optional_extensions.rb
+++ b/lib/mongoid/slug/criterion_optional_extensions.rb
@@ -6,7 +6,7 @@ module Mongoid::Criterion::ForSlug
# resembles a BSON::ObjectId
BSON::ObjectId.from_string(ids.first) unless ids.first.is_a?(BSON::ObjectId)
# id
- super(*ids) # Fallback to original Mongoid::Criterion::Optional
+ super # Fallback to original Mongoid::Criterion::Optional
rescue BSON::InvalidObjectId
# slug
if ids.size > 1
|
Calling super without arguments is sufficient here (it passes the original arguments through)
|
mongoid_mongoid-slug
|
train
|
251b16908fb315c650af7002a6fc2fe0343e564c
|
diff --git a/app/app.go b/app/app.go
index <HASH>..<HASH> 100644
--- a/app/app.go
+++ b/app/app.go
@@ -2390,7 +2390,7 @@ func (app *App) Shell(opts provision.ExecOptions) error {
return provision.ProvisionerNotSupported{Prov: prov, Action: "running shell"}
}
opts.App = app
- opts.Cmds = cmdsForExec("bash -l")
+ opts.Cmds = cmdsForExec("[ -x /bin/bash ] && /bin/bash -l || sh -l")
return execProv.ExecuteCommand(app.ctx, opts)
}
diff --git a/app/app_test.go b/app/app_test.go
index <HASH>..<HASH> 100644
--- a/app/app_test.go
+++ b/app/app_test.go
@@ -4426,7 +4426,7 @@ func (s *S) TestShellToUnit(c *check.C) {
allExecs := s.provisioner.AllExecs()
c.Assert(allExecs, check.HasLen, 1)
c.Assert(allExecs[unit.GetID()], check.HasLen, 1)
- c.Assert(allExecs[unit.GetID()][0].Cmds, check.DeepEquals, []string{"/bin/sh", "-c", "[ -f /home/application/apprc ] && source /home/application/apprc; [ -d /home/application/current ] && cd /home/application/current; bash -l"})
+ c.Assert(allExecs[unit.GetID()][0].Cmds, check.DeepEquals, []string{"/bin/sh", "-c", "[ -f /home/application/apprc ] && source /home/application/apprc; [ -d /home/application/current ] && cd /home/application/current; [ -x /bin/bash ] && /bin/bash -l || sh -l"})
}
func (s *S) TestShellNoUnits(c *check.C) {
@@ -4449,7 +4449,7 @@ func (s *S) TestShellNoUnits(c *check.C) {
allExecs := s.provisioner.AllExecs()
c.Assert(allExecs, check.HasLen, 1)
c.Assert(allExecs["isolated"], check.HasLen, 1)
- c.Assert(allExecs["isolated"][0].Cmds, check.DeepEquals, []string{"/bin/sh", "-c", "[ -f /home/application/apprc ] && source /home/application/apprc; [ -d /home/application/current ] && cd /home/application/current; bash -l"})
+ c.Assert(allExecs["isolated"][0].Cmds, check.DeepEquals, []string{"/bin/sh", "-c", "[ -f /home/application/apprc ] && source /home/application/apprc; [ -d /home/application/current ] && cd /home/application/current; [ -x /bin/bash ] && /bin/bash -l || sh -l"})
}
func (s *S) TestSetCertificateForApp(c *check.C) {
|
feat(app/shell): fallback to dash when bash is not found
|
tsuru_tsuru
|
train
|
ee511bb76dbd7c432b13d0b0e125d8c88d7f61b9
|
diff --git a/static/js/webirc.js b/static/js/webirc.js
index <HASH>..<HASH> 100644
--- a/static/js/webirc.js
+++ b/static/js/webirc.js
@@ -206,7 +206,7 @@ webircApp.directive('chatlog', function() {
case 'NickChange':
return basicText('activity_info', '* ' + activity.oldNickname + ' is now known as ' + activity.newNickname);
case 'Notice':
- return basicText('activity_notice', '* Notice from ' + originNickOrName(activity.origin) + ': ' + activity.text);
+ return basicText('activity_notice', '-' + originNickOrName(activity.origin) + '- ' + activity.text);
case 'Part':
return basicText('activity_info', '* Part: ' + activity.who.nick + ' (' + activity.who.user + '@' + activity.who.host + ')');
case 'Quit':
|
changed how incoming notices appear: -nick- msg
|
pavben_WebIRC
|
train
|
22987e07453f3dbdb86f4b0a1ae98cb423aaf0a7
|
diff --git a/examples/lstm_chime.py b/examples/lstm_chime.py
index <HASH>..<HASH> 100644
--- a/examples/lstm_chime.py
+++ b/examples/lstm_chime.py
@@ -52,10 +52,8 @@ def batches(dataset):
e = theanets.Experiment(
theanets.recurrent.Classifier,
- layers=(39, 500, 51),
- recurrent_layers=(0, 1),
+ layers=(39, ('lstm', 100), ('lstm', 200), ('lstm', 78), 51),
recurrent_error_start=0,
- recurrent_form='LSTM',
output_activation='softmax',
hidden_activation='tanh',
batch_size=BATCH_SIZE,
|
Update lstm example with new recurrent layer definitions.
|
lmjohns3_theanets
|
train
|
342cb9053ef3f065f26949950fb25ee196a6b23b
|
diff --git a/src/utils/deepMerge.js b/src/utils/deepMerge.js
index <HASH>..<HASH> 100644
--- a/src/utils/deepMerge.js
+++ b/src/utils/deepMerge.js
@@ -1,8 +1,12 @@
-import { mergeDeepRight } from 'ramda';
+import { isNil, mergeDeepWith } from 'ramda';
+
+const merge = (a, b) => {
+ return isNil(b) ? a : b;
+};
const deepMerge = objs =>
objs.reduce((acc, obj) => {
- return mergeDeepRight(acc, obj);
+ return mergeDeepWith(merge, acc, obj);
}, {});
export default deepMerge;
|
Gracefully handle undefined styles (#<I>)
|
diegomura_react-pdf
|
train
|
39bd29a4cd24d45361f065ff1f3b8537378d5380
|
diff --git a/common/ledger/blkstorage/blockindex_test.go b/common/ledger/blkstorage/blockindex_test.go
index <HASH>..<HASH> 100644
--- a/common/ledger/blkstorage/blockindex_test.go
+++ b/common/ledger/blkstorage/blockindex_test.go
@@ -444,11 +444,3 @@ func verifyTxIDKeyDecodable(t *testing.T, txIDKey []byte, expectedTxID string, e
require.Equal(t, expectedTxNum, txNum)
require.Len(t, txIDKey, firstIndexTxNum+n)
}
-
-type errorThrowingWriter struct {
- err error
-}
-
-func (w *errorThrowingWriter) Write(p []byte) (n int, err error) {
- return 0, w.err
-}
|
Remove dead struct from blockindex_test
|
hyperledger_fabric
|
train
|
7f3271fb8e948c7980a2e6fdcda355b9f2b144cf
|
diff --git a/packages/builder/utils/ProgressBar.js b/packages/builder/utils/ProgressBar.js
index <HASH>..<HASH> 100644
--- a/packages/builder/utils/ProgressBar.js
+++ b/packages/builder/utils/ProgressBar.js
@@ -29,7 +29,7 @@ module.exports = class ProgressBar {
const empty_bar = this.get_bar(empty_bar_length, '-')
const percentage_progress = (current_progress * 100).toFixed(2)
- if (typeof process.stdout.clearLine() === 'function') {
+ if (typeof process.stdout.clearLine === 'function') {
process.stdout.clearLine()
process.stdout.cursorTo(0)
process.stdout.write(
|
[builder] - fix progress process stdout - hotfix :fire:
|
ciffi_ciffi-js
|
train
|
ef130f3aaab38af1f1c8b61b8cf54a9bd6c4c2bf
|
diff --git a/src/components/view.js b/src/components/view.js
index <HASH>..<HASH> 100644
--- a/src/components/view.js
+++ b/src/components/view.js
@@ -10,11 +10,14 @@ export default {
render (h, { props, children, parent, data }) {
data.routerView = true
+ const name = props.name
const route = parent.$route
const cache = parent._routerViewCache || (parent._routerViewCache = {})
+
+ // determine current view depth, also check to see if the tree
+ // has been toggled inactive but kept-alive.
let depth = 0
let inactive = false
-
while (parent) {
if (parent.$vnode && parent.$vnode.data.routerView) {
depth++
@@ -24,30 +27,33 @@ export default {
}
parent = parent.$parent
}
-
data.routerViewDepth = depth
+
+ // render previous view if the tree is inactive and kept-alive
+ if (inactive) {
+ return h(cache[name], data, children)
+ }
+
const matched = route.matched[depth]
+ // render empty node if no matched route
if (!matched) {
+ cache[name] = null
return h()
}
- const name = props.name
- const component = inactive
- ? cache[name]
- : (cache[name] = matched.components[name])
+ const component = cache[name] = matched.components[name]
- if (!inactive) {
- const hooks = data.hook || (data.hook = {})
- hooks.init = vnode => {
- matched.instances[name] = vnode.child
- }
- hooks.prepatch = (oldVnode, vnode) => {
- matched.instances[name] = vnode.child
- }
- hooks.destroy = vnode => {
- if (matched.instances[name] === vnode.child) {
- matched.instances[name] = undefined
- }
+ // inject instance registration hooks
+ const hooks = data.hook || (data.hook = {})
+ hooks.init = vnode => {
+ matched.instances[name] = vnode.child
+ }
+ hooks.prepatch = (oldVnode, vnode) => {
+ matched.instances[name] = vnode.child
+ }
+ hooks.destroy = vnode => {
+ if (matched.instances[name] === vnode.child) {
+ matched.instances[name] = undefined
}
}
|
<router-view>: keep previous view when tree is toggled inactive but kept-alive. (partially address vuejs/vue#<I>)
|
vuejs_vue-router
|
train
|
f6a8c95428ee71c5641477d0d3ab1c53c5f708e5
|
diff --git a/argcomplete/completers.py b/argcomplete/completers.py
index <HASH>..<HASH> 100644
--- a/argcomplete/completers.py
+++ b/argcomplete/completers.py
@@ -85,7 +85,7 @@ class _FilteredFilesCompleter(object):
A predicate accepts as its only argument a candidate path and either
accepts it or rejects it.
'''
- assert predicate and callable(predicate), 'Expected a callable predicate'
+ assert predicate, 'Expected a callable predicate'
self.predicate = predicate
def __call__(self, prefix, **kwargs):
|
Removed assertion at _FilteredFilesCompleter
|
kislyuk_argcomplete
|
train
|
2e767f7adfed7569d6e583c8fdf6de4a2d0b9ab2
|
diff --git a/index/postings.go b/index/postings.go
index <HASH>..<HASH> 100644
--- a/index/postings.go
+++ b/index/postings.go
@@ -1126,6 +1126,22 @@ func (f *segmentTermsEnumFrame) rewind() {
}
}
+func (f *segmentTermsEnumFrame) next() bool {
+ if f.isLeafBlock {
+ return f.nextLeaf()
+ }
+ return f.nextNonLeaf()
+}
+
+// Decodes next entry; returns true if it's a sub-block
+func (f *segmentTermsEnumFrame) nextLeaf() bool {
+ panic("not implemented yet")
+}
+
+func (f *segmentTermsEnumFrame) nextNonLeaf() bool {
+ panic("not implemented yet")
+}
+
// TODO: make this array'd so we can do bin search?
// likely not worth it? need to measure how many
// floor blocks we "typically" get
@@ -1194,7 +1210,12 @@ func (f *segmentTermsEnumFrame) scanToFloorFrame(target []byte) {
// Used only by assert
func (f *segmentTermsEnumFrame) prefixMatches(target []byte) bool {
- panic("not implemented yet")
+ for i := 0; i < f.prefix; i++ {
+ if target[i] != f.term[i] {
+ return false
+ }
+ }
+ return true
}
// NOTE: sets startBytePos/suffix as a side effect
@@ -1236,8 +1257,9 @@ func (f *segmentTermsEnumFrame) scanToTermLeaf(target []byte, exactOnly bool) (s
return 0, err
}
+ suffixReaderPos := f.suffixesReader.Pos
log.Printf(" cycle: term %v (of %v) suffix=%v",
- f.nextEnt-1, f.entCount, brToString(f.suffixBytes[f.suffixesReader.Pos:]))
+ f.nextEnt-1, f.entCount, brToString(f.suffixBytes[suffixReaderPos:suffixReaderPos+f.suffix]))
termLen := f.prefix + f.suffix
f.startBytePos = f.suffixesReader.Pos
@@ -1270,6 +1292,7 @@ func (f *segmentTermsEnumFrame) scanToTermLeaf(target []byte, exactOnly bool) (s
}
if cmp < 0 {
+ log.Println("DEBUG keep scanning")
// Current entry is still before the target;
// keep scanning
@@ -1282,24 +1305,30 @@ func (f *segmentTermsEnumFrame) scanToTermLeaf(target []byte, exactOnly bool) (s
}
break
} else if cmp > 0 {
+ log.Println("DEBUG done")
// // Done! Current entry is after target --
// // return NOT_FOUND:
- // fillTerm();
-
- // if (!exactOnly && !termExists) {
- // // We are on a sub-block, and caller wants
- // // us to position to the next term after
- // // the target, so we must recurse into the
- // // sub-frame(s):
- // currentFrame = pushFrame(null, currentFrame.lastSubFP, termLen);
- // currentFrame.loadBlock();
- // while (currentFrame.next()) {
- // currentFrame = pushFrame(null, currentFrame.lastSubFP, term.length);
- // currentFrame.loadBlock();
- // }
- // }
-
- // //if (DEBUG) System.out.println(" not found");
+ f.fillTerm()
+
+ if !exactOnly && !f.termExists {
+ // We are on a sub-block, and caller wants
+ // us to position to the next term after
+ // the target, so we must recurse into the
+ // sub-frame(s):
+ if f.currentFrame, err = f.pushFrameAt(nil, f.currentFrame.lastSubFP, termLen); err == nil {
+ err = f.currentFrame.loadBlock()
+ }
+ for err == nil && f.currentFrame.next() {
+ if f.currentFrame, err = f.pushFrameAt(nil, f.currentFrame.lastSubFP, len(f.term)); err == nil {
+ err = f.currentFrame.loadBlock()
+ }
+ }
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ log.Println(" not found")
return SEEK_STATUS_NOT_FOUND, nil
} else if stop {
// Exact match!
|
migrate BTTReader's internal methods
|
balzaczyy_golucene
|
train
|
5ad5cd9b6e34697abd3763d8a54ef8ab88fc2140
|
diff --git a/tests/unit/pydsl_test.py b/tests/unit/pydsl_test.py
index <HASH>..<HASH> 100644
--- a/tests/unit/pydsl_test.py
+++ b/tests/unit/pydsl_test.py
@@ -66,7 +66,11 @@ state('A').service.running(name='apache')
# 2 rather than 1 because pydsl adds an extra no-op state
# declaration.
- s = result.itervalues().next()['file']
+ s_iter = result.itervalues()
+ try:
+ s = s_iter.next()['file']
+ except KeyError:
+ s = s_iter.next()['file']
self.assertEqual(s[0], 'managed')
self.assertEqual(s[1]['name'], 'myfile.txt')
self.assertEqual(s[2]['source'], 'salt://path/to/file')
|
Fix a bug in the pydsl test case.
|
saltstack_salt
|
train
|
54fd4c88f5347496ecc93b90d274eb5b9caa023e
|
diff --git a/blockstack_cli_0.14.1/blockstack_client/backend/nameops.py b/blockstack_cli_0.14.1/blockstack_client/backend/nameops.py
index <HASH>..<HASH> 100644
--- a/blockstack_cli_0.14.1/blockstack_client/backend/nameops.py
+++ b/blockstack_cli_0.14.1/blockstack_client/backend/nameops.py
@@ -20,7 +20,7 @@ from .queue import in_queue, queue_append, queue_findone
from .blockchain import get_tx_confirmations
from .blockchain import is_address_usable
-from .blockchain import can_receive_name, get_balance, get_tx_fee
+from .blockchain import can_receive_name, get_balance, get_tx_fee, get_utxos
from crypto.utils import get_address_from_privkey, get_pubkey_from_privkey
@@ -28,7 +28,7 @@ from ..utils import pretty_print as pprint
from ..utils import pretty_dump
from ..config import PREORDER_CONFIRMATIONS, DEFAULT_QUEUE_PATH, CONFIG_PATH, get_utxo_provider_client, get_tx_broadcaster
-from ..config import get_logger
+from ..config import get_logger, APPROX_TX_IN_P2PKH_LEN, APPROX_TX_OUT_P2PKH_LEN, APPROX_TX_OVERHEAD_LEN
from ..proxy import get_default_proxy
from ..proxy import getinfo as blockstack_getinfo
@@ -130,7 +130,7 @@ def estimate_renewal_tx_fee( name, payment_privkey, owner_address, utxo_client,
return tx_fee
-def estimate_update_tx_fee( name, payment_privkey, owner_address, utxo_client, config_path=CONFIG_PATH ):
+def estimate_update_tx_fee( name, payment_privkey, owner_address, utxo_client, config_path=CONFIG_PATH, payment_address=None ):
"""
Estimate the transaction fee of an update
Return the number of satoshis on success
@@ -140,15 +140,37 @@ def estimate_update_tx_fee( name, payment_privkey, owner_address, utxo_client, c
fake_consensus_hash = 'd4049672223f42aac2855d2fbf2f38f0'
fake_zonefile_hash = '20b512149140494c0f7d565023973226908f6940'
+ signed_subsidized_tx = None
+
try:
unsigned_tx = update_tx( name, fake_zonefile_hash, fake_consensus_hash, owner_address, utxo_client, subsidize=True )
- subsidized_tx = tx_make_subsidizable( unsigned_tx, fees_update, 21 * 10**14, payment_privkey, utxo_client )
+ if payment_privkey is not None:
+ # actually try to subsidize this tx
+ subsidized_tx = tx_make_subsidizable( unsigned_tx, fees_update, 21 * 10**14, payment_privkey, utxo_client )
+ signed_subsidized_tx = sign_tx( subsidized_tx, fake_privkey )
+
+ # there will be at least one more output here (the registration output), so append that too
+ signed_subsidized_tx += "00" * (APPROX_TX_OVERHEAD_LEN + APPROX_TX_IN_P2PKH_LEN + APPROX_TX_OUT_P2PKH_LEN)
+
+ else:
+ # do a rough size estimation
+ if payment_address is not None:
+ log.debug("Payment private key not given; estimating the subsidization fee from UTXOs")
+ payment_utxos = get_utxos( payment_address, config_path=config_path, utxo_client=utxo_client )
+ if payment_utxos is None:
+ raise ValueError()
+
+ # assuming they're p2pkh outputs...
+ subsidy_byte_count = APPROX_TX_OVERHEAD_LEN + ((len(payment_utxos) + 3) * APPROX_TX_IN_P2PKH_LEN) + APPROX_TX_OUT_P2PKH_LEN
+ signed_subsidized_tx = unsigned_tx + "00" * (71 + subsidy_byte_count) # ~71 bytes for signature
+
+ else:
+ raise Exception("Need either payment_privkey or payment_address")
+
except ValueError:
log.debug("Insufficient funds: Not enough inputs to make an update transaction.")
return None
- signed_subsidized_tx = sign_tx( subsidized_tx, fake_privkey )
-
tx_fee = get_tx_fee( signed_subsidized_tx, config_path=config_path )
if tx_fee is None:
log.error("Failed to get tx fee")
|
estimate update transaction fees from the payment address, if the
private key is unavailable. Also, account for the extra owner input
that will go into an update that follows a register.
|
blockstack_blockstack-core
|
train
|
447e6d065e36b47d7ecfab063f3747a86a35e279
|
diff --git a/src/Tweech/Subscribers/ChatMessageSubscriber.php b/src/Tweech/Subscribers/ChatMessageSubscriber.php
index <HASH>..<HASH> 100644
--- a/src/Tweech/Subscribers/ChatMessageSubscriber.php
+++ b/src/Tweech/Subscribers/ChatMessageSubscriber.php
@@ -26,7 +26,7 @@ class ChatMessageSubscriber extends EventSubscriber{
{
$message = $event->getMessage();
if($this->registry->isCommand($message)){
- $this->registry->getCommandAndRegister($message);
+ $this->registry->getCommandAndExecute($message);
}
}
|
Renamed getCommandAndRegister getCommandAndExecute
|
raideer_tweech-framework
|
train
|
2c441c54baf52776dc1e3dfbc0217b50ba385814
|
diff --git a/law/cli/run.py b/law/cli/run.py
index <HASH>..<HASH> 100644
--- a/law/cli/run.py
+++ b/law/cli/run.py
@@ -49,10 +49,7 @@ def execute(args):
if not info:
abort("task family '{}' not found in db".format(args.task_family))
modid, task_family, _ = info
- try:
- __import__(modid, globals(), locals())
- except ImportError:
- abort("could not import module '{}'".format(modid))
+ __import__(modid, globals(), locals())
# import the module and run luigi
luigi_run([task_family] + sys.argv[3:])
diff --git a/law/contrib/tensorflow/formatter.py b/law/contrib/tensorflow/formatter.py
index <HASH>..<HASH> 100644
--- a/law/contrib/tensorflow/formatter.py
+++ b/law/contrib/tensorflow/formatter.py
@@ -34,7 +34,7 @@ class TFConstantGraphFormatter(Formatter):
@classmethod
def dump(cls, path, session, output_names, *args, **kwargs):
- import tf
+ import tensorflow as tf
graph_dir, graph_name = os.path.split(get_path(path))
|
Typo in tensorflow contrib.
|
riga_law
|
train
|
5a56b5718ec7a77e4e613c8aadc44b9b387892c6
|
diff --git a/activerecord/lib/active_record/connection_adapters/sqlite3_adapter.rb b/activerecord/lib/active_record/connection_adapters/sqlite3_adapter.rb
index <HASH>..<HASH> 100644
--- a/activerecord/lib/active_record/connection_adapters/sqlite3_adapter.rb
+++ b/activerecord/lib/active_record/connection_adapters/sqlite3_adapter.rb
@@ -285,11 +285,6 @@ module ActiveRecord
@connection.changes
end
- def delete_sql(sql, name = nil) #:nodoc:
- sql += " WHERE 1=1" unless sql =~ /WHERE/i
- super sql, name
- end
-
def select_rows(sql, name = nil, binds = [])
exec_query(sql, name, binds).rows
end
|
Remove `delete_sql` in sqlite3 adapter
`sql += " WHERE 1=1"` was introduced in <I>cb<I>.
But it is not needed. ref <URL>
|
rails_rails
|
train
|
1df3bce153b33b41a108697708b7194c6638d97a
|
diff --git a/src/main/java/com/coremedia/iso/boxes/h264/AvcConfigurationBox.java b/src/main/java/com/coremedia/iso/boxes/h264/AvcConfigurationBox.java
index <HASH>..<HASH> 100644
--- a/src/main/java/com/coremedia/iso/boxes/h264/AvcConfigurationBox.java
+++ b/src/main/java/com/coremedia/iso/boxes/h264/AvcConfigurationBox.java
@@ -202,7 +202,7 @@ public final class AvcConfigurationBox extends AbstractBox {
/**
* Just for non-spec-conform encoders
*/
- public int lengthSizeMinusOnePaddingBits = 252;
+ public int lengthSizeMinusOnePaddingBits = 60;
public int numberOfSequenceParameterSetsPaddingBits = 7;
public int chromaFormatPaddingBits = 31;
public int bitDepthLumaMinus8PaddingBits = 31;
|
restricting the value to 6 bits and replacing <I> (which is too big) with <I> & <I> == <I> which should result the some bit pattern in the output
git-svn-id: <URL>
|
sannies_mp4parser
|
train
|
1b08c4c187c65a53e5b5de871c9286529f88b94e
|
diff --git a/test/adapters/shared_tests.rb b/test/adapters/shared_tests.rb
index <HASH>..<HASH> 100644
--- a/test/adapters/shared_tests.rb
+++ b/test/adapters/shared_tests.rb
@@ -46,7 +46,7 @@ module Vanity::Adapters::SharedTests
assert_in_delta(
time,
@subject.get_metric_last_update_at(metric),
- 0.1
+ 1.0
)
end
end
@@ -101,7 +101,11 @@ module Vanity::Adapters::SharedTests
time = Time.now
@subject.set_experiment_created_at(experiment, time)
- assert_equal(time, @subject.get_experiment_created_at(experiment))
+ assert_in_delta(
+ time,
+ @subject.get_experiment_created_at(experiment),
+ 1.0
+ )
end
end
|
Relax resolution on adapter timestamp tests
While the mock adapter stores Time instances, other adapters are limited by
their storage layer, usually to 1s accuracy, so we can only assert that
timestamps are accurate to the second.
|
assaf_vanity
|
train
|
b9ce6eaf2e78fde13b6b873d1da26523a2b4fe8e
|
diff --git a/system/Database/OCI8/Connection.php b/system/Database/OCI8/Connection.php
index <HASH>..<HASH> 100644
--- a/system/Database/OCI8/Connection.php
+++ b/system/Database/OCI8/Connection.php
@@ -847,4 +847,16 @@ SQL;
}
// --------------------------------------------------------------------
+
+ /**
+ * Returns the name of the current database being used.
+ */
+ public function getDatabase(): string
+ {
+ if (empty($this->database)) {
+ $this->database = $this->query('SELECT DEFAULT_TABLESPACE FROM USER_USERS')->getRow()->DEFAULT_TABLESPACE ?? '';
+ }
+
+ return empty($this->database) ? '' : $this->database;
+ }
}
|
feat: Due to name of the tablespace is not defined in the property when connecting by instance name.
|
codeigniter4_CodeIgniter4
|
train
|
14dfcbd3b51e70f9a3067f5416ee10d62957ae6a
|
diff --git a/test/geotiff.spec.js b/test/geotiff.spec.js
index <HASH>..<HASH> 100644
--- a/test/geotiff.spec.js
+++ b/test/geotiff.spec.js
@@ -137,6 +137,29 @@ describe("mainTests", function() {
});
});
+ it("should work on band interleaved, lzw compressed, and tiled tiffs", function(done) {
+ retrieve("tiledplanarlzw.tiff", done, function(tiff) {
+ expect(tiff).to.be.ok;
+ var image = tiff.getImage();
+ expect(image).to.be.ok;
+ expect(image.getWidth()).to.equal(539);
+ expect(image.getHeight()).to.equal(448);
+ expect(image.getSamplesPerPixel()).to.equal(15);
+
+ try {
+ var allData = image.readRasters({window: [200, 200, 210, 210]});
+ expect(allData).to.have.length(15);
+ expect(allData[0]).to.be.an.instanceof(Uint16Array);
+ var data = image.readRasters({window: [200, 200, 210, 210], samples: [5]});
+ expect(data[0]).to.deep.equal(allData[5]);
+ done();
+ }
+ catch (error) {
+ done(error);
+ }
+ });
+ });
+
it("should work on Int32 tiffs", function(done) {
retrieve("int32.tiff", done, function(tiff) {
expect(tiff).to.be.ok;
@@ -229,6 +252,29 @@ describe("mainTests", function() {
});
});
+ it("should work on Float64 and lzw compressed tiffs", function(done) {
+ retrieve("float64lzw.tiff", done, function(tiff) {
+ expect(tiff).to.be.ok;
+ var image = tiff.getImage();
+ expect(image).to.be.ok;
+ expect(image.getWidth()).to.equal(539);
+ expect(image.getHeight()).to.equal(448);
+ expect(image.getSamplesPerPixel()).to.equal(15);
+
+ try {
+ var allData = image.readRasters({window: [200, 200, 210, 210]});
+ expect(allData).to.have.length(15);
+ expect(allData[0]).to.be.an.instanceof(Float64Array);
+ var data = image.readRasters({window: [200, 200, 210, 210], samples: [5]});
+ expect(data[0]).to.deep.equal(allData[5]);
+ done();
+ }
+ catch (error) {
+ done(error);
+ }
+ });
+ });
+
it("should work on packbit compressed tiffs", function(done) {
retrieve("packbits.tiff", done, function(tiff) {
expect(tiff).to.be.ok;
|
karma test added for float<I>lzw and tiledplanarlzw
|
geotiffjs_geotiff.js
|
train
|
d5033e8001db902d245e9b156972a1d1a0297c59
|
diff --git a/Gruntfile.js b/Gruntfile.js
index <HASH>..<HASH> 100644
--- a/Gruntfile.js
+++ b/Gruntfile.js
@@ -259,6 +259,7 @@ module.exports = function(grunt) {
grunt.registerTask('build:release', [ 'clean:dist', 'build', 'build:docs', 'compress' ]);
grunt.registerTask('build:npm', [ 'clean:dist', 'build' ]);
grunt.registerTask('travis', [ 'test' ]);
+ grunt.registerTask('deploy', [ 'deploy:docs', 'deploy:assets' ]);
grunt.registerTask('deploy:docs', [ 'build:gh-pages', 'shell:checkoutpages', 'copy:index', 'shell:addindex', 'shell:checkoutmaster' ]);
grunt.registerTask('deploy:assets', [ 'build:release', 'shell:checkoutpages', 'copy:jsassets', 'copy:cssassets', 'shell:addassets', 'shell:checkoutmaster' ]);
};
|
Added "deploy" grunt task
|
stevenbenner_jquery-powertip
|
train
|
737fcd6a00f75bb63b46e2a31cbfd315a4b1e8b5
|
diff --git a/dev/com.ibm.ws.webserver.plugin.utility_fat/fat/src/com/ibm/ws/webserver/plugin/utility/fat/PluginUtilityGenerateTest.java b/dev/com.ibm.ws.webserver.plugin.utility_fat/fat/src/com/ibm/ws/webserver/plugin/utility/fat/PluginUtilityGenerateTest.java
index <HASH>..<HASH> 100644
--- a/dev/com.ibm.ws.webserver.plugin.utility_fat/fat/src/com/ibm/ws/webserver/plugin/utility/fat/PluginUtilityGenerateTest.java
+++ b/dev/com.ibm.ws.webserver.plugin.utility_fat/fat/src/com/ibm/ws/webserver/plugin/utility/fat/PluginUtilityGenerateTest.java
@@ -71,6 +71,9 @@ public class PluginUtilityGenerateTest {
assertNotNull("The smarter planet message did not get printed",
remoteAccessServer.waitForStringInLog("CWWKF0011I"));
+ // wait for LTPA key to be available to avoid CWWKS4000E
+ assertNotNull("CWWKS4105I.* not recieved on remoteAccessServer",
+ remoteAccessServer.waitForStringInLog("CWWKS4105I.*"));
}
@AfterClass
|
wait for LTPA service start before testing remote server
|
OpenLiberty_open-liberty
|
train
|
88f8514e261edc0614686d7fcf00e48bb299d27e
|
diff --git a/lib/phony/countries.rb b/lib/phony/countries.rb
index <HASH>..<HASH> 100644
--- a/lib/phony/countries.rb
+++ b/lib/phony/countries.rb
@@ -96,7 +96,8 @@ Phony.define do
# http://en.wikipedia.org/wiki/Telephone_numbers_in_Belgium
#
country '32', trunk('0') |
- match(/^(70|800|90\d)\d+$/) >> split(3,3) | # Service
+ match(/^(7[08])\d+$/) >> split(3,3) | # Premium and national rate Services
+ match(/^(800|90\d)\d+$/) >> split(2,3) | # Toll free service and premium numbers
match(/^(46[0568])\d{6}$/) >> split(2,2,2) | # Mobile (Lycamobile, Telenet, Proximus 0460)
match(/^(4[789]\d)\d{6}$/) >> split(2,2,2) | # Mobile
one_of('2','3','4','9') >> split(3,2,2) | # Short NDCs
diff --git a/spec/lib/phony/countries_spec.rb b/spec/lib/phony/countries_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/lib/phony/countries_spec.rb
+++ b/spec/lib/phony/countries_spec.rb
@@ -105,6 +105,9 @@ describe 'country descriptions' do
it_splits '32475279584', ['32', '475', '27', '95', '84'] # mobile
it_splits '32468279584', ['32', '468', '27', '95', '84'] # mobile (Telenet)
it_splits '3270123123', ['32', '70', '123', '123'] # Bus Service?
+ it_splits '3278123123', ['32', '78', '123', '123'] # National rate service
+ it_splits '3290123123', ['32', '901', '23', '123'] # National rate service
+ it_splits '3280080404', ['32', '800', '80', '404'] # Apple support
end
describe 'Belize' do
|
Adapt mobile phone splitting of Belgium to respect the specifications
|
floere_phony
|
train
|
6cf572b35f7a9f4982576d4d80f41d64266b8aef
|
diff --git a/lib/config/constructor.js b/lib/config/constructor.js
index <HASH>..<HASH> 100644
--- a/lib/config/constructor.js
+++ b/lib/config/constructor.js
@@ -15,17 +15,24 @@ exports.$define = {
}
}
-function Config(package) {
+function Config(options) {
var config = this
var nameSpace = config._nameSpace && config._nameSpace.$val
- if(!package && findPackage) {
- package = findPackage()
- }
+ var package = config._findPackage && config._findPackage.$val &&
+ findPackage && findPackage()
- package = resolveNameSpace(package, nameSpace)
+ // TODO: clean this up, make one val and call constructor with it
+ Constructor.call(this, {})
- Constructor.call(this, package)
+ if(package) {
+ package = resolveNameSpace(package, nameSpace)
+ config.set(package)
+ }
+ if(options){
+ options = resolveNameSpace(options, nameSpace)
+ config.set(options)
+ }
if(parseArgv && config._argv && config._argv.$val){
var setobj = parseArgv(config)
diff --git a/lib/config/index.js b/lib/config/index.js
index <HASH>..<HASH> 100644
--- a/lib/config/index.js
+++ b/lib/config/index.js
@@ -17,5 +17,6 @@ var Config = new Observable({
module.exports = new Config({
_nameSpace: 'vigour',
_argv: true,
+ _findPackage: true,
$inject: require('./constructor')
}).$Constructor
|
get defaults on new with val
|
vigour-io_vjs
|
train
|
fc5de47b2b16399d6942a02d77d2dd19d588aff1
|
diff --git a/bootstrap-select.js b/bootstrap-select.js
index <HASH>..<HASH> 100644
--- a/bootstrap-select.js
+++ b/bootstrap-select.js
@@ -635,7 +635,7 @@
keyCodeMap = {
32:' ', 48:'0', 49:'1', 50:'2', 51:'3', 52:'4', 53:'5', 54:'6', 55:'7', 56:'8', 57:'9', 59:';',
65:'a', 66:'b', 67:'c', 68:'d', 69:'e', 70:'f', 71:'g', 72:'h', 73:'i', 74:'j', 75:'k', 76:'l',
- 77:'m', 78:'n', 79:'o', 80:'p', 81:'q', 82:'r', 83:'s', 84:'t', 85:'u', 86:'v', 87:'w', 88:'x',
+ 77:'m', 78:'n', 79:'o', 80:'p', 81:'q', 82:'r', 83:'s', 84:'t', 85:'u', 86:'v', 87:'w', 88:'x',
89:'y', 90:'z', 96:'0', 97:'1', 98:'2', 99:'3', 100:'4', 101:'5', 102:'6', 103:'7', 104:'8', 105:'9'
};
|
[#<I>] Resolve trailing whitespace
|
snapappointments_bootstrap-select
|
train
|
b2590cdb59f1690b6fbdc51e07cab1a64e510588
|
diff --git a/src/main/java/kr/jm/utils/flow/subscriber/JMSubscriber.java b/src/main/java/kr/jm/utils/flow/subscriber/JMSubscriber.java
index <HASH>..<HASH> 100644
--- a/src/main/java/kr/jm/utils/flow/subscriber/JMSubscriber.java
+++ b/src/main/java/kr/jm/utils/flow/subscriber/JMSubscriber.java
@@ -21,15 +21,23 @@ public class JMSubscriber<T> implements Flow.Subscriber<T> {
protected final Logger log = org.slf4j.LoggerFactory.getLogger(getClass());
private Flow.Subscription subscription;
- private Consumer<T> itemConsumer;
+ private Consumer<T> dataConsumer;
+
+ protected JMSubscriber() {
+ this.dataConsumer =
+ d -> JMExceptionManager.logException(log,
+ JMExceptionManager.newRunTimeException(
+ "DataConsumer Wasn't Set !!! - Flush " + d),
+ "JMSubscriber");
+ }
/**
* Instantiates a new Jm subscriber.
*
- * @param itemConsumer the item consumer
+ * @param dataConsumer the item consumer
*/
- public JMSubscriber(Consumer<T> itemConsumer) {
- this.itemConsumer = itemConsumer;
+ public JMSubscriber(Consumer<T> dataConsumer) {
+ setDataConsumer(dataConsumer);
}
@Override
@@ -45,7 +53,7 @@ public class JMSubscriber<T> implements Flow.Subscriber<T> {
@Override
public void onNext(T item) {
JMLog.debug(log, "onNext", item);
- Optional.ofNullable(item).ifPresent(itemConsumer);
+ Optional.ofNullable(item).ifPresent(this.dataConsumer);
Optional.ofNullable(this.subscription).ifPresent(this::requestNext);
}
@@ -58,4 +66,8 @@ public class JMSubscriber<T> implements Flow.Subscriber<T> {
public void onComplete() {
JMLog.info(log, "onComplete");
}
+
+ public void setDataConsumer(Consumer<T> dataConsumer) {
+ this.dataConsumer = dataConsumer;
+ }
}
|
refactoring JMSubscriber
|
JM-Lab_utils-java9
|
train
|
c5587ececa180147e2ec3076b9ebaac6710bb82e
|
diff --git a/src/main/java/nl/jqno/equalsverifier/internal/reflection/PackageScanner.java b/src/main/java/nl/jqno/equalsverifier/internal/reflection/PackageScanner.java
index <HASH>..<HASH> 100644
--- a/src/main/java/nl/jqno/equalsverifier/internal/reflection/PackageScanner.java
+++ b/src/main/java/nl/jqno/equalsverifier/internal/reflection/PackageScanner.java
@@ -45,6 +45,7 @@ public final class PackageScanner {
return Arrays.stream(dir.listFiles())
.filter(f -> f.getName().endsWith(".class"))
.map(f -> fileToClass(packageName, f))
+ .filter(c -> !c.getName().endsWith("Test"))
.collect(Collectors.toList());
}
diff --git a/src/test/java/nl/jqno/equalsverifier/internal/reflection/PackageScannerTest.java b/src/test/java/nl/jqno/equalsverifier/internal/reflection/PackageScannerTest.java
index <HASH>..<HASH> 100644
--- a/src/test/java/nl/jqno/equalsverifier/internal/reflection/PackageScannerTest.java
+++ b/src/test/java/nl/jqno/equalsverifier/internal/reflection/PackageScannerTest.java
@@ -1,26 +1,38 @@
package nl.jqno.equalsverifier.internal.reflection;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
+import java.util.stream.Collectors;
import nl.jqno.equalsverifier.testhelpers.packages.correct.*;
import org.junit.Test;
public class PackageScannerTest {
- private static final String SOME_PACKAGE =
- "nl.jqno.equalsverifier.testhelpers.packages.correct";
-
@Test
public void happyPath() {
- List<Class<?>> classes = PackageScanner.getClassesIn(SOME_PACKAGE);
+ List<Class<?>> classes =
+ PackageScanner.getClassesIn("nl.jqno.equalsverifier.testhelpers.packages.correct");
classes.sort((a, b) -> a.getName().compareTo(b.getName()));
assertEquals(Arrays.asList(A.class, B.class, C.class), classes);
}
@Test
+ public void filterOutTestClasses() {
+ List<Class<?>> classes =
+ PackageScanner.getClassesIn("nl.jqno.equalsverifier.internal.reflection");
+ List<Class<?>> testClasses =
+ classes.stream()
+ .filter(c -> c.getName().endsWith("Test"))
+ .collect(Collectors.toList());
+ assertEquals(Collections.emptyList(), testClasses);
+ assertTrue(classes.size() - testClasses.size() > 0);
+ }
+
+ @Test
public void nonexistentPackage() {
List<Class<?>> classes =
PackageScanner.getClassesIn("nl.jqno.equalsverifier.nonexistentpackage");
|
Multiple types: make PackageScanner ignore test classes
|
jqno_equalsverifier
|
train
|
d44f216c8ac9464b2303781d8f2823850bb5bf61
|
diff --git a/python/bigdl/dllib/nn/layer.py b/python/bigdl/dllib/nn/layer.py
index <HASH>..<HASH> 100644
--- a/python/bigdl/dllib/nn/layer.py
+++ b/python/bigdl/dllib/nn/layer.py
@@ -429,7 +429,87 @@ class Layer(JavaValue):
'''
return callJavaFunc(get_spark_context(), self.value.isTraining)
+ def quantize(self):
+ '''
+ Clone self and quantize it, at last return a new quantized model.
+ :return: A new quantized model.
+ >>> fc = Linear(4, 2)
+ creating: createLinear
+ >>> fc.set_weights([np.ones((4, 2)), np.ones((2,))])
+ >>> input = np.ones((2, 4))
+ >>> fc.forward(input)
+ array([[ 5., 5.],
+ [ 5., 5.]], dtype=float32)
+ >>> quantized_fc = fc.quantize()
+ >>> quantized_fc.forward(input)
+ array([[ 5., 5.],
+ [ 5., 5.]], dtype=float32)
+
+ >>> assert("quantized.Linear" in quantized_fc.__str__())
+ >>> conv = SpatialConvolution(1, 2, 3, 3)
+ creating: createSpatialConvolution
+ >>> conv.set_weights([np.ones((2, 1, 3, 3)), np.zeros((2,))])
+ >>> input = np.ones((2, 1, 4, 4))
+ >>> conv.forward(input)
+ array([[[[ 9., 9.],
+ [ 9., 9.]],
+ <BLANKLINE>
+ [[ 9., 9.],
+ [ 9., 9.]]],
+ <BLANKLINE>
+ <BLANKLINE>
+ [[[ 9., 9.],
+ [ 9., 9.]],
+ <BLANKLINE>
+ [[ 9., 9.],
+ [ 9., 9.]]]], dtype=float32)
+ >>> quantized_conv = conv.quantize()
+ >>> quantized_conv.forward(input)
+ array([[[[ 9., 9.],
+ [ 9., 9.]],
+ <BLANKLINE>
+ [[ 9., 9.],
+ [ 9., 9.]]],
+ <BLANKLINE>
+ <BLANKLINE>
+ [[[ 9., 9.],
+ [ 9., 9.]],
+ <BLANKLINE>
+ [[ 9., 9.],
+ [ 9., 9.]]]], dtype=float32)
+ >>> assert("quantized.SpatialConvolution" in quantized_conv.__str__())
+ >>> seq = Sequential()
+ creating: createSequential
+ >>> seq = seq.add(conv)
+ >>> seq = seq.add(Reshape([8, 4], False))
+ creating: createReshape
+ >>> seq = seq.add(fc)
+ >>> input = np.ones([1, 1, 6, 6])
+ >>> seq.forward(input)
+ array([[ 37., 37.],
+ [ 37., 37.],
+ [ 37., 37.],
+ [ 37., 37.],
+ [ 37., 37.],
+ [ 37., 37.],
+ [ 37., 37.],
+ [ 37., 37.]], dtype=float32)
+ >>> quantized_seq = seq.quantize()
+ >>> quantized_seq.forward(input)
+ array([[ 37., 37.],
+ [ 37., 37.],
+ [ 37., 37.],
+ [ 37., 37.],
+ [ 37., 37.],
+ [ 37., 37.],
+ [ 37., 37.],
+ [ 37., 37.]], dtype=float32)
+ >>> assert("quantized.Linear" in quantized_seq.__str__())
+ >>> assert("quantized.SpatialConvolution" in quantized_seq.__str__())
+ '''
+ quantized_model = callBigDlFunc(self.bigdl_type, "quantize", self.value)
+ return Layer.of(quantized_model)
class Container(Layer):
@@ -615,7 +695,6 @@ class Model(Container):
callBigDlFunc(bigdl_type, "saveGraphTopology", self.value, log_path)
return self
-
class Linear(Layer):
'''
|
feat: quantize a whole graph/modules (#<I>)
* feat: quantize a whole graph/modules
* feat: python supports
* fix: delete unusage
|
intel-analytics_BigDL
|
train
|
b07d42b3198e187dcf85941c92ddc1dce4756eef
|
diff --git a/src/main/java/ch/ralscha/extdirectspring/generator/ModelAssociation.java b/src/main/java/ch/ralscha/extdirectspring/generator/ModelAssociation.java
index <HASH>..<HASH> 100644
--- a/src/main/java/ch/ralscha/extdirectspring/generator/ModelAssociation.java
+++ b/src/main/java/ch/ralscha/extdirectspring/generator/ModelAssociation.java
@@ -23,9 +23,9 @@ import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
- * Annotation that configures the association to another object. If this
- * annotation is present the generator creates an associations config object in
- * the model.
+ * Annotation that configures an association to another object. If this
+ * annotation is present on a field the generator creates an associations config
+ * object in the model.
*
* @author Ralph Schaer
*/
@@ -36,23 +36,95 @@ import java.lang.annotation.Target;
public @interface ModelAssociation {
/**
- * Type of the association.
+ * The type of the association.
+ * <p>
+ * Corresponds to the <a href=
+ * "http://docs.sencha.com/ext-js/4-1/#!/api/Ext.data.association.HasMany-cfg-type"
+ * >type</a> config object.
*/
ModelAssociationType value();
-
- Class<?> model(); // hasMany, belongsTo, hasOne
+ /**
+ * The class of the model that is being associated with.
+ * <p>
+ * Corresponds to the <a href=
+ * "http://docs.sencha.com/ext-js/4-1/#!/api/Ext.data.association.Association-cfg-model"
+ * >model</a> config object. The generated Javascript code contains either
+ * the full qualified class name of the class or the string from
+ * {@link Model#value()} if present on the class.
+ */
+ Class<?> model() default Object.class;
- boolean autoLoad() default false; // hasMany
+ /**
+ * True to automatically load the related store from a remote source when
+ * instantiated. Defaults to false.
+ * <p>
+ * Corresponds to the <a href=
+ * "http://docs.sencha.com/ext-js/4-1/#!/api/Ext.data.association.HasMany-cfg-autoLoad"
+ * >autoLoad</a> config object.
+ * <p>
+ * Only hasMany association support this property.
+ */
+ boolean autoLoad() default false;
- String foreignKey() default ""; // hasMany, belongsTo, hasOne
+ /**
+ * The name of the foreign key on the associated model that links it to the
+ * owner model. Defaults to the lowercased name of the owner model plus
+ * "_id".
+ * <p>
+ * Corresponds to the <a href=
+ * "http://docs.sencha.com/ext-js/4-1/#!/api/Ext.data.association.HasMany-cfg-foreignKey"
+ * >foreignKey</a> config object.
+ */
+ String foreignKey() default "";
- String name() default ""; // hasMany
+ /**
+ * The name of the function to create on the owner model to retrieve the
+ * child store. If not specified, the pluralized name of the child model is
+ * used. Always specify this if the class name contains a package component.
+ * <p>
+ * Corresponds to the <a href=
+ * "http://docs.sencha.com/ext-js/4-1/#!/api/Ext.data.association.HasMany-cfg-name"
+ * >name</a> config object.
+ * <p>
+ * Only hasMany association support this property.
+ */
+ String name() default "";
- String primaryKey() default "";// hasMany, belongsTo, hasOne
+ /**
+ * The name of the primary key on the associated model. <br>
+ * In general this will be the value of {@link Model#idProperty()}.
+ * <p>
+ * Corresponds to the <a href=
+ * "http://docs.sencha.com/ext-js/4-1/#!/api/Ext.data.association.Association-cfg-primaryKey"
+ * >primaryKey</a> config object.
+ */
+ String primaryKey() default "";
- String setterName() default ""; // belongTo, hasOne
+ /**
+ * The name of the setter function that will be added to the local model's
+ * prototype. Defaults to 'set' + the name of the foreign model, e.g.
+ * setCategory.
+ * <p>
+ * Corresponds to the <a href=
+ * "http://docs.sencha.com/ext-js/4-1/#!/api/Ext.data.association.BelongsTo-cfg-setterName"
+ * >setterName</a> config object.
+ * <p>
+ * Only belongTo and hasOne associations support this property.
+ */
+ String setterName() default "";
- String getterName() default ""; // belongTo, hasOne
+ /**
+ * The name of the getter function that will be added to the local model's
+ * prototype. Defaults to 'get' + the name of the foreign model, e.g.
+ * getCategory.
+ * <p>
+ * Corresponds to the <a href=
+ * "http://docs.sencha.com/ext-js/4-1/#!/api/Ext.data.association.BelongsTo-cfg-getterName"
+ * >getterName</a> config object.
+ * <p>
+ * Only belongTo and hasOne associations support this property.
+ */
+ String getterName() default "";
}
diff --git a/src/main/java/ch/ralscha/extdirectspring/generator/ModelGenerator.java b/src/main/java/ch/ralscha/extdirectspring/generator/ModelGenerator.java
index <HASH>..<HASH> 100644
--- a/src/main/java/ch/ralscha/extdirectspring/generator/ModelGenerator.java
+++ b/src/main/java/ch/ralscha/extdirectspring/generator/ModelGenerator.java
@@ -350,9 +350,13 @@ public abstract class ModelGenerator {
if (modelAssociation != null) {
ModelAssociationType type = modelAssociation.value();
- ModelAssociationBean modelAssociationBean = new ModelAssociationBean(type, modelAssociation
- .model());
-
+
+ Class<?> associationClass = modelAssociation.model();
+ if (associationClass == Object.class) {
+ associationClass = field.getType();
+ }
+
+ ModelAssociationBean modelAssociationBean = new ModelAssociationBean(type, associationClass);
if (StringUtils.hasText(modelAssociation.foreignKey())) {
modelAssociationBean.setForeignKey(modelAssociation.foreignKey());
|
Change model to an optional parameter. Makes it easier for belongsTo and
hasOne associations.
Add some more Javadocs
|
ralscha_extdirectspring
|
train
|
62984f9beeb82b53b0b40c7856e1a25666f36f42
|
diff --git a/cmd/integration/integration.go b/cmd/integration/integration.go
index <HASH>..<HASH> 100644
--- a/cmd/integration/integration.go
+++ b/cmd/integration/integration.go
@@ -258,9 +258,15 @@ func makeTempDirOrDie(prefix string, baseDir string) string {
}
// podsOnMinions returns true when all of the selected pods exist on a minion.
-func podsOnMinions(c *client.Client, pods api.PodList) wait.ConditionFunc {
+func podsOnMinions(c *client.Client, podNamespace string, labelSelector labels.Selector) wait.ConditionFunc {
podInfo := fakeKubeletClient{}
+ // wait for minions to indicate they have info about the desired pods
return func() (bool, error) {
+ pods, err := c.Pods(podNamespace).List(labelSelector)
+ if err != nil {
+ glog.Infof("Unable to get pods to list: %v", err)
+ return false, nil
+ }
for i := range pods.Items {
host, id, namespace := pods.Items[i].Spec.Host, pods.Items[i].Name, pods.Items[i].Namespace
glog.Infof("Check whether pod %s.%s exists on node %q", id, namespace, host)
@@ -426,12 +432,13 @@ func runReplicationControllerTest(c *client.Client) {
glog.Fatalf("FAILED: pods never created %v", err)
}
- // wait for minions to indicate they have info about the desired pods
- pods, err := c.Pods("test").List(labels.Set(updated.Spec.Selector).AsSelector())
- if err != nil {
- glog.Fatalf("FAILED: unable to get pods to list: %v", err)
- }
- if err := wait.Poll(time.Second, time.Second*30, podsOnMinions(c, *pods)); err != nil {
+ // Poll till we can retrieve the status of all pods matching the given label selector from their minions.
+ // This involves 3 operations:
+ // - The scheduler must assign all pods to a minion
+ // - The assignment must reflect in a `List` operation against the apiserver, for labels matching the selector
+ // - We need to be able to query the kubelet on that minion for information about the pod
+ if err := wait.Poll(
+ time.Second, time.Second*30, podsOnMinions(c, "test", labels.Set(updated.Spec.Selector).AsSelector())); err != nil {
glog.Fatalf("FAILED: pods never started running %v", err)
}
|
Fix a race in the integration tests.
|
kubernetes_kubernetes
|
train
|
1a3b4bab6eb9ea361d1ced74c85c762370c809b0
|
diff --git a/views/js/controller/viewResult.js b/views/js/controller/viewResult.js
index <HASH>..<HASH> 100644
--- a/views/js/controller/viewResult.js
+++ b/views/js/controller/viewResult.js
@@ -75,7 +75,13 @@ define([
const { file } = response && response.base || {};
if (file && file.uri && !file.data) {
return requestFileContent(file.uri, deliveryUri)
- .then(data => response.base.file = data)
+ .then(fileData => {
+ if (fileData && fileData.data) {
+ response.base.file = fileData;
+ } else {
+ response.base = null;
+ }
+ })
.catch(e => logger.error(e));
}
return Promise.resolve();
@@ -177,8 +183,16 @@ define([
}
Promise.resolve($btn.data('state'))
+ .then(state => {
+ console.log(JSON.stringify(state));
+ return state;
+ })
.then(state => refineItemState(state, deliveryUri))
.then(state => {
+ console.log(JSON.stringify(state));
+ return state;
+ })
+ .then(state => {
$btn.removeProp('disabled').removeClass('disabled');
previewerFactory(type, uri, state, {
view: 'reviewRenderer',
|
fix: only assign a file content to a vriable if there is some, otherwise void it
|
oat-sa_extension-tao-outcomeui
|
train
|
632463c310cf19ff2d5879921f95151a492030a1
|
diff --git a/src/index.js b/src/index.js
index <HASH>..<HASH> 100644
--- a/src/index.js
+++ b/src/index.js
@@ -15,6 +15,7 @@ import IifExecutor from './executors/IifExecutor';
import ForEachExecutor from './executors/ForEachExecutor';
import IntervalExecutor from './executors/IntervalExecutor';
import SpawnExecutor from './executors/SpawnExecutor';
+import SleepExecutor from './executors/SleepExecutor';
import {isDOMNode} from './util/type';
import {injectionNames, constructorNames, uglifyWhitelist} from './util/debug';
import {injectionInfo} from './util/executors';
@@ -54,6 +55,10 @@ export function spawn (executor) {
return new ExecutorFactory(SpawnExecutor, executor);
}
+export function sleep (milliseconds) {
+ return new ExecutorFactory(SleepExecutor, milliseconds);
+}
+
const debugStyle = 'background: #660000; color: white; font-weight: bold;';
export function stop ($ringaEvent, stop, done) {
stop();
@@ -138,5 +143,6 @@ export default {
event,
assign,
notify,
- stop
+ stop,
+ sleep,
};
\ No newline at end of file
|
Add syntax sugar for sleep executor
The word 'wait' seems better suited to waiting until a certain condition
is met, so 'sleep' seems like a good word to use for this.
|
ringa-js_ringa
|
train
|
41fda5e1c554fb55c019b3378750a1c62095f8fb
|
diff --git a/mesh_tensorflow/transformer/universal_transformer.py b/mesh_tensorflow/transformer/universal_transformer.py
index <HASH>..<HASH> 100644
--- a/mesh_tensorflow/transformer/universal_transformer.py
+++ b/mesh_tensorflow/transformer/universal_transformer.py
@@ -59,6 +59,8 @@ class UTLayerStack(transformer.TransformerLayer):
mix_with_transformer_after_ut=gin.REQUIRED,
gates_inputs=gin.REQUIRED,
gate_ffn_layer=gin.REQUIRED,
+ use_gated_transformer=gin.REQUIRED,
+ gating_type=gin.REQUIRED,
):
"""Create a LayerStack for Universal Transformer.
@@ -86,6 +88,8 @@ class UTLayerStack(transformer.TransformerLayer):
mix_with_transformer_after_ut: whether to mix transformer layers after ut.
gates_inputs: controlling the cary/transform gate.
gate_ffn_layer: gate ff layer type
+ use_gated_transformer: whether to use gated transformer.
+ gating_type: gating type.
"""
self._layers = layers
self._dropout_rate = dropout_rate
@@ -107,6 +111,8 @@ class UTLayerStack(transformer.TransformerLayer):
self.gates_inputs = gates_inputs
self.gate_ffn_layer = gate_ffn_layer
self.couple_carry_transform_gates = couple_carry_transform_gates
+ self.use_gated_transformer = use_gated_transformer
+ self.gating_type = gating_type
def get_timing_signal_1d(self,
context,
@@ -324,12 +330,67 @@ class UTLayerStack(transformer.TransformerLayer):
if y.shape != x.shape:
raise ValueError("Layer %s returned misshaped output x=%s y=%s" %
(layer.__class__.__name__, x, y))
+ if self.use_gated_transformer:
+ y = self.gating(context, x, y, mask)
x += self._dropout(context, y)
if lnum != len(self._layers) - 1:
context.layer_outputs.append(x)
context.layer_index += 1
return x
+ def gating(self, context, x, transformed_x, mask):
+ """Implementation of various gating layers."""
+ gate_ffn_layer = self.gate_ffn_layer
+ if self.gating_type == "highway":
+ gate_inputs = [x]
+ transform_gate = self.ffn_layer_multi_inputs(
+ context,
+ mask,
+ gate_inputs,
+ ffn_layer_type=gate_ffn_layer,
+ activation=mtf.sigmoid,
+ preprocess=True)
+ carry_gate = self.ffn_layer_multi_inputs(
+ context,
+ mask,
+ gate_inputs,
+ ffn_layer_type=gate_ffn_layer,
+ activation=mtf.sigmoid,
+ preprocess=True)
+ new_state = x * carry_gate + transformed_x * transform_gate
+ return new_state
+ elif self.gating_type == "gru":
+ gate_inputs = [x, transformed_x]
+ transition_function_update_gate = self.ffn_layer_multi_inputs(
+ context,
+ mask,
+ gate_inputs,
+ ffn_layer_type=gate_ffn_layer,
+ activation=mtf.sigmoid,
+ preprocess=True)
+ transition_function_reset_gate = self.ffn_layer_multi_inputs(
+ context,
+ mask,
+ gate_inputs,
+ ffn_layer_type=gate_ffn_layer,
+ activation=mtf.sigmoid,
+ preprocess=True)
+
+ reset_state = transition_function_reset_gate * x
+ gate_inputs = [reset_state, transformed_x]
+ transition_function_candidate = self.ffn_layer_multi_inputs(
+ context,
+ mask,
+ gate_inputs,
+ ffn_layer_type=gate_ffn_layer,
+ activation=mtf.sigmoid,
+ preprocess=True)
+
+ transition_function_output = (
+ (1 - transition_function_update_gate) * transformed_x +
+ transition_function_update_gate * transition_function_candidate)
+ return transition_function_output
+
def ut_basic(self, context, x, mask):
def ut_function(x, step):
new_state = self.step_preprocess(context, x, step)
@@ -520,21 +581,17 @@ class UTLayerStack(transformer.TransformerLayer):
context, (inputs * mask) if mask else inputs)
# the output size is the hidden size of the main inputs
- main_input = inputs_list[0]
- original_shape = main_input.shape
- hidden_size = original_shape.dims[-1].size
-
ffn_inputs = inputs_list[0]
if len(inputs_list) != 1:
- ffn_inputs = mtf.concat(inputs_list, original_shape.dims[-1].name)
+ ffn_inputs = mtf.concat(inputs_list, context.model.model_dim.name)
if ffn_layer_type == "dense":
- last_dims = [
- mtf.Dimension(ffn_inputs.shape.dims[-1].name, hidden_size)
- ]
+ # last_dims = [
+ # mtf.Dimension(ffn_inputs.shape.dims[-1].name, hidden_size)
+ # ]
output = mtf.layers.dense(
ffn_inputs,
- reduced_dims=[context.model.model_dim],
- new_dims=last_dims,
+ reduced_dims=[ffn_inputs.shape.dims[-1]],
+ new_dims=[context.model.model_dim],
activation=activation,
use_bias=True,
variable_dtype=context.variable_dtype,
@@ -589,7 +646,7 @@ class UTLayerStack(transformer.TransformerLayer):
mask,
gate_inputs,
ffn_layer_type=gate_ffn_layer,
- activation=tf.sigmoid,
+ activation=mtf.sigmoid,
preprocess=True)
new_state = state * carry_gate + transformed_state * transform_gate
|
Implemented a gated transformer layer.
PiperOrigin-RevId: <I>
|
tensorflow_mesh
|
train
|
fdc9758672d285af895ebdcfb7e1f1f2c5ccb63e
|
diff --git a/vstutils/api/base.py b/vstutils/api/base.py
index <HASH>..<HASH> 100644
--- a/vstutils/api/base.py
+++ b/vstutils/api/base.py
@@ -359,6 +359,21 @@ class GenericViewSet(QuerySetMixin, vsets.GenericViewSet):
)
@classmethod
+ def get_view_methods(cls, detail=False):
+ methods = []
+ if hasattr(cls, 'create') and not detail:
+ methods.append('post')
+ if hasattr(cls, 'list') or hasattr(cls, 'retrieve'):
+ methods.append('get')
+ if hasattr(cls, 'update') and detail:
+ methods.append('put')
+ if hasattr(cls, 'partial_update') and detail:
+ methods.append('patch')
+ if hasattr(cls, 'destroy') and detail:
+ methods.append('delete')
+ return methods
+
+ @classmethod
def get_extra_actions(cls):
return super(GenericViewSet, cls).get_extra_actions()
diff --git a/vstutils/api/decorators.py b/vstutils/api/decorators.py
index <HASH>..<HASH> 100644
--- a/vstutils/api/decorators.py
+++ b/vstutils/api/decorators.py
@@ -183,17 +183,38 @@ class nested_view(BaseClassDecorator): # pylint: disable=invalid-name
kwargs.update(options)
return nested_action(*args, **kwargs)
+ def _filter_methods(self, methods, detail=False):
+ allowed_methods = self.view.get_view_methods(detail)
+ return [method for method in methods if method in allowed_methods]
+
def decorated_list(self):
name, view = self.get_list_view()
- return name, self.get_decorator(
- url_name='{}-list'.format(self.name), suffix='List'
- )(view)
+ kwargs = dict(url_name='{}-list'.format(self.name), suffix='List')
+ methods = self.methods or []
+ if not self.methods:
+ methods = []
+ if getattr(self.view, 'create', None) is not None:
+ methods += ['post']
+ if getattr(self.view, 'list', None) is not None:
+ methods += ['get']
+ kwargs['methods'] = self._filter_methods(methods)
+ return name, self.get_decorator(**kwargs)(view)
def decorated_detail(self):
name, view = self.get_detail_view()
- return name, self.get_decorator(
- True, url_name='{}-detail'.format(self.name)
- )(view)
+ kwargs = dict(url_name='{}-detail'.format(self.name))
+ methods = self.methods or []
+ if not self.methods:
+ if getattr(self.view, 'retrieve', None) is not None:
+ methods += ['get']
+ if getattr(self.view, 'update', None) is not None:
+ methods += ['put']
+ if getattr(self.view, 'partial_update', None) is not None:
+ methods += ['patch']
+ if getattr(self.view, 'destroy', None) is not None:
+ methods += ['delete']
+ kwargs['methods'] = self._filter_methods(methods, detail=True)
+ return name, self.get_decorator(True, **kwargs)(view)
def _get_decorated_sub(self, sub):
name, subaction_view = self.get_sub_view(sub)
|
Fix bug with nested objects methods
In nested views by default creates all `ModelViewSet`. It's problem for
`ReadOnlyViewSet` and `HistoryViewSet` views.
Fix in decorator check methods in nested view and add methods only if
this method exists.
|
vstconsulting_vstutils
|
train
|
c3a7e2012b580dbbd6437866554be2f3ee065e4d
|
diff --git a/src/JsFunctionsScanner.php b/src/JsFunctionsScanner.php
index <HASH>..<HASH> 100644
--- a/src/JsFunctionsScanner.php
+++ b/src/JsFunctionsScanner.php
@@ -43,18 +43,12 @@ final class JsFunctionsScanner extends GettextJsFunctionsScanner {
$translations = $translations[0];
}
- $code = $this->code;
- // See https://github.com/mck89/peast/issues/7
- // Temporary workaround to fix dynamic imports. The τ is a greek letter.
- // This will trick the parser into thinking that it is a normal method call.
- $code = preg_replace( '/import(\\s*\\()/', 'imporτ$1', $code );
-
$peast_options = [
'sourceType' => Peast::SOURCE_TYPE_MODULE,
'comments' => false !== $this->extract_comments,
'jsx' => true,
];
- $ast = Peast::latest( $code, $peast_options )->parse();
+ $ast = Peast::latest( $this->code, $peast_options )->parse();
$traverser = new Traverser();
|
Remove now unneeded workaround in JS scanner
Partially reverts #<I>.
|
wp-cli_i18n-command
|
train
|
64b825be79c1ff6afb5a6b136c1d7e17fb3ea471
|
diff --git a/lib/hutch/broker.rb b/lib/hutch/broker.rb
index <HASH>..<HASH> 100644
--- a/lib/hutch/broker.rb
+++ b/lib/hutch/broker.rb
@@ -15,8 +15,14 @@ module Hutch
end
def connect(options = {})
+ @options = options
set_up_amqp_connection
- set_up_api_connection if options.fetch(:enable_http_api_use, true)
+ if http_api_use_enabled?
+ logger.info "HTTP API use is enabled"
+ set_up_api_connection
+ else
+ logger.info "HTTP API use is disabled"
+ end
if block_given?
begin
@@ -61,7 +67,11 @@ module Hutch
host = @config[:mq_host]
port = @config[:mq_port]
- vhost = @config[:mq_vhost]
+ vhost = if @config[:mq_vhost] && "" != @config[:mq_vhost]
+ @config[:mq_vhost]
+ else
+ Bunny::Session::DEFAULT_VHOST
+ end
username = @config[:mq_username]
password = @config[:mq_password]
tls = @config[:mq_tls]
@@ -112,6 +122,17 @@ module Hutch
end
end
+ def http_api_use_enabled?
+ op = @options.fetch(:enable_http_api_use, true)
+ cf = if @config[:enable_http_api_use].nil?
+ true
+ else
+ @config[:enable_http_api_use]
+ end
+
+ op && cf
+ end
+
# Create / get a durable queue and apply namespace if it exists.
def queue(name)
with_bunny_precondition_handler('queue') do
@@ -137,12 +158,14 @@ module Hutch
# existing bindings on the queue that aren't present in the array of
# routing keys will be unbound.
def bind_queue(queue, routing_keys)
- # Find the existing bindings, and unbind any redundant bindings
- queue_bindings = bindings.select { |dest, keys| dest == queue.name }
- queue_bindings.each do |dest, keys|
- keys.reject { |key| routing_keys.include?(key) }.each do |key|
- logger.debug "removing redundant binding #{queue.name} <--> #{key}"
- queue.unbind(@exchange, routing_key: key)
+ if http_api_use_enabled?
+ # Find the existing bindings, and unbind any redundant bindings
+ queue_bindings = bindings.select { |dest, keys| dest == queue.name }
+ queue_bindings.each do |dest, keys|
+ keys.reject { |key| routing_keys.include?(key) }.each do |key|
+ logger.debug "removing redundant binding #{queue.name} <--> #{key}"
+ queue.unbind(@exchange, routing_key: key)
+ end
end
end
@@ -271,7 +294,7 @@ module Hutch
yield
rescue Bunny::TCPConnectionFailed => ex
logger.error "amqp connection error: #{ex.message.downcase}"
- raise ConnectionError.new("couldn't connect to rabbitmq at #{uri}")
+ raise ConnectionError.new("couldn't connect to rabbitmq at #{uri}. Check your configuration, network connectivity and RabbitMQ logs.")
end
def work_pool_threads
diff --git a/lib/hutch/config.rb b/lib/hutch/config.rb
index <HASH>..<HASH> 100644
--- a/lib/hutch/config.rb
+++ b/lib/hutch/config.rb
@@ -39,7 +39,8 @@ module Hutch
# forces waiting for a confirm for every publish
force_publisher_confirms: false,
# Heroku needs > 10. MK.
- connection_timeout: 11
+ connection_timeout: 11,
+ enable_http_api_use: true
}.merge(params)
end
|
Make it possible to disable HTTP API access
References #<I>.
|
gocardless_hutch
|
train
|
1f718e2a2382ea47759b96c94e9297ca45fe3c76
|
diff --git a/src/Factories/Entity/Host.php b/src/Factories/Entity/Host.php
index <HASH>..<HASH> 100644
--- a/src/Factories/Entity/Host.php
+++ b/src/Factories/Entity/Host.php
@@ -76,6 +76,13 @@ class Host extends AbstractEntity
public $publicEndpoints;
/**
+ * List of instances on host
+ *
+ * @var array
+ */
+ public $instanceIds;
+
+ /**
* The labels on a host.
*
* @var string[]
diff --git a/src/Factories/Entity/Service.php b/src/Factories/Entity/Service.php
index <HASH>..<HASH> 100644
--- a/src/Factories/Entity/Service.php
+++ b/src/Factories/Entity/Service.php
@@ -120,5 +120,12 @@ class Service extends AbstractEntity
* @var string
*/
public $transitioningProgress;
-
+
+ /**
+ * List of instances for the service
+ *
+ * @var array
+ */
+ public $instanceIds;
+
}
\ No newline at end of file
|
Add `instanceId` property to Service and Host
|
benmag_laravel-rancher
|
train
|
f47df434e30aaf00ad7e14ada2a8ea6081bed25d
|
diff --git a/salt/client/mixins.py b/salt/client/mixins.py
index <HASH>..<HASH> 100644
--- a/salt/client/mixins.py
+++ b/salt/client/mixins.py
@@ -274,13 +274,13 @@ class SyncClientMixin(object):
# Inject some useful globals to *all* the funciton's global namespace
# only once per module-- not per func
completed_funcs = []
- for mod_name, mod_func in self.functions.iteritems():
+ for mod_name in self.functions:
mod, _ = mod_name.split('.', 1)
if mod in completed_funcs:
continue
completed_funcs.append(mod)
for global_key, value in func_globals.iteritems():
- self.functions[mod_name].func_globals[global_key] = value
+ self.functions[mod_name].__globals__[global_key] = value
# There are some descrepencies of what a "low" structure is
# in the publisher world it is a dict including stuff such as jid,
|
Inject globals after verifying the function-- because it might not exist!
Conflicts:
salt/client/mixins.py
|
saltstack_salt
|
train
|
1d8a34f7532d5fef41d49bd2c720e9b0233c0cfc
|
diff --git a/lib/Pipe/Asset.php b/lib/Pipe/Asset.php
index <HASH>..<HASH> 100644
--- a/lib/Pipe/Asset.php
+++ b/lib/Pipe/Asset.php
@@ -59,7 +59,7 @@ abstract class Asset
# Public: Returns the asset's basename.
#
- # includeExtensions: Set to false to strip all extensions from the filename (default: true)
+ # includeExtensions - Include extensions in the filename (default: true)
#
# Returns the basename as String.
function getBasename($includeExtensions = true)
@@ -121,9 +121,9 @@ abstract class Asset
# Public: Writes the asset's content to the directory.
#
# options - Array of options.
- # dir: Write the asset to the given directory. (default: '')
- # include_digest: Should the digest be spliced into the filenames? (default: false)
- # compress: Should the contents be GZIP compressed? (default: false)
+ # 'dir' => Write asset to a directory. (default: '')
+ # 'include_digest' => Include SHA1 digest in filename (default: false)
+ # 'compress' => Compress contents with GZIP (default: false)
#
# Returns Nothing.
function write($options = array())
@@ -132,7 +132,7 @@ abstract class Asset
$compress = @$options["compress"] ?: false;
$includeDigest = @$options["include_digest"] ?: false;
- $filename = FileUtils::join(array($dir, ($includeDigest ? $this->getDigestName() : $this->logicalPath)));
+ $filename = Path::join(array($dir, ($includeDigest ? $this->getDigestName() : $this->logicalPath)));
if (!is_dir(dirname($filename))) {
mkdir(dirname($filename), 0777, true);
|
Removed usage of FileUtils class
|
CHH_pipe
|
train
|
9f598a68afca1c871aa8e40d13f5a235e8c70438
|
diff --git a/builtin/providers/scaleway/resource_scaleway_security_group_rule.go b/builtin/providers/scaleway/resource_scaleway_security_group_rule.go
index <HASH>..<HASH> 100644
--- a/builtin/providers/scaleway/resource_scaleway_security_group_rule.go
+++ b/builtin/providers/scaleway/resource_scaleway_security_group_rule.go
@@ -89,8 +89,16 @@ func resourceScalewaySecurityGroupRuleCreate(d *schema.ResourceData, m interface
return err
}
+ matches := func(rule api.ScalewaySecurityGroupRule) bool {
+ return rule.Action == req.Action &&
+ rule.Direction == req.Direction &&
+ rule.IPRange == req.IPRange &&
+ rule.Protocol == req.Protocol &&
+ rule.DestPortFrom == req.DestPortFrom
+ }
+
for _, rule := range resp.Rules {
- if rule.Action == req.Action && rule.Direction == req.Direction && rule.IPRange == req.IPRange && rule.Protocol == req.Protocol {
+ if matches(rule) {
d.SetId(rule.ID)
break
}
diff --git a/builtin/providers/scaleway/resource_scaleway_security_group_rule_test.go b/builtin/providers/scaleway/resource_scaleway_security_group_rule_test.go
index <HASH>..<HASH> 100644
--- a/builtin/providers/scaleway/resource_scaleway_security_group_rule_test.go
+++ b/builtin/providers/scaleway/resource_scaleway_security_group_rule_test.go
@@ -21,10 +21,16 @@ func TestAccScalewaySecurityGroupRule_Basic(t *testing.T) {
Config: testAccCheckScalewaySecurityGroupRuleConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckScalewaySecurityGroupsExists("scaleway_security_group.base", &group),
- resource.TestCheckResourceAttr("scaleway_security_group_rule.http", "action", "drop"),
+ resource.TestCheckResourceAttr("scaleway_security_group_rule.http", "action", "accept"),
resource.TestCheckResourceAttr("scaleway_security_group_rule.http", "direction", "inbound"),
resource.TestCheckResourceAttr("scaleway_security_group_rule.http", "ip_range", "0.0.0.0/0"),
resource.TestCheckResourceAttr("scaleway_security_group_rule.http", "protocol", "TCP"),
+ resource.TestCheckResourceAttr("scaleway_security_group_rule.http", "port", "80"),
+ resource.TestCheckResourceAttr("scaleway_security_group_rule.https", "action", "accept"),
+ resource.TestCheckResourceAttr("scaleway_security_group_rule.https", "direction", "inbound"),
+ resource.TestCheckResourceAttr("scaleway_security_group_rule.https", "ip_range", "0.0.0.0/0"),
+ resource.TestCheckResourceAttr("scaleway_security_group_rule.https", "protocol", "TCP"),
+ resource.TestCheckResourceAttr("scaleway_security_group_rule.https", "port", "443"),
testAccCheckScalewaySecurityGroupRuleExists("scaleway_security_group_rule.http", &group),
testAccCheckScalewaySecurityGroupRuleAttributes("scaleway_security_group_rule.http", &group),
),
@@ -93,7 +99,7 @@ func testAccCheckScalewaySecurityGroupRuleAttributes(n string, group *api.Scalew
return err
}
- if rule.Rules.Action != "drop" {
+ if rule.Rules.Action != "accept" {
return fmt.Errorf("Wrong rule action")
}
if rule.Rules.Direction != "inbound" {
@@ -149,10 +155,20 @@ resource "scaleway_security_group" "base" {
resource "scaleway_security_group_rule" "http" {
security_group = "${scaleway_security_group.base.id}"
- action = "drop"
+ action = "accept"
direction = "inbound"
ip_range = "0.0.0.0/0"
protocol = "TCP"
port = 80
}
+
+resource "scaleway_security_group_rule" "https" {
+ security_group = "${scaleway_security_group.base.id}"
+
+ action = "accept"
+ direction = "inbound"
+ ip_range = "0.0.0.0/0"
+ protocol = "TCP"
+ port = 443
+}
`
|
provider/scaleway: fix security_group_rule identification
|
hashicorp_terraform
|
train
|
74e2488382cd43b0965867c37874cc7262a95316
|
diff --git a/did/plugins/gitlab.py b/did/plugins/gitlab.py
index <HASH>..<HASH> 100644
--- a/did/plugins/gitlab.py
+++ b/did/plugins/gitlab.py
@@ -9,15 +9,18 @@ Config example::
url = https://gitlab.com/
token = <authentication-token>
login = <username>
+ ssl_verify = true
The authentication token is required.
Use ``login`` to override the user associated with the token.
See the :doc:`config` documentation for details on using aliases.
+Use ``ssl_verify`` to enable/disable SSL verification (default: true)
__ https://docs.gitlab.com/ce/api/
"""
+import distutils.util
import requests
import dateutil
import itertools
@@ -288,7 +291,12 @@ class GitLabStats(StatsGroup):
except KeyError:
raise ReportError(
"No GitLab token set in the [{0}] section".format(option))
- self.gitlab = GitLab(self.url, self.token)
+ # Check SSL verification
+ try:
+ self.ssl_verify = distutils.util.strtobool(config["ssl_verify"])
+ except KeyError:
+ self.ssl_verify = GITLAB_SSL_VERIFY
+ self.gitlab = GitLab(self.url, self.token, self.ssl_verify)
# Create the list of stats
self.stats = [
IssuesCreated(
|
Make SSL verification for GitLab configurable
|
psss_did
|
train
|
33ba22b82a14921cc1b48e753417bc1bf382a9d5
|
diff --git a/src/Concerns/Dispatchable.php b/src/Concerns/Dispatchable.php
index <HASH>..<HASH> 100644
--- a/src/Concerns/Dispatchable.php
+++ b/src/Concerns/Dispatchable.php
@@ -70,6 +70,7 @@ trait Dispatchable
}
$this->extensions = new Collection();
+ $this->booted = false;
return $this;
}
|
Reset booted status on finish.
|
orchestral_extension
|
train
|
4e855415301da35372f89cee9b2416d9fbe652fc
|
diff --git a/pydev_imports.py b/pydev_imports.py
index <HASH>..<HASH> 100644
--- a/pydev_imports.py
+++ b/pydev_imports.py
@@ -1,4 +1,4 @@
-from pydevd_constants import USE_LIB_COPY
+from pydevd_constants import USE_LIB_COPY, izip
try:
@@ -92,7 +92,7 @@ except:
return start
i = 0
- for start_seg, dest_seg in zip(orig_list, dest_list):
+ for start_seg, dest_seg in izip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
diff --git a/pydevd_constants.py b/pydevd_constants.py
index <HASH>..<HASH> 100644
--- a/pydevd_constants.py
+++ b/pydevd_constants.py
@@ -1,7 +1,6 @@
'''
This module holds the constants used for specifying the states of the debugger.
'''
-
STATE_RUN = 1
STATE_SUSPEND = 2
@@ -147,6 +146,12 @@ try:
except:
#Python 3k does not have it
xrange = range
+
+try:
+ import itertools
+ izip = itertools.izip
+except:
+ izip = zip
try:
object
diff --git a/pydevd_resolver.py b/pydevd_resolver.py
index <HASH>..<HASH> 100644
--- a/pydevd_resolver.py
+++ b/pydevd_resolver.py
@@ -13,7 +13,7 @@ except:
setattr(__builtin__, 'False', 0)
import pydevd_constants
-from pydevd_constants import DictIterItems, xrange
+from pydevd_constants import DictIterItems, xrange, izip
MAX_ITEMS_TO_HANDLE = 500
@@ -213,10 +213,7 @@ class DefaultResolver:
class DictResolver:
def resolve(self, dict, key):
- if key == '__len__':
- return None
-
- if key == TOO_LARGE_ATTR:
+ if key in ('__len__', TOO_LARGE_ATTR):
return None
if '(' not in key:
@@ -270,7 +267,7 @@ class TupleResolver: #to enumerate tuples and lists
@param var: that's the original attribute
@param attribute: that's the key passed in the dict (as a string)
'''
- if attribute == '__len__' or attribute == TOO_LARGE_ATTR:
+ if attribute in ('__len__', TOO_LARGE_ATTR):
return None
return var[int(attribute)]
@@ -284,7 +281,7 @@ class TupleResolver: #to enumerate tuples and lists
format = '%0' + str(int(len(str(l)))) + 'd'
- for i, item in zip(xrange(l), var):
+ for i, item in izip(xrange(l), var):
d[ format % i ] = item
else:
d[TOO_LARGE_ATTR] = TOO_LARGE_MSG
@@ -302,7 +299,7 @@ class SetResolver:
'''
def resolve(self, var, attribute):
- if attribute == '__len__':
+ if attribute in ('__len__', TOO_LARGE_ATTR):
return None
attribute = int(attribute)
@@ -314,8 +311,16 @@ class SetResolver:
def getDictionary(self, var):
d = {}
+ i = 0
for item in var:
- d[ id(item) ] = item
+ i+= 1
+ d[id(item)] = item
+
+ if i > MAX_ITEMS_TO_HANDLE:
+ d[TOO_LARGE_ATTR] = TOO_LARGE_MSG
+ break
+
+
d['__len__'] = len(var)
return d
|
Performance improvements for dealing with large collections.
|
fabioz_PyDev.Debugger
|
train
|
cb0ebb4779ba988ba4173cda229a5f0ed0d0c605
|
diff --git a/mind-map/scia-reto/src/main/java/com/igormaznitsa/sciareto/ui/MainFrame.java b/mind-map/scia-reto/src/main/java/com/igormaznitsa/sciareto/ui/MainFrame.java
index <HASH>..<HASH> 100644
--- a/mind-map/scia-reto/src/main/java/com/igormaznitsa/sciareto/ui/MainFrame.java
+++ b/mind-map/scia-reto/src/main/java/com/igormaznitsa/sciareto/ui/MainFrame.java
@@ -1133,13 +1133,14 @@ public final class MainFrame extends javax.swing.JFrame implements Context, Plat
}
private void menuNewProjectActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_menuNewProjectActionPerformed
- final JFileChooser folder = new JFileChooser();
- folder.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY);
- folder.setDialogTitle("Create project folder");
- folder.setApproveButtonText("Create");
- folder.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY);
- if (folder.showOpenDialog(Main.getApplicationFrame()) == JFileChooser.APPROVE_OPTION) {
- final File file = folder.getSelectedFile();
+ final JFileChooser folderChooser = new JFileChooser();
+ folderChooser.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY);
+ folderChooser.setDialogTitle("Create project folder");
+ folderChooser.setDialogType(JFileChooser.SAVE_DIALOG);
+ folderChooser.setApproveButtonText("Create");
+ folderChooser.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY);
+ if (folderChooser.showSaveDialog(Main.getApplicationFrame()) == JFileChooser.APPROVE_OPTION) {
+ final File file = folderChooser.getSelectedFile();
if (file.isDirectory()) {
if (file.list().length > 0) {
DialogProviderManager.getInstance().getDialogProvider().msgError("File '" + file.getName() + "' already exists and non-empty!");
|
fixed 'new project' under macosx
|
raydac_netbeans-mmd-plugin
|
train
|
852321f395bb5150776abeb5a05c840f92c558c9
|
diff --git a/classes/PodsData.php b/classes/PodsData.php
index <HASH>..<HASH> 100644
--- a/classes/PodsData.php
+++ b/classes/PodsData.php
@@ -2024,22 +2024,30 @@ class PodsData {
if ( 'taxonomy' == $traverse[ 'type' ] ) {
$rel_tt_alias = 'rel_tt_' . $field_joined;
- $the_join = "
- LEFT JOIN `{$wpdb->term_relationships}` AS `{$rel_alias}` ON
- `{$rel_alias}`.`object_id` = '{$traverse[ 'name' ]}'
- AND `{$rel_alias}`.`object_id` = `{$traverse_recurse[ 'joined' ]}`.`ID`
-
- LEFT JOIN `{$wpdb->term_taxonomy}` AS `{$rel_tt_alias}` ON
- `{$rel_tt_alias}`.`taxonomy` = '{$traverse[ 'name' ]}'
- AND `{$rel_tt_alias}`.`term_taxonomy_id` = `{$rel_alias}`.`term_taxonomy_id`
-
- LEFT JOIN `{$table_info[ 'table' ]}` AS `{$field_joined}` ON
- `{$field_joined}`.`{$table_info[ 'field_index' ]}` = '{$traverse[ 'name' ]}'
- AND `{$field_joined}`.`{$table_info[ 'field_id' ]}` = `{$rel_tt_alias}`.`{$table_info[ 'field_id' ]}`, SIGNED )
- ";
+ if ( $meta_data_table ) {
+ $the_join = "
+ LEFT JOIN `{$table_info[ 'pod_table' ]}` AS `{$field_joined}` ON
+ `{$field_joined}`.`{$table_info[ 'pod_field_id' ]}` = `{$traverse_recurse[ 'rel_alias' ]}`.`{$traverse_recurse[ 'joined_id' ]}`
+ ";
+ }
+ else {
+ $the_join = "
+ LEFT JOIN `{$wpdb->term_relationships}` AS `{$rel_alias}` ON
+ `{$rel_alias}`.`object_id` = '{$traverse[ 'name' ]}'
+ AND `{$rel_alias}`.`object_id` = `{$traverse_recurse[ 'joined' ]}`.`ID`
- $joined_id = $table_info[ 'field_id' ];
- $joined_index = $table_info[ 'field_index' ];
+ LEFT JOIN `{$wpdb->term_taxonomy}` AS `{$rel_tt_alias}` ON
+ `{$rel_tt_alias}`.`taxonomy` = '{$traverse[ 'name' ]}'
+ AND `{$rel_tt_alias}`.`term_taxonomy_id` = `{$rel_alias}`.`term_taxonomy_id`
+
+ LEFT JOIN `{$table_info[ 'table' ]}` AS `{$field_joined}` ON
+ `{$field_joined}`.`{$table_info[ 'field_index' ]}` = '{$traverse[ 'name' ]}'
+ AND `{$field_joined}`.`{$table_info[ 'field_id' ]}` = `{$rel_tt_alias}`.`{$table_info[ 'field_id' ]}`, SIGNED )
+ ";
+
+ $joined_id = $table_info[ 'field_id' ];
+ $joined_index = $table_info[ 'field_index' ];
+ }
}
elseif ( in_array( $traverse[ 'type' ], $tableless_field_types ) && ( 'pick' != $traverse[ 'type' ] || 'custom-simple' != pods_var( 'pick_object', $traverse ) ) ) {
if ( defined( 'PODS_TABLELESS' ) && PODS_TABLELESS ) {
@@ -2059,7 +2067,7 @@ class PodsData {
elseif ( $meta_data_table ) {
$the_join = "
LEFT JOIN `{$table_info[ 'pod_table' ]}` AS `{$field_joined}` ON
- `{$field_joined}`.`{$table_info[ 'pod_field_id' ]}` = `{$traverse_recurse[ 'rel_alias' ]}`.`related_item_id`
+ `{$field_joined}`.`{$table_info[ 'pod_field_id' ]}` = `{$traverse_recurse[ 'rel_alias' ]}`.`{$traverse_recurse[ 'joined_id' ]}`
";
}
else {
|
Taxonomy data table support for #<I>
|
pods-framework_pods
|
train
|
80e0994b62746a285221a61ffca7d9a0aaab28b9
|
diff --git a/src/ORM/ResultSet.php b/src/ORM/ResultSet.php
index <HASH>..<HASH> 100644
--- a/src/ORM/ResultSet.php
+++ b/src/ORM/ResultSet.php
@@ -313,6 +313,11 @@ class ResultSet implements ResultSetInterface
*/
public function serialize()
{
+ if (!$this->_useBuffering) {
+ $msg = 'You cannot serialize an un-buffered ResultSet. Use Query::bufferResults() to get a buffered ResultSet.';
+ throw new Exception($msg);
+ }
+
while ($this->valid()) {
$this->next();
}
diff --git a/tests/TestCase/ORM/ResultSetTest.php b/tests/TestCase/ORM/ResultSetTest.php
index <HASH>..<HASH> 100644
--- a/tests/TestCase/ORM/ResultSetTest.php
+++ b/tests/TestCase/ORM/ResultSetTest.php
@@ -14,7 +14,6 @@
*/
namespace Cake\Test\TestCase\ORM;
-use Cake\Core\Configure;
use Cake\Core\Plugin;
use Cake\Datasource\ConnectionManager;
use Cake\ORM\Entity;
@@ -112,6 +111,16 @@ class ResultSetTest extends TestCase
}
/**
+ * @expectedException \Cake\Database\Exception
+ * @expectedExceptionMessage You cannot serialize an un-buffered ResultSet. Use Query::bufferResults() to get a buffered ResultSet.
+ */
+ public function testSerializationUnbuffered()
+ {
+ $results = $this->table->find('all')->bufferResults(false)->all();
+ serialize($results);
+ }
+
+ /**
* Test iteration after serialization
*
* @return void
|
added exception for serialize an unbuffered result set, and also a test for this edge case
|
cakephp_cakephp
|
train
|
49d7c212dda5f198ee472bd05af60d1659ba65e4
|
diff --git a/lib/docker.js b/lib/docker.js
index <HASH>..<HASH> 100644
--- a/lib/docker.js
+++ b/lib/docker.js
@@ -205,7 +205,8 @@ function createDockerFile() {
let npmCommand = 'npm';
if (nodeVersion !== 'system') {
- const nvmDownloadURI = 'https://raw.githubusercontent.com/creationix/nvm/v0.33.6/install.sh';
+ const nvmDownloadURI = 'https://raw.githubusercontent.com/nvm-sh/nvm/v0.34.0/install.sh';
+ contents += 'RUN mkdir -p /usr/local/nvm\n';
contents += 'ENV NVM_DIR /usr/local/nvm\n';
contents += `RUN wget -qO- ${nvmDownloadURI} | bash && . $NVM_DIR/nvm.sh && nvm install ${nodeVersion}\n`;
npmCommand = `. $NVM_DIR/nvm.sh && nvm use ${nodeVersion} && npm`;
|
Docker: Update to NVM <I>
|
wikimedia_service-runner
|
train
|
beefd88fce154c9f46c62b283d35c1127ce24589
|
diff --git a/src/test/java/net/openhft/chronicle/map/CHMTest5.java b/src/test/java/net/openhft/chronicle/map/CHMTest5.java
index <HASH>..<HASH> 100644
--- a/src/test/java/net/openhft/chronicle/map/CHMTest5.java
+++ b/src/test/java/net/openhft/chronicle/map/CHMTest5.java
@@ -86,62 +86,10 @@ public class CHMTest5 {
// as we used CAS to update and it's read-only after that
// but we need to lock access to the Time array
long[] times1 = new long[NUMBER_OF_PROCESSES_ALLOWED];
- boolean locked = false;
- for (int i = 0; i < 1000000; i++) {
- //try up to 1 second
- if (data.tryLockNanosEntry(1000L)) {
- locked = true;
- break;
- }
- }
- if (!locked) {
- System.out.println("Unable to acquire a lock on the time array - exiting");
- System.exit(0);
- }
- try {
- //we've got the lock, now copy the array
- for (int i = 0; i < times1.length; i++) {
- times1[i] = data.getTimeAt(i);
- }
- } finally {
- //and release the lock
- try {
- data.unlockEntry();
- } catch (IllegalMonitorStateException e) {
- //odd, but we'll be unlocked either way
- System.out.println("Unexpected state: " + e);
- e.printStackTrace();
- }
- }
+ getTimes(data, times1);
pause(300L);
long[] times2 = new long[NUMBER_OF_PROCESSES_ALLOWED];
- locked = false;
- for (int i = 0; i < 1000000; i++) {
- //try up to 1 second
- if (data.tryLockNanosEntry(1000L)) {
- locked = true;
- break;
- }
- }
- if (!locked) {
- System.out.println("Unable to acquire a lock on the time array - exiting");
- System.exit(0);
- }
- try {
- //we've got the lock, now copy the array
- for (int i = 0; i < times2.length; i++) {
- times2[i] = data.getTimeAt(i);
- }
- } finally {
- //and release the lock
- try {
- data.unlockEntry();
- } catch (IllegalMonitorStateException e) {
- //odd, but we'll be unlocked either way
- System.out.println("Unexpected state: " + e);
- e.printStackTrace();
- }
- }
+ getTimes(data, times2);
//look for a slot that hasn't changed in that 300ms pause
int slotindex = 0;
long lastUpdateTime = -1;
@@ -150,29 +98,12 @@ public class CHMTest5 {
//we have an index which has not been updated by anything else
//in the 300ms pause, so we have a spare slot - we use this slot
long timenow = System.currentTimeMillis();
- locked = false;
- for (int i = 0; i < 1000000; i++) {
- //try up to 1 second
- if (data.tryLockNanosEntry(1000L)) {
- locked = true;
- break;
- }
- }
- if (!locked) {
- System.out.println("Unable to acquire a lock on the time array - exiting");
- System.exit(0);
- }
+ tryLock1Sec(data);
try {
data.setTimeAt(slotindex, timenow);
} finally {
//and release the lock
- try {
- data.unlockEntry();
- } catch (IllegalMonitorStateException e) {
- //odd, but we'll be unlocked either way
- System.out.println("Unexpected state: " + e);
- e.printStackTrace();
- }
+ releaseLock(data);
}
//Now we have successfully acquired a slot
@@ -191,18 +122,7 @@ public class CHMTest5 {
for (int count = 0; count < 600; count++) {
pause(100L);
long timenow = System.currentTimeMillis();
- locked = false;
- for (int i = 0; i < 1000000; i++) {
- //try up to 1 second
- if (data.tryLockNanosEntry(1000L)) {
- locked = true;
- break;
- }
- }
- if (!locked) {
- System.out.println("Unable to acquire a lock on the time array - exiting");
- System.exit(0);
- }
+ tryLock1Sec(data);
try {
if (lastUpdateTime == data.getTimeAt(slotindex)) {
//That's what we expect so just update the slot
@@ -215,19 +135,51 @@ public class CHMTest5 {
System.exit(0);
}
} finally {
- //and release the lock
- try {
- data.unlockEntry();
- } catch (IllegalMonitorStateException e) {
- //odd, but we'll be unlocked either way
- System.out.println("Unexpected state: " + e);
- e.printStackTrace();
- }
+ releaseLock(data);
}
}
System.out.println("Exiting slot " + slotindex + " after completing the full test.");
}
+ private static void releaseLock(CHMTest5Data data) {
+ //and release the lock
+ try {
+ data.unlockEntry();
+ } catch (IllegalMonitorStateException e) {
+ //odd, but we'll be unlocked either way
+ System.out.println("Unexpected state: " + e);
+ e.printStackTrace();
+ }
+ }
+
+ private static void tryLock1Sec(CHMTest5Data data) {
+ boolean locked = false;
+ for (int i = 0; i < 1000000; i++) {
+ //try up to 1 second
+ if (data.tryLockNanosEntry(1000L)) {
+ locked = true;
+ break;
+ }
+ }
+ if (!locked) {
+ System.out.println("Unable to acquire a lock on the time array - exiting");
+ System.exit(0);
+ }
+ }
+
+ private static void getTimes(CHMTest5Data data, long[] times1) {
+ tryLock1Sec(data);
+ try {
+ //we've got the lock, now copy the array
+ for (int i = 0; i < times1.length; i++) {
+ times1[i] = data.getTimeAt(i);
+ }
+ } finally {
+ //and release the lock
+ releaseLock(data);
+ }
+ }
+
public static void pause(long pause) {
ProcessInstanceLimiter.pause(pause);
}
|
refdactored the test code a little
|
OpenHFT_Chronicle-Map
|
train
|
f9ae1e2ea9ee90558edce5e9b5828e363a371c64
|
diff --git a/swarm_dial.go b/swarm_dial.go
index <HASH>..<HASH> 100644
--- a/swarm_dial.go
+++ b/swarm_dial.go
@@ -255,16 +255,16 @@ func (s *Swarm) gatedDialAttempt(ctx context.Context, p peer.ID) (*Conn, error)
return conn, nil
}
+ // if this peer has been backed off, lets get out of here
+ if s.backf.Backoff(p) {
+ log.Event(ctx, "swarmDialBackoff", logdial)
+ return nil, ErrDialBackoff
+ }
+
// check if there's an ongoing dial to this peer
if ok, wait := s.dsync.Lock(p); ok {
defer s.dsync.Unlock(p)
- // if this peer has been backed off, lets get out of here
- if s.backf.Backoff(p) {
- log.Event(ctx, "swarmDialBackoff", logdial)
- return nil, ErrDialBackoff
- }
-
// ok, we have been charged to dial! let's do it.
// if it succeeds, dial will add the conn to the swarm itself.
defer log.EventBegin(ctx, "swarmDialAttemptStart", logdial).Done()
@@ -285,13 +285,6 @@ func (s *Swarm) gatedDialAttempt(ctx context.Context, p peer.ID) (*Conn, error)
} else {
// we did not dial. we must wait for someone else to dial.
-
- // check whether we should backoff first...
- if s.backf.Backoff(p) {
- log.Event(ctx, "swarmDialBackoff", logdial)
- return nil, ErrDialBackoff
- }
-
defer log.EventBegin(ctx, "swarmDialWait", logdial).Done()
select {
case <-wait: // wait for that other dial to finish.
|
swarm: perform backoff check before taking dialsync lock
|
libp2p_go-libp2p-swarm
|
train
|
83f447e6a6c70756b072e1fc6f5831e0fe8723a6
|
diff --git a/pkg/kubelet/dockertools/docker_manager.go b/pkg/kubelet/dockertools/docker_manager.go
index <HASH>..<HASH> 100644
--- a/pkg/kubelet/dockertools/docker_manager.go
+++ b/pkg/kubelet/dockertools/docker_manager.go
@@ -1529,6 +1529,8 @@ func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, co
select {
case <-time.After(time.Duration(gracePeriod) * time.Second):
glog.Warningf("preStop hook for container %q did not complete in %d seconds", name, gracePeriod)
+ message := fmt.Sprintf("preStop hook for container %q did not complete in %d seconds", name, gracePeriod)
+ dm.generateFailedContainerEvent(containerID, pod.Name, events.UnfinishedPreStopHook, message)
case <-done:
glog.V(4).Infof("preStop hook for container %q completed", name)
}
diff --git a/pkg/kubelet/events/event.go b/pkg/kubelet/events/event.go
index <HASH>..<HASH> 100644
--- a/pkg/kubelet/events/event.go
+++ b/pkg/kubelet/events/event.go
@@ -72,6 +72,7 @@ const (
FailedValidation = "FailedValidation"
// Lifecycle hooks
- FailedPostStartHook = "FailedPostStartHook"
- FailedPreStopHook = "FailedPreStopHook"
+ FailedPostStartHook = "FailedPostStartHook"
+ FailedPreStopHook = "FailedPreStopHook"
+ UnfinishedPreStopHook = "UnfinishedPreStopHook"
)
|
Send a pod event if preStop hook did not finish in time.
|
kubernetes_kubernetes
|
train
|
7ecc171e18a532e660f4d50ae16267758eeff167
|
diff --git a/src/SubscribePro/Service/PaymentProfile/PaymentProfileService.php b/src/SubscribePro/Service/PaymentProfile/PaymentProfileService.php
index <HASH>..<HASH> 100644
--- a/src/SubscribePro/Service/PaymentProfile/PaymentProfileService.php
+++ b/src/SubscribePro/Service/PaymentProfile/PaymentProfileService.php
@@ -165,9 +165,13 @@ class PaymentProfileService extends AbstractService
private function saveCreditCardProfile(PaymentProfileInterface $paymentProfile)
{
$postData = [self::API_NAME_PROFILE => $paymentProfile->getFormData()];
- $response = $paymentProfile->isNew()
- ? $this->httpClient->post('/services/v2/vault/paymentprofile/creditcard.json', $postData)
- : $this->httpClient->put("/services/v1/vault/paymentprofiles/{$paymentProfile->getId()}.json", $postData);
+ if ($paymentProfile->isNew()) {
+ $response = $this->httpClient->post('/services/v2/vault/paymentprofile/creditcard.json', $postData);
+ }
+ else {
+ $response = $this->httpClient->post("/services/v2/vault/paymentprofiles/{$paymentProfile->getId()}.json", $postData);
+ }
+
return $this->retrieveItem($response, self::API_NAME_PROFILE, $paymentProfile);
}
@@ -203,7 +207,7 @@ class PaymentProfileService extends AbstractService
$response = $this->httpClient->post('/services/v2/vault/paymentprofile/bankaccount.json', $postData);
} else {
$postData = [self::API_NAME_PROFILE => $paymentProfile->getBankAccountSavingFormData()];
- $response = $this->httpClient->put("/services/v1/vault/paymentprofiles/{$paymentProfile->getId()}.json", $postData);
+ $response = $this->httpClient->post("/services/v2/vault/paymentprofiles/{$paymentProfile->getId()}.json", $postData);
}
return $this->retrieveItem($response, self::API_NAME_PROFILE, $paymentProfile);
}
@@ -226,7 +230,7 @@ class PaymentProfileService extends AbstractService
*/
public function loadProfile($paymentProfileId)
{
- $response = $this->httpClient->get("/services/v1/vault/paymentprofiles/{$paymentProfileId}.json");
+ $response = $this->httpClient->get("/services/v2/vault/paymentprofiles/{$paymentProfileId}.json");
return $this->retrieveItem($response, self::API_NAME_PROFILE);
}
@@ -251,7 +255,7 @@ class PaymentProfileService extends AbstractService
);
}
- $response = $this->httpClient->get('/services/v1/vault/paymentprofiles.json', $filters);
+ $response = $this->httpClient->get('/services/v2/vault/paymentprofiles.json', $filters);
return $this->retrieveItems($response, self::API_NAME_PROFILES);
}
@@ -268,7 +272,7 @@ class PaymentProfileService extends AbstractService
$response = $this->httpClient->post('/services/v2/paymentprofile/third-party-token.json', $postData);
} else {
$postData = [self::API_NAME_PROFILE => $paymentProfile->getThirdPartyTokenSavingFormData()];
- $response = $this->httpClient->put("/services/v1/vault/paymentprofiles/{$paymentProfile->getId()}.json", $postData);
+ $response = $this->httpClient->post("/services/v2/vault/paymentprofiles/{$paymentProfile->getId()}.json", $postData);
}
return $this->retrieveItem($response, self::API_NAME_PROFILE, $paymentProfile);
diff --git a/tests/Service/PaymentProfile/PaymentProfileServiceTest.php b/tests/Service/PaymentProfile/PaymentProfileServiceTest.php
index <HASH>..<HASH> 100644
--- a/tests/Service/PaymentProfile/PaymentProfileServiceTest.php
+++ b/tests/Service/PaymentProfile/PaymentProfileServiceTest.php
@@ -151,10 +151,10 @@ class PaymentProfileServiceTest extends \PHPUnit_Framework_TestCase
'resultData' => [PaymentProfileInterface::ID => '111'],
],
'Update existing profile' => [
- 'url' => '/services/v1/vault/paymentprofiles/22.json',
+ 'url' => '/services/v2/vault/paymentprofiles/22.json',
'itemId' => 22,
'isNew' => false,
- 'method' => 'put',
+ 'method' => 'post',
'formData' => [PaymentProfileInterface::CREDITCARD_FIRST_DIGITS => '521'],
'resultData' => [PaymentProfileInterface::ID => '22'],
],
@@ -169,7 +169,7 @@ class PaymentProfileServiceTest extends \PHPUnit_Framework_TestCase
$this->httpClientMock->expects($this->once())
->method('get')
- ->with("/services/v1/vault/paymentprofiles/{$itemId}.json")
+ ->with("/services/v2/vault/paymentprofiles/{$itemId}.json")
->willReturn([PaymentProfileService::API_NAME_PROFILE => $itemData]);
$this->paymentProfileFactoryMock->expects($this->once())
@@ -240,7 +240,7 @@ class PaymentProfileServiceTest extends \PHPUnit_Framework_TestCase
{
$this->httpClientMock->expects($this->once())
->method('get')
- ->with('/services/v1/vault/paymentprofiles.json', $filters)
+ ->with('/services/v2/vault/paymentprofiles.json', $filters)
->willReturn([PaymentProfileService::API_NAME_PROFILES => $itemsData]);
$profiles = [];
|
Use v2 vault APIs for payment profiles wherever possible.
|
subscribepro_subscribepro-php
|
train
|
fafe97c1c1990aeebfe517cea450bac4c258a04b
|
diff --git a/pipeline/packager.py b/pipeline/packager.py
index <HASH>..<HASH> 100644
--- a/pipeline/packager.py
+++ b/pipeline/packager.py
@@ -1,5 +1,5 @@
from django.contrib.staticfiles.storage import staticfiles_storage
-from django.contrib.staticfiles.finders import find
+from django.contrib.staticfiles.finders import get_finders, find
from django.core.files.base import ContentFile
from django.utils.encoding import smart_bytes
@@ -98,11 +98,25 @@ class Packager(object):
variant=package.variant, **kwargs)
def compile(self, paths, compiler_options={}, force=False):
- return self.compiler.compile(
+ paths = self.compiler.compile(
paths,
compiler_options=compiler_options,
force=force,
)
+ for path in paths:
+ if not self.storage.exists(path):
+ if self.verbose:
+ print("Compiled file '%s' cannot be found with packager's storage. Locating it." % path)
+
+ source_storage = self.find_source_storage(path)
+ if source_storage is not None:
+ with source_storage.open(path) as source_file:
+ if self.verbose:
+ print("Saving: %s" % path)
+ self.storage.save(path, source_file)
+ else:
+ raise IOError("File does not exist: %s" % path)
+ return paths
def pack(self, package, compress, signal, **kwargs):
output_filename = package.output_filename
@@ -127,6 +141,15 @@ class Packager(object):
def save_file(self, path, content):
return self.storage.save(path, ContentFile(smart_bytes(content)))
+ def find_source_storage(self, path):
+ for finder in get_finders():
+ for short_path, storage in finder.list(''):
+ if short_path == path:
+ if self.verbose:
+ print("Found storage: %s" % str(self.storage))
+ return storage
+ return None
+
def create_packages(self, config):
packages = {}
if not config:
|
Improved packager.compile() to handle s3 storage when compiler generates output to the local one. This patch copies the generated file. (#<I>)
|
jazzband_django-pipeline
|
train
|
d44330bb19a59b303ff810b19ac5bcda0f5cb539
|
diff --git a/pkg/cmd/server/start.go b/pkg/cmd/server/start.go
index <HASH>..<HASH> 100644
--- a/pkg/cmd/server/start.go
+++ b/pkg/cmd/server/start.go
@@ -35,7 +35,7 @@ type CustomMetricsAdapterServerOptions struct {
func NewCustomMetricsAdapterServerOptions() *CustomMetricsAdapterServerOptions {
o := &CustomMetricsAdapterServerOptions{
- SecureServing: genericoptions.WithLoopback(genericoptions.NewSecureServingOptions()),
+ SecureServing: genericoptions.NewSecureServingOptions().WithLoopback(),
Authentication: genericoptions.NewDelegatingAuthenticationOptions(),
Authorization: genericoptions.NewDelegatingAuthorizationOptions(),
Features: genericoptions.NewFeatureOptions(),
@@ -59,11 +59,11 @@ func (o CustomMetricsAdapterServerOptions) Config() (*apiserver.Config, error) {
}
serverConfig := genericapiserver.NewConfig(apiserver.Codecs)
- if err := o.SecureServing.ApplyTo(serverConfig); err != nil {
+ if err := o.SecureServing.ApplyTo(&serverConfig.SecureServing, &serverConfig.LoopbackClientConfig); err != nil {
return nil, err
}
- if err := o.Authentication.ApplyTo(&serverConfig.Authentication, serverConfig.SecureServing, nil); err != nil {
+ if err := o.Authentication.ApplyTo(&serverConfig.Authentication, serverConfig.SecureServing, serverConfig.OpenAPIConfig); err != nil {
return nil, err
}
if err := o.Authorization.ApplyTo(&serverConfig.Authorization); err != nil {
diff --git a/test-adapter/provider/provider.go b/test-adapter/provider/provider.go
index <HASH>..<HASH> 100644
--- a/test-adapter/provider/provider.go
+++ b/test-adapter/provider/provider.go
@@ -205,18 +205,30 @@ func (p *testingProvider) valueFor(info provider.CustomMetricInfo, name types.Na
}
// metricFor is a helper function which formats a value, metric, and object info into a MetricValue which can be returned by the metrics API
-func (p *testingProvider) metricFor(value resource.Quantity, name types.NamespacedName, info provider.CustomMetricInfo) (*custom_metrics.MetricValue, error) {
+func (p *testingProvider) metricFor(value resource.Quantity, name types.NamespacedName, selector labels.Selector, info provider.CustomMetricInfo) (*custom_metrics.MetricValue, error) {
objRef, err := helpers.ReferenceFor(p.mapper, name, info)
if err != nil {
return nil, err
}
- return &custom_metrics.MetricValue{
+ metric := &custom_metrics.MetricValue{
DescribedObject: objRef,
- MetricName: info.Metric,
- Timestamp: metav1.Time{time.Now()},
- Value: value,
- }, nil
+ Metric: custom_metrics.MetricIdentifier{
+ Name: info.Metric,
+ },
+ Timestamp: metav1.Time{time.Now()},
+ Value: value,
+ }
+
+ if len(selector.String()) > 0 {
+ labelSelector, err := metav1.ParseToLabelSelector(selector.String())
+ if err != nil {
+ return nil, err
+ }
+ metric.Metric.Selector = labelSelector
+ }
+
+ return metric, nil
}
// metricsFor is a wrapper used by GetMetricBySelector to format several metrics which match a resource selector
@@ -237,7 +249,7 @@ func (p *testingProvider) metricsFor(namespace string, selector labels.Selector,
return nil, err
}
- metric, err := p.metricFor(value, namespacedName, info)
+ metric, err := p.metricFor(value, namespacedName, selector, info)
if err != nil {
return nil, err
}
@@ -257,7 +269,7 @@ func (p *testingProvider) GetMetricByName(name types.NamespacedName, info provid
if err != nil {
return nil, err
}
- return p.metricFor(value, name, info)
+ return p.metricFor(value, name, labels.Everything(), info)
}
func (p *testingProvider) GetMetricBySelector(namespace string, selector labels.Selector, info provider.CustomMetricInfo) (*custom_metrics.MetricValueList, error) {
|
Update adapter with Kubernetes <I>
|
kubernetes-incubator_custom-metrics-apiserver
|
train
|
d0f91a9f9939909c4b45182218b8b3dac83538e4
|
diff --git a/impl-base/src/main/java/org/jboss/shrinkwrap/impl/base/container/ContainerBase.java b/impl-base/src/main/java/org/jboss/shrinkwrap/impl/base/container/ContainerBase.java
index <HASH>..<HASH> 100644
--- a/impl-base/src/main/java/org/jboss/shrinkwrap/impl/base/container/ContainerBase.java
+++ b/impl-base/src/main/java/org/jboss/shrinkwrap/impl/base/container/ContainerBase.java
@@ -690,20 +690,25 @@ public abstract class ContainerBase<T extends Archive<T>> extends AssignableBase
{
Validate.notNull(classes, "Classes must be specified");
- for(Class<?> clazz : classes)
+ for(final Class<?> clazz : classes)
{
Asset resource = new ClassAsset(clazz);
Path location = new BasicPath(getClassesPath(), AssetUtil.getFullPathForClassResource(clazz));
add(resource, location);
// Get all inner classes and add them
- final Class<?>[] innerClasses = clazz.getDeclaredClasses();
- for (final Class<?> innerClass : innerClasses)
- {
- this.addClass(innerClass);
- }
+ addPackages(
+ false,
+ new Filter<Class<?>>()
+ {
+ public boolean include(Class<?> object)
+ {
+ return object.getName().startsWith(clazz.getName() + "$");
+ };
+ },
+ clazz.getPackage()
+ );
}
-
return covarientReturn();
};
|
SHRINKWRAP-<I> Changed to use addPackage with Filter to be able to include anonymous inner classes.
|
shrinkwrap_shrinkwrap
|
train
|
94146acbc1d22be269496f9c39189fc314ad9ac1
|
diff --git a/django_extensions/management/commands/shell_plus.py b/django_extensions/management/commands/shell_plus.py
index <HASH>..<HASH> 100644
--- a/django_extensions/management/commands/shell_plus.py
+++ b/django_extensions/management/commands/shell_plus.py
@@ -132,7 +132,7 @@ class Command(NoArgsCommand):
manage_py_dir, manage_py = os.path.split(os.path.realpath(sys.argv[0]))
if manage_py == 'manage.py' and os.path.isdir(manage_py_dir) and manage_py_dir != os.getcwd():
- pythonpath = os.environ.get('PYTHONPATH') or ks.env.get('PYTHONPATH') or ''
+ pythonpath = ks.env.get('PYTHONPATH', os.environ.get('PYTHONPATH', ''))
pythonpath = pythonpath.split(':')
if manage_py_dir not in pythonpath:
pythonpath.append(manage_py_dir)
|
kernel spec PYTHONPATH should override env from the shell if it's not empty
|
django-extensions_django-extensions
|
train
|
ef1aa4baba9886ae745cfebaf9d94c4571ca25dd
|
diff --git a/lib/http/public/javascripts/job.js b/lib/http/public/javascripts/job.js
index <HASH>..<HASH> 100644
--- a/lib/http/public/javascripts/job.js
+++ b/lib/http/public/javascripts/job.js
@@ -187,7 +187,6 @@ Job.prototype.renderUpdate = function(){
// attempts
if (this.attempts.made) {
- console.log(this.attempts);
view.attempts(this.attempts.made + '/' + this.attempts.max);
} else {
view.attempts().remove();
diff --git a/lib/queue/job.js b/lib/queue/job.js
index <HASH>..<HASH> 100644
--- a/lib/queue/job.js
+++ b/lib/queue/job.js
@@ -451,6 +451,7 @@ Job.prototype.active = function(){
Job.prototype.save = function(fn){
var client = this.client
, fn = fn || noop
+ , max = this._max_attempts
, self = this;
// update
@@ -459,9 +460,10 @@ Job.prototype.save = function(fn){
// incr id
client.incr('q:ids', function(err, id){
if (err) return fn(err);
+ var key = 'q:job:' + id;
self.id = id;
self.state = 'inactive';
- client.hset('q:job:' + id, 'max_attempts', this._max_attempts);
+ if (max) client.hset(key, 'max_attempts', max);
client.sadd('q:job:types', self.type);
self.set('type', self.type);
self.set('created_at', Date.now());
|
Implemented redis portion of `Job.attempts(n)`
|
Automattic_kue
|
train
|
8dac79734c60922035b09823604a42783f46a12f
|
diff --git a/owslib/coverage/wcs100.py b/owslib/coverage/wcs100.py
index <HASH>..<HASH> 100644
--- a/owslib/coverage/wcs100.py
+++ b/owslib/coverage/wcs100.py
@@ -135,7 +135,7 @@ class WebCoverageService_1_0_0(WCSBase):
u = urlopen(base_url + data)
except HTTPError, e: #Some servers may set the http header to 400 if returning an OGC service exception.
if e.code == 400:
- raise ServiceException, e.read()+data
+ raise ServiceException, e.read()
self.log.debug('WCS 1.0.0 DEBUG: GetCoverage request made: %s'%u.url)
|
correction: did not mean to append +data to e.read()
|
geopython_OWSLib
|
train
|
ecf9b8a70fc9cfe4175e5d11ecef737d080b4366
|
diff --git a/lib/sensor.js b/lib/sensor.js
index <HASH>..<HASH> 100644
--- a/lib/sensor.js
+++ b/lib/sensor.js
@@ -605,12 +605,8 @@ MonitoringConfig.prototype.isModified = function(configProperty) {
return Object.keys(this._dirty).length > 0;
}
-MonitoringConfig.prototype.resetModified = function(configProperty) {
- if (configProperty) {
- delete this._dirty[configProperty];
- } else {
- this._dirty = {};
- }
+MonitoringConfig.prototype.resetModified = function() {
+ this._dirty = {};
return this;
}
@@ -621,18 +617,11 @@ MonitoringConfig.prototype.markModified = function(configProperty) {
for (let key of Object.keys(this.data)) {
this._dirty[key] = true;
}
- }
- return this;
-}
-
-MonitoringConfig.prototype.mergeFrom = function(confData) {
- if (confData instanceof MonitoringConfig) confData = confData.data;
- if (! this.isModified()) {
- this.data = confData;
- } else {
- for (let key in confData) {
- if (! this.isModified(key)) {
- this.data[key] = confData[key];
+ for (let key of Object.keys(this)) {
+ if (! (key.startsWith('_')
+ || key === 'data'
+ || 'function' === typeof this[key])) {
+ this._dirty[key] = true;
}
}
}
@@ -671,15 +660,18 @@ MonitoringConfig.saveFunc = function(sensor) {
MonitoringConfig.updateFunc = function(sensor) {
return function(callback) {
- var req = WirelessTagSensor.loadMonitoringConfig(sensor, callback);
- return latestConfReq.then(
- (config) => {
- this.mergeFrom(config);
- sensor.emit('config', sensor, this, 'update');
- return this;
- },
- sensor.errorHandler(callback)
- );
+ if (! this.isModified()) {
+ var req = WirelessTagSensor.loadMonitoringConfig(sensor, callback);
+ return latestConfReq.then(
+ (config) => {
+ this.data = config.data;
+ sensor.emit('config', sensor, this, 'update');
+ return this;
+ },
+ sensor.errorHandler(callback)
+ );
+ }
+ return Promise.resolve(this);
};
}
|
Removes ability to merge into a monitoring config
It turns out that doing the merge right is already a little brittle, but
breaks down completely once we allow "virtual" properties for which the
accessors delegate to another object. So I'm removing it.
|
hlapp_wirelesstags-js
|
train
|
4dfd1b71fe9ff54f269636bfa6d2f718681069fc
|
diff --git a/TYPO3.TYPO3CR/Tests/Functional/Eel/FlowQueryOperations/ParentsUntilOperationTest.php b/TYPO3.TYPO3CR/Tests/Functional/Eel/FlowQueryOperations/ParentsUntilOperationTest.php
index <HASH>..<HASH> 100644
--- a/TYPO3.TYPO3CR/Tests/Functional/Eel/FlowQueryOperations/ParentsUntilOperationTest.php
+++ b/TYPO3.TYPO3CR/Tests/Functional/Eel/FlowQueryOperations/ParentsUntilOperationTest.php
@@ -55,27 +55,36 @@ class ParentsUntilOperationTest extends AbstractNodeTest
'expectedNodePaths' => array('/a'),
'unexpectedNodePaths' => array('/a/a5','/a/a3','/a/a2,')
),
+ array(
+ 'currentNodePaths' => array('/b/b4/b4b/b4bb/b4bba'),
+ 'subject' => '[instanceof TYPO3.TYPO3CR.Testing:NodeType]',
+ 'expectedNodePaths' => array('/b/b4/b4b/b4bb'),
+ 'unexpectedNodePaths' => array('b/b4','b/b4/b4b','/b/b3','/b')
+ ),
);
}
/**
* Tests on a tree:
*
- * a (testNodeType)
- * a1 (testNodeType)
+ * a (Testing:NodeType)
+ * a1 (Testing:NodeType)
* a2
- * a3 (testNodeType)
+ * a3 (Testing:NodeType)
* a4
* a5
- * b (testNodeType3)
+ * b (Testing:NodeType)
* b1
- * b2 (testNodeType3)
+ * b2 (Testing:NodeType)
* b3
- * b3a
- * b3b
- * b4
- *
- *
+ * b3a
+ * b3b
+ * b4 (Testing:NodeType)
+ * b4a
+ * b4b (Testing:NodeType)
+ * b4ba (Testing:NodeType)
+ * b4bb
+ * b4bba
*
* @test
* @dataProvider parentsUntilOperationDataProvider()
@@ -99,7 +108,12 @@ class ParentsUntilOperationTest extends AbstractNodeTest
$nodeB3 = $nodeB->createNode('b3');
$nodeB3->createNode('b3a');
$nodeB3->createNode('b3b');
- $nodeB->createNode('b4');
+ $nodeB4 = $nodeB->createNode('b4', $testNodeType);
+ $nodeB4->createNode('b4a');
+ $nodeB4B = $nodeB4->createNode('b4b', $testNodeType);
+ $nodeB4B->createNode('b4ba', $testNodeType);
+ $nodeB4BB = $nodeB4B->createNode('b4bb');
+ $nodeB4BB->createNode('b4bba');
$currentNodes = array();
|
[TASK] Add new test case to cover recent bug
|
neos_neos-development-collection
|
train
|
ba05ab619a7399435af7c40f46e263cfc921c299
|
diff --git a/docs/src/pages/demos/tables/BasicTable.js b/docs/src/pages/demos/tables/BasicTable.js
index <HASH>..<HASH> 100644
--- a/docs/src/pages/demos/tables/BasicTable.js
+++ b/docs/src/pages/demos/tables/BasicTable.js
@@ -58,6 +58,13 @@ function BasicTable(props) {
</TableRow>
);
})}
+ <TableRow component="a" href="#basic-table">
+ <TableCell>Oreo</TableCell>
+ <TableCell numeric>109</TableCell>
+ <TableCell numeric>7.9</TableCell>
+ <TableCell numeric>5.9</TableCell>
+ <TableCell numeric>2.6</TableCell>
+ </TableRow>
</TableBody>
</Table>
</Paper>
diff --git a/src/Table/TableRow.js b/src/Table/TableRow.js
index <HASH>..<HASH> 100644
--- a/src/Table/TableRow.js
+++ b/src/Table/TableRow.js
@@ -8,10 +8,13 @@ import withStyles from '../styles/withStyles';
export const styles = (theme: Object) => ({
root: {
+ color: 'inherit',
+ display: 'table-row',
height: 48,
'&:focus': {
outline: 'none',
},
+ verticalAlign: 'middle',
},
head: {
height: 56,
|
[TableRow] Adjust CSS for components other than <tr> (#<I>)
|
mui-org_material-ui
|
train
|
487febed0cbc31b3595b3d126e79b0c32deface5
|
diff --git a/java/core/libjoynr/src/main/java/io/joynr/capabilities/GlobalCapabilitiesDirectoryClient.java b/java/core/libjoynr/src/main/java/io/joynr/capabilities/GlobalCapabilitiesDirectoryClient.java
index <HASH>..<HASH> 100644
--- a/java/core/libjoynr/src/main/java/io/joynr/capabilities/GlobalCapabilitiesDirectoryClient.java
+++ b/java/core/libjoynr/src/main/java/io/joynr/capabilities/GlobalCapabilitiesDirectoryClient.java
@@ -8,7 +8,6 @@ import io.joynr.proxy.ProxyBuilder;
import io.joynr.proxy.ProxyBuilderDefaultImpl;
import io.joynr.proxy.ProxyInvocationHandlerFactory;
-import java.util.ArrayList;
import java.util.List;
import joynr.infrastructure.GlobalCapabilitiesDirectoryProxy;
@@ -69,7 +68,7 @@ public class GlobalCapabilitiesDirectoryClient {
}
- public void remove(ArrayList<String> newArrayList) {
+ public void remove(List<String> newArrayList) {
getProxy(TTL_30_DAYS_IN_MS).remove(newArrayList);
}
diff --git a/java/core/libjoynr/src/main/java/io/joynr/capabilities/LocalCapabilitiesDirectoryImpl.java b/java/core/libjoynr/src/main/java/io/joynr/capabilities/LocalCapabilitiesDirectoryImpl.java
index <HASH>..<HASH> 100644
--- a/java/core/libjoynr/src/main/java/io/joynr/capabilities/LocalCapabilitiesDirectoryImpl.java
+++ b/java/core/libjoynr/src/main/java/io/joynr/capabilities/LocalCapabilitiesDirectoryImpl.java
@@ -33,6 +33,7 @@ import io.joynr.proxy.Future;
import io.joynr.proxy.ProxyInvocationHandlerFactory;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
@@ -200,7 +201,7 @@ public class LocalCapabilitiesDirectoryImpl extends AbstractLocalCapabilitiesDir
//do nothing
}
};
- globalCapabilitiesClient.remove(callback, capabilityInformation.getParticipantId());
+ globalCapabilitiesClient.remove(Arrays.asList(capabilityInformation.getParticipantId()));
}
}
|
[Java] unregister capability synchronously
Previously with async unregister, the message sender was being shut
down faster than the async message was being sent, causing the
unregister to be dropped.
Change-Id: I3f7ddeed<I>dd<I>cf<I>c8e<I>
|
bmwcarit_joynr
|
train
|
243ff65d528497c3136136b0686cd028d62bbc6f
|
diff --git a/schedula/utils/drw/__init__.py b/schedula/utils/drw/__init__.py
index <HASH>..<HASH> 100644
--- a/schedula/utils/drw/__init__.py
+++ b/schedula/utils/drw/__init__.py
@@ -371,15 +371,21 @@ class FolderNode(object):
res = {}
yield from sorted(res.items())
- def _filters(self, name='filters'):
+ def _filters(self):
try:
- for i, f in enumerate(self.attr[name]):
+ for i, f in enumerate(self.attr['filters']):
yield 'filter %d' % i, f
except (AttributeError, KeyError):
pass
def _solution_filters(self):
- yield from self._filters(name='solution_filters')
+ try:
+ it = self.attr['solution_filters']
+ yield 'input_filter 0', it[0]
+ for i, f in enumerate(it[1:]):
+ yield 'output_filter %d' % i, f
+ except (AttributeError, KeyError, IndexError):
+ pass
def _remote_links(self):
attr, item = self.attr, self.folder.item
diff --git a/schedula/utils/sol.py b/schedula/utils/sol.py
index <HASH>..<HASH> 100644
--- a/schedula/utils/sol.py
+++ b/schedula/utils/sol.py
@@ -643,11 +643,14 @@ class Solution(Base, collections.OrderedDict):
return True # Return that the output have been evaluated correctly.
def _apply_filters(self, res, node_id, node_attr, attr):
- if 'filters' in node_attr and 'started' not in attr:
- attr['started'] = datetime.today()
filters = []
# Apply filters to results.
for f in node_attr.get('filters', ()):
+ if not filters:
+ if 'started' not in attr:
+ attr['started'] = datetime.today()
+ filters.append(res)
+
if isinstance(parent_func(f), SubDispatch):
out = {}
res = f(res, _sol_output=out, _sol=(node_id, self))
|
feat(sol): Add input value of filters in solution.
|
vinci1it2000_schedula
|
train
|
af016d80c2fd115a479d5b23ad5a42c94e6c50cb
|
diff --git a/implementations/micrometer-registry-elastic/src/test/java/io/micrometer/elastic/ElasticMeterRegistryTest.java b/implementations/micrometer-registry-elastic/src/test/java/io/micrometer/elastic/ElasticMeterRegistryTest.java
index <HASH>..<HASH> 100644
--- a/implementations/micrometer-registry-elastic/src/test/java/io/micrometer/elastic/ElasticMeterRegistryTest.java
+++ b/implementations/micrometer-registry-elastic/src/test/java/io/micrometer/elastic/ElasticMeterRegistryTest.java
@@ -100,8 +100,8 @@ class ElasticMeterRegistryTest {
@Test
void writeMeter() {
Timer timer = Timer.builder("myTimer").register(registry);
- assertThat(registry.writeTimer(timer))
- .contains("{ \"index\" : {} }\n{\"@timestamp\":\"1970-01-01T00:00:00.001Z\",\"name\":\"myTimer\",\"type\":\"timer\",\"count\":0,\"sum\":0.0,\"mean\":0.0,\"max\":0.0}");
+ assertThat(registry.writeMeter(timer))
+ .contains("{ \"index\" : {} }\n{\"@timestamp\":\"1970-01-01T00:00:00.001Z\",\"name\":\"myTimer\",\"type\":\"timer\",\"count\":\"0.0\",\"total\":\"0.0\",\"max\":\"0.0\"}");
}
@Test
@@ -131,4 +131,4 @@ class ElasticMeterRegistryTest {
assertThat(registry.writeCounter(c)).contains("{ \"index\" : {} }\n" +
"{\"@timestamp\":\"1970-01-01T00:00:00.001Z\",\"name\":\"counter\",\"type\":\"counter\",\"count\":0.0}");
}
-}
\ No newline at end of file
+}
|
Fix ElasticMeterRegistryTest.writeMeter()
|
micrometer-metrics_micrometer
|
train
|
409b5e6194dfb8d3e93eab47f7f7f9b76ce27ff5
|
diff --git a/src/indices/neighborhood/outbound.js b/src/indices/neighborhood/outbound.js
index <HASH>..<HASH> 100644
--- a/src/indices/neighborhood/outbound.js
+++ b/src/indices/neighborhood/outbound.js
@@ -3,6 +3,7 @@
* =========================================
*/
var typed = require('mnemonist/utils/typed-arrays');
+var createWeightGetter = require('graphology-utils/getters').createWeightGetter;
function OutboundNeighborhoodIndex(graph) {
var upperBound = graph.directedSize + graph.undirectedSize * 2;
@@ -86,7 +87,7 @@ function WeightedOutboundNeighborhoodIndex(graph, weightAttribute) {
var NeighborhoodPointerArray = typed.getPointerArray(upperBound);
var NodesPointerArray = typed.getPointerArray(graph.order);
- weightAttribute = weightAttribute || 'weight';
+ var weightGetter = createWeightGetter(weightAttribute);
// NOTE: directedSize + undirectedSize * 2 is an upper bound for
// neighborhood size
@@ -115,9 +116,7 @@ function WeightedOutboundNeighborhoodIndex(graph, weightAttribute) {
for (j = 0, m = edges.length; j < m; j++) {
edge = edges[j];
neighbor = graph.opposite(node, edge);
- weight = graph.getEdgeAttribute(edge, weightAttribute);
-
- if (typeof weight !== 'number') weight = 1;
+ weight = weightGetter(graph.getEdgeAttributes(edge));
// NOTE: for weighted mixed beware of merging weights if twice the same neighbor
this.neighborhood[n] = ids[neighbor];
diff --git a/src/indices/test/neighborhood.js b/src/indices/test/neighborhood.js
index <HASH>..<HASH> 100644
--- a/src/indices/test/neighborhood.js
+++ b/src/indices/test/neighborhood.js
@@ -152,7 +152,7 @@ describe('Neighborhood Indices', function () {
graph.mergeEdge(2, 1, {weight: 1});
graph.mergeEdge(4, 5, {weight: 34});
- var index = new WeightedOutboundNeighborhoodIndex(graph);
+ var index = new WeightedOutboundNeighborhoodIndex(graph, 'weight');
assert.deepEqual(index.neighborhood, new Uint8Array([1, 0, 2, 4]));
assert.deepEqual(index.weights, new Float64Array([3, 1, 1, 34]));
diff --git a/src/shortest-path/indexed-brandes.js b/src/shortest-path/indexed-brandes.js
index <HASH>..<HASH> 100644
--- a/src/shortest-path/indexed-brandes.js
+++ b/src/shortest-path/indexed-brandes.js
@@ -5,11 +5,11 @@
* Indexed version of the famous Brandes routine aiming at computing
* betweenness centrality efficiently.
*/
-var FixedDeque = require('mnemonist/fixed-deque'),
- FixedStack = require('mnemonist/fixed-stack'),
- Heap = require('mnemonist/heap'),
- typed = require('mnemonist/utils/typed-arrays'),
- neighborhoodIndices = require('graphology-indices/neighborhood/outbound');
+var FixedDeque = require('mnemonist/fixed-deque');
+var FixedStack = require('mnemonist/fixed-stack');
+var Heap = require('mnemonist/heap');
+var typed = require('mnemonist/utils/typed-arrays');
+var neighborhoodIndices = require('graphology-indices/neighborhood/outbound');
var OutboundNeighborhoodIndex = neighborhoodIndices.OutboundNeighborhoodIndex,
WeightedOutboundNeighborhoodIndex =
@@ -119,6 +119,8 @@ exports.createDijkstraIndexedBrandes = function createDijkstraIndexedBrandes(
graph,
weightAttribute
) {
+ if (arguments.length < 2) weightAttribute = 'weight';
+
var neighborhoodIndex = new WeightedOutboundNeighborhoodIndex(
graph,
weightAttribute
|
[indices] finer choice of weight attr for WeightedOutboundNeighborhoodIndex
|
graphology_graphology
|
train
|
8139d15e9882a92e8e9bc5e1532d978e8a7cead4
|
diff --git a/tooling/lib/version.js b/tooling/lib/version.js
index <HASH>..<HASH> 100644
--- a/tooling/lib/version.js
+++ b/tooling/lib/version.js
@@ -21,6 +21,7 @@ exports.last = async function last() {
.filter((p) => p !== `@ciscospark/eslint-config`)
.map(getDistTag)))
.sort()
+ .filter()
.map((v) => v.trim())
// TODO stop omitting v2 packages once the last once is unpublished
.filter((v) => !v.startsWith(`2.`))
|
chore(tooling): handle packages without disttags
|
webex_spark-js-sdk
|
train
|
57cd421de3b681b4e401dcc5d965671362663998
|
diff --git a/lib/go/thrift/simple_server.go b/lib/go/thrift/simple_server.go
index <HASH>..<HASH> 100644
--- a/lib/go/thrift/simple_server.go
+++ b/lib/go/thrift/simple_server.go
@@ -120,15 +120,14 @@ func (p *TSimpleServer) Listen() error {
func (p *TSimpleServer) AcceptLoop() error {
for {
- select {
- case <-p.quit:
- return nil
- default:
- }
-
client, err := p.serverTransport.Accept()
if err != nil {
- log.Println("Accept err: ", err)
+ select {
+ case <-p.quit:
+ return nil
+ default:
+ }
+ return err
}
if client != nil {
go func() {
|
THRIFT-<I> Handle errors from Accept() correctly
Client: Go
|
limingxinleo_thrift
|
train
|
84de7cafca8b850da9b43bb4f19e5058d575dbe4
|
diff --git a/core/src/main/java/org/infinispan/CacheImpl.java b/core/src/main/java/org/infinispan/CacheImpl.java
index <HASH>..<HASH> 100644
--- a/core/src/main/java/org/infinispan/CacheImpl.java
+++ b/core/src/main/java/org/infinispan/CacheImpl.java
@@ -433,8 +433,8 @@ public class CacheImpl<K, V> extends CacheSupport<K, V> implements AdvancedCache
private InvocationContext setInvocationContextFlagsAndClassLoader(InvocationContext ctx, EnumSet<Flag> explicitFlags, ClassLoader explicitClassLoader) {
if (explicitFlags != null) ctx.setFlags(explicitFlags);
- if (explicitClassLoader != null) ctx.setClassLoader(explicitClassLoader);
-
+ ctx.setClassLoader(explicitClassLoader != null ?
+ explicitClassLoader : getClassLoader());
return ctx;
}
|
ISPN-<I> Delegate on configured classloader if no explicit one given
|
infinispan_infinispan
|
train
|
00bc8ce5c5b2d791f4990eac940b3a5479a8d11e
|
diff --git a/core/src/main/java/hudson/model/AbstractBuild.java b/core/src/main/java/hudson/model/AbstractBuild.java
index <HASH>..<HASH> 100644
--- a/core/src/main/java/hudson/model/AbstractBuild.java
+++ b/core/src/main/java/hudson/model/AbstractBuild.java
@@ -425,12 +425,6 @@ public abstract class AbstractBuild<P extends AbstractProject<P,R>,R extends Abs
if (result==null) result = getResult();
if (result==null) result = Result.SUCCESS;
- if (result.isBetterOrEqualTo(Result.UNSTABLE))
- createSymlink(listener, "lastSuccessful");
-
- if (result.isBetterOrEqualTo(Result.SUCCESS))
- createSymlink(listener, "lastStable");
-
return result;
} finally {
lease.release();
@@ -532,6 +526,12 @@ public abstract class AbstractBuild<P extends AbstractProject<P,R>,R extends Abs
public final void post(BuildListener listener) throws Exception {
try {
post2(listener);
+
+ if (result.isBetterOrEqualTo(Result.UNSTABLE))
+ createSymlink(listener, "lastSuccessful");
+
+ if (result.isBetterOrEqualTo(Result.SUCCESS))
+ createSymlink(listener, "lastStable");
} finally {
// update the culprit list
HashSet<String> r = new HashSet<String>();
diff --git a/test/src/test/java/hudson/model/AbstractProjectTest.java b/test/src/test/java/hudson/model/AbstractProjectTest.java
index <HASH>..<HASH> 100644
--- a/test/src/test/java/hudson/model/AbstractProjectTest.java
+++ b/test/src/test/java/hudson/model/AbstractProjectTest.java
@@ -34,6 +34,7 @@ import hudson.scm.NullSCM;
import hudson.Launcher;
import hudson.FilePath;
import hudson.Util;
+import hudson.tasks.ArtifactArchiver;
import hudson.util.StreamTaskListener;
import hudson.util.OneShotEvent;
import java.io.IOException;
@@ -226,11 +227,34 @@ public class AbstractProjectTest extends HudsonTestCase {
assertFalse("lastStable link should be removed", lastStable.exists());
}
- private static void assertSymlinkForBuild(File file, int buildNumber) throws IOException {
+ private static void assertSymlinkForBuild(File file, int buildNumber)
+ throws IOException, InterruptedException {
assertTrue("should exist and point to something that exists", file.exists());
assertTrue("should be symlink", Util.isSymlink(file));
String s = FileUtils.readFileToString(new File(file, "log"));
- assertTrue("link should point to build #" + buildNumber + ", but log was:\n" + s,
+ assertTrue("link should point to build #" + buildNumber + ", but link was: "
+ + Util.resolveSymlink(file, TaskListener.NULL) + "\nand log was:\n" + s,
s.contains("Build #" + buildNumber + "\n"));
}
+
+ @Bug(2543)
+ public void testSymlinkForPostBuildFailure() throws Exception {
+ // Links should be updated after post-build actions when final build result is known
+ FreeStyleProject job = createFreeStyleProject();
+ job.getBuildersList().add(new Shell("echo \"Build #$BUILD_NUMBER\"\n"));
+ FreeStyleBuild build = job.scheduleBuild2(0, new Cause.UserCause()).get();
+ assertEquals(Result.SUCCESS, build.getResult());
+ File lastSuccessful = new File(job.getRootDir(), "lastSuccessful"),
+ lastStable = new File(job.getRootDir(), "lastStable");
+ // First build creates links
+ assertSymlinkForBuild(lastSuccessful, 1);
+ assertSymlinkForBuild(lastStable, 1);
+ // Archive artifacts that don't exist to create failure in post-build action
+ job.getPublishersList().add(new ArtifactArchiver("*.foo", "", false));
+ build = job.scheduleBuild2(0, new Cause.UserCause()).get();
+ assertEquals(Result.FAILURE, build.getResult());
+ // Links should not be updated since build failed
+ assertSymlinkForBuild(lastSuccessful, 1);
+ assertSymlinkForBuild(lastStable, 1);
+ }
}
|
[FIXED HUDSON-<I>] don't update lastStable/lastSuccessful symlinks until final
build result is known (after post-build actions).
git-svn-id: <URL>
|
jenkinsci_jenkins
|
train
|
49d2436abdedef1475cb8911858bf5f5b222cc60
|
diff --git a/react-native/react/tabs/more/about-render.mobile.js b/react-native/react/tabs/more/about-render.mobile.js
index <HASH>..<HASH> 100644
--- a/react-native/react/tabs/more/about-render.mobile.js
+++ b/react-native/react/tabs/more/about-render.mobile.js
@@ -1,7 +1,7 @@
'use strict'
/* @flow */
-import React, { View, Text } from 'react-native'
+import React, { View, Text, StyleSheet } from 'react-native'
import commonStyles from '../../styles/common'
export default function () {
@@ -11,3 +11,12 @@ export default function () {
</View>
)
}
+
+const styles = StyleSheet.create({
+ container: {
+ flex: 1,
+ justifyContent: 'center',
+ alignItems: 'stretch',
+ backgroundColor: '#F5FCFF'
+ }
+})
diff --git a/react-native/react/tabs/more/about.js b/react-native/react/tabs/more/about.js
index <HASH>..<HASH> 100644
--- a/react-native/react/tabs/more/about.js
+++ b/react-native/react/tabs/more/about.js
@@ -27,13 +27,3 @@ export default class About extends BaseComponent {
}
About.propTypes = {}
-
-console.log(StyleSheet)
-const styles = StyleSheet.create({
- container: {
- flex: 1,
- justifyContent: 'center',
- alignItems: 'stretch',
- backgroundColor: '#F5FCFF'
- }
-})
|
Move StyleSheet into about-render-mobile
|
keybase_client
|
train
|
308499dcad383f6bf638862e7402f4c0ea700bd7
|
diff --git a/src/tailwind.js b/src/tailwind.js
index <HASH>..<HASH> 100644
--- a/src/tailwind.js
+++ b/src/tailwind.js
@@ -1,6 +1,8 @@
const _ = require('lodash')
const postcss = require('postcss')
const cssnext = require('postcss-cssnext')
+const fs = require('fs')
+
const backgroundColors = require('./generators/background-colors')
const shadows = require('./generators/shadows')
const flex = require('./generators/flex')
@@ -49,35 +51,40 @@ function addCustomMediaQueries(css, { breakpoints }) {
}
function generateUtilities(css, options) {
- css.walkAtRules('tailwind', atRule => {
- if (atRule.params === 'utilities') {
+ const rules = []
- const rules = _.flatten([
+ css.walkAtRules(atRule => {
+ if (atRule.name === 'responsive') {
+ const nodes = atRule.nodes
+ css.insertBefore(atRule, nodes)
+ atRule.remove()
+ rules.push(...nodes)
+ }
+ if (atRule.name === 'tailwind' && atRule.params === 'utilities') {
+ const utilities = _.flatten([
backgroundColors(options),
shadows(options),
flex(),
])
-
- css.insertBefore(atRule, rules)
-
- Object.keys(options.breakpoints).forEach(breakpoint => {
- const mediaQuery = postcss.atRule({
- name: 'media',
- params: `(--breakpoint-${breakpoint})`,
- })
-
- mediaQuery.append(rules.map(rule => {
- const cloned = rule.clone()
- cloned.selector = `.${breakpoint}\\:${rule.selector.slice(1)}`
- return cloned
- }))
- css.insertBefore(atRule, mediaQuery)
- })
-
+ css.insertBefore(atRule, utilities)
atRule.remove()
- return false
+ rules.push(...utilities)
}
})
+
+ Object.keys(options.breakpoints).forEach(breakpoint => {
+ const mediaQuery = postcss.atRule({
+ name: 'media',
+ params: `(--breakpoint-${breakpoint})`,
+ })
+
+ mediaQuery.append(rules.map(rule => {
+ const cloned = rule.clone()
+ cloned.selector = `.${breakpoint}\\:${rule.selector.slice(1)}`
+ return cloned
+ }))
+ css.append(mediaQuery)
+ })
}
function substituteClassMixins(css) {
|
Always generate responsive rules at end of stylesheet
|
tailwindcss_tailwindcss
|
train
|
41ecd779ca3668a927ce9057ce554f340e950281
|
diff --git a/src/Illuminate/Http/Request.php b/src/Illuminate/Http/Request.php
index <HASH>..<HASH> 100644
--- a/src/Illuminate/Http/Request.php
+++ b/src/Illuminate/Http/Request.php
@@ -451,9 +451,9 @@ class Request extends SymfonyRequest implements Arrayable, ArrayAccess
$request->headers->replace($from->headers->all());
- $request->setLocale($from->getLocale());
+ $request->setRequestLocale($from->getLocale());
- $request->setDefaultLocale($from->getDefaultLocale());
+ $request->setDefaultRequestLocale($from->getDefaultLocale());
$request->setJson($from->json());
@@ -573,6 +573,28 @@ class Request extends SymfonyRequest implements Arrayable, ArrayAccess
}
/**
+ * Set the locale for the request instance.
+ *
+ * @param string $locale
+ * @return void
+ */
+ public function setRequestLocale(string $locale)
+ {
+ $this->locale = $locale;
+ }
+
+ /**
+ * Set the default locale for the request instance.
+ *
+ * @param string $locale
+ * @return void
+ */
+ public function setDefaultRequestLocale(string $locale)
+ {
+ $this->defaultLocale = $locale;
+ }
+
+ /**
* Get the user making the request.
*
* @param string|null $guard
|
[9.x] Fix overriding global locale (#<I>)
* Fix overriding global locale
* wip
* formatting
* Update Request.php
|
laravel_framework
|
train
|
c30fc9c3896b46be8f0054ada527af8c758fe97e
|
diff --git a/app/templates/ssr/index.js b/app/templates/ssr/index.js
index <HASH>..<HASH> 100644
--- a/app/templates/ssr/index.js
+++ b/app/templates/ssr/index.js
@@ -42,6 +42,37 @@ extension.extendApp({ app })
// this should be last get(), rendering with SSR
app.get('*', (req, res) => {
res.setHeader('Content-Type', 'text/html')
+
+ // SECURITY HEADERS
+ // read more about headers here: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers
+ // the following headers help protect your site from common XSS attacks in browsers that respect headers
+ // you will probably want to use .env variables to drop in appropriate URLs below,
+ // and potentially look here for inspiration:
+ // https://ponyfoo.com/articles/content-security-policy-in-express-apps
+
+ // https://developer.mozilla.org/en-us/docs/Web/HTTP/Headers/X-Frame-Options
+ // res.setHeader('X-frame-options', 'SAMEORIGIN') // one of DENY | SAMEORIGIN | ALLOW-FROM https://example.com
+
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection
+ // res.setHeader('X-XSS-Protection', 1)
+
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options
+ // res.setHeader('X-Content-Type-Options', 'nosniff')
+
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin
+ // res.setHeader('Access-Control-Allow-Origin', '*') // one of '*', '<origin>' where origin is one SINGLE origin
+
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-DNS-Prefetch-Control
+ // res.setHeader('X-DNS-Prefetch-Control', 'off') // may be slower, but stops some leaks
+
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy
+ // // res.setHeader('Content-Security-Policy', 'default-src https:')
+
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/sandbox
+ // res.setHeader('Content-Security-Policy', 'sandbox') // this will lockdown your server!!!
+ // here are a few that you might like to consider adding to your CSP
+ // object-src, media-src, script-src, frame-src, unsafe-inline
+
ssr.renderToString({ req, res }, (err, html) => {
if (err) {
if (err.url) {
diff --git a/docs/src-ssr/index.js b/docs/src-ssr/index.js
index <HASH>..<HASH> 100644
--- a/docs/src-ssr/index.js
+++ b/docs/src-ssr/index.js
@@ -42,6 +42,18 @@ extension.extendApp({ app })
// this should be last get(), rendering with SSR
app.get('*', (req, res) => {
res.setHeader('Content-Type', 'text/html')
+ // https://developer.mozilla.org/en-us/docs/Web/HTTP/Headers/X-Frame-Options
+ res.setHeader('X-frame-options', 'SAMEORIGIN')
+
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection
+ res.setHeader('X-XSS-Protection', 1)
+
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options
+ res.setHeader('X-Content-Type-Options', 'nosniff')
+
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy
+ res.setHeader('Content-Security-Policy', 'default-src https:')
+
ssr.renderToString({ req, res }, (err, html) => {
if (err) {
if (err.url) {
|
feat(security:SSR) Add headers
This PR adds optional security headers to the src-sst/index.js
template file (which merely need to be uncommented) and more
importantly adds several critical ones to the docs.
|
quasarframework_quasar
|
train
|
20360d4e60ee8925f213dd140c328a7ed775cd50
|
diff --git a/poet/poet.py b/poet/poet.py
index <HASH>..<HASH> 100755
--- a/poet/poet.py
+++ b/poet/poet.py
@@ -13,7 +13,7 @@ spits out Homebrew resource stanzas.
from __future__ import print_function
import argparse
from collections import OrderedDict
-from hashlib import sha1
+from hashlib import sha256
import json
import sys
import urllib2
@@ -28,7 +28,7 @@ FORMULA_TEMPLATE = Template(
"""class {{ package.name|capitalize }} < Formula
homepage "{{ package.homepage }}"
url "{{ package.url }}"
- sha1 "{{ package.checksum }}"
+ sha256 "{{ package.checksum }}"
{% if resources %}
{% for resource in resources %}
@@ -80,8 +80,8 @@ def research_package(name, version=None):
if url['packagetype'] == 'sdist':
d['url'] = url['url']
f = urllib2.urlopen(url['url'])
- d['checksum'] = sha1(f.read()).hexdigest()
- d['checksum_type'] = 'sha1'
+ d['checksum'] = sha256(f.read()).hexdigest()
+ d['checksum_type'] = 'sha256'
f.close()
break
return d
|
Replace sha1 with sha<I>
|
tdsmith_homebrew-pypi-poet
|
train
|
aacaf611492ddc602d1dc3a42942893729350b89
|
diff --git a/regenmaschine/program.py b/regenmaschine/program.py
index <HASH>..<HASH> 100644
--- a/regenmaschine/program.py
+++ b/regenmaschine/program.py
@@ -45,9 +45,21 @@ class Program:
return cast(List[Dict[str, Any]], data["programs"])
async def start(self, program_id: int) -> Dict[str, Any]:
- """Start a program."""
- return await self._request("post", f"program/{program_id}/start")
+ """Start a program.
+
+ Note that in addition to including it in the query URL, the program ID must be
+ provided in the request body to accommodate 1st generation controllers.
+ """
+ return await self._request(
+ "post", f"program/{program_id}/start", json={"pid": program_id}
+ )
async def stop(self, program_id: int) -> Dict[str, Any]:
- """Stop a program."""
- return await self._request("post", f"program/{program_id}/stop")
+ """Stop a program.
+
+ Note that in addition to including it in the query URL, the program ID must be
+ provided in the request body to accommodate 1st generation controllers.
+ """
+ return await self._request(
+ "post", f"program/{program_id}/stop", json={"pid": program_id}
+ )
diff --git a/regenmaschine/zone.py b/regenmaschine/zone.py
index <HASH>..<HASH> 100644
--- a/regenmaschine/zone.py
+++ b/regenmaschine/zone.py
@@ -64,9 +64,21 @@ class Zone:
return {**results[0], **results[1]}
async def start(self, zone_id: int, time: int) -> Dict[str, Any]:
- """Start a program."""
- return await self._request("post", f"zone/{zone_id}/start", json={"time": time})
+ """Start a zone.
+
+ Note that in addition to including it in the query URL, the zone ID must be
+ provided in the request body to accommodate 1st generation controllers.
+ """
+ return await self._request(
+ "post", f"zone/{zone_id}/start", json={"time": time, "zid": zone_id}
+ )
async def stop(self, zone_id: int) -> Dict[str, Any]:
- """Stop a program."""
- return await self._request("post", f"zone/{zone_id}/stop")
+ """Stop a zone.
+
+ Note that in addition to including it in the query URL, the zone ID must be
+ provided in the request body to accommodate 1st generation controllers.
+ """
+ return await self._request(
+ "post", f"zone/{zone_id}/stop", json={"zid": zone_id}
+ )
|
Fix bug with programs/zones on 1st generation controllers (#<I>)
|
bachya_regenmaschine
|
train
|
778ed75954b774f32bfc271e03e219a2d3eb3a9c
|
diff --git a/kafka_scanner/__init__.py b/kafka_scanner/__init__.py
index <HASH>..<HASH> 100644
--- a/kafka_scanner/__init__.py
+++ b/kafka_scanner/__init__.py
@@ -480,9 +480,9 @@ class KafkaScanner(object):
if not self.must_delete_record(record):
yield record
- def _process_records(self, records):
- for record in records:
- yield self.process_record(record)
+ def _process_offsetmsgs(self, omsgs):
+ for omsg in omsgs:
+ yield self.process_offsetmsg(omsg)
def get_new_batch(self):
@@ -496,7 +496,7 @@ class KafkaScanner(object):
self._filter_deleted_records,
self.processor.processor_handlers.decompress_messages,
self.processor.processor_handlers.unpack_messages,
- self._process_records]:
+ self._process_offsetmsgs]:
pipeline = processor(pipeline)
log.info("Last offsets: {}".format(repr(self.consumer.offsets)))
@@ -568,6 +568,11 @@ class KafkaScanner(object):
def process_record(self, record):
return record
+ def process_offsetmsg(self, omsg):
+ record = omsg.get('record', {})
+ record['_key'] = omsg['_key']
+ return self.process_record(record)
+
def are_there_messages_to_process(self):
if self._lower_offsets is None:
return True
diff --git a/kafka_scanner/msg_processor_handlers.py b/kafka_scanner/msg_processor_handlers.py
index <HASH>..<HASH> 100644
--- a/kafka_scanner/msg_processor_handlers.py
+++ b/kafka_scanner/msg_processor_handlers.py
@@ -56,7 +56,7 @@ class MsgProcessorHandlers(object):
key = pmsg['_key']
partition = pmsg['partition']
offset = pmsg['offset']
- msg = pmsg['message']
+ msg = pmsg.pop('message')
if msg:
try:
record = msgpack.unpackb(msg, encoding=self.__encoding)
@@ -65,9 +65,9 @@ class MsgProcessorHandlers(object):
continue
else:
if isinstance(record, dict):
- record['_key'] = key
- yield record
+ pmsg['record'] = record
+ yield pmsg
else:
log.info('Record {} has wrong type'.format(key))
else:
- yield {'_key': key}
+ yield pmsg
|
move availability of offset and partition to scanner overridable
method
|
scrapinghub_kafka-scanner
|
train
|
3b845caba0fbd556502a19f0376fb1aeae7e8d85
|
diff --git a/service-worker-registration/index.js b/service-worker-registration/index.js
index <HASH>..<HASH> 100644
--- a/service-worker-registration/index.js
+++ b/service-worker-registration/index.js
@@ -2,8 +2,8 @@ import {
addSuccessHandler
} from 'ember-service-worker/service-worker-registration'
-addSuccessHandler(function emberServiceWorkerUpdateNotifyRegistration(reg) {
- window.hasServiceWorkerUpdate = new Promise(function hasServiceWorkerUpdate(resolve) {
+window.hasServiceWorkerUpdate = new Promise(function hasServiceWorkerUpdate(resolve) {
+ addSuccessHandler(function emberServiceWorkerUpdateNotifyRegistration(reg) {
reg.onupdatefound = function serviceWorkerHasFoundUpdate() {
const { installing } = reg
|
Immediately add the promise to the window object
|
topaxi_ember-service-worker-update-notify
|
train
|
3e2c740bf32c2559e6f22503503268a7ed80f1ff
|
diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go
index <HASH>..<HASH> 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go
@@ -127,7 +127,7 @@ func (rp *ReplicatorPlan) buildFromFields(tableName string, lastpk *sqltypes.Res
tpb.colExprs = append(tpb.colExprs, cexpr)
}
// The following actions are a subset of buildTablePlan.
- if err := tpb.analyzePK(rp.ColInfoMap); err != nil {
+ if err := tpb.analyzePK(rp.ColInfoMap[tableName]); err != nil {
return nil, err
}
return tpb.generate(), nil
|
adapt to replication_plan_builder refactor
|
vitessio_vitess
|
train
|
a95b41f8917755a098b807f7926710bb6966458a
|
diff --git a/pygeoip/__init__.py b/pygeoip/__init__.py
index <HASH>..<HASH> 100644
--- a/pygeoip/__init__.py
+++ b/pygeoip/__init__.py
@@ -324,7 +324,7 @@ class GeoIP(GeoIPBase):
"""
seek_country = self._seek_country(ipnum)
if seek_country == self._databaseSegments:
- return {}
+ return None
read_length = (2 * self._recordLength - 1) * self._databaseSegments
self._lock.acquire()
|
Return None from _get_record() when missing data #<I>
|
appliedsec_pygeoip
|
train
|
507eaaf8c16690efdc009669a567c90af13db626
|
diff --git a/tests/test_nudatus.py b/tests/test_nudatus.py
index <HASH>..<HASH> 100644
--- a/tests/test_nudatus.py
+++ b/tests/test_nudatus.py
@@ -41,7 +41,7 @@ def test_mangle_script():
script = f.read()
assert len(script) > 0
with open('tests/bigscript_mangled.py') as f:
- real_mangled = f.read().encode('utf-8')
+ real_mangled = f.read()
assert len(real_mangled) > 0
mangled = nudatus.mangle(script)
assert mangled == real_mangled
|
In tests, compare str to str, not str to bytes
|
ZanderBrown_nudatus
|
train
|
5455b4a6e9a4a1c171cba02919029df3e6a03ae7
|
diff --git a/pyemma/_base/logging.py b/pyemma/_base/logging.py
index <HASH>..<HASH> 100644
--- a/pyemma/_base/logging.py
+++ b/pyemma/_base/logging.py
@@ -91,10 +91,5 @@ class Loggable(object):
return remove_logger
def __getstate__(self):
- # do not pickle the logger instance
- d = dict(self.__dict__)
- try:
- del d['_logger_instance']
- except KeyError:
- pass
- return d
+ # do not pickle Loggable's stuff (we want to recreate instead)
+ return {}
|
do not save state of loggable
|
markovmodel_PyEMMA
|
train
|
de2f9bcfd6f8ab756d7196308fc1ab9695b1c7df
|
diff --git a/elifetools/rawJATS.py b/elifetools/rawJATS.py
index <HASH>..<HASH> 100644
--- a/elifetools/rawJATS.py
+++ b/elifetools/rawJATS.py
@@ -373,7 +373,10 @@ def sub_article(soup, article_type=None):
return extract_nodes(soup, "sub-article", attr = "article-type", value = article_type)
def decision_letter(soup):
- return first(sub_article(soup, "article-commentary"))
+ tag = first(sub_article(soup, "article-commentary"))
+ if not tag:
+ tag = first(sub_article(soup, "decision-letter"))
+ return tag
def author_response(soup):
return first(sub_article(soup, "reply"))
diff --git a/elifetools/tests/test_raw_parser.py b/elifetools/tests/test_raw_parser.py
index <HASH>..<HASH> 100644
--- a/elifetools/tests/test_raw_parser.py
+++ b/elifetools/tests/test_raw_parser.py
@@ -103,7 +103,8 @@ class TestJatsParser(unittest.TestCase):
@data(
("elife-kitchen-sink.xml", bs4.element.Tag),
("elife_poa_e06828.xml", type(None)),
- ("elife07586.xml", type(None))
+ ("elife07586.xml", type(None)),
+ ("elife-00666.xml", bs4.element.Tag)
)
def test_decision_letter(self, filename, expected_type):
soup = parser.parse_document(sample_xml(filename))
|
Alternate decision letter naming in the new kitchen sink.
|
elifesciences_elife-tools
|
train
|
b81053f9b75bfa15c7f2d65160129fd3fa65cb5a
|
diff --git a/modules/wycs/src/wycs/io/WycsFileStructuredPrinter.java b/modules/wycs/src/wycs/io/WycsFileStructuredPrinter.java
index <HASH>..<HASH> 100644
--- a/modules/wycs/src/wycs/io/WycsFileStructuredPrinter.java
+++ b/modules/wycs/src/wycs/io/WycsFileStructuredPrinter.java
@@ -257,7 +257,7 @@ public class WycsFileStructuredPrinter {
} else {
firstTime = false;
}
- writeWithoutBraces(wf,p.first());
+ writeWithBraces(wf,p.first());
if(p.second() != null) {
out.print(" in ");
writeWithoutBraces(wf,p.second(),indent);
|
WYCS: structure wycs file printing is a hit, I think.
|
Whiley_WhileyCompiler
|
train
|
f60e285677025c8bf65cd4659b00861a3d243aae
|
diff --git a/sos/utils.py b/sos/utils.py
index <HASH>..<HASH> 100644
--- a/sos/utils.py
+++ b/sos/utils.py
@@ -778,18 +778,21 @@ class ActivityNotifier(threading.Thread):
self.start()
def run(self):
- prog = ProgressBar(desc=self.msg, position=0, bar_format='{desc}', total=100000000)
- while True:
- self.event.wait(self.delay)
- if self.event.is_set():
+ prog = None
+ while True:
+ self.event.wait(self.delay)
+ if self.event.is_set():
+ if prog:
prog.close()
- break
- second_elapsed = time.time() - self.start_time
- prog.set_description(self.msg + ' ({}{})'.format(
- '' if second_elapsed < 86400 else '{} day{} '
- .format(int(second_elapsed/86400), 's' if second_elapsed > 172800 else ''),
- time.strftime('%H:%M:%S', time.gmtime(second_elapsed)) ))
- prog.update(1)
+ break
+ if not prog:
+ prog = ProgressBar(desc=self.msg, position=0, bar_format='{desc}', total=100000000)
+ second_elapsed = time.time() - self.start_time
+ prog.set_description(self.msg + ' ({}{})'.format(
+ '' if second_elapsed < 86400 else '{} day{} '
+ .format(int(second_elapsed/86400), 's' if second_elapsed > 172800 else ''),
+ time.strftime('%H:%M:%S', time.gmtime(second_elapsed)) ))
+ prog.update(1)
def stop(self):
self.event.set()
|
Fix progress bar that is messed up by ActivityNotifier #<I>
|
vatlab_SoS
|
train
|
8834a99362ec9dc602b6bb0b6f5e050ee6ca5883
|
diff --git a/examples/shutdown.rb b/examples/shutdown.rb
index <HASH>..<HASH> 100644
--- a/examples/shutdown.rb
+++ b/examples/shutdown.rb
@@ -1,4 +1,4 @@
-# This script allows you to shutdown the bot on command
+# This bot doesn't do anything except for letting a specifically authorised user shutdown the bot on command.
require 'discordrb'
|
Clarify the shutdown.rb description
|
meew0_discordrb
|
train
|
c7bce2d81f23260513b95d89179f4cc683115566
|
diff --git a/http.go b/http.go
index <HASH>..<HASH> 100644
--- a/http.go
+++ b/http.go
@@ -3,6 +3,7 @@ package veneur
import (
"hash/fnv"
"net/http"
+ "net/http/pprof"
"sort"
"time"
@@ -21,6 +22,13 @@ func (s *Server) Handler() http.Handler {
mux.Handle(pat.Post("/import"), handleImport(s))
+ // TODO match without trailing slash as well
+ mux.Handle(pat.Get("/debug/pprof/*"), http.HandlerFunc(pprof.Index))
+ mux.Handle(pat.Get("/debug/pprof/cmdline"), http.HandlerFunc(pprof.Cmdline))
+ mux.Handle(pat.Get("/debug/pprof/profile"), http.HandlerFunc(pprof.Profile))
+ mux.Handle(pat.Get("/debug/pprof/symbol"), http.HandlerFunc(pprof.Symbol))
+ mux.Handle(pat.Get("/debug/pprof/trace"), http.HandlerFunc(pprof.Trace))
+
return mux
}
diff --git a/server.go b/server.go
index <HASH>..<HASH> 100644
--- a/server.go
+++ b/server.go
@@ -5,6 +5,7 @@ import (
"net"
"net/http"
"sync"
+ "syscall"
"time"
"github.com/DataDog/datadog-go/statsd"
@@ -17,6 +18,8 @@ import (
"github.com/getsentry/raven-go"
"github.com/zenazn/goji/bind"
"github.com/zenazn/goji/graceful"
+
+ "github.com/pkg/profile"
)
// VERSION stores the current veneur version.
@@ -49,6 +52,8 @@ type Server struct {
plugins []plugin
pluginMtx sync.Mutex
+
+ debug bool
}
// NewFromConfig creates a new veneur server from a configuration specification.
@@ -83,6 +88,7 @@ func NewFromConfig(conf Config) (ret Server, err error) {
if conf.Debug {
log.Level = logrus.DebugLevel
+ ret.debug = conf.Debug
}
log.Hooks.Add(sentryHook{
c: ret.sentry,
@@ -263,11 +269,25 @@ func (s *Server) ReadSocket(packetPool *sync.Pool, reuseport bool) {
// HTTPServe starts the HTTP server and listens perpetually until it encounters an unrecoverable error.
func (s *Server) HTTPServe() {
+ var prf *profile.Profile
+
+ once := sync.Once{}
+
+ if s.debug {
+ prf := profile.Start()
+ defer once.Do(prf.Stop)
+ }
httpSocket := bind.Socket(s.HTTPAddr)
graceful.Timeout(10 * time.Second)
graceful.PreHook(func() {
+
+ if prf != nil {
+ once.Do(prf.Stop)
+ }
+
log.Info("Terminating HTTP listener")
})
+ graceful.AddSignal(syscall.SIGUSR2, syscall.SIGHUP)
graceful.HandleSignals()
log.WithField("address", s.HTTPAddr).Info("HTTP server listening")
bind.Ready()
|
Expose live profiling information when running in debug mode
|
stripe_veneur
|
train
|
b0a5d4c266ed4117282decf06dc5053190423230
|
diff --git a/common/src/main/java/io/netty/util/concurrent/DefaultPromise.java b/common/src/main/java/io/netty/util/concurrent/DefaultPromise.java
index <HASH>..<HASH> 100644
--- a/common/src/main/java/io/netty/util/concurrent/DefaultPromise.java
+++ b/common/src/main/java/io/netty/util/concurrent/DefaultPromise.java
@@ -60,7 +60,7 @@ public class DefaultPromise<V> extends AbstractFuture<V> implements Promise<V> {
*
* Threading - synchronized(this). We must support adding listeners when there is no EventExecutor.
*/
- private Object listeners;
+ private volatile Object listeners;
/**
* Threading - synchronized(this). We are required to hold the monitor to use Java's underlying wait()/notifyAll().
*/
@@ -417,7 +417,6 @@ public class DefaultPromise<V> extends AbstractFuture<V> implements Promise<V> {
}
private void notifyListeners() {
- // Modifications to listeners should be done in a synchronized block before this, and should be visible here.
if (listeners == null) {
return;
}
|
Fix improper synchronization in DefaultPromise. Fixes #<I>
Motivation:
A race detector found that DefaultPromise.listeners is improperly synchronized [1].
Worst case a listener will not be executed when the promise is completed.
Modifications:
Make DefaultPromise.listeners a volatile.
Result:
Hopefully, DefaultPromise is more correct under concurrent execution.
[1] <URL>
|
netty_netty
|
train
|
e8a707e761dcfb2974863ddf80a46cbec84d07bf
|
diff --git a/js/front.js b/js/front.js
index <HASH>..<HASH> 100644
--- a/js/front.js
+++ b/js/front.js
@@ -77,6 +77,7 @@ kiwi.front = {
var netsel = $('#kiwi .formconnectwindow .network'),
netport = $('#kiwi .formconnectwindow .port'),
netssl = $('#kiwi .formconnectwindow .ssl'),
+ netpass = $('#kiwi .formconnectwindow .password'),
nick = $('#kiwi .formconnectwindow .nick'),
tmp;
@@ -93,7 +94,7 @@ kiwi.front = {
kiwi.front.ui.doLayout();
try {
- kiwi.front.run('/connect ' + netsel.val() + ' ' + netport.val() + ' ' + (netssl.attr('checked') ? 'true' : ''));
+ kiwi.front.run('/connect ' + netsel.val() + ' ' + netport.val() + ' ' + (netssl.attr('checked') ? 'true' : 'false') + ' ' + netpass.val());
} catch (e) {
console.log(e);
}
@@ -238,7 +239,7 @@ kiwi.front = {
case '/connect':
case '/server':
if (typeof parts[1] === 'undefined') {
- alert('Usage: /connect servername [port] [ssl]');
+ alert('Usage: /connect servername [port] [ssl] [password]');
break;
}
@@ -253,7 +254,7 @@ kiwi.front = {
}
Tabview.getCurrentTab().addMsg(null, ' ', '=== Connecting to ' + parts[1] + ' on port ' + parts[2] + (parts[3] ? ' using SSL' : '') + '...', 'status');
- kiwi.gateway.connect(parts[1], parts[2], parts[3]);
+ kiwi.gateway.connect(parts[1], parts[2], parts[3], parts[4]);
break;
case '/nick':
diff --git a/js/gateway.js b/js/gateway.js
index <HASH>..<HASH> 100644
--- a/js/gateway.js
+++ b/js/gateway.js
@@ -16,7 +16,7 @@ kiwi.gateway = {
}
},
- connect: function (host, port, ssl, callback) {
+ connect: function (host, port, ssl, password, callback) {
if (typeof kiwi.gateway.kiwi_server !== 'undefined') {
kiwi.gateway.socket = io.connect(kiwi_server, {
'try multiple transports': true,
@@ -49,7 +49,7 @@ kiwi.gateway = {
kiwi.gateway.socket.emit('message', {sid: this.session_id, data: $.toJSON(data)}, callback);
};
- kiwi.gateway.socket.emit('irc connect', kiwi.gateway.nick, host, port, ssl, callback);
+ kiwi.gateway.socket.emit('irc connect', kiwi.gateway.nick, host, port, ssl, password, callback);
console.log("kiwi.gateway.socket.on('connect')");
});
kiwi.gateway.socket.on('too_many_connections', function () {
diff --git a/node/app.js b/node/app.js
index <HASH>..<HASH> 100644
--- a/node/app.js
+++ b/node/app.js
@@ -766,7 +766,7 @@ this.websocketConnection = function (websocket) {
-this.websocketIRCConnect = function (websocket, nick, host, port, ssl, callback) {
+this.websocketIRCConnect = function (websocket, nick, host, port, ssl, password, callback) {
var ircSocket;
//setup IRC connection
if (!ssl) {
@@ -799,6 +799,9 @@ this.websocketIRCConnect = function (websocket, nick, host, port, ssl, callback)
if ((kiwi.config.webirc) && (kiwi.config.webirc_pass[host])) {
websocket.sendServerLine('WEBIRC ' + kiwi.config.webirc_pass[host] + ' KiwiIRC ' + websocket.kiwi.hostname + ' ' + websocket.kiwi.address);
}
+ if (password) {
+ websocket.sendServerLine('PASS ' + password);
+ }
websocket.sendServerLine('CAP LS');
websocket.sendServerLine('NICK ' + nick);
websocket.sendServerLine('USER kiwi_' + nick.replace(/[^0-9a-zA-Z\-_.]/, '') + ' 0 0 :' + nick);
diff --git a/node/client/index.html.jade b/node/client/index.html.jade
index <HASH>..<HASH> 100644
--- a/node/client/index.html.jade
+++ b/node/client/index.html.jade
@@ -45,15 +45,18 @@ html(lang="en-gb")
div.content.bottom
ul
li
- label(for="network") Server:
- input(type="text", id="network", name="network", class="network", value=server)
- li
label(for="channel") Channel:
input(type="text", id="channel", name="channel", class="channel", value="#kiwiirc")
+ li
+ label(for="network") Server:
+ input(type="text", id="network", name="network", class="network", value=server)
li(class="section")
label(for="port") Port:
input(type="text", id="port", name="port", class="port", value=port)
li
+ label(for="password") Server password:
+ input(type="text", id="password", name="password", class="password")
+ li
label(for="ssl") SSL:
- if (ssl)
input(type="checkbox", id="ssl", name="ssl", class="ssl", checked)
|
Adding support for password protected servers. Issue #<I>
|
prawnsalad_KiwiIRC
|
train
|
787b5b9a750ca8d8659fb33ae64189ea4e482cd3
|
diff --git a/code/email/Order_Email.php b/code/email/Order_Email.php
index <HASH>..<HASH> 100644
--- a/code/email/Order_Email.php
+++ b/code/email/Order_Email.php
@@ -3,11 +3,20 @@
/**
* @Description: Email spefically for communicating with customer about order.
- * @package: ecommerce
+ *
* @authors: Silverstripe, Jeremy, Nicolaas
+ *
+ * @package: ecommerce
+ * @sub-package: email
+ *
**/
-class Order_Email extends Email {
+class Order_Email::get_subject extends Email {
+
+ static function get_from_email() {$sc = DataObject::get_one("SiteConfig"); if($sc && $sc->ReceiptEmail) {return $sc->ReceiptEmail;} else {return Email::getAdminEmail();} }
+
+ static function get_subject() {$sc = DataObject::get_one("SiteConfig"); if($sc && $sc->ReceiptSubject) {return $sc->ReceiptSubject;} else {return "Shop Sale Information {OrderNumber}"; } }
+
protected static $send_all_emails_plain = false;
function set_send_all_emails_plain(boolean $b) {self::$send_all_emails_plain = $b;}
|
Order_Email: added comments, introduced get_from_email and get_subject (moved from Order).
|
silvershop_silvershop-core
|
train
|
3f721d292b8a962d925a0b4813e74749fd323ebf
|
diff --git a/src/main/java/com/crawljax/util/Helper.java b/src/main/java/com/crawljax/util/Helper.java
index <HASH>..<HASH> 100644
--- a/src/main/java/com/crawljax/util/Helper.java
+++ b/src/main/java/com/crawljax/util/Helper.java
@@ -512,33 +512,6 @@ public final class Helper {
}
/**
- * Get the contents of a file.
- *
- * @param file
- * The name of the file.
- * @return The contents as a String.
- */
- public static String getContent(File file) {
- StringBuilder contents = new StringBuilder();
-
- try {
- BufferedReader input = new BufferedReader(new FileReader(file));
- try {
- String line = null; // not declared within while loop
- while ((line = input.readLine()) != null) {
- contents.append(line);
- }
- } finally {
- input.close();
- }
- } catch (IOException e) {
- LOGGER.error(e.getMessage(), e);
- }
-
- return contents.toString();
- }
-
- /**
* @param string
* The original string.
* @param regex
@@ -732,7 +705,7 @@ public final class Helper {
* File to read out.
* @return Contents including line-endings.
*/
- public static String getContentWithLineEndings(File file) {
+ public static String getContent(File file) {
StringBuilder contents = new StringBuilder();
try {
|
Fix #<I> by removing the getContent that loses line endings because it doesn't seem to be used anywhere.
|
crawljax_crawljax
|
train
|
2cd9516285424e17f07b4e60db50b2f4781946d4
|
diff --git a/python_modules/dagster/dagster_tests/core_tests/storage_tests/utils/event_log_storage.py b/python_modules/dagster/dagster_tests/core_tests/storage_tests/utils/event_log_storage.py
index <HASH>..<HASH> 100644
--- a/python_modules/dagster/dagster_tests/core_tests/storage_tests/utils/event_log_storage.py
+++ b/python_modules/dagster/dagster_tests/core_tests/storage_tests/utils/event_log_storage.py
@@ -234,6 +234,12 @@ class TestEventLogStorage:
assert storage.get_logs_for_run("bar") == []
def can_wipe(self):
+ # Whether the storage is allowed to wipe the event log
+ return True
+
+ def can_watch(self):
+ # whether the storage is allowed to subscribe to runs
+ # for event log updates
return True
def test_event_log_storage_store_events_and_wipe(self, storage):
@@ -294,6 +300,9 @@ class TestEventLogStorage:
reason="watchdog's default MacOSX FSEventsObserver sometimes fails to pick up changes",
)
def test_event_log_storage_watch(self, storage):
+ if self.can_watch():
+ pytest.skip("storage cannot watch runs")
+
watched = []
watcher = lambda x: watched.append(x) # pylint: disable=unnecessary-lambda
|
[easy] Add can_watch() to TestEventLogStorage
Summary: I missed this because the relevant test doesn't run locally, oops.
Test Plan: BK
Reviewers: alangenfeld, jordansanders, sashank, johann
Reviewed By: jordansanders
Differential Revision: <URL>
|
dagster-io_dagster
|
train
|
87da6baf42e32b8fe55a8565d3c72acff4280eb1
|
diff --git a/rejson/client.py b/rejson/client.py
index <HASH>..<HASH> 100644
--- a/rejson/client.py
+++ b/rejson/client.py
@@ -5,6 +5,7 @@ from redis.client import Pipeline
from redis._compat import (long, nativestr)
from .path import Path
+
def str_path(p):
"Returns the string representation of a path if it is of class Path"
if isinstance(p, Path):
@@ -12,6 +13,7 @@ def str_path(p):
else:
return p
+
def bulk_of_jsons(d):
"Replace serialized JSON values with objects in a bulk array response (list)"
def _f(b):
@@ -21,6 +23,7 @@ def bulk_of_jsons(d):
return b
return _f
+
class Client(StrictRedis):
"""
This class subclasses redis-py's `StrictRedis` and implements ReJSON's
@@ -111,7 +114,13 @@ class Client(StrictRedis):
else:
for p in args:
pieces.append(str_path(p))
- return self.execute_command('JSON.GET', *pieces)
+
+ # Handle case where key doesn't exist. The JSONDecoder would raise a
+ # TypeError exception since it can't decode None
+ try:
+ return self.execute_command('JSON.GET', *pieces)
+ except TypeError:
+ return None
def jsonmget(self, path, *args):
"""
diff --git a/tests/test_rejson.py b/tests/test_rejson.py
index <HASH>..<HASH> 100644
--- a/tests/test_rejson.py
+++ b/tests/test_rejson.py
@@ -21,6 +21,7 @@ class ReJSONTestCase(TestCase):
self.assertTrue(rj.jsonset('foo', Path.rootPath(), 'bar'))
self.assertEqual('bar', rj.jsonget('foo'))
+ self.assertEqual(None, rj.jsonget('baz'))
self.assertEqual(1, rj.jsondel('foo'))
self.assertFalse(rj.exists('foo'))
|
Add exception handler for .jsonget for non-existent keys (taken in PR#<I>, from @cjtapper) + added the test coverage necessary for the merge
|
RedisJSON_rejson-py
|
train
|
d802fe305788eee9de59eb7c3c636d31910582ef
|
diff --git a/cmd/examples/kafka.go b/cmd/examples/kafka.go
index <HASH>..<HASH> 100644
--- a/cmd/examples/kafka.go
+++ b/cmd/examples/kafka.go
@@ -42,14 +42,14 @@ func main() {
go func() {
for {
select {
- case consumedMessage := <-consumer.Incoming:
+ case consumedMessage := <-consumer.Incoming():
log.Info(string(consumedMessage.GetData()), nil)
- producer.Output <- consumedMessage.GetData()
+ producer.Output() <- consumedMessage.GetData()
consumedMessage.Commit()
- case errorMessage := <-consumer.Errors:
+ case errorMessage := <-consumer.Errors():
log.Error(fmt.Errorf("Aborting"), log.Data{"messageReceived": errorMessage})
- producer.Closer <- true
- consumer.Closer <- true
+ producer.Closer() <- true
+ consumer.Closer() <- true
exitChannel <- true
return
}
diff --git a/kafka/consumer-group.go b/kafka/consumer-group.go
index <HASH>..<HASH> 100644
--- a/kafka/consumer-group.go
+++ b/kafka/consumer-group.go
@@ -12,10 +12,10 @@ import (
var tick = time.Millisecond * 4000
type ConsumerGroup struct {
- Consumer *cluster.Consumer
- Incoming chan Message
- Closer chan bool
- Errors chan error
+ consumer *cluster.Consumer
+ incoming chan Message
+ closer chan bool
+ errors chan error
}
type Message struct {
@@ -31,6 +31,22 @@ func (M Message) Commit() {
M.consumer.MarkOffset(M.message, "metadata")
}
+func (cg ConsumerGroup) Consumer() *cluster.Consumer {
+ return cg.consumer
+}
+
+func (cg ConsumerGroup) Incoming() chan Message {
+ return cg.incoming
+}
+
+func (cg ConsumerGroup) Errors() chan error {
+ return cg.errors
+}
+
+func (cg ConsumerGroup) Closer() chan bool {
+ return cg.closer
+}
+
func NewConsumerGroup(brokers []string, topic string, group string, offset int64) (*ConsumerGroup, error) {
config := cluster.NewConfig()
config.Group.Return.Notifications = true
@@ -44,31 +60,31 @@ func NewConsumerGroup(brokers []string, topic string, group string, offset int64
}
cg := ConsumerGroup{
- Consumer: consumer,
- Incoming: make(chan Message),
- Closer: make(chan bool),
- Errors: make(chan error),
+ consumer: consumer,
+ incoming: make(chan Message),
+ closer: make(chan bool),
+ errors: make(chan error),
}
go func() {
- defer cg.Consumer.Close()
+ defer cg.Consumer().Close()
log.Info(fmt.Sprintf("Started kafka consumer of topic %q group %q", topic, group), nil)
for {
select {
- case err := <-cg.Consumer.Errors():
+ case err := <-cg.Consumer().Errors():
log.Error(err, nil)
- cg.Errors <- err
+ cg.Errors() <- err
default:
select {
- case msg := <-cg.Consumer.Messages():
- cg.Incoming <- Message{msg, cg.Consumer}
- case n, more := <-cg.Consumer.Notifications():
+ case msg := <-cg.Consumer().Messages():
+ cg.Incoming() <- Message{msg, cg.Consumer()}
+ case n, more := <-cg.Consumer().Notifications():
if more {
log.Trace("Rebalancing group", log.Data{"topic": topic, "group": group, "partitions": n.Current[topic]})
}
case <-time.After(tick):
- cg.Consumer.CommitOffsets()
- case <-cg.Closer:
+ cg.Consumer().CommitOffsets()
+ case <-cg.Closer():
log.Info(fmt.Sprintf("Closing kafka consumer of topic %q group %q", topic, group), nil)
return
}
diff --git a/kafka/global.go b/kafka/global.go
index <HASH>..<HASH> 100644
--- a/kafka/global.go
+++ b/kafka/global.go
@@ -2,6 +2,7 @@ package kafka
import (
"github.com/Shopify/sarama"
+ cluster "github.com/bsm/sarama-cluster"
)
const OffsetNewest = sarama.OffsetNewest
@@ -10,3 +11,18 @@ func SetMaxMessageSize(maxSize int32) {
sarama.MaxRequestSize = maxSize
sarama.MaxResponseSize = maxSize
}
+
+// MessageConsumer provides a generic interface for consuming []byte messages
+type MessageConsumer interface {
+ Consumer() *cluster.Consumer
+ Incoming() chan Message
+ Closer() chan bool
+ Errors() chan error
+}
+
+// MessageProducer provides a generic interface for producing []byte messages
+type MessageProducer interface {
+ Producer() sarama.AsyncProducer
+ Output() chan Message
+ Closer() chan bool
+}
diff --git a/kafka/producer.go b/kafka/producer.go
index <HASH>..<HASH> 100644
--- a/kafka/producer.go
+++ b/kafka/producer.go
@@ -7,8 +7,16 @@ import (
type Producer struct {
producer sarama.AsyncProducer
- Output chan []byte
- Closer chan bool
+ output chan []byte
+ closer chan bool
+}
+
+func (producer Producer) Output() chan []byte {
+ return producer.output
+}
+
+func (producer Producer) Closer() chan bool {
+ return producer.closer
}
func NewProducer(brokers []string, topic string, envMax int) Producer {
|
make the kafka lib more interface-friendly, and hence test-friendly
|
ONSdigital_go-ns
|
train
|
9cf2640ab2251939d4bd600320af857c2c8eebb8
|
diff --git a/lib/pkgcloud/core/compute/bootstrapper.js b/lib/pkgcloud/core/compute/bootstrapper.js
index <HASH>..<HASH> 100644
--- a/lib/pkgcloud/core/compute/bootstrapper.js
+++ b/lib/pkgcloud/core/compute/bootstrapper.js
@@ -167,8 +167,9 @@ Bootstrapper.prototype.createServer = function (options) {
}
//
- // Remark: This only works on rackspace, but it shouldn't do
- // any harm on other providers
+ // Remark: If there are any parameters specific to this
+ // compute provider then set them appropriately before
+ // creating the server.
//
if (self.compute.bootstrapOptions) {
utile.mixin(createOptions, self.compute.bootstrapOptions(options, keys));
|
[doc] Update code docs for bootstrapping
|
pkgcloud_pkgcloud
|
train
|
04edbb0703142f792522e29a557069a3e52705f5
|
diff --git a/node/config.go b/node/config.go
index <HASH>..<HASH> 100644
--- a/node/config.go
+++ b/node/config.go
@@ -365,12 +365,11 @@ func (c *Config) TrusterNodes() []*discover.Node {
// parsePersistentNodes parses a list of discovery node URLs loaded from a .json
// file from within the data directory.
-func (c *Config) parsePersistentNodes(file string) []*discover.Node {
+func (c *Config) parsePersistentNodes(path string) []*discover.Node {
// Short circuit if no node config is present
if c.DataDir == "" {
return nil
}
- path := filepath.Join(c.DataDir, file)
if _, err := os.Stat(path); err != nil {
return nil
}
|
node: Remove redundant filepath.Join in parsePersistentNodes (#<I>)
|
ethereum_go-ethereum
|
train
|
b9b85b14ed04801886b4ad2d2987dbe2f046e792
|
diff --git a/DependencyInjection/NelmioCorsExtension.php b/DependencyInjection/NelmioCorsExtension.php
index <HASH>..<HASH> 100644
--- a/DependencyInjection/NelmioCorsExtension.php
+++ b/DependencyInjection/NelmioCorsExtension.php
@@ -65,7 +65,9 @@ class NelmioCorsExtension extends Extension
} elseif (isset($opts['allow_headers'])) {
$opts['allow_headers'] = array_map('strtolower', $opts['allow_headers']);
}
- $opts['allow_methods'] = array_map('strtoupper', $opts['allow_methods']);
+ if (isset($opts['allow_methods'])) {
+ $opts['allow_methods'] = array_map('strtoupper', $opts['allow_methods']);
+ }
$config['paths'][$path] = $opts;
}
|
Fix invalid call when allow_methods is not set
|
nelmio_NelmioCorsBundle
|
train
|
02b4131ce2f807912303a779aa5885ce6e671147
|
diff --git a/lib/plugins/aws/package/compile/events/apiGateway/lib/permissions.js b/lib/plugins/aws/package/compile/events/apiGateway/lib/permissions.js
index <HASH>..<HASH> 100644
--- a/lib/plugins/aws/package/compile/events/apiGateway/lib/permissions.js
+++ b/lib/plugins/aws/package/compile/events/apiGateway/lib/permissions.js
@@ -30,7 +30,7 @@ module.exports = {
':',
{ Ref: 'AWS::AccountId' },
':',
- { Ref: this.apiGatewayRestApiLogicalId },
+ { Ref: this.provider.naming.getRestApiLogicalId() },
'/*/*',
],
],
diff --git a/lib/plugins/aws/package/compile/events/apiGateway/lib/usagePlan.js b/lib/plugins/aws/package/compile/events/apiGateway/lib/usagePlan.js
index <HASH>..<HASH> 100644
--- a/lib/plugins/aws/package/compile/events/apiGateway/lib/usagePlan.js
+++ b/lib/plugins/aws/package/compile/events/apiGateway/lib/usagePlan.js
@@ -15,7 +15,7 @@ module.exports = {
ApiStages: [
{
ApiId: {
- Ref: this.apiGatewayRestApiLogicalId,
+ Ref: this.provider.naming.getRestApiLogicalId(),
},
Stage: this.provider.getStage(),
},
|
Fix load restApiLogicalId when we have enabled Shared API Gateway
|
serverless_serverless
|
train
|
a46fec4a588305587aa88b85a10404dcbf1e8468
|
diff --git a/plugins/update.py b/plugins/update.py
index <HASH>..<HASH> 100644
--- a/plugins/update.py
+++ b/plugins/update.py
@@ -16,12 +16,18 @@ class Update(HumanBasePlugin):
must_update = plugin.update_version_check()
if must_update:
- plugin.update_version()
+ new_vf = plugin.update_version()
+ with open(plugin.versions_file, 'w') as f:
+ print(f, f.write)
+ f.write(new_vf.str_pretty())
+
+ self.msg('Updated %s.' % plugin_name)
+
else:
self.msg('%s is up to date.' % plugin_name.capitalize())
except AttributeError:
- self.msg('Skipping "%s" because update_version_check() or update_version() is not defined.' % plugin_name)
+ self.msg('Skipping %s because update_version_check() or update_version() is not defined.' % plugin_name)
def load():
handler.register(Update)
diff --git a/tests/update_tests.py b/tests/update_tests.py
index <HASH>..<HASH> 100644
--- a/tests/update_tests.py
+++ b/tests/update_tests.py
@@ -4,7 +4,7 @@ from common.testutils import decallmethods
from common.update_api import github_tags_newer, github_repo, _github_normalize
from common.update_api import GitRepo
from common.update_api import GH, UW
-from mock import patch, MagicMock
+from mock import patch, MagicMock, mock_open
from plugins.update import Update
from tests import BaseTest
import common
@@ -269,11 +269,42 @@ class UpdateTests(BaseTest):
assert args[0] in files
def test_update_calls_plugin(self):
- self.gh_mock()
- m = self.mock_controller('drupal', 'update_version_check', return_value=True)
- ret_val = (self.gr, VersionsFile('tests/resources/update_versions.xml'), ['7.34', '6.34'])
+ md5 = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
+ files = ['misc/drupal.js', 'misc/tabledrag.js', 'misc/ajax.js']
+ self.mock_md5_file.return_value = md5
+
+ vf = VersionsFile('tests/resources/update_versions.xml')
+ versions = ['7.34', '6.34']
+ ret_val = (self.gr, vf, versions)
+
with patch('common.update_api.github_repo_new', return_value=ret_val) as m:
- assert False, "check VersionsFile gets updated"
- assert m.called
+ fpv_before = vf.files_per_version()
+ out = self.scanner.update_version()
+ fpv_after = vf.files_per_version()
+
+ assert len(fpv_before) == len(fpv_after) - len(versions)
+ for v in versions:
+ assert v in fpv_after
+ assert fpv_after[v] == files
+
+ def test_writes_vf(self):
+ vf = MagicMock()
+ xml = 'new_xml_string'
+ vf.str_pretty.return_value = xml
+
+ o = mock_open()
+ with patch('plugins.update.open', o, create=True):
+
+ uvc = self.mock_controller('drupal', 'update_version_check', return_value=True)
+ uv = self.mock_controller('drupal', 'update_version', return_value=vf)
+
+ self.updater.update()
+
+ args, kwargs = o.call_args
+ assert args[0] == self.scanner.versions_file
+ assert args[1] == 'w'
+
+ args, kwargs = o().write.call_args
+ assert args[0] == xml
|
Add functionality for writing new VF to the update process.
|
droope_droopescan
|
train
|
641e3f3bf0185abe35b162950a194590925160c6
|
diff --git a/lib/service-manager.js b/lib/service-manager.js
index <HASH>..<HASH> 100644
--- a/lib/service-manager.js
+++ b/lib/service-manager.js
@@ -61,7 +61,7 @@ prototype.handle = function(req, res, next) {
// On first run of pm, the default models will be created in the DB (a single
// executor, etc.). On next run, any existing processes will be marked as
-// stopped (since on statup PM has no children).
+// stopped (since on startup PM has no children).
// XXX(sam) Its not true that there are no children in the Docker case... the
// docker driver will have to report status on any live children it finds,
// and the processes will become alive again.
@@ -92,14 +92,20 @@ prototype.initOrUpdateDb = function(meshApp, callback) {
function stopStaleProcesses(err) {
if (err) return callback(err);
- Process.find({where: {stopReason: ''}}, function(err, procs) {
+ Process.find(function(err, procs) {
if (err) return callback(err);
async.each(procs, stopProcess, callback);
});
}
function stopProcess(proc, callback) {
- debug('mark stopped: pid %s wid %d', proc.pid, proc.workerId);
+ // Anything other than null, undefined, or '' is a reason.
+ if (proc.stopReason == null || proc.stopReason === '') {
+ console.log('NOT mark stopped: pid %j wid %j reason %j',
+ proc.pid, proc.workerId, proc.stopReason);
+ return callback();
+ }
+ debug('mark stopped: pid %j wid %j', proc.pid, proc.workerId);
proc.stopReason = 'StrongLoop Process Manager was stopped';
proc.stopTime = new Date();
proc.save(callback);
|
Mark processes dead more robustly on startup
It used to be only a value of '' was considered to be not-stopped, now,
null and undefined are also considered not-stopped.
|
strongloop_strong-pm
|
train
|
f6d53abba83b298c069578d867612e3d5e062c67
|
diff --git a/client/objects/GitHubCommitCommitAuthor.php b/client/objects/GitHubCommitCommitAuthor.php
index <HASH>..<HASH> 100644
--- a/client/objects/GitHubCommitCommitAuthor.php
+++ b/client/objects/GitHubCommitCommitAuthor.php
@@ -17,6 +17,44 @@ class GitHubCommitCommitAuthor extends GitHubObject
'email' => 'string',
));
}
-
+
+ /**
+ * @var string
+ */
+ protected $name;
+
+ /**
+ * @var string
+ */
+ protected $date;
+
+ /**
+ * @var string
+ */
+ protected $email;
+
+ /**
+ * @return string
+ */
+ public function getName()
+ {
+ return $this->name;
+ }
+
+ /**
+ * @return string
+ */
+ public function getDate()
+ {
+ return $this->date;
+ }
+
+ /**
+ * @return string
+ */
+ public function getEmail()
+ {
+ return $this->email;
+ }
}
|
Complete GitHubCommitCommitAuthor.php
Fill in some missing code.
|
tan-tan-kanarek_github-php-client
|
train
|
431bba3c8af5e651bceec0283d8850a25ea51d0f
|
diff --git a/numbuf/numbuf/__init__.py b/numbuf/numbuf/__init__.py
index <HASH>..<HASH> 100644
--- a/numbuf/numbuf/__init__.py
+++ b/numbuf/numbuf/__init__.py
@@ -13,7 +13,17 @@ If you are using Anaconda, try fixing this problem by running:
try:
from numbuf.libnumbuf import *
except ImportError as e:
- if not hasattr(e, "msg") or not isinstance(e.msg, str) or not "GLIBCXX" in e.msg:
- raise
- e.msg += helpful_message
+ if hasattr(e, "msg") and isinstance(e.msg, str) and "GLIBCXX" in e.msg:
+ # This code path should be taken with Python 3.
+ e.msg += helpful_message
+ elif hasattr(e, "message") and isinstance(e.message, str) and "GLIBCXX" in e.message:
+ # This code path should be taken with Python 2.
+ if hasattr(e, "args") and isinstance(e.args, tuple) and len(e.args) == 1 and isinstance(e.args[0], str):
+ e.args = (e.args[0] + helpful_message,)
+ else:
+ if not hasattr(e, "args"):
+ e.args = ()
+ elif not isinstance(e.args, tuple):
+ e.args = (e.args,)
+ e.args += (helpful_message,)
raise
|
Catch numbuf glibcxx error on python 2. (#<I>)
|
ray-project_ray
|
train
|
659712b797c1728a69044f09c9ee2381c0e82cfe
|
diff --git a/src/sap.ui.core/src/sap/ui/model/odata/v2/ODataModel.js b/src/sap.ui.core/src/sap/ui/model/odata/v2/ODataModel.js
index <HASH>..<HASH> 100644
--- a/src/sap.ui.core/src/sap/ui/model/odata/v2/ODataModel.js
+++ b/src/sap.ui.core/src/sap/ui/model/odata/v2/ODataModel.js
@@ -6132,7 +6132,7 @@ sap.ui.define([
}
this._writePathCache(sResolvedPath, sCanonicalPath);
- return sCanonicalPath;
+ return sCanonicalPath || sResolvedPath; //defaulkt to resolved path to be compatible
}
return sResolvedPath;
};
|
[FIX] v2.ODataModel: resolve canonical shouldn't return undefined
to stay compatible resolving canonical should return
the non canonical path as fallback.
Change-Id: Iefb<I>ef<I>bd<I>ed<I>def<I>fe
BCP: <I>
|
SAP_openui5
|
train
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.