| hash (string, 40 chars) | diff (string, 131–114k chars) | message (string, 7–980 chars) | project (string, 5–67 chars) | split (1 class: "train") |
|---|---|---|---|---|
f106b5cfb70fb6af9e6725aca48a7767500bbac8
|
diff --git a/src/Engine/SocketIO/Version1X.php b/src/Engine/SocketIO/Version1X.php
index <HASH>..<HASH> 100644
--- a/src/Engine/SocketIO/Version1X.php
+++ b/src/Engine/SocketIO/Version1X.php
@@ -299,7 +299,9 @@ class Version1X extends AbstractSocketIO
//remove message '40' from buffer, emmiting by socket.io after receiving EngineInterface::UPGRADE
if ($this->options['version'] === 2) {
- $this->read();
+ if (stream_get_meta_data($this->stream)["unread_bytes"] !== 0) {
+ $this->read();
+ }
}
}
|
Update Version1X.php
Only try to read bytes if there are bytes to read. Otherwise the app enters a blocking state waiting for stream bytes and times out after <I> seconds.
|
Wisembly_elephant.io
|
train
|
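The guard above only calls `read()` when `unread_bytes` is non-zero, so the client never blocks on an empty stream. A minimal Python sketch of the same idea (hypothetical helper, not part of elephant.io) polls the socket before reading:

```python
import select
import socket

def read_if_pending(sock: socket.socket) -> bytes:
    # Only read when data is already buffered; otherwise return
    # immediately instead of blocking until the stream times out.
    readable, _, _ = select.select([sock], [], [], 0)
    return sock.recv(4096) if readable else b""
```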
346d292b9d94b2c80dcfe79d87c2b350ee3320ba
|
diff --git a/lib/create-espower-visitor.js b/lib/create-espower-visitor.js
index <HASH>..<HASH> 100644
--- a/lib/create-espower-visitor.js
+++ b/lib/create-espower-visitor.js
@@ -42,44 +42,6 @@ var helperTemplate = babelTemplate([
'});'
].join('\n'));
-function createHelperNameNode(nodePath, types) {
- var file = nodePath.hub.file;
- var programScope = nodePath.scope.getProgramParent();
- var helperNameNode = programScope.generateUidIdentifier('powerAssertRecorder');
- define(helperNameNode, {_generatedByEspower: true});
- file.set('powerAssertCaptureHelper', helperNameNode);
- var helperFunctionNode = types.toExpression(helperTemplate());
- estraverse.traverse(helperFunctionNode, {
- keys: types.VISITOR_KEYS,
- enter: function (currentNode) {
- define(currentNode, {_generatedByEspower: true});
- }
- });
- helperFunctionNode._compact = true;
- programScope.push({id: helperNameNode, init: helperFunctionNode});
- return helperNameNode;
-}
-
-function getCaptureHelperNameNode(nodePath, types) {
- var file = nodePath.hub.file;
- var helperNameNode = file.get('powerAssertCaptureHelper');
- if (!helperNameNode) {
- helperNameNode = createHelperNameNode(nodePath, types);
- }
- return helperNameNode;
-}
-
-function createNewCapturer(nodePath, types) {
- var helperNameNode = getCaptureHelperNameNode(nodePath, types);
- var capturerIdent = nodePath.scope.generateUidIdentifier('rec');
- define(capturerIdent, {_generatedByEspower: true});
- var init = types.callExpression(helperNameNode, []);
- define(init, {_generatedByEspower: true});
- nodePath.scope.push({id: capturerIdent, init: init});
- return capturerIdent;
-}
-
-
function BabelEspowerVisitor (babel, opts) {
this.babel = babel;
this.esTreePath = require('./babel-estree-path')(babel);
@@ -101,11 +63,11 @@ BabelEspowerVisitor.prototype.enter = function (nodePath) {
nodePath.skip();
return;
}
- if (!assertionVisitor.isCapturingArgument() && !isCalleeOfParentCallExpression(types, parentNode, currentKey)) {
+ if (!assertionVisitor.isCapturingArgument() && !this.isCalleeOfParentCallExpression(parentNode, currentKey)) {
// entering argument
assertionVisitor.enterArgument(nodePath);
if (assertionVisitor.isCapturingArgument()) {
- assertionVisitor.powerAssertCalleeObject = createNewCapturer(nodePath, types);
+ assertionVisitor.powerAssertCalleeObject = this.createNewCapturer(nodePath);
}
}
} else if (types.isCallExpression(currentNode)) {
@@ -176,9 +138,50 @@ BabelEspowerVisitor.prototype.exit = function (nodePath) {
}
};
-function isCalleeOfParentCallExpression(types, parentNode, currentKey) {
+BabelEspowerVisitor.prototype.createNewCapturer = function (nodePath) {
+ var types = this.babel.types;
+ var helperNameNode = this.getCaptureHelperNameNode(nodePath);
+ var capturerIdent = nodePath.scope.generateUidIdentifier('rec');
+ define(capturerIdent, {_generatedByEspower: true});
+ var init = types.callExpression(helperNameNode, []);
+ define(init, {_generatedByEspower: true});
+ nodePath.scope.push({id: capturerIdent, init: init});
+ return capturerIdent;
+};
+
+BabelEspowerVisitor.prototype.getCaptureHelperNameNode = function (nodePath) {
+ var file = nodePath.hub.file;
+ var helperNameNode = file.get('powerAssertCaptureHelper');
+ if (!helperNameNode) {
+ helperNameNode = this.createHelperNameNode(nodePath);
+ }
+ return helperNameNode;
+};
+
+BabelEspowerVisitor.prototype.createHelperNameNode = function (nodePath) {
+ var types = this.babel.types;
+ var file = nodePath.hub.file;
+ var programScope = nodePath.scope.getProgramParent();
+ var helperNameNode = programScope.generateUidIdentifier('powerAssertRecorder');
+ define(helperNameNode, {_generatedByEspower: true});
+ file.set('powerAssertCaptureHelper', helperNameNode);
+ var helperFunctionNode = types.toExpression(helperTemplate());
+ estraverse.traverse(helperFunctionNode, {
+ keys: types.VISITOR_KEYS,
+ enter: function (currentNode) {
+ define(currentNode, {_generatedByEspower: true});
+ }
+ });
+ helperFunctionNode._compact = true;
+ programScope.push({id: helperNameNode, init: helperFunctionNode});
+ return helperNameNode;
+};
+
+BabelEspowerVisitor.prototype.isCalleeOfParentCallExpression = function (parentNode, currentKey) {
+ var types = this.babel.types;
return types.isCallExpression(parentNode) && currentKey === 'callee';
-}
+};
+
module.exports = function createEspowerVisitor (babel, options) {
var opts = extend(espower.defaultOptions(), {
|
refactor(babel-plugin-espower): move more logic to methods
|
power-assert-js_babel-plugin-espower
|
train
|
a5898319d30443c18c524194858dcd6db702fbb5
|
diff --git a/base.php b/base.php
index <HASH>..<HASH> 100644
--- a/base.php
+++ b/base.php
@@ -213,7 +213,7 @@ final class Base {
$parts=$this->cut($key);
$jar=$this->hive['JAR'];
if ($ttl)
- $jar['expire']=$ttl;
+ $jar['expire']=time()+$ttl;
call_user_func_array('setcookie',
array_merge(array($parts[1],$val),$jar));
}
|
Adjust set() method to use $ttl relative to current time (Feature request #<I>)
|
bcosca_fatfree-core
|
train
|
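The one-line fix turns `$ttl` from an absolute timestamp into a duration relative to now, since `setcookie()`'s expire argument expects an absolute Unix time. The same conversion in Python (illustrative only):

```python
import time

def cookie_expire(ttl_seconds: int) -> int:
    # setcookie-style APIs want an absolute timestamp, so anchor the
    # relative TTL to the current time.
    return int(time.time()) + ttl_seconds
```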
e1459cf0b10132567890c88dfc48265fc581275f
|
diff --git a/internetarchive/iacli/ia_upload.py b/internetarchive/iacli/ia_upload.py
index <HASH>..<HASH> 100755
--- a/internetarchive/iacli/ia_upload.py
+++ b/internetarchive/iacli/ia_upload.py
@@ -52,7 +52,11 @@ def _upload_files(args, identifier, local_file, upload_kwargs):
sys.stdout.write('{0}:\n'.format(item.identifier))
try:
- response = item.upload({args['--remote-name']: local_file}, **upload_kwargs)
+ if args['--remote-name']:
+ files = {args['--remote-name']: local_file}
+ else:
+ files = local_file
+ response = item.upload(files, **upload_kwargs)
except HTTPError as exc:
response = [exc.response]
if not response[0]:
|
Fixed --remote-name when uploading from stdin.
|
jjjake_internetarchive
|
train
|
f0c3ffb6ae759cad6098c3cd079b1654508f1268
|
diff --git a/test/plugin/test_out_google_cloud.rb b/test/plugin/test_out_google_cloud.rb
index <HASH>..<HASH> 100644
--- a/test/plugin/test_out_google_cloud.rb
+++ b/test/plugin/test_out_google_cloud.rb
@@ -1049,7 +1049,7 @@ class GoogleCloudOutputTest < Test::Unit::TestCase
assert value.is_a?(String), "Value #{value} for label #{key} " \
'is not a string: ' + value.class.name
assert expected_labels.key?(key), "Unexpected label #{key} => #{value}"
- assert_equal value, expected_labels[key], 'Value mismatch - expected ' \
+ assert_equal expected_labels[key], value, 'Value mismatch - expected ' \
"#{expected_labels[key]} in #{key} => #{value}"
end
assert_equal expected_labels.length, all_labels.length, 'Expected ' \
|
Fix misordered expectation and value in assert statement.
This makes the error message label the expectation and actual value
correctly rather than mixing them up.
|
GoogleCloudPlatform_fluent-plugin-google-cloud
|
train
|
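Ruby's `assert_equal` takes the expected value first; swapping the arguments doesn't change pass/fail, only which side the failure message labels as expected. The same convention in Python's unittest (illustrative sketch with a hypothetical helper):

```python
import unittest

def label_for(key):  # hypothetical stand-in for the code under test
    return {"env": "prod"}[key]

class LabelTest(unittest.TestCase):
    def test_label_value(self):
        # Expected value first, actual second: on failure the report
        # then correctly labels which side was the expectation.
        self.assertEqual("prod", label_for("env"))

if __name__ == "__main__":
    unittest.main()
```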
6112012fb7ab39b7670095a19635254547c439ba
|
diff --git a/functions/functions-twig.php b/functions/functions-twig.php
index <HASH>..<HASH> 100644
--- a/functions/functions-twig.php
+++ b/functions/functions-twig.php
@@ -7,7 +7,9 @@
if (is_array($uri)){
$loaders = array();
foreach($uri as $u){
- $loaders[] = new Twig_Loader_Filesystem($u.'/views/');
+ if (strlen(trim($u))){
+ $loaders[] = new Twig_Loader_Filesystem($u.'/views/');
+ }
}
$loader = new Twig_Loader_Chain($loaders);
} else {
|
added a catch for when theme is inactive
|
timber_timber
|
train
|
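The patch skips blank entries so `Twig_Loader_Filesystem` never receives an empty path (which happens when a theme is inactive). The same filtering in Python (illustrative, with a hypothetical `Loader` class passed in):

```python
def build_loaders(uris, Loader):
    # Skip blank/whitespace-only entries, e.g. from an inactive theme.
    return [Loader(u + "/views/") for u in uris if u.strip()]
```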
6f39c3769b2b7aeb6346bb1e1a8e8e7f2d06823f
|
diff --git a/build/index.js b/build/index.js
index <HASH>..<HASH> 100644
--- a/build/index.js
+++ b/build/index.js
@@ -18,8 +18,18 @@ var _isStamp = require('./is-stamp');
var _isStamp2 = _interopRequireDefault(_isStamp);
+var _isDescriptor = require('./is-descriptor');
+
+var _isDescriptor2 = _interopRequireDefault(_isDescriptor);
+
+var _isComposable = require('./is-composable');
+
+var _isComposable2 = _interopRequireDefault(_isComposable);
+
exports['default'] = _compose2['default'];
exports.compose = _compose2['default'];
-exports.init = _init2['default'];
exports.isStamp = _isStamp2['default'];
+exports.isDescriptor = _isDescriptor2['default'];
+exports.isComposable = _isComposable2['default'];
+exports.init = _init2['default'];
//# sourceMappingURL=index.js.map
\ No newline at end of file
diff --git a/build/index.js.map b/build/index.js.map
index <HASH>..<HASH> 100644
--- a/build/index.js.map
+++ b/build/index.js.map
@@ -1 +1 @@
-{"version":3,"sources":["../source/index.js"],"names":[],"mappings":";;;;;;;;uBAAoB,WAAW;;;;oBACd,QAAQ;;;;uBACL,YAAY;;;;;QAGZ,OAAO;QACV,IAAI;QACD,OAAO","file":"index.js","sourcesContent":["import compose from './compose';\nimport init from './init';\nimport isStamp from './is-stamp';\n\nexport default compose;\nexport { compose as compose };\nexport { init as init };\nexport { isStamp as isStamp };\n"]}
\ No newline at end of file
+{"version":3,"sources":["../source/index.js"],"names":[],"mappings":";;;;;;;;uBAAoB,WAAW;;;;oBACd,QAAQ;;;;uBACL,YAAY;;;;4BACP,iBAAiB;;;;4BACjB,iBAAiB;;;;;QAItB,OAAO;QACP,OAAO;QACF,YAAY;QACZ,YAAY;QACpB,IAAI","file":"index.js","sourcesContent":["import compose from './compose';\nimport init from './init';\nimport isStamp from './is-stamp';\nimport isDescriptor from './is-descriptor';\nimport isComposable from './is-composable';\n\n\nexport default compose;\nexport { compose as compose };\nexport { isStamp as isStamp };\nexport { isDescriptor as isDescriptor };\nexport { isComposable as isComposable };\nexport { init as init };\n"]}
\ No newline at end of file
diff --git a/source/is-descriptor/index.js b/source/is-descriptor/index.js
index <HASH>..<HASH> 100644
--- a/source/is-descriptor/index.js
+++ b/source/is-descriptor/index.js
@@ -8,6 +8,7 @@ export default (obj) => {
obj.staticProperties ||
obj.deepStaticProperties ||
obj.staticPropertyDescriptors ||
+ obj.initializers ||
obj.configuration
) && true);
};
diff --git a/test/is-descriptor/index.js b/test/is-descriptor/index.js
index <HASH>..<HASH> 100644
--- a/test/is-descriptor/index.js
+++ b/test/is-descriptor/index.js
@@ -4,7 +4,7 @@ import compose from '../../source/compose';
import isDescriptor from '../../source/is-descriptor';
test('isDescriptor', nest => {
- nest.test('...with descriptor', assert => {
+ nest.test('...with descriptor.properties', assert => {
const msg = 'should return true for descriptors';
const descriptor = {
properties: {
@@ -18,6 +18,22 @@ test('isDescriptor', nest => {
assert.end();
});
+ nest.test('...with descriptor.initializers', assert => {
+ const msg = 'should return true for descriptors';
+ const descriptor = {
+ initializers: [
+ ({ instance }) => {
+ instance.foo = 'bar';
+ }
+ ]
+ };
+ const actual = isDescriptor(descriptor);
+ const expected = true;
+
+ assert.equal(actual, expected, msg);
+ assert.end();
+ });
+
nest.test('...with stamp', assert => {
const msg = 'should return false for stamps';
const stamp = compose();
|
isDescriptor should work with initializers
|
stampit-org_stamp-utils
|
train
|
3505089a3ca9881e4810ef1af9f3a0dc69385780
|
diff --git a/internal/runner/ui.go b/internal/runner/ui.go
index <HASH>..<HASH> 100644
--- a/internal/runner/ui.go
+++ b/internal/runner/ui.go
@@ -46,7 +46,7 @@ func (u *runnerUI) Interactive() bool {
// arguments should be interpolations for the format string. After the
// interpolations you may add Options.
func (u *runnerUI) Output(msg string, raw ...interface{}) {
- msg, style, _ := terminal.Interpret(msg, raw...)
+ msg, style, disableNewline, _ := terminal.Interpret(msg, raw...)
// Extreme java looking code alert!
ev := &vagrant_server.RunnerJobStreamRequest{
@@ -56,8 +56,9 @@ func (u *runnerUI) Output(msg string, raw ...interface{}) {
{
Event: &vagrant_server.GetJobStreamResponse_Terminal_Event_Line_{
Line: &vagrant_server.GetJobStreamResponse_Terminal_Event_Line{
- Msg: msg,
- Style: style,
+ Msg: msg,
+ Style: style,
+ DisableNewLine: disableNewline,
},
},
},
|
Allow runner UI to disable new lines
|
hashicorp_vagrant
|
train
|
6c380da0f8ac769e8222285976aab9ebda8c217e
|
diff --git a/odtbrain/_version.py b/odtbrain/_version.py
index <HASH>..<HASH> 100644
--- a/odtbrain/_version.py
+++ b/odtbrain/_version.py
@@ -38,31 +38,37 @@ def git_describe():
return GIT_REVISION
+
def save_version(version):
data="""#!/usr/bin/env python
# This file was created automatically.
-version="{VERSION}"
+longversion="{VERSION}"
"""
with open(join(dirname(abspath(__file__)), "_version_save.py"), "w") as fd:
fd.write(data.format(VERSION=version))
-
+# Determine the accurate version
try:
if exists(join(dirname(dirname(abspath(__file__))), ".git")):
# Get the version using `git describe`
- version = git_describe()
- # Save the version to `_version_save.py` to allow distribution using
- # `python setup.py sdist` in case
- save_version(version)
+ longversion = git_describe()
else:
# If this is not a git repository, then we should be able to
# get the version from the previously generated `_version_save.py`
from . import _version_save # @UnresolvedImport
- version = _version_save.version
+ longversion = _version_save.longversion
+
except:
print("Could not determine version. Reason:")
print(traceback.format_exc())
ctime = os.stat(__file__)[8]
- tstr = time.strftime("%Y-%m-%d_%H.%M.%S", time.gmtime(ctime))
+ tstr = time.strftime("%Y.%m.%d-%H-%M-%S", time.gmtime(ctime))
version = "unknown_{}".format(tstr)
- print("Using creation time to determine version: {}".format(version))
\ No newline at end of file
+ print("Using creation time to determine version: {}".format(version))
+
+# Save the version to `_version_save.py` to allow distribution using
+# `python setup.py sdist`.
+save_version(longversion)
+
+# PEP 440-conform version:
+version = "-".join(longversion.split("-")[:2])
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,7 @@ for diffraction tomogrpahy. Visit the home page for more information.
sys.path.insert(0, realpath(dirname(__file__))+"/"+name)
-from _version import version
+from _version import shortversion as version
class PyTest(Command):
|
versioning conforms with PEP<I>
|
RI-imaging_ODTbrain
|
train
|
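The new last line derives a short version from the `git describe` output by keeping only the first two dash-separated fields. Illustrative Python showing the effect on a typical describe string (the example value is assumed):

```python
def short_version(longversion: str) -> str:
    # "0.1.2-3-gabcdef" -> "0.1.2-3": drop the trailing commit hash.
    return "-".join(longversion.split("-")[:2])

print(short_version("0.1.2-3-gabcdef"))  # 0.1.2-3
```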
df0cdfd4e833ac2aae22ee2f540f0661ae589789
|
diff --git a/lib/arjdbc/mysql/adapter.rb b/lib/arjdbc/mysql/adapter.rb
index <HASH>..<HASH> 100644
--- a/lib/arjdbc/mysql/adapter.rb
+++ b/lib/arjdbc/mysql/adapter.rb
@@ -72,7 +72,7 @@ module ActiveRecord
end
def error_number(exception)
- exception.errno if exception.respond_to? :errno
+ exception.error_code if exception.is_a?(JDBCError)
end
def create_table(table_name, **options) #:nodoc:
|
[mysql] extract sql error code from JDBC error only
|
jruby_activerecord-jdbc-adapter
|
train
|
80c52475241cd72dc87478ccc8637163ee9637c1
|
diff --git a/index.js b/index.js
index <HASH>..<HASH> 100644
--- a/index.js
+++ b/index.js
@@ -172,7 +172,9 @@ KafkaLogger.prototype.log = function(level, msg, meta, callback) {
break;
}
}
- logMessage.ts = timestamp;
+ if (!logMessage.ts) {
+ logMessage.ts = timestamp;
+ }
logMessage.level = level;
logMessage.msg = msg;
logMessage.fields = meta;
|
Adding a check for the ts field: if it's already set, we don't overwrite it
|
uber_kafka-logger
|
train
|
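The JavaScript guard only assigns `ts` when the caller hasn't set it already. In Python the whole patch collapses to `dict.setdefault` (illustrative):

```python
def stamp(log_message: dict, timestamp: float) -> None:
    # Keep a caller-supplied timestamp; only fill in ts when absent.
    log_message.setdefault("ts", timestamp)
```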
e19477fbd84bdc946b82a9def75cde455518abb6
|
diff --git a/libkbfs/disk_block_cache.go b/libkbfs/disk_block_cache.go
index <HASH>..<HASH> 100644
--- a/libkbfs/disk_block_cache.go
+++ b/libkbfs/disk_block_cache.go
@@ -7,6 +7,7 @@ package libkbfs
import (
"encoding/hex"
"fmt"
+ "math"
"path/filepath"
"sort"
"strconv"
@@ -203,10 +204,10 @@ func newDiskBlockCacheStandard(config diskBlockCacheConfig, dirPath string,
}
func (cache *DiskBlockCacheStandard) syncBlockCountsFromDb() error {
- // This lock is unnecessary because we don't allow concurrent access until
- // it's done. But can't hurt.
- cache.lock.RLock()
- defer cache.lock.RUnlock()
+ // We do a write lock for this to prevent any reads from happening while
+ // we're syncing the block counts.
+ cache.lock.Lock()
+ defer cache.lock.Unlock()
tlfIDLen := len(tlf.NullID.Bytes())
tlfCounts := make(map[tlf.ID]int)
@@ -416,26 +417,30 @@ func (cache *DiskBlockCacheStandard) Delete(ctx context.Context, tlfID tlf.ID,
return cache.deleteLocked(ctx, tlfID, blockIDs)
}
-// evictLocked evicts a number of blocks from the cache.
+// evictLocked evicts a number of blocks from the cache. We choose a pivot
+// variable b randomly. Then begin an iterator into cache.lruDb.Range(tlfID +
+// b, tlfID + MaxBlockID) and iterate from there to get numBlocks *
+// evictionConsiderationFactor block IDs. We sort the resulting blocks by
+// value (LRU time) and pick the minimum numBlocks. We then call cache.Delete()
+// on that list of block IDs.
func (cache *DiskBlockCacheStandard) evictLocked(ctx context.Context,
tlfID tlf.ID, numBlocks int) (numRemoved int, err error) {
- // Use kbfscrypto.MakeTemporaryID() to create a random hash ID. Then begin
- // an iterator into cache.lruDb.Range(tlfID + b, tlfID + MaxBlockID) and
- // iterate from there to get numBlocks * evictionConsiderationFactor block
- // IDs. We sort the resulting blocks by value (LRU time) and pick the
- // minimum numBlocks. We then call cache.Delete() on that list of block
- // IDs.
tlfBytes := tlfID.Bytes()
- // We actually need a random range, not a random block ID. so, we pick a
- // point to start our range, based on the proportion of the TLF space taken
- // up by numBlocks/cache.tlfCounts[tlfID]. E.g. if we need to remove 100
- // out of 400 blocks, and we assume that the block IDs are uniformly
- // distributed, then our random start point should be in the [0,0.75)
- // interval on the [0,1.0) block ID space. This means that if we need to
- // remove all the blocks for the TLF, we can consider the whole range.
+ numElements := numBlocks * evictionConsiderationFactor
+ // We need a random range, not a random block ID. So, we pick a point to
+ // start our range, based on the proportion of the TLF space taken up by
+ // numBlocks/cache.tlfCounts[tlfID]. E.g. if we need to consider 100 out of
+ // 400 blocks, and we assume that the block IDs are uniformly distributed,
+ // then our random start point should be in the [0,0.75) interval on the
+ // [0,1.0) block ID space. If the iterator reaches the end of the TLF, we
+ // simply stop and return the number of blocks removed.
var rng *util.Range
- if numBlocks > cache.tlfCounts[tlfID] {
- randomBlockID, err := kbfsblock.MakeTemporaryID()
+ if numElements < cache.tlfCounts[tlfID] {
+ pivot := uint64(
+ float64(math.MaxUint64) *
+ (1.0 -
+ (float64(numElements) / float64(cache.tlfCounts[tlfID]))))
+ randomBlockID, err := kbfsblock.MakeRandomIDInRange(0, pivot)
if err != nil {
return 0, err
}
@@ -446,7 +451,6 @@ func (cache *DiskBlockCacheStandard) evictLocked(ctx context.Context,
}
iter := cache.lruDb.NewIterator(rng, nil)
- numElements := numBlocks * evictionConsiderationFactor
blockIDs := make(blockIDsByTime, 0, numElements)
for i := 0; i < numElements; i++ {
|
disk_block_cache: LRU eviction implementation first pass done
|
keybase_client
|
train
|
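The pivot math picks a random starting point that still leaves room for `numElements` candidates, assuming block IDs are uniform over the keyspace. A sketch of the same computation in Python (uint64 ID space assumed):

```python
MAX_UINT64 = 2**64 - 1

def eviction_pivot(num_elements: int, tlf_count: int) -> int:
    # If we must consider at least as many blocks as exist, scan the
    # whole range; otherwise start within [0, 1 - n/total) of the space.
    if num_elements >= tlf_count:
        return 0
    return int(MAX_UINT64 * (1.0 - num_elements / tlf_count))
```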
9dabf2eed094b0776c1160aba8fb5e4c30f00588
|
diff --git a/indices_forcemerge.go b/indices_forcemerge.go
index <HASH>..<HASH> 100644
--- a/indices_forcemerge.go
+++ b/indices_forcemerge.go
@@ -30,7 +30,6 @@ type IndicesForcemergeService struct {
ignoreUnavailable *bool
maxNumSegments interface{}
onlyExpungeDeletes *bool
- operationThreading interface{}
}
// NewIndicesForcemergeService creates a new IndicesForcemergeService.
@@ -94,11 +93,6 @@ func (s *IndicesForcemergeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *
return s
}
-func (s *IndicesForcemergeService) OperationThreading(operationThreading interface{}) *IndicesForcemergeService {
- s.operationThreading = operationThreading
- return s
-}
-
// Pretty indicates that the JSON response be indented and human readable.
func (s *IndicesForcemergeService) Pretty(pretty bool) *IndicesForcemergeService {
s.pretty = pretty
@@ -145,9 +139,6 @@ func (s *IndicesForcemergeService) buildURL() (string, url.Values, error) {
if s.onlyExpungeDeletes != nil {
params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes))
}
- if s.operationThreading != nil {
- params.Set("operation_threading", fmt.Sprintf("%v", s.operationThreading))
- }
return path, params, nil
}
|
Remove operation_threading parameter from Forcemerge (#<I>)
|
olivere_elastic
|
train
|
0121d94e5fca6bd462b17f7c42dbc2ad5b8290ab
|
diff --git a/src/Gallery.js b/src/Gallery.js
index <HASH>..<HASH> 100644
--- a/src/Gallery.js
+++ b/src/Gallery.js
@@ -25,9 +25,7 @@ class Gallery extends Component {
}
componentDidMount () {
- this.setState(
- {containerWidth:
- Math.floor(ReactDOM.findDOMNode(this).clientWidth)});
+ this.handleResize();
window.addEventListener('resize', this.handleResize);
}
@@ -41,16 +39,14 @@ class Gallery extends Component {
componentDidUpdate () {
if (ReactDOM.findDOMNode(this).clientWidth
!== this.state.containerWidth){
- this.setState(
- {containerWidth:
- Math.floor(ReactDOM.findDOMNode(this).clientWidth)});
+ this.handleResize();
}
}
- handleResize (e) {
- this.setState(
- {containerWidth:
- Math.floor(ReactDOM.findDOMNode(this).clientWidth)});
+ handleResize (event) {
+ this.setState({
+ containerWidth: Math.floor(ReactDOM.findDOMNode(this).clientWidth)
+ });
}
openLightbox (index, event) {
@@ -81,7 +77,8 @@ class Gallery extends Component {
}
handleClickImage () {
- if (this.state.currentImage === this.props.images.length - 1) return;
+ if (this.state.currentImage === this.props.images.length - 1)
+ return;
this.gotoNext();
}
@@ -96,21 +93,14 @@ class Gallery extends Component {
else {
this.setState({
selectedImages: update(this.state.selectedImages,
- {$splice: [[i, 1]]})});
+ {$splice: [[i, 1]]})
+ });
}
}
- /**
- * Distribute a delta (integer value) to n items based on
- * the size (width) of the items thumbnails.
- */
calculateCutOff (len, delta, items) {
- // resulting distribution
var cutoff = [];
var cutsum = 0;
-
- // distribute the delta based on the proportion of
- // thumbnail size to length of all thumbnails.
for(var i in items) {
var item = items[i];
var fractOfLen = item.scaletwidth / len;
@@ -118,14 +108,10 @@ class Gallery extends Component {
cutsum += cutoff[i];
}
- // still more pixel to distribute because of decimal
- // fractions that were omitted.
var stillToCutOff = delta - cutsum;
while(stillToCutOff > 0) {
for(i in cutoff) {
- // distribute pixels evenly until done
cutoff[i]++;
- cutsum++; //debug
stillToCutOff--;
if (stillToCutOff < 0) break;
}
@@ -133,46 +119,27 @@ class Gallery extends Component {
return cutoff;
}
- /**
- * Takes images from the items array (removes them) as
- * long as they fit into a width of maxwidth pixels.
- */
buildImageRow (items) {
var row = [];
var len = 0;
-
- // left and right margin = 2x props.margin
var imgMargin = 2 * this.props.margin;
-
- // Build a row of images until longer than maxwidth
while(items.length > 0 && len < this.state.containerWidth) {
var item = items.shift();
row.push(item);
len += (item.scaletwidth + imgMargin);
}
- // calculate by how many pixels too long...
var delta = len - this.state.containerWidth;
-
- // if the line is too long, make images smaller
if(row.length > 0 && delta > 0) {
-
- // calculate the distribution to each image in the row
var cutoff = this.calculateCutOff(len, delta, row);
-
for(var i in row) {
var pixelsToRemove = cutoff[i];
item = row[i];
-
- // move the left border inwards by half the pixels
item.marginLeft = -Math.abs(Math.floor(pixelsToRemove / 2));
-
- // shrink the width of the image by pixelsToRemove
item.vwidth = item.scaletwidth - pixelsToRemove;
}
}
else {
- // all images fit in the row, set vx and vwidth
for(var i in row) {
item = row[i];
item.marginLeft = 0;
@@ -182,14 +149,11 @@ class Gallery extends Component {
return row;
}
- /**
- * Scales thumbnails to match props.rowHeight
- */
scaleThumbs (items) {
for (var i in items) {
items[i].scaletwidth =
- Math.floor(this.props.rowHeight *
- (items[i].thumbnailWidth / items[i].thumbnailHeight));
+ Math.floor(this.props.rowHeight * (items[i].thumbnailWidth
+ / items[i].thumbnailHeight));
}
return items;
}
@@ -200,17 +164,10 @@ class Gallery extends Component {
return false;
}
- /**
- * Builds images and packs them in rows
- */
renderGallery () {
if (!this.props.images) return;
if (this.state.containerWidth == 0) return;
-
- // Calculate new thumbnail size to match this.props.rowHeight
var items = this.scaleThumbs(this.props.images.slice());
-
- // calculate rows of images
var images = [];
var rows = [];
while(items.length > 0) {
@@ -221,7 +178,6 @@ class Gallery extends Component {
for(var r in rows) {
for(var i in rows[r]) {
var item = rows[r][i];
- // create image
images.push(
<Image
key={"Image-"+idx}
@@ -281,7 +237,7 @@ Gallery.propTypes = {
selectedImages: PropTypes.arrayOf(PropTypes.number),
onSelectedImagesChange: PropTypes.func,
rowHeight: PropTypes.number,
- margin: PropTypes.number, // margin size for each image
+ margin: PropTypes.number,
backdropClosesModal: PropTypes.bool,
currentImage: PropTypes.number,
customControls: PropTypes.arrayOf(PropTypes.node),
|
Removed commentary; simplified handleResize init
|
benhowell_react-grid-gallery
|
train
|
09fdf9ccd58ed966f87eb5c063a6bb262a250b50
|
diff --git a/modules/component/component.js b/modules/component/component.js
index <HASH>..<HASH> 100644
--- a/modules/component/component.js
+++ b/modules/component/component.js
@@ -24,6 +24,10 @@ export default class Component extends Container {
this.options.shadow.position = Position.from(this.options.shadow.position);
/**
+ * @type {Path2D}
+ */
+ this.path = null;
+ /**
* @type {Boolean}
*/
this.isClicked = false;
@@ -54,7 +58,7 @@ export default class Component extends Container {
if (willFill || willStroke) {
const path = new window.Path2D();
this.trace(path);
-
+ this.path = path;
if (willFill) {
ctx.fill(path);
@@ -124,33 +128,32 @@ export default class Component extends Container {
return false;
}
- const origin = this.getOrigin();
const relative = Position.from(positionDefinition).clone().subtract(this.position);
- const rotated = relative.clone().rotate(-this.options.rotation, this.options.rotationCenter).subtract(origin);
+ const rotated = relative.clone().rotate(-this.options.rotation, this.options.rotationCenter);
- const willFill = this.options.fill;
- const willStroke = this.options.stroke && this.options.strokeWidth > 0;
+ ctx.save();
+ const [willFill, willStroke] = this.setContext(ctx);
if (!willFill && !willStroke) {
+ ctx.restore();
return false;
}
- if (willStroke) {
- ctx.lineJoin = this.options.join;
- ctx.lineCap = this.options.cap;
- ctx.lineWidth = this.options.strokeWidth;
+ if (!this.path) {
+ const path = new window.Path2D();
+ this.trace(path);
}
- const path = new window.Path2D();
- this.trace(path);
- let result = (willFill && ctx.isPointInPath(path, rotated.x, rotated.y)) ||
- (willStroke && ctx.isPointInStroke(path, rotated.x, rotated.y));
+ let result = (willFill && ctx.isPointInPath(this.path, rotated.x, rotated.y)) ||
+ (willStroke && ctx.isPointInStroke(this.path, rotated.x, rotated.y));
if (this.options.clip) {
const clipper = this.options.clip === Container.ITSELF ? this : this.options.clip;
result = result && clipper.isHover(relative, ctx);
}
+ ctx.restore();
+
return result;
}
@@ -289,6 +292,7 @@ export default class Component extends Container {
cursors.rightResize = cursors.eResize;
cursors.bottomResize = cursors.sResize;
cursors.leftResize = cursors.wResize;
+
return cursors;
}
diff --git a/modules/component/component.test.js b/modules/component/component.test.js
index <HASH>..<HASH> 100644
--- a/modules/component/component.test.js
+++ b/modules/component/component.test.js
@@ -96,9 +96,8 @@ test("trace", (t) => {
test("isHover", (t) => {
t.context.trace = () => {};
- const ctx = {
- isPointInPath: () => true,
- };
+ const ctx = new window.CanvasRenderingContext2D();
+ ctx.isPointInPath = () => true;
t.true(t.context.isHover([0, 0], ctx));
t.context.options.clip = {
diff --git a/modules/input/input.test.js b/modules/input/input.test.js
index <HASH>..<HASH> 100644
--- a/modules/input/input.test.js
+++ b/modules/input/input.test.js
@@ -36,9 +36,8 @@ test("click", (t) => {
});
test("isHover", (t) => {
- const ctx = {
- isPointInPath: () => true,
- };
+ const ctx = new window.CanvasRenderingContext2D();
+ ctx.isPointInPath = () => true;
t.true(t.context.isHover([0, 0], ctx));
t.context.options.shown = false;
diff --git a/modules/path/path.test.js b/modules/path/path.test.js
index <HASH>..<HASH> 100644
--- a/modules/path/path.test.js
+++ b/modules/path/path.test.js
@@ -54,10 +54,9 @@ test("trace instructions string", (t) => {
test("isHover", (t) => {
t.context.trace = () => {};
- const ctx = {
- isPointInPath: () => t.fail(),
- isPointInStroke: () => true,
- };
+ const ctx = new window.CanvasRenderingContext2D();
+ ctx.isPointInPath = () => t.fail();
+ ctx.isPointInStroke = () => true;
t.true(t.context.isHover([0, 0], ctx));
});
|
:zap: Improving performance.
Cache the trace function call result
|
pencil-js_pencil.js
|
train
|
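The commit caches the `Path2D` built during render so `isHover` can reuse it instead of re-tracing. The cache-on-first-use pattern, sketched in Python (`_trace` is a hypothetical stand-in for the expensive geometry construction):

```python
class Component:
    def __init__(self):
        self._path = None  # filled in on first render

    def _trace(self):
        return object()  # stand-in for building a Path2D

    @property
    def path(self):
        if self._path is None:
            self._path = self._trace()  # build once, reuse for hit tests
        return self._path
```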
9dff15bd4f364c03fabdc8905e7dd755ecba2c7c
|
diff --git a/pyrogram/client/client.py b/pyrogram/client/client.py
index <HASH>..<HASH> 100644
--- a/pyrogram/client/client.py
+++ b/pyrogram/client/client.py
@@ -20,6 +20,7 @@ import asyncio
import base64
import binascii
import getpass
+import inspect
import json
import logging
import math
@@ -328,11 +329,18 @@ class Client(Methods, BaseClient):
run = asyncio.get_event_loop().run_until_complete
run(self.start())
- run(coroutine or self.idle())
- if coroutine:
+ run(
+ coroutine if inspect.iscoroutine(coroutine)
+ else coroutine() if coroutine
+ else self.idle()
+ )
+
+ if self.is_started:
run(self.stop())
+ return coroutine
+
def add_handler(self, handler, group: int = 0):
"""Use this method to register an update handler.
|
Make run() accept coroutine functions
|
pyrogram_pyrogram
|
train
|
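After the change, `run()` accepts a coroutine object, a coroutine function, or nothing (falling back to `idle()`). A self-contained sketch of that dispatch (names mirror the patch; `idle` is a stand-in):

```python
import asyncio
import inspect

async def idle():
    await asyncio.sleep(0)  # stand-in for the client's idle loop

def run(coroutine=None):
    target = (coroutine if inspect.iscoroutine(coroutine)
              else coroutine() if coroutine
              else idle())
    asyncio.get_event_loop().run_until_complete(target)

async def job():
    return 42

run(job)    # coroutine function: called, then awaited
run(job())  # coroutine object: awaited directly
```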
07b585a52f5539bb1ab5b8b548acd829b4273295
|
diff --git a/Kwf_js/Utils/ResponsiveImg.js b/Kwf_js/Utils/ResponsiveImg.js
index <HASH>..<HASH> 100644
--- a/Kwf_js/Utils/ResponsiveImg.js
+++ b/Kwf_js/Utils/ResponsiveImg.js
@@ -7,7 +7,7 @@ var deferredImages = [];
Kwf.Utils.ResponsiveImg = function (selector) {
Kwf.onJElementWidthChange(selector, function responsiveImg(el) {
if (el.hasClass('loadImmediately') || isElementInView(el)) {
- if (!el.responsiveImgInitDone) {
+ if (!el.data('responsiveImgInitDone')) {
initResponsiveImgEl(el);
} else {
checkResponsiveImgEl(el);
@@ -67,7 +67,7 @@ function getResponsiveWidthSteps(minWidth, maxWidth) {
function initResponsiveImgEl(el) {
var elWidth = Kwf.Utils.Element.getCachedWidth(el);
if (elWidth == 0) return;
- el.responsiveImgInitDone = true;
+ el.data('responsiveImgInitDone', true);
var devicePixelRatio = window.devicePixelRatio ? window.devicePixelRatio : 1;
var baseUrl = el.data("src");
var minWidth = parseInt(el.data("minWidth"));
|
Save responsiveImgInitDone to jQuery el; didn't work before
|
koala-framework_koala-framework
|
train
|
ed5102c1ebba2dd76293ed864b2f3f1f1de6eab4
|
diff --git a/lib/celluloid/io/tcp_socket.rb b/lib/celluloid/io/tcp_socket.rb
index <HASH>..<HASH> 100644
--- a/lib/celluloid/io/tcp_socket.rb
+++ b/lib/celluloid/io/tcp_socket.rb
@@ -17,7 +17,7 @@ module Celluloid
if block_given?
begin
- yield sock
+ return yield(sock)
ensure
sock.close
end
diff --git a/spec/celluloid/io/tcp_socket_spec.rb b/spec/celluloid/io/tcp_socket_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/celluloid/io/tcp_socket_spec.rb
+++ b/spec/celluloid/io/tcp_socket_spec.rb
@@ -4,6 +4,33 @@ describe Celluloid::IO::TCPSocket do
let(:payload) { 'ohai' }
context "inside Celluloid::IO" do
+
+ describe ".open" do
+ it "returns the open socket" do
+ server = ::TCPServer.new example_addr, example_port
+ thread = Thread.new { server.accept }
+
+ socket = within_io_actor { Celluloid::IO::TCPSocket.open(example_addr, example_port) }
+ socket.should be_a(Celluloid::IO::TCPSocket)
+
+ server.close
+ thread.terminate
+ socket.close
+ end
+ context "when passed a block" do
+ it "returns the block evaluation" do
+ server = ::TCPServer.new example_addr, example_port
+ thread = Thread.new { server.accept }
+
+ value = within_io_actor { Celluloid::IO::TCPSocket.open(example_addr, example_port) { true } }
+ value.should be_true
+
+ server.close
+ thread.terminate
+ end
+ end
+ end
+
it "connects to TCP servers" do
server = ::TCPServer.new example_addr, example_port
thread = Thread.new { server.accept }
|
Corrected return value semantics for Celluloid::IO TCPSocket (now compatible with native TCPSocket). This fixes celluloid/celluloid-io#<I>
|
celluloid_celluloid-io
|
train
|
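The Ruby fix makes `open` return the block's value (while still closing the socket in `ensure`) instead of discarding it, matching the stdlib `TCPSocket.open` contract. The same shape in Python (`connect` is a hypothetical factory):

```python
def open_socket(connect, addr, port, block=None):
    sock = connect(addr, port)
    if block is None:
        return sock
    try:
        return block(sock)  # the fix: propagate the block's value
    finally:
        sock.close()        # still guaranteed, as with Ruby's ensure
```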
be4c57c25805fc48bd61252f9e893444544b960f
|
diff --git a/aws/resource_aws_cognito_user_pool.go b/aws/resource_aws_cognito_user_pool.go
index <HASH>..<HASH> 100644
--- a/aws/resource_aws_cognito_user_pool.go
+++ b/aws/resource_aws_cognito_user_pool.go
@@ -10,6 +10,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
+ "github.com/hashicorp/aws-sdk-go-base/tfawserr"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
@@ -1260,6 +1261,10 @@ func resourceAwsCognitoUserPoolDelete(d *schema.ResourceData, meta interface{})
_, err := conn.DeleteUserPool(params)
+ if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) {
+ return nil
+ }
+
if err != nil {
return fmt.Errorf("error deleting Cognito user pool (%s): %w", d.Id(), err)
}
diff --git a/aws/resource_aws_cognito_user_pool_client.go b/aws/resource_aws_cognito_user_pool_client.go
index <HASH>..<HASH> 100644
--- a/aws/resource_aws_cognito_user_pool_client.go
+++ b/aws/resource_aws_cognito_user_pool_client.go
@@ -8,6 +8,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
+ "github.com/hashicorp/aws-sdk-go-base/tfawserr"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
@@ -470,6 +471,10 @@ func resourceAwsCognitoUserPoolClientDelete(d *schema.ResourceData, meta interfa
_, err := conn.DeleteUserPoolClient(params)
+ if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) {
+ return nil
+ }
+
if err != nil {
return fmt.Errorf("error deleting Cognito User Pool Client (%s): %w", d.Id(), err)
}
|
Handle already deleted aws_cognito_user_pool and aws_cognito_user_pool_client resources.
|
terraform-providers_terraform-provider-aws
|
train
|
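Both deletes now treat `ResourceNotFoundException` as success, so Terraform doesn't fail when the pool or client is already gone. A Python/boto3 analog of the same idempotent-delete pattern (illustrative, not the provider's Go code):

```python
import boto3
from botocore.exceptions import ClientError

def delete_user_pool(pool_id: str) -> None:
    client = boto3.client("cognito-idp")
    try:
        client.delete_user_pool(UserPoolId=pool_id)
    except ClientError as err:
        if err.response["Error"]["Code"] == "ResourceNotFoundException":
            return  # already deleted: nothing to do
        raise
```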
65a2136a6284227a67780f26f6fb12b687add0e4
|
diff --git a/pyinstrument/__main__.py b/pyinstrument/__main__.py
index <HASH>..<HASH> 100644
--- a/pyinstrument/__main__.py
+++ b/pyinstrument/__main__.py
@@ -3,6 +3,26 @@ import sys
import os
import codecs
from pyinstrument import Profiler
+from pyinstrument.compat import exec_
+
+# Python 3 compatibility. Mostly borrowed from SymPy
+PY3 = sys.version_info[0] > 2
+
+if PY3:
+ import builtins
+ exec_ = getattr(builtins, "exec")
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("exec _code_ in _globs_, _locs_")
def main():
usage = "usage: %prog [-h] [-o output_file_path] scriptfile [arg] ..."
@@ -12,7 +32,7 @@ def main():
dest="output_html", action='store_true',
help="output HTML instead of text", default=False)
parser.add_option('-o', '--outfile',
- dest="outfile", action='store',
+ dest="outfile", action='store',
help="save stats to <outfile>", default=None)
if not sys.argv[1:]:
@@ -38,8 +58,8 @@ def main():
profiler.start()
try:
- exec code in globs, None
- except SystemExit, KeyboardInterrupt:
+ exec_(code, globs, None)
+ except (SystemExit, KeyboardInterrupt):
pass
profiler.stop()
diff --git a/pyinstrument/profiler.py b/pyinstrument/profiler.py
index <HASH>..<HASH> 100644
--- a/pyinstrument/profiler.py
+++ b/pyinstrument/profiler.py
@@ -120,13 +120,13 @@ class Profiler(object):
return parent.children_dict[frame_name]
- for stack, self_time in self.stack_self_time.iteritems():
+ for stack, self_time in self.stack_self_time.items():
frame_for_stack(stack).self_time = self_time
return self._root_frame
def first_interesting_frame(self):
- """
+ """
Traverse down the frame hierarchy until a frame is found with more than one child
"""
frame = self.root_frame()
@@ -221,7 +221,7 @@ class Frame(object):
result = candidate
self._file_path_short = result
- else:
+ else:
self._file_path_short = None
return self._file_path_short
@@ -271,7 +271,7 @@ class Frame(object):
@property
def children(self):
- return self.children_dict.values()
+ return list(self.children_dict.values())
@property
def sorted_children(self):
@@ -297,7 +297,7 @@ class Frame(object):
code_position=self.code_position_short,
c=colors_enabled if color else colors_disabled)
- children = filter(lambda f: f.proportion_of_total > 0.01, self.sorted_children)
+ children = [f for f in self.sorted_children if f.proportion_of_total > 0.01]
if children:
last_child = children[-1]
@@ -334,7 +334,7 @@ class Frame(object):
time=self.time(),
function=self.function,
code_position=self.code_position_short,
- parent_proportion=self.proportion_of_parent,
+ parent_proportion=self.proportion_of_parent,
total_proportion=self.proportion_of_total,
extra_class=extra_class)
|
Add Python 3 support
This requires at least <I>, as I left the unicode literals in.
|
joerick_pyinstrument
|
train
|
acbff8378a99a1e41cdfee8761793ce4c8a38e88
|
diff --git a/fsm/deciders.go b/fsm/deciders.go
index <HASH>..<HASH> 100644
--- a/fsm/deciders.go
+++ b/fsm/deciders.go
@@ -478,6 +478,20 @@ func OnStartTimerFailed(timer string, deciders ...Decider) Decider {
}
}
+// OnTimerCanceled builds a composed decider that fires on EventTypeTimerCanceled.
+func OnTimerCanceled(timer string, deciders ...Decider) Decider {
+ return func(ctx *FSMContext, h *swf.HistoryEvent, data interface{}) Outcome {
+ switch *h.EventType {
+ case swf.EventTypeTimerCanceled:
+ if *h.TimerCanceledEventAttributes.TimerId == timer {
+ logf(ctx, "at=on-timer-canceled timer=%q", *h.TimerCanceledEventAttributes.TimerId)
+ return NewComposedDecider(deciders...)(ctx, h, data)
+ }
+ }
+ return ctx.Pass()
+ }
+}
+
func OnExternalCancellationResponse(exitDecider Decider) Decider {
return func(ctx *FSMContext, h *swf.HistoryEvent, data interface{}) Outcome {
switch *h.EventType {
|
Decider for EventTypeTimerCanceled
|
sclasen_swfsm
|
train
|
f46354f528ac5158de7ee817be10b38ee91e37ea
|
diff --git a/lib/ohai/plugins/linux/platform.rb b/lib/ohai/plugins/linux/platform.rb
index <HASH>..<HASH> 100644
--- a/lib/ohai/plugins/linux/platform.rb
+++ b/lib/ohai/plugins/linux/platform.rb
@@ -166,12 +166,6 @@ Ohai.plugin(:Platform) do
contents = File.read("/etc/system-release").chomp
platform get_redhatish_platform(contents)
platform_version get_redhatish_version(contents)
- elsif File.exist?("/etc/gentoo-release")
- platform "gentoo"
- # the gentoo release version is the base version used to bootstrap
- # a node and doesn't have a lot of meaning in a rolling release distro
- # kernel release will be used - ex. 3.18.7-gentoo
- platform_version `uname -r`.strip
elsif File.exist?("/etc/SuSE-release")
suse_release = File.read("/etc/SuSE-release")
suse_version = suse_release.scan(/VERSION = (\d+)\nPATCHLEVEL = (\d+)/).flatten.join(".")
@@ -187,22 +181,6 @@ Ohai.plugin(:Platform) do
else
platform "suse"
end
- elsif File.exist?("/etc/slackware-version")
- platform "slackware"
- platform_version File.read("/etc/slackware-version").scan(/(\d+|\.+)/).join
- elsif File.exist?("/etc/arch-release")
- platform "arch"
- # no way to determine platform_version in a rolling release distribution
- # kernel release will be used - ex. 2.6.32-ARCH
- platform_version `uname -r`.strip
- elsif File.exist?("/etc/exherbo-release")
- platform "exherbo"
- # no way to determine platform_version in a rolling release distribution
- # kernel release will be used - ex. 3.13
- platform_version `uname -r`.strip
- elsif File.exist?("/etc/alpine-release")
- platform "alpine"
- platform_version File.read("/etc/alpine-release").strip()
elsif File.exist?("/etc/Eos-release")
platform "arista_eos"
platform_version File.read("/etc/Eos-release").strip.split[-1]
@@ -222,6 +200,28 @@ Ohai.plugin(:Platform) do
platform_family "wrlinux"
platform_version os_release_info["VERSION"]
+ elsif File.exist?("/etc/gentoo-release")
+ platform "gentoo"
+ # the gentoo release version is the base version used to bootstrap
+ # a node and doesn't have a lot of meaning in a rolling release distro
+ # kernel release will be used - ex. 3.18.7-gentoo
+ platform_version `uname -r`.strip
+ elsif File.exist?("/etc/slackware-version")
+ platform "slackware"
+ platform_version File.read("/etc/slackware-version").scan(/(\d+|\.+)/).join
+ elsif File.exist?("/etc/arch-release")
+ platform "arch"
+ # no way to determine platform_version in a rolling release distribution
+ # kernel release will be used - ex. 2.6.32-ARCH
+ platform_version `uname -r`.strip
+ elsif File.exist?("/etc/exherbo-release")
+ platform "exherbo"
+ # no way to determine platform_version in a rolling release distribution
+ # kernel release will be used - ex. 3.13
+ platform_version `uname -r`.strip
+ elsif File.exist?("/etc/alpine-release")
+ platform "alpine"
+ platform_version File.read("/etc/alpine-release").strip()
elsif lsb[:id] =~ /RedHat/i
platform "redhat"
platform_version lsb[:release]
|
Reorder detection of platforms to speed things up
Check the more popular platforms first since those are most likely to match. Parallels have to come before RHEL though since it looks like RHEL.
|
chef_ohai
|
train
|
e62fb5595f309a9658b85da2c7724d62e9ef244b
|
diff --git a/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/StorageSharedKeyCredential.java b/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/StorageSharedKeyCredential.java
index <HASH>..<HASH> 100644
--- a/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/StorageSharedKeyCredential.java
+++ b/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/StorageSharedKeyCredential.java
@@ -30,7 +30,6 @@ import java.util.stream.Collectors;
public final class StorageSharedKeyCredential {
private final ClientLogger logger = new ClientLogger(StorageSharedKeyCredential.class);
- private static final String AUTHORIZATION_HEADER_FORMAT = "SharedKey %s:%s";
private static final Context LOG_STRING_TO_SIGN_CONTEXT = new Context(Constants.STORAGE_LOG_STRING_TO_SIGN, true);
// Pieces of the connection string that are needed.
@@ -114,7 +113,7 @@ public final class StorageSharedKeyCredential {
boolean logStringToSign) {
String signature = StorageImplUtils.computeHMac256(accountKey,
buildStringToSign(requestURL, httpMethod, headers, logStringToSign));
- return String.format(AUTHORIZATION_HEADER_FORMAT, accountName, signature);
+ return "SharedKey " + accountName + ":" + signature;
}
/**
|
Replace format with concat. (#<I>)
|
Azure_azure-sdk-for-java
|
train
|
bef35a96409448ca1162d18b883ee39ef6bb62fb
|
diff --git a/views/js/layout/tree.js b/views/js/layout/tree.js
index <HASH>..<HASH> 100644
--- a/views/js/layout/tree.js
+++ b/views/js/layout/tree.js
@@ -635,7 +635,8 @@ define([
* @returns {undefined}
*/
function executePossibleAction(actions, context, exclude) {
- var possibleActions;
+ var possibleActions,
+ self = this;
if (!_.isArray(exclude)) {
exclude = [];
}
@@ -649,9 +650,13 @@ define([
});
//execute the first allowed action
if(possibleActions.length > 0){
+ //hide shown earlier message
+ if (self.permissionErrorMessage) {
+ self.permissionErrorMessage.close();
+ }
actionManager.exec(possibleActions[0], context);
} else {
- feedback().error(__("You don't have sufficient permissions to access"));
+ self.permissionErrorMessage = feedback().error(__("You don't have sufficient permissions to access"));
}
}
|
Hide error message after the allowed item has been selected
|
oat-sa_tao-core
|
train
|
4934b87c41977d3015256a6c9cebae83a8777824
|
diff --git a/drools-compiler/src/test/java/org/drools/compiler/Cheese.java b/drools-compiler/src/test/java/org/drools/compiler/Cheese.java
index <HASH>..<HASH> 100644
--- a/drools-compiler/src/test/java/org/drools/compiler/Cheese.java
+++ b/drools-compiler/src/test/java/org/drools/compiler/Cheese.java
@@ -25,6 +25,8 @@ public class Cheese
public static final String STILTON = "stilton";
+ public static final int BASE_PRICE = 10;
+
private static final long serialVersionUID = 510l;
private String type;
private int price;
diff --git a/drools-compiler/src/test/java/org/drools/compiler/integrationtests/NamedConsequencesTest.java b/drools-compiler/src/test/java/org/drools/compiler/integrationtests/NamedConsequencesTest.java
index <HASH>..<HASH> 100644
--- a/drools-compiler/src/test/java/org/drools/compiler/integrationtests/NamedConsequencesTest.java
+++ b/drools-compiler/src/test/java/org/drools/compiler/integrationtests/NamedConsequencesTest.java
@@ -320,6 +320,56 @@ public class NamedConsequencesTest extends CommonTestMethodBase {
}
@Test
+ public void testIfElseWithConstant() {
+ // DROOLS-325
+ String str = "import org.drools.compiler.Cheese;\n " +
+ "global java.util.List results;\n" +
+ "\n" +
+ "rule R1 when\n" +
+ " $a: Cheese ( type == \"stilton\" )\n" +
+ " if ( price > Cheese.BASE_PRICE ) do[t1] else do[t2]\n" +
+ " $b: Cheese ( type == \"cheddar\" )\n" +
+ "then\n" +
+ " results.add( $b.getType() );\n" +
+ "then[t1]\n" +
+ " results.add( $a.getType() );\n" +
+ "then[t2]\n" +
+ " results.add( $a.getType().toUpperCase() );\n" +
+ "end\n";
+
+ List<String> results = executeTestWithDRL(str);
+
+ assertEquals( 2, results.size() );
+ assertTrue( results.contains( "cheddar" ) );
+ assertTrue( results.contains( "STILTON" ) );
+ }
+
+ @Test
+ public void testIfElseWithMvelAccessor() {
+ // DROOLS-324
+ String str = "import org.drools.compiler.Cheese;\n " +
+ "global java.util.List results;\n" +
+ "\n" +
+ "rule R1 dialect \"mvel\" when\n" +
+ " $a: Cheese ( type == \"stilton\" )\n" +
+ " if ( $a.price > Cheese.BASE_PRICE ) do[t1] else do[t2]\n" +
+ " $b: Cheese ( type == \"cheddar\" )\n" +
+ "then\n" +
+ " results.add( $b.getType() );\n" +
+ "then[t1]\n" +
+ " results.add( $a.getType() );\n" +
+ "then[t2]\n" +
+ " results.add( $a.getType().toUpperCase() );\n" +
+ "end\n";
+
+ List<String> results = executeTestWithDRL(str);
+
+ assertEquals( 2, results.size() );
+ assertTrue( results.contains( "cheddar" ) );
+ assertTrue( results.contains( "STILTON" ) );
+ }
+
+ @Test
public void testIfElse2() {
String str = "import org.drools.compiler.Cheese;\n " +
"global java.util.List results;\n" +
|
[DROOLS-<I>][DROOLS-<I>] add test cases showing that conditional named consequences are working as expected
|
kiegroup_drools
|
train
|
e5a7921873e8bfbccdf6d27c9152742a5117f7de
|
diff --git a/script/bump-version.py b/script/bump-version.py
index <HASH>..<HASH> 100755
--- a/script/bump-version.py
+++ b/script/bump-version.py
@@ -32,7 +32,6 @@ def main():
update_version_h(versions)
update_info_plist(version)
tag_version(version)
- git_push()
def increase_version(versions, index):
@@ -118,10 +117,5 @@ def tag_version(version):
execute(['git', 'tag', 'v{0}'.format(version)])
-def git_push():
- execute(['git', 'push'])
- execute(['git', 'push', '--tags'])
-
-
if __name__ == '__main__':
sys.exit(main())
|
Don't push when bumping version
|
electron_electron
|
train
|
10d9b8aba5ff0864a5f4cb56d17acc4dabbd2541
|
diff --git a/addon/apollo/query-manager.js b/addon/apollo/query-manager.js
index <HASH>..<HASH> 100644
--- a/addon/apollo/query-manager.js
+++ b/addon/apollo/query-manager.js
@@ -1,20 +1,13 @@
-import EmberObject from '@ember/object';
-import { A } from '@ember/array';
-import { inject as service } from '@ember-decorators/service';
-import { alias } from '@ember-decorators/object/computed';
+export default class QueryManager {
+ apollo = undefined;
+ activeSubscriptions = [];
-export default class QueryManager extends EmberObject {
- @service()
- apollo;
-
- @alias('apollo.client')
- apolloClient;
-
- activeSubscriptions = null;
+ constructor(apoloService) {
+ this.apollo = apoloService;
+ }
- init() {
- super.init(...arguments);
- this.set('activeSubscriptions', A([]));
+ get apolloClient() {
+ return this.apollo.client;
}
/**
@@ -28,7 +21,7 @@ export default class QueryManager extends EmberObject {
* @public
*/
mutate(opts, resultKey) {
- return this.get('apollo').mutate(opts, resultKey);
+ return this.apollo.mutate(opts, resultKey);
}
/**
@@ -42,7 +35,7 @@ export default class QueryManager extends EmberObject {
* @public
*/
query(opts, resultKey) {
- return this.get('apollo').query(opts, resultKey);
+ return this.apollo.query(opts, resultKey);
}
/**
@@ -60,7 +53,7 @@ export default class QueryManager extends EmberObject {
* @public
*/
watchQuery(opts, resultKey) {
- return this.get('apollo').managedWatchQuery(this, opts, resultKey);
+ return this.apollo.managedWatchQuery(this, opts, resultKey);
}
/**
@@ -79,7 +72,7 @@ export default class QueryManager extends EmberObject {
* @public
*/
subscribe(opts, resultKey) {
- return this.get('apollo').managedSubscribe(this, opts, resultKey);
+ return this.apollo.managedSubscribe(this, opts, resultKey);
}
/**
@@ -91,7 +84,7 @@ export default class QueryManager extends EmberObject {
* @private
*/
trackSubscription(subscription) {
- this.get('activeSubscriptions').pushObject({ subscription, stale: false });
+ this.activeSubscriptions.push({ subscription, stale: false });
}
/**
@@ -102,8 +95,7 @@ export default class QueryManager extends EmberObject {
* @private
*/
markSubscriptionsStale() {
- let subscriptions = this.get('activeSubscriptions');
- subscriptions.forEach(subscription => {
+ this.activeSubscriptions.forEach(subscription => {
subscription.stale = true;
});
}
@@ -120,12 +112,11 @@ export default class QueryManager extends EmberObject {
* @public
*/
unsubscribeAll(onlyStale = false) {
- let subscriptions = this.get('activeSubscriptions');
- subscriptions.forEach(subscription => {
+ this.activeSubscriptions.forEach(subscription => {
if (!onlyStale || subscription.stale) {
subscription.subscription.unsubscribe();
}
});
- this.set('activeSubscriptions', A([]));
+ this.activeSubscriptions = [];
}
}
diff --git a/addon/services/apollo.js b/addon/services/apollo.js
index <HASH>..<HASH> 100644
--- a/addon/services/apollo.js
+++ b/addon/services/apollo.js
@@ -352,7 +352,7 @@ export default class Apollo extends Service {
}
createQueryManager() {
- return QueryManager.create({ apollo: this });
+ return new QueryManager(this);
}
/**
|
Move query-manager away from EmberObject
|
ember-graphql_ember-apollo-client
|
train
|
dcd84a91722bdcbb688cf384513cfba0102651e7
|
diff --git a/api.go b/api.go
index <HASH>..<HASH> 100644
--- a/api.go
+++ b/api.go
@@ -590,9 +590,14 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
// Additionally we should only retry if bucketLocation and custom
// region is empty.
if metadata.bucketLocation == "" && c.region == "" {
- if res.StatusCode == http.StatusBadRequest && errResponse.Region != "" {
- c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
- continue // Retry.
+ if errResponse.Code == "AuthorizationHeaderMalformed" || errResponse.Code == "InvalidRegion" {
+ if metadata.bucketName != "" && errResponse.Region != "" {
+ // Gather Cached location only if bucketName is present.
+ if _, cachedLocationError := c.bucketLocCache.Get(metadata.bucketName); cachedLocationError != false {
+ c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
+ continue // Retry.
+ }
+ }
}
}
|
Conditionally retry HTTP calls for specific S3 errors (#<I>)
When InvalidRegion or AuthorizationHeaderMalformed error is received from S3 server,
update bucket region cache and retry the same ReST call with returned region.
Fixes #<I> and #<I>
|
minio_minio-go
|
train
|
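The Go change retries once, and only under three conditions: the error code signals a region mismatch, a bucket name is present, and the region isn't already cached. A condensed Python sketch of that policy (the client call and response fields are hypothetical):

```python
RETRYABLE = {"AuthorizationHeaderMalformed", "InvalidRegion"}
region_cache: dict = {}

def execute(call, bucket: str):
    resp = call()
    if (resp.error_code in RETRYABLE and bucket and resp.region
            and bucket not in region_cache):
        region_cache[bucket] = resp.region  # remember the hinted region
        resp = call()                       # single retry
    return resp
```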
3362bd1a6589baf533e372ae92c03574426825cb
|
diff --git a/src/SpiffWorkflow/specs/TaskSpec.py b/src/SpiffWorkflow/specs/TaskSpec.py
index <HASH>..<HASH> 100644
--- a/src/SpiffWorkflow/specs/TaskSpec.py
+++ b/src/SpiffWorkflow/specs/TaskSpec.py
@@ -376,7 +376,7 @@ class TaskSpec(object):
my_task._update_children(self.outputs)
return True
- def serialize(self, serializer):
+ def serialize(self, serializer, **kwargs):
"""
Serializes the instance using the provided serializer.
@@ -386,13 +386,15 @@ class TaskSpec(object):
@type serializer: L{SpiffWorkflow.storage.Serializer}
@param serializer: The serializer to use.
+ @type kwargs: dict
+ @param kwargs: Passed to the serializer.
@rtype: object
@return: The serialized object.
"""
- return serializer._serialize_task_spec(self)
+ return serializer._serialize_task_spec(self, **kwargs)
@classmethod
- def deserialize(self, serializer, wf_spec, s_state):
+ def deserialize(self, serializer, wf_spec, s_state, **kwargs):
"""
Deserializes the instance using the provided serializer.
@@ -406,7 +408,9 @@ class TaskSpec(object):
@param wf_spec: An instance of the WorkflowSpec.
@type s_state: object
@param s_state: The serialized task specification object.
+ @type kwargs: dict
+ @param kwargs: Passed to the serializer.
@rtype: TaskSpec
@return: The task specification instance.
"""
- return serializer._deserialize_task_spec(wf_spec, s_state)
+ return serializer._deserialize_task_spec(wf_spec, s_state, **kwargs)
|
TaskSpec.serialize() and .deserialize(): pass kwargs to the serializer.
|
knipknap_SpiffWorkflow
|
train
|
9802debf71ff68897c1be59e1763dd31bcbde8ed
|
diff --git a/modules/angular2/test/render/dom/shadow_dom/native_shadow_dom_strategy_spec.js b/modules/angular2/test/render/dom/shadow_dom/native_shadow_dom_strategy_spec.js
index <HASH>..<HASH> 100644
--- a/modules/angular2/test/render/dom/shadow_dom/native_shadow_dom_strategy_spec.js
+++ b/modules/angular2/test/render/dom/shadow_dom/native_shadow_dom_strategy_spec.js
@@ -30,10 +30,12 @@ export function main() {
strategy = new NativeShadowDomStrategy(styleUrlResolver);
});
- it('should use the native shadow root', () => {
- var host = el('<div><span>original content</span></div>');
- expect(strategy.prepareShadowRoot(host)).toBe(DOM.getShadowRoot(host));
- });
+ if (DOM.supportsNativeShadowDOM()) {
+ it('should use the native shadow root', () => {
+ var host = el('<div><span>original content</span></div>');
+ expect(strategy.prepareShadowRoot(host)).toBe(DOM.getShadowRoot(host));
+ });
+ }
it('should rewrite style urls', () => {
var styleElement = el('<style>.foo {background-image: url("img.jpg");}</style>');
|
fix(test): native shadow DOM is required (IE<I>, Firefox)
|
angular_angular
|
train
|
7ef6a090e03163d7ffe0e320ca0806daf7dfa102
|
diff --git a/lib/honeybadger/config/yaml.rb b/lib/honeybadger/config/yaml.rb
index <HASH>..<HASH> 100644
--- a/lib/honeybadger/config/yaml.rb
+++ b/lib/honeybadger/config/yaml.rb
@@ -1,5 +1,6 @@
require 'pathname'
require 'yaml'
+require 'erb'
module Honeybadger
class Config
@@ -14,7 +15,7 @@ module Honeybadger
elsif !@path.writable?
raise ConfigError, "The configuration file #{@path} is not writable."
else
- yaml = YAML.load(@path.read)
+ yaml = YAML.load(ERB.new(@path.read).result)
yaml.merge!(yaml[env]) if yaml[env].kind_of?(Hash)
update(dotify_keys(yaml))
end
diff --git a/spec/fixtures/honeybadger.yml b/spec/fixtures/honeybadger.yml
index <HASH>..<HASH> 100644
--- a/spec/fixtures/honeybadger.yml
+++ b/spec/fixtures/honeybadger.yml
@@ -9,3 +9,4 @@ a:
nested: 'option'
production:
api_key: 'asdf'
+erb: <%= 'erb!' %>
diff --git a/spec/unit/honeybadger/config/yaml_spec.rb b/spec/unit/honeybadger/config/yaml_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/unit/honeybadger/config/yaml_spec.rb
+++ b/spec/unit/honeybadger/config/yaml_spec.rb
@@ -8,7 +8,7 @@ describe Honeybadger::Config::Yaml do
context "when options are nested" do
it "converts deeply nested options to dotted hash syntax" do
- should eq({:enabled => true, :api_key => 'asdf', :'foo.bar' => 'baz', :'foo.baz' => 'other', :'a.really.deeply.nested' => 'option', :'production.api_key' => 'asdf'})
+ expect(subject[:'a.really.deeply.nested']).to eq 'option'
end
end
@@ -43,6 +43,12 @@ describe Honeybadger::Config::Yaml do
end
end
+ context "when ERB is used" do
+ it "evaluates ERB" do
+ expect(subject[:erb]).to eq 'erb!'
+ end
+ end
+
context "when file is not found" do
it "raises a ConfigError" do
expect { described_class.new('foo.yml') }.to raise_error(Honeybadger::Config::ConfigError)
|
Evaluate ERB in honeybadger.yml
|
honeybadger-io_honeybadger-ruby
|
train
|
765d2ed61b9d06e688d5f202466df4104f1bff63
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -132,7 +132,7 @@ tests_requirements = [
"flake8-comprehensions",
"flake8-string-format",
"pylint",
- "pylint-pytest",
+ "pylint-pytest>=0.3.0",
"pylint-plugin-utils",
"wget",
"filelock",
|
Update pylint-pytest to >=<I>
Older versions had issues with loading pylint plugins on Windows.
Close #<I>
|
iterative_dvc
|
train
|
77be07e6165e66bbd9867804853616d616312e57
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -1,13 +1,15 @@
from setuptools import setup
-exec(open('./debianbts/version.py').read())
+meta = {}
+exec(open('./debianbts/version.py').read(), meta)
+meta['long_description'] = open('./README.md').read()
setup(
name='python-debianbts',
- version=__version__,
+ version=meta['__version__'],
description="Python interface to Debian's Bug Tracking System",
-
- long_description="This package provides the debianbts module, which allows to query Debian's Bug Tracking System.",
+ long_description=meta['long_description'],
+ long_description_content_type='text/markdown',
keywords='debian, soap, bts',
author='Bastian Venthur',
author_email='venthur@debian.org',
@@ -34,12 +36,10 @@ setup(
},
classifiers=[
"Development Status :: 5 - Production/Stable",
- "Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
- "Topic :: Communications",
"Topic :: Software Development :: Bug Tracking",
],
)
|
fixed setup.py's PEP 8 errors
|
venthur_python-debianbts
|
train
|
dc160d2492f5e4f378ebb547d0cdb6f36dab8501
|
diff --git a/lib-dempsyimpl/src/main/java/com/nokia/dempsy/router/DecentralizedRoutingStrategy.java b/lib-dempsyimpl/src/main/java/com/nokia/dempsy/router/DecentralizedRoutingStrategy.java
index <HASH>..<HASH> 100644
--- a/lib-dempsyimpl/src/main/java/com/nokia/dempsy/router/DecentralizedRoutingStrategy.java
+++ b/lib-dempsyimpl/src/main/java/com/nokia/dempsy/router/DecentralizedRoutingStrategy.java
@@ -146,7 +146,7 @@ public class DecentralizedRoutingStrategy implements RoutingStrategy
// For now if we hit the race condition between when the target Inbound
// has created the slot and when it assigns the slot info, we simply claim
// we failed.
- if (emptySlots.size() > 0)
+ if (newtotalAddressCounts < 0 || emptySlots.size() > 0)
return false;
if (newtotalAddressCounts > 0)
|
[fix] Outbound could lose track when fillMapFromActiveSlots double fails with a NoNodeException.
|
Dempsy_dempsy
|
train
|
fbc28cefe03b1ea3ff65300d475d34f5f9629a5c
|
diff --git a/allennlp/models/model.py b/allennlp/models/model.py
index <HASH>..<HASH> 100644
--- a/allennlp/models/model.py
+++ b/allennlp/models/model.py
@@ -331,7 +331,7 @@ class Model(torch.nn.Module, Registrable):
Iterates through all embedding modules in the model and assures it can embed
with the extended vocab. This is required in fine-tuning or transfer learning
scenarios where model was trained with original vocabulary but during
- fine-tuning/tranfer-learning, it will have it work with extended vocabulary
+ fine-tuning/transfer-learning, it will have it work with extended vocabulary
(original + new-data vocabulary).
Parameters
|
Fix typo in Model.extend_embedder_vocab docstring (#<I>)
|
allenai_allennlp
|
train
|
b49bf4299c257a6211de883f0925c87e3c7ca714
|
diff --git a/lang/en/admin.php b/lang/en/admin.php
index <HASH>..<HASH> 100644
--- a/lang/en/admin.php
+++ b/lang/en/admin.php
@@ -1302,6 +1302,7 @@ $string['unbookmarkthispage'] = 'Unbookmark this page';
$string['unicoderequired'] = 'It is required that you store all your data in Unicode format (UTF-8). New installations must be performed into databases that have their default character set as Unicode. If you are upgrading, you should perform the UTF-8 migration process (see the Admin page).';
$string['uninstallplugin'] = 'Uninstall';
$string['unlockaccount'] = 'Unlock account';
+$string['unoconvwarning'] = 'The version of unoconv you have installed is not supported.';
$string['unsettheme'] = 'Unset theme';
$string['unsupported'] = 'Unsupported';
$string['unsupporteddbfileformat'] = 'Your database uses Antelope as the file format. Full UTF-8 support in MySQL and MariaDB requires the Barracuda file format. Please switch to the Barracuda file format. See the documentation <a href="https://docs.moodle.org/en/admin/environment/custom check/mysql full unicode support">MySQL full unicode support</a> for details.';
|
MDL-<I> lang: Add the unoconvwarning string back
It had been deprecated and removed by mistake.
|
moodle_moodle
|
train
|
1590fb1b4b0a213ebf24c3f96116a24d94ec11f2
|
diff --git a/files/rightSystem.js b/files/rightSystem.js
index <HASH>..<HASH> 100644
--- a/files/rightSystem.js
+++ b/files/rightSystem.js
@@ -13,11 +13,11 @@ var cache = {};
m.getPathRights = function (path, done) {
path = m.unifyPath(path);
if (cache[path]) {
- console.log('load cached');
+ //console.log('load cached');
cache[path].lastused = Date.now();
done(cache[path].right);
} else {
- console.log('load DB');
+ //console.log('load DB');
loadPathRights(path, function () {
done(cache[path].right);
});
diff --git a/files/rightsMiddleware.js b/files/rightsMiddleware.js
index <HASH>..<HASH> 100644
--- a/files/rightsMiddleware.js
+++ b/files/rightsMiddleware.js
@@ -46,7 +46,7 @@ module.exports = function (bauhausConfig) {
next();
} else {
req.cpPath = rightSystem.unifyPath(req.path);
- console.log('path', req.cpPath);
+ //console.log('path', req.cpPath);
if (isPathPrivate(req.cpPath)) {
if (req.session != null && req.session.user != null && req.session.user.id != null) {
diff --git a/package.json b/package.json
index <HASH>..<HASH> 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "bauhausjs",
- "version": "0.2.24",
+ "version": "0.2.24-1",
"description": "A modular CMS for Node.js",
"scripts": {
"test": "env NODE_ENV=test mocha page/test/ content/test/",
diff --git a/page/middleware.js b/page/middleware.js
index <HASH>..<HASH> 100644
--- a/page/middleware.js
+++ b/page/middleware.js
@@ -134,41 +134,50 @@ middleware.loadNavigation = function (req, res, next) {
}
// Returns true if user is allowed to see page
- function userHasAccess (item) {
- if (typeof item.isSecure !== 'boolean' || item.isSecure === false) {
- // page has no security settings or false, let user pass
- return true;
- } else {
- // page is secured, check if user has access
- var pageRoles = item.roles;
-
- if (Array.isArray(pageRoles) && pageRoles.length === 0) {
- // page is secured, but requires no specific role
- if (req.session.user) {
- // user is authenticated, let pass
- return true;
- } else {
- // user is not authenticated, reject
- return false
- }
- }
-
- if(req.session.user != null){
- var userRolesIds = (req.session.user.roleIds != null)? req.session.user.roleIds: [];
-
- // compare required roles and user roles
- for (var r in pageRoles) {
- var pageRoleId = pageRoles[r].toString();
- if (userRolesIds.indexOf(pageRoleId) !== -1) {
- // user has role, let pass
- return true;
- }
- }
- }
- }
- // all pass rules failed, reject user
- return false;
- }
+ function userHasAccess (item) {
+ if (typeof item.isSecure !== 'boolean' || item.isSecure === false) {
+ // page has no security settings or false, let user pass
+ return true;
+ } else {
+ // page is secured, check if user has access
+ var pageRoles = item.roles;
+ debug('pageRoles', item.title, pageRoles)
+
+ if (Array.isArray(pageRoles) && pageRoles.length === 0) {
+ // page is secured, but requires no specific role
+ if (req.session.user) {
+ // user is authenticated, let pass
+ return true;
+ } else {
+ // user is not authenticated, reject
+ return false
+ }
+ }
+
+ if (!req.session.user || !req.session.user.roleIds) {
+ // user session does not exist, skip
+ return false;
+ }
+
+ // make sure roleIds are strings (on first request they are objects)
+ var userRoles = [];
+ var sessionRoleIds = req.session.user.roleIds;
+ for (var r in sessionRoleIds) {
+ userRoles.push(sessionRoleIds[r].toString());
+ }
+
+ // compare required roles and user roles
+ for (var r in pageRoles) {
+ var pageRoleId = pageRoles[r].toString();
+ if (userRoles.indexOf(pageRoleId) !== -1) {
+ // user has role, let pass
+ return true;
+ }
+ }
+ }
+ // all pass rules failed, reject user
+ return false;
+ }
Page.findOne(query, function (err, doc) {
doc.getTree({
|
Bugfix: secure menu entries restricted by user group were only visible after a reload; fixed.
|
bauhausjs_bauhausjs
|
train
|
bf96d536d9c7c00705b04d1824f29f0d128f3f31
|
diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go
index <HASH>..<HASH> 100644
--- a/agent/xds/listeners.go
+++ b/agent/xds/listeners.go
@@ -242,9 +242,9 @@ func (s *Server) listenersFromSnapshotConnectProxy(cInfo connectionInfo, cfgSnap
resources = append(resources, outboundListener)
}
+ // Looping over explicit upstreams is only needed for prepared queries because they do not have discovery chains
for id, u := range cfgSnap.ConnectProxy.UpstreamConfig {
- if _, ok := cfgSnap.ConnectProxy.DiscoveryChain[id]; ok && u.DestinationType != structs.UpstreamDestTypePreparedQuery {
- // This upstream is already covered above
+ if u.DestinationType != structs.UpstreamDestTypePreparedQuery {
continue
}
@@ -258,10 +258,7 @@ func (s *Server) listenersFromSnapshotConnectProxy(cInfo connectionInfo, cfgSnap
if u.LocalBindAddress != "" {
address = u.LocalBindAddress
}
- // This is the case where upstream config is centralized but no port was specified
- if u.LocalBindPort == 0 {
- continue
- }
+
upstreamListener := makeListener(id, address, u.LocalBindPort, envoy_core_v3.TrafficDirection_OUTBOUND)
filterChain, err := s.makeUpstreamFilterChainForDiscoveryChain(
|
Upstreams loop is only for prepared queries and they are not CentrallyConfigured
|
hashicorp_consul
|
train
|
f1d44c64ff9aa044d6ca43cee4993ffb6aefe7f2
|
diff --git a/undocker.py b/undocker.py
index <HASH>..<HASH> 100644
--- a/undocker.py
+++ b/undocker.py
@@ -16,12 +16,13 @@ LOG = logging.getLogger(__name__)
def parse_args():
p = argparse.ArgumentParser()
- p.add_argument('--tag', '-t',
- default='latest')
+
p.add_argument('--ignore-errors', '-i',
- action='store_true')
+ action='store_true',
+ help='Ignore OS errors when extracting files')
p.add_argument('--output', '-o',
- default='.')
+ default='.',
+ help='Output directory (defaults to ".")')
p.add_argument('--verbose', '-v',
action='store_const',
const=logging.INFO,
@@ -30,6 +31,14 @@ def parse_args():
action='store_const',
const=logging.DEBUG,
dest='loglevel')
+ p.add_argument('--list', '--ls',
+ action='store_true',
+ help='List layers in an image')
+ p.add_argument('--layer', '-l',
+ action='append',
+ help='Extract only the specified layer')
+ p.add_argument('image')
+
p.set_defaults(level=logging.WARN)
return p.parse_args()
@@ -49,10 +58,16 @@ def find_layers(img, id):
for layer in find_layers(img, pid):
yield layer
+
def main():
args = parse_args()
logging.basicConfig(level=args.loglevel)
+ try:
+ name, tag = args.image.split(':', 1)
+ except ValueError:
+ name, tag = args.image, 'latest'
+
with tempfile.NamedTemporaryFile() as fd:
fd.write(sys.stdin.read())
fd.seek(0)
@@ -60,22 +75,40 @@ def main():
repos = img.extractfile('repositories')
repos = json.load(repos)
- top = repos[repos.keys()[0]][args.tag]
- LOG.info('extracting image %s', top)
+ try:
+ top = repos[name][tag]
+ except KeyError:
+ LOG.error('failed to find image %s with tag %s',
+ name,
+ tag)
+ sys.exit(1)
+
+ LOG.info('extracting image %s (%s)', name, top)
layers = list(find_layers(img, top))
+ if args.list:
+ print '\n'.join(reversed(layers))
+ sys.exit(0)
+
if not os.path.isdir(args.output):
os.mkdir(args.output)
for id in reversed(layers):
+ if args.layer and not id in args.layer:
+ continue
+
LOG.info('extracting layer %s', id)
with tarfile.TarFile(
fileobj=img.extractfile('%s/layer.tar' % id)) as layer:
try:
layer.extractall(path=args.output)
- except OSError:
- if not args.ignore_errors:
- raise
+ except OSError as exc:
+ if args.ignore_errors:
+ LOG.info('ignoring error: %s',
+ exc)
+ continue
+
+ raise
if __name__ == '__main__':
|
correctly handle image archives
A Docker "image" produced by "docker save" can actually contain multiple
images. We now handle that correctly, and also have facilities for
listing image layers and extracting specific layers rather than an
entire image.
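A small sketch of the new image-reference parsing shown in the diff; the sample references are illustrative:
```
# 'name:tag' -> (name, tag); a bare name defaults to the 'latest' tag.
def split_image_ref(image):
    try:
        name, tag = image.split(':', 1)
    except ValueError:
        name, tag = image, 'latest'
    return name, tag

assert split_image_ref('busybox:musl') == ('busybox', 'musl')
assert split_image_ref('busybox') == ('busybox', 'latest')
```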
|
larsks_undocker
|
train
|
a07ba4020ce4c7123e7126f9d82e43a6ecfcc64f
|
diff --git a/PHPCI/Controller/BitbucketController.php b/PHPCI/Controller/BitbucketController.php
index <HASH>..<HASH> 100644
--- a/PHPCI/Controller/BitbucketController.php
+++ b/PHPCI/Controller/BitbucketController.php
@@ -15,34 +15,36 @@ class BitbucketController extends b8\Controller
public function webhook($project)
{
- $payload = json_decode($this->getParam('payload'), true);
+ $payload = json_decode(file_get_contents('php://input'), true);
+ $branches = array();
+ $commits = array();
- try
+ foreach($payload['commits'] as $commit)
{
- $build = new Build();
- $build->setProjectId($project);
- $build->setCommitId($payload['after']);
- $build->setStatus(0);
- $build->setLog('');
- $build->setCreated(new \DateTime());
- $build->setBranch(str_replace('refs/heads/', '', $payload['ref']));
- }
- catch(\Exception $ex)
- {
- header('HTTP/1.1 400 Bad Request');
- header('Ex: ' . $ex->getMessage());
- die('FAIL');
+ if(!in_array($commit['branch'], $branches))
+ {
+ $branches[] = $commit['branch'];
+ $commits[$commit['branch']] = $commit['raw_node'];
+ }
}
- try
+ foreach($branches as $branch)
{
- $this->_buildStore->save($build);
- }
- catch(\Exception $ex)
- {
- header('HTTP/1.1 500 Internal Server Error');
- header('Ex: ' . $ex->getMessage());
- die('FAIL');
+ try
+ {
+
+ $build = new Build();
+ $build->setProjectId($project);
+ $build->setCommitId($commits[$branch]);
+ $build->setStatus(0);
+ $build->setLog('');
+ $build->setCreated(new \DateTime());
+ $build->setBranch($branch);
+ $this->_buildStore->save($build);
+ }
+ catch(\Exception $ex)
+ {
+ }
}
die('OK');
|
Adding proper Bitbucket support
|
dancryer_PHPCI
|
train
|
46640bbc4df85d38b99d896fe5d98012e5ac81cf
|
diff --git a/abilian/services/indexing/service.py b/abilian/services/indexing/service.py
index <HASH>..<HASH> 100644
--- a/abilian/services/indexing/service.py
+++ b/abilian/services/indexing/service.py
@@ -509,7 +509,8 @@ def index_update(index, items):
setattr(session, '_model_changes', {})
updated = set()
- with AsyncWriter(index) as writer:
+ writer = AsyncWriter(index)
+ try:
for op, cls_name, pk, data in items:
if pk is None:
continue
@@ -538,55 +539,22 @@ def index_update(index, items):
# deleted after task queued, but before task run
continue
- # # Hack: Load lazy fields
- # # This prevents a transaction error in get_document
- # # FIXME: really required?
- # for key in indexed_fields:
- # getattr(obj, key, None)
-
document = service.get_document(obj, adapter)
- try:
- writer.add_document(**document)
- except ValueError as exc:
- if not current_app.testing:
- raise
-
- # added to find a flipping failure in testcases during CI
- import sys
- Exc_cls, exc_instance, tb = sys.exc_info()
-
- while(tb.tb_next): # go to first tb
- tb = tb.tb_next
-
- # find if this is the case we want to debug
- frame = tb.tb_frame
- if (frame.f_globals.get('__name__', None) == 'whoosh.fields'
- and frame.f_code.co_name == 'index'):
- frame = frame.f_back
-
- if (frame.f_globals.get('__name__', None) == 'whoosh.writing'
- and frame.f_code.co_name == 'add_document'):
- fieldname = frame.f_locals['fieldname']
- logger.error('Field name: %s', fieldname)
-
- doc_kw = {}
- for k, v in document.items():
- if isinstance(v, basestring) and len(v) > 200:
- ellipsis = '...'
- if isinstance(v, unicode):
- ellipsis = u'...'
- v = v[:197] + ellipsis
- doc_kw[k] = v
- logger.error(
- 'Exception: %s: %s\n\nadd_document failed object_key=%s\n\n**document=%s\n\n',
- exc.__class__.__name__, exc.message,
- repr(object_key),
- pprint.pformat(doc_kw, indent=2))
- raise
-
+ writer.add_document(**document)
updated.add(object_key)
+ except:
+ writer.cancel()
+ raise
session.close()
+ writer.commit()
+ try:
+ # async thread: wait for its termination
+ writer.join()
+ except RuntimeError:
# happens when an actual writer was already available: AsyncWriter didn't
# need to start a thread
+ pass
class TestingStorage(RamStorage):
|
index_update task: wait for AsyncWriter thread
We shouldn't exit the task before the writer has actually written its
content. We mostly use AsyncWriter so we don't have to worry about
obtaining a writer.
|
abilian_abilian-core
|
train
|
91fb9c56adadc73bb5d213d4823c0d4ec5b9c28d
|
diff --git a/test/resolver-test.js b/test/resolver-test.js
index <HASH>..<HASH> 100644
--- a/test/resolver-test.js
+++ b/test/resolver-test.js
@@ -81,22 +81,4 @@ module.exports = {
});
},
- lookup: function (test) {
- this.resolver.lookup('www.google.com', function (err, ip, family) {
- test.ifError(err);
- test.notStrictEqual(ip, null, err);
- test.ok(net.isIP(ip), "Invalid IP address.");
- if (family === 4) {
- test.strictEqual(family, 4);
- test.ok(net.isIPv4(ip), "Invalid IP address.");
- } else if (family === 6) {
- test.strictEqual(family, 6);
- test.ok(net.isIPv6(ip), "Invalid IP address.");
- } else {
- test.ok(false, "Invalid family found.");
- }
- test.done();
- });
- },
-
};
|
Removed test for the resolver.lookup method.
|
royalpinto_node-cares
|
train
|
1e4ee483b4a6f717730968a2f2711e8197446bf9
|
diff --git a/Gemfile.lock b/Gemfile.lock
index <HASH>..<HASH> 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -110,9 +110,9 @@ PLATFORMS
ruby
DEPENDENCIES
- rails (~> 5.1.3)
+ rails (~> 5.1, >= 5.1.3)
sendable_rails!
- sqlite3
+ sqlite3 (~> 1.3.13)
BUNDLED WITH
1.14.6
diff --git a/lib/sendable_rails/action_mailer_ext.rb b/lib/sendable_rails/action_mailer_ext.rb
index <HASH>..<HASH> 100644
--- a/lib/sendable_rails/action_mailer_ext.rb
+++ b/lib/sendable_rails/action_mailer_ext.rb
@@ -1,5 +1,6 @@
require 'net/http'
require 'uri'
+require 'json'
module SendableRails
module ActionMailerExt
@@ -32,11 +33,19 @@ module SendableRails
request.set_form_data(params)
response = http.request(request)
- content = response.body
+ email = JSON.parse(response.body)
- [{
- content: content
- }]
+ formats = []
+
+ if (email['html'])
+ formats << { content_type: 'text/html', body: email['html'] }
+ end
+
+ if (email['plain'])
+ formats << { content_type: 'text/plain', body: email['text'] }
+ end
+
+ formats
else
super
end
diff --git a/lib/sendable_rails/railtie.rb b/lib/sendable_rails/railtie.rb
index <HASH>..<HASH> 100644
--- a/lib/sendable_rails/railtie.rb
+++ b/lib/sendable_rails/railtie.rb
@@ -4,8 +4,13 @@ module SendableRails
initializer "sendable.configure" do |app|
SendableRails.config do |config|
- config.project_id = app.config.sendable[:project_id]
- config.api_key = app.config.sendable[:api_key]
+ if app.config.sendable[:project_id]
+ config.project_id = app.config.sendable[:project_id]
+ end
+
+ if app.config.sendable[:api_key]
+ config.api_key = app.config.sendable[:api_key]
+ end
end
end
end
diff --git a/lib/sendable_rails/version.rb b/lib/sendable_rails/version.rb
index <HASH>..<HASH> 100644
--- a/lib/sendable_rails/version.rb
+++ b/lib/sendable_rails/version.rb
@@ -1,3 +1,3 @@
module SendableRails
- VERSION = '0.1.0'
+ VERSION = '0.2.0'
end
diff --git a/sendable_rails.gemspec b/sendable_rails.gemspec
index <HASH>..<HASH> 100644
--- a/sendable_rails.gemspec
+++ b/sendable_rails.gemspec
@@ -17,5 +17,5 @@ Gem::Specification.new do |s|
s.files = Dir["{app,config,db,lib}/**/*", "MIT-LICENSE", "Rakefile", "README.md"]
s.add_development_dependency 'rails', '~> 5.1', '>= 5.1.3'
- s.add_development_dependency 'sqlite3', '~> 0'
+ s.add_development_dependency 'sqlite3', '~> 1.3.13'
end
|
Added support for html and text email parts.
|
sendable_sendable_rails
|
train
|
3c7b88d3b546478cd840dd1b547144d23230ebfc
|
diff --git a/runtime/undertow/src/main/java/org/wildfly/swarm/runtime/undertow/UndertowConfiguration.java b/runtime/undertow/src/main/java/org/wildfly/swarm/runtime/undertow/UndertowConfiguration.java
index <HASH>..<HASH> 100644
--- a/runtime/undertow/src/main/java/org/wildfly/swarm/runtime/undertow/UndertowConfiguration.java
+++ b/runtime/undertow/src/main/java/org/wildfly/swarm/runtime/undertow/UndertowConfiguration.java
@@ -79,6 +79,18 @@ public class UndertowConfiguration extends AbstractServerConfiguration<UndertowF
node.get(OP).set(ADD);
list.add(node);
+ node = new ModelNode();
+ node.get(OP_ADDR).set(address.append("servlet-container", "default").append( "setting", "websockets" ).toModelNode());
+ node.get(OP).set(ADD);
+ list.add(node);
+
+ node = new ModelNode();
+ node.get(OP_ADDR).set(address.append("servlet-container", "default").append( "setting", "jsp" ).toModelNode());
+ node.get(OP).set(ADD);
+ list.add(node);
+
+
+
return list;
|
Enable WebSockets and JSP explicitly.
|
thorntail_thorntail
|
train
|
96e26d3eef1352f143cfa1a21ed225a563e42eed
|
diff --git a/javascript/firefox-driver/js/dispatcher.js b/javascript/firefox-driver/js/dispatcher.js
index <HASH>..<HASH> 100644
--- a/javascript/firefox-driver/js/dispatcher.js
+++ b/javascript/firefox-driver/js/dispatcher.js
@@ -174,8 +174,6 @@ Dispatcher.prototype.init_ = function() {
on(Request.Method.GET, Dispatcher.executeAs('getCurrentUrl')).
on(Request.Method.POST, Dispatcher.executeAs('get'));
- this.bind_('/session/:sessionId/alert').
- on(Request.Method.GET, Dispatcher.executeAs('getAlert'));
this.bind_('/session/:sessionId/accept_alert').
on(Request.Method.POST, Dispatcher.executeAs('acceptAlert'));
this.bind_('/session/:sessionId/dismiss_alert').
@@ -184,9 +182,6 @@ Dispatcher.prototype.init_ = function() {
on(Request.Method.GET, Dispatcher.executeAs('getAlertText')).
on(Request.Method.POST, Dispatcher.executeAs('setAlertValue'));
- this.bind_('/session/:sessionId/alert_text').
- on(Request.Method.GET, Dispatcher.executeAs('getAlertText'));
-
this.bind_('/session/:sessionId/forward').
on(Request.Method.POST, Dispatcher.executeAs('goForward'));
this.bind_('/session/:sessionId/back').
|
Removing outdated binding for /session/:sessionId/alert and duplicated binding for /session/:sessionId/alert_text
|
SeleniumHQ_selenium
|
train
|
7ab13308b3169dc00aae3678261b08d01bfb1935
|
diff --git a/sigal/image.py b/sigal/image.py
index <HASH>..<HASH> 100644
--- a/sigal/image.py
+++ b/sigal/image.py
@@ -118,7 +118,13 @@ def generate_image(source, outname, settings, options=None):
logger.error('Wrong processor name: %s', settings['img_processor'])
sys.exit()
- processor = processor_cls(*settings['img_size'], upscale=False)
+ width, height = settings['img_size']
+
+ if img.size[0] < img.size[1]:
+ # swap target size if image is in portrait mode
+ height, width = width, height
+
+ processor = processor_cls(width, height, upscale=False)
img = processor.process(img)
# signal.send() does not work here as plugins can modify the image, so we
|
Resize portrait images to same size as landscape
With the current approach images are resized along the width, which means images
in portrait mode are substantially smaller than landscape images. This change
resizes portrait images along the height so that they will cover the exact same
area.
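A minimal sketch of the orientation-aware target size, assuming a PIL-style image whose .size is (width, height); the sample numbers are illustrative:
```
def target_size(img_size, settings_size):
    width, height = settings_size
    if img_size[0] < img_size[1]:
        # portrait: swap so the long edge gets the large dimension
        height, width = width, height
    return width, height

assert target_size((800, 600), (640, 480)) == (640, 480)  # landscape
assert target_size((600, 800), (640, 480)) == (480, 640)  # portrait
```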
|
saimn_sigal
|
train
|
ea6744d0aa5b45abb271885a43f7dd2a15c8449f
|
diff --git a/lib/procodile/cli.rb b/lib/procodile/cli.rb
index <HASH>..<HASH> 100644
--- a/lib/procodile/cli.rb
+++ b/lib/procodile/cli.rb
@@ -104,6 +104,10 @@ module Procodile
cli.options[:proxy] = true
end
+ opts.on("--allocate-ports", "Allow free port numbers to all processes") do
+ cli.options[:allocate_ports] = true
+ end
+
opts.on("-d", "--dev", "Run in development mode") do
cli.options[:development] = true
cli.options[:respawn] = false
@@ -454,6 +458,7 @@ module Procodile
run_options[:stop_when_none] = options[:stop_when_none]
run_options[:proxy] = options[:proxy]
run_options[:force_single_log] = options[:foreground]
+ run_options[:allocate_ports] = options[:allocate_ports]
if options[:clean]
FileUtils.rm_rf(Dir[File.join(config.pid_root, '*')])
diff --git a/lib/procodile/instance.rb b/lib/procodile/instance.rb
index <HASH>..<HASH> 100644
--- a/lib/procodile/instance.rb
+++ b/lib/procodile/instance.rb
@@ -101,7 +101,7 @@ module Procodile
nil
else
- if @process.proxy? && @supervisor.tcp_proxy
+ if @supervisor.run_options[:allocate_ports] || (@process.proxy? && @supervisor.tcp_proxy)
allocate_port
end
@@ -317,20 +317,42 @@ module Procodile
# Find a port number for this instance to listen on. We just check that nothing is already listening on it.
# The process is expected to take it straight away if it wants it.
#
- def allocate_port
+ def allocate_port(max_attempts = 10)
+ attempts = 0
until @port
+ attempts += 1
possible_port = rand(10000) + 20000
- begin
- server = TCPServer.new('127.0.0.1', possible_port)
- server.close
+ if self.port_available?(possible_port)
+ Procodile.log(@process.log_color, description, "Allocated port as #{possible_port}")
return @port = possible_port
- rescue
- # Nah.
+ elsif attempts >= max_attempts
+ raise Procodile::Error, "Couldn't allocate port for #{instance.name}"
end
end
end
#
+ # Is the given port available?
+ #
+ def port_available?(port)
+ case @process.network_protocol
+ when 'tcp'
+ server = TCPServer.new('127.0.0.1', port)
+ server.close
+ true
+ when 'udp'
+ server = UDPSocket.new
+ server.bind('127.0.0.1', port)
+ server.close
+ true
+ else
+ raise Procodile::Error, "Invalid network_protocol '#{@process.network_protocol}'"
+ end
+ rescue Errno::EADDRINUSE => e
+ false
+ end
+
+ #
# If procodile is executed through rbenv it will pollute our environment which means that
# any spawned processes will be invoked with procodile's ruby rather than the ruby that
# the application wishes to use
diff --git a/lib/procodile/process.rb b/lib/procodile/process.rb
index <HASH>..<HASH> 100644
--- a/lib/procodile/process.rb
+++ b/lib/procodile/process.rb
@@ -132,6 +132,12 @@ module Procodile
proxy? ? @options['proxy_address'] || '127.0.0.1' : nil
end
+ #
+ # Return the network protocol for this process
+ #
+ def network_protocol
+ @options['network_protocol'] || 'tcp'
+ end
#
# Generate an array of new instances for this process (based on its quantity)
diff --git a/lib/procodile/status_cli_output.rb b/lib/procodile/status_cli_output.rb
index <HASH>..<HASH> 100644
--- a/lib/procodile/status_cli_output.rb
+++ b/lib/procodile/status_cli_output.rb
@@ -59,10 +59,10 @@ module Procodile
print "|| => ".color(process['log_color']) + instance['description'].to_s.ljust(17, ' ').color(process['log_color'])
print instance['status'].ljust(10, ' ')
print " " + formatted_timestamp(instance['started_at']).ljust(10, ' ')
- print " " + instance['pid'].to_s.ljust(6, ' ')
- print " " + instance['respawns'].to_s.ljust(4, ' ')
- print " " + (instance['port'] || "-").to_s.ljust(6, ' ')
- print " " + (instance['tag'] || "-").to_s
+ print " pid:" + instance['pid'].to_s.ljust(6, ' ')
+ print " respawns:" + instance['respawns'].to_s.ljust(4, ' ')
+ print " port:" + (instance['port'] || "-").to_s.ljust(6, ' ')
+ print " tag:" + (instance['tag'] || "-").to_s
puts
end
end
|
Support for allocating UDP ports, plus an option to force allocation of ports even when not using the built-in proxy.
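The probe itself is language-agnostic; a Python sketch of the same bind-and-release check (names invented), choosing the socket type by protocol:
```
import socket

def port_available(port, protocol='tcp'):
    kind = socket.SOCK_STREAM if protocol == 'tcp' else socket.SOCK_DGRAM
    s = socket.socket(socket.AF_INET, kind)
    try:
        s.bind(('127.0.0.1', port))  # fails with EADDRINUSE if taken
        return True
    except OSError:
        return False
    finally:
        s.close()
```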
|
adamcooke_procodile
|
train
|
34ebb11f65a8d504198d241fa9876ea84591572b
|
diff --git a/src/main/java/com/semanticcms/file/servlet/impl/FileImpl.java b/src/main/java/com/semanticcms/file/servlet/impl/FileImpl.java
index <HASH>..<HASH> 100644
--- a/src/main/java/com/semanticcms/file/servlet/impl/FileImpl.java
+++ b/src/main/java/com/semanticcms/file/servlet/impl/FileImpl.java
@@ -33,6 +33,7 @@ import com.aoindustries.util.StringUtility;
import com.semanticcms.core.model.NodeBodyWriter;
import com.semanticcms.core.model.PageRef;
import com.semanticcms.core.servlet.Headers;
+import com.semanticcms.core.servlet.PageIndex;
import com.semanticcms.core.servlet.PageRefResolver;
import com.semanticcms.core.servlet.ServletElementContext;
import com.semanticcms.core.servlet.impl.LinkImpl;
@@ -132,7 +133,10 @@ final public class FileImpl {
final boolean isAllowed = isAllowed(servletContext, request);
final boolean isExporting = Headers.isExporting(request);
- out.write("<a");
+ String refId = PageIndex.getRefIdInPage(servletContext, request, element.getPage(), element.getId());
+ out.write("<a id=\"");
+ encodeTextInXhtmlAttribute(refId, out);
+ out.append('"');
if(!hasBody) {
// TODO: Class like p:link, where providing empty class disables automatic class selection here
out.write(" class=\"");
|
Added missing id attribute on files.
|
aoindustries_semanticcms-file-servlet
|
train
|
3a0794cd8e7dabecb0d39c860305a6dccd2b6a87
|
diff --git a/index.js b/index.js
index <HASH>..<HASH> 100644
--- a/index.js
+++ b/index.js
@@ -49,7 +49,7 @@ function extract(opts, cb) {
if (!res.images) {
res.images = new Set();
}
- res.images.add(url.resolve(host, src));
+ res.images.add(url.resolve(opts.uri, src));
}
}
},
|
Use the original URI for relative resources.
|
velocityzen_meta-extractor
|
train
|
c619a15dbd71933dc11b57b578b29ed96acd2510
|
diff --git a/server/src/test/java/org/cloudfoundry/identity/uaa/performance/TestMySQLEmailSearch.java b/server/src/test/java/org/cloudfoundry/identity/uaa/performance/TestMySQLEmailSearch.java
index <HASH>..<HASH> 100644
--- a/server/src/test/java/org/cloudfoundry/identity/uaa/performance/TestMySQLEmailSearch.java
+++ b/server/src/test/java/org/cloudfoundry/identity/uaa/performance/TestMySQLEmailSearch.java
@@ -20,6 +20,7 @@ import org.cloudfoundry.identity.uaa.scim.endpoints.ScimUserEndpoints;
import org.cloudfoundry.identity.uaa.scim.jdbc.JdbcScimUserProvisioning;
import org.cloudfoundry.identity.uaa.scim.jdbc.ScimSearchQueryConverter;
import org.cloudfoundry.identity.uaa.test.JdbcTestBase;
+import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -98,9 +99,13 @@ public class TestMySQLEmailSearch extends JdbcTestBase {
}
@Override
+ @After
public void tearDown() throws Exception {
if (HSQLDB_DEFAULT.equals(profile) || (!success)) {
- super.tearDown();
+ tearDown(true);
+ } else {
+ //leaves data in place
+ tearDown(false);
}
}
diff --git a/server/src/test/java/org/cloudfoundry/identity/uaa/test/JdbcTestBase.java b/server/src/test/java/org/cloudfoundry/identity/uaa/test/JdbcTestBase.java
index <HASH>..<HASH> 100644
--- a/server/src/test/java/org/cloudfoundry/identity/uaa/test/JdbcTestBase.java
+++ b/server/src/test/java/org/cloudfoundry/identity/uaa/test/JdbcTestBase.java
@@ -46,6 +46,7 @@ public class JdbcTestBase extends TestClassNullifier {
@Before
public void setUp() throws Exception {
+ IdentityZoneHolder.clear();
MockEnvironment environment = new MockEnvironment();
if (System.getProperty("spring.profiles.active")!=null) {
environment.setActiveProfiles(StringUtils.commaDelimitedListToStringArray(System.getProperty("spring.profiles.active")));
@@ -102,7 +103,14 @@ public class JdbcTestBase extends TestClassNullifier {
@After
public void tearDown() throws Exception {
- cleanData();
+ tearDown(true);
+ }
+
+ public final void tearDown(boolean cleandata) throws Exception {
+ if (cleandata) {
+ cleanData();
+ }
+ IdentityZoneHolder.clear();
((org.apache.tomcat.jdbc.pool.DataSource)dataSource).close(true);
webApplicationContext.destroy();
}
diff --git a/server/src/test/java/org/cloudfoundry/identity/uaa/zone/MultitenantJdbcClientDetailsServiceTests.java b/server/src/test/java/org/cloudfoundry/identity/uaa/zone/MultitenantJdbcClientDetailsServiceTests.java
index <HASH>..<HASH> 100644
--- a/server/src/test/java/org/cloudfoundry/identity/uaa/zone/MultitenantJdbcClientDetailsServiceTests.java
+++ b/server/src/test/java/org/cloudfoundry/identity/uaa/zone/MultitenantJdbcClientDetailsServiceTests.java
@@ -10,7 +10,6 @@ import org.cloudfoundry.identity.uaa.test.JdbcTestBase;
import org.cloudfoundry.identity.uaa.test.UaaTestAccounts;
import org.cloudfoundry.identity.uaa.util.JsonUtils;
import org.hamcrest.Matchers;
-import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -105,11 +104,6 @@ public class MultitenantJdbcClientDetailsServiceTests extends JdbcTestBase {
}
- @After
- public void tearDown() throws Exception {
- IdentityZoneHolder.clear();
- }
-
protected void addApproval(String clientId) {
Timestamp timestamp = new Timestamp(System.currentTimeMillis());
String zoneId = IdentityZoneHolder.get().getId();
|
Fix incorrect override of the @After tearDown method
|
cloudfoundry_uaa
|
train
|
6043c9329798cd12159aa3b019797d6ecf04c5c3
|
diff --git a/lib/controllers/actions/jwt.js b/lib/controllers/actions/jwt.js
index <HASH>..<HASH> 100644
--- a/lib/controllers/actions/jwt.js
+++ b/lib/controllers/actions/jwt.js
@@ -13,9 +13,9 @@ module.exports = function(req, res){
if(!req.session.authenticated){
return res.forbidden('You are not authorized.');
}
-
+
var jwtData = waterlock._utils.createJwt(req, res);
-
+
Jwt.create({token: jwtData.token, uses: 0, owner: req.session.user.id}).exec(function(err){
if(err){
return res.serverError('JSON web token could not be created');
diff --git a/test/unit/controllers/actions/jwt.test.js b/test/unit/controllers/actions/jwt.test.js
index <HASH>..<HASH> 100644
--- a/test/unit/controllers/actions/jwt.test.js
+++ b/test/unit/controllers/actions/jwt.test.js
@@ -4,6 +4,7 @@ var proxyquire = require('proxyquire');
var should = require('should');
var mocha = require('mocha');
var config = require('../../../fixtures/waterlock.config').waterlock;
+var createJwt = require('../../../../lib/utils').createJwt;
describe('actions', function(){
describe('jwt', function(){
@@ -49,7 +50,12 @@ describe('actions', function(){
};
}
};
- global.waterlock = {config: config};
+ global.waterlock = {
+ config: config,
+ _utils: {
+ createJwt: createJwt
+ }
+ };
jwt.apply(this, [req, res]);
});
@@ -77,7 +83,12 @@ describe('actions', function(){
};
}
};
- global.waterlock = {config: config};
+ global.waterlock = {
+ config: config,
+ _utils: {
+ createJwt: createJwt
+ }
+ };
jwt.apply(this, [req, res]);
});
diff --git a/test/unit/utils.test.js b/test/unit/utils.test.js
index <HASH>..<HASH> 100644
--- a/test/unit/utils.test.js
+++ b/test/unit/utils.test.js
@@ -4,6 +4,7 @@ var proxyquire = require('proxyquire');
var should = require('should');
var mocha = require('mocha');
var utils = require('../../lib/utils');
+var config = require('../fixtures/waterlock.config').waterlock;
describe('utils', function(){
describe('#allParams()', function(){
@@ -74,4 +75,31 @@ describe('utils', function(){
done();
});
});
+
+ describe('#createJwt()', function(){
+ it('should create Jwt token including expires and token property', function(done){
+ var req = {
+ session:{
+ authenticated: true,
+ user:{
+ id: 1
+ }
+ }
+ };
+ var res = {
+ json:function(obj){
+ obj.should.be.type('object');
+ obj.should.have.property('token');
+ obj.should.have.property('expires');
+ obj.token.should.be.type('string');
+ done();
+ }
+ };
+ global.waterlock = { config: config };
+ var result = utils.createJwt(req, res);
+ result.should.have.property('expires');
+ result.should.have.property('token');
+ done();
+ });
+ });
});
\ No newline at end of file
|
Fixed existing test and added new test for createJwt util function.
|
waterlock_waterlock
|
train
|
1e3cff9b61c7e43bbdccc62662836bc7c95f4578
|
diff --git a/cultusrego.php b/cultusrego.php
index <HASH>..<HASH> 100644
--- a/cultusrego.php
+++ b/cultusrego.php
@@ -194,7 +194,7 @@ class cultusrego {
private function parse_element_value($element_label, $match) {
if (!preg_match_all("#\@$element_label (.*?)(\n|$)#s", $match, $detail_match)) {
preg_match_all("#\@$element_label\n +\* (.*?)(\n +\*\n|$)#s", $match, $detail_match);
- $detail_match[1] = str_replace(' * ', '', $detail_match[1]);
+ $detail_match[1] = preg_replace('#( *)\* #', '', $detail_match[1]);
}
return implode("\n\n", $detail_match[1]);
}
|
fixed parsing of indented comment blocks, fixes #<I>
|
maoberlehner_cultusrego
|
train
|
819e36f4d22e5546529fd9be096d6f65089492d3
|
diff --git a/scripts/instance.php b/scripts/instance.php
index <HASH>..<HASH> 100644
--- a/scripts/instance.php
+++ b/scripts/instance.php
@@ -1,15 +1,16 @@
<?php
namespace Aura\Filter;
require_once dirname(__DIR__) . '/src.php';
-
-$ruleRegistry = require __DIR__ . '/registry.php';
-$ruleRegistry['any'] = function() use ($ruleRegistry) {
- $rule = new \Aura\Filter\Rule\Any;
- $rule->setRuleLocator(new \Aura\Filter\RuleLocator($ruleRegistry));
- return $rule;
-};
-
return new RuleCollection(
- new RuleLocator($ruleRegistry),
+ new RuleLocator(array_merge(
+ require __DIR__ . '/registry.php',
+ ['any' => function () {
+ $rule = new \Aura\Filter\Rule\Any;
+ $rule->setRuleLocator(new \Aura\Filter\RuleLocator(
+ require __DIR__ . '/registry.php'
+ ));
+ return $rule;
+ }]
+ )),
new Translator(require dirname(__DIR__) . '/intl/en_US.php')
-);
\ No newline at end of file
+);
diff --git a/tests/example.php b/tests/example.php
index <HASH>..<HASH> 100644
--- a/tests/example.php
+++ b/tests/example.php
@@ -1,27 +1,25 @@
<?php
-use Aura\Filter\Value;
-
-$filter_chain = require_once dirname(__DIR__). '/scripts/instance.php';
+$filter = require_once dirname(__DIR__). '/scripts/instance.php';
// set up the filter chain.
-// $filter_chain->addHardRule($field, $method, $name, $param1, $param2, $paramN);
+// $filter->addHardRule($field, $method, $name, $param1, $param2, $paramN);
-$filter_chain->addHardRule('username', Filter::IS, 'alnum');
-$filter_chain->addHardRule('username', Filter::IS, 'strlenBetween', 6, 12);
-$filter_chain->addHardRule('username', Filter::FIX, 'alnum');
+$filter->addHardRule('username', $filter::IS, 'alnum');
+$filter->addHardRule('username', $filter::IS, 'strlenBetween', 6, 12);
+$filter->addHardRule('username', $filter::FIX, 'alnum');
-$filter_chain->addHardRule('birthday', Filter::IS, 'dateTime');
-$filter_chain->addHardRule('birthday', Filter::FIX, 'dateTime', 'Y-m-d');
-$filter_chain->addHardRule('birthday', Filter::IS, 'min', '1970-08-08'); // at least 42 on Aug 8
+$filter->addHardRule('birthday', $filter::IS, 'dateTime');
+$filter->addHardRule('birthday', $filter::FIX, 'dateTime', 'Y-m-d');
+$filter->addHardRule('birthday', $filter::IS, 'min', '1970-08-08'); // at least 42 on Aug 8
-$filter_chain->addHardRule('nickname', Filter::IS_BLANK_OR, 'string');
-$filter_chain->addHardRule('nickname', Filter::FIX_BLANK_OR, 'string');
+$filter->addHardRule('nickname', $filter::IS_BLANK_OR, 'string');
+$filter->addHardRule('nickname', $filter::FIX_BLANK_OR, 'string');
-$filter_chain->addHardRule('accept_terms', Filter::IS, 'bool', true);
-$filter_chain->addHardRule('accept_terms', Filter::FIX, 'bool');
+$filter->addHardRule('accept_terms', $filter::IS, 'bool', true);
+$filter->addHardRule('accept_terms', $filter::FIX, 'bool');
-$filter_chain->addHardRule('password_plaintext', Filter::IS, 'strlenMin', 6);
-$filter_chain->addHardRule('password_confirmed', Filter::IS, 'equalToField', 'password_plaintext');
+$filter->addHardRule('password_plaintext', $filter::IS, 'strlenMin', 6);
+$filter->addHardRule('password_confirmed', $filter::IS, 'equalToField', 'password_plaintext');
$data = (object) [
'username' => 'username',
@@ -30,13 +28,13 @@ $data = (object) [
'something' => 'Hello World',
'accept_terms' => true,
'password_plaintext' => 'passwd',
- 'password_confirmed' => 'passwd'
+ 'password_confirmed' => 'passed'
];
// execute the chain on a data object or array
-$success = $filter_chain->values($data);
+$success = $filter->values($data);
if (! $success) {
// an array of failure messages, with info about the failures
- $failure = $filter_chain->getMessages();
+ $failure = $filter->getMessages();
var_export($failure);
}
|
remove variables from the instance script so as not to pollute the global namespace
|
auraphp_Aura.Filter
|
train
|
99967ad41744b4b36dc9fb8e1ea70645319b688b
|
diff --git a/indra/belief/__init__.py b/indra/belief/__init__.py
index <HASH>..<HASH> 100644
--- a/indra/belief/__init__.py
+++ b/indra/belief/__init__.py
@@ -104,6 +104,9 @@ class SimpleScorer(BeliefScorer):
self.update_probs(prior_probs, subtype_probs)
def update_probs(self, prior_probs=None, subtype_probs=None):
+ print(self.prior_probs)
+ print(self.subtype_probs)
+ print('--------------')
if prior_probs:
for key in ('rand', 'syst'):
self.prior_probs[key].update(prior_probs.get(key, {}))
@@ -111,6 +114,8 @@ class SimpleScorer(BeliefScorer):
logger.debug("Prior probabilities for %s errors: %s"
% (err_type, source_dict))
self.subtype_probs = subtype_probs
+ print(self.prior_probs)
+ print(self.subtype_probs)
def score_evidence_list(self, evidences):
def _score(evidences):
@@ -198,6 +203,7 @@ class SimpleScorer(BeliefScorer):
List of statements to check
"""
sources = set()
+ print(statements)
for stmt in statements:
sources |= set([ev.source_api for ev in stmt.evidence])
for err_type in ('rand', 'syst'):
diff --git a/indra/belief/wm_scorer.py b/indra/belief/wm_scorer.py
index <HASH>..<HASH> 100644
--- a/indra/belief/wm_scorer.py
+++ b/indra/belief/wm_scorer.py
@@ -36,8 +36,6 @@ def get_eidos_counts():
{k: 1.0-min(v, 0.95)-syst_error for k, v
in zip(table['RULE'], table['% correct'])}}
scorer = BayesianScorer(prior_counts={}, subtype_counts=prior_counts)
- print(scorer.prior_probs)
- print(scorer.subtype_probs)
return scorer
diff --git a/indra/tools/live_curation.py b/indra/tools/live_curation.py
index <HASH>..<HASH> 100644
--- a/indra/tools/live_curation.py
+++ b/indra/tools/live_curation.py
@@ -1,6 +1,6 @@
import sys
import pickle
-from flask import Flask, request, abort, Response
+from flask import Flask, request, jsonify
from indra.belief import wm_scorer, BeliefEngine
scorer = wm_scorer.get_eidos_counts()
@@ -22,7 +22,7 @@ def update_beliefs():
prior_counts = {}
subtype_counts = {}
corpus = corpora.get(corpus_id)
- for uuid, correct in curations:
+ for uuid, correct in curations.items():
stmt = corpus.get(uuid)
for ev in stmt.evidence:
extraction_rule = ev.epistemics.get('found_by')
@@ -41,12 +41,14 @@ def update_beliefs():
scorer.update_counts(prior_counts, subtype_counts)
if not return_beliefs:
- return Response({})
+ return jsonify({})
else:
be = BeliefEngine(scorer)
- stmts = list(corpus.items())
+ stmts = list(corpus.values())
be.set_prior_probs(stmts)
- return Response(_get_belief_dict(stmts))
+ belief_dict = _get_belief_dict(stmts)
+ print(belief_dict)
+ return jsonify(belief_dict)
def _get_belief_dict(stmts):
@@ -56,5 +58,7 @@ def _get_belief_dict(stmts):
if __name__ == '__main__':
corpus_path = sys.argv[1]
with open(corpus_path, 'rb') as fh:
- corpora['1'] = pickle.load(fh)
+ stmts = pickle.load(fh)
+ corpora['1'] = {st.uuid: st for st in stmts}
+ print(corpora)
app.run()
|
Use jsonify and fix some dict structures
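For context, a minimal Flask sketch of why the switch matters (route and payload are illustrative): jsonify serializes a dict and sets the JSON content type, which a plain Response does not do for you:
```
from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/beliefs')
def beliefs():
    belief_dict = {'stmt-1': 0.87, 'stmt-2': 0.42}
    # jsonify serializes the dict and sets Content-Type: application/json
    return jsonify(belief_dict)
```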
|
sorgerlab_indra
|
train
|
b4efa4375f49ae2898310c2c1634d324655da5b1
|
diff --git a/app/controllers/pwb/api/v1/properties_controller.rb b/app/controllers/pwb/api/v1/properties_controller.rb
index <HASH>..<HASH> 100644
--- a/app/controllers/pwb/api/v1/properties_controller.rb
+++ b/app/controllers/pwb/api/v1/properties_controller.rb
@@ -2,13 +2,12 @@ module Pwb
class Api::V1::PropertiesController < JSONAPI::ResourceController
# Skipping action below allows me to browse to endpoint
# without having set mime type
- skip_before_action :ensure_valid_accept_media_type
+ # skip_before_action :ensure_valid_accept_media_type
# def set_default_currency
# @model
# end
-
def bulk_create
propertiesJSON = params["propertiesJSON"]
unless propertiesJSON.is_a? Array
@@ -42,7 +41,7 @@ module Pwb
# into them
# if propertyJSON["features"]
- # TODO - process feature (currently not retrieved by PWS so not important)
+ # TODO - process feature (currently not retrieved by PWS so not important)
# new_prop.set_features=propertyJSON["features"]
# end
if propertyJSON["property_photos"]
@@ -71,10 +70,10 @@ module Pwb
end
return render json: {
- new_props: new_props,
- existing_props: existing_props,
- errors: errors
- }
+ new_props: new_props,
+ existing_props: existing_props,
+ errors: errors,
+ }
end
# TODO: rename to update_features:
@@ -129,7 +128,6 @@ module Pwb
files_array = params[:file]
photos_array = []
files_array.each do |file|
-
photo = PropPhoto.create
# photo.subdomain = subdomain
# photo.folder = current_tenant_model.whitelabel_country_code
@@ -180,13 +178,13 @@ module Pwb
private
- def properties_params propertiesJSON
+ def properties_params(propertiesJSON)
# propertiesJSON = params["propertiesJSON"]
# unless propertiesJSON.is_a? Array
# propertiesJSON = JSON.parse propertiesJSON
# end
# pp = ActionController::Parameters.new(propertiesJSON)
- pp = ActionController::Parameters.new({propertiesJSON: propertiesJSON})
+ pp = ActionController::Parameters.new({ propertiesJSON: propertiesJSON })
# https://github.com/rails/strong_parameters/issues/140
# params.require(:propertiesJSON).map do |p|
pp.require(:propertiesJSON).map do |p|
|
Comment out unneeded skip_before_action for ensure_valid_accept_media_type
|
etewiah_property_web_builder
|
train
|
dfd20fd7fa6edec56447fcb049acd5e17a78ec2e
|
diff --git a/github/github.go b/github/github.go
index <HASH>..<HASH> 100644
--- a/github/github.go
+++ b/github/github.go
@@ -735,11 +735,13 @@ func category(path string) rateLimitCategory {
// Deprecated: RateLimit is deprecated, use RateLimits instead.
func (c *Client) RateLimit() (*Rate, *Response, error) {
limits, resp, err := c.RateLimits()
+ if err != nil {
+ return nil, resp, err
+ }
if limits == nil {
- return nil, nil, err
+ return nil, resp, errors.New("RateLimits returned nil limits and error; unable to extract Core rate limit")
}
-
- return limits.Core, resp, err
+ return limits.Core, resp, nil
}
// RateLimits returns the rate limits for the current client.
diff --git a/github/issues_events.go b/github/issues_events.go
index <HASH>..<HASH> 100644
--- a/github/issues_events.go
+++ b/github/issues_events.go
@@ -91,7 +91,7 @@ func (s *IssuesService) ListIssueEvents(owner, repo string, number int, opt *Lis
return nil, resp, err
}
- return events, resp, err
+ return events, resp, nil
}
// ListRepositoryEvents lists events for the specified repository.
@@ -115,7 +115,7 @@ func (s *IssuesService) ListRepositoryEvents(owner, repo string, opt *ListOption
return nil, resp, err
}
- return events, resp, err
+ return events, resp, nil
}
// GetEvent returns the specified issue event.
@@ -135,7 +135,7 @@ func (s *IssuesService) GetEvent(owner, repo string, id int) (*IssueEvent, *Resp
return nil, resp, err
}
- return event, resp, err
+ return event, resp, nil
}
// Rename contains details for 'renamed' events.
diff --git a/github/repos_comments.go b/github/repos_comments.go
index <HASH>..<HASH> 100644
--- a/github/repos_comments.go
+++ b/github/repos_comments.go
@@ -103,7 +103,7 @@ func (s *RepositoriesService) CreateComment(owner, repo, sha string, comment *Re
return nil, resp, err
}
- return c, resp, err
+ return c, resp, nil
}
// GetComment gets a single comment from a repository.
@@ -125,7 +125,7 @@ func (s *RepositoriesService) GetComment(owner, repo string, id int) (*Repositor
return nil, resp, err
}
- return c, resp, err
+ return c, resp, nil
}
// UpdateComment updates the body of a single comment.
@@ -144,7 +144,7 @@ func (s *RepositoriesService) UpdateComment(owner, repo string, id int, comment
return nil, resp, err
}
- return c, resp, err
+ return c, resp, nil
}
// DeleteComment deletes a single comment from a repository.
|
Return nil error explicitly when there is no error. (#<I>)
Follows #<I>.
Fully resolves #<I>.
|
google_go-github
|
train
|
58deaf0ffe9286683a7ca084ebb3b02c97f55835
|
diff --git a/spec/support/webmock_server.rb b/spec/support/webmock_server.rb
index <HASH>..<HASH> 100644
--- a/spec/support/webmock_server.rb
+++ b/spec/support/webmock_server.rb
@@ -29,7 +29,11 @@ class WebMockServer
concurrent do
['TERM', 'INT'].each do |signal|
- trap(signal){ server.shutdown }
+ trap(signal) do
+ Thread.new do
+ server.shutdown
+ end
+ end
end
server.start do |socket|
socket.puts <<-EOT.gsub(/^\s+\|/, '')
|
Can't write to a Logger in a signal handler
Remove warning with ruby <I>.
The warning is shown below:
"log writing failed. can't be called from trap context"
Related:
Can't write to a Logger in a signal handler
<URL>
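The same constraint shows up outside Ruby; as a point of comparison, a Python sketch of the pattern (invented names) that keeps the handler minimal and hands blocking work to a thread:
```
import signal
import threading

def install_shutdown_handler(server):
    def handler(signum, frame):
        # do as little as possible in trap/handler context;
        # blocking work (which may log) goes to a thread
        threading.Thread(target=server.shutdown).start()
    for sig in (signal.SIGTERM, signal.SIGINT):
        signal.signal(sig, handler)
```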
|
bblimke_webmock
|
train
|
4b348c1d33373d672edd83fc576892d0e46686d2
|
diff --git a/rpctest/rpc_harness.go b/rpctest/rpc_harness.go
index <HASH>..<HASH> 100644
--- a/rpctest/rpc_harness.go
+++ b/rpctest/rpc_harness.go
@@ -383,6 +383,13 @@ func (h *Harness) RPCConfig() btcrpcclient.ConnConfig {
return h.node.config.rpcConnConfig()
}
+// P2PAddress returns the harness' P2P listening address. This allows potential
+// peers (such as SPV peers) created within tests to connect to a given test
+// harness instance.
+func (h *Harness) P2PAddress() string {
+ return h.node.config.listen
+}
+
// GenerateAndSubmitBlock creates a block whose contents include the passed
// transactions and submits it to the running simnet node. For generating
// blocks with only a coinbase tx, callers can simply pass nil instead of
|
rpctest: Add P2PAddress() for Harness to get P2P listen address
|
btcsuite_btcd
|
train
|
66d24cf0f00a58133c159940d8f65a4f622a09eb
|
diff --git a/alot/buffers.py b/alot/buffers.py
index <HASH>..<HASH> 100644
--- a/alot/buffers.py
+++ b/alot/buffers.py
@@ -207,6 +207,8 @@ class SearchBuffer(Buffer):
modename = 'search'
threads = []
+ _REVERSE = {'oldest_first': 'newest_first',
+ 'newest_first': 'oldest_first'}
def __init__(self, ui, initialquery='', sort_order=None):
self.dbman = ui.dbman
@@ -244,14 +246,20 @@ class SearchBuffer(Buffer):
if self.proc.is_alive():
self.proc.terminate()
- def rebuild(self):
+ def rebuild(self, reverse=False):
self.isinitialized = True
+ self.reversed = reverse
self.kill_filler_process()
self.result_count = self.dbman.count_messages(self.querystring)
+ if reverse:
+ order = self._REVERSE[self.sort_order]
+ else:
+ order = self.sort_order
+
try:
self.pipe, self.proc = self.dbman.get_threads(self.querystring,
- self.sort_order)
+ order)
except NotmuchError:
self.ui.notify('malformed query string: %s' % self.querystring,
'error')
@@ -260,7 +268,8 @@ class SearchBuffer(Buffer):
return
self.threadlist = PipeWalker(self.pipe, ThreadlineWidget,
- dbman=self.dbman)
+ dbman=self.dbman,
+ reverse=reverse)
self.listbox = urwid.ListBox(self.threadlist)
self.body = self.listbox
@@ -286,12 +295,22 @@ class SearchBuffer(Buffer):
self.threadlist._get_next_item()
def focus_first(self):
- self.body.set_focus(0)
+ if not self.reversed:
+ self.body.set_focus(0)
+ else:
+ self.rebuild(reverse=False)
def focus_last(self):
- self.consume_pipe()
- num_lines = len(self.threadlist.get_lines())
- self.body.set_focus(num_lines - 1)
+ if self.reversed:
+ self.body.set_focus(0)
+ elif (self.result_count < 200) or \
+ (self.sort_order not in self._REVERSE.keys()):
+ self.consume_pipe()
+ num_lines = len(self.threadlist.get_lines())
+ self.body.set_focus(num_lines - 1)
+ else:
+ self.rebuild(reverse=True)
+
class ThreadBuffer(Buffer):
|
smarter focus_last in SearchBuffers
In case a search buffer displays more than <I> threads
and one calls `move last`, this will now result in a
new search with reversed search order displayed using a
reversed PipeWalker.
This makes it unnecessary to read all thread IDs
when focusing the last element and thus speeds up the UI
considerably.
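A toy illustration of the idea (names invented): with lazily streamed results, the last item under one sort order is the first item under the reversed order, so focusing the last element never has to drain the pipe:
```
_REVERSE = {'oldest_first': 'newest_first',
            'newest_first': 'oldest_first'}

def focus_last(query, order, run_query):
    # run_query(query, order) yields thread ids lazily, like the
    # notmuch pipe; the reversed query's first hit is our target
    return next(run_query(query, _REVERSE[order]))
```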
|
pazz_alot
|
train
|
7297dd33bb693aea29b5bc8ded0b3de3ddd368bb
|
diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go
index <HASH>..<HASH> 100644
--- a/pkg/kubelet/kubelet_node_status.go
+++ b/pkg/kubelet/kubelet_node_status.go
@@ -592,7 +592,9 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) {
devicePluginCapacity, devicePluginAllocatable, removedDevicePlugins = kl.containerManager.GetDevicePluginResourceCapacity()
if devicePluginCapacity != nil {
for k, v := range devicePluginCapacity {
- glog.V(2).Infof("Update capacity for %s to %d", k, v.Value())
+ if old, ok := node.Status.Capacity[k]; !ok || old.Value() != v.Value() {
+ glog.V(2).Infof("Update capacity for %s to %d", k, v.Value())
+ }
node.Status.Capacity[k] = v
}
}
@@ -635,9 +637,12 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) {
}
node.Status.Allocatable[k] = value
}
+
if devicePluginAllocatable != nil {
for k, v := range devicePluginAllocatable {
- glog.V(2).Infof("Update allocatable for %s to %d", k, v.Value())
+ if old, ok := node.Status.Allocatable[k]; !ok || old.Value() != v.Value() {
+ glog.V(2).Infof("Update allocatable for %s to %d", k, v.Value())
+ }
node.Status.Allocatable[k] = v
}
}
|
Change Capacity log verbosity in node status update
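The guard reduces to log-only-on-change; a minimal Python sketch of the same idea (invented names):
```
def update_capacity(node_capacity, plugin_capacity, log):
    for k, v in plugin_capacity.items():
        if node_capacity.get(k) != v:  # only log an actual change
            log('Update capacity for %s to %d' % (k, v))
        node_capacity[k] = v
```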
|
kubernetes_kubernetes
|
train
|
64e0be325c0e1f1c1358555a7f099a7fce5ca242
|
diff --git a/package.json b/package.json
index <HASH>..<HASH> 100644
--- a/package.json
+++ b/package.json
@@ -46,9 +46,9 @@
"cz-conventional-changelog": "^1.1.5",
"jsdom": "^7.2.2",
"mocha": "^2.3.3",
- "react": "^15.0.0",
- "react-addons-test-utils": "^15.0.0",
- "react-dom": "^15.0.0",
+ "react": "^15.5.4",
+ "prop-types": "15.5.8",
+ "react-dom": "^15.5.4",
"rf-release": "^0.4.0",
"semantic-release": "^4.3.5",
"should": "^7.0.1"
diff --git a/src/__specs__/styleable.spec.js b/src/__specs__/styleable.spec.js
index <HASH>..<HASH> 100644
--- a/src/__specs__/styleable.spec.js
+++ b/src/__specs__/styleable.spec.js
@@ -1,6 +1,7 @@
import React from 'react'
import ReactDOM from 'react-dom'
-import TestUtils from 'react-addons-test-utils'
+import TestUtils from 'react-dom/test-utils'
+import PropTypes from 'prop-types'
import styleable from '../styleable'
@@ -28,7 +29,7 @@ function mkFixtureWithReqPropTypes() {
@styleable(css)
class Subject extends React.Component {
static propTypes = {
- aReqProp: React.PropTypes.string.isRequired
+ aReqProp: PropTypes.string.isRequired
};
render() {
return (
@@ -56,7 +57,7 @@ function mkFunctionFixtureWithReqPropTypes() {
return <div className={props.css.content}>Req content {props.aReqProp}</div>
}
subject.propTypes = {
- aReqProp: React.PropTypes.string.isRequired
+ aReqProp: PropTypes.string.isRequired
}
return styleable(css)(subject)
diff --git a/src/styleable.js b/src/styleable.js
index <HASH>..<HASH> 100644
--- a/src/styleable.js
+++ b/src/styleable.js
@@ -1,6 +1,7 @@
import getDisplayName from './utils/get-display-name'
import invariant from 'invariant'
import React from 'react'
+import PropTypes from 'prop-types'
function getSelectorsNotInStylesheet(cssProps, stylesheet) {
const propKeys = Object.keys(cssProps)
@@ -61,7 +62,7 @@ export default function styleable(stylesheet) {
};
static propTypes = {
...DecoratedComponent.propTypes,
- css: React.PropTypes.object
+ css: PropTypes.object
};
getCss() {
invariant(
|
updating to react <I> and fixing propTypes warnings (#<I>)
|
pluralsight_react-styleable
|
train
|
b2a8a43e55da2702206acc4c7bd13ab2882ef980
|
diff --git a/client/lib/keyboard-shortcuts/key-bindings.js b/client/lib/keyboard-shortcuts/key-bindings.js
index <HASH>..<HASH> 100644
--- a/client/lib/keyboard-shortcuts/key-bindings.js
+++ b/client/lib/keyboard-shortcuts/key-bindings.js
@@ -23,9 +23,9 @@ const KEY_BINDINGS = {
},
{
eventName: 'open-selection',
- keys: [ 'enter' ],
+ keys: [ [ 'enter' ], [ 'v' ] ],
description: {
- keys: [ 'enter' ],
+ keys: [ [ 'enter' ], [ 'v' ] ],
text: translate( 'Open selection' ),
},
},
|
Update/<I> add keyboard shortcut for visit site (#<I>)
* add 'v' keyboard shortcut option for opening Reader site links
* refactor open shortcut to allow for either "enter" or "v" for opening posts
|
Automattic_wp-calypso
|
train
|
9aee7c5cf7dd5a81b5f449ae66152b052325e5b9
|
diff --git a/src/main/java/tech/sirwellington/alchemy/generator/ObjectGenerators.java b/src/main/java/tech/sirwellington/alchemy/generator/ObjectGenerators.java
index <HASH>..<HASH> 100644
--- a/src/main/java/tech/sirwellington/alchemy/generator/ObjectGenerators.java
+++ b/src/main/java/tech/sirwellington/alchemy/generator/ObjectGenerators.java
@@ -71,6 +71,7 @@ public final class ObjectGenerators
DEFAULT_GENERATOR_MAPPINGS.put(Instant.class, TimeGenerators.anytime());
DEFAULT_GENERATOR_MAPPINGS.put(ByteBuffer.class, BinaryGenerators.byteBuffers(333));
DEFAULT_GENERATOR_MAPPINGS.put(Boolean.class, BooleanGenerators.booleans());
+ DEFAULT_GENERATOR_MAPPINGS.put(Byte.class, BinaryGenerators.bytes());
}
ObjectGenerators() throws IllegalAccessException
|
Fixing issue #<I> in `pojos` generator
+ Registering a Generator for Byte types
|
SirWellington_alchemy-generator
|
train
|
759aa6836d3734ae8587656941185d4d6c947966
|
diff --git a/werkzeug/_compat.py b/werkzeug/_compat.py
index <HASH>..<HASH> 100644
--- a/werkzeug/_compat.py
+++ b/werkzeug/_compat.py
@@ -33,6 +33,8 @@ if PY2:
int_to_byte = chr
iter_bytes = iter
+ import collections as collections_abc
+
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def fix_tuple_repr(obj):
@@ -132,6 +134,8 @@ else:
int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
iter_bytes = functools.partial(map, int_to_byte)
+ import collections.abc as collections_abc
+
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
diff --git a/werkzeug/datastructures.py b/werkzeug/datastructures.py
index <HASH>..<HASH> 100644
--- a/werkzeug/datastructures.py
+++ b/werkzeug/datastructures.py
@@ -13,11 +13,10 @@ import codecs
import mimetypes
from copy import deepcopy
from itertools import repeat
-from collections import Container, Iterable, MutableSet
from werkzeug._internal import _missing
-from werkzeug._compat import BytesIO, iterkeys, itervalues, iteritems, \
- iterlists, PY2, text_type, integer_types, string_types, \
+from werkzeug._compat import BytesIO, collections_abc, iterkeys, itervalues, \
+ iteritems, iterlists, PY2, text_type, integer_types, string_types, \
make_literal_wrapper, to_native
from werkzeug.filesystem import get_filesystem_encoding
@@ -2020,7 +2019,7 @@ class CallbackDict(UpdateDictMixin, dict):
)
-class HeaderSet(MutableSet):
+class HeaderSet(collections_abc.MutableSet):
"""Similar to the :class:`ETags` class this implements a set-like structure.
Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
@@ -2174,7 +2173,7 @@ class HeaderSet(MutableSet):
)
-class ETags(Container, Iterable):
+class ETags(collections_abc.Container, collections_abc.Iterable):
"""A set that can be used to check if one etag is present in a collection
of etags.
|
Import abc classes from collections.abc
Fixes warnings that look like this:
```
werkzeug/datastructures.py:<I>: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in <I> it will stop working
```
You can replicate these warnings by running
`python -Wonce -m werkzeug.datastructures`
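A minimal runnable sketch of the guarded import (the `PY2` flag is an assumption standing in for werkzeug's own version check):
```python
import sys

# Stand-in for werkzeug's PY2 flag; an assumption for this sketch.
PY2 = sys.version_info[0] == 2

if PY2:
    import collections as collections_abc
else:
    import collections.abc as collections_abc

# Referencing the ABCs through the alias means Python 3 resolves them
# from collections.abc, so no DeprecationWarning is raised.
print(issubclass(set, collections_abc.MutableSet))  # True
```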
|
pallets_werkzeug
|
train
|
2303f3cba618d99fed7f3be93d3e6a9fc0621dc7
|
diff --git a/lib/redfish_plus/dsl.rb b/lib/redfish_plus/dsl.rb
index <HASH>..<HASH> 100644
--- a/lib/redfish_plus/dsl.rb
+++ b/lib/redfish_plus/dsl.rb
@@ -422,15 +422,16 @@ module RedfishPlus
domain.data['logging']['levels'][key] = level
end
- def custom_resource(domain, name, value)
+ def custom_resource(domain, name, value, restype = nil)
domain.data['custom_resources'][name]['properties']['value'] = value
+ domain.data['custom_resources'][name]['properties']['restype'] = restype if restype
end
- def custom_resource_from_env(domain, name, env_key = nil)
+ def custom_resource_from_env(domain, name, env_key = nil, restype = nil)
components = name.split('/')
components = [components.first] + components[2..components.size] if components.size > 2 && components[1] == 'env'
env_key = components.join('_').upcase if env_key.nil?
- custom_resource(domain, name, "${#{env_key}}")
+ custom_resource(domain, name, "${#{env_key}}", restype)
RedfishPlus.environment_variable(domain, env_key)
end
|
Support the configuration of restype for custom resources
|
realityforge_redfish
|
train
|
48a9856da8362d2904cc8d15122b515b5c505bd2
|
diff --git a/lib/filelib.php b/lib/filelib.php
index <HASH>..<HASH> 100644
--- a/lib/filelib.php
+++ b/lib/filelib.php
@@ -904,6 +904,7 @@ function file_save_draft_area_files($draftitemid, $contextid, $component, $filea
$newhashes = array();
$filecount = 0;
+ $context = context::instance_by_id($contextid, MUST_EXIST);
foreach ($draftfiles as $file) {
if (!$options['subdirs'] && $file->get_filepath() !== '/') {
continue;
@@ -912,8 +913,11 @@ function file_save_draft_area_files($draftitemid, $contextid, $component, $filea
continue;
}
if (!$file->is_directory()) {
- if ($options['maxbytes'] and $options['maxbytes'] < $file->get_filesize()) {
- // oversized file - should not get here at all
+ // Check to see if this file was uploaded by someone who can ignore the file size limits.
+ $fileusermaxbytes = get_user_max_upload_file_size($context, $options['maxbytes'], 0, 0, $file->get_userid());
+ if ($fileusermaxbytes != USER_CAN_IGNORE_FILE_SIZE_LIMITS
+ && ($options['maxbytes'] and $options['maxbytes'] < $file->get_filesize())) {
+ // Oversized file.
continue;
}
if ($options['maxfiles'] != -1 and $options['maxfiles'] <= $filecount) {
|
MDL-<I> filelib: Oversized files are checked against the uploading user.
Files that are oversized could have been uploaded by a user who
can ignore the file size limits. These files should not be deleted
in these situations.
|
moodle_moodle
|
train
|
7b728a79ee8fe66dd9b4cf91867139163e112002
|
diff --git a/lib/writer/jsonml.js b/lib/writer/jsonml.js
index <HASH>..<HASH> 100644
--- a/lib/writer/jsonml.js
+++ b/lib/writer/jsonml.js
@@ -93,6 +93,7 @@ var convertBlock = function(block, context) {
case 'divider': return ['hr'];
case 'bulleted': return ['ul'].concat(convertList(block, context));
case 'numbered': return ['ol'].concat(convertList(block, context));
+ case 'comment': return null; // TODO: export as HTML comments?
}
var spans = convertSpans(block, context);
diff --git a/lib/writer/xelatex.js b/lib/writer/xelatex.js
index <HASH>..<HASH> 100644
--- a/lib/writer/xelatex.js
+++ b/lib/writer/xelatex.js
@@ -100,6 +100,7 @@ function formatBlock(block, links, notes) {
var prefix = repeat(' ', attr.level);
return wordWrap(formatSpans(block, links, notes), prefix, prefix + '\\item ');
case 'quote': return '\\begin{quotation}\n' + wordWrap(formatSpans(block, links, notes)) + '\n\\end{quotation}';
+ case 'comment': return null; // TODO: export as LaTeX comments (%)
case 'link': return null;
case 'note': return null;
case 'verbatim': return '\\begin{verbatim}\n' + formatVerbatimSpans(block) + '\n\\end{verbatim}';
|
Strip comments from HTML and XeLaTeX output for now.
|
sheremetyev_texts.js
|
train
|
44b9aaa88c232938a79481925c0fe704baec46ab
|
diff --git a/fs/sshfs/sshfs.py b/fs/sshfs/sshfs.py
index <HASH>..<HASH> 100644
--- a/fs/sshfs/sshfs.py
+++ b/fs/sshfs/sshfs.py
@@ -4,7 +4,6 @@
from __future__ import unicode_literals
from __future__ import absolute_import
-import io
import os
import stat
import socket
@@ -18,7 +17,6 @@ from .. import errors
from ..base import FS
from ..info import Info
from ..enums import ResourceType
-from ..iotools import RawWrapper
from ..path import basename
from ..permissions import Permissions
from ..osfs import OSFS
@@ -52,7 +50,6 @@ class SSHFS(FS):
``CreateFailed`` error.
"""
-
_meta = {
'case_insensitive': False,
'invalid_path_chars': '\0',
@@ -88,7 +85,7 @@ class SSHFS(FS):
config = ssh_config.lookup(host)
pkey = config.get('identityfile') or pkey
# Extract the given info
- pkey, keyfile= (pkey, None) \
+ pkey, keyfile = (pkey, None) \
if isinstance(pkey, paramiko.PKey) else (None, pkey)
self._user = user = user or config.get('user')
self._host = host = config.get('hostname')
@@ -120,7 +117,7 @@ class SSHFS(FS):
self._sftp = client.open_sftp()
except (paramiko.ssh_exception.SSHException, # protocol errors
- paramiko.ssh_exception.NoValidConnectionsError, # connexion errors
+ paramiko.ssh_exception.NoValidConnectionsError, # connexion errors
socket.gaierror, socket.timeout) as e: # TCP errors
message = "Unable to create filesystem: {}".format(e)
@@ -180,8 +177,8 @@ class SSHFS(FS):
Arguments:
path (str): A path on the filesystem.
mode (str): Mode to open the file (must be a valid, non-text mode).
- Since this method only opens binary files, the ``b`` in the mode
- is implied.
+ Since this method only opens binary files, the ``b`` in the
+ mode is implied.
buffering (int): the buffering policy (-1 to use default buffering,
0 to disable completely, 1 to enable line based buffering, or
any larger positive integer for a custom buffer size).
@@ -189,6 +186,9 @@ class SSHFS(FS):
Keyword Arguments:
pipelined (bool): Set the transfer in pipelined mode (should
improve transfer speed). Defaults to ``True``.
+ prefetch (bool): Use background threading to prefetch the file
+ content when opened in reading mode. Disable in case of
+ threading issues. Defaults to ``True``.
Raises:
fs.errors.FileExpected: if the path if not a file.
@@ -205,12 +205,11 @@ class SSHFS(FS):
_mode.validate_bin()
with self._lock:
- if _mode.exclusive:
- if self.exists(_path):
- raise errors.FileExists(path)
- else:
- _mode = Mode(''.join(set(mode.replace('x', 'w'))))
- elif _mode.reading and not _mode.create and not self.exists(_path):
+ if _mode.exclusive and self.exists(_path):
+ raise errors.FileExists(path)
+ # else:
+ # _mode = Mode(''.join(set(mode.replace('x', 'w'))))
+ elif not _mode.create and not self.exists(_path):
raise errors.ResourceNotFound(path)
elif self.isdir(_path):
raise errors.FileExpected(path)
@@ -222,6 +221,9 @@ class SSHFS(FS):
bufsize=buffering
)
handle.set_pipelined(options.get("pipelined", True))
+ if options.get("prefetch", True):
+ if _mode.reading and not _mode.writing:
+ handle.prefetch(self.getsize(_path))
return SSHFile(handle)
def remove(self, path): # noqa: D102
@@ -244,12 +246,12 @@ class SSHFS(FS):
# NB: this will raise ResourceNotFound
# and DirectoryExpected as expected by
# the specifications
- if not self.isempty(path):
+ if not self.isempty(_path):
raise errors.DirectoryNotEmpty(path)
with convert_sshfs_errors('removedir', path):
with self._lock:
- self._sftp.rmdir(path)
+ self._sftp.rmdir(_path)
def setinfo(self, path, info): # noqa: D102
self.check()
@@ -263,15 +265,15 @@ class SSHFS(FS):
with convert_sshfs_errors('setinfo', path):
if 'accessed' in details or 'modified' in details:
- self._utime(path,
+ self._utime(_path,
details.get("modified"),
details.get("accessed"))
if 'uid' in access or 'gid' in access:
- self._chown(path,
+ self._chown(_path,
access.get('uid'),
access.get('gid'))
if 'permissions' in access:
- self._chmod(path, access['permissions'].mode)
+ self._chmod(_path, access['permissions'].mode)
@cached_property
def platform(self):
@@ -350,7 +352,7 @@ class SSHFS(FS):
}
details['created'] = getattr(stat_result, 'st_birthtime', None)
- ctime_key = 'created' if self.platform=="win32" else 'metadata_changed'
+ ctime_key = 'created' if self.platform == "win32" else 'metadata_changed'
details[ctime_key] = getattr(stat_result, 'st_ctime', None)
return details
|
Enable file prefetching in `SSHFS.openbin`
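A hedged usage sketch (host, credentials, and path are hypothetical; only the `prefetch` keyword comes from this change):
```python
from fs.sshfs import SSHFS

# Hypothetical connection details, for illustration only.
ssh_fs = SSHFS("example.com", user="demo", passwd="secret")

# prefetch defaults to True for read-only binary handles; pass False
# to disable the background prefetch thread if it causes issues.
with ssh_fs.openbin("/data/blob.bin", "r", prefetch=False) as handle:
    chunk = handle.read(4096)
```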
|
althonos_fs.sshfs
|
train
|
ca5f042957f297cfb99fdae9f85c514e5628c1f4
|
diff --git a/lib/Data/Mapper/UserRegisterMapper.php b/lib/Data/Mapper/UserRegisterMapper.php
index <HASH>..<HASH> 100644
--- a/lib/Data/Mapper/UserRegisterMapper.php
+++ b/lib/Data/Mapper/UserRegisterMapper.php
@@ -59,6 +59,7 @@ class UserRegisterMapper
$data = new UserRegisterData([
'contentType' => $contentType,
'mainLanguageCode' => $this->params['language'],
+ 'enabled' => true,
]);
$data->addParentGroup($this->parentGroupLoader->loadGroup());
diff --git a/lib/Form/EventSubscriber/UserFieldsSubscriber.php b/lib/Form/EventSubscriber/UserFieldsSubscriber.php
index <HASH>..<HASH> 100644
--- a/lib/Form/EventSubscriber/UserFieldsSubscriber.php
+++ b/lib/Form/EventSubscriber/UserFieldsSubscriber.php
@@ -62,7 +62,7 @@ class UserFieldsSubscriber implements EventSubscriberInterface
$data->login = $userAccountFieldData->username;
$data->email = $userAccountFieldData->email;
$data->password = $userAccountFieldData->password;
- $data->enabled = $userAccountFieldData->enabled;
+ $data->enabled = $data->enabled ?? $userAccountFieldData->enabled;
/** @var Value $userValue */
$userValue = clone $data->contentType
|
EZP-<I>: Error <I> after trying to register new user (#<I>)
|
ezsystems_repository-forms
|
train
|
4a8b871dbd65d823b6296a5d5885f47662f9b5bc
|
diff --git a/shaka-player.uncompiled.js b/shaka-player.uncompiled.js
index <HASH>..<HASH> 100644
--- a/shaka-player.uncompiled.js
+++ b/shaka-player.uncompiled.js
@@ -44,6 +44,7 @@ goog.require('shaka.polyfill.PatchedMediaKeysMs');
goog.require('shaka.polyfill.PatchedMediaKeysNop');
goog.require('shaka.polyfill.PatchedMediaKeysWebkit');
goog.require('shaka.polyfill.PiPWebkit');
+goog.require('shaka.polyfill.RandomUUID');
goog.require('shaka.polyfill.VTTCue');
goog.require('shaka.polyfill.VideoPlayPromise');
goog.require('shaka.polyfill.VideoPlaybackQuality');
|
test: Fix tests in uncompiled mode (#<I>)
Missing require for randomUUID polyfill
|
google_shaka-player
|
train
|
5904e4ac79d3b40182dc39945c093cb77676b87f
|
diff --git a/agent/consul/state/schema.go b/agent/consul/state/schema.go
index <HASH>..<HASH> 100644
--- a/agent/consul/state/schema.go
+++ b/agent/consul/state/schema.go
@@ -2,6 +2,7 @@ package state
import (
"fmt"
+ "strings"
"github.com/hashicorp/go-memdb"
)
@@ -75,11 +76,37 @@ func indexTableSchema() *memdb.TableSchema {
Name: indexID,
AllowMissing: false,
Unique: true,
- Indexer: &memdb.StringFieldIndex{
- Field: "Key",
- Lowercase: true,
+ Indexer: indexerSingle{
+ readIndex: indexFromString,
+ writeIndex: indexNameFromIndexEntry,
},
},
},
}
}
+
+func indexNameFromIndexEntry(raw interface{}) ([]byte, error) {
+ p, ok := raw.(*IndexEntry)
+ if !ok {
+ return nil, fmt.Errorf("unexpected type %T for IndexEntry index", raw)
+ }
+
+ if p.Key == "" {
+ return nil, errMissingValueForIndex
+ }
+
+ var b indexBuilder
+ b.String(strings.ToLower(p.Key))
+ return b.Bytes(), nil
+}
+
+func indexFromString(raw interface{}) ([]byte, error) {
+ q, ok := raw.(string)
+ if !ok {
+ return nil, fmt.Errorf("unexpected type %T for string prefix query", raw)
+ }
+
+ var b indexBuilder
+ b.String(strings.ToLower(q))
+ return b.Bytes(), nil
+}
diff --git a/agent/consul/state/state_store.go b/agent/consul/state/state_store.go
index <HASH>..<HASH> 100644
--- a/agent/consul/state/state_store.go
+++ b/agent/consul/state/state_store.go
@@ -295,17 +295,20 @@ func indexUpdateMaxTxn(tx WriteTxn, idx uint64, table string) error {
return fmt.Errorf("failed to retrieve existing index: %s", err)
}
- // Always take the first update, otherwise do the > check.
- if ti == nil {
- if err := tx.Insert(tableIndex, &IndexEntry{table, idx}); err != nil {
- return fmt.Errorf("failed updating index %s", err)
+ // if this is an update check the idx
+ if ti != nil {
+ cur, ok := ti.(*IndexEntry)
+ if !ok {
+ return fmt.Errorf("failed updating index %T need to be `*IndexEntry`", ti)
}
- return nil
- }
- if cur, ok := ti.(*IndexEntry); ok && idx > cur.Value {
- if err := tx.Insert(tableIndex, &IndexEntry{table, idx}); err != nil {
- return fmt.Errorf("failed updating index %s", err)
+ // Stored index is newer, don't insert the index
+ if idx <= cur.Value {
+ return nil
}
}
+
+ if err := tx.Insert(tableIndex, &IndexEntry{table, idx}); err != nil {
+ return fmt.Errorf("failed updating index %s", err)
+ }
return nil
}
|
Refactor table index (#<I>)
* convert tableIndex to use the new pattern
* make `indexFromString` available for oss as well
* refactor `indexUpdateMaxTxn`
|
hashicorp_consul
|
train
|
ce461454c80c12d0531ea45197f69a20f67c8ac7
|
diff --git a/buffer.go b/buffer.go
index <HASH>..<HASH> 100644
--- a/buffer.go
+++ b/buffer.go
@@ -24,43 +24,6 @@ type buffer struct {
Buffer
}
-func newBuffer(buf Buffer) Buffer {
- return &buffer{
- Buffer: buf,
- }
-}
-
-func (buf *buffer) Len() int64 {
- return buf.Buffer.Len() + int64(len(buf.head))
-}
-
-func (buf *buffer) Write(p []byte) (n int, err error) {
- n, err = buf.Buffer.Write(ShrinkToFit(buf, p))
- return n, err
-}
-
-func (buf *buffer) WriteTo(w io.Writer) (n int64, err error) {
- for !Empty(buf) || len(buf.head) != 0 {
- if len(buf.head) == 0 {
- buf.head = make([]byte, 1024*32)
- m, er := buf.Buffer.Read(buf.head)
- buf.head = buf.head[:m]
- if er != nil && er != io.EOF {
- return n, er
- }
- }
-
- m, er := w.Write(buf.head)
- n += int64(m)
- buf.head = buf.head[m:]
- if er != nil {
- return n, er
- }
-
- }
- return n, nil
-}
-
func MaxCap() int64 {
return maxInt64
}
diff --git a/buffer_test.go b/buffer_test.go
index <HASH>..<HASH> 100644
--- a/buffer_test.go
+++ b/buffer_test.go
@@ -3,14 +3,12 @@ package buffer
import (
"bytes"
"crypto/rand"
- "fmt"
"io"
"io/ioutil"
- "lib/test"
- "os"
"testing"
)
+/*
func ExamplePartition() {
buf := NewPartition(1024, NewFile)
buf.Write([]byte("Hello world\n"))
@@ -50,6 +48,7 @@ func TestWriter(t *testing.T) {
t.Error("Writer failed to write random data to buffer.")
}
}
+*/
func TestFile(t *testing.T) {
buf := NewFile(1024)
diff --git a/file.go b/file.go
index <HASH>..<HASH> 100644
--- a/file.go
+++ b/file.go
@@ -17,7 +17,7 @@ func NewFile(capacity int64) Buffer {
buf := &file{
capacity: capacity,
}
- return newBuffer(buf)
+ return buf
}
func (buf *file) init() error {
diff --git a/mem.go b/mem.go
index <HASH>..<HASH> 100644
--- a/mem.go
+++ b/mem.go
@@ -29,6 +29,10 @@ func (buf *memoryBuffer) Write(p []byte) (n int, err error) {
return n, err
}
+func (buf *memoryBuffer) ReadAt(b []byte, off int64) (n int, err error) {
+ return bytes.NewReader(buf.Bytes()).ReadAt(b, off)
+}
+
func (buf *memoryBuffer) Read(p []byte) (n int, err error) {
n, err = buf.Buffer.Read(ShrinkToRead(buf, p))
if buf.Len() == 0 {
diff --git a/multi.go b/multi.go
index <HASH>..<HASH> 100644
--- a/multi.go
+++ b/multi.go
@@ -21,7 +21,7 @@ func NewMulti(buffers ...Buffer) Buffer {
hasNext: len(buffers[1:]) != 0,
}
- return newBuffer(buf)
+ return buf
}
func (buf *linkBuffer) Cap() (n int64) {
diff --git a/partition.go b/partition.go
index <HASH>..<HASH> 100644
--- a/partition.go
+++ b/partition.go
@@ -16,7 +16,7 @@ func NewPartition(chunk int64, make func(int64) Buffer) Buffer {
chunk: chunk,
}
buf.push()
- return newBuffer(buf)
+ return buf
}
func (buf *partition) Len() int64 {
|
Removing WriterTo, will rework
|
djherbis_buffer
|
train
|
f277864aa5a8f59ada87ade92d71891f5a4be56f
|
diff --git a/pkg/scheduler/internal/queue/scheduling_queue.go b/pkg/scheduler/internal/queue/scheduling_queue.go
index <HASH>..<HASH> 100644
--- a/pkg/scheduler/internal/queue/scheduling_queue.go
+++ b/pkg/scheduler/internal/queue/scheduling_queue.go
@@ -742,10 +742,11 @@ func (p *PriorityQueue) getBackoffTime(podInfo *framework.QueuedPodInfo) time.Ti
func (p *PriorityQueue) calculateBackoffDuration(podInfo *framework.QueuedPodInfo) time.Duration {
duration := p.podInitialBackoffDuration
for i := 1; i < podInfo.Attempts; i++ {
- duration = duration * 2
- if duration > p.podMaxBackoffDuration {
+ // Use subtraction instead of addition or multiplication to avoid overflow.
+ if duration > p.podMaxBackoffDuration-duration {
return p.podMaxBackoffDuration
}
+ duration += duration
}
return duration
}
diff --git a/pkg/scheduler/internal/queue/scheduling_queue_test.go b/pkg/scheduler/internal/queue/scheduling_queue_test.go
index <HASH>..<HASH> 100644
--- a/pkg/scheduler/internal/queue/scheduling_queue_test.go
+++ b/pkg/scheduler/internal/queue/scheduling_queue_test.go
@@ -19,6 +19,7 @@ package queue
import (
"context"
"fmt"
+ "math"
"reflect"
"strings"
"sync"
@@ -27,10 +28,9 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
- "k8s.io/apimachinery/pkg/runtime"
-
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
@@ -1995,3 +1995,43 @@ func makeQueuedPodInfos(num int, timestamp time.Time) []*framework.QueuedPodInfo
}
return pInfos
}
+
+func TestPriorityQueue_calculateBackoffDuration(t *testing.T) {
+ tests := []struct {
+ name string
+ initialBackoffDuration time.Duration
+ maxBackoffDuration time.Duration
+ podInfo *framework.QueuedPodInfo
+ want time.Duration
+ }{
+ {
+ name: "normal",
+ initialBackoffDuration: 1 * time.Nanosecond,
+ maxBackoffDuration: 32 * time.Nanosecond,
+ podInfo: &framework.QueuedPodInfo{Attempts: 16},
+ want: 32 * time.Nanosecond,
+ },
+ {
+ name: "overflow_32bit",
+ initialBackoffDuration: 1 * time.Nanosecond,
+ maxBackoffDuration: math.MaxInt32 * time.Nanosecond,
+ podInfo: &framework.QueuedPodInfo{Attempts: 32},
+ want: math.MaxInt32 * time.Nanosecond,
+ },
+ {
+ name: "overflow_64bit",
+ initialBackoffDuration: 1 * time.Nanosecond,
+ maxBackoffDuration: math.MaxInt64 * time.Nanosecond,
+ podInfo: &framework.QueuedPodInfo{Attempts: 64},
+ want: math.MaxInt64 * time.Nanosecond,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ q := NewTestQueue(context.Background(), newDefaultQueueSort(), WithPodInitialBackoffDuration(tt.initialBackoffDuration), WithPodMaxBackoffDuration(tt.maxBackoffDuration))
+ if got := q.calculateBackoffDuration(tt.podInfo); got != tt.want {
+ t.Errorf("PriorityQueue.calculateBackoffDuration() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
|
Scheduler queue: fix calculateBackoffDuration overflow in extreme data cases
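The guard sketched in Python, with integer nanoseconds standing in for `time.Duration` (Python ints cannot overflow, so this only illustrates the comparison trick):
```python
def calculate_backoff(initial_ns, max_ns, attempts):
    duration = initial_ns
    for _ in range(1, attempts):
        # Compare against max_ns - duration instead of doubling first,
        # so a fixed-width integer can never overflow here.
        if duration > max_ns - duration:
            return max_ns
        duration += duration
    return duration

assert calculate_backoff(1, 32, 16) == 32  # mirrors the "normal" test case
```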
|
kubernetes_kubernetes
|
train
|
2024d494546bfbf1d7a8339b57a0b895cb4af935
|
diff --git a/spec/page_spec.rb b/spec/page_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/page_spec.rb
+++ b/spec/page_spec.rb
@@ -2,33 +2,21 @@
describe 'Page' do
let(:site) { double }
- let(:page) { Dimples::Page.new(site) }
+ let(:page) { Dimples::Page.new(site, source_path) }
+ let(:html) { '<p><em>Hey!</em></p>' }
+ let(:source_path) { path = File.join(__dir__, 'sources', 'pages', 'index.markdown') }
describe '#initialize' do
context 'when a path is provided' do
- let(:path) { './pages/about.erb' }
- let(:page) { Dimples::Page.new(site, path) }
-
- before do
- page_data = <<PAGE_DATA
----
-title: About
-layout: default
----
-
-Hello.
-PAGE_DATA
-
- allow(File).to receive(:read).with(path).and_return(page_data)
- end
-
it 'parses the metadata and contents' do
- expect(page.contents).to eq('Hello.')
- expect(page.metadata).to eq(title: 'About', layout: 'default')
+ expect(page.contents).to eq('*Hey!*')
+ expect(page.metadata).to eq(title: 'About', layout: false)
end
end
context 'when no path is provided' do
+ let(:page) { Dimples::Page.new(site) }
+
it 'sets the default metadata and contents' do
expect(page.contents).to eq('')
expect(page.metadata).to eq({})
@@ -73,8 +61,39 @@ PAGE_DATA
end
describe '#render' do
+ context 'when the page has a path' do
+ before do
+ config = Hashie::Mash.new({ rendering: {} })
+ allow(site).to receive(:config).and_return(config)
+ end
+
+ it 'renders the contents' do
+ expect(page.render).to eq(html)
+ end
+ end
+
+ context 'when the page has no path' do
+ let(:page) { Dimples::Page.new(site) }
+
+ it 'renders an empty string' do
+ expect(page.render).to eq('')
+ end
+ end
end
describe '#write' do
+ let(:output_directory) { File.join(@site_output, 'pages') }
+ let(:output_path) { File.join(output_directory, "#{page.filename}.#{page.extension}") }
+
+ before do
+ allow(page).to receive(:render).and_return(html)
+ end
+
+ it 'writes out the file' do
+ page.write(output_directory)
+
+ expect(File.exist?(output_path)).to eq(true)
+ expect(File.read(output_path)).to eq(html)
+ end
end
end
|
Switch to the source page file, add render and write tests
|
waferbaby_dimples
|
train
|
32b3fa7b9d4fae258ba18143f197efc5ff7e5d6a
|
diff --git a/src/Resources/contao/pattern/PatternPageTree.php b/src/Resources/contao/pattern/PatternPageTree.php
index <HASH>..<HASH> 100644
--- a/src/Resources/contao/pattern/PatternPageTree.php
+++ b/src/Resources/contao/pattern/PatternPageTree.php
@@ -124,11 +124,14 @@ class PatternPageTree extends Pattern
$arrPages = array_values(array_filter($arrPages));
- $this->writeToTemplate($arrPages);
+ $this->writeToTemplate($arrPages->fetchAll());
}
else
{
- $this->writeToTemplate(\PageModel::findById($this->Value->singlePage));
+ if (($objPage = \PageModel::findById($this->Value->singlePage)) !== null)
+ {
+ $this->writeToTemplate($objPage->row());
+ }
}
}
}
|
Return arrData instead of the model
|
agoat_contao-customcontentelements-bundle
|
train
|
a844ede90f21a288f9829dc1f03a67d94211ec35
|
diff --git a/packages/avatar/src/Avatar.js b/packages/avatar/src/Avatar.js
index <HASH>..<HASH> 100644
--- a/packages/avatar/src/Avatar.js
+++ b/packages/avatar/src/Avatar.js
@@ -39,8 +39,7 @@ function backgroundIdFromName(name) {
* @returns {string}
*/
function initialsFromName(name) {
- const initials = name.match(/\b\w/g) || [];
-
+ const initials = name.match(/(?<=[-\s._'",;]|^)[^-\s._'",;]/g) || [];
return ((initials.shift() || "") + (initials.pop() || "")).toUpperCase();
}
|
fix: function initialsFromName() can't handle some non-English names properly
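The same idea sketched in Python for comparison. Python's `re` rejects the variable-width lookbehind used in the JS fix, so a non-capturing "start or separator" group stands in (the helper and sample name are illustrative):
```python
import re

def initials_from_name(name):
    # First character after the start of the string or any separator,
    # mirroring the character class from the JS regex above.
    parts = re.findall(r"(?:^|[-\s._'\",;])([^-\s._'\",;])", name)
    first = parts[0] if parts else ""
    last = parts[-1] if len(parts) > 1 else ""
    return (first + last).upper()

print(initials_from_name("Jean-Luc Picard"))  # JP
```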
|
Autodesk_hig
|
train
|
df11d84822287a5ed9e2adb2bb909024bb65ed96
|
diff --git a/kubernetes-tests/src/test/java/io/fabric8/kubernetes/client/mock/ApproveTest.java b/kubernetes-tests/src/test/java/io/fabric8/kubernetes/client/mock/ApproveTest.java
index <HASH>..<HASH> 100644
--- a/kubernetes-tests/src/test/java/io/fabric8/kubernetes/client/mock/ApproveTest.java
+++ b/kubernetes-tests/src/test/java/io/fabric8/kubernetes/client/mock/ApproveTest.java
@@ -35,8 +35,8 @@ public class ApproveTest {
KubernetesClient client;
@Test
public void testApprove() throws MalformedURLException {
+
//Given
- KubernetesClient client = new DefaultKubernetesClient();
CertificateSigningRequest csr = new CertificateSigningRequestBuilder()
.withNewMetadata().withName("my-cert").endMetadata()
.withNewSpec()
@@ -66,7 +66,6 @@ public class ApproveTest {
public void testDeny() throws MalformedURLException {
//Given
- KubernetesClient client = new DefaultKubernetesClient();
CertificateSigningRequest csr = new CertificateSigningRequestBuilder()
.withNewMetadata().withName("my-cert").endMetadata()
.withNewSpec()
|
Update ApproveTest.java
|
fabric8io_kubernetes-client
|
train
|
3ecd6729caad4c776c52500f739da0453401714e
|
diff --git a/src/main/java/net/snowflake/client/jdbc/SnowflakeStatementV1.java b/src/main/java/net/snowflake/client/jdbc/SnowflakeStatementV1.java
index <HASH>..<HASH> 100644
--- a/src/main/java/net/snowflake/client/jdbc/SnowflakeStatementV1.java
+++ b/src/main/java/net/snowflake/client/jdbc/SnowflakeStatementV1.java
@@ -1424,8 +1424,9 @@ class SnowflakeStatementV1 implements Statement, SnowflakeStatement
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException
{
- throwExceptionAnyway();
- return false;
+ logger.debug("isWrapperFor(Class<?> iface)");
+
+ return iface.isInstance(this);
}
@SuppressWarnings("unchecked")
diff --git a/src/test/java/net/snowflake/client/jdbc/SnowflakeResultSetSerializableIT.java b/src/test/java/net/snowflake/client/jdbc/SnowflakeResultSetSerializableIT.java
index <HASH>..<HASH> 100644
--- a/src/test/java/net/snowflake/client/jdbc/SnowflakeResultSetSerializableIT.java
+++ b/src/test/java/net/snowflake/client/jdbc/SnowflakeResultSetSerializableIT.java
@@ -524,6 +524,7 @@ public class SnowflakeResultSetSerializableIT extends BaseJDBCTest
{
resultFileList.add(thisFileList.get(i));
}
+ rs.close();
}
if (developPrint)
@@ -536,7 +537,6 @@ public class SnowflakeResultSetSerializableIT extends BaseJDBCTest
}
@Test
- @Ignore
@ConditionalIgnoreRule.ConditionalIgnore(condition = RunningOnTravisCI.class)
public void testSplitResultSetSerializable() throws Throwable
{
|
SNOW-<I>: fix unreleased memory
|
snowflakedb_snowflake-jdbc
|
train
|
5f46a2ce5cd147f29ec87fe89ddc59ac94f36a7f
|
diff --git a/pingouin/bayesian.py b/pingouin/bayesian.py
index <HASH>..<HASH> 100644
--- a/pingouin/bayesian.py
+++ b/pingouin/bayesian.py
@@ -10,6 +10,8 @@ __all__ = ["bayesfactor_ttest", "bayesfactor_pearson", "bayesfactor_binom"]
def _format_bf(bf, precision=3, trim='0'):
"""Format BF10 to floating point or scientific notation.
"""
+ if type(bf) == str:
+ return bf
if bf >= 1e4 or bf <= 1e-4:
out = np.format_float_scientific(bf, precision=precision, trim=trim)
else:
|
don't touch BF if it's already a str
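A stripped-down illustration of the guard (the branches below are a sketch, not pingouin's exact formatter):
```python
import numpy as np

def _format_bf(bf, precision=3, trim="0"):
    if isinstance(bf, str):  # pass through values formatted upstream
        return bf
    if bf >= 1e4 or bf <= 1e-4:
        return np.format_float_scientific(bf, precision=precision, trim=trim)
    return str(round(bf, precision))

print(_format_bf("inf"))     # 'inf', returned untouched
print(_format_bf(123456.0))  # scientific notation, e.g. '1.235e+05'
```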
|
raphaelvallat_pingouin
|
train
|
5abde0eb7d52dbb43e00e5eed52cd0702d6d3c59
|
diff --git a/client/js/Panels/MetaEditor/MetaEditorControl.js b/client/js/Panels/MetaEditor/MetaEditorControl.js
index <HASH>..<HASH> 100644
--- a/client/js/Panels/MetaEditor/MetaEditorControl.js
+++ b/client/js/Panels/MetaEditor/MetaEditorControl.js
@@ -259,7 +259,7 @@ define(['logManager',
}
//process the sheets
- this._processMetaAspectSheetsRegistry();
+ var positionsUpdated = this._processMetaAspectSheetsRegistry();
this.logger.debug('_metaAspectMembersAll: \n' + JSON.stringify(this._metaAspectMembersAll));
this.logger.debug('_metaAspectMembersCoordinatesGlobal: \n' + JSON.stringify(this._metaAspectMembersCoordinatesGlobal));
@@ -289,7 +289,7 @@ define(['logManager',
}
//check all other nodes for position change
- diff = _.intersection(this._selectedMetaAspectSheetMembers, selectedSheetMembers);
+ diff = positionsUpdated;//_.intersection(this._selectedMetaAspectSheetMembers, selectedSheetMembers);
len = diff.length;
while (len--) {
gmeID = diff[len];
@@ -1615,6 +1615,9 @@ define(['logManager',
j,
gmeID;
+ //save old positions
+ var oldMetaAspectMembersCoordinatesPerSheet = this._metaAspectMembersCoordinatesPerSheet;
+
this._sheets = {};
this._metaAspectMembersPerSheet = {};
this._metaAspectMembersCoordinatesPerSheet = {};
@@ -1665,6 +1668,23 @@ define(['logManager',
}
}
+ //figure out whose position has changed
+ var positionUpdated = [];
+ if (this._selectedMetaAspectSet) {
+ var oldPositions = oldMetaAspectMembersCoordinatesPerSheet[this._selectedMetaAspectSet];
+ var newPositions = this._metaAspectMembersCoordinatesPerSheet[this._selectedMetaAspectSet];
+ if (oldPositions && newPositions) {
+ for (var oldItemId in oldPositions) {
+ if (oldPositions.hasOwnProperty(oldItemId) && newPositions.hasOwnProperty(oldItemId)) {
+ if (oldPositions[oldItemId].x !== newPositions[oldItemId].x ||
+ oldPositions[oldItemId].y !== newPositions[oldItemId].y) {
+ positionUpdated.push(oldItemId);
+ }
+ }
+ }
+ }
+ }
+
if (!selectedSheetID) {
for (selectedSheetID in this._sheets) {
if (this._sheets.hasOwnProperty(selectedSheetID)) {
@@ -1674,6 +1694,8 @@ define(['logManager',
}
this.diagramDesigner.selectTab(selectedSheetID);
+
+ return positionUpdated;
};
|
perormance optimisation: check whose position has been updated
Former-commit-id: <I>e<I>ce5f<I>f5c7f2f8d<I>
|
webgme_webgme-engine
|
train
|
23c7a25072bc81a899561395456bf186b13ddcd1
|
diff --git a/azurerm/internal/services/compute/resource_arm_linux_virtual_machine_scale_set.go b/azurerm/internal/services/compute/resource_arm_linux_virtual_machine_scale_set.go
index <HASH>..<HASH> 100644
--- a/azurerm/internal/services/compute/resource_arm_linux_virtual_machine_scale_set.go
+++ b/azurerm/internal/services/compute/resource_arm_linux_virtual_machine_scale_set.go
@@ -802,10 +802,13 @@ func resourceArmLinuxVirtualMachineScaleSetRead(d *schema.ResourceData, meta int
d.Set("unique_id", props.UniqueID)
d.Set("zone_balance", props.ZoneBalance)
- if props.ScaleInPolicy != nil && len(*props.ScaleInPolicy.Rules) > 0 {
- rules := *props.ScaleInPolicy.Rules
- d.Set("scale_in_policy", string(rules[0]))
+ rule := string(compute.Default)
+ if props.ScaleInPolicy != nil {
+ if rules := props.ScaleInPolicy.Rules; rules != nil && len(*rules) > 0 {
+ rule = string((*rules)[0])
+ }
}
+ d.Set("scale_in_policy", rule)
if profile := props.VirtualMachineProfile; profile != nil {
if err := d.Set("boot_diagnostics", flattenBootDiagnostics(profile.DiagnosticsProfile)); err != nil {
diff --git a/azurerm/internal/services/compute/resource_arm_windows_virtual_machine_scale_set.go b/azurerm/internal/services/compute/resource_arm_windows_virtual_machine_scale_set.go
index <HASH>..<HASH> 100644
--- a/azurerm/internal/services/compute/resource_arm_windows_virtual_machine_scale_set.go
+++ b/azurerm/internal/services/compute/resource_arm_windows_virtual_machine_scale_set.go
@@ -848,10 +848,13 @@ func resourceArmWindowsVirtualMachineScaleSetRead(d *schema.ResourceData, meta i
}
}
- if props.ScaleInPolicy != nil && len(*props.ScaleInPolicy.Rules) > 0 {
- rules := *props.ScaleInPolicy.Rules
- d.Set("scale_in_policy", string(rules[0]))
+ rule := string(compute.Default)
+ if props.ScaleInPolicy != nil {
+ if rules := props.ScaleInPolicy.Rules; rules != nil && len(*rules) > 0 {
+ rule = string((*rules)[0])
+ }
}
+ d.Set("scale_in_policy", rule)
if profile := props.VirtualMachineProfile; profile != nil {
if err := d.Set("boot_diagnostics", flattenBootDiagnostics(profile.DiagnosticsProfile)); err != nil {
|
Some updates to conform to <I>
|
terraform-providers_terraform-provider-azurerm
|
train
|
3bbb483787b68dc12ec1f378fc9299388951861c
|
diff --git a/google_drive_downloader/google_drive_downloader.py b/google_drive_downloader/google_drive_downloader.py
index <HASH>..<HASH> 100644
--- a/google_drive_downloader/google_drive_downloader.py
+++ b/google_drive_downloader/google_drive_downloader.py
@@ -1,5 +1,4 @@
from __future__ import print_function
-import re
import requests
import zipfile
import warnings
@@ -18,7 +17,7 @@ class GoogleDriveDownloader:
DOWNLOAD_URL = 'https://docs.google.com/uc?export=download'
@staticmethod
- def download_file_from_google_drive(file_id, dest_path='', overwrite=False, unzip=False, showsize=False):
+ def download_file_from_google_drive(file_id, dest_path, overwrite=False, unzip=False, showsize=False):
"""
Downloads a shared file from google drive into a given folder.
Optionally unzips it.
@@ -29,34 +28,29 @@ class GoogleDriveDownloader:
the file identifier.
You can obtain it from the sharable link.
dest_path: str
- optional, the destination where to save the downloaded file.
+ the destination where to save the downloaded file.
Must be a path (for example: './downloaded_file.txt')
- If omitted, it will try to get the correct name from the response headers
- and download in the local directory.
overwrite: bool
optional, if True forces re-download and overwrite.
unzip: bool
optional, if True unzips a file.
If the file is not a zip file, ignores it.
showsize: bool
- optional, if True prints the current download size.
+ optional, if True print the current download size.
Returns
-------
None
"""
- if dest_path:
- destination_directory = dirname(dest_path)
- if not exists(destination_directory):
- makedirs(destination_directory)
+ destination_directory = dirname(dest_path)
+ if not exists(destination_directory):
+ makedirs(destination_directory)
if not exists(dest_path) or overwrite:
session = requests.Session()
- print('Downloading {}'.format(file_id), end='')
- if dest_path:
- print(' into {}... '.format(dest_path), end='')
+ print('Downloading {} into {}... '.format(file_id, dest_path), end='')
stdout.flush()
response = session.get(GoogleDriveDownloader.DOWNLOAD_URL, params={'id': file_id}, stream=True)
@@ -66,16 +60,6 @@ class GoogleDriveDownloader:
params = {'id': file_id, 'confirm': token}
response = session.get(GoogleDriveDownloader.DOWNLOAD_URL, params=params, stream=True)
- if not dest_path:
- # Get the filename from the response header 'Content-Disposition'
- match = re.search(r'filename="(?P<filename>[0-9A-Za-z ,\.\-\+\(\)\[\]]+)"', response.headers['Content-Disposition'])
-
- # Make it system safe, stripping commas
- dest_path = match['filename'].replace(',', '_')
-
- print(' into {}... '.format(dest_path), end='')
- stdout.flush()
-
if showsize:
print() # Skip to the next line
|
Revert attempt to fetch filename from response
The current way to fetch the filename from the response often fails. This
feature will be restored once a more reliable way to parse the response
is available.
This reverts commit 0c<I>c<I>fda<I>e<I>c<I>fdb<I>e<I>d<I>abad.
|
ndrplz_google-drive-downloader
|
train
|
a29151a399f8aac4ee23a24850308afd1753133c
|
diff --git a/lib/f_dev.js b/lib/f_dev.js
index <HASH>..<HASH> 100644
--- a/lib/f_dev.js
+++ b/lib/f_dev.js
@@ -198,10 +198,6 @@ function augment(toAugment, config){
maxRetries: config.maxRetries || { all : 10 },
- // HAS TO BE EDITTED
- maxMethodRetries: config.maxMethodRetries || 10,
-
-
maxMethodRetriesByName: config.maxMethodRetriesByName || {},
toLog: config.toLog || ['all'],
functionFlow: config.functionFlow || [],
@@ -212,6 +208,8 @@ function augment(toAugment, config){
};
+ if(!f_props.maxRetries.all) f_props.maxRetries.all = 10;
+
/**
* Make a copy of toAugment.prototype, store new properties in there.
* Later we asign it back to toAugment its real prototype object
|
Made sure maxRetries.all is always specified
|
opensoars_f_
|
train
|
db5e48200e724ed9d9ce7734d8041c22551f4ab2
|
diff --git a/redis/client.py b/redis/client.py
index <HASH>..<HASH> 100644
--- a/redis/client.py
+++ b/redis/client.py
@@ -1766,7 +1766,7 @@ class Script(object):
# make sure the Redis server knows about the script
if isinstance(client, BasePipeline):
# make sure this script is good to go on pipeline
- client.script_load_for_pipeline(self.script)
+ client.script_load_for_pipeline(self)
try:
return client.evalsha(self.sha, len(keys), *args)
except NoScriptError:
|
pass the script object, not the lua string
|
andymccurdy_redis-py
|
train
|
640df048405bc7c3e5b8fce8caec69c05f75e43c
|
diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py
index <HASH>..<HASH> 100644
--- a/moto/dynamodb2/models/__init__.py
+++ b/moto/dynamodb2/models/__init__.py
@@ -156,6 +156,10 @@ class Item(BaseModel):
existing = self.attrs.get(attribute_name, DynamoType({"SS": {}}))
new_set = set(existing.value).union(set(new_value))
self.attrs[attribute_name] = DynamoType({"SS": list(new_set)})
+ elif set(update_action["Value"].keys()) == {"L"}:
+ existing = self.attrs.get(attribute_name, DynamoType({"L": []}))
+ new_list = existing.value + new_value
+ self.attrs[attribute_name] = DynamoType({"L": new_list})
else:
# TODO: implement other data types
raise NotImplementedError(
diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py
index <HASH>..<HASH> 100644
--- a/tests/test_dynamodb2/test_dynamodb.py
+++ b/tests/test_dynamodb2/test_dynamodb.py
@@ -5704,3 +5704,26 @@ def test_dynamodb_update_item_fails_on_string_sets():
Key={"record_id": {"S": "testrecord"}},
AttributeUpdates=attribute,
)
+
+
+@moto.mock_dynamodb2
+def test_update_item_add_to_list_using_legacy_attribute_updates():
+ resource = boto3.resource("dynamodb", region_name="us-west-2")
+ resource.create_table(
+ AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
+ TableName="TestTable",
+ KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
+ ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
+ )
+ table = resource.Table("TestTable")
+ table.wait_until_exists()
+ table.put_item(Item={"id": "list_add", "attr": ["a", "b", "c"]},)
+
+ table.update_item(
+ TableName="TestTable",
+ Key={"id": "list_add"},
+ AttributeUpdates={"attr": {"Action": "ADD", "Value": ["d", "e"]}},
+ )
+
+ resp = table.get_item(Key={"id": "list_add"})
+ resp["Item"]["attr"].should.equal(["a", "b", "c", "d", "e"])
|
Implement Add to List for dynamodb:UpdateItem (#<I>)
This handles the add-to-list case using the legacy `AttributeUpdates` parameter.
* Added test coverage.
* Verified against real AWS backend.
Closes #<I>
|
spulec_moto
|
train
|
c046677b09c76f8febd68602015f7461de83a3e0
|
diff --git a/octo-core/lib/octocore/models/contactus.rb b/octo-core/lib/octocore/models/contactus.rb
index <HASH>..<HASH> 100644
--- a/octo-core/lib/octocore/models/contactus.rb
+++ b/octo-core/lib/octocore/models/contactus.rb
@@ -1,17 +1,17 @@
require 'cequel'
-
+# Model for the contact us page on the microsite
module Octo
class ContactUs
include Cequel::Record
key :email, :text
- column :typeofrequest, :text
+ key :created_at, :timestamp
- column :firstName, :text
- column :lastName, :text
+ column :typeofrequest, :text
+ column :firstname, :text
+ column :lastname, :text
column :message, :text
- column :created_at, :timestamp
end
end
\ No newline at end of file
diff --git a/octo-core/lib/octocore/models/subscribe.rb b/octo-core/lib/octocore/models/subscribe.rb
index <HASH>..<HASH> 100644
--- a/octo-core/lib/octocore/models/subscribe.rb
+++ b/octo-core/lib/octocore/models/subscribe.rb
@@ -1,12 +1,12 @@
require 'cequel'
-
+# Model for Subscribe to us (in the footer), on the microsite
module Octo
- class Subscribe
+ class Subscriber
include Cequel::Record
key :email, :text
- column :created_at, :timestamp
+ key :created_at, :timestamp
end
end
\ No newline at end of file
|
Minor model changes in subscribe and contactus
|
octoai_gem-octocore-cassandra
|
train
|
bde51819f3797727ad24d8c0c47078c17787a829
|
diff --git a/system/Database/MySQLi/Connection.php b/system/Database/MySQLi/Connection.php
index <HASH>..<HASH> 100644
--- a/system/Database/MySQLi/Connection.php
+++ b/system/Database/MySQLi/Connection.php
@@ -156,7 +156,6 @@ class Connection extends BaseConnection
}
}
- $clientFlags += MYSQLI_CLIENT_SSL;
$this->mysqli->ssl_set(
$ssl['key'] ?? null,
$ssl['cert'] ?? null,
@@ -165,6 +164,8 @@ class Connection extends BaseConnection
$ssl['cipher'] ?? null
);
}
+
+ $clientFlags += MYSQLI_CLIENT_SSL;
}
try {
|
Fix mysqli ssl connection - certificate is not required to establish a secure connection
|
codeigniter4_CodeIgniter4
|
train
|
fd2ab00c3a31928e407fa6861bd8808d35958157
|
diff --git a/gulpfile.js b/gulpfile.js
index <HASH>..<HASH> 100644
--- a/gulpfile.js
+++ b/gulpfile.js
@@ -41,6 +41,7 @@ gulp.task('compile', function() {
'!node_modules/google-closure-library/closure/**/*_test.js'
])
.pipe(closureCompiler({
+ assume_function_wrapper: true,
compilation_level: 'ADVANCED',
dependency_mode: 'STRICT',
entry_point: 'goog:wgxpath',
@@ -62,6 +63,7 @@ gulp.task('compile-node', function() {
'!node_modules/google-closure-library/closure/**/*_test.js',
])
.pipe(closureCompiler({
+ assume_function_wrapper: true,
compilation_level: 'ADVANCED',
dependency_mode: 'STRICT',
entry_point: 'goog:wgxpath.nodeModuleExports',
|
Pass --assume_function_wrapper to the Closure Compiler
Currently, this doesn't reduce the output size, but it may enable
more optimizations in the future.
|
google_wicked-good-xpath
|
train
|
7dbfd6955b1bf0ea6a503c472a8429d76c575e1e
|
diff --git a/index.js b/index.js
index <HASH>..<HASH> 100644
--- a/index.js
+++ b/index.js
@@ -233,10 +233,10 @@ Victor.prototype.limit = function (max, factor) {
* @api public
*/
Victor.prototype.randomize = function (maxX, maxY) {
- var x = Math.floor(Math.random() * maxX),
- y = Math.floor(Math.random() * maxY);
+ this.randomizeX(maxX);
+ this.randomizeY(maxY);
- return new Victor(x, y);
+ return this;
};
/**
@@ -254,8 +254,7 @@ Victor.prototype.randomize = function (maxX, maxY) {
* @api public
*/
Victor.prototype.randomizeX = function (max) {
- var x = Math.floor(Math.random() * max);
- this.x = x;
+ this.x = Math.floor(Math.random() * max);
return this;
};
@@ -274,8 +273,7 @@ Victor.prototype.randomizeX = function (max) {
* @api public
*/
Victor.prototype.randomizeY = function (max) {
- var y = Math.floor(Math.random() * max);
- this.y = y;
+ this.y = Math.floor(Math.random() * max);
return this;
};
|
Fixed `randomize` function. It returned a copy instead of `this`
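The contract behind the fix, sketched in Python (class and method names are illustrative): in-place mutators must return the instance itself so chained calls keep operating on one object:
```python
import random

class Vec:
    def __init__(self, x=0, y=0):
        self.x, self.y = x, y

    def randomize_x(self, max_x):
        self.x = random.randrange(max_x)
        return self  # not a copy -- keeps chains on one object

    def randomize_y(self, max_y):
        self.y = random.randrange(max_y)
        return self

    def randomize(self, max_x, max_y):
        return self.randomize_x(max_x).randomize_y(max_y)

v = Vec()
assert v.randomize(10, 10) is v
```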
|
maxkueng_victor
|
train
|
39bc3964c485340b4cceba25e344a7e0d5f1ee4a
|
diff --git a/composer.json b/composer.json
index <HASH>..<HASH> 100644
--- a/composer.json
+++ b/composer.json
@@ -27,7 +27,8 @@
"require-dev": {
"phpunit/phpunit": "^6.3",
"doctrine/coding-standard": "^1.0",
- "squizlabs/php_codesniffer": "^3.0"
+ "squizlabs/php_codesniffer": "^3.0",
+ "symfony/phpunit-bridge": "^4.0.5"
},
"autoload": {
"psr-4": {
diff --git a/lib/Doctrine/Common/ClassLoader.php b/lib/Doctrine/Common/ClassLoader.php
index <HASH>..<HASH> 100644
--- a/lib/Doctrine/Common/ClassLoader.php
+++ b/lib/Doctrine/Common/ClassLoader.php
@@ -1,6 +1,11 @@
<?php
namespace Doctrine\Common;
+use function trigger_error;
+use const E_USER_DEPRECATED;
+
+@trigger_error(ClassLoader::class . ' is deprecated.', E_USER_DEPRECATED);
+
/**
* A <tt>ClassLoader</tt> is an autoloader for class files that can be
* installed on the SPL autoload stack. It is a class loader that either loads only classes
diff --git a/lib/Doctrine/Common/Lexer.php b/lib/Doctrine/Common/Lexer.php
index <HASH>..<HASH> 100644
--- a/lib/Doctrine/Common/Lexer.php
+++ b/lib/Doctrine/Common/Lexer.php
@@ -2,6 +2,10 @@
namespace Doctrine\Common;
use Doctrine\Common\Lexer\AbstractLexer;
+use function trigger_error;
+use const E_USER_DEPRECATED;
+
+@trigger_error(Lexer::class . ' is deprecated.', E_USER_DEPRECATED);
/**
* Base class for writing simple lexers, i.e. for creating small DSLs.
diff --git a/lib/Doctrine/Common/Util/Inflector.php b/lib/Doctrine/Common/Util/Inflector.php
index <HASH>..<HASH> 100644
--- a/lib/Doctrine/Common/Util/Inflector.php
+++ b/lib/Doctrine/Common/Util/Inflector.php
@@ -2,6 +2,10 @@
namespace Doctrine\Common\Util;
use Doctrine\Common\Inflector\Inflector as BaseInflector;
+use function trigger_error;
+use const E_USER_DEPRECATED;
+
+@trigger_error(Inflector::class . ' is deprecated.', E_USER_DEPRECATED);
/**
* Doctrine inflector has static methods for inflecting text.
diff --git a/tests/Doctrine/Tests/Common/ClassLoaderTest.php b/tests/Doctrine/Tests/Common/ClassLoaderTest.php
index <HASH>..<HASH> 100644
--- a/tests/Doctrine/Tests/Common/ClassLoaderTest.php
+++ b/tests/Doctrine/Tests/Common/ClassLoaderTest.php
@@ -4,6 +4,9 @@ namespace Doctrine\Tests\Common;
use Doctrine\Common\ClassLoader;
+/**
+ * @group legacy
+ */
class ClassLoaderTest extends \Doctrine\Tests\DoctrineTestCase
{
public function testClassLoader()
|
Trigger silent deprecations for legacy ClassLoader, Inflector and Lexer
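For readers more at home in Python, a loose analog of the same opt-in pattern (an illustration, not Doctrine's code; PHP's `@` suppression has no direct Python equivalent):
```python
import warnings

# Fires at import time of the legacy module; default warning filters
# keep it silent unless a test harness (cf. @group legacy) opts in.
warnings.warn("ClassLoader is deprecated.", DeprecationWarning, stacklevel=2)
```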
|
doctrine_common
|
train
|
43a62f85f06efd12264046e0b51d9a99aa6f7c8a
|
diff --git a/anyconfig/cli.py b/anyconfig/cli.py
index <HASH>..<HASH> 100644
--- a/anyconfig/cli.py
+++ b/anyconfig/cli.py
@@ -17,6 +17,7 @@ import anyconfig.api as A
import anyconfig.compat
import anyconfig.globals
import anyconfig.mergeabledict
+import anyconfig.parser
_ENCODING = locale.getdefaultlocale()[1]
@@ -223,7 +224,8 @@ def main(argv=None):
return
if options.set:
- A.set_(data, *(options.set.split('=')))
+ (key, val) = options.set.split('=')
+ A.set_(data, key, anyconfig.parser.parse(val))
if options.output:
cparser = A.find_loader(options.output, options.otype)
|
cli: fix a bug where values set by the --set option were not parsed and ended up processed as strings
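A small sketch of the fixed path (the option string is hypothetical; that `anyconfig.parser.parse` turns plain scalars into typed values is inferred from this change):
```python
import anyconfig.parser

option_set = "retries=3"          # hypothetical value of options.set
key, val = option_set.split("=")  # split once, as the fix does
typed = anyconfig.parser.parse(val)

# Before the fix the raw string "3" was stored; now a typed value
# lands in the config tree.
print(key, repr(typed))  # retries 3
```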
|
ssato_python-anyconfig
|
train
|
107722607eb06c9ba7c06bd76b9f49c0f53d3dc8
|
diff --git a/lib/solargraph/api_map.rb b/lib/solargraph/api_map.rb
index <HASH>..<HASH> 100755
--- a/lib/solargraph/api_map.rb
+++ b/lib/solargraph/api_map.rb
@@ -27,6 +27,10 @@ module Solargraph
@workspace = workspace
clear
unless @workspace.nil?
+ extra = File.join(workspace, '.solargraph')
+ if File.exist?(extra)
+ append_file(extra)
+ end
files = []
opts = options
(opts[:include] - opts[:exclude]).each { |glob|
@@ -52,16 +56,22 @@ module Solargraph
def options
o = {
- include: ['app/**/*.rb', 'lib/**/*.rb'],
+ include: [],
exclude: []
}
- yaml = File.join(workspace, '.solargraph.yml')
- if workspace && File.exist?(yaml)
- l = YAML.load_file(yaml)
- o[:include].concat l['include'] unless l['include'].nil?
- o[:exclude].concat l['exclude'] unless l['exclude'].nil?
- append_source(l['parse'].join("\n"), yaml) unless l['parse'].nil?
+ unless workspace.nil?
+ yardopts_file = File.join(workspace, '.yardopts')
+ if File.exist?(yardopts_file)
+ yardopts = File.read(yardopts_file)
+ yardopts.lines.each { |line|
+ arg = line.strip
+ if !arg.start_with?('-')
+ o[:include].push arg
+ end
+ }
+ end
end
+ o[:include].concat ['app/**/*.rb', 'lib/**/*.rb'] if o[:include].empty?
o
end
|
Use .yardopts for workspace files in ApiMap and .solargraph for extra code.
|
castwide_solargraph
|
train
|
031e441038bb8182ac69a9cec25a7bd557d49568
|
diff --git a/src/main/java/org/sakaiproject/nakamura/lite/types/Types.java b/src/main/java/org/sakaiproject/nakamura/lite/types/Types.java
index <HASH>..<HASH> 100644
--- a/src/main/java/org/sakaiproject/nakamura/lite/types/Types.java
+++ b/src/main/java/org/sakaiproject/nakamura/lite/types/Types.java
@@ -121,16 +121,17 @@ public class Types {
LOGGER.debug("Read key {} ",k);
output.put(k,lookupTypeById(dis.readInt()).load(dis));
}
+ String cftype = null;
try {
- String cftype = dis.readUTF();
- if (!type.equals(cftype)) {
- throw new IOException(
- "Object is not of expected column family, unable to read expected [" + type
- + "] was [" + cftype + "]");
- }
+ cftype = dis.readUTF();
} catch (IOException e) {
LOGGER.debug("No type specified");
}
+ if ( cftype != null && !cftype.equals(type)) {
+ throw new IOException(
+ "Object is not of expected column family, unable to read expected [" + type
+ + "] was [" + cftype + "]");
+ }
LOGGER.debug("Finished Reading");
dis.close();
binaryStream.close();
|
KENR-<I>, mucked up the logic there a bit, must remember to run the unit tests all the time.
|
ieb_sparsemapcontent
|
train
|
7c17fbce76580590cfbd2623f0193dd9cb2963ce
|
diff --git a/scriptblock.js b/scriptblock.js
index <HASH>..<HASH> 100644
--- a/scriptblock.js
+++ b/scriptblock.js
@@ -33,6 +33,22 @@ ScriptblockPlugin.prototype.disable = function() {
};
ScriptblockPlugin.prototype.interact = function(target) {
- console.log(target);
- // TODO: prompt for script, set at x,y,z
+ var x = target.voxel[0];
+ var y = target.voxel[1];
+ var z = target.voxel[2];
+
+ var bd = this.blockdata.get(x, y, z);
+ if (!bd) {
+ bd = {script: 'alert("Hello, voxel world!")'};
+ }
+
+ // interact (right-click) with top to set script, other sides to run
+ // TODO: run script when block takes damage instead (left-click)
+ if (target.side === 'top') {
+ bd.script = prompt("Script for block at ("+[x,y,z].join(",")+"): ", bd.script);
+
+ this.blockdata.set(x, y, z, bd);
+ } else {
+ eval(bd.script);
+ }
};
|
Set script in blockdata when interacting with the top, run when interacting with other sides
|
voxel_voxel-scriptblock
|
train
|
3beac3ae1a1e99a27d817363f98d5570c234436a
|
diff --git a/src/Command/YamlSplitCommand.php b/src/Command/YamlSplitCommand.php
index <HASH>..<HASH> 100644
--- a/src/Command/YamlSplitCommand.php
+++ b/src/Command/YamlSplitCommand.php
@@ -86,7 +86,7 @@ class YamlSplitCommand extends Command
$initial_level = 1;
$nested_array->yaml_split_array($yaml_file_parsed, $yaml_split, $indent_level, $key_flatten, $initial_level, $exclude_parents_key);
- $this->writeSplittedFile($yaml_split,$output);
+ $this->writeSplittedFile($yaml_split, $output);
}
/**
@@ -118,17 +118,17 @@ class YamlSplitCommand extends Command
$input->setArgument('yaml-file', $yaml_file);
}
- protected function writeSplittedFile($yaml_splitted, $output) {
-
+ protected function writeSplittedFile($yaml_splitted, $output)
+ {
$dumper = new Dumper();
$output->writeln(
- '[+] <info>'.
+ '[+] <info>'.
$this->trans('commands.yaml.split.messages.generating-split')
- .'</info>'
+ .'</info>'
);
- foreach($yaml_splitted as $key => $value) {
+ foreach ($yaml_splitted as $key => $value) {
$filename = $key . '.yml';
try {
@@ -148,10 +148,10 @@ class YamlSplitCommand extends Command
}
$output->writeln(
- ' [-] <info>'.sprintf(
- $this->trans('commands.yaml.split.messages.split-generated'),
- $filename
- ).'</info>'
+ ' [-] <info>'.sprintf(
+ $this->trans('commands.yaml.split.messages.split-generated'),
+ $filename
+ ).'</info>'
);
}
}
diff --git a/src/Helper/NestedArrayHelper.php b/src/Helper/NestedArrayHelper.php
index <HASH>..<HASH> 100644
--- a/src/Helper/NestedArrayHelper.php
+++ b/src/Helper/NestedArrayHelper.php
@@ -217,8 +217,8 @@ class NestedArrayHelper extends BaseDialogHelper
/**
* Flat a yaml file
- * @param array $array
- * @param array $flatten_array
+ * @param array $array
+ * @param array $flatten_array
* @param string $key_flatten
*/
public function yaml_flatten_array(array &$array, &$flatten_array, &$key_flatten = '')
@@ -246,10 +246,10 @@ class NestedArrayHelper extends BaseDialogHelper
/**
* @param array $array
* @param array $split_array
- * @param int $indent_level
+ * @param int $indent_level
* @param array $key_flatten
- * @param int $key_level
- * @param bool $exclude_parents_key
+ * @param int $key_level
+ * @param bool $exclude_parents_key
*/
public function yaml_split_array(array &$array, array &$split_array, $indent_level = '', &$key_flatten, &$key_level, $exclude_parents_key)
{
@@ -258,13 +258,13 @@ class NestedArrayHelper extends BaseDialogHelper
$key_flatten.= '.';
}
- if($exclude_parents_key) {
+ if ($exclude_parents_key) {
$key_flatten = $key;
} else {
$key_flatten .= $key;
}
- if($key_level == $indent_level) {
+ if ($key_level == $indent_level) {
if (!empty($value)) {
$split_array[$key_flatten] = $value;
@@ -273,7 +273,7 @@ class NestedArrayHelper extends BaseDialogHelper
}
}
} else {
- if(is_array($value)) {
+ if (is_array($value)) {
$key_level++;
$this->yaml_split_array($value, $split_array, $indent_level, $key_flatten, $key_level, $exclude_parents_key);
}
|
Applied PHPQA to be PSR-2 compliant
|
hechoendrupal_drupal-console
|
train
|
6e81ac2cb67b09f508f832189726e597f9d56861
|
diff --git a/wandb/tensorboard/watcher.py b/wandb/tensorboard/watcher.py
index <HASH>..<HASH> 100644
--- a/wandb/tensorboard/watcher.py
+++ b/wandb/tensorboard/watcher.py
@@ -5,6 +5,8 @@ import sys
for path in sys.path:
if "client/wandb" in path:
sys.path.remove(path)
+if sys.modules.get("tensorboard"):
+ del sys.modules["tensorboard"]
from tensorboard.backend.event_processing import directory_watcher
from tensorboard.backend.event_processing import event_file_loader
from tensorboard.compat import tf
|
fix tensorboard import issue
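A minimal reproduction of the purge, assuming `tensorboard` is installed:
```python
import sys

# If an earlier import bound a shadowed "tensorboard" module (for
# example one picked up from client/wandb on sys.path), evict it
# from the module cache.
if sys.modules.get("tensorboard"):
    del sys.modules["tensorboard"]

import tensorboard  # re-resolved from the cleaned-up search path
```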
|
wandb_client
|
train
|
f7e47a6ac5cbb1eeec6b026984eb13e3dc549c1d
|
diff --git a/cfroutes/routing_info_helpers_test.go b/cfroutes/routing_info_helpers_test.go
index <HASH>..<HASH> 100644
--- a/cfroutes/routing_info_helpers_test.go
+++ b/cfroutes/routing_info_helpers_test.go
@@ -4,7 +4,7 @@ import (
"encoding/json"
"github.com/cloudfoundry-incubator/bbs/models"
- "github.com/cloudfoundry-incubator/route-emitter/cfroutes"
+ "github.com/cloudfoundry-incubator/routing-info/cfroutes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
diff --git a/cmd/route-emitter/main_test.go b/cmd/route-emitter/main_test.go
index <HASH>..<HASH> 100644
--- a/cmd/route-emitter/main_test.go
+++ b/cmd/route-emitter/main_test.go
@@ -6,7 +6,7 @@ import (
"github.com/apcera/nats"
"github.com/cloudfoundry-incubator/bbs/models"
- "github.com/cloudfoundry-incubator/route-emitter/cfroutes"
+ "github.com/cloudfoundry-incubator/routing-info/cfroutes"
"github.com/cloudfoundry-incubator/route-emitter/routing_table"
. "github.com/cloudfoundry-incubator/route-emitter/routing_table/matchers"
. "github.com/onsi/ginkgo"
diff --git a/routing_table/by_routing_key.go b/routing_table/by_routing_key.go
index <HASH>..<HASH> 100644
--- a/routing_table/by_routing_key.go
+++ b/routing_table/by_routing_key.go
@@ -4,7 +4,7 @@ import (
"errors"
"github.com/cloudfoundry-incubator/bbs/models"
- "github.com/cloudfoundry-incubator/route-emitter/cfroutes"
+ "github.com/cloudfoundry-incubator/routing-info/cfroutes"
)
type RoutesByRoutingKey map[RoutingKey]Routes
diff --git a/routing_table/by_routing_key_test.go b/routing_table/by_routing_key_test.go
index <HASH>..<HASH> 100644
--- a/routing_table/by_routing_key_test.go
+++ b/routing_table/by_routing_key_test.go
@@ -2,7 +2,7 @@ package routing_table_test
import (
"github.com/cloudfoundry-incubator/bbs/models"
- "github.com/cloudfoundry-incubator/route-emitter/cfroutes"
+ "github.com/cloudfoundry-incubator/routing-info/cfroutes"
"github.com/cloudfoundry-incubator/route-emitter/routing_table"
. "github.com/onsi/ginkgo"
diff --git a/syncer/syncer_test.go b/syncer/syncer_test.go
index <HASH>..<HASH> 100644
--- a/syncer/syncer_test.go
+++ b/syncer/syncer_test.go
@@ -7,7 +7,7 @@ import (
"github.com/apcera/nats"
"github.com/cloudfoundry-incubator/bbs/fake_bbs"
"github.com/cloudfoundry-incubator/bbs/models"
- "github.com/cloudfoundry-incubator/route-emitter/cfroutes"
+ "github.com/cloudfoundry-incubator/routing-info/cfroutes"
"github.com/cloudfoundry-incubator/route-emitter/routing_table"
"github.com/cloudfoundry-incubator/route-emitter/syncer"
fake_metrics_sender "github.com/cloudfoundry/dropsonde/metric_sender/fake"
diff --git a/watcher/watcher.go b/watcher/watcher.go
index <HASH>..<HASH> 100644
--- a/watcher/watcher.go
+++ b/watcher/watcher.go
@@ -9,7 +9,7 @@ import (
"github.com/cloudfoundry-incubator/bbs"
"github.com/cloudfoundry-incubator/bbs/events"
"github.com/cloudfoundry-incubator/bbs/models"
- "github.com/cloudfoundry-incubator/route-emitter/cfroutes"
+ "github.com/cloudfoundry-incubator/routing-info/cfroutes"
"github.com/cloudfoundry-incubator/route-emitter/nats_emitter"
"github.com/cloudfoundry-incubator/route-emitter/routing_table"
"github.com/cloudfoundry-incubator/route-emitter/syncer"
diff --git a/watcher/watcher_test.go b/watcher/watcher_test.go
index <HASH>..<HASH> 100644
--- a/watcher/watcher_test.go
+++ b/watcher/watcher_test.go
@@ -16,7 +16,7 @@ import (
"github.com/pivotal-golang/lager/lagertest"
"github.com/tedsuo/ifrit"
- "github.com/cloudfoundry-incubator/route-emitter/cfroutes"
+ "github.com/cloudfoundry-incubator/routing-info/cfroutes"
"github.com/cloudfoundry-incubator/route-emitter/nats_emitter/fake_nats_emitter"
"github.com/cloudfoundry-incubator/route-emitter/routing_table"
"github.com/cloudfoundry-incubator/route-emitter/routing_table/fake_routing_table"
|
Move cfroutes to routing-info
[#<I>]
|
cloudfoundry_route-emitter
|
train
|
1db6405e252a0368ccee9e8ba90354088717cd41
|
diff --git a/examples/models.py b/examples/models.py
index <HASH>..<HASH> 100644
--- a/examples/models.py
+++ b/examples/models.py
@@ -15,7 +15,7 @@ class LdapUser(ldapdb.models.Model):
# LDAP meta-data
base_dn = "ou=people,dc=example,dc=org"
object_classes = ['posixAccount', 'shadowAccount', 'inetOrgPerson']
- last_modified = fields.DateTimeField(db_column='modifyTimestamp')
+ last_modified = fields.DateTimeField(db_column='modifyTimestamp', editable=False)
# inetOrgPerson
first_name = fields.CharField(db_column='givenName', verbose_name="Prime name")
|
Mark 'modifyTimestamp' as read-only
This field is managed by the LDAP server and is not user-editable.
|
django-ldapdb_django-ldapdb
|
train
|
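For reference, editable=False is standard Django field behavior: the field is excluded from generated ModelForms and from the admin change form, which is what hides the server-managed modifyTimestamp from users. A minimal sketch of the same pattern on a plain Django model (hypothetical; LdapUser in the diff inherits from ldapdb.models.Model and uses ldapdb's field classes):

from django.db import models

class Profile(models.Model):
    first_name = models.CharField(max_length=100)
    # Maintained by the server, not by users: editable=False keeps the
    # field out of ModelForms and the admin form. null=True lets rows be
    # saved before the server has stamped a value.
    last_modified = models.DateTimeField(editable=False, null=True)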
8fbf41b01b8dc21498cd4f59e9824ecbc1cbf413
|
diff --git a/src/tests/Autosuggest.js b/src/tests/Autosuggest.js
index <HASH>..<HASH> 100644
--- a/src/tests/Autosuggest.js
+++ b/src/tests/Autosuggest.js
@@ -429,11 +429,9 @@ describe('Autosuggest', function() {
describe('onSuggestionFocused', function() {
beforeEach(function() {
onSuggestionFocused.mockClear();
- onSuggestionUnfocused.mockClear();
createAutosuggest(
<Autosuggest suggestions={getSuburbStrings}
- onSuggestionFocused={onSuggestionFocused}
- onSuggestionUnfocused={onSuggestionUnfocused} />
+ onSuggestionFocused={onSuggestionFocused} />
);
setInputValue('m');
});
@@ -443,7 +441,6 @@ describe('Autosuggest', function() {
it('should call onSuggestionFocused when suggestion focused using mouse', function() {
mouseOverFromInputToSuggestion(1);
expect(onSuggestionFocused).toBeCalledWith('Mordialloc');
- expect(onSuggestionUnfocused).not.toBeCalled();
});
});
@@ -473,13 +470,11 @@ describe('Autosuggest', function() {
describe('onSuggestionUnfocused', function() {
beforeEach(function() {
- onSuggestionFocused.mockClear();
onSuggestionUnfocused.mockClear();
createAutosuggest(
<Autosuggest suggestions={getSuburbObjects}
suggestionRenderer={renderSuburbObject}
suggestionValue={getSuburbObjectValue}
- onSuggestionFocused={onSuggestionFocused}
onSuggestionUnfocused={onSuggestionUnfocused} />
);
setInputValue('m');
@@ -492,17 +487,6 @@ describe('Autosuggest', function() {
mouseOverBetweenSuggestions(0, 1);
expect(onSuggestionUnfocused).toBeCalledWith({ suburb: 'Mill Park', postcode: '3083' });
});
-
- it('should call onSuggestionUnfocused when clicking outside and suggestion is focused', function() {
- mouseOverFromInputToSuggestion(0);
- clickOutside();
- expect(onSuggestionUnfocused).toBeCalledWith({ suburb: 'Mill Park', postcode: '3083' });
- });
-
- it('should not call onSuggestionUnfocused when clicking outside and no suggestion focused', function() {
- clickOutside();
- expect(onSuggestionUnfocused).not.toBeCalled();
- });
});
describe('Keyboard interactions', function() {
@@ -523,6 +507,21 @@ describe('Autosuggest', function() {
clickEscape();
expect(onSuggestionUnfocused).not.toBeCalled();
});
+
+ it('should call onSuggestionUnfocused when clicking outside and suggestion is focused', function() {
+ clickDown();
+ clickOutside();
+ expect(onSuggestionUnfocused).toBeCalledWith({ suburb: 'Mill Park', postcode: '3083' });
+ });
+
+ it('should not call onSuggestionUnfocused when clicking outside and no suggestion focused', function() {
+ clickDown();
+ clickDown();
+ clickDown();
+ onSuggestionUnfocused.mockClear();
+ clickOutside();
+ expect(onSuggestionUnfocused).not.toBeCalled();
+ });
});
});
|
Cleans up tests for onSuggestionFocused and onSuggestionUnfocused:
- Updates simulation of clicking outside to be more realistic with
keyboard interactions
- Removes unnecessary function spies
|
moroshko_react-autosuggest
|
train
|
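The test-hygiene rule behind this cleanup: clear a spy immediately before the interactions you assert on, so calls triggered during setup (here, the extra clickDown() presses) cannot leak into the assertion, and don't wire up spies the test never asserts on. The same pattern in Python's unittest.mock, as a hypothetical analogue of Jest's mockClear():

from unittest.mock import Mock

on_unfocused = Mock()

on_unfocused("setup noise")   # setup may legitimately fire the callback

# Reset right before the interaction under test, mirroring
# onSuggestionUnfocused.mockClear() in the Jest suite above.
on_unfocused.reset_mock()
on_unfocused.assert_not_called()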
4fdfa51ca4073fe593c4cf8fd418a7f334e98fe7
|
diff --git a/mgmt/rest/plugin.go b/mgmt/rest/plugin.go
index <HASH>..<HASH> 100644
--- a/mgmt/rest/plugin.go
+++ b/mgmt/rest/plugin.go
@@ -291,24 +291,27 @@ func (s *Server) getPlugin(w http.ResponseWriter, r *http.Request, p httprouter.
return
}
- b, err := ioutil.ReadFile(plugin.PluginPath())
- if err != nil {
- f["plugin-path"] = plugin.PluginPath()
- pe := perror.New(err, f)
- respond(500, rbody.FromPulseError(pe), w)
- return
- }
+ rd := r.FormValue("download")
+ d, _ := strconv.ParseBool(rd)
+ if d {
+ b, err := ioutil.ReadFile(plugin.PluginPath())
+ if err != nil {
+ f["plugin-path"] = plugin.PluginPath()
+ pe := perror.New(err, f)
+ respond(500, rbody.FromPulseError(pe), w)
+ return
+ }
- w.Header().Set("Content-Encoding", "gzip")
- gz := gzip.NewWriter(w)
- defer gz.Close()
- _, err = gz.Write(b)
- if err != nil {
- f["plugin-path"] = plugin.PluginPath()
- pe := perror.New(err, f)
- respond(500, rbody.FromPulseError(pe), w)
+ w.Header().Set("Content-Encoding", "gzip")
+ gz := gzip.NewWriter(w)
+ defer gz.Close()
+ _, err = gz.Write(b)
+ if err != nil {
+ f["plugin-path"] = plugin.PluginPath()
+ pe := perror.New(err, f)
+ respond(500, rbody.FromPulseError(pe), w)
+ return
+ }
return
}
-
- // w.WriteHeader(200)
}
|
Adds a required download param that must be set when downloading a plugin
|
intelsdi-x_snap
|
train
|
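The handler change gates the gzipped file body behind a download query parameter parsed with strconv.ParseBool, so a plain GET no longer streams the plugin binary. A rough Python analogue of that gating logic (the function names and the metadata branch are hypothetical, not the snap API):

import gzip
from urllib.parse import parse_qs, urlparse

def parse_bool(value):
    # Loose analogue of Go's strconv.ParseBool; parse errors map to False,
    # matching the handler's ignored error return (d, _ := strconv.ParseBool(rd)).
    return value.strip().lower() in {"1", "t", "true"}

def handle_get_plugin(url, plugin_path):
    query = parse_qs(urlparse(url).query)
    download = parse_bool(query.get("download", [""])[0])
    if not download:
        return b"{}"          # metadata-only response, elided in this sketch
    with open(plugin_path, "rb") as fh:
        return gzip.compress(fh.read())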