| hash (stringlengths 40–40) | diff (stringlengths 131–114k) | message (stringlengths 7–980) | project (stringlengths 5–67) | split (stringclasses 1) |
|---|---|---|---|---|
8e1f501701ad77fdd34489868694f0ca261c371c
|
diff --git a/lib/x11/xcore.js b/lib/x11/xcore.js
index <HASH>..<HASH> 100644
--- a/lib/x11/xcore.js
+++ b/lib/x11/xcore.js
@@ -87,9 +87,10 @@ XClient.prototype.terminate = function()
// GetAtomName used as cheapest non-modifying request with reply
// 3 - id for shortest standard atom, "ARC"
XClient.prototype.ping = function(cb) {
+ var start = Date.now();
this.GetAtomName(3, function(err, str) {
if (err) return cb(err);
- return cb();
+ return cb(null, Date.now() - start);
});
}
|
return roundtrip time in ping() callback
|
sidorares_node-x11
|
train
|
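The node-x11 commit above turns `ping()` into a latency probe: stamp the clock before issuing the cheapest request that still has a reply, then hand the elapsed time to the callback. A minimal Python sketch of the same pattern, where `send_request` is a hypothetical stand-in for any callback-style request function:

```python
import time

def ping(send_request, callback):
    # Stamp the clock before the request goes out.
    start = time.monotonic()

    def on_reply(err, _reply):
        if err is not None:
            callback(err, None)
            return
        # Report the round-trip time in milliseconds instead of the reply.
        callback(None, (time.monotonic() - start) * 1000.0)

    send_request(on_reply)

# Usage with a fake transport that replies immediately:
ping(lambda cb: cb(None, "pong"), lambda err, ms: print(f"{ms:.3f} ms"))
```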
2bee4a157068edcd9a159ee848bec46c061d34b1
|
diff --git a/helpers/content.go b/helpers/content.go
index <HASH>..<HASH> 100644
--- a/helpers/content.go
+++ b/helpers/content.go
@@ -69,18 +69,16 @@ var blackfridayExtensionMap = map[string]int{
"autoHeaderIds": blackfriday.EXTENSION_AUTO_HEADER_IDS,
}
+var stripHTMLReplacer = strings.NewReplacer("\n", " ", "</p>", "\n", "<br>", "\n", "<br />", "\n")
+
// StripHTML accepts a string, strips out all HTML tags and returns it.
func StripHTML(s string) string {
- output := ""
// Shortcut strings with no tags in them
if !strings.ContainsAny(s, "<>") {
- output = s
+ return s
} else {
- s = strings.Replace(s, "\n", " ", -1)
- s = strings.Replace(s, "</p>", "\n", -1)
- s = strings.Replace(s, "<br>", "\n", -1)
- s = strings.Replace(s, "<br />", "\n", -1) // <br /> is the xhtml line break tag
+ s = stripHTMLReplacer.Replace(s)
// Walk through the string removing all tags
b := new(bytes.Buffer)
@@ -97,9 +95,8 @@ func StripHTML(s string) string {
}
}
}
- output = b.String()
+ return b.String()
}
- return output
}
// StripEmptyNav strips out empty <nav> tags from content.
diff --git a/helpers/content_test.go b/helpers/content_test.go
index <HASH>..<HASH> 100644
--- a/helpers/content_test.go
+++ b/helpers/content_test.go
@@ -14,6 +14,8 @@ func TestStripHTML(t *testing.T) {
{"<h1>strip h1 tag <h1>", "strip h1 tag "},
{"<p> strip p tag </p>", " strip p tag \n"},
{"</br> strip br<br>", " strip br\n"},
+ {"</br> strip br2<br />", " strip br2\n"},
+ {"This <strong>is</strong> a\nnewline", "This is a newline"},
}
for i, d := range data {
output := StripHTML(d.input)
|
Replace 4 strings.Replace with 1 strings.Replacer
Consumes less memory, slightly faster.
|
gohugoio_hugo
|
train
|
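The Hugo commit above swaps four sequential `strings.Replace` passes for a single `strings.NewReplacer` built once at package level. A rough Python analog of the same precompile-once, substitute-in-one-pass idea (the function name is illustrative):

```python
import re

# Built once at import time, like the package-level strings.NewReplacer.
_PATTERN = re.compile(r"\n|</p>|<br />|<br>")
_REPLACEMENTS = {"\n": " ", "</p>": "\n", "<br>": "\n", "<br />": "\n"}

def replace_breaks(s: str) -> str:
    # One scan over the input; each match is mapped through the table.
    return _PATTERN.sub(lambda m: _REPLACEMENTS[m.group(0)], s)

print(replace_breaks("one\ntwo</p>three<br>four"))  # -> "one two\nthree\nfour"
```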
7de74f1d0489bdfa1af899fde51b21da56b240a6
|
diff --git a/glue/text_progress_bar.py b/glue/text_progress_bar.py
index <HASH>..<HASH> 100644
--- a/glue/text_progress_bar.py
+++ b/glue/text_progress_bar.py
@@ -154,6 +154,7 @@ class ProgressBar:
nBlanks = barWidth - nBlocksInt - 1
barSymbols = (self.sequence[-1] * nBlocksInt) + partialBlock + \
(self.sequence[0] * nBlanks)
+ barSymbols = barSymbols[:barWidth]
progressFractionText = ('%.1f%%' % (100*progressFraction)).rjust(6)
print >>self.fid, '\r\x1B[1m' + label + '\x1B[0m [' + barSymbols + \
|
Fix bar length when at <I>%
|
gwastro_pycbc-glue
|
train
|
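The pycbc-glue fix above is a one-line clamp: the bar is assembled from full blocks, a partial block, and padding, and the pieces can add up to one cell more than `barWidth`, so the finished string is sliced back to the target width. A self-contained sketch of the idea; the rendering math here is an assumption for illustration, not the project's exact code:

```python
def render_bar(fraction: float, width: int = 20, sequence: str = " .:!|") -> str:
    full = int(fraction * width)
    remainder = fraction * width - full
    partial = sequence[round(remainder * (len(sequence) - 1))] if remainder else ""
    bar = sequence[-1] * full + partial + sequence[0] * (width - full - 1)
    # A fraction that creeps past 1.0 (easy with float accumulation) would
    # otherwise overflow the bar; the slice mirrors the one-line fix above.
    return bar[:width]

print(render_bar(1.02) == render_bar(1.0))  # True: clamped to `width` cells
```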
fac0154a49a8e1e0b079381fcd46e62566c1290d
|
diff --git a/index.js b/index.js
index <HASH>..<HASH> 100644
--- a/index.js
+++ b/index.js
@@ -13,6 +13,7 @@ module.exports = function(options) {
// Determine root module and root directory
self.root = options.root || getRoot();
self.rootDir = options.rootDir || path.dirname(self.root.filename);
+ self.npmRootDir = options.npmRootDir || self.rootDir;
testModule();
@@ -257,7 +258,7 @@ module.exports = function(options) {
}
function getNpmPath(name) {
- var parentPath = path.resolve(self.rootDir);
+ var parentPath = path.resolve(self.npmRootDir);
try {
return npmResolve.sync(name, { basedir: parentPath });
} catch (e) {
diff --git a/lib/modules/apostrophe-i18n/index.js b/lib/modules/apostrophe-i18n/index.js
index <HASH>..<HASH> 100644
--- a/lib/modules/apostrophe-i18n/index.js
+++ b/lib/modules/apostrophe-i18n/index.js
@@ -3,6 +3,11 @@
// usual `__()` helper function. Any options passed to this module are passed on to `i18n`.
//
// By default i18n locale files are generated in the `locales` subdirectory of the project.
+//
+// ## Options
+//
+// `localesDir`: if specified, the locale `.json` files are stored here, otherwise they
+// are stored in the `locales` subdirectory of the project root.
var _ = require('lodash');
var i18n = require('i18n');
@@ -14,7 +19,7 @@ module.exports = {
_.defaults(i18nOptions, {
locales: ['en'],
cookie: 'apos_language',
- directory: self.apos.rootDir + '/locales'
+ directory: self.options.localesDir || (self.apos.rootDir + '/locales')
});
i18n.configure(i18nOptions);
|
npmRootDir and localesDir options that override apos.rootDir, allowing rootDir to be used to point all asset-ish, data-ish things painlessly to an alternate location
|
apostrophecms_apostrophe
|
train
|
3eb4a013b13b80a5d893381a68b1ec174afd6f7f
|
diff --git a/tests/test_runtime.py b/tests/test_runtime.py
index <HASH>..<HASH> 100644
--- a/tests/test_runtime.py
+++ b/tests/test_runtime.py
@@ -87,7 +87,7 @@ class TestRuntime(unittest.TestCase):
crossplat_loop_run(test())
def test_joined_lines(self):
- """Test joining and iterating over lines joined from subprocesses."""
+ """Test iterating over lines joined from subprocesses."""
async def test():
# TODO
assert True
|
Remove redundant docstring comment from runtime tests
|
welchbj_sublemon
|
train
|
abee459516f5d88ceccf6f287e0202a81d2f4c87
|
diff --git a/demos/src/CustomArrowButton.js b/demos/src/CustomArrowButton.js
index <HASH>..<HASH> 100644
--- a/demos/src/CustomArrowButton.js
+++ b/demos/src/CustomArrowButton.js
@@ -1,96 +1,55 @@
-import { merge } from '../../src/updates.js';
import * as symbols from '../../src/symbols.js';
-import HoverMixin from '../../src/HoverMixin.js';
import WrappedStandardElement from '../../src/WrappedStandardElement.js';
-const Base =
- HoverMixin(
- WrappedStandardElement.wrap('button')
- );
-
-
-class CustomArrowButton extends Base {
-
- get defaultState() {
- return Object.assign({}, super.defaultState, {
- disabled: false
- });
- }
-
- get disabled() {
- // @ts-ignore
- return super.disabled;
- }
- set disabled(disabled) {
- const parsed = disabled != null;
- // @ts-ignore
- super.disabled = parsed;
- this.setState({
- disabled: parsed
- });
- }
-
- get updates() {
-
- const style = Object.assign(
- {
- background: 'rgba(255, 255, 255, 0.2)',
- 'border-color': 'rgba(255, 255, 255, 0.7)',
- color: 'rgba(255, 255, 255, 0.7)',
- transform: 'scale(1.0)'
- },
- this.state.hover && {
- background: 'rgba(255, 255, 255, 0.5)',
- 'border-color': 'rgba(255, 255, 255, 0.8)',
- color: 'rgba(255, 255, 255, 0.8)',
- cursor: 'pointer',
- transform: 'scale(1.1)'
- },
- this.state.disabled && {
- background: '',
- 'border-color': 'rgba(255, 255, 255, 0.2)',
- color: 'rgba(255, 255, 255, 0.2)',
- cursor: '',
- transform: 'scale(1.0)'
- }
- );
-
- return merge(super.updates, {
- $: {
- inner: { style }
- }
- });
- }
+class CustomArrowButton extends WrappedStandardElement.wrap('button') {
get [symbols.template]() {
return `
<style>
:host {
+ align-items: center;
display: flex;
- flex-direction: column;
- justify-content: center;
+ font-size: 28px;
+ font-weight: bold;
+ margin: 0.5em;
-webkit-tap-highlight-color: transparent;
}
#inner {
- background: transparent;
+ background: rgba(255, 255, 255, 0.2);
+ border-color: rgba(255, 255, 255, 0.7);
border: 2px solid transparent;
box-sizing: border-box;
color: inherit;
+ color: rgba(255, 255, 255, 0.7);
fill: currentColor;
+ flex: 1;
font-family: inherit;
- font-size: 28px;
- font-weight: bold;
+ font-size: inherit;
+ font-weight: inherit;
height: 48px;
- margin: 0.5em;
- width: 48px;
+ margin: 0;
outline: none;
padding: 0;
position: relative;
+ transform: scale(1.0);
transition: background 0.3s, border-color 0.3s, color 0.3s, transform 0.3s;
width: 48px;
}
+
+ :host(:hover) #inner:not(:disabled) {
+ border-color: rgba(255, 255, 255, 0.8);
+ color: rgba(255, 255, 255, 0.8);
+ cursor: pointer;
+ transform: scale(1.1);
+ }
+
+ #inner:disabled {
+ border-color: rgba(255, 255, 255, 0.2);
+ color: rgba(255, 255, 255, 0.2);
+ transform: scale(1.0);
+ }
</style>
<button id="inner">
<slot></slot>
diff --git a/src/ArrowDirectionButton.js b/src/ArrowDirectionButton.js
index <HASH>..<HASH> 100644
--- a/src/ArrowDirectionButton.js
+++ b/src/ArrowDirectionButton.js
@@ -18,38 +18,18 @@ const Base =
*/
class ArrowDirectionButton extends Base {
- get defaultState() {
- return Object.assign({}, super.defaultState, {
- disabled: false
- });
- }
-
- get disabled() {
- // @ts-ignore
- return super.disabled;
- }
- set disabled(disabled) {
- const parsed = disabled != null;
- // @ts-ignore
- super.disabled = parsed;
- this.setState({
- disabled: parsed
- });
- }
-
get updates() {
const style = Object.assign(
{
background: '',
color: 'rgba(255, 255, 255, 0.7)'
},
- this.state.hover && {
+ this.state.hover && !this.state.disabled && {
background: 'rgba(255, 255, 255, 0.2)',
color: 'rgba(255, 255, 255, 0.8)',
cursor: 'pointer'
},
this.state.disabled && {
- background: '',
color: 'rgba(255, 255, 255, 0.3)'
}
);
@@ -61,13 +41,14 @@ class ArrowDirectionButton extends Base {
get [symbols.template]() {
return `
<style>
- :host(:not([hidden])) {
+ :host {
display: flex;
+ -webkit-tap-highlight-color: transparent;
}
#inner {
background: transparent;
- border: 1px solid transparent;
+ border: none;
box-sizing: border-box;
color: inherit;
fill: currentColor;
@@ -79,7 +60,6 @@ class ArrowDirectionButton extends Base {
outline: none;
padding: 0;
position: relative;
- transition: opacity 1s;
}
</style>
<button id="inner">
|
Simplify construction of arrow buttons.
Use CSS instead of updates to style custom arrow button.
|
elix_elix
|
train
|
1982cb8b7b52ee5899b3ae9d7cb44f392d89ae31
|
diff --git a/src/authority/tests.py b/src/authority/tests.py
index <HASH>..<HASH> 100644
--- a/src/authority/tests.py
+++ b/src/authority/tests.py
@@ -52,3 +52,65 @@ class BehaviourTest(TestCase):
# test
self.assertFalse(self.check.delete_user())
self.assertTrue(self.check.delete_user(self.user))
+
+class AssignBehaviourTest(TestCase):
+ '''
+ self.user will be given:
+ - permission add_user (test_add),
+ - permission delete_user for him (test_delete),
+ - all existing codenames permissions: a/b/c/d (test_all),
+ '''
+
+ fixtures = ['tests.json',]
+
+ def setUp(self):
+ self.user = User.objects.get(username='jezdez')
+ self.check = UserPermission(self.user)
+
+ def test_add(self):
+ result = self.check.assign(codename='add_user')
+
+ self.assertTrue(isinstance(result, DjangoPermission))
+ self.assertTrue(self.check.add_user())
+
+ def test_delete(self):
+ result = self.check.assign(content_object=self.user, codename='delete_user')
+
+ self.assertTrue(isinstance(result, Permission))
+ self.assertFalse(self.check.delete_user())
+ self.assertTrue(self.check.delete_user(self.user))
+
+ def test_all(self):
+ result = self.check.assign(content_object=self.user)
+
+ self.assertTrue(isinstance(result, list))
+ self.assertTrue(self.check.delete_user())
+ self.assertTrue(self.check.add_user())
+ self.assertTrue(self.check.change_user())
+ self.assertTrue(self.check.browse_user())
+
+class GenericAssignBehaviourTest(TestCase):
+ '''
+ self.user will be given:
+ - permission add (test_add),
+ - permission delete for him (test_delete),
+ '''
+
+ fixtures = ['tests.json',]
+
+ def setUp(self):
+ self.user = User.objects.get(username='jezdez')
+ self.check = UserPermission(self.user)
+
+ def test_add(self):
+ result = self.check.assign(codename='add', generic=True)
+
+ self.assertTrue(isinstance(result, DjangoPermission))
+ self.assertTrue(self.check.add_user())
+
+ def test_delete(self):
+ result = self.check.assign(content_object=self.user, codename='delete', generic=True)
+
+ self.assertTrue(isinstance(result, Permission))
+ self.assertFalse(self.check.delete_user())
+ self.assertTrue(self.check.delete_user(self.user))
|
Add basic behaviour tests for BasePermission.assign()
|
jazzband_django-authority
|
train
|
6078fc1351497f7e86ad1d870e8290253944b6f1
|
diff --git a/gcloud/datastore/batch.py b/gcloud/datastore/batch.py
index <HASH>..<HASH> 100644
--- a/gcloud/datastore/batch.py
+++ b/gcloud/datastore/batch.py
@@ -72,6 +72,7 @@ class Batch(object):
'a dataset ID set.')
self._mutation = datastore_pb.Mutation()
+ self._auto_id_entities = []
@property
def dataset_id(self):
@@ -137,6 +138,9 @@ class Batch(object):
self.dataset_id, key_pb, properties,
exclude_from_indexes=exclude, mutation=self.mutation)
+ if entity.key.is_partial:
+ self._auto_id_entities.append(entity)
+
def delete(self, key):
"""Remember a key to be deleted durring ``commit``.
@@ -159,7 +163,11 @@ class Batch(object):
however it can be called explicitly if you don't want to use a
context manager.
"""
- self.connection.commit(self._dataset_id, self.mutation)
+ response = self.connection.commit(self._dataset_id, self.mutation)
+ for new_key_pb, entity in zip(response.insert_auto_id_key,
+ self._auto_id_entities):
+ new_id = new_key_pb.path_element[-1].id
+ entity.key = entity.key.completed_key(new_id)
def __enter__(self):
return self
diff --git a/gcloud/datastore/test_batch.py b/gcloud/datastore/test_batch.py
index <HASH>..<HASH> 100644
--- a/gcloud/datastore/test_batch.py
+++ b/gcloud/datastore/test_batch.py
@@ -46,6 +46,7 @@ class TestBatch(unittest2.TestCase):
self.assertEqual(batch.dataset_id, _DATASET)
self.assertEqual(batch.connection, connection)
self.assertTrue(isinstance(batch.mutation, Mutation))
+ self.assertEqual(batch._auto_id_entities, [])
def test_ctor_implicit(self):
from gcloud._testing import _Monkey
@@ -62,6 +63,7 @@ class TestBatch(unittest2.TestCase):
self.assertEqual(batch.dataset_id, DATASET_ID)
self.assertEqual(batch.connection, CONNECTION)
self.assertTrue(isinstance(batch.mutation, Mutation))
+ self.assertEqual(batch._auto_id_entities, [])
def test_put_entity_wo_key(self):
_DATASET = 'DATASET'
@@ -70,7 +72,23 @@ class TestBatch(unittest2.TestCase):
self.assertRaises(ValueError, batch.put, _Entity())
- def test_put_entity_w_key(self):
+ def test_put_entity_w_partial_key(self):
+ _DATASET = 'DATASET'
+ _PROPERTIES = {'foo': 'bar'}
+ connection = _Connection()
+ batch = self._makeOne(dataset_id=_DATASET, connection=connection)
+ entity = _Entity(_PROPERTIES)
+ key = entity.key = _Key(_DATASET)
+ key._partial = True
+
+ batch.put(entity)
+
+ self.assertEqual(
+ connection._saved,
+ (_DATASET, key._key, _PROPERTIES, (), batch.mutation))
+ self.assertEqual(batch._auto_id_entities, [entity])
+
+ def test_put_entity_w_completed_key(self):
_DATASET = 'DATASET'
_PROPERTIES = {'foo': 'bar'}
connection = _Connection()
@@ -114,6 +132,22 @@ class TestBatch(unittest2.TestCase):
self.assertEqual(connection._committed, (_DATASET, batch.mutation))
+ def test_commit_w_auto_id_entities(self):
+ _DATASET = 'DATASET'
+ _NEW_ID = 1234
+ connection = _Connection(_NEW_ID)
+ batch = self._makeOne(dataset_id=_DATASET, connection=connection)
+ entity = _Entity({})
+ key = entity.key = _Key(_DATASET)
+ key._partial = True
+ batch._auto_id_entities.append(entity)
+
+ batch.commit()
+
+ self.assertEqual(connection._committed, (_DATASET, batch.mutation))
+ self.assertFalse(key._partial)
+ self.assertEqual(key._id, _NEW_ID)
+
def test_as_context_mgr_wo_error(self):
_DATASET = 'DATASET'
_PROPERTIES = {'foo': 'bar'}
@@ -154,7 +188,19 @@ class TestBatch(unittest2.TestCase):
class _CommitResult(object):
def __init__(self, *new_keys):
- self.insert_auto_id_key = new_keys
+ self.insert_auto_id_key = [_KeyPB(key) for key in new_keys]
+
+
+class _PathElementPB(object):
+
+ def __init__(self, id):
+ self.id = id
+
+
+class _KeyPB(object):
+
+ def __init__(self, id):
+ self.path_element = [_PathElementPB(id)]
class _Connection(object):
@@ -162,8 +208,8 @@ class _Connection(object):
_committed = _saved = _deleted = None
_save_result = (False, None)
- def __init__(self):
- self._commit_result = _CommitResult()
+ def __init__(self, *new_keys):
+ self._commit_result = _CommitResult(*new_keys)
def save_entity(self, dataset_id, key_pb, properties,
exclude_from_indexes=(), mutation=None):
@@ -201,3 +247,8 @@ class _Key(object):
def to_protobuf(self):
return self._key
+
+ def completed_key(self, new_id):
+ assert self._partial
+ self._id = new_id
+ self._partial = False
|
Add 'insert_auto_ids' support to 'Batch'.
Follow-on to #<I>.
|
googleapis_google-cloud-python
|
train
|
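The google-cloud-python commit above keeps a side list of entities whose keys still need server-assigned IDs, then zips that list against the completed keys returned in the commit response, relying on the server preserving insertion order. A stripped-down sketch of the bookkeeping; the `connection` and `key` interfaces are assumptions for illustration:

```python
class Batch:
    def __init__(self, connection):
        self.connection = connection
        self._mutations = []
        self._auto_id_entities = []

    def put(self, entity):
        self._mutations.append(entity)
        if entity.key.is_partial:
            # Remember, in order, every entity awaiting a server-assigned ID.
            self._auto_id_entities.append(entity)

    def commit(self):
        response = self.connection.commit(self._mutations)
        # One completed key comes back per auto-ID insert, in put() order.
        for new_id, entity in zip(response.insert_auto_ids, self._auto_id_entities):
            entity.key = entity.key.completed_key(new_id)
```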
7c629d27f6b8238b312ec557a134694d1f043b0a
|
diff --git a/spark.js b/spark.js
index <HASH>..<HASH> 100644
--- a/spark.js
+++ b/spark.js
@@ -91,9 +91,9 @@ Spark.writable('__readyState', Spark.OPEN);
// Lazy parse interface for IP address information. As nobody is always
// interested in this, we're going to defer parsing until it's actually needed.
//
-Spark.readable('address', { get: function address() {
+Spark.get('address', function address() {
return this.request.forwarded || forwarded(this.remote, this.headers, this.primus.whitelist);
-}}, true);
+});
/**
* Set a timer to forcibly disconnect the spark if no data is received from the
|
[minor] Use the shorthand for assigning a getter.
|
primus_primus
|
train
|
b58b22cf8e830e555a8235a7e16e9c02acb856c1
|
diff --git a/lib/rules/no-component-lifecycle-hooks.js b/lib/rules/no-component-lifecycle-hooks.js
index <HASH>..<HASH> 100644
--- a/lib/rules/no-component-lifecycle-hooks.js
+++ b/lib/rules/no-component-lifecycle-hooks.js
@@ -30,7 +30,7 @@ module.exports = {
create(context) {
let isInsideEmberComponent = false;
- let currentComponent = null;
+ let currentEmberComponent = null;
const isAClassicLifecycleHook = keyName => {
return isComponentLifecycleHook(keyName) && isInsideEmberComponent;
@@ -43,15 +43,21 @@ module.exports = {
},
CallExpression(node) {
- isInsideEmberComponent = emberUtils.isEmberComponent(context, node);
+ if (isEmberComponent(context, node)) {
+ currentEmberComponent = node;
+ isInsideEmberComponent = true;
+ }
},
'ClassDeclaration:exit'() {
isInsideEmberComponent = false;
},
- 'CallExpression:exit'() {
- isInsideEmberComponent = false;
+ 'CallExpression:exit'(node) {
+ if (currentEmberComponent === node) {
+ currentEmberComponent = null;
+ isInsideEmberComponent = false;
+ }
},
MethodDefinition(node) {
@@ -61,7 +67,6 @@ module.exports = {
},
Property(node) {
- // TODO: fix
if (isAClassicLifecycleHook(node)) {
report(context, node.key);
}
diff --git a/tests/lib/rules/no-component-lifecycle-hooks.js b/tests/lib/rules/no-component-lifecycle-hooks.js
index <HASH>..<HASH> 100644
--- a/tests/lib/rules/no-component-lifecycle-hooks.js
+++ b/tests/lib/rules/no-component-lifecycle-hooks.js
@@ -128,5 +128,22 @@ ruleTester.run('no-component-lifecycle-hooks', rule, {
},
],
},
+ {
+ code: `
+ import Component from "@glimmer/component";
+
+ export const Component1 = Component.extend({
+ test: computed('', function () {}),
+ didDestroyElement() {},
+ });
+ `,
+ output: null,
+ errors: [
+ {
+ message: ERROR_MESSAGE,
+ type: 'Identifier',
+ },
+ ],
+ },
],
});
|
Fix issue & add test case for the multiple-CallExpression case
|
ember-cli_eslint-plugin-ember
|
train
|
44a62fd4b1b1ee303625fc8aad3e8a5f155b77d4
|
diff --git a/moderngl/program_members/uniform_block.py b/moderngl/program_members/uniform_block.py
index <HASH>..<HASH> 100644
--- a/moderngl/program_members/uniform_block.py
+++ b/moderngl/program_members/uniform_block.py
@@ -32,6 +32,18 @@ class UniformBlock:
self.mglo.binding = binding
@property
+ def value(self) -> int:
+ '''
+ int: The value of the uniform block.
+ '''
+
+ return self.mglo.value
+
+ @value.setter
+ def value(self, value):
+ self.mglo.binding = value
+
+ @property
def name(self) -> str:
'''
str: The name of the uniform block.
|
uniform value is the uniform binding (forward compatibility)
|
moderngl_moderngl
|
train
|
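The moderngl commit above adds a `value` property that is just another name for the block's binding point, so callers written against either spelling keep working. In Python a property object can be aliased directly; a quick sketch with an illustrative class name:

```python
class UniformBlockLike:
    def __init__(self):
        self._binding = 0

    @property
    def binding(self) -> int:
        return self._binding

    @binding.setter
    def binding(self, binding: int) -> None:
        self._binding = binding

    # Alias: `value` reads and writes the same slot as `binding`.
    value = binding

block = UniformBlockLike()
block.value = 3
assert block.binding == 3
```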
3d93d86d4237b484099b5e129cb2d3678729829d
|
diff --git a/lib/mysql2.rb b/lib/mysql2.rb
index <HASH>..<HASH> 100644
--- a/lib/mysql2.rb
+++ b/lib/mysql2.rb
@@ -15,7 +15,7 @@ require 'mysql2/result'
module Mysql2
end
-if defined?(ActiveRecord::VERSION::STRING) < "3.1"
+if defined?(ActiveRecord::VERSION::STRING) && ActiveRecord::VERSION::STRING < "3.1"
puts "WARNING: This version of mysql2 (#{Mysql2::VERSION}) doesn't ship with the ActiveRecord adapter bundled anymore as it's now part of Rails 3.1"
puts "WARNING: Please use the 0.2.x releases if you plan on using it in Rails <= 3.0.x"
end
\ No newline at end of file
|
wow, maybe it's time for bed
|
brianmario_mysql2
|
train
|
f0d3df197b9b3d5cc173d402e74fd1b21b40fe61
|
diff --git a/bin/melodrama-scripts.js b/bin/melodrama-scripts.js
index <HASH>..<HASH> 100644
--- a/bin/melodrama-scripts.js
+++ b/bin/melodrama-scripts.js
@@ -15,6 +15,7 @@ const cli = meow({
${chalk.underline('Commands:')}
init [dir] bootstrap project dir
start <file> run dev server with file as entry
+ build <file> build with file as entry
${chalk.underline('Options:')}
-p, --protocol use custom port (Default: 3000)
|
:lipstick: Add CLI doc for build command.
|
sebald_melodrama-scripts
|
train
|
c44f50dc048f97d5a020d844deb9afa26b2889dc
|
diff --git a/salt/master.py b/salt/master.py
index <HASH>..<HASH> 100644
--- a/salt/master.py
+++ b/salt/master.py
@@ -50,7 +50,7 @@ from salt.utils.event import tagify
import binascii
from salt.utils.master import ConnectedCache
from salt.utils.cache import CacheCli
-from six.moves import range
+from salt.utils.six.moves import range
# Import halite libs
try:
|
Replaced module six in file /salt/master.py
|
saltstack_salt
|
train
|
db6c0ee55c60afc12412af02d4d0652659950d2f
|
diff --git a/openpnm/core/Base.py b/openpnm/core/Base.py
index <HASH>..<HASH> 100644
--- a/openpnm/core/Base.py
+++ b/openpnm/core/Base.py
@@ -1629,6 +1629,7 @@ class Base(dict):
else:
r = int(np.ceil(N**0.5))
c = int(np.floor(N**0.5))
+ plt.figure()
for i in range(len(props)):
plt.subplot(r, c, i+1)
try:
|
prevent overwriting the previous fig when calling the method
|
PMEAL_OpenPNM
|
train
|
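The OpenPNM one-liner above works because pyplot's `subplot` draws into whatever figure is *current*; without an explicit `plt.figure()`, a second call to the method scribbles over the first plot. A small sketch of the pattern, with the subplot-grid math simplified:

```python
import math
import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt

def plot_histograms(datasets):
    # Open a fresh figure so repeated calls never reuse the current one.
    plt.figure()
    rows = math.ceil(len(datasets) ** 0.5)
    cols = math.ceil(len(datasets) / rows)
    for i, values in enumerate(datasets):
        plt.subplot(rows, cols, i + 1)
        plt.hist(values)

plot_histograms([[1, 2, 2, 3], [4, 4, 5]])
plot_histograms([[1, 1, 2]])  # lands in its own figure, not on top of the first
```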
969a305b8ad82010963de70b01b012e3ead621e1
|
diff --git a/WrightTools/data/_pycmds.py b/WrightTools/data/_pycmds.py
index <HASH>..<HASH> 100644
--- a/WrightTools/data/_pycmds.py
+++ b/WrightTools/data/_pycmds.py
@@ -127,7 +127,7 @@ def from_PyCMDS(filepath, name=None, parent=None, verbose=True, *, collapse=True
else:
_no_collapse_create(data, headers, signed, index, kind, name, shape)
if not collapse:
- _no_collapse_fill(data, headers, file_, shape)
+ _no_collapse_fill(data, headers, file_, shape, verbose)
file_.close()
# axes
for a in axes:
@@ -267,12 +267,14 @@ def _no_collapse_create(data, headers, signed, index, kind, name, shape):
data.create_channel(name=name, shape=sh, dtype=np.dtype(np.float64), signed=next(signed))
-def _no_collapse_fill(data, headers, file_, shape):
+def _no_collapse_fill(data, headers, file_, shape, verbose):
frame_size = shape[-1]
file_.seek(0)
arr = np.genfromtxt(file_, max_rows=frame_size)
while arr.size > 0:
index = tuple(arr[0, 0 : len(shape) - 1].astype(np.int))
+ if verbose:
+ print(index)
for i, (kind, name) in enumerate(zip(headers["kind"], headers["name"])):
if kind is None and name != "time":
continue
|
no collapse verbose (#<I>)
|
wright-group_WrightTools
|
train
|
d59ae7fda38c2dcc0e176ebfe9016bc8620f3059
|
diff --git a/spectator-reg-tdigest/src/main/java/com/netflix/spectator/tdigest/TDigestConfig.java b/spectator-reg-tdigest/src/main/java/com/netflix/spectator/tdigest/TDigestConfig.java
index <HASH>..<HASH> 100644
--- a/spectator-reg-tdigest/src/main/java/com/netflix/spectator/tdigest/TDigestConfig.java
+++ b/spectator-reg-tdigest/src/main/java/com/netflix/spectator/tdigest/TDigestConfig.java
@@ -22,7 +22,7 @@ import com.netflix.archaius.annotations.DefaultValue;
* Configuration settings for the digest plugin.
*/
@Configuration(prefix = "spectator.tdigest.kinesis")
-interface TDigestConfig {
+public interface TDigestConfig {
/** Kinesis endpoint to use. */
@DefaultValue("kinesis.${EC2_REGION}.amazonaws.com")
String endpoint();
|
make TDigestConfig public
|
Netflix_spectator
|
train
|
480cf4a9476d8c8d24727120a640ac7c07c729c7
|
diff --git a/go/teams/create.go b/go/teams/create.go
index <HASH>..<HASH> 100644
--- a/go/teams/create.go
+++ b/go/teams/create.go
@@ -158,6 +158,10 @@ func CreateSubteam(ctx context.Context, g *libkb.GlobalContext, subteamBasename
return nil, err
}
+ if err := parentTeam.ForceMerkleRootUpdate(ctx); err != nil {
+ return nil, err
+ }
+
// Subteam creation involves two links, one in the parent team's chain, and
// one to start the new subteam chain. The start of the new subteam chain
// (type "team.subteam_head") is very similar to the "team.root" sig that
diff --git a/go/teams/create_test.go b/go/teams/create_test.go
index <HASH>..<HASH> 100644
--- a/go/teams/create_test.go
+++ b/go/teams/create_test.go
@@ -67,8 +67,6 @@ func TestCreateTeamAfterAccountReset(t *testing.T) {
}
func TestCreateSubteam(t *testing.T) {
- t.Skip("waiting for client merkle fix")
-
tc := SetupTest(t, "team", 1)
defer tc.Cleanup()
diff --git a/go/teams/loader_test.go b/go/teams/loader_test.go
index <HASH>..<HASH> 100644
--- a/go/teams/loader_test.go
+++ b/go/teams/loader_test.go
@@ -282,8 +282,6 @@ func TestLoaderWantMembers(t *testing.T) {
// Test loading a team that has a subteam in it
func TestLoaderParentEasy(t *testing.T) {
- t.Skip("waiting for client merkle fix")
-
_, tcs, cleanup := setupNTests(t, 1)
defer cleanup()
@@ -309,8 +307,6 @@ func TestLoaderParentEasy(t *testing.T) {
// Test loading a subteam
func TestLoaderSubteamEasy(t *testing.T) {
- t.Skip("waiting for client merkle fix")
-
_, tcs, cleanup := setupNTests(t, 1)
defer cleanup()
diff --git a/go/teams/teams.go b/go/teams/teams.go
index <HASH>..<HASH> 100644
--- a/go/teams/teams.go
+++ b/go/teams/teams.go
@@ -283,6 +283,10 @@ func (t *Team) Rotate(ctx context.Context) error {
return err
}
+ if err := t.ForceMerkleRootUpdate(ctx); err != nil {
+ return err
+ }
+
// create the team section of the signature
section, err := memSet.Section(t.Chain.GetID(), admin)
if err != nil {
@@ -345,6 +349,10 @@ func (t *Team) ChangeMembership(ctx context.Context, req keybase1.TeamChangeReq)
return err
}
+ if err := t.ForceMerkleRootUpdate(ctx); err != nil {
+ return err
+ }
+
var merkleRoot *libkb.MerkleRoot
var lease *libkb.Lease
@@ -636,6 +644,15 @@ func (t *Team) postMulti(payload libkb.JSONPayload) error {
return nil
}
+// ForceMerkleRootUpdate will call LookupTeam on MerkleClient to
+// update cached merkle root to include latest team sigs. Needed if
+// client wants to create a signature that refers to an adminship,
+// signature's merkle_root has to be more fresh than adminship's.
+func (t *Team) ForceMerkleRootUpdate(ctx context.Context) error {
+ _, err := t.G().GetMerkleClient().LookupTeam(ctx, t.ID)
+ return err
+}
+
func LoadTeamPlusApplicationKeys(ctx context.Context, g *libkb.GlobalContext, id keybase1.TeamID, application keybase1.TeamApplication, refreshers keybase1.TeamRefreshers) (keybase1.TeamPlusApplicationKeys, error) {
var teamPlusApplicationKeys keybase1.TeamPlusApplicationKeys
teamByID, err := GetForApplication(ctx, g, id, application, refreshers)
|
Fix team adminship merkle root in client (#<I>)
* Call LookupTeam to force merkle root update using team id
* Refactor to ForceMerkleRootUpdate
* Re-enable tests
|
keybase_client
|
train
|
2c241c6d63587cec3eca2e95cf2853a722ce896e
|
diff --git a/clients/clients.go b/clients/clients.go
index <HASH>..<HASH> 100644
--- a/clients/clients.go
+++ b/clients/clients.go
@@ -13,7 +13,7 @@ import (
type (
// A tcp represents a TCP connection to the mist server
- tcp struct {
+ TCP struct {
host string
conn net.Conn // the connection the mist server
encoder *json.Encoder //
@@ -23,8 +23,8 @@ type (
// New attempts to connect to a running mist server at the clients specified
// host and port.
-func New(host string) (*tcp, error) {
- client := &tcp{
+func New(host string) (*TCP, error) {
+ client := &TCP{
host: host,
messages: make(chan mist.Message),
}
@@ -34,7 +34,7 @@ func New(host string) (*tcp, error) {
// connect dials the remote mist server and handles any incoming responses back
// from mist
-func (c *tcp) connect() error {
+func (c *TCP) connect() error {
// attempt to connect to the server
conn, err := net.Dial("tcp", c.host)
@@ -74,13 +74,13 @@ func (c *tcp) connect() error {
}
// Ping the server
-func (c *tcp) Ping() error {
+func (c *TCP) Ping() error {
return c.encoder.Encode(&mist.Message{Command: "ping"})
}
// Subscribe takes the specified tags and tells the server to subscribe to updates
// on those tags, returning the tags and an error or nil
-func (c *tcp) Subscribe(tags []string) error {
+func (c *TCP) Subscribe(tags []string) error {
//
if len(tags) == 0 {
@@ -93,7 +93,7 @@ func (c *tcp) Subscribe(tags []string) error {
// Unsubscribe takes the specified tags and tells the server to unsubscribe from
// updates on those tags, returning an error or nil
-func (c *tcp) Unsubscribe(tags []string) error {
+func (c *TCP) Unsubscribe(tags []string) error {
//
if len(tags) == 0 {
@@ -106,7 +106,7 @@ func (c *tcp) Unsubscribe(tags []string) error {
// Publish sends a message to the mist server to be published to all subscribed
// clients
-func (c *tcp) Publish(tags []string, data string) error {
+func (c *TCP) Publish(tags []string, data string) error {
//
if len(tags) == 0 {
@@ -124,7 +124,7 @@ func (c *tcp) Publish(tags []string, data string) error {
// PublishAfter sends a message to the mist server to be published to all subscribed
// clients after a specified delay
-func (c *tcp) PublishAfter(tags []string, data string, delay time.Duration) error {
+func (c *TCP) PublishAfter(tags []string, data string, delay time.Duration) error {
go func() {
<-time.After(delay)
c.Publish(tags, data)
@@ -133,12 +133,12 @@ func (c *tcp) PublishAfter(tags []string, data string, delay time.Duration) erro
}
// List requests a list from the server of the tags this client is subscribed to
-func (c *tcp) List() error {
+func (c *TCP) List() error {
return c.encoder.Encode(&mist.Message{Command: "list"})
}
// Close closes the client data channel and the connection to the server
-func (c *tcp) Close() {
+func (c *TCP) Close() {
// we need to do it in this order in case the goroutine is stuck waiting for
// more data from the socket
@@ -147,6 +147,6 @@ func (c *tcp) Close() {
}
// Messages
-func (c *tcp) Messages() <-chan mist.Message {
+func (c *TCP) Messages() <-chan mist.Message {
return c.messages
}
diff --git a/core/proxy.go b/core/proxy.go
index <HASH>..<HASH> 100644
--- a/core/proxy.go
+++ b/core/proxy.go
@@ -92,8 +92,6 @@ func (p *Proxy) Ping() error {
// Subscribe
func (p *Proxy) Subscribe(tags []string) error {
- // verify access before doing action
-
// is this an error?
if len(tags) == 0 {
return nil
@@ -111,8 +109,6 @@ func (p *Proxy) Subscribe(tags []string) error {
// Unsubscribe
func (p *Proxy) Unsubscribe(tags []string) error {
- // verify access before doing action
-
// is this an error?
if len(tags) == 0 {
return nil
@@ -129,18 +125,12 @@ func (p *Proxy) Unsubscribe(tags []string) error {
// Publish
func (p *Proxy) Publish(tags []string, data string) error {
-
- // verify access before doing action
-
- //
return publish(p.id, tags, data)
}
// Sends a message with delay
func (p *Proxy) PublishAfter(tags []string, data string, delay time.Duration) error {
- // verify access before doing action
-
//
go func() {
<-time.After(delay)
@@ -155,8 +145,6 @@ func (p *Proxy) PublishAfter(tags []string, data string, delay time.Duration) er
// List
func (p *Proxy) List() error {
- // verify access before doing action
-
// convert the list into something friendlier
p.Lock()
var data []string
|
made the client TCP struct public
|
nanopack_mist
|
train
|
26faf91dded52dc8195172bb4e9c0d2c36bb1a21
|
diff --git a/python/examples/hypertools_demo-clusters.py b/python/examples/hypertools_demo-clusters.py
index <HASH>..<HASH> 100644
--- a/python/examples/hypertools_demo-clusters.py
+++ b/python/examples/hypertools_demo-clusters.py
@@ -15,4 +15,4 @@ ind_vars = df[df.columns[1:]]
m = PCA(n_components=3)
reduced_data = m.fit_transform(ind_vars)
-hyp.plot(reduced_data,'o',n_clusters=10,animate=True)
+hyp.plot(reduced_data,'o',n_clusters=10)
diff --git a/python/examples/hypertools_demo-point_colors.py b/python/examples/hypertools_demo-point_colors.py
index <HASH>..<HASH> 100644
--- a/python/examples/hypertools_demo-point_colors.py
+++ b/python/examples/hypertools_demo-point_colors.py
@@ -12,4 +12,4 @@ for idx,i in enumerate(w):
tmp.append(int(np.random.randint(10, size=1)))
point_colors.append(tmp)
-hyp.plot(w,'o',point_colors=point_colors,animate=True)
+hyp.plot(w,'o',point_colors=point_colors)
|
removed animate flag from point_colors/clusters examples
|
ContextLab_hypertools
|
train
|
4c61aaf1dc09aa46fe0e4248e326bd7cf78fd2b7
|
diff --git a/lark/tree.py b/lark/tree.py
index <HASH>..<HASH> 100644
--- a/lark/tree.py
+++ b/lark/tree.py
@@ -62,14 +62,12 @@ class Tree(object):
queue = [self]
subtrees = OrderedDict()
for subtree in queue:
- if id(subtree) in subtrees:
- continue
subtrees[id(subtree)] = subtree
- queue += [c for c in reversed(subtree.children) if isinstance(c, Tree)]
+ queue += [c for c in reversed(subtree.children)
+ if isinstance(c, Tree) and id(c) not in subtrees]
del queue
- for subtree in reversed(list(subtrees.values())):
- yield subtree
+ return reversed(list(subtrees.values()))
def find_pred(self, pred):
"Find all nodes where pred(tree) == True"
|
Small refactoring for iter_subtrees
|
lark-parser_lark
|
train
|
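The lark refactor above moves the duplicate check from visit time to enqueue time and returns a reversed view instead of yielding in a loop: subtrees are collected top-down keyed by `id`, then handed back bottom-up so children precede parents. A runnable reduction of the method:

```python
from collections import OrderedDict

class Tree:
    def __init__(self, data, children=()):
        self.data = data
        self.children = list(children)

    def iter_subtrees(self):
        subtrees = OrderedDict()
        queue = [self]
        for subtree in queue:  # the queue grows while we iterate over it
            subtrees[id(subtree)] = subtree
            queue += [c for c in reversed(subtree.children)
                      if isinstance(c, Tree) and id(c) not in subtrees]
        # Collected top-down; reversing yields children before parents.
        return reversed(list(subtrees.values()))

shared = Tree("leaf")
root = Tree("root", [shared, Tree("mid", [shared])])
print([t.data for t in root.iter_subtrees()])  # ['leaf', 'mid', 'root']
```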
f96a78ca4698f3b5c2218e680f1f103b0f416875
|
diff --git a/account/models.py b/account/models.py
index <HASH>..<HASH> 100644
--- a/account/models.py
+++ b/account/models.py
@@ -377,7 +377,7 @@ class AccountDeletion(models.Model):
before = timezone.now() - datetime.timedelta(hours=hours_ago)
count = 0
for account_deletion in cls.objects.filter(date_requested__lt=before, user__isnull=False):
- settings.ACCOUNT_DELETION_EXPUNGE_CALLBACK(account_deletion)
+ hookset.account_delete_expunge(account_deletion)
account_deletion.date_expunged = timezone.now()
account_deletion.save()
count += 1
@@ -388,7 +388,7 @@ class AccountDeletion(models.Model):
account_deletion, created = cls.objects.get_or_create(user=user)
account_deletion.email = user.email
account_deletion.save()
- settings.ACCOUNT_DELETION_MARK_CALLBACK(account_deletion)
+ hookset.account_delete_mark(account_deletion)
return account_deletion
|
Fix a bug introduced by last commit
Fix a bug introduced by porting deletion callback to hookset: In AccountDeletion model, "account_delete_expunge" and "account_delete_mark" were still called from settings, instead of using the hookset.
|
pinax_django-user-accounts
|
train
|
0a2946ad872994ccab94b4464339924a1efbad9b
|
diff --git a/src/main/java/com/aparapi/Kernel.java b/src/main/java/com/aparapi/Kernel.java
index <HASH>..<HASH> 100644
--- a/src/main/java/com/aparapi/Kernel.java
+++ b/src/main/java/com/aparapi/Kernel.java
@@ -373,17 +373,10 @@ public abstract class Kernel implements Cloneable {
* kernel.execute(values.length);
* </pre></blockquote>
* <p>
-<<<<<<< HEAD:src/main/java/com/codegen/Kernel.java
* Alternatively, the property <code>com.codegen.executionMode</code> can be set to one of <code>JTP,GPU,ACC,CPU,SEQ</code>
* when an application is launched.
* <p><blockquote><pre>
* java -classpath ....;codegen.jar -Dcom.codegen.executionMode=GPU MyApplication
-=======
- * Alternatively, the property <code>com.amd.codegen.executionMode</code> can be set to one of <code>JTP,GPU,ACC,CPU,SEQ</code>
- * when an application is launched.
- * <p><blockquote><pre>
- * java -classpath ....;codegen.jar -Dcom.amd.codegen.executionMode=GPU MyApplication
->>>>>>> b118aad... added method to set execution mode without any fallback:com.amd.codegen/src/java/com/amd/codegen/Kernel.java
* </pre></blockquote><p>
* Generally setting the execution mode is not recommended (it is best to let Aparapi decide automatically) but the option
* provides a way to compare a kernel's performance under multiple execution modes.
|
docs(javadocs): fixed an unresolved conflict that was hidden in the Javadocs.
ISSUES CLOSED: #<I>
|
Syncleus_aparapi
|
train
|
cacfa0bcbe0639bde31e898a4ff4ee81ebfcb784
|
diff --git a/src/Traits/RequestTrait.php b/src/Traits/RequestTrait.php
index <HASH>..<HASH> 100644
--- a/src/Traits/RequestTrait.php
+++ b/src/Traits/RequestTrait.php
@@ -13,21 +13,26 @@ trait RequestTrait
public static function initialize(Request $request): void
{
/**
- * The scope is only to properly initialize the Request $request on PUT requests
+ * Do this only if the Request $request is passed by the container
*/
- $httpPutStreamListener = new HttpPutStreamListener();
- $data = $httpPutStreamListener->getData(
- $request
- );
- if (!$data['isEmptyPutStream']) {
- $request->initialize(
- [],
- $data['request'],
- [],
- [],
- $data['files']
+ if ($_SERVER['CONTENT_TYPE'] ?? null) {
+ /**
+ * The scope is only to properly initialize the Request $request on PUT requests
+ */
+ $httpPutStreamListener = new HttpPutStreamListener();
+ $data = $httpPutStreamListener->getData(
+ $request
);
- $request->setRequestFormat('json');
+ if (!$data['isEmptyPutStream']) {
+ $request->initialize(
+ [],
+ $data['request'],
+ [],
+ [],
+ $data['files']
+ );
+ $request->setRequestFormat('json');
+ }
}
}
|
Fixed how HttpPutStreamListener is run on PUT
|
mindlahus_symfony-assets
|
train
|
b0d393f6d7b4f36487f85e20b49d00244b894d23
|
diff --git a/lib/ignorable.rb b/lib/ignorable.rb
index <HASH>..<HASH> 100644
--- a/lib/ignorable.rb
+++ b/lib/ignorable.rb
@@ -1,4 +1,5 @@
require 'active_record'
+require 'active_support/core_ext/class/attribute'
module Ignorable
module InstanceMethods
@@ -12,8 +13,6 @@ module Ignorable
@columns ||= super.reject{|col| ignored_column?(col)}
end
- attr_reader :ignored_columns
-
# Prevent Rails from loading a table column.
# Useful for legacy database schemas with problematic column names,
# like 'class' or 'attributes'.
@@ -24,10 +23,11 @@ module Ignorable
#
# Topic.new.respond_to?(:attributes) => false
def ignore_columns(*columns)
- @ignored_columns ||= []
- @ignored_columns += columns.map(&:to_s)
+ self.ignored_columns ||= []
+ self.ignored_columns += columns.map(&:to_s)
reset_column_information
- @ignored_columns.tap(&:uniq!)
+ descendants.each(&:reset_column_information)
+ self.ignored_columns.tap(&:uniq!)
end
alias ignore_column ignore_columns
@@ -35,13 +35,13 @@ module Ignorable
# Accepts both ActiveRecord::ConnectionAdapter::Column objects,
# and actual column names ('title')
def ignored_column?(column)
- ignored_columns.present? && ignored_columns.include?(
+ self.ignored_columns.present? && self.ignored_columns.include?(
column.respond_to?(:name) ? column.name : column.to_s
)
end
def reset_ignored_columns
- @ignored_columns = []
+ self.ignored_columns = []
reset_column_information
end
end
@@ -50,4 +50,5 @@ end
unless ActiveRecord::Base.include?(Ignorable::InstanceMethods)
ActiveRecord::Base.send :include, Ignorable::InstanceMethods
ActiveRecord::Base.send :extend, Ignorable::ClassMethods
+ ActiveRecord::Base.send :class_attribute, :ignored_columns
end
diff --git a/spec/ignorable_spec.rb b/spec/ignorable_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/ignorable_spec.rb
+++ b/spec/ignorable_spec.rb
@@ -25,6 +25,9 @@ describe Ignorable do
belongs_to :test_model
end
+ class SubclassTestModel < TestModel
+ end
+
around :each do |example|
ActiveRecord::Base.transaction do
example.call
@@ -37,6 +40,33 @@ describe Ignorable do
expect(Thing.column_names.sort).to eql ["id", "test_model_id", "value"]
end
+ it 'removes columns from the subclass' do
+ expect(SubclassTestModel.column_names).to match_array(['id', 'name'])
+ end
+
+ context 'when ignore_columns is called after the columns are loaded' do
+ before do
+ @test_model = Class.new(ActiveRecord::Base) do
+ self.table_name = 'test_models'
+ end
+ @subclass = Class.new(@test_model)
+
+ # Force columns to load
+ @test_model.columns
+ @subclass.columns
+
+ @test_model.ignore_columns :attributes, :legacy
+ end
+
+ it 'removes columns from the class' do
+ expect(@test_model.column_names).to match_array(['id', 'name'])
+ end
+
+ it 'removes columns from the subclass' do
+ expect(@subclass.column_names).to match_array(['id', 'name'])
+ end
+ end
+
it "should remove the columns from the attribute names" do
expect(TestModel.new.attribute_names.sort).to eql ["id", "name"]
expect(Thing.new.attribute_names.sort).to eql ["id", "test_model_id", "value"]
|
Ignore columns in subclasses too, merge tobyhs:model_inheritance
|
nthj_ignorable
|
train
|
c8172e88ecafe1ee3866e58d6db44c6e06480706
|
diff --git a/lib/podio/areas/contact.rb b/lib/podio/areas/contact.rb
index <HASH>..<HASH> 100644
--- a/lib/podio/areas/contact.rb
+++ b/lib/podio/areas/contact.rb
@@ -20,5 +20,9 @@ module Podio
member Podio.connection.get("/org/#{org_id}/profile").body
end
+ def totals_by_org
+ Podio.connection.get("/contact/totals/").body
+ end
+
end
end
|
Added method for getting contact totals per org on contact
|
podio_podio-rb
|
train
|
5dda3a85f2e19bda4b1662054d3654d2eebfc414
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index <HASH>..<HASH> 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -120,6 +120,8 @@ For instructions on upgrading to newer versions, visit
* \#2522 Fixed `Criteria#with` to return the criteria and not the class.
+* \#2518 Fix unit of work call for the identity map when using Passenger.
+
* \#2512 Ensure nested attributes destroy works with the delayed destroys
introduced in 3.0.10 when multiple levels deep.
diff --git a/lib/mongoid/config.rb b/lib/mongoid/config.rb
index <HASH>..<HASH> 100644
--- a/lib/mongoid/config.rb
+++ b/lib/mongoid/config.rb
@@ -216,5 +216,17 @@ module Mongoid
def time_zone
use_utc? ? "UTC" : ::Time.zone
end
+
+ # Is the application running under passenger?
+ #
+ # @example Is the application using passenger?
+ # config.running_with_passenger?
+ #
+ # @return [ true, false ] If the app is deployed on Passenger.
+ #
+ # @since 3.0.11
+ def running_with_passenger?
+ @running_with_passenger ||= defined?(PhusionPassenger)
+ end
end
end
diff --git a/lib/mongoid/railtie.rb b/lib/mongoid/railtie.rb
index <HASH>..<HASH> 100644
--- a/lib/mongoid/railtie.rb
+++ b/lib/mongoid/railtie.rb
@@ -118,7 +118,11 @@ module Rails
# Need to include the Mongoid identity map middleware.
initializer "include the identity map" do |app|
- app.config.middleware.use "Rack::Mongoid::Middleware::IdentityMap"
+ if ::Mongoid.running_with_passenger?
+ app.config.middleware.use "Rack::Mongoid::Middleware::IdentityMap::Passenger"
+ else
+ app.config.middleware.use "Rack::Mongoid::Middleware::IdentityMap"
+ end
end
# Instantitate any registered observers after Rails initialization and
@@ -144,7 +148,7 @@ module Rails
# Passenger provides the :starting_worker_process event for executing
# code after it has forked, so we use that and reconnect immediately.
- if defined?(PhusionPassenger)
+ if ::Mongoid.running_with_passenger?
PhusionPassenger.on_event(:starting_worker_process) do |forked|
::Mongoid.default_session.disconnect if forked
end
diff --git a/lib/rack/mongoid/middleware/identity_map.rb b/lib/rack/mongoid/middleware/identity_map.rb
index <HASH>..<HASH> 100644
--- a/lib/rack/mongoid/middleware/identity_map.rb
+++ b/lib/rack/mongoid/middleware/identity_map.rb
@@ -37,6 +37,38 @@ module Rack
end
response
end
+
+ # Passenger 3 does not execute the block provided to a Rack::BodyProxy
+ # so the identity map never gets cleared. Since there's no streaming
+ # support in it anyways we do not need the proxy functionality.
+ class Passenger
+
+ # Initialize the new middleware.
+ #
+ # @example Init the middleware.
+ # IdentityMap.new(app)
+ #
+ # @param [ Object ] app The application.
+ #
+ # @since 3.0.11
+ def initialize(app)
+ @app = app
+ end
+
+ # Make the request with the provided environment.
+ #
+ # @example Make the request.
+ # identity_map.call(env)
+ #
+ # @param [ Object ] env The environment.
+ #
+ # @return [ Array ] The status, headers, and response.
+ #
+ # @since 3.0.11
+ def call(env)
+ ::Mongoid.unit_of_work { @app.call(env) }
+ end
+ end
end
end
end
|
Fix middleware not clearing identity map with Passenger.
[ fix #<I> ]
|
mongodb_mongoid
|
train
|
4be57a2ea5fdd4ef99fba61d70b6962fde7360bd
|
diff --git a/cake/console/libs/shell.php b/cake/console/libs/shell.php
index <HASH>..<HASH> 100644
--- a/cake/console/libs/shell.php
+++ b/cake/console/libs/shell.php
@@ -30,6 +30,13 @@ require_once CAKE . 'console' . DS . 'console_input.php';
class Shell extends Object {
/**
+ * Output constants for making verbose and quiet shells.
+ */
+ const VERBOSE = 2;
+ const NORMAL = 1;
+ const QUIET = 0;
+
+/**
* An instance of the ShellDispatcher object that loaded this script
*
* @var ShellDispatcher
@@ -448,8 +455,18 @@ class Shell extends Object {
* @param integer $newlines Number of newlines to append
* @return integer Returns the number of bytes returned from writing to stdout.
*/
- public function out($message = null, $newlines = 1) {
- return $this->stdout->write($message, $newlines);
+ public function out($message = null, $newlines = 1, $level = Shell::NORMAL) {
+ $currentLevel = Shell::NORMAL;
+ if (!empty($this->params['verbose'])) {
+ $currentLevel = Shell::VERBOSE;
+ }
+ if (!empty($this->params['quiet'])) {
+ $currentLevel = Shell::QUIET;
+ }
+ if ($level <= $currentLevel) {
+ return $this->stdout->write($message, $newlines);
+ }
+ return true;
}
/**
diff --git a/cake/tests/cases/console/libs/shell.test.php b/cake/tests/cases/console/libs/shell.test.php
index <HASH>..<HASH> 100644
--- a/cake/tests/cases/console/libs/shell.test.php
+++ b/cake/tests/cases/console/libs/shell.test.php
@@ -259,7 +259,36 @@ class ShellTest extends CakeTestCase {
* @return void
*/
function testVerboseOutput() {
- $this->markTestIncomplete('This needs to be written.');
+ $this->Shell->stdout->expects($this->at(0))->method('write')
+ ->with('Verbose', 1);
+ $this->Shell->stdout->expects($this->at(1))->method('write')
+ ->with('Normal', 1);
+ $this->Shell->stdout->expects($this->at(2))->method('write')
+ ->with('Quiet', 1);
+
+ $this->Shell->params['verbose'] = true;
+ $this->Shell->params['quiet'] = false;
+
+ $this->Shell->out('Verbose', 1, Shell::VERBOSE);
+ $this->Shell->out('Normal', 1, Shell::NORMAL);
+ $this->Shell->out('Quiet', 1, Shell::QUIET);
+ }
+
+/**
+ * test that verbose and quiet output levels work
+ *
+ * @return void
+ */
+ function testQuietOutput() {
+ $this->Shell->stdout->expects($this->once())->method('write')
+ ->with('Quiet', 1);
+
+ $this->Shell->params['verbose'] = false;
+ $this->Shell->params['quiet'] = true;
+
+ $this->Shell->out('Verbose', 1, Shell::VERBOSE);
+ $this->Shell->out('Normal', 1, Shell::NORMAL);
+ $this->Shell->out('Quiet', 1, Shell::QUIET);
}
/**
|
Adding output levels to Shell
Adding tests for output levels.
|
cakephp_cakephp
|
train
|
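The CakePHP change above gates every `out()` call behind three integer levels: with `--quiet` only QUIET messages print, with `--verbose` VERBOSE ones appear too, and the comparison `level <= current` is the whole mechanism. A compact Python sketch of the same scheme:

```python
QUIET, NORMAL, VERBOSE = 0, 1, 2

def make_out(verbose: bool = False, quiet: bool = False):
    # Mirror the original's precedence: verbose raises the level, quiet wins.
    current = VERBOSE if verbose else NORMAL
    if quiet:
        current = QUIET

    def out(message: str, level: int = NORMAL) -> None:
        if level <= current:  # print only what the current level allows
            print(message)

    return out

out = make_out(quiet=True)
out("always shown", level=QUIET)
out("suppressed in quiet mode")  # defaults to NORMAL
```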
a7bea58470cd4cb3b1ee55746243c13bb3038509
|
diff --git a/usb1.py b/usb1.py
index <HASH>..<HASH> 100644
--- a/usb1.py
+++ b/usb1.py
@@ -851,6 +851,8 @@ class USBPollerThread(threading.Thread):
See http://libusb.sourceforge.net/api-1.0/mtasync.html .
"""
+ _can_run = True
+
def __init__(self, context, poller, exc_callback=None):
"""
Create a poller thread for given context.
@@ -873,6 +875,14 @@ class USBPollerThread(threading.Thread):
if exc_callback is not None:
self.exceptionHandler = exc_callback
+ def stop(self):
+ """
+ Stop & join thread.
+
+ Allows stopping even thread before context gets closed.
+ """
+ self._can_run = False
+ self.join()
# pylint: disable=method-hidden
@staticmethod
@@ -900,7 +910,7 @@ class USBPollerThread(threading.Thread):
for fd, events in context.getPollFDList():
self._registerFD(fd, events, None)
try:
- while fd_set:
+ while fd_set and self._can_run:
if try_lock_events():
lock_event_waiters()
while event_handler_active():
|
USBPollerThread.stop: Allows stopping event thread.
|
vpelletier_python-libusb1
|
train
|
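The python-libusb1 commit above is the classic cooperative-stop pattern: the worker loop re-checks a flag on every iteration, and `stop()` flips the flag and joins the thread. A self-contained sketch with the event-handling body reduced to a sleep:

```python
import threading
import time

class PollerThread(threading.Thread):
    _can_run = True  # class default, shadowed per-instance by stop()

    def run(self):
        while self._can_run:
            time.sleep(0.01)  # stand-in for one poll/dispatch pass

    def stop(self):
        # Flip the flag the loop checks, then wait for run() to return.
        self._can_run = False
        self.join()

thread = PollerThread()
thread.start()
thread.stop()
```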
dab74e0708e14fab863ec91df989fc47a5f03bd4
|
diff --git a/lib/neo4j/active_node/persistence.rb b/lib/neo4j/active_node/persistence.rb
index <HASH>..<HASH> 100644
--- a/lib/neo4j/active_node/persistence.rb
+++ b/lib/neo4j/active_node/persistence.rb
@@ -97,11 +97,16 @@ module Neo4j::ActiveNode
end
def merge(attributes)
- neo4j_session.query.merge(n: {self => attributes}).exec
+ neo4j_session.query.merge(n: {self => attributes})
+ .on_create_set(n: on_create_props)
+ .pluck(:n).first
end
def find_or_create(find_attributes, set_attributes = {})
- neo4j_session.query.merge(n: {self => find_attributes}).set(n: set_attributes).exec
+ set_attributes_with_id = set_attributes.merge(on_create_props)
+ neo4j_session.query.merge(n: {self => find_attributes})
+ .on_create_set(n: set_attributes_with_id).on_match_set(n: set_attributes)
+ .pluck(:n).first
end
# Finds the first node with the given attributes, or calls create if none found
@@ -117,8 +122,12 @@ module Neo4j::ActiveNode
def load_entity(id)
Neo4j::Node.load(id)
end
- end
- private
+ private
+
+ def on_create_props
+ {id_property_name => default_properties[id_property_name].call}
+ end
+ end
end
end
diff --git a/spec/e2e/query_spec.rb b/spec/e2e/query_spec.rb
index <HASH>..<HASH> 100644
--- a/spec/e2e/query_spec.rb
+++ b/spec/e2e/query_spec.rb
@@ -206,6 +206,18 @@ describe 'Query API' do
expect(Teacher.first.name).to eq('Dr. Harold Samuels')
expect(Teacher.first.age).to eq(34)
end
+
+ it 'sets the id property method' do
+ teacher = Teacher.find_or_create(name: 'Dr. Harold Samuels')
+ expect(teacher.uuid).not_to be nil
+ end
+
+ it 'does not change the id property on match' do
+ teacher1 = Teacher.find_or_create(name: 'Dr. Harold Samuels')
+ teacher2 = Teacher.find_or_create(name: 'Dr. Harold Samuels')
+ expect(teacher1.neo_id).to eq teacher2.neo_id
+ expect(teacher1.id).to eq teacher2.id
+ end
end
end
|
:merge and :find_or_create need to set id property, return node
|
neo4jrb_neo4j
|
train
|
a44eab55d72d3336258a229bf2b6a6e773b2d581
|
diff --git a/azurerm/resource_arm_network_interface.go b/azurerm/resource_arm_network_interface.go
index <HASH>..<HASH> 100644
--- a/azurerm/resource_arm_network_interface.go
+++ b/azurerm/resource_arm_network_interface.go
@@ -93,6 +93,7 @@ func resourceArmNetworkInterface() *schema.Resource {
"private_ip_address": {
Type: schema.TypeString,
Optional: true,
+ Computed: true,
},
"private_ip_address_version": {
|
r/network_interface: making the `private_ip_address` computed
|
terraform-providers_terraform-provider-azurerm
|
train
|
7c739bafd8fdec0444e61e97ede2618e3fd0d4a1
|
diff --git a/tests/test_r_strsxp.py b/tests/test_r_strsxp.py
index <HASH>..<HASH> 100644
--- a/tests/test_r_strsxp.py
+++ b/tests/test_r_strsxp.py
@@ -37,6 +37,8 @@ if any([platform.startswith(os_name) for os_name in ['linux', 'darwin', 'freebsd
elif platform.startswith('win'):
import ctypes
+import pytest
+
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# CLASSES AND ROUTINES
@@ -84,6 +86,7 @@ class sample_class:
# TEST(s)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+@pytest.mark.xfail(strict = False, reason = 'not yet implemented')
def test_r_strsxp():
sample = sample_class()
|
new test marked as xfail
|
pleiszenburg_zugbruecke
|
train
|
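The `xfail` marker used in the zugbruecke commit above keeps a known-broken test in the suite without failing CI; with `strict=False`, an unexpected pass is reported as XPASS rather than an error. Minimal usage:

```python
import pytest

@pytest.mark.xfail(strict=False, reason="not yet implemented")
def test_not_ready():
    raise NotImplementedError  # reported as XFAIL, not as a failure
```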
709bee1e2045fd3488f6281b4249fe35c01d73e5
|
diff --git a/openquake/engine/export/hazard.py b/openquake/engine/export/hazard.py
index <HASH>..<HASH> 100644
--- a/openquake/engine/export/hazard.py
+++ b/openquake/engine/export/hazard.py
@@ -26,7 +26,7 @@ from collections import namedtuple, defaultdict
from openquake.baselib.general import groupby
from openquake.hazardlib.calc import disagg
from openquake.commonlib import hazard_writers
-from openquake.commonlib.writers import floatformat, scientificformat, save_csv
+from openquake.commonlib.writers import floatformat, scientificformat, write_csv
from openquake.engine.db import models
from openquake.engine.export import core
@@ -311,7 +311,7 @@ def export_gmf_csv(key, output, target):
dest = _get_result_export_dest(
haz_calc.id, target, output.gmf)[:-3] + 'csv'
# export the GMFs ordered by tag
- save_csv(dest, sorted(_gen_gmf_rows(output), key=operator.itemgetter(0)))
+ write_csv(dest, sorted(_gen_gmf_rows(output), key=operator.itemgetter(0)))
return dest
@@ -423,7 +423,7 @@ def export_ses_csv(key, output, target):
for ses in ses_coll:
for sesrup in ses:
rows.append([sesrup.tag, sesrup.seed])
- save_csv(dest, sorted(rows, key=operator.itemgetter(0)))
+ write_csv(dest, sorted(rows, key=operator.itemgetter(0)))
return dest
diff --git a/openquake/engine/export/risk.py b/openquake/engine/export/risk.py
index <HASH>..<HASH> 100644
--- a/openquake/engine/export/risk.py
+++ b/openquake/engine/export/risk.py
@@ -162,7 +162,7 @@ def export_agg_loss_curve_csv(key, output, target):
row = output.loss_curve.aggregatelosscurvedata
data = ('aggregate', row.losses, row.poes, row.average_loss,
row.stddev_loss)
- return writers.save_csv(dest, [data], fmt='%10.6E')
+ return writers.write_csv(dest, [data], fmt='%10.6E')
@core.export_output.add(('loss_curve', 'xml'), ('event_loss_curve', 'xml'))
@@ -192,7 +192,7 @@ def export_loss_curve_csv(key, output, target):
row.asset_ref, row.losses, row.poes, row.average_loss)
data.append(lca)
header = [lca._fields]
- writers.save_csv(dest, header + data, fmt='%10.6E')
+ writers.write_csv(dest, header + data, fmt='%10.6E')
return dest
@@ -208,7 +208,7 @@ def export_avgloss_csv(key, output, target):
rows = [(c.location.x, c.location.y, c.asset_ref, c.asset_value,
c.average_loss, c.stddev_loss or '', c.loss_curve.loss_type)
for c in data]
- writers.save_csv(dest, [header] + rows)
+ writers.write_csv(dest, [header] + rows)
return dest
@@ -242,7 +242,7 @@ def export_loss_map_csv(key, output, target):
output.loss_map.lossmapdata_set.all().order_by('asset_ref')):
data.append(LossMapPerAsset(row.asset_ref, row.value))
header = [data[0]._fields]
- writers.save_csv(dest, header + data, fmt='%10.6E')
+ writers.write_csv(dest, header + data, fmt='%10.6E')
return dest
@@ -397,5 +397,5 @@ def export_event_loss_asset_csv(key, output, target):
rows.append([event_loss.rupture.tag,
event_loss.asset.asset_ref,
event_loss.loss])
- writers.save_csv(dest, rows)
+ writers.write_csv(dest, rows)
return dest
diff --git a/qa_tests/risk/classical_risk/test.py b/qa_tests/risk/classical_risk/test.py
index <HASH>..<HASH> 100644
--- a/qa_tests/risk/classical_risk/test.py
+++ b/qa_tests/risk/classical_risk/test.py
@@ -267,6 +267,6 @@ class ClassicalRiskCase4TestCase(
os.close(fd)
hd = [['asset_ref', 'lon', 'lat', 'avg_loss~structural',
'ins_loss~structural']] + data
- writers.save_csv(fname, hd, fmt='%10.6E')
+ writers.write_csv(fname, hd, fmt='%10.6E')
expected = self._test_path('expected/%s.csv' % key)
self.assertEqual(open(fname).read(), open(expected).read())
|
Removed the old save_csv function
Former-commit-id: ffcc<I>fcea<I>eaf<I>e1babc<I>cc
|
gem_oq-engine
|
train
|
4e1ef75c64f878ffb6cbb47dc606b9f7d09d6806
|
diff --git a/tests/test_runner.py b/tests/test_runner.py
index <HASH>..<HASH> 100644
--- a/tests/test_runner.py
+++ b/tests/test_runner.py
@@ -151,7 +151,7 @@ class TestRstcheckMainRunnerFileListUpdater:
_runner.update_file_list() # act
- assert len(_runner.files_to_check) == 5
+ assert len(_runner.files_to_check) == 6
assert EXAMPLES_DIR / "good" / "rst.rst" in _runner.files_to_check
@staticmethod
|
update tests b/c basic rst good file added
|
myint_rstcheck
|
train
|
e6d1649fad1728d9c1213deabcf2c76f91d3990b
|
diff --git a/rapidoid-buffer/src/main/java/org/rapidoid/buffer/MultiBuf.java b/rapidoid-buffer/src/main/java/org/rapidoid/buffer/MultiBuf.java
index <HASH>..<HASH> 100644
--- a/rapidoid-buffer/src/main/java/org/rapidoid/buffer/MultiBuf.java
+++ b/rapidoid-buffer/src/main/java/org/rapidoid/buffer/MultiBuf.java
@@ -841,27 +841,30 @@ public class MultiBuf implements Buf, Constants {
space = 2;
} else {
- int digitsN = (int) Math.ceil(Math.log10(n + 1));
+ if (appending) {
+ String nums = "" + n;
+ append(nums.getBytes());
+ space = nums.length();
+ } else {
- int pos = position + digitsN - 1 + direction * digitsN;
- if (!forward) {
- pos++;
- }
+ int digitsN = (int) Math.ceil(Math.log10(n + 1));
- while (true) {
- long digit = n % 10;
- byte dig = (byte) (digit + 48);
- if (appending) {
- append(dig);
- } else {
- put(pos--, dig);
+ int pos = position + digitsN - 1 + direction * digitsN;
+ if (!forward) {
+ pos++;
}
- if (n < 10) {
- break;
+
+ while (true) {
+ long digit = n % 10;
+ byte dig = (byte) (digit + 48);
+ put(pos--, dig);
+ if (n < 10) {
+ break;
+ }
+ n = n / 10;
}
- n = n / 10;
+ space = digitsN;
}
- space = digitsN;
}
} else {
if (forward) {
|
Fixed number rendering in the low-level buffer utils.
|
rapidoid_rapidoid
|
train
|
e379a1324206c8eb97f1391693365ed6cef4f992
|
diff --git a/aws-region-table-parser.js b/aws-region-table-parser.js
index <HASH>..<HASH> 100644
--- a/aws-region-table-parser.js
+++ b/aws-region-table-parser.js
@@ -5,6 +5,7 @@ const locationsMap = {
edgeLocations: [],
regionalEdgeCaches: []
};
+
const transpose = array => array[0].map((_, c) => array.map(r => r[c]));
const flatten = (array) => {
return array.reduce(
@@ -48,11 +49,18 @@ function parseAwsTable(html) {
if (coloumnIndex === 0) {
services[serviceName] = services[serviceName] || {};
} else {
- let regionName = regions[coloumnIndex];
- let serviceInRegion = $(coloumn).text() === '✓';
- services[serviceName][regionName] = serviceInRegion;
- if (serviceInRegion) {
- regionSummary[regionName] = regionSummary[regionName] + 1 || 0;
+ let regionCode = regions[coloumnIndex];
+ let isServiceSupportedInRegion = $(coloumn).text() === '✓';
+ services[serviceName][regionCode] = isServiceSupportedInRegion;
+
+ regionSummary[regionCode] = regionSummary[regionCode] || {
+ regionCode: regionCode,
+ regionName: Object.values(regionNames).filter(region => region.code === regionCode)[0].name,
+ value: 0
+ };
+
+ if (isServiceSupportedInRegion) {
+ regionSummary[regionCode].value++;
}
}
diff --git a/generate-json.js b/generate-json.js
index <HASH>..<HASH> 100644
--- a/generate-json.js
+++ b/generate-json.js
@@ -1,17 +1,23 @@
var fs = require('fs');
var awsRegionTableParser = require('./index.js');
+const sortArrayByProp = (array, prop, asc = true) => {
+ return array.sort((a, b) => {
+ return asc ? a[prop] - b[prop] : b[prop] - a[prop];
+ });
+};
+
function generateRegionSummary(parseddata) {
- const regionSummary = parseddata.regionSummary;
- const regions = Object.keys(regionSummary);
- let markdownTable =`# Region Summary # \n`;
- markdownTable += `| Region | Services | \n`;
- markdownTable += `| ------ | --------: | \n`;
- Object.entries(regionSummary).forEach(entry => {
- markdownTable += `${entry[0]} | ${entry[1]} \n`
+ const regionSummary = sortArrayByProp(Object.values(parseddata.regionSummary), 'value', false);
+
+ let markdownTable =`### Region Summary # \n`;
+ markdownTable += `| Region Code | Region Name | no. of Supported Services | \n`;
+ markdownTable += `| ------ | -------- | -------- | \n`;
+ regionSummary.forEach(region => {
+ markdownTable += `${region.regionCode} | ${region.regionName} | ${region.value}\n`
});
- markdownTable += `\n\n`
+ markdownTable += `\n\n`;
return markdownTable;
}
|
order regions summary by supported value, add region name.
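The sortArrayByProp helper above is a descending sort on a single property; a minimal Python rendering of the same idea (the sample records are hypothetical, not the parser's real output):

regions = [
    {'regionCode': 'us-east-1', 'value': 120},
    {'regionCode': 'eu-west-1', 'value': 95},
]
# descending by number of supported services
by_support = sorted(regions, key=lambda r: r['value'], reverse=True)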
|
burib_aws-region-table-parser
|
train
|
5948edd824388d4e2b88b85caf9619e6c64ed56c
|
diff --git a/src/node/index.js b/src/node/index.js
index <HASH>..<HASH> 100644
--- a/src/node/index.js
+++ b/src/node/index.js
@@ -87,8 +87,12 @@ function request(snek, options = snek.options) {
stream.once('end', () => {
snek.push(null);
const raw = Buffer.concat(body);
- if (options.connection)
+ if (options.connection && options.connection.close)
options.connection.close();
+ else if (options.connection && options.connection.destroy)
+ options.connection.destroy();
+ else if (options.connection)
+ throw new Error('connection was not able to be closed');
resolve({ raw, headers, statusCode, statusText });
});
};
|
fall back to destroying http2 connection
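The same close-or-destroy fallback, sketched in Python rather than the JavaScript above (the connection object and its method names are assumptions for illustration):

def shutdown(connection):
    # prefer close(), fall back to destroy(), and fail loudly otherwise
    closer = getattr(connection, 'close', None) or getattr(connection, 'destroy', None)
    if closer is None:
        raise RuntimeError('connection was not able to be closed')
    closer()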
|
devsnek_snekfetch
|
train
|
5999bb1eb3c09fa8a60f0bf93973688e23596763
|
diff --git a/lib/fastlane/actions/latest_testflight_build_number.rb b/lib/fastlane/actions/latest_testflight_build_number.rb
index <HASH>..<HASH> 100644
--- a/lib/fastlane/actions/latest_testflight_build_number.rb
+++ b/lib/fastlane/actions/latest_testflight_build_number.rb
@@ -30,7 +30,12 @@ module Fastlane
Helper.log.info "Fetching the latest build number for version #{version_number}"
train = app.build_trains[version_number]
- build_number = train.builds.map(&:build_version).map(&:to_i).sort.last
+ begin
+ build_number = train.builds.map(&:build_version).map(&:to_i).sort.last
+ rescue
+ raise "could not find a build on iTC - and 'initial_build_number' option is not set" unless params[:initial_build_number]
+ build_number = params[:initial_build_number]
+ end
Helper.log.info "Latest upload is build number: #{build_number}"
Actions.lane_context[SharedValues::LATEST_TESTFLIGHT_BUILD_NUMBER] = build_number
@@ -66,7 +71,13 @@ module Fastlane
FastlaneCore::ConfigItem.new(key: :version,
env_name: "LATEST_VERSION",
description: "The version number whose latest build number we want",
- optional: true)
+ optional: true),
+ FastlaneCore::ConfigItem.new(key: :initial_build_number,
+ env_name: "INTITIAL_BUILD_NUMBER",
+ description: "sets the build number to given value if no build is in current train",
+ optional: true,
+ is_string: false)
+
]
end
|
* fixes the case when there is no build already uploaded - aka the first build
|
fastlane_fastlane
|
train
|
d8ad5fa74a5742ba73ecdbcd6456ef3617c1bb7c
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python
# Generated by jaraco.develop (https://bitbucket.org/jaraco/jaraco.develop)
import io
+import sys
import setuptools
@@ -9,6 +10,9 @@ with io.open('README.txt', encoding='utf-8') as readme:
with io.open('CHANGES.txt', encoding='utf-8') as changes:
long_description += '\n\n' + changes.read()
+needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
+pytest_runner = ['pytest-runner'] if needs_pytest else []
+
setup_params = dict(
name='jaraco.collections',
use_hg_version=True,
@@ -25,9 +29,8 @@ setup_params = dict(
],
setup_requires=[
'hgtools',
- 'pytest-runner',
'sphinx',
- ],
+ ] + pytest_runner,
tests_require=[
'pytest',
],
|
Conditionally require pytest-runner.
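The pattern only adds pytest-runner to setup_requires when a test-style command is actually on the command line; a minimal standalone sketch:

import sys

# only demand pytest-runner when 'pytest', 'test', or 'ptr' was invoked
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []

setup_requires = ['hgtools', 'sphinx'] + pytest_runner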
|
jaraco_jaraco.collections
|
train
|
16fe07624fd1efd9e366ed4426d4f1a6dcfd8f95
|
diff --git a/GameQ.php b/GameQ.php
index <HASH>..<HASH> 100644
--- a/GameQ.php
+++ b/GameQ.php
@@ -709,7 +709,7 @@ class GameQ
));
// Create the socket
- if(($socket = stream_socket_client($remote_addr, $errno = NULL, $errstr = NULL, $this->timeout, STREAM_CLIENT_CONNECT, $context)) !== FALSE)
+ if(($socket = @stream_socket_client($remote_addr, $errno = NULL, $errstr = NULL, $this->timeout, STREAM_CLIENT_CONNECT, $context)) !== FALSE)
{
// Set the read timeout on the streams
stream_set_timeout($socket, $this->timeout);
|
Added warning suppression for stream_client_connect. There is a catch for it failing and throwing an exception. No need for both.
|
Austinb_GameQ
|
train
|
9743541dd2b19137703e678781a9faa67e8fe4be
|
diff --git a/quart/local.py b/quart/local.py
index <HASH>..<HASH> 100644
--- a/quart/local.py
+++ b/quart/local.py
@@ -37,7 +37,7 @@ class TaskLocal:
def _task_identity() -> int:
loop = asyncio.get_event_loop()
if loop.is_running():
- task = asyncio.Task.current_task()
+ task = asyncio.current_task()
task_id = id(task)
return task_id
else:
|
Avoid a deprecation warning
Task.current_task() is deprecated, with asyncio.current_task() as the
replacement. As Quart targets only <I> or better there are no backwards
incompatibility worries.
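A minimal sketch of the replacement call, assuming Python 3.7+ where asyncio.current_task() exists:

import asyncio

async def task_identity() -> int:
    # asyncio.current_task() replaces the deprecated asyncio.Task.current_task()
    return id(asyncio.current_task())

print(asyncio.run(task_identity()))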
|
pgjones_quart
|
train
|
bbd91c5aaa722e542ee306aa0f7aee444081e50a
|
diff --git a/src/components/Tile/Tile-test.js b/src/components/Tile/Tile-test.js
index <HASH>..<HASH> 100644
--- a/src/components/Tile/Tile-test.js
+++ b/src/components/Tile/Tile-test.js
@@ -160,5 +160,50 @@ describe('Tile', () => {
wrapper.simulate('click');
expect(wrapper.state().expanded).toEqual(true);
});
+
+ it('displays the default tooltip for the chevron depending on state', () => {
+ const defaultExpandedIconText = 'Collapse';
+ const defaultCollapsedIconText = 'Expand';
+
+ // Force the expanded tile to be collapsed.
+ wrapper.setState({ expanded: false });
+ const collapsedDescription = wrapper
+ .find('[name="chevron--down"]')
+ .getElements()[0].props.description;
+ expect(collapsedDescription).toEqual(defaultCollapsedIconText);
+
+ // click on the item to expand it.
+ wrapper.simulate('click');
+
+ // Validate the description change
+ const expandedDescription = wrapper
+ .find('[name="chevron--down"]')
+ .getElements()[0].props.description;
+ expect(expandedDescription).toEqual(defaultExpandedIconText);
+ });
+
+ it('displays the custom tooltips for the chevron depending on state', () => {
+ const tileExpandedIconText = 'Click To Collapse';
+ const tileCollapsedIconText = 'Click To Expand';
+
+ // Force the custom icon text
+ wrapper.setProps({ tileExpandedIconText, tileCollapsedIconText });
+
+ // Force the expanded tile to be collapsed.
+ wrapper.setState({ expanded: false });
+ const collapsedDescription = wrapper
+ .find('[name="chevron--down"]')
+ .getElements()[0].props.description;
+ expect(collapsedDescription).toEqual(tileCollapsedIconText);
+
+ // click on the item to expand it.
+ wrapper.simulate('click');
+
+ // Validate the description change
+ const expandedDescription = wrapper
+ .find('[name="chevron--down"]')
+ .getElements()[0].props.description;
+ expect(expandedDescription).toEqual(tileExpandedIconText);
+ });
});
});
diff --git a/src/components/Tile/Tile.js b/src/components/Tile/Tile.js
index <HASH>..<HASH> 100644
--- a/src/components/Tile/Tile.js
+++ b/src/components/Tile/Tile.js
@@ -225,6 +225,8 @@ export class ExpandableTile extends Component {
className: PropTypes.string,
expanded: PropTypes.bool,
tabIndex: PropTypes.number,
+ tileCollapsedIconText: PropTypes.string,
+ tileExpandedIconText: PropTypes.string,
};
static defaultProps = {
@@ -232,6 +234,8 @@ export class ExpandableTile extends Component {
expanded: false,
tileMaxHeight: '0',
handleClick: () => {},
+ tileCollapsedIconText: 'Expand',
+ tileExpandedIconText: 'Collapse',
};
componentWillReceiveProps({ expanded, tileMaxHeight, tilePadding }) {
@@ -295,6 +299,8 @@ export class ExpandableTile extends Component {
tilePadding, // eslint-disable-line
handleClick, // eslint-disable-line
expanded, // eslint-disable-line
+ tileCollapsedIconText, // eslint-disable-line
+ tileExpandedIconText, // eslint-disable-line
...other
} = this.props;
@@ -326,7 +332,12 @@ export class ExpandableTile extends Component {
onClick={this.handleClick}
tabIndex={tabIndex}>
<button className="bx--tile__chevron">
- <Icon name="chevron--down" description="Tile chevron" />
+ <Icon
+ name="chevron--down"
+ description={
+ this.state.expanded ? tileExpandedIconText : tileCollapsedIconText
+ }
+ />
</button>
<div
ref={tileContent => {
|
fix(ExpandableTile): improve the description for the chevron (#<I>) (#<I>)
* fix(ExpandableTile): improve the description for the chevron (#<I>)
* fix(ExpandableTile): Added props to allow for custom expand/collapse tooltip descriptions.
|
carbon-design-system_carbon-components
|
train
|
8de88563b64398135776db4c08c4d68504790316
|
diff --git a/sphinxcontrib/katex.py b/sphinxcontrib/katex.py
index <HASH>..<HASH> 100644
--- a/sphinxcontrib/katex.py
+++ b/sphinxcontrib/katex.py
@@ -143,14 +143,17 @@ document.addEventListener("DOMContentLoaded", function() {
if delimiter[0] == '\\':
katex_delimiters[idx] = '\\' + katex_delimiters[idx]
# Set chosen delimiters for the auto-rendering options of KaTeX
- delimiters = r'''[
- {{ left: "\{}", right: "\{}", display: false }},
- {{ left: "\{}", right: "\{}", display: true }}
+ delimiters = r'''delimiters: [
+ {{ left: "{}", right: "{}", display: false }},
+ {{ left: "{}", right: "{}", display: true }}
],'''.format(*katex_delimiters)
- prefix = 'katex_options = { delimiters: ' + delimiters
+ prefix = 'katex_options = {'
suffix = '}'
options = app.config.katex_options
- return '\n'.join([prefix, options, suffix, content])
+ # Ensure list of options ends with ',' to append delimiters
+ if not options[-1:] == ',':
+ options += ','
+ return '\n'.join([prefix, options, delimiters, suffix, content])
def setup_static_path(app):
|
Force overwriting of auto-rendering delimiters
|
hagenw_sphinxcontrib-katex
|
train
|
a7da3496139bf8dee4eb33f2137b3e4d6417da9e
|
diff --git a/test.rb b/test.rb
index <HASH>..<HASH> 100644
--- a/test.rb
+++ b/test.rb
@@ -34,8 +34,8 @@ class DummySenderPlugin < Seahorse::Client::Plugin
end
end
-MyClient = Seahorse::Client.define api: {
- 'plugins' => [
+MyClient = Seahorse::Client.define(
+ plugins: [
Aws::Plugins::VersionedApiLoader,
Aws::Plugins::RegionalEndpoint,
Aws::Plugins::EnvironmentCredentials,
@@ -43,13 +43,15 @@ MyClient = Seahorse::Client.define api: {
Aws::Plugins::JsonSerializer,
#DummySenderPlugin
],
- 'metadata' => {
- 'aws_api_versions' => {
- '2012-01-25' => 'models/swf-2012-01-25.json'
+ api: {
+ 'metadata' => {
+      'aws_api_versions' => {
+ '2012-01-25' => 'models/swf-2012-01-25.json'
+ },
+ 'regional_endpoint' => 'swf.%s.amazonaws.com'
},
- 'regional_endpoint' => 'swf.%s.amazonaws.com'
- }
-}
+ },
+)
client = MyClient.new region: 'us-east-1',
ssl_default: false, api_version: '2012-01-25'
|
The swf test script now uses the new :plugins option to Client::Base.define.
|
aws_aws-sdk-ruby
|
train
|
b370e613f0e644936764150c0da22a0290a0e266
|
diff --git a/openpnm/algorithms/metrics/MercuryIntrusion.py b/openpnm/algorithms/metrics/MercuryIntrusion.py
index <HASH>..<HASH> 100644
--- a/openpnm/algorithms/metrics/MercuryIntrusion.py
+++ b/openpnm/algorithms/metrics/MercuryIntrusion.py
@@ -8,7 +8,7 @@ from openpnm import topotools
logger = logging.getLogger(__name__)
-class MercuryIntrusion(GenericAlgorithm):
+class MercuryIntrusion(Porosimetry):
r"""
A ready-made Mercury Intrusion Porosimetry algorithm
@@ -18,21 +18,33 @@ class MercuryIntrusion(GenericAlgorithm):
the Washburn capillary pressure model is added to each Physics object (
or to the Mercury object if there are no Geometries defined).
- Notes
- -----
- The simulation is automatically run with all faces treated as inlets. The
- results can be plotted using `plot_intrusion_data`, and numerical data
+ The results can be plotted using `plot_intrusion_data`, and numerical data
can be obtained with `get_intrusion_data`.
+
+ Examples
+ --------
+ >>> import openpnm as op
+ >>> pn = op.network.Cubic(shape=[10, 10, 10], spacing=1e-5)
+ >>> geo = op.geometry.StickAndBall(network=pn)
+ >>> mip = op.algorithms.metrics.MercuryPorosimetry(network=pn)
+ >>> mip.run()
+
+ You can then plot the results using ```mip.plot_intrusion_curve()```.
+
+ It is also possible to add some experimental data to the algorithm which
+ will the be plotted along with the simulated data:
+
+ >>> mip.pc_data = [10000, 20000, 30000]
+ >>> mip.snwp_data = [0, 0.5, 0.9]
"""
def __init__(self, network=None, project=None, settings={}, name=None,
**kwargs):
if project is None:
project = network.project
- hg = Mercury(network=network)
super().__init__(network=network, project=project, **kwargs)
- op = Porosimetry(project=project, phase=hg)
- self.settings['mip'] = op.name
+ hg = Mercury(network=network)
+ self.setup(phase=hg)
mod = models.physics.capillary_pressure.washburn
for geom in project.geometries().values():
phys = GenericPhysics(network=network, phase=hg, geometry=geom)
@@ -40,10 +52,10 @@ class MercuryIntrusion(GenericAlgorithm):
if not project.geometries():
hg.add_model(propname='throat.entry_pressure', model=mod)
topotools.find_surface_pores(network=network)
- op.set_inlets(pores=network.pores('surface'))
- logger.info('Running MIP simulation')
- op.run()
- self.update(op)
+ self.set_inlets(pores=network.pores('surface'))
+ del self['pore.outlets']
+ del self['pore.residual']
+ del self['throat.residual']
def _set_snwp_data(self, data):
self._snwp_data = np.array(data)
@@ -51,8 +63,6 @@ class MercuryIntrusion(GenericAlgorithm):
def _get_snwp_data(self):
if hasattr(self, '_snwp_data'):
return self._snwp_data
- else:
- logger.error('Pc data has not been provided')
snwp_data = property(fget=_get_snwp_data, fset=_set_snwp_data)
@@ -62,15 +72,11 @@ class MercuryIntrusion(GenericAlgorithm):
def _get_pc_data(self):
if hasattr(self, '_pc_data'):
return self._pc_data
- else:
- logger.error('Pc data has not been provided')
pc_data = property(fget=_get_pc_data, fset=_set_pc_data)
def plot_intrusion_curve(self, fig=None):
- proj = self.project
- op = proj[self.settings['mip']]
- fig = op.plot_intrusion_curve(fig=fig)
+ fig = super().plot_intrusion_curve(fig=fig)
ax = fig.gca()
x = self.pc_data
y = self.snwp_data
|
Cleaning up MIP, it's now an OrdinaryPercolation subclass
|
PMEAL_OpenPNM
|
train
|
10e02eff3f6e785c242e3a7ffcb4fb50caa54a1d
|
diff --git a/Discovery/TwigDiscovery.php b/Discovery/TwigDiscovery.php
index <HASH>..<HASH> 100644
--- a/Discovery/TwigDiscovery.php
+++ b/Discovery/TwigDiscovery.php
@@ -11,6 +11,7 @@
namespace LastCall\Mannequin\Twig\Discovery;
+use LastCall\Mannequin\Core\Component\BrokenComponent;
use LastCall\Mannequin\Core\Component\ComponentCollection;
use LastCall\Mannequin\Core\Discovery\DiscoveryInterface;
use LastCall\Mannequin\Core\Discovery\IdEncoder;
@@ -49,22 +50,28 @@ class TwigDiscovery implements DiscoveryInterface
$twig = $this->driver->getTwig();
$components = [];
foreach ($this->names as $names) {
+ $aliases = (array) $names;
+ $name = reset($aliases);
try {
- $aliases = (array) $names;
- $name = reset($aliases);
$component = new TwigComponent(
$this->encodeId($name),
$aliases,
$twig->load($name)->getSourceContext(),
$twig
);
- $component->setName($name);
- $components[] = $component;
} catch (\Twig_Error_Loader $e) {
throw new UnsupportedComponentException(
sprintf('Unable to load %s', reset($names)), 0, $e
);
+ } catch (\Twig_Error $e) {
+ $component = new BrokenComponent(
+ $this->encodeId($name),
+ $aliases
+ );
+ $component->addProblem($e->getMessage());
}
+ $component->setName($name);
+ $components[] = $component;
}
return new ComponentCollection($components);
diff --git a/Tests/Discovery/TwigDiscoveryTest.php b/Tests/Discovery/TwigDiscoveryTest.php
index <HASH>..<HASH> 100644
--- a/Tests/Discovery/TwigDiscoveryTest.php
+++ b/Tests/Discovery/TwigDiscoveryTest.php
@@ -11,7 +11,9 @@
namespace LastCall\Mannequin\Twig\Tests\Discovery;
+use LastCall\Mannequin\Core\Component\BrokenComponent;
use LastCall\Mannequin\Core\Component\ComponentCollection;
+use LastCall\Mannequin\Core\Component\ComponentInterface;
use LastCall\Mannequin\Core\Discovery\IdEncoder;
use LastCall\Mannequin\Twig\Discovery\TwigDiscovery;
use LastCall\Mannequin\Twig\Driver\TwigDriverInterface;
@@ -28,6 +30,7 @@ class TwigDiscoveryTest extends TestCase
{
$loader = new \Twig_Loader_Array([
'form-input.twig' => 'I am twig code',
+ 'broken' => '{% }}',
]);
return new \Twig_Environment($loader, [
@@ -135,4 +138,25 @@ class TwigDiscoveryTest extends TestCase
);
$discoverer->discover();
}
+
+ public function testLoadsBrokenComponent()
+ {
+ $driver = $this->getDriver($this->getTwig());
+ $discoverer = new TwigDiscovery(
+ $driver,
+ [['broken']]
+ );
+ $component = $discoverer->discover()->get($this->encodeId('broken'));
+ $this->assertInstanceOf(BrokenComponent::class, $component);
+
+ return $component;
+ }
+
+ /**
+ * @depends testLoadsBrokenComponent
+ */
+ public function testBrokenComponentListsProblems(ComponentInterface $component)
+ {
+ $this->assertCount(1, $component->getProblems());
+ }
}
|
Add a broken component class to handle non-loadable components, use in TwigDiscovery
|
LastCallMedia_Mannequin-Twig
|
train
|
b9ff6d8b46fd7b21dac13f3c37b69d175be0c12c
|
diff --git a/consent/admin/__init__.py b/consent/admin/__init__.py
index <HASH>..<HASH> 100644
--- a/consent/admin/__init__.py
+++ b/consent/admin/__init__.py
@@ -56,18 +56,6 @@ class DataSharingConsentAdmin(SimpleHistoryAdmin):
'course_id',
)
- def has_add_permission(self, request):
- """
- Disable add permission for DataSharingConsent.
- """
- return False
-
- def has_delete_permission(self, request, obj=None):
- """
- Disable deletion permission for DataSharingConsent.
- """
- return False
-
@admin.register(DataSharingConsentTextOverrides)
class DataSharingConsentTextOverridesAdmin(DjangoObjectActions, SimpleHistoryAdmin):
|
ENT-<I>: Enable Admins to Update DSC Records
|
edx_edx-enterprise
|
train
|
f53ab637b07a45347ed15b2b66bca117caa6770d
|
diff --git a/vent/helpers/meta.py b/vent/helpers/meta.py
index <HASH>..<HASH> 100644
--- a/vent/helpers/meta.py
+++ b/vent/helpers/meta.py
@@ -1,4 +1,6 @@
+import docker
import os
+import platform
def Version():
version = ''
@@ -10,3 +12,34 @@ def Version():
except Exception as e: # pragma: no cover
pass
return version
+
+def System():
+ return platform.system()
+
+def Docker():
+ docker_info = {'server':{}, 'env':'', 'type':'', 'os':''}
+
+ # get docker server version
+ try:
+ d_client = docker.from_env()
+ docker_info['server'] = d_client.version()
+ except Exception as e: # pragma: no cover
+ pass
+
+ # get operating system
+ system = System()
+ docker_info['os'] = system
+
+ # check if native or using docker-machine
+ if 'DOCKER_MACHINE_NAME' in os.environ:
+ # using docker-machine
+ docker_info['env'] = os.environ['DOCKER_MACHINE_NAME']
+ docker_info['type'] = 'docker-machine'
+ elif 'DOCKER_HOST' in os.environ:
+ # not native
+ docker_info['env'] = os.environ['DOCKER_HOST']
+ docker_info['type'] = 'remote'
+ else:
+ # using "local" server
+ docker_info['type'] = 'native'
+ return docker_info
|
helpers for docker setup and system
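A hypothetical usage sketch of the new helpers (module path and names taken from the diff):

from vent.helpers.meta import Docker, System

info = Docker()
# e.g. Linux docker-machine default, or Linux native with an empty env
print(System(), info['type'], info['env'])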
|
CyberReboot_vent
|
train
|
01b77fe92b206e684dd2980a6e767168bd68e8cc
|
diff --git a/trunk/JLanguageTool/src/test/de/danielnaber/languagetool/rules/patterns/PatternRuleTest.java b/trunk/JLanguageTool/src/test/de/danielnaber/languagetool/rules/patterns/PatternRuleTest.java
index <HASH>..<HASH> 100644
--- a/trunk/JLanguageTool/src/test/de/danielnaber/languagetool/rules/patterns/PatternRuleTest.java
+++ b/trunk/JLanguageTool/src/test/de/danielnaber/languagetool/rules/patterns/PatternRuleTest.java
@@ -27,6 +27,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.regex.Pattern;
+import java.util.regex.Matcher;
import junit.framework.TestCase;
import de.danielnaber.languagetool.AnalyzedSentence;
@@ -46,6 +47,8 @@ public class PatternRuleTest extends TestCase {
private static final Pattern PROBABLE_REGEX = Pattern.compile("[^\\[\\]\\*\\+\\|\\^\\{\\}\\?][\\[\\]\\*\\+\\|\\^\\{\\}\\?]|\\\\[^0-9]|\\(.+\\)|\\..");
+ private static final Pattern CASE_REGEX = Pattern.compile("\\[(.)(.)\\]");
+
@Override
public void setUp() throws IOException {
@@ -130,7 +133,22 @@ public class PatternRuleTest extends TestCase {
+ "\" that is marked as inflected"
+ " but is empty, so the attribute is redundant.");
}
-
+
+ if (element.isRegularExpression() && !element.getCaseSensitive()) {
+ Matcher matcher = CASE_REGEX.matcher(element.getString());
+ if (matcher.find()) {
+ final String letter1 = matcher.group(1);
+ final String letter2 = matcher.group(2);
+
+ if (!letter1.equals(letter2)
+ && letter1.toLowerCase().equals(letter2.toLowerCase())) {
+ System.err.println("The " + lang.toString() + " rule: "
+ + ruleId + " contains regexp part [" + letter1 + letter2
+ + "] which is useless without case_sensitive=\"yes\".");
+ }
+ }
+ }
+
if (element.isRegularExpression() && element.getString().contains("|")) {
final String[] groups = element.getString().split("\\)");
final boolean caseSensitive = element.getCaseSensitive();
|
- added test to report useless regexp parts such as "[Ee]nglish" when
case_sensitive attribute is not set to yes. Example:
[junit] The English rule: BED_ENGLISH contains regexp part [Ee] which is useless without case_sensitive="yes".
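The detection itself is small; a Python sketch of the same check (the Java test uses an equivalent regex):

import re

CASE = re.compile(r'\[(.)(.)\]')
m = CASE.search('[Ee]nglish')
a, b = m.group(1), m.group(2)
# [Ee] is redundant when matching is already case-insensitive
print(a != b and a.lower() == b.lower())  # True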
|
languagetool-org_languagetool
|
train
|
5139715fcce253f62ba7279e9dd7701f60dbe96c
|
diff --git a/lombok-maven-plugin/src/main/java/lombok/launch/Delombok.java b/lombok-maven-plugin/src/main/java/lombok/launch/Delombok.java
index <HASH>..<HASH> 100644
--- a/lombok-maven-plugin/src/main/java/lombok/launch/Delombok.java
+++ b/lombok-maven-plugin/src/main/java/lombok/launch/Delombok.java
@@ -10,7 +10,7 @@ import java.util.Map;
/**
* Since the Shadow Class Loader hides Lombok's internal Delombok, we need to access it via reflection.
*
- * @see lombok.delombok.Delombok
+ * @see <a href="https://github.com/rzwitserloot/lombok/blob/master/src/delombok/lombok/delombok/Delombok.java">lombok.delombok.Delombok</a>
*/
public class Delombok {
|
Replaced the @see to be javadoc friendly.
|
awhitford_lombok.maven
|
train
|
ec062f4b5c96cc8425a527b978c3f5f578884482
|
diff --git a/satpy/resample.py b/satpy/resample.py
index <HASH>..<HASH> 100644
--- a/satpy/resample.py
+++ b/satpy/resample.py
@@ -830,7 +830,10 @@ class BilinearResampler(BaseResampler):
def precompute(self, mask=None, radius_of_influence=50000, epsilon=0,
reduce_data=True, cache_dir=False, **kwargs):
"""Create bilinear coefficients and store them for later use."""
- from pyresample.bilinear import XArrayBilinearResampler
+ try:
+ from pyresample.bilinear import XArrayBilinearResampler
+ except ImportError:
+ from pyresample.bilinear import XArrayResamplerBilinear as XArrayBilinearResampler
del kwargs
del mask
@@ -858,7 +861,11 @@ class BilinearResampler(BaseResampler):
filename = self._create_cache_filename(cache_dir,
prefix='bil_lut-',
**kwargs)
- self.resampler.load_resampling_info(filename)
+ try:
+ self.resampler.load_resampling_info(filename)
+ except AttributeError:
+ warnings.warn("Bilinear resampler can't handle caching, please upgrade Pyresample.")
+ raise IOError
else:
raise IOError
@@ -872,7 +879,10 @@ class BilinearResampler(BaseResampler):
if os.path.exists(filename):
_move_existing_caches(cache_dir, filename)
LOG.info('Saving BIL neighbour info to %s', filename)
- self.resampler.save_resampling_info(filename)
+ try:
+ self.resampler.save_resampling_info(filename)
+ except AttributeError:
+ warnings.warn("Bilinear resampler can't handle caching, please upgrade Pyresample.")
def compute(self, data, fill_value=None, **kwargs):
"""Resample the given data using bilinear interpolation."""
|
Raise warnings if Pyresample is too old for bilinear caching
|
pytroll_satpy
|
train
|
d8e7d85a3f6a5e8249323b2719b396b2aa5a5fbf
|
diff --git a/parameter.py b/parameter.py
index <HASH>..<HASH> 100644
--- a/parameter.py
+++ b/parameter.py
@@ -3,14 +3,30 @@ _no_default = object()
class Parameter(object):
counter = 0
- def __init__(self, default = _no_default):
- self.__default = default # The default default is no default
- self.counter = Parameter.counter # We need to keep track of this to get the order right
+ class Date:
+ def parse(self, s):
+ return datetime.date(*map(int, s.split('-')))
+
+ class Int:
+ def parse(self, s):
+ return int(s)
+
+ def __init__(self, default = _no_default, parser = None):
+ # The default default is no default
+ self.__default = default
+
+ # We need to keep track of this to get the order right (see Task class)
+ self.counter = Parameter.counter
Parameter.counter += 1
+ # Handles input/output
+ self.__parser = parser
+
@property
def default(self):
assert self.__default != _no_default # TODO: exception
return self.__default
- def parse(self, x): return x
+ def parse(self, x):
+ if self.__parser == None: return x
+ else: return self.__parser.parse(x)
diff --git a/scheduler.py b/scheduler.py
index <HASH>..<HASH> 100644
--- a/scheduler.py
+++ b/scheduler.py
@@ -10,9 +10,9 @@ class LocalScheduler(object):
def add(self, task):
if task.complete(): return
- if task in self.__scheduled: return
+ if str(task) in self.__scheduled: return
- self.__scheduled.add(task)
+ self.__scheduled.add(str(task))
for task_2 in flatten(task.requires()):
self.add(task_2)
diff --git a/task.py b/task.py
index <HASH>..<HASH> 100644
--- a/task.py
+++ b/task.py
@@ -8,6 +8,7 @@ class Task(object):
@classmethod
def get_params(cls):
# Extract all Argument instances from the class
+ # TODO: not really necessary to do multiple times, can we make it run once when the class is created?
params = []
for param_name in dir(cls):
param = getattr(cls, param_name)
@@ -15,6 +16,7 @@ class Task(object):
params.append((param_name, param))
+ # The order the parameters are created matters. See Parameter class
params.sort(key = lambda t: t[1].counter)
return params
@@ -41,13 +43,14 @@ class Task(object):
for key, value in result.iteritems():
setattr(self, key, value)
- self.__params = list(result.iteritems())
+ self.__hash = hash(tuple(result.iteritems()))
+ self.__repr = '%s(%s)' % (self.__class__.__name__, ', '.join(['%s=%s' % (str(k), str(v)) for k, v in result.iteritems()]))
def __hash__(self):
- return hash(tuple(self.__params))
+ return self.__hash
def __repr__(self):
- return '%s(%s)' % (self.__class__.__name__, ', '.join(['%s=%s' % (str(k), str(v)) for k, v in self.__params]))
+ return self.__repr
def complete(self):
outputs = flatten(self.output())
|
added fib test + some input parsing
|
spotify_luigi
|
train
|
7cfae1e343b87038c17de284d63457acd064f24d
|
diff --git a/lib/influxdb/logging.rb b/lib/influxdb/logging.rb
index <HASH>..<HASH> 100644
--- a/lib/influxdb/logging.rb
+++ b/lib/influxdb/logging.rb
@@ -2,7 +2,7 @@ require 'logger'
module InfluxDB
module Logging # :nodoc:
- PREFIX = "[InfluxDB] ".freeze
+ PREFIX = "InfluxDB".freeze
class << self
attr_writer :logger
@@ -17,7 +17,7 @@ module InfluxDB
def log(level, message)
return unless InfluxDB::Logging.logger
- InfluxDB::Logging.logger.send(level.to_sym, PREFIX + message)
+ InfluxDB::Logging.logger.send(level.to_sym, PREFIX) { message }
end
end
end
diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb
index <HASH>..<HASH> 100644
--- a/spec/spec_helper.rb
+++ b/spec/spec_helper.rb
@@ -13,6 +13,24 @@ RSpec.configure do |config|
else
config.formatter = :progress
end
-end
-InfluxDB::Logging.logger = Logger.new(STDOUT) if ENV['LOG']
+ if ENV["LOG"]
+ Dir.mkdir("tmp") unless Dir.exist?("tmp")
+ logfile = File.open("tmp/spec.log", File::WRONLY | File::TRUNC | File::CREAT)
+
+ InfluxDB::Logging.logger = Logger.new(logfile).tap do |logger|
+ logger.formatter = ->(severity, _datetime, progname, message) {
+ "%-5s - %s: %s\n" % [ severity, progname, message ]
+ }
+ end
+
+ config.before(:each) do
+ InfluxDB::Logging.logger.info("RSpec") { self.class }
+ InfluxDB::Logging.logger.info("RSpec") { @__inspect_output }
+ end
+
+ config.after(:each) do
+ logfile.write "\n"
+ end
+ end
+end
|
Adjust logging
Logger#info et al. accept a "progname" parameter, so there's no
need to manually prefix the log message.
Also improved the logging for the tests: When given a LOG env
variable, the output of the logger is redirected to tmp/spec.log
for further inspection.
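Python's logging has no direct progname-plus-block form; the closest language-swapped sketch uses a named logger so the prefix comes from the format string, not manual concatenation:

import logging

logging.basicConfig(format='%(levelname)-5s - %(name)s: %(message)s')
logging.getLogger('InfluxDB').warning('request failed, retrying')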
|
influxdata_influxdb-ruby
|
train
|
74a45f9340c197333d2d3ccaaad74773015141a7
|
diff --git a/test/test-examples.js b/test/test-examples.js
index <HASH>..<HASH> 100644
--- a/test/test-examples.js
+++ b/test/test-examples.js
@@ -771,7 +771,50 @@ describe('examples', function () {
});
});
- it('default - 0', function () {
+ it('default - function', function () {
+ var template = {
+ content: {
+ last: {
+ dataKey: 'familyName',
+ default: function () {return 'unknown';}
+ },
+ first: {
+ dataKey: 'givenName',
+ default: function () {return 'unknown';}
+ }
+ }
+ };
+
+ var r0 = j2j.run(template, {
+ familyName: 'DOE',
+ givenName: 'JOE'
+ });
+ //console.log(r0); // {last: 'DOE', first: 'JOE'}
+ expect(r0).to.deep.equal({
+ last: 'DOE',
+ first: 'JOE'
+ });
+
+ var r1 = j2j.run(template, {
+ familyName: 'DOE'
+ });
+ //console.log(r1); // {last: 'DOE', first: 'unknown'}
+ expect(r1).to.deep.equal({
+ last: 'DOE',
+ first: 'unknown'
+ });
+
+ var r2 = j2j.run(template, {
+ givenName: 'JOE'
+ });
+ //console.log(r2); // {last: 'unknown', first: 'JOE'}
+ expect(r2).to.deep.equal({
+ last: 'unknown',
+ first: 'JOE'
+ });
+ });
+
+ it('default - 0', function () {
var template = {
content: {
last: {
@@ -813,7 +856,6 @@ describe('examples', function () {
first: 'JOE'
});
});
-
it('multiple - 0', function () {
var template = {
content: {
|
Update test-examples.js
Added test for default with function.
|
amida-tech_jsonapter
|
train
|
7c29e8881f67ba4c23ec2e8aa2e9418d9934dbc3
|
diff --git a/src/Symfony/Bundle/FrameworkBundle/RequestListener.php b/src/Symfony/Bundle/FrameworkBundle/RequestListener.php
index <HASH>..<HASH> 100644
--- a/src/Symfony/Bundle/FrameworkBundle/RequestListener.php
+++ b/src/Symfony/Bundle/FrameworkBundle/RequestListener.php
@@ -103,7 +103,7 @@ class RequestListener
$parameters = $this->router->match($request->getPathInfo());
if (null !== $this->logger) {
- $this->logger->info(sprintf('Matched route "%s" (parameters: %s)', $parameters['_route'], $this->parametersToString($parameters)));
+ $this->logger->debug(sprintf('Matched route "%s" (parameters: %s)', $parameters['_route'], $this->parametersToString($parameters)));
}
$request->attributes->add($parameters);
|
Changed log level of "Matched route ..." message from info to debug
|
symfony_symfony
|
train
|
4465068462dc90857746ef84614012e0a336d89e
|
diff --git a/test/e2e/kubectl.go b/test/e2e/kubectl.go
index <HASH>..<HASH> 100644
--- a/test/e2e/kubectl.go
+++ b/test/e2e/kubectl.go
@@ -583,11 +583,11 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
_, err := framework.RunHostCmd(ns, simplePodName, "/kubectl get pods --token=invalid --v=7 2>&1")
framework.Logf("got err %v", err)
Expect(err).To(HaveOccurred())
- Expect(err).To(ContainSubstring("User \"system:anonymous\" cannot list pods in the namespace"))
Expect(err).To(ContainSubstring("Using in-cluster namespace"))
Expect(err).To(ContainSubstring("Using in-cluster configuration"))
Expect(err).To(ContainSubstring("Authorization: Bearer invalid"))
- Expect(err).To(ContainSubstring("Response Status: 403 Forbidden"))
+ // TODO(kubernetes/kubernetes#39267): We should only see a 401 from an invalid bearer token.
+ Expect(err).To(Or(ContainSubstring("Response Status: 403 Forbidden"), ContainSubstring("Response Status: 401 Unauthorized")))
By("trying to use kubectl with invalid server")
_, err = framework.RunHostCmd(ns, simplePodName, "/kubectl get pods --server=invalid --v=6 2>&1")
|
Remove system:anonymous check from kubectl test
This verbiage doesn't appear when the cluster is AlwaysAllow
|
kubernetes_kubernetes
|
train
|
506cd448690f35215b664d25f95620d587890172
|
diff --git a/lib/classes/Utils.js b/lib/classes/Utils.js
index <HASH>..<HASH> 100644
--- a/lib/classes/Utils.js
+++ b/lib/classes/Utils.js
@@ -120,36 +120,12 @@ class Utils {
}
findServicePath() {
- const that = this;
-
- // Helper function
- const isServiceDir = (dir) => {
- let serverlessYmlFileFound = false;
-
- if (that.serverless.utils.fileExistsSync(path.join(dir, 'serverless.yml'))) {
- serverlessYmlFileFound = true;
- } else if (that.serverless.utils.fileExistsSync(path.join(dir, 'serverless.yaml'))) {
- serverlessYmlFileFound = true;
- }
-
- return serverlessYmlFileFound;
- };
-
- // Check up to 10 parent levels
- let previous = '.';
let servicePath = null;
- let i = 10;
-
- while (i >= 0) {
- const fullPath = path.resolve(process.cwd(), previous);
-
- if (isServiceDir(fullPath)) {
- servicePath = fullPath;
- break;
- }
- previous = path.join(previous, '..');
- i--;
+ if (this.serverless.utils.fileExistsSync(path.join(process.cwd(), 'serverless.yml'))) {
+ servicePath = process.cwd();
+ } else if (this.serverless.utils.fileExistsSync(path.join(process.cwd(), 'serverless.yaml'))) {
+ servicePath = process.cwd();
}
return servicePath;
|
Remove automatic servicePath detection to prevent unintended behavior
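A language-swapped sketch (Python, not the project's JavaScript) of the simplified detection, which now consults only the working directory instead of walking up to ten parents:

import os

def find_service_path():
    for name in ('serverless.yml', 'serverless.yaml'):
        if os.path.isfile(os.path.join(os.getcwd(), name)):
            return os.getcwd()
    return None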
|
serverless_serverless
|
train
|
062abc7a4c578532e9f87b1557f2ebde93661867
|
diff --git a/Lib/fontParts/objects/base/validators.py b/Lib/fontParts/objects/base/validators.py
index <HASH>..<HASH> 100644
--- a/Lib/fontParts/objects/base/validators.py
+++ b/Lib/fontParts/objects/base/validators.py
@@ -46,7 +46,7 @@ def validateLayerOrder(value, font):
from collections import Counter
duplicates = [v for v, count in Counter(value).items() if count > 1]
if len(duplicates) != 0:
- raise FontPartsError("Duplicate layers are not allowed. Layer name(s) %r are duplicate." % ", ".join(duplicates))
+ raise FontPartsError("Duplicate layers are not allowed. Layer name(s) %r are duplicate(s)." % ", ".join(duplicates))
return [unicode(v) for v in value]
@@ -96,6 +96,7 @@ def validateKerningKey(value):
- value must be a tuple instance.
- value must be a two member tuple.
- value items must be strings.
+ - value items must be at least one character long.
- Returned value will be a tuple of unicode strings.
"""
@@ -106,6 +107,8 @@ def validateKerningKey(value):
for v in value:
if not isinstance(v, basestring):
raise FontPartsError("Kerning key items must be strings, not %s." % type(value).__name__)
+ if len(v) < 1:
+ raise FontPartsError("Kerning key items must be one character long")
return tuple([unicode(v) for v in value])
def validateKerningValue(value):
@@ -163,7 +166,7 @@ def validateFeatureText(value):
"""
if not isinstance(value, basestring):
- raise FontPartsError("Feature text items must be a string, not %s." % type(value).__name__)
+ raise FontPartsError("Feature text must be a string, not %s." % type(value).__name__)
return unicode(value)
# ---
@@ -333,10 +336,20 @@ def validateContour(value):
# -----
def validatePointType(value):
+ """Validates point type
+
+    - value must be a string.
+ - value can be 'move', 'line', 'offcurve', 'curve', or 'qcurve'.
+ - Returned value will be a unicode string.
"""
- XXX implement
- """
- return value
+
+ allowedTypes = ['move', 'line', 'offcurve', 'curve', 'qcurve']
+
+ if not isinstance(value, basestring):
+ raise FontPartsError("Point type must be a string, not %s." % type(value).__name__)
+ if value not in allowedTypes:
+ raise FontPartsError("Point type must be '%s'; not %r." % ("', '".join(allowedTypes), value)
+ return unicode(value)
def validatePointName(value):
"""Validates point name
@@ -354,20 +367,40 @@ def validatePointName(value):
# -------
def validateSegmentType(value):
+ """Validates segment type
+
+    - value must be a string.
+ - value can be 'move', 'line', 'curve', or 'qcurve'.
+ - Returned value will be a unicode string.
"""
- XXX implement
- """
- return value
+
+ allowedTypes = ['move', 'line', 'curve', 'qcurve']
+
+ if not isinstance(value, basestring):
+ raise FontPartsError("Segment type must be a string, not %s." % type(value).__name__)
+ if value not in allowedTypes:
+ raise FontPartsError("Segment type must be '%s'; not %r." % ("', '".join(allowedTypes), value)
+ return unicode(value)
# ----
# Type
# ----
def validateBPointType(value):
+ """Validates bPoint type
+
+    - value must be a string.
+ - value can be 'corner' or 'curve'.
+ - Returned value will be a unicode string.
"""
- XXX implement
- """
- return value
+
+ allowedTypes = ['corner', 'curve']
+
+ if not isinstance(value, basestring):
+ raise FontPartsError("bPoint type must be a string, not %s." % type(value).__name__)
+ if value not in allowedTypes:
+ raise FontPartsError("bPoint type must be 'corner' or 'curve', not %r." % value
+ return unicode(value)
# ---------
# Component
|
Validate Segment, Point, and bPoint types.
Grammar.
One more check on kerning key name length.
|
robotools_fontParts
|
train
|
ac2bc4093c46857fc6e5683cbc25577f3e7e8a7c
|
diff --git a/lib/Koine/Parameters.php b/lib/Koine/Parameters.php
index <HASH>..<HASH> 100644
--- a/lib/Koine/Parameters.php
+++ b/lib/Koine/Parameters.php
@@ -53,51 +53,70 @@ class Parameters extends Hash
{
$params = clone $this;
- return $this->filter($params, $permittedParams);
+ $this->filter($params, $permittedParams);
+
+ return $params;
}
/**
* Filter out or throws exception according to the permitted params
* @param Parameter $params
* @param array $permitted
- * @return Parameter
* @throws UnpermittedParameterException when params not permitted are passed in
*/
public function filter(Parameters $params, array $permitted = array())
{
$this->cleanUnwanted($params, $permitted);
$this->handleArrays($params, $permitted);
-
- return $params;
+ $this->handleCollections($params, $permitted);
}
- public function handleArrays(Parameters $params, $permitted)
+ /**
+ * Handle Parameters that have only integer indexes
+ * @param Parameter $params
+ * @param array $permitted
+ */
+ private function handleCollections(Parameters $params, array $permitted = array())
{
- if (defined('DEBUG')) {
- echo "------\n";
- print_r($params->toArray());
- print_r($permitted);
+ $keys = $params->keys();
+ $intKeys = $keys->select(function ($value) {
+ return is_int($value);
+ });
+
+ if ($keys->count() === $intKeys->count()) {
+ foreach ($keys as $key) {
+ $value = $params[$key];
+
+ if ($value instanceof Parameters) {
+ $this->filter($value, $permitted);
+ }
+ }
}
+ }
+ /**
+ * Handle permissions that are given in the hash form
+ * @param Parameter $params
+ * @param array $permitted
+ */
+ private function handleArrays(Parameters $params, array $permitted = array())
+ {
// handle arrays
foreach ($permitted as $key => $allowed) {
- if ($params->hasKey($key)) {
- $data = $params[$key];
-
- if ($data) {
- if (is_array($allowed)) {
- if ($data instanceof Parameters) {
- $this->handleArrays($data, $allowed);
- }
- } else {
- $this->handleUnpermittedParam($params, $permitted);
- }
- }
+ if (is_array($allowed) && $params->hasKey($key)) {
+ $this->filter($params[$key], $allowed);
}
}
}
- public function cleanUnwanted(Parameters $params, $permitted)
+ /**
+     * Filters out or throws exception when parameters are neither keys nor values
+ * in the permitted array
+ * @param Parameter $params
+ * @param array $permitted
+ * @throws ParameterMissingException when parameter is missing
+ */
+ private function cleanUnwanted(Parameters $params, $permitted)
{
foreach ($params->toArray() as $key => $value) {
if (!is_int($key) && !in_array($key, $permitted) && !array_key_exists($key, $permitted)) {
diff --git a/tests/KoineTests/ParametersTest.php b/tests/KoineTests/ParametersTest.php
index <HASH>..<HASH> 100644
--- a/tests/KoineTests/ParametersTest.php
+++ b/tests/KoineTests/ParametersTest.php
@@ -292,5 +292,26 @@ class ParametersTest extends PHPUnit_Framework_TestCase
);
$this->assertEquals($expected, $actual);
+
+ // nested but with no specific requirements
+
+ $actual = $params->permit(array(
+ 'book' => array(
+ 'authors' => array(),
+ 'title'
+ ),
+ 'foo'
+ ))->toArray();
+
+ $expected = array(
+ 'book' => array(
+ 'title' => 'Some Title',
+ 'authors' => array(
+ array('name' => 'Jon', 'birthday' => '1960-01-02'),
+ array('name' => 'Daniel', 'birthday' => '1960-01-02'),
+ )
+ ),
+ 'foo' => 'bar'
+ );
}
}
|
Implemented nested permissions
|
koinephp_StrongParameters
|
train
|
4c559aa9732343791bad5d5137010b9b81311e3d
|
diff --git a/packages/razzle/scripts/build.js b/packages/razzle/scripts/build.js
index <HASH>..<HASH> 100755
--- a/packages/razzle/scripts/build.js
+++ b/packages/razzle/scripts/build.js
@@ -39,7 +39,7 @@ loadRazzleConfig(webpack).then(
async ({ razzle, razzleOptions, webpackObject, plugins, paths }) => {
const verbose = razzleOptions.verbose;
- if (process.env.NODE_ENV === "production" && (process.env.RAZZLE_NONINTERACTIVE !== "true" && !cliArgs['noninteractive'])) {
+ if (!process.env.CI && process.env.NODE_ENV === "production" && (process.env.RAZZLE_NONINTERACTIVE !== "true" && !cliArgs['noninteractive'])) {
await inquirer.prompt([
{
type: 'confirm',
|
feat(razzle): disable prompt on CI
|
jaredpalmer_razzle
|
train
|
a91162c77f613d9ed3ab8b27c63508e2e70dbbb5
|
diff --git a/phoebe/atmospheres/passbands.py b/phoebe/atmospheres/passbands.py
index <HASH>..<HASH> 100644
--- a/phoebe/atmospheres/passbands.py
+++ b/phoebe/atmospheres/passbands.py
@@ -591,22 +591,10 @@ class Passband:
# Store the passband index for use in planckint() and atmx():
self.extern_wd_idx = wdidx
- # The original atmcof.dat features 'D' instead of 'E' for
- # exponential notation. We need to provide a converter for
- # numpy's loadtxt to read that in:
- D2E = lambda s: float(s.replace('D', 'E'))
- atmtab = np.loadtxt(atmfile, converters={2: D2E, 3: D2E, 4: D2E, 5: D2E, 6: D2E, 7: D2E, 8: D2E, 9: D2E, 10: D2E, 11: D2E})
-
- # !!!!!
- # THE ABOVE IS NOT NEEDED AS self.wd_data HAS ALL THE DATA
- # SOMEBODY THAT UNDERSTANDS WHAT IS DONE HERE PLEASE CORRECT USE IT
- # !!!!!
-
# Break up the table along axes and extract a single passband data:
- atmtab = np.reshape(atmtab, (Nabun, Npb, Nlogg, Nints, -1))
+ atmtab = np.reshape(self.wd_data["atm_table"], (Nabun, Npb, Nlogg, Nints, -1))
atmtab = atmtab[:, wdidx, :, :, :]
-
# Finally, reverse the metallicity axis because it is sorted in
# reverse order in atmcof:
self.extern_wd_atmx = atmtab[::-1, :, :, :]
|
Omitting reading atm_table of coefficients twice.
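The deleted converter handled Fortran-style 'D' exponents that numpy cannot parse directly; for reference, the trick in isolation:

D2E = lambda s: float(s.replace('D', 'E'))
print(D2E('1.0D+02'))  # 100.0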
|
phoebe-project_phoebe2
|
train
|
2355d4123f6670b39f81f89f376934244f424822
|
diff --git a/cassandra_test.go b/cassandra_test.go
index <HASH>..<HASH> 100644
--- a/cassandra_test.go
+++ b/cassandra_test.go
@@ -30,7 +30,7 @@ var (
clusterSize = flag.Int("clusterSize", 1, "the expected size of the cluster")
flagRetry = flag.Int("retries", 5, "number of times to retry queries")
flagAutoWait = flag.Duration("autowait", 1000*time.Millisecond, "time to wait for autodiscovery to fill the hosts poll")
- flagAutoFreq = flag.Duration("autofreq", 1000*time.Millisecond, "frequency for autodiscovery to discover new hosts")
+ //flagAutoFreq = flag.Duration("autofreq", 1000*time.Millisecond, "frequency for autodiscovery to discover new hosts")
clusterHosts []string
)
@@ -112,7 +112,7 @@ func TestRingDiscovery(t *testing.T) {
cluster.Consistency = Quorum
cluster.RetryPolicy.NumRetries = *flagRetry
cluster.DiscoverHosts = true
- cluster.Discovery = DiscoveryConfig{Sleep: *flagAutoFreq}
+ //cluster.Discovery = DiscoveryConfig{Sleep: *flagAutoFreq}
session, err := cluster.CreateSession()
if err != nil {
|
Maybe flagAutoFreq doesn't actually have much effect
|
gocql_gocql
|
train
|
6bc0e44b812bbc57f8a96cbf6cbe2ed50b72f8dc
|
diff --git a/lib/harness/adapters/librato_adapter.rb b/lib/harness/adapters/librato_adapter.rb
index <HASH>..<HASH> 100644
--- a/lib/harness/adapters/librato_adapter.rb
+++ b/lib/harness/adapters/librato_adapter.rb
@@ -62,7 +62,13 @@ module Harness
end
def self.sanitize(name)
- "#{name}#{Harness.config.namespace}".gsub(/\./, '-').gsub(/^!/, 'bang_')
+ base = name.gsub(/^!/, 'bang.')
+
+ if Harness.config.namespace
+ "#{name}.#{Harness.config.namespace}"
+ else
+ base
+ end
end
end
end
diff --git a/lib/harness/version.rb b/lib/harness/version.rb
index <HASH>..<HASH> 100644
--- a/lib/harness/version.rb
+++ b/lib/harness/version.rb
@@ -1,3 +1,3 @@
module Harness
- VERSION = "0.2.3"
+ VERSION = "0.2.4"
end
diff --git a/test/unit/adapters/librato_adapter_test.rb b/test/unit/adapters/librato_adapter_test.rb
index <HASH>..<HASH> 100644
--- a/test/unit/adapters/librato_adapter_test.rb
+++ b/test/unit/adapters/librato_adapter_test.rb
@@ -51,11 +51,11 @@ class LibratoAdapterTest < MiniTest::Unit::TestCase
end
def test_gauge_id_is_sanitized
- @gauge.id = "process_action.action_controller"
+ @gauge.id = "!process_action.action_controller"
json = {
:gauges => [{
- :name => "process_action-action_controller",
+ :name => "bang.process_action.action_controller",
:display_name => @gauge.name,
:value => @gauge.value,
:measure_time => @gauge.time.to_i,
@@ -119,11 +119,11 @@ class LibratoAdapterTest < MiniTest::Unit::TestCase
end
def test_counter_id_is_sanitized
- @counter.id = "total_requests.action_controller"
+ @counter.id = "!total_requests.action_controller"
json = {
:counters => [{
- :name => "total_requests-action_controller",
+ :name => "bang.total_requests.action_controller",
:display_name => @counter.name,
:value => @counter.value,
:measure_time => @counter.time.to_i,
|
Update for inconsistency in Librato api
|
ahawkins_harness
|
train
|
b4e58745429d62533b11882945d0762b63d630ef
|
diff --git a/lib/autowow/gem.rb b/lib/autowow/gem.rb
index <HASH>..<HASH> 100644
--- a/lib/autowow/gem.rb
+++ b/lib/autowow/gem.rb
@@ -6,7 +6,7 @@ module Autowow
include EasyLogging
def self.gem_release
- start_status = Vcs.status.stdout
+ start_status = Vcs.status
logger.info(start_status)
working_branch = Vcs.current_branch
logger.error("Not on master.") and return unless working_branch.eql?('master')
@@ -20,7 +20,7 @@ module Autowow
Vcs.checkout('master')
Vcs.stash_pop if pop_stash
- logger.info(status.stdout)
+ logger.info(status)
end
def self.release
diff --git a/lib/autowow/vcs.rb b/lib/autowow/vcs.rb
index <HASH>..<HASH> 100644
--- a/lib/autowow/vcs.rb
+++ b/lib/autowow/vcs.rb
@@ -7,7 +7,7 @@ module Autowow
include StringDecorator
def self.branch_merged
- start_status = status.stdout
+ start_status = status
logger.info(start_status)
working_branch = current_branch
logger.error("Nothing to do.") and return if working_branch.eql?('master')
@@ -19,7 +19,7 @@ module Autowow
stash_pop if pop_stash
branch_force_delete(working_branch)
- logger.info(status.stdout)
+ logger.info(status)
end
def self.update_projects
@@ -28,7 +28,7 @@ module Autowow
# https://stackoverflow.com/a/10148084/2771889
Dir.chdir(working_dir) {
logger.info("Updating #{working_dir} ...")
- start_status = status_dry.stdout
+ start_status = status_dry
logger.error("Skipped: not a git repository.") and next unless is_git?(start_status)
logger.error("Skipped: work in progress (not on master).") and next unless current_branch.eql?('master')
logger.error("Skipped: work in progress (uncommitted changes).") and next if uncommitted_changes?(start_status)
@@ -64,11 +64,13 @@ module Autowow
end
def self.status
- Command.run('git', 'status')
+ status = Command.run('git', 'status')
+ status.stdout + status.stderr
end
def self.status_dry
- Command.run_dry('git', 'status')
+ status = Command.run_dry('git', 'status')
+ status.stdout + status.stderr
end
def self.checkout(existing_branch)
|
Fixes issue where status would not consider stderr
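The same capture-both-streams idea in Python (a sketch, not the Ruby Command wrapper used above):

import subprocess

p = subprocess.run(['git', 'status'], capture_output=True, text=True)
# git writes some diagnostics (e.g. "not a git repository") to stderr
status = p.stdout + p.stderr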
|
thisismydesign_autowow
|
train
|
3970320a50b1504e5c8947c4793cc186a5388943
|
diff --git a/angr/path.py b/angr/path.py
index <HASH>..<HASH> 100644
--- a/angr/path.py
+++ b/angr/path.py
@@ -178,7 +178,11 @@ class Path(object):
# return n
#return None
- return self.blockcounter_stack[-1].most_common()[0][1]
+ mc = self.blockcounter_stack[-1].most_common()
+ if len(mc) == 0:
+ return None
+ else:
+ return mc[0][1]
def _make_sim_run(self):
self._run = self._project.sim_run(self.state, stmt_whitelist=self.stmt_whitelist, last_stmt=self.last_stmt, jumpkind=self.jumpkind)
|
Bug fix: calling detect_loops() on an empty path will raise an exception.
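The underlying pitfall in isolation: most_common() on an empty Counter returns an empty list, so indexing it directly raises IndexError:

from collections import Counter

mc = Counter().most_common()    # [] for an empty path
top = mc[0][1] if mc else None  # guard before indexing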
|
angr_angr
|
train
|
e89dd01ce18e40f1b21b2f8d8f858c836ee661dc
|
diff --git a/util.py b/util.py
index <HASH>..<HASH> 100644
--- a/util.py
+++ b/util.py
@@ -140,9 +140,16 @@ def UserAcceptance(
############################################################################
# GetBestMatch
+# This finds the elements of matchList which best match the target string
+# Note that this searches substrings so "abc" will have a 100% match in
+# both "this is the abc", "abcde" and "abc"
+# The return from this function is a list of potential matches which share
+# the same highest match score. If any exact match is found (1.0 score and
+# equal size string) it will be returned alone.
############################################################################
def GetBestMatch(target, matchList):
bestMatchList = []
+
if len(matchList) > 0:
ratioMatch = []
for item in matchList:
@@ -153,7 +160,11 @@ def GetBestMatch(target, matchList):
matchIndexList = [i for i, j in enumerate(ratioMatch) if j == maxRatio]
for index in matchIndexList:
- bestMatchList.append(matchList[index])
+ if maxRatio == 1 and len(matchList[index]) == len(target):
+ return [matchList[index], ]
+ else:
+ bestMatchList.append(matchList[index])
+
return bestMatchList
############################################################################
|
Updated string matching algorithm to prioritise exact matches
This takes an exact match (match ratio = <I>% and both strings equal size) as a better result
than a substring match, even if the substring match ratio is also <I>%.
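The selection rule in isolation, assuming the match ratios have already been computed (a sketch, not the project's matcher):

def pick(target, scored):
    # scored: list of (ratio, candidate) pairs
    top = max(ratio for ratio, _ in scored)
    hits = [(ratio, c) for ratio, c in scored if ratio == top]
    # an exact match (ratio 1.0 and equal length) wins outright
    exact = [c for ratio, c in hits if ratio == 1.0 and len(c) == len(target)]
    return exact[:1] if exact else [c for _, c in hits]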
|
davgeo_clear
|
train
|
de2932f36d38def447662159b9f735f02a591624
|
diff --git a/claripy/claripy.py b/claripy/claripy.py
index <HASH>..<HASH> 100644
--- a/claripy/claripy.py
+++ b/claripy/claripy.py
@@ -44,7 +44,7 @@ class Claripy(object):
else:
return E(self, o, set() if variables is None else variables, symbolic)
- def _do_op(self, name, args, variables=None, symbolic=None, raw=False, simplified=False):
+ def _do_op_raw(self, name, args):
resolved = False
if not self.save_ast:
@@ -71,6 +71,12 @@ class Claripy(object):
if r is None:
r = A(self, name, args)
+ return r
+
+
+ def _do_op(self, name, args, variables=None, symbolic=None, raw=False, simplified=False):
+ r = self._do_op_raw(name, args)
+
if symbolic is None:
symbolic = any(arg.symbolic if isinstance(arg, E) else False for arg in args)
if variables is None:
@@ -159,6 +165,27 @@ class Claripy(object):
if len(args) != 3: raise ClaripyOperationError("invalid number of args passed to If")
return self._do_op('If', args)
+ def Identical(self, *args):
+ '''
+        Attempts to check if the underlying models of the expressions are identical,
+        even if the hashes do not match.
+
+ This process is somewhat conservative: False does not necessarily mean that
+ it's not identical; just that it can't (easily) be determined to be identical.
+ '''
+ if not all([isinstance(a, E) for a in args]):
+ return False
+
+ if len(set(hash(a) for a in args)) == 1:
+ return True
+
+ first = args[0]
+ identical = True
+ for o in args:
+ i = self._do_op_raw('Identical', (first, o))
+ identical &= i is True
+ return identical
+
#def size(self, *args): return self._do_op('size', args)
def ite_dict(self, i, d, default):
|
split _do_op into _do_op and _do_op_raw and added Identical
|
angr_claripy
|
train
|
fb322e95b8b2599d903705d78270b942a867f072
|
diff --git a/src/TextureArchive.js b/src/TextureArchive.js
index <HASH>..<HASH> 100644
--- a/src/TextureArchive.js
+++ b/src/TextureArchive.js
@@ -22,6 +22,9 @@ export default class TextureArchive extends PersistedDocumentArchive {
entries.forEach(entry => {
let record = rawArchive.resources[entry.path]
+ if (!record) {
+ return
+ }
// Load any document except pub-meta (which we prepared manually)
if (entry.type !== 'pub-meta') {
// Passing down 'sessions' so that we can add to the pub-meta session
@@ -32,6 +35,29 @@ export default class TextureArchive extends PersistedDocumentArchive {
return sessions
}
+ _repair() {
+ let manifestSession = this.getEditorSession('manifest')
+ let entries = manifestSession.getDocument().getDocumentEntries()
+ let missingEntries = []
+
+ entries.forEach(entry => {
+ let session = this.getEditorSession(entry.id)
+ if (!session) {
+ missingEntries.push(entry.id)
+ console.warn(`${entry.path} could not be found in archive and will be deleted...`)
+ }
+ })
+
+ // Cleanup missing entries
+ manifestSession.transaction(tx => {
+ let documentsEl = tx.find('documents')
+ missingEntries.forEach(missingEntry => {
+ let entryEl = tx.get(missingEntry)
+ documentsEl.removeChild(entryEl)
+ })
+ })
+ }
+
_exportManifest(sessions, buffer, rawArchive) {
let manifest = sessions.manifest.getDocument()
if (buffer.hasResourceChanged('manifest')) {
@@ -126,16 +152,6 @@ function _importManifest(rawArchive) {
let manifestXML = rawArchive.resources['manifest.xml'].data
let dom = DefaultDOMElement.parseXML(manifestXML)
let documentsEl = dom.find('documents')
- let documents = dom.findAll('documents > document')
- // Remove entries from manifest that can not be found in the archive as files
- documents.forEach(doc => {
- let path = doc.attr('path')
- let record = rawArchive.resources[path]
- if (!record) {
- documentsEl.removeChild(doc)
- console.warn(`${path} could not be found in archive. Skipping...`)
- }
- })
documentsEl.append(
dom.createElement('document').attr({
id: 'pub-meta',
|
Implement repair step, which removes missing document entries.
|
substance_texture
|
train
|
dba6fc2d0bb428192c7b58dab3e8399c359dc214
|
diff --git a/lib/crtomo/tdManager.py b/lib/crtomo/tdManager.py
index <HASH>..<HASH> 100644
--- a/lib/crtomo/tdManager.py
+++ b/lib/crtomo/tdManager.py
@@ -50,6 +50,7 @@
"""
import glob
+import re
import os
import tempfile
import numpy as np
@@ -629,7 +630,14 @@ class tdMan(object):
)
return None
- def _invert(self, tempdir):
+ def _invert(self, tempdir, catch_output=True):
+ """
+
+ Parameters
+ ----------
+ catch_output: bool
+ if True, catch all outputs of the CRTomo call
+ """
print('attempting inversion in directory: {0}'.format(tempdir))
pwd = os.getcwd()
os.chdir(tempdir)
@@ -637,18 +645,28 @@ class tdMan(object):
self.save_to_tomodir('.')
os.chdir('exe')
binary = CRBin.get('CRTomo')
- subprocess.check_output(
- binary,
- shell=True,
- stderr=subprocess.STDOUT,
- )
+ print(binary)
+ print('calling CRTomo')
+ if catch_output:
+ subprocess.check_output(
+ binary,
+ shell=True,
+ stderr=subprocess.STDOUT,
+ )
+ else:
+ subprocess.call(
+ binary,
+ shell=True,
+ )
+
+ print('finished')
# if return_code != 0:
# raise Exception('There was an error using CRTomo')
os.chdir(pwd)
- self._read_inversion_results(tempdir + os.sep + 'inv')
+ self._read_inversion_results(tempdir)
- def invert(self, output_directory=None):
+ def invert(self, output_directory=None, catch_output=True):
"""Invert this instance, and import the result files
No directories/files will be overwritten.
@@ -667,7 +685,7 @@ class tdMan(object):
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
tempdir = output_directory
- self._invert(tempdir)
+ self._invert(tempdir, catch_output)
else:
raise IOError(
'output directory already exists: {0}'.format(
@@ -676,7 +694,7 @@ class tdMan(object):
)
else:
with tempfile.TemporaryDirectory() as tempdir:
- self._invert(tempdir)
+ self._invert(tempdir, catch_output)
return 1
else:
@@ -686,8 +704,75 @@ class tdMan(object):
return None
def _read_inversion_results(self, tomodir):
+ self._read_inv_ctr(tomodir)
self._read_resm_m(tomodir)
+ def _read_inv_ctr(self, tomodir):
+ """Read in selected results of the inv.ctr file
+
+ Parameters
+ ----------
+ tomodir: string
+ directory path to a tomodir
+
+ Returns
+ -------
+ inv_ctr: ?
+ structure containing inv.ctr data
+
+ """
+ invctr_file = tomodir + os.sep + 'inv' + os.sep + 'inv.ctr'
+ if not os.path.isfile(invctr_file):
+ print('inv.ctr not found: {0}'.format(invctr_file))
+ print(os.getcwd())
+ return 1
+
+ # read header
+ with open(invctr_file, 'r') as fid:
+ lines = fid.readlines()
+
+ # find section that contains the iteration data
+ for i, line in enumerate(lines):
+ if line.strip().startswith('ID it.'):
+ break
+
+ # TODO: check for robust iteration
+ # we have three types of lines:
+ # 1. first iteration line
+ # 2. other main iteration lines
+ # 3. update lines
+
+ # prepare regular expressions for these three types, each in two
+ # flavors: robust and non-robust
+
+ # this identifies a float number, or a NaN value
+ reg_float = ''.join((
+ '((?:[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?)',
+ '|',
+ '(?:NaN))'
+ ))
+
+ reg_it1_norob = ''.join((
+ ' ([a-zA-Z]{1,3})',
+ ' *(\d{1,3})',
+ ' *' + reg_float,
+ ' *' + reg_float,
+ ' *' + reg_float,
+ ' *([0-9]{1,4})',
+
+ ))
+
+ for line in lines[i:]:
+ linec = line.strip()
+            if linec.startswith('IT') or linec.startswith('PIT'):
+ # main iterations
+ re.compile(reg_it1_norob).search(linec).groups()
+ elif linec.startswith('UP'):
+ # update iterations
+ pass
+ import IPython
+ IPython.embed()
+
def _read_resm_m(self, tomodir):
"""Read in the resolution matrix of an inversion
|
start implementing proper parsing of the inv.ctr file using regular
expressions
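As a rough sketch of the intended matching, the float-or-NaN pattern from this diff can be exercised on a sample iteration line; the sample line below is hypothetical, not taken from a real inv.ctr file:

    import re

    # A float in fixed or scientific notation, or the literal "NaN";
    # mirrors reg_float in the diff above.
    reg_float = r'((?:[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?)|(?:NaN))'

    # Non-robust main-iteration line, mirroring reg_it1_norob.
    reg_it1_norob = (
        r' ([a-zA-Z]{1,3})'
        r' *(\d{1,3})'
        r' *' + reg_float +
        r' *' + reg_float +
        r' *' + reg_float +
        r' *([0-9]{1,4})'
    )

    line = ' IT   1  12.34  0.567  NaN   42'  # hypothetical iteration line
    print(re.compile(reg_it1_norob).search(line).groups())
    # ('IT', '1', '12.34', '0.567', 'NaN', '42')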
|
geophysics-ubonn_crtomo_tools
|
train
|
fe7c33e259f82a015db69272e4eaa53c3f394230
|
diff --git a/src/webignition/ReadableDuration/ReadableDuration.php b/src/webignition/ReadableDuration/ReadableDuration.php
index <HASH>..<HASH> 100644
--- a/src/webignition/ReadableDuration/ReadableDuration.php
+++ b/src/webignition/ReadableDuration/ReadableDuration.php
@@ -28,6 +28,15 @@ class ReadableDuration {
const INTERVAL_SECOND_KEY = 's';
+ private $unitThresholds = array(
+ self::UNIT_MONTH => self::MONTHS_PER_YEAR,
+ self::UNIT_DAY => self::DAYS_PER_MONTH,
+ self::UNIT_HOUR => self::HOURS_PER_DAY,
+ self::UNIT_MINUTE => self::MINUTES_PER_HOUR,
+ self::UNIT_SECOND => self::SECONDS_PER_MINUTE
+ );
+
+
/**
*
* @var int
@@ -314,7 +323,7 @@ class ReadableDuration {
if ($seconds <= 29) {
return $minutes;
- }
+ }
return $minutes + 1;
}
@@ -394,12 +403,15 @@ class ReadableDuration {
$methodName = 'getSeconds';
} else {
$methodName = 'getRounded'.ucwords($this->getLargestIntervalUnit()).'s';
- }
+ }
- $values[] = array(
- 'unit' => $this->getLargestIntervalUnit(),
- 'value' => $this->$methodName()
- );
+ if ($this->$methodName() !== 0) {
+ $values[] = array(
+ 'unit' => $this->getLargestIntervalUnit(),
+ 'value' => $this->$methodName()
+ );
+ }
+
} else {
$values[] = array(
'unit' => $this->getLargestIntervalUnit(),
@@ -412,7 +424,31 @@ class ReadableDuration {
$this->interval = null;
- return $values;
+ return $this->roundUpUnitValues($values);
+ }
+
+
+ private function roundUpUnitValues($unitValues) {
+ $roundUpNextUnitValue = false;
+
+ for ($unitValueIndex = count($unitValues) - 1; $unitValueIndex >= 0; $unitValueIndex--) {
+ $unitValue = $unitValues[$unitValueIndex];
+
+ if ($roundUpNextUnitValue) {
+ $unitValue['value'] += 1;
+ $unitValues[$unitValueIndex] = $unitValue;
+ $roundUpNextUnitValue = false;
+ }
+
+ if (isset($this->unitsToIntervalUnits[$unitValue['unit']])) {
+ if ($unitValue['value'] == $this->unitThresholds[$unitValue['unit']]) {
+ $roundUpNextUnitValue = true;
+ unset($unitValues[$unitValueIndex]);
+ }
+ }
+ }
+
+ return $unitValues;
}
@@ -431,7 +467,7 @@ class ReadableDuration {
}
}
- return null;
+ return 'second';
}
diff --git a/tests/webignition/ReadableDuration/GetInMostAppropriateUnitsTestWithPrecisionTwo.php b/tests/webignition/ReadableDuration/GetInMostAppropriateUnitsTestWithPrecisionTwo.php
index <HASH>..<HASH> 100644
--- a/tests/webignition/ReadableDuration/GetInMostAppropriateUnitsTestWithPrecisionTwo.php
+++ b/tests/webignition/ReadableDuration/GetInMostAppropriateUnitsTestWithPrecisionTwo.php
@@ -87,4 +87,30 @@ class GetInMostAppropriateUnitsTestWithPrecisionTwo extends BaseTest {
)
), $readableDuration->getInMostAppropriateUnits(2));
}
+
+
+ public function test7200SecondsReturns2HoursWithPrecision2() {
+ $readableDuration = new ReadableDuration();
+ $readableDuration->setValueInSeconds(7200);
+
+ $this->assertEquals(array(
+ array(
+ 'unit' => 'hour',
+ 'value' => 2
+ )
+ ), $readableDuration->getInMostAppropriateUnits(2));
+ }
+
+
+ public function test7199SecondsReturns2HoursWithPrecision2000() {
+ $readableDuration = new ReadableDuration();
+ $readableDuration->setValueInSeconds(7199);
+
+ $this->assertEquals(array(
+ array(
+ 'unit' => 'hour',
+ 'value' => 2
+ )
+ ), $readableDuration->getInMostAppropriateUnits(2));
+ }
}
\ No newline at end of file
|
1 hour <I> minutes is 2 hours, not 1 hour 6 minutes. Silly silly silly.
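The carry logic behind the fix, as a minimal standalone Python sketch (the thresholds and data shape are assumptions, not the library's exact internals):

    # unit -> count that equals one of the next-larger unit
    THRESHOLDS = {'minute': 60, 'hour': 24}

    def round_up_unit_values(unit_values):
        # unit_values: list of {'unit', 'value'} dicts, largest unit first
        carry = False
        for i in range(len(unit_values) - 1, -1, -1):
            if carry:
                unit_values[i]['value'] += 1
                carry = False
            unit = unit_values[i]['unit']
            if unit in THRESHOLDS and unit_values[i]['value'] == THRESHOLDS[unit]:
                carry = True
                del unit_values[i]
        return unit_values

    print(round_up_unit_values([{'unit': 'hour', 'value': 1},
                                {'unit': 'minute', 'value': 60}]))
    # [{'unit': 'hour', 'value': 2}]  -- 1 hour 60 minutes becomes 2 hours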
|
webignition_readable-duration
|
train
|
72a55f5e24a2a8eaa6fa9f9b1d7ab20e7708d7c6
|
diff --git a/publify_amazon_sidebar/spec/models/amazon_sidebar_spec.rb b/publify_amazon_sidebar/spec/models/amazon_sidebar_spec.rb
index <HASH>..<HASH> 100644
--- a/publify_amazon_sidebar/spec/models/amazon_sidebar_spec.rb
+++ b/publify_amazon_sidebar/spec/models/amazon_sidebar_spec.rb
@@ -1,8 +1,8 @@
require 'rails_helper'
RSpec.describe AmazonSidebar do
- describe 'without constructor parameters' do
- let(:sidebar) { AmazonSidebar.new }
+ describe 'when using default values for its properties' do
+ let(:sidebar) { AmazonSidebar.new(blog: Blog.new) }
it "title should be 'Cited books'" do
expect(sidebar.title).to eq('Cited books')
@@ -27,11 +27,12 @@ RSpec.describe AmazonSidebar do
end
end
- describe 'with constructor parameters' do
+ describe 'when overriding the defaults' do
it 'gets attributes set correctly' do
sb = AmazonSidebar.new(title: 'Books',
associate_id: 'justasummary-21',
- maxlinks: 3)
+ maxlinks: 3,
+ blog: Blog.new)
expect(sb).to be_valid
expect(sb.title).to eq('Books')
expect(sb.associate_id).to eq('justasummary-21')
|
Make specs pass with publify_core <I>.pre5
|
publify_publify
|
train
|
7c215be4258baf831ea4b88aa11e0d9747fe38d2
|
diff --git a/Framework/ClientSideCompiler.php b/Framework/ClientSideCompiler.php
index <HASH>..<HASH> 100644
--- a/Framework/ClientSideCompiler.php
+++ b/Framework/ClientSideCompiler.php
@@ -48,6 +48,10 @@ private function preprocess($dom) {
}
}
+// TODO: Issue #62 and #64 - Don't preprocess the sass files to the Style
+// directory. Instead, output to the www directory... but two extra things must
+// be done to achieve this: the compiler must work within the www directory,
+// and the FileOrganiser must ignore .scss files.
private function sassParse($filePath) {
$sassParser = new SassParser_Utility($filePath);
$parsedString = $sassParser->parse();
|
For #<I> and #<I>, added a note on how to fix the issues.
|
PhpGt_WebEngine
|
train
|
d122f5826c21c47c0be8e07f7e6336253fe3e41a
|
diff --git a/executor/adapter.go b/executor/adapter.go
index <HASH>..<HASH> 100644
--- a/executor/adapter.go
+++ b/executor/adapter.go
@@ -43,7 +43,7 @@ type recordSet struct {
executor Executor
stmt *ExecStmt
processinfo processinfoSetter
- err error
+ lastErr error
}
func (a *recordSet) Fields() ([]*ast.ResultField, error) {
@@ -72,6 +72,7 @@ func (a *recordSet) Fields() ([]*ast.ResultField, error) {
func (a *recordSet) Next() (*ast.Row, error) {
row, err := a.executor.Next()
if err != nil {
+ a.lastErr = err
return nil, errors.Trace(err)
}
if row == nil {
@@ -89,7 +90,7 @@ func (a *recordSet) Next() (*ast.Row, error) {
func (a *recordSet) Close() error {
err := a.executor.Close()
- a.stmt.logSlowQuery()
+ a.stmt.logSlowQuery(a.lastErr == nil)
if a.processinfo != nil {
a.processinfo.SetProcessInfo("")
}
@@ -194,15 +195,17 @@ func (a *ExecStmt) handleNoDelayExecutor(e Executor, ctx context.Context, pi pro
}
}
+ var err error
defer func() {
if pi != nil {
pi.SetProcessInfo("")
}
terror.Log(errors.Trace(e.Close()))
- a.logSlowQuery()
+ a.logSlowQuery(err == nil)
}()
for {
- row, err := e.Next()
+ var row Row
+ row, err = e.Next()
if err != nil {
return nil, errors.Trace(err)
}
@@ -270,7 +273,7 @@ func (a *ExecStmt) buildExecutor(ctx context.Context) (Executor, error) {
return e, nil
}
-func (a *ExecStmt) logSlowQuery() {
+func (a *ExecStmt) logSlowQuery(succ bool) {
cfg := config.GetGlobalConfig()
costTime := time.Since(a.startTime)
sql := a.Text
@@ -285,9 +288,9 @@ func (a *ExecStmt) logSlowQuery() {
"sql": sql,
}
if costTime < time.Duration(cfg.Log.SlowThreshold)*time.Millisecond {
- logEntry.WithField("type", "query").Debugf("query")
+ logEntry.WithField("type", "query").WithField("succ", succ).Debugf("query")
} else {
- logEntry.WithField("type", "slow-query").Warnf("slow-query")
+ logEntry.WithField("type", "slow-query").WithField("succ", succ).Warnf("slow-query")
}
}
diff --git a/tidb-server/main.go b/tidb-server/main.go
index <HASH>..<HASH> 100644
--- a/tidb-server/main.go
+++ b/tidb-server/main.go
@@ -64,6 +64,7 @@ const (
nmRunDDL = "run-ddl"
nmLogLevel = "L"
nmLogFile = "log-file"
+ nmLogSlowQuery = "log-slow-query"
nmReportStatus = "report-status"
nmStatusPort = "status"
nmMetricsAddr = "metrics-addr"
@@ -86,8 +87,9 @@ var (
ddlLease = flag.String(nmDdlLease, "10s", "schema lease duration, very dangerous to change only if you know what you do")
// Log
- logLevel = flag.String(nmLogLevel, "info", "log level: info, debug, warn, error, fatal")
- logFile = flag.String(nmLogFile, "", "log file path")
+ logLevel = flag.String(nmLogLevel, "info", "log level: info, debug, warn, error, fatal")
+ logFile = flag.String(nmLogFile, "", "log file path")
+ logSlowQuery = flag.String(nmLogSlowQuery, "", "slow query file path")
// Status
reportStatus = flagBoolean(nmReportStatus, true, "If enable status report HTTP service.")
@@ -284,6 +286,9 @@ func overrideConfig() {
if actualFlags[nmLogFile] {
cfg.Log.File.Filename = *logFile
}
+ if actualFlags[nmLogSlowQuery] {
+ cfg.Log.SlowQueryFile = *logSlowQuery
+ }
// Status
if actualFlags[nmReportStatus] {
|
tidb-server,executor: add command option and log success/fail for slow-query (#<I>)
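A hedged Python sketch of the logging scheme (the field names and threshold value are assumptions, not TiDB's actual implementation):

    import logging
    import time

    logging.basicConfig(level=logging.DEBUG, format='%(levelname)s %(message)s')
    SLOW_THRESHOLD = 0.3  # seconds; assumed value

    def log_query(sql, start_time, succ):
        # Tag every entry with a success flag and route by latency.
        cost = time.time() - start_time
        fields = 'cost_time=%.3fs succ=%s sql=%s' % (cost, succ, sql)
        if cost < SLOW_THRESHOLD:
            logging.debug('query %s', fields)
        else:
            logging.warning('slow-query %s', fields)

    start = time.time()
    log_query('SELECT 1', start, succ=True)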
|
pingcap_tidb
|
train
|
2c568a75f27e59a8bfccf933807f9676c999606f
|
diff --git a/plugins/commands/ssh/command.rb b/plugins/commands/ssh/command.rb
index <HASH>..<HASH> 100644
--- a/plugins/commands/ssh/command.rb
+++ b/plugins/commands/ssh/command.rb
@@ -55,7 +55,7 @@ module VagrantPlugins
return exit_status
else
@logger.debug("Invoking `ssh` action on machine")
- vm.action(:ssh, :ssh_opts => opts)
+ vm.action(:ssh, :ssh_opts => ssh_opts)
# We should never reach this point, since the point of `ssh`
# is to exec into the proper SSH shell, but we'll just return
|
commands/ssh: use proper variable
|
hashicorp_vagrant
|
train
|
9d5e092d46b22423bb27b6f418c307b1bd8ccfcb
|
diff --git a/src/moneyed/classes.py b/src/moneyed/classes.py
index <HASH>..<HASH> 100644
--- a/src/moneyed/classes.py
+++ b/src/moneyed/classes.py
@@ -3,6 +3,7 @@ from __future__ import division
from __future__ import unicode_literals
from decimal import Decimal, ROUND_DOWN
+import warnings
# Default, non-existent, currency
DEFAULT_CURRENCY_CODE = 'XYZ'
@@ -114,6 +115,8 @@ class Money(object):
if isinstance(other, Money):
raise TypeError('Cannot multiply two Money instances.')
else:
+ if isinstance(other, float):
+ warnings.warn("Multiplying Money instances with floats is deprecated", DeprecationWarning)
return Money(
amount=(self.amount * Decimal(str(other))),
currency=self.currency)
@@ -124,6 +127,8 @@ class Money(object):
raise TypeError('Cannot divide two different currencies.')
return self.amount / other.amount
else:
+ if isinstance(other, float):
+ warnings.warn("Dividing Money instances by floats is deprecated", DeprecationWarning)
return Money(
amount=self.amount / Decimal(str(other)),
currency=self.currency)
@@ -146,6 +151,8 @@ class Money(object):
if isinstance(other, Money):
raise TypeError('Invalid __rmod__ operation')
else:
+ if isinstance(other, float):
+ warnings.warn("Calculating percentages of Money instances using floats is deprecated", DeprecationWarning)
return Money(
amount=(Decimal(str(other)) * self.amount / 100),
currency=self.currency)
diff --git a/src/moneyed/test_moneyed_classes.py b/src/moneyed/test_moneyed_classes.py
index <HASH>..<HASH> 100644
--- a/src/moneyed/test_moneyed_classes.py
+++ b/src/moneyed/test_moneyed_classes.py
@@ -5,6 +5,8 @@ from __future__ import unicode_literals
from copy import deepcopy
from decimal import Decimal
+import warnings
+
import pytest # Works with less code, more consistency than unittest.
from moneyed.classes import Currency, Money, MoneyComparisonError, CURRENCIES, DEFAULT_CURRENCY
@@ -132,6 +134,18 @@ class TestMoney:
assert 3 * x == Money(333.99, currency=self.USD)
assert Money(333.99, currency=self.USD) == 3 * x
+ def test_mul_float_warning(self):
+ # This should be changed to TypeError exception after deprecation period is over.
+ with warnings.catch_warnings(record=True) as warning_list:
+ warnings.simplefilter("always")
+ Money(amount="10") * 1.2
+ assert "Multiplying Money instances with floats is deprecated" in [w.message.args[0] for w in warning_list]
+
+ with warnings.catch_warnings(record=True) as warning_list:
+ warnings.simplefilter("always")
+ 1.2 * Money(amount="10")
+ assert "Multiplying Money instances with floats is deprecated" in [w.message.args[0] for w in warning_list]
+
def test_mul_bad(self):
with pytest.raises(TypeError):
self.one_million_bucks * self.one_million_bucks
@@ -152,6 +166,13 @@ class TestMoney:
y = 2
assert x / y == Money(amount=25, currency=self.USD)
+ def test_div_float_warning(self):
+ # This should be changed to TypeError exception after deprecation period is over.
+ with warnings.catch_warnings(record=True) as warning_list:
+ warnings.simplefilter("always")
+ Money(amount="10") / 1.2
+ assert "Dividing Money instances by floats is deprecated" in [w.message.args[0] for w in warning_list]
+
def test_rmod(self):
assert 1 % self.one_million_bucks == Money(amount=10000,
currency=self.USD)
@@ -161,6 +182,13 @@ class TestMoney:
assert (self.one_million_bucks % self.one_million_bucks
== 1)
+ def test_rmod_float_warning(self):
+ # This should be changed to TypeError exception after deprecation period is over.
+ with warnings.catch_warnings(record=True) as warning_list:
+ warnings.simplefilter("always")
+ 2.0 % Money(amount="10")
+ assert "Calculating percentages of Money instances using floats is deprecated" in [w.message.args[0] for w in warning_list]
+
def test_convert_to_default(self):
# Currency conversions are not implemented as of 2/2011; when
# they are working, then convert_to_default and convert_to
|
Deprecated automatic conversion from floats for * / % operations
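A minimal sketch of the deprecation pattern: warn on float operands now, so the warning can later be tightened into a TypeError:

    import warnings
    from decimal import Decimal

    class Money:
        def __init__(self, amount):
            self.amount = Decimal(str(amount))

        def __mul__(self, other):
            if isinstance(other, Money):
                raise TypeError('Cannot multiply two Money instances.')
            if isinstance(other, float):
                warnings.warn('Multiplying Money instances with floats is deprecated',
                              DeprecationWarning)
            return Money(self.amount * Decimal(str(other)))

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        Money('10') * 1.2
    print(caught[0].message)  # Multiplying Money instances with floats is deprecated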
|
limist_py-moneyed
|
train
|
ba8f336b56db23c283ea5acd0d9fa42dd4a416b5
|
diff --git a/tensorflow_probability/python/experimental/distributions/joint_distribution_pinned.py b/tensorflow_probability/python/experimental/distributions/joint_distribution_pinned.py
index <HASH>..<HASH> 100644
--- a/tensorflow_probability/python/experimental/distributions/joint_distribution_pinned.py
+++ b/tensorflow_probability/python/experimental/distributions/joint_distribution_pinned.py
@@ -270,7 +270,7 @@ class JointDistributionPinned(object):
```
"""
- def __init__(self, distribution, *pins, **named_pins):
+ def __init__(self, distribution, *pins, name=None, **named_pins):
"""Constructs a `JointDistributionPinned`.
### Examples:
@@ -322,6 +322,9 @@ class JointDistributionPinned(object):
use an unordered sequence of pins with an unordered model, e.g. a
`tfp.distributions.JointDistributionNamed` constructed with a `dict`
model (`collections.OrderedDict` is allowed).
+ name: Python `str` name for this distribution. If `None`, defaults to
+ 'Pinned{distribution.name}'.
+ Default value: `None`.
**named_pins: Named elements to pin. The names given must align with the
part names defined by `distribution._flat_resolve_names()`, i.e. either
the explicitly named parts of `tfp.distributions.JointDistributionNamed`
@@ -330,8 +333,12 @@ class JointDistributionPinned(object):
"""
if bool(pins) == bool(named_pins):
raise ValueError('Exactly one of *pins or **named_pins should be set.')
+
+ if name is None:
+ name = 'Pinned{}'.format(distribution.name)
+
self._distribution = distribution
- self._name = named_pins.pop('name', 'Pinned{}'.format(distribution.name))
+ self._name = name
self._pins = _to_pins(distribution, *pins, **named_pins)
@property
@@ -340,6 +347,11 @@ class JointDistributionPinned(object):
return self._distribution
@property
+ def name(self):
+ """Name of this pinned distribution."""
+ return self._name
+
+ @property
def pins(self):
"""Dictionary of pins resolved to names."""
return self._pins
|
Expose `name` property of JointDistributionPinned.
This brings JDP slightly closer to 'quacking like' a Distribution.
PiperOrigin-RevId: <I>
|
tensorflow_probability
|
train
|
1336ff068f71092ce6c4b0b687a3eb86a686c346
|
diff --git a/lib/packets/column_definition.js b/lib/packets/column_definition.js
index <HASH>..<HASH> 100644
--- a/lib/packets/column_definition.js
+++ b/lib/packets/column_definition.js
@@ -45,8 +45,10 @@ class ColumnDefinition {
this.characterSet = packet.readInt16();
this.encoding = CharsetToEncoding[this.characterSet];
this.name = StringParser.decode(
- this._buf.slice(_nameStart, _nameStart + _nameLength),
- this.encoding === 'binary' ? this._clientEncoding : this.encoding
+ this._buf,
+ this.encoding === 'binary' ? this._clientEncoding : this.encoding,
+ _nameStart,
+ _nameStart + _nameLength
);
this.columnLength = packet.readInt32();
this.columnType = packet.readInt8();
@@ -113,8 +115,10 @@ const addString = function(name) {
const start = this[`_${name}Start`];
const end = start + this[`_${name}Length`];
const val = StringParser.decode(
- this._buf.slice(start, end),
- this.encoding === 'binary' ? this._clientEncoding : this.encoding
+ this._buf,
+ this.encoding === 'binary' ? this._clientEncoding : this.encoding,
+ start,
+ end
);
Object.defineProperty(this, name, {
diff --git a/lib/packets/packet.js b/lib/packets/packet.js
index <HASH>..<HASH> 100644
--- a/lib/packets/packet.js
+++ b/lib/packets/packet.js
@@ -387,8 +387,10 @@ class Packet {
// TODO: Use characterSetCode to get proper encoding
// https://github.com/sidorares/node-mysql2/pull/374
return StringParser.decode(
- this.buffer.slice(this.offset - len, this.offset),
- encoding
+ this.buffer,
+ encoding,
+ this.offset - len,
+ this.offset
);
}
@@ -407,7 +409,7 @@ class Packet {
end = end + 1; // TODO: handle OOB check
}
this.offset = end + 1;
- return StringParser.decode(this.buffer.slice(start, end), encoding);
+ return StringParser.decode(this.buffer, encoding, start, end);
}
// TODO reuse?
@@ -421,8 +423,10 @@ class Packet {
}
this.offset += len;
return StringParser.decode(
- this.buffer.slice(this.offset - len, this.offset),
- encoding
+ this.buffer,
+ encoding,
+ this.offset - len,
+ this.offset
);
}
diff --git a/lib/parsers/string.js b/lib/parsers/string.js
index <HASH>..<HASH> 100644
--- a/lib/parsers/string.js
+++ b/lib/parsers/string.js
@@ -2,14 +2,14 @@
const Iconv = require('iconv-lite');
-exports.decode = function(buffer, encoding, options) {
+exports.decode = function(buffer, encoding, start, end, options) {
if (Buffer.isEncoding(encoding)) {
- return buffer.toString(encoding);
+ return buffer.toString(encoding, start, end);
}
const decoder = Iconv.getDecoder(encoding, options || {});
- const res = decoder.write(buffer);
+ const res = decoder.write(buffer.slice(start, end));
const trail = decoder.end();
return trail ? res + trail : res;
|
Optimize string decoding by removing the use of slice()
Previously, when we wanted to decode a string from a range in a Buffer,
we sliced it first and then decoded it. However, Buffer.toString()
already does this internally. In addition, toString() seems to be more
efficient when working with the native buffer rather than the wrapper
FastBuffer created by slice().
From a simple test with a large buffer and a short string, this seems to
yield about <I>-<I>% performance improvement.
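The same principle carries over to other runtimes: decode directly from a range instead of materialising an intermediate copy first. A hedged Python analogy:

    buf = b'x' * 1_000_000 + b'hello' + b'x' * 1_000_000
    start, end = 1_000_000, 1_000_005

    # Copying approach: slicing allocates a new bytes object before decoding.
    s1 = buf[start:end].decode('utf-8')

    # Zero-copy approach: a memoryview slice decodes the range directly.
    s2 = str(memoryview(buf)[start:end], 'utf-8')

    assert s1 == s2 == 'hello'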
|
sidorares_node-mysql2
|
train
|
5379ebeb429e7ea52e4c2e560f1d701dc65b9d92
|
diff --git a/tests/test_luhn.py b/tests/test_luhn.py
index <HASH>..<HASH> 100644
--- a/tests/test_luhn.py
+++ b/tests/test_luhn.py
@@ -4,7 +4,7 @@ from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from case import unittest, build_document, build_sentence
-from sumy.algorithms._luhn import LuhnMethod
+from sumy.algorithms import LuhnMethod
from sumy._py3k import to_unicode
|
Import 'LuhnMethod' from public package
|
miso-belica_sumy
|
train
|
833f93ae687bdb0a0dfa618abb2680b22e773bbb
|
diff --git a/master/buildbot/test/unit/test_worker_local.py b/master/buildbot/test/unit/test_worker_local.py
index <HASH>..<HASH> 100644
--- a/master/buildbot/test/unit/test_worker_local.py
+++ b/master/buildbot/test/unit/test_worker_local.py
@@ -52,7 +52,7 @@ class TestLocalWorker(unittest.TestCase):
old.updateWorker = mock.Mock(side_effect=lambda: defer.succeed(None))
yield old.startService()
- self.assertEqual(old.remote_worker.bot.basedir, os.path.abspath('basedir/slaves/bot'))
+ self.assertEqual(old.remote_worker.bot.basedir, os.path.abspath('basedir/workers/bot'))
yield old.reconfigServiceWithSibling(new)
diff --git a/master/buildbot/worker/local.py b/master/buildbot/worker/local.py
index <HASH>..<HASH> 100644
--- a/master/buildbot/worker/local.py
+++ b/master/buildbot/worker/local.py
@@ -38,10 +38,7 @@ class LocalWorker(Worker):
Worker.reconfigService(self, name, None, **kwargs)
if workdir is None:
workdir = name
- # TODO: How to move working directory to the new place?
- # Perhaps check if old directory exists, then use it.
- # If old directory is not exists, then create new directory.
- workdir = os.path.abspath(os.path.join(self.master.basedir, "slaves", workdir))
+ workdir = os.path.abspath(os.path.join(self.master.basedir, "workers", workdir))
if not os.path.isdir(workdir):
os.makedirs(workdir)
diff --git a/master/docs/manual/worker-transition.rst b/master/docs/manual/worker-transition.rst
index <HASH>..<HASH> 100644
--- a/master/docs/manual/worker-transition.rst
+++ b/master/docs/manual/worker-transition.rst
@@ -319,3 +319,9 @@ In addition to ``buildbot.steps.slave`` module being renamed to
:py:mod:`buildbot.steps.worker`, default ``source`` value for
:py:class:`~buildbot.steps.worker.SetPropertiesFromEnv` was changed from
``"SlaveEnvironment"`` to ``"WorkerEnvironment"``.
+
+Local worker changes
+--------------------
+
+Working directory for local workers were changed from
+``master-basedir/slaves/name`` to ``master-basedir/workers/name``.
|
change working directory for local worker from `master-basedir/slaves/name` to `master-basedir/workers/name`
|
buildbot_buildbot
|
train
|
accce12fea9d24aa09ee4be54106870a58191587
|
diff --git a/src/main/java/de/thetaphi/forbiddenapis/ClassScanner.java b/src/main/java/de/thetaphi/forbiddenapis/ClassScanner.java
index <HASH>..<HASH> 100644
--- a/src/main/java/de/thetaphi/forbiddenapis/ClassScanner.java
+++ b/src/main/java/de/thetaphi/forbiddenapis/ClassScanner.java
@@ -196,6 +196,9 @@ public final class ClassScanner extends ClassVisitor implements Constants {
private final AncestorVisitor classRelationAncestorVisitor = new AncestorVisitor() {
@Override
public String visit(ClassSignature c, String origName, boolean isInterfaceOfAncestor, boolean previousInRuntime) {
+ if (previousInRuntime && c.isNonPortableRuntime) {
+ return null; // something inside the JVM is extending internal class/interface
+ }
return checkClassUse(c.className, isInterfaceOfAncestor ? "interface" : "class", origName);
}
};
@@ -407,6 +410,9 @@ public final class ClassScanner extends ClassVisitor implements Constants {
if (!c.methods.contains(lookupMethod)) {
return null;
}
+ if (previousInRuntime && c.isNonPortableRuntime) {
+ return null; // something inside the JVM is extending internal class/interface
+ }
String violation = forbiddenSignatures.checkMethod(c.className, lookupMethod);
if (violation != null) {
return violation;
@@ -443,6 +449,10 @@ public final class ClassScanner extends ClassVisitor implements Constants {
if (!c.fields.contains(field)) {
return null;
}
+ // we found the field: from now on we use STOP to exit, because fields are not virtual!
+ if (previousInRuntime && c.isNonPortableRuntime) {
+ return STOP; // something inside the JVM is extending internal class/interface
+ }
String violation = forbiddenSignatures.checkField(c.className, field);
if (violation != null) {
return violation;
|
Fix JDK-<I> bug with RandomSupport extending internal/nonportable classes
|
policeman-tools_forbidden-apis
|
train
|
6b444388924878cfad16812766a12d850994763f
|
diff --git a/hazelcast-client/src/main/java/com/hazelcast/client/spi/impl/listener/ClientListenerServiceImpl.java b/hazelcast-client/src/main/java/com/hazelcast/client/spi/impl/listener/ClientListenerServiceImpl.java
index <HASH>..<HASH> 100644
--- a/hazelcast-client/src/main/java/com/hazelcast/client/spi/impl/listener/ClientListenerServiceImpl.java
+++ b/hazelcast-client/src/main/java/com/hazelcast/client/spi/impl/listener/ClientListenerServiceImpl.java
@@ -111,6 +111,7 @@ public abstract class ClientListenerServiceImpl implements ClientListenerService
}
public void shutdown() {
+ eventExecutor.shutdown();
ClientExecutionServiceImpl.shutdownExecutor("registrationExecutor", registrationExecutor, logger);
}
|
Put eventExecutor.shutdown back to client
It seems it was removed when it should not have been.
To be able to find the affected versions easily later, here is the commit that deleted it:
It was deleted in <I> here
<URL>
|
hazelcast_hazelcast
|
train
|
abedb3f642f4d6ac48833ac1442fff4c8108b685
|
diff --git a/ledcontroller/__init__.py b/ledcontroller/__init__.py
index <HASH>..<HASH> 100644
--- a/ledcontroller/__init__.py
+++ b/ledcontroller/__init__.py
@@ -278,6 +278,20 @@ class LedController(object):
have any effect. """
self._send_to_group(group, command="warmer")
+ @classmethod
+ def get_brightness_level(cls, percent):
+ """ Gets internal brightness level.
+
+        percent should be an integer from 0 to 100.
+        Returns a (percent, value) tuple; value is 2 (minimum) - 27 (maximum).
+ """
+ # Clamp to appropriate range.
+ percent = min(100, max(0, percent))
+
+ # Map 0-100 to 2-27
+ value = int(2 + ((float(percent) / 100) * 25))
+ return percent, value
+
def set_brightness(self, percent, group=None):
""" Sets brightness.
@@ -294,11 +308,7 @@ class LedController(object):
percent = int(percent)
else:
percent = int(percent * 100)
- # Clamp to appropriate range.
- percent = min(100, max(0, percent))
-
- # Map 0-100 to 2-27
- value = int(2 + ((float(percent) / 100) * 25))
+ percent, value = self.get_brightness_level(percent)
self.on(group)
self._send_command((b"\x4e", struct.pack("B", value)))
return percent
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@ with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
setup(
name='ledcontroller',
- version='1.0.9',
+ version='1.1.0',
description='Controller library for limitlessled/easybulb/milight Wi-Fi LEDs',
long_description=long_description,
url='https://github.com/ojarva/python-ledcontroller',
|
Add get_brightness_level method to get internal brightness
Milights only have <I> brightness levels. get_brightness_level converts
a percentage to a Milight level.
The method returns a (percent, value) tuple, where percent is processed
if needed (clamped to 0-<I>).
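The mapping is easy to verify by hand with a few sample inputs:

    def get_brightness_level(percent):
        # Map 0-100 percent to the Milight-internal 2-27 range.
        percent = min(100, max(0, percent))             # clamp
        value = int(2 + ((float(percent) / 100) * 25))  # map to 2-27
        return percent, value

    for p in (-5, 0, 50, 100, 250):
        print(p, '->', get_brightness_level(p))
    # -5 -> (0, 2)   0 -> (0, 2)   50 -> (50, 14)
    # 100 -> (100, 27)   250 -> (100, 27)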
|
ojarva_python-ledcontroller
|
train
|
0f7c76b52b2f111127ff423607cf9d1317f256c0
|
diff --git a/movement.go b/movement.go
index <HASH>..<HASH> 100644
--- a/movement.go
+++ b/movement.go
@@ -54,6 +54,10 @@ func (c WorldCoord) DirectionTo(other WorldCoord) Direction {
panic("unable to calculate Direction")
}
+func (p PartialWorldCoord) String() string {
+ return fmt.Sprintf("PL{%v %v}", p.WorldCoord, p.Percentage)
+}
+
type (
StandAction struct {
WorldCoord
@@ -73,10 +77,6 @@ type (
}
)
-func (p PartialWorldCoord) String() string {
- return fmt.Sprintf("PL{%v %v}", p.WorldCoord, p.Percentage)
-}
-
func (pa PathAction) String() string {
return fmt.Sprintf("PA{s:%v d:%v e:%v f:%v t:%v}", pa.start, pa.duration, pa.end, pa.Orig, pa.Dest)
}
|
Moved this method closer to type def
|
ghthor_filu
|
train
|
4f734ddcb01f23b9b35b33299c06b523e86ab967
|
diff --git a/tornado/test/web_test.py b/tornado/test/web_test.py
index <HASH>..<HASH> 100644
--- a/tornado/test/web_test.py
+++ b/tornado/test/web_test.py
@@ -1331,6 +1331,7 @@ class ErrorHandlerXSRFTest(WebTestCase):
self.assertEqual(response.code, 404)
+@wsgi_safe
class GzipTestCase(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
diff --git a/tornado/wsgi.py b/tornado/wsgi.py
index <HASH>..<HASH> 100644
--- a/tornado/wsgi.py
+++ b/tornado/wsgi.py
@@ -80,13 +80,8 @@ class WSGIApplication(web.Application):
Use a regular `.Application` and wrap it in `WSGIAdapter` instead.
"""
- def __init__(self, handlers=None, default_host="", **settings):
- web.Application.__init__(self, handlers, default_host, transforms=[],
- **settings)
- self._adapter = WSGIAdapter(self)
-
def __call__(self, environ, start_response):
- return self._adapter.__call__(environ, start_response)
+ return WSGIAdapter(self)(environ, start_response)
class _WSGIConnection(object):
|
Allow gzip to be used with WSGIApplication.
Now that transfer encodings have been moved to http1connection,
the content-level output transforms can be used with wsgi.
|
tornadoweb_tornado
|
train
|
511cac1d1618628dab9695e9c8f856275baef7e0
|
diff --git a/lib/acl9/model_extensions/for_object.rb b/lib/acl9/model_extensions/for_object.rb
index <HASH>..<HASH> 100644
--- a/lib/acl9/model_extensions/for_object.rb
+++ b/lib/acl9/model_extensions/for_object.rb
@@ -10,7 +10,9 @@ module Acl9
# @param [Subject] subject Subject to add role for
# @see Acl9::ModelExtensions::Subject#has_role?
def accepts_role?(role_name, subject)
- subject.has_role? role_name, self
+ if not subject.nil?
+ subject.has_role? role_name, self
+ end
end
##
@@ -20,7 +22,9 @@ module Acl9
# @param [Subject] subject Subject to add role for
# @see Acl9::ModelExtensions::Subject#has_role!
def accepts_role!(role_name, subject)
- subject.has_role! role_name, self
+ if not subject.nil?
+ subject.has_role! role_name, self
+ end
end
##
@@ -30,7 +34,9 @@ module Acl9
# @param [Subject] subject Subject to remove role from
# @see Acl9::ModelExtensions::Subject#has_no_role!
def accepts_no_role!(role_name, subject)
- subject.has_no_role! role_name, self
+ if not subject.nil?
+ subject.has_no_role! role_name, self
+ end
end
##
@@ -40,7 +46,9 @@ module Acl9
# @return [Boolean] Returns true if +subject+ has any roles on this object.
# @see Acl9::ModelExtensions::Subject#has_roles_for?
def accepts_roles_by?(subject)
- subject.has_roles_for? self
+ if not subject.nil?
+ subject.has_roles_for? self
+ end
end
alias :accepts_role_by? :accepts_roles_by?
@@ -52,7 +60,9 @@ module Acl9
# @param [Subject] subject Subject to query roles
# @see Acl9::ModelExtensions::Subject#roles_for
def accepted_roles_by(subject)
- subject.roles_for self
+ if not subject.nil?
+ subject.roles_for self
+ end
end
end
end
|
Added some checks for when the subject can be null.
|
be9_acl9
|
train
|
6b33bb9aa9363b8d350e816a5e7c2b81adb081e6
|
diff --git a/h2o-core/src/main/java/water/fvec/Frame.java b/h2o-core/src/main/java/water/fvec/Frame.java
index <HASH>..<HASH> 100644
--- a/h2o-core/src/main/java/water/fvec/Frame.java
+++ b/h2o-core/src/main/java/water/fvec/Frame.java
@@ -507,14 +507,19 @@ public class Frame extends Lockable<Frame> {
}
int ncols = _keys.length;
- _names = Arrays.copyOf(_names, ncols+N);
- _keys = Arrays.copyOf(_keys, ncols+N);
- _vecs = Arrays.copyOf(_vecs, ncols+N);
+
+    // make temp arrays and don't assign them back until they are fully filled - otherwise vecs() can cache nulls and NPE.
+ String[] tmpnam = Arrays.copyOf(_names, ncols+N);
+ Key<Vec>[] tmpkeys = Arrays.copyOf(_keys, ncols+N);
+ Vec[] tmpvecs = Arrays.copyOf(_vecs, ncols+N);
for (int i=0; i<N; ++i) {
- _names[ncols+i] = tmpnames[i];
- _keys[ncols+i] = vecs[i]._key;
- _vecs[ncols+i] = vecs[i];
+ tmpnam[ncols+i] = tmpnames[i];
+ tmpkeys[ncols+i] = vecs[i]._key;
+ tmpvecs[ncols+i] = vecs[i];
}
+ _names = tmpnam;
+ _keys = tmpkeys;
+ _vecs = tmpvecs;
}
/** Append a named Vec to the Frame. Names are forced unique, by appending a
|
Fix PUBDEV-<I>: Prevent race condition in Frame's internal state.
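The underlying pattern, populate temporaries completely and only then publish them, keeps concurrent readers from observing a half-filled array. A hedged Python sketch of the idea (not H2O's actual code):

    class Frame:
        def __init__(self, names, vecs):
            self.names = list(names)
            self.vecs = list(vecs)

        def add_vecs(self, new_names, new_vecs):
            # Build fully-filled temporaries first; concurrent readers
            # keep seeing the old, complete lists while this runs.
            tmp_names = self.names + list(new_names)
            tmp_vecs = self.vecs + list(new_vecs)
            # Publish each complete list in a single assignment; a reader
            # observes either the old list or the new one, never a
            # partially-filled array.
            self.names = tmp_names
            self.vecs = tmp_vecs

    f = Frame(['a'], [1])
    f.add_vecs(['b'], [2])
    print(f.names, f.vecs)  # ['a', 'b'] [1, 2]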
|
h2oai_h2o-3
|
train
|
4f841c03763bdc504ea7c22195a55b983e457da6
|
diff --git a/sharding-jdbc/src/test/java/io/shardingsphere/dbtest/env/schema/SchemaEnvironmentManager.java b/sharding-jdbc/src/test/java/io/shardingsphere/dbtest/env/schema/SchemaEnvironmentManager.java
index <HASH>..<HASH> 100644
--- a/sharding-jdbc/src/test/java/io/shardingsphere/dbtest/env/schema/SchemaEnvironmentManager.java
+++ b/sharding-jdbc/src/test/java/io/shardingsphere/dbtest/env/schema/SchemaEnvironmentManager.java
@@ -77,6 +77,7 @@ public final class SchemaEnvironmentManager {
RunScript.execute(connection, stringReader);
} catch (final SQLException ex) {
// TODO schema maybe exist for oracle only
+ ex.printStackTrace();
}
}
}
@@ -98,6 +99,7 @@ public final class SchemaEnvironmentManager {
RunScript.execute(connection, stringReader);
} catch (final SQLException ex) {
// TODO schema maybe not exist for oracle only
+ ex.printStackTrace();
}
}
}
@@ -112,7 +114,7 @@ public final class SchemaEnvironmentManager {
if (DatabaseType.H2 == databaseType) {
return Collections.emptyList();
}
- String sql = DatabaseType.Oracle == databaseType ? "CREATE SCHEMA %s" : "CREATE DATABASE IF NOT EXISTS %s";
+ String sql = DatabaseType.Oracle == databaseType ? "CREATE SCHEMA %s" : "CREATE DATABASE %s";
Collection<String> result = new LinkedList<>();
for (String each : databases) {
result.add(String.format(sql, each));
@@ -154,6 +156,7 @@ public final class SchemaEnvironmentManager {
RunScript.execute(connection, stringReader);
} catch (final SQLException ex) {
// TODO schema maybe not exist for oracle only
+ ex.printStackTrace();
}
}
}
@@ -193,6 +196,7 @@ public final class SchemaEnvironmentManager {
RunScript.execute(connection, stringReader);
} catch (final SQLException ex) {
// TODO schema maybe not exist for oracle only
+ ex.printStackTrace();
}
}
}
|
Fixed the unit test initialization problem for PostgreSQL.
|
apache_incubator-shardingsphere
|
train
|
d9ae0bff72bd4d7797e99884a72f28e9613f6c63
|
diff --git a/js/getusermedia.js b/js/getusermedia.js
index <HASH>..<HASH> 100644
--- a/js/getusermedia.js
+++ b/js/getusermedia.js
@@ -4,38 +4,85 @@
getUserMedia = function (options, successCallback, errorCallback) {
- navigator.getUserMedia_ = navigator.getUserMedia || navigator.webkitGetUserMedia;
+ navigator.getUserMedia_ = (navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia);
+
+ // detect if {video: true} or "video" style options
+ // by creating an iframe and blowing it up
+ // style jacked from @kangax
+ // taken here from @miketaylr: //gist.github.com/f2ac64ed7fc467ccdfe3
+ var optionStyle = (function (win) {
+
+ var el = document.createElement('iframe'),
+ root = document.body || document.documentElement,
+ string = true, object = true, nop = function(){};
+ root.appendChild(el);
+
+ var f = win.frames[win.frames.length-1];
+
+    f.navigator.getUserMedia || (f.navigator.getUserMedia = f.navigator.webkitGetUserMedia || f.navigator.mozGetUserMedia || f.navigator.msGetUserMedia);
+
+ try { // Try spec (object) syntax
+ f.navigator.getUserMedia({video: true, audio: true}, nop);
+ } catch (e) {
+ object = false;
+ try { // Try string syntax
+ f.navigator.getUserMedia("video, audio", nop);
+ } catch (e) { // Neither supported
+ string = false;
+ }
+ } finally { // Clean up
+ root.removeChild(el);
+ el = null;
+ }
+
+ return { string: string, object: object }
+
+ }(window));
if (!! navigator.getUserMedia_ ) {
- if ( !(options.audio && options.video) ) {
- alert('This mode is not yet supported: NOT_SUPPORTED_ERR');
- } else {
-
- var container, temp, video, ow, oh;
-
- container = document.getElementById(options.el);
- temp = document.createElement('video');
-
- // Fix for ratio
- ow = parseInt(container.offsetWidth);
- oh = parseInt(container.offsetHeight);
- if(options.width < ow && options.height < oh){
- options.width = ow;
- options.height = oh;
- }
+ if( !optionStyle.string && !optionStyle.object ) {
+ return undefined;
+ }
+
+ var getUserMediaOptions = undefined;
+ if( optionStyle.string ) {
+ if( options.video && options.audio )
+ getUserMediaOptions = 'video, audio';
+ else if( options.video )
+ getUserMediaOptions = 'video';
+ else if( options.audio )
+ getUserMediaOptions = 'audio';
+    } else if( optionStyle.object ) {
+      getUserMediaOptions = {};
+ if( options.video )
+ getUserMediaOptions.video = true;
+ if( options.audio )
+ getUserMediaOptions.audio = true;
+ }
+
+ var container, temp, video, ow, oh;
+
+ container = document.getElementById(options.el);
+ temp = document.createElement('video');
+
+ // Fix for ratio
+ ow = parseInt(container.offsetWidth);
+ oh = parseInt(container.offsetHeight);
+ if(options.width < ow && options.height < oh){
+ options.width = ow;
+ options.height = oh;
+ }
- temp.width = options.width;
- temp.height = options.height;
- temp.autoplay = true;
- container.appendChild(temp);
- video = temp;
- options.videoEl = video;
- options.context = 'webrtc';
+ temp.width = options.width;
+ temp.height = options.height;
+ temp.autoplay = true;
+ container.appendChild(temp);
+ video = temp;
+ options.videoEl = video;
+ options.context = 'webrtc';
- navigator.getUserMedia_('video', successCallback, errorCallback);
+ navigator.getUserMedia_(getUserMediaOptions, successCallback, errorCallback);
- }
} else {
// fallback to flash
var source, el, cam;
|
Added getUserMedia option style detection to support both string- and dictionary-style option syntaxes.
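The detection idea, probing which argument style an API accepts by calling it and catching the failure, translates to other languages too. A hedged Python analogy with a hypothetical probe target:

    def detect_option_style(get_user_media):
        nop = lambda *args: None
        object_style = string_style = True
        try:                                    # spec (dict) syntax
            get_user_media({'video': True, 'audio': True}, nop)
        except TypeError:
            object_style = False
        try:                                    # legacy string syntax
            get_user_media('video, audio', nop)
        except TypeError:
            string_style = False
        return {'string': string_style, 'object': object_style}

    def modern_api(options, callback):          # hypothetical probe target
        if not isinstance(options, dict):
            raise TypeError('options must be a dict')

    print(detect_option_style(modern_api))      # {'string': False, 'object': True}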
|
addyosmani_getUserMedia.js
|
train
|
3f34cd08020afae64f43c698dd0711b62008eabe
|
diff --git a/regex_field/fields.py b/regex_field/fields.py
index <HASH>..<HASH> 100644
--- a/regex_field/fields.py
+++ b/regex_field/fields.py
@@ -37,7 +37,7 @@ class RegexField(CharField):
def value_to_string(self, obj):
if obj is None:
- return 'None'
+ return None
else:
return obj.pattern
diff --git a/test_project/tests/regex_field_tests.py b/test_project/tests/regex_field_tests.py
index <HASH>..<HASH> 100644
--- a/test_project/tests/regex_field_tests.py
+++ b/test_project/tests/regex_field_tests.py
@@ -48,6 +48,8 @@ class RegexFieldTest(TestCase):
"""
test_obj = NullTrueModel.objects.create(regex=None)
self.assertEquals(test_obj.regex, None)
+ test_obj = NullTrueModel.objects.get(id=test_obj.id)
+ self.assertEquals(test_obj.regex, None)
def test_save_str(self):
"""
|
Fixed a bug when retrieving a null value
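The bug in miniature: returning the string 'None' instead of None makes a null value round-trip as a four-character regex pattern. A minimal illustration:

    import re

    def value_to_string_buggy(obj):   # old behaviour
        return 'None' if obj is None else obj.pattern

    def value_to_string(obj):         # fixed behaviour
        return None if obj is None else obj.pattern

    stored = value_to_string_buggy(None)
    print(repr(stored))                       # 'None' -- a real string!
    print(re.compile(stored).match('None'))   # matches 'None' instead of being null
    print(value_to_string(None))              # None, as expected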
|
ambitioninc_django-regex-field
|
train
|
da49cd1a15bcd746e7254baae4ffe614107e8088
|
diff --git a/bot.go b/bot.go
index <HASH>..<HASH> 100644
--- a/bot.go
+++ b/bot.go
@@ -137,16 +137,12 @@ func (b *IrcBot) Disconnect() {
b.Exit <- true
}
-func (b *IrcBot) IsJoined() bool {
- return b.joined
-}
-
//Say makes the bot say text to channel
func (b *IrcBot) Say(channel string, text string) {
msg := NewIrcMsg()
msg.Command = "PRIVMSG"
- msg.Channel = channel
- msg.Args = append(msg.Args, ":"+text)
+ msg.CmdParams = []string{channel}
+ msg.Trailing = []string{":", text}
b.ChOut <- msg
}
@@ -213,11 +209,32 @@ func (b *IrcBot) listen() {
if err != nil {
b.ChError <- err
}
- //convert line into IrcMsg
- msg := ParseLine(line)
- b.ChIn <- msg
- if err := logMsg(msg, b.db); err != nil {
- b.ChError <- err
+ // fmt.Println("DEBUG:", line)
+
+ //remove prefix from raw message
+    //useful to decide how to handle the message
+ withoutPrefix := strings.SplitAfterN(line, " ", 2)[1]
+
+ // end of MODT
+ if strings.Contains(withoutPrefix, "376") {
+ b.join()
+ }
+
+ if strings.Contains(withoutPrefix, "PING") {
+ out := strings.Replace(withoutPrefix, "PING", "PONG", -1)
+ b.writer.PrintfLine(out)
+ // fmt.Println("DEBUG:", out)
+ }
+
+ if strings.Contains(line, "PRIVMSG") || strings.Contains(line, "JOIN") {
+ //convert line into IrcMsg
+ msg := ParseLine(line)
+ b.ChIn <- msg
+
+ if err := logMsg(msg, b.db); err != nil {
+ b.ChError <- err
+ }
+
}
}
@@ -239,25 +256,21 @@ func (b *IrcBot) handleActionIn() {
for {
//receive new message
msg := <-b.ChIn
- // fmt.Println("irc << ", msg.Raw)
+ // fmt.Println("DEBUG :", msg)
- if msg.Command == "JOIN" && msg.Nick == b.Nick {
- b.joined = true
- }
-
- if msg.Command == "PRIVMSG" && strings.HasPrefix(msg.Args[0], ":.") {
- cmd := strings.TrimPrefix(msg.Args[0], ":")
- action, ok := b.handlersUser[cmd]
+ //action fired by user
+ if msg.Command == "PRIVMSG" && strings.HasPrefix(msg.Trailing[0], ".") {
+ action, ok := b.HandlersUser[msg.Trailing[0]]
if ok {
action.Do(b, msg)
}
- } else {
- actions, ok := b.handlersIntern[msg.Command]
- //handle action
- if ok && len(actions) > 0 {
- for _, action := range actions {
- action.Do(b, msg)
- }
+ }
+
+ //action fired by event
+ actions, ok := b.handlersIntern[msg.Command]
+ if ok && len(actions) > 0 {
+ for _, action := range actions {
+ action.Do(b, msg)
}
}
}
@@ -269,13 +282,8 @@ func (b *IrcBot) handleActionOut() {
for {
msg := <-b.ChOut
- //we send nothing before we sure we join channel
- if b.joined == false {
- continue
- }
-
- s := fmt.Sprintf("%s %s %s", msg.Command, msg.Channel, strings.Join(msg.Args, " "))
- // fmt.Println("irc >> ", s)
+ s := fmt.Sprintf("%s %s %s", msg.Command, strings.Join(msg.CmdParams, " "), strings.Join(msg.Trailing, " "))
+ fmt.Println("irc >> ", s)
b.writer.PrintfLine(s)
}
}()
diff --git a/db.go b/db.go
index <HASH>..<HASH> 100644
--- a/db.go
+++ b/db.go
@@ -71,8 +71,8 @@ func (d *DB) exec(sql string, args ...interface{}) error {
func logMsg(m *IrcMsg, db *DB) error {
sql := "INSERT INTO logs (nick,message,channel,timestamp) VALUES ($nick,$message,$channel,$timestamp)"
- msg := strings.Join(m.Args, " ")
- if err := db.exec(sql, m.Nick, msg, m.Channel, time.Now()); err != nil {
+ msg := strings.Join(m.Trailing, " ")
+ if err := db.exec(sql, m.Nick(), msg, m.Channel(), time.Now()); err != nil {
db.log.Printf("error inserting logs : %s", err.Error())
return err
}
|
refactor bot core after refactor of ircMsg
|
zaibon_ircbot
|
train
|
a3a9f5fff0a46f1f7d5baf130961651cc63e0f84
|
diff --git a/src/test/java/com/mistraltech/smog/examples/extended/matcher/AddresseeMatcher.java b/src/test/java/com/mistraltech/smog/examples/extended/matcher/AddresseeMatcher.java
index <HASH>..<HASH> 100644
--- a/src/test/java/com/mistraltech/smog/examples/extended/matcher/AddresseeMatcher.java
+++ b/src/test/java/com/mistraltech/smog/examples/extended/matcher/AddresseeMatcher.java
@@ -10,7 +10,7 @@ import org.hamcrest.Matcher;
import static org.hamcrest.CoreMatchers.equalTo;
-public class AddresseeMatcher<R extends AddresseeMatcher, T extends Addressee> extends CompositePropertyMatcher<T> {
+public class AddresseeMatcher<R extends AddresseeMatcher<R, T>, T extends Addressee> extends CompositePropertyMatcher<T> {
private static final String MATCHED_OBJECT_DESCRIPTION = "an Addressee";
private PropertyMatcher<String> nameMatcher = new PropertyMatcher<String>("name", this);
diff --git a/src/test/java/com/mistraltech/smog/examples/extended/matcher/PersonMatcher.java b/src/test/java/com/mistraltech/smog/examples/extended/matcher/PersonMatcher.java
index <HASH>..<HASH> 100644
--- a/src/test/java/com/mistraltech/smog/examples/extended/matcher/PersonMatcher.java
+++ b/src/test/java/com/mistraltech/smog/examples/extended/matcher/PersonMatcher.java
@@ -13,7 +13,7 @@ import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.collection.IsEmptyCollection.empty;
import static org.hamcrest.collection.IsIterableContainingInOrder.contains;
-public class PersonMatcher<R extends PersonMatcher, T extends Person> extends AddresseeMatcher<R, T> {
+public class PersonMatcher<R extends PersonMatcher<R, T>, T extends Person> extends AddresseeMatcher<R, T> {
private static final String MATCHED_OBJECT_DESCRIPTION = "a Person";
private PropertyMatcher<Integer> ageMatcher = new PropertyMatcher<Integer>("age", this);
|
Tweaks to example matcher code.
|
mistraltechnologies_smog
|
train
|
f10e0c400b53fa36ad3ed18b811db7538853e702
|
diff --git a/java/com/couchbase/cblite/testapp/tests/Replicator.java b/java/com/couchbase/cblite/testapp/tests/Replicator.java
index <HASH>..<HASH> 100644
--- a/java/com/couchbase/cblite/testapp/tests/Replicator.java
+++ b/java/com/couchbase/cblite/testapp/tests/Replicator.java
@@ -194,6 +194,8 @@ public class Replicator extends CBLiteTestCase {
documentProperties.put("_deleted", true);
@SuppressWarnings("unused")
+ CBLRevision rev2 = database.putRevision(new CBLRevision(documentProperties, database), rev1.getRevId(), false, status);
+ Assert.assertTrue(status.getCode() >= 200 && status.getCode() < 300);
final CBLReplicator repl = database.getReplicator(remote, true, false, server.getWorkExecutor());
((CBLPusher)repl).setCreateTarget(true);
|
testPusherDeletedDoc was broken, and in fact does _not_ reproduce issue #<I>
|
couchbase_couchbase-lite-android
|
train
|
ada3da2894bf7fbfde22c1726a3ae3c020e338d8
|
diff --git a/lib/fluent/plugin/out_elasticsearch.rb b/lib/fluent/plugin/out_elasticsearch.rb
index <HASH>..<HASH> 100644
--- a/lib/fluent/plugin/out_elasticsearch.rb
+++ b/lib/fluent/plugin/out_elasticsearch.rb
@@ -482,6 +482,7 @@ EOC
if retries < 2
retries += 1
@_es = nil
+ @_es_info = nil
log.warn "Could not push logs to Elasticsearch, resetting connection and trying again. #{e.message}"
sleep 2**retries
retry
@@ -489,6 +490,7 @@ EOC
raise ConnectionFailure, "Could not push logs to Elasticsearch after #{retries} retries. #{e.message}"
rescue Exception
@_es = nil if @reconnect_on_error
+ @_es_info = nil if @reconnect_on_error
raise
end
end
|
Reset `@_es_info` variable when an error occurs
|
uken_fluent-plugin-elasticsearch
|
train
|
1bba45998d457c72edfb96cb384daa0915d4d8b5
|
diff --git a/tests/test_widgets.py b/tests/test_widgets.py
index <HASH>..<HASH> 100644
--- a/tests/test_widgets.py
+++ b/tests/test_widgets.py
@@ -2,10 +2,12 @@ from __future__ import absolute_import, unicode_literals
import json
import re
+import unittest
from bs4 import BeautifulSoup
from django.test import SimpleTestCase
from draftjs_exporter.constants import BLOCK_TYPES, ENTITY_TYPES
+from wagtail import VERSION as WAGTAIL_VERSION
from wagtaildraftail.widgets import DraftailTextArea
@@ -68,6 +70,7 @@ class DraftailTextAreaWidgetTestCase(SimpleTestCase):
options = self.extract_options_from_script(soup.script.text)
self.assertDictEqual(options, CUSTOM_OPTIONS)
+ @unittest.skipIf(WAGTAIL_VERSION < (1, 12), "Rich text features are only supported on Wagtail 1.12 and above")
def test_rendering_with_default_features(self):
"""
When no options or features are specified for the widget,
@@ -98,6 +101,7 @@ class DraftailTextAreaWidgetTestCase(SimpleTestCase):
self.assertTrue([entity for entity in options['entityTypes'] if entity['type'] == ENTITY_TYPES.LINK])
self.assertTrue([entity for entity in options['entityTypes'] if entity['type'] == ENTITY_TYPES.IMAGE])
+ @unittest.skipIf(WAGTAIL_VERSION < (1, 12), "Rich text features are only supported on Wagtail 1.12 and above")
def test_rendering_with_explicit_features(self):
"""
A features list passed to the widget should generate an options dict for those
diff --git a/wagtaildraftail/widgets.py b/wagtaildraftail/widgets.py
index <HASH>..<HASH> 100644
--- a/wagtaildraftail/widgets.py
+++ b/wagtaildraftail/widgets.py
@@ -9,11 +9,18 @@ from django.utils.inspect import func_supports_parameter
from draftjs_exporter.constants import ENTITY_TYPES
from wagtail.utils.widgets import WidgetWithScript
-from wagtail.wagtailadmin.rich_text import features as feature_registry
from wagtail.wagtailimages.formats import get_image_formats
from .draft_text import DraftText
+try:
+ # Wagtail >= 1.12
+ from wagtail.wagtailadmin.rich_text import features as feature_registry
+ RICH_TEXT_FEATURES_AVAILABLE = True
+except ImportError:
+ # Wagtail < 1.12
+ RICH_TEXT_FEATURES_AVAILABLE = False
+
def get_all_image_formats():
return [{'label': str(f.label), 'value': f.name} for f in get_image_formats()]
@@ -29,13 +36,16 @@ class DraftailTextArea(WidgetWithScript, forms.HiddenInput):
def __init__(self, attrs=None, options=None, features=None):
# Find or construct an 'options' dict for this editor to use, according to
# this order of precedence:
- # 1) If we receive an explicit 'features' list, build options from that
+ # 1) If we receive an explicit 'features' list (and we're on a Wagtail version
+ # that supports it), build options from that
# 2) If we receive an 'options' dict that looks like it's configuring things
# longhand (i.e. contains any of 'entityTypes' / 'blockTypes' / 'inlineStyles'),
# use that
- # 3) Otherwise, build options from the default feature set
+ # 3) If we're on a Wagtail version that supports rich text features,
+ # build options from the default feature set
+ # 4) Otherwise, use whatever 'options' dict we have
- if features is not None:
+ if RICH_TEXT_FEATURES_AVAILABLE and features is not None:
self.options = self._build_options_from_features(features)
elif (
options is not None and (
@@ -43,8 +53,10 @@ class DraftailTextArea(WidgetWithScript, forms.HiddenInput):
)
):
self.options = options
- else:
+ elif RICH_TEXT_FEATURES_AVAILABLE:
self.options = self._build_options_from_features(feature_registry.get_default_features())
+ else:
+ self.options = options or {}
# Whichever way we obtain the options dict, expand any references to imageFormats = '__all__'
self.options = self.intercept_image_formats(self.options)
|
Add fallback support for Wagtail <<I>
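The try/except ImportError pattern generalises to any optional dependency. A minimal self-contained sketch (the module path and helpers are hypothetical stand-ins for the Wagtail imports in the diff):

    try:
        # Hypothetical module; stands in for wagtail.wagtailadmin.rich_text.features
        from some_framework.rich_text import features as feature_registry
        RICH_TEXT_FEATURES_AVAILABLE = True
    except ImportError:
        RICH_TEXT_FEATURES_AVAILABLE = False

    def build_options_from_features(features):
        # Stub standing in for the registry-driven option builder.
        return {'features': list(features)}

    def build_options(features=None, options=None):
        if RICH_TEXT_FEATURES_AVAILABLE and features is not None:
            return build_options_from_features(features)
        return options or {}

    print(build_options(features=['bold'], options={'x': 1}))
    # {'x': 1} here, since the optional import is unavailable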
|
springload_wagtaildraftail
|
train
|
4fe9f278ea573074b282680dea09f8e2c1c3839e
|
diff --git a/lib/tumugi/version.rb b/lib/tumugi/version.rb
index <HASH>..<HASH> 100644
--- a/lib/tumugi/version.rb
+++ b/lib/tumugi/version.rb
@@ -1,3 +1,3 @@
module Tumugi
- VERSION = "0.6.2"
+ VERSION = "0.6.3"
end
|
Bump up version to <I>
|
tumugi_tumugi
|
train
|
7799e3534528d692d527d10228a9a3f334ace53f
|
diff --git a/monitor/monitor_test.go b/monitor/monitor_test.go
index <HASH>..<HASH> 100644
--- a/monitor/monitor_test.go
+++ b/monitor/monitor_test.go
@@ -3,7 +3,6 @@
package monitor
import (
- "bytes"
"context"
"crypto/rand"
"fmt"
@@ -33,7 +32,7 @@ type mockClient struct {
func addressIsAmong(x *common.Address, addresses []common.Address) bool {
for _, a := range addresses {
- if bytes.Equal(x.Bytes(), a.Bytes()) {
+ if *x == a {
return true
}
}
@@ -42,7 +41,7 @@ func addressIsAmong(x *common.Address, addresses []common.Address) bool {
func hashIsAmong(x *common.Hash, hashes []common.Hash) bool {
for _, h := range hashes {
- if bytes.Equal(x.Bytes(), h.Bytes()) {
+ if *x == h {
return true
}
}
|
Simplify address comparison in monitor tests
|
Privatix_dappctrl
|
train
|
949f522dc61b25a07ae43d600b6b6064fc605dae
|
diff --git a/client/lib/purchases/index.js b/client/lib/purchases/index.js
index <HASH>..<HASH> 100644
--- a/client/lib/purchases/index.js
+++ b/client/lib/purchases/index.js
@@ -56,6 +56,15 @@ function hasPrivateRegistration( purchase ) {
return purchase.hasPrivateRegistration;
}
+/**
+ * Checks if a purchase can be cancelled.
+ * Returns true for purchases that aren't expired.
+ * Also returns true whether or not the purchase is past the refund period.
+ * Purchases included with a plan can't be cancelled.
+ *
+ * @param {Object} purchase
+ * @return {boolean}
+ */
function isCancelable( purchase ) {
if ( isIncludedWithPlan( purchase ) ) {
return false;
@@ -101,10 +110,27 @@ function isRedeemable( purchase ) {
return purchase.isRedeemable;
}
+/**
+ * Checks if a purchase can be canceled and refunded.
+ * Purchases usually can be refunded up to 30 days after purchase.
+ * Domains and domain mappings can be refunded up to 48 hours after purchase.
+ * Purchases included with a plan can't be refunded.
+ *
+ * @param {Object} purchase
+ * @return {boolean}
+ */
function isRefundable( purchase ) {
return purchase.isRefundable;
}
+/**
+ * Checks if an expired purchase can be removed from a user account.
+ * Only domains and domain mappings can be removed.
+ * Purchases included with a plan can't be removed.
+ *
+ * @param {Object} purchase
+ * @return {boolean}
+ */
function isRemovable( purchase ) {
if ( isIncludedWithPlan( purchase ) ) {
return false;
|
Purchases: Add JSDoc for cancel related methods.
|
Automattic_wp-calypso
|
train
|
d9238bc84f02166dd84cbb66fe351cbb4cede0fc
|
diff --git a/src/HttpAdapter/HttpStreamWrapperClient.php b/src/HttpAdapter/HttpStreamWrapperClient.php
index <HASH>..<HASH> 100644
--- a/src/HttpAdapter/HttpStreamWrapperClient.php
+++ b/src/HttpAdapter/HttpStreamWrapperClient.php
@@ -91,7 +91,7 @@ final class HttpStreamWrapperClient implements Client
$context = stream_context_create($options);
// Suppress errors for file_get_contents. We will analyze this ourselves.
- set_error_handler(fn() => true);
+ $errorReportingLevelBeforeFileGetContents = error_reporting(0);
$responseBody = file_get_contents(
$url,
@@ -99,10 +99,12 @@ final class HttpStreamWrapperClient implements Client
$context
);
- restore_error_handler();
+ error_reporting($errorReportingLevelBeforeFileGetContents);
if ($responseBody === false) {
- throw new NchanException('Unable to connect to ' . $url . '.');
+ throw new NchanException(
+ error_get_last()['message'] ?? 'Unable to connect to ' . $url . '.'
+ );
}
return HttpStreamWrapperResponse::fromResponse($http_response_header, $responseBody);
|
Improve exception messages
If possible, use the last error produced by file_get_contents
as an exception message
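A rough Python parallel: prefer the concrete error message from the failed call and fall back to a generic one:

    import urllib.request

    class NchanException(Exception):
        pass

    def fetch(url):
        try:
            with urllib.request.urlopen(url) as response:
                return response.read()
        except OSError as exc:
            # Use the concrete error if there is one, else a generic message.
            raise NchanException(str(exc) or 'Unable to connect to %s.' % url)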
|
marein_php-nchan-client
|
train
|
4677812dc464e719474898df952b576796284a33
|
diff --git a/xchange-bitcoinde/src/main/java/com/xeiam/xchange/bitcoinde/BitcoindeAdapters.java b/xchange-bitcoinde/src/main/java/com/xeiam/xchange/bitcoinde/BitcoindeAdapters.java
index <HASH>..<HASH> 100644
--- a/xchange-bitcoinde/src/main/java/com/xeiam/xchange/bitcoinde/BitcoindeAdapters.java
+++ b/xchange-bitcoinde/src/main/java/com/xeiam/xchange/bitcoinde/BitcoindeAdapters.java
@@ -66,7 +66,7 @@ public final class BitcoindeAdapters {
*/
public static LimitOrder createOrder(CurrencyPair currencyPair, BigDecimal[] priceAndAmount, Order.OrderType orderType) {
- return new LimitOrder(orderType, priceAndAmount[1], currencyPair, "", null, priceAndAmount[0]);
+ return new LimitOrder(orderType, priceAndAmount[1], currencyPair, null, null, priceAndAmount[0]);
}
/**
diff --git a/xchange-bitcoinde/src/main/java/com/xeiam/xchange/bitcoinde/dto/marketdata/BitcoindeOrderBook.java b/xchange-bitcoinde/src/main/java/com/xeiam/xchange/bitcoinde/dto/marketdata/BitcoindeOrderBook.java
index <HASH>..<HASH> 100644
--- a/xchange-bitcoinde/src/main/java/com/xeiam/xchange/bitcoinde/dto/marketdata/BitcoindeOrderBook.java
+++ b/xchange-bitcoinde/src/main/java/com/xeiam/xchange/bitcoinde/dto/marketdata/BitcoindeOrderBook.java
@@ -2,6 +2,8 @@ package com.xeiam.xchange.bitcoinde.dto.marketdata;
import java.math.BigDecimal;
import java.util.Arrays;
+import java.util.Comparator;
+
import com.fasterxml.jackson.annotation.JsonProperty;
/**
@@ -20,8 +22,30 @@ public class BitcoindeOrderBook {
*/
public BitcoindeOrderBook(@JsonProperty("asks") BigDecimal[][] asks, @JsonProperty("bids") BigDecimal[][] bids) {
+ /* set the asks and bids */
this.asks = asks;
this.bids = bids;
+
+ /* sort the asks in ascending order by price */
+ Arrays.sort(this.asks, new Comparator<BigDecimal[]>() {
+ @Override
+ public int compare(final BigDecimal[] entry1, final BigDecimal[] entry2) {
+ final BigDecimal price1 = entry1[0]; // the first element of entry
+ final BigDecimal price2 = entry2[0]; // is the price in EUR
+
+ return price1.compareTo(price2);
+ }
+ });
+
+ /* sort the bids in descending order by price */
+ Arrays.sort(this.bids, new Comparator<BigDecimal[]>() {
+ @Override
+ public int compare(final BigDecimal[] entry1, final BigDecimal[] entry2) {
+ final BigDecimal price1 = entry1[0]; // the first element of entry
+ final BigDecimal price2 = entry2[0]; // is the price in EUR
+ return -1 * price1.compareTo(price2); // multiply by -1 to reverse the order (we want descending)
+ }
+ });
}
public BigDecimal[][] getAsks() {
diff --git a/xchange-bitcoinde/src/main/java/com/xeiam/xchange/bitcoinde/service/polling/BitcoindeMarketDataService.java b/xchange-bitcoinde/src/main/java/com/xeiam/xchange/bitcoinde/service/polling/BitcoindeMarketDataService.java
index <HASH>..<HASH> 100644
--- a/xchange-bitcoinde/src/main/java/com/xeiam/xchange/bitcoinde/service/polling/BitcoindeMarketDataService.java
+++ b/xchange-bitcoinde/src/main/java/com/xeiam/xchange/bitcoinde/service/polling/BitcoindeMarketDataService.java
@@ -8,6 +8,7 @@ import com.xeiam.xchange.currency.CurrencyPair;
import com.xeiam.xchange.dto.marketdata.OrderBook;
import com.xeiam.xchange.dto.marketdata.Ticker;
import com.xeiam.xchange.dto.marketdata.Trades;
+import com.xeiam.xchange.exceptions.NotAvailableFromExchangeException;
import com.xeiam.xchange.service.polling.marketdata.PollingMarketDataService;
/**
@@ -28,7 +29,7 @@ public class BitcoindeMarketDataService extends BitcoindeMarketDataServiceRaw im
@Override
public Ticker getTicker(CurrencyPair currencyPair, Object... args) throws IOException {
- return BitcoindeAdapters.adaptTicker(getBitcoindeRate(), currencyPair);
+ throw new NotAvailableFromExchangeException();
}
@Override
|
Sort the order book in the raw class: asks ascending, bids descending by price
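
On Java 8+, the same ordering could be written with Comparator.comparing instead of anonymous classes. A hedged sketch, assuming each entry is [price, amount] as above (OrderBookSortSketch is a made-up name, not actual XChange code):

import java.math.BigDecimal;
import java.util.Arrays;
import java.util.Comparator;

final class OrderBookSortSketch {

    static void sort(BigDecimal[][] asks, BigDecimal[][] bids) {
        // Asks ascending: cheapest offer first. entry[0] is the price.
        Arrays.sort(asks, Comparator.comparing(entry -> entry[0]));

        // Bids descending: highest bid first. reversed() needs an explicit
        // lambda parameter type here for inference to succeed.
        Arrays.sort(bids, Comparator.comparing((BigDecimal[] entry) -> entry[0]).reversed());
    }
}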
|
knowm_XChange
|
train
|
5125d0ff5df407028dd35673d254056820ec594b
|
diff --git a/system/rake-support/lib/torquebox/rake/tasks/deployment.rb b/system/rake-support/lib/torquebox/rake/tasks/deployment.rb
index <HASH>..<HASH> 100644
--- a/system/rake-support/lib/torquebox/rake/tasks/deployment.rb
+++ b/system/rake-support/lib/torquebox/rake/tasks/deployment.rb
@@ -16,6 +16,7 @@
# 02110-1301 USA, or see the FSF site: http://www.fsf.org.
require 'rake'
+require 'torquebox/rake/tasks/rake_utils'
def deployment_descriptor(root, env, context_path)
d = {}
@@ -43,7 +44,7 @@ end
def deployment(app_name, root, context_path)
env = defined?(RACK_ENV) ? RACK_ENV : ENV['RACK_ENV']
if ( env.nil? )
- env = defined?(RAILS_ENV) ? RAILS_ENV : ENV['RAILS_ENV']
+ env = defined?(::Rails) ? ::Rails.env : ENV['RAILS_ENV']
end
[ "#{app_name}-knob.yml", deployment_descriptor( root, env, context_path) ]
@@ -56,7 +57,8 @@ namespace :torquebox do
app_name = File.basename( Dir.pwd )
deployment_name, deployment_descriptor = deployment( app_name, Dir.pwd, args[:context_path] )
TorqueBox::RakeUtils.deploy_yaml( deployment_name, deployment_descriptor )
- puts "Deployed #{deployment_name}"
+ puts "Deployed: #{deployment_name}"
+ puts " into: #{TorqueBox::RakeUtils.deploy_dir}"
end
desc "Undeploy the app in the current directory"
@@ -64,7 +66,7 @@ namespace :torquebox do
app_name = File.basename( Dir.pwd )
deployment_name = "#{app_name}-knob.yml"
TorqueBox::RakeUtils.undeploy( deployment_name )
- puts "Undeployed #{deployment_name}"
+ puts "Undeployed: #{deployment_name}"
end
desc "Create (if needed) and deploy as application archive"
@@ -73,7 +75,8 @@ namespace :torquebox do
archive_name = get_archive_name
src = File.join("#{Dir.pwd}", "#{archive_name}")
FileUtils.cp( src, TorqueBox::RakeUtils.deploy_dir )
- puts "Deployed #{archive_name}"
+ puts "Deployed: #{archive_name}"
+ puts " into: #{TorqueBox::RakeUtils.deploy_dir}"
end
end
namespace :undeploy do
diff --git a/system/rake-support/lib/torquebox/rake/tasks/rake_utils.rb b/system/rake-support/lib/torquebox/rake/tasks/rake_utils.rb
index <HASH>..<HASH> 100644
--- a/system/rake-support/lib/torquebox/rake/tasks/rake_utils.rb
+++ b/system/rake-support/lib/torquebox/rake/tasks/rake_utils.rb
@@ -30,6 +30,13 @@ module TorqueBox
raise "$JBOSS_HOME is not set" unless jboss_home
return jboss_home
end
+ def self.torquebox_home
+ torquebox_home = nil
+ if ( ENV['TORQUEBOX_HOME'] )
+ torquebox_home = File.expand_path(ENV['TORQUEBOX_HOME'])
+ end
+ torquebox_home
+ end
def self.jboss_conf
ENV['TORQUEBOX_CONF'] || ENV['JBOSS_CONF'] || 'default'
end
@@ -43,7 +50,12 @@ module TorqueBox
File.join("#{config_dir}","props")
end
def self.deploy_dir
- File.join("#{server_dir}","deploy")
+ d = File.join( torquebox_home, 'apps' )
+ if ( File.exists?( d ) && File.directory?( d ) )
+ return d
+ end
+
+ File.join( "#{server_dir}", "deploy" )
end
def self.deployers_dir
File.join("#{server_dir}","deployers")
|
Prefer TORQUEBOX_HOME/apps if it exists; otherwise fall back to the typical deployment into deploy/
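
A hedged Ruby sketch of the resulting lookup order (a free-standing resolve_deploy_dir helper for illustration, not the actual TorqueBox module):

# Resolve where knobs/archives should be copied: prefer
# $TORQUEBOX_HOME/apps when that directory exists, otherwise fall back
# to the server's deploy/ directory.
def resolve_deploy_dir(torquebox_home, server_dir)
  if torquebox_home
    apps = File.join(torquebox_home, 'apps')
    return apps if File.directory?(apps)
  end

  File.join(server_dir, 'deploy')
end

# e.g. resolve_deploy_dir(ENV['TORQUEBOX_HOME'], '/opt/jboss/server/default')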
|
torquebox_torquebox
|
train
|