code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
#### Note: this error code is no longer emitted by the compiler
You implemented a trait, overriding one or more of its associated types but did
not reimplement its default methods.
Example of erroneous code:
```
#![feature(associated_type_defaults)]
pub trait Foo {
type Assoc = u8;
fn bar(&self) {}
}
impl Foo for i32 {
// error - the following trait items need to be reimplemented as
// `Assoc` was overridden: `bar`
type Assoc = i32;
}
```
To fix this, add an implementation for each default method from the trait:
```
#![feature(associated_type_defaults)]
pub trait Foo {
type Assoc = u8;
fn bar(&self) {}
}
impl Foo for i32 {
type Assoc = i32;
fn bar(&self) {} // ok!
}
``` | unknown | github | https://github.com/rust-lang/rust | compiler/rustc_error_codes/src/error_codes/E0399.md |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
import webapp2
import webtest
from google.appengine.ext import ndb
from dashboard import update_test_suites
from dashboard.common import descriptor
from dashboard.common import namespaced_stored_object
from dashboard.common import stored_object
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import graph_data
class ListTestSuitesTest(testing_common.TestCase):
  """Tests for the update_test_suites module.

  Exercises the /update_test_suites HTTP handler plus the helpers that
  build and cache the test-suite listing keyed off graph_data entities.
  """

  def setUp(self):
    # Routes the handler under test and seeds the descriptor configuration
    # (partial-suite and groupable-prefix keys) that suite parsing reads.
    super(ListTestSuitesTest, self).setUp()
    app = webapp2.WSGIApplication([
        ('/update_test_suites', update_test_suites.UpdateTestSuitesHandler)
    ])
    self.testapp = webtest.TestApp(app)
    testing_common.SetIsInternalUser('internal@chromium.org', True)
    self.UnsetCurrentUser()
    stored_object.Set(descriptor.PARTIAL_TEST_SUITES_KEY, [
        'TEST_PARTIAL_TEST_SUITE',
    ])
    stored_object.Set(descriptor.GROUPABLE_TEST_SUITE_PREFIXES_KEY, [
        'TEST_GROUPABLE%',
    ])
    # Clear memoized config so the keys set above take effect.
    descriptor.Descriptor.ResetMemoizedConfigurationForTesting()

  def testFetchCachedTestSuites_NotEmpty(self):
    # If the cache is set, then whatever's there is returned.
    key = namespaced_stored_object.NamespaceKey(
        update_test_suites._LIST_SUITES_CACHE_KEY)
    stored_object.Set(key, {'foo': 'bar'})
    self.assertEqual({'foo': 'bar'}, update_test_suites.FetchCachedTestSuites())

  def _AddSampleData(self):
    """Adds a fixed tree of TestMetadata rows on two Chromium bots."""
    testing_common.AddTests(
        ['Chromium'], ['win7', 'mac'], {
            'dromaeo': {
                'dom': {},
                'jslib': {},
            },
            'scrolling': {
                'commit_time': {
                    'www.yahoo.com': {},
                    'www.cnn.com': {},
                },
                'commit_time_ref': {},
            },
            'really': {
                'nested': {
                    'very': {
                        'deeply': {
                            'subtest': {}
                        }
                    },
                    'very_very': {}
                }
            },
        })

  def testPost_ForcesCacheUpdate(self):
    key = namespaced_stored_object.NamespaceKey(
        update_test_suites._LIST_SUITES_CACHE_KEY)
    stored_object.Set(key, {'foo': 'bar'})
    self.assertEqual({'foo': 'bar'}, update_test_suites.FetchCachedTestSuites())
    self._AddSampleData()
    # Because there is something cached, the cache is
    # not automatically updated when new data is added.
    self.assertEqual({'foo': 'bar'}, update_test_suites.FetchCachedTestSuites())
    stored_object.Set(
        namespaced_stored_object.NamespaceKey(
            update_test_suites.TEST_SUITES_2_CACHE_KEY), ['foo'])
    self.assertEqual(['foo'], update_test_suites.FetchCachedTestSuites2())
    # Making a request to /update_test_suites forces an update.
    self.testapp.post('/update_test_suites')
    self.assertEqual(
        {
            'dromaeo': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'scrolling': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'really': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
        }, update_test_suites.FetchCachedTestSuites())
    self.assertEqual(['dromaeo', 'really', 'scrolling'],
                     update_test_suites.FetchCachedTestSuites2())

  def testPost_InternalOnly(self):
    # Internal-only bots/tests show up only for internal users.
    self.SetCurrentUser('internal@chromium.org')
    self._AddSampleData()
    master_key = ndb.Key('Master', 'Chromium')
    graph_data.Bot(
        id='internal_mac', parent=master_key, internal_only=True).put()
    t = graph_data.TestMetadata(
        id='Chromium/internal_mac/internal_test', internal_only=True)
    t.UpdateSheriff()
    t.put()
    self.testapp.post('/update_test_suites?internal_only=true')
    self.assertEqual(
        {
            'dromaeo': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'internal_test': {
                'mas': {
                    'Chromium': {
                        'internal_mac': False
                    }
                },
            },
            'scrolling': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'really': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
        }, update_test_suites.FetchCachedTestSuites())

  def testFetchCachedTestSuites_Empty_UpdatesWhenFetching(self):
    # If the cache is not set at all, then FetchCachedTestSuites
    # just updates the cache before returning the list.
    self._AddSampleData()
    self.assertEqual(
        {
            'dromaeo': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'scrolling': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'really': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
        }, update_test_suites.FetchCachedTestSuites())

  def testFetchSuites_BasicDescription(self):
    # A description set on every bot's suite row surfaces as 'des'.
    self._AddSampleData()
    for test_path in ['Chromium/win7/scrolling', 'Chromium/mac/scrolling']:
      test = utils.TestKey(test_path).get()
      test.description = 'Description string.'
      test.UpdateSheriff()
      test.put()
    self.assertEqual(
        {
            'dromaeo': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'scrolling': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
                'des': 'Description string.'
            },
            'really': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
        }, update_test_suites.FetchCachedTestSuites())

  def testFetchSuites_DifferentMasters(self):
    # If the cache is not set at all, then FetchCachedTestSuites
    # just updates the cache before returning the list.
    self._AddSampleData()
    testing_common.AddTests(['ChromiumFYI'], ['linux'], {
        'sunspider': {
            'Total': {},
        },
    })
    self.assertEqual(
        {
            'dromaeo': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'scrolling': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'really': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'sunspider': {
                'mas': {
                    'ChromiumFYI': {
                        'linux': False
                    }
                }
            },
        }, update_test_suites._CreateTestSuiteDict())

  def testFetchSuites_SingleDeprecatedBot(self):
    self._AddSampleData()
    # Deprecate the suite on one bot only -- that bot is flagged True but
    # the suite itself carries no top-level 'dep' marker.
    for bot in ['win7']:
      test = utils.TestKey('Chromium/%s/really' % bot).get()
      test.deprecated = True
      test.UpdateSheriff()
      test.put()
    self.assertEqual(
        {
            'dromaeo': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'scrolling': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'really': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': True
                    }
                }
            },
        }, update_test_suites._CreateTestSuiteDict())

  def testFetchSuites_AllDeprecatedBots(self):
    self._AddSampleData()
    # For another test suite, set it as deprecated on both bots -- it should
    # be marked as deprecated in the response dict.
    for bot in ['win7', 'mac']:
      test = utils.TestKey('Chromium/%s/really' % bot).get()
      test.deprecated = True
      test.UpdateSheriff()
      test.put()
    self.assertEqual(
        {
            'dromaeo': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'scrolling': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'really': {
                'dep': True,
                'mas': {
                    'Chromium': {
                        'mac': True,
                        'win7': True
                    }
                }
            },
        }, update_test_suites._CreateTestSuiteDict())

  def testFetchSuites_BasicMonitored(self):
    self._AddSampleData()
    self.assertEqual(
        {
            'dromaeo': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'scrolling': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'really': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                }
            },
        }, update_test_suites._CreateTestSuiteDict())

  def testFetchSuites_MultipleMonitored(self):
    # The same suite name on a second master merges under one 'mas' dict.
    self._AddSampleData()
    testing_common.AddTests(['ChromiumFYI'], ['linux'], {
        'dromaeo': {
            'foo': {},
        },
    })
    self.assertEqual(
        {
            'dromaeo': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    },
                    'ChromiumFYI': {
                        'linux': False
                    }
                },
            },
            'scrolling': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                },
            },
            'really': {
                'mas': {
                    'Chromium': {
                        'mac': False,
                        'win7': False
                    }
                }
            },
        }, update_test_suites._CreateTestSuiteDict())

  def testFetchSuites(self):
    # _FetchSuites returns only top-level suite rows, ordered by key.
    self._AddSampleData()
    suites = update_test_suites._FetchSuites()
    suite_keys = [s.key for s in suites]
    self.assertEqual(
        list(
            map(utils.TestKey, [
                'Chromium/mac/dromaeo',
                'Chromium/mac/really',
                'Chromium/mac/scrolling',
                'Chromium/win7/dromaeo',
                'Chromium/win7/really',
                'Chromium/win7/scrolling',
            ])), suite_keys)

  def testGetSubTestPath(self):
    # The sub-test path is everything after master/bot/suite.
    key = utils.TestKey('Chromium/mac/my_suite/foo/bar')
    self.assertEqual('foo/bar', update_test_suites._GetTestSubPath(key))

  def testPartialTestSuites(self):
    # Suites listed in PARTIAL_TEST_SUITES_KEY are combined with their first
    # child into a composite "partial:child" suite name.
    testing_common.AddTests(['master'], ['bot'], {
        'TEST_PARTIAL_TEST_SUITE': {
            'COMPOSITE': {
                'measurement': {},
            },
        },
    })
    self.testapp.post('/update_test_suites')
    self.assertEqual(['TEST_PARTIAL_TEST_SUITE:COMPOSITE'],
                     update_test_suites.FetchCachedTestSuites2())
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
{% load i18n admin_urls %}
{# Django admin stacked-inline formset: one .inline-related block per form, #}
{# plus a trailing empty-form used by the JS to add new rows dynamically.   #}
<div class="js-inline-admin-formset inline-group"
     id="{{ inline_admin_formset.formset.prefix }}-group"
     data-inline-type="stacked"
     data-inline-formset="{{ inline_admin_formset.inline_formset_data }}">
<fieldset class="module {{ inline_admin_formset.classes }}" aria-labelledby="{{ inline_admin_formset.formset.prefix }}-heading">
  {% if inline_admin_formset.is_collapsible %}<details><summary>{% endif %}
  <h2 id="{{ inline_admin_formset.formset.prefix }}-heading" class="inline-heading">
    {% if inline_admin_formset.formset.max_num == 1 %}
      {{ inline_admin_formset.opts.verbose_name|capfirst }}
    {% else %}
      {{ inline_admin_formset.opts.verbose_name_plural|capfirst }}
    {% endif %}
  </h2>
  {% if inline_admin_formset.is_collapsible %}</summary>{% endif %}
{{ inline_admin_formset.formset.management_form }}
{{ inline_admin_formset.formset.non_form_errors }}
{# The last form is rendered as the hidden "empty" template when adding is allowed. #}
{% for inline_admin_form in inline_admin_formset %}<div class="inline-related{% if inline_admin_form.original or inline_admin_form.show_url %} has_original{% endif %}{% if forloop.last and inline_admin_formset.has_add_permission %} empty-form last-related{% endif %}" id="{{ inline_admin_formset.formset.prefix }}-{% if forloop.last and inline_admin_formset.has_add_permission %}empty{% else %}{{ forloop.counter0 }}{% endif %}">
<h3><b>{{ inline_admin_formset.opts.verbose_name|capfirst }}:</b>&nbsp;<span class="inline_label">{% if inline_admin_form.original %}{{ inline_admin_form.original }}{% if inline_admin_form.model_admin.show_change_link and inline_admin_form.model_admin.has_registered_model %} <a href="{% url inline_admin_form.model_admin.opts|admin_urlname:'change' inline_admin_form.original.pk|admin_urlquote %}" class="{{ inline_admin_formset.has_change_permission|yesno:'inlinechangelink,inlineviewlink' }}">{% if inline_admin_formset.has_change_permission %}{% translate "Change" %}{% else %}{% translate "View" %}{% endif %}</a>{% endif %}
{% else %}#{{ forloop.counter }}{% endif %}</span>
    {% if inline_admin_form.show_url %}<a href="{{ inline_admin_form.absolute_url }}">{% translate "View on site" %}</a>{% endif %}
  {% if inline_admin_formset.formset.can_delete and inline_admin_formset.has_delete_permission and inline_admin_form.original %}<span class="delete">{{ inline_admin_form.deletion_field.field }} {{ inline_admin_form.deletion_field.label_tag }}</span>{% endif %}
</h3>
  {% if inline_admin_form.form.non_field_errors %}{{ inline_admin_form.form.non_field_errors }}{% endif %}
  {% with parent_counter=forloop.counter0 %}
  {% for fieldset in inline_admin_form %}
    {% include "admin/includes/fieldset.html" with heading_level=4 prefix=fieldset.formset.prefix id_prefix=parent_counter id_suffix=forloop.counter0 %}
  {% endfor %}
  {% endwith %}
  {# Hidden pk/fk fields keep each form bound to its existing database row. #}
  {% if inline_admin_form.needs_explicit_pk_field %}{{ inline_admin_form.pk_field.field }}{% endif %}
  {% if inline_admin_form.fk_field %}{{ inline_admin_form.fk_field.field }}{% endif %}
</div>{% endfor %}
{% if inline_admin_formset.is_collapsible %}</details>{% endif %}
</fieldset>
</div> | html | github | https://github.com/django/django | django/contrib/admin/templates/admin/edit_inline/stacked.html |
/**
* @license
* Lodash <https://lodash.com/>
* Copyright OpenJS Foundation and other contributors <https://openjsf.org/>
* Released under MIT license <https://lodash.com/license>
* Based on Underscore.js 1.8.3 <http://underscorejs.org/LICENSE>
* Copyright Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
*/
;(function() {
/** Used as a safe reference for `undefined` in pre-ES5 environments. */
var undefined;
/** Used as the semantic version number. */
var VERSION = '4.17.23';
/** Used as the size to enable large array optimizations. */
var LARGE_ARRAY_SIZE = 200;
/** Error message constants. */
var CORE_ERROR_TEXT = 'Unsupported core-js use. Try https://npms.io/search?q=ponyfill.',
FUNC_ERROR_TEXT = 'Expected a function',
INVALID_TEMPL_VAR_ERROR_TEXT = 'Invalid `variable` option passed into `_.template`';
/** Used to stand-in for `undefined` hash values. */
var HASH_UNDEFINED = '__lodash_hash_undefined__';
/** Used as the maximum memoize cache size. */
var MAX_MEMOIZE_SIZE = 500;
/** Used as the internal argument placeholder. */
var PLACEHOLDER = '__lodash_placeholder__';
/** Used to compose bitmasks for cloning. */
var CLONE_DEEP_FLAG = 1,
CLONE_FLAT_FLAG = 2,
CLONE_SYMBOLS_FLAG = 4;
/** Used to compose bitmasks for value comparisons. */
var COMPARE_PARTIAL_FLAG = 1,
COMPARE_UNORDERED_FLAG = 2;
/** Used to compose bitmasks for function metadata. */
var WRAP_BIND_FLAG = 1,
WRAP_BIND_KEY_FLAG = 2,
WRAP_CURRY_BOUND_FLAG = 4,
WRAP_CURRY_FLAG = 8,
WRAP_CURRY_RIGHT_FLAG = 16,
WRAP_PARTIAL_FLAG = 32,
WRAP_PARTIAL_RIGHT_FLAG = 64,
WRAP_ARY_FLAG = 128,
WRAP_REARG_FLAG = 256,
WRAP_FLIP_FLAG = 512;
/** Used as default options for `_.truncate`. */
var DEFAULT_TRUNC_LENGTH = 30,
DEFAULT_TRUNC_OMISSION = '...';
/** Used to detect hot functions by number of calls within a span of milliseconds. */
var HOT_COUNT = 800,
HOT_SPAN = 16;
/** Used to indicate the type of lazy iteratees. */
var LAZY_FILTER_FLAG = 1,
LAZY_MAP_FLAG = 2,
LAZY_WHILE_FLAG = 3;
/** Used as references for various `Number` constants. */
var INFINITY = 1 / 0,
MAX_SAFE_INTEGER = 9007199254740991,
MAX_INTEGER = 1.7976931348623157e+308,
NAN = 0 / 0;
/** Used as references for the maximum length and index of an array. */
var MAX_ARRAY_LENGTH = 4294967295,
MAX_ARRAY_INDEX = MAX_ARRAY_LENGTH - 1,
HALF_MAX_ARRAY_LENGTH = MAX_ARRAY_LENGTH >>> 1;
/** Used to associate wrap methods with their bit flags. */
var wrapFlags = [
['ary', WRAP_ARY_FLAG],
['bind', WRAP_BIND_FLAG],
['bindKey', WRAP_BIND_KEY_FLAG],
['curry', WRAP_CURRY_FLAG],
['curryRight', WRAP_CURRY_RIGHT_FLAG],
['flip', WRAP_FLIP_FLAG],
['partial', WRAP_PARTIAL_FLAG],
['partialRight', WRAP_PARTIAL_RIGHT_FLAG],
['rearg', WRAP_REARG_FLAG]
];
/** `Object#toString` result references. */
var argsTag = '[object Arguments]',
arrayTag = '[object Array]',
asyncTag = '[object AsyncFunction]',
boolTag = '[object Boolean]',
dateTag = '[object Date]',
domExcTag = '[object DOMException]',
errorTag = '[object Error]',
funcTag = '[object Function]',
genTag = '[object GeneratorFunction]',
mapTag = '[object Map]',
numberTag = '[object Number]',
nullTag = '[object Null]',
objectTag = '[object Object]',
promiseTag = '[object Promise]',
proxyTag = '[object Proxy]',
regexpTag = '[object RegExp]',
setTag = '[object Set]',
stringTag = '[object String]',
symbolTag = '[object Symbol]',
undefinedTag = '[object Undefined]',
weakMapTag = '[object WeakMap]',
weakSetTag = '[object WeakSet]';
var arrayBufferTag = '[object ArrayBuffer]',
dataViewTag = '[object DataView]',
float32Tag = '[object Float32Array]',
float64Tag = '[object Float64Array]',
int8Tag = '[object Int8Array]',
int16Tag = '[object Int16Array]',
int32Tag = '[object Int32Array]',
uint8Tag = '[object Uint8Array]',
uint8ClampedTag = '[object Uint8ClampedArray]',
uint16Tag = '[object Uint16Array]',
uint32Tag = '[object Uint32Array]';
/** Used to match empty string literals in compiled template source. */
var reEmptyStringLeading = /\b__p \+= '';/g,
reEmptyStringMiddle = /\b(__p \+=) '' \+/g,
reEmptyStringTrailing = /(__e\(.*?\)|\b__t\)) \+\n'';/g;
/** Used to match HTML entities and HTML characters. */
var reEscapedHtml = /&(?:amp|lt|gt|quot|#39);/g,
reUnescapedHtml = /[&<>"']/g,
reHasEscapedHtml = RegExp(reEscapedHtml.source),
reHasUnescapedHtml = RegExp(reUnescapedHtml.source);
/** Used to match template delimiters. */
var reEscape = /<%-([\s\S]+?)%>/g,
reEvaluate = /<%([\s\S]+?)%>/g,
reInterpolate = /<%=([\s\S]+?)%>/g;
/** Used to match property names within property paths. */
var reIsDeepProp = /\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\\]|\\.)*?\1)\]/,
reIsPlainProp = /^\w*$/,
rePropName = /[^.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\\]|\\.)*?)\2)\]|(?=(?:\.|\[\])(?:\.|\[\]|$))/g;
/**
* Used to match `RegExp`
* [syntax characters](http://ecma-international.org/ecma-262/7.0/#sec-patterns).
*/
var reRegExpChar = /[\\^$.*+?()[\]{}|]/g,
reHasRegExpChar = RegExp(reRegExpChar.source);
/** Used to match leading whitespace. */
var reTrimStart = /^\s+/;
/** Used to match a single whitespace character. */
var reWhitespace = /\s/;
/** Used to match wrap detail comments. */
var reWrapComment = /\{(?:\n\/\* \[wrapped with .+\] \*\/)?\n?/,
reWrapDetails = /\{\n\/\* \[wrapped with (.+)\] \*/,
reSplitDetails = /,? & /;
/** Used to match words composed of alphanumeric characters. */
var reAsciiWord = /[^\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\x7f]+/g;
/**
* Used to validate the `validate` option in `_.template` variable.
*
* Forbids characters which could potentially change the meaning of the function argument definition:
* - "()," (modification of function parameters)
* - "=" (default value)
* - "[]{}" (destructuring of function parameters)
* - "/" (beginning of a comment)
* - whitespace
*/
var reForbiddenIdentifierChars = /[()=,{}\[\]\/\s]/;
/** Used to match backslashes in property paths. */
var reEscapeChar = /\\(\\)?/g;
/**
* Used to match
* [ES template delimiters](http://ecma-international.org/ecma-262/7.0/#sec-template-literal-lexical-components).
*/
var reEsTemplate = /\$\{([^\\}]*(?:\\.[^\\}]*)*)\}/g;
/** Used to match `RegExp` flags from their coerced string values. */
var reFlags = /\w*$/;
/** Used to detect bad signed hexadecimal string values. */
var reIsBadHex = /^[-+]0x[0-9a-f]+$/i;
/** Used to detect binary string values. */
var reIsBinary = /^0b[01]+$/i;
/** Used to detect host constructors (Safari). */
var reIsHostCtor = /^\[object .+?Constructor\]$/;
/** Used to detect octal string values. */
var reIsOctal = /^0o[0-7]+$/i;
/** Used to detect unsigned integer values. */
var reIsUint = /^(?:0|[1-9]\d*)$/;
/** Used to match Latin Unicode letters (excluding mathematical operators). */
var reLatin = /[\xc0-\xd6\xd8-\xf6\xf8-\xff\u0100-\u017f]/g;
/** Used to ensure capturing order of template delimiters. */
var reNoMatch = /($^)/;
/** Used to match unescaped characters in compiled string literals. */
var reUnescapedString = /['\n\r\u2028\u2029\\]/g;
/** Used to compose unicode character classes. */
var rsAstralRange = '\\ud800-\\udfff',
rsComboMarksRange = '\\u0300-\\u036f',
reComboHalfMarksRange = '\\ufe20-\\ufe2f',
rsComboSymbolsRange = '\\u20d0-\\u20ff',
rsComboRange = rsComboMarksRange + reComboHalfMarksRange + rsComboSymbolsRange,
rsDingbatRange = '\\u2700-\\u27bf',
rsLowerRange = 'a-z\\xdf-\\xf6\\xf8-\\xff',
rsMathOpRange = '\\xac\\xb1\\xd7\\xf7',
rsNonCharRange = '\\x00-\\x2f\\x3a-\\x40\\x5b-\\x60\\x7b-\\xbf',
rsPunctuationRange = '\\u2000-\\u206f',
rsSpaceRange = ' \\t\\x0b\\f\\xa0\\ufeff\\n\\r\\u2028\\u2029\\u1680\\u180e\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200a\\u202f\\u205f\\u3000',
rsUpperRange = 'A-Z\\xc0-\\xd6\\xd8-\\xde',
rsVarRange = '\\ufe0e\\ufe0f',
rsBreakRange = rsMathOpRange + rsNonCharRange + rsPunctuationRange + rsSpaceRange;
/** Used to compose unicode capture groups. */
var rsApos = "['\u2019]",
rsAstral = '[' + rsAstralRange + ']',
rsBreak = '[' + rsBreakRange + ']',
rsCombo = '[' + rsComboRange + ']',
rsDigits = '\\d+',
rsDingbat = '[' + rsDingbatRange + ']',
rsLower = '[' + rsLowerRange + ']',
rsMisc = '[^' + rsAstralRange + rsBreakRange + rsDigits + rsDingbatRange + rsLowerRange + rsUpperRange + ']',
rsFitz = '\\ud83c[\\udffb-\\udfff]',
rsModifier = '(?:' + rsCombo + '|' + rsFitz + ')',
rsNonAstral = '[^' + rsAstralRange + ']',
rsRegional = '(?:\\ud83c[\\udde6-\\uddff]){2}',
rsSurrPair = '[\\ud800-\\udbff][\\udc00-\\udfff]',
rsUpper = '[' + rsUpperRange + ']',
rsZWJ = '\\u200d';
/** Used to compose unicode regexes. */
var rsMiscLower = '(?:' + rsLower + '|' + rsMisc + ')',
rsMiscUpper = '(?:' + rsUpper + '|' + rsMisc + ')',
rsOptContrLower = '(?:' + rsApos + '(?:d|ll|m|re|s|t|ve))?',
rsOptContrUpper = '(?:' + rsApos + '(?:D|LL|M|RE|S|T|VE))?',
reOptMod = rsModifier + '?',
rsOptVar = '[' + rsVarRange + ']?',
rsOptJoin = '(?:' + rsZWJ + '(?:' + [rsNonAstral, rsRegional, rsSurrPair].join('|') + ')' + rsOptVar + reOptMod + ')*',
rsOrdLower = '\\d*(?:1st|2nd|3rd|(?![123])\\dth)(?=\\b|[A-Z_])',
rsOrdUpper = '\\d*(?:1ST|2ND|3RD|(?![123])\\dTH)(?=\\b|[a-z_])',
rsSeq = rsOptVar + reOptMod + rsOptJoin,
rsEmoji = '(?:' + [rsDingbat, rsRegional, rsSurrPair].join('|') + ')' + rsSeq,
rsSymbol = '(?:' + [rsNonAstral + rsCombo + '?', rsCombo, rsRegional, rsSurrPair, rsAstral].join('|') + ')';
/** Used to match apostrophes. */
var reApos = RegExp(rsApos, 'g');
/**
* Used to match [combining diacritical marks](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks) and
* [combining diacritical marks for symbols](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks_for_Symbols).
*/
var reComboMark = RegExp(rsCombo, 'g');
/** Used to match [string symbols](https://mathiasbynens.be/notes/javascript-unicode). */
var reUnicode = RegExp(rsFitz + '(?=' + rsFitz + ')|' + rsSymbol + rsSeq, 'g');
/** Used to match complex or compound words. */
var reUnicodeWord = RegExp([
rsUpper + '?' + rsLower + '+' + rsOptContrLower + '(?=' + [rsBreak, rsUpper, '$'].join('|') + ')',
rsMiscUpper + '+' + rsOptContrUpper + '(?=' + [rsBreak, rsUpper + rsMiscLower, '$'].join('|') + ')',
rsUpper + '?' + rsMiscLower + '+' + rsOptContrLower,
rsUpper + '+' + rsOptContrUpper,
rsOrdUpper,
rsOrdLower,
rsDigits,
rsEmoji
].join('|'), 'g');
/** Used to detect strings with [zero-width joiners or code points from the astral planes](http://eev.ee/blog/2015/09/12/dark-corners-of-unicode/). */
var reHasUnicode = RegExp('[' + rsZWJ + rsAstralRange + rsComboRange + rsVarRange + ']');
/** Used to detect strings that need a more robust regexp to match words. */
var reHasUnicodeWord = /[a-z][A-Z]|[A-Z]{2}[a-z]|[0-9][a-zA-Z]|[a-zA-Z][0-9]|[^a-zA-Z0-9 ]/;
/** Used to assign default `context` object properties. */
var contextProps = [
'Array', 'Buffer', 'DataView', 'Date', 'Error', 'Float32Array', 'Float64Array',
'Function', 'Int8Array', 'Int16Array', 'Int32Array', 'Map', 'Math', 'Object',
'Promise', 'RegExp', 'Set', 'String', 'Symbol', 'TypeError', 'Uint8Array',
'Uint8ClampedArray', 'Uint16Array', 'Uint32Array', 'WeakMap',
'_', 'clearTimeout', 'isFinite', 'parseInt', 'setTimeout'
];
/** Used to make template sourceURLs easier to identify. */
var templateCounter = -1;
/** Used to identify `toStringTag` values of typed arrays. */
var typedArrayTags = {};
typedArrayTags[float32Tag] = typedArrayTags[float64Tag] =
typedArrayTags[int8Tag] = typedArrayTags[int16Tag] =
typedArrayTags[int32Tag] = typedArrayTags[uint8Tag] =
typedArrayTags[uint8ClampedTag] = typedArrayTags[uint16Tag] =
typedArrayTags[uint32Tag] = true;
typedArrayTags[argsTag] = typedArrayTags[arrayTag] =
typedArrayTags[arrayBufferTag] = typedArrayTags[boolTag] =
typedArrayTags[dataViewTag] = typedArrayTags[dateTag] =
typedArrayTags[errorTag] = typedArrayTags[funcTag] =
typedArrayTags[mapTag] = typedArrayTags[numberTag] =
typedArrayTags[objectTag] = typedArrayTags[regexpTag] =
typedArrayTags[setTag] = typedArrayTags[stringTag] =
typedArrayTags[weakMapTag] = false;
/** Used to identify `toStringTag` values supported by `_.clone`. */
var cloneableTags = {};
cloneableTags[argsTag] = cloneableTags[arrayTag] =
cloneableTags[arrayBufferTag] = cloneableTags[dataViewTag] =
cloneableTags[boolTag] = cloneableTags[dateTag] =
cloneableTags[float32Tag] = cloneableTags[float64Tag] =
cloneableTags[int8Tag] = cloneableTags[int16Tag] =
cloneableTags[int32Tag] = cloneableTags[mapTag] =
cloneableTags[numberTag] = cloneableTags[objectTag] =
cloneableTags[regexpTag] = cloneableTags[setTag] =
cloneableTags[stringTag] = cloneableTags[symbolTag] =
cloneableTags[uint8Tag] = cloneableTags[uint8ClampedTag] =
cloneableTags[uint16Tag] = cloneableTags[uint32Tag] = true;
cloneableTags[errorTag] = cloneableTags[funcTag] =
cloneableTags[weakMapTag] = false;
/** Used to map Latin Unicode letters to basic Latin letters. */
var deburredLetters = {
// Latin-1 Supplement block.
'\xc0': 'A', '\xc1': 'A', '\xc2': 'A', '\xc3': 'A', '\xc4': 'A', '\xc5': 'A',
'\xe0': 'a', '\xe1': 'a', '\xe2': 'a', '\xe3': 'a', '\xe4': 'a', '\xe5': 'a',
'\xc7': 'C', '\xe7': 'c',
'\xd0': 'D', '\xf0': 'd',
'\xc8': 'E', '\xc9': 'E', '\xca': 'E', '\xcb': 'E',
'\xe8': 'e', '\xe9': 'e', '\xea': 'e', '\xeb': 'e',
'\xcc': 'I', '\xcd': 'I', '\xce': 'I', '\xcf': 'I',
'\xec': 'i', '\xed': 'i', '\xee': 'i', '\xef': 'i',
'\xd1': 'N', '\xf1': 'n',
'\xd2': 'O', '\xd3': 'O', '\xd4': 'O', '\xd5': 'O', '\xd6': 'O', '\xd8': 'O',
'\xf2': 'o', '\xf3': 'o', '\xf4': 'o', '\xf5': 'o', '\xf6': 'o', '\xf8': 'o',
'\xd9': 'U', '\xda': 'U', '\xdb': 'U', '\xdc': 'U',
'\xf9': 'u', '\xfa': 'u', '\xfb': 'u', '\xfc': 'u',
'\xdd': 'Y', '\xfd': 'y', '\xff': 'y',
'\xc6': 'Ae', '\xe6': 'ae',
'\xde': 'Th', '\xfe': 'th',
'\xdf': 'ss',
// Latin Extended-A block.
'\u0100': 'A', '\u0102': 'A', '\u0104': 'A',
'\u0101': 'a', '\u0103': 'a', '\u0105': 'a',
'\u0106': 'C', '\u0108': 'C', '\u010a': 'C', '\u010c': 'C',
'\u0107': 'c', '\u0109': 'c', '\u010b': 'c', '\u010d': 'c',
'\u010e': 'D', '\u0110': 'D', '\u010f': 'd', '\u0111': 'd',
'\u0112': 'E', '\u0114': 'E', '\u0116': 'E', '\u0118': 'E', '\u011a': 'E',
'\u0113': 'e', '\u0115': 'e', '\u0117': 'e', '\u0119': 'e', '\u011b': 'e',
'\u011c': 'G', '\u011e': 'G', '\u0120': 'G', '\u0122': 'G',
'\u011d': 'g', '\u011f': 'g', '\u0121': 'g', '\u0123': 'g',
'\u0124': 'H', '\u0126': 'H', '\u0125': 'h', '\u0127': 'h',
'\u0128': 'I', '\u012a': 'I', '\u012c': 'I', '\u012e': 'I', '\u0130': 'I',
'\u0129': 'i', '\u012b': 'i', '\u012d': 'i', '\u012f': 'i', '\u0131': 'i',
'\u0134': 'J', '\u0135': 'j',
'\u0136': 'K', '\u0137': 'k', '\u0138': 'k',
'\u0139': 'L', '\u013b': 'L', '\u013d': 'L', '\u013f': 'L', '\u0141': 'L',
'\u013a': 'l', '\u013c': 'l', '\u013e': 'l', '\u0140': 'l', '\u0142': 'l',
'\u0143': 'N', '\u0145': 'N', '\u0147': 'N', '\u014a': 'N',
'\u0144': 'n', '\u0146': 'n', '\u0148': 'n', '\u014b': 'n',
'\u014c': 'O', '\u014e': 'O', '\u0150': 'O',
'\u014d': 'o', '\u014f': 'o', '\u0151': 'o',
'\u0154': 'R', '\u0156': 'R', '\u0158': 'R',
'\u0155': 'r', '\u0157': 'r', '\u0159': 'r',
'\u015a': 'S', '\u015c': 'S', '\u015e': 'S', '\u0160': 'S',
'\u015b': 's', '\u015d': 's', '\u015f': 's', '\u0161': 's',
'\u0162': 'T', '\u0164': 'T', '\u0166': 'T',
'\u0163': 't', '\u0165': 't', '\u0167': 't',
'\u0168': 'U', '\u016a': 'U', '\u016c': 'U', '\u016e': 'U', '\u0170': 'U', '\u0172': 'U',
'\u0169': 'u', '\u016b': 'u', '\u016d': 'u', '\u016f': 'u', '\u0171': 'u', '\u0173': 'u',
'\u0174': 'W', '\u0175': 'w',
'\u0176': 'Y', '\u0177': 'y', '\u0178': 'Y',
'\u0179': 'Z', '\u017b': 'Z', '\u017d': 'Z',
'\u017a': 'z', '\u017c': 'z', '\u017e': 'z',
'\u0132': 'IJ', '\u0133': 'ij',
'\u0152': 'Oe', '\u0153': 'oe',
'\u0149': "'n", '\u017f': 's'
};
/** Used to map characters to HTML entities. */
var htmlEscapes = {
'&': '&',
'<': '<',
'>': '>',
'"': '"',
"'": '''
};
/** Used to map HTML entities to characters. */
var htmlUnescapes = {
'&': '&',
'<': '<',
'>': '>',
'"': '"',
''': "'"
};
/** Used to escape characters for inclusion in compiled string literals. */
var stringEscapes = {
'\\': '\\',
"'": "'",
'\n': 'n',
'\r': 'r',
'\u2028': 'u2028',
'\u2029': 'u2029'
};
/** Built-in method references without a dependency on `root`. */
var freeParseFloat = parseFloat,
freeParseInt = parseInt;
/** Detect free variable `global` from Node.js. */
var freeGlobal = typeof global == 'object' && global && global.Object === Object && global;
/** Detect free variable `self`. */
var freeSelf = typeof self == 'object' && self && self.Object === Object && self;
/** Used as a reference to the global object. */
var root = freeGlobal || freeSelf || Function('return this')();
/** Detect free variable `exports`. */
var freeExports = typeof exports == 'object' && exports && !exports.nodeType && exports;
/** Detect free variable `module`. */
var freeModule = freeExports && typeof module == 'object' && module && !module.nodeType && module;
/** Detect the popular CommonJS extension `module.exports`. */
var moduleExports = freeModule && freeModule.exports === freeExports;
/** Detect free variable `process` from Node.js. */
var freeProcess = moduleExports && freeGlobal.process;
/** Used to access faster Node.js helpers. */
var nodeUtil = (function() {
try {
// Use `util.types` for Node.js 10+.
var types = freeModule && freeModule.require && freeModule.require('util').types;
if (types) {
return types;
}
// Legacy `process.binding('util')` for Node.js < 10.
return freeProcess && freeProcess.binding && freeProcess.binding('util');
} catch (e) {}
}());
/* Node.js helper references. */
var nodeIsArrayBuffer = nodeUtil && nodeUtil.isArrayBuffer,
nodeIsDate = nodeUtil && nodeUtil.isDate,
nodeIsMap = nodeUtil && nodeUtil.isMap,
nodeIsRegExp = nodeUtil && nodeUtil.isRegExp,
nodeIsSet = nodeUtil && nodeUtil.isSet,
nodeIsTypedArray = nodeUtil && nodeUtil.isTypedArray;
/*--------------------------------------------------------------------------*/
/**
 * Invokes `func` with `thisArg` bound as `this` and the elements of `args`
 * as the argument list. Small argument counts go through `Function#call`,
 * which is faster than `Function#apply` in most engines.
 *
 * @private
 * @param {Function} func The function to invoke.
 * @param {*} thisArg The `this` binding of `func`.
 * @param {Array} args The arguments to invoke `func` with.
 * @returns {*} Returns the result of `func`.
 */
function apply(func, thisArg, args) {
  var count = args.length;
  if (count == 0) {
    return func.call(thisArg);
  }
  if (count == 1) {
    return func.call(thisArg, args[0]);
  }
  if (count == 2) {
    return func.call(thisArg, args[0], args[1]);
  }
  if (count == 3) {
    return func.call(thisArg, args[0], args[1], args[2]);
  }
  return func.apply(thisArg, args);
}
/**
 * A specialized version of `baseAggregator` for arrays: folds each element
 * into `accumulator` via `setter`, keyed by `iteratee(element)`.
 *
 * @private
 * @param {Array} [array] The array to iterate over.
 * @param {Function} setter The function to set `accumulator` values.
 * @param {Function} iteratee The iteratee to transform keys.
 * @param {Object} accumulator The initial aggregated object.
 * @returns {Function} Returns `accumulator`.
 */
function arrayAggregator(array, setter, iteratee, accumulator) {
  var size = array == null ? 0 : array.length;
  for (var idx = 0; idx < size; idx++) {
    var item = array[idx];
    setter(accumulator, item, iteratee(item), array);
  }
  return accumulator;
}
/**
 * A specialized version of `_.forEach` for arrays without support for
 * iteratee shorthands. Iteration stops early when `iteratee` returns
 * strictly `false`.
 *
 * @private
 * @param {Array} [array] The array to iterate over.
 * @param {Function} iteratee The function invoked per iteration.
 * @returns {Array} Returns `array`.
 */
function arrayEach(array, iteratee) {
  var size = array == null ? 0 : array.length;
  for (var idx = 0; idx < size; idx++) {
    if (iteratee(array[idx], idx, array) === false) {
      break;
    }
  }
  return array;
}
/**
 * A specialized version of `_.forEachRight` for arrays without support for
 * iteratee shorthands. Walks from the last index toward the first and stops
 * early when `iteratee` returns strictly `false`.
 *
 * @private
 * @param {Array} [array] The array to iterate over.
 * @param {Function} iteratee The function invoked per iteration.
 * @returns {Array} Returns `array`.
 */
function arrayEachRight(array, iteratee) {
  var idx = array == null ? 0 : array.length;
  while (idx > 0) {
    idx -= 1;
    if (iteratee(array[idx], idx, array) === false) {
      break;
    }
  }
  return array;
}
/**
* A specialized version of `_.every` for arrays without support for
* iteratee shorthands.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} predicate The function invoked per iteration.
* @returns {boolean} Returns `true` if all elements pass the predicate check,
* else `false`.
*/
function arrayEvery(array, predicate) {
var index = -1,
length = array == null ? 0 : array.length;
while (++index < length) {
if (!predicate(array[index], index, array)) {
return false;
}
}
return true;
}
/**
* A specialized version of `_.filter` for arrays without support for
* iteratee shorthands.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} predicate The function invoked per iteration.
* @returns {Array} Returns the new filtered array.
*/
function arrayFilter(array, predicate) {
var index = -1,
length = array == null ? 0 : array.length,
resIndex = 0,
result = [];
while (++index < length) {
var value = array[index];
if (predicate(value, index, array)) {
result[resIndex++] = value;
}
}
return result;
}
/**
 * A specialized version of `_.includes` for arrays without support for
 * specifying an index to search from.
 *
 * @private
 * @param {Array} [array] The array to inspect.
 * @param {*} value The value to search for.
 * @returns {boolean} Returns `true` if `value` is found, else `false`.
 */
function arrayIncludes(array, value) {
  var length = array == null ? 0 : array.length;
  // `baseIndexOf` is SameValueZero-aware, so `NaN` values are matched too.
  return !!length && baseIndexOf(array, value, 0) > -1;
}
/**
* This function is like `arrayIncludes` except that it accepts a comparator.
*
* @private
* @param {Array} [array] The array to inspect.
* @param {*} target The value to search for.
* @param {Function} comparator The comparator invoked per element.
* @returns {boolean} Returns `true` if `target` is found, else `false`.
*/
function arrayIncludesWith(array, value, comparator) {
var index = -1,
length = array == null ? 0 : array.length;
while (++index < length) {
if (comparator(value, array[index])) {
return true;
}
}
return false;
}
/**
* A specialized version of `_.map` for arrays without support for iteratee
* shorthands.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Array} Returns the new mapped array.
*/
function arrayMap(array, iteratee) {
var index = -1,
length = array == null ? 0 : array.length,
result = Array(length);
while (++index < length) {
result[index] = iteratee(array[index], index, array);
}
return result;
}
/**
* Appends the elements of `values` to `array`.
*
* @private
* @param {Array} array The array to modify.
* @param {Array} values The values to append.
* @returns {Array} Returns `array`.
*/
function arrayPush(array, values) {
var index = -1,
length = values.length,
offset = array.length;
while (++index < length) {
array[offset + index] = values[index];
}
return array;
}
/**
* A specialized version of `_.reduce` for arrays without support for
* iteratee shorthands.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @param {*} [accumulator] The initial value.
* @param {boolean} [initAccum] Specify using the first element of `array` as
* the initial value.
* @returns {*} Returns the accumulated value.
*/
function arrayReduce(array, iteratee, accumulator, initAccum) {
var index = -1,
length = array == null ? 0 : array.length;
if (initAccum && length) {
accumulator = array[++index];
}
while (++index < length) {
accumulator = iteratee(accumulator, array[index], index, array);
}
return accumulator;
}
/**
* A specialized version of `_.reduceRight` for arrays without support for
* iteratee shorthands.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @param {*} [accumulator] The initial value.
* @param {boolean} [initAccum] Specify using the last element of `array` as
* the initial value.
* @returns {*} Returns the accumulated value.
*/
function arrayReduceRight(array, iteratee, accumulator, initAccum) {
var length = array == null ? 0 : array.length;
if (initAccum && length) {
accumulator = array[--length];
}
while (length--) {
accumulator = iteratee(accumulator, array[length], length, array);
}
return accumulator;
}
/**
* A specialized version of `_.some` for arrays without support for iteratee
* shorthands.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} predicate The function invoked per iteration.
* @returns {boolean} Returns `true` if any element passes the predicate check,
* else `false`.
*/
function arraySome(array, predicate) {
var index = -1,
length = array == null ? 0 : array.length;
while (++index < length) {
if (predicate(array[index], index, array)) {
return true;
}
}
return false;
}
/**
 * Gets the size of an ASCII `string` (its code-unit length; Unicode-aware
 * sizing is handled separately by `unicodeSize`).
 *
 * @private
 * @param {string} string The string to inspect.
 * @returns {number} Returns the string size.
 */
var asciiSize = baseProperty('length');
/**
* Converts an ASCII `string` to an array.
*
* @private
* @param {string} string The string to convert.
* @returns {Array} Returns the converted array.
*/
function asciiToArray(string) {
return string.split('');
}
/**
 * Splits an ASCII `string` into an array of its words.
 *
 * @private
 * @param {string} string The string to inspect.
 * @returns {Array} Returns the words of `string`.
 */
function asciiWords(string) {
  // `|| []` normalizes the no-match `null` result of `String#match`.
  return string.match(reAsciiWord) || [];
}
/**
* The base implementation of methods like `_.findKey` and `_.findLastKey`,
* without support for iteratee shorthands, which iterates over `collection`
* using `eachFunc`.
*
* @private
* @param {Array|Object} collection The collection to inspect.
* @param {Function} predicate The function invoked per iteration.
* @param {Function} eachFunc The function to iterate over `collection`.
* @returns {*} Returns the found element or its key, else `undefined`.
*/
function baseFindKey(collection, predicate, eachFunc) {
var result;
eachFunc(collection, function(value, key, collection) {
if (predicate(value, key, collection)) {
result = key;
return false;
}
});
return result;
}
/**
* The base implementation of `_.findIndex` and `_.findLastIndex` without
* support for iteratee shorthands.
*
* @private
* @param {Array} array The array to inspect.
* @param {Function} predicate The function invoked per iteration.
* @param {number} fromIndex The index to search from.
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {number} Returns the index of the matched value, else `-1`.
*/
function baseFindIndex(array, predicate, fromIndex, fromRight) {
var length = array.length,
index = fromIndex + (fromRight ? 1 : -1);
while ((fromRight ? index-- : ++index < length)) {
if (predicate(array[index], index, array)) {
return index;
}
}
return -1;
}
/**
 * The base implementation of `_.indexOf` without `fromIndex` bounds checks.
 *
 * @private
 * @param {Array} array The array to inspect.
 * @param {*} value The value to search for.
 * @param {number} fromIndex The index to search from.
 * @returns {number} Returns the index of the matched value, else `-1`.
 */
function baseIndexOf(array, value, fromIndex) {
  // `NaN` never compares `===` to itself, so it needs the predicate-based
  // search; everything else can take the strict-equality fast path.
  if (value !== value) {
    return baseFindIndex(array, baseIsNaN, fromIndex);
  }
  return strictIndexOf(array, value, fromIndex);
}
/**
* This function is like `baseIndexOf` except that it accepts a comparator.
*
* @private
* @param {Array} array The array to inspect.
* @param {*} value The value to search for.
* @param {number} fromIndex The index to search from.
* @param {Function} comparator The comparator invoked per element.
* @returns {number} Returns the index of the matched value, else `-1`.
*/
function baseIndexOfWith(array, value, fromIndex, comparator) {
var index = fromIndex - 1,
length = array.length;
while (++index < length) {
if (comparator(array[index], value)) {
return index;
}
}
return -1;
}
/**
* The base implementation of `_.isNaN` without support for number objects.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is `NaN`, else `false`.
*/
function baseIsNaN(value) {
return value !== value;
}
/**
 * The base implementation of `_.mean` and `_.meanBy` without support for
 * iteratee shorthands.
 *
 * @private
 * @param {Array} array The array to iterate over.
 * @param {Function} iteratee The function invoked per iteration.
 * @returns {number} Returns the mean.
 */
function baseMean(array, iteratee) {
  // An empty or nullish array has no mean; mirror `_.mean` by yielding `NaN`.
  if (array == null || !array.length) {
    return NAN;
  }
  return baseSum(array, iteratee) / array.length;
}
/**
* The base implementation of `_.property` without support for deep paths.
*
* @private
* @param {string} key The key of the property to get.
* @returns {Function} Returns the new accessor function.
*/
function baseProperty(key) {
return function(object) {
return object == null ? undefined : object[key];
};
}
/**
* The base implementation of `_.propertyOf` without support for deep paths.
*
* @private
* @param {Object} object The object to query.
* @returns {Function} Returns the new accessor function.
*/
function basePropertyOf(object) {
return function(key) {
return object == null ? undefined : object[key];
};
}
/**
* The base implementation of `_.reduce` and `_.reduceRight`, without support
* for iteratee shorthands, which iterates over `collection` using `eachFunc`.
*
* @private
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @param {*} accumulator The initial value.
* @param {boolean} initAccum Specify using the first or last element of
* `collection` as the initial value.
* @param {Function} eachFunc The function to iterate over `collection`.
* @returns {*} Returns the accumulated value.
*/
function baseReduce(collection, iteratee, accumulator, initAccum, eachFunc) {
eachFunc(collection, function(value, index, collection) {
accumulator = initAccum
? (initAccum = false, value)
: iteratee(accumulator, value, index, collection);
});
return accumulator;
}
/**
* The base implementation of `_.sortBy` which uses `comparer` to define the
* sort order of `array` and replaces criteria objects with their corresponding
* values.
*
* @private
* @param {Array} array The array to sort.
* @param {Function} comparer The function to define sort order.
* @returns {Array} Returns `array`.
*/
function baseSortBy(array, comparer) {
var length = array.length;
array.sort(comparer);
while (length--) {
array[length] = array[length].value;
}
return array;
}
/**
* The base implementation of `_.sum` and `_.sumBy` without support for
* iteratee shorthands.
*
* @private
* @param {Array} array The array to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {number} Returns the sum.
*/
function baseSum(array, iteratee) {
var result,
index = -1,
length = array.length;
while (++index < length) {
var current = iteratee(array[index]);
if (current !== undefined) {
result = result === undefined ? current : (result + current);
}
}
return result;
}
/**
* The base implementation of `_.times` without support for iteratee shorthands
* or max array length checks.
*
* @private
* @param {number} n The number of times to invoke `iteratee`.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Array} Returns the array of results.
*/
function baseTimes(n, iteratee) {
var index = -1,
result = Array(n);
while (++index < n) {
result[index] = iteratee(index);
}
return result;
}
/**
* The base implementation of `_.toPairs` and `_.toPairsIn` which creates an array
* of key-value pairs for `object` corresponding to the property names of `props`.
*
* @private
* @param {Object} object The object to query.
* @param {Array} props The property names to get values for.
* @returns {Object} Returns the key-value pairs.
*/
function baseToPairs(object, props) {
return arrayMap(props, function(key) {
return [key, object[key]];
});
}
/**
 * The base implementation of `_.trim`.
 *
 * @private
 * @param {string} string The string to trim.
 * @returns {string} Returns the trimmed string.
 */
function baseTrim(string) {
  // Falsy inputs (e.g. '') are returned unchanged. Trailing whitespace is
  // sliced off via `trimmedEndIndex`; leading whitespace is stripped with
  // the `reTrimStart` regexp.
  return string
    ? string.slice(0, trimmedEndIndex(string) + 1).replace(reTrimStart, '')
    : string;
}
/**
* The base implementation of `_.unary` without support for storing metadata.
*
* @private
* @param {Function} func The function to cap arguments for.
* @returns {Function} Returns the new capped function.
*/
function baseUnary(func) {
return function(value) {
return func(value);
};
}
/**
* The base implementation of `_.values` and `_.valuesIn` which creates an
* array of `object` property values corresponding to the property names
* of `props`.
*
* @private
* @param {Object} object The object to query.
* @param {Array} props The property names to get values for.
* @returns {Object} Returns the array of property values.
*/
function baseValues(object, props) {
return arrayMap(props, function(key) {
return object[key];
});
}
/**
 * Checks if a `cache` value for `key` exists.
 *
 * @private
 * @param {Object} cache The cache to query.
 * @param {string} key The key of the entry to check.
 * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.
 */
function cacheHas(cache, key) {
  // Thin adapter so cache membership can be passed around as a plain
  // two-argument function (e.g. alongside `arrayIncludes`).
  return cache.has(key);
}
/**
 * Used by `_.trim` and `_.trimStart` to get the index of the first string symbol
 * that is not found in the character symbols.
 *
 * @private
 * @param {Array} strSymbols The string symbols to inspect.
 * @param {Array} chrSymbols The character symbols to find.
 * @returns {number} Returns the index of the first unmatched string symbol.
 */
function charsStartIndex(strSymbols, chrSymbols) {
  var size = strSymbols.length,
      position = 0;
  // Advance past every leading symbol that appears in `chrSymbols`.
  while (position < size && baseIndexOf(chrSymbols, strSymbols[position], 0) > -1) {
    position++;
  }
  return position;
}
/**
 * Used by `_.trim` and `_.trimEnd` to get the index of the last string symbol
 * that is not found in the character symbols.
 *
 * @private
 * @param {Array} strSymbols The string symbols to inspect.
 * @param {Array} chrSymbols The character symbols to find.
 * @returns {number} Returns the index of the last unmatched string symbol.
 */
function charsEndIndex(strSymbols, chrSymbols) {
  var position = strSymbols.length - 1;
  // Back up past every trailing symbol that appears in `chrSymbols`;
  // returns -1 when every symbol matched.
  while (position >= 0 && baseIndexOf(chrSymbols, strSymbols[position], 0) > -1) {
    position--;
  }
  return position;
}
/**
* Gets the number of `placeholder` occurrences in `array`.
*
* @private
* @param {Array} array The array to inspect.
* @param {*} placeholder The placeholder to search for.
* @returns {number} Returns the placeholder count.
*/
function countHolders(array, placeholder) {
var length = array.length,
result = 0;
while (length--) {
if (array[length] === placeholder) {
++result;
}
}
return result;
}
/**
 * Used by `_.deburr` to convert Latin-1 Supplement and Latin Extended-A
 * letters to basic Latin letters, via a lookup in the `deburredLetters` map.
 *
 * @private
 * @param {string} letter The matched letter to deburr.
 * @returns {string} Returns the deburred letter.
 */
var deburrLetter = basePropertyOf(deburredLetters);
/**
 * Used by `_.escape` to convert characters to HTML entities, via a lookup
 * in the `htmlEscapes` map.
 *
 * @private
 * @param {string} chr The matched character to escape.
 * @returns {string} Returns the escaped character.
 */
var escapeHtmlChar = basePropertyOf(htmlEscapes);
/**
 * Used by `_.template` to escape characters for inclusion in compiled string
 * literals.
 *
 * @private
 * @param {string} chr The matched character to escape.
 * @returns {string} Returns the escaped character.
 */
function escapeStringChar(chr) {
  // `stringEscapes` maps each special character to its escape-sequence body.
  return '\\' + stringEscapes[chr];
}
/**
* Gets the value at `key` of `object`.
*
* @private
* @param {Object} [object] The object to query.
* @param {string} key The key of the property to get.
* @returns {*} Returns the property value.
*/
function getValue(object, key) {
return object == null ? undefined : object[key];
}
/**
 * Checks if `string` contains Unicode symbols (tested against the
 * `reHasUnicode` pattern).
 *
 * @private
 * @param {string} string The string to inspect.
 * @returns {boolean} Returns `true` if a symbol is found, else `false`.
 */
function hasUnicode(string) {
  return reHasUnicode.test(string);
}
/**
 * Checks if `string` contains a word composed of Unicode symbols (tested
 * against the `reHasUnicodeWord` pattern).
 *
 * @private
 * @param {string} string The string to inspect.
 * @returns {boolean} Returns `true` if a word is found, else `false`.
 */
function hasUnicodeWord(string) {
  return reHasUnicodeWord.test(string);
}
/**
* Converts `iterator` to an array.
*
* @private
* @param {Object} iterator The iterator to convert.
* @returns {Array} Returns the converted array.
*/
function iteratorToArray(iterator) {
var data,
result = [];
while (!(data = iterator.next()).done) {
result.push(data.value);
}
return result;
}
/**
* Converts `map` to its key-value pairs.
*
* @private
* @param {Object} map The map to convert.
* @returns {Array} Returns the key-value pairs.
*/
function mapToArray(map) {
var index = -1,
result = Array(map.size);
map.forEach(function(value, key) {
result[++index] = [key, value];
});
return result;
}
/**
* Creates a unary function that invokes `func` with its argument transformed.
*
* @private
* @param {Function} func The function to wrap.
* @param {Function} transform The argument transform.
* @returns {Function} Returns the new function.
*/
function overArg(func, transform) {
return function(arg) {
return func(transform(arg));
};
}
/**
 * Replaces all `placeholder` elements in `array` with an internal placeholder
 * and returns an array of their indexes.
 *
 * @private
 * @param {Array} array The array to modify.
 * @param {*} placeholder The placeholder to replace.
 * @returns {Array} Returns the new array of placeholder indexes.
 */
function replaceHolders(array, placeholder) {
  var positions = [];
  for (var position = 0; position < array.length; position++) {
    var element = array[position];
    // Both the caller's placeholder and the internal `PLACEHOLDER` sentinel
    // count as holes; normalize them all to `PLACEHOLDER` in place.
    if (element === placeholder || element === PLACEHOLDER) {
      array[position] = PLACEHOLDER;
      positions.push(position);
    }
  }
  return positions;
}
/**
* Converts `set` to an array of its values.
*
* @private
* @param {Object} set The set to convert.
* @returns {Array} Returns the values.
*/
function setToArray(set) {
var index = -1,
result = Array(set.size);
set.forEach(function(value) {
result[++index] = value;
});
return result;
}
/**
* Converts `set` to its value-value pairs.
*
* @private
* @param {Object} set The set to convert.
* @returns {Array} Returns the value-value pairs.
*/
function setToPairs(set) {
var index = -1,
result = Array(set.size);
set.forEach(function(value) {
result[++index] = [value, value];
});
return result;
}
/**
* A specialized version of `_.indexOf` which performs strict equality
* comparisons of values, i.e. `===`.
*
* @private
* @param {Array} array The array to inspect.
* @param {*} value The value to search for.
* @param {number} fromIndex The index to search from.
* @returns {number} Returns the index of the matched value, else `-1`.
*/
function strictIndexOf(array, value, fromIndex) {
var index = fromIndex - 1,
length = array.length;
while (++index < length) {
if (array[index] === value) {
return index;
}
}
return -1;
}
/**
* A specialized version of `_.lastIndexOf` which performs strict equality
* comparisons of values, i.e. `===`.
*
* @private
* @param {Array} array The array to inspect.
* @param {*} value The value to search for.
* @param {number} fromIndex The index to search from.
* @returns {number} Returns the index of the matched value, else `-1`.
*/
function strictLastIndexOf(array, value, fromIndex) {
var index = fromIndex + 1;
while (index--) {
if (array[index] === value) {
return index;
}
}
return index;
}
/**
 * Gets the number of symbols in `string`, counting surrogate pairs and
 * other multi-unit Unicode symbols as one.
 *
 * @private
 * @param {string} string The string to inspect.
 * @returns {number} Returns the string size.
 */
function stringSize(string) {
  if (hasUnicode(string)) {
    return unicodeSize(string);
  }
  return asciiSize(string);
}
/**
 * Converts `string` to an array of its symbols, keeping surrogate pairs
 * and other multi-unit Unicode symbols intact.
 *
 * @private
 * @param {string} string The string to convert.
 * @returns {Array} Returns the converted array.
 */
function stringToArray(string) {
  if (hasUnicode(string)) {
    return unicodeToArray(string);
  }
  return asciiToArray(string);
}
/**
 * Used by `_.trim` and `_.trimEnd` to get the index of the last non-whitespace
 * character of `string`.
 *
 * @private
 * @param {string} string The string to inspect.
 * @returns {number} Returns the index of the last non-whitespace character.
 */
function trimmedEndIndex(string) {
  var position = string.length - 1;
  // Back up past trailing whitespace; returns -1 for all-whitespace input.
  while (position >= 0 && reWhitespace.test(string.charAt(position))) {
    position--;
  }
  return position;
}
/**
 * Used by `_.unescape` to convert HTML entities to characters, via a lookup
 * in the `htmlUnescapes` map.
 *
 * @private
 * @param {string} chr The matched character to unescape.
 * @returns {string} Returns the unescaped character.
 */
var unescapeHtmlChar = basePropertyOf(htmlUnescapes);
/**
 * Gets the size of a Unicode `string`, counting each `reUnicode` symbol
 * match (e.g. a surrogate pair) as one.
 *
 * @private
 * @param {string} string The string to inspect.
 * @returns {number} Returns the string size.
 */
function unicodeSize(string) {
  // `reUnicode` is a global ('g') regexp, so each `test` call advances
  // `lastIndex`; resetting it here makes the count start from index 0.
  var result = reUnicode.lastIndex = 0;
  while (reUnicode.test(string)) {
    ++result;
  }
  return result;
}
/**
 * Converts a Unicode `string` to an array of its symbols, keeping each
 * `reUnicode` match (e.g. a surrogate pair) as a single element.
 *
 * @private
 * @param {string} string The string to convert.
 * @returns {Array} Returns the converted array.
 */
function unicodeToArray(string) {
  // `|| []` normalizes the no-match `null` result of `String#match`.
  return string.match(reUnicode) || [];
}
/**
 * Splits a Unicode `string` into an array of its words.
 *
 * @private
 * @param {string} string The string to inspect.
 * @returns {Array} Returns the words of `string`.
 */
function unicodeWords(string) {
  // `|| []` normalizes the no-match `null` result of `String#match`.
  return string.match(reUnicodeWord) || [];
}
/*--------------------------------------------------------------------------*/
/**
* Create a new pristine `lodash` function using the `context` object.
*
* @static
* @memberOf _
* @since 1.1.0
* @category Util
* @param {Object} [context=root] The context object.
* @returns {Function} Returns a new `lodash` function.
* @example
*
* _.mixin({ 'foo': _.constant('foo') });
*
* var lodash = _.runInContext();
* lodash.mixin({ 'bar': lodash.constant('bar') });
*
* _.isFunction(_.foo);
* // => true
* _.isFunction(_.bar);
* // => false
*
* lodash.isFunction(lodash.foo);
* // => false
* lodash.isFunction(lodash.bar);
* // => true
*
* // Create a suped-up `defer` in Node.js.
* var defer = _.runInContext({ 'setTimeout': setImmediate }).defer;
*/
var runInContext = (function runInContext(context) {
context = context == null ? root : _.defaults(root.Object(), context, _.pick(root, contextProps));
/** Built-in constructor references. */
var Array = context.Array,
Date = context.Date,
Error = context.Error,
Function = context.Function,
Math = context.Math,
Object = context.Object,
RegExp = context.RegExp,
String = context.String,
TypeError = context.TypeError;
/** Used for built-in method references. */
var arrayProto = Array.prototype,
funcProto = Function.prototype,
objectProto = Object.prototype;
/** Used to detect overreaching core-js shims. */
var coreJsData = context['__core-js_shared__'];
/** Used to resolve the decompiled source of functions. */
var funcToString = funcProto.toString;
/** Used to check objects for own properties. */
var hasOwnProperty = objectProto.hasOwnProperty;
/** Used to generate unique IDs. */
var idCounter = 0;
/** Used to detect methods masquerading as native. */
var maskSrcKey = (function() {
var uid = /[^.]+$/.exec(coreJsData && coreJsData.keys && coreJsData.keys.IE_PROTO || '');
return uid ? ('Symbol(src)_1.' + uid) : '';
}());
/**
* Used to resolve the
* [`toStringTag`](http://ecma-international.org/ecma-262/7.0/#sec-object.prototype.tostring)
* of values.
*/
var nativeObjectToString = objectProto.toString;
/** Used to infer the `Object` constructor. */
var objectCtorString = funcToString.call(Object);
/** Used to restore the original `_` reference in `_.noConflict`. */
var oldDash = root._;
/** Used to detect if a method is native. */
var reIsNative = RegExp('^' +
funcToString.call(hasOwnProperty).replace(reRegExpChar, '\\$&')
.replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g, '$1.*?') + '$'
);
/** Built-in value references. */
var Buffer = moduleExports ? context.Buffer : undefined,
Symbol = context.Symbol,
Uint8Array = context.Uint8Array,
allocUnsafe = Buffer ? Buffer.allocUnsafe : undefined,
getPrototype = overArg(Object.getPrototypeOf, Object),
objectCreate = Object.create,
propertyIsEnumerable = objectProto.propertyIsEnumerable,
splice = arrayProto.splice,
spreadableSymbol = Symbol ? Symbol.isConcatSpreadable : undefined,
symIterator = Symbol ? Symbol.iterator : undefined,
symToStringTag = Symbol ? Symbol.toStringTag : undefined;
var defineProperty = (function() {
try {
var func = getNative(Object, 'defineProperty');
func({}, '', {});
return func;
} catch (e) {}
}());
/** Mocked built-ins. */
var ctxClearTimeout = context.clearTimeout !== root.clearTimeout && context.clearTimeout,
ctxNow = Date && Date.now !== root.Date.now && Date.now,
ctxSetTimeout = context.setTimeout !== root.setTimeout && context.setTimeout;
/* Built-in method references for those with the same name as other `lodash` methods. */
var nativeCeil = Math.ceil,
nativeFloor = Math.floor,
nativeGetSymbols = Object.getOwnPropertySymbols,
nativeIsBuffer = Buffer ? Buffer.isBuffer : undefined,
nativeIsFinite = context.isFinite,
nativeJoin = arrayProto.join,
nativeKeys = overArg(Object.keys, Object),
nativeMax = Math.max,
nativeMin = Math.min,
nativeNow = Date.now,
nativeParseInt = context.parseInt,
nativeRandom = Math.random,
nativeReverse = arrayProto.reverse;
/* Built-in method references that are verified to be native. */
var DataView = getNative(context, 'DataView'),
Map = getNative(context, 'Map'),
Promise = getNative(context, 'Promise'),
Set = getNative(context, 'Set'),
WeakMap = getNative(context, 'WeakMap'),
nativeCreate = getNative(Object, 'create');
/** Used to store function metadata. */
var metaMap = WeakMap && new WeakMap;
/** Used to lookup unminified function names. */
var realNames = {};
/** Used to detect maps, sets, and weakmaps. */
var dataViewCtorString = toSource(DataView),
mapCtorString = toSource(Map),
promiseCtorString = toSource(Promise),
setCtorString = toSource(Set),
weakMapCtorString = toSource(WeakMap);
/** Used to convert symbols to primitives and strings. */
var symbolProto = Symbol ? Symbol.prototype : undefined,
symbolValueOf = symbolProto ? symbolProto.valueOf : undefined,
symbolToString = symbolProto ? symbolProto.toString : undefined;
/*------------------------------------------------------------------------*/
/**
* Creates a `lodash` object which wraps `value` to enable implicit method
* chain sequences. Methods that operate on and return arrays, collections,
* and functions can be chained together. Methods that retrieve a single value
* or may return a primitive value will automatically end the chain sequence
* and return the unwrapped value. Otherwise, the value must be unwrapped
* with `_#value`.
*
* Explicit chain sequences, which must be unwrapped with `_#value`, may be
* enabled using `_.chain`.
*
* The execution of chained methods is lazy, that is, it's deferred until
* `_#value` is implicitly or explicitly called.
*
* Lazy evaluation allows several methods to support shortcut fusion.
* Shortcut fusion is an optimization to merge iteratee calls; this avoids
* the creation of intermediate arrays and can greatly reduce the number of
* iteratee executions. Sections of a chain sequence qualify for shortcut
* fusion if the section is applied to an array and iteratees accept only
* one argument. The heuristic for whether a section qualifies for shortcut
* fusion is subject to change.
*
* Chaining is supported in custom builds as long as the `_#value` method is
* directly or indirectly included in the build.
*
* In addition to lodash methods, wrappers have `Array` and `String` methods.
*
* The wrapper `Array` methods are:
* `concat`, `join`, `pop`, `push`, `shift`, `sort`, `splice`, and `unshift`
*
* The wrapper `String` methods are:
* `replace` and `split`
*
* The wrapper methods that support shortcut fusion are:
* `at`, `compact`, `drop`, `dropRight`, `dropWhile`, `filter`, `find`,
* `findLast`, `head`, `initial`, `last`, `map`, `reject`, `reverse`, `slice`,
* `tail`, `take`, `takeRight`, `takeRightWhile`, `takeWhile`, and `toArray`
*
* The chainable wrapper methods are:
* `after`, `ary`, `assign`, `assignIn`, `assignInWith`, `assignWith`, `at`,
* `before`, `bind`, `bindAll`, `bindKey`, `castArray`, `chain`, `chunk`,
* `commit`, `compact`, `concat`, `conforms`, `constant`, `countBy`, `create`,
* `curry`, `debounce`, `defaults`, `defaultsDeep`, `defer`, `delay`,
* `difference`, `differenceBy`, `differenceWith`, `drop`, `dropRight`,
* `dropRightWhile`, `dropWhile`, `extend`, `extendWith`, `fill`, `filter`,
* `flatMap`, `flatMapDeep`, `flatMapDepth`, `flatten`, `flattenDeep`,
* `flattenDepth`, `flip`, `flow`, `flowRight`, `fromPairs`, `functions`,
* `functionsIn`, `groupBy`, `initial`, `intersection`, `intersectionBy`,
* `intersectionWith`, `invert`, `invertBy`, `invokeMap`, `iteratee`, `keyBy`,
* `keys`, `keysIn`, `map`, `mapKeys`, `mapValues`, `matches`, `matchesProperty`,
* `memoize`, `merge`, `mergeWith`, `method`, `methodOf`, `mixin`, `negate`,
* `nthArg`, `omit`, `omitBy`, `once`, `orderBy`, `over`, `overArgs`,
* `overEvery`, `overSome`, `partial`, `partialRight`, `partition`, `pick`,
* `pickBy`, `plant`, `property`, `propertyOf`, `pull`, `pullAll`, `pullAllBy`,
* `pullAllWith`, `pullAt`, `push`, `range`, `rangeRight`, `rearg`, `reject`,
* `remove`, `rest`, `reverse`, `sampleSize`, `set`, `setWith`, `shuffle`,
* `slice`, `sort`, `sortBy`, `splice`, `spread`, `tail`, `take`, `takeRight`,
* `takeRightWhile`, `takeWhile`, `tap`, `throttle`, `thru`, `toArray`,
* `toPairs`, `toPairsIn`, `toPath`, `toPlainObject`, `transform`, `unary`,
* `union`, `unionBy`, `unionWith`, `uniq`, `uniqBy`, `uniqWith`, `unset`,
* `unshift`, `unzip`, `unzipWith`, `update`, `updateWith`, `values`,
* `valuesIn`, `without`, `wrap`, `xor`, `xorBy`, `xorWith`, `zip`,
* `zipObject`, `zipObjectDeep`, and `zipWith`
*
* The wrapper methods that are **not** chainable by default are:
* `add`, `attempt`, `camelCase`, `capitalize`, `ceil`, `clamp`, `clone`,
* `cloneDeep`, `cloneDeepWith`, `cloneWith`, `conformsTo`, `deburr`,
* `defaultTo`, `divide`, `each`, `eachRight`, `endsWith`, `eq`, `escape`,
* `escapeRegExp`, `every`, `find`, `findIndex`, `findKey`, `findLast`,
* `findLastIndex`, `findLastKey`, `first`, `floor`, `forEach`, `forEachRight`,
* `forIn`, `forInRight`, `forOwn`, `forOwnRight`, `get`, `gt`, `gte`, `has`,
* `hasIn`, `head`, `identity`, `includes`, `indexOf`, `inRange`, `invoke`,
* `isArguments`, `isArray`, `isArrayBuffer`, `isArrayLike`, `isArrayLikeObject`,
* `isBoolean`, `isBuffer`, `isDate`, `isElement`, `isEmpty`, `isEqual`,
* `isEqualWith`, `isError`, `isFinite`, `isFunction`, `isInteger`, `isLength`,
* `isMap`, `isMatch`, `isMatchWith`, `isNaN`, `isNative`, `isNil`, `isNull`,
* `isNumber`, `isObject`, `isObjectLike`, `isPlainObject`, `isRegExp`,
* `isSafeInteger`, `isSet`, `isString`, `isUndefined`, `isTypedArray`,
* `isWeakMap`, `isWeakSet`, `join`, `kebabCase`, `last`, `lastIndexOf`,
* `lowerCase`, `lowerFirst`, `lt`, `lte`, `max`, `maxBy`, `mean`, `meanBy`,
* `min`, `minBy`, `multiply`, `noConflict`, `noop`, `now`, `nth`, `pad`,
* `padEnd`, `padStart`, `parseInt`, `pop`, `random`, `reduce`, `reduceRight`,
* `repeat`, `result`, `round`, `runInContext`, `sample`, `shift`, `size`,
* `snakeCase`, `some`, `sortedIndex`, `sortedIndexBy`, `sortedLastIndex`,
* `sortedLastIndexBy`, `startCase`, `startsWith`, `stubArray`, `stubFalse`,
* `stubObject`, `stubString`, `stubTrue`, `subtract`, `sum`, `sumBy`,
* `template`, `times`, `toFinite`, `toInteger`, `toJSON`, `toLength`,
* `toLower`, `toNumber`, `toSafeInteger`, `toString`, `toUpper`, `trim`,
* `trimEnd`, `trimStart`, `truncate`, `unescape`, `uniqueId`, `upperCase`,
* `upperFirst`, `value`, and `words`
*
* @name _
* @constructor
* @category Seq
* @param {*} value The value to wrap in a `lodash` instance.
* @returns {Object} Returns the new `lodash` wrapper instance.
* @example
*
* function square(n) {
* return n * n;
* }
*
* var wrapped = _([1, 2, 3]);
*
* // Returns an unwrapped value.
* wrapped.reduce(_.add);
* // => 6
*
* // Returns a wrapped value.
* var squares = wrapped.map(square);
*
* _.isArray(squares);
* // => false
*
* _.isArray(squares.value());
* // => true
*/
function lodash(value) {
  // A value is only eligible for wrapper reuse when it is object-like,
  // not a plain array, and not already a lazy sequence.
  var reusable = isObjectLike(value) &&
    !isArray(value) &&
    !(value instanceof LazyWrapper);
  if (!reusable) {
    return new LodashWrapper(value);
  }
  if (value instanceof LodashWrapper) {
    // Already a wrapper; hand it back untouched.
    return value;
  }
  return hasOwnProperty.call(value, '__wrapped__')
    ? wrapperClone(value)
    : new LodashWrapper(value);
}
/**
 * The base implementation of `_.create` without support for assigning
 * properties to the created object.
 *
 * @private
 * @param {Object} proto The object to inherit from.
 * @returns {Object} Returns the new object.
 */
var baseCreate = (function() {
  // Reused dummy constructor for the legacy (no `Object.create`) path.
  function object() {}
  return function(proto) {
    if (!isObject(proto)) {
      // Non-object prototypes can't be inherited from; return a plain object.
      return {};
    }
    if (objectCreate) {
      // Fast path: native `Object.create` when available.
      return objectCreate(proto);
    }
    object.prototype = proto;
    var result = new object;
    // Reset so the shared dummy constructor doesn't retain `proto`.
    object.prototype = undefined;
    return result;
  };
}());
/**
 * The function whose prototype chain sequence wrappers inherit from.
 * Both `LodashWrapper.prototype` and `LazyWrapper.prototype` are created
 * from `baseLodash.prototype` (see the wiring statements below each
 * constructor), so wrapper instances share one common ancestor.
 *
 * @private
 */
function baseLodash() {
  // No operation performed.
}
/**
 * The base constructor for creating `lodash` wrapper objects.
 *
 * @private
 * @param {*} value The value to wrap.
 * @param {boolean} [chainAll] Enable explicit method chain sequences.
 */
function LodashWrapper(value, chainAll) {
  // Queue of pending actions to apply when the wrapper is resolved.
  this.__actions__ = [];
  // Coerce to a strict boolean: explicit chaining enabled or not.
  this.__chain__ = chainAll ? true : false;
  this.__index__ = 0;
  this.__values__ = undefined;
  this.__wrapped__ = value;
}
/**
 * By default, the template delimiters used by lodash are like those in
 * embedded Ruby (ERB) as well as ES2015 template strings. Change the
 * following template settings to use alternative delimiters.
 *
 * **Security:** See
 * [threat model](https://github.com/lodash/lodash/blob/main/threat-model.md)
 * — `_.template` is insecure and will be removed in v5.
 *
 * @static
 * @memberOf _
 * @type {Object}
 */
lodash.templateSettings = {
  /**
   * Used to detect `data` property values to be HTML-escaped.
   *
   * @memberOf _.templateSettings
   * @type {RegExp}
   */
  'escape': reEscape,
  /**
   * Used to detect code to be evaluated.
   *
   * @memberOf _.templateSettings
   * @type {RegExp}
   */
  'evaluate': reEvaluate,
  /**
   * Used to detect `data` property values to inject.
   *
   * @memberOf _.templateSettings
   * @type {RegExp}
   */
  'interpolate': reInterpolate,
  /**
   * Used to reference the data object in the template text.
   *
   * NOTE(review): an empty string presumably means no named data variable
   * is used by compiled templates — confirm against `_.template`.
   *
   * @memberOf _.templateSettings
   * @type {string}
   */
  'variable': '',
  /**
   * Used to import variables into the compiled template.
   *
   * @memberOf _.templateSettings
   * @type {Object}
   */
  'imports': {
    /**
     * A reference to the `lodash` function.
     *
     * @memberOf _.templateSettings.imports
     * @type {Function}
     */
    '_': lodash
  }
};
// Ensure wrappers are instances of `baseLodash`.
// `lodash` shares `baseLodash.prototype` directly, while `LodashWrapper`
// gets its own prototype object that inherits from it, so methods added
// to `LodashWrapper.prototype` don't leak onto `lodash.prototype`.
lodash.prototype = baseLodash.prototype;
lodash.prototype.constructor = lodash;
LodashWrapper.prototype = baseCreate(baseLodash.prototype);
LodashWrapper.prototype.constructor = LodashWrapper;
/*------------------------------------------------------------------------*/
/**
* Creates a lazy wrapper object which wraps `value` to enable lazy evaluation.
*
* @private
* @constructor
* @param {*} value The value to wrap.
*/
function LazyWrapper(value) {
this.__wrapped__ = value;
this.__actions__ = [];
this.__dir__ = 1;
this.__filtered__ = false;
this.__iteratees__ = [];
this.__takeCount__ = MAX_ARRAY_LENGTH;
this.__views__ = [];
}
/**
 * Creates a clone of the lazy wrapper object.
 *
 * @private
 * @name clone
 * @memberOf LazyWrapper
 * @returns {Object} Returns the cloned `LazyWrapper` object.
 */
function lazyClone() {
  var clone = new LazyWrapper(this.__wrapped__);
  // Copy the mutable arrays so the clone and original diverge safely.
  clone.__actions__ = copyArray(this.__actions__);
  clone.__iteratees__ = copyArray(this.__iteratees__);
  clone.__views__ = copyArray(this.__views__);
  // Scalars can be copied by plain assignment.
  clone.__dir__ = this.__dir__;
  clone.__filtered__ = this.__filtered__;
  clone.__takeCount__ = this.__takeCount__;
  return clone;
}
/**
 * Reverses the direction of lazy iteration.
 *
 * @private
 * @name reverse
 * @memberOf LazyWrapper
 * @returns {Object} Returns the new reversed `LazyWrapper` object.
 */
function lazyReverse() {
  var result;
  if (this.__filtered__) {
    // A filtered sequence is wrapped in a new reversed wrapper so the
    // filtering runs before the reversal is applied.
    result = new LazyWrapper(this);
    result.__dir__ = -1;
    result.__filtered__ = true;
  } else {
    // Otherwise simply clone and flip the iteration direction.
    result = this.clone();
    result.__dir__ *= -1;
  }
  return result;
}
/**
 * Extracts the unwrapped value from its lazy wrapper.
 *
 * @private
 * @name value
 * @memberOf LazyWrapper
 * @returns {*} Returns the unwrapped value.
 */
function lazyValue() {
  var array = this.__wrapped__.value(),
      dir = this.__dir__,
      isArr = isArray(array),
      isRight = dir < 0,
      arrLength = isArr ? array.length : 0,
      view = getView(0, arrLength, this.__views__),
      start = view.start,
      end = view.end,
      length = end - start,
      // Start one step outside the view; `index += dir` below steps in.
      index = isRight ? end : (start - 1),
      iteratees = this.__iteratees__,
      iterLength = iteratees.length,
      resIndex = 0,
      takeCount = nativeMin(length, this.__takeCount__);
  // Fall back to eager evaluation when the wrapped value isn't an array,
  // or when iterating left-to-right over the full, untrimmed length
  // (nothing for the lazy loop to save).
  if (!isArr || (!isRight && arrLength == length && takeCount == length)) {
    return baseWrapperValue(array, this.__actions__);
  }
  var result = [];
  outer:
  while (length-- && resIndex < takeCount) {
    index += dir;
    var iterIndex = -1,
        value = array[index];
    // Run each queued iteratee over the element, in queue order.
    while (++iterIndex < iterLength) {
      var data = iteratees[iterIndex],
          iteratee = data.iteratee,
          type = data.type,
          computed = iteratee(value);
      if (type == LAZY_MAP_FLAG) {
        // Map: replace the element and continue with the next iteratee.
        value = computed;
      } else if (!computed) {
        if (type == LAZY_FILTER_FLAG) {
          // Filter rejected this element; move to the next element.
          continue outer;
        } else {
          // Any other falsey result terminates the whole traversal.
          break outer;
        }
      }
    }
    result[resIndex++] = value;
  }
  return result;
}
// Ensure `LazyWrapper` is an instance of `baseLodash`.
// Created via `baseCreate` so `LazyWrapper.prototype` inherits from (rather
// than aliases) `baseLodash.prototype`.
LazyWrapper.prototype = baseCreate(baseLodash.prototype);
LazyWrapper.prototype.constructor = LazyWrapper;
/*------------------------------------------------------------------------*/
/**
 * Creates a hash object.
 *
 * @private
 * @constructor
 * @param {Array} [entries] The key-value pairs to cache.
 */
function Hash(entries) {
  this.clear();
  var length = entries == null ? 0 : entries.length;
  for (var index = 0; index < length; index++) {
    var entry = entries[index];
    this.set(entry[0], entry[1]);
  }
}
/**
 * Removes all key-value entries from the hash.
 *
 * @private
 * @name clear
 * @memberOf Hash
 */
function hashClear() {
  this.size = 0;
  // Prefer a prototype-less object (via `Object.create(null)` when
  // available) so keys never collide with `Object.prototype` members.
  this.__data__ = nativeCreate ? nativeCreate(null) : {};
}
/**
 * Removes `key` and its value from the hash.
 *
 * @private
 * @name delete
 * @memberOf Hash
 * @param {string} key The key of the value to remove.
 * @returns {boolean} Returns `true` if the entry was removed, else `false`.
 */
function hashDelete(key) {
  // `delete` on an existing own property returns `true`.
  var removed = this.has(key) && delete this.__data__[key];
  if (removed) {
    --this.size;
  }
  return removed;
}
/**
 * Gets the hash value for `key`.
 *
 * @private
 * @name get
 * @memberOf Hash
 * @param {string} key The key of the value to get.
 * @returns {*} Returns the entry value.
 */
function hashGet(key) {
  var data = this.__data__;
  if (!nativeCreate) {
    // Fallback store is a plain object; guard against inherited keys.
    return hasOwnProperty.call(data, key) ? data[key] : undefined;
  }
  var value = data[key];
  // `HASH_UNDEFINED` is the stored stand-in for an `undefined` value.
  return value === HASH_UNDEFINED ? undefined : value;
}
/**
 * Checks if a hash value for `key` exists.
 *
 * @private
 * @name has
 * @memberOf Hash
 * @param {string} key The key of the entry to check.
 * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.
 */
function hashHas(key) {
  var data = this.__data__;
  if (nativeCreate) {
    // Values are never stored as `undefined` (see `hashSet`), so a simple
    // `!== undefined` check suffices on the prototype-less store.
    return data[key] !== undefined;
  }
  return hasOwnProperty.call(data, key);
}
/**
 * Sets the hash `key` to `value`.
 *
 * @private
 * @name set
 * @memberOf Hash
 * @param {string} key The key of the value to set.
 * @param {*} value The value to set.
 * @returns {Object} Returns the hash instance.
 */
function hashSet(key, value) {
  var data = this.__data__;
  if (!this.has(key)) {
    ++this.size;
  }
  // Store a sentinel for `undefined` so `hashHas` can use a cheap check.
  data[key] = (nativeCreate && value === undefined) ? HASH_UNDEFINED : value;
  return this;
}
// Add methods to `Hash`.
// `delete` is assigned with bracket notation because it is a reserved word
// in older JavaScript engines.
Hash.prototype.clear = hashClear;
Hash.prototype['delete'] = hashDelete;
Hash.prototype.get = hashGet;
Hash.prototype.has = hashHas;
Hash.prototype.set = hashSet;
/*------------------------------------------------------------------------*/
/**
 * Creates an list cache object.
 *
 * @private
 * @constructor
 * @param {Array} [entries] The key-value pairs to cache.
 */
function ListCache(entries) {
  this.clear();
  var length = entries == null ? 0 : entries.length;
  for (var index = 0; index < length; index++) {
    var entry = entries[index];
    this.set(entry[0], entry[1]);
  }
}
/**
 * Removes all key-value entries from the list cache.
 *
 * @private
 * @name clear
 * @memberOf ListCache
 */
function listCacheClear() {
  this.size = 0;
  // Entries are kept as an array of `[key, value]` pairs.
  this.__data__ = [];
}
/**
 * Removes `key` and its value from the list cache.
 *
 * @private
 * @name delete
 * @memberOf ListCache
 * @param {string} key The key of the value to remove.
 * @returns {boolean} Returns `true` if the entry was removed, else `false`.
 */
function listCacheDelete(key) {
  var data = this.__data__,
      index = assocIndexOf(data, key);
  if (index < 0) {
    return false;
  }
  if (index == data.length - 1) {
    // Common case: removing the most recently added entry.
    data.pop();
  } else {
    splice.call(data, index, 1);
  }
  this.size--;
  return true;
}
/**
 * Gets the list cache value for `key`.
 *
 * @private
 * @name get
 * @memberOf ListCache
 * @param {string} key The key of the value to get.
 * @returns {*} Returns the entry value.
 */
function listCacheGet(key) {
  var index = assocIndexOf(this.__data__, key);
  if (index < 0) {
    return undefined;
  }
  // Each entry is a `[key, value]` pair.
  return this.__data__[index][1];
}
/**
 * Checks if a list cache value for `key` exists.
 *
 * @private
 * @name has
 * @memberOf ListCache
 * @param {string} key The key of the entry to check.
 * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.
 */
function listCacheHas(key) {
  return assocIndexOf(this.__data__, key) >= 0;
}
/**
 * Sets the list cache `key` to `value`.
 *
 * @private
 * @name set
 * @memberOf ListCache
 * @param {string} key The key of the value to set.
 * @param {*} value The value to set.
 * @returns {Object} Returns the list cache instance.
 */
function listCacheSet(key, value) {
  var data = this.__data__,
      index = assocIndexOf(data, key);
  if (index > -1) {
    // Existing entry: overwrite in place.
    data[index][1] = value;
  } else {
    this.size++;
    data.push([key, value]);
  }
  return this;
}
// Add methods to `ListCache`.
// `delete` is assigned with bracket notation because it is a reserved word
// in older JavaScript engines.
ListCache.prototype.clear = listCacheClear;
ListCache.prototype['delete'] = listCacheDelete;
ListCache.prototype.get = listCacheGet;
ListCache.prototype.has = listCacheHas;
ListCache.prototype.set = listCacheSet;
/*------------------------------------------------------------------------*/
/**
 * Creates a map cache object to store key-value pairs.
 *
 * @private
 * @constructor
 * @param {Array} [entries] The key-value pairs to cache.
 */
function MapCache(entries) {
  this.clear();
  var length = entries == null ? 0 : entries.length;
  for (var index = 0; index < length; index++) {
    var entry = entries[index];
    this.set(entry[0], entry[1]);
  }
}
/**
 * Removes all key-value entries from the map.
 *
 * @private
 * @name clear
 * @memberOf MapCache
 */
function mapCacheClear() {
  this.size = 0;
  // Three backing stores, presumably selected per key type by
  // `getMapData`: hashes for primitives, and a native `Map` (or a
  // `ListCache` fallback) for everything else — confirm in `getMapData`.
  this.__data__ = {
    'hash': new Hash,
    'string': new Hash,
    'map': new (Map || ListCache)
  };
}
/**
 * Removes `key` and its value from the map.
 *
 * @private
 * @name delete
 * @memberOf MapCache
 * @param {string} key The key of the value to remove.
 * @returns {boolean} Returns `true` if the entry was removed, else `false`.
 */
function mapCacheDelete(key) {
  var removed = getMapData(this, key)['delete'](key);
  if (removed) {
    --this.size;
  }
  return removed;
}
/**
 * Gets the map value for `key`.
 *
 * @private
 * @name get
 * @memberOf MapCache
 * @param {string} key The key of the value to get.
 * @returns {*} Returns the entry value.
 */
function mapCacheGet(key) {
  // Delegate to whichever backing store handles this key type.
  var data = getMapData(this, key);
  return data.get(key);
}
/**
 * Checks if a map value for `key` exists.
 *
 * @private
 * @name has
 * @memberOf MapCache
 * @param {string} key The key of the entry to check.
 * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.
 */
function mapCacheHas(key) {
  // Delegate to whichever backing store handles this key type.
  var data = getMapData(this, key);
  return data.has(key);
}
/**
 * Sets the map `key` to `value`.
 *
 * @private
 * @name set
 * @memberOf MapCache
 * @param {string} key The key of the value to set.
 * @param {*} value The value to set.
 * @returns {Object} Returns the map cache instance.
 */
function mapCacheSet(key, value) {
  var data = getMapData(this, key),
      sizeBefore = data.size;
  data.set(key, value);
  // Only count the entry if the backing store actually grew.
  if (data.size != sizeBefore) {
    ++this.size;
  }
  return this;
}
// Add methods to `MapCache`.
// `delete` is assigned with bracket notation because it is a reserved word
// in older JavaScript engines.
MapCache.prototype.clear = mapCacheClear;
MapCache.prototype['delete'] = mapCacheDelete;
MapCache.prototype.get = mapCacheGet;
MapCache.prototype.has = mapCacheHas;
MapCache.prototype.set = mapCacheSet;
/*------------------------------------------------------------------------*/
/**
 *
 * Creates an array cache object to store unique values.
 *
 * @private
 * @constructor
 * @param {Array} [values] The values to cache.
 */
function SetCache(values) {
  this.__data__ = new MapCache;
  var length = values == null ? 0 : values.length;
  for (var index = 0; index < length; index++) {
    this.add(values[index]);
  }
}
/**
 * Adds `value` to the array cache.
 *
 * @private
 * @name add
 * @memberOf SetCache
 * @alias push
 * @param {*} value The value to cache.
 * @returns {Object} Returns the cache instance.
 */
function setCacheAdd(value) {
  var data = this.__data__;
  // Only membership matters; store a sentinel as the mapped value.
  data.set(value, HASH_UNDEFINED);
  return this;
}
/**
 * Checks if `value` is in the array cache.
 *
 * @private
 * @name has
 * @memberOf SetCache
 * @param {*} value The value to search for.
 * @returns {boolean} Returns `true` if `value` is found, else `false`.
 */
function setCacheHas(value) {
  var data = this.__data__;
  return data.has(value);
}
// Add methods to `SetCache`.
// `push` is aliased to `add` so a `SetCache` can stand in for an array in
// code that appends via `push`.
SetCache.prototype.add = SetCache.prototype.push = setCacheAdd;
SetCache.prototype.has = setCacheHas;
/*------------------------------------------------------------------------*/
/**
 * Creates a stack cache object to store key-value pairs.
 *
 * @private
 * @constructor
 * @param {Array} [entries] The key-value pairs to cache.
 */
function Stack(entries) {
  // Start small with a linear `ListCache`; `stackSet` upgrades to a
  // `MapCache` when the entry count grows.
  var data = new ListCache(entries);
  this.__data__ = data;
  this.size = data.size;
}
/**
 * Removes all key-value entries from the stack.
 *
 * @private
 * @name clear
 * @memberOf Stack
 */
function stackClear() {
  this.size = 0;
  this.__data__ = new ListCache;
}
/**
 * Removes `key` and its value from the stack.
 *
 * @private
 * @name delete
 * @memberOf Stack
 * @param {string} key The key of the value to remove.
 * @returns {boolean} Returns `true` if the entry was removed, else `false`.
 */
function stackDelete(key) {
  var data = this.__data__,
      removed = data['delete'](key);
  // Mirror the backing cache's size rather than tracking separately.
  this.size = data.size;
  return removed;
}
/**
 * Gets the stack value for `key`.
 *
 * @private
 * @name get
 * @memberOf Stack
 * @param {string} key The key of the value to get.
 * @returns {*} Returns the entry value.
 */
function stackGet(key) {
  var data = this.__data__;
  return data.get(key);
}
/**
 * Checks if a stack value for `key` exists.
 *
 * @private
 * @name has
 * @memberOf Stack
 * @param {string} key The key of the entry to check.
 * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.
 */
function stackHas(key) {
  var data = this.__data__;
  return data.has(key);
}
/**
 * Sets the stack `key` to `value`.
 *
 * @private
 * @name set
 * @memberOf Stack
 * @param {string} key The key of the value to set.
 * @param {*} value The value to set.
 * @returns {Object} Returns the stack cache instance.
 */
function stackSet(key, value) {
  var data = this.__data__;
  if (data instanceof ListCache) {
    var pairs = data.__data__;
    if (Map && pairs.length >= LARGE_ARRAY_SIZE - 1) {
      // The linear list is getting large; upgrade to a `MapCache`
      // (which uses the native `Map` when available).
      data = this.__data__ = new MapCache(pairs);
    } else {
      // Still small (or no native `Map`): append to the pair list directly.
      pairs.push([key, value]);
      this.size = ++data.size;
      return this;
    }
  }
  data.set(key, value);
  this.size = data.size;
  return this;
}
// Add methods to `Stack`.
// `delete` is assigned with bracket notation because it is a reserved word
// in older JavaScript engines.
Stack.prototype.clear = stackClear;
Stack.prototype['delete'] = stackDelete;
Stack.prototype.get = stackGet;
Stack.prototype.has = stackHas;
Stack.prototype.set = stackSet;
/*------------------------------------------------------------------------*/
/**
 * Creates an array of the enumerable property names of the array-like `value`.
 *
 * @private
 * @param {*} value The value to query.
 * @param {boolean} inherited Specify returning inherited property names.
 * @returns {Array} Returns the array of property names.
 */
function arrayLikeKeys(value, inherited) {
  var isArr = isArray(value),
      isArg = !isArr && isArguments(value),
      isBuff = !isArr && !isArg && isBuffer(value),
      isType = !isArr && !isArg && !isBuff && isTypedArray(value),
      skipIndexes = isArr || isArg || isBuff || isType,
      // For indexed collections, pre-seed the result with the stringified
      // indexes `"0"`..`"length-1"`, and skip index keys in the loop below.
      result = skipIndexes ? baseTimes(value.length, String) : [],
      length = result.length;
  for (var key in value) {
    if ((inherited || hasOwnProperty.call(value, key)) &&
        !(skipIndexes && (
          // Safari 9 has enumerable `arguments.length` in strict mode.
          key == 'length' ||
          // Node.js 0.10 has enumerable non-index properties on buffers.
          (isBuff && (key == 'offset' || key == 'parent')) ||
          // PhantomJS 2 has enumerable non-index properties on typed arrays.
          (isType && (key == 'buffer' || key == 'byteLength' || key == 'byteOffset')) ||
          // Skip index properties.
          isIndex(key, length)
        ))) {
      result.push(key);
    }
  }
  return result;
}
/**
 * A specialized version of `_.sample` for arrays.
 *
 * @private
 * @param {Array} array The array to sample.
 * @returns {*} Returns the random element.
 */
function arraySample(array) {
  var length = array.length;
  if (!length) {
    // Nothing to pick from.
    return undefined;
  }
  return array[baseRandom(0, length - 1)];
}
/**
 * A specialized version of `_.sampleSize` for arrays.
 *
 * @private
 * @param {Array} array The array to sample.
 * @param {number} n The number of elements to sample.
 * @returns {Array} Returns the random elements.
 */
function arraySampleSize(array, n) {
  // Clamp the requested size to the array bounds before shuffling a copy.
  var size = baseClamp(n, 0, array.length);
  return shuffleSelf(copyArray(array), size);
}
/**
 * A specialized version of `_.shuffle` for arrays.
 *
 * @private
 * @param {Array} array The array to shuffle.
 * @returns {Array} Returns the new shuffled array.
 */
function arrayShuffle(array) {
  // Copy first so the input array is left untouched.
  var clone = copyArray(array);
  return shuffleSelf(clone);
}
/**
 * This function is like `assignValue` except that it doesn't assign
 * `undefined` values.
 *
 * @private
 * @param {Object} object The object to modify.
 * @param {string} key The key of the property to assign.
 * @param {*} value The value to assign.
 */
function assignMergeValue(object, key, value) {
  if (value === undefined) {
    // Only materialize an `undefined` value for keys the object lacks.
    if (!(key in object)) {
      baseAssignValue(object, key, value);
    }
  } else if (!eq(object[key], value)) {
    // Skip the write when the value is already equivalent (SameValueZero).
    baseAssignValue(object, key, value);
  }
}
/**
 * Assigns `value` to `key` of `object` if the existing value is not equivalent
 * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
 * for equality comparisons.
 *
 * @private
 * @param {Object} object The object to modify.
 * @param {string} key The key of the property to assign.
 * @param {*} value The value to assign.
 */
function assignValue(object, key, value) {
  var objValue = object[key],
      // Equivalent only when the key is an *own* property with an equal value.
      isEquivalent = hasOwnProperty.call(object, key) && eq(objValue, value);
  if (!isEquivalent || (value === undefined && !(key in object))) {
    baseAssignValue(object, key, value);
  }
}
/**
 * Gets the index at which the `key` is found in `array` of key-value pairs.
 *
 * @private
 * @param {Array} array The array to inspect.
 * @param {*} key The key to search for.
 * @returns {number} Returns the index of the matched value, else `-1`.
 */
function assocIndexOf(array, key) {
  // Scan from the end so the most recently appended entry wins.
  for (var index = array.length - 1; index >= 0; index--) {
    if (eq(array[index][0], key)) {
      return index;
    }
  }
  return -1;
}
/**
 * Aggregates elements of `collection` on `accumulator` with keys transformed
 * by `iteratee` and values set by `setter`.
 *
 * @private
 * @param {Array|Object} collection The collection to iterate over.
 * @param {Function} setter The function to set `accumulator` values.
 * @param {Function} iteratee The iteratee to transform keys.
 * @param {Object} accumulator The initial aggregated object.
 * @returns {Function} Returns `accumulator`.
 */
function baseAggregator(collection, setter, iteratee, accumulator) {
  baseEach(collection, function(value, key, coll) {
    // The setter decides how the transformed key groups `value`.
    setter(accumulator, value, iteratee(value), coll);
  });
  return accumulator;
}
/**
 * The base implementation of `_.assign` without support for multiple sources
 * or `customizer` functions.
 *
 * @private
 * @param {Object} object The destination object.
 * @param {Object} source The source object.
 * @returns {Object} Returns `object`.
 */
function baseAssign(object, source) {
  if (!object) {
    // Pass falsey destinations through untouched.
    return object;
  }
  return copyObject(source, keys(source), object);
}
/**
 * The base implementation of `_.assignIn` without support for multiple sources
 * or `customizer` functions.
 *
 * @private
 * @param {Object} object The destination object.
 * @param {Object} source The source object.
 * @returns {Object} Returns `object`.
 */
function baseAssignIn(object, source) {
  if (!object) {
    // Pass falsey destinations through untouched.
    return object;
  }
  // `keysIn` includes inherited enumerable properties.
  return copyObject(source, keysIn(source), object);
}
/**
 * The base implementation of `assignValue` and `assignMergeValue` without
 * value checks.
 *
 * @private
 * @param {Object} object The object to modify.
 * @param {string} key The key of the property to assign.
 * @param {*} value The value to assign.
 */
function baseAssignValue(object, key, value) {
  if (key != '__proto__' || !defineProperty) {
    object[key] = value;
    return;
  }
  // A plain assignment to `__proto__` would change the object's prototype;
  // `defineProperty` creates a real own data property instead.
  defineProperty(object, key, {
    'configurable': true,
    'enumerable': true,
    'value': value,
    'writable': true
  });
}
/**
 * The base implementation of `_.at` without support for individual paths.
 *
 * @private
 * @param {Object} object The object to iterate over.
 * @param {string[]} paths The property paths to pick.
 * @returns {Array} Returns the picked elements.
 */
function baseAt(object, paths) {
  var length = paths.length,
      result = Array(length),
      skip = object == null;
  // Explicitly assign every slot (even `undefined`) so the result has no holes.
  for (var index = 0; index < length; index++) {
    result[index] = skip ? undefined : get(object, paths[index]);
  }
  return result;
}
/**
 * The base implementation of `_.clamp` which doesn't coerce arguments.
 *
 * @private
 * @param {number} number The number to clamp.
 * @param {number} [lower] The lower bound.
 * @param {number} upper The upper bound.
 * @returns {number} Returns the clamped number.
 */
function baseClamp(number, lower, upper) {
  if (number !== number) {
    // NaN never equals itself; pass NaN through untouched.
    return number;
  }
  if (upper !== undefined) {
    number = number <= upper ? number : upper;
  }
  if (lower !== undefined) {
    number = number >= lower ? number : lower;
  }
  return number;
}
/**
 * The base implementation of `_.clone` and `_.cloneDeep` which tracks
 * traversed objects.
 *
 * @private
 * @param {*} value The value to clone.
 * @param {boolean} bitmask The bitmask flags.
 *  1 - Deep clone
 *  2 - Flatten inherited properties
 *  4 - Clone symbols
 * @param {Function} [customizer] The function to customize cloning.
 * @param {string} [key] The key of `value`.
 * @param {Object} [object] The parent object of `value`.
 * @param {Object} [stack] Tracks traversed objects and their clone counterparts.
 * @returns {*} Returns the cloned value.
 */
function baseClone(value, bitmask, customizer, key, object, stack) {
  var result,
      isDeep = bitmask & CLONE_DEEP_FLAG,
      isFlat = bitmask & CLONE_FLAT_FLAG,
      isFull = bitmask & CLONE_SYMBOLS_FLAG;
  // A customizer gets first say; any non-`undefined` result is used as-is.
  if (customizer) {
    result = object ? customizer(value, key, object, stack) : customizer(value);
  }
  if (result !== undefined) {
    return result;
  }
  // Primitives are immutable and returned unchanged.
  if (!isObject(value)) {
    return value;
  }
  var isArr = isArray(value);
  if (isArr) {
    result = initCloneArray(value);
    if (!isDeep) {
      // Shallow clone: copy the elements and stop here.
      return copyArray(value, result);
    }
  } else {
    var tag = getTag(value),
        isFunc = tag == funcTag || tag == genTag;
    if (isBuffer(value)) {
      return cloneBuffer(value, isDeep);
    }
    if (tag == objectTag || tag == argsTag || (isFunc && !object)) {
      result = (isFlat || isFunc) ? {} : initCloneObject(value);
      if (!isDeep) {
        // Shallow clone: copy properties (and symbols) and stop here.
        return isFlat
          ? copySymbolsIn(value, baseAssignIn(result, value))
          : copySymbols(value, baseAssign(result, value));
      }
    } else {
      if (!cloneableTags[tag]) {
        // Uncloneable types (per `cloneableTags`): keep nested values,
        // but top-level uncloneables become empty objects.
        return object ? value : {};
      }
      result = initCloneByTag(value, tag, isDeep);
    }
  }
  // Check for circular references and return its corresponding clone.
  stack || (stack = new Stack);
  var stacked = stack.get(value);
  if (stacked) {
    return stacked;
  }
  // Register the clone *before* recursing so cycles resolve to it.
  stack.set(value, result);
  if (isSet(value)) {
    value.forEach(function(subValue) {
      result.add(baseClone(subValue, bitmask, customizer, subValue, value, stack));
    });
  } else if (isMap(value)) {
    value.forEach(function(subValue, key) {
      result.set(key, baseClone(subValue, bitmask, customizer, key, value, stack));
    });
  }
  var keysFunc = isFull
    ? (isFlat ? getAllKeysIn : getAllKeys)
    : (isFlat ? keysIn : keys);
  // Arrays iterate their elements directly; objects iterate `keysFunc` keys.
  var props = isArr ? undefined : keysFunc(value);
  arrayEach(props || value, function(subValue, key) {
    if (props) {
      key = subValue;
      subValue = value[key];
    }
    // Recursively populate clone (susceptible to call stack limits).
    assignValue(result, key, baseClone(subValue, bitmask, customizer, key, value, stack));
  });
  return result;
}
/**
 * The base implementation of `_.conforms` which doesn't clone `source`.
 *
 * @private
 * @param {Object} source The object of property predicates to conform to.
 * @returns {Function} Returns the new spec function.
 */
function baseConforms(source) {
  // Capture the key list once so every invocation of the spec reuses it.
  var propNames = keys(source);
  return function(object) {
    return baseConformsTo(object, source, propNames);
  };
}
/**
 * The base implementation of `_.conformsTo` which accepts `props` to check.
 *
 * @private
 * @param {Object} object The object to inspect.
 * @param {Object} source The object of property predicates to conform to.
 * @returns {boolean} Returns `true` if `object` conforms, else `false`.
 */
function baseConformsTo(object, source, props) {
  var length = props.length;
  if (object == null) {
    // Only an empty spec is satisfied by `null`/`undefined`.
    return !length;
  }
  object = Object(object);
  for (var index = length - 1; index >= 0; index--) {
    var key = props[index],
        predicate = source[key],
        value = object[key];
    // A missing key fails without invoking the predicate.
    if (value === undefined && !(key in object)) {
      return false;
    }
    if (!predicate(value)) {
      return false;
    }
  }
  return true;
}
/**
 * The base implementation of `_.delay` and `_.defer` which accepts `args`
 * to provide to `func`.
 *
 * @private
 * @param {Function} func The function to delay.
 * @param {number} wait The number of milliseconds to delay invocation.
 * @param {Array} args The arguments to provide to `func`.
 * @returns {number|Object} Returns the timer id or timeout object.
 */
function baseDelay(func, wait, args) {
  if (typeof func != 'function') {
    throw new TypeError(FUNC_ERROR_TEXT);
  }
  var invoke = function() {
    // Invoke with an `undefined` `this` binding.
    func.apply(undefined, args);
  };
  return setTimeout(invoke, wait);
}
/**
 * The base implementation of methods like `_.difference` without support
 * for excluding multiple arrays or iteratee shorthands.
 *
 * @private
 * @param {Array} array The array to inspect.
 * @param {Array} values The values to exclude.
 * @param {Function} [iteratee] The iteratee invoked per element.
 * @param {Function} [comparator] The comparator invoked per element.
 * @returns {Array} Returns the new array of filtered values.
 */
function baseDifference(array, values, iteratee, comparator) {
  var index = -1,
      includes = arrayIncludes,
      // `isCommon` selects the inlined strict-equality scan below.
      isCommon = true,
      length = array.length,
      result = [],
      valuesLength = values.length;
  if (!length) {
    return result;
  }
  if (iteratee) {
    // Compare transformed values, not the originals.
    values = arrayMap(values, baseUnary(iteratee));
  }
  if (comparator) {
    includes = arrayIncludesWith;
    isCommon = false;
  }
  else if (values.length >= LARGE_ARRAY_SIZE) {
    // Large exclusion list: trade the linear scan for a `SetCache` lookup.
    includes = cacheHas;
    isCommon = false;
    values = new SetCache(values);
  }
  outer:
  while (++index < length) {
    var value = array[index],
        computed = iteratee == null ? value : iteratee(value);
    // Normalize `-0` to `0` unless a custom comparator is in play.
    value = (comparator || value !== 0) ? value : 0;
    // `computed === computed` excludes NaN from the strict-equality path.
    if (isCommon && computed === computed) {
      var valuesIndex = valuesLength;
      while (valuesIndex--) {
        if (values[valuesIndex] === computed) {
          continue outer;
        }
      }
      result.push(value);
    }
    else if (!includes(values, computed, comparator)) {
      result.push(value);
    }
  }
  return result;
}
/**
 * The base implementation of `_.forEach` without support for iteratee shorthands.
 *
 * Built from `baseForOwn` by the `createBaseEach` factory — see
 * `createBaseEach` for how array-likes are handled.
 *
 * @private
 * @param {Array|Object} collection The collection to iterate over.
 * @param {Function} iteratee The function invoked per iteration.
 * @returns {Array|Object} Returns `collection`.
 */
var baseEach = createBaseEach(baseForOwn);
/**
 * The base implementation of `_.forEachRight` without support for iteratee shorthands.
 *
 * Built from `baseForOwnRight` by `createBaseEach`; the second argument
 * requests right-to-left iteration.
 *
 * @private
 * @param {Array|Object} collection The collection to iterate over.
 * @param {Function} iteratee The function invoked per iteration.
 * @returns {Array|Object} Returns `collection`.
 */
var baseEachRight = createBaseEach(baseForOwnRight, true);
/**
 * The base implementation of `_.every` without support for iteratee shorthands.
 *
 * @private
 * @param {Array|Object} collection The collection to iterate over.
 * @param {Function} predicate The function invoked per iteration.
 * @returns {boolean} Returns `true` if all elements pass the predicate check,
 *  else `false`
 */
function baseEvery(collection, predicate) {
  var result = true;
  baseEach(collection, function(value, index, coll) {
    result = !!predicate(value, index, coll);
    // A falsey return value stops `baseEach` early.
    return result;
  });
  return result;
}
/**
 * The base implementation of methods like `_.max` and `_.min` which accepts a
 * `comparator` to determine the extremum value.
 *
 * @private
 * @param {Array} array The array to iterate over.
 * @param {Function} iteratee The iteratee invoked per iteration.
 * @param {Function} comparator The comparator used to compare values.
 * @returns {*} Returns the extremum value.
 */
function baseExtremum(array, iteratee, comparator) {
  var index = -1,
      length = array.length;
  while (++index < length) {
    var value = array[index],
        current = iteratee(value);
    // NOTE: `computed` and `result` are declared inside the `if` body below;
    // via `var` hoisting they are `undefined` until the first qualifying
    // element, so the `computed === undefined` branch seeds the extremum
    // with the first non-null, non-NaN, non-symbol computed value.
    if (current != null && (computed === undefined
          ? (current === current && !isSymbol(current))
          : comparator(current, computed)
        )) {
      var computed = current,
          result = value;
    }
  }
  return result;
}
/**
 * The base implementation of `_.fill` without an iteratee call guard.
 *
 * @private
 * @param {Array} array The array to fill.
 * @param {*} value The value to fill `array` with.
 * @param {number} [start=0] The start position.
 * @param {number} [end=array.length] The end position.
 * @returns {Array} Returns `array`.
 */
function baseFill(array, value, start, end) {
  var length = array.length;
  // Negative `start` counts back from the end; clamp at 0.
  start = toInteger(start);
  if (start < 0) {
    start = -start > length ? 0 : (length + start);
  }
  // Default/overshooting `end` falls back to the array length.
  end = (end === undefined || end > length) ? length : toInteger(end);
  if (end < 0) {
    end += length;
  }
  // An inverted range fills nothing.
  end = start > end ? 0 : toLength(end);
  for (var index = start; index < end; index++) {
    array[index] = value;
  }
  return array;
}
/**
 * The base implementation of `_.filter` without support for iteratee shorthands.
 *
 * @private
 * @param {Array|Object} collection The collection to iterate over.
 * @param {Function} predicate The function invoked per iteration.
 * @returns {Array} Returns the new filtered array.
 */
function baseFilter(collection, predicate) {
  var accepted = [];
  baseEach(collection, function(value, index, coll) {
    // Keep every element the predicate approves of, in iteration order.
    if (predicate(value, index, coll)) {
      accepted.push(value);
    }
  });
  return accepted;
}
/**
 * The base implementation of `_.flatten` with support for restricting flattening.
 *
 * @private
 * @param {Array} array The array to flatten.
 * @param {number} depth The maximum recursion depth.
 * @param {boolean} [predicate=isFlattenable] The function invoked per iteration.
 * @param {boolean} [isStrict] Restrict to values that pass `predicate` checks.
 * @param {Array} [result=[]] The initial result value.
 * @returns {Array} Returns the new flattened array.
 */
function baseFlatten(array, depth, predicate, isStrict, result) {
  predicate = predicate || isFlattenable;
  result = result || [];
  var length = array.length;
  for (var index = 0; index < length; index++) {
    var value = array[index];
    if (depth > 0 && predicate(value)) {
      if (depth > 1) {
        // Recursively flatten arrays (susceptible to call stack limits).
        baseFlatten(value, depth - 1, predicate, isStrict, result);
      } else {
        arrayPush(result, value);
      }
    } else if (!isStrict) {
      // Non-flattenable values are appended as-is unless in strict mode.
      result[result.length] = value;
    }
  }
  return result;
}
/**
 * The base implementation of `baseForOwn` which iterates over `object`
 * properties returned by `keysFunc` and invokes `iteratee` for each property.
 * Iteratee functions may exit iteration early by explicitly returning `false`.
 *
 * Created by the `createBaseFor` factory (left-to-right key order).
 *
 * @private
 * @param {Object} object The object to iterate over.
 * @param {Function} iteratee The function invoked per iteration.
 * @param {Function} keysFunc The function to get the keys of `object`.
 * @returns {Object} Returns `object`.
 */
var baseFor = createBaseFor();
/**
 * This function is like `baseFor` except that it iterates over properties
 * in the opposite order.
 *
 * The `true` argument asks `createBaseFor` for right-to-left iteration.
 *
 * @private
 * @param {Object} object The object to iterate over.
 * @param {Function} iteratee The function invoked per iteration.
 * @param {Function} keysFunc The function to get the keys of `object`.
 * @returns {Object} Returns `object`.
 */
var baseForRight = createBaseFor(true);
/**
* The base implementation of `_.forOwn` without support for iteratee shorthands.
*
* @private
* @param {Object} object The object to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Object} Returns `object`.
*/
function baseForOwn(object, iteratee) {
  // Falsy `object` short-circuits and is returned unchanged.
  return object ? baseFor(object, iteratee, keys) : object;
}
/**
* The base implementation of `_.forOwnRight` without support for iteratee shorthands.
*
* @private
* @param {Object} object The object to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Object} Returns `object`.
*/
function baseForOwnRight(object, iteratee) {
  // Falsy `object` short-circuits and is returned unchanged.
  return object ? baseForRight(object, iteratee, keys) : object;
}
/**
* The base implementation of `_.functions` which creates an array of
* `object` function property names filtered from `props`.
*
* @private
* @param {Object} object The object to inspect.
* @param {Array} props The property names to filter.
* @returns {Array} Returns the function names.
*/
function baseFunctions(object, props) {
  // Keep only the property names whose values are functions on `object`.
  return arrayFilter(props, function(name) {
    return isFunction(object[name]);
  });
}
/**
* The base implementation of `_.get` without support for default values.
*
* @private
* @param {Object} object The object to query.
* @param {Array|string} path The path of the property to get.
* @returns {*} Returns the resolved value.
*/
function baseGet(object, path) {
  path = castPath(path, object);
  var length = path.length,
      index = 0;
  // Walk the path until it is exhausted or a nullish link is hit.
  while (index < length && object != null) {
    object = object[toKey(path[index])];
    index++;
  }
  // Only a fully-consumed, non-empty path yields the resolved value.
  return (index && index == length) ? object : undefined;
}
/**
* The base implementation of `getAllKeys` and `getAllKeysIn` which uses
* `keysFunc` and `symbolsFunc` to get the enumerable property names and
* symbols of `object`.
*
* @private
* @param {Object} object The object to query.
* @param {Function} keysFunc The function to get the keys of `object`.
* @param {Function} symbolsFunc The function to get the symbols of `object`.
* @returns {Array} Returns the array of property names and symbols.
*/
function baseGetAllKeys(object, keysFunc, symbolsFunc) {
  var names = keysFunc(object);
  if (isArray(object)) {
    // Arrays carry no own symbol keys worth reporting.
    return names;
  }
  return arrayPush(names, symbolsFunc(object));
}
/**
* The base implementation of `getTag` without fallbacks for buggy environments.
*
* @private
* @param {*} value The value to query.
* @returns {string} Returns the `toStringTag`.
*/
function baseGetTag(value) {
  if (value == null) {
    return value === undefined ? undefinedTag : nullTag;
  }
  if (symToStringTag && symToStringTag in Object(value)) {
    // Values carrying `Symbol.toStringTag` need the raw-tag workaround.
    return getRawTag(value);
  }
  return objectToString(value);
}
/**
* The base implementation of `_.gt` which doesn't coerce arguments.
*
* @private
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if `value` is greater than `other`,
* else `false`.
*/
function baseGt(value, other) {
  // Uses the raw `>` operator so coercion and operand-evaluation order match
  // the ECMAScript relational comparison exactly (NaN operands yield `false`).
  return value > other;
}
/**
* The base implementation of `_.has` without support for deep paths.
*
* @private
* @param {Object} [object] The object to query.
* @param {Array|string} key The key to check.
* @returns {boolean} Returns `true` if `key` exists, else `false`.
*/
function baseHas(object, key) {
  if (object == null) {
    return false;
  }
  // Own-property check only; inherited keys are ignored.
  return hasOwnProperty.call(object, key);
}
/**
* The base implementation of `_.hasIn` without support for deep paths.
*
* @private
* @param {Object} [object] The object to query.
* @param {Array|string} key The key to check.
* @returns {boolean} Returns `true` if `key` exists, else `false`.
*/
function baseHasIn(object, key) {
  if (object == null) {
    return false;
  }
  // `in` also sees inherited keys; `Object(...)` boxes primitives safely.
  return key in Object(object);
}
/**
* The base implementation of `_.inRange` which doesn't coerce arguments.
*
* @private
* @param {number} number The number to check.
* @param {number} start The start of the range.
* @param {number} end The end of the range.
* @returns {boolean} Returns `true` if `number` is in the range, else `false`.
*/
function baseInRange(number, start, end) {
  // Normalize the bounds so a swapped `start`/`end` pair still works.
  var lower = nativeMin(start, end),
      upper = nativeMax(start, end);
  return number >= lower && number < upper;
}
/**
* The base implementation of methods like `_.intersection`, without support
* for iteratee shorthands, that accepts an array of arrays to inspect.
*
* @private
* @param {Array} arrays The arrays to inspect.
* @param {Function} [iteratee] The iteratee invoked per element.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns the new array of shared values.
*/
function baseIntersection(arrays, iteratee, comparator) {
  // Pick the membership test: comparator-aware scan or SameValueZero scan.
  var includes = comparator ? arrayIncludesWith : arrayIncludes,
      length = arrays[0].length,
      othLength = arrays.length,
      othIndex = othLength,
      caches = Array(othLength),
      maxLength = Infinity,
      result = [];
  // Pre-process each array: map through `iteratee` (all but the first) and
  // build a SetCache for large inputs so membership checks are O(1).
  while (othIndex--) {
    var array = arrays[othIndex];
    if (othIndex && iteratee) {
      array = arrayMap(array, baseUnary(iteratee));
    }
    // The result can never exceed the shortest input's length.
    maxLength = nativeMin(array.length, maxLength);
    // Caching pays off only without a comparator and for arrays of 120+
    // elements (or whenever an iteratee forced a mapped copy).
    caches[othIndex] = !comparator && (iteratee || (length >= 120 && array.length >= 120))
      ? new SetCache(othIndex && array)
      : undefined;
  }
  array = arrays[0];
  var index = -1,
      seen = caches[0];
  outer:
  while (++index < length && result.length < maxLength) {
    var value = array[index],
        computed = iteratee ? iteratee(value) : value;
    // Normalize `-0` to `0` unless a comparator handles equality itself.
    value = (comparator || value !== 0) ? value : 0;
    // Skip values already emitted (tracked via `seen` cache or `result`).
    if (!(seen
          ? cacheHas(seen, computed)
          : includes(result, computed, comparator)
        )) {
      // The candidate must appear in every other array to be kept.
      othIndex = othLength;
      while (--othIndex) {
        var cache = caches[othIndex];
        if (!(cache
              ? cacheHas(cache, computed)
              : includes(arrays[othIndex], computed, comparator))
            ) {
          continue outer;
        }
      }
      if (seen) {
        seen.push(computed);
      }
      result.push(value);
    }
  }
  return result;
}
/**
* The base implementation of `_.invert` and `_.invertBy` which inverts
* `object` with values transformed by `iteratee` and set by `setter`.
*
* @private
* @param {Object} object The object to iterate over.
* @param {Function} setter The function to set `accumulator` values.
* @param {Function} iteratee The iteratee to transform values.
* @param {Object} accumulator The initial inverted object.
* @returns {Function} Returns `accumulator`.
*/
function baseInverter(object, setter, iteratee, accumulator) {
  // For each own property, let `setter` record the transformed value → key
  // mapping on the accumulator.
  baseForOwn(object, function(value, key, obj) {
    setter(accumulator, iteratee(value), key, obj);
  });
  return accumulator;
}
/**
* The base implementation of `_.invoke` without support for individual
* method arguments.
*
* @private
* @param {Object} object The object to query.
* @param {Array|string} path The path of the method to invoke.
* @param {Array} args The arguments to invoke the method with.
* @returns {*} Returns the result of the invoked method.
*/
function baseInvoke(object, path, args) {
  path = castPath(path, object);
  // Resolve the owner of the method (everything but the last path segment).
  object = parent(object, path);
  if (object == null) {
    return undefined;
  }
  var func = object[toKey(last(path))];
  return func == null ? undefined : apply(func, object, args);
}
/**
* The base implementation of `_.isArguments`.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an `arguments` object,
*/
function baseIsArguments(value) {
  if (!isObjectLike(value)) {
    return false;
  }
  return baseGetTag(value) == argsTag;
}
/**
* The base implementation of `_.isArrayBuffer` without Node.js optimizations.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an array buffer, else `false`.
*/
function baseIsArrayBuffer(value) {
  // Object-like check first, then tag comparison.
  return isObjectLike(value)
    ? baseGetTag(value) == arrayBufferTag
    : false;
}
/**
* The base implementation of `_.isDate` without Node.js optimizations.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a date object, else `false`.
*/
function baseIsDate(value) {
  if (!isObjectLike(value)) {
    return false;
  }
  return baseGetTag(value) == dateTag;
}
/**
* The base implementation of `_.isEqual` which supports partial comparisons
* and tracks traversed objects.
*
* @private
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @param {boolean} bitmask The bitmask flags.
* 1 - Unordered comparison
* 2 - Partial comparison
* @param {Function} [customizer] The function to customize comparisons.
* @param {Object} [stack] Tracks traversed `value` and `other` objects.
* @returns {boolean} Returns `true` if the values are equivalent, else `false`.
*/
function baseIsEqual(value, other, bitmask, customizer, stack) {
  if (value === other) {
    return true;
  }
  if (value == null || other == null || (!isObjectLike(value) && !isObjectLike(other))) {
    // Only NaN is unequal to itself, so this treats two NaNs as equivalent
    // while all other primitive pairs reaching here compare unequal.
    return value !== value && other !== other;
  }
  return baseIsEqualDeep(value, other, bitmask, customizer, baseIsEqual, stack);
}
/**
* A specialized version of `baseIsEqual` for arrays and objects which performs
* deep comparisons and tracks traversed objects enabling objects with circular
* references to be compared.
*
* @private
* @param {Object} object The object to compare.
* @param {Object} other The other object to compare.
* @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details.
* @param {Function} customizer The function to customize comparisons.
* @param {Function} equalFunc The function to determine equivalents of values.
* @param {Object} [stack] Tracks traversed `object` and `other` objects.
* @returns {boolean} Returns `true` if the objects are equivalent, else `false`.
*/
function baseIsEqualDeep(object, other, bitmask, customizer, equalFunc, stack) {
  var objIsArr = isArray(object),
      othIsArr = isArray(other),
      objTag = objIsArr ? arrayTag : getTag(object),
      othTag = othIsArr ? arrayTag : getTag(other);
  // Treat `arguments` objects like plain objects for comparison purposes.
  objTag = objTag == argsTag ? objectTag : objTag;
  othTag = othTag == argsTag ? objectTag : othTag;
  var objIsObj = objTag == objectTag,
      othIsObj = othTag == objectTag,
      isSameTag = objTag == othTag;
  if (isSameTag && isBuffer(object)) {
    if (!isBuffer(other)) {
      return false;
    }
    // Route buffers through the element-wise array comparison below.
    objIsArr = true;
    objIsObj = false;
  }
  if (isSameTag && !objIsObj) {
    // Lazily create the stack that tracks traversed objects so circular
    // references terminate instead of recursing forever.
    stack || (stack = new Stack);
    return (objIsArr || isTypedArray(object))
      ? equalArrays(object, other, bitmask, customizer, equalFunc, stack)
      : equalByTag(object, other, objTag, bitmask, customizer, equalFunc, stack);
  }
  if (!(bitmask & COMPARE_PARTIAL_FLAG)) {
    // Unwrap lodash wrapper objects and compare their inner values.
    var objIsWrapped = objIsObj && hasOwnProperty.call(object, '__wrapped__'),
        othIsWrapped = othIsObj && hasOwnProperty.call(other, '__wrapped__');
    if (objIsWrapped || othIsWrapped) {
      var objUnwrapped = objIsWrapped ? object.value() : object,
          othUnwrapped = othIsWrapped ? other.value() : other;
      stack || (stack = new Stack);
      return equalFunc(objUnwrapped, othUnwrapped, bitmask, customizer, stack);
    }
  }
  if (!isSameTag) {
    return false;
  }
  stack || (stack = new Stack);
  return equalObjects(object, other, bitmask, customizer, equalFunc, stack);
}
/**
* The base implementation of `_.isMap` without Node.js optimizations.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a map, else `false`.
*/
function baseIsMap(value) {
  if (!isObjectLike(value)) {
    return false;
  }
  return getTag(value) == mapTag;
}
/**
* The base implementation of `_.isMatch` without support for iteratee shorthands.
*
* @private
* @param {Object} object The object to inspect.
* @param {Object} source The object of property values to match.
* @param {Array} matchData The property names, values, and compare flags to match.
* @param {Function} [customizer] The function to customize comparisons.
* @returns {boolean} Returns `true` if `object` is a match, else `false`.
*/
function baseIsMatch(object, source, matchData, customizer) {
  var index = matchData.length,
      length = index,
      noCustomizer = !customizer;
  if (object == null) {
    // A nullish object only matches an empty `source`.
    return !length;
  }
  object = Object(object);
  // First pass (backwards): cheap strict-equality checks for strict-comparable
  // values (`data[2]`) and `in` existence checks for the rest; bail early.
  while (index--) {
    var data = matchData[index];
    if ((noCustomizer && data[2])
          ? data[1] !== object[data[0]]
          : !(data[0] in object)
        ) {
      return false;
    }
  }
  // Second pass (forwards): full, possibly deep/customized, comparison.
  while (++index < length) {
    data = matchData[index];
    var key = data[0],
        objValue = object[key],
        srcValue = data[1];
    if (noCustomizer && data[2]) {
      // Strict value already matched above; just confirm the key exists
      // when the matched value is `undefined`.
      if (objValue === undefined && !(key in object)) {
        return false;
      }
    } else {
      var stack = new Stack;
      if (customizer) {
        var result = customizer(objValue, srcValue, key, object, source, stack);
      }
      // An `undefined` customizer result defers to the default deep equality.
      if (!(result === undefined
            ? baseIsEqual(srcValue, objValue, COMPARE_PARTIAL_FLAG | COMPARE_UNORDERED_FLAG, customizer, stack)
            : result
          )) {
        return false;
      }
    }
  }
  return true;
}
/**
* The base implementation of `_.isNative` without bad shim checks.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a native function,
* else `false`.
*/
function baseIsNative(value) {
  // Masked (monkey-patched/core-js) functions are never considered native.
  if (isObject(value) && !isMasked(value)) {
    var pattern = isFunction(value) ? reIsNative : reIsHostCtor;
    return pattern.test(toSource(value));
  }
  return false;
}
/**
* The base implementation of `_.isRegExp` without Node.js optimizations.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a regexp, else `false`.
*/
function baseIsRegExp(value) {
  if (!isObjectLike(value)) {
    return false;
  }
  return baseGetTag(value) == regexpTag;
}
/**
* The base implementation of `_.isSet` without Node.js optimizations.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a set, else `false`.
*/
function baseIsSet(value) {
  // Object-like check first, then tag comparison.
  return isObjectLike(value)
    ? getTag(value) == setTag
    : false;
}
/**
* The base implementation of `_.isTypedArray` without Node.js optimizations.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a typed array, else `false`.
*/
function baseIsTypedArray(value) {
  if (!isObjectLike(value) || !isLength(value.length)) {
    return false;
  }
  // `typedArrayTags` maps each typed-array tag to `true`.
  return !!typedArrayTags[baseGetTag(value)];
}
/**
* The base implementation of `_.iteratee`.
*
* @private
* @param {*} [value=_.identity] The value to convert to an iteratee.
* @returns {Function} Returns the iteratee.
*/
function baseIteratee(value) {
  // Keep the `typeof` checks inline: storing the result in a variable
  // triggers a JIT bug in Safari 9
  // (https://bugs.webkit.org/show_bug.cgi?id=156034).
  if (typeof value == 'function') {
    return value;
  }
  if (value == null) {
    return identity;
  }
  if (typeof value != 'object') {
    return property(value);
  }
  // Object shorthands: `[path, srcValue]` pairs or `_.matches`-style objects.
  return isArray(value)
    ? baseMatchesProperty(value[0], value[1])
    : baseMatches(value);
}
/**
* The base implementation of `_.keys` which doesn't treat sparse arrays as dense.
*
* @private
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property names.
*/
function baseKeys(object) {
  if (!isPrototype(object)) {
    return nativeKeys(object);
  }
  // Prototype objects: enumerate manually, skipping `constructor`.
  var result = [];
  for (var key in Object(object)) {
    if (key != 'constructor' && hasOwnProperty.call(object, key)) {
      result.push(key);
    }
  }
  return result;
}
/**
* The base implementation of `_.keysIn` which doesn't treat sparse arrays as dense.
*
* @private
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property names.
*/
function baseKeysIn(object) {
  if (!isObject(object)) {
    return nativeKeysIn(object);
  }
  var isProto = isPrototype(object),
      result = [];
  for (var key in object) {
    // Skip `constructor` on prototypes and on objects that only inherit it.
    var skip = key == 'constructor' && (isProto || !hasOwnProperty.call(object, key));
    if (!skip) {
      result.push(key);
    }
  }
  return result;
}
/**
* The base implementation of `_.lt` which doesn't coerce arguments.
*
* @private
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if `value` is less than `other`,
* else `false`.
*/
function baseLt(value, other) {
  // Uses the raw `<` operator so coercion and operand-evaluation order match
  // the ECMAScript relational comparison exactly (NaN operands yield `false`).
  return value < other;
}
/**
* The base implementation of `_.map` without support for iteratee shorthands.
*
* @private
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Array} Returns the new mapped array.
*/
function baseMap(collection, iteratee) {
  // Pre-size the result for array-like collections.
  var result = isArrayLike(collection) ? Array(collection.length) : [],
      index = -1;
  baseEach(collection, function(value, key, coll) {
    result[++index] = iteratee(value, key, coll);
  });
  return result;
}
/**
* The base implementation of `_.matches` which doesn't clone `source`.
*
* @private
* @param {Object} source The object of property values to match.
* @returns {Function} Returns the new spec function.
*/
function baseMatches(source) {
  var matchData = getMatchData(source);
  // Single strict-comparable property: use the fast strict matcher.
  if (matchData.length == 1 && matchData[0][2]) {
    return matchesStrictComparable(matchData[0][0], matchData[0][1]);
  }
  return function(object) {
    if (object === source) {
      return true;
    }
    return baseIsMatch(object, source, matchData);
  };
}
/**
* The base implementation of `_.matchesProperty` which doesn't clone `srcValue`.
*
* @private
* @param {string} path The path of the property to get.
* @param {*} srcValue The value to match.
* @returns {Function} Returns the new spec function.
*/
function baseMatchesProperty(path, srcValue) {
  // Shallow key with a strict-comparable value: use the fast strict matcher.
  if (isKey(path) && isStrictComparable(srcValue)) {
    return matchesStrictComparable(toKey(path), srcValue);
  }
  return function(object) {
    var objValue = get(object, path);
    if (objValue === undefined && objValue === srcValue) {
      // Both are `undefined`: distinguish a missing path from an explicit one.
      return hasIn(object, path);
    }
    return baseIsEqual(srcValue, objValue, COMPARE_PARTIAL_FLAG | COMPARE_UNORDERED_FLAG);
  };
}
/**
* The base implementation of `_.merge` without support for multiple sources.
*
* @private
* @param {Object} object The destination object.
* @param {Object} source The source object.
* @param {number} srcIndex The index of `source`.
* @param {Function} [customizer] The function to customize merged values.
* @param {Object} [stack] Tracks traversed source values and their merged
* counterparts.
*/
function baseMerge(object, source, srcIndex, customizer, stack) {
  if (object === source) {
    return;
  }
  baseFor(source, function(srcValue, key) {
    // Lazily create the stack that lets circular references merge safely.
    stack || (stack = new Stack);
    if (isObject(srcValue)) {
      // Objects and arrays merge recursively.
      baseMergeDeep(object, source, key, srcIndex, baseMerge, customizer, stack);
    }
    else {
      // Primitives: let the customizer choose, else take the source's value.
      var newValue = customizer
        ? customizer(safeGet(object, key), srcValue, (key + ''), object, source, stack)
        : undefined;
      if (newValue === undefined) {
        newValue = srcValue;
      }
      assignMergeValue(object, key, newValue);
    }
  }, keysIn);
}
/**
* A specialized version of `baseMerge` for arrays and objects which performs
* deep merges and tracks traversed objects enabling objects with circular
* references to be merged.
*
* @private
* @param {Object} object The destination object.
* @param {Object} source The source object.
* @param {string} key The key of the value to merge.
* @param {number} srcIndex The index of `source`.
* @param {Function} mergeFunc The function to merge values.
* @param {Function} [customizer] The function to customize assigned values.
* @param {Object} [stack] Tracks traversed source values and their merged
* counterparts.
*/
function baseMergeDeep(object, source, key, srcIndex, mergeFunc, customizer, stack) {
  var objValue = safeGet(object, key),
      srcValue = safeGet(source, key),
      stacked = stack.get(srcValue);
  if (stacked) {
    // Circular reference: reuse the value already merged for this source.
    assignMergeValue(object, key, stacked);
    return;
  }
  var newValue = customizer
    ? customizer(objValue, srcValue, (key + ''), object, source, stack)
    : undefined;
  // `isCommon` means we should recurse into `newValue` below; a customizer
  // result or a cloned buffer/typed array opts out of recursion.
  var isCommon = newValue === undefined;
  if (isCommon) {
    var isArr = isArray(srcValue),
        isBuff = !isArr && isBuffer(srcValue),
        isTyped = !isArr && !isBuff && isTypedArray(srcValue);
    newValue = srcValue;
    if (isArr || isBuff || isTyped) {
      // Array-like sources: merge into a compatible array-like target,
      // cloning buffers/typed arrays instead of recursing into them.
      if (isArray(objValue)) {
        newValue = objValue;
      }
      else if (isArrayLikeObject(objValue)) {
        newValue = copyArray(objValue);
      }
      else if (isBuff) {
        isCommon = false;
        newValue = cloneBuffer(srcValue, true);
      }
      else if (isTyped) {
        isCommon = false;
        newValue = cloneTypedArray(srcValue, true);
      }
      else {
        newValue = [];
      }
    }
    else if (isPlainObject(srcValue) || isArguments(srcValue)) {
      newValue = objValue;
      if (isArguments(objValue)) {
        newValue = toPlainObject(objValue);
      }
      else if (!isObject(objValue) || isFunction(objValue)) {
        // Target isn't mergeable; start from an empty clone of the source.
        newValue = initCloneObject(srcValue);
      }
    }
    else {
      isCommon = false;
    }
  }
  if (isCommon) {
    // Recursively merge objects and arrays (susceptible to call stack limits).
    stack.set(srcValue, newValue);
    mergeFunc(newValue, srcValue, srcIndex, customizer, stack);
    stack['delete'](srcValue);
  }
  assignMergeValue(object, key, newValue);
}
/**
* The base implementation of `_.nth` which doesn't coerce arguments.
*
* @private
* @param {Array} array The array to query.
* @param {number} n The index of the element to return.
* @returns {*} Returns the nth element of `array`.
*/
function baseNth(array, n) {
  var length = array.length;
  if (length) {
    // Negative `n` counts back from the end.
    n += n < 0 ? length : 0;
    if (isIndex(n, length)) {
      return array[n];
    }
  }
  return undefined;
}
/**
* The base implementation of `_.orderBy` without param guards.
*
* @private
* @param {Array|Object} collection The collection to iterate over.
* @param {Function[]|Object[]|string[]} iteratees The iteratees to sort by.
* @param {string[]} orders The sort orders of `iteratees`.
* @returns {Array} Returns the new sorted array.
*/
function baseOrderBy(collection, iteratees, orders) {
  if (iteratees.length) {
    iteratees = arrayMap(iteratees, function(iteratee) {
      // Array iteratees are property paths; resolve them with `baseGet`.
      if (isArray(iteratee)) {
        return function(value) {
          return baseGet(value, iteratee.length === 1 ? iteratee[0] : iteratee);
        };
      }
      return iteratee;
    });
  } else {
    iteratees = [identity];
  }
  var index = -1;
  iteratees = arrayMap(iteratees, baseUnary(getIteratee()));
  // Decorate each element with its sort criteria and original position so
  // the sort is stable; `baseSortBy` strips the decoration afterwards.
  var result = baseMap(collection, function(value, key, collection) {
    var criteria = arrayMap(iteratees, function(iteratee) {
      return iteratee(value);
    });
    return { 'criteria': criteria, 'index': ++index, 'value': value };
  });
  return baseSortBy(result, function(object, other) {
    return compareMultiple(object, other, orders);
  });
}
/**
* The base implementation of `_.pick` without support for individual
* property identifiers.
*
* @private
* @param {Object} object The source object.
* @param {string[]} paths The property paths to pick.
* @returns {Object} Returns the new object.
*/
function basePick(object, paths) {
  // Keep only the paths that actually exist on (or are inherited by) `object`.
  return basePickBy(object, paths, function(unused, path) {
    return hasIn(object, path);
  });
}
/**
* The base implementation of `_.pickBy` without support for iteratee shorthands.
*
* @private
* @param {Object} object The source object.
* @param {string[]} paths The property paths to pick.
* @param {Function} predicate The function invoked per property.
* @returns {Object} Returns the new object.
*/
function basePickBy(object, paths, predicate) {
  var result = {},
      length = paths.length;
  for (var index = 0; index < length; index++) {
    var path = paths[index],
        value = baseGet(object, path);
    if (predicate(value, path)) {
      // Recreate the nested structure of `path` on the result.
      baseSet(result, castPath(path, object), value);
    }
  }
  return result;
}
/**
* A specialized version of `baseProperty` which supports deep paths.
*
* @private
* @param {Array|string} path The path of the property to get.
* @returns {Function} Returns the new accessor function.
*/
function basePropertyDeep(path) {
  // Accessor closing over `path`; resolves it afresh on every call.
  return function(obj) {
    return baseGet(obj, path);
  };
}
/**
* The base implementation of `_.pullAllBy` without support for iteratee
* shorthands.
*
* @private
* @param {Array} array The array to modify.
* @param {Array} values The values to remove.
* @param {Function} [iteratee] The iteratee invoked per element.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns `array`.
*/
function basePullAll(array, values, iteratee, comparator) {
  var indexOf = comparator ? baseIndexOfWith : baseIndexOf,
      index = -1,
      length = values.length,
      seen = array;
  if (array === values) {
    // Pulling an array from itself: iterate a copy so removals are stable.
    values = copyArray(values);
  }
  if (iteratee) {
    // `seen` mirrors `array` mapped through `iteratee` for lookups.
    seen = arrayMap(array, baseUnary(iteratee));
  }
  while (++index < length) {
    var fromIndex = 0,
        value = values[index],
        computed = iteratee ? iteratee(value) : value;
    // Splice out every occurrence, keeping `seen` in sync with `array`.
    while ((fromIndex = indexOf(seen, computed, fromIndex, comparator)) > -1) {
      if (seen !== array) {
        splice.call(seen, fromIndex, 1);
      }
      splice.call(array, fromIndex, 1);
    }
  }
  return array;
}
/**
* The base implementation of `_.pullAt` without support for individual
* indexes or capturing the removed elements.
*
* @private
* @param {Array} array The array to modify.
* @param {number[]} indexes The indexes of elements to remove.
* @returns {Array} Returns `array`.
*/
function basePullAt(array, indexes) {
  var length = array ? indexes.length : 0,
      lastIndex = length - 1;
  // Iterate from highest index to lowest so earlier removals don't shift
  // positions that are still pending; `indexes` is assumed sorted ascending.
  while (length--) {
    var index = indexes[length];
    // Skip duplicate indexes; `previous` is `var`-hoisted across iterations.
    if (length == lastIndex || index !== previous) {
      var previous = index;
      if (isIndex(index)) {
        splice.call(array, index, 1);
      } else {
        // Non-index "indexes" are treated as property paths to unset.
        baseUnset(array, index);
      }
    }
  }
  return array;
}
/**
* The base implementation of `_.random` without support for returning
* floating-point numbers.
*
* @private
* @param {number} lower The lower bound.
* @param {number} upper The upper bound.
* @returns {number} Returns the random number.
*/
function baseRandom(lower, upper) {
  // Inclusive integer range: span covers `upper - lower + 1` values.
  var span = upper - lower + 1;
  return lower + nativeFloor(nativeRandom() * span);
}
/**
* The base implementation of `_.range` and `_.rangeRight` which doesn't
* coerce arguments.
*
* @private
* @param {number} start The start of the range.
* @param {number} end The end of the range.
* @param {number} step The value to increment or decrement by.
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {Array} Returns the range of numbers.
*/
function baseRange(start, end, step, fromRight) {
  // A zero `step` is treated as 1; length is clamped to be non-negative.
  var length = nativeMax(nativeCeil((end - start) / (step || 1)), 0),
      result = Array(length);
  for (var index = 0; index < length; index++) {
    result[fromRight ? (length - index - 1) : index] = start;
    start += step;
  }
  return result;
}
/**
* The base implementation of `_.repeat` which doesn't coerce arguments.
*
* @private
* @param {string} string The string to repeat.
* @param {number} n The number of times to repeat the string.
* @returns {string} Returns the repeated string.
*/
function baseRepeat(string, n) {
  var result = '';
  if (!string || n < 1 || n > MAX_SAFE_INTEGER) {
    return result;
  }
  // Exponentiation by squaring: O(log n) concatenations instead of n.
  // See https://en.wikipedia.org/wiki/Exponentiation_by_squaring for details.
  while (n) {
    if (n % 2) {
      result += string;
    }
    n = nativeFloor(n / 2);
    if (n) {
      string += string;
    }
  }
  return result;
}
/**
* The base implementation of `_.rest` which doesn't validate or coerce arguments.
*
* @private
* @param {Function} func The function to apply a rest parameter to.
* @param {number} [start=func.length-1] The start position of the rest parameter.
* @returns {Function} Returns the new function.
*/
function baseRest(func, start) {
  // `func + ''` preserves the original source text for `toString`.
  var rest = overRest(func, start, identity);
  return setToString(rest, func + '');
}
/**
* The base implementation of `_.sample`.
*
* @private
* @param {Array|Object} collection The collection to sample.
* @returns {*} Returns the random element.
*/
function baseSample(collection) {
  // Sample from the collection's values, array-like or object alike.
  var array = values(collection);
  return arraySample(array);
}
/**
* The base implementation of `_.sampleSize` without param guards.
*
* @private
* @param {Array|Object} collection The collection to sample.
* @param {number} n The number of elements to sample.
* @returns {Array} Returns the random elements.
*/
function baseSampleSize(collection, n) {
  var array = values(collection),
      size = baseClamp(n, 0, array.length);
  // Partial Fisher-Yates shuffle limited to `size` elements.
  return shuffleSelf(array, size);
}
/**
* The base implementation of `_.set`.
*
* @private
* @param {Object} object The object to modify.
* @param {Array|string} path The path of the property to set.
* @param {*} value The value to set.
* @param {Function} [customizer] The function to customize path creation.
* @returns {Object} Returns `object`.
*/
function baseSet(object, path, value, customizer) {
  if (!isObject(object)) {
    return object;
  }
  path = castPath(path, object);
  var index = -1,
      length = path.length,
      lastIndex = length - 1,
      nested = object;
  while (nested != null && ++index < length) {
    var key = toKey(path[index]),
        newValue = value;
    // Guard against prototype pollution through crafted paths.
    if (key === '__proto__' || key === 'constructor' || key === 'prototype') {
      return object;
    }
    if (index != lastIndex) {
      // Intermediate segment: keep an existing object, otherwise create an
      // array (when the next key looks like an index) or a plain object.
      var objValue = nested[key];
      newValue = customizer ? customizer(objValue, key, nested) : undefined;
      if (newValue === undefined) {
        newValue = isObject(objValue)
          ? objValue
          : (isIndex(path[index + 1]) ? [] : {});
      }
    }
    assignValue(nested, key, newValue);
    nested = nested[key];
  }
  return object;
}
/**
* The base implementation of `setData` without support for hot loop shorting.
*
* @private
* @param {Function} func The function to associate metadata with.
* @param {*} data The metadata.
* @returns {Function} Returns `func`.
*/
// Falls back to `identity` when no `metaMap` store exists, in which case
// metadata simply isn't recorded.
var baseSetData = !metaMap ? identity : function(func, data) {
  metaMap.set(func, data);
  return func;
};
/**
* The base implementation of `setToString` without support for hot loop shorting.
*
* @private
* @param {Function} func The function to modify.
* @param {Function} string The `toString` result.
* @returns {Function} Returns `func`.
*/
// Falls back to `identity` when `defineProperty` is unavailable; otherwise
// overrides `toString` (non-enumerable) so the wrapper prints `string`.
var baseSetToString = !defineProperty ? identity : function(func, string) {
  return defineProperty(func, 'toString', {
    'configurable': true,
    'enumerable': false,
    'value': constant(string),
    'writable': true
  });
};
/**
* The base implementation of `_.shuffle`.
*
* @private
* @param {Array|Object} collection The collection to shuffle.
* @returns {Array} Returns the new shuffled array.
*/
function baseShuffle(collection) {
  // Shuffle the collection's values in place via Fisher-Yates.
  var array = values(collection);
  return shuffleSelf(array);
}
/**
* The base implementation of `_.slice` without an iteratee call guard.
*
* @private
* @param {Array} array The array to slice.
* @param {number} [start=0] The start position.
* @param {number} [end=array.length] The end position.
* @returns {Array} Returns the slice of `array`.
*/
function baseSlice(array, start, end) {
  var length = array.length;
  // Negative `start` counts back from the end, clamped at 0.
  if (start < 0) {
    start = -start > length ? 0 : (length + start);
  }
  // Clamp `end` to the array length; a negative `end` counts from the end.
  end = end > length ? length : end;
  if (end < 0) {
    end += length;
  }
  // `>>> 0` coerces to a non-negative integer length.
  length = start > end ? 0 : ((end - start) >>> 0);
  start >>>= 0;
  var result = Array(length);
  for (var index = 0; index < length; index++) {
    result[index] = array[index + start];
  }
  return result;
}
/**
* The base implementation of `_.some` without support for iteratee shorthands.
*
* @private
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} predicate The function invoked per iteration.
* @returns {boolean} Returns `true` if any element passes the predicate check,
* else `false`.
*/
function baseSome(collection, predicate) {
  var found;
  baseEach(collection, function(value, index, coll) {
    found = predicate(value, index, coll);
    // Returning `false` stops `baseEach`, so iteration ends on first match.
    return !found;
  });
  return !!found;
}
/**
* The base implementation of `_.sortedIndex` and `_.sortedLastIndex` which
* performs a binary search of `array` to determine the index at which `value`
* should be inserted into `array` in order to maintain its sort order.
*
* @private
* @param {Array} array The sorted array to inspect.
* @param {*} value The value to evaluate.
* @param {boolean} [retHighest] Specify returning the highest qualified index.
* @returns {number} Returns the index at which `value` should be inserted
* into `array`.
*/
function baseSortedIndex(array, value, retHighest) {
  var low = 0,
      high = array == null ? low : array.length;
  // Fast path: plain-number binary search with `>>> 1` midpoints, valid only
  // while `high` stays within the unsigned-shift-safe range.
  if (typeof value == 'number' && value === value && high <= HALF_MAX_ARRAY_LENGTH) {
    while (low < high) {
      var mid = (low + high) >>> 1,
          computed = array[mid];
      // `null`/symbol elements sort after numbers, so they never move `low`.
      if (computed !== null && !isSymbol(computed) &&
          (retHighest ? (computed <= value) : (computed < value))) {
        low = mid + 1;
      } else {
        high = mid;
      }
    }
    return high;
  }
  // General path handles NaN, `null`, symbols, and very large arrays.
  return baseSortedIndexBy(array, value, identity, retHighest);
}
/**
* The base implementation of `_.sortedIndexBy` and `_.sortedLastIndexBy`
* which invokes `iteratee` for `value` and each element of `array` to compute
* their sort ranking. The iteratee is invoked with one argument; (value).
*
* @private
* @param {Array} array The sorted array to inspect.
* @param {*} value The value to evaluate.
* @param {Function} iteratee The iteratee invoked per element.
* @param {boolean} [retHighest] Specify returning the highest qualified index.
* @returns {number} Returns the index at which `value` should be inserted
* into `array`.
*/
function baseSortedIndexBy(array, value, iteratee, retHighest) {
  var low = 0,
      high = array == null ? 0 : array.length;
  if (high === 0) {
    return 0;
  }
  value = iteratee(value);
  // Classify the needle once; the comparisons below place valid values
  // first, then `undefined`, `null`, symbols, and finally `NaN`.
  var valIsNaN = value !== value,
      valIsNull = value === null,
      valIsSymbol = isSymbol(value),
      valIsUndefined = value === undefined;
  while (low < high) {
    var mid = nativeFloor((low + high) / 2),
        computed = iteratee(array[mid]),
        othIsDefined = computed !== undefined,
        othIsNull = computed === null,
        othIsReflexive = computed === computed,
        othIsSymbol = isSymbol(computed);
    // Decide whether `value` sorts after `computed` (i.e. move `low` up).
    if (valIsNaN) {
      var setLow = retHighest || othIsReflexive;
    } else if (valIsUndefined) {
      setLow = othIsReflexive && (retHighest || othIsDefined);
    } else if (valIsNull) {
      setLow = othIsReflexive && othIsDefined && (retHighest || !othIsNull);
    } else if (valIsSymbol) {
      setLow = othIsReflexive && othIsDefined && !othIsNull && (retHighest || !othIsSymbol);
    } else if (othIsNull || othIsSymbol) {
      setLow = false;
    } else {
      setLow = retHighest ? (computed <= value) : (computed < value);
    }
    if (setLow) {
      low = mid + 1;
    } else {
      high = mid;
    }
  }
  // Clamp to the maximum valid array index.
  return nativeMin(high, MAX_ARRAY_INDEX);
}
/**
* The base implementation of `_.sortedUniq` and `_.sortedUniqBy` without
* support for iteratee shorthands.
*
* @private
* @param {Array} array The array to inspect.
* @param {Function} [iteratee] The iteratee invoked per element.
* @returns {Array} Returns the new duplicate free array.
*/
function baseSortedUniq(array, iteratee) {
var index = -1,
length = array.length,
resIndex = 0,
result = [];
while (++index < length) {
var value = array[index],
computed = iteratee ? iteratee(value) : value;
if (!index || !eq(computed, seen)) {
var seen = computed;
result[resIndex++] = value === 0 ? 0 : value;
}
}
return result;
}
/**
* The base implementation of `_.toNumber` which doesn't ensure correct
* conversions of binary, hexadecimal, or octal string values.
*
* @private
* @param {*} value The value to process.
* @returns {number} Returns the number.
*/
function baseToNumber(value) {
if (typeof value == 'number') {
return value;
}
if (isSymbol(value)) {
return NAN;
}
return +value;
}
/**
* The base implementation of `_.toString` which doesn't convert nullish
* values to empty strings.
*
* @private
* @param {*} value The value to process.
* @returns {string} Returns the string.
*/
function baseToString(value) {
// Exit early for strings to avoid a performance hit in some environments.
if (typeof value == 'string') {
return value;
}
if (isArray(value)) {
// Recursively convert values (susceptible to call stack limits).
return arrayMap(value, baseToString) + '';
}
if (isSymbol(value)) {
return symbolToString ? symbolToString.call(value) : '';
}
var result = (value + '');
return (result == '0' && (1 / value) == -INFINITY) ? '-0' : result;
}
/**
* The base implementation of `_.uniqBy` without support for iteratee shorthands.
*
* @private
* @param {Array} array The array to inspect.
* @param {Function} [iteratee] The iteratee invoked per element.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns the new duplicate free array.
*/
function baseUniq(array, iteratee, comparator) {
var index = -1,
includes = arrayIncludes,
length = array.length,
isCommon = true,
result = [],
seen = result;
if (comparator) {
isCommon = false;
includes = arrayIncludesWith;
}
else if (length >= LARGE_ARRAY_SIZE) {
var set = iteratee ? null : createSet(array);
if (set) {
return setToArray(set);
}
isCommon = false;
includes = cacheHas;
seen = new SetCache;
}
else {
seen = iteratee ? [] : result;
}
outer:
while (++index < length) {
var value = array[index],
computed = iteratee ? iteratee(value) : value;
value = (comparator || value !== 0) ? value : 0;
if (isCommon && computed === computed) {
var seenIndex = seen.length;
while (seenIndex--) {
if (seen[seenIndex] === computed) {
continue outer;
}
}
if (iteratee) {
seen.push(computed);
}
result.push(value);
}
else if (!includes(seen, computed, comparator)) {
if (seen !== result) {
seen.push(computed);
}
result.push(value);
}
}
return result;
}
/**
* The base implementation of `_.unset`.
*
* @private
* @param {Object} object The object to modify.
* @param {Array|string} path The property path to unset.
* @returns {boolean} Returns `true` if the property is deleted, else `false`.
*/
function baseUnset(object, path) {
path = castPath(path, object);
// Prevent prototype pollution, see: https://github.com/lodash/lodash/security/advisories/GHSA-xxjr-mmjv-4gpg
var index = -1,
length = path.length;
if (!length) {
return true;
}
var isRootPrimitive = object == null || (typeof object !== 'object' && typeof object !== 'function');
while (++index < length) {
var key = path[index];
// skip non-string keys (e.g., Symbols, numbers)
if (typeof key !== 'string') {
continue;
}
// Always block "__proto__" anywhere in the path if it's not expected
if (key === '__proto__' && !hasOwnProperty.call(object, '__proto__')) {
return false;
}
// Block "constructor.prototype" chains
if (key === 'constructor' &&
(index + 1) < length &&
typeof path[index + 1] === 'string' &&
path[index + 1] === 'prototype') {
// Allow ONLY when the path starts at a primitive root, e.g., _.unset(0, 'constructor.prototype.a')
if (isRootPrimitive && index === 0) {
continue;
}
return false;
}
}
var obj = parent(object, path);
return obj == null || delete obj[toKey(last(path))];
}
/**
* The base implementation of `_.update`.
*
* @private
* @param {Object} object The object to modify.
* @param {Array|string} path The path of the property to update.
* @param {Function} updater The function to produce the updated value.
* @param {Function} [customizer] The function to customize path creation.
* @returns {Object} Returns `object`.
*/
function baseUpdate(object, path, updater, customizer) {
return baseSet(object, path, updater(baseGet(object, path)), customizer);
}
/**
* The base implementation of methods like `_.dropWhile` and `_.takeWhile`
* without support for iteratee shorthands.
*
* @private
* @param {Array} array The array to query.
* @param {Function} predicate The function invoked per iteration.
* @param {boolean} [isDrop] Specify dropping elements instead of taking them.
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {Array} Returns the slice of `array`.
*/
function baseWhile(array, predicate, isDrop, fromRight) {
var length = array.length,
index = fromRight ? length : -1;
while ((fromRight ? index-- : ++index < length) &&
predicate(array[index], index, array)) {}
return isDrop
? baseSlice(array, (fromRight ? 0 : index), (fromRight ? index + 1 : length))
: baseSlice(array, (fromRight ? index + 1 : 0), (fromRight ? length : index));
}
/**
* The base implementation of `wrapperValue` which returns the result of
* performing a sequence of actions on the unwrapped `value`, where each
* successive action is supplied the return value of the previous.
*
* @private
* @param {*} value The unwrapped value.
* @param {Array} actions Actions to perform to resolve the unwrapped value.
* @returns {*} Returns the resolved value.
*/
function baseWrapperValue(value, actions) {
var result = value;
if (result instanceof LazyWrapper) {
result = result.value();
}
return arrayReduce(actions, function(result, action) {
return action.func.apply(action.thisArg, arrayPush([result], action.args));
}, result);
}
/**
* The base implementation of methods like `_.xor`, without support for
* iteratee shorthands, that accepts an array of arrays to inspect.
*
* @private
* @param {Array} arrays The arrays to inspect.
* @param {Function} [iteratee] The iteratee invoked per element.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns the new array of values.
*/
function baseXor(arrays, iteratee, comparator) {
var length = arrays.length;
if (length < 2) {
return length ? baseUniq(arrays[0]) : [];
}
var index = -1,
result = Array(length);
while (++index < length) {
var array = arrays[index],
othIndex = -1;
while (++othIndex < length) {
if (othIndex != index) {
result[index] = baseDifference(result[index] || array, arrays[othIndex], iteratee, comparator);
}
}
}
return baseUniq(baseFlatten(result, 1), iteratee, comparator);
}
/**
* This base implementation of `_.zipObject` which assigns values using `assignFunc`.
*
* @private
* @param {Array} props The property identifiers.
* @param {Array} values The property values.
* @param {Function} assignFunc The function to assign values.
* @returns {Object} Returns the new object.
*/
function baseZipObject(props, values, assignFunc) {
var index = -1,
length = props.length,
valsLength = values.length,
result = {};
while (++index < length) {
var value = index < valsLength ? values[index] : undefined;
assignFunc(result, props[index], value);
}
return result;
}
/**
* Casts `value` to an empty array if it's not an array like object.
*
* @private
* @param {*} value The value to inspect.
* @returns {Array|Object} Returns the cast array-like object.
*/
function castArrayLikeObject(value) {
return isArrayLikeObject(value) ? value : [];
}
/**
* Casts `value` to `identity` if it's not a function.
*
* @private
* @param {*} value The value to inspect.
* @returns {Function} Returns cast function.
*/
function castFunction(value) {
return typeof value == 'function' ? value : identity;
}
/**
* Casts `value` to a path array if it's not one.
*
* @private
* @param {*} value The value to inspect.
* @param {Object} [object] The object to query keys on.
* @returns {Array} Returns the cast property path array.
*/
function castPath(value, object) {
if (isArray(value)) {
return value;
}
return isKey(value, object) ? [value] : stringToPath(toString(value));
}
/**
* A `baseRest` alias which can be replaced with `identity` by module
* replacement plugins.
*
* @private
* @type {Function}
* @param {Function} func The function to apply a rest parameter to.
* @returns {Function} Returns the new function.
*/
var castRest = baseRest;
/**
* Casts `array` to a slice if it's needed.
*
* @private
* @param {Array} array The array to inspect.
* @param {number} start The start position.
* @param {number} [end=array.length] The end position.
* @returns {Array} Returns the cast slice.
*/
function castSlice(array, start, end) {
var length = array.length;
end = end === undefined ? length : end;
return (!start && end >= length) ? array : baseSlice(array, start, end);
}
/**
* A simple wrapper around the global [`clearTimeout`](https://mdn.io/clearTimeout).
*
* @private
* @param {number|Object} id The timer id or timeout object of the timer to clear.
*/
var clearTimeout = ctxClearTimeout || function(id) {
return root.clearTimeout(id);
};
/**
* Creates a clone of `buffer`.
*
* @private
* @param {Buffer} buffer The buffer to clone.
* @param {boolean} [isDeep] Specify a deep clone.
* @returns {Buffer} Returns the cloned buffer.
*/
function cloneBuffer(buffer, isDeep) {
if (isDeep) {
return buffer.slice();
}
var length = buffer.length,
result = allocUnsafe ? allocUnsafe(length) : new buffer.constructor(length);
buffer.copy(result);
return result;
}
/**
* Creates a clone of `arrayBuffer`.
*
* @private
* @param {ArrayBuffer} arrayBuffer The array buffer to clone.
* @returns {ArrayBuffer} Returns the cloned array buffer.
*/
function cloneArrayBuffer(arrayBuffer) {
var result = new arrayBuffer.constructor(arrayBuffer.byteLength);
new Uint8Array(result).set(new Uint8Array(arrayBuffer));
return result;
}
/**
* Creates a clone of `dataView`.
*
* @private
* @param {Object} dataView The data view to clone.
* @param {boolean} [isDeep] Specify a deep clone.
* @returns {Object} Returns the cloned data view.
*/
function cloneDataView(dataView, isDeep) {
var buffer = isDeep ? cloneArrayBuffer(dataView.buffer) : dataView.buffer;
return new dataView.constructor(buffer, dataView.byteOffset, dataView.byteLength);
}
/**
* Creates a clone of `regexp`.
*
* @private
* @param {Object} regexp The regexp to clone.
* @returns {Object} Returns the cloned regexp.
*/
function cloneRegExp(regexp) {
var result = new regexp.constructor(regexp.source, reFlags.exec(regexp));
result.lastIndex = regexp.lastIndex;
return result;
}
/**
* Creates a clone of the `symbol` object.
*
* @private
* @param {Object} symbol The symbol object to clone.
* @returns {Object} Returns the cloned symbol object.
*/
function cloneSymbol(symbol) {
return symbolValueOf ? Object(symbolValueOf.call(symbol)) : {};
}
/**
* Creates a clone of `typedArray`.
*
* @private
* @param {Object} typedArray The typed array to clone.
* @param {boolean} [isDeep] Specify a deep clone.
* @returns {Object} Returns the cloned typed array.
*/
function cloneTypedArray(typedArray, isDeep) {
var buffer = isDeep ? cloneArrayBuffer(typedArray.buffer) : typedArray.buffer;
return new typedArray.constructor(buffer, typedArray.byteOffset, typedArray.length);
}
/**
* Compares values to sort them in ascending order.
*
* @private
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {number} Returns the sort order indicator for `value`.
*/
function compareAscending(value, other) {
if (value !== other) {
var valIsDefined = value !== undefined,
valIsNull = value === null,
valIsReflexive = value === value,
valIsSymbol = isSymbol(value);
var othIsDefined = other !== undefined,
othIsNull = other === null,
othIsReflexive = other === other,
othIsSymbol = isSymbol(other);
if ((!othIsNull && !othIsSymbol && !valIsSymbol && value > other) ||
(valIsSymbol && othIsDefined && othIsReflexive && !othIsNull && !othIsSymbol) ||
(valIsNull && othIsDefined && othIsReflexive) ||
(!valIsDefined && othIsReflexive) ||
!valIsReflexive) {
return 1;
}
if ((!valIsNull && !valIsSymbol && !othIsSymbol && value < other) ||
(othIsSymbol && valIsDefined && valIsReflexive && !valIsNull && !valIsSymbol) ||
(othIsNull && valIsDefined && valIsReflexive) ||
(!othIsDefined && valIsReflexive) ||
!othIsReflexive) {
return -1;
}
}
return 0;
}
/**
* Used by `_.orderBy` to compare multiple properties of a value to another
* and stable sort them.
*
* If `orders` is unspecified, all values are sorted in ascending order. Otherwise,
* specify an order of "desc" for descending or "asc" for ascending sort order
* of corresponding values.
*
* @private
* @param {Object} object The object to compare.
* @param {Object} other The other object to compare.
* @param {boolean[]|string[]} orders The order to sort by for each property.
* @returns {number} Returns the sort order indicator for `object`.
*/
function compareMultiple(object, other, orders) {
var index = -1,
objCriteria = object.criteria,
othCriteria = other.criteria,
length = objCriteria.length,
ordersLength = orders.length;
while (++index < length) {
var result = compareAscending(objCriteria[index], othCriteria[index]);
if (result) {
if (index >= ordersLength) {
return result;
}
var order = orders[index];
return result * (order == 'desc' ? -1 : 1);
}
}
// Fixes an `Array#sort` bug in the JS engine embedded in Adobe applications
// that causes it, under certain circumstances, to provide the same value for
// `object` and `other`. See https://github.com/jashkenas/underscore/pull/1247
// for more details.
//
// This also ensures a stable sort in V8 and other engines.
// See https://bugs.chromium.org/p/v8/issues/detail?id=90 for more details.
return object.index - other.index;
}
/**
* Creates an array that is the composition of partially applied arguments,
* placeholders, and provided arguments into a single array of arguments.
*
* @private
* @param {Array} args The provided arguments.
* @param {Array} partials The arguments to prepend to those provided.
* @param {Array} holders The `partials` placeholder indexes.
* @params {boolean} [isCurried] Specify composing for a curried function.
* @returns {Array} Returns the new array of composed arguments.
*/
function composeArgs(args, partials, holders, isCurried) {
var argsIndex = -1,
argsLength = args.length,
holdersLength = holders.length,
leftIndex = -1,
leftLength = partials.length,
rangeLength = nativeMax(argsLength - holdersLength, 0),
result = Array(leftLength + rangeLength),
isUncurried = !isCurried;
while (++leftIndex < leftLength) {
result[leftIndex] = partials[leftIndex];
}
while (++argsIndex < holdersLength) {
if (isUncurried || argsIndex < argsLength) {
result[holders[argsIndex]] = args[argsIndex];
}
}
while (rangeLength--) {
result[leftIndex++] = args[argsIndex++];
}
return result;
}
/**
* This function is like `composeArgs` except that the arguments composition
* is tailored for `_.partialRight`.
*
* @private
* @param {Array} args The provided arguments.
* @param {Array} partials The arguments to append to those provided.
* @param {Array} holders The `partials` placeholder indexes.
* @params {boolean} [isCurried] Specify composing for a curried function.
* @returns {Array} Returns the new array of composed arguments.
*/
function composeArgsRight(args, partials, holders, isCurried) {
var argsIndex = -1,
argsLength = args.length,
holdersIndex = -1,
holdersLength = holders.length,
rightIndex = -1,
rightLength = partials.length,
rangeLength = nativeMax(argsLength - holdersLength, 0),
result = Array(rangeLength + rightLength),
isUncurried = !isCurried;
while (++argsIndex < rangeLength) {
result[argsIndex] = args[argsIndex];
}
var offset = argsIndex;
while (++rightIndex < rightLength) {
result[offset + rightIndex] = partials[rightIndex];
}
while (++holdersIndex < holdersLength) {
if (isUncurried || argsIndex < argsLength) {
result[offset + holders[holdersIndex]] = args[argsIndex++];
}
}
return result;
}
/**
* Copies the values of `source` to `array`.
*
* @private
* @param {Array} source The array to copy values from.
* @param {Array} [array=[]] The array to copy values to.
* @returns {Array} Returns `array`.
*/
function copyArray(source, array) {
var index = -1,
length = source.length;
array || (array = Array(length));
while (++index < length) {
array[index] = source[index];
}
return array;
}
/**
* Copies properties of `source` to `object`.
*
* @private
* @param {Object} source The object to copy properties from.
* @param {Array} props The property identifiers to copy.
* @param {Object} [object={}] The object to copy properties to.
* @param {Function} [customizer] The function to customize copied values.
* @returns {Object} Returns `object`.
*/
function copyObject(source, props, object, customizer) {
var isNew = !object;
object || (object = {});
var index = -1,
length = props.length;
while (++index < length) {
var key = props[index];
var newValue = customizer
? customizer(object[key], source[key], key, object, source)
: undefined;
if (newValue === undefined) {
newValue = source[key];
}
if (isNew) {
baseAssignValue(object, key, newValue);
} else {
assignValue(object, key, newValue);
}
}
return object;
}
/**
* Copies own symbols of `source` to `object`.
*
* @private
* @param {Object} source The object to copy symbols from.
* @param {Object} [object={}] The object to copy symbols to.
* @returns {Object} Returns `object`.
*/
function copySymbols(source, object) {
return copyObject(source, getSymbols(source), object);
}
/**
* Copies own and inherited symbols of `source` to `object`.
*
* @private
* @param {Object} source The object to copy symbols from.
* @param {Object} [object={}] The object to copy symbols to.
* @returns {Object} Returns `object`.
*/
function copySymbolsIn(source, object) {
return copyObject(source, getSymbolsIn(source), object);
}
/**
* Creates a function like `_.groupBy`.
*
* @private
* @param {Function} setter The function to set accumulator values.
* @param {Function} [initializer] The accumulator object initializer.
* @returns {Function} Returns the new aggregator function.
*/
function createAggregator(setter, initializer) {
return function(collection, iteratee) {
var func = isArray(collection) ? arrayAggregator : baseAggregator,
accumulator = initializer ? initializer() : {};
return func(collection, setter, getIteratee(iteratee, 2), accumulator);
};
}
/**
* Creates a function like `_.assign`.
*
* @private
* @param {Function} assigner The function to assign values.
* @returns {Function} Returns the new assigner function.
*/
function createAssigner(assigner) {
return baseRest(function(object, sources) {
var index = -1,
length = sources.length,
customizer = length > 1 ? sources[length - 1] : undefined,
guard = length > 2 ? sources[2] : undefined;
customizer = (assigner.length > 3 && typeof customizer == 'function')
? (length--, customizer)
: undefined;
if (guard && isIterateeCall(sources[0], sources[1], guard)) {
customizer = length < 3 ? undefined : customizer;
length = 1;
}
object = Object(object);
while (++index < length) {
var source = sources[index];
if (source) {
assigner(object, source, index, customizer);
}
}
return object;
});
}
/**
* Creates a `baseEach` or `baseEachRight` function.
*
* @private
* @param {Function} eachFunc The function to iterate over a collection.
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {Function} Returns the new base function.
*/
function createBaseEach(eachFunc, fromRight) {
return function(collection, iteratee) {
if (collection == null) {
return collection;
}
if (!isArrayLike(collection)) {
return eachFunc(collection, iteratee);
}
var length = collection.length,
index = fromRight ? length : -1,
iterable = Object(collection);
while ((fromRight ? index-- : ++index < length)) {
if (iteratee(iterable[index], index, iterable) === false) {
break;
}
}
return collection;
};
}
/**
* Creates a base function for methods like `_.forIn` and `_.forOwn`.
*
* @private
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {Function} Returns the new base function.
*/
function createBaseFor(fromRight) {
return function(object, iteratee, keysFunc) {
var index = -1,
iterable = Object(object),
props = keysFunc(object),
length = props.length;
while (length--) {
var key = props[fromRight ? length : ++index];
if (iteratee(iterable[key], key, iterable) === false) {
break;
}
}
return object;
};
}
/**
* Creates a function that wraps `func` to invoke it with the optional `this`
* binding of `thisArg`.
*
* @private
* @param {Function} func The function to wrap.
* @param {number} bitmask The bitmask flags. See `createWrap` for more details.
* @param {*} [thisArg] The `this` binding of `func`.
* @returns {Function} Returns the new wrapped function.
*/
function createBind(func, bitmask, thisArg) {
var isBind = bitmask & WRAP_BIND_FLAG,
Ctor = createCtor(func);
function wrapper() {
var fn = (this && this !== root && this instanceof wrapper) ? Ctor : func;
return fn.apply(isBind ? thisArg : this, arguments);
}
return wrapper;
}
/**
* Creates a function like `_.lowerFirst`.
*
* @private
* @param {string} methodName The name of the `String` case method to use.
* @returns {Function} Returns the new case function.
*/
function createCaseFirst(methodName) {
return function(string) {
string = toString(string);
var strSymbols = hasUnicode(string)
? stringToArray(string)
: undefined;
var chr = strSymbols
? strSymbols[0]
: string.charAt(0);
var trailing = strSymbols
? castSlice(strSymbols, 1).join('')
: string.slice(1);
return chr[methodName]() + trailing;
};
}
/**
* Creates a function like `_.camelCase`.
*
* @private
* @param {Function} callback The function to combine each word.
* @returns {Function} Returns the new compounder function.
*/
function createCompounder(callback) {
return function(string) {
return arrayReduce(words(deburr(string).replace(reApos, '')), callback, '');
};
}
/**
* Creates a function that produces an instance of `Ctor` regardless of
* whether it was invoked as part of a `new` expression or by `call` or `apply`.
*
* @private
* @param {Function} Ctor The constructor to wrap.
* @returns {Function} Returns the new wrapped function.
*/
function createCtor(Ctor) {
return function() {
// Use a `switch` statement to work with class constructors. See
// http://ecma-international.org/ecma-262/7.0/#sec-ecmascript-function-objects-call-thisargument-argumentslist
// for more details.
var args = arguments;
switch (args.length) {
case 0: return new Ctor;
case 1: return new Ctor(args[0]);
case 2: return new Ctor(args[0], args[1]);
case 3: return new Ctor(args[0], args[1], args[2]);
case 4: return new Ctor(args[0], args[1], args[2], args[3]);
case 5: return new Ctor(args[0], args[1], args[2], args[3], args[4]);
case 6: return new Ctor(args[0], args[1], args[2], args[3], args[4], args[5]);
case 7: return new Ctor(args[0], args[1], args[2], args[3], args[4], args[5], args[6]);
}
var thisBinding = baseCreate(Ctor.prototype),
result = Ctor.apply(thisBinding, args);
// Mimic the constructor's `return` behavior.
// See https://es5.github.io/#x13.2.2 for more details.
return isObject(result) ? result : thisBinding;
};
}
/**
* Creates a function that wraps `func` to enable currying.
*
* @private
* @param {Function} func The function to wrap.
* @param {number} bitmask The bitmask flags. See `createWrap` for more details.
* @param {number} arity The arity of `func`.
* @returns {Function} Returns the new wrapped function.
*/
function createCurry(func, bitmask, arity) {
var Ctor = createCtor(func);
function wrapper() {
var length = arguments.length,
args = Array(length),
index = length,
placeholder = getHolder(wrapper);
while (index--) {
args[index] = arguments[index];
}
var holders = (length < 3 && args[0] !== placeholder && args[length - 1] !== placeholder)
? []
: replaceHolders(args, placeholder);
length -= holders.length;
if (length < arity) {
return createRecurry(
func, bitmask, createHybrid, wrapper.placeholder, undefined,
args, holders, undefined, undefined, arity - length);
}
var fn = (this && this !== root && this instanceof wrapper) ? Ctor : func;
return apply(fn, this, args);
}
return wrapper;
}
/**
* Creates a `_.find` or `_.findLast` function.
*
* @private
* @param {Function} findIndexFunc The function to find the collection index.
* @returns {Function} Returns the new find function.
*/
function createFind(findIndexFunc) {
return function(collection, predicate, fromIndex) {
var iterable = Object(collection);
if (!isArrayLike(collection)) {
var iteratee = getIteratee(predicate, 3);
collection = keys(collection);
predicate = function(key) { return iteratee(iterable[key], key, iterable); };
}
var index = findIndexFunc(collection, predicate, fromIndex);
return index > -1 ? iterable[iteratee ? collection[index] : index] : undefined;
};
}
/**
* Creates a `_.flow` or `_.flowRight` function.
*
* @private
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {Function} Returns the new flow function.
*/
function createFlow(fromRight) {
return flatRest(function(funcs) {
var length = funcs.length,
index = length,
prereq = LodashWrapper.prototype.thru;
if (fromRight) {
funcs.reverse();
}
while (index--) {
var func = funcs[index];
if (typeof func != 'function') {
throw new TypeError(FUNC_ERROR_TEXT);
}
if (prereq && !wrapper && getFuncName(func) == 'wrapper') {
var wrapper = new LodashWrapper([], true);
}
}
index = wrapper ? index : length;
while (++index < length) {
func = funcs[index];
var funcName = getFuncName(func),
data = funcName == 'wrapper' ? getData(func) : undefined;
if (data && isLaziable(data[0]) &&
data[1] == (WRAP_ARY_FLAG | WRAP_CURRY_FLAG | WRAP_PARTIAL_FLAG | WRAP_REARG_FLAG) &&
!data[4].length && data[9] == 1
) {
wrapper = wrapper[getFuncName(data[0])].apply(wrapper, data[3]);
} else {
wrapper = (func.length == 1 && isLaziable(func))
? wrapper[funcName]()
: wrapper.thru(func);
}
}
return function() {
var args = arguments,
value = args[0];
if (wrapper && args.length == 1 && isArray(value)) {
return wrapper.plant(value).value();
}
var index = 0,
result = length ? funcs[index].apply(this, args) : value;
while (++index < length) {
result = funcs[index].call(this, result);
}
return result;
};
});
}
/**
* Creates a function that wraps `func` to invoke it with optional `this`
* binding of `thisArg`, partial application, and currying.
*
* @private
* @param {Function|string} func The function or method name to wrap.
* @param {number} bitmask The bitmask flags. See `createWrap` for more details.
* @param {*} [thisArg] The `this` binding of `func`.
* @param {Array} [partials] The arguments to prepend to those provided to
* the new function.
* @param {Array} [holders] The `partials` placeholder indexes.
* @param {Array} [partialsRight] The arguments to append to those provided
* to the new function.
* @param {Array} [holdersRight] The `partialsRight` placeholder indexes.
* @param {Array} [argPos] The argument positions of the new function.
* @param {number} [ary] The arity cap of `func`.
* @param {number} [arity] The arity of `func`.
* @returns {Function} Returns the new wrapped function.
*/
function createHybrid(func, bitmask, thisArg, partials, holders, partialsRight, holdersRight, argPos, ary, arity) {
var isAry = bitmask & WRAP_ARY_FLAG,
isBind = bitmask & WRAP_BIND_FLAG,
isBindKey = bitmask & WRAP_BIND_KEY_FLAG,
isCurried = bitmask & (WRAP_CURRY_FLAG | WRAP_CURRY_RIGHT_FLAG),
isFlip = bitmask & WRAP_FLIP_FLAG,
Ctor = isBindKey ? undefined : createCtor(func);
function wrapper() {
var length = arguments.length,
args = Array(length),
index = length;
while (index--) {
args[index] = arguments[index];
}
if (isCurried) {
var placeholder = getHolder(wrapper),
holdersCount = countHolders(args, placeholder);
}
if (partials) {
args = composeArgs(args, partials, holders, isCurried);
}
if (partialsRight) {
args = composeArgsRight(args, partialsRight, holdersRight, isCurried);
}
length -= holdersCount;
if (isCurried && length < arity) {
var newHolders = replaceHolders(args, placeholder);
return createRecurry(
func, bitmask, createHybrid, wrapper.placeholder, thisArg,
args, newHolders, argPos, ary, arity - length
);
}
var thisBinding = isBind ? thisArg : this,
fn = isBindKey ? thisBinding[func] : func;
length = args.length;
if (argPos) {
args = reorder(args, argPos);
} else if (isFlip && length > 1) {
args.reverse();
}
if (isAry && ary < length) {
args.length = ary;
}
if (this && this !== root && this instanceof wrapper) {
fn = Ctor || createCtor(fn);
}
return fn.apply(thisBinding, args);
}
return wrapper;
}
/**
* Creates a function like `_.invertBy`.
*
* @private
* @param {Function} setter The function to set accumulator values.
* @param {Function} toIteratee The function to resolve iteratees.
* @returns {Function} Returns the new inverter function.
*/
function createInverter(setter, toIteratee) {
return function(object, iteratee) {
return baseInverter(object, setter, toIteratee(iteratee), {});
};
}
/**
* Creates a function that performs a mathematical operation on two values.
*
* @private
* @param {Function} operator The function to perform the operation.
* @param {number} [defaultValue] The value used for `undefined` arguments.
* @returns {Function} Returns the new mathematical operation function.
*/
function createMathOperation(operator, defaultValue) {
return function(value, other) {
var result;
if (value === undefined && other === undefined) {
return defaultValue;
}
if (value !== undefined) {
result = value;
}
if (other !== undefined) {
if (result === undefined) {
return other;
}
if (typeof value == 'string' || typeof other == 'string') {
value = baseToString(value);
other = baseToString(other);
} else {
value = baseToNumber(value);
other = baseToNumber(other);
}
result = operator(value, other);
}
return result;
};
}
/**
* Creates a function like `_.over`.
*
* @private
* @param {Function} arrayFunc The function to iterate over iteratees.
* @returns {Function} Returns the new over function.
*/
function createOver(arrayFunc) {
return flatRest(function(iteratees) {
iteratees = arrayMap(iteratees, baseUnary(getIteratee()));
return baseRest(function(args) {
var thisArg = this;
return arrayFunc(iteratees, function(iteratee) {
return apply(iteratee, thisArg, args);
});
});
});
}
/**
* Creates the padding for `string` based on `length`. The `chars` string
* is truncated if the number of characters exceeds `length`.
*
* @private
* @param {number} length The padding length.
* @param {string} [chars=' '] The string used as padding.
* @returns {string} Returns the padding for `string`.
*/
function createPadding(length, chars) {
chars = chars === undefined ? ' ' : baseToString(chars);
var charsLength = chars.length;
if (charsLength < 2) {
return charsLength ? baseRepeat(chars, length) : chars;
}
var result = baseRepeat(chars, nativeCeil(length / stringSize(chars)));
return hasUnicode(chars)
? castSlice(stringToArray(result), 0, length).join('')
: result.slice(0, length);
}
/**
* Creates a function that wraps `func` to invoke it with the `this` binding
* of `thisArg` and `partials` prepended to the arguments it receives.
*
* @private
* @param {Function} func The function to wrap.
* @param {number} bitmask The bitmask flags. See `createWrap` for more details.
* @param {*} thisArg The `this` binding of `func`.
* @param {Array} partials The arguments to prepend to those provided to
* the new function.
* @returns {Function} Returns the new wrapped function.
*/
function createPartial(func, bitmask, thisArg, partials) {
var isBind = bitmask & WRAP_BIND_FLAG,
Ctor = createCtor(func);
function wrapper() {
var argsIndex = -1,
argsLength = arguments.length,
leftIndex = -1,
leftLength = partials.length,
args = Array(leftLength + argsLength),
fn = (this && this !== root && this instanceof wrapper) ? Ctor : func;
while (++leftIndex < leftLength) {
args[leftIndex] = partials[leftIndex];
}
while (argsLength--) {
args[leftIndex++] = arguments[++argsIndex];
}
return apply(fn, isBind ? thisArg : this, args);
}
return wrapper;
}
/**
* Creates a `_.range` or `_.rangeRight` function.
*
* @private
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {Function} Returns the new range function.
*/
function createRange(fromRight) {
return function(start, end, step) {
if (step && typeof step != 'number' && isIterateeCall(start, end, step)) {
end = step = undefined;
}
// Ensure the sign of `-0` is preserved.
start = toFinite(start);
if (end === undefined) {
end = start;
start = 0;
} else {
end = toFinite(end);
}
step = step === undefined ? (start < end ? 1 : -1) : toFinite(step);
return baseRange(start, end, step, fromRight);
};
}
/**
* Creates a function that performs a relational operation on two values.
*
* @private
* @param {Function} operator The function to perform the operation.
* @returns {Function} Returns the new relational operation function.
*/
function createRelationalOperation(operator) {
return function(value, other) {
if (!(typeof value == 'string' && typeof other == 'string')) {
value = toNumber(value);
other = toNumber(other);
}
return operator(value, other);
};
}
/**
* Creates a function that wraps `func` to continue currying.
*
* @private
* @param {Function} func The function to wrap.
* @param {number} bitmask The bitmask flags. See `createWrap` for more details.
* @param {Function} wrapFunc The function to create the `func` wrapper.
* @param {*} placeholder The placeholder value.
* @param {*} [thisArg] The `this` binding of `func`.
* @param {Array} [partials] The arguments to prepend to those provided to
* the new function.
* @param {Array} [holders] The `partials` placeholder indexes.
* @param {Array} [argPos] The argument positions of the new function.
* @param {number} [ary] The arity cap of `func`.
* @param {number} [arity] The arity of `func`.
* @returns {Function} Returns the new wrapped function.
*/
function createRecurry(func, bitmask, wrapFunc, placeholder, thisArg, partials, holders, argPos, ary, arity) {
var isCurry = bitmask & WRAP_CURRY_FLAG,
newHolders = isCurry ? holders : undefined,
newHoldersRight = isCurry ? undefined : holders,
newPartials = isCurry ? partials : undefined,
newPartialsRight = isCurry ? undefined : partials;
bitmask |= (isCurry ? WRAP_PARTIAL_FLAG : WRAP_PARTIAL_RIGHT_FLAG);
bitmask &= ~(isCurry ? WRAP_PARTIAL_RIGHT_FLAG : WRAP_PARTIAL_FLAG);
if (!(bitmask & WRAP_CURRY_BOUND_FLAG)) {
bitmask &= ~(WRAP_BIND_FLAG | WRAP_BIND_KEY_FLAG);
}
var newData = [
func, bitmask, thisArg, newPartials, newHolders, newPartialsRight,
newHoldersRight, argPos, ary, arity
];
var result = wrapFunc.apply(undefined, newData);
if (isLaziable(func)) {
setData(result, newData);
}
result.placeholder = placeholder;
return setWrapToString(result, func, bitmask);
}
/**
* Creates a function like `_.round`.
*
* @private
* @param {string} methodName The name of the `Math` method to use when rounding.
* @returns {Function} Returns the new round function.
*/
function createRound(methodName) {
var func = Math[methodName];
return function(number, precision) {
number = toNumber(number);
precision = precision == null ? 0 : nativeMin(toInteger(precision), 292);
if (precision && nativeIsFinite(number)) {
// Shift with exponential notation to avoid floating-point issues.
// See [MDN](https://mdn.io/round#Examples) for more details.
var pair = (toString(number) + 'e').split('e'),
value = func(pair[0] + 'e' + (+pair[1] + precision));
pair = (toString(value) + 'e').split('e');
return +(pair[0] + 'e' + (+pair[1] - precision));
}
return func(number);
};
}
/**
* Creates a set object of `values`.
*
* @private
* @param {Array} values The values to add to the set.
* @returns {Object} Returns the new set.
*/
var createSet = !(Set && (1 / setToArray(new Set([,-0]))[1]) == INFINITY) ? noop : function(values) {
return new Set(values);
};
/**
* Creates a `_.toPairs` or `_.toPairsIn` function.
*
* @private
* @param {Function} keysFunc The function to get the keys of a given object.
* @returns {Function} Returns the new pairs function.
*/
function createToPairs(keysFunc) {
return function(object) {
var tag = getTag(object);
if (tag == mapTag) {
return mapToArray(object);
}
if (tag == setTag) {
return setToPairs(object);
}
return baseToPairs(object, keysFunc(object));
};
}
/**
* Creates a function that either curries or invokes `func` with optional
* `this` binding and partially applied arguments.
*
* @private
* @param {Function|string} func The function or method name to wrap.
* @param {number} bitmask The bitmask flags.
* 1 - `_.bind`
* 2 - `_.bindKey`
* 4 - `_.curry` or `_.curryRight` of a bound function
* 8 - `_.curry`
* 16 - `_.curryRight`
* 32 - `_.partial`
* 64 - `_.partialRight`
* 128 - `_.rearg`
* 256 - `_.ary`
* 512 - `_.flip`
* @param {*} [thisArg] The `this` binding of `func`.
* @param {Array} [partials] The arguments to be partially applied.
* @param {Array} [holders] The `partials` placeholder indexes.
* @param {Array} [argPos] The argument positions of the new function.
* @param {number} [ary] The arity cap of `func`.
* @param {number} [arity] The arity of `func`.
* @returns {Function} Returns the new wrapped function.
*/
function createWrap(func, bitmask, thisArg, partials, holders, argPos, ary, arity) {
var isBindKey = bitmask & WRAP_BIND_KEY_FLAG;
if (!isBindKey && typeof func != 'function') {
throw new TypeError(FUNC_ERROR_TEXT);
}
var length = partials ? partials.length : 0;
if (!length) {
bitmask &= ~(WRAP_PARTIAL_FLAG | WRAP_PARTIAL_RIGHT_FLAG);
partials = holders = undefined;
}
ary = ary === undefined ? ary : nativeMax(toInteger(ary), 0);
arity = arity === undefined ? arity : toInteger(arity);
length -= holders ? holders.length : 0;
if (bitmask & WRAP_PARTIAL_RIGHT_FLAG) {
var partialsRight = partials,
holdersRight = holders;
partials = holders = undefined;
}
var data = isBindKey ? undefined : getData(func);
var newData = [
func, bitmask, thisArg, partials, holders, partialsRight, holdersRight,
argPos, ary, arity
];
if (data) {
mergeData(newData, data);
}
func = newData[0];
bitmask = newData[1];
thisArg = newData[2];
partials = newData[3];
holders = newData[4];
arity = newData[9] = newData[9] === undefined
? (isBindKey ? 0 : func.length)
: nativeMax(newData[9] - length, 0);
if (!arity && bitmask & (WRAP_CURRY_FLAG | WRAP_CURRY_RIGHT_FLAG)) {
bitmask &= ~(WRAP_CURRY_FLAG | WRAP_CURRY_RIGHT_FLAG);
}
if (!bitmask || bitmask == WRAP_BIND_FLAG) {
var result = createBind(func, bitmask, thisArg);
} else if (bitmask == WRAP_CURRY_FLAG || bitmask == WRAP_CURRY_RIGHT_FLAG) {
result = createCurry(func, bitmask, arity);
} else if ((bitmask == WRAP_PARTIAL_FLAG || bitmask == (WRAP_BIND_FLAG | WRAP_PARTIAL_FLAG)) && !holders.length) {
result = createPartial(func, bitmask, thisArg, partials);
} else {
result = createHybrid.apply(undefined, newData);
}
var setter = data ? baseSetData : setData;
return setWrapToString(setter(result, newData), func, bitmask);
}
/**
* Used by `_.defaults` to customize its `_.assignIn` use to assign properties
* of source objects to the destination object for all destination properties
* that resolve to `undefined`.
*
* @private
* @param {*} objValue The destination value.
* @param {*} srcValue The source value.
* @param {string} key The key of the property to assign.
* @param {Object} object The parent object of `objValue`.
* @returns {*} Returns the value to assign.
*/
function customDefaultsAssignIn(objValue, srcValue, key, object) {
if (objValue === undefined ||
(eq(objValue, objectProto[key]) && !hasOwnProperty.call(object, key))) {
return srcValue;
}
return objValue;
}
/**
* Used by `_.defaultsDeep` to customize its `_.merge` use to merge source
* objects into destination objects that are passed thru.
*
* @private
* @param {*} objValue The destination value.
* @param {*} srcValue The source value.
* @param {string} key The key of the property to merge.
* @param {Object} object The parent object of `objValue`.
* @param {Object} source The parent object of `srcValue`.
* @param {Object} [stack] Tracks traversed source values and their merged
* counterparts.
* @returns {*} Returns the value to assign.
*/
function customDefaultsMerge(objValue, srcValue, key, object, source, stack) {
if (isObject(objValue) && isObject(srcValue)) {
// Recursively merge objects and arrays (susceptible to call stack limits).
stack.set(srcValue, objValue);
baseMerge(objValue, srcValue, undefined, customDefaultsMerge, stack);
stack['delete'](srcValue);
}
return objValue;
}
/**
* Used by `_.omit` to customize its `_.cloneDeep` use to only clone plain
* objects.
*
* @private
     * @param {*} value The value to inspect.
* @returns {*} Returns the uncloned value or `undefined` to defer cloning to `_.cloneDeep`.
*/
function customOmitClone(value) {
return isPlainObject(value) ? undefined : value;
}
/**
* A specialized version of `baseIsEqualDeep` for arrays with support for
* partial deep comparisons.
*
* @private
* @param {Array} array The array to compare.
* @param {Array} other The other array to compare.
* @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details.
* @param {Function} customizer The function to customize comparisons.
* @param {Function} equalFunc The function to determine equivalents of values.
* @param {Object} stack Tracks traversed `array` and `other` objects.
* @returns {boolean} Returns `true` if the arrays are equivalent, else `false`.
*/
function equalArrays(array, other, bitmask, customizer, equalFunc, stack) {
var isPartial = bitmask & COMPARE_PARTIAL_FLAG,
arrLength = array.length,
othLength = other.length;
if (arrLength != othLength && !(isPartial && othLength > arrLength)) {
return false;
}
// Check that cyclic values are equal.
var arrStacked = stack.get(array);
var othStacked = stack.get(other);
if (arrStacked && othStacked) {
return arrStacked == other && othStacked == array;
}
var index = -1,
result = true,
seen = (bitmask & COMPARE_UNORDERED_FLAG) ? new SetCache : undefined;
stack.set(array, other);
stack.set(other, array);
// Ignore non-index properties.
while (++index < arrLength) {
var arrValue = array[index],
othValue = other[index];
if (customizer) {
var compared = isPartial
? customizer(othValue, arrValue, index, other, array, stack)
: customizer(arrValue, othValue, index, array, other, stack);
}
if (compared !== undefined) {
if (compared) {
continue;
}
result = false;
break;
}
// Recursively compare arrays (susceptible to call stack limits).
if (seen) {
if (!arraySome(other, function(othValue, othIndex) {
if (!cacheHas(seen, othIndex) &&
(arrValue === othValue || equalFunc(arrValue, othValue, bitmask, customizer, stack))) {
return seen.push(othIndex);
}
})) {
result = false;
break;
}
} else if (!(
arrValue === othValue ||
equalFunc(arrValue, othValue, bitmask, customizer, stack)
)) {
result = false;
break;
}
}
stack['delete'](array);
stack['delete'](other);
return result;
}
/**
* A specialized version of `baseIsEqualDeep` for comparing objects of
* the same `toStringTag`.
*
* **Note:** This function only supports comparing values with tags of
* `Boolean`, `Date`, `Error`, `Number`, `RegExp`, or `String`.
*
* @private
* @param {Object} object The object to compare.
* @param {Object} other The other object to compare.
* @param {string} tag The `toStringTag` of the objects to compare.
* @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details.
* @param {Function} customizer The function to customize comparisons.
* @param {Function} equalFunc The function to determine equivalents of values.
* @param {Object} stack Tracks traversed `object` and `other` objects.
* @returns {boolean} Returns `true` if the objects are equivalent, else `false`.
*/
function equalByTag(object, other, tag, bitmask, customizer, equalFunc, stack) {
switch (tag) {
case dataViewTag:
if ((object.byteLength != other.byteLength) ||
(object.byteOffset != other.byteOffset)) {
return false;
}
object = object.buffer;
other = other.buffer;
case arrayBufferTag:
if ((object.byteLength != other.byteLength) ||
!equalFunc(new Uint8Array(object), new Uint8Array(other))) {
return false;
}
return true;
case boolTag:
case dateTag:
case numberTag:
// Coerce booleans to `1` or `0` and dates to milliseconds.
// Invalid dates are coerced to `NaN`.
return eq(+object, +other);
case errorTag:
return object.name == other.name && object.message == other.message;
case regexpTag:
case stringTag:
// Coerce regexes to strings and treat strings, primitives and objects,
// as equal. See http://www.ecma-international.org/ecma-262/7.0/#sec-regexp.prototype.tostring
// for more details.
return object == (other + '');
case mapTag:
var convert = mapToArray;
case setTag:
var isPartial = bitmask & COMPARE_PARTIAL_FLAG;
convert || (convert = setToArray);
if (object.size != other.size && !isPartial) {
return false;
}
// Assume cyclic values are equal.
var stacked = stack.get(object);
if (stacked) {
return stacked == other;
}
bitmask |= COMPARE_UNORDERED_FLAG;
// Recursively compare objects (susceptible to call stack limits).
stack.set(object, other);
var result = equalArrays(convert(object), convert(other), bitmask, customizer, equalFunc, stack);
stack['delete'](object);
return result;
case symbolTag:
if (symbolValueOf) {
return symbolValueOf.call(object) == symbolValueOf.call(other);
}
}
return false;
}
/**
* A specialized version of `baseIsEqualDeep` for objects with support for
* partial deep comparisons.
*
* @private
* @param {Object} object The object to compare.
* @param {Object} other The other object to compare.
* @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details.
* @param {Function} customizer The function to customize comparisons.
* @param {Function} equalFunc The function to determine equivalents of values.
* @param {Object} stack Tracks traversed `object` and `other` objects.
* @returns {boolean} Returns `true` if the objects are equivalent, else `false`.
*/
function equalObjects(object, other, bitmask, customizer, equalFunc, stack) {
var isPartial = bitmask & COMPARE_PARTIAL_FLAG,
objProps = getAllKeys(object),
objLength = objProps.length,
othProps = getAllKeys(other),
othLength = othProps.length;
if (objLength != othLength && !isPartial) {
return false;
}
var index = objLength;
while (index--) {
var key = objProps[index];
if (!(isPartial ? key in other : hasOwnProperty.call(other, key))) {
return false;
}
}
// Check that cyclic values are equal.
var objStacked = stack.get(object);
var othStacked = stack.get(other);
if (objStacked && othStacked) {
return objStacked == other && othStacked == object;
}
var result = true;
stack.set(object, other);
stack.set(other, object);
var skipCtor = isPartial;
while (++index < objLength) {
key = objProps[index];
var objValue = object[key],
othValue = other[key];
if (customizer) {
var compared = isPartial
? customizer(othValue, objValue, key, other, object, stack)
: customizer(objValue, othValue, key, object, other, stack);
}
// Recursively compare objects (susceptible to call stack limits).
if (!(compared === undefined
? (objValue === othValue || equalFunc(objValue, othValue, bitmask, customizer, stack))
: compared
)) {
result = false;
break;
}
skipCtor || (skipCtor = key == 'constructor');
}
if (result && !skipCtor) {
var objCtor = object.constructor,
othCtor = other.constructor;
// Non `Object` object instances with different constructors are not equal.
if (objCtor != othCtor &&
('constructor' in object && 'constructor' in other) &&
!(typeof objCtor == 'function' && objCtor instanceof objCtor &&
typeof othCtor == 'function' && othCtor instanceof othCtor)) {
result = false;
}
}
stack['delete'](object);
stack['delete'](other);
return result;
}
/**
* A specialized version of `baseRest` which flattens the rest array.
*
* @private
* @param {Function} func The function to apply a rest parameter to.
* @returns {Function} Returns the new function.
*/
function flatRest(func) {
return setToString(overRest(func, undefined, flatten), func + '');
}
/**
* Creates an array of own enumerable property names and symbols of `object`.
*
* @private
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property names and symbols.
*/
function getAllKeys(object) {
return baseGetAllKeys(object, keys, getSymbols);
}
/**
* Creates an array of own and inherited enumerable property names and
* symbols of `object`.
*
* @private
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property names and symbols.
*/
function getAllKeysIn(object) {
return baseGetAllKeys(object, keysIn, getSymbolsIn);
}
/**
* Gets metadata for `func`.
*
* @private
* @param {Function} func The function to query.
* @returns {*} Returns the metadata for `func`.
*/
var getData = !metaMap ? noop : function(func) {
return metaMap.get(func);
};
/**
* Gets the name of `func`.
*
* @private
* @param {Function} func The function to query.
* @returns {string} Returns the function name.
*/
function getFuncName(func) {
var result = (func.name + ''),
array = realNames[result],
length = hasOwnProperty.call(realNames, result) ? array.length : 0;
while (length--) {
var data = array[length],
otherFunc = data.func;
if (otherFunc == null || otherFunc == func) {
return data.name;
}
}
return result;
}
/**
* Gets the argument placeholder value for `func`.
*
* @private
* @param {Function} func The function to inspect.
* @returns {*} Returns the placeholder value.
*/
function getHolder(func) {
var object = hasOwnProperty.call(lodash, 'placeholder') ? lodash : func;
return object.placeholder;
}
/**
* Gets the appropriate "iteratee" function. If `_.iteratee` is customized,
* this function returns the custom method, otherwise it returns `baseIteratee`.
* If arguments are provided, the chosen function is invoked with them and
* its result is returned.
*
* @private
* @param {*} [value] The value to convert to an iteratee.
* @param {number} [arity] The arity of the created iteratee.
* @returns {Function} Returns the chosen function or its result.
*/
function getIteratee() {
var result = lodash.iteratee || iteratee;
result = result === iteratee ? baseIteratee : result;
return arguments.length ? result(arguments[0], arguments[1]) : result;
}
/**
* Gets the data for `map`.
*
* @private
* @param {Object} map The map to query.
* @param {string} key The reference key.
* @returns {*} Returns the map data.
*/
function getMapData(map, key) {
var data = map.__data__;
return isKeyable(key)
? data[typeof key == 'string' ? 'string' : 'hash']
: data.map;
}
/**
* Gets the property names, values, and compare flags of `object`.
*
* @private
* @param {Object} object The object to query.
* @returns {Array} Returns the match data of `object`.
*/
function getMatchData(object) {
var result = keys(object),
length = result.length;
while (length--) {
var key = result[length],
value = object[key];
result[length] = [key, value, isStrictComparable(value)];
}
return result;
}
/**
* Gets the native function at `key` of `object`.
*
* @private
* @param {Object} object The object to query.
* @param {string} key The key of the method to get.
* @returns {*} Returns the function if it's native, else `undefined`.
*/
function getNative(object, key) {
var value = getValue(object, key);
return baseIsNative(value) ? value : undefined;
}
/**
* A specialized version of `baseGetTag` which ignores `Symbol.toStringTag` values.
*
* @private
* @param {*} value The value to query.
* @returns {string} Returns the raw `toStringTag`.
*/
function getRawTag(value) {
var isOwn = hasOwnProperty.call(value, symToStringTag),
tag = value[symToStringTag];
try {
value[symToStringTag] = undefined;
var unmasked = true;
} catch (e) {}
var result = nativeObjectToString.call(value);
if (unmasked) {
if (isOwn) {
value[symToStringTag] = tag;
} else {
delete value[symToStringTag];
}
}
return result;
}
/**
* Creates an array of the own enumerable symbols of `object`.
*
* @private
* @param {Object} object The object to query.
* @returns {Array} Returns the array of symbols.
*/
var getSymbols = !nativeGetSymbols ? stubArray : function(object) {
if (object == null) {
return [];
}
object = Object(object);
return arrayFilter(nativeGetSymbols(object), function(symbol) {
return propertyIsEnumerable.call(object, symbol);
});
};
/**
* Creates an array of the own and inherited enumerable symbols of `object`.
*
* @private
* @param {Object} object The object to query.
* @returns {Array} Returns the array of symbols.
*/
var getSymbolsIn = !nativeGetSymbols ? stubArray : function(object) {
var result = [];
while (object) {
arrayPush(result, getSymbols(object));
object = getPrototype(object);
}
return result;
};
/**
* Gets the `toStringTag` of `value`.
*
* @private
* @param {*} value The value to query.
* @returns {string} Returns the `toStringTag`.
*/
var getTag = baseGetTag;
// Fallback for data views, maps, sets, and weak maps in IE 11 and promises in Node.js < 6.
if ((DataView && getTag(new DataView(new ArrayBuffer(1))) != dataViewTag) ||
(Map && getTag(new Map) != mapTag) ||
(Promise && getTag(Promise.resolve()) != promiseTag) ||
(Set && getTag(new Set) != setTag) ||
(WeakMap && getTag(new WeakMap) != weakMapTag)) {
getTag = function(value) {
var result = baseGetTag(value),
Ctor = result == objectTag ? value.constructor : undefined,
ctorString = Ctor ? toSource(Ctor) : '';
if (ctorString) {
switch (ctorString) {
case dataViewCtorString: return dataViewTag;
case mapCtorString: return mapTag;
case promiseCtorString: return promiseTag;
case setCtorString: return setTag;
case weakMapCtorString: return weakMapTag;
}
}
return result;
};
}
/**
* Gets the view, applying any `transforms` to the `start` and `end` positions.
*
* @private
* @param {number} start The start of the view.
* @param {number} end The end of the view.
* @param {Array} transforms The transformations to apply to the view.
* @returns {Object} Returns an object containing the `start` and `end`
* positions of the view.
*/
function getView(start, end, transforms) {
var index = -1,
length = transforms.length;
while (++index < length) {
var data = transforms[index],
size = data.size;
switch (data.type) {
case 'drop': start += size; break;
case 'dropRight': end -= size; break;
case 'take': end = nativeMin(end, start + size); break;
case 'takeRight': start = nativeMax(start, end - size); break;
}
}
return { 'start': start, 'end': end };
}
/**
* Extracts wrapper details from the `source` body comment.
*
* @private
* @param {string} source The source to inspect.
* @returns {Array} Returns the wrapper details.
*/
function getWrapDetails(source) {
var match = source.match(reWrapDetails);
return match ? match[1].split(reSplitDetails) : [];
}
/**
* Checks if `path` exists on `object`.
*
* @private
* @param {Object} object The object to query.
* @param {Array|string} path The path to check.
* @param {Function} hasFunc The function to check properties.
* @returns {boolean} Returns `true` if `path` exists, else `false`.
*/
function hasPath(object, path, hasFunc) {
path = castPath(path, object);
var index = -1,
length = path.length,
result = false;
while (++index < length) {
var key = toKey(path[index]);
if (!(result = object != null && hasFunc(object, key))) {
break;
}
object = object[key];
}
if (result || ++index != length) {
return result;
}
length = object == null ? 0 : object.length;
return !!length && isLength(length) && isIndex(key, length) &&
(isArray(object) || isArguments(object));
}
/**
* Initializes an array clone.
*
* @private
* @param {Array} array The array to clone.
* @returns {Array} Returns the initialized clone.
*/
function initCloneArray(array) {
var length = array.length,
result = new array.constructor(length);
// Add properties assigned by `RegExp#exec`.
if (length && typeof array[0] == 'string' && hasOwnProperty.call(array, 'index')) {
result.index = array.index;
result.input = array.input;
}
return result;
}
/**
* Initializes an object clone.
*
* @private
* @param {Object} object The object to clone.
* @returns {Object} Returns the initialized clone.
*/
function initCloneObject(object) {
return (typeof object.constructor == 'function' && !isPrototype(object))
? baseCreate(getPrototype(object))
: {};
}
/**
* Initializes an object clone based on its `toStringTag`.
*
* **Note:** This function only supports cloning values with tags of
* `Boolean`, `Date`, `Error`, `Map`, `Number`, `RegExp`, `Set`, or `String`.
*
* @private
* @param {Object} object The object to clone.
* @param {string} tag The `toStringTag` of the object to clone.
* @param {boolean} [isDeep] Specify a deep clone.
* @returns {Object} Returns the initialized clone.
*/
function initCloneByTag(object, tag, isDeep) {
var Ctor = object.constructor;
switch (tag) {
case arrayBufferTag:
return cloneArrayBuffer(object);
case boolTag:
case dateTag:
return new Ctor(+object);
case dataViewTag:
return cloneDataView(object, isDeep);
case float32Tag: case float64Tag:
case int8Tag: case int16Tag: case int32Tag:
case uint8Tag: case uint8ClampedTag: case uint16Tag: case uint32Tag:
return cloneTypedArray(object, isDeep);
case mapTag:
return new Ctor;
case numberTag:
case stringTag:
return new Ctor(object);
case regexpTag:
return cloneRegExp(object);
case setTag:
return new Ctor;
case symbolTag:
return cloneSymbol(object);
}
}
/**
* Inserts wrapper `details` in a comment at the top of the `source` body.
*
* @private
* @param {string} source The source to modify.
     * @param {Array} details The details to insert.
* @returns {string} Returns the modified source.
*/
function insertWrapDetails(source, details) {
var length = details.length;
if (!length) {
return source;
}
var lastIndex = length - 1;
details[lastIndex] = (length > 1 ? '& ' : '') + details[lastIndex];
details = details.join(length > 2 ? ', ' : ' ');
return source.replace(reWrapComment, '{\n/* [wrapped with ' + details + '] */\n');
}
/**
* Checks if `value` is a flattenable `arguments` object or array.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is flattenable, else `false`.
*/
function isFlattenable(value) {
return isArray(value) || isArguments(value) ||
!!(spreadableSymbol && value && value[spreadableSymbol]);
}
/**
* Checks if `value` is a valid array-like index.
*
* @private
* @param {*} value The value to check.
* @param {number} [length=MAX_SAFE_INTEGER] The upper bounds of a valid index.
* @returns {boolean} Returns `true` if `value` is a valid index, else `false`.
*/
function isIndex(value, length) {
var type = typeof value;
length = length == null ? MAX_SAFE_INTEGER : length;
return !!length &&
(type == 'number' ||
(type != 'symbol' && reIsUint.test(value))) &&
(value > -1 && value % 1 == 0 && value < length);
}
/**
* Checks if the given arguments are from an iteratee call.
*
* @private
* @param {*} value The potential iteratee value argument.
* @param {*} index The potential iteratee index or key argument.
* @param {*} object The potential iteratee object argument.
* @returns {boolean} Returns `true` if the arguments are from an iteratee call,
* else `false`.
*/
function isIterateeCall(value, index, object) {
if (!isObject(object)) {
return false;
}
var type = typeof index;
if (type == 'number'
? (isArrayLike(object) && isIndex(index, object.length))
: (type == 'string' && index in object)
) {
return eq(object[index], value);
}
return false;
}
/**
* Checks if `value` is a property name and not a property path.
*
* @private
* @param {*} value The value to check.
* @param {Object} [object] The object to query keys on.
* @returns {boolean} Returns `true` if `value` is a property name, else `false`.
*/
function isKey(value, object) {
if (isArray(value)) {
return false;
}
var type = typeof value;
if (type == 'number' || type == 'symbol' || type == 'boolean' ||
value == null || isSymbol(value)) {
return true;
}
return reIsPlainProp.test(value) || !reIsDeepProp.test(value) ||
(object != null && value in Object(object));
}
/**
* Checks if `value` is suitable for use as unique object key.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is suitable, else `false`.
*/
function isKeyable(value) {
var type = typeof value;
return (type == 'string' || type == 'number' || type == 'symbol' || type == 'boolean')
? (value !== '__proto__')
: (value === null);
}
/**
* Checks if `func` has a lazy counterpart.
*
* @private
* @param {Function} func The function to check.
* @returns {boolean} Returns `true` if `func` has a lazy counterpart,
* else `false`.
*/
function isLaziable(func) {
var funcName = getFuncName(func),
other = lodash[funcName];
if (typeof other != 'function' || !(funcName in LazyWrapper.prototype)) {
return false;
}
if (func === other) {
return true;
}
var data = getData(other);
return !!data && func === data[0];
}
/**
* Checks if `func` has its source masked.
*
* @private
* @param {Function} func The function to check.
* @returns {boolean} Returns `true` if `func` is masked, else `false`.
*/
function isMasked(func) {
return !!maskSrcKey && (maskSrcKey in func);
}
/**
* Checks if `func` is capable of being masked.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `func` is maskable, else `false`.
*/
var isMaskable = coreJsData ? isFunction : stubFalse;
/**
* Checks if `value` is likely a prototype object.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a prototype, else `false`.
*/
function isPrototype(value) {
var Ctor = value && value.constructor,
proto = (typeof Ctor == 'function' && Ctor.prototype) || objectProto;
return value === proto;
}
/**
* Checks if `value` is suitable for strict equality comparisons, i.e. `===`.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` if suitable for strict
* equality comparisons, else `false`.
*/
function isStrictComparable(value) {
return value === value && !isObject(value);
}
/**
* A specialized version of `matchesProperty` for source values suitable
* for strict equality comparisons, i.e. `===`.
*
* @private
* @param {string} key The key of the property to get.
* @param {*} srcValue The value to match.
* @returns {Function} Returns the new spec function.
*/
function matchesStrictComparable(key, srcValue) {
return function(object) {
if (object == null) {
return false;
}
return object[key] === srcValue &&
(srcValue !== undefined || (key in Object(object)));
};
}
/**
 * A specialized version of `_.memoize` which clears the memoized function's
 * cache when it exceeds `MAX_MEMOIZE_SIZE`.
 *
 * @private
 * @param {Function} func The function to have its output memoized.
 * @returns {Function} Returns the new memoized function.
 */
function memoizeCapped(func) {
  var result = memoize(func, function(key) {
    // `cache` is the hoisted `var` assigned below; the resolver only runs
    // on later calls to `result`, after that assignment has happened.
    if (cache.size === MAX_MEMOIZE_SIZE) {
      cache.clear();
    }
    return key;
  });
  var cache = result.cache;
  return result;
}
/**
 * Merges the function metadata of `source` into `data`.
 *
 * Merging metadata reduces the number of wrappers used to invoke a function.
 * This is possible because methods like `_.bind`, `_.curry`, and `_.partial`
 * may be applied regardless of execution order. Methods like `_.ary` and
 * `_.rearg` modify function arguments, making the order in which they are
 * executed important, preventing the merging of metadata. However, we make
 * an exception for a safe combined case where curried functions have `_.ary`
 * and or `_.rearg` applied.
 *
 * @private
 * @param {Array} data The destination metadata.
 * @param {Array} source The source metadata.
 * @returns {Array} Returns `data`.
 */
function mergeData(data, source) {
  // Metadata slot layout (as used below):
  // [0] func, [1] bitmask, [2] thisArg, [3] partials, [4] holders,
  // [5] partialsRight, [6] holdersRight, [7] argPos, [8] ary, [9] arity.
  var bitmask = data[1],
      srcBitmask = source[1],
      newBitmask = bitmask | srcBitmask,
      // Order-insensitive only when no ary/rearg-class flags are involved.
      isCommon = newBitmask < (WRAP_BIND_FLAG | WRAP_BIND_KEY_FLAG | WRAP_ARY_FLAG);
  // The whitelisted ary/rearg + curry combinations described above.
  var isCombo =
    ((srcBitmask == WRAP_ARY_FLAG) && (bitmask == WRAP_CURRY_FLAG)) ||
    ((srcBitmask == WRAP_ARY_FLAG) && (bitmask == WRAP_REARG_FLAG) && (data[7].length <= source[8])) ||
    ((srcBitmask == (WRAP_ARY_FLAG | WRAP_REARG_FLAG)) && (source[7].length <= source[8]) && (bitmask == WRAP_CURRY_FLAG));
  // Exit early if metadata can't be merged.
  if (!(isCommon || isCombo)) {
    return data;
  }
  // Use source `thisArg` if available.
  if (srcBitmask & WRAP_BIND_FLAG) {
    data[2] = source[2];
    // Set when currying a bound function.
    newBitmask |= bitmask & WRAP_BIND_FLAG ? 0 : WRAP_CURRY_BOUND_FLAG;
  }
  // Compose partial arguments.
  var value = source[3];
  if (value) {
    var partials = data[3];
    data[3] = partials ? composeArgs(partials, value, source[4]) : value;
    data[4] = partials ? replaceHolders(data[3], PLACEHOLDER) : source[4];
  }
  // Compose partial right arguments.
  value = source[5];
  if (value) {
    partials = data[5];
    data[5] = partials ? composeArgsRight(partials, value, source[6]) : value;
    data[6] = partials ? replaceHolders(data[5], PLACEHOLDER) : source[6];
  }
  // Use source `argPos` if available.
  value = source[7];
  if (value) {
    data[7] = value;
  }
  // Use source `ary` if it's smaller.
  if (srcBitmask & WRAP_ARY_FLAG) {
    data[8] = data[8] == null ? source[8] : nativeMin(data[8], source[8]);
  }
  // Use source `arity` if one is not provided.
  if (data[9] == null) {
    data[9] = source[9];
  }
  // Use source `func` and merge bitmasks.
  data[0] = source[0];
  data[1] = newBitmask;
  return data;
}
/**
 * This function is like
 * [`Object.keys`](http://ecma-international.org/ecma-262/7.0/#sec-object.keys)
 * except that it includes inherited enumerable properties.
 *
 * @private
 * @param {Object} object The object to query.
 * @returns {Array} Returns the array of property names.
 */
function nativeKeysIn(object) {
  var props = [];
  if (object == null) {
    return props;
  }
  // `for...in` walks own and inherited enumerable keys alike.
  for (var key in Object(object)) {
    props.push(key);
  }
  return props;
}
/**
 * Converts `value` to a string using `Object.prototype.toString`.
 *
 * @private
 * @param {*} value The value to convert.
 * @returns {string} Returns the converted string.
 */
function objectToString(value) {
  // Invoke the captured reference with `value` as receiver, bypassing any
  // `toString` override on `value` itself.
  return nativeObjectToString.call(value);
}
/**
 * A specialized version of `baseRest` which transforms the rest array.
 *
 * @private
 * @param {Function} func The function to apply a rest parameter to.
 * @param {number} [start=func.length-1] The start position of the rest parameter.
 * @param {Function} transform The rest array transform.
 * @returns {Function} Returns the new function.
 */
function overRest(func, start, transform) {
  start = nativeMax(start === undefined ? (func.length - 1) : start, 0);
  return function() {
    var args = arguments;
    // Collect everything from `start` onward into the rest array.
    var restLength = nativeMax(args.length - start, 0);
    var rest = Array(restLength);
    for (var i = 0; i < restLength; i++) {
      rest[i] = args[start + i];
    }
    // Leading args pass through unchanged; the transformed rest array
    // becomes the final argument.
    var callArgs = Array(start + 1);
    for (var j = 0; j < start; j++) {
      callArgs[j] = args[j];
    }
    callArgs[start] = transform(rest);
    return apply(func, this, callArgs);
  };
}
/**
 * Gets the parent value at `path` of `object`.
 *
 * @private
 * @param {Object} object The object to query.
 * @param {Array} path The path to get the parent value of.
 * @returns {*} Returns the parent value.
 */
function parent(object, path) {
  // A path of zero or one keys has `object` itself as the parent.
  if (path.length < 2) {
    return object;
  }
  return baseGet(object, baseSlice(path, 0, -1));
}
/**
 * Reorder `array` according to the specified indexes where the element at
 * the first index is assigned as the first element, the element at
 * the second index is assigned as the second element, and so on.
 *
 * @private
 * @param {Array} array The array to reorder.
 * @param {Array} indexes The arranged array indexes.
 * @returns {Array} Returns `array`.
 */
function reorder(array, indexes) {
  var arrLength = array.length;
  var count = nativeMin(indexes.length, arrLength);
  // Snapshot the original values before mutating in place.
  var snapshot = copyArray(array);
  while (count--) {
    var idx = indexes[count];
    array[count] = isIndex(idx, arrLength) ? snapshot[idx] : undefined;
  }
  return array;
}
/**
 * Gets the value at `key`, unless `key` is "__proto__" or "constructor".
 *
 * @private
 * @param {Object} object The object to query.
 * @param {string} key The key of the property to get.
 * @returns {*} Returns the property value.
 */
function safeGet(object, key) {
  // Block prototype-pollution vectors.
  if (key == '__proto__') {
    return;
  }
  if (key === 'constructor' && typeof object[key] === 'function') {
    return;
  }
  return object[key];
}
/**
 * Sets metadata for `func`.
 *
 * **Note:** If this function becomes hot, i.e. is invoked a lot in a short
 * period of time, it will trip its breaker and transition to an identity
 * function to avoid garbage collection pauses in V8. See
 * [V8 issue 2070](https://bugs.chromium.org/p/v8/issues/detail?id=2070)
 * for more details.
 *
 * @private
 * @param {Function} func The function to associate metadata with.
 * @param {*} data The metadata.
 * @returns {Function} Returns `func`.
 */
// `shortOut` provides the breaker behavior described above.
var setData = shortOut(baseSetData);
/**
 * A simple wrapper around the global [`setTimeout`](https://mdn.io/setTimeout).
 *
 * @private
 * @param {Function} func The function to delay.
 * @param {number} wait The number of milliseconds to delay invocation.
 * @returns {number|Object} Returns the timer id or timeout object.
 */
// Prefer the captured `ctxSetTimeout`; fall back to the root object's
// implementation when it is unavailable.
var setTimeout = ctxSetTimeout || function(func, wait) {
  return root.setTimeout(func, wait);
};
/**
 * Sets the `toString` method of `func` to return `string`.
 *
 * @private
 * @param {Function} func The function to modify.
 * @param {Function} string The `toString` result.
 * @returns {Function} Returns `func`.
 */
// Rate-limited via `shortOut` like `setData` (see that function's note).
var setToString = shortOut(baseSetToString);
/**
 * Sets the `toString` method of `wrapper` to mimic the source of `reference`
 * with wrapper details in a comment at the top of the source body.
 *
 * @private
 * @param {Function} wrapper The function to modify.
 * @param {Function} reference The reference function.
 * @param {number} bitmask The bitmask flags. See `createWrap` for more details.
 * @returns {Function} Returns `wrapper`.
 */
function setWrapToString(wrapper, reference, bitmask) {
  var source = (reference + '');
  // Extract existing detail lines, merge in those implied by `bitmask`,
  // then re-embed them into the source text.
  var details = updateWrapDetails(getWrapDetails(source), bitmask);
  return setToString(wrapper, insertWrapDetails(source, details));
}
/**
 * Creates a function that'll short out and invoke `identity` instead
 * of `func` when it's called `HOT_COUNT` or more times in `HOT_SPAN`
 * milliseconds.
 *
 * @private
 * @param {Function} func The function to restrict.
 * @returns {Function} Returns the new shortable function.
 */
function shortOut(func) {
  var count = 0;
  var lastCalled = 0;
  return function() {
    var stamp = nativeNow();
    var remaining = HOT_SPAN - (stamp - lastCalled);
    // Record this call time before deciding whether to short out.
    lastCalled = stamp;
    if (remaining > 0) {
      count += 1;
      if (count >= HOT_COUNT) {
        // Too hot: act as identity on the first argument.
        return arguments[0];
      }
    } else {
      // The burst window has passed; reset the counter.
      count = 0;
    }
    return func.apply(undefined, arguments);
  };
}
/**
 * A specialized version of `_.shuffle` which mutates and sets the size of `array`.
 *
 * @private
 * @param {Array} array The array to shuffle.
 * @param {number} [size=array.length] The size of `array`.
 * @returns {Array} Returns `array`.
 */
function shuffleSelf(array, size) {
  var length = array.length;
  var lastIndex = length - 1;
  if (size === undefined) {
    size = length;
  }
  // Fisher-Yates style in-place swap up to `size` elements.
  for (var index = 0; index < size; index++) {
    var rand = baseRandom(index, lastIndex);
    var tmp = array[rand];
    array[rand] = array[index];
    array[index] = tmp;
  }
  array.length = size;
  return array;
}
/**
 * Converts `string` to a property path array.
 *
 * @private
 * @param {string} string The string to convert.
 * @returns {Array} Returns the property path array.
 */
var stringToPath = memoizeCapped(function(string) {
  var result = [];
  // A leading dot denotes an implicit empty head key, e.g. '.a' -> ['', 'a'].
  if (string.charCodeAt(0) === 46 /* . */) {
    result.push('');
  }
  // Each `rePropName` match contributes either a bare `number` index, a
  // `quote`d `subString` (escapes collapsed via `reEscapeChar`), or the
  // raw `match` text.
  string.replace(rePropName, function(match, number, quote, subString) {
    result.push(quote ? subString.replace(reEscapeChar, '$1') : (number || match));
  });
  return result;
});
/**
 * Converts `value` to a string key if it's not a string or symbol.
 *
 * @private
 * @param {*} value The value to inspect.
 * @returns {string|symbol} Returns the key.
 */
function toKey(value) {
  if (typeof value == 'string' || isSymbol(value)) {
    return value;
  }
  var result = '' + value;
  // Preserve the distinction between 0 and -0 (both stringify to '0').
  if (result == '0' && (1 / value) == -INFINITY) {
    return '-0';
  }
  return result;
}
/**
 * Converts `func` to its source code.
 *
 * @private
 * @param {Function} func The function to convert.
 * @returns {string} Returns the source code.
 */
function toSource(func) {
  if (func == null) {
    return '';
  }
  // Prefer the untampered `Function.prototype.toString` reference, then
  // fall back to ordinary string coercion; swallow failures either way.
  try {
    return funcToString.call(func);
  } catch (e) {}
  try {
    return ('' + func);
  } catch (e) {}
  return '';
}
/**
 * Updates wrapper `details` based on `bitmask` flags.
 *
 * @private
 * @returns {Array} details The details to modify.
 * @param {number} bitmask The bitmask flags. See `createWrap` for more details.
 * @returns {Array} Returns `details`.
 */
function updateWrapDetails(details, bitmask) {
  for (var index = 0; index < wrapFlags.length; index++) {
    var pair = wrapFlags[index];
    var label = '_.' + pair[0];
    // Add each flagged method name once.
    if ((bitmask & pair[1]) && !arrayIncludes(details, label)) {
      details.push(label);
    }
  }
  return details.sort();
}
/**
 * Creates a clone of `wrapper`.
 *
 * @private
 * @param {Object} wrapper The wrapper to clone.
 * @returns {Object} Returns the cloned wrapper.
 */
function wrapperClone(wrapper) {
  // Lazy wrappers know how to clone themselves.
  if (wrapper instanceof LazyWrapper) {
    return wrapper.clone();
  }
  var clone = new LodashWrapper(wrapper.__wrapped__, wrapper.__chain__);
  clone.__actions__ = copyArray(wrapper.__actions__);
  clone.__index__ = wrapper.__index__;
  clone.__values__ = wrapper.__values__;
  return clone;
}
/*------------------------------------------------------------------------*/
/**
 * Splits `array` into groups of `size` elements. When the array doesn't
 * divide evenly, the final chunk holds the leftovers.
 *
 * @static
 * @memberOf _
 * @since 3.0.0
 * @category Array
 * @param {Array} array The array to process.
 * @param {number} [size=1] The length of each chunk
 * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
 * @returns {Array} Returns the new array of chunks.
 * @example
 *
 * _.chunk(['a', 'b', 'c', 'd'], 2);
 * // => [['a', 'b'], ['c', 'd']]
 *
 * _.chunk(['a', 'b', 'c', 'd'], 3);
 * // => [['a', 'b', 'c'], ['d']]
 */
function chunk(array, size, guard) {
  if (guard ? isIterateeCall(array, size, guard) : size === undefined) {
    size = 1;
  } else {
    size = nativeMax(toInteger(size), 0);
  }
  var length = array == null ? 0 : array.length;
  if (!length || size < 1) {
    return [];
  }
  var result = Array(nativeCeil(length / size));
  var resIndex = 0;
  var index = 0;
  while (index < length) {
    var next = index + size;
    result[resIndex++] = baseSlice(array, index, next);
    index = next;
  }
  return result;
}
/**
 * Builds a new array containing only the truthy values of `array`. The
 * values `false`, `null`, `0`, `""`, `undefined`, and `NaN` are dropped.
 *
 * @static
 * @memberOf _
 * @since 0.1.0
 * @category Array
 * @param {Array} array The array to compact.
 * @returns {Array} Returns the new array of filtered values.
 * @example
 *
 * _.compact([0, 1, false, 2, '', 3]);
 * // => [1, 2, 3]
 */
function compact(array) {
  var result = [];
  var length = array == null ? 0 : array.length;
  for (var index = 0; index < length; index++) {
    var value = array[index];
    if (value) {
      result.push(value);
    }
  }
  return result;
}
/**
 * Builds a new array by concatenating `array` with any additional arrays
 * and/or values.
 *
 * @static
 * @memberOf _
 * @since 4.0.0
 * @category Array
 * @param {Array} array The array to concatenate.
 * @param {...*} [values] The values to concatenate.
 * @returns {Array} Returns the new concatenated array.
 * @example
 *
 * var array = [1];
 * var other = _.concat(array, 2, [3], [[4]]);
 *
 * console.log(other);
 * // => [1, 2, 3, [4]]
 *
 * console.log(array);
 * // => [1]
 */
function concat() {
  var length = arguments.length;
  if (!length) {
    return [];
  }
  var array = arguments[0];
  // Gather the trailing arguments for a single-level flatten.
  var rest = Array(length - 1);
  for (var index = 1; index < length; index++) {
    rest[index - 1] = arguments[index];
  }
  // Copy so the input `array` isn't mutated; wrap non-arrays.
  var result = isArray(array) ? copyArray(array) : [array];
  return arrayPush(result, baseFlatten(rest, 1));
}
/**
 * Builds an array of `array` values not present in any of the other given
 * arrays, using
 * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
 * for equality comparisons. The order and references of result values are
 * determined by the first array.
 *
 * **Note:** Unlike `_.pullAll`, this method returns a new array.
 *
 * @static
 * @memberOf _
 * @since 0.1.0
 * @category Array
 * @param {Array} array The array to inspect.
 * @param {...Array} [values] The values to exclude.
 * @returns {Array} Returns the new array of filtered values.
 * @see _.without, _.xor
 * @example
 *
 * _.difference([2, 1], [2, 3]);
 * // => [1]
 */
var difference = baseRest(function(array, values) {
  if (!isArrayLikeObject(array)) {
    return [];
  }
  return baseDifference(array, baseFlatten(values, 1, isArrayLikeObject, true));
});
/**
 * Like `_.difference` except that `iteratee` is invoked for each element of
 * `array` and `values` to generate the criterion by which they're compared.
 * The order and references of result values are determined by the first
 * array. The iteratee is invoked with one argument: (value).
 *
 * **Note:** Unlike `_.pullAllBy`, this method returns a new array.
 *
 * @static
 * @memberOf _
 * @since 4.0.0
 * @category Array
 * @param {Array} array The array to inspect.
 * @param {...Array} [values] The values to exclude.
 * @param {Function} [iteratee=_.identity] The iteratee invoked per element.
 * @returns {Array} Returns the new array of filtered values.
 * @example
 *
 * _.differenceBy([2.1, 1.2], [2.3, 3.4], Math.floor);
 * // => [1.2]
 *
 * // The `_.property` iteratee shorthand.
 * _.differenceBy([{ 'x': 2 }, { 'x': 1 }], [{ 'x': 1 }], 'x');
 * // => [{ 'x': 2 }]
 */
var differenceBy = baseRest(function(array, values) {
  // The last argument is the iteratee unless it looks like another
  // `values` array.
  var iteratee = last(values);
  if (isArrayLikeObject(iteratee)) {
    iteratee = undefined;
  }
  if (!isArrayLikeObject(array)) {
    return [];
  }
  return baseDifference(array, baseFlatten(values, 1, isArrayLikeObject, true), getIteratee(iteratee, 2));
});
/**
 * Like `_.difference` except that `comparator` is invoked to compare
 * elements of `array` to `values`. The order and references of result
 * values are determined by the first array. The comparator is invoked with
 * two arguments: (arrVal, othVal).
 *
 * **Note:** Unlike `_.pullAllWith`, this method returns a new array.
 *
 * @static
 * @memberOf _
 * @since 4.0.0
 * @category Array
 * @param {Array} array The array to inspect.
 * @param {...Array} [values] The values to exclude.
 * @param {Function} [comparator] The comparator invoked per element.
 * @returns {Array} Returns the new array of filtered values.
 * @example
 *
 * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }];
 *
 * _.differenceWith(objects, [{ 'x': 1, 'y': 2 }], _.isEqual);
 * // => [{ 'x': 2, 'y': 1 }]
 */
var differenceWith = baseRest(function(array, values) {
  // The last argument is the comparator unless it looks like another
  // `values` array.
  var comparator = last(values);
  if (isArrayLikeObject(comparator)) {
    comparator = undefined;
  }
  if (!isArrayLikeObject(array)) {
    return [];
  }
  return baseDifference(array, baseFlatten(values, 1, isArrayLikeObject, true), undefined, comparator);
});
/**
 * Builds a slice of `array` with `n` elements dropped from the beginning.
 *
 * @static
 * @memberOf _
 * @since 0.5.0
 * @category Array
 * @param {Array} array The array to query.
 * @param {number} [n=1] The number of elements to drop.
 * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
 * @returns {Array} Returns the slice of `array`.
 * @example
 *
 * _.drop([1, 2, 3]);
 * // => [2, 3]
 *
 * _.drop([1, 2, 3], 2);
 * // => [3]
 *
 * _.drop([1, 2, 3], 5);
 * // => []
 *
 * _.drop([1, 2, 3], 0);
 * // => [1, 2, 3]
 */
function drop(array, n, guard) {
  var length = array == null ? 0 : array.length;
  if (!length) {
    return [];
  }
  if (guard || n === undefined) {
    n = 1;
  } else {
    n = toInteger(n);
  }
  // Clamp negative counts to zero.
  return baseSlice(array, n < 0 ? 0 : n, length);
}
/**
 * Builds a slice of `array` with `n` elements dropped from the end.
 *
 * @static
 * @memberOf _
 * @since 3.0.0
 * @category Array
 * @param {Array} array The array to query.
 * @param {number} [n=1] The number of elements to drop.
 * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
 * @returns {Array} Returns the slice of `array`.
 * @example
 *
 * _.dropRight([1, 2, 3]);
 * // => [1, 2]
 *
 * _.dropRight([1, 2, 3], 2);
 * // => [1]
 *
 * _.dropRight([1, 2, 3], 5);
 * // => []
 *
 * _.dropRight([1, 2, 3], 0);
 * // => [1, 2, 3]
 */
function dropRight(array, n, guard) {
  var length = array == null ? 0 : array.length;
  if (!length) {
    return [];
  }
  if (guard || n === undefined) {
    n = 1;
  } else {
    n = toInteger(n);
  }
  // Slice up to the new end; clamp at zero if dropping everything.
  var end = length - n;
  return baseSlice(array, 0, end < 0 ? 0 : end);
}
/**
 * Builds a slice of `array` excluding elements dropped from the end.
 * Elements are dropped until `predicate` returns falsey. The predicate is
 * invoked with three arguments: (value, index, array).
 *
 * @static
 * @memberOf _
 * @since 3.0.0
 * @category Array
 * @param {Array} array The array to query.
 * @param {Function} [predicate=_.identity] The function invoked per iteration.
 * @returns {Array} Returns the slice of `array`.
 * @example
 *
 * var users = [
 *   { 'user': 'barney',  'active': true },
 *   { 'user': 'fred',    'active': false },
 *   { 'user': 'pebbles', 'active': false }
 * ];
 *
 * _.dropRightWhile(users, function(o) { return !o.active; });
 * // => objects for ['barney']
 *
 * // The `_.matches` iteratee shorthand.
 * _.dropRightWhile(users, { 'user': 'pebbles', 'active': false });
 * // => objects for ['barney', 'fred']
 *
 * // The `_.matchesProperty` iteratee shorthand.
 * _.dropRightWhile(users, ['active', false]);
 * // => objects for ['barney']
 *
 * // The `_.property` iteratee shorthand.
 * _.dropRightWhile(users, 'active');
 * // => objects for ['barney', 'fred', 'pebbles']
 */
function dropRightWhile(array, predicate) {
  if (!(array && array.length)) {
    return [];
  }
  // isDrop=true, fromRight=true.
  return baseWhile(array, getIteratee(predicate, 3), true, true);
}
/**
 * Builds a slice of `array` excluding elements dropped from the beginning.
 * Elements are dropped until `predicate` returns falsey. The predicate is
 * invoked with three arguments: (value, index, array).
 *
 * @static
 * @memberOf _
 * @since 3.0.0
 * @category Array
 * @param {Array} array The array to query.
 * @param {Function} [predicate=_.identity] The function invoked per iteration.
 * @returns {Array} Returns the slice of `array`.
 * @example
 *
 * var users = [
 *   { 'user': 'barney',  'active': false },
 *   { 'user': 'fred',    'active': false },
 *   { 'user': 'pebbles', 'active': true }
 * ];
 *
 * _.dropWhile(users, function(o) { return !o.active; });
 * // => objects for ['pebbles']
 *
 * // The `_.matches` iteratee shorthand.
 * _.dropWhile(users, { 'user': 'barney', 'active': false });
 * // => objects for ['fred', 'pebbles']
 *
 * // The `_.matchesProperty` iteratee shorthand.
 * _.dropWhile(users, ['active', false]);
 * // => objects for ['pebbles']
 *
 * // The `_.property` iteratee shorthand.
 * _.dropWhile(users, 'active');
 * // => objects for ['barney', 'fred', 'pebbles']
 */
function dropWhile(array, predicate) {
  if (!(array && array.length)) {
    return [];
  }
  // isDrop=true (from the left).
  return baseWhile(array, getIteratee(predicate, 3), true);
}
/**
 * Fills elements of `array` with `value` from `start` up to, but not
 * including, `end`.
 *
 * **Note:** This method mutates `array`.
 *
 * @static
 * @memberOf _
 * @since 3.2.0
 * @category Array
 * @param {Array} array The array to fill.
 * @param {*} value The value to fill `array` with.
 * @param {number} [start=0] The start position.
 * @param {number} [end=array.length] The end position.
 * @returns {Array} Returns `array`.
 * @example
 *
 * var array = [1, 2, 3];
 *
 * _.fill(array, 'a');
 * console.log(array);
 * // => ['a', 'a', 'a']
 *
 * _.fill(Array(3), 2);
 * // => [2, 2, 2]
 *
 * _.fill([4, 6, 8, 10], '*', 1, 3);
 * // => [4, '*', '*', 10]
 */
function fill(array, value, start, end) {
  var length = array == null ? 0 : array.length;
  if (!length) {
    return [];
  }
  // When invoked as an iteratee, ignore the extra positional arguments.
  var calledAsIteratee = start && typeof start != 'number' && isIterateeCall(array, value, start);
  if (calledAsIteratee) {
    start = 0;
    end = length;
  }
  return baseFill(array, value, start, end);
}
/**
 * Like `_.find` except that it returns the index of the first element
 * `predicate` returns truthy for instead of the element itself.
 *
 * @static
 * @memberOf _
 * @since 1.1.0
 * @category Array
 * @param {Array} array The array to inspect.
 * @param {Function} [predicate=_.identity] The function invoked per iteration.
 * @param {number} [fromIndex=0] The index to search from.
 * @returns {number} Returns the index of the found element, else `-1`.
 * @example
 *
 * var users = [
 *   { 'user': 'barney',  'active': false },
 *   { 'user': 'fred',    'active': false },
 *   { 'user': 'pebbles', 'active': true }
 * ];
 *
 * _.findIndex(users, function(o) { return o.user == 'barney'; });
 * // => 0
 *
 * // The `_.matches` iteratee shorthand.
 * _.findIndex(users, { 'user': 'fred', 'active': false });
 * // => 1
 *
 * // The `_.matchesProperty` iteratee shorthand.
 * _.findIndex(users, ['active', false]);
 * // => 0
 *
 * // The `_.property` iteratee shorthand.
 * _.findIndex(users, 'active');
 * // => 2
 */
function findIndex(array, predicate, fromIndex) {
  var length = array == null ? 0 : array.length;
  if (!length) {
    return -1;
  }
  var index = 0;
  if (fromIndex != null) {
    index = toInteger(fromIndex);
    // A negative start counts back from the end, clamped at zero.
    if (index < 0) {
      index = nativeMax(length + index, 0);
    }
  }
  return baseFindIndex(array, getIteratee(predicate, 3), index);
}
/**
 * Like `_.findIndex` except that it iterates over elements of `collection`
 * from right to left.
 *
 * @static
 * @memberOf _
 * @since 2.0.0
 * @category Array
 * @param {Array} array The array to inspect.
 * @param {Function} [predicate=_.identity] The function invoked per iteration.
 * @param {number} [fromIndex=array.length-1] The index to search from.
 * @returns {number} Returns the index of the found element, else `-1`.
 * @example
 *
 * var users = [
 *   { 'user': 'barney',  'active': true },
 *   { 'user': 'fred',    'active': false },
 *   { 'user': 'pebbles', 'active': false }
 * ];
 *
 * _.findLastIndex(users, function(o) { return o.user == 'pebbles'; });
 * // => 2
 *
 * // The `_.matches` iteratee shorthand.
 * _.findLastIndex(users, { 'user': 'barney', 'active': true });
 * // => 0
 *
 * // The `_.matchesProperty` iteratee shorthand.
 * _.findLastIndex(users, ['active', false]);
 * // => 2
 *
 * // The `_.property` iteratee shorthand.
 * _.findLastIndex(users, 'active');
 * // => 0
 */
function findLastIndex(array, predicate, fromIndex) {
  var length = array == null ? 0 : array.length;
  if (!length) {
    return -1;
  }
  var index = length - 1;
  if (fromIndex !== undefined) {
    index = toInteger(fromIndex);
    // Negative starts count from the end; positive starts are clamped to
    // the last valid index.
    if (fromIndex < 0) {
      index = nativeMax(length + index, 0);
    } else {
      index = nativeMin(index, length - 1);
    }
  }
  return baseFindIndex(array, getIteratee(predicate, 3), index, true);
}
/**
 * Flattens `array` a single level deep.
 *
 * @static
 * @memberOf _
 * @since 0.1.0
 * @category Array
 * @param {Array} array The array to flatten.
 * @returns {Array} Returns the new flattened array.
 * @example
 *
 * _.flatten([1, [2, [3, [4]], 5]]);
 * // => [1, 2, [3, [4]], 5]
 */
function flatten(array) {
  if (array == null || !array.length) {
    return [];
  }
  return baseFlatten(array, 1);
}
/**
 * Recursively flattens `array`.
 *
 * @static
 * @memberOf _
 * @since 3.0.0
 * @category Array
 * @param {Array} array The array to flatten.
 * @returns {Array} Returns the new flattened array.
 * @example
 *
 * _.flattenDeep([1, [2, [3, [4]], 5]]);
 * // => [1, 2, 3, 4, 5]
 */
function flattenDeep(array) {
  if (array == null || !array.length) {
    return [];
  }
  // An `INFINITY` depth flattens all the way down.
  return baseFlatten(array, INFINITY);
}
/**
 * Recursively flatten `array` up to `depth` times.
 *
 * @static
 * @memberOf _
 * @since 4.4.0
 * @category Array
 * @param {Array} array The array to flatten.
 * @param {number} [depth=1] The maximum recursion depth.
 * @returns {Array} Returns the new flattened array.
 * @example
 *
 * var array = [1, [2, [3, [4]], 5]];
 *
 * _.flattenDepth(array, 1);
 * // => [1, 2, [3, [4]], 5]
 *
 * _.flattenDepth(array, 2);
 * // => [1, 2, 3, [4], 5]
 */
function flattenDepth(array, depth) {
  var length = array == null ? 0 : array.length;
  if (!length) {
    return [];
  }
  if (depth === undefined) {
    depth = 1;
  } else {
    depth = toInteger(depth);
  }
  return baseFlatten(array, depth);
}
/**
 * The inverse of `_.toPairs`; this method returns an object composed
 * from key-value `pairs`.
 *
 * @static
 * @memberOf _
 * @since 4.0.0
 * @category Array
 * @param {Array} pairs The key-value pairs.
 * @returns {Object} Returns the new object.
 * @example
 *
 * _.fromPairs([['a', 1], ['b', 2]]);
 * // => { 'a': 1, 'b': 2 }
 */
function fromPairs(pairs) {
  var result = {};
  var length = pairs == null ? 0 : pairs.length;
  for (var index = 0; index < length; index++) {
    var pair = pairs[index];
    result[pair[0]] = pair[1];
  }
  return result;
}
/**
 * Gets the first element of `array`.
 *
 * @static
 * @memberOf _
 * @since 0.1.0
 * @alias first
 * @category Array
 * @param {Array} array The array to query.
 * @returns {*} Returns the first element of `array`.
 * @example
 *
 * _.head([1, 2, 3]);
 * // => 1
 *
 * _.head([]);
 * // => undefined
 */
function head(array) {
  if (array && array.length) {
    return array[0];
  }
  return undefined;
}
/**
 * Gets the index at which the first occurrence of `value` is found in `array`
 * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
 * for equality comparisons. If `fromIndex` is negative, it's used as the
 * offset from the end of `array`.
 *
 * @static
 * @memberOf _
 * @since 0.1.0
 * @category Array
 * @param {Array} array The array to inspect.
 * @param {*} value The value to search for.
 * @param {number} [fromIndex=0] The index to search from.
 * @returns {number} Returns the index of the matched value, else `-1`.
 * @example
 *
 * _.indexOf([1, 2, 1, 2], 2);
 * // => 1
 *
 * // Search from the `fromIndex`.
 * _.indexOf([1, 2, 1, 2], 2, 2);
 * // => 3
 */
function indexOf(array, value, fromIndex) {
  var length = array == null ? 0 : array.length;
  if (!length) {
    return -1;
  }
  var index = 0;
  if (fromIndex != null) {
    index = toInteger(fromIndex);
    // Negative offsets count back from the end, clamped at zero.
    if (index < 0) {
      index = nativeMax(length + index, 0);
    }
  }
  return baseIndexOf(array, value, index);
}
/**
 * Gets all but the last element of `array`.
 *
 * @static
 * @memberOf _
 * @since 0.1.0
 * @category Array
 * @param {Array} array The array to query.
 * @returns {Array} Returns the slice of `array`.
 * @example
 *
 * _.initial([1, 2, 3]);
 * // => [1, 2]
 */
function initial(array) {
  if (array == null || !array.length) {
    return [];
  }
  return baseSlice(array, 0, -1);
}
/**
 * Builds an array of unique values that are included in all given arrays
 * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
 * for equality comparisons. The order and references of result values are
 * determined by the first array.
 *
 * @static
 * @memberOf _
 * @since 0.1.0
 * @category Array
 * @param {...Array} [arrays] The arrays to inspect.
 * @returns {Array} Returns the new array of intersecting values.
 * @example
 *
 * _.intersection([2, 1], [2, 3]);
 * // => [2]
 */
var intersection = baseRest(function(arrays) {
  var mapped = arrayMap(arrays, castArrayLikeObject);
  // If the first argument wasn't array-like the cast replaced it, in which
  // case there's nothing valid to intersect.
  if (!mapped.length || mapped[0] !== arrays[0]) {
    return [];
  }
  return baseIntersection(mapped);
});
/**
 * Like `_.intersection` except that `iteratee` is invoked for each element
 * of each `arrays` to generate the criterion by which they're compared.
 * The order and references of result values are determined by the first
 * array. The iteratee is invoked with one argument: (value).
 *
 * @static
 * @memberOf _
 * @since 4.0.0
 * @category Array
 * @param {...Array} [arrays] The arrays to inspect.
 * @param {Function} [iteratee=_.identity] The iteratee invoked per element.
 * @returns {Array} Returns the new array of intersecting values.
 * @example
 *
 * _.intersectionBy([2.1, 1.2], [2.3, 3.4], Math.floor);
 * // => [2.1]
 *
 * // The `_.property` iteratee shorthand.
 * _.intersectionBy([{ 'x': 1 }], [{ 'x': 2 }, { 'x': 1 }], 'x');
 * // => [{ 'x': 1 }]
 */
var intersectionBy = baseRest(function(arrays) {
  var iteratee = last(arrays);
  var mapped = arrayMap(arrays, castArrayLikeObject);
  // When the last argument survived the array-like cast it's an input
  // array, not an iteratee; otherwise drop it from the inputs.
  if (iteratee === last(mapped)) {
    iteratee = undefined;
  } else {
    mapped.pop();
  }
  if (!mapped.length || mapped[0] !== arrays[0]) {
    return [];
  }
  return baseIntersection(mapped, getIteratee(iteratee, 2));
});
/**
 * Like `_.intersection` except that `comparator` is invoked to compare
 * elements of `arrays`. The order and references of result values are
 * determined by the first array. The comparator is invoked with two
 * arguments: (arrVal, othVal).
 *
 * @static
 * @memberOf _
 * @since 4.0.0
 * @category Array
 * @param {...Array} [arrays] The arrays to inspect.
 * @param {Function} [comparator] The comparator invoked per element.
 * @returns {Array} Returns the new array of intersecting values.
 * @example
 *
 * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }];
 * var others = [{ 'x': 1, 'y': 1 }, { 'x': 1, 'y': 2 }];
 *
 * _.intersectionWith(objects, others, _.isEqual);
 * // => [{ 'x': 1, 'y': 2 }]
 */
var intersectionWith = baseRest(function(arrays) {
  var comparator = last(arrays);
  var mapped = arrayMap(arrays, castArrayLikeObject);
  if (typeof comparator != 'function') {
    comparator = undefined;
  }
  // A genuine comparator isn't one of the input arrays; drop it.
  if (comparator) {
    mapped.pop();
  }
  if (!mapped.length || mapped[0] !== arrays[0]) {
    return [];
  }
  return baseIntersection(mapped, undefined, comparator);
});
/**
 * Converts all elements in `array` into a string separated by `separator`.
 *
 * @static
 * @memberOf _
 * @since 4.0.0
 * @category Array
 * @param {Array} array The array to convert.
 * @param {string} [separator=','] The element separator.
 * @returns {string} Returns the joined string.
 * @example
 *
 * _.join(['a', 'b', 'c'], '~');
 * // => 'a~b~c'
 */
function join(array, separator) {
  if (array == null) {
    return '';
  }
  return nativeJoin.call(array, separator);
}
/**
 * Gets the last element of `array`.
 *
 * @static
 * @memberOf _
 * @since 0.1.0
 * @category Array
 * @param {Array} array The array to query.
 * @returns {*} Returns the last element of `array`.
 * @example
 *
 * _.last([1, 2, 3]);
 * // => 3
 */
function last(array) {
  var length = array == null ? 0 : array.length;
  if (!length) {
    return undefined;
  }
  return array[length - 1];
}
/**
 * Like `_.indexOf` except that it iterates over elements of `array` from
 * right to left.
 *
 * @static
 * @memberOf _
 * @since 0.1.0
 * @category Array
 * @param {Array} array The array to inspect.
 * @param {*} value The value to search for.
 * @param {number} [fromIndex=array.length-1] The index to search from.
 * @returns {number} Returns the index of the matched value, else `-1`.
 * @example
 *
 * _.lastIndexOf([1, 2, 1, 2], 2);
 * // => 3
 *
 * // Search from the `fromIndex`.
 * _.lastIndexOf([1, 2, 1, 2], 2, 2);
 * // => 1
 */
function lastIndexOf(array, value, fromIndex) {
  var length = array == null ? 0 : array.length;
  if (!length) {
    return -1;
  }
  var index = length;
  if (fromIndex !== undefined) {
    index = toInteger(fromIndex);
    index = index < 0 ? nativeMax(length + index, 0) : nativeMin(index, length - 1);
  }
  // `NaN` never strictly equals itself, so it needs the `baseIsNaN` scan.
  if (value === value) {
    return strictLastIndexOf(array, value, index);
  }
  return baseFindIndex(array, baseIsNaN, index, true);
}
/**
 * Gets the element at index `n` of `array`. If `n` is negative, the nth
 * element from the end is returned.
 *
 * @static
 * @memberOf _
 * @since 4.11.0
 * @category Array
 * @param {Array} array The array to query.
 * @param {number} [n=0] The index of the element to return.
 * @returns {*} Returns the nth element of `array`.
 * @example
 *
 * var array = ['a', 'b', 'c', 'd'];
 *
 * _.nth(array, 1);
 * // => 'b'
 *
 * _.nth(array, -2);
 * // => 'c';
 */
function nth(array, n) {
  if (!(array && array.length)) {
    return undefined;
  }
  return baseNth(array, toInteger(n));
}
/**
* Removes all given values from `array` using
* [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* for equality comparisons.
*
* **Note:** Unlike `_.without`, this method mutates `array`. Use `_.remove`
* to remove elements from an array by predicate.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Array
* @param {Array} array The array to modify.
* @param {...*} [values] The values to remove.
* @returns {Array} Returns `array`.
* @example
*
* var array = ['a', 'b', 'c', 'a', 'b', 'c'];
*
* _.pull(array, 'a', 'c');
* console.log(array);
* // => ['b', 'b']
*/
var pull = baseRest(pullAll);
/**
* This method is like `_.pull` except that it accepts an array of values to remove.
*
* **Note:** Unlike `_.difference`, this method mutates `array`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to modify.
* @param {Array} values The values to remove.
* @returns {Array} Returns `array`.
* @example
*
* var array = ['a', 'b', 'c', 'a', 'b', 'c'];
*
* _.pullAll(array, ['a', 'c']);
* console.log(array);
* // => ['b', 'b']
*/
function pullAll(array, values) {
return (array && array.length && values && values.length)
? basePullAll(array, values)
: array;
}
/**
* This method is like `_.pullAll` except that it accepts `iteratee` which is
* invoked for each element of `array` and `values` to generate the criterion
* by which they're compared. The iteratee is invoked with one argument: (value).
*
* **Note:** Unlike `_.differenceBy`, this method mutates `array`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to modify.
* @param {Array} values The values to remove.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {Array} Returns `array`.
* @example
*
* var array = [{ 'x': 1 }, { 'x': 2 }, { 'x': 3 }, { 'x': 1 }];
*
* _.pullAllBy(array, [{ 'x': 1 }, { 'x': 3 }], 'x');
* console.log(array);
* // => [{ 'x': 2 }]
*/
function pullAllBy(array, values, iteratee) {
return (array && array.length && values && values.length)
? basePullAll(array, values, getIteratee(iteratee, 2))
: array;
}
/**
* This method is like `_.pullAll` except that it accepts `comparator` which
* is invoked to compare elements of `array` to `values`. The comparator is
* invoked with two arguments: (arrVal, othVal).
*
* **Note:** Unlike `_.differenceWith`, this method mutates `array`.
*
* @static
* @memberOf _
* @since 4.6.0
* @category Array
* @param {Array} array The array to modify.
* @param {Array} values The values to remove.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns `array`.
* @example
*
* var array = [{ 'x': 1, 'y': 2 }, { 'x': 3, 'y': 4 }, { 'x': 5, 'y': 6 }];
*
* _.pullAllWith(array, [{ 'x': 3, 'y': 4 }], _.isEqual);
* console.log(array);
* // => [{ 'x': 1, 'y': 2 }, { 'x': 5, 'y': 6 }]
*/
function pullAllWith(array, values, comparator) {
return (array && array.length && values && values.length)
? basePullAll(array, values, undefined, comparator)
: array;
}
/**
* Removes elements from `array` corresponding to `indexes` and returns an
* array of removed elements.
*
* **Note:** Unlike `_.at`, this method mutates `array`.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The array to modify.
* @param {...(number|number[])} [indexes] The indexes of elements to remove.
* @returns {Array} Returns the new array of removed elements.
* @example
*
* var array = ['a', 'b', 'c', 'd'];
* var pulled = _.pullAt(array, [1, 3]);
*
* console.log(array);
* // => ['a', 'c']
*
* console.log(pulled);
* // => ['b', 'd']
*/
var pullAt = flatRest(function(array, indexes) {
var length = array == null ? 0 : array.length,
result = baseAt(array, indexes);
basePullAt(array, arrayMap(indexes, function(index) {
return isIndex(index, length) ? +index : index;
}).sort(compareAscending));
return result;
});
/**
* Removes all elements from `array` that `predicate` returns truthy for
* and returns an array of the removed elements. The predicate is invoked
* with three arguments: (value, index, array).
*
* **Note:** Unlike `_.filter`, this method mutates `array`. Use `_.pull`
* to pull elements from an array by value.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Array
* @param {Array} array The array to modify.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {Array} Returns the new array of removed elements.
* @example
*
* var array = [1, 2, 3, 4];
* var evens = _.remove(array, function(n) {
* return n % 2 == 0;
* });
*
* console.log(array);
* // => [1, 3]
*
* console.log(evens);
* // => [2, 4]
*/
function remove(array, predicate) {
var result = [];
if (!(array && array.length)) {
return result;
}
var index = -1,
indexes = [],
length = array.length;
predicate = getIteratee(predicate, 3);
while (++index < length) {
var value = array[index];
if (predicate(value, index, array)) {
result.push(value);
indexes.push(index);
}
}
basePullAt(array, indexes);
return result;
}
/**
* Reverses `array` so that the first element becomes the last, the second
* element becomes the second to last, and so on.
*
* **Note:** This method mutates `array` and is based on
* [`Array#reverse`](https://mdn.io/Array/reverse).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to modify.
* @returns {Array} Returns `array`.
* @example
*
* var array = [1, 2, 3];
*
* _.reverse(array);
* // => [3, 2, 1]
*
* console.log(array);
* // => [3, 2, 1]
*/
function reverse(array) {
return array == null ? array : nativeReverse.call(array);
}
/**
* Creates a slice of `array` from `start` up to, but not including, `end`.
*
* **Note:** This method is used instead of
* [`Array#slice`](https://mdn.io/Array/slice) to ensure dense arrays are
* returned.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The array to slice.
* @param {number} [start=0] The start position.
* @param {number} [end=array.length] The end position.
* @returns {Array} Returns the slice of `array`.
*/
function slice(array, start, end) {
var length = array == null ? 0 : array.length;
if (!length) {
return [];
}
if (end && typeof end != 'number' && isIterateeCall(array, start, end)) {
start = 0;
end = length;
}
else {
start = start == null ? 0 : toInteger(start);
end = end === undefined ? length : toInteger(end);
}
return baseSlice(array, start, end);
}
/**
* Uses a binary search to determine the lowest index at which `value`
* should be inserted into `array` in order to maintain its sort order.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {Array} array The sorted array to inspect.
* @param {*} value The value to evaluate.
* @returns {number} Returns the index at which `value` should be inserted
* into `array`.
* @example
*
* _.sortedIndex([30, 50], 40);
* // => 1
*/
function sortedIndex(array, value) {
return baseSortedIndex(array, value);
}
/**
* This method is like `_.sortedIndex` except that it accepts `iteratee`
* which is invoked for `value` and each element of `array` to compute their
* sort ranking. The iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The sorted array to inspect.
* @param {*} value The value to evaluate.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {number} Returns the index at which `value` should be inserted
* into `array`.
* @example
*
* var objects = [{ 'x': 4 }, { 'x': 5 }];
*
* _.sortedIndexBy(objects, { 'x': 4 }, function(o) { return o.x; });
* // => 0
*
* // The `_.property` iteratee shorthand.
* _.sortedIndexBy(objects, { 'x': 4 }, 'x');
* // => 0
*/
function sortedIndexBy(array, value, iteratee) {
return baseSortedIndexBy(array, value, getIteratee(iteratee, 2));
}
/**
* This method is like `_.indexOf` except that it performs a binary
* search on a sorted `array`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to inspect.
* @param {*} value The value to search for.
* @returns {number} Returns the index of the matched value, else `-1`.
* @example
*
* _.sortedIndexOf([4, 5, 5, 5, 6], 5);
* // => 1
*/
function sortedIndexOf(array, value) {
var length = array == null ? 0 : array.length;
if (length) {
var index = baseSortedIndex(array, value);
if (index < length && eq(array[index], value)) {
return index;
}
}
return -1;
}
/**
* This method is like `_.sortedIndex` except that it returns the highest
* index at which `value` should be inserted into `array` in order to
* maintain its sort order.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The sorted array to inspect.
* @param {*} value The value to evaluate.
* @returns {number} Returns the index at which `value` should be inserted
* into `array`.
* @example
*
* _.sortedLastIndex([4, 5, 5, 5, 6], 5);
* // => 4
*/
function sortedLastIndex(array, value) {
return baseSortedIndex(array, value, true);
}
/**
* This method is like `_.sortedLastIndex` except that it accepts `iteratee`
* which is invoked for `value` and each element of `array` to compute their
* sort ranking. The iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The sorted array to inspect.
* @param {*} value The value to evaluate.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {number} Returns the index at which `value` should be inserted
* into `array`.
* @example
*
* var objects = [{ 'x': 4 }, { 'x': 5 }];
*
* _.sortedLastIndexBy(objects, { 'x': 4 }, function(o) { return o.x; });
* // => 1
*
* // The `_.property` iteratee shorthand.
* _.sortedLastIndexBy(objects, { 'x': 4 }, 'x');
* // => 1
*/
function sortedLastIndexBy(array, value, iteratee) {
return baseSortedIndexBy(array, value, getIteratee(iteratee, 2), true);
}
/**
* This method is like `_.lastIndexOf` except that it performs a binary
* search on a sorted `array`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to inspect.
* @param {*} value The value to search for.
* @returns {number} Returns the index of the matched value, else `-1`.
* @example
*
* _.sortedLastIndexOf([4, 5, 5, 5, 6], 5);
* // => 3
*/
function sortedLastIndexOf(array, value) {
var length = array == null ? 0 : array.length;
if (length) {
var index = baseSortedIndex(array, value, true) - 1;
if (eq(array[index], value)) {
return index;
}
}
return -1;
}
/**
* This method is like `_.uniq` except that it's designed and optimized
* for sorted arrays.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to inspect.
* @returns {Array} Returns the new duplicate free array.
* @example
*
* _.sortedUniq([1, 1, 2]);
* // => [1, 2]
*/
function sortedUniq(array) {
return (array && array.length)
? baseSortedUniq(array)
: [];
}
/**
* This method is like `_.uniqBy` except that it's designed and optimized
* for sorted arrays.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to inspect.
* @param {Function} [iteratee] The iteratee invoked per element.
* @returns {Array} Returns the new duplicate free array.
* @example
*
* _.sortedUniqBy([1.1, 1.2, 2.3, 2.4], Math.floor);
* // => [1.1, 2.3]
*/
function sortedUniqBy(array, iteratee) {
return (array && array.length)
? baseSortedUniq(array, getIteratee(iteratee, 2))
: [];
}
/**
* Gets all but the first element of `array`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to query.
* @returns {Array} Returns the slice of `array`.
* @example
*
* _.tail([1, 2, 3]);
* // => [2, 3]
*/
function tail(array) {
var length = array == null ? 0 : array.length;
return length ? baseSlice(array, 1, length) : [];
}
/**
* Creates a slice of `array` with `n` elements taken from the beginning.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {Array} array The array to query.
* @param {number} [n=1] The number of elements to take.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Array} Returns the slice of `array`.
* @example
*
* _.take([1, 2, 3]);
* // => [1]
*
* _.take([1, 2, 3], 2);
* // => [1, 2]
*
* _.take([1, 2, 3], 5);
* // => [1, 2, 3]
*
* _.take([1, 2, 3], 0);
* // => []
*/
function take(array, n, guard) {
if (!(array && array.length)) {
return [];
}
n = (guard || n === undefined) ? 1 : toInteger(n);
return baseSlice(array, 0, n < 0 ? 0 : n);
}
/**
* Creates a slice of `array` with `n` elements taken from the end.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The array to query.
* @param {number} [n=1] The number of elements to take.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Array} Returns the slice of `array`.
* @example
*
* _.takeRight([1, 2, 3]);
* // => [3]
*
* _.takeRight([1, 2, 3], 2);
* // => [2, 3]
*
* _.takeRight([1, 2, 3], 5);
* // => [1, 2, 3]
*
* _.takeRight([1, 2, 3], 0);
* // => []
*/
function takeRight(array, n, guard) {
var length = array == null ? 0 : array.length;
if (!length) {
return [];
}
n = (guard || n === undefined) ? 1 : toInteger(n);
n = length - n;
return baseSlice(array, n < 0 ? 0 : n, length);
}
/**
* Creates a slice of `array` with elements taken from the end. Elements are
* taken until `predicate` returns falsey. The predicate is invoked with
* three arguments: (value, index, array).
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The array to query.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {Array} Returns the slice of `array`.
* @example
*
* var users = [
* { 'user': 'barney', 'active': true },
* { 'user': 'fred', 'active': false },
* { 'user': 'pebbles', 'active': false }
* ];
*
* _.takeRightWhile(users, function(o) { return !o.active; });
* // => objects for ['fred', 'pebbles']
*
* // The `_.matches` iteratee shorthand.
* _.takeRightWhile(users, { 'user': 'pebbles', 'active': false });
* // => objects for ['pebbles']
*
* // The `_.matchesProperty` iteratee shorthand.
* _.takeRightWhile(users, ['active', false]);
* // => objects for ['fred', 'pebbles']
*
* // The `_.property` iteratee shorthand.
* _.takeRightWhile(users, 'active');
* // => []
*/
function takeRightWhile(array, predicate) {
return (array && array.length)
? baseWhile(array, getIteratee(predicate, 3), false, true)
: [];
}
/**
* Creates a slice of `array` with elements taken from the beginning. Elements
* are taken until `predicate` returns falsey. The predicate is invoked with
* three arguments: (value, index, array).
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The array to query.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {Array} Returns the slice of `array`.
* @example
*
* var users = [
* { 'user': 'barney', 'active': false },
* { 'user': 'fred', 'active': false },
* { 'user': 'pebbles', 'active': true }
* ];
*
* _.takeWhile(users, function(o) { return !o.active; });
* // => objects for ['barney', 'fred']
*
* // The `_.matches` iteratee shorthand.
* _.takeWhile(users, { 'user': 'barney', 'active': false });
* // => objects for ['barney']
*
* // The `_.matchesProperty` iteratee shorthand.
* _.takeWhile(users, ['active', false]);
* // => objects for ['barney', 'fred']
*
* // The `_.property` iteratee shorthand.
* _.takeWhile(users, 'active');
* // => []
*/
function takeWhile(array, predicate) {
return (array && array.length)
? baseWhile(array, getIteratee(predicate, 3))
: [];
}
/**
* Creates an array of unique values, in order, from all given arrays using
* [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* for equality comparisons.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {...Array} [arrays] The arrays to inspect.
* @returns {Array} Returns the new array of combined values.
* @example
*
* _.union([2], [1, 2]);
* // => [2, 1]
*/
var union = baseRest(function(arrays) {
return baseUniq(baseFlatten(arrays, 1, isArrayLikeObject, true));
});
/**
* This method is like `_.union` except that it accepts `iteratee` which is
* invoked for each element of each `arrays` to generate the criterion by
* which uniqueness is computed. Result values are chosen from the first
* array in which the value occurs. The iteratee is invoked with one argument:
* (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {...Array} [arrays] The arrays to inspect.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {Array} Returns the new array of combined values.
* @example
*
* _.unionBy([2.1], [1.2, 2.3], Math.floor);
* // => [2.1, 1.2]
*
* // The `_.property` iteratee shorthand.
* _.unionBy([{ 'x': 1 }], [{ 'x': 2 }, { 'x': 1 }], 'x');
* // => [{ 'x': 1 }, { 'x': 2 }]
*/
var unionBy = baseRest(function(arrays) {
var iteratee = last(arrays);
if (isArrayLikeObject(iteratee)) {
iteratee = undefined;
}
return baseUniq(baseFlatten(arrays, 1, isArrayLikeObject, true), getIteratee(iteratee, 2));
});
/**
* This method is like `_.union` except that it accepts `comparator` which
* is invoked to compare elements of `arrays`. Result values are chosen from
* the first array in which the value occurs. The comparator is invoked
* with two arguments: (arrVal, othVal).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {...Array} [arrays] The arrays to inspect.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns the new array of combined values.
* @example
*
* var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }];
* var others = [{ 'x': 1, 'y': 1 }, { 'x': 1, 'y': 2 }];
*
* _.unionWith(objects, others, _.isEqual);
* // => [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }, { 'x': 1, 'y': 1 }]
*/
var unionWith = baseRest(function(arrays) {
var comparator = last(arrays);
comparator = typeof comparator == 'function' ? comparator : undefined;
return baseUniq(baseFlatten(arrays, 1, isArrayLikeObject, true), undefined, comparator);
});
/**
* Creates a duplicate-free version of an array, using
* [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* for equality comparisons, in which only the first occurrence of each element
* is kept. The order of result values is determined by the order they occur
* in the array.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {Array} array The array to inspect.
* @returns {Array} Returns the new duplicate free array.
* @example
*
* _.uniq([2, 1, 2]);
* // => [2, 1]
*/
function uniq(array) {
return (array && array.length) ? baseUniq(array) : [];
}
/**
* This method is like `_.uniq` except that it accepts `iteratee` which is
* invoked for each element in `array` to generate the criterion by which
* uniqueness is computed. The order of result values is determined by the
* order they occur in the array. The iteratee is invoked with one argument:
* (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to inspect.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {Array} Returns the new duplicate free array.
* @example
*
* _.uniqBy([2.1, 1.2, 2.3], Math.floor);
* // => [2.1, 1.2]
*
* // The `_.property` iteratee shorthand.
* _.uniqBy([{ 'x': 1 }, { 'x': 2 }, { 'x': 1 }], 'x');
* // => [{ 'x': 1 }, { 'x': 2 }]
*/
function uniqBy(array, iteratee) {
return (array && array.length) ? baseUniq(array, getIteratee(iteratee, 2)) : [];
}
/**
* This method is like `_.uniq` except that it accepts `comparator` which
* is invoked to compare elements of `array`. The order of result values is
 * determined by the order they occur in the array. The comparator is invoked
* with two arguments: (arrVal, othVal).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to inspect.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns the new duplicate free array.
* @example
*
* var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }, { 'x': 1, 'y': 2 }];
*
* _.uniqWith(objects, _.isEqual);
* // => [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }]
*/
function uniqWith(array, comparator) {
comparator = typeof comparator == 'function' ? comparator : undefined;
return (array && array.length) ? baseUniq(array, undefined, comparator) : [];
}
/**
* This method is like `_.zip` except that it accepts an array of grouped
* elements and creates an array regrouping the elements to their pre-zip
* configuration.
*
* @static
* @memberOf _
* @since 1.2.0
* @category Array
* @param {Array} array The array of grouped elements to process.
* @returns {Array} Returns the new array of regrouped elements.
* @example
*
* var zipped = _.zip(['a', 'b'], [1, 2], [true, false]);
* // => [['a', 1, true], ['b', 2, false]]
*
* _.unzip(zipped);
* // => [['a', 'b'], [1, 2], [true, false]]
*/
function unzip(array) {
if (!(array && array.length)) {
return [];
}
var length = 0;
array = arrayFilter(array, function(group) {
if (isArrayLikeObject(group)) {
length = nativeMax(group.length, length);
return true;
}
});
return baseTimes(length, function(index) {
return arrayMap(array, baseProperty(index));
});
}
/**
* This method is like `_.unzip` except that it accepts `iteratee` to specify
* how regrouped values should be combined. The iteratee is invoked with the
* elements of each group: (...group).
*
* @static
* @memberOf _
* @since 3.8.0
* @category Array
* @param {Array} array The array of grouped elements to process.
* @param {Function} [iteratee=_.identity] The function to combine
* regrouped values.
* @returns {Array} Returns the new array of regrouped elements.
* @example
*
* var zipped = _.zip([1, 2], [10, 20], [100, 200]);
* // => [[1, 10, 100], [2, 20, 200]]
*
* _.unzipWith(zipped, _.add);
* // => [3, 30, 300]
*/
function unzipWith(array, iteratee) {
if (!(array && array.length)) {
return [];
}
var result = unzip(array);
if (iteratee == null) {
return result;
}
return arrayMap(result, function(group) {
return apply(iteratee, undefined, group);
});
}
/**
* Creates an array excluding all given values using
* [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* for equality comparisons.
*
* **Note:** Unlike `_.pull`, this method returns a new array.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {Array} array The array to inspect.
* @param {...*} [values] The values to exclude.
* @returns {Array} Returns the new array of filtered values.
* @see _.difference, _.xor
* @example
*
* _.without([2, 1, 2, 3], 1, 2);
* // => [3]
*/
var without = baseRest(function(array, values) {
return isArrayLikeObject(array)
? baseDifference(array, values)
: [];
});
/**
* Creates an array of unique values that is the
* [symmetric difference](https://en.wikipedia.org/wiki/Symmetric_difference)
* of the given arrays. The order of result values is determined by the order
* they occur in the arrays.
*
* @static
* @memberOf _
* @since 2.4.0
* @category Array
* @param {...Array} [arrays] The arrays to inspect.
* @returns {Array} Returns the new array of filtered values.
* @see _.difference, _.without
* @example
*
* _.xor([2, 1], [2, 3]);
* // => [1, 3]
*/
var xor = baseRest(function(arrays) {
return baseXor(arrayFilter(arrays, isArrayLikeObject));
});
/**
* This method is like `_.xor` except that it accepts `iteratee` which is
* invoked for each element of each `arrays` to generate the criterion by
 * which they're compared. The order of result values is determined
* by the order they occur in the arrays. The iteratee is invoked with one
* argument: (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {...Array} [arrays] The arrays to inspect.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {Array} Returns the new array of filtered values.
* @example
*
* _.xorBy([2.1, 1.2], [2.3, 3.4], Math.floor);
* // => [1.2, 3.4]
*
* // The `_.property` iteratee shorthand.
* _.xorBy([{ 'x': 1 }], [{ 'x': 2 }, { 'x': 1 }], 'x');
* // => [{ 'x': 2 }]
*/
var xorBy = baseRest(function(arrays) {
var iteratee = last(arrays);
if (isArrayLikeObject(iteratee)) {
iteratee = undefined;
}
return baseXor(arrayFilter(arrays, isArrayLikeObject), getIteratee(iteratee, 2));
});
/**
* This method is like `_.xor` except that it accepts `comparator` which is
* invoked to compare elements of `arrays`. The order of result values is
* determined by the order they occur in the arrays. The comparator is invoked
* with two arguments: (arrVal, othVal).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {...Array} [arrays] The arrays to inspect.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns the new array of filtered values.
* @example
*
* var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }];
* var others = [{ 'x': 1, 'y': 1 }, { 'x': 1, 'y': 2 }];
*
* _.xorWith(objects, others, _.isEqual);
* // => [{ 'x': 2, 'y': 1 }, { 'x': 1, 'y': 1 }]
*/
var xorWith = baseRest(function(arrays) {
var comparator = last(arrays);
comparator = typeof comparator == 'function' ? comparator : undefined;
return baseXor(arrayFilter(arrays, isArrayLikeObject), undefined, comparator);
});
/**
* Creates an array of grouped elements, the first of which contains the
* first elements of the given arrays, the second of which contains the
* second elements of the given arrays, and so on.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {...Array} [arrays] The arrays to process.
* @returns {Array} Returns the new array of grouped elements.
* @example
*
* _.zip(['a', 'b'], [1, 2], [true, false]);
* // => [['a', 1, true], ['b', 2, false]]
*/
var zip = baseRest(unzip);
/**
* This method is like `_.fromPairs` except that it accepts two arrays,
* one of property identifiers and one of corresponding values.
*
* @static
* @memberOf _
* @since 0.4.0
* @category Array
* @param {Array} [props=[]] The property identifiers.
* @param {Array} [values=[]] The property values.
* @returns {Object} Returns the new object.
* @example
*
* _.zipObject(['a', 'b'], [1, 2]);
* // => { 'a': 1, 'b': 2 }
*/
function zipObject(props, values) {
return baseZipObject(props || [], values || [], assignValue);
}
/**
* This method is like `_.zipObject` except that it supports property paths.
*
* @static
* @memberOf _
* @since 4.1.0
* @category Array
* @param {Array} [props=[]] The property identifiers.
* @param {Array} [values=[]] The property values.
* @returns {Object} Returns the new object.
* @example
*
* _.zipObjectDeep(['a.b[0].c', 'a.b[1].d'], [1, 2]);
* // => { 'a': { 'b': [{ 'c': 1 }, { 'd': 2 }] } }
*/
function zipObjectDeep(props, values) {
return baseZipObject(props || [], values || [], baseSet);
}
/**
* This method is like `_.zip` except that it accepts `iteratee` to specify
* how grouped values should be combined. The iteratee is invoked with the
* elements of each group: (...group).
*
* @static
* @memberOf _
* @since 3.8.0
* @category Array
* @param {...Array} [arrays] The arrays to process.
* @param {Function} [iteratee=_.identity] The function to combine
* grouped values.
* @returns {Array} Returns the new array of grouped elements.
* @example
*
* _.zipWith([1, 2], [10, 20], [100, 200], function(a, b, c) {
* return a + b + c;
* });
* // => [111, 222]
*/
var zipWith = baseRest(function(arrays) {
var length = arrays.length,
iteratee = length > 1 ? arrays[length - 1] : undefined;
iteratee = typeof iteratee == 'function' ? (arrays.pop(), iteratee) : undefined;
return unzipWith(arrays, iteratee);
});
/*------------------------------------------------------------------------*/
/**
* Creates a `lodash` wrapper instance that wraps `value` with explicit method
* chain sequences enabled. The result of such sequences must be unwrapped
* with `_#value`.
*
* @static
* @memberOf _
* @since 1.3.0
* @category Seq
* @param {*} value The value to wrap.
* @returns {Object} Returns the new `lodash` wrapper instance.
* @example
*
* var users = [
* { 'user': 'barney', 'age': 36 },
* { 'user': 'fred', 'age': 40 },
* { 'user': 'pebbles', 'age': 1 }
* ];
*
* var youngest = _
* .chain(users)
* .sortBy('age')
* .map(function(o) {
* return o.user + ' is ' + o.age;
* })
* .head()
* .value();
* // => 'pebbles is 1'
*/
function chain(value) {
var result = lodash(value);
result.__chain__ = true;
return result;
}
/**
* This method invokes `interceptor` and returns `value`. The interceptor
 * is invoked with one argument: (value). The purpose of this method is to
* "tap into" a method chain sequence in order to modify intermediate results.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Seq
* @param {*} value The value to provide to `interceptor`.
* @param {Function} interceptor The function to invoke.
* @returns {*} Returns `value`.
* @example
*
* _([1, 2, 3])
* .tap(function(array) {
* // Mutate input array.
* array.pop();
* })
* .reverse()
* .value();
* // => [2, 1]
*/
function tap(value, interceptor) {
interceptor(value);
return value;
}
/**
* This method is like `_.tap` except that it returns the result of `interceptor`.
* The purpose of this method is to "pass thru" values replacing intermediate
* results in a method chain sequence.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Seq
* @param {*} value The value to provide to `interceptor`.
* @param {Function} interceptor The function to invoke.
* @returns {*} Returns the result of `interceptor`.
* @example
*
* _(' abc ')
* .chain()
* .trim()
* .thru(function(value) {
* return [value];
* })
* .value();
* // => ['abc']
*/
function thru(value, interceptor) {
return interceptor(value);
}
/**
* This method is the wrapper version of `_.at`.
*
* @name at
* @memberOf _
* @since 1.0.0
* @category Seq
* @param {...(string|string[])} [paths] The property paths to pick.
* @returns {Object} Returns the new `lodash` wrapper instance.
* @example
*
* var object = { 'a': [{ 'b': { 'c': 3 } }, 4] };
*
* _(object).at(['a[0].b.c', 'a[1]']).value();
* // => [3, 4]
*/
var wrapperAt = flatRest(function(paths) {
var length = paths.length,
start = length ? paths[0] : 0,
value = this.__wrapped__,
interceptor = function(object) { return baseAt(object, paths); };
if (length > 1 || this.__actions__.length ||
!(value instanceof LazyWrapper) || !isIndex(start)) {
return this.thru(interceptor);
}
value = value.slice(start, +start + (length ? 1 : 0));
value.__actions__.push({
'func': thru,
'args': [interceptor],
'thisArg': undefined
});
return new LodashWrapper(value, this.__chain__).thru(function(array) {
if (length && !array.length) {
array.push(undefined);
}
return array;
});
});
/**
* Creates a `lodash` wrapper instance with explicit method chain sequences enabled.
*
* @name chain
* @memberOf _
* @since 0.1.0
* @category Seq
* @returns {Object} Returns the new `lodash` wrapper instance.
* @example
*
* var users = [
* { 'user': 'barney', 'age': 36 },
* { 'user': 'fred', 'age': 40 }
* ];
*
* // A sequence without explicit chaining.
* _(users).head();
* // => { 'user': 'barney', 'age': 36 }
*
* // A sequence with explicit chaining.
* _(users)
* .chain()
* .head()
* .pick('user')
* .value();
* // => { 'user': 'barney' }
*/
function wrapperChain() {
return chain(this);
}
/**
* Executes the chain sequence and returns the wrapped result.
*
* @name commit
* @memberOf _
* @since 3.2.0
* @category Seq
* @returns {Object} Returns the new `lodash` wrapper instance.
* @example
*
* var array = [1, 2];
* var wrapped = _(array).push(3);
*
* console.log(array);
* // => [1, 2]
*
* wrapped = wrapped.commit();
* console.log(array);
* // => [1, 2, 3]
*
* wrapped.last();
* // => 3
*
* console.log(array);
* // => [1, 2, 3]
*/
function wrapperCommit() {
  // Realize the pending chain now and wrap the result, preserving the
  // current explicit/implicit chaining mode.
  return new LodashWrapper(this.value(), this.__chain__);
}
/**
* Gets the next value on a wrapped object following the
* [iterator protocol](https://mdn.io/iteration_protocols#iterator).
*
* @name next
* @memberOf _
* @since 4.0.0
* @category Seq
* @returns {Object} Returns the next iterator value.
* @example
*
* var wrapped = _([1, 2]);
*
* wrapped.next();
* // => { 'done': false, 'value': 1 }
*
* wrapped.next();
* // => { 'done': false, 'value': 2 }
*
* wrapped.next();
* // => { 'done': true, 'value': undefined }
*/
function wrapperNext() {
  // Materialize the resolved values once, on the first `next()` call.
  var values = this.__values__;
  if (values === undefined) {
    values = this.__values__ = toArray(this.value());
  }
  // Exhausted: report done with no value.
  if (this.__index__ >= values.length) {
    return { 'done': true, 'value': undefined };
  }
  // Otherwise emit the current value and advance the cursor.
  return { 'done': false, 'value': values[this.__index__++] };
}
/**
* Enables the wrapper to be iterable.
*
* @name Symbol.iterator
* @memberOf _
* @since 4.0.0
* @category Seq
* @returns {Object} Returns the wrapper object.
* @example
*
* var wrapped = _([1, 2]);
*
* wrapped[Symbol.iterator]() === wrapped;
* // => true
*
* Array.from(wrapped);
* // => [1, 2]
*/
function wrapperToIterator() {
  // The wrapper is its own iterator (see `wrapperNext`).
  return this;
}
/**
* Creates a clone of the chain sequence planting `value` as the wrapped value.
*
* @name plant
* @memberOf _
* @since 3.2.0
* @category Seq
* @param {*} value The value to plant.
* @returns {Object} Returns the new `lodash` wrapper instance.
* @example
*
* function square(n) {
* return n * n;
* }
*
* var wrapped = _([1, 2]).map(square);
* var other = wrapped.plant([3, 4]);
*
* other.value();
* // => [9, 16]
*
* wrapped.value();
* // => [1, 4]
*/
function wrapperPlant(value) {
  // Walk the wrapper chain from the outermost layer inward, cloning each
  // layer and re-linking the clones so the original chain is untouched.
  var result,
      previous,
      parent = this;
  while (parent instanceof baseLodash) {
    var clone = wrapperClone(parent);
    clone.__index__ = 0;
    clone.__values__ = undefined;
    if (previous) {
      previous.__wrapped__ = clone;
    } else {
      // First iteration: the outermost clone is the value we return.
      result = clone;
    }
    previous = clone;
    parent = parent.__wrapped__;
  }
  // Plant `value` at the innermost position of the cloned chain.
  previous.__wrapped__ = value;
  return result;
}
/**
* This method is the wrapper version of `_.reverse`.
*
* **Note:** This method mutates the wrapped array.
*
* @name reverse
* @memberOf _
* @since 0.1.0
* @category Seq
* @returns {Object} Returns the new `lodash` wrapper instance.
* @example
*
* var array = [1, 2, 3];
*
* _(array).reverse().value()
* // => [3, 2, 1]
*
* console.log(array);
* // => [3, 2, 1]
*/
function wrapperReverse() {
  var value = this.__wrapped__;
  if (value instanceof LazyWrapper) {
    var wrapped = value;
    // Pending actions must run before reversing, so re-wrap to defer them.
    if (this.__actions__.length) {
      wrapped = new LazyWrapper(this);
    }
    wrapped = wrapped.reverse();
    // Queue the eager `reverse` to run when the chain is resolved.
    wrapped.__actions__.push({
      'func': thru,
      'args': [reverse],
      'thisArg': undefined
    });
    return new LodashWrapper(wrapped, this.__chain__);
  }
  // Non-lazy values fall back to an eager in-place reverse.
  return this.thru(reverse);
}
/**
* Executes the chain sequence to resolve the unwrapped value.
*
* @name value
* @memberOf _
* @since 0.1.0
* @alias toJSON, valueOf
* @category Seq
* @returns {*} Returns the resolved unwrapped value.
* @example
*
* _([1, 2, 3]).value();
* // => [1, 2, 3]
*/
function wrapperValue() {
  // Replay every queued action against the wrapped value.
  return baseWrapperValue(this.__wrapped__, this.__actions__);
}
/*------------------------------------------------------------------------*/
/**
* Creates an object composed of keys generated from the results of running
* each element of `collection` thru `iteratee`. The corresponding value of
* each key is the number of times the key was returned by `iteratee`. The
* iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 0.5.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The iteratee to transform keys.
* @returns {Object} Returns the composed aggregate object.
* @example
*
* _.countBy([6.1, 4.2, 6.3], Math.floor);
* // => { '4': 1, '6': 2 }
*
* // The `_.property` iteratee shorthand.
* _.countBy(['one', 'two', 'three'], 'length');
* // => { '3': 2, '5': 1 }
*/
var countBy = createAggregator(function(result, value, key) {
  // Seed unseen keys with 1; bump existing counts in place.
  if (!hasOwnProperty.call(result, key)) {
    baseAssignValue(result, key, 1);
  } else {
    ++result[key];
  }
});
/**
* Checks if `predicate` returns truthy for **all** elements of `collection`.
* Iteration is stopped once `predicate` returns falsey. The predicate is
* invoked with three arguments: (value, index|key, collection).
*
* **Note:** This method returns `true` for
* [empty collections](https://en.wikipedia.org/wiki/Empty_set) because
* [everything is true](https://en.wikipedia.org/wiki/Vacuous_truth) of
* elements of empty collections.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {boolean} Returns `true` if all elements pass the predicate check,
* else `false`.
* @example
*
* _.every([true, 1, null, 'yes'], Boolean);
* // => false
*
* var users = [
* { 'user': 'barney', 'age': 36, 'active': false },
* { 'user': 'fred', 'age': 40, 'active': false }
* ];
*
* // The `_.matches` iteratee shorthand.
* _.every(users, { 'user': 'barney', 'active': false });
* // => false
*
* // The `_.matchesProperty` iteratee shorthand.
* _.every(users, ['active', false]);
* // => true
*
* // The `_.property` iteratee shorthand.
* _.every(users, 'active');
* // => false
*/
function every(collection, predicate, guard) {
  // When invoked as an iteratee (e.g. by `_.map`), ignore the predicate.
  if (guard && isIterateeCall(collection, predicate, guard)) {
    predicate = undefined;
  }
  var iteratee = getIteratee(predicate, 3);
  return isArray(collection)
    ? arrayEvery(collection, iteratee)
    : baseEvery(collection, iteratee);
}
/**
* Iterates over elements of `collection`, returning an array of all elements
* `predicate` returns truthy for. The predicate is invoked with three
* arguments: (value, index|key, collection).
*
* **Note:** Unlike `_.remove`, this method returns a new array.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {Array} Returns the new filtered array.
* @see _.reject
* @example
*
* var users = [
* { 'user': 'barney', 'age': 36, 'active': true },
* { 'user': 'fred', 'age': 40, 'active': false }
* ];
*
* _.filter(users, function(o) { return !o.active; });
* // => objects for ['fred']
*
* // The `_.matches` iteratee shorthand.
* _.filter(users, { 'age': 36, 'active': true });
* // => objects for ['barney']
*
* // The `_.matchesProperty` iteratee shorthand.
* _.filter(users, ['active', false]);
* // => objects for ['fred']
*
* // The `_.property` iteratee shorthand.
* _.filter(users, 'active');
* // => objects for ['barney']
*
* // Combining several predicates using `_.overEvery` or `_.overSome`.
* _.filter(users, _.overSome([{ 'age': 36 }, ['age', 40]]));
* // => objects for ['fred', 'barney']
*/
function filter(collection, predicate) {
  var iteratee = getIteratee(predicate, 3);
  // Arrays take the optimized path; everything else the generic one.
  if (isArray(collection)) {
    return arrayFilter(collection, iteratee);
  }
  return baseFilter(collection, iteratee);
}
/**
* Iterates over elements of `collection`, returning the first element
* `predicate` returns truthy for. The predicate is invoked with three
* arguments: (value, index|key, collection).
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to inspect.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @param {number} [fromIndex=0] The index to search from.
* @returns {*} Returns the matched element, else `undefined`.
* @example
*
* var users = [
* { 'user': 'barney', 'age': 36, 'active': true },
* { 'user': 'fred', 'age': 40, 'active': false },
* { 'user': 'pebbles', 'age': 1, 'active': true }
* ];
*
* _.find(users, function(o) { return o.age < 40; });
* // => object for 'barney'
*
* // The `_.matches` iteratee shorthand.
* _.find(users, { 'age': 1, 'active': true });
* // => object for 'pebbles'
*
* // The `_.matchesProperty` iteratee shorthand.
* _.find(users, ['active', false]);
* // => object for 'fred'
*
* // The `_.property` iteratee shorthand.
* _.find(users, 'active');
* // => object for 'barney'
*/
var find = createFind(findIndex);
/**
* This method is like `_.find` except that it iterates over elements of
* `collection` from right to left.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Collection
* @param {Array|Object} collection The collection to inspect.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @param {number} [fromIndex=collection.length-1] The index to search from.
* @returns {*} Returns the matched element, else `undefined`.
* @example
*
* _.findLast([1, 2, 3, 4], function(n) {
* return n % 2 == 1;
* });
* // => 3
*/
var findLast = createFind(findLastIndex);
/**
* Creates a flattened array of values by running each element in `collection`
* thru `iteratee` and flattening the mapped results. The iteratee is invoked
* with three arguments: (value, index|key, collection).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Array} Returns the new flattened array.
* @example
*
* function duplicate(n) {
* return [n, n];
* }
*
* _.flatMap([1, 2], duplicate);
* // => [1, 1, 2, 2]
*/
function flatMap(collection, iteratee) {
  // Map first, then flatten the results a single level.
  var mapped = map(collection, iteratee);
  return baseFlatten(mapped, 1);
}
/**
* This method is like `_.flatMap` except that it recursively flattens the
* mapped results.
*
* @static
* @memberOf _
* @since 4.7.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Array} Returns the new flattened array.
* @example
*
* function duplicate(n) {
* return [[[n, n]]];
* }
*
* _.flatMapDeep([1, 2], duplicate);
* // => [1, 1, 2, 2]
*/
function flatMapDeep(collection, iteratee) {
  // Map first, then flatten the results completely.
  var mapped = map(collection, iteratee);
  return baseFlatten(mapped, INFINITY);
}
/**
* This method is like `_.flatMap` except that it recursively flattens the
* mapped results up to `depth` times.
*
* @static
* @memberOf _
* @since 4.7.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @param {number} [depth=1] The maximum recursion depth.
* @returns {Array} Returns the new flattened array.
* @example
*
* function duplicate(n) {
* return [[[n, n]]];
* }
*
* _.flatMapDepth([1, 2], duplicate, 2);
* // => [[1, 1], [2, 2]]
*/
function flatMapDepth(collection, iteratee, depth) {
  // Default to a single flattening level; otherwise coerce to an integer.
  if (depth === undefined) {
    depth = 1;
  } else {
    depth = toInteger(depth);
  }
  return baseFlatten(map(collection, iteratee), depth);
}
/**
* Iterates over elements of `collection` and invokes `iteratee` for each element.
* The iteratee is invoked with three arguments: (value, index|key, collection).
* Iteratee functions may exit iteration early by explicitly returning `false`.
*
* **Note:** As with other "Collections" methods, objects with a "length"
* property are iterated like arrays. To avoid this behavior use `_.forIn`
* or `_.forOwn` for object iteration.
*
* @static
* @memberOf _
* @since 0.1.0
* @alias each
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Array|Object} Returns `collection`.
* @see _.forEachRight
* @example
*
* _.forEach([1, 2], function(value) {
* console.log(value);
* });
* // => Logs `1` then `2`.
*
* _.forEach({ 'a': 1, 'b': 2 }, function(value, key) {
* console.log(key);
* });
* // => Logs 'a' then 'b' (iteration order is not guaranteed).
*/
function forEach(collection, iteratee) {
  var fn = getIteratee(iteratee, 3);
  // Arrays take the optimized path; everything else the generic one.
  return isArray(collection) ? arrayEach(collection, fn) : baseEach(collection, fn);
}
/**
* This method is like `_.forEach` except that it iterates over elements of
* `collection` from right to left.
*
* @static
* @memberOf _
* @since 2.0.0
* @alias eachRight
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Array|Object} Returns `collection`.
* @see _.forEach
* @example
*
* _.forEachRight([1, 2], function(value) {
* console.log(value);
* });
* // => Logs `2` then `1`.
*/
function forEachRight(collection, iteratee) {
  var fn = getIteratee(iteratee, 3);
  // Right-to-left counterpart of `forEach`.
  return isArray(collection) ? arrayEachRight(collection, fn) : baseEachRight(collection, fn);
}
/**
* Creates an object composed of keys generated from the results of running
* each element of `collection` thru `iteratee`. The order of grouped values
* is determined by the order they occur in `collection`. The corresponding
* value of each key is an array of elements responsible for generating the
* key. The iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The iteratee to transform keys.
* @returns {Object} Returns the composed aggregate object.
* @example
*
* _.groupBy([6.1, 4.2, 6.3], Math.floor);
* // => { '4': [4.2], '6': [6.1, 6.3] }
*
* // The `_.property` iteratee shorthand.
* _.groupBy(['one', 'two', 'three'], 'length');
* // => { '3': ['one', 'two'], '5': ['three'] }
*/
var groupBy = createAggregator(function(result, value, key) {
  // Start a new bucket for unseen keys; otherwise append to the bucket.
  if (!hasOwnProperty.call(result, key)) {
    baseAssignValue(result, key, [value]);
  } else {
    result[key].push(value);
  }
});
/**
* Checks if `value` is in `collection`. If `collection` is a string, it's
* checked for a substring of `value`, otherwise
* [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* is used for equality comparisons. If `fromIndex` is negative, it's used as
* the offset from the end of `collection`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object|string} collection The collection to inspect.
* @param {*} value The value to search for.
* @param {number} [fromIndex=0] The index to search from.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.reduce`.
* @returns {boolean} Returns `true` if `value` is found, else `false`.
* @example
*
* _.includes([1, 2, 3], 1);
* // => true
*
* _.includes([1, 2, 3], 1, 2);
* // => false
*
* _.includes({ 'a': 1, 'b': 2 }, 1);
* // => true
*
* _.includes('abcd', 'bc');
* // => true
*/
function includes(collection, value, fromIndex, guard) {
  // Objects are searched by their values; array-likes are used as-is.
  collection = isArrayLike(collection) ? collection : values(collection);
  fromIndex = (fromIndex && !guard) ? toInteger(fromIndex) : 0;
  var length = collection.length;
  // Negative offsets count back from the end, clamped at 0.
  if (fromIndex < 0) {
    fromIndex = nativeMax(length + fromIndex, 0);
  }
  if (isString(collection)) {
    // Strings check for a substring.
    return fromIndex <= length && collection.indexOf(value, fromIndex) > -1;
  }
  // Everything else uses SameValueZero via `baseIndexOf`.
  return !!length && baseIndexOf(collection, value, fromIndex) > -1;
}
/**
* Invokes the method at `path` of each element in `collection`, returning
* an array of the results of each invoked method. Any additional arguments
* are provided to each invoked method. If `path` is a function, it's invoked
* for, and `this` bound to, each element in `collection`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Array|Function|string} path The path of the method to invoke or
* the function invoked per iteration.
* @param {...*} [args] The arguments to invoke each method with.
* @returns {Array} Returns the array of results.
* @example
*
* _.invokeMap([[5, 1, 7], [3, 2, 1]], 'sort');
* // => [[1, 5, 7], [1, 2, 3]]
*
* _.invokeMap([123, 456], String.prototype.split, '');
* // => [['1', '2', '3'], ['4', '5', '6']]
*/
var invokeMap = baseRest(function(collection, path, args) {
  var isFunc = typeof path == 'function',
      result = isArrayLike(collection) ? Array(collection.length) : [],
      index = -1;
  baseEach(collection, function(value) {
    index += 1;
    if (isFunc) {
      // A function path is invoked with each element as `this`.
      result[index] = apply(path, value, args);
    } else {
      // Otherwise invoke the method found at `path` on the element.
      result[index] = baseInvoke(value, path, args);
    }
  });
  return result;
});
/**
* Creates an object composed of keys generated from the results of running
* each element of `collection` thru `iteratee`. The corresponding value of
* each key is the last element responsible for generating the key. The
* iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The iteratee to transform keys.
* @returns {Object} Returns the composed aggregate object.
* @example
*
* var array = [
* { 'dir': 'left', 'code': 97 },
* { 'dir': 'right', 'code': 100 }
* ];
*
* _.keyBy(array, function(o) {
* return String.fromCharCode(o.code);
* });
* // => { 'a': { 'dir': 'left', 'code': 97 }, 'd': { 'dir': 'right', 'code': 100 } }
*
* _.keyBy(array, 'dir');
* // => { 'left': { 'dir': 'left', 'code': 97 }, 'right': { 'dir': 'right', 'code': 100 } }
*/
var keyBy = createAggregator(function(result, value, key) {
  // Later elements with the same key overwrite earlier ones.
  baseAssignValue(result, key, value);
});
/**
* Creates an array of values by running each element in `collection` thru
* `iteratee`. The iteratee is invoked with three arguments:
* (value, index|key, collection).
*
* Many lodash methods are guarded to work as iteratees for methods like
* `_.every`, `_.filter`, `_.map`, `_.mapValues`, `_.reject`, and `_.some`.
*
* The guarded methods are:
* `ary`, `chunk`, `curry`, `curryRight`, `drop`, `dropRight`, `every`,
* `fill`, `invert`, `parseInt`, `random`, `range`, `rangeRight`, `repeat`,
* `sampleSize`, `slice`, `some`, `sortBy`, `split`, `take`, `takeRight`,
* `template`, `trim`, `trimEnd`, `trimStart`, and `words`
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Array} Returns the new mapped array.
* @example
*
* function square(n) {
* return n * n;
* }
*
* _.map([4, 8], square);
* // => [16, 64]
*
* _.map({ 'a': 4, 'b': 8 }, square);
* // => [16, 64] (iteration order is not guaranteed)
*
* var users = [
* { 'user': 'barney' },
* { 'user': 'fred' }
* ];
*
* // The `_.property` iteratee shorthand.
* _.map(users, 'user');
* // => ['barney', 'fred']
*/
function map(collection, iteratee) {
  var fn = getIteratee(iteratee, 3);
  // Arrays take the optimized path; everything else the generic one.
  return isArray(collection) ? arrayMap(collection, fn) : baseMap(collection, fn);
}
/**
* This method is like `_.sortBy` except that it allows specifying the sort
* orders of the iteratees to sort by. If `orders` is unspecified, all values
* are sorted in ascending order. Otherwise, specify an order of "desc" for
* descending or "asc" for ascending sort order of corresponding values.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Array[]|Function[]|Object[]|string[]} [iteratees=[_.identity]]
* The iteratees to sort by.
* @param {string[]} [orders] The sort orders of `iteratees`.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.reduce`.
* @returns {Array} Returns the new sorted array.
* @example
*
* var users = [
* { 'user': 'fred', 'age': 48 },
* { 'user': 'barney', 'age': 34 },
* { 'user': 'fred', 'age': 40 },
* { 'user': 'barney', 'age': 36 }
* ];
*
* // Sort by `user` in ascending order and by `age` in descending order.
* _.orderBy(users, ['user', 'age'], ['asc', 'desc']);
* // => objects for [['barney', 36], ['barney', 34], ['fred', 48], ['fred', 40]]
*/
function orderBy(collection, iteratees, orders, guard) {
  if (collection == null) {
    return [];
  }
  // Normalize `iteratees` to an array (nullish becomes empty).
  if (!isArray(iteratees)) {
    iteratees = (iteratees == null) ? [] : [iteratees];
  }
  // When invoked as an iteratee, drop the orders argument.
  if (guard) {
    orders = undefined;
  }
  // Normalize `orders` the same way.
  if (!isArray(orders)) {
    orders = (orders == null) ? [] : [orders];
  }
  return baseOrderBy(collection, iteratees, orders);
}
/**
* Creates an array of elements split into two groups, the first of which
* contains elements `predicate` returns truthy for, the second of which
* contains elements `predicate` returns falsey for. The predicate is
* invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 3.0.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {Array} Returns the array of grouped elements.
* @example
*
* var users = [
* { 'user': 'barney', 'age': 36, 'active': false },
* { 'user': 'fred', 'age': 40, 'active': true },
* { 'user': 'pebbles', 'age': 1, 'active': false }
* ];
*
* _.partition(users, function(o) { return o.active; });
* // => objects for [['fred'], ['barney', 'pebbles']]
*
* // The `_.matches` iteratee shorthand.
* _.partition(users, { 'age': 1, 'active': false });
* // => objects for [['pebbles'], ['barney', 'fred']]
*
* // The `_.matchesProperty` iteratee shorthand.
* _.partition(users, ['active', false]);
* // => objects for [['barney', 'pebbles'], ['fred']]
*
* // The `_.property` iteratee shorthand.
* _.partition(users, 'active');
* // => objects for [['fred'], ['barney', 'pebbles']]
*/
var partition = createAggregator(function(result, value, key) {
  // Truthy predicate results land in the first group, falsey in the second.
  var bucket = key ? result[0] : result[1];
  bucket.push(value);
}, function() { return [[], []]; });
/**
* Reduces `collection` to a value which is the accumulated result of running
* each element in `collection` thru `iteratee`, where each successive
* invocation is supplied the return value of the previous. If `accumulator`
* is not given, the first element of `collection` is used as the initial
* value. The iteratee is invoked with four arguments:
* (accumulator, value, index|key, collection).
*
* Many lodash methods are guarded to work as iteratees for methods like
* `_.reduce`, `_.reduceRight`, and `_.transform`.
*
* The guarded methods are:
* `assign`, `defaults`, `defaultsDeep`, `includes`, `merge`, `orderBy`,
* and `sortBy`
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @param {*} [accumulator] The initial value.
* @returns {*} Returns the accumulated value.
* @see _.reduceRight
* @example
*
* _.reduce([1, 2], function(sum, n) {
* return sum + n;
* }, 0);
* // => 3
*
* _.reduce({ 'a': 1, 'b': 2, 'c': 1 }, function(result, value, key) {
* (result[value] || (result[value] = [])).push(key);
* return result;
* }, {});
* // => { '1': ['a', 'c'], '2': ['b'] } (iteration order is not guaranteed)
*/
function reduce(collection, iteratee, accumulator) {
  // With no accumulator argument, the first element seeds the fold.
  var seedFromFirst = arguments.length < 3;
  if (isArray(collection)) {
    return arrayReduce(collection, getIteratee(iteratee, 4), accumulator, seedFromFirst);
  }
  return baseReduce(collection, getIteratee(iteratee, 4), accumulator, seedFromFirst, baseEach);
}
/**
* This method is like `_.reduce` except that it iterates over elements of
* `collection` from right to left.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @param {*} [accumulator] The initial value.
* @returns {*} Returns the accumulated value.
* @see _.reduce
* @example
*
* var array = [[0, 1], [2, 3], [4, 5]];
*
* _.reduceRight(array, function(flattened, other) {
* return flattened.concat(other);
* }, []);
* // => [4, 5, 2, 3, 0, 1]
*/
function reduceRight(collection, iteratee, accumulator) {
  // With no accumulator argument, the last element seeds the fold.
  var seedFromFirst = arguments.length < 3;
  if (isArray(collection)) {
    return arrayReduceRight(collection, getIteratee(iteratee, 4), accumulator, seedFromFirst);
  }
  return baseReduce(collection, getIteratee(iteratee, 4), accumulator, seedFromFirst, baseEachRight);
}
/**
* The opposite of `_.filter`; this method returns the elements of `collection`
* that `predicate` does **not** return truthy for.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {Array} Returns the new filtered array.
* @see _.filter
* @example
*
* var users = [
* { 'user': 'barney', 'age': 36, 'active': false },
* { 'user': 'fred', 'age': 40, 'active': true }
* ];
*
* _.reject(users, function(o) { return !o.active; });
* // => objects for ['fred']
*
* // The `_.matches` iteratee shorthand.
* _.reject(users, { 'age': 40, 'active': true });
* // => objects for ['barney']
*
* // The `_.matchesProperty` iteratee shorthand.
* _.reject(users, ['active', false]);
* // => objects for ['fred']
*
* // The `_.property` iteratee shorthand.
* _.reject(users, 'active');
* // => objects for ['barney']
*/
function reject(collection, predicate) {
  // `reject` is `filter` with the predicate inverted.
  var negated = negate(getIteratee(predicate, 3));
  return isArray(collection)
    ? arrayFilter(collection, negated)
    : baseFilter(collection, negated);
}
/**
* Gets a random element from `collection`.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Collection
* @param {Array|Object} collection The collection to sample.
* @returns {*} Returns the random element.
* @example
*
* _.sample([1, 2, 3, 4]);
* // => 2
*/
function sample(collection) {
  // Arrays take the optimized path; everything else the generic one.
  return isArray(collection) ? arraySample(collection) : baseSample(collection);
}
/**
* Gets `n` random elements at unique keys from `collection` up to the
* size of `collection`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Collection
* @param {Array|Object} collection The collection to sample.
* @param {number} [n=1] The number of elements to sample.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Array} Returns the random elements.
* @example
*
* _.sampleSize([1, 2, 3], 2);
* // => [3, 1]
*
* _.sampleSize([1, 2, 3], 4);
* // => [2, 3, 1]
*/
function sampleSize(collection, n, guard) {
  // Default `n` to 1 when omitted, or when invoked as an iteratee.
  var useDefault = guard ? isIterateeCall(collection, n, guard) : n === undefined;
  n = useDefault ? 1 : toInteger(n);
  return isArray(collection)
    ? arraySampleSize(collection, n)
    : baseSampleSize(collection, n);
}
/**
* Creates an array of shuffled values, using a version of the
* [Fisher-Yates shuffle](https://en.wikipedia.org/wiki/Fisher-Yates_shuffle).
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to shuffle.
* @returns {Array} Returns the new shuffled array.
* @example
*
* _.shuffle([1, 2, 3, 4]);
* // => [4, 1, 3, 2]
*/
function shuffle(collection) {
  // Arrays take the optimized path; everything else the generic one.
  return isArray(collection) ? arrayShuffle(collection) : baseShuffle(collection);
}
/**
* Gets the size of `collection` by returning its length for array-like
* values or the number of own enumerable string keyed properties for objects.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object|string} collection The collection to inspect.
* @returns {number} Returns the collection size.
* @example
*
* _.size([1, 2, 3]);
* // => 3
*
* _.size({ 'a': 1, 'b': 2 });
* // => 2
*
* _.size('pebbles');
* // => 7
*/
function size(collection) {
  if (collection == null) {
    return 0;
  }
  if (isArrayLike(collection)) {
    // Strings are measured by their string symbols, not code units.
    if (isString(collection)) {
      return stringSize(collection);
    }
    return collection.length;
  }
  // Maps and sets expose `size` directly.
  var tag = getTag(collection);
  if (tag == mapTag || tag == setTag) {
    return collection.size;
  }
  // Plain objects: count own enumerable string-keyed properties.
  return baseKeys(collection).length;
}
/**
* Checks if `predicate` returns truthy for **any** element of `collection`.
* Iteration is stopped once `predicate` returns truthy. The predicate is
* invoked with three arguments: (value, index|key, collection).
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {boolean} Returns `true` if any element passes the predicate check,
* else `false`.
* @example
*
* _.some([null, 0, 'yes', false], Boolean);
* // => true
*
* var users = [
* { 'user': 'barney', 'active': true },
* { 'user': 'fred', 'active': false }
* ];
*
* // The `_.matches` iteratee shorthand.
* _.some(users, { 'user': 'barney', 'active': false });
* // => false
*
* // The `_.matchesProperty` iteratee shorthand.
* _.some(users, ['active', false]);
* // => true
*
* // The `_.property` iteratee shorthand.
* _.some(users, 'active');
* // => true
*/
function some(collection, predicate, guard) {
  // When invoked as an iteratee (e.g. by `_.map`), ignore the predicate.
  if (guard && isIterateeCall(collection, predicate, guard)) {
    predicate = undefined;
  }
  var iteratee = getIteratee(predicate, 3);
  return isArray(collection)
    ? arraySome(collection, iteratee)
    : baseSome(collection, iteratee);
}
/**
* Creates an array of elements, sorted in ascending order by the results of
* running each element in a collection thru each iteratee. This method
* performs a stable sort, that is, it preserves the original sort order of
* equal elements. The iteratees are invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {...(Function|Function[])} [iteratees=[_.identity]]
* The iteratees to sort by.
* @returns {Array} Returns the new sorted array.
* @example
*
* var users = [
* { 'user': 'fred', 'age': 48 },
* { 'user': 'barney', 'age': 36 },
* { 'user': 'fred', 'age': 30 },
* { 'user': 'barney', 'age': 34 }
* ];
*
* _.sortBy(users, [function(o) { return o.user; }]);
* // => objects for [['barney', 36], ['barney', 34], ['fred', 48], ['fred', 30]]
*
* _.sortBy(users, ['user', 'age']);
* // => objects for [['barney', 34], ['barney', 36], ['fred', 30], ['fred', 48]]
*/
var sortBy = baseRest(function(collection, iteratees) {
  if (collection == null) {
    return [];
  }
  var length = iteratees.length;
  // Detect guarded calls (e.g. used as an iteratee of `_.reduce`):
  // drop the spurious index/collection arguments that were rest-collected.
  if (length > 1 && isIterateeCall(collection, iteratees[0], iteratees[1])) {
    iteratees = [];
  } else if (length > 2 && isIterateeCall(iteratees[0], iteratees[1], iteratees[2])) {
    iteratees = [iteratees[0]];
  }
  // Ascending order for every iteratee (empty orders array).
  return baseOrderBy(collection, baseFlatten(iteratees, 1), []);
});
/*------------------------------------------------------------------------*/
/**
* Gets the timestamp of the number of milliseconds that have elapsed since
* the Unix epoch (1 January 1970 00:00:00 UTC).
*
* @static
* @memberOf _
* @since 2.4.0
* @category Date
* @returns {number} Returns the timestamp.
* @example
*
* _.defer(function(stamp) {
* console.log(_.now() - stamp);
* }, _.now());
* // => Logs the number of milliseconds it took for the deferred invocation.
*/
// Prefer the context's `Date.now` when provided; fall back to the root's.
var now = ctxNow || function() {
  return root.Date.now();
};
/*------------------------------------------------------------------------*/
/**
* The opposite of `_.before`; this method creates a function that invokes
* `func` once it's called `n` or more times.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {number} n The number of calls before `func` is invoked.
* @param {Function} func The function to restrict.
* @returns {Function} Returns the new restricted function.
* @example
*
* var saves = ['profile', 'settings'];
*
* var done = _.after(saves.length, function() {
* console.log('done saving!');
* });
*
* _.forEach(saves, function(type) {
* asyncSave({ 'type': type, 'complete': done });
* });
* // => Logs 'done saving!' after the two async saves have completed.
*/
function after(n, func) {
  if (typeof func != 'function') {
    throw new TypeError(FUNC_ERROR_TEXT);
  }
  n = toInteger(n);
  return function() {
    // Count down; `func` only fires once the remaining count drops below 1.
    n -= 1;
    if (n < 1) {
      return func.apply(this, arguments);
    }
  };
}
/**
* Creates a function that invokes `func`, with up to `n` arguments,
* ignoring any additional arguments.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Function
* @param {Function} func The function to cap arguments for.
* @param {number} [n=func.length] The arity cap.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Function} Returns the new capped function.
* @example
*
* _.map(['6', '8', '10'], _.ary(parseInt, 1));
* // => [6, 8, 10]
*/
function ary(func, n, guard) {
  // When used as an iteratee, `guard` suppresses the explicit cap so the
  // default of `func.length` applies.
  if (guard) {
    n = undefined;
  }
  if (func && n == null) {
    n = func.length;
  }
  return createWrap(func, WRAP_ARY_FLAG, undefined, undefined, undefined, undefined, n);
}
/**
* Creates a function that invokes `func`, with the `this` binding and arguments
* of the created function, while it's called less than `n` times. Subsequent
* calls to the created function return the result of the last `func` invocation.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Function
* @param {number} n The number of calls at which `func` is no longer invoked.
* @param {Function} func The function to restrict.
* @returns {Function} Returns the new restricted function.
* @example
*
* jQuery(element).on('click', _.before(5, addContactToList));
* // => Allows adding up to 4 contacts to the list.
*/
function before(n, func) {
  var result;
  if (typeof func != 'function') {
    throw new TypeError(FUNC_ERROR_TEXT);
  }
  n = toInteger(n);
  return function() {
    n -= 1;
    if (n > 0) {
      // Still under the limit: invoke and remember the latest result.
      result = func.apply(this, arguments);
    }
    if (n <= 1) {
      // Limit reached: drop the reference so `func` can be collected.
      func = undefined;
    }
    return result;
  };
}
/**
* Creates a function that invokes `func` with the `this` binding of `thisArg`
* and `partials` prepended to the arguments it receives.
*
* The `_.bind.placeholder` value, which defaults to `_` in monolithic builds,
* may be used as a placeholder for partially applied arguments.
*
* **Note:** Unlike native `Function#bind`, this method doesn't set the "length"
* property of bound functions.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {Function} func The function to bind.
* @param {*} thisArg The `this` binding of `func`.
* @param {...*} [partials] The arguments to be partially applied.
* @returns {Function} Returns the new bound function.
* @example
*
* function greet(greeting, punctuation) {
* return greeting + ' ' + this.user + punctuation;
* }
*
* var object = { 'user': 'fred' };
*
* var bound = _.bind(greet, object, 'hi');
* bound('!');
* // => 'hi fred!'
*
* // Bound with placeholders.
* var bound = _.bind(greet, object, _, '!');
* bound('hi');
* // => 'hi fred!'
*/
var bind = baseRest(function(func, thisArg, partials) {
  var bitmask = WRAP_BIND_FLAG,
      holders;
  if (partials.length) {
    // Record placeholder positions and flag the wrapper as partially applied.
    holders = replaceHolders(partials, getHolder(bind));
    bitmask |= WRAP_PARTIAL_FLAG;
  }
  return createWrap(func, bitmask, thisArg, partials, holders);
});
/**
* Creates a function that invokes the method at `object[key]` with `partials`
* prepended to the arguments it receives.
*
* This method differs from `_.bind` by allowing bound functions to reference
* methods that may be redefined or don't yet exist. See
* [Peter Michaux's article](http://peter.michaux.ca/articles/lazy-function-definition-pattern)
* for more details.
*
* The `_.bindKey.placeholder` value, which defaults to `_` in monolithic
* builds, may be used as a placeholder for partially applied arguments.
*
* @static
* @memberOf _
* @since 0.10.0
* @category Function
* @param {Object} object The object to invoke the method on.
* @param {string} key The key of the method.
* @param {...*} [partials] The arguments to be partially applied.
* @returns {Function} Returns the new bound function.
* @example
*
* var object = {
* 'user': 'fred',
* 'greet': function(greeting, punctuation) {
* return greeting + ' ' + this.user + punctuation;
* }
* };
*
* var bound = _.bindKey(object, 'greet', 'hi');
* bound('!');
* // => 'hi fred!'
*
* object.greet = function(greeting, punctuation) {
* return greeting + 'ya ' + this.user + punctuation;
* };
*
* bound('!');
* // => 'hiya fred!'
*
* // Bound with placeholders.
* var bound = _.bindKey(object, 'greet', _, '!');
* bound('hi');
* // => 'hiya fred!'
*/
var bindKey = baseRest(function(object, key, partials) {
  var bitmask = WRAP_BIND_FLAG | WRAP_BIND_KEY_FLAG,
      holders;
  if (partials.length) {
    // Record placeholder positions and flag the wrapper as partially applied.
    holders = replaceHolders(partials, getHolder(bindKey));
    bitmask |= WRAP_PARTIAL_FLAG;
  }
  // Note: the method is looked up by `key` on `object` at call time.
  return createWrap(key, bitmask, object, partials, holders);
});
/**
* Creates a function that accepts arguments of `func` and either invokes
* `func` returning its result, if at least `arity` number of arguments have
* been provided, or returns a function that accepts the remaining `func`
* arguments, and so on. The arity of `func` may be specified if `func.length`
* is not sufficient.
*
* The `_.curry.placeholder` value, which defaults to `_` in monolithic builds,
* may be used as a placeholder for provided arguments.
*
* **Note:** This method doesn't set the "length" property of curried functions.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Function
* @param {Function} func The function to curry.
* @param {number} [arity=func.length] The arity of `func`.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Function} Returns the new curried function.
* @example
*
* var abc = function(a, b, c) {
* return [a, b, c];
* };
*
* var curried = _.curry(abc);
*
* curried(1)(2)(3);
* // => [1, 2, 3]
*
* curried(1, 2)(3);
* // => [1, 2, 3]
*
* curried(1, 2, 3);
* // => [1, 2, 3]
*
* // Curried with placeholders.
* curried(1)(_, 3)(2);
* // => [1, 2, 3]
*/
function curry(func, arity, guard) {
  // When used as an iteratee, `guard` suppresses the explicit arity.
  if (guard) {
    arity = undefined;
  }
  var wrapped = createWrap(func, WRAP_CURRY_FLAG, undefined, undefined, undefined, undefined, undefined, arity);
  wrapped.placeholder = curry.placeholder;
  return wrapped;
}
/**
* This method is like `_.curry` except that arguments are applied to `func`
* in the manner of `_.partialRight` instead of `_.partial`.
*
* The `_.curryRight.placeholder` value, which defaults to `_` in monolithic
* builds, may be used as a placeholder for provided arguments.
*
* **Note:** This method doesn't set the "length" property of curried functions.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Function
* @param {Function} func The function to curry.
* @param {number} [arity=func.length] The arity of `func`.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Function} Returns the new curried function.
* @example
*
* var abc = function(a, b, c) {
* return [a, b, c];
* };
*
* var curried = _.curryRight(abc);
*
* curried(3)(2)(1);
* // => [1, 2, 3]
*
* curried(2, 3)(1);
* // => [1, 2, 3]
*
* curried(1, 2, 3);
* // => [1, 2, 3]
*
* // Curried with placeholders.
* curried(3)(1, _)(2);
* // => [1, 2, 3]
*/
function curryRight(func, arity, guard) {
  // When used as an iteratee, `guard` suppresses the explicit arity.
  if (guard) {
    arity = undefined;
  }
  var wrapped = createWrap(func, WRAP_CURRY_RIGHT_FLAG, undefined, undefined, undefined, undefined, undefined, arity);
  wrapped.placeholder = curryRight.placeholder;
  return wrapped;
}
/**
* Creates a debounced function that delays invoking `func` until after `wait`
* milliseconds have elapsed since the last time the debounced function was
* invoked. The debounced function comes with a `cancel` method to cancel
* delayed `func` invocations and a `flush` method to immediately invoke them.
* Provide `options` to indicate whether `func` should be invoked on the
* leading and/or trailing edge of the `wait` timeout. The `func` is invoked
* with the last arguments provided to the debounced function. Subsequent
* calls to the debounced function return the result of the last `func`
* invocation.
*
* **Note:** If `leading` and `trailing` options are `true`, `func` is
* invoked on the trailing edge of the timeout only if the debounced function
* is invoked more than once during the `wait` timeout.
*
* If `wait` is `0` and `leading` is `false`, `func` invocation is deferred
 * until the next tick, similar to `setTimeout` with a timeout of `0`.
*
* See [David Corbacho's article](https://css-tricks.com/debouncing-throttling-explained-examples/)
* for details over the differences between `_.debounce` and `_.throttle`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {Function} func The function to debounce.
* @param {number} [wait=0] The number of milliseconds to delay.
* @param {Object} [options={}] The options object.
* @param {boolean} [options.leading=false]
* Specify invoking on the leading edge of the timeout.
* @param {number} [options.maxWait]
* The maximum time `func` is allowed to be delayed before it's invoked.
* @param {boolean} [options.trailing=true]
* Specify invoking on the trailing edge of the timeout.
* @returns {Function} Returns the new debounced function.
* @example
*
* // Avoid costly calculations while the window size is in flux.
* jQuery(window).on('resize', _.debounce(calculateLayout, 150));
*
* // Invoke `sendMail` when clicked, debouncing subsequent calls.
* jQuery(element).on('click', _.debounce(sendMail, 300, {
* 'leading': true,
* 'trailing': false
* }));
*
* // Ensure `batchLog` is invoked once after 1 second of debounced calls.
* var debounced = _.debounce(batchLog, 250, { 'maxWait': 1000 });
* var source = new EventSource('/stream');
* jQuery(source).on('message', debounced);
*
* // Cancel the trailing debounced invocation.
* jQuery(window).on('popstate', debounced.cancel);
*/
function debounce(func, wait, options) {
  var lastArgs,        // args of the most recent debounced call (pending invocation)
      lastThis,        // `this` of the most recent debounced call
      maxWait,         // max delay before a forced invocation (only when `maxing`)
      result,          // result of the last `func` invocation
      timerId,         // active trailing-edge timer, or `undefined` when idle
      lastCallTime,    // timestamp of the most recent debounced call
      lastInvokeTime = 0,
      leading = false,
      maxing = false,
      trailing = true;
  if (typeof func != 'function') {
    throw new TypeError(FUNC_ERROR_TEXT);
  }
  wait = toNumber(wait) || 0;
  if (isObject(options)) {
    leading = !!options.leading;
    maxing = 'maxWait' in options;
    // `maxWait` is clamped to at least `wait`; a smaller value would be meaningless.
    maxWait = maxing ? nativeMax(toNumber(options.maxWait) || 0, wait) : maxWait;
    trailing = 'trailing' in options ? !!options.trailing : trailing;
  }
  // Invoke `func` with the pending args/this, clearing them so a trailing
  // edge won't fire again for the same call.
  function invokeFunc(time) {
    var args = lastArgs,
        thisArg = lastThis;
    lastArgs = lastThis = undefined;
    lastInvokeTime = time;
    result = func.apply(thisArg, args);
    return result;
  }
  function leadingEdge(time) {
    // Reset any `maxWait` timer.
    lastInvokeTime = time;
    // Start the timer for the trailing edge.
    timerId = setTimeout(timerExpired, wait);
    // Invoke the leading edge.
    return leading ? invokeFunc(time) : result;
  }
  // How long until the next edge should fire: the remainder of `wait`, or,
  // when `maxWait` is in play, whichever deadline comes first.
  function remainingWait(time) {
    var timeSinceLastCall = time - lastCallTime,
        timeSinceLastInvoke = time - lastInvokeTime,
        timeWaiting = wait - timeSinceLastCall;
    return maxing
      ? nativeMin(timeWaiting, maxWait - timeSinceLastInvoke)
      : timeWaiting;
  }
  function shouldInvoke(time) {
    var timeSinceLastCall = time - lastCallTime,
        timeSinceLastInvoke = time - lastInvokeTime;
    // Either this is the first call, activity has stopped and we're at the
    // trailing edge, the system time has gone backwards and we're treating
    // it as the trailing edge, or we've hit the `maxWait` limit.
    return (lastCallTime === undefined || (timeSinceLastCall >= wait) ||
      (timeSinceLastCall < 0) || (maxing && timeSinceLastInvoke >= maxWait));
  }
  // Timer callback: either fire the trailing edge or re-arm the timer for
  // the remaining time.
  function timerExpired() {
    var time = now();
    if (shouldInvoke(time)) {
      return trailingEdge(time);
    }
    // Restart the timer.
    timerId = setTimeout(timerExpired, remainingWait(time));
  }
  function trailingEdge(time) {
    timerId = undefined;
    // Only invoke if we have `lastArgs` which means `func` has been
    // debounced at least once.
    if (trailing && lastArgs) {
      return invokeFunc(time);
    }
    lastArgs = lastThis = undefined;
    return result;
  }
  // Cancel any pending invocation and reset all bookkeeping state.
  function cancel() {
    if (timerId !== undefined) {
      clearTimeout(timerId);
    }
    lastInvokeTime = 0;
    lastArgs = lastCallTime = lastThis = timerId = undefined;
  }
  // Immediately fire a pending trailing invocation, if any.
  function flush() {
    return timerId === undefined ? result : trailingEdge(now());
  }
  // The wrapper handed back to the caller.
  function debounced() {
    var time = now(),
        isInvoking = shouldInvoke(time);
    lastArgs = arguments;
    lastThis = this;
    lastCallTime = time;
    if (isInvoking) {
      if (timerId === undefined) {
        return leadingEdge(lastCallTime);
      }
      if (maxing) {
        // Handle invocations in a tight loop.
        clearTimeout(timerId);
        timerId = setTimeout(timerExpired, wait);
        return invokeFunc(lastCallTime);
      }
    }
    if (timerId === undefined) {
      timerId = setTimeout(timerExpired, wait);
    }
    return result;
  }
  debounced.cancel = cancel;
  debounced.flush = flush;
  return debounced;
}
/**
* Defers invoking the `func` until the current call stack has cleared. Any
* additional arguments are provided to `func` when it's invoked.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {Function} func The function to defer.
* @param {...*} [args] The arguments to invoke `func` with.
* @returns {number} Returns the timer id.
* @example
*
* _.defer(function(text) {
* console.log(text);
* }, 'deferred');
* // => Logs 'deferred' after one millisecond.
*/
// A 1ms delay is the smallest `setTimeout`-style deferral `baseDelay` accepts.
var defer = baseRest(function(func, args) {
  return baseDelay(func, 1, args);
});
/**
* Invokes `func` after `wait` milliseconds. Any additional arguments are
* provided to `func` when it's invoked.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {Function} func The function to delay.
* @param {number} wait The number of milliseconds to delay invocation.
* @param {...*} [args] The arguments to invoke `func` with.
* @returns {number} Returns the timer id.
* @example
*
* _.delay(function(text) {
* console.log(text);
* }, 1000, 'later');
* // => Logs 'later' after one second.
*/
var delay = baseRest(function(func, wait, args) {
  // Coerce `wait` to a number, treating `NaN`/missing values as no delay.
  var milliseconds = toNumber(wait) || 0;
  return baseDelay(func, milliseconds, args);
});
/**
* Creates a function that invokes `func` with arguments reversed.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Function
* @param {Function} func The function to flip arguments for.
* @returns {Function} Returns the new flipped function.
* @example
*
* var flipped = _.flip(function() {
* return _.toArray(arguments);
* });
*
* flipped('a', 'b', 'c', 'd');
* // => ['d', 'c', 'b', 'a']
*/
function flip(func) {
  // The flip behavior itself lives in `createWrap`, selected by the flag.
  return createWrap(func, WRAP_FLIP_FLAG);
}
/**
* Creates a function that memoizes the result of `func`. If `resolver` is
* provided, it determines the cache key for storing the result based on the
* arguments provided to the memoized function. By default, the first argument
* provided to the memoized function is used as the map cache key. The `func`
* is invoked with the `this` binding of the memoized function.
*
* **Note:** The cache is exposed as the `cache` property on the memoized
* function. Its creation may be customized by replacing the `_.memoize.Cache`
* constructor with one whose instances implement the
* [`Map`](http://ecma-international.org/ecma-262/7.0/#sec-properties-of-the-map-prototype-object)
* method interface of `clear`, `delete`, `get`, `has`, and `set`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {Function} func The function to have its output memoized.
* @param {Function} [resolver] The function to resolve the cache key.
* @returns {Function} Returns the new memoized function.
* @example
*
* var object = { 'a': 1, 'b': 2 };
* var other = { 'c': 3, 'd': 4 };
*
* var values = _.memoize(_.values);
* values(object);
* // => [1, 2]
*
* values(other);
* // => [3, 4]
*
* object.a = 2;
* values(object);
* // => [1, 2]
*
* // Modify the result cache.
* values.cache.set(object, ['a', 'b']);
* values(object);
* // => ['a', 'b']
*
* // Replace `_.memoize.Cache`.
* _.memoize.Cache = WeakMap;
*/
function memoize(func, resolver) {
  if (typeof func != 'function' || (resolver != null && typeof resolver != 'function')) {
    throw new TypeError(FUNC_ERROR_TEXT);
  }
  var memoized = function() {
    var args = arguments;
    // Cache key defaults to the first argument unless a resolver is given.
    var key = resolver ? resolver.apply(this, args) : args[0];
    var cache = memoized.cache;
    if (cache.has(key)) {
      return cache.get(key);
    }
    var value = func.apply(this, args);
    // `set` may return a new cache instance; fall back to the old one otherwise.
    memoized.cache = cache.set(key, value) || cache;
    return value;
  };
  memoized.cache = new (memoize.Cache || MapCache);
  return memoized;
}
// Expose `MapCache`.
memoize.Cache = MapCache;
/**
* Creates a function that negates the result of the predicate `func`. The
* `func` predicate is invoked with the `this` binding and arguments of the
* created function.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Function
* @param {Function} predicate The predicate to negate.
* @returns {Function} Returns the new negated function.
* @example
*
* function isEven(n) {
* return n % 2 == 0;
* }
*
* _.filter([1, 2, 3, 4, 5, 6], _.negate(isEven));
* // => [1, 3, 5]
*/
function negate(predicate) {
  if (typeof predicate != 'function') {
    throw new TypeError(FUNC_ERROR_TEXT);
  }
  return function() {
    var args = arguments,
        count = args.length;
    // Fast paths: `call` with explicit arguments avoids `apply` overhead
    // for the common low arities.
    if (count === 0) {
      return !predicate.call(this);
    }
    if (count === 1) {
      return !predicate.call(this, args[0]);
    }
    if (count === 2) {
      return !predicate.call(this, args[0], args[1]);
    }
    if (count === 3) {
      return !predicate.call(this, args[0], args[1], args[2]);
    }
    return !predicate.apply(this, args);
  };
}
/**
* Creates a function that is restricted to invoking `func` once. Repeat calls
* to the function return the value of the first invocation. The `func` is
* invoked with the `this` binding and arguments of the created function.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {Function} func The function to restrict.
* @returns {Function} Returns the new restricted function.
* @example
*
* var initialize = _.once(createApplication);
* initialize();
* initialize();
* // => `createApplication` is invoked once
*/
function once(func) {
  // `before(2, func)` permits exactly one invocation and replays its result.
  return before(2, func);
}
/**
* Creates a function that invokes `func` with its arguments transformed.
*
* @static
* @since 4.0.0
* @memberOf _
* @category Function
* @param {Function} func The function to wrap.
* @param {...(Function|Function[])} [transforms=[_.identity]]
* The argument transforms.
* @returns {Function} Returns the new function.
* @example
*
* function doubled(n) {
* return n * 2;
* }
*
* function square(n) {
* return n * n;
* }
*
* var func = _.overArgs(function(x, y) {
* return [x, y];
* }, [square, doubled]);
*
* func(9, 3);
* // => [81, 6]
*
* func(10, 5);
* // => [100, 10]
*/
var overArgs = castRest(function(func, transforms) {
  // A single array argument is treated as the list of transforms; otherwise
  // the rest arguments are flattened one level.
  if (transforms.length == 1 && isArray(transforms[0])) {
    transforms = arrayMap(transforms[0], baseUnary(getIteratee()));
  } else {
    transforms = arrayMap(baseFlatten(transforms, 1), baseUnary(getIteratee()));
  }
  var funcsLength = transforms.length;
  return baseRest(function(args) {
    // Only as many arguments as there are transforms get rewritten.
    var limit = nativeMin(args.length, funcsLength);
    for (var index = 0; index < limit; index++) {
      args[index] = transforms[index].call(this, args[index]);
    }
    return apply(func, this, args);
  });
});
/**
* Creates a function that invokes `func` with `partials` prepended to the
* arguments it receives. This method is like `_.bind` except it does **not**
* alter the `this` binding.
*
* The `_.partial.placeholder` value, which defaults to `_` in monolithic
* builds, may be used as a placeholder for partially applied arguments.
*
* **Note:** This method doesn't set the "length" property of partially
* applied functions.
*
* @static
* @memberOf _
* @since 0.2.0
* @category Function
* @param {Function} func The function to partially apply arguments to.
* @param {...*} [partials] The arguments to be partially applied.
* @returns {Function} Returns the new partially applied function.
* @example
*
* function greet(greeting, name) {
* return greeting + ' ' + name;
* }
*
* var sayHelloTo = _.partial(greet, 'hello');
* sayHelloTo('fred');
* // => 'hello fred'
*
* // Partially applied with placeholders.
* var greetFred = _.partial(greet, _, 'fred');
* greetFred('hi');
* // => 'hi fred'
*/
var partial = baseRest(function(func, partials) {
  // Swap placeholders out of `partials` and remember their positions.
  var placeholderIndexes = replaceHolders(partials, getHolder(partial));
  return createWrap(func, WRAP_PARTIAL_FLAG, undefined, partials, placeholderIndexes);
});
/**
* This method is like `_.partial` except that partially applied arguments
* are appended to the arguments it receives.
*
* The `_.partialRight.placeholder` value, which defaults to `_` in monolithic
* builds, may be used as a placeholder for partially applied arguments.
*
* **Note:** This method doesn't set the "length" property of partially
* applied functions.
*
* @static
* @memberOf _
* @since 1.0.0
* @category Function
* @param {Function} func The function to partially apply arguments to.
* @param {...*} [partials] The arguments to be partially applied.
* @returns {Function} Returns the new partially applied function.
* @example
*
* function greet(greeting, name) {
* return greeting + ' ' + name;
* }
*
* var greetFred = _.partialRight(greet, 'fred');
* greetFred('hi');
* // => 'hi fred'
*
* // Partially applied with placeholders.
* var sayHelloTo = _.partialRight(greet, 'hello', _);
* sayHelloTo('fred');
* // => 'hello fred'
*/
var partialRight = baseRest(function(func, partials) {
  // Swap placeholders out of `partials` and remember their positions.
  var placeholderIndexes = replaceHolders(partials, getHolder(partialRight));
  return createWrap(func, WRAP_PARTIAL_RIGHT_FLAG, undefined, partials, placeholderIndexes);
});
/**
* Creates a function that invokes `func` with arguments arranged according
* to the specified `indexes` where the argument value at the first index is
* provided as the first argument, the argument value at the second index is
* provided as the second argument, and so on.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Function
* @param {Function} func The function to rearrange arguments for.
* @param {...(number|number[])} indexes The arranged argument indexes.
* @returns {Function} Returns the new function.
* @example
*
* var rearged = _.rearg(function(a, b, c) {
* return [a, b, c];
* }, [2, 0, 1]);
*
* rearged('b', 'c', 'a')
* // => ['a', 'b', 'c']
*/
// `flatRest` flattens the rest arguments, so `indexes` may be given as
// separate numbers or as a single array.
var rearg = flatRest(function(func, indexes) {
  return createWrap(func, WRAP_REARG_FLAG, undefined, undefined, undefined, indexes);
});
/**
* Creates a function that invokes `func` with the `this` binding of the
* created function and arguments from `start` and beyond provided as
* an array.
*
* **Note:** This method is based on the
* [rest parameter](https://mdn.io/rest_parameters).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Function
* @param {Function} func The function to apply a rest parameter to.
* @param {number} [start=func.length-1] The start position of the rest parameter.
* @returns {Function} Returns the new function.
* @example
*
* var say = _.rest(function(what, names) {
* return what + ' ' + _.initial(names).join(', ') +
* (_.size(names) > 1 ? ', & ' : '') + _.last(names);
* });
*
* say('hello', 'fred', 'barney', 'pebbles');
* // => 'hello fred, barney, & pebbles'
*/
function rest(func, start) {
  if (typeof func != 'function') {
    throw new TypeError(FUNC_ERROR_TEXT);
  }
  // Leave `start` as `undefined` so `baseRest` applies its own default
  // (`func.length - 1`); otherwise coerce it to an integer.
  if (start !== undefined) {
    start = toInteger(start);
  }
  return baseRest(func, start);
}
/**
* Creates a function that invokes `func` with the `this` binding of the
 * created function and an array of arguments much like
* [`Function#apply`](http://www.ecma-international.org/ecma-262/7.0/#sec-function.prototype.apply).
*
* **Note:** This method is based on the
* [spread operator](https://mdn.io/spread_operator).
*
* @static
* @memberOf _
* @since 3.2.0
* @category Function
* @param {Function} func The function to spread arguments over.
* @param {number} [start=0] The start position of the spread.
* @returns {Function} Returns the new function.
* @example
*
* var say = _.spread(function(who, what) {
* return who + ' says ' + what;
* });
*
* say(['fred', 'hello']);
* // => 'fred says hello'
*
* var numbers = Promise.all([
* Promise.resolve(40),
* Promise.resolve(36)
* ]);
*
* numbers.then(_.spread(function(x, y) {
* return x + y;
* }));
* // => a Promise of 76
*/
function spread(func, start) {
  if (typeof func != 'function') {
    throw new TypeError(FUNC_ERROR_TEXT);
  }
  start = start == null ? 0 : nativeMax(toInteger(start), 0);
  return baseRest(function(args) {
    // The argument at `start` holds the array to be spread; everything
    // before it is passed through unchanged.
    var spreadable = args[start];
    var leadingArgs = castSlice(args, 0, start);
    if (spreadable) {
      arrayPush(leadingArgs, spreadable);
    }
    return apply(func, this, leadingArgs);
  });
}
/**
* Creates a throttled function that only invokes `func` at most once per
* every `wait` milliseconds. The throttled function comes with a `cancel`
* method to cancel delayed `func` invocations and a `flush` method to
* immediately invoke them. Provide `options` to indicate whether `func`
* should be invoked on the leading and/or trailing edge of the `wait`
* timeout. The `func` is invoked with the last arguments provided to the
* throttled function. Subsequent calls to the throttled function return the
* result of the last `func` invocation.
*
* **Note:** If `leading` and `trailing` options are `true`, `func` is
* invoked on the trailing edge of the timeout only if the throttled function
* is invoked more than once during the `wait` timeout.
*
* If `wait` is `0` and `leading` is `false`, `func` invocation is deferred
 * until the next tick, similar to `setTimeout` with a timeout of `0`.
*
* See [David Corbacho's article](https://css-tricks.com/debouncing-throttling-explained-examples/)
* for details over the differences between `_.throttle` and `_.debounce`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {Function} func The function to throttle.
* @param {number} [wait=0] The number of milliseconds to throttle invocations to.
* @param {Object} [options={}] The options object.
* @param {boolean} [options.leading=true]
* Specify invoking on the leading edge of the timeout.
* @param {boolean} [options.trailing=true]
* Specify invoking on the trailing edge of the timeout.
* @returns {Function} Returns the new throttled function.
* @example
*
* // Avoid excessively updating the position while scrolling.
* jQuery(window).on('scroll', _.throttle(updatePosition, 100));
*
* // Invoke `renewToken` when the click event is fired, but not more than once every 5 minutes.
* var throttled = _.throttle(renewToken, 300000, { 'trailing': false });
* jQuery(element).on('click', throttled);
*
* // Cancel the trailing throttled invocation.
* jQuery(window).on('popstate', throttled.cancel);
*/
function throttle(func, wait, options) {
  if (typeof func != 'function') {
    throw new TypeError(FUNC_ERROR_TEXT);
  }
  // Throttle defaults: fire on both edges unless explicitly disabled.
  var leading = true;
  var trailing = true;
  if (isObject(options)) {
    if ('leading' in options) {
      leading = !!options.leading;
    }
    if ('trailing' in options) {
      trailing = !!options.trailing;
    }
  }
  // A throttle is a debounce whose `maxWait` equals `wait`.
  return debounce(func, wait, {
    'leading': leading,
    'maxWait': wait,
    'trailing': trailing
  });
}
/**
* Creates a function that accepts up to one argument, ignoring any
* additional arguments.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Function
* @param {Function} func The function to cap arguments for.
* @returns {Function} Returns the new capped function.
* @example
*
* _.map(['6', '8', '10'], _.unary(parseInt));
* // => [6, 8, 10]
*/
function unary(func) {
  // Simply caps the arity at one via `ary`.
  return ary(func, 1);
}
/**
* Creates a function that provides `value` to `wrapper` as its first
* argument. Any additional arguments provided to the function are appended
* to those provided to the `wrapper`. The wrapper is invoked with the `this`
* binding of the created function.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {*} value The value to wrap.
* @param {Function} [wrapper=identity] The wrapper function.
* @returns {Function} Returns the new function.
* @example
*
* var p = _.wrap(_.escape, function(func, text) {
* return '<p>' + func(text) + '</p>';
* });
*
* p('fred, barney, & pebbles');
* // => '<p>fred, barney, & pebbles</p>'
*/
function wrap(value, wrapper) {
  // `castFunction` supplies `identity` when no wrapper is given; the result
  // is `wrapper` with `value` pre-bound as its first argument.
  return partial(castFunction(wrapper), value);
}
/*------------------------------------------------------------------------*/
/**
* Casts `value` as an array if it's not one.
*
* @static
* @memberOf _
* @since 4.4.0
* @category Lang
* @param {*} value The value to inspect.
* @returns {Array} Returns the cast array.
* @example
*
* _.castArray(1);
* // => [1]
*
* _.castArray({ 'a': 1 });
* // => [{ 'a': 1 }]
*
* _.castArray('abc');
* // => ['abc']
*
* _.castArray(null);
* // => [null]
*
* _.castArray(undefined);
* // => [undefined]
*
* _.castArray();
* // => []
*
* var array = [1, 2, 3];
* console.log(_.castArray(array) === array);
* // => true
*/
function castArray() {
  // No arguments at all (not even `undefined`) yields an empty array.
  if (arguments.length == 0) {
    return [];
  }
  var first = arguments[0];
  if (isArray(first)) {
    return first;
  }
  return [first];
}
/**
* Creates a shallow clone of `value`.
*
* **Note:** This method is loosely based on the
* [structured clone algorithm](https://mdn.io/Structured_clone_algorithm)
* and supports cloning arrays, array buffers, booleans, date objects, maps,
* numbers, `Object` objects, regexes, sets, strings, symbols, and typed
* arrays. The own enumerable properties of `arguments` objects are cloned
* as plain objects. An empty object is returned for uncloneable values such
* as error objects, functions, DOM nodes, and WeakMaps.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to clone.
* @returns {*} Returns the cloned value.
* @see _.cloneDeep
* @example
*
* var objects = [{ 'a': 1 }, { 'b': 2 }];
*
* var shallow = _.clone(objects);
* console.log(shallow[0] === objects[0]);
* // => true
*/
function clone(value) {
  // Shallow clone: no deep flag, but symbol properties are included.
  return baseClone(value, CLONE_SYMBOLS_FLAG);
}
/**
* This method is like `_.clone` except that it accepts `customizer` which
* is invoked to produce the cloned value. If `customizer` returns `undefined`,
* cloning is handled by the method instead. The `customizer` is invoked with
* up to four arguments; (value [, index|key, object, stack]).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to clone.
* @param {Function} [customizer] The function to customize cloning.
* @returns {*} Returns the cloned value.
* @see _.cloneDeepWith
* @example
*
* function customizer(value) {
* if (_.isElement(value)) {
* return value.cloneNode(false);
* }
* }
*
* var el = _.cloneWith(document.body, customizer);
*
* console.log(el === document.body);
* // => false
* console.log(el.nodeName);
* // => 'BODY'
* console.log(el.childNodes.length);
* // => 0
*/
function cloneWith(value, customizer) {
  // Anything that isn't callable is ignored, leaving default cloning.
  if (typeof customizer != 'function') {
    customizer = undefined;
  }
  return baseClone(value, CLONE_SYMBOLS_FLAG, customizer);
}
/**
* This method is like `_.clone` except that it recursively clones `value`.
*
* @static
* @memberOf _
* @since 1.0.0
* @category Lang
* @param {*} value The value to recursively clone.
* @returns {*} Returns the deep cloned value.
* @see _.clone
* @example
*
* var objects = [{ 'a': 1 }, { 'b': 2 }];
*
* var deep = _.cloneDeep(objects);
* console.log(deep[0] === objects[0]);
* // => false
*/
function cloneDeep(value) {
  // Deep clone, including symbol properties.
  return baseClone(value, CLONE_DEEP_FLAG | CLONE_SYMBOLS_FLAG);
}
/**
* This method is like `_.cloneWith` except that it recursively clones `value`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to recursively clone.
* @param {Function} [customizer] The function to customize cloning.
* @returns {*} Returns the deep cloned value.
* @see _.cloneWith
* @example
*
* function customizer(value) {
* if (_.isElement(value)) {
* return value.cloneNode(true);
* }
* }
*
* var el = _.cloneDeepWith(document.body, customizer);
*
* console.log(el === document.body);
* // => false
* console.log(el.nodeName);
* // => 'BODY'
* console.log(el.childNodes.length);
* // => 20
*/
function cloneDeepWith(value, customizer) {
  // Anything that isn't callable is ignored, leaving default cloning.
  if (typeof customizer != 'function') {
    customizer = undefined;
  }
  return baseClone(value, CLONE_DEEP_FLAG | CLONE_SYMBOLS_FLAG, customizer);
}
/**
* Checks if `object` conforms to `source` by invoking the predicate
* properties of `source` with the corresponding property values of `object`.
*
* **Note:** This method is equivalent to `_.conforms` when `source` is
* partially applied.
*
* @static
* @memberOf _
* @since 4.14.0
* @category Lang
* @param {Object} object The object to inspect.
* @param {Object} source The object of property predicates to conform to.
* @returns {boolean} Returns `true` if `object` conforms, else `false`.
* @example
*
* var object = { 'a': 1, 'b': 2 };
*
* _.conformsTo(object, { 'b': function(n) { return n > 1; } });
* // => true
*
* _.conformsTo(object, { 'b': function(n) { return n > 2; } });
* // => false
*/
function conformsTo(object, source) {
return source == null || baseConformsTo(object, source, keys(source));
}
/**
* Performs a
* [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* comparison between two values to determine if they are equivalent.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if the values are equivalent, else `false`.
* @example
*
* var object = { 'a': 1 };
* var other = { 'a': 1 };
*
* _.eq(object, object);
* // => true
*
* _.eq(object, other);
* // => false
*
* _.eq('a', 'a');
* // => true
*
* _.eq('a', Object('a'));
* // => false
*
* _.eq(NaN, NaN);
* // => true
*/
function eq(value, other) {
return value === other || (value !== value && other !== other);
}
/**
* Checks if `value` is greater than `other`.
*
* @static
* @memberOf _
* @since 3.9.0
* @category Lang
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if `value` is greater than `other`,
* else `false`.
* @see _.lt
* @example
*
* _.gt(3, 1);
* // => true
*
* _.gt(3, 3);
* // => false
*
* _.gt(1, 3);
* // => false
*/
var gt = createRelationalOperation(baseGt);
/**
* Checks if `value` is greater than or equal to `other`.
*
* @static
* @memberOf _
* @since 3.9.0
* @category Lang
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if `value` is greater than or equal to
* `other`, else `false`.
* @see _.lte
* @example
*
* _.gte(3, 1);
* // => true
*
* _.gte(3, 3);
* // => true
*
* _.gte(1, 3);
* // => false
*/
var gte = createRelationalOperation(function(value, other) {
return value >= other;
});
/**
* Checks if `value` is likely an `arguments` object.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an `arguments` object,
* else `false`.
* @example
*
* _.isArguments(function() { return arguments; }());
* // => true
*
* _.isArguments([1, 2, 3]);
* // => false
*/
var isArguments = baseIsArguments(function() { return arguments; }()) ? baseIsArguments : function(value) {
return isObjectLike(value) && hasOwnProperty.call(value, 'callee') &&
!propertyIsEnumerable.call(value, 'callee');
};
/**
* Checks if `value` is classified as an `Array` object.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an array, else `false`.
* @example
*
* _.isArray([1, 2, 3]);
* // => true
*
* _.isArray(document.body.children);
* // => false
*
* _.isArray('abc');
* // => false
*
* _.isArray(_.noop);
* // => false
*/
var isArray = Array.isArray;
/**
* Checks if `value` is classified as an `ArrayBuffer` object.
*
* @static
* @memberOf _
* @since 4.3.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an array buffer, else `false`.
* @example
*
* _.isArrayBuffer(new ArrayBuffer(2));
* // => true
*
* _.isArrayBuffer(new Array(2));
* // => false
*/
var isArrayBuffer = nodeIsArrayBuffer ? baseUnary(nodeIsArrayBuffer) : baseIsArrayBuffer;
/**
* Checks if `value` is array-like. A value is considered array-like if it's
* not a function and has a `value.length` that's an integer greater than or
* equal to `0` and less than or equal to `Number.MAX_SAFE_INTEGER`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is array-like, else `false`.
* @example
*
* _.isArrayLike([1, 2, 3]);
* // => true
*
* _.isArrayLike(document.body.children);
* // => true
*
* _.isArrayLike('abc');
* // => true
*
* _.isArrayLike(_.noop);
* // => false
*/
function isArrayLike(value) {
return value != null && isLength(value.length) && !isFunction(value);
}
/**
* This method is like `_.isArrayLike` except that it also checks if `value`
* is an object.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an array-like object,
* else `false`.
* @example
*
* _.isArrayLikeObject([1, 2, 3]);
* // => true
*
* _.isArrayLikeObject(document.body.children);
* // => true
*
* _.isArrayLikeObject('abc');
* // => false
*
* _.isArrayLikeObject(_.noop);
* // => false
*/
function isArrayLikeObject(value) {
return isObjectLike(value) && isArrayLike(value);
}
/**
* Checks if `value` is classified as a boolean primitive or object.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a boolean, else `false`.
* @example
*
* _.isBoolean(false);
* // => true
*
* _.isBoolean(null);
* // => false
*/
function isBoolean(value) {
return value === true || value === false ||
(isObjectLike(value) && baseGetTag(value) == boolTag);
}
/**
* Checks if `value` is a buffer.
*
* @static
* @memberOf _
* @since 4.3.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a buffer, else `false`.
* @example
*
* _.isBuffer(new Buffer(2));
* // => true
*
* _.isBuffer(new Uint8Array(2));
* // => false
*/
var isBuffer = nativeIsBuffer || stubFalse;
/**
* Checks if `value` is classified as a `Date` object.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a date object, else `false`.
* @example
*
* _.isDate(new Date);
* // => true
*
* _.isDate('Mon April 23 2012');
* // => false
*/
var isDate = nodeIsDate ? baseUnary(nodeIsDate) : baseIsDate;
/**
* Checks if `value` is likely a DOM element.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a DOM element, else `false`.
* @example
*
* _.isElement(document.body);
* // => true
*
* _.isElement('<body>');
* // => false
*/
function isElement(value) {
return isObjectLike(value) && value.nodeType === 1 && !isPlainObject(value);
}
/**
* Checks if `value` is an empty object, collection, map, or set.
*
* Objects are considered empty if they have no own enumerable string keyed
* properties.
*
* Array-like values such as `arguments` objects, arrays, buffers, strings, or
* jQuery-like collections are considered empty if they have a `length` of `0`.
* Similarly, maps and sets are considered empty if they have a `size` of `0`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is empty, else `false`.
* @example
*
* _.isEmpty(null);
* // => true
*
* _.isEmpty(true);
* // => true
*
* _.isEmpty(1);
* // => true
*
* _.isEmpty([1, 2, 3]);
* // => false
*
* _.isEmpty({ 'a': 1 });
* // => false
*/
function isEmpty(value) {
if (value == null) {
return true;
}
if (isArrayLike(value) &&
(isArray(value) || typeof value == 'string' || typeof value.splice == 'function' ||
isBuffer(value) || isTypedArray(value) || isArguments(value))) {
return !value.length;
}
var tag = getTag(value);
if (tag == mapTag || tag == setTag) {
return !value.size;
}
if (isPrototype(value)) {
return !baseKeys(value).length;
}
for (var key in value) {
if (hasOwnProperty.call(value, key)) {
return false;
}
}
return true;
}
/**
* Performs a deep comparison between two values to determine if they are
* equivalent.
*
* **Note:** This method supports comparing arrays, array buffers, booleans,
* date objects, error objects, maps, numbers, `Object` objects, regexes,
* sets, strings, symbols, and typed arrays. `Object` objects are compared
* by their own, not inherited, enumerable properties. Functions and DOM
* nodes are compared by strict equality, i.e. `===`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if the values are equivalent, else `false`.
* @example
*
* var object = { 'a': 1 };
* var other = { 'a': 1 };
*
* _.isEqual(object, other);
* // => true
*
* object === other;
* // => false
*/
function isEqual(value, other) {
return baseIsEqual(value, other);
}
/**
* This method is like `_.isEqual` except that it accepts `customizer` which
* is invoked to compare values. If `customizer` returns `undefined`, comparisons
* are handled by the method instead. The `customizer` is invoked with up to
* six arguments: (objValue, othValue [, index|key, object, other, stack]).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @param {Function} [customizer] The function to customize comparisons.
* @returns {boolean} Returns `true` if the values are equivalent, else `false`.
* @example
*
* function isGreeting(value) {
* return /^h(?:i|ello)$/.test(value);
* }
*
* function customizer(objValue, othValue) {
* if (isGreeting(objValue) && isGreeting(othValue)) {
* return true;
* }
* }
*
* var array = ['hello', 'goodbye'];
* var other = ['hi', 'goodbye'];
*
* _.isEqualWith(array, other, customizer);
* // => true
*/
function isEqualWith(value, other, customizer) {
customizer = typeof customizer == 'function' ? customizer : undefined;
var result = customizer ? customizer(value, other) : undefined;
return result === undefined ? baseIsEqual(value, other, undefined, customizer) : !!result;
}
/**
* Checks if `value` is an `Error`, `EvalError`, `RangeError`, `ReferenceError`,
* `SyntaxError`, `TypeError`, or `URIError` object.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an error object, else `false`.
* @example
*
* _.isError(new Error);
* // => true
*
* _.isError(Error);
* // => false
*/
function isError(value) {
if (!isObjectLike(value)) {
return false;
}
var tag = baseGetTag(value);
return tag == errorTag || tag == domExcTag ||
(typeof value.message == 'string' && typeof value.name == 'string' && !isPlainObject(value));
}
/**
* Checks if `value` is a finite primitive number.
*
* **Note:** This method is based on
* [`Number.isFinite`](https://mdn.io/Number/isFinite).
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a finite number, else `false`.
* @example
*
* _.isFinite(3);
* // => true
*
* _.isFinite(Number.MIN_VALUE);
* // => true
*
* _.isFinite(Infinity);
* // => false
*
* _.isFinite('3');
* // => false
*/
function isFinite(value) {
return typeof value == 'number' && nativeIsFinite(value);
}
/**
* Checks if `value` is classified as a `Function` object.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a function, else `false`.
* @example
*
* _.isFunction(_);
* // => true
*
* _.isFunction(/abc/);
* // => false
*/
function isFunction(value) {
if (!isObject(value)) {
return false;
}
// The use of `Object#toString` avoids issues with the `typeof` operator
// in Safari 9 which returns 'object' for typed arrays and other constructors.
var tag = baseGetTag(value);
return tag == funcTag || tag == genTag || tag == asyncTag || tag == proxyTag;
}
/**
* Checks if `value` is an integer.
*
* **Note:** This method is based on
* [`Number.isInteger`](https://mdn.io/Number/isInteger).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an integer, else `false`.
* @example
*
* _.isInteger(3);
* // => true
*
* _.isInteger(Number.MIN_VALUE);
* // => false
*
* _.isInteger(Infinity);
* // => false
*
* _.isInteger('3');
* // => false
*/
function isInteger(value) {
return typeof value == 'number' && value == toInteger(value);
}
/**
* Checks if `value` is a valid array-like length.
*
* **Note:** This method is loosely based on
* [`ToLength`](http://ecma-international.org/ecma-262/7.0/#sec-tolength).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a valid length, else `false`.
* @example
*
* _.isLength(3);
* // => true
*
* _.isLength(Number.MIN_VALUE);
* // => false
*
* _.isLength(Infinity);
* // => false
*
* _.isLength('3');
* // => false
*/
function isLength(value) {
return typeof value == 'number' &&
value > -1 && value % 1 == 0 && value <= MAX_SAFE_INTEGER;
}
/**
* Checks if `value` is the
* [language type](http://www.ecma-international.org/ecma-262/7.0/#sec-ecmascript-language-types)
* of `Object`. (e.g. arrays, functions, objects, regexes, `new Number(0)`, and `new String('')`)
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an object, else `false`.
* @example
*
* _.isObject({});
* // => true
*
* _.isObject([1, 2, 3]);
* // => true
*
* _.isObject(_.noop);
* // => true
*
* _.isObject(null);
* // => false
*/
function isObject(value) {
var type = typeof value;
return value != null && (type == 'object' || type == 'function');
}
/**
* Checks if `value` is object-like. A value is object-like if it's not `null`
* and has a `typeof` result of "object".
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is object-like, else `false`.
* @example
*
* _.isObjectLike({});
* // => true
*
* _.isObjectLike([1, 2, 3]);
* // => true
*
* _.isObjectLike(_.noop);
* // => false
*
* _.isObjectLike(null);
* // => false
*/
function isObjectLike(value) {
return value != null && typeof value == 'object';
}
/**
* Checks if `value` is classified as a `Map` object.
*
* @static
* @memberOf _
* @since 4.3.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a map, else `false`.
* @example
*
* _.isMap(new Map);
* // => true
*
* _.isMap(new WeakMap);
* // => false
*/
var isMap = nodeIsMap ? baseUnary(nodeIsMap) : baseIsMap;
/**
* Performs a partial deep comparison between `object` and `source` to
* determine if `object` contains equivalent property values.
*
* **Note:** This method is equivalent to `_.matches` when `source` is
* partially applied.
*
* Partial comparisons will match empty array and empty object `source`
* values against any array or object value, respectively. See `_.isEqual`
* for a list of supported value comparisons.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Lang
* @param {Object} object The object to inspect.
* @param {Object} source The object of property values to match.
* @returns {boolean} Returns `true` if `object` is a match, else `false`.
* @example
*
* var object = { 'a': 1, 'b': 2 };
*
* _.isMatch(object, { 'b': 2 });
* // => true
*
* _.isMatch(object, { 'b': 1 });
* // => false
*/
function isMatch(object, source) {
return object === source || baseIsMatch(object, source, getMatchData(source));
}
/**
* This method is like `_.isMatch` except that it accepts `customizer` which
* is invoked to compare values. If `customizer` returns `undefined`, comparisons
* are handled by the method instead. The `customizer` is invoked with five
* arguments: (objValue, srcValue, index|key, object, source).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {Object} object The object to inspect.
* @param {Object} source The object of property values to match.
* @param {Function} [customizer] The function to customize comparisons.
* @returns {boolean} Returns `true` if `object` is a match, else `false`.
* @example
*
* function isGreeting(value) {
* return /^h(?:i|ello)$/.test(value);
* }
*
* function customizer(objValue, srcValue) {
* if (isGreeting(objValue) && isGreeting(srcValue)) {
* return true;
* }
* }
*
* var object = { 'greeting': 'hello' };
* var source = { 'greeting': 'hi' };
*
* _.isMatchWith(object, source, customizer);
* // => true
*/
function isMatchWith(object, source, customizer) {
customizer = typeof customizer == 'function' ? customizer : undefined;
return baseIsMatch(object, source, getMatchData(source), customizer);
}
/**
* Checks if `value` is `NaN`.
*
* **Note:** This method is based on
* [`Number.isNaN`](https://mdn.io/Number/isNaN) and is not the same as
* global [`isNaN`](https://mdn.io/isNaN) which returns `true` for
* `undefined` and other non-number values.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is `NaN`, else `false`.
* @example
*
* _.isNaN(NaN);
* // => true
*
* _.isNaN(new Number(NaN));
* // => true
*
* isNaN(undefined);
* // => true
*
* _.isNaN(undefined);
* // => false
*/
function isNaN(value) {
// An `NaN` primitive is the only value that is not equal to itself.
// Perform the `toStringTag` check first to avoid errors with some
// ActiveX objects in IE.
return isNumber(value) && value != +value;
}
/**
* Checks if `value` is a pristine native function.
*
* **Note:** This method can't reliably detect native functions in the presence
* of the core-js package because core-js circumvents this kind of detection.
* Despite multiple requests, the core-js maintainer has made it clear: any
* attempt to fix the detection will be obstructed. As a result, we're left
* with little choice but to throw an error. Unfortunately, this also affects
* packages, like [babel-polyfill](https://www.npmjs.com/package/babel-polyfill),
* which rely on core-js.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a native function,
* else `false`.
* @example
*
* _.isNative(Array.prototype.push);
* // => true
*
* _.isNative(_);
* // => false
*/
function isNative(value) {
if (isMaskable(value)) {
throw new Error(CORE_ERROR_TEXT);
}
return baseIsNative(value);
}
/**
* Checks if `value` is `null`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is `null`, else `false`.
* @example
*
* _.isNull(null);
* // => true
*
* _.isNull(void 0);
* // => false
*/
function isNull(value) {
return value === null;
}
/**
* Checks if `value` is `null` or `undefined`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is nullish, else `false`.
* @example
*
* _.isNil(null);
* // => true
*
* _.isNil(void 0);
* // => true
*
* _.isNil(NaN);
* // => false
*/
function isNil(value) {
return value == null;
}
/**
* Checks if `value` is classified as a `Number` primitive or object.
*
* **Note:** To exclude `Infinity`, `-Infinity`, and `NaN`, which are
* classified as numbers, use the `_.isFinite` method.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a number, else `false`.
* @example
*
* _.isNumber(3);
* // => true
*
* _.isNumber(Number.MIN_VALUE);
* // => true
*
* _.isNumber(Infinity);
* // => true
*
* _.isNumber('3');
* // => false
*/
function isNumber(value) {
return typeof value == 'number' ||
(isObjectLike(value) && baseGetTag(value) == numberTag);
}
/**
* Checks if `value` is a plain object, that is, an object created by the
* `Object` constructor or one with a `[[Prototype]]` of `null`.
*
* @static
* @memberOf _
* @since 0.8.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a plain object, else `false`.
* @example
*
* function Foo() {
* this.a = 1;
* }
*
* _.isPlainObject(new Foo);
* // => false
*
* _.isPlainObject([1, 2, 3]);
* // => false
*
* _.isPlainObject({ 'x': 0, 'y': 0 });
* // => true
*
* _.isPlainObject(Object.create(null));
* // => true
*/
function isPlainObject(value) {
if (!isObjectLike(value) || baseGetTag(value) != objectTag) {
return false;
}
var proto = getPrototype(value);
if (proto === null) {
return true;
}
var Ctor = hasOwnProperty.call(proto, 'constructor') && proto.constructor;
return typeof Ctor == 'function' && Ctor instanceof Ctor &&
funcToString.call(Ctor) == objectCtorString;
}
/**
* Checks if `value` is classified as a `RegExp` object.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a regexp, else `false`.
* @example
*
* _.isRegExp(/abc/);
* // => true
*
* _.isRegExp('/abc/');
* // => false
*/
var isRegExp = nodeIsRegExp ? baseUnary(nodeIsRegExp) : baseIsRegExp;
/**
* Checks if `value` is a safe integer. An integer is safe if it's an IEEE-754
* double precision number which isn't the result of a rounded unsafe integer.
*
* **Note:** This method is based on
* [`Number.isSafeInteger`](https://mdn.io/Number/isSafeInteger).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a safe integer, else `false`.
* @example
*
* _.isSafeInteger(3);
* // => true
*
* _.isSafeInteger(Number.MIN_VALUE);
* // => false
*
* _.isSafeInteger(Infinity);
* // => false
*
* _.isSafeInteger('3');
* // => false
*/
function isSafeInteger(value) {
return isInteger(value) && value >= -MAX_SAFE_INTEGER && value <= MAX_SAFE_INTEGER;
}
/**
* Checks if `value` is classified as a `Set` object.
*
* @static
* @memberOf _
* @since 4.3.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a set, else `false`.
* @example
*
* _.isSet(new Set);
* // => true
*
* _.isSet(new WeakSet);
* // => false
*/
var isSet = nodeIsSet ? baseUnary(nodeIsSet) : baseIsSet;
/**
* Checks if `value` is classified as a `String` primitive or object.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a string, else `false`.
* @example
*
* _.isString('abc');
* // => true
*
* _.isString(1);
* // => false
*/
function isString(value) {
return typeof value == 'string' ||
(!isArray(value) && isObjectLike(value) && baseGetTag(value) == stringTag);
}
/**
* Checks if `value` is classified as a `Symbol` primitive or object.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a symbol, else `false`.
* @example
*
* _.isSymbol(Symbol.iterator);
* // => true
*
* _.isSymbol('abc');
* // => false
*/
function isSymbol(value) {
return typeof value == 'symbol' ||
(isObjectLike(value) && baseGetTag(value) == symbolTag);
}
/**
* Checks if `value` is classified as a typed array.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a typed array, else `false`.
* @example
*
* _.isTypedArray(new Uint8Array);
* // => true
*
* _.isTypedArray([]);
* // => false
*/
var isTypedArray = nodeIsTypedArray ? baseUnary(nodeIsTypedArray) : baseIsTypedArray;
/**
* Checks if `value` is `undefined`.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is `undefined`, else `false`.
* @example
*
* _.isUndefined(void 0);
* // => true
*
* _.isUndefined(null);
* // => false
*/
function isUndefined(value) {
return value === undefined;
}
/**
* Checks if `value` is classified as a `WeakMap` object.
*
* @static
* @memberOf _
* @since 4.3.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a weak map, else `false`.
* @example
*
* _.isWeakMap(new WeakMap);
* // => true
*
* _.isWeakMap(new Map);
* // => false
*/
function isWeakMap(value) {
return isObjectLike(value) && getTag(value) == weakMapTag;
}
/**
* Checks if `value` is classified as a `WeakSet` object.
*
* @static
* @memberOf _
* @since 4.3.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a weak set, else `false`.
* @example
*
* _.isWeakSet(new WeakSet);
* // => true
*
* _.isWeakSet(new Set);
* // => false
*/
function isWeakSet(value) {
return isObjectLike(value) && baseGetTag(value) == weakSetTag;
}
/**
* Checks if `value` is less than `other`.
*
* @static
* @memberOf _
* @since 3.9.0
* @category Lang
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if `value` is less than `other`,
* else `false`.
* @see _.gt
* @example
*
* _.lt(1, 3);
* // => true
*
* _.lt(3, 3);
* // => false
*
* _.lt(3, 1);
* // => false
*/
var lt = createRelationalOperation(baseLt);
/**
* Checks if `value` is less than or equal to `other`.
*
* @static
* @memberOf _
* @since 3.9.0
* @category Lang
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if `value` is less than or equal to
* `other`, else `false`.
* @see _.gte
* @example
*
* _.lte(1, 3);
* // => true
*
* _.lte(3, 3);
* // => true
*
* _.lte(3, 1);
* // => false
*/
var lte = createRelationalOperation(function(value, other) {
return value <= other;
});
/**
* Converts `value` to an array.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Lang
* @param {*} value The value to convert.
* @returns {Array} Returns the converted array.
* @example
*
* _.toArray({ 'a': 1, 'b': 2 });
* // => [1, 2]
*
* _.toArray('abc');
* // => ['a', 'b', 'c']
*
* _.toArray(1);
* // => []
*
* _.toArray(null);
* // => []
*/
function toArray(value) {
if (!value) {
return [];
}
if (isArrayLike(value)) {
return isString(value) ? stringToArray(value) : copyArray(value);
}
if (symIterator && value[symIterator]) {
return iteratorToArray(value[symIterator]());
}
var tag = getTag(value),
func = tag == mapTag ? mapToArray : (tag == setTag ? setToArray : values);
return func(value);
}
/**
* Converts `value` to a finite number.
*
* @static
* @memberOf _
* @since 4.12.0
* @category Lang
* @param {*} value The value to convert.
* @returns {number} Returns the converted number.
* @example
*
* _.toFinite(3.2);
* // => 3.2
*
* _.toFinite(Number.MIN_VALUE);
* // => 5e-324
*
* _.toFinite(Infinity);
* // => 1.7976931348623157e+308
*
* _.toFinite('3.2');
* // => 3.2
*/
function toFinite(value) {
if (!value) {
return value === 0 ? value : 0;
}
value = toNumber(value);
if (value === INFINITY || value === -INFINITY) {
var sign = (value < 0 ? -1 : 1);
return sign * MAX_INTEGER;
}
return value === value ? value : 0;
}
/**
* Converts `value` to an integer.
*
* **Note:** This method is loosely based on
* [`ToInteger`](http://www.ecma-international.org/ecma-262/7.0/#sec-tointeger).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to convert.
* @returns {number} Returns the converted integer.
* @example
*
* _.toInteger(3.2);
* // => 3
*
* _.toInteger(Number.MIN_VALUE);
* // => 0
*
* _.toInteger(Infinity);
* // => 1.7976931348623157e+308
*
* _.toInteger('3.2');
* // => 3
*/
function toInteger(value) {
var result = toFinite(value),
remainder = result % 1;
return result === result ? (remainder ? result - remainder : result) : 0;
}
/**
* Converts `value` to an integer suitable for use as the length of an
* array-like object.
*
* **Note:** This method is based on
* [`ToLength`](http://ecma-international.org/ecma-262/7.0/#sec-tolength).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to convert.
* @returns {number} Returns the converted integer.
* @example
*
* _.toLength(3.2);
* // => 3
*
* _.toLength(Number.MIN_VALUE);
* // => 0
*
* _.toLength(Infinity);
* // => 4294967295
*
* _.toLength('3.2');
* // => 3
*/
function toLength(value) {
return value ? baseClamp(toInteger(value), 0, MAX_ARRAY_LENGTH) : 0;
}
/**
* Converts `value` to a number.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to process.
* @returns {number} Returns the number.
* @example
*
* _.toNumber(3.2);
* // => 3.2
*
* _.toNumber(Number.MIN_VALUE);
* // => 5e-324
*
* _.toNumber(Infinity);
* // => Infinity
*
* _.toNumber('3.2');
* // => 3.2
*/
function toNumber(value) {
if (typeof value == 'number') {
return value;
}
if (isSymbol(value)) {
return NAN;
}
if (isObject(value)) {
var other = typeof value.valueOf == 'function' ? value.valueOf() : value;
value = isObject(other) ? (other + '') : other;
}
if (typeof value != 'string') {
return value === 0 ? value : +value;
}
value = baseTrim(value);
var isBinary = reIsBinary.test(value);
return (isBinary || reIsOctal.test(value))
? freeParseInt(value.slice(2), isBinary ? 2 : 8)
: (reIsBadHex.test(value) ? NAN : +value);
}
/**
* Converts `value` to a plain object flattening inherited enumerable string
* keyed properties of `value` to own properties of the plain object.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Lang
* @param {*} value The value to convert.
* @returns {Object} Returns the converted plain object.
* @example
*
* function Foo() {
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.assign({ 'a': 1 }, new Foo);
* // => { 'a': 1, 'b': 2 }
*
* _.assign({ 'a': 1 }, _.toPlainObject(new Foo));
* // => { 'a': 1, 'b': 2, 'c': 3 }
*/
function toPlainObject(value) {
return copyObject(value, keysIn(value));
}
/**
* Converts `value` to a safe integer. A safe integer can be compared and
* represented correctly.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to convert.
* @returns {number} Returns the converted integer.
* @example
*
* _.toSafeInteger(3.2);
* // => 3
*
* _.toSafeInteger(Number.MIN_VALUE);
* // => 0
*
* _.toSafeInteger(Infinity);
* // => 9007199254740991
*
* _.toSafeInteger('3.2');
* // => 3
*/
function toSafeInteger(value) {
return value
? baseClamp(toInteger(value), -MAX_SAFE_INTEGER, MAX_SAFE_INTEGER)
: (value === 0 ? value : 0);
}
/**
* Converts `value` to a string. An empty string is returned for `null`
* and `undefined` values. The sign of `-0` is preserved.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to convert.
* @returns {string} Returns the converted string.
* @example
*
* _.toString(null);
* // => ''
*
* _.toString(-0);
* // => '-0'
*
* _.toString([1, 2, 3]);
* // => '1,2,3'
*/
function toString(value) {
return value == null ? '' : baseToString(value);
}
/*------------------------------------------------------------------------*/
/**
* Assigns own enumerable string keyed properties of source objects to the
* destination object. Source objects are applied from left to right.
* Subsequent sources overwrite property assignments of previous sources.
*
* **Note:** This method mutates `object` and is loosely based on
* [`Object.assign`](https://mdn.io/Object/assign).
*
* @static
* @memberOf _
* @since 0.10.0
* @category Object
* @param {Object} object The destination object.
* @param {...Object} [sources] The source objects.
* @returns {Object} Returns `object`.
* @see _.assignIn
* @example
*
* function Foo() {
* this.a = 1;
* }
*
* function Bar() {
* this.c = 3;
* }
*
* Foo.prototype.b = 2;
* Bar.prototype.d = 4;
*
* _.assign({ 'a': 0 }, new Foo, new Bar);
* // => { 'a': 1, 'c': 3 }
*/
var assign = createAssigner(function(object, source) {
if (isPrototype(source) || isArrayLike(source)) {
copyObject(source, keys(source), object);
return;
}
for (var key in source) {
if (hasOwnProperty.call(source, key)) {
assignValue(object, key, source[key]);
}
}
});
/**
* This method is like `_.assign` except that it iterates over own and
* inherited source properties.
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @alias extend
* @category Object
* @param {Object} object The destination object.
* @param {...Object} [sources] The source objects.
* @returns {Object} Returns `object`.
* @see _.assign
* @example
*
* function Foo() {
* this.a = 1;
* }
*
* function Bar() {
* this.c = 3;
* }
*
* Foo.prototype.b = 2;
* Bar.prototype.d = 4;
*
* _.assignIn({ 'a': 0 }, new Foo, new Bar);
* // => { 'a': 1, 'b': 2, 'c': 3, 'd': 4 }
*/
var assignIn = createAssigner(function(object, source) {
copyObject(source, keysIn(source), object);
});
/**
* This method is like `_.assignIn` except that it accepts `customizer`
* which is invoked to produce the assigned values. If `customizer` returns
* `undefined`, assignment is handled by the method instead. The `customizer`
* is invoked with five arguments: (objValue, srcValue, key, object, source).
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @alias extendWith
* @category Object
* @param {Object} object The destination object.
* @param {...Object} sources The source objects.
* @param {Function} [customizer] The function to customize assigned values.
* @returns {Object} Returns `object`.
* @see _.assignWith
* @example
*
* function customizer(objValue, srcValue) {
* return _.isUndefined(objValue) ? srcValue : objValue;
* }
*
* var defaults = _.partialRight(_.assignInWith, customizer);
*
* defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 });
* // => { 'a': 1, 'b': 2 }
*/
var assignInWith = createAssigner(function(object, source, srcIndex, customizer) {
copyObject(source, keysIn(source), object, customizer);
});
/**
* This method is like `_.assign` except that it accepts `customizer`
* which is invoked to produce the assigned values. If `customizer` returns
* `undefined`, assignment is handled by the method instead. The `customizer`
* is invoked with five arguments: (objValue, srcValue, key, object, source).
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The destination object.
* @param {...Object} sources The source objects.
* @param {Function} [customizer] The function to customize assigned values.
* @returns {Object} Returns `object`.
* @see _.assignInWith
* @example
*
* function customizer(objValue, srcValue) {
* return _.isUndefined(objValue) ? srcValue : objValue;
* }
*
* var defaults = _.partialRight(_.assignWith, customizer);
*
* defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 });
* // => { 'a': 1, 'b': 2 }
*/
var assignWith = createAssigner(function(object, source, srcIndex, customizer) {
copyObject(source, keys(source), object, customizer);
});
/**
* Creates an array of values corresponding to `paths` of `object`.
*
* @static
* @memberOf _
* @since 1.0.0
* @category Object
* @param {Object} object The object to iterate over.
* @param {...(string|string[])} [paths] The property paths to pick.
* @returns {Array} Returns the picked values.
* @example
*
* var object = { 'a': [{ 'b': { 'c': 3 } }, 4] };
*
* _.at(object, ['a[0].b.c', 'a[1]']);
* // => [3, 4]
*/
var at = flatRest(baseAt);
/**
* Creates an object that inherits from the `prototype` object. If a
* `properties` object is given, its own enumerable string keyed properties
* are assigned to the created object.
*
* @static
* @memberOf _
* @since 2.3.0
* @category Object
* @param {Object} prototype The object to inherit from.
* @param {Object} [properties] The properties to assign to the object.
* @returns {Object} Returns the new object.
* @example
*
* function Shape() {
* this.x = 0;
* this.y = 0;
* }
*
* function Circle() {
* Shape.call(this);
* }
*
* Circle.prototype = _.create(Shape.prototype, {
* 'constructor': Circle
* });
*
* var circle = new Circle;
* circle instanceof Circle;
* // => true
*
* circle instanceof Shape;
* // => true
*/
function create(prototype, properties) {
var result = baseCreate(prototype);
return properties == null ? result : baseAssign(result, properties);
}
/**
* Assigns own and inherited enumerable string keyed properties of source
* objects to the destination object for all destination properties that
* resolve to `undefined`. Source objects are applied from left to right.
* Once a property is set, additional values of the same property are ignored.
*
* **Note:** This method mutates `object`.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The destination object.
* @param {...Object} [sources] The source objects.
* @returns {Object} Returns `object`.
* @see _.defaultsDeep
* @example
*
* _.defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 });
* // => { 'a': 1, 'b': 2 }
*/
var defaults = baseRest(function(object, sources) {
object = Object(object);
var index = -1;
var length = sources.length;
var guard = length > 2 ? sources[2] : undefined;
if (guard && isIterateeCall(sources[0], sources[1], guard)) {
length = 1;
}
while (++index < length) {
var source = sources[index];
var props = keysIn(source);
var propsIndex = -1;
var propsLength = props.length;
while (++propsIndex < propsLength) {
var key = props[propsIndex];
var value = object[key];
if (value === undefined ||
(eq(value, objectProto[key]) && !hasOwnProperty.call(object, key))) {
object[key] = source[key];
}
}
}
return object;
});
/**
* This method is like `_.defaults` except that it recursively assigns
* default properties.
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 3.10.0
* @category Object
* @param {Object} object The destination object.
* @param {...Object} [sources] The source objects.
* @returns {Object} Returns `object`.
* @see _.defaults
* @example
*
* _.defaultsDeep({ 'a': { 'b': 2 } }, { 'a': { 'b': 1, 'c': 3 } });
* // => { 'a': { 'b': 2, 'c': 3 } }
*/
var defaultsDeep = baseRest(function(args) {
args.push(undefined, customDefaultsMerge);
return apply(mergeWith, undefined, args);
});
/**
* This method is like `_.find` except that it returns the key of the first
* element `predicate` returns truthy for instead of the element itself.
*
* @static
* @memberOf _
* @since 1.1.0
* @category Object
* @param {Object} object The object to inspect.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {string|undefined} Returns the key of the matched element,
* else `undefined`.
* @example
*
* var users = {
* 'barney': { 'age': 36, 'active': true },
* 'fred': { 'age': 40, 'active': false },
* 'pebbles': { 'age': 1, 'active': true }
* };
*
* _.findKey(users, function(o) { return o.age < 40; });
* // => 'barney' (iteration order is not guaranteed)
*
* // The `_.matches` iteratee shorthand.
* _.findKey(users, { 'age': 1, 'active': true });
* // => 'pebbles'
*
* // The `_.matchesProperty` iteratee shorthand.
* _.findKey(users, ['active', false]);
* // => 'fred'
*
* // The `_.property` iteratee shorthand.
* _.findKey(users, 'active');
* // => 'barney'
*/
function findKey(object, predicate) {
return baseFindKey(object, getIteratee(predicate, 3), baseForOwn);
}
/**
* This method is like `_.findKey` except that it iterates over elements of
* a collection in the opposite order.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Object
* @param {Object} object The object to inspect.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {string|undefined} Returns the key of the matched element,
* else `undefined`.
* @example
*
* var users = {
* 'barney': { 'age': 36, 'active': true },
* 'fred': { 'age': 40, 'active': false },
* 'pebbles': { 'age': 1, 'active': true }
* };
*
* _.findLastKey(users, function(o) { return o.age < 40; });
* // => returns 'pebbles' assuming `_.findKey` returns 'barney'
*
* // The `_.matches` iteratee shorthand.
* _.findLastKey(users, { 'age': 36, 'active': true });
* // => 'barney'
*
* // The `_.matchesProperty` iteratee shorthand.
* _.findLastKey(users, ['active', false]);
* // => 'fred'
*
* // The `_.property` iteratee shorthand.
* _.findLastKey(users, 'active');
* // => 'pebbles'
*/
function findLastKey(object, predicate) {
return baseFindKey(object, getIteratee(predicate, 3), baseForOwnRight);
}
/**
* Iterates over own and inherited enumerable string keyed properties of an
* object and invokes `iteratee` for each property. The iteratee is invoked
* with three arguments: (value, key, object). Iteratee functions may exit
* iteration early by explicitly returning `false`.
*
* @static
* @memberOf _
* @since 0.3.0
* @category Object
* @param {Object} object The object to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Object} Returns `object`.
* @see _.forInRight
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.forIn(new Foo, function(value, key) {
* console.log(key);
* });
* // => Logs 'a', 'b', then 'c' (iteration order is not guaranteed).
*/
function forIn(object, iteratee) {
return object == null
? object
: baseFor(object, getIteratee(iteratee, 3), keysIn);
}
/**
* This method is like `_.forIn` except that it iterates over properties of
* `object` in the opposite order.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Object
* @param {Object} object The object to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Object} Returns `object`.
* @see _.forIn
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.forInRight(new Foo, function(value, key) {
* console.log(key);
* });
* // => Logs 'c', 'b', then 'a' assuming `_.forIn` logs 'a', 'b', then 'c'.
*/
function forInRight(object, iteratee) {
return object == null
? object
: baseForRight(object, getIteratee(iteratee, 3), keysIn);
}
/**
* Iterates over own enumerable string keyed properties of an object and
* invokes `iteratee` for each property. The iteratee is invoked with three
* arguments: (value, key, object). Iteratee functions may exit iteration
* early by explicitly returning `false`.
*
* @static
* @memberOf _
* @since 0.3.0
* @category Object
* @param {Object} object The object to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Object} Returns `object`.
* @see _.forOwnRight
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.forOwn(new Foo, function(value, key) {
* console.log(key);
* });
* // => Logs 'a' then 'b' (iteration order is not guaranteed).
*/
function forOwn(object, iteratee) {
return object && baseForOwn(object, getIteratee(iteratee, 3));
}
/**
* This method is like `_.forOwn` except that it iterates over properties of
* `object` in the opposite order.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Object
* @param {Object} object The object to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Object} Returns `object`.
* @see _.forOwn
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.forOwnRight(new Foo, function(value, key) {
* console.log(key);
* });
* // => Logs 'b' then 'a' assuming `_.forOwn` logs 'a' then 'b'.
*/
function forOwnRight(object, iteratee) {
return object && baseForOwnRight(object, getIteratee(iteratee, 3));
}
/**
* Creates an array of function property names from own enumerable properties
* of `object`.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The object to inspect.
* @returns {Array} Returns the function names.
* @see _.functionsIn
* @example
*
* function Foo() {
* this.a = _.constant('a');
* this.b = _.constant('b');
* }
*
* Foo.prototype.c = _.constant('c');
*
* _.functions(new Foo);
* // => ['a', 'b']
*/
function functions(object) {
return object == null ? [] : baseFunctions(object, keys(object));
}
/**
* Creates an array of function property names from own and inherited
* enumerable properties of `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The object to inspect.
* @returns {Array} Returns the function names.
* @see _.functions
* @example
*
* function Foo() {
* this.a = _.constant('a');
* this.b = _.constant('b');
* }
*
* Foo.prototype.c = _.constant('c');
*
* _.functionsIn(new Foo);
* // => ['a', 'b', 'c']
*/
function functionsIn(object) {
return object == null ? [] : baseFunctions(object, keysIn(object));
}
/**
* Gets the value at `path` of `object`. If the resolved value is
* `undefined`, the `defaultValue` is returned in its place.
*
* @static
* @memberOf _
* @since 3.7.0
* @category Object
* @param {Object} object The object to query.
* @param {Array|string} path The path of the property to get.
* @param {*} [defaultValue] The value returned for `undefined` resolved values.
* @returns {*} Returns the resolved value.
* @example
*
* var object = { 'a': [{ 'b': { 'c': 3 } }] };
*
* _.get(object, 'a[0].b.c');
* // => 3
*
* _.get(object, ['a', '0', 'b', 'c']);
* // => 3
*
* _.get(object, 'a.b.c', 'default');
* // => 'default'
*/
function get(object, path, defaultValue) {
var result = object == null ? undefined : baseGet(object, path);
return result === undefined ? defaultValue : result;
}
/**
* Checks if `path` is a direct property of `object`.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The object to query.
* @param {Array|string} path The path to check.
* @returns {boolean} Returns `true` if `path` exists, else `false`.
* @example
*
* var object = { 'a': { 'b': 2 } };
* var other = _.create({ 'a': _.create({ 'b': 2 }) });
*
* _.has(object, 'a');
* // => true
*
* _.has(object, 'a.b');
* // => true
*
* _.has(object, ['a', 'b']);
* // => true
*
* _.has(other, 'a');
* // => false
*/
function has(object, path) {
return object != null && hasPath(object, path, baseHas);
}
/**
* Checks if `path` is a direct or inherited property of `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The object to query.
* @param {Array|string} path The path to check.
* @returns {boolean} Returns `true` if `path` exists, else `false`.
* @example
*
* var object = _.create({ 'a': _.create({ 'b': 2 }) });
*
* _.hasIn(object, 'a');
* // => true
*
* _.hasIn(object, 'a.b');
* // => true
*
* _.hasIn(object, ['a', 'b']);
* // => true
*
* _.hasIn(object, 'b');
* // => false
*/
function hasIn(object, path) {
return object != null && hasPath(object, path, baseHasIn);
}
/**
* Creates an object composed of the inverted keys and values of `object`.
* If `object` contains duplicate values, subsequent values overwrite
* property assignments of previous values.
*
* @static
* @memberOf _
* @since 0.7.0
* @category Object
* @param {Object} object The object to invert.
* @returns {Object} Returns the new inverted object.
* @example
*
* var object = { 'a': 1, 'b': 2, 'c': 1 };
*
* _.invert(object);
* // => { '1': 'c', '2': 'b' }
*/
var invert = createInverter(function(result, value, key) {
if (value != null &&
typeof value.toString != 'function') {
value = nativeObjectToString.call(value);
}
result[value] = key;
}, constant(identity));
/**
* This method is like `_.invert` except that the inverted object is generated
* from the results of running each element of `object` thru `iteratee`. The
* corresponding inverted value of each inverted key is an array of keys
* responsible for generating the inverted value. The iteratee is invoked
* with one argument: (value).
*
* @static
* @memberOf _
* @since 4.1.0
* @category Object
* @param {Object} object The object to invert.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {Object} Returns the new inverted object.
* @example
*
* var object = { 'a': 1, 'b': 2, 'c': 1 };
*
* _.invertBy(object);
* // => { '1': ['a', 'c'], '2': ['b'] }
*
* _.invertBy(object, function(value) {
* return 'group' + value;
* });
* // => { 'group1': ['a', 'c'], 'group2': ['b'] }
*/
var invertBy = createInverter(function(result, value, key) {
if (value != null &&
typeof value.toString != 'function') {
value = nativeObjectToString.call(value);
}
if (hasOwnProperty.call(result, value)) {
result[value].push(key);
} else {
result[value] = [key];
}
}, getIteratee);
/**
* Invokes the method at `path` of `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The object to query.
* @param {Array|string} path The path of the method to invoke.
* @param {...*} [args] The arguments to invoke the method with.
* @returns {*} Returns the result of the invoked method.
* @example
*
* var object = { 'a': [{ 'b': { 'c': [1, 2, 3, 4] } }] };
*
* _.invoke(object, 'a[0].b.c.slice', 1, 3);
* // => [2, 3]
*/
var invoke = baseRest(baseInvoke);
/**
* Creates an array of the own enumerable property names of `object`.
*
* **Note:** Non-object values are coerced to objects. See the
* [ES spec](http://ecma-international.org/ecma-262/7.0/#sec-object.keys)
* for more details.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property names.
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.keys(new Foo);
* // => ['a', 'b'] (iteration order is not guaranteed)
*
* _.keys('hi');
* // => ['0', '1']
*/
function keys(object) {
return isArrayLike(object) ? arrayLikeKeys(object) : baseKeys(object);
}
/**
* Creates an array of the own and inherited enumerable property names of `object`.
*
* **Note:** Non-object values are coerced to objects.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Object
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property names.
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.keysIn(new Foo);
* // => ['a', 'b', 'c'] (iteration order is not guaranteed)
*/
function keysIn(object) {
return isArrayLike(object) ? arrayLikeKeys(object, true) : baseKeysIn(object);
}
/**
* The opposite of `_.mapValues`; this method creates an object with the
* same values as `object` and keys generated by running each own enumerable
* string keyed property of `object` thru `iteratee`. The iteratee is invoked
* with three arguments: (value, key, object).
*
* @static
* @memberOf _
* @since 3.8.0
* @category Object
* @param {Object} object The object to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Object} Returns the new mapped object.
* @see _.mapValues
* @example
*
* _.mapKeys({ 'a': 1, 'b': 2 }, function(value, key) {
* return key + value;
* });
* // => { 'a1': 1, 'b2': 2 }
*/
function mapKeys(object, iteratee) {
var result = {};
iteratee = getIteratee(iteratee, 3);
baseForOwn(object, function(value, key, object) {
baseAssignValue(result, iteratee(value, key, object), value);
});
return result;
}
/**
* Creates an object with the same keys as `object` and values generated
* by running each own enumerable string keyed property of `object` thru
* `iteratee`. The iteratee is invoked with three arguments:
* (value, key, object).
*
* @static
* @memberOf _
* @since 2.4.0
* @category Object
* @param {Object} object The object to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Object} Returns the new mapped object.
* @see _.mapKeys
* @example
*
* var users = {
* 'fred': { 'user': 'fred', 'age': 40 },
* 'pebbles': { 'user': 'pebbles', 'age': 1 }
* };
*
* _.mapValues(users, function(o) { return o.age; });
* // => { 'fred': 40, 'pebbles': 1 } (iteration order is not guaranteed)
*
* // The `_.property` iteratee shorthand.
* _.mapValues(users, 'age');
* // => { 'fred': 40, 'pebbles': 1 } (iteration order is not guaranteed)
*/
function mapValues(object, iteratee) {
var result = {};
iteratee = getIteratee(iteratee, 3);
baseForOwn(object, function(value, key, object) {
baseAssignValue(result, key, iteratee(value, key, object));
});
return result;
}
/**
* This method is like `_.assign` except that it recursively merges own and
* inherited enumerable string keyed properties of source objects into the
* destination object. Source properties that resolve to `undefined` are
* skipped if a destination value exists. Array and plain object properties
* are merged recursively. Other objects and value types are overridden by
* assignment. Source objects are applied from left to right. Subsequent
* sources overwrite property assignments of previous sources.
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 0.5.0
* @category Object
* @param {Object} object The destination object.
* @param {...Object} [sources] The source objects.
* @returns {Object} Returns `object`.
* @example
*
* var object = {
* 'a': [{ 'b': 2 }, { 'd': 4 }]
* };
*
* var other = {
* 'a': [{ 'c': 3 }, { 'e': 5 }]
* };
*
* _.merge(object, other);
* // => { 'a': [{ 'b': 2, 'c': 3 }, { 'd': 4, 'e': 5 }] }
*/
var merge = createAssigner(function(object, source, srcIndex) {
baseMerge(object, source, srcIndex);
});
/**
* This method is like `_.merge` except that it accepts `customizer` which
* is invoked to produce the merged values of the destination and source
* properties. If `customizer` returns `undefined`, merging is handled by the
* method instead. The `customizer` is invoked with six arguments:
* (objValue, srcValue, key, object, source, stack).
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The destination object.
* @param {...Object} sources The source objects.
* @param {Function} customizer The function to customize assigned values.
* @returns {Object} Returns `object`.
* @example
*
* function customizer(objValue, srcValue) {
* if (_.isArray(objValue)) {
* return objValue.concat(srcValue);
* }
* }
*
* var object = { 'a': [1], 'b': [2] };
* var other = { 'a': [3], 'b': [4] };
*
* _.mergeWith(object, other, customizer);
* // => { 'a': [1, 3], 'b': [2, 4] }
*/
var mergeWith = createAssigner(function(object, source, srcIndex, customizer) {
baseMerge(object, source, srcIndex, customizer);
});
/**
* The opposite of `_.pick`; this method creates an object composed of the
* own and inherited enumerable property paths of `object` that are not omitted.
*
* **Note:** This method is considerably slower than `_.pick`.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The source object.
* @param {...(string|string[])} [paths] The property paths to omit.
* @returns {Object} Returns the new object.
* @example
*
* var object = { 'a': 1, 'b': '2', 'c': 3 };
*
* _.omit(object, ['a', 'c']);
* // => { 'b': '2' }
*/
var omit = flatRest(function(object, paths) {
var result = {};
if (object == null) {
return result;
}
var isDeep = false;
paths = arrayMap(paths, function(path) {
path = castPath(path, object);
isDeep || (isDeep = path.length > 1);
return path;
});
copyObject(object, getAllKeysIn(object), result);
if (isDeep) {
result = baseClone(result, CLONE_DEEP_FLAG | CLONE_FLAT_FLAG | CLONE_SYMBOLS_FLAG, customOmitClone);
}
var length = paths.length;
while (length--) {
baseUnset(result, paths[length]);
}
return result;
});
/**
* The opposite of `_.pickBy`; this method creates an object composed of
* the own and inherited enumerable string keyed properties of `object` that
* `predicate` doesn't return truthy for. The predicate is invoked with two
* arguments: (value, key).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The source object.
* @param {Function} [predicate=_.identity] The function invoked per property.
* @returns {Object} Returns the new object.
* @example
*
* var object = { 'a': 1, 'b': '2', 'c': 3 };
*
* _.omitBy(object, _.isNumber);
* // => { 'b': '2' }
*/
function omitBy(object, predicate) {
return pickBy(object, negate(getIteratee(predicate)));
}
/**
* Creates an object composed of the picked `object` properties.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The source object.
* @param {...(string|string[])} [paths] The property paths to pick.
* @returns {Object} Returns the new object.
* @example
*
* var object = { 'a': 1, 'b': '2', 'c': 3 };
*
* _.pick(object, ['a', 'c']);
* // => { 'a': 1, 'c': 3 }
*/
var pick = flatRest(function(object, paths) {
return object == null ? {} : basePick(object, paths);
});
/**
* Creates an object composed of the `object` properties `predicate` returns
* truthy for. The predicate is invoked with two arguments: (value, key).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The source object.
* @param {Function} [predicate=_.identity] The function invoked per property.
* @returns {Object} Returns the new object.
* @example
*
* var object = { 'a': 1, 'b': '2', 'c': 3 };
*
* _.pickBy(object, _.isNumber);
* // => { 'a': 1, 'c': 3 }
*/
function pickBy(object, predicate) {
if (object == null) {
return {};
}
var props = arrayMap(getAllKeysIn(object), function(prop) {
return [prop];
});
predicate = getIteratee(predicate);
return basePickBy(object, props, function(value, path) {
return predicate(value, path[0]);
});
}
/**
* This method is like `_.get` except that if the resolved value is a
* function it's invoked with the `this` binding of its parent object and
* its result is returned.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The object to query.
* @param {Array|string} path The path of the property to resolve.
* @param {*} [defaultValue] The value returned for `undefined` resolved values.
* @returns {*} Returns the resolved value.
* @example
*
* var object = { 'a': [{ 'b': { 'c1': 3, 'c2': _.constant(4) } }] };
*
* _.result(object, 'a[0].b.c1');
* // => 3
*
* _.result(object, 'a[0].b.c2');
* // => 4
*
* _.result(object, 'a[0].b.c3', 'default');
* // => 'default'
*
* _.result(object, 'a[0].b.c3', _.constant('default'));
* // => 'default'
*/
function result(object, path, defaultValue) {
path = castPath(path, object);
var index = -1,
length = path.length;
// Ensure the loop is entered when path is empty.
if (!length) {
length = 1;
object = undefined;
}
while (++index < length) {
var value = object == null ? undefined : object[toKey(path[index])];
if (value === undefined) {
index = length;
value = defaultValue;
}
object = isFunction(value) ? value.call(object) : value;
}
return object;
}
/**
* Sets the value at `path` of `object`. If a portion of `path` doesn't exist,
* it's created. Arrays are created for missing index properties while objects
* are created for all other missing properties. Use `_.setWith` to customize
* `path` creation.
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 3.7.0
* @category Object
* @param {Object} object The object to modify.
* @param {Array|string} path The path of the property to set.
* @param {*} value The value to set.
* @returns {Object} Returns `object`.
* @example
*
* var object = { 'a': [{ 'b': { 'c': 3 } }] };
*
* _.set(object, 'a[0].b.c', 4);
* console.log(object.a[0].b.c);
* // => 4
*
* _.set(object, ['x', '0', 'y', 'z'], 5);
* console.log(object.x[0].y.z);
* // => 5
*/
function set(object, path, value) {
return object == null ? object : baseSet(object, path, value);
}
/**
* This method is like `_.set` except that it accepts `customizer` which is
* invoked to produce the objects of `path`. If `customizer` returns `undefined`
* path creation is handled by the method instead. The `customizer` is invoked
* with three arguments: (nsValue, key, nsObject).
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The object to modify.
* @param {Array|string} path The path of the property to set.
* @param {*} value The value to set.
* @param {Function} [customizer] The function to customize assigned values.
* @returns {Object} Returns `object`.
* @example
*
* var object = {};
*
* _.setWith(object, '[0][1]', 'a', Object);
* // => { '0': { '1': 'a' } }
*/
function setWith(object, path, value, customizer) {
customizer = typeof customizer == 'function' ? customizer : undefined;
return object == null ? object : baseSet(object, path, value, customizer);
}
/**
* Creates an array of own enumerable string keyed-value pairs for `object`
* which can be consumed by `_.fromPairs`. If `object` is a map or set, its
* entries are returned.
*
* @static
* @memberOf _
* @since 4.0.0
* @alias entries
* @category Object
* @param {Object} object The object to query.
* @returns {Array} Returns the key-value pairs.
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.toPairs(new Foo);
* // => [['a', 1], ['b', 2]] (iteration order is not guaranteed)
*/
var toPairs = createToPairs(keys);
/**
* Creates an array of own and inherited enumerable string keyed-value pairs
* for `object` which can be consumed by `_.fromPairs`. If `object` is a map
* or set, its entries are returned.
*
* @static
* @memberOf _
* @since 4.0.0
* @alias entriesIn
* @category Object
* @param {Object} object The object to query.
* @returns {Array} Returns the key-value pairs.
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.toPairsIn(new Foo);
* // => [['a', 1], ['b', 2], ['c', 3]] (iteration order is not guaranteed)
*/
var toPairsIn = createToPairs(keysIn);
/**
* An alternative to `_.reduce`; this method transforms `object` to a new
* `accumulator` object which is the result of running each of its own
* enumerable string keyed properties thru `iteratee`, with each invocation
* potentially mutating the `accumulator` object. If `accumulator` is not
* provided, a new object with the same `[[Prototype]]` will be used. The
* iteratee is invoked with four arguments: (accumulator, value, key, object).
* Iteratee functions may exit iteration early by explicitly returning `false`.
*
* @static
* @memberOf _
* @since 1.3.0
* @category Object
* @param {Object} object The object to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @param {*} [accumulator] The custom accumulator value.
* @returns {*} Returns the accumulated value.
* @example
*
* _.transform([2, 3, 4], function(result, n) {
* result.push(n *= n);
* return n % 2 == 0;
* }, []);
* // => [4, 9]
*
* _.transform({ 'a': 1, 'b': 2, 'c': 1 }, function(result, value, key) {
* (result[value] || (result[value] = [])).push(key);
* }, {});
* // => { '1': ['a', 'c'], '2': ['b'] }
*/
function transform(object, iteratee, accumulator) {
var isArr = isArray(object),
isArrLike = isArr || isBuffer(object) || isTypedArray(object);
iteratee = getIteratee(iteratee, 4);
if (accumulator == null) {
var Ctor = object && object.constructor;
if (isArrLike) {
accumulator = isArr ? new Ctor : [];
}
else if (isObject(object)) {
accumulator = isFunction(Ctor) ? baseCreate(getPrototype(object)) : {};
}
else {
accumulator = {};
}
}
(isArrLike ? arrayEach : baseForOwn)(object, function(value, index, object) {
return iteratee(accumulator, value, index, object);
});
return accumulator;
}
/**
* Removes the property at `path` of `object`.
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The object to modify.
* @param {Array|string} path The path of the property to unset.
* @returns {boolean} Returns `true` if the property is deleted, else `false`.
* @example
*
* var object = { 'a': [{ 'b': { 'c': 7 } }] };
* _.unset(object, 'a[0].b.c');
* // => true
*
* console.log(object);
* // => { 'a': [{ 'b': {} }] };
*
* _.unset(object, ['a', '0', 'b', 'c']);
* // => true
*
* console.log(object);
* // => { 'a': [{ 'b': {} }] };
*/
function unset(object, path) {
return object == null ? true : baseUnset(object, path);
}
/**
* This method is like `_.set` except that accepts `updater` to produce the
* value to set. Use `_.updateWith` to customize `path` creation. The `updater`
* is invoked with one argument: (value).
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.6.0
* @category Object
* @param {Object} object The object to modify.
* @param {Array|string} path The path of the property to set.
* @param {Function} updater The function to produce the updated value.
* @returns {Object} Returns `object`.
* @example
*
* var object = { 'a': [{ 'b': { 'c': 3 } }] };
*
* _.update(object, 'a[0].b.c', function(n) { return n * n; });
* console.log(object.a[0].b.c);
* // => 9
*
* _.update(object, 'x[0].y.z', function(n) { return n ? n + 1 : 0; });
* console.log(object.x[0].y.z);
* // => 0
*/
function update(object, path, updater) {
  // A nullish object is returned untouched.
  if (object == null) {
    return object;
  }
  return baseUpdate(object, path, castFunction(updater));
}
/**
* This method is like `_.update` except that it accepts `customizer` which is
* invoked to produce the objects of `path`. If `customizer` returns `undefined`
* path creation is handled by the method instead. The `customizer` is invoked
* with three arguments: (nsValue, key, nsObject).
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.6.0
* @category Object
* @param {Object} object The object to modify.
* @param {Array|string} path The path of the property to set.
* @param {Function} updater The function to produce the updated value.
* @param {Function} [customizer] The function to customize assigned values.
* @returns {Object} Returns `object`.
* @example
*
* var object = {};
*
* _.updateWith(object, '[0][1]', _.constant('a'), Object);
* // => { '0': { '1': 'a' } }
*/
function updateWith(object, path, updater, customizer) {
  // Only a real function is honored as a customizer.
  if (typeof customizer != 'function') {
    customizer = undefined;
  }
  if (object == null) {
    return object;
  }
  return baseUpdate(object, path, castFunction(updater), customizer);
}
/**
* Creates an array of the own enumerable string keyed property values of `object`.
*
* **Note:** Non-object values are coerced to objects.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property values.
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.values(new Foo);
* // => [1, 2] (iteration order is not guaranteed)
*
* _.values('hi');
* // => ['h', 'i']
*/
function values(object) {
  // Nullish input yields an empty array instead of throwing.
  if (object == null) {
    return [];
  }
  return baseValues(object, keys(object));
}
/**
* Creates an array of the own and inherited enumerable string keyed property
* values of `object`.
*
* **Note:** Non-object values are coerced to objects.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Object
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property values.
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.valuesIn(new Foo);
* // => [1, 2, 3] (iteration order is not guaranteed)
*/
function valuesIn(object) {
  // Nullish input yields an empty array; otherwise include inherited keys.
  if (object == null) {
    return [];
  }
  return baseValues(object, keysIn(object));
}
/*------------------------------------------------------------------------*/
/**
* Clamps `number` within the inclusive `lower` and `upper` bounds.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Number
* @param {number} number The number to clamp.
* @param {number} [lower] The lower bound.
* @param {number} upper The upper bound.
* @returns {number} Returns the clamped number.
* @example
*
* _.clamp(-10, -5, 5);
* // => -5
*
* _.clamp(10, -5, 5);
* // => 5
*/
function clamp(number, lower, upper) {
  // With two arguments, the second is the upper bound.
  if (upper === undefined) {
    upper = lower;
    lower = undefined;
  }
  var hi = upper;
  if (hi !== undefined) {
    hi = toNumber(hi);
    // Normalize NaN bounds to 0.
    if (hi !== hi) {
      hi = 0;
    }
  }
  var lo = lower;
  if (lo !== undefined) {
    lo = toNumber(lo);
    if (lo !== lo) {
      lo = 0;
    }
  }
  return baseClamp(toNumber(number), lo, hi);
}
/**
* Checks if `n` is between `start` and up to, but not including, `end`. If
* `end` is not specified, it's set to `start` with `start` then set to `0`.
* If `start` is greater than `end` the params are swapped to support
* negative ranges.
*
* @static
* @memberOf _
* @since 3.3.0
* @category Number
* @param {number} number The number to check.
* @param {number} [start=0] The start of the range.
* @param {number} end The end of the range.
* @returns {boolean} Returns `true` if `number` is in the range, else `false`.
* @see _.range, _.rangeRight
* @example
*
* _.inRange(3, 2, 4);
* // => true
*
* _.inRange(4, 8);
* // => true
*
* _.inRange(4, 2);
* // => false
*
* _.inRange(2, 2);
* // => false
*
* _.inRange(1.2, 2);
* // => true
*
* _.inRange(5.2, 4);
* // => false
*
* _.inRange(-3, -2, -6);
* // => true
*/
function inRange(number, start, end) {
  var lower = toFinite(start);
  var upper;
  // With two arguments, treat `start` as the end of a [0, start) range.
  if (end === undefined) {
    upper = lower;
    lower = 0;
  } else {
    upper = toFinite(end);
  }
  return baseInRange(toNumber(number), lower, upper);
}
/**
* Produces a random number between the inclusive `lower` and `upper` bounds.
* If only one argument is provided a number between `0` and the given number
* is returned. If `floating` is `true`, or either `lower` or `upper` are
* floats, a floating-point number is returned instead of an integer.
*
* **Note:** JavaScript follows the IEEE-754 standard for resolving
* floating-point values which can produce unexpected results.
*
* @static
* @memberOf _
* @since 0.7.0
* @category Number
* @param {number} [lower=0] The lower bound.
* @param {number} [upper=1] The upper bound.
* @param {boolean} [floating] Specify returning a floating-point number.
* @returns {number} Returns the random number.
* @example
*
* _.random(0, 5);
* // => an integer between 0 and 5
*
* _.random(5);
* // => also an integer between 0 and 5
*
* _.random(5, true);
* // => a floating-point number between 0 and 5
*
* _.random(1.2, 5.2);
* // => a floating-point number between 1.2 and 5.2
*/
function random(lower, upper, floating) {
// When invoked as an iteratee (value, index, collection), drop the extra
// positional arguments so only `lower` is honored.
if (floating && typeof floating != 'boolean' && isIterateeCall(lower, upper, floating)) {
upper = floating = undefined;
}
// Allow `floating` to be passed as the first or second argument.
if (floating === undefined) {
if (typeof upper == 'boolean') {
floating = upper;
upper = undefined;
}
else if (typeof lower == 'boolean') {
floating = lower;
lower = undefined;
}
}
// Default to the [0, 1] range when no bounds are given.
if (lower === undefined && upper === undefined) {
lower = 0;
upper = 1;
}
else {
lower = toFinite(lower);
if (upper === undefined) {
// Single numeric argument: range is [0, lower].
upper = lower;
lower = 0;
} else {
upper = toFinite(upper);
}
}
// Swap so `lower` is always the smaller bound (supports negative ranges).
if (lower > upper) {
var temp = lower;
lower = upper;
upper = temp;
}
// Produce a float when requested or when either bound is fractional.
if (floating || lower % 1 || upper % 1) {
var rand = nativeRandom();
// Widen the span by an epsilon scaled to `rand`'s decimal precision so
// the upper bound is reachable, then clamp the result back to `upper`.
return nativeMin(lower + (rand * (upper - lower + freeParseFloat('1e-' + ((rand + '').length - 1)))), upper);
}
return baseRandom(lower, upper);
}
/*------------------------------------------------------------------------*/
/**
* Converts `string` to [camel case](https://en.wikipedia.org/wiki/CamelCase).
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the camel cased string.
* @example
*
* _.camelCase('Foo Bar');
* // => 'fooBar'
*
* _.camelCase('--foo-bar--');
* // => 'fooBar'
*
* _.camelCase('__FOO_BAR__');
* // => 'fooBar'
*/
var camelCase = createCompounder(function(result, word, index) {
  // Every word is lower-cased; all but the first are then capitalized.
  var lowered = word.toLowerCase();
  return result + (index ? capitalize(lowered) : lowered);
});
/**
* Converts the first character of `string` to upper case and the remaining
* to lower case.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to capitalize.
* @returns {string} Returns the capitalized string.
* @example
*
* _.capitalize('FRED');
* // => 'Fred'
*/
function capitalize(string) {
  // Lower-case everything first, then raise only the first character.
  var lowered = toString(string).toLowerCase();
  return upperFirst(lowered);
}
/**
* Deburrs `string` by converting
* [Latin-1 Supplement](https://en.wikipedia.org/wiki/Latin-1_Supplement_(Unicode_block)#Character_table)
* and [Latin Extended-A](https://en.wikipedia.org/wiki/Latin_Extended-A)
* letters to basic Latin letters and removing
* [combining diacritical marks](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks).
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to deburr.
* @returns {string} Returns the deburred string.
* @example
*
* _.deburr('déjà vu');
* // => 'deja vu'
*/
function deburr(string) {
  var str = toString(string);
  if (!str) {
    return str;
  }
  // Map Latin-1/Extended-A letters to basic Latin, then drop combining marks.
  return str.replace(reLatin, deburrLetter).replace(reComboMark, '');
}
/**
* Checks if `string` ends with the given target string.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to inspect.
* @param {string} [target] The string to search for.
* @param {number} [position=string.length] The position to search up to.
* @returns {boolean} Returns `true` if `string` ends with `target`,
* else `false`.
* @example
*
* _.endsWith('abc', 'c');
* // => true
*
* _.endsWith('abc', 'b');
* // => false
*
* _.endsWith('abc', 'b', 2);
* // => true
*/
function endsWith(string, target, position) {
  string = toString(string);
  target = baseToString(target);
  var length = string.length;
  // Default the search end to the full string; otherwise clamp into range.
  if (position === undefined) {
    position = length;
  } else {
    position = baseClamp(toInteger(position), 0, length);
  }
  var end = position;
  var start = end - target.length;
  return start >= 0 && string.slice(start, end) == target;
}
/**
* Converts the characters "&", "<", ">", '"', and "'" in `string` to their
* corresponding HTML entities.
*
* **Note:** No other characters are escaped. To escape additional
* characters use a third-party library like [_he_](https://mths.be/he).
*
* Though the ">" character is escaped for symmetry, characters like
* ">" and "/" don't need escaping in HTML and have no special meaning
* unless they're part of a tag or unquoted attribute value. See
* [Mathias Bynens's article](https://mathiasbynens.be/notes/ambiguous-ampersands)
* (under "semi-related fun fact") for more details.
*
* When working with HTML you should always
* [quote attribute values](http://wonko.com/post/html-escaping) to reduce
* XSS vectors.
*
* @static
* @since 0.1.0
* @memberOf _
* @category String
* @param {string} [string=''] The string to escape.
* @returns {string} Returns the escaped string.
* @example
*
* _.escape('fred, barney, & pebbles');
* // => 'fred, barney, & pebbles'
*/
function escape(string) {
  string = toString(string);
  // Only run the replace when an escapable character is present.
  if (string && reHasUnescapedHtml.test(string)) {
    return string.replace(reUnescapedHtml, escapeHtmlChar);
  }
  return string;
}
/**
* Escapes the `RegExp` special characters "^", "$", "\", ".", "*", "+",
* "?", "(", ")", "[", "]", "{", "}", and "|" in `string`.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to escape.
* @returns {string} Returns the escaped string.
* @example
*
* _.escapeRegExp('[lodash](https://lodash.com/)');
* // => '\[lodash\]\(https://lodash\.com/\)'
*/
function escapeRegExp(string) {
  string = toString(string);
  // Only run the replace when a RegExp special character is present.
  if (string && reHasRegExpChar.test(string)) {
    return string.replace(reRegExpChar, '\\$&');
  }
  return string;
}
/**
* Converts `string` to
* [kebab case](https://en.wikipedia.org/wiki/Letter_case#Special_case_styles).
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the kebab cased string.
* @example
*
* _.kebabCase('Foo Bar');
* // => 'foo-bar'
*
* _.kebabCase('fooBar');
* // => 'foo-bar'
*
* _.kebabCase('__FOO_BAR__');
* // => 'foo-bar'
*/
var kebabCase = createCompounder(function(result, word, index) {
  // Join lower-cased words with hyphens (none before the first word).
  var glue = index ? '-' : '';
  return result + glue + word.toLowerCase();
});
/**
* Converts `string`, as space separated words, to lower case.
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the lower cased string.
* @example
*
* _.lowerCase('--Foo-Bar--');
* // => 'foo bar'
*
* _.lowerCase('fooBar');
* // => 'foo bar'
*
* _.lowerCase('__FOO_BAR__');
* // => 'foo bar'
*/
var lowerCase = createCompounder(function(result, word, index) {
  // Join lower-cased words with single spaces (none before the first word).
  var glue = index ? ' ' : '';
  return result + glue + word.toLowerCase();
});
/**
* Converts the first character of `string` to lower case.
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the converted string.
* @example
*
* _.lowerFirst('Fred');
* // => 'fred'
*
* _.lowerFirst('FRED');
* // => 'fRED'
*/
// Built by the case-first factory: converts only the first character,
// leaving the rest of the string untouched.
var lowerFirst = createCaseFirst('toLowerCase');
/**
* Pads `string` on the left and right sides if it's shorter than `length`.
* Padding characters are truncated if they can't be evenly divided by `length`.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to pad.
* @param {number} [length=0] The padding length.
* @param {string} [chars=' '] The string used as padding.
* @returns {string} Returns the padded string.
* @example
*
* _.pad('abc', 8);
* // => ' abc '
*
* _.pad('abc', 8, '_-');
* // => '_-abc_-_'
*
* _.pad('abc', 3);
* // => 'abc'
*/
function pad(string, length, chars) {
  string = toString(string);
  length = toInteger(length);
  // `stringSize` counts Unicode symbols, not code units.
  var strLength = length ? stringSize(string) : 0;
  if (!length || strLength >= length) {
    return string;
  }
  // Split the shortage across both sides; the right side gets the extra char.
  var half = (length - strLength) / 2;
  var left = createPadding(nativeFloor(half), chars);
  var right = createPadding(nativeCeil(half), chars);
  return left + string + right;
}
/**
* Pads `string` on the right side if it's shorter than `length`. Padding
* characters are truncated if they exceed `length`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to pad.
* @param {number} [length=0] The padding length.
* @param {string} [chars=' '] The string used as padding.
* @returns {string} Returns the padded string.
* @example
*
* _.padEnd('abc', 6);
* // => 'abc '
*
* _.padEnd('abc', 6, '_-');
* // => 'abc_-_'
*
* _.padEnd('abc', 3);
* // => 'abc'
*/
function padEnd(string, length, chars) {
  string = toString(string);
  length = toInteger(length);
  var strLength = length ? stringSize(string) : 0;
  // Already long enough (or no target length): return unchanged.
  if (!length || strLength >= length) {
    return string;
  }
  return string + createPadding(length - strLength, chars);
}
/**
* Pads `string` on the left side if it's shorter than `length`. Padding
* characters are truncated if they exceed `length`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to pad.
* @param {number} [length=0] The padding length.
* @param {string} [chars=' '] The string used as padding.
* @returns {string} Returns the padded string.
* @example
*
* _.padStart('abc', 6);
* // => ' abc'
*
* _.padStart('abc', 6, '_-');
* // => '_-_abc'
*
* _.padStart('abc', 3);
* // => 'abc'
*/
function padStart(string, length, chars) {
  string = toString(string);
  length = toInteger(length);
  var strLength = length ? stringSize(string) : 0;
  // Already long enough (or no target length): return unchanged.
  if (!length || strLength >= length) {
    return string;
  }
  return createPadding(length - strLength, chars) + string;
}
/**
* Converts `string` to an integer of the specified radix. If `radix` is
* `undefined` or `0`, a `radix` of `10` is used unless `value` is a
* hexadecimal, in which case a `radix` of `16` is used.
*
* **Note:** This method aligns with the
* [ES5 implementation](https://es5.github.io/#x15.1.2.2) of `parseInt`.
*
* @static
* @memberOf _
* @since 1.1.0
* @category String
* @param {string} string The string to convert.
* @param {number} [radix=10] The radix to interpret `value` by.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {number} Returns the converted integer.
* @example
*
* _.parseInt('08');
* // => 8
*
* _.map(['6', '08', '10'], _.parseInt);
* // => [6, 8, 10]
*/
function parseInt(string, radix, guard) {
  // When used as an iteratee (or radix is nullish), let the native parser
  // pick the radix (10, or 16 for hex prefixes).
  if (guard || radix == null) {
    radix = 0;
  } else if (radix) {
    radix = +radix;
  }
  var trimmed = toString(string).replace(reTrimStart, '');
  return nativeParseInt(trimmed, radix || 0);
}
/**
* Repeats the given string `n` times.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to repeat.
* @param {number} [n=1] The number of times to repeat the string.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {string} Returns the repeated string.
* @example
*
* _.repeat('*', 3);
* // => '***'
*
* _.repeat('abc', 2);
* // => 'abcabc'
*
* _.repeat('abc', 0);
* // => ''
*/
function repeat(string, n, guard) {
  // Default `n` to 1 when omitted or when invoked as an iteratee.
  var useDefault = guard ? isIterateeCall(string, n, guard) : n === undefined;
  n = useDefault ? 1 : toInteger(n);
  return baseRepeat(toString(string), n);
}
/**
* Replaces matches for `pattern` in `string` with `replacement`.
*
* **Note:** This method is based on
* [`String#replace`](https://mdn.io/String/replace).
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to modify.
* @param {RegExp|string} pattern The pattern to replace.
* @param {Function|string} replacement The match replacement.
* @returns {string} Returns the modified string.
* @example
*
* _.replace('Hi Fred', 'Fred', 'Barney');
* // => 'Hi Barney'
*/
function replace() {
  var args = arguments;
  var subject = toString(args[0]);
  // Without both a pattern and a replacement, return the string as-is.
  if (args.length < 3) {
    return subject;
  }
  return subject.replace(args[1], args[2]);
}
/**
* Converts `string` to
* [snake case](https://en.wikipedia.org/wiki/Snake_case).
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the snake cased string.
* @example
*
* _.snakeCase('Foo Bar');
* // => 'foo_bar'
*
* _.snakeCase('fooBar');
* // => 'foo_bar'
*
* _.snakeCase('--FOO-BAR--');
* // => 'foo_bar'
*/
var snakeCase = createCompounder(function(result, word, index) {
  // Join lower-cased words with underscores (none before the first word).
  var glue = index ? '_' : '';
  return result + glue + word.toLowerCase();
});
/**
* Splits `string` by `separator`.
*
* **Note:** This method is based on
* [`String#split`](https://mdn.io/String/split).
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to split.
* @param {RegExp|string} separator The separator pattern to split by.
* @param {number} [limit] The length to truncate results to.
* @returns {Array} Returns the string segments.
* @example
*
* _.split('a-b-c', '-', 2);
* // => ['a', 'b']
*/
function split(string, separator, limit) {
  // Ignore `separator`/`limit` when invoked as an iteratee.
  if (limit && typeof limit != 'number' && isIterateeCall(string, separator, limit)) {
    separator = limit = undefined;
  }
  limit = limit === undefined ? MAX_ARRAY_LENGTH : limit >>> 0;
  if (!limit) {
    return [];
  }
  string = toString(string);
  if (string) {
    var isPlainSeparator = typeof separator == 'string' ||
        (separator != null && !isRegExp(separator));
    if (isPlainSeparator) {
      separator = baseToString(separator);
      // An empty separator on a Unicode string must split by symbol,
      // not by UTF-16 code unit.
      if (!separator && hasUnicode(string)) {
        return castSlice(stringToArray(string), 0, limit);
      }
    }
  }
  return string.split(separator, limit);
}
/**
* Converts `string` to
* [start case](https://en.wikipedia.org/wiki/Letter_case#Stylistic_or_specialised_usage).
*
* @static
* @memberOf _
* @since 3.1.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the start cased string.
* @example
*
* _.startCase('--foo-bar--');
* // => 'Foo Bar'
*
* _.startCase('fooBar');
* // => 'Foo Bar'
*
* _.startCase('__FOO_BAR__');
* // => 'FOO BAR'
*/
var startCase = createCompounder(function(result, word, index) {
  // Join first-letter-capitalized words with spaces (none before the first).
  var glue = index ? ' ' : '';
  return result + glue + upperFirst(word);
});
/**
* Checks if `string` starts with the given target string.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to inspect.
* @param {string} [target] The string to search for.
* @param {number} [position=0] The position to search from.
* @returns {boolean} Returns `true` if `string` starts with `target`,
* else `false`.
* @example
*
* _.startsWith('abc', 'a');
* // => true
*
* _.startsWith('abc', 'b');
* // => false
*
* _.startsWith('abc', 'b', 1);
* // => true
*/
function startsWith(string, target, position) {
  string = toString(string);
  // Clamp the start position into [0, length]; nullish means 0.
  if (position == null) {
    position = 0;
  } else {
    position = baseClamp(toInteger(position), 0, string.length);
  }
  target = baseToString(target);
  var end = position + target.length;
  return string.slice(position, end) == target;
}
/**
* Creates a compiled template function that can interpolate data properties
* in "interpolate" delimiters, HTML-escape interpolated data properties in
* "escape" delimiters, and execute JavaScript in "evaluate" delimiters. Data
* properties may be accessed as free variables in the template. If a setting
* object is given, it takes precedence over `_.templateSettings` values.
*
* **Security:** `_.template` is insecure and should not be used. It will be
* removed in Lodash v5. Avoid untrusted input. See
* [threat model](https://github.com/lodash/lodash/blob/main/threat-model.md).
*
* **Note:** In the development build `_.template` utilizes
* [sourceURLs](http://www.html5rocks.com/en/tutorials/developertools/sourcemaps/#toc-sourceurl)
* for easier debugging.
*
* For more information on precompiling templates see
* [lodash's custom builds documentation](https://lodash.com/custom-builds).
*
* For more information on Chrome extension sandboxes see
* [Chrome's extensions documentation](https://developer.chrome.com/extensions/sandboxingEval).
*
* @static
* @since 0.1.0
* @memberOf _
* @category String
* @param {string} [string=''] The template string.
* @param {Object} [options={}] The options object.
* @param {RegExp} [options.escape=_.templateSettings.escape]
* The HTML "escape" delimiter.
* @param {RegExp} [options.evaluate=_.templateSettings.evaluate]
* The "evaluate" delimiter.
* @param {Object} [options.imports=_.templateSettings.imports]
* An object to import into the template as free variables.
* @param {RegExp} [options.interpolate=_.templateSettings.interpolate]
* The "interpolate" delimiter.
* @param {string} [options.sourceURL='lodash.templateSources[n]']
* The sourceURL of the compiled template.
* @param {string} [options.variable='obj']
* The data object variable name.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Function} Returns the compiled template function.
* @example
*
* // Use the "interpolate" delimiter to create a compiled template.
* var compiled = _.template('hello <%= user %>!');
* compiled({ 'user': 'fred' });
* // => 'hello fred!'
*
* // Use the HTML "escape" delimiter to escape data property values.
* var compiled = _.template('<b><%- value %></b>');
* compiled({ 'value': '<script>' });
* // => '<b><script></b>'
*
* // Use the "evaluate" delimiter to execute JavaScript and generate HTML.
* var compiled = _.template('<% _.forEach(users, function(user) { %><li><%- user %></li><% }); %>');
* compiled({ 'users': ['fred', 'barney'] });
* // => '<li>fred</li><li>barney</li>'
*
* // Use the internal `print` function in "evaluate" delimiters.
* var compiled = _.template('<% print("hello " + user); %>!');
* compiled({ 'user': 'barney' });
* // => 'hello barney!'
*
* // Use the ES template literal delimiter as an "interpolate" delimiter.
* // Disable support by replacing the "interpolate" delimiter.
* var compiled = _.template('hello ${ user }!');
* compiled({ 'user': 'pebbles' });
* // => 'hello pebbles!'
*
* // Use backslashes to treat delimiters as plain text.
* var compiled = _.template('<%= "\\<%- value %\\>" %>');
* compiled({ 'value': 'ignored' });
* // => '<%- value %>'
*
* // Use the `imports` option to import `jQuery` as `jq`.
* var text = '<% jq.each(users, function(user) { %><li><%- user %></li><% }); %>';
* var compiled = _.template(text, { 'imports': { 'jq': jQuery } });
* compiled({ 'users': ['fred', 'barney'] });
* // => '<li>fred</li><li>barney</li>'
*
* // Use the `sourceURL` option to specify a custom sourceURL for the template.
* var compiled = _.template('hello <%= user %>!', { 'sourceURL': '/basic/greeting.jst' });
* compiled(data);
* // => Find the source of "greeting.jst" under the Sources tab or Resources panel of the web inspector.
*
* // Use the `variable` option to ensure a with-statement isn't used in the compiled template.
* var compiled = _.template('hi <%= data.user %>!', { 'variable': 'data' });
* compiled.source;
* // => function(data) {
* // var __t, __p = '';
* // __p += 'hi ' + ((__t = ( data.user )) == null ? '' : __t) + '!';
* // return __p;
* // }
*
* // Use custom template delimiters.
* _.templateSettings.interpolate = /{{([\s\S]+?)}}/g;
* var compiled = _.template('hello {{ user }}!');
* compiled({ 'user': 'mustache' });
* // => 'hello mustache!'
*
* // Use the `source` property to inline compiled templates for meaningful
* // line numbers in error messages and stack traces.
* fs.writeFileSync(path.join(process.cwd(), 'jst.js'), '\
* var JST = {\
* "main": ' + _.template(mainText).source + '\
* };\
* ');
*/
function template(string, options, guard) {
// Based on John Resig's `tmpl` implementation
// (http://ejohn.org/blog/javascript-micro-templating/)
// and Laura Doktorova's doT.js (https://github.com/olado/doT).
var settings = lodash.templateSettings;
// When invoked as an iteratee, ignore the positional `options` argument.
if (guard && isIterateeCall(string, options, guard)) {
options = undefined;
}
string = toString(string);
// Layer user options over the `_.templateSettings` defaults.
options = assignInWith({}, options, settings, customDefaultsAssignIn);
// `imports` become free variables in the compiled template body.
var imports = assignInWith({}, options.imports, settings.imports, customDefaultsAssignIn),
importsKeys = keys(imports),
importsValues = baseValues(imports, importsKeys);
var isEscaping,
isEvaluating,
index = 0,
interpolate = options.interpolate || reNoMatch,
source = "__p += '";
// Compile the regexp to match each delimiter.
// The trailing `|$` alternative guarantees a final zero-length match so the
// replace callback runs at least once and flushes the tail of the template.
var reDelimiters = RegExp(
(options.escape || reNoMatch).source + '|' +
interpolate.source + '|' +
(interpolate === reInterpolate ? reEsTemplate : reNoMatch).source + '|' +
(options.evaluate || reNoMatch).source + '|$'
, 'g');
// Use a sourceURL for easier debugging.
// The sourceURL gets injected into the source that's eval-ed, so be careful
// to normalize all kinds of whitespace, so e.g. newlines (and unicode versions of it) can't sneak in
// and escape the comment, thus injecting code that gets evaled.
var sourceURL = '//# sourceURL=' +
(hasOwnProperty.call(options, 'sourceURL')
? (options.sourceURL + '').replace(/\s/g, ' ')
: ('lodash.templateSources[' + (++templateCounter) + ']')
) + '\n';
// Single pass over the template: each match appends the literal text since
// the previous match, then the code snippet for the matched delimiter.
string.replace(reDelimiters, function(match, escapeValue, interpolateValue, esTemplateValue, evaluateValue, offset) {
// ES-template-literal delimiters are treated as interpolation.
interpolateValue || (interpolateValue = esTemplateValue);
// Escape characters that can't be included in string literals.
source += string.slice(index, offset).replace(reUnescapedString, escapeStringChar);
// Replace delimiters with snippets.
if (escapeValue) {
isEscaping = true;
source += "' +\n__e(" + escapeValue + ") +\n'";
}
if (evaluateValue) {
isEvaluating = true;
source += "';\n" + evaluateValue + ";\n__p += '";
}
if (interpolateValue) {
source += "' +\n((__t = (" + interpolateValue + ")) == null ? '' : __t) +\n'";
}
index = offset + match.length;
// The JS engine embedded in Adobe products needs `match` returned in
// order to produce the correct `offset` value.
return match;
});
source += "';\n";
// If `variable` is not specified wrap a with-statement around the generated
// code to add the data object to the top of the scope chain.
var variable = hasOwnProperty.call(options, 'variable') && options.variable;
if (!variable) {
source = 'with (obj) {\n' + source + '\n}\n';
}
// Throw an error if a forbidden character was found in `variable`, to prevent
// potential command injection attacks.
else if (reForbiddenIdentifierChars.test(variable)) {
throw new Error(INVALID_TEMPL_VAR_ERROR_TEXT);
}
// Cleanup code by stripping empty strings.
source = (isEvaluating ? source.replace(reEmptyStringLeading, '') : source)
.replace(reEmptyStringMiddle, '$1')
.replace(reEmptyStringTrailing, '$1;');
// Frame code as the function body.
// `__e` (escape helper) and `print` are only injected when the template
// actually used the corresponding delimiters.
source = 'function(' + (variable || 'obj') + ') {\n' +
(variable
? ''
: 'obj || (obj = {});\n'
) +
"var __t, __p = ''" +
(isEscaping
? ', __e = _.escape'
: ''
) +
(isEvaluating
? ', __j = Array.prototype.join;\n' +
"function print() { __p += __j.call(arguments, '') }\n"
: ';\n'
) +
source +
'return __p\n}';
// Compile via the Function constructor so `importsKeys` become named
// parameters and `importsValues` their bound values.
var result = attempt(function() {
return Function(importsKeys, sourceURL + 'return ' + source)
.apply(undefined, importsValues);
});
// Provide the compiled function's source by its `toString` method or
// the `source` property as a convenience for inlining compiled templates.
result.source = source;
// `attempt` returns a caught error instead of throwing; surface it here.
if (isError(result)) {
throw result;
}
return result;
}
/**
* Converts `string`, as a whole, to lower case just like
* [String#toLowerCase](https://mdn.io/toLowerCase).
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the lower cased string.
* @example
*
* _.toLower('--Foo-Bar--');
* // => '--foo-bar--'
*
* _.toLower('fooBar');
* // => 'foobar'
*
* _.toLower('__FOO_BAR__');
* // => '__foo_bar__'
*/
function toLower(value) {
  // Coerce first so nullish input becomes '' rather than throwing.
  var str = toString(value);
  return str.toLowerCase();
}
/**
* Converts `string`, as a whole, to upper case just like
* [String#toUpperCase](https://mdn.io/toUpperCase).
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the upper cased string.
* @example
*
* _.toUpper('--foo-bar--');
* // => '--FOO-BAR--'
*
* _.toUpper('fooBar');
* // => 'FOOBAR'
*
* _.toUpper('__foo_bar__');
* // => '__FOO_BAR__'
*/
function toUpper(value) {
  // Coerce first so nullish input becomes '' rather than throwing.
  var str = toString(value);
  return str.toUpperCase();
}
/**
* Removes leading and trailing whitespace or specified characters from `string`.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to trim.
* @param {string} [chars=whitespace] The characters to trim.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {string} Returns the trimmed string.
* @example
*
* _.trim(' abc ');
* // => 'abc'
*
* _.trim('-_-abc-_-', '_-');
* // => 'abc'
*
* _.map([' foo ', ' bar '], _.trim);
* // => ['foo', 'bar']
*/
function trim(string, chars, guard) {
  string = toString(string);
  // Fast path: default whitespace trim (also taken when used as an iteratee).
  if (string && (guard || chars === undefined)) {
    return baseTrim(string);
  }
  if (!string) {
    return string;
  }
  chars = baseToString(chars);
  if (!chars) {
    return string;
  }
  // Work on Unicode symbols so astral characters are trimmed whole.
  var symbols = stringToArray(string);
  var trimSet = stringToArray(chars);
  var begin = charsStartIndex(symbols, trimSet);
  var stop = charsEndIndex(symbols, trimSet) + 1;
  return castSlice(symbols, begin, stop).join('');
}
/**
* Removes trailing whitespace or specified characters from `string`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to trim.
* @param {string} [chars=whitespace] The characters to trim.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {string} Returns the trimmed string.
* @example
*
* _.trimEnd(' abc ');
* // => ' abc'
*
* _.trimEnd('-_-abc-_-', '_-');
* // => '-_-abc'
*/
function trimEnd(string, chars, guard) {
  string = toString(string);
  // Fast path: default whitespace trim (also taken when used as an iteratee).
  if (string && (guard || chars === undefined)) {
    return string.slice(0, trimmedEndIndex(string) + 1);
  }
  if (!string) {
    return string;
  }
  chars = baseToString(chars);
  if (!chars) {
    return string;
  }
  // Work on Unicode symbols so astral characters are trimmed whole.
  var symbols = stringToArray(string);
  var stop = charsEndIndex(symbols, stringToArray(chars)) + 1;
  return castSlice(symbols, 0, stop).join('');
}
/**
* Removes leading whitespace or specified characters from `string`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to trim.
* @param {string} [chars=whitespace] The characters to trim.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {string} Returns the trimmed string.
* @example
*
* _.trimStart(' abc ');
* // => 'abc '
*
* _.trimStart('-_-abc-_-', '_-');
* // => 'abc-_-'
*/
function trimStart(string, chars, guard) {
  string = toString(string);
  // Fast path: default whitespace trim (also taken when used as an iteratee).
  if (string && (guard || chars === undefined)) {
    return string.replace(reTrimStart, '');
  }
  if (!string) {
    return string;
  }
  chars = baseToString(chars);
  if (!chars) {
    return string;
  }
  // Work on Unicode symbols so astral characters are trimmed whole.
  var symbols = stringToArray(string);
  var begin = charsStartIndex(symbols, stringToArray(chars));
  return castSlice(symbols, begin).join('');
}
/**
* Truncates `string` if it's longer than the given maximum string length.
* The last characters of the truncated string are replaced with the omission
* string which defaults to "...".
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to truncate.
* @param {Object} [options={}] The options object.
* @param {number} [options.length=30] The maximum string length.
* @param {string} [options.omission='...'] The string to indicate text is omitted.
* @param {RegExp|string} [options.separator] The separator pattern to truncate to.
* @returns {string} Returns the truncated string.
* @example
*
* _.truncate('hi-diddly-ho there, neighborino');
* // => 'hi-diddly-ho there, neighbo...'
*
* _.truncate('hi-diddly-ho there, neighborino', {
* 'length': 24,
* 'separator': ' '
* });
* // => 'hi-diddly-ho there,...'
*
* _.truncate('hi-diddly-ho there, neighborino', {
* 'length': 24,
* 'separator': /,? +/
* });
* // => 'hi-diddly-ho there...'
*
* _.truncate('hi-diddly-ho there, neighborino', {
* 'omission': ' [...]'
* });
* // => 'hi-diddly-ho there, neig [...]'
*/
function truncate(string, options) {
var length = DEFAULT_TRUNC_LENGTH,
omission = DEFAULT_TRUNC_OMISSION;
if (isObject(options)) {
var separator = 'separator' in options ? options.separator : separator;
length = 'length' in options ? toInteger(options.length) : length;
omission = 'omission' in options ? baseToString(options.omission) : omission;
}
string = toString(string);
var strLength = string.length;
if (hasUnicode(string)) {
var strSymbols = stringToArray(string);
strLength = strSymbols.length;
}
if (length >= strLength) {
return string;
}
var end = length - stringSize(omission);
if (end < 1) {
return omission;
}
var result = strSymbols
? castSlice(strSymbols, 0, end).join('')
: string.slice(0, end);
if (separator === undefined) {
return result + omission;
}
if (strSymbols) {
end += (result.length - end);
}
if (isRegExp(separator)) {
if (string.slice(end).search(separator)) {
var match,
substring = result;
if (!separator.global) {
separator = RegExp(separator.source, toString(reFlags.exec(separator)) + 'g');
}
separator.lastIndex = 0;
while ((match = separator.exec(substring))) {
var newEnd = match.index;
}
result = result.slice(0, newEnd === undefined ? end : newEnd);
}
} else if (string.indexOf(baseToString(separator), end) != end) {
var index = result.lastIndexOf(separator);
if (index > -1) {
result = result.slice(0, index);
}
}
return result + omission;
}
/**
* The inverse of `_.escape`; this method converts the HTML entities
* `&`, `<`, `>`, `"`, and `'` in `string` to
* their corresponding characters.
*
* **Note:** No other HTML entities are unescaped. To unescape additional
* HTML entities use a third-party library like [_he_](https://mths.be/he).
*
* @static
* @memberOf _
* @since 0.6.0
* @category String
* @param {string} [string=''] The string to unescape.
* @returns {string} Returns the unescaped string.
* @example
*
* _.unescape('fred, barney, & pebbles');
* // => 'fred, barney, & pebbles'
*/
function unescape(string) {
string = toString(string);
return (string && reHasEscapedHtml.test(string))
? string.replace(reEscapedHtml, unescapeHtmlChar)
: string;
}
/**
* Converts `string`, as space separated words, to upper case.
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the upper cased string.
* @example
*
* _.upperCase('--foo-bar');
* // => 'FOO BAR'
*
* _.upperCase('fooBar');
* // => 'FOO BAR'
*
* _.upperCase('__foo_bar__');
* // => 'FOO BAR'
*/
var upperCase = createCompounder(function(result, word, index) {
return result + (index ? ' ' : '') + word.toUpperCase();
});
/**
* Converts the first character of `string` to upper case.
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the converted string.
* @example
*
* _.upperFirst('fred');
* // => 'Fred'
*
* _.upperFirst('FRED');
* // => 'FRED'
*/
var upperFirst = createCaseFirst('toUpperCase');
/**
* Splits `string` into an array of its words.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to inspect.
* @param {RegExp|string} [pattern] The pattern to match words.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Array} Returns the words of `string`.
* @example
*
* _.words('fred, barney, & pebbles');
* // => ['fred', 'barney', 'pebbles']
*
* _.words('fred, barney, & pebbles', /[^, ]+/g);
* // => ['fred', 'barney', '&', 'pebbles']
*/
function words(string, pattern, guard) {
string = toString(string);
pattern = guard ? undefined : pattern;
if (pattern === undefined) {
return hasUnicodeWord(string) ? unicodeWords(string) : asciiWords(string);
}
return string.match(pattern) || [];
}
/*------------------------------------------------------------------------*/
/**
* Attempts to invoke `func`, returning either the result or the caught error
* object. Any additional arguments are provided to `func` when it's invoked.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Util
* @param {Function} func The function to attempt.
* @param {...*} [args] The arguments to invoke `func` with.
* @returns {*} Returns the `func` result or error object.
* @example
*
* // Avoid throwing errors for invalid selectors.
* var elements = _.attempt(function(selector) {
* return document.querySelectorAll(selector);
* }, '>_>');
*
* if (_.isError(elements)) {
* elements = [];
* }
*/
var attempt = baseRest(function(func, args) {
try {
return apply(func, undefined, args);
} catch (e) {
return isError(e) ? e : new Error(e);
}
});
/**
* Binds methods of an object to the object itself, overwriting the existing
* method.
*
* **Note:** This method doesn't set the "length" property of bound functions.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Util
* @param {Object} object The object to bind and assign the bound methods to.
* @param {...(string|string[])} methodNames The object method names to bind.
* @returns {Object} Returns `object`.
* @example
*
* var view = {
* 'label': 'docs',
* 'click': function() {
* console.log('clicked ' + this.label);
* }
* };
*
* _.bindAll(view, ['click']);
* jQuery(element).on('click', view.click);
* // => Logs 'clicked docs' when clicked.
*/
var bindAll = flatRest(function(object, methodNames) {
arrayEach(methodNames, function(key) {
key = toKey(key);
baseAssignValue(object, key, bind(object[key], object));
});
return object;
});
/**
* Creates a function that iterates over `pairs` and invokes the corresponding
* function of the first predicate to return truthy. The predicate-function
* pairs are invoked with the `this` binding and arguments of the created
* function.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {Array} pairs The predicate-function pairs.
* @returns {Function} Returns the new composite function.
* @example
*
* var func = _.cond([
* [_.matches({ 'a': 1 }), _.constant('matches A')],
* [_.conforms({ 'b': _.isNumber }), _.constant('matches B')],
* [_.stubTrue, _.constant('no match')]
* ]);
*
* func({ 'a': 1, 'b': 2 });
* // => 'matches A'
*
* func({ 'a': 0, 'b': 1 });
* // => 'matches B'
*
* func({ 'a': '1', 'b': '2' });
* // => 'no match'
*/
function cond(pairs) {
var length = pairs == null ? 0 : pairs.length,
toIteratee = getIteratee();
pairs = !length ? [] : arrayMap(pairs, function(pair) {
if (typeof pair[1] != 'function') {
throw new TypeError(FUNC_ERROR_TEXT);
}
return [toIteratee(pair[0]), pair[1]];
});
return baseRest(function(args) {
var index = -1;
while (++index < length) {
var pair = pairs[index];
if (apply(pair[0], this, args)) {
return apply(pair[1], this, args);
}
}
});
}
/**
* Creates a function that invokes the predicate properties of `source` with
* the corresponding property values of a given object, returning `true` if
* all predicates return truthy, else `false`.
*
* **Note:** The created function is equivalent to `_.conformsTo` with
* `source` partially applied.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {Object} source The object of property predicates to conform to.
* @returns {Function} Returns the new spec function.
* @example
*
* var objects = [
* { 'a': 2, 'b': 1 },
* { 'a': 1, 'b': 2 }
* ];
*
* _.filter(objects, _.conforms({ 'b': function(n) { return n > 1; } }));
* // => [{ 'a': 1, 'b': 2 }]
*/
function conforms(source) {
return baseConforms(baseClone(source, CLONE_DEEP_FLAG));
}
/**
* Creates a function that returns `value`.
*
* @static
* @memberOf _
* @since 2.4.0
* @category Util
* @param {*} value The value to return from the new function.
* @returns {Function} Returns the new constant function.
* @example
*
* var objects = _.times(2, _.constant({ 'a': 1 }));
*
* console.log(objects);
* // => [{ 'a': 1 }, { 'a': 1 }]
*
* console.log(objects[0] === objects[1]);
* // => true
*/
function constant(value) {
return function() {
return value;
};
}
/**
* Checks `value` to determine whether a default value should be returned in
* its place. The `defaultValue` is returned if `value` is `NaN`, `null`,
* or `undefined`.
*
* @static
* @memberOf _
* @since 4.14.0
* @category Util
* @param {*} value The value to check.
* @param {*} defaultValue The default value.
* @returns {*} Returns the resolved value.
* @example
*
* _.defaultTo(1, 10);
* // => 1
*
* _.defaultTo(undefined, 10);
* // => 10
*/
function defaultTo(value, defaultValue) {
return (value == null || value !== value) ? defaultValue : value;
}
/**
* Creates a function that returns the result of invoking the given functions
* with the `this` binding of the created function, where each successive
* invocation is supplied the return value of the previous.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Util
* @param {...(Function|Function[])} [funcs] The functions to invoke.
* @returns {Function} Returns the new composite function.
* @see _.flowRight
* @example
*
* function square(n) {
* return n * n;
* }
*
* var addSquare = _.flow([_.add, square]);
* addSquare(1, 2);
* // => 9
*/
var flow = createFlow();
/**
* This method is like `_.flow` except that it creates a function that
* invokes the given functions from right to left.
*
* @static
* @since 3.0.0
* @memberOf _
* @category Util
* @param {...(Function|Function[])} [funcs] The functions to invoke.
* @returns {Function} Returns the new composite function.
* @see _.flow
* @example
*
* function square(n) {
* return n * n;
* }
*
* var addSquare = _.flowRight([square, _.add]);
* addSquare(1, 2);
* // => 9
*/
var flowRight = createFlow(true);
/**
* This method returns the first argument it receives.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Util
* @param {*} value Any value.
* @returns {*} Returns `value`.
* @example
*
* var object = { 'a': 1 };
*
* console.log(_.identity(object) === object);
* // => true
*/
function identity(value) {
return value;
}
/**
* Creates a function that invokes `func` with the arguments of the created
* function. If `func` is a property name, the created function returns the
* property value for a given element. If `func` is an array or object, the
* created function returns `true` for elements that contain the equivalent
* source properties, otherwise it returns `false`.
*
* @static
* @since 4.0.0
* @memberOf _
* @category Util
* @param {*} [func=_.identity] The value to convert to a callback.
* @returns {Function} Returns the callback.
* @example
*
* var users = [
* { 'user': 'barney', 'age': 36, 'active': true },
* { 'user': 'fred', 'age': 40, 'active': false }
* ];
*
* // The `_.matches` iteratee shorthand.
* _.filter(users, _.iteratee({ 'user': 'barney', 'active': true }));
* // => [{ 'user': 'barney', 'age': 36, 'active': true }]
*
* // The `_.matchesProperty` iteratee shorthand.
* _.filter(users, _.iteratee(['user', 'fred']));
* // => [{ 'user': 'fred', 'age': 40 }]
*
* // The `_.property` iteratee shorthand.
* _.map(users, _.iteratee('user'));
* // => ['barney', 'fred']
*
* // Create custom iteratee shorthands.
* _.iteratee = _.wrap(_.iteratee, function(iteratee, func) {
* return !_.isRegExp(func) ? iteratee(func) : function(string) {
* return func.test(string);
* };
* });
*
* _.filter(['abc', 'def'], /ef/);
* // => ['def']
*/
function iteratee(func) {
return baseIteratee(typeof func == 'function' ? func : baseClone(func, CLONE_DEEP_FLAG));
}
/**
* Creates a function that performs a partial deep comparison between a given
* object and `source`, returning `true` if the given object has equivalent
* property values, else `false`.
*
* **Note:** The created function is equivalent to `_.isMatch` with `source`
* partially applied.
*
* Partial comparisons will match empty array and empty object `source`
* values against any array or object value, respectively. See `_.isEqual`
* for a list of supported value comparisons.
*
* **Note:** Multiple values can be checked by combining several matchers
* using `_.overSome`
*
* @static
* @memberOf _
* @since 3.0.0
* @category Util
* @param {Object} source The object of property values to match.
* @returns {Function} Returns the new spec function.
* @example
*
* var objects = [
* { 'a': 1, 'b': 2, 'c': 3 },
* { 'a': 4, 'b': 5, 'c': 6 }
* ];
*
* _.filter(objects, _.matches({ 'a': 4, 'c': 6 }));
* // => [{ 'a': 4, 'b': 5, 'c': 6 }]
*
* // Checking for several possible values
* _.filter(objects, _.overSome([_.matches({ 'a': 1 }), _.matches({ 'a': 4 })]));
* // => [{ 'a': 1, 'b': 2, 'c': 3 }, { 'a': 4, 'b': 5, 'c': 6 }]
*/
function matches(source) {
return baseMatches(baseClone(source, CLONE_DEEP_FLAG));
}
/**
* Creates a function that performs a partial deep comparison between the
* value at `path` of a given object to `srcValue`, returning `true` if the
* object value is equivalent, else `false`.
*
* **Note:** Partial comparisons will match empty array and empty object
* `srcValue` values against any array or object value, respectively. See
* `_.isEqual` for a list of supported value comparisons.
*
* **Note:** Multiple values can be checked by combining several matchers
* using `_.overSome`
*
* @static
* @memberOf _
* @since 3.2.0
* @category Util
* @param {Array|string} path The path of the property to get.
* @param {*} srcValue The value to match.
* @returns {Function} Returns the new spec function.
* @example
*
* var objects = [
* { 'a': 1, 'b': 2, 'c': 3 },
* { 'a': 4, 'b': 5, 'c': 6 }
* ];
*
* _.find(objects, _.matchesProperty('a', 4));
* // => { 'a': 4, 'b': 5, 'c': 6 }
*
* // Checking for several possible values
* _.filter(objects, _.overSome([_.matchesProperty('a', 1), _.matchesProperty('a', 4)]));
* // => [{ 'a': 1, 'b': 2, 'c': 3 }, { 'a': 4, 'b': 5, 'c': 6 }]
*/
function matchesProperty(path, srcValue) {
return baseMatchesProperty(path, baseClone(srcValue, CLONE_DEEP_FLAG));
}
/**
* Creates a function that invokes the method at `path` of a given object.
* Any additional arguments are provided to the invoked method.
*
* @static
* @memberOf _
* @since 3.7.0
* @category Util
* @param {Array|string} path The path of the method to invoke.
* @param {...*} [args] The arguments to invoke the method with.
* @returns {Function} Returns the new invoker function.
* @example
*
* var objects = [
* { 'a': { 'b': _.constant(2) } },
* { 'a': { 'b': _.constant(1) } }
* ];
*
* _.map(objects, _.method('a.b'));
* // => [2, 1]
*
* _.map(objects, _.method(['a', 'b']));
* // => [2, 1]
*/
var method = baseRest(function(path, args) {
return function(object) {
return baseInvoke(object, path, args);
};
});
/**
* The opposite of `_.method`; this method creates a function that invokes
* the method at a given path of `object`. Any additional arguments are
* provided to the invoked method.
*
* @static
* @memberOf _
* @since 3.7.0
* @category Util
* @param {Object} object The object to query.
* @param {...*} [args] The arguments to invoke the method with.
* @returns {Function} Returns the new invoker function.
* @example
*
* var array = _.times(3, _.constant),
* object = { 'a': array, 'b': array, 'c': array };
*
* _.map(['a[2]', 'c[0]'], _.methodOf(object));
* // => [2, 0]
*
* _.map([['a', '2'], ['c', '0']], _.methodOf(object));
* // => [2, 0]
*/
var methodOf = baseRest(function(object, args) {
return function(path) {
return baseInvoke(object, path, args);
};
});
/**
* Adds all own enumerable string keyed function properties of a source
* object to the destination object. If `object` is a function, then methods
* are added to its prototype as well.
*
* **Note:** Use `_.runInContext` to create a pristine `lodash` function to
* avoid conflicts caused by modifying the original.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Util
* @param {Function|Object} [object=lodash] The destination object.
* @param {Object} source The object of functions to add.
* @param {Object} [options={}] The options object.
* @param {boolean} [options.chain=true] Specify whether mixins are chainable.
* @returns {Function|Object} Returns `object`.
* @example
*
* function vowels(string) {
* return _.filter(string, function(v) {
* return /[aeiou]/i.test(v);
* });
* }
*
* _.mixin({ 'vowels': vowels });
* _.vowels('fred');
* // => ['e']
*
* _('fred').vowels().value();
* // => ['e']
*
* _.mixin({ 'vowels': vowels }, { 'chain': false });
* _('fred').vowels();
* // => ['e']
*/
function mixin(object, source, options) {
var props = keys(source),
methodNames = baseFunctions(source, props);
if (options == null &&
!(isObject(source) && (methodNames.length || !props.length))) {
options = source;
source = object;
object = this;
methodNames = baseFunctions(source, keys(source));
}
var chain = !(isObject(options) && 'chain' in options) || !!options.chain,
isFunc = isFunction(object);
arrayEach(methodNames, function(methodName) {
var func = source[methodName];
object[methodName] = func;
if (isFunc) {
object.prototype[methodName] = function() {
var chainAll = this.__chain__;
if (chain || chainAll) {
var result = object(this.__wrapped__),
actions = result.__actions__ = copyArray(this.__actions__);
actions.push({ 'func': func, 'args': arguments, 'thisArg': object });
result.__chain__ = chainAll;
return result;
}
return func.apply(object, arrayPush([this.value()], arguments));
};
}
});
return object;
}
/**
* Reverts the `_` variable to its previous value and returns a reference to
* the `lodash` function.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Util
* @returns {Function} Returns the `lodash` function.
* @example
*
* var lodash = _.noConflict();
*/
function noConflict() {
if (root._ === this) {
root._ = oldDash;
}
return this;
}
/**
* This method returns `undefined`.
*
* @static
* @memberOf _
* @since 2.3.0
* @category Util
* @example
*
* _.times(2, _.noop);
* // => [undefined, undefined]
*/
function noop() {
// No operation performed.
}
/**
* Creates a function that gets the argument at index `n`. If `n` is negative,
* the nth argument from the end is returned.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {number} [n=0] The index of the argument to return.
* @returns {Function} Returns the new pass-thru function.
* @example
*
* var func = _.nthArg(1);
* func('a', 'b', 'c', 'd');
* // => 'b'
*
* var func = _.nthArg(-2);
* func('a', 'b', 'c', 'd');
* // => 'c'
*/
function nthArg(n) {
n = toInteger(n);
return baseRest(function(args) {
return baseNth(args, n);
});
}
/**
* Creates a function that invokes `iteratees` with the arguments it receives
* and returns their results.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {...(Function|Function[])} [iteratees=[_.identity]]
* The iteratees to invoke.
* @returns {Function} Returns the new function.
* @example
*
* var func = _.over([Math.max, Math.min]);
*
* func(1, 2, 3, 4);
* // => [4, 1]
*/
var over = createOver(arrayMap);
/**
* Creates a function that checks if **all** of the `predicates` return
* truthy when invoked with the arguments it receives.
*
* Following shorthands are possible for providing predicates.
* Pass an `Object` and it will be used as an parameter for `_.matches` to create the predicate.
* Pass an `Array` of parameters for `_.matchesProperty` and the predicate will be created using them.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {...(Function|Function[])} [predicates=[_.identity]]
* The predicates to check.
* @returns {Function} Returns the new function.
* @example
*
* var func = _.overEvery([Boolean, isFinite]);
*
* func('1');
* // => true
*
* func(null);
* // => false
*
* func(NaN);
* // => false
*/
var overEvery = createOver(arrayEvery);
/**
* Creates a function that checks if **any** of the `predicates` return
* truthy when invoked with the arguments it receives.
*
* Following shorthands are possible for providing predicates.
* Pass an `Object` and it will be used as an parameter for `_.matches` to create the predicate.
* Pass an `Array` of parameters for `_.matchesProperty` and the predicate will be created using them.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {...(Function|Function[])} [predicates=[_.identity]]
* The predicates to check.
* @returns {Function} Returns the new function.
* @example
*
* var func = _.overSome([Boolean, isFinite]);
*
* func('1');
* // => true
*
* func(null);
* // => true
*
* func(NaN);
* // => false
*
* var matchesFunc = _.overSome([{ 'a': 1 }, { 'a': 2 }])
* var matchesPropertyFunc = _.overSome([['a', 1], ['a', 2]])
*/
var overSome = createOver(arraySome);
/**
* Creates a function that returns the value at `path` of a given object.
*
* @static
* @memberOf _
* @since 2.4.0
* @category Util
* @param {Array|string} path The path of the property to get.
* @returns {Function} Returns the new accessor function.
* @example
*
* var objects = [
* { 'a': { 'b': 2 } },
* { 'a': { 'b': 1 } }
* ];
*
* _.map(objects, _.property('a.b'));
* // => [2, 1]
*
* _.map(_.sortBy(objects, _.property(['a', 'b'])), 'a.b');
* // => [1, 2]
*/
function property(path) {
return isKey(path) ? baseProperty(toKey(path)) : basePropertyDeep(path);
}
/**
* The opposite of `_.property`; this method creates a function that returns
* the value at a given path of `object`.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Util
* @param {Object} object The object to query.
* @returns {Function} Returns the new accessor function.
* @example
*
* var array = [0, 1, 2],
* object = { 'a': array, 'b': array, 'c': array };
*
* _.map(['a[2]', 'c[0]'], _.propertyOf(object));
* // => [2, 0]
*
* _.map([['a', '2'], ['c', '0']], _.propertyOf(object));
* // => [2, 0]
*/
function propertyOf(object) {
return function(path) {
return object == null ? undefined : baseGet(object, path);
};
}
/**
* Creates an array of numbers (positive and/or negative) progressing from
* `start` up to, but not including, `end`. A step of `-1` is used if a negative
* `start` is specified without an `end` or `step`. If `end` is not specified,
* it's set to `start` with `start` then set to `0`.
*
* **Note:** JavaScript follows the IEEE-754 standard for resolving
* floating-point values which can produce unexpected results.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Util
* @param {number} [start=0] The start of the range.
* @param {number} end The end of the range.
* @param {number} [step=1] The value to increment or decrement by.
* @returns {Array} Returns the range of numbers.
* @see _.inRange, _.rangeRight
* @example
*
* _.range(4);
* // => [0, 1, 2, 3]
*
* _.range(-4);
* // => [0, -1, -2, -3]
*
* _.range(1, 5);
* // => [1, 2, 3, 4]
*
* _.range(0, 20, 5);
* // => [0, 5, 10, 15]
*
* _.range(0, -4, -1);
* // => [0, -1, -2, -3]
*
* _.range(1, 4, 0);
* // => [1, 1, 1]
*
* _.range(0);
* // => []
*/
var range = createRange();
/**
* This method is like `_.range` except that it populates values in
* descending order.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {number} [start=0] The start of the range.
* @param {number} end The end of the range.
* @param {number} [step=1] The value to increment or decrement by.
* @returns {Array} Returns the range of numbers.
* @see _.inRange, _.range
* @example
*
* _.rangeRight(4);
* // => [3, 2, 1, 0]
*
* _.rangeRight(-4);
* // => [-3, -2, -1, 0]
*
* _.rangeRight(1, 5);
* // => [4, 3, 2, 1]
*
* _.rangeRight(0, 20, 5);
* // => [15, 10, 5, 0]
*
* _.rangeRight(0, -4, -1);
* // => [-3, -2, -1, 0]
*
* _.rangeRight(1, 4, 0);
* // => [1, 1, 1]
*
* _.rangeRight(0);
* // => []
*/
var rangeRight = createRange(true);
/**
* This method returns a new empty array.
*
* @static
* @memberOf _
* @since 4.13.0
* @category Util
* @returns {Array} Returns the new empty array.
* @example
*
* var arrays = _.times(2, _.stubArray);
*
* console.log(arrays);
* // => [[], []]
*
* console.log(arrays[0] === arrays[1]);
* // => false
*/
function stubArray() {
return [];
}
/**
* This method returns `false`.
*
* @static
* @memberOf _
* @since 4.13.0
* @category Util
* @returns {boolean} Returns `false`.
* @example
*
* _.times(2, _.stubFalse);
* // => [false, false]
*/
function stubFalse() {
return false;
}
/**
* This method returns a new empty object.
*
* @static
* @memberOf _
* @since 4.13.0
* @category Util
* @returns {Object} Returns the new empty object.
* @example
*
* var objects = _.times(2, _.stubObject);
*
* console.log(objects);
* // => [{}, {}]
*
* console.log(objects[0] === objects[1]);
* // => false
*/
function stubObject() {
return {};
}
/**
* This method returns an empty string.
*
* @static
* @memberOf _
* @since 4.13.0
* @category Util
* @returns {string} Returns the empty string.
* @example
*
* _.times(2, _.stubString);
* // => ['', '']
*/
function stubString() {
return '';
}
/**
* This method returns `true`.
*
* @static
* @memberOf _
* @since 4.13.0
* @category Util
* @returns {boolean} Returns `true`.
* @example
*
* _.times(2, _.stubTrue);
* // => [true, true]
*/
function stubTrue() {
return true;
}
/**
* Invokes the iteratee `n` times, returning an array of the results of
* each invocation. The iteratee is invoked with one argument; (index).
*
* @static
* @since 0.1.0
* @memberOf _
* @category Util
* @param {number} n The number of times to invoke `iteratee`.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Array} Returns the array of results.
* @example
*
* _.times(3, String);
* // => ['0', '1', '2']
*
* _.times(4, _.constant(0));
* // => [0, 0, 0, 0]
*/
function times(n, iteratee) {
n = toInteger(n);
if (n < 1 || n > MAX_SAFE_INTEGER) {
return [];
}
var index = MAX_ARRAY_LENGTH,
length = nativeMin(n, MAX_ARRAY_LENGTH);
iteratee = getIteratee(iteratee);
n -= MAX_ARRAY_LENGTH;
var result = baseTimes(length, iteratee);
while (++index < n) {
iteratee(index);
}
return result;
}
/**
* Converts `value` to a property path array.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {*} value The value to convert.
* @returns {Array} Returns the new property path array.
* @example
*
* _.toPath('a.b.c');
* // => ['a', 'b', 'c']
*
* _.toPath('a[0].b.c');
* // => ['a', '0', 'b', 'c']
*/
function toPath(value) {
if (isArray(value)) {
return arrayMap(value, toKey);
}
return isSymbol(value) ? [value] : copyArray(stringToPath(toString(value)));
}
/**
* Generates a unique ID. If `prefix` is given, the ID is appended to it.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Util
* @param {string} [prefix=''] The value to prefix the ID with.
* @returns {string} Returns the unique ID.
* @example
*
* _.uniqueId('contact_');
* // => 'contact_104'
*
* _.uniqueId();
* // => '105'
*/
function uniqueId(prefix) {
var id = ++idCounter;
return toString(prefix) + id;
}
/*------------------------------------------------------------------------*/
/**
* Adds two numbers.
*
* @static
* @memberOf _
* @since 3.4.0
* @category Math
* @param {number} augend The first number in an addition.
* @param {number} addend The second number in an addition.
* @returns {number} Returns the total.
* @example
*
* _.add(6, 4);
* // => 10
*/
var add = createMathOperation(function(augend, addend) {
return augend + addend;
}, 0);
/**
* Computes `number` rounded up to `precision`.
*
* @static
* @memberOf _
* @since 3.10.0
* @category Math
* @param {number} number The number to round up.
* @param {number} [precision=0] The precision to round up to.
* @returns {number} Returns the rounded up number.
* @example
*
* _.ceil(4.006);
* // => 5
*
* _.ceil(6.004, 2);
* // => 6.01
*
* _.ceil(6040, -2);
* // => 6100
*/
var ceil = createRound('ceil');
/**
* Divide two numbers.
*
* @static
* @memberOf _
* @since 4.7.0
* @category Math
* @param {number} dividend The first number in a division.
* @param {number} divisor The second number in a division.
* @returns {number} Returns the quotient.
* @example
*
* _.divide(6, 4);
* // => 1.5
*/
var divide = createMathOperation(function(dividend, divisor) {
return dividend / divisor;
}, 1);
/**
* Computes `number` rounded down to `precision`.
*
* @static
* @memberOf _
* @since 3.10.0
* @category Math
* @param {number} number The number to round down.
* @param {number} [precision=0] The precision to round down to.
* @returns {number} Returns the rounded down number.
* @example
*
* _.floor(4.006);
* // => 4
*
* _.floor(0.046, 2);
* // => 0.04
*
* _.floor(4060, -2);
* // => 4000
*/
var floor = createRound('floor');
/**
* Computes the maximum value of `array`. If `array` is empty or falsey,
* `undefined` is returned.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Math
* @param {Array} array The array to iterate over.
* @returns {*} Returns the maximum value.
* @example
*
* _.max([4, 2, 8, 6]);
* // => 8
*
* _.max([]);
* // => undefined
*/
function max(array) {
return (array && array.length)
? baseExtremum(array, identity, baseGt)
: undefined;
}
/**
* This method is like `_.max` except that it accepts `iteratee` which is
* invoked for each element in `array` to generate the criterion by which
* the value is ranked. The iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Math
* @param {Array} array The array to iterate over.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {*} Returns the maximum value.
* @example
*
* var objects = [{ 'n': 1 }, { 'n': 2 }];
*
* _.maxBy(objects, function(o) { return o.n; });
* // => { 'n': 2 }
*
* // The `_.property` iteratee shorthand.
* _.maxBy(objects, 'n');
* // => { 'n': 2 }
*/
function maxBy(array, iteratee) {
return (array && array.length)
? baseExtremum(array, getIteratee(iteratee, 2), baseGt)
: undefined;
}
/**
* Computes the mean of the values in `array`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Math
* @param {Array} array The array to iterate over.
* @returns {number} Returns the mean.
* @example
*
* _.mean([4, 2, 8, 6]);
* // => 5
*/
function mean(array) {
return baseMean(array, identity);
}
/**
* This method is like `_.mean` except that it accepts `iteratee` which is
* invoked for each element in `array` to generate the value to be averaged.
* The iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 4.7.0
* @category Math
* @param {Array} array The array to iterate over.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {number} Returns the mean.
* @example
*
* var objects = [{ 'n': 4 }, { 'n': 2 }, { 'n': 8 }, { 'n': 6 }];
*
* _.meanBy(objects, function(o) { return o.n; });
* // => 5
*
* // The `_.property` iteratee shorthand.
* _.meanBy(objects, 'n');
* // => 5
*/
function meanBy(array, iteratee) {
return baseMean(array, getIteratee(iteratee, 2));
}
/**
* Computes the minimum value of `array`. If `array` is empty or falsey,
* `undefined` is returned.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Math
* @param {Array} array The array to iterate over.
* @returns {*} Returns the minimum value.
* @example
*
* _.min([4, 2, 8, 6]);
* // => 2
*
* _.min([]);
* // => undefined
*/
function min(array) {
return (array && array.length)
? baseExtremum(array, identity, baseLt)
: undefined;
}
/**
* This method is like `_.min` except that it accepts `iteratee` which is
* invoked for each element in `array` to generate the criterion by which
* the value is ranked. The iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Math
* @param {Array} array The array to iterate over.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {*} Returns the minimum value.
* @example
*
* var objects = [{ 'n': 1 }, { 'n': 2 }];
*
* _.minBy(objects, function(o) { return o.n; });
* // => { 'n': 1 }
*
* // The `_.property` iteratee shorthand.
* _.minBy(objects, 'n');
* // => { 'n': 1 }
*/
function minBy(array, iteratee) {
return (array && array.length)
? baseExtremum(array, getIteratee(iteratee, 2), baseLt)
: undefined;
}
/**
* Multiply two numbers.
*
* @static
* @memberOf _
* @since 4.7.0
* @category Math
* @param {number} multiplier The first number in a multiplication.
* @param {number} multiplicand The second number in a multiplication.
* @returns {number} Returns the product.
* @example
*
* _.multiply(6, 4);
* // => 24
*/
var multiply = createMathOperation(function(multiplier, multiplicand) {
return multiplier * multiplicand;
}, 1);
/**
* Computes `number` rounded to `precision`.
*
* @static
* @memberOf _
* @since 3.10.0
* @category Math
* @param {number} number The number to round.
* @param {number} [precision=0] The precision to round to.
* @returns {number} Returns the rounded number.
* @example
*
* _.round(4.006);
* // => 4
*
* _.round(4.006, 2);
* // => 4.01
*
* _.round(4060, -2);
* // => 4100
*/
var round = createRound('round');
/**
* Subtract two numbers.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Math
* @param {number} minuend The first number in a subtraction.
* @param {number} subtrahend The second number in a subtraction.
* @returns {number} Returns the difference.
* @example
*
* _.subtract(6, 4);
* // => 2
*/
var subtract = createMathOperation(function(minuend, subtrahend) {
return minuend - subtrahend;
}, 0);
/**
* Computes the sum of the values in `array`.
*
* @static
* @memberOf _
* @since 3.4.0
* @category Math
* @param {Array} array The array to iterate over.
* @returns {number} Returns the sum.
* @example
*
* _.sum([4, 2, 8, 6]);
* // => 20
*/
function sum(array) {
return (array && array.length)
? baseSum(array, identity)
: 0;
}
/**
* This method is like `_.sum` except that it accepts `iteratee` which is
* invoked for each element in `array` to generate the value to be summed.
* The iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Math
* @param {Array} array The array to iterate over.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {number} Returns the sum.
* @example
*
* var objects = [{ 'n': 4 }, { 'n': 2 }, { 'n': 8 }, { 'n': 6 }];
*
* _.sumBy(objects, function(o) { return o.n; });
* // => 20
*
* // The `_.property` iteratee shorthand.
* _.sumBy(objects, 'n');
* // => 20
*/
function sumBy(array, iteratee) {
return (array && array.length)
? baseSum(array, getIteratee(iteratee, 2))
: 0;
}
/*------------------------------------------------------------------------*/
// Add methods that return wrapped values in chain sequences.
lodash.after = after;
lodash.ary = ary;
lodash.assign = assign;
lodash.assignIn = assignIn;
lodash.assignInWith = assignInWith;
lodash.assignWith = assignWith;
lodash.at = at;
lodash.before = before;
lodash.bind = bind;
lodash.bindAll = bindAll;
lodash.bindKey = bindKey;
lodash.castArray = castArray;
lodash.chain = chain;
lodash.chunk = chunk;
lodash.compact = compact;
lodash.concat = concat;
lodash.cond = cond;
lodash.conforms = conforms;
lodash.constant = constant;
lodash.countBy = countBy;
lodash.create = create;
lodash.curry = curry;
lodash.curryRight = curryRight;
lodash.debounce = debounce;
lodash.defaults = defaults;
lodash.defaultsDeep = defaultsDeep;
lodash.defer = defer;
lodash.delay = delay;
lodash.difference = difference;
lodash.differenceBy = differenceBy;
lodash.differenceWith = differenceWith;
lodash.drop = drop;
lodash.dropRight = dropRight;
lodash.dropRightWhile = dropRightWhile;
lodash.dropWhile = dropWhile;
lodash.fill = fill;
lodash.filter = filter;
lodash.flatMap = flatMap;
lodash.flatMapDeep = flatMapDeep;
lodash.flatMapDepth = flatMapDepth;
lodash.flatten = flatten;
lodash.flattenDeep = flattenDeep;
lodash.flattenDepth = flattenDepth;
lodash.flip = flip;
lodash.flow = flow;
lodash.flowRight = flowRight;
lodash.fromPairs = fromPairs;
lodash.functions = functions;
lodash.functionsIn = functionsIn;
lodash.groupBy = groupBy;
lodash.initial = initial;
lodash.intersection = intersection;
lodash.intersectionBy = intersectionBy;
lodash.intersectionWith = intersectionWith;
lodash.invert = invert;
lodash.invertBy = invertBy;
lodash.invokeMap = invokeMap;
lodash.iteratee = iteratee;
lodash.keyBy = keyBy;
lodash.keys = keys;
lodash.keysIn = keysIn;
lodash.map = map;
lodash.mapKeys = mapKeys;
lodash.mapValues = mapValues;
lodash.matches = matches;
lodash.matchesProperty = matchesProperty;
lodash.memoize = memoize;
lodash.merge = merge;
lodash.mergeWith = mergeWith;
lodash.method = method;
lodash.methodOf = methodOf;
lodash.mixin = mixin;
lodash.negate = negate;
lodash.nthArg = nthArg;
lodash.omit = omit;
lodash.omitBy = omitBy;
lodash.once = once;
lodash.orderBy = orderBy;
lodash.over = over;
lodash.overArgs = overArgs;
lodash.overEvery = overEvery;
lodash.overSome = overSome;
lodash.partial = partial;
lodash.partialRight = partialRight;
lodash.partition = partition;
lodash.pick = pick;
lodash.pickBy = pickBy;
lodash.property = property;
lodash.propertyOf = propertyOf;
lodash.pull = pull;
lodash.pullAll = pullAll;
lodash.pullAllBy = pullAllBy;
lodash.pullAllWith = pullAllWith;
lodash.pullAt = pullAt;
lodash.range = range;
lodash.rangeRight = rangeRight;
lodash.rearg = rearg;
lodash.reject = reject;
lodash.remove = remove;
lodash.rest = rest;
lodash.reverse = reverse;
lodash.sampleSize = sampleSize;
lodash.set = set;
lodash.setWith = setWith;
lodash.shuffle = shuffle;
lodash.slice = slice;
lodash.sortBy = sortBy;
lodash.sortedUniq = sortedUniq;
lodash.sortedUniqBy = sortedUniqBy;
lodash.split = split;
lodash.spread = spread;
lodash.tail = tail;
lodash.take = take;
lodash.takeRight = takeRight;
lodash.takeRightWhile = takeRightWhile;
lodash.takeWhile = takeWhile;
lodash.tap = tap;
lodash.throttle = throttle;
lodash.thru = thru;
lodash.toArray = toArray;
lodash.toPairs = toPairs;
lodash.toPairsIn = toPairsIn;
lodash.toPath = toPath;
lodash.toPlainObject = toPlainObject;
lodash.transform = transform;
lodash.unary = unary;
lodash.union = union;
lodash.unionBy = unionBy;
lodash.unionWith = unionWith;
lodash.uniq = uniq;
lodash.uniqBy = uniqBy;
lodash.uniqWith = uniqWith;
lodash.unset = unset;
lodash.unzip = unzip;
lodash.unzipWith = unzipWith;
lodash.update = update;
lodash.updateWith = updateWith;
lodash.values = values;
lodash.valuesIn = valuesIn;
lodash.without = without;
lodash.words = words;
lodash.wrap = wrap;
lodash.xor = xor;
lodash.xorBy = xorBy;
lodash.xorWith = xorWith;
lodash.zip = zip;
lodash.zipObject = zipObject;
lodash.zipObjectDeep = zipObjectDeep;
lodash.zipWith = zipWith;
// Add aliases.
lodash.entries = toPairs;
lodash.entriesIn = toPairsIn;
lodash.extend = assignIn;
lodash.extendWith = assignInWith;
// Add methods to `lodash.prototype`.
mixin(lodash, lodash);
/*------------------------------------------------------------------------*/
// Add methods that return unwrapped values in chain sequences.
lodash.add = add;
lodash.attempt = attempt;
lodash.camelCase = camelCase;
lodash.capitalize = capitalize;
lodash.ceil = ceil;
lodash.clamp = clamp;
lodash.clone = clone;
lodash.cloneDeep = cloneDeep;
lodash.cloneDeepWith = cloneDeepWith;
lodash.cloneWith = cloneWith;
lodash.conformsTo = conformsTo;
lodash.deburr = deburr;
lodash.defaultTo = defaultTo;
lodash.divide = divide;
lodash.endsWith = endsWith;
lodash.eq = eq;
lodash.escape = escape;
lodash.escapeRegExp = escapeRegExp;
lodash.every = every;
lodash.find = find;
lodash.findIndex = findIndex;
lodash.findKey = findKey;
lodash.findLast = findLast;
lodash.findLastIndex = findLastIndex;
lodash.findLastKey = findLastKey;
lodash.floor = floor;
lodash.forEach = forEach;
lodash.forEachRight = forEachRight;
lodash.forIn = forIn;
lodash.forInRight = forInRight;
lodash.forOwn = forOwn;
lodash.forOwnRight = forOwnRight;
lodash.get = get;
lodash.gt = gt;
lodash.gte = gte;
lodash.has = has;
lodash.hasIn = hasIn;
lodash.head = head;
lodash.identity = identity;
lodash.includes = includes;
lodash.indexOf = indexOf;
lodash.inRange = inRange;
lodash.invoke = invoke;
lodash.isArguments = isArguments;
lodash.isArray = isArray;
lodash.isArrayBuffer = isArrayBuffer;
lodash.isArrayLike = isArrayLike;
lodash.isArrayLikeObject = isArrayLikeObject;
lodash.isBoolean = isBoolean;
lodash.isBuffer = isBuffer;
lodash.isDate = isDate;
lodash.isElement = isElement;
lodash.isEmpty = isEmpty;
lodash.isEqual = isEqual;
lodash.isEqualWith = isEqualWith;
lodash.isError = isError;
lodash.isFinite = isFinite;
lodash.isFunction = isFunction;
lodash.isInteger = isInteger;
lodash.isLength = isLength;
lodash.isMap = isMap;
lodash.isMatch = isMatch;
lodash.isMatchWith = isMatchWith;
lodash.isNaN = isNaN;
lodash.isNative = isNative;
lodash.isNil = isNil;
lodash.isNull = isNull;
lodash.isNumber = isNumber;
lodash.isObject = isObject;
lodash.isObjectLike = isObjectLike;
lodash.isPlainObject = isPlainObject;
lodash.isRegExp = isRegExp;
lodash.isSafeInteger = isSafeInteger;
lodash.isSet = isSet;
lodash.isString = isString;
lodash.isSymbol = isSymbol;
lodash.isTypedArray = isTypedArray;
lodash.isUndefined = isUndefined;
lodash.isWeakMap = isWeakMap;
lodash.isWeakSet = isWeakSet;
lodash.join = join;
lodash.kebabCase = kebabCase;
lodash.last = last;
lodash.lastIndexOf = lastIndexOf;
lodash.lowerCase = lowerCase;
lodash.lowerFirst = lowerFirst;
lodash.lt = lt;
lodash.lte = lte;
lodash.max = max;
lodash.maxBy = maxBy;
lodash.mean = mean;
lodash.meanBy = meanBy;
lodash.min = min;
lodash.minBy = minBy;
lodash.stubArray = stubArray;
lodash.stubFalse = stubFalse;
lodash.stubObject = stubObject;
lodash.stubString = stubString;
lodash.stubTrue = stubTrue;
lodash.multiply = multiply;
lodash.nth = nth;
lodash.noConflict = noConflict;
lodash.noop = noop;
lodash.now = now;
lodash.pad = pad;
lodash.padEnd = padEnd;
lodash.padStart = padStart;
lodash.parseInt = parseInt;
lodash.random = random;
lodash.reduce = reduce;
lodash.reduceRight = reduceRight;
lodash.repeat = repeat;
lodash.replace = replace;
lodash.result = result;
lodash.round = round;
lodash.runInContext = runInContext;
lodash.sample = sample;
lodash.size = size;
lodash.snakeCase = snakeCase;
lodash.some = some;
lodash.sortedIndex = sortedIndex;
lodash.sortedIndexBy = sortedIndexBy;
lodash.sortedIndexOf = sortedIndexOf;
lodash.sortedLastIndex = sortedLastIndex;
lodash.sortedLastIndexBy = sortedLastIndexBy;
lodash.sortedLastIndexOf = sortedLastIndexOf;
lodash.startCase = startCase;
lodash.startsWith = startsWith;
lodash.subtract = subtract;
lodash.sum = sum;
lodash.sumBy = sumBy;
lodash.template = template;
lodash.times = times;
lodash.toFinite = toFinite;
lodash.toInteger = toInteger;
lodash.toLength = toLength;
lodash.toLower = toLower;
lodash.toNumber = toNumber;
lodash.toSafeInteger = toSafeInteger;
lodash.toString = toString;
lodash.toUpper = toUpper;
lodash.trim = trim;
lodash.trimEnd = trimEnd;
lodash.trimStart = trimStart;
lodash.truncate = truncate;
lodash.unescape = unescape;
lodash.uniqueId = uniqueId;
lodash.upperCase = upperCase;
lodash.upperFirst = upperFirst;
// Add aliases.
lodash.each = forEach;
lodash.eachRight = forEachRight;
lodash.first = head;
mixin(lodash, (function() {
var source = {};
baseForOwn(lodash, function(func, methodName) {
if (!hasOwnProperty.call(lodash.prototype, methodName)) {
source[methodName] = func;
}
});
return source;
}()), { 'chain': false });
/*------------------------------------------------------------------------*/
/**
* The semantic version number.
*
* @static
* @memberOf _
* @type {string}
*/
lodash.VERSION = VERSION;
// Assign default placeholders.
arrayEach(['bind', 'bindKey', 'curry', 'curryRight', 'partial', 'partialRight'], function(methodName) {
lodash[methodName].placeholder = lodash;
});
// Add `LazyWrapper` methods for `_.drop` and `_.take` variants.
arrayEach(['drop', 'take'], function(methodName, index) {
LazyWrapper.prototype[methodName] = function(n) {
n = n === undefined ? 1 : nativeMax(toInteger(n), 0);
var result = (this.__filtered__ && !index)
? new LazyWrapper(this)
: this.clone();
if (result.__filtered__) {
result.__takeCount__ = nativeMin(n, result.__takeCount__);
} else {
result.__views__.push({
'size': nativeMin(n, MAX_ARRAY_LENGTH),
'type': methodName + (result.__dir__ < 0 ? 'Right' : '')
});
}
return result;
};
LazyWrapper.prototype[methodName + 'Right'] = function(n) {
return this.reverse()[methodName](n).reverse();
};
});
// Add `LazyWrapper` methods that accept an `iteratee` value.
arrayEach(['filter', 'map', 'takeWhile'], function(methodName, index) {
var type = index + 1,
isFilter = type == LAZY_FILTER_FLAG || type == LAZY_WHILE_FLAG;
LazyWrapper.prototype[methodName] = function(iteratee) {
var result = this.clone();
result.__iteratees__.push({
'iteratee': getIteratee(iteratee, 3),
'type': type
});
result.__filtered__ = result.__filtered__ || isFilter;
return result;
};
});
// Add `LazyWrapper` methods for `_.head` and `_.last`.
arrayEach(['head', 'last'], function(methodName, index) {
var takeName = 'take' + (index ? 'Right' : '');
LazyWrapper.prototype[methodName] = function() {
return this[takeName](1).value()[0];
};
});
// Add `LazyWrapper` methods for `_.initial` and `_.tail`.
arrayEach(['initial', 'tail'], function(methodName, index) {
var dropName = 'drop' + (index ? '' : 'Right');
LazyWrapper.prototype[methodName] = function() {
return this.__filtered__ ? new LazyWrapper(this) : this[dropName](1);
};
});
LazyWrapper.prototype.compact = function() {
return this.filter(identity);
};
LazyWrapper.prototype.find = function(predicate) {
return this.filter(predicate).head();
};
LazyWrapper.prototype.findLast = function(predicate) {
return this.reverse().find(predicate);
};
LazyWrapper.prototype.invokeMap = baseRest(function(path, args) {
if (typeof path == 'function') {
return new LazyWrapper(this);
}
return this.map(function(value) {
return baseInvoke(value, path, args);
});
});
LazyWrapper.prototype.reject = function(predicate) {
return this.filter(negate(getIteratee(predicate)));
};
LazyWrapper.prototype.slice = function(start, end) {
start = toInteger(start);
var result = this;
if (result.__filtered__ && (start > 0 || end < 0)) {
return new LazyWrapper(result);
}
if (start < 0) {
result = result.takeRight(-start);
} else if (start) {
result = result.drop(start);
}
if (end !== undefined) {
end = toInteger(end);
result = end < 0 ? result.dropRight(-end) : result.take(end - start);
}
return result;
};
LazyWrapper.prototype.takeRightWhile = function(predicate) {
return this.reverse().takeWhile(predicate).reverse();
};
LazyWrapper.prototype.toArray = function() {
return this.take(MAX_ARRAY_LENGTH);
};
// Add `LazyWrapper` methods to `lodash.prototype`.
baseForOwn(LazyWrapper.prototype, function(func, methodName) {
var checkIteratee = /^(?:filter|find|map|reject)|While$/.test(methodName),
isTaker = /^(?:head|last)$/.test(methodName),
lodashFunc = lodash[isTaker ? ('take' + (methodName == 'last' ? 'Right' : '')) : methodName],
retUnwrapped = isTaker || /^find/.test(methodName);
if (!lodashFunc) {
return;
}
lodash.prototype[methodName] = function() {
var value = this.__wrapped__,
args = isTaker ? [1] : arguments,
isLazy = value instanceof LazyWrapper,
iteratee = args[0],
useLazy = isLazy || isArray(value);
var interceptor = function(value) {
var result = lodashFunc.apply(lodash, arrayPush([value], args));
return (isTaker && chainAll) ? result[0] : result;
};
if (useLazy && checkIteratee && typeof iteratee == 'function' && iteratee.length != 1) {
// Avoid lazy use if the iteratee has a "length" value other than `1`.
isLazy = useLazy = false;
}
var chainAll = this.__chain__,
isHybrid = !!this.__actions__.length,
isUnwrapped = retUnwrapped && !chainAll,
onlyLazy = isLazy && !isHybrid;
if (!retUnwrapped && useLazy) {
value = onlyLazy ? value : new LazyWrapper(this);
var result = func.apply(value, args);
result.__actions__.push({ 'func': thru, 'args': [interceptor], 'thisArg': undefined });
return new LodashWrapper(result, chainAll);
}
if (isUnwrapped && onlyLazy) {
return func.apply(this, args);
}
result = this.thru(interceptor);
return isUnwrapped ? (isTaker ? result.value()[0] : result.value()) : result;
};
});
// Add `Array` methods to `lodash.prototype`.
arrayEach(['pop', 'push', 'shift', 'sort', 'splice', 'unshift'], function(methodName) {
var func = arrayProto[methodName],
chainName = /^(?:push|sort|unshift)$/.test(methodName) ? 'tap' : 'thru',
retUnwrapped = /^(?:pop|shift)$/.test(methodName);
lodash.prototype[methodName] = function() {
var args = arguments;
if (retUnwrapped && !this.__chain__) {
var value = this.value();
return func.apply(isArray(value) ? value : [], args);
}
return this[chainName](function(value) {
return func.apply(isArray(value) ? value : [], args);
});
};
});
// Map minified method names to their real names.
baseForOwn(LazyWrapper.prototype, function(func, methodName) {
var lodashFunc = lodash[methodName];
if (lodashFunc) {
var key = lodashFunc.name + '';
if (!hasOwnProperty.call(realNames, key)) {
realNames[key] = [];
}
realNames[key].push({ 'name': methodName, 'func': lodashFunc });
}
});
realNames[createHybrid(undefined, WRAP_BIND_KEY_FLAG).name] = [{
'name': 'wrapper',
'func': undefined
}];
// Add methods to `LazyWrapper`.
LazyWrapper.prototype.clone = lazyClone;
LazyWrapper.prototype.reverse = lazyReverse;
LazyWrapper.prototype.value = lazyValue;
// Add chain sequence methods to the `lodash` wrapper.
lodash.prototype.at = wrapperAt;
lodash.prototype.chain = wrapperChain;
lodash.prototype.commit = wrapperCommit;
lodash.prototype.next = wrapperNext;
lodash.prototype.plant = wrapperPlant;
lodash.prototype.reverse = wrapperReverse;
lodash.prototype.toJSON = lodash.prototype.valueOf = lodash.prototype.value = wrapperValue;
// Add lazy aliases.
lodash.prototype.first = lodash.prototype.head;
if (symIterator) {
lodash.prototype[symIterator] = wrapperToIterator;
}
return lodash;
});
/*--------------------------------------------------------------------------*/
// Export lodash.
var _ = runInContext();
// Some AMD build optimizers, like r.js, check for condition patterns like:
if (typeof define == 'function' && typeof define.amd == 'object' && define.amd) {
// Expose Lodash on the global object to prevent errors when Lodash is
// loaded by a script tag in the presence of an AMD loader.
// See http://requirejs.org/docs/errors.html#mismatch for more details.
// Use `_.noConflict` to remove Lodash from the global object.
root._ = _;
// Define as an anonymous module so, through path mapping, it can be
// referenced as the "underscore" module.
define(function() {
return _;
});
}
// Check for `exports` after `define` in case a build optimizer adds it.
else if (freeModule) {
// Export for Node.js.
(freeModule.exports = _)._ = _;
// Export for CommonJS support.
freeExports._ = _;
}
else {
// Export to the global object.
root._ = _;
}
}.call(this)); | javascript | github | https://github.com/lodash/lodash | dist/lodash.js |
@import 'tailwindcss'; | css | github | https://github.com/tailwindlabs/tailwindcss | playgrounds/nextjs/app/globals.css |
# Copyright (C) 2013-2015 Samuel Damashek, Peter Foley, James Forcier, Srijay Kasturi, Reed Koser, Christopher Reffett, and Fox Wilson
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from time import time
from datetime import timedelta
from ..helpers.orm import Stopwatches
from ..helpers import arguments
from ..helpers.command import Command
def create_stopwatch(args):
    """Insert a new stopwatch row started at the current time and report its ID."""
    watch = Stopwatches(time=time())
    args.session.add(watch)
    # Flush so the database assigns the primary key before we format it.
    args.session.flush()
    return "Created new stopwatch with ID %d" % watch.id
def get_elapsed(session, sw):
    """Return the total elapsed time of stopwatch *sw* as an H:MM:SS string.

    Args:
        session: SQLAlchemy session used to look up the stopwatch.
        sw: primary key of the stopwatch row.

    Returns:
        str: formatted total elapsed time, or an error message if *sw*
        does not exist.

    Fix: a running stopwatch's total is the banked ``elapsed`` time from
    earlier run periods *plus* the time since it was last (re)started.
    Previously only the current run period was counted, so any time
    accumulated before a pause/resume cycle was silently dropped
    (``resume_stopwatch`` resets ``time`` but intentionally keeps
    ``elapsed``).
    """
    stopwatch = session.query(Stopwatches).get(sw)
    if stopwatch is None:
        return "No stopwatch exists with that ID!"
    etime = stopwatch.elapsed
    if stopwatch.active == 1:
        # Add the in-progress run period on top of the banked time.
        etime += time() - stopwatch.time
    return str(timedelta(seconds=etime))
def stop_stopwatch(args):
    """Stop a running stopwatch, banking its elapsed time.

    Args:
        args: parsed command namespace carrying ``session`` and ``id``.

    Returns:
        str: confirmation with the final elapsed time, or an error message.

    Fix: the original assigned ``etime = stopwatch.elapsed`` and then
    immediately overwrote it with ``time() - stopwatch.time`` (a dead
    store), discarding time banked by earlier pause/resume cycles. The
    current run period must be *added* to the previously banked total.
    """
    stopwatch = args.session.query(Stopwatches).get(args.id)
    if stopwatch is None:
        return "No stopwatch exists with that ID!"
    if stopwatch.active == 0:
        return "That stopwatch is already stopped!"
    # Bank the current run period on top of any previously accumulated time.
    stopwatch.elapsed = stopwatch.elapsed + (time() - stopwatch.time)
    stopwatch.active = 0
    return "Stopwatch stopped at %s" % get_elapsed(args.session, args.id)
def delete_stopwatch(args):
    """Delete a paused stopwatch; refused for non-admins and running watches."""
    if not args.isadmin:
        return "Nope, not gonna do it!"
    watch = args.session.query(Stopwatches).get(args.id)
    if watch is None:
        return "No stopwatch exists with that ID!"
    if watch.active == 1:
        return "That stopwatch is currently running!"
    args.session.delete(watch)
    return "Stopwatch deleted!"
def resume_stopwatch(args):
    """Restart a paused stopwatch, beginning a new run period."""
    watch = args.session.query(Stopwatches).get(args.id)
    if watch is None:
        return "No stopwatch exists with that ID!"
    if watch.active == 1:
        return "That stopwatch is not paused!"
    # Mark it running and restart the clock; banked `elapsed` is preserved.
    watch.active = 1
    watch.time = time()
    return "Stopwatch resumed!"
def list_stopwatch(args):
    """Privately message the caller every stopwatch, then return a summary."""
    query = args.session.query(Stopwatches)
    running = query.filter(Stopwatches.active == 1).order_by(Stopwatches.id).all()
    stopped = query.filter(Stopwatches.active == 0).order_by(Stopwatches.id).all()
    for watch in running:
        args.send('Active stopwatch #%d started at %d' % (watch.id, watch.time), target=args.nick)
    for watch in stopped:
        args.send('Paused stopwatch #%d started at %d time elapsed %d' % (watch.id, watch.time, watch.elapsed), target=args.nick)
    return "%d active and %d paused stopwatches." % (len(running), len(stopped))
def get_stopwatch(args):
    """Describe one stopwatch: its run state plus its elapsed time."""
    watch = args.session.query(Stopwatches).get(args.id)
    if watch is None:
        return "Invalid ID!"
    if watch.active == 1:
        status = "Active"
    else:
        status = "Paused"
    return "%s %s" % (status, get_elapsed(args.session, args.id))
@Command(['stopwatch', 'sw'], ['config', 'db', 'is_admin', 'nick'])
def cmd(send, msg, args):
    """Start/stops/resume/get stopwatch
    Syntax: {command} <start|stop|resume|delete|get|list>
    """
    # Build the sub-command parser; every handler receives the db session.
    parser = arguments.ArgParser(args['config'])
    parser.set_defaults(session=args['db'])
    commands = parser.add_subparsers()
    commands.add_parser('start').set_defaults(func=create_stopwatch)
    # `stop` and `resume` share an identical shape: a single integer id.
    for name, handler in (('stop', stop_stopwatch), ('resume', resume_stopwatch)):
        sub = commands.add_parser(name)
        sub.add_argument('id', type=int)
        sub.set_defaults(func=handler)
    deleter = commands.add_parser('delete')
    deleter.add_argument('id', type=int)
    # Resolve admin status once, at parse-definition time.
    deleter.set_defaults(func=delete_stopwatch, isadmin=args['is_admin'](args['nick']))
    getter = commands.add_parser('get')
    getter.add_argument('id', type=int)
    getter.set_defaults(func=get_stopwatch)
    commands.add_parser('list').set_defaults(func=list_stopwatch, nick=args['nick'], send=send)
    try:
        cmdargs = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    send(cmdargs.func(cmdargs))
# -*- coding: utf-8 -*-
#
# OctoPrint documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 02 17:08:50 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../src/'))
sys.path.append(os.path.abspath('sphinxext'))
import octoprint._version
from datetime import date
year_since = 2013
year_current = date.today().year
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['codeblockext', 'onlineinclude', 'sphinx.ext.todo', 'sphinx.ext.autodoc', 'sphinxcontrib.httpdomain',
'sphinx.ext.napoleon']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OctoPrint'
copyright = u'%d-%d, Gina Häußge' % (year_since, year_current) if year_current > year_since else u'%d, Gina Häußge' % year_since
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = octoprint._version.get_versions()["version"]
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
numfig = True
# -- Options for HTML output ---------------------------------------------------
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
def setup(app):
    # Sphinx extension hook: layer our CSS tweaks on top of the theme's
    # stylesheets (file lives in html_static_path, i.e. _static/).
    # NOTE(review): `add_stylesheet` is deprecated in newer Sphinx in favour
    # of `add_css_file` — confirm the minimum supported Sphinx version
    # (needs_sphinx = '1.3' above) before switching.
    app.add_stylesheet("theme_overrides.css")
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OctoPrintdoc'
# -- Options for LaTeX output --------------------------------------------------
#latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# NOTE(review): fragment of a Sphinx ``conf.py``; the commented-out entries
# below are the stock Sphinx template defaults, kept for reference.
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
#}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
#latex_documents = [
#  ('index', 'OctoPrint.tex', u'OctoPrint Documentation',
#   u'Gina Häußge', 'manual'),
#]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'octoprint', u'OctoPrint Documentation',
     [u'Gina Häußge'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'OctoPrint', u'OctoPrint Documentation',
     u'Gina Häußge', 'OctoPrint', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Tauri MacOS Sign
Utilities for setting up macOS certificates, code signing and notarization for macOS and iOS apps. | unknown | github | https://github.com/tauri-apps/tauri | crates/tauri-macos-sign/README.md |
# -*- coding:utf-8 -*-
"""
Description:
MemoryStream
Usage:
from neo.IO.MemoryStream import MemoryStream
"""
from io import BytesIO
from binascii import hexlify
# Module-level pools shared by every StreamManager user: all streams ever
# created, and the subset currently released and available for reuse.
__mstreams__ = []
__mstreams_available__ = []


class StreamManager(object):
    """Pool of reusable ``MemoryStream`` objects.

    Streams handed out via :meth:`GetStream` should be returned with
    :meth:`ReleaseStream` so they can be recycled instead of reallocated.
    """

    @staticmethod
    def TotalBuffers():
        """
        Get the total number of buffers stored in the StreamManager.

        Returns:
            int:
        """
        return len(__mstreams__)

    @staticmethod
    def GetStream(data=None):
        """
        Get a MemoryStream instance, recycling a released one when possible.

        Args:
            data (bytes, bytearray, BytesIO): (Optional) data to create the stream from.

        Returns:
            MemoryStream: instance.
        """
        # Fast path: recycle a previously released stream.
        if __mstreams_available__:
            stream = __mstreams_available__.pop()
            if data is not None and len(data):
                stream.Cleanup()
                stream.write(data)
                stream.seek(0)
            return stream

        # No recycled stream available -> create a fresh one and register it.
        if data:
            stream = MemoryStream(data)
            stream.seek(0)
        else:
            stream = MemoryStream()
        __mstreams__.append(stream)
        return stream

    @staticmethod
    def ReleaseStream(mstream):
        """
        Release the memory stream back into the pool for reuse.

        Args:
            mstream (MemoryStream): instance.
        """
        mstream.Cleanup()
        __mstreams_available__.append(mstream)
class MemoryStream(BytesIO):
    """In-memory binary stream used for (de)serialization.

    Thin wrapper around ``io.BytesIO`` adding C#-style capability queries
    (``canRead``/``canSeek``/``canWrite``), a hex-dump helper and a
    ``Cleanup`` method used by ``StreamManager`` to recycle instances.
    """

    def __init__(self, *args, **kwargs):
        """
        Create an instance.

        Args:
            *args: forwarded to ``BytesIO``.
            **kwargs: forwarded to ``BytesIO``.
        """
        super(MemoryStream, self).__init__(*args, **kwargs)

    def canRead(self):
        """
        Get readable status.

        Returns:
            bool: True if the stream can be read from. False otherwise.
        """
        return self.readable()

    def canSeek(self):
        """
        Get random access support status.

        Returns:
            bool: True if random access is supported. False otherwise.
        """
        # BUG FIX: previously returned the bound method object
        # ``self.seekable`` (always truthy) instead of calling it, so the
        # result was never a real bool and could never be False.
        return self.seekable()

    def canWrite(self):
        """
        Get writeable status.

        Returns:
            bool: True if the stream is writeable. False otherwise.
        """
        return self.writable()

    def ToArray(self):
        """
        Hexlify the stream data.

        Returns:
            bytes: b"" object containing the data.
        """
        return hexlify(self.getvalue())

    def Cleanup(self):
        """
        Cleanup the stream by truncating it to size 0.
        """
        self.seek(0)
        self.truncate(0)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2014 Timothy N. Tsvetkov (email: timothy.tsvetkov@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
import argparse
import os
import re
import codecs
import progressbar
def build_money_regex(amount='amount', currency='currency'):
    """Build a regex fragment matching a money token such as ``$1,234.56``.

    The fragment captures an optional non-digit currency marker followed by
    the numeric amount, under the given group names.

    :param amount: group name for the numeric amount.
    :param currency: group name for the currency marker.
    :return: the regex fragment as a raw string.
    """
    currency_part = r'(?P<{0}>[^\d]*)'.format(currency)
    amount_part = r'(?P<{0}>(\d*,?\d*)*\.?\d+)'.format(amount)
    return currency_part + r'\s*' + amount_part
# Matches the first line of a PokerStars hand history, capturing the game id,
# optional sub-type (Home Game / Zoom), optional club id, game type, blinds
# (sb/bb with their currency markers), table currency and date.
# The literal braces of "{Club #N}" are injected via str.format (o_club/c_club)
# because '{'/'}' would otherwise be eaten by format itself.
HEADER_REGEX = re.compile(
    r'PokerStars\s+(?P<game_sub_type>Home Game|Zoom)?\s*(Hand|Game)\s+'
    r'\#(?P<game_id>\d+)\:\s+'
    r'(\"\{o_club}Club \#(?P<club_id>\d+)\{c_club})?\s*'
    r'(?P<game_type>.*)\s+'
    r'\({sb}/{bb}\s*(?P<currency>\w+)?\)\s+'
    r'-\s+'
    r'(?P<date>.*)'.
    format(
        o_club='{',
        c_club='}',
        sb=build_money_regex('sb','sb_c'),
        bb=build_money_regex('bb','bb_c')),
    re.UNICODE
)
# Matches a seat line, e.g. "Seat 3: somePlayer ($1.50 in chips)", capturing
# the seat number, player name and stack size.
SEAT_REGEX = re.compile(
    r'Seat\s+(?P<seat>\d+)\s*\:\s*(?P<player>.*)\s+\({stack}.*\)'.
    format(stack=build_money_regex('stack')),
    re.UNICODE
)
def open_out(out_name, out_ext, batch, ix):
    """Open the next UTF-8 output file for writing.

    When batching is enabled (``batch`` is a positive int) the batch index
    ``ix`` is woven into the file name as ``<name>_<ix><ext>``; otherwise a
    single ``<name><ext>`` file is used.
    """
    if batch is not None and batch > 0:
        target = "%s_%s%s" % (out_name, ix, out_ext)
    else:
        target = out_name + out_ext
    return codecs.open(target, 'wb', encoding='utf-8', errors='strict')
def filter_hands(hand_files, player, out_name, out_ext, batch):
    """Scan hand-history files and write hands containing *player* to disk.

    Implemented as a small line-driven state machine: a hand starts at a
    line matching HEADER_REGEX, seat lines are scanned for *player*, and an
    empty line terminates the hand. Matching hands are written via
    ``open_out``, optionally split into files of ``batch`` hands each.

    Returns a ``(total_hands, hands_filtered)`` tuple.
    """
    total_hands, hands_filtered = 0, 0
    batch_counter, batch_ix = 0, 0
    out = open_out(out_name, out_ext, batch, batch_ix)
    for fname in hand_files:
        # Per-file state machine flags.
        in_hand = False
        in_seats = False
        after_seats = False
        found = False
        hand = u''
        f = codecs.open(fname, 'rb', encoding='utf-8', errors='replace')
        # readline(1000) caps line length, guarding against pathological files.
        line = f.readline(1000)
        while len(line) > 0:
            if in_hand:
                # Reading hand
                if len(line.strip()) > 0:
                    # Not empty line, so we're still in hand
                    if not after_seats and not found:
                        # Haven't found player and haven't finished reading seats
                        match = SEAT_REGEX.match(line)
                        if match is not None:
                            in_seats = True
                            found = player == match.group('player')
                        else:
                            # First non-seat line after seat lines ends the
                            # seat section; stop looking for the player.
                            after_seats = in_seats
                            in_seats = False
                    hand += line.rstrip() + u"\n"
                else:
                    # Empty line, finishing reading current hand
                    if found:
                        print >>out, hand
                        hands_filtered += 1
                        batch_counter += 1
                    in_hand = False
                    in_seats = False
                    after_seats = False
                    found = False
                    hand = u''
            else:
                # Waiting for a new hand
                match = HEADER_REGEX.match(line)
                in_hand = match is not None
                if in_hand:
                    # Rotate to a new output file once the batch is full.
                    if batch is not None and batch > 0 and batch_counter >= batch:
                        batch_ix += 1
                        batch_counter = 0
                        out.close()
                        out = open_out(out_name, out_ext, batch, batch_ix)
                    hand += u"Found in file: %s\n" % os.path.abspath(fname)
                    hand += line.rstrip() + u"\n"
                    total_hands += 1
            line = f.readline(1000)
        # NOTE(review): a hand terminated by EOF rather than a blank line is
        # silently dropped even if the player was found — confirm intended.
        f.close()
    out.close()
    return total_hands, hands_filtered
def main():
    """Command-line entry point: parse arguments, collect input files and
    run :func:`filter_hands`, optionally with a progress bar."""
    parser = argparse.ArgumentParser(
        prog='filter_hands_by_player',
        description='Filters hands by player. Hands in file must be divided by an empty line.'
    )
    parser.add_argument('player', metavar='player', type=str, help='Player to filter by')
    parser.add_argument('-d', '--dir', type=str, dest='dir', help='directory with hand history files')
    parser.add_argument('-f', '--file', type=str, dest='file', help='hand history file')
    parser.add_argument('-o', type=str, dest='out', default='out.txt',
                        help='file name to print found hands (default: out.txt)')
    parser.add_argument('-b', '--batch', type=int, dest='batch', help='save by BATCH hands per file')
    parser.add_argument('--no-progressbar', action="store_true", dest='no_progressbar',
                        help='don\'t print a progressbar')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0.0')
    args = parser.parse_args()
    # Build the list of input files: either a whole directory tree or a
    # single file; bail out with usage info if neither was given.
    files = []
    if args.dir is not None:
        for root, dirs, fs in os.walk(args.dir):
            files += [os.path.join(root, f) for f in fs]
    elif args.file is not None:
        files = [args.file]
    else:
        parser.print_help()
        exit(1)
    # Ensure the output directory exists and split the output path into the
    # (name, extension) pair expected by open_out().
    out_dir = os.path.dirname(args.out)
    if len(out_dir) > 0 and not os.path.exists(out_dir):
        os.makedirs(out_dir)
    basename = os.path.basename(args.out)
    fname, out_ext = os.path.splitext(basename)
    out_name = os.path.join(out_dir, fname)
    if not args.no_progressbar:
        widgets = [
            progressbar.Percentage(),
            ' ',
            progressbar.Bar(),
            ' ',
            progressbar.ETA()
        ]
        files_iter = progressbar.ProgressBar(widgets=widgets)(files)
    else:
        files_iter = files
    t0 = time.time()
    # Python 2: decode the byte-string CLI argument so player names with
    # non-ASCII characters compare correctly against the decoded file data.
    player = args.player.decode('utf-8')
    total_hands, hands_filtered = filter_hands(files_iter, player, out_name, out_ext, args.batch)
    t1 = time.time()
    print
    print "Filtered %d from %d hands" % (hands_filtered, total_hands)
    print "Total time: %f sec." % (t1 - t0)
    print
if __name__ == "__main__":
    main()
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.swob import HTTPBadRequest, HTTPUnauthorized, wsgify
from swift.common.utils import (
config_true_value, get_logger, register_swift_info, streq_const_time)
from swift.proxy.controllers.base import get_container_info
class ContainerSync(object):
    """
    WSGI middleware that validates an incoming container sync request
    using the container-sync-realms.conf style of container sync.
    """

    def __init__(self, app, conf, logger=None):
        # app: the next WSGI application in the pipeline.
        # conf: middleware configuration dict.
        self.app = app
        self.conf = conf
        self.logger = logger or get_logger(conf, log_route='container_sync')
        # Realm/cluster definitions and shared keys come from
        # container-sync-realms.conf under the swift config directory.
        self.realms_conf = ContainerSyncRealms(
            os.path.join(
                conf.get('swift_dir', '/etc/swift'),
                'container-sync-realms.conf'),
            self.logger)
        self.allow_full_urls = config_true_value(
            conf.get('allow_full_urls', 'true'))
        # configure current realm/cluster for /info
        self.realm = self.cluster = None
        current = conf.get('current', None)
        if current:
            try:
                # "current" is expected in //REALM/CLUSTER form.
                self.realm, self.cluster = (p.upper() for p in
                                            current.strip('/').split('/'))
            except ValueError:
                self.logger.error('Invalid current //REALM/CLUSTER (%s)',
                                  current)
        self.register_info()

    def register_info(self):
        # Publish the realm/cluster layout under the 'container_sync' key of
        # the /info endpoint, marking this cluster as 'current' if configured.
        dct = {}
        for realm in self.realms_conf.realms():
            clusters = self.realms_conf.clusters(realm)
            if clusters:
                dct[realm] = {'clusters': dict((c, {}) for c in clusters)}
        if self.realm and self.cluster:
            try:
                dct[self.realm]['clusters'][self.cluster]['current'] = True
            except KeyError:
                self.logger.error('Unknown current //REALM/CLUSTER (%s)',
                                  '//%s/%s' % (self.realm, self.cluster))
        register_swift_info('container_sync', realms=dct)

    @wsgify
    def __call__(self, req):
        # Optionally reject legacy full-URL sync targets so only realm-style
        # //realm/cluster/account/container values are accepted.
        if not self.allow_full_urls:
            sync_to = req.headers.get('x-container-sync-to')
            if sync_to and not sync_to.startswith('//'):
                raise HTTPBadRequest(
                    body='Full URLs are not allowed for X-Container-Sync-To '
                         'values. Only realm values of the format '
                         '//realm/cluster/account/container are allowed.\n',
                    request=req)
        # If the request carries sync auth, validate "<realm> <nonce> <sig>"
        # against both the realm key(s) and the container's sync key.  Each
        # failure mode appends a short tag to swift.log_info for diagnosis.
        auth = req.headers.get('x-container-sync-auth')
        if auth:
            valid = False
            auth = auth.split()
            if len(auth) != 3:
                req.environ.setdefault('swift.log_info', []).append(
                    'cs:not-3-args')
            else:
                realm, nonce, sig = auth
                realm_key = self.realms_conf.key(realm)
                realm_key2 = self.realms_conf.key2(realm)
                if not realm_key:
                    req.environ.setdefault('swift.log_info', []).append(
                        'cs:no-local-realm-key')
                else:
                    info = get_container_info(
                        req.environ, self.app, swift_source='CS')
                    user_key = info.get('sync_key')
                    if not user_key:
                        req.environ.setdefault('swift.log_info', []).append(
                            'cs:no-local-user-key')
                    else:
                        # key2 allows seamless key rotation: a signature made
                        # with either key is accepted.  Comparisons use
                        # constant-time equality to avoid timing attacks.
                        expected = self.realms_conf.get_sig(
                            req.method, req.path,
                            req.headers.get('x-timestamp', '0'), nonce,
                            realm_key, user_key)
                        expected2 = self.realms_conf.get_sig(
                            req.method, req.path,
                            req.headers.get('x-timestamp', '0'), nonce,
                            realm_key2, user_key) if realm_key2 else expected
                        if not streq_const_time(sig, expected) and \
                                not streq_const_time(sig, expected2):
                            req.environ.setdefault(
                                'swift.log_info', []).append('cs:invalid-sig')
                        else:
                            req.environ.setdefault(
                                'swift.log_info', []).append('cs:valid')
                            valid = True
            if not valid:
                exc = HTTPUnauthorized(
                    body='X-Container-Sync-Auth header not valid; '
                         'contact cluster operator for support.',
                    headers={'content-type': 'text/plain'},
                    request=req)
                exc.headers['www-authenticate'] = ' '.join([
                    'SwiftContainerSync',
                    exc.www_authenticate().split(None, 1)[1]])
                raise exc
            else:
                # Valid signature: bypass the normal auth middleware.
                req.environ['swift.authorize_override'] = True
        if req.path == '/info':
            # Ensure /info requests get the freshest results
            self.register_info()
        return self.app
def filter_factory(global_conf, **local_conf):
    """Standard paste.deploy filter factory for the container_sync middleware.

    Merges local settings over the global ones and returns a callable that
    wraps the next WSGI app in a :class:`ContainerSync` instance.
    """
    conf = dict(global_conf)
    conf.update(local_conf)

    register_swift_info('container_sync')

    def cache_filter(app):
        return ContainerSync(app, conf)

    return cache_filter
# Missing Token
There is no injection token for a constructor parameter at compile time. [InjectionTokens](api/core/InjectionToken) are tokens that can be used in a Dependency Injection Provider.
## Debugging the error
Look at the parameter that throws the error, and all uses of the class.
This error is commonly thrown when a constructor defines parameters with primitive types such as `string`, `number`, `boolean`, and `Object`.
Use the `@Injectable` method or `@Inject` decorator from `@angular/core` to ensure that the type you are injecting is reified \(has a runtime representation\). Make sure to add a provider to this decorator so that you do not throw [NG0201: No Provider Found](errors/NG0201). | unknown | github | https://github.com/angular/angular | adev/src/content/reference/errors/NG2003.md |
#
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2012-2017 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
# Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
## @package materials_main
# Dialog to show information about a material.
#
import os
from tab import tab_class
from icon_lib import icon_get
#qt
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QWidget,QVBoxLayout,QToolBar,QSizePolicy,QAction,QTabWidget,QDialog
from PyQt5.QtGui import QPainter,QIcon
#python modules
import webbrowser
from help import help_window
from win_lin import desktop_open
from ref import ref_window
from bibtex import bibtex
from gpvdm_open import gpvdm_open
from QWidgetSavePos import QWidgetSavePos
from plot_widget import plot_widget
from ribbon_materials import ribbon_materials
from import_data import import_data
from equation_editor import equation_editor
# Module-level lists that are never touched in this file.
# NOTE(review): presumably leftovers from an older version of this dialog —
# confirm nothing imports them before removing.
articles = []
mesh_articles = []
class materials_main(QWidgetSavePos):
	"""Material editor window: shows absorption/refractive-index plots plus
	tabs for the electrical, luminescence and basic parameters of the
	material stored under ``path``."""

	def changed_click(self):
		# Refresh the context help and the save/import toolbar buttons
		# whenever the active notebook tab changes; only the two spectral
		# tabs support saving/importing data.
		if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Electrical parameters"):
			help_window().help_set_help(["tab.png",_("<big><b>Electrical parameters</b></big><br>Use this tab to configure the electrical parameters for the material.")])
			self.ribbon.tb_save.setEnabled(False)
			self.ribbon.import_data.setEnabled(False)
		if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Luminescence"):
			help_window().help_set_help(["tab.png",_("<big><b>Luminescence</b></big><br>Use this tab to edit the materials Luminescence.")])
			self.ribbon.tb_save.setEnabled(False)
			self.ribbon.import_data.setEnabled(False)
		if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Absorption"):
			# Show the bibliography note attached to the alpha data, if any.
			b=bibtex()
			if b.load(os.path.join(self.path,"mat.bib"))!=False:
				text=b.get_text_of_token("alpha",html=True)
				if text!=False:
					help_window().help_set_help(["alpha.png",_("<big><b>Absorption</b></big><br>"+text)])
			self.ribbon.tb_save.setEnabled(True)
			self.ribbon.import_data.setEnabled(True)
		if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Refractive index"):
			b=bibtex()
			if b.load(os.path.join(self.path,"mat.bib"))!=False:
				text=b.get_text_of_token("n",html=True)
				if text!=False:
					help_window().help_set_help(["n.png",_("<big><b>Refractive index</b></big><br>"+text)])
			self.ribbon.tb_save.setEnabled(True)
			self.ribbon.import_data.setEnabled(True)

	def callback_cost(self):
		# Open the material's cost spreadsheet with the system default app.
		desktop_open(os.path.join(self.path,"cost.xlsx"))

	def callback_help(self):
		# Open the online manual in the default browser.
		webbrowser.open("https://www.gpvdm.com/man/index.html")

	def __init__(self,path):
		# path: directory containing the material's data files
		# (alpha.gmat, n.gmat, mat.bib, dos.inp, pl.inp, mat.inp, ...).
		QWidgetSavePos.__init__(self,"materials_main")
		self.path=path
		self.setFixedSize(900, 600)
		self.setWindowIcon(icon_get("organic_material"))

		self.setWindowTitle(_("Material editor")+" (https://www.gpvdm.com)"+" "+os.path.basename(self.path))

		self.main_vbox = QVBoxLayout()

		# Toolbar ribbon: wire each button to its handler.
		self.ribbon=ribbon_materials()
		self.ribbon.cost.triggered.connect(self.callback_cost)
		self.ribbon.folder_open.triggered.connect(self.callback_dir_open)
		self.ribbon.import_data.clicked.connect(self.import_data)
		self.ribbon.equation.clicked.connect(self.callback_equation_editor)

		self.ribbon.tb_ref.triggered.connect(self.callback_ref)

		self.ribbon.help.triggered.connect(self.callback_help)

		self.main_vbox.addWidget(self.ribbon)

		self.notebook = QTabWidget()

		self.notebook.setMovable(True)

		self.main_vbox.addWidget(self.notebook)

		# Absorption spectrum tab (alpha.gmat).
		fname=os.path.join(self.path,"alpha.gmat")
		self.alpha=plot_widget(enable_toolbar=False)
		self.alpha.set_labels([_("Absorption")])
		self.alpha.load_data([fname])

		self.alpha.do_plot()
		self.notebook.addTab(self.alpha,_("Absorption"))

		# Refractive index tab (n.gmat).
		fname=os.path.join(self.path,"n.gmat")
		self.n=plot_widget(enable_toolbar=False)
		self.n.set_labels([_("Refractive index")])
		self.n.load_data([fname])
		self.n.do_plot()
		self.notebook.addTab(self.n,_("Refractive index"))

		# Optional parameter tabs, added only when the backing file exists.
		files=["dos.inp","pl.inp","mat.inp"]
		description=[_("Electrical parameters"),_("Luminescence"),_("Basic")]

		for i in range(0,len(files)):
			full_path=os.path.join(self.path,files[i])
			if os.path.isfile(full_path)==True:
				tab=tab_class(os.path.join(self.path,files[i]))
				self.notebook.addTab(tab,description[i])

		self.setLayout(self.main_vbox)

		self.notebook.currentChanged.connect(self.changed_click)

	def callback_equation_editor(self):
		# Open the equation editor preconfigured for whichever spectral tab
		# is active (absorption or refractive index); no-op on other tabs.
		equation_file=None
		file_name=None
		data_label=""
		data_units=""
		if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Absorption"):
			file_name="alpha.gmat"
			equation_file="alpha_eq.inp"
			data_label="Absorption"
			data_units="m^{-1}"

		if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Refractive index"):
			file_name="n.gmat"
			equation_file="n_eq.inp"
			data_label="n"
			data_units="au"

		if file_name!=None:
			output_file=os.path.join(self.path,file_name)
			config_file=os.path.join(self.path,file_name+"import.inp")

			self.equation_editor=equation_editor(self.path,equation_file,file_name)
			self.equation_editor.data_written.connect(self.update)

			self.equation_editor.data.y_label="Wavelength"
			self.equation_editor.data.data_label=data_label

			self.equation_editor.data.y_units="nm"
			self.equation_editor.data.data_units=data_units
			self.equation_editor.load()

			self.equation_editor.show()

	def import_data(self):
		# Import external spectral data into the active tab's .gmat file.
		file_name=None
		if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Absorption"):
			file_name="alpha.gmat"

		if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Refractive index"):
			file_name="n.gmat"

		if file_name!=None:
			output_file=os.path.join(self.path,file_name)
			config_file=os.path.join(self.path,file_name+"import.inp")
			self.im=import_data(output_file,config_file)
			self.im.run()
			self.update()

	def import_ref(self):
		# NOTE(review): byte-for-byte duplicate of import_data() above and
		# apparently never connected to any signal — candidate for removal.
		file_name=None
		if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Absorption"):
			file_name="alpha.gmat"

		if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Refractive index"):
			file_name="n.gmat"

		if file_name!=None:
			output_file=os.path.join(self.path,file_name)
			config_file=os.path.join(self.path,file_name+"import.inp")
			self.im=import_data(output_file,config_file)
			self.im.run()
			self.update()

	def update(self):
		# Redraw both spectral plots after their data files changed.
		self.n.update()
		self.alpha.update()

	def callback_ref(self):
		# Edit the bibliography entry ("alpha" or "n") for the active tab.
		token=None
		if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Absorption"):
			token="alpha"

		if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Refractive index"):
			token="n"

		if token!=None:
			self.ref_window=ref_window(os.path.join(self.path,"mat.bib"),token)
			self.ref_window.show()

	def callback_dir_open(self):
		# Let the user browse the material directory and open a file with
		# the system default application.
		dialog=gpvdm_open(self.path)
		dialog.show_inp_files=False
		ret=dialog.exec_()

		if ret==QDialog.Accepted:
			desktop_open(dialog.get_filename())
from concurrent.futures import ThreadPoolExecutor
from unittest.mock import ANY, patch
from coalib.core.ProjectBear import ProjectBear
from coalib.settings.Section import Section
from tests.core.CoreTestBase import CoreTestBase
class TestProjectBear(ProjectBear):
    """Minimal project bear yielding one result string that lists every
    file (sorted by name) together with its contents."""

    def analyze(self, files):
        lines = ['{}:{}'.format(name, files[name]) for name in sorted(files)]
        yield '\n'.join(lines)
class TestProjectBearWithParameters(ProjectBear):
    """Same output as ``TestProjectBear`` but with a configurable prefix
    prepended to each file entry (exercises bear settings)."""

    def analyze(self, files, prefix: str = '---'):
        lines = ['{}{}:{}'.format(prefix, name, files[name])
                 for name in sorted(files)]
        yield '\n'.join(lines)
class ProjectBearTest(CoreTestBase):
    """Core tests for ``ProjectBear``: runs the test bears over various
    file dictionaries and section settings, comparing results out-of-order."""

    def assertResultsEqual(self, bear_type, expected,
                           section=None, file_dict=None, cache=None):
        """
        Asserts whether the expected results do match the output of the bear.

        Asserts for the results out-of-order.

        :param bear_type:
            The bear class to check.
        :param expected:
            A sequence of expected results.
        :param section:
            A section for the bear to use. By default uses a new section with
            name ``test-section``.
        :param file_dict:
            A file-dictionary for the bear to use. By default uses an empty
            dictionary.
        :param cache:
            A cache the bear can use to speed up runs. If ``None``, no cache
            will be used.
        """
        if section is None:
            section = Section('test-section')
        if file_dict is None:
            file_dict = {}

        uut = bear_type(section, file_dict)

        results = self.execute_run({uut}, cache)

        self.assertEqual(sorted(expected), sorted(results))

    def test_bear_without_parameters(self):
        # The bear emits one string listing all files, sorted by name.
        self.assertResultsEqual(
            TestProjectBear,
            file_dict={},
            expected=[''])
        self.assertResultsEqual(
            TestProjectBear,
            file_dict={'fileX': []},
            expected=['fileX:[]'])
        self.assertResultsEqual(
            TestProjectBear,
            file_dict={'fileX': [],
                       'fileY': ['hello']},
            expected=['fileX:[]\n'
                      "fileY:['hello']"])
        self.assertResultsEqual(
            TestProjectBear,
            file_dict={'fileX': [],
                       'fileY': ['hello'],
                       'fileZ': ['x\n', 'y']},
            expected=['fileX:[]\n'
                      "fileY:['hello']\n"
                      "fileZ:['x\\n', 'y']"])

    def test_bear_with_parameters_but_keep_defaults(self):
        # No 'prefix' setting in the section, so the default '---' is used.
        self.assertResultsEqual(
            TestProjectBearWithParameters,
            file_dict={},
            expected=[''])
        self.assertResultsEqual(
            TestProjectBearWithParameters,
            file_dict={'fileX': []},
            expected=['---fileX:[]'])
        self.assertResultsEqual(
            TestProjectBearWithParameters,
            file_dict={'fileX': [],
                       'fileY': ['hello']},
            expected=['---fileX:[]\n'
                      "---fileY:['hello']"])
        self.assertResultsEqual(
            TestProjectBearWithParameters,
            file_dict={'fileX': [],
                       'fileY': ['hello'],
                       'fileZ': ['x\n', 'y']},
            expected=['---fileX:[]\n'
                      "---fileY:['hello']\n"
                      "---fileZ:['x\\n', 'y']"])

    def test_bear_with_parameters(self):
        # An explicit 'prefix' setting overrides the parameter default.
        section = Section('test-section')
        section['prefix'] = '___'

        self.assertResultsEqual(
            TestProjectBearWithParameters,
            section=section,
            file_dict={},
            expected=[''])
        self.assertResultsEqual(
            TestProjectBearWithParameters,
            section=section,
            file_dict={'fileX': []},
            expected=['___fileX:[]'])
        self.assertResultsEqual(
            TestProjectBearWithParameters,
            section=section,
            file_dict={'fileX': [],
                       'fileY': ['hello']},
            expected=['___fileX:[]\n'
                      "___fileY:['hello']"])
        self.assertResultsEqual(
            TestProjectBearWithParameters,
            section=section,
            file_dict={'fileX': [],
                       'fileY': ['hello'],
                       'fileZ': ['x\ny']},
            expected=['___fileX:[]\n'
                      "___fileY:['hello']\n"
                      "___fileZ:['x\\ny']"])
# Execute the same tests from ProjectBearTest, but use a ThreadPoolExecutor
# instead. It shall also seamlessly work with Python threads. Also there are
# coverage issues on Windows with ProcessPoolExecutor as coverage data isn't
# passed properly back from the pool processes.
class ProjectBearOnThreadPoolExecutorTest(ProjectBearTest):
    """Re-runs all ``ProjectBearTest`` cases on a thread-based executor and
    adds the mock-dependent cache tests."""

    def setUp(self):
        super().setUp()
        # (executor class, args, kwargs) triple consumed by CoreTestBase.
        self.executor = ThreadPoolExecutor, tuple(), dict(max_workers=8)

    # Cache-tests require to be executed in the same Python process, as mocks
    # aren't multiprocessing capable. Thus put them here.
    def test_cache(self):
        section = Section('test-section')
        filedict1 = {'file.txt': []}
        filedict2 = {'file.txt': ['first-line\n']}
        expected_results1 = ['file.txt:[]']
        expected_results2 = ["file.txt:['first-line\\n']"]

        cache = {}
        # Wrap analyze() in a mock so cache hits/misses can be counted.
        with patch.object(TestProjectBear, 'analyze',
                          autospec=True,
                          side_effect=TestProjectBear.analyze) as mock:
            # First run: cache miss -> analyze() is invoked once.
            self.assertResultsEqual(TestProjectBear,
                                    section=section,
                                    file_dict=filedict1,
                                    cache=cache,
                                    expected=expected_results1)
            mock.assert_called_once_with(ANY, filedict1)
            self.assertEqual(len(cache), 1)
            self.assertEqual(len(next(iter(cache.values()))), 1)

            mock.reset_mock()
            # Second run with identical input: served from cache, no call.
            self.assertResultsEqual(TestProjectBear,
                                    section=section,
                                    file_dict=filedict1,
                                    cache=cache,
                                    expected=expected_results1)
            # Due to https://bugs.python.org/issue28380, assert_not_called()
            # is not available. The fix for this bug was not backported to
            # Python 3.5 or earlier.
            self.assertFalse(mock.called)
            self.assertEqual(len(cache), 1)
            self.assertEqual(len(next(iter(cache.values()))), 1)

            # Changed file contents: a new cache entry is added and
            # analyze() runs again.
            self.assertResultsEqual(TestProjectBear,
                                    section=section,
                                    file_dict=filedict2,
                                    cache=cache,
                                    expected=expected_results2)
            mock.assert_called_once_with(ANY, filedict2)
            self.assertEqual(len(cache), 1)
            self.assertEqual(len(next(iter(cache.values()))), 2)
# pylint: disable=too-many-instance-attributes, too-many-arguments, protected-access, too-many-branches
# pylint: disable=too-many-public-methods
"""A `Module` implement the `BaseModule` API by wrapping a `Symbol` and one or
more `Executor` for data parallelization.
"""
import logging
import warnings
from .. import context as ctx
from .. import ndarray as nd
from .. import optimizer as opt
from .executor_group import DataParallelExecutorGroup
from ..model import _create_kvstore, _initialize_kvstore, _update_params, _update_params_on_kvstore
from ..model import load_checkpoint
from ..initializer import Uniform, InitDesc
from ..io import DataDesc
from .base_module import BaseModule, _check_input_names, _parse_data_desc
class Module(BaseModule):
"""Module is a basic module that wrap a `Symbol`. It is functionally the same
as the `FeedForward` model, except under the module API.
Parameters
----------
symbol : Symbol
data_names : list of str
Defaults to `('data')` for a typical model used in image classification.
label_names : list of str
Defaults to `('softmax_label')` for a typical model used in image
classification.
logger : Logger
Defaults to `logging`.
context : Context or list of Context
Defaults to ``mx.cpu()``.
work_load_list : list of number
Default ``None``, indicating uniform workload.
fixed_param_names: list of str
Default ``None``, indicating no network parameters are fixed.
state_names : list of str
states are similar to data and label, but not provided by data iterator.
Instead they are initialized to 0 and can be set by `set_states()`.
"""
    def __init__(self, symbol, data_names=('data',), label_names=('softmax_label',),
                 logger=logging, context=ctx.cpu(), work_load_list=None,
                 fixed_param_names=None, state_names=None):
        super(Module, self).__init__(logger=logger)

        # Normalize a single Context into a list; one executor is created
        # per context with the corresponding workload share.
        if isinstance(context, ctx.Context):
            context = [context]
        self._context = context
        if work_load_list is None:
            work_load_list = [1] * len(self._context)
        assert len(work_load_list) == len(self._context)
        self._work_load_list = work_load_list

        self._symbol = symbol

        # Copy name lists so caller-supplied sequences aren't aliased;
        # None means "no names of that kind".
        data_names = list(data_names) if data_names is not None else []
        label_names = list(label_names) if label_names is not None else []
        state_names = list(state_names) if state_names is not None else []
        fixed_param_names = list(fixed_param_names) if fixed_param_names is not None else []

        # Validate each name list against the symbol's inputs; only labels
        # are allowed to be absent (second flag False).
        _check_input_names(symbol, data_names, "data", True)
        _check_input_names(symbol, label_names, "label", False)
        _check_input_names(symbol, state_names, "state", True)
        _check_input_names(symbol, fixed_param_names, "fixed_param", True)

        # Parameters are all symbol arguments that are not data/label/state.
        arg_names = symbol.list_arguments()
        input_names = data_names + label_names + state_names
        self._param_names = [x for x in arg_names if x not in input_names]
        self._fixed_param_names = fixed_param_names
        self._aux_names = symbol.list_auxiliary_states()
        self._data_names = data_names
        self._label_names = label_names
        self._state_names = state_names
        self._output_names = symbol.list_outputs()

        # Parameter storage; populated by init_params()/set_params().
        self._arg_params = None
        self._aux_params = None
        self._params_dirty = False

        # Optimizer state; populated by init_optimizer().
        self._optimizer = None
        self._kvstore = None
        self._update_on_kvstore = None
        self._updater = None
        self._preload_opt_states = None
        self._grad_req = None

        # Execution state; populated by bind().
        self._exec_group = None
        self._data_shapes = None
        self._label_shapes = None
    @staticmethod
    def load(prefix, epoch, load_optimizer_states=False, **kwargs):
        """Creates a model from previously saved checkpoint.

        Parameters
        ----------
        prefix : str
            path prefix of saved model files. You should have
            "prefix-symbol.json", "prefix-xxxx.params", and
            optionally "prefix-xxxx.states", where xxxx is the
            epoch number.
        epoch : int
            epoch to load.
        load_optimizer_states : bool
            whether to load optimizer states. Checkpoint needs
            to have been made with save_optimizer_states=True.
        data_names : list of str
            Default is `('data')` for a typical model used in image classification.
        label_names : list of str
            Default is `('softmax_label')` for a typical model used in image
            classification.
        logger : Logger
            Default is `logging`.
        context : Context or list of Context
            Default is ``cpu()``.
        work_load_list : list of number
            Default ``None``, indicating uniform workload.
        fixed_param_names: list of str
            Default ``None``, indicating no network parameters are fixed.

        Returns
        -------
        Module
            A module with parameters loaded and marked initialized.
        """
        sym, args, auxs = load_checkpoint(prefix, epoch)
        mod = Module(symbol=sym, **kwargs)
        mod._arg_params = args
        mod._aux_params = auxs
        mod.params_initialized = True
        if load_optimizer_states:
            # Only the state file name is recorded here; the actual loading
            # is deferred until the optimizer is initialized.
            mod._preload_opt_states = '%s-%04d.states'%(prefix, epoch)
        return mod
    def save_checkpoint(self, prefix, epoch, save_optimizer_states=False):
        """Saves current progress to checkpoint.
        Use `mx.callback.module_checkpoint` as `epoch_end_callback` to save during training.

        Writes "prefix-symbol.json", "prefix-xxxx.params" and, optionally,
        "prefix-xxxx.states" (xxxx is the zero-padded epoch number), matching
        the layout expected by :meth:`load`.

        Parameters
        ----------
        prefix : str
            The file prefix to checkpoint to.
        epoch : int
            The current epoch number.
        save_optimizer_states : bool
            Whether to save optimizer states to continue training.
        """
        self._symbol.save('%s-symbol.json'%prefix)
        param_name = '%s-%04d.params' % (prefix, epoch)
        self.save_params(param_name)
        logging.info('Saved checkpoint to \"%s\"', param_name)
        if save_optimizer_states:
            state_name = '%s-%04d.states' % (prefix, epoch)
            self.save_optimizer_states(state_name)
            logging.info('Saved optimizer state to \"%s\"', state_name)
def _reset_bind(self):
"""Internal function to reset binded state."""
self.binded = False
self._exec_group = None
self._data_shapes = None
self._label_shapes = None
    @property
    def data_names(self):
        """A list of names for data required by this module."""
        return self._data_names

    @property
    def label_names(self):
        """A list of names for labels required by this module."""
        return self._label_names

    @property
    def output_names(self):
        """A list of names for the outputs of this module."""
        return self._output_names

    @property
    def data_shapes(self):
        """Gets data shapes.

        Only available after `bind` (asserts otherwise).

        Returns
        -------
        A list of `(name, shape)` pairs.
        """
        assert self.binded
        return self._data_shapes

    @property
    def label_shapes(self):
        """Gets label shapes.

        Only available after `bind` (asserts otherwise).

        Returns
        -------
        A list of `(name, shape)` pairs.
        The return value could be ``None`` if
        the module does not need labels, or if the module is not bound for
        training (in this case, label information is not available).
        """
        assert self.binded
        return self._label_shapes

    @property
    def output_shapes(self):
        """Gets output shapes.

        Only available after `bind`; delegates to the executor group.

        Returns
        -------
        A list of `(name, shape)` pairs.
        """
        assert self.binded
        return self._exec_group.get_output_shapes()
    def get_params(self):
        """Gets current parameters.

        Requires the module to be bound and its parameters initialized.

        Returns
        -------
        `(arg_params, aux_params)`
            A pair of dictionaries each mapping parameter names to NDArray values.
        """
        assert self.binded and self.params_initialized

        # If devices hold newer values than the CPU-side copies (e.g. after
        # update steps), pull them back before returning.
        if self._params_dirty:
            self._sync_params_from_devices()
        return (self._arg_params, self._aux_params)
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
                allow_missing=False, force_init=False, allow_extra=False):
    """Initializes the parameters and auxiliary states.

    Parameters
    ----------
    initializer : Initializer
        Called to initialize parameters if needed.
        NOTE: the default ``Uniform(0.01)`` is evaluated once at class
        definition time and shared across calls; harmless here because
        initializers are stateless callables.
    arg_params : dict
        If not ``None``, should be a dictionary of existing arg_params. Initialization
        will be copied from that.
    aux_params : dict
        If not ``None``, should be a dictionary of existing aux_params. Initialization
        will be copied from that.
    allow_missing : bool
        If ``True``, params could contain missing values, and the initializer will be
        called to fill those missing params.
    force_init : bool
        If ``True``, will force re-initialize even if already initialized.
    allow_extra : boolean, optional
        Whether allow extra parameters that are not needed by symbol.
        If this is True, no error will be thrown when arg_params or aux_params
        contain extra parameters that is not needed by the executor.
    """
    if self.params_initialized and not force_init:
        warnings.warn("Parameters already initialized and force_init=False. "
                      "init_params call ignored.", stacklevel=2)
        return
    assert self.binded, 'call bind before initializing the parameters'

    def _impl(name, arr, cache):
        """Internal helper for parameter initialization"""
        # `cache` is the user-supplied arg_params/aux_params dict (or None).
        if cache is not None:
            if name in cache:
                cache_arr = cache[name]
                # just in case the cached array is just the target itself
                if cache_arr is not arr:
                    cache_arr.copyto(arr)
            else:
                if not allow_missing:
                    raise RuntimeError("%s is not presented" % name)
                if initializer is not None:
                    initializer(name, arr)
        else:
            # No cache supplied: always defer to the initializer.
            initializer(name, arr)

    # Symbol attributes (e.g. __init__ hints) are passed to the
    # initializer via InitDesc.
    attrs = self._symbol.attr_dict()
    for name, arr in self._arg_params.items():
        desc = InitDesc(name, attrs.get(name, None))
        _impl(desc, arr, arg_params)
    for name, arr in self._aux_params.items():
        desc = InitDesc(name, attrs.get(name, None))
        _impl(desc, arr, aux_params)

    self.params_initialized = True
    self._params_dirty = False

    # copy the initialized parameters to devices
    self._exec_group.set_params(self._arg_params, self._aux_params,
                                allow_extra=allow_extra)
def set_params(self, arg_params, aux_params, allow_missing=False, force_init=True,
               allow_extra=False):
    """Assigns parameter and aux state values.

    Parameters
    ----------
    arg_params : dict
        Dictionary of name to `NDArray`.
    aux_params : dict
        Dictionary of name to `NDArray`.
    allow_missing : bool
        If ``True``, params could contain missing values, and the initializer will be
        called to fill those missing params.
    force_init : bool
        If ``True``, will force re-initialize even if already initialized.
    allow_extra : boolean, optional
        Whether allow extra parameters that are not needed by symbol.
        If this is True, no error will be thrown when arg_params or aux_params
        contain extra parameters that is not needed by the executor.

    Examples
    --------
    >>> # An example of setting module parameters.
    >>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, n_epoch_load)
    >>> mod.set_params(arg_params=arg_params, aux_params=aux_params)
    """
    if not allow_missing:
        # No initializer needed: every value must come from the dicts,
        # so delegate to init_params which also updates the CPU copies.
        self.init_params(initializer=None, arg_params=arg_params, aux_params=aux_params,
                         allow_missing=allow_missing, force_init=force_init,
                         allow_extra=allow_extra)
        return

    if self.params_initialized and not force_init:
        warnings.warn("Parameters already initialized and force_init=False. "
                      "set_params call ignored.", stacklevel=2)
        return

    # Fast path: write directly to the device-side arrays.
    self._exec_group.set_params(arg_params, aux_params, allow_extra=allow_extra)

    # because we didn't update self._arg_params, they are dirty now.
    self._params_dirty = True
    self.params_initialized = True
def bind(self, data_shapes, label_shapes=None, for_training=True,
         inputs_need_grad=False, force_rebind=False, shared_module=None,
         grad_req='write'):
    """Binds the symbols to construct executors. This is necessary before one
    can perform computation with the module.

    Parameters
    ----------
    data_shapes : list of (str, tuple)
        Typically is ``data_iter.provide_data``.
    label_shapes : list of (str, tuple)
        Typically is ``data_iter.provide_label``.
    for_training : bool
        Default is ``True``. Whether the executors should be bound for training.
    inputs_need_grad : bool
        Default is ``False``. Whether the gradients to the input data need to be computed.
        Typically this is not needed. But this might be needed when implementing composition
        of modules.
    force_rebind : bool
        Default is ``False``. This function does nothing if the executors are already
        bound. But with this ``True``, the executors will be forced to rebind.
    shared_module : Module
        Default is ``None``. This is used in bucketing. When not ``None``, the shared module
        essentially corresponds to a different bucket -- a module with different symbol
        but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
    grad_req : str, list of str, dict of str to str
        Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
        (default to 'write').
    """
    # force rebinding is typically used when one want to switch from
    # training to prediction phase.
    if force_rebind:
        self._reset_bind()

    if self.binded:
        self.logger.warning('Already bound, ignoring bind()')
        return

    self.for_training = for_training
    self.inputs_need_grad = inputs_need_grad
    self.binded = True
    self._grad_req = grad_req

    if not for_training:
        # Prediction never needs input gradients.
        assert not inputs_need_grad
    else:
        pass
        # this is not True, as some module might not contains a loss function
        # that consumes the labels
        # assert label_shapes is not None

    # Normalize user-provided shapes into DataDesc lists.
    self._data_shapes, self._label_shapes = _parse_data_desc(
        self.data_names, self.label_names, data_shapes, label_shapes)

    if shared_module is not None:
        # Bucketing: reuse the memory of an already-bound sibling module.
        assert isinstance(shared_module, Module) and \
            shared_module.binded and shared_module.params_initialized
        shared_group = shared_module._exec_group
        assert len(shared_group.execs) == len(self._context)
    else:
        shared_group = None

    self._exec_group = DataParallelExecutorGroup(self._symbol, self._context,
                                                 self._work_load_list, self._data_shapes,
                                                 self._label_shapes, self._param_names,
                                                 for_training, inputs_need_grad,
                                                 shared_group, logger=self.logger,
                                                 fixed_param_names=self._fixed_param_names,
                                                 grad_req=grad_req,
                                                 state_names=self._state_names)
    self._total_exec_bytes = self._exec_group._total_exec_bytes
    if shared_module is not None:
        # Parameters are literally shared with the donor module.
        self.params_initialized = True
        self._arg_params = shared_module._arg_params
        self._aux_params = shared_module._aux_params
    elif self.params_initialized:
        # if the parameters are already initialized, we are re-binding
        # so automatically copy the already initialized params
        self._exec_group.set_params(self._arg_params, self._aux_params)
    else:
        # Fresh bind: allocate zeroed CPU-side placeholders matching the
        # device arrays; init_params will fill them later.
        assert self._arg_params is None and self._aux_params is None
        param_arrays = [
            nd.zeros(x[0].shape, dtype=x[0].dtype)
            for x in self._exec_group.param_arrays
        ]
        self._arg_params = {name:arr for name, arr in zip(self._param_names, param_arrays)}

        aux_arrays = [
            nd.zeros(x[0].shape, dtype=x[0].dtype)
            for x in self._exec_group.aux_arrays
        ]
        self._aux_params = {name:arr for name, arr in zip(self._aux_names, aux_arrays)}

    if shared_module is not None and shared_module.optimizer_initialized:
        self.borrow_optimizer(shared_module)
def reshape(self, data_shapes, label_shapes=None):
    """Adapt the bound executors to new input shapes without
    re-allocating parameters.

    Parameters
    ----------
    data_shapes : list of (str, tuple)
        Typically ``data_iter.provide_data``.
    label_shapes : list of (str, tuple), optional
        Typically ``data_iter.provide_label``.
    """
    assert self.binded
    parsed_shapes = _parse_data_desc(self.data_names, self.label_names,
                                     data_shapes, label_shapes)
    self._data_shapes, self._label_shapes = parsed_shapes
    self._exec_group.reshape(self._data_shapes, self._label_shapes)
def init_optimizer(self, kvstore='local', optimizer='sgd',
                   optimizer_params=(('learning_rate', 0.01),), force_init=False):
    """Installs and initializes optimizers.

    Parameters
    ----------
    kvstore : str or KVStore
        Default `'local'`.
    optimizer : str or Optimizer
        Default `'sgd'`
    optimizer_params : dict
        Default `(('learning_rate', 0.01),)`. The default value is not a dictionary,
        just to avoid pylint warning of dangerous default values.
    force_init : bool
        Default ``False``, indicating whether we should force re-initializing the
        optimizer in the case an optimizer is already installed.
    """
    assert self.binded and self.params_initialized

    if self.optimizer_initialized and not force_init:
        self.logger.warning('optimizer already initialized, ignoring...')
        return

    if self._params_dirty:
        self._sync_params_from_devices()

    # Decide whether updates happen on the kvstore or locally.
    (kvstore, update_on_kvstore) = \
        _create_kvstore(kvstore, len(self._context), self._arg_params)

    batch_size = self._exec_group.batch_size
    if kvstore and 'dist' in kvstore.type and '_sync' in kvstore.type:
        # Synchronous distributed training: gradients are summed over
        # all workers, so normalize by the global batch size.
        batch_size *= kvstore.num_workers
    rescale_grad = 1.0/batch_size

    if isinstance(optimizer, str):
        # Build the index -> parameter-name mapping the optimizer uses
        # for per-parameter state (e.g. momentum, lr_mult lookups).
        idx2name = {}
        if update_on_kvstore:
            idx2name.update(enumerate(self._exec_group.param_names))
        else:
            # Local updates keep one slot per (parameter, device) pair.
            for k in range(len(self._context)):
                idx2name.update({i*len(self._context)+k: n
                                 for i, n in enumerate(self._exec_group.param_names)})
        optimizer_params = dict(optimizer_params)
        if 'rescale_grad' not in optimizer_params:
            optimizer_params['rescale_grad'] = rescale_grad
        optimizer = opt.create(optimizer,
                               sym=self.symbol, param_idx2name=idx2name,
                               **optimizer_params)
    else:
        assert isinstance(optimizer, opt.Optimizer)
        if optimizer.rescale_grad != rescale_grad:
            #pylint: disable=no-member
            warnings.warn(
                "Optimizer created manually outside Module but rescale_grad " +
                "is not normalized to 1.0/batch_size/num_workers (%s vs. %s). "%(
                    optimizer.rescale_grad, rescale_grad) +
                "Is this intended?", stacklevel=2)

    self._optimizer = optimizer
    self._kvstore = kvstore
    self._update_on_kvstore = update_on_kvstore
    self._updater = None

    if kvstore:
        # copy initialized local parameters to kvstore
        _initialize_kvstore(kvstore=kvstore,
                            param_arrays=self._exec_group.param_arrays,
                            arg_params=self._arg_params,
                            param_names=self._param_names,
                            update_on_kvstore=update_on_kvstore)
    if update_on_kvstore:
        kvstore.set_optimizer(self._optimizer)
    else:
        self._updater = opt.get_updater(optimizer)

    self.optimizer_initialized = True

    if self._preload_opt_states is not None:
        # States queued by load() before the optimizer existed.
        self.load_optimizer_states(self._preload_opt_states)
        self._preload_opt_states = None
def borrow_optimizer(self, shared_module):
    """Share another module's optimizer, kvstore and updater.

    Used in bucketing, where every bucket must update exactly the same
    optimizer state (especially the kvstore).

    Parameters
    ----------
    shared_module : Module
        A module whose optimizer has already been initialized.
    """
    assert shared_module.optimizer_initialized
    # Mirror every optimizer-related handle from the donor module.
    for attr in ('_optimizer', '_kvstore', '_update_on_kvstore', '_updater'):
        setattr(self, attr, getattr(shared_module, attr))
    self.optimizer_initialized = True
def forward(self, data_batch, is_train=None):
    """Forward computation. It supports data batches with different shapes, such as
    different batch sizes or different image sizes.

    If reshaping of data batch relates to modification of symbol or module, such as
    changing image layout ordering or switching from training to predicting, module
    rebinding is required.

    See Also
    ----------
    :meth:`BaseModule.forward`.

    Parameters
    ----------
    data_batch : DataBatch
        Could be anything with similar API implemented.
    is_train : bool
        Default is ``None``, which means ``is_train`` takes the value of ``self.for_training``.
    """
    assert self.binded and self.params_initialized

    # If the incoming batch's shapes differ from what we were bound
    # with, transparently reshape the executors before running.
    curr_data_shapes = tuple(i.shape for i in self._data_shapes)
    new_data_shapes = tuple(i.shape for i in data_batch.data)

    if curr_data_shapes != new_data_shapes:
        if hasattr(data_batch, "provide_data") and data_batch.provide_data:
            new_dshape = data_batch.provide_data
        else:
            # Keep name/dtype/layout from the bound descriptors, only
            # swap in the new shapes.
            new_dshape = [DataDesc(i.name, shape, i.dtype, i.layout) \
                          for i, shape in zip(self._data_shapes, new_data_shapes)]

        if hasattr(data_batch, "provide_label") and data_batch.provide_label:
            new_lshape = data_batch.provide_label
        elif hasattr(data_batch, "label") and data_batch.label:
            new_lshape = [DataDesc(i.name, j.shape, i.dtype, i.layout) \
                          for i, j in zip(self._label_shapes, data_batch.label)]
        else:
            new_lshape = None

        self.reshape(new_dshape, new_lshape)

    self._exec_group.forward(data_batch, is_train)
def backward(self, out_grads=None):
    """Run the backward pass over the bound executors.

    See Also
    ----------
    :meth:`BaseModule.backward`.

    Parameters
    ----------
    out_grads : NDArray or list of NDArray, optional
        Head gradients; only required when ``bind`` was called on
        outputs that are not themselves a loss function.
    """
    assert self.binded and self.params_initialized
    self._exec_group.backward(out_grads=out_grads)
def update(self):
    """Updates parameters according to the installed optimizer and the gradients computed
    in the previous forward-backward batch.

    See Also
    ----------
    :meth:`BaseModule.update`.
    """
    assert self.binded and self.params_initialized and self.optimizer_initialized

    # Device-side arrays are about to change; the CPU-side copies in
    # self._arg_params stay stale until the next sync.
    self._params_dirty = True
    if self._update_on_kvstore:
        # kvstore path: the store applies the optimizer step.
        _update_params_on_kvstore(self._exec_group.param_arrays,
                                  self._exec_group.grad_arrays,
                                  self._kvstore, self._exec_group.param_names)
    else:
        # Local path: apply the updater on each device ourselves.
        _update_params(self._exec_group.param_arrays,
                       self._exec_group.grad_arrays,
                       updater=self._updater,
                       num_device=len(self._context),
                       kvstore=self._kvstore,
                       param_names=self._exec_group.param_names)
def get_outputs(self, merge_multi_context=True):
    """Fetch the outputs of the most recent forward computation.

    Parameters
    ----------
    merge_multi_context : bool
        Default ``True``. With data parallelism, outputs live on several
        devices; ``True`` merges them so the result looks like it came
        from a single executor (``[out1, out2]``), while ``False``
        returns per-device lists
        (``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``).

    Returns
    -------
    list of NDArray or list of list of NDArray
        Output. When unmerged, the NDArrays may live on different devices.
    """
    assert self.binded and self.params_initialized
    return self._exec_group.get_outputs(merge_multi_context=merge_multi_context)
def get_input_grads(self, merge_multi_context=True):
    """Fetch the gradients with respect to the module's inputs.

    Requires the module to have been bound with ``inputs_need_grad=True``.

    Parameters
    ----------
    merge_multi_context : bool
        Default ``True``. ``True`` merges per-device results into
        ``[grad1, grad2]``; ``False`` keeps them per device as
        ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``.

    Returns
    -------
    list of NDArray or list of list of NDArray
        Input gradients.
    """
    assert self.binded and self.params_initialized and self.inputs_need_grad
    return self._exec_group.get_input_grads(merge_multi_context=merge_multi_context)
def get_states(self, merge_multi_context=True):
    """Fetch module states from all devices.

    Parameters
    ----------
    merge_multi_context : bool
        Default ``True``. ``True`` merges per-device states into
        ``[out1, out2]``; ``False`` keeps them per device as
        ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``.

    Returns
    -------
    list of NDArray or list of list of NDArray
        States.
    """
    assert self.binded and self.params_initialized
    return self._exec_group.get_states(merge_multi_context=merge_multi_context)
def set_states(self, states=None, value=None):
    """Assign values to the module's states.

    Exactly one of ``states`` and ``value`` should be given.

    Parameters
    ----------
    states : list of list of NDArray, optional
        Source arrays, formatted like
        ``[[state1_dev1, state1_dev2], [state2_dev1, state2_dev2]]``.
    value : number, optional
        A single scalar broadcast to every state array.
    """
    assert self.binded and self.params_initialized
    self._exec_group.set_states(states, value)
def update_metric(self, eval_metric, labels):
    """Accumulate ``eval_metric`` on the outputs of the last forward pass.

    See Also
    ----------
    :meth:`BaseModule.update_metric`.

    Parameters
    ----------
    eval_metric : EvalMetric
    labels : list of NDArray
        Typically ``data_batch.label``.
    """
    self._exec_group.update_metric(eval_metric, labels)
def _sync_params_from_devices(self):
    """Pull the latest parameters from the devices into the CPU copies.

    Call after ``update`` (which modifies device arrays) and before
    reading ``self._arg_params`` / ``self._aux_params``.
    """
    # The executor group writes into the dicts in place.
    self._exec_group.get_params(self._arg_params, self._aux_params)
    self._params_dirty = False
def save_optimizer_states(self, fname):
    """Persist optimizer (updater) state to a file.

    Parameters
    ----------
    fname : str
        Path to output states file.
    """
    assert self.optimizer_initialized
    if self._update_on_kvstore:
        # Distributed updates keep the state on the kvstore side.
        self._kvstore.save_optimizer_states(fname)
        return
    with open(fname, 'wb') as fout:
        fout.write(self._updater.get_states())
def load_optimizer_states(self, fname):
    """Loads optimizer (updater) state from a file.

    Parameters
    ----------
    fname : str
        Path to a states file previously written by
        ``save_optimizer_states``.
    """
    assert self.optimizer_initialized
    if self._update_on_kvstore:
        self._kvstore.load_optimizer_states(fname)
    else:
        # Use a context manager so the file handle is closed promptly;
        # the previous `open(fname).read()` relied on the GC to close it.
        with open(fname, 'rb') as fin:
            self._updater.set_states(fin.read())
def install_monitor(self, mon):
    """Attach ``mon`` to every executor so it can observe internals."""
    assert self.binded
    self._exec_group.install_monitor(mon)
import datetime
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db import models
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import isolate_apps
from django.utils import translation
from .models import (
Article, ArticleIdea, ArticleTag, ArticleTranslation, Country, Friendship,
Group, Membership, NewsArticle, Person,
)
# Note that these tests are testing internal implementation details.
# ForeignObject is not part of public API.
class MultiColumnFKTests(TestCase):
    """Tests for composite (multi-column) foreign keys built on ForeignObject.

    A Membership relates to a Person only when BOTH the id and the
    country id columns match; these tests verify lookups, reverse
    relations, m2m-through traversal and prefetching honor that.
    """

    @classmethod
    def setUpTestData(cls):
        # Creating countries
        cls.usa = Country.objects.create(name="United States of America")
        cls.soviet_union = Country.objects.create(name="Soviet Union")
        # Creating People
        cls.bob = Person.objects.create(name='Bob', person_country=cls.usa)
        cls.jim = Person.objects.create(name='Jim', person_country=cls.usa)
        cls.george = Person.objects.create(name='George', person_country=cls.usa)
        cls.jane = Person.objects.create(name='Jane', person_country=cls.soviet_union)
        cls.mark = Person.objects.create(name='Mark', person_country=cls.soviet_union)
        cls.sam = Person.objects.create(name='Sam', person_country=cls.soviet_union)
        # Creating Groups
        cls.kgb = Group.objects.create(name='KGB', group_country=cls.soviet_union)
        cls.cia = Group.objects.create(name='CIA', group_country=cls.usa)
        cls.republican = Group.objects.create(name='Republican', group_country=cls.usa)
        cls.democrat = Group.objects.create(name='Democrat', group_country=cls.usa)

    def test_get_succeeds_on_multicolumn_match(self):
        # Membership objects have access to their related Person if both
        # country_ids match between them.
        membership = Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)

        person = membership.person
        self.assertEqual((person.id, person.name), (self.bob.id, "Bob"))

    def test_get_fails_on_multicolumn_mismatch(self):
        # Membership objects raise DoesNotExist when there is no Person
        # matching on BOTH id and country_id.
        membership = Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.jane.id, group_id=self.cia.id)

        with self.assertRaises(Person.DoesNotExist):
            getattr(membership, 'person')

    def test_reverse_query_returns_correct_result(self):
        # Creating a valid membership: it has the same country as the person.
        Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)

        # Creating an invalid membership: it has a different country than the person.
        Membership.objects.create(
            membership_country_id=self.soviet_union.id, person_id=self.bob.id,
            group_id=self.republican.id)

        with self.assertNumQueries(1):
            membership = self.bob.membership_set.get()
            self.assertEqual(membership.group_id, self.cia.id)
            self.assertIs(membership.person, self.bob)

    def test_query_filters_correctly(self):
        # Creating two valid memberships
        Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
        Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.jim.id,
            group_id=self.cia.id)

        # Creating an invalid membership
        Membership.objects.create(membership_country_id=self.soviet_union.id,
                                  person_id=self.george.id, group_id=self.cia.id)

        self.assertQuerysetEqual(
            Membership.objects.filter(person__name__contains='o'), [
                self.bob.id
            ],
            attrgetter("person_id")
        )

    def test_reverse_query_filters_correctly(self):
        timemark = datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None)
        timedelta = datetime.timedelta(days=1)

        # Creating two valid memberships
        Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.bob.id,
            group_id=self.cia.id, date_joined=timemark - timedelta)
        Membership.objects.create(
            membership_country_id=self.usa.id, person_id=self.jim.id,
            group_id=self.cia.id, date_joined=timemark + timedelta)

        # Creating an invalid membership
        Membership.objects.create(
            membership_country_id=self.soviet_union.id, person_id=self.george.id,
            group_id=self.cia.id, date_joined=timemark + timedelta)

        self.assertQuerysetEqual(
            Person.objects.filter(membership__date_joined__gte=timemark), [
                'Jim'
            ],
            attrgetter('name')
        )

    def test_forward_in_lookup_filters_correctly(self):
        Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
                                  group_id=self.cia.id)
        Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
                                  group_id=self.cia.id)

        # Creating an invalid membership
        Membership.objects.create(
            membership_country_id=self.soviet_union.id, person_id=self.george.id,
            group_id=self.cia.id)

        self.assertQuerysetEqual(
            Membership.objects.filter(person__in=[self.george, self.jim]), [
                self.jim.id,
            ],
            attrgetter('person_id')
        )
        self.assertQuerysetEqual(
            Membership.objects.filter(person__in=Person.objects.filter(name='Jim')), [
                self.jim.id,
            ],
            attrgetter('person_id')
        )

    def test_double_nested_query(self):
        m1 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
                                       group_id=self.cia.id)
        m2 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
                                       group_id=self.cia.id)
        Friendship.objects.create(from_friend_country_id=self.usa.id, from_friend_id=self.bob.id,
                                  to_friend_country_id=self.usa.id, to_friend_id=self.jim.id)
        self.assertSequenceEqual(
            Membership.objects.filter(
                person__in=Person.objects.filter(
                    from_friend__in=Friendship.objects.filter(to_friend__in=Person.objects.all())
                )
            ),
            [m1]
        )
        self.assertSequenceEqual(
            Membership.objects.exclude(
                person__in=Person.objects.filter(
                    from_friend__in=Friendship.objects.filter(to_friend__in=Person.objects.all())
                )
            ),
            [m2]
        )

    def test_select_related_foreignkey_forward_works(self):
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)

        with self.assertNumQueries(1):
            people = [m.person for m in Membership.objects.select_related('person').order_by('pk')]

        normal_people = [m.person for m in Membership.objects.all().order_by('pk')]
        self.assertEqual(people, normal_people)

    def test_prefetch_foreignkey_forward_works(self):
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)

        with self.assertNumQueries(2):
            people = [
                m.person for m in Membership.objects.prefetch_related('person').order_by('pk')]

        normal_people = [m.person for m in Membership.objects.order_by('pk')]
        self.assertEqual(people, normal_people)

    def test_prefetch_foreignkey_reverse_works(self):
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
        with self.assertNumQueries(2):
            membership_sets = [
                list(p.membership_set.all())
                for p in Person.objects.prefetch_related('membership_set').order_by('pk')]

        with self.assertNumQueries(7):
            normal_membership_sets = [
                list(p.membership_set.all())
                for p in Person.objects.order_by('pk')
            ]

        self.assertEqual(membership_sets, normal_membership_sets)

    def test_m2m_through_forward_returns_valid_members(self):
        # We start out by making sure that the Group 'CIA' has no members.
        self.assertQuerysetEqual(
            self.cia.members.all(),
            []
        )

        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.cia)

        # Let's check to make sure that it worked. Bob and Jim should be members of the CIA.

        self.assertQuerysetEqual(
            self.cia.members.all(), [
                'Bob',
                'Jim'
            ], attrgetter("name")
        )

    def test_m2m_through_reverse_returns_valid_members(self):
        # We start out by making sure that Bob is in no groups.
        self.assertQuerysetEqual(
            self.bob.groups.all(),
            []
        )

        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.bob,
                                  group=self.republican)

        # Bob should be in the CIA and a Republican
        self.assertQuerysetEqual(
            self.bob.groups.all(), [
                'CIA',
                'Republican'
            ], attrgetter("name")
        )

    def test_m2m_through_forward_ignores_invalid_members(self):
        # We start out by making sure that the Group 'CIA' has no members.
        self.assertQuerysetEqual(
            self.cia.members.all(),
            []
        )

        # Something adds jane to group CIA but Jane is in Soviet Union which isn't CIA's country
        Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)

        # There should still be no members in CIA
        self.assertQuerysetEqual(
            self.cia.members.all(),
            []
        )

    def test_m2m_through_reverse_ignores_invalid_members(self):
        # We start out by making sure that Jane has no groups.
        self.assertQuerysetEqual(
            self.jane.groups.all(),
            []
        )

        # Something adds jane to group CIA but Jane is in Soviet Union which isn't CIA's country
        Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)

        # Jane should still not be in any groups
        self.assertQuerysetEqual(
            self.jane.groups.all(),
            []
        )

    def test_m2m_through_on_self_works(self):
        self.assertQuerysetEqual(
            self.jane.friends.all(),
            []
        )

        Friendship.objects.create(
            from_friend_country=self.jane.person_country, from_friend=self.jane,
            to_friend_country=self.george.person_country, to_friend=self.george)

        self.assertQuerysetEqual(
            self.jane.friends.all(),
            ['George'], attrgetter("name")
        )

    def test_m2m_through_on_self_ignores_mismatch_columns(self):
        self.assertQuerysetEqual(self.jane.friends.all(), [])

        # Note that we use ids instead of instances. This is because instances on ForeignObject
        # properties will set all related field off of the given instance.
        Friendship.objects.create(
            from_friend_id=self.jane.id, to_friend_id=self.george.id,
            to_friend_country_id=self.jane.person_country_id,
            from_friend_country_id=self.george.person_country_id)

        self.assertQuerysetEqual(self.jane.friends.all(), [])

    def test_prefetch_related_m2m_forward_works(self):
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)

        with self.assertNumQueries(2):
            members_lists = [list(g.members.all())
                             for g in Group.objects.prefetch_related('members')]

        normal_members_lists = [list(g.members.all()) for g in Group.objects.all()]
        self.assertEqual(members_lists, normal_members_lists)

    def test_prefetch_related_m2m_reverse_works(self):
        Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)

        with self.assertNumQueries(2):
            groups_lists = [list(p.groups.all()) for p in Person.objects.prefetch_related('groups')]

        normal_groups_lists = [list(p.groups.all()) for p in Person.objects.all()]
        self.assertEqual(groups_lists, normal_groups_lists)

    @translation.override('fi')
    def test_translations(self):
        a1 = Article.objects.create(pub_date=datetime.date.today())
        at1_fi = ArticleTranslation(article=a1, lang='fi', title='Otsikko', body='Diipadaapa')
        at1_fi.save()
        at2_en = ArticleTranslation(article=a1, lang='en', title='Title', body='Lalalalala')
        at2_en.save()

        self.assertEqual(Article.objects.get(pk=a1.pk).active_translation, at1_fi)

        with self.assertNumQueries(1):
            fetched = Article.objects.select_related('active_translation').get(
                active_translation__title='Otsikko')
            self.assertEqual(fetched.active_translation.title, 'Otsikko')

        a2 = Article.objects.create(pub_date=datetime.date.today())
        at2_fi = ArticleTranslation(article=a2, lang='fi', title='Atsikko', body='Diipadaapa',
                                    abstract='dipad')
        at2_fi.save()
        a3 = Article.objects.create(pub_date=datetime.date.today())
        at3_en = ArticleTranslation(article=a3, lang='en', title='A title', body='lalalalala',
                                    abstract='lala')
        at3_en.save()
        # Test model initialization with active_translation field.
        a3 = Article(id=a3.id, pub_date=a3.pub_date, active_translation=at3_en)
        a3.save()
        self.assertEqual(
            list(Article.objects.filter(active_translation__abstract=None)),
            [a1, a3])
        self.assertEqual(
            list(Article.objects.filter(active_translation__abstract=None,
                                        active_translation__pk__isnull=False)),
            [a1])

        with translation.override('en'):
            self.assertEqual(
                list(Article.objects.filter(active_translation__abstract=None)),
                [a1, a2])

    def test_foreign_key_raises_informative_does_not_exist(self):
        referrer = ArticleTranslation()
        with self.assertRaisesMessage(Article.DoesNotExist, 'ArticleTranslation has no article'):
            referrer.article

    def test_foreign_key_related_query_name(self):
        a1 = Article.objects.create(pub_date=datetime.date.today())
        ArticleTag.objects.create(article=a1, name="foo")
        self.assertEqual(Article.objects.filter(tag__name="foo").count(), 1)
        self.assertEqual(Article.objects.filter(tag__name="bar").count(), 0)
        msg = (
            "Cannot resolve keyword 'tags' into field. Choices are: "
            "active_translation, active_translation_q, articletranslation, "
            "id, idea_things, newsarticle, pub_date, tag"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Article.objects.filter(tags__name="foo")

    def test_many_to_many_related_query_name(self):
        a1 = Article.objects.create(pub_date=datetime.date.today())
        i1 = ArticleIdea.objects.create(name="idea1")
        a1.ideas.add(i1)
        self.assertEqual(Article.objects.filter(idea_things__name="idea1").count(), 1)
        self.assertEqual(Article.objects.filter(idea_things__name="idea2").count(), 0)
        msg = (
            "Cannot resolve keyword 'ideas' into field. Choices are: "
            "active_translation, active_translation_q, articletranslation, "
            "id, idea_things, newsarticle, pub_date, tag"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Article.objects.filter(ideas__name="idea1")

    @translation.override('fi')
    def test_inheritance(self):
        na = NewsArticle.objects.create(pub_date=datetime.date.today())
        ArticleTranslation.objects.create(
            article=na, lang="fi", title="foo", body="bar")
        self.assertSequenceEqual(
            NewsArticle.objects.select_related('active_translation'),
            [na]
        )
        with self.assertNumQueries(1):
            self.assertEqual(
                NewsArticle.objects.select_related(
                    'active_translation')[0].active_translation.title,
                "foo")

    @skipUnlessDBFeature('has_bulk_insert')
    def test_batch_create_foreign_object(self):
        objs = [Person(name="abcd_%s" % i, person_country=self.usa) for i in range(0, 5)]
        Person.objects.bulk_create(objs, 10)

    def test_isnull_lookup(self):
        m1 = Membership.objects.create(membership_country=self.usa, person=self.bob, group_id=None)
        m2 = Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
        self.assertSequenceEqual(
            Membership.objects.filter(group__isnull=True),
            [m1],
        )
        self.assertSequenceEqual(
            Membership.objects.filter(group__isnull=False),
            [m2],
        )
class TestModelCheckTests(SimpleTestCase):
    """System-check tests for composite ForeignObject fields."""

    @isolate_apps('foreign_object')
    def test_check_composite_foreign_object(self):
        # A ForeignObject spanning exactly the columns of a unique_together
        # on the target model must pass checks with no errors.
        class Parent(models.Model):
            a = models.PositiveIntegerField()
            b = models.PositiveIntegerField()

            class Meta:
                unique_together = (('a', 'b'),)

        class Child(models.Model):
            a = models.PositiveIntegerField()
            b = models.PositiveIntegerField()
            value = models.CharField(max_length=255)
            parent = models.ForeignObject(
                Parent,
                on_delete=models.SET_NULL,
                from_fields=('a', 'b'),
                to_fields=('a', 'b'),
                related_name='children',
            )

        self.assertEqual(Child._meta.get_field('parent').check(from_model=Child), [])

    @isolate_apps('foreign_object')
    def test_check_subset_composite_foreign_object(self):
        # Targeting (a, b, c) when only (a, b) is unique_together must also
        # pass: a unique subset is sufficient for the check.
        class Parent(models.Model):
            a = models.PositiveIntegerField()
            b = models.PositiveIntegerField()
            c = models.PositiveIntegerField()

            class Meta:
                unique_together = (('a', 'b'),)

        class Child(models.Model):
            a = models.PositiveIntegerField()
            b = models.PositiveIntegerField()
            c = models.PositiveIntegerField()
            d = models.CharField(max_length=255)
            parent = models.ForeignObject(
                Parent,
                on_delete=models.SET_NULL,
                from_fields=('a', 'b', 'c'),
                to_fields=('a', 'b', 'c'),
                related_name='children',
            )

        self.assertEqual(Child._meta.get_field('parent').check(from_model=Child), [])
class TestExtraJoinFilterQ(TestCase):
    """Tests for a ForeignObject relation whose join condition is a Q filter."""

    @translation.override('fi')
    def test_extra_join_filter_q(self):
        a = Article.objects.create(pub_date=datetime.datetime.today())
        ArticleTranslation.objects.create(article=a, lang='fi', title='title', body='body')
        qs = Article.objects.all()
        # Without select_related, accessing the translation costs an extra query.
        with self.assertNumQueries(2):
            self.assertEqual(qs[0].active_translation_q.title, 'title')
        qs = qs.select_related('active_translation_q')
        # With select_related, the Q-filtered join is folded into one query.
        with self.assertNumQueries(1):
            self.assertEqual(qs[0].active_translation_q.title, 'title')
// NOTE(review): TypeScript-compiler-generated baseline file. The `////`
// markers and the emitted JS below are produced by the test harness; do not
// hand-edit the code portions — regenerate via the baseline tooling instead.
//// [tests/cases/compiler/collisionSuperAndPropertyNameAsConstuctorParameter.ts] ////

//// [collisionSuperAndPropertyNameAsConstuctorParameter.ts]
class a {
}

class b1 extends a {
    constructor(_super: number) { // should be error
        super();
    }
}

class b2 extends a {
    constructor(private _super: number) { // should be error
        super();
    }
}

class b3 extends a {
    constructor(_super: number); // no code gen - no error
    constructor(_super: string);// no code gen - no error
    constructor(_super: any) { // should be error
        super();
    }
}

class b4 extends a {
    constructor(_super: number); // no code gen - no error
    constructor(_super: string);// no code gen - no error
    constructor(private _super: any) { // should be error
        super();
    }
}

//// [collisionSuperAndPropertyNameAsConstuctorParameter.js]
"use strict";
class a {
}
class b1 extends a {
    constructor(_super) {
        super();
    }
}
class b2 extends a {
    constructor(_super) {
        super();
        this._super = _super;
    }
}
class b3 extends a {
    constructor(_super) {
        super();
    }
}
class b4 extends a {
    constructor(_super) {
        super();
        this._super = _super;
    }
}
from django.contrib import admin
from cms.admin.placeholderadmin import PlaceholderAdmin
from testapp.placeholderapp.models import *
class MixinAdmin(admin.ModelAdmin):
    """Plain ModelAdmin mixin used to verify PlaceholderAdmin cooperation."""

    def formfield_for_dbfield(self, db_field, **kwargs):
        """
        Hook for specifying the form Field instance for a given database Field
        instance.

        If kwargs are given, they're passed to the form Field's constructor.
        """
        # Regression check: PlaceholderAdmin must keep passing ``request``
        # through when combined with ModelAdmin mixins overriding this hook.
        request = kwargs.pop('request', None)
        return super(MixinAdmin, self).formfield_for_dbfield(db_field, request=request, **kwargs)
# PlaceholderAdmin combined with a plain ModelAdmin mixin, default layout.
class Example1Admin(PlaceholderAdmin, MixinAdmin):
    pass
class Example2Admin(PlaceholderAdmin):
    # Placeholder field mixed into a regular 'wide' fieldset among char fields.
    fieldsets = (
        ('Placeholder + more fields', {
            'classes': ('wide',),
            'fields': ('char_1', 'placeholder', 'char_2',)
        }),
        ('Other fields', {
            'classes': ('wide',),
            'fields': ('char_3', 'char_4',)
        }),
    )
class Example3Admin(PlaceholderAdmin):
    # Placeholder isolated in its own fieldset carrying the plugin-holder CSS
    # classes.  NOTE(review): the label typo ("rigth") and the duplicated
    # 'Only chars' titles look like deliberate fixture data — confirm against
    # the tests before "fixing" these runtime strings.
    fieldsets = (
        ('Only chars', {
            'classes': ('wide',),
            'fields': ('char_1', 'char_2',)
        }),
        (u'Only Placeholder with rigth classes', {
            'classes': ('plugin-holder', 'plugin-holder-nopage',),
            'fields': ('placeholder',)
        }),
        ('Only chars', {
            'classes': ('wide',),
            'fields': ('char_3', 'char_4',)
        }),
    )
class Example4Admin(PlaceholderAdmin):
    # Placeholder fieldset deliberately using a 'wide' class instead of the
    # expected 'plugin-holder' pair (the fieldset title says so explicitly).
    fieldsets = (
        ('Only chars', {
            'classes': ('wide',),
            'fields': ('char_1', 'char_2',)
        }),
        (u'Only Placeholder, with wrong classes', {
            'classes': ('wide', 'plugin-holder-nopage',),
            'fields': ('placeholder',)
        }),
        ('Only chars', {
            'classes': ('wide',),
            'fields': ('char_3', 'char_4',)
        }),
    )
class Example5Admin(PlaceholderAdmin):
    # Two placeholder fields sharing one fieldset with plugin CSS classes.
    fieldsets = (
        ('Only chars', {
            'classes': ('wide',),
            'fields': ('char_1', 'char_2',)
        }),
        (u'Two Placeholder, with right classes', {
            'classes': ('plugin', 'plugin-holder-nopage',),
            'fields': ('placeholder_1', 'placeholder_2',)
        }),
        ('Only chars', {
            'classes': ('wide',),
            'fields': ('char_3', 'char_4',)
        }),
    )
# Register each example model with its dedicated admin class.
admin.site.register(Example1, Example1Admin)
admin.site.register(Example2, Example2Admin)
admin.site.register(Example3, Example3Admin)
admin.site.register(Example4, Example4Admin)
admin.site.register(Example5, Example5Admin)
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
import urllib
from urlparse import urlsplit
from weboob.deprecated.browser import Browser, BrowserIncorrectPassword
from weboob.capabilities.messages import CantSendMessage
from .pages.index import LoginPage
from .pages.forum import ForumPage, TopicPage, PostingPage
from .tools import id2url, url2id
__all__ = ['PhpBB']
# Browser
class PhpBB(Browser):
    """Weboob browser for phpBB forums.

    ``PAGES`` routes each fetched URL to the page class that parses it.
    """
    PAGES = {'https?://.*/index.php': ForumPage,
             'https?://.*/': ForumPage,
             'https?://.*/viewforum.php\?f=(\d+)': ForumPage,
             'https?://.*/search.php\?.*': ForumPage,
             'https?://.*/viewtopic.php\?.*': TopicPage,
             'https?://.*/posting.php\?.*': PostingPage,
             'https?://.*/ucp.php\?mode=login.*': LoginPage,
            }
    # Class-level default; not read anywhere in this class — presumably kept
    # for external callers. TODO(review): confirm and drop if unused.
    last_board_msg_id = None

    def __init__(self, url, *args, **kwargs):
        # Split the configured forum URL into protocol / domain / base path so
        # every request can be expressed relative to the forum root.
        self.url = url
        v = urlsplit(url)
        self.PROTOCOL = v.scheme
        self.DOMAIN = v.netloc
        self.BASEPATH = v.path[:v.path.rfind('/')]
        Browser.__init__(self, *args, **kwargs)

    def absurl(self, rel):
        # Resolve *rel* against the forum base path, not the domain root.
        return Browser.absurl(self, '%s/%s' % (self.BASEPATH, rel))

    def home(self):
        self.location(self.url)

    def is_logged(self):
        # No page loaded yet counts as logged-in so login is not retried
        # before the first request.
        return not self.page or self.page.is_logged()

    def login(self):
        """POST credentials to ucp.php and verify the resulting session."""
        data = {'login': 'Connexion',
                'username': self.username,
                'password': self.password,
               }
        self.location('%s/ucp.php?mode=login' % self.BASEPATH, urllib.urlencode(data), no_login=True)
        assert self.is_on_page(LoginPage)

        if not self.page.is_logged():
            raise BrowserIncorrectPassword(self.page.get_error_message())

    def get_root_feed_url(self):
        self.home()
        return self.page.get_feed_url()

    def iter_links(self, url):
        # Without an explicit URL, list links from the forum index page.
        if url:
            self.location(url)
        else:
            self.home()

        assert self.is_on_page(ForumPage)
        return self.page.iter_links()

    def iter_posts(self, id, stop_id=None):
        """Iterate a topic's posts oldest-first, following pagination.

        Stops before *stop_id* when given.  Each post's ``parent`` is set to
        the previous post's id (0 for the first post of the topic).
        """
        if id.startswith('http'):
            self.location(id)
        else:
            self.location('%s/%s' % (self.BASEPATH, id2url(id)))
        assert self.is_on_page(TopicPage)

        parent = 0
        while True:
            for post in self.page.iter_posts():
                if stop_id and post.id >= stop_id:
                    return

                post.parent = parent
                yield post
                parent = post.id

            if self.page.cur_page == self.page.tot_pages:
                return
            self.location(self.page.next_page_url())

    def riter_posts(self, id, stop_id=None):
        """Iterate a topic's posts newest-first, walking pages backwards.

        A post is buffered in ``child`` and only yielded once the next
        (older) post is seen, so its ``parent`` id can be filled in first.
        """
        if id.startswith('http'):
            self.location(id)
        else:
            self.location('%s/%s' % (self.BASEPATH, id2url(id)))
        assert self.is_on_page(TopicPage)

        child = None
        while True:
            for post in self.page.riter_posts():
                if child:
                    child.parent = post.id
                    yield child
                if post.id <= stop_id:
                    return
                child = post
            if self.page.cur_page == 1:
                # First page reached: the buffered post has no older parent.
                if child:
                    yield child
                return
            self.location(self.page.prev_page_url())

    def get_post(self, id):
        """Fetch a single post, resolving its parent across page boundaries.

        Returns None when the post cannot be found on its page.
        """
        if id.startswith('http'):
            self.location(id)
            id = url2id(id)
        else:
            self.location('%s/%s' % (self.BASEPATH, id2url(id)))
        assert self.is_on_page(TopicPage)

        post = self.page.get_post(int(id.split('.')[-1]))
        if not post:
            return None

        if post.parent == 0 and self.page.cur_page > 1:
            # First post of a page: its parent is the last post of the
            # previous page.
            self.location(self.page.prev_page_url())
            post.parent = self.page.get_last_post_id()
        return post

    def get_forums(self):
        self.home()
        return dict(self.page.iter_all_forums())

    def post_answer(self, forum_id, topic_id, title, content):
        """Create a new topic (topic_id == 0) or reply to an existing one.

        For new topics the forum may be given directly via *forum_id* or
        encoded in the title as "[FORUM] SUBJECT".

        :raises CantSendMessage: on bad title format, unknown forum, or any
            error message reported by the posting page.
        """
        if topic_id == 0:
            if not forum_id:
                # Resolve the forum from the "[FORUM] SUBJECT" title format.
                forums = self.get_forums()
                forums_prompt = 'Forums list:\n%s' % ('\n'.join(['\t- %s' % f for f in forums.itervalues()]))
                m = re.match('\[(.*)\] (.*)', title or '')
                if not m:
                    raise CantSendMessage('Please enter a title formatted like that:\n\t"[FORUM] SUBJECT"\n\n%s' % forums_prompt)

                forum_id = None
                for k,v in forums.iteritems():
                    if v.lower() == m.group(1).lower():
                        forum_id = k
                        break

                if not forum_id:
                    raise CantSendMessage('Forum "%s" not found.\n\n%s' % (m.group(1), forums_prompt))

            self.location('%s/posting.php?mode=post&f=%d' % (self.BASEPATH, forum_id))
            assert self.is_on_page(PostingPage)

            self.page.post(title, content)

            assert self.is_on_page(PostingPage)
            error = self.page.get_error_message()
            if error:
                raise CantSendMessage(u'Unable to send message: %s' % error)
        else:
            self.location('%s/%s' % (self.BASEPATH, id2url(topic_id)))
            assert self.is_on_page(TopicPage)

            self.page.go_reply()
            assert self.is_on_page(PostingPage)

            # Don't send title because it isn't needed in real use case
            # and with monboob title is something like:
            # Re: [Forum Name] Re: Topic Name
            if title is not None and title.startswith('Re:'):
                title = None

            self.page.post(title, content)

            assert self.is_on_page(PostingPage)
            error = self.page.get_error_message()
            if error:
                raise CantSendMessage(u'Unable to send message: %s' % error)
<?php

// autoload_namespaces.php @generated by Composer

$vendorDir = dirname(__DIR__);
$baseDir = dirname($vendorDir);

// Maps PSR-0 namespace prefixes to the directories they are loaded from.
// NOTE(review): generated file — changes are overwritten on the next
// `composer dump-autoload`.
return array(
    'Main' => array($baseDir . '/src'),
    'Lala' => array($baseDir . '/src', $baseDir . '/lib'),
);
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dpll/dpll-device.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Digital Phase-Locked Loop (DPLL) Device
maintainers:
- Ivan Vecera <ivecera@redhat.com>
description:
Digital Phase-Locked Loop (DPLL) device is used for precise clock
synchronization in networking and telecom hardware. The device can
have one or more channels (DPLLs) and one or more physical input and
output pins. Each DPLL channel can either produce pulse-per-clock signal
or drive ethernet equipment clock. The type of each channel can be
indicated by dpll-types property.
properties:
$nodename:
pattern: "^dpll(@.*)?$"
"#address-cells":
const: 0
"#size-cells":
const: 0
dpll-types:
description: List of DPLL channel types, one per DPLL instance.
$ref: /schemas/types.yaml#/definitions/non-unique-string-array
items:
enum: [pps, eec]
input-pins:
type: object
description: DPLL input pins
unevaluatedProperties: false
properties:
"#address-cells":
const: 1
"#size-cells":
const: 0
patternProperties:
"^pin@[0-9a-f]+$":
$ref: /schemas/dpll/dpll-pin.yaml
unevaluatedProperties: false
required:
- "#address-cells"
- "#size-cells"
output-pins:
type: object
description: DPLL output pins
unevaluatedProperties: false
properties:
"#address-cells":
const: 1
"#size-cells":
const: 0
patternProperties:
"^pin@[0-9]+$":
$ref: /schemas/dpll/dpll-pin.yaml
unevaluatedProperties: false
required:
- "#address-cells"
- "#size-cells"
additionalProperties: true | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/dpll/dpll-device.yaml |
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.build.architecture.url.encode;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
/**
 * Fixture that deliberately calls {@link URLEncoder#encode(String, String)}
 * with a {@code String} charset name so the build's architecture check can
 * flag it. Do not "fix" this call — the violation is the point of the test.
 */
class UrlEncodeWithStringEncoding {

	void exampleMethod() throws UnsupportedEncodingException {
		// Intentional rule violation exercised by the architecture tests.
		URLEncoder.encode("https://example.com", "UTF-8");
	}

}
# Copyright 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.playbook.conditional import Conditional
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    ''' Fail with custom message '''

    # This action runs entirely on the controller; nothing is copied to
    # the managed host.
    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        """Evaluate every 'that' conditional and fail on the first false one.

        :param tmp: temporary work dir (unused here; kept for API compat)
        :param task_vars: variables available when templating the conditionals
        :returns: result dict; ``failed`` is set when an assertion is false
        :raises AnsibleError: when the required 'that' argument is missing
        """
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        if 'that' not in self._task.args:
            raise AnsibleError('conditional required in "that" string')

        # Optional custom failure message.
        msg = None
        if 'msg' in self._task.args:
            msg = self._task.args['msg']

        # make sure the 'that' items are a list
        thats = self._task.args['that']
        if not isinstance(thats, list):
            thats = [thats]

        # Now we iterate over the that items, temporarily assigning them
        # to the task's when value so we can evaluate the conditional using
        # the built in evaluate function. The when has already been evaluated
        # by this point, and is not used again, so we don't care about mangling
        # that value now
        cond = Conditional(loader=self._loader)
        result['_ansible_verbose_always'] = True

        for that in thats:
            cond.when = [that]
            test_result = cond.evaluate_conditional(templar=self._templar, all_vars=task_vars)
            if not test_result:
                # Short-circuit: report the first failing assertion.
                result['failed'] = True
                result['evaluated_to'] = test_result
                result['assertion'] = that
                if msg:
                    result['msg'] = msg
                return result

        result['changed'] = False
        result['msg'] = 'All assertions passed'
        return result
# Generated by Django 2.0.8 on 2020-07-19 16:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds reviewer fields (avaliador, data_analise)
    # to SolicitacaoDeAdesao and re-declares the shared estado_processo /
    # situacao choice lists so they stay in sync across models.

    dependencies = [
        ('adesao', '0062_solicitacaodeadesao'),
    ]

    operations = [
        migrations.AddField(
            model_name='solicitacaodeadesao',
            name='avaliador',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='solicitacao_avaliador', to='adesao.Usuario'),
        ),
        migrations.AddField(
            model_name='solicitacaodeadesao',
            name='data_analise',
            field=models.DateTimeField(blank=True, null=True, verbose_name='Data de Análise'),
        ),
        migrations.AlterField(
            model_name='historico',
            name='situacao',
            field=models.CharField(blank=True, choices=[('0', 'Aguardando preenchimento dos dados cadastrais'), ('1', 'Aguardando envio da documentação'), ('2', 'Aguardando renovação da adesão'), ('3', 'Diligência Documental'), ('4', 'Aguardando análise do Plano de Trabalho'), ('5', 'Diligência Documental'), ('6', 'Publicado no DOU'), ('7', 'Acordo de Cooperação e Termo de Adesão aprovados')], max_length=1, null=True),
        ),
        migrations.AlterField(
            model_name='sistemacultura',
            name='estado_processo',
            field=models.CharField(choices=[('0', 'Aguardando preenchimento dos dados cadastrais'), ('1', 'Aguardando envio da documentação'), ('2', 'Aguardando renovação da adesão'), ('3', 'Diligência Documental'), ('4', 'Aguardando análise do Plano de Trabalho'), ('5', 'Diligência Documental'), ('6', 'Publicado no DOU'), ('7', 'Acordo de Cooperação e Termo de Adesão aprovados')], default='0', max_length=1),
        ),
        migrations.AlterField(
            model_name='solicitacaodeadesao',
            name='alterado_por',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='solicitacao_alterado_por', to='adesao.Usuario'),
        ),
        migrations.AlterField(
            model_name='usuario',
            name='estado_processo',
            field=models.CharField(choices=[('0', 'Aguardando preenchimento dos dados cadastrais'), ('1', 'Aguardando envio da documentação'), ('2', 'Aguardando renovação da adesão'), ('3', 'Diligência Documental'), ('4', 'Aguardando análise do Plano de Trabalho'), ('5', 'Diligência Documental'), ('6', 'Publicado no DOU'), ('7', 'Acordo de Cooperação e Termo de Adesão aprovados')], default='0', max_length=1),
        ),
    ]
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Jakub Jirutka <jakub@jirutka.cz>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import shutil
from os import path
DOCUMENTATION = '''
---
module: layman
author: "Jakub Jirutka (@jirutka)"
version_added: "1.6"
short_description: Manage Gentoo overlays
description:
- Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux.
Please note that Layman must be installed on a managed node prior using this module.
requirements:
- "python >= 2.6"
- layman python module
options:
name:
description:
- The overlay id to install, synchronize, or uninstall.
Use 'ALL' to sync all of the installed overlays (can be used only when C(state=updated)).
required: true
list_url:
description:
- An URL of the alternative overlays list that defines the overlay to install.
This list will be fetched and saved under C(${overlay_defs})/${name}.xml), where
C(overlay_defs) is readed from the Layman's configuration.
required: false
state:
description:
- Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay.
required: false
default: present
choices: [present, absent, updated]
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be
set to C(no) when no other option exists. Prior to 1.9.3 the code
defaulted to C(no).
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: '1.9.3'
'''
EXAMPLES = '''
# Install the overlay 'mozilla' which is on the central overlays list.
- layman: name=mozilla
# Install the overlay 'cvut' from the specified alternative list.
- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml
# Update (sync) the overlay 'cvut', or install if not installed yet.
- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml state=updated
# Update (sync) all of the installed overlays.
- layman: name=ALL state=updated
# Uninstall the overlay 'cvut'.
- layman: name=cvut state=absent
'''
# User-Agent header sent when fetching remote overlay lists.
USERAGENT = 'ansible-httpget'


# Layman is an optional dependency; main() reports a clean failure when
# HAS_LAYMAN_API is False instead of crashing on import.
try:
    from layman.api import LaymanAPI
    from layman.config import BareConfig
    HAS_LAYMAN_API = True
except ImportError:
    HAS_LAYMAN_API = False


# Internal error type; converted to module.fail_json() by main().
class ModuleError(Exception): pass
def init_layman(config=None):
    '''Return a ``LaymanAPI`` built from *config*, or from the default
    configuration file when no config is given.

    :param config: the layman's configuration to use (optional)
    '''
    if config is not None:
        return LaymanAPI(config)
    return LaymanAPI(BareConfig(read_configfile=True, quietness=1))
def download_url(module, url, dest):
    '''Fetch *url* and write the response body to *dest*.

    :param module: the AnsibleModule instance (supplies fetch_url options)
    :param url: the URL to download
    :param dest: the absolute path of where to save the downloaded content to;
        it must be writable and not a directory
    :raises ModuleError: when the HTTP request fails or *dest* cannot be written
    '''
    # Hack to add params in the form that fetch_url expects
    module.params['http_agent'] = USERAGENT
    response, info = fetch_url(module, url)
    if info['status'] != 200:
        raise ModuleError("Failed to get %s: %s" % (url, info['msg']))

    try:
        # `with` guarantees the file is closed even if copyfileobj fails.
        with open(dest, 'w') as f:
            shutil.copyfileobj(response, f)
    except IOError as e:
        # `except ... as` (not `except IOError, e`) is valid on both
        # Python 2.6+ and Python 3.
        raise ModuleError("Failed to write: %s" % str(e))
def install_overlay(module, name, list_url=None):
    '''Installs the overlay repository. If not on the central overlays list,
    then :list_url of an alternative list must be provided. The list will be
    fetched and saved under ``%(overlay_defs)/%(name.xml)`` (location of the
    ``overlay_defs`` is read from the Layman's configuration).

    :param name: the overlay id
    :param list_url: the URL of the remote repositories list to look for the overlay
        definition (optional, default: None)
    :returns: True if the overlay was installed, or False if already exists
        (i.e. nothing has changed)
    :raises ModuleError
    '''
    # read Layman configuration
    layman_conf = BareConfig(read_configfile=True)
    layman = init_layman(layman_conf)

    if layman.is_installed(name):
        return False

    # Check mode: report what would happen without touching the system.
    if module.check_mode:
        mymsg = 'Would add layman repo \'' + name + '\''
        module.exit_json(changed=True, msg=mymsg)

    if not layman.is_repo(name):
        # Overlay unknown to layman: fetch the alternative list so it can
        # be found, then reload layman to pick up the new definition file.
        if not list_url:
            raise ModuleError("Overlay '%s' is not on the list of known " \
                "overlays and URL of the remote list was not provided." % name)

        overlay_defs = layman_conf.get_option('overlay_defs')
        dest = path.join(overlay_defs, name + '.xml')

        download_url(module, list_url, dest)

        # reload config
        layman = init_layman()

    if not layman.add_repos(name):
        raise ModuleError(layman.get_errors())

    return True
def uninstall_overlay(module, name):
    '''Uninstalls the given overlay repository from the system.

    :param module: the AnsibleModule instance (used for check mode)
    :param name: the overlay id to uninstall
    :returns: True if the overlay was uninstalled, or False if doesn't exist
        (i.e. nothing has changed)
    :raises ModuleError
    '''
    layman = init_layman()

    if not layman.is_installed(name):
        return False

    # Check mode: report what would happen without touching the system.
    if module.check_mode:
        mymsg = 'Would remove layman repo \'' + name + '\''
        module.exit_json(changed=True, msg=mymsg)

    layman.delete_repos(name)
    # delete_repos() reports problems via get_errors() instead of raising.
    if layman.get_errors(): raise ModuleError(layman.get_errors())

    return True
def sync_overlay(name):
    '''Synchronizes the specified overlay repository.

    :param name: the overlay repository id to sync
    :raises ModuleError
    '''
    layman = init_layman()

    if not layman.sync(name):
        # On failure, each entry of sync_results[2] carries its error
        # message at index 1.
        messages = [ str(item[1]) for item in layman.sync_results[2] ]
        raise ModuleError(messages)
def sync_overlays():
    '''Synchronize all of the installed overlays.

    Aborts on the first overlay whose sync fails (sync_overlay raises).

    :raises ModuleError
    '''
    layman = init_layman()

    for name in layman.get_installed():
        sync_overlay(name)
def main():
    '''Entry point: parse module arguments and dispatch to the layman helpers.'''
    # define module
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            list_url = dict(aliases=['url']),
            state = dict(default="present", choices=['present', 'absent', 'updated']),
            validate_certs = dict(required=False, default=True, type='bool'),
        ),
        supports_check_mode=True
    )

    if not HAS_LAYMAN_API:
        module.fail_json(msg='Layman is not installed')

    state, name, url = (module.params[key] for key in ['state', 'name', 'list_url'])

    changed = False
    try:
        if state == 'present':
            changed = install_overlay(module, name, url)

        elif state == 'updated':
            if name == 'ALL':
                sync_overlays()
            elif install_overlay(module, name, url):
                # Freshly installed counts as changed; otherwise just sync.
                changed = True
            else:
                sync_overlay(name)
        else:
            changed = uninstall_overlay(module, name)

    except ModuleError as e:
        # `except ... as` is valid on Python 2.6+ and 3; BaseException.message
        # was deprecated in 2.6 and removed in 3, so use str(e) instead.
        module.fail_json(msg=str(e))
    else:
        module.exit_json(changed=changed, name=name)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
<?php
namespace Illuminate\Tests\Database;
use Illuminate\Container\Container;
use Illuminate\Database\Capsule\Manager as DB;
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Support\Facades\Facade;
use PHPUnit\Framework\TestCase;
class DatabaseSchemaBuilderIntegrationTest extends TestCase
{
    /**
     * The capsule manager driving the in-memory SQLite connection.
     *
     * @var \Illuminate\Database\Capsule\Manager
     */
    protected $db;

    /**
     * Bootstrap database.
     *
     * @return void
     */
    protected function setUp(): void
    {
        $this->db = $db = new DB;

        $db->addConnection([
            'driver' => 'sqlite',
            'database' => ':memory:',
        ]);

        $db->setAsGlobal();

        // Facades need a container with a 'db' binding to resolve against.
        $container = new Container;
        $container->instance('db', $db->getDatabaseManager());

        Facade::setFacadeApplication($container);
    }

    protected function tearDown(): void
    {
        // Reset facade state so subsequent test classes start clean.
        Facade::clearResolvedInstances();
        Facade::setFacadeApplication(null);

        parent::tearDown();
    }

    public function testHasColumnWithTablePrefix()
    {
        $this->db->connection()->setTablePrefix('test_');

        $this->db->connection()->getSchemaBuilder()->create('table1', function (Blueprint $table) {
            $table->integer('id');
            $table->string('name');
        });

        // hasColumn() must transparently apply the configured table prefix.
        $this->assertTrue($this->db->connection()->getSchemaBuilder()->hasColumn('table1', 'name'));
    }

    public function testHasColumnAndIndexWithPrefixIndexDisabled()
    {
        $this->db->addConnection([
            'driver' => 'sqlite',
            'database' => ':memory:',
            'prefix' => 'example_',
            'prefix_indexes' => false,
        ]);

        $this->schemaBuilder()->create('table1', function (Blueprint $table) {
            $table->integer('id');
            $table->string('name')->index();
        });

        // With prefix_indexes disabled, index names omit the table prefix.
        $this->assertTrue($this->schemaBuilder()->hasIndex('table1', 'table1_name_index'));
    }

    public function testHasColumnAndIndexWithPrefixIndexEnabled()
    {
        $this->db->addConnection([
            'driver' => 'sqlite',
            'database' => ':memory:',
            'prefix' => 'example_',
            'prefix_indexes' => true,
        ]);

        $this->schemaBuilder()->create('table1', function (Blueprint $table) {
            $table->integer('id');
            $table->string('name')->index();
        });

        // With prefix_indexes enabled, the prefix is baked into the index name.
        $this->assertTrue($this->schemaBuilder()->hasIndex('table1', 'example_table1_name_index'));
    }

    public function testDropColumnWithTablePrefix()
    {
        $this->db->connection()->setTablePrefix('test_');

        $this->schemaBuilder()->create('pandemic_table', function (Blueprint $table) {
            $table->integer('id');
            $table->string('stay_home');
            $table->string('covid19');
            $table->string('wear_mask');
        });

        // drop single columns
        $this->assertTrue($this->schemaBuilder()->hasColumn('pandemic_table', 'stay_home'));
        $this->schemaBuilder()->dropColumns('pandemic_table', 'stay_home');
        $this->assertFalse($this->schemaBuilder()->hasColumn('pandemic_table', 'stay_home'));

        // drop multiple columns
        $this->assertTrue($this->schemaBuilder()->hasColumn('pandemic_table', 'covid19'));
        $this->schemaBuilder()->dropColumns('pandemic_table', ['covid19', 'wear_mask']);
        $this->assertFalse($this->schemaBuilder()->hasColumn('pandemic_table', 'wear_mask'));
        $this->assertFalse($this->schemaBuilder()->hasColumn('pandemic_table', 'covid19'));
    }

    /**
     * Shortcut to the schema builder of the default connection.
     */
    private function schemaBuilder()
    {
        return $this->db->connection()->getSchemaBuilder();
    }
}
"""
sentry.plugins.sentry_mail.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sentry
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from sentry.plugins import register
from sentry.plugins.bases.notify import NotificationPlugin
from sentry.utils.cache import cache
from sentry.utils.email import MessageBuilder, group_id_to_email
from sentry.utils.http import absolute_uri
# Sentinel distinguishing "not provided" from an explicit None.
NOTSET = object()


class MailPlugin(NotificationPlugin):
    """Built-in notification plugin that delivers Sentry alerts by email."""

    title = 'Mail'
    conf_key = 'mail'
    slug = 'mail'
    version = sentry.VERSION
    author = "Sentry Team"
    author_url = "https://github.com/getsentry/sentry"
    project_default_enabled = True
    project_conf_form = None
    subject_prefix = settings.EMAIL_SUBJECT_PREFIX

    def _send_mail(self, subject, template=None, html_template=None, body=None,
                   project=None, group=None, headers=None, context=None):
        """Build and send a notification mail to the project's recipients.

        Returns early (without sending) when there is nobody to notify.
        """
        send_to = self.get_send_to(project)
        if not send_to:
            return

        subject_prefix = self.get_option('subject_prefix', project) or self.subject_prefix
        # force_text: prefix/subject may be lazy translation proxies.
        subject_prefix = force_text(subject_prefix)
        subject = force_text(subject)

        msg = MessageBuilder(
            subject='%s%s' % (subject_prefix, subject),
            template=template,
            html_template=html_template,
            body=body,
            headers=headers,
            context=context,
            reference=group,
        )
        msg.add_users(send_to, project=project)

        return msg.send()

    def send_test_mail(self, project=None):
        # Sends a fixed test message to verify outgoing email works.
        self._send_mail(
            subject='Test Email',
            body='This email was requested as a test of Sentry\'s outgoing email',
            project=project,
        )

    def get_notification_settings_url(self):
        return absolute_uri(reverse('sentry-account-settings-notifications'))

    def get_project_url(self, project):
        return absolute_uri(reverse('sentry-stream', args=[
            project.organization.slug,
            project.slug,
        ]))

    def on_alert(self, alert):
        """Send the '[team project] ALERT: ...' email for a triggered alert."""
        project = alert.project
        subject = '[{0} {1}] ALERT: {2}'.format(
            project.team.name,
            project.name,
            alert.message,
        )
        template = 'sentry/emails/alert.txt'
        html_template = 'sentry/emails/alert.html'

        context = {
            'alert': alert,
            'link': alert.get_absolute_url(),
        }

        headers = {
            'X-Sentry-Project': project.name,
        }

        self._send_mail(
            subject=subject,
            template=template,
            html_template=html_template,
            project=project,
            headers=headers,
            context=context,
        )

    def should_notify(self, group, event):
        # Skip the parent's checks entirely when the project has no
        # sendable users.
        send_to = self.get_sendable_users(group.project)
        if not send_to:
            return False

        return super(MailPlugin, self).should_notify(group, event)

    def get_send_to(self, project=None):
        """
        Returns a list of email addresses for the users that should be notified of alerts.

        The logic for this is a bit complicated, but it does the following:

        The results of this call can be fairly expensive to calculate, so the send_to list gets cached
        for 60 seconds.
        """
        if project:
            project_id = project.pk
        else:
            project_id = ''

        if not (project and project.team):
            # Without a project and team there is no recipient pool at all.
            return []

        conf_key = self.get_conf_key()
        cache_key = '%s:send_to:%s' % (conf_key, project_id)

        send_to_list = cache.get(cache_key)
        if send_to_list is None:
            send_to_list = self.get_sendable_users(project)
            # Drop empty/None addresses before caching.
            send_to_list = filter(bool, send_to_list)
            cache.set(cache_key, send_to_list, 60)  # 1 minute cache

        return send_to_list

    def notify(self, notification):
        """Render and send the error-notification email for an event."""
        event = notification.event

        group = event.group
        project = group.project

        # Render each event interface to both HTML (for the rich part) and
        # plain text (for the text part); skip interfaces with no HTML body.
        interface_list = []
        for interface in event.interfaces.itervalues():
            body = interface.to_email_html(event)
            if not body:
                continue
            text_body = interface.to_string(event)
            interface_list.append(
                (interface.get_title(), mark_safe(body), text_body)
            )

        subject = group.get_email_subject()

        link = group.get_absolute_url()

        template = 'sentry/emails/error.txt'
        html_template = 'sentry/emails/error.html'

        # Links to edit each rule that triggered this notification.
        rules = []
        for rule in notification.rules:
            rule_link = reverse('sentry-edit-project-rule', args=[
                group.organization.slug, project.slug, rule.id
            ])
            rules.append((rule.label, rule_link))

        context = {
            'project_label': project.get_full_name(),
            'group': group,
            'event': event,
            'tags': event.get_tags(),
            'link': link,
            'interfaces': interface_list,
            'rules': rules,
        }

        # X-Sentry-Reply-To lets inbound mail be routed back to the group.
        headers = {
            'X-Sentry-Logger': group.logger,
            'X-Sentry-Logger-Level': group.get_level_display(),
            'X-Sentry-Team': project.team.name,
            'X-Sentry-Project': project.name,
            'X-Sentry-Reply-To': group_id_to_email(group.id),
        }

        self._send_mail(
            subject=subject,
            template=template,
            html_template=html_template,
            project=project,
            group=group,
            headers=headers,
            context=context,
        )
# Legacy compatibility: older code imported the plugin under this name.
MailProcessor = MailPlugin
# Register the plugin with the plugin registry at import time.
register(MailPlugin)
#!/usr/bin/env bash
# Publishes an Android -SNAPSHOT build to Sonatype Nexus from CI.
# DO NOT ADD 'set -x' not to reveal CircleCI secret context environment variables
set -eu -o pipefail
export ANDROID_NDK_HOME=/opt/ndk
export ANDROID_HOME=/opt/android/sdk
export GRADLE_VERSION=6.8.3
export GRADLE_HOME=/opt/gradle/gradle-$GRADLE_VERSION
export GRADLE_PATH=$GRADLE_HOME/bin/gradle
echo "BUILD_ENVIRONMENT:$BUILD_ENVIRONMENT"
ls -la ~/workspace
GRADLE_PROPERTIES=~/workspace/android/gradle.properties
# NOTE(review): with `set -e`, a non-matching grep makes this assignment fail
# and aborts the whole script, so the "version is not snapshot" branch below
# is only reached when grep itself succeeds -- confirm this is intended.
IS_SNAPSHOT="$(grep 'VERSION_NAME=[0-9\.]\+-SNAPSHOT' "$GRADLE_PROPERTIES")"
echo "IS_SNAPSHOT:$IS_SNAPSHOT"
# NOTE(review): every error branch below only echoes and falls through, so the
# script still exits 0 on these failures -- confirm CI treats them as soft skips.
if [ -z "$IS_SNAPSHOT" ]; then
  echo "Error: version is not snapshot."
elif [ -z "$SONATYPE_NEXUS_USERNAME" ]; then
  echo "Error: missing env variable SONATYPE_NEXUS_USERNAME."
elif [ -z "$SONATYPE_NEXUS_PASSWORD" ]; then
  echo "Error: missing env variable SONATYPE_NEXUS_PASSWORD."
elif [ -z "$ANDROID_SIGN_KEY" ]; then
  echo "Error: missing env variable ANDROID_SIGN_KEY."
elif [ -z "$ANDROID_SIGN_PASS" ]; then
  echo "Error: missing env variable ANDROID_SIGN_PASS."
else
  # Write SDK/NDK locations and publishing credentials into Gradle config.
  GRADLE_LOCAL_PROPERTIES=~/workspace/android/local.properties
  rm -f $GRADLE_LOCAL_PROPERTIES
  echo "sdk.dir=/opt/android/sdk" >> $GRADLE_LOCAL_PROPERTIES
  echo "ndk.dir=/opt/ndk" >> $GRADLE_LOCAL_PROPERTIES
  echo "SONATYPE_NEXUS_USERNAME=${SONATYPE_NEXUS_USERNAME}" >> $GRADLE_PROPERTIES
  echo "mavenCentralRepositoryUsername=${SONATYPE_NEXUS_USERNAME}" >> $GRADLE_PROPERTIES
  echo "SONATYPE_NEXUS_PASSWORD=${SONATYPE_NEXUS_PASSWORD}" >> $GRADLE_PROPERTIES
  echo "mavenCentralRepositoryPassword=${SONATYPE_NEXUS_PASSWORD}" >> $GRADLE_PROPERTIES
  echo "signing.keyId=${ANDROID_SIGN_KEY}" >> $GRADLE_PROPERTIES
  echo "signing.password=${ANDROID_SIGN_PASS}" >> $GRADLE_PROPERTIES
  $GRADLE_PATH -p ~/workspace/android/ uploadArchives
fi
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
// Add field conversion funcs.
err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("CertificateSigningRequest"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name",
"spec.signerName":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
},
)
if err != nil {
return err
}
err = scheme.AddFieldLabelConversionFunc(
SchemeGroupVersion.WithKind("ClusterTrustBundle"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name", "spec.signerName":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
},
)
if err != nil {
return fmt.Errorf("while adding ClusterTrustBundle field label conversion func: %w", err)
}
err = scheme.AddFieldLabelConversionFunc(
SchemeGroupVersion.WithKind("PodCertificateRequest"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name", "spec.signerName", "spec.podName", "spec.nodeName":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
},
)
if err != nil {
return fmt.Errorf("while adding PodCertificateRequest field label conversion func: %w", err)
}
return nil
} | go | github | https://github.com/kubernetes/kubernetes | pkg/apis/certificates/v1beta1/conversion.go |
// Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package datadir contains functions to navigate file-layout of etcd data-directory.
package datadir
"""Utility functions and classes used by nose internally.
"""
import inspect
import itertools
import logging
import stat
import os
import re
import sys
import types
import unittest
from nose.pyversion import ClassType, TypeType, isgenerator, ismethod
# Module-wide logger for nose internals.
log = logging.getLogger('nose')
# Matches legal (possibly dotted) python identifiers.
ident_re = re.compile(r'^[A-Za-z_][A-Za-z0-9_.]*$')
# Types considered "classes" by isclass() below.
class_types = (ClassType, TypeType)
# Directory entries skipped by ls_tree(): VCS dirs, bytecode, backups, caches.
skip_pattern = r"(?:\.svn)|(?:[^.]+\.py[co])|(?:.*~)|(?:.*\$py\.class)|(?:__pycache__)"
try:
    # Probe for the builtin set type (Python 2.4+).
    set()
    set = set # make from nose.util import set happy
except NameError:
    try:
        # Python 2.3 fallback: expose sets.Set under the same name.
        from sets import Set as set
    except ImportError:
        pass
def ls_tree(dir_path="",
            skip_pattern=skip_pattern,
            indent="|-- ", branch_indent="| ",
            last_indent="`-- ", last_branch_indent=" "):
    """Render the directory tree rooted at *dir_path* as a single string.

    Note: empty directories currently look like non-directory files in the
    output (pre-existing limitation).
    """
    tree_lines = _ls_tree_lines(dir_path, skip_pattern,
                                indent, branch_indent,
                                last_indent, last_branch_indent)
    return "\n".join(tree_lines)
def _ls_tree_lines(dir_path, skip_pattern,
indent, branch_indent, last_indent, last_branch_indent):
if dir_path == "":
dir_path = os.getcwd()
lines = []
names = os.listdir(dir_path)
names.sort()
dirs, nondirs = [], []
for name in names:
if re.match(skip_pattern, name):
continue
if os.path.isdir(os.path.join(dir_path, name)):
dirs.append(name)
else:
nondirs.append(name)
# list non-directories first
entries = list(itertools.chain([(name, False) for name in nondirs],
[(name, True) for name in dirs]))
def ls_entry(name, is_dir, ind, branch_ind):
if not is_dir:
yield ind + name
else:
path = os.path.join(dir_path, name)
if not os.path.islink(path):
yield ind + name
subtree = _ls_tree_lines(path, skip_pattern,
indent, branch_indent,
last_indent, last_branch_indent)
for x in subtree:
yield branch_ind + x
for name, is_dir in entries[:-1]:
for line in ls_entry(name, is_dir, indent, branch_indent):
yield line
if entries:
name, is_dir = entries[-1]
for line in ls_entry(name, is_dir, last_indent, last_branch_indent):
yield line
def absdir(path):
    """Return the absolute, normalized path to *path* if it names an
    existing directory; otherwise return None.
    """
    if not os.path.isabs(path):
        joined = os.path.join(os.getcwd(), path)
        path = os.path.normpath(os.path.abspath(joined))
    if path is not None and os.path.isdir(path):
        return path
    return None
def absfile(path, where=None):
    """Return absolute, normalized path to file (optionally in directory
    where), or None if the file can't be found either in where or the current
    working directory.

    *where* may also be a list/tuple of directories to search in order.
    If the resolved path is a package directory, its __init__.py is
    returned instead.
    """
    orig = path
    if where is None:
        where = os.getcwd()
    # A sequence of search directories: recurse on each until one hits.
    if isinstance(where, list) or isinstance(where, tuple):
        for maybe_path in where:
            maybe_abs = absfile(path, maybe_path)
            if maybe_abs is not None:
                return maybe_abs
        return None
    if not os.path.isabs(path):
        path = os.path.normpath(os.path.abspath(os.path.join(where, path)))
    if path is None or not os.path.exists(path):
        if where != os.getcwd():
            # try the cwd instead
            path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),
                                                                 orig)))
    if path is None or not os.path.exists(path):
        return None
    if os.path.isdir(path):
        # might want an __init__.py from package
        init = os.path.join(path,'__init__.py')
        if os.path.isfile(init):
            return init
    elif os.path.isfile(path):
        return path
    # Directory without __init__.py (or other non-file): no match.
    return None
def anyp(predicate, iterable):
    """Return True if *predicate* is truthy for any item of *iterable*,
    False otherwise."""
    return any(predicate(item) for item in iterable)
def file_like(name):
    """A name is file-like if it is an existing path, has a directory
    component, ends in .py, or is not a legal python identifier.

    Returns a truthy/falsy value (not necessarily a bool).
    """
    root = os.path.splitext(name)[0]
    return (os.path.exists(name)
            or os.path.dirname(name)
            or name.endswith('.py')
            or not ident_re.match(root))
def func_lineno(func):
    """Return the line number of *func*, or -1 if it cannot be determined.

    A ``compat_co_firstlineno`` attribute (set by wrappers) wins over the
    Python 2 ``func_code.co_firstlineno``.
    """
    try:
        return func.compat_co_firstlineno
    except AttributeError:
        pass
    try:
        return func.func_code.co_firstlineno
    except AttributeError:
        return -1
def isclass(obj):
    """Is obj a class? Inspect's isclass is too liberal and returns True
    for objects that can't be subclasses of anything.
    """
    obj_type = type(obj)
    if obj_type in class_types:
        return True
    return issubclass(obj_type, type)
# backwards compat (issue #64): historical public alias for isgenerator.
is_generator = isgenerator
def ispackage(path):
    """
    Is this path a package directory?
    >>> ispackage('nose')
    True
    >>> ispackage('unit_tests')
    False
    >>> ispackage('nose/plugins')
    True
    >>> ispackage('nose/loader.py')
    False
    """
    if os.path.isdir(path):
        # at least the end of the path must be a legal python identifier
        # and __init__.py[co] must exist
        end = os.path.basename(path)
        if ident_re.match(end):
            for init in ('__init__.py', '__init__.pyc', '__init__.pyo'):
                if os.path.isfile(os.path.join(path, init)):
                    return True
            # Jython compiles __init__ to a $py.class file instead.
            if sys.platform.startswith('java') and \
                os.path.isfile(os.path.join(path, '__init__$py.class')):
                return True
    return False
def isproperty(obj):
    """Return whether *obj* is exactly a ``property`` instance.

    The check is deliberately exact (no subclasses), matching the original
    ``type(obj) == property`` semantics.

    >>> class Foo:
    ...     def got(self):
    ...         return 2
    ...     def get(self):
    ...         return 1
    ...     get = property(get)
    >>> isproperty(Foo.got)
    False
    >>> isproperty(Foo.get)
    True
    """
    return type(obj) is property
def getfilename(package, relativeTo=None):
    """Find the python source file for a package, relative to a
    particular directory (defaults to current working directory if not
    given). Returns the package directory, the module's .py file, or None.
    """
    base = relativeTo if relativeTo is not None else os.getcwd()
    candidate = os.path.join(base, os.sep.join(package.split('.')))
    if os.path.exists(candidate + '/__init__.py'):
        return candidate
    source = candidate + '.py'
    return source if os.path.exists(source) else None
def getpackage(filename):
    """
    Find the full dotted package name for a given python source file
    name. Returns None if the file is not a python source file.
    >>> getpackage('foo.py')
    'foo'
    >>> getpackage('biff/baf.py')
    'baf'
    >>> getpackage('nose/util.py')
    'nose.util'
    Works for directories too.
    >>> getpackage('nose')
    'nose'
    >>> getpackage('nose/plugins')
    'nose.plugins'
    And __init__ files stuck onto directories
    >>> getpackage('nose/plugins/__init__.py')
    'nose.plugins'
    Absolute paths also work.
    >>> path = os.path.abspath(os.path.join('nose', 'plugins'))
    >>> getpackage(path)
    'nose.plugins'
    """
    # Map compiled names (.pyc/.pyo/$py.class) back to their .py source.
    src_file = src(filename)
    if (os.path.isdir(src_file) or not src_file.endswith('.py')) and not ispackage(src_file):
        return None
    base, ext = os.path.splitext(os.path.basename(src_file))
    if base == '__init__':
        # The package name comes entirely from the directory path.
        mod_parts = []
    else:
        mod_parts = [base]
    # Walk up the directory tree while each level is itself a package.
    path, part = os.path.split(os.path.split(src_file)[0])
    while part:
        if ispackage(os.path.join(path, part)):
            mod_parts.append(part)
        else:
            break
        path, part = os.path.split(path)
    mod_parts.reverse()
    return '.'.join(mod_parts)
def ln(label):
    """Return a 70-character divider line with *label* centered.

    >>> ln('hello there')
    '---------------------------- hello there -----------------------------'
    """
    side = (70 - (len(label) + 2)) // 2
    bar = '-' * side
    out = '%s %s %s' % (bar, label, bar)
    # Integer division can leave the line short; pad on the right to 70.
    shortfall = 70 - len(out)
    if shortfall > 0:
        out += '-' * shortfall
    return out
def resolve_name(name, module=None):
    """Resolve a dotted name to a module and its parts. This is stolen
    wholesale from unittest.TestLoader.loadTestByName.

    If *module* is given, the whole dotted *name* is resolved via
    getattr from it; otherwise the longest importable prefix of *name*
    is imported first.

    >>> resolve_name('nose.util') #doctest: +ELLIPSIS
    <module 'nose.util' from...>
    >>> resolve_name('nose.util.resolve_name') #doctest: +ELLIPSIS
    <function resolve_name at...>
    """
    parts = name.split('.')
    parts_copy = parts[:]
    if module is None:
        # Try to import progressively shorter prefixes until one succeeds.
        while parts_copy:
            try:
                log.debug("__import__ %s", name)
                module = __import__('.'.join(parts_copy))
                break
            except ImportError:
                del parts_copy[-1]
                if not parts_copy:
                    # Nothing importable at all: re-raise the last error.
                    raise
        # __import__ returns the top-level package, so walk from there.
        parts = parts[1:]
    obj = module
    log.debug("resolve: %s, %s, %s, %s", parts, name, obj, module)
    for part in parts:
        obj = getattr(obj, part)
    return obj
def split_test_name(test):
    """Split a test name into a 3-tuple containing file, module, and callable
    names, any of which (but not all) may be blank.
    Test names are in the form:
    file_or_module:callable
    Either side of the : may be dotted. To change the splitting behavior, you
    can alter nose.util.split_test_re.

    Raises ValueError for unparseable names like ``foo:bar:baz``.
    """
    norm = os.path.normpath
    file_or_mod = test
    fn = None
    if not ':' in test:
        # only a file or mod part
        if file_like(test):
            return (norm(test), None, None)
        else:
            return (None, test, None)
    # could be path|mod:callable, or a : in the file path someplace
    head, tail = os.path.split(test)
    if not head:
        # this is a case like 'foo:bar' -- generally a module
        # name followed by a callable, but also may be a windows
        # drive letter followed by a path
        try:
            file_or_mod, fn = test.split(':')
            if file_like(fn):
                # must be a funny path
                file_or_mod, fn = test, None
        except ValueError:
            # more than one : in the test
            # this is a case like c:\some\path.py:a_test
            parts = test.split(':')
            if len(parts[0]) == 1:
                # Single-letter prefix: treat it as a drive letter.
                file_or_mod, fn = ':'.join(parts[:-1]), parts[-1]
            else:
                # nonsense like foo:bar:baz
                raise ValueError("Test name '%s' could not be parsed. Please "
                                 "format test names as path:callable or "
                                 "module:callable." % (test,))
    elif not tail:
        # this is a case like 'foo:bar/'
        # : must be part of the file path, so ignore it
        file_or_mod = test
    else:
        if ':' in tail:
            file_part, fn = tail.split(':')
        else:
            file_part = tail
        file_or_mod = os.sep.join([head, file_part])
    if file_or_mod:
        if file_like(file_or_mod):
            return (norm(file_or_mod), None, fn)
        else:
            return (None, file_or_mod, fn)
    else:
        return (None, None, fn)
split_test_name.__test__ = False # do not collect
def test_address(test):
    """Find the test address for a test, which may be a module, filename,
    class, method or function.

    Returns a ``(filename, module_name, call_name)`` tuple; elements that
    cannot be determined are None. Raises TypeError for unrecognized
    objects. NOTE: uses Python 2 only attributes (types.ClassType,
    im_class, func_code-era unittest internals).
    """
    if hasattr(test, "address"):
        # nose's own test wrappers know their address directly.
        return test.address()
    # type-based polymorphism sucks in general, but I believe is
    # appropriate here
    t = type(test)
    file = module = call = None
    if t == types.ModuleType:
        file = getattr(test, '__file__', None)
        module = getattr(test, '__name__', None)
        return (src(file), module, call)
    if t == types.FunctionType or issubclass(t, type) or t == types.ClassType:
        module = getattr(test, '__module__', None)
        if module is not None:
            m = sys.modules[module]
            file = getattr(m, '__file__', None)
            if file is not None:
                file = os.path.abspath(file)
        call = getattr(test, '__name__', None)
        return (src(file), module, call)
    if t == types.MethodType:
        # Address of the owning class, with the method name appended.
        cls_adr = test_address(test.im_class)
        return (src(cls_adr[0]), cls_adr[1],
                "%s.%s" % (cls_adr[2], test.__name__))
    # handle unittest.TestCase instances
    if isinstance(test, unittest.TestCase):
        if (hasattr(test, '_FunctionTestCase__testFunc') # pre 2.7
            or hasattr(test, '_testFunc')): # 2.7
            # unittest FunctionTestCase
            try:
                return test_address(test._FunctionTestCase__testFunc)
            except AttributeError:
                return test_address(test._testFunc)
        # regular unittest.TestCase
        cls_adr = test_address(test.__class__)
        # 2.5 compat: __testMethodName changed to _testMethodName
        try:
            method_name = test._TestCase__testMethodName
        except AttributeError:
            method_name = test._testMethodName
        return (src(cls_adr[0]), cls_adr[1],
                "%s.%s" % (cls_adr[2], method_name))
    if (hasattr(test, '__class__') and
        test.__class__.__module__ not in ('__builtin__', 'builtins')):
        # Instance of a user-defined class: report the class's address.
        return test_address(test.__class__)
    raise TypeError("I don't know what %s is (%s)" % (test, t))
test_address.__test__ = False # do not collect
def try_run(obj, names):
    """Given a list of possible method names, try to run them with the
    provided object. Keep going until something works. Used to run
    setup/teardown methods for module, package, and function tests.

    For module-level fixtures, a fixture that accepts arguments is called
    with the module itself. NOTE: relies on inspect.getargspec, which was
    removed in Python 3.11.
    """
    for name in names:
        func = getattr(obj, name, None)
        if func is not None:
            if type(obj) == types.ModuleType:
                # py.test compatibility
                if isinstance(func, types.FunctionType):
                    args, varargs, varkw, defaults = \
                        inspect.getargspec(func)
                else:
                    # Not a function. If it's callable, call it anyway
                    if hasattr(func, '__call__') and not inspect.ismethod(func):
                        func = func.__call__
                    try:
                        args, varargs, varkw, defaults = \
                            inspect.getargspec(func)
                        args.pop(0) # pop the self off
                    except TypeError:
                        raise TypeError("Attribute %s of %r is not a python "
                                        "function. Only functions or callables"
                                        " may be used as fixtures." %
                                        (name, obj))
                if len(args):
                    # Fixture takes an argument: pass the module in.
                    log.debug("call fixture %s.%s(%s)", obj, name, obj)
                    return func(obj)
            log.debug("call fixture %s.%s", obj, name)
            return func()
def src(filename):
    """Map a compiled python file name (.pyc/.pyo, or jython's $py.class)
    to its .py source name. Names that are not python files pass through
    unchanged; None stays None.
    """
    if filename is None:
        return filename
    if sys.platform.startswith('java') and filename.endswith('$py.class'):
        return '.'.join((filename[:-9], 'py'))
    base, ext = os.path.splitext(filename)
    if ext not in ('.pyc', '.pyo', '.py'):
        return filename
    return base + '.py'
def regex_last_key(regex):
    """Return a sort key function that orders items matching *regex* after
    items that do not match, with alphabetical order within each group.
    """
    def key(candidate):
        matched = 1 if regex.search(candidate) else 0
        return (matched, candidate)
    return key
def tolist(val):
    """Convert a value that may be a list or a (possibly comma-separated)
    string into a list. The exception: None is returned as None, not [None].

    >>> tolist(["one", "two"])
    ['one', 'two']
    >>> tolist("hello")
    ['hello']
    >>> tolist("separate,values, with, commas, spaces , are ,ok")
    ['separate', 'values', 'with', 'commas', 'spaces', 'are', 'ok']
    """
    if val is None:
        return None
    try:
        # Duck-check: anything list-like passes through untouched.
        val.extend([])
    except AttributeError:
        pass
    else:
        return val
    try:
        # Strings split on commas (with surrounding whitespace stripped).
        return re.split(r'\s*,\s*', val)
    except TypeError:
        # Some other iterable: materialize it.
        return list(val)
class odict(dict):
    """Simple ordered dict implementation, based on:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747

    Insertion order is tracked in the _keys list. NOTE: items()/values()
    rely on Python 2 zip/map returning lists.
    """
    def __init__(self, *arg, **kw):
        # _keys holds keys in insertion order.
        self._keys = []
        super(odict, self).__init__(*arg, **kw)
    def __delitem__(self, key):
        super(odict, self).__delitem__(key)
        self._keys.remove(key)
    def __setitem__(self, key, item):
        super(odict, self).__setitem__(key, item)
        if key not in self._keys:
            self._keys.append(key)
    def __str__(self):
        return "{%s}" % ', '.join(["%r: %r" % (k, v) for k, v in self.items()])
    def clear(self):
        super(odict, self).clear()
        self._keys = []
    def copy(self):
        # NOTE(review): dict.copy() returns a plain dict, so assigning
        # _keys here looks like it would fail -- confirm whether copy()
        # is actually exercised anywhere.
        d = super(odict, self).copy()
        d._keys = self._keys[:]
        return d
    def items(self):
        # Pairs in insertion order.
        return zip(self._keys, self.values())
    def keys(self):
        # Return a copy so callers can't mutate the internal order list.
        return self._keys[:]
    def setdefault(self, key, failobj=None):
        item = super(odict, self).setdefault(key, failobj)
        if key not in self._keys:
            self._keys.append(key)
        return item
    def update(self, dict):
        super(odict, self).update(dict)
        for key in dict.keys():
            if key not in self._keys:
                self._keys.append(key)
    def values(self):
        # Values in insertion order.
        return map(self.get, self._keys)
def transplant_func(func, module):
    """
    Make a function imported from module A appear as if it is located
    in module B.
    >>> from pprint import pprint
    >>> pprint.__module__
    'pprint'
    >>> pp = transplant_func(pprint, __name__)
    >>> pp.__module__
    'nose.util'
    The original function is not modified.
    >>> pprint.__module__
    'pprint'
    Calling the transplanted function calls the original.
    >>> pp([1, 2])
    [1, 2]
    >>> pprint([1,2])
    [1, 2]
    """
    # Imported lazily to avoid a circular import at module load time.
    from nose.tools import make_decorator
    if isgenerator(func):
        # Generator functions need a generator wrapper so the transplant
        # is itself recognized as a generator.
        def newfunc(*arg, **kw):
            for v in func(*arg, **kw):
                yield v
    else:
        def newfunc(*arg, **kw):
            return func(*arg, **kw)
    # Copy name/docstring/attributes from the original onto the wrapper.
    newfunc = make_decorator(func)(newfunc)
    newfunc.__module__ = module
    return newfunc
def transplant_class(cls, module):
    """
    Make a class appear to reside in *module*, rather than the module in
    which it is actually defined. The returned object is a subclass of
    *cls* carrying the original class name.

    >>> from nose.failure import Failure
    >>> Failure.__module__
    'nose.failure'
    >>> Nf = transplant_class(Failure, __name__)
    >>> Nf.__module__
    'nose.util'
    >>> Nf.__name__
    'Failure'
    """
    class C(cls):
        pass
    C.__name__ = cls.__name__
    C.__module__ = module
    return C
def safe_str(val, encoding='utf-8'):
    """Convert *val* to a byte string without raising on non-ASCII text.

    NOTE: Python 2 only -- falls back to unicode().encode(*encoding*),
    and flattens Exception args recursively.
    """
    try:
        return str(val)
    except UnicodeEncodeError:
        if isinstance(val, Exception):
            # Join the stringified exception args instead.
            return ' '.join([safe_str(arg, encoding)
                             for arg in val])
        return unicode(val).encode(encoding)
def is_executable(file):
    """Return True if *file* exists and any execute bit (user, group, or
    other) is set. (Parameter name shadows the builtin for API compat.)
    """
    if not os.path.exists(file):
        return False
    mode = os.stat(file).st_mode
    any_exec = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    return bool(mode & any_exec)
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Traversing Python modules and classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.util import tf_inspect
__all__ = ['traverse']
def _traverse_internal(root, visit, stack, path):
  """Recursive worker for `traverse`: visit `root`, then its members."""
  # Only modules and classes are traversed; anything else is a leaf.
  if not (tf_inspect.isclass(root) or tf_inspect.ismodule(root)):
    return
  try:
    members = tf_inspect.getmembers(root)
  except ImportError:
    # On some Python installations, some modules do not support enumerating
    # members (six in particular), leading to import errors.
    members = []
  enclosing = stack + [root]
  # `visit` may prune `members` in place to limit the traversal.
  visit(path, root, members)
  for name, child in members:
    # Do not descend into built-in modules.
    if tf_inspect.ismodule(
        child) and child.__name__ in sys.builtin_module_names:
      continue
    # Break cycles by reference identity (`in` would use equality).
    if any(child is seen for seen in enclosing):
      continue
    _traverse_internal(child, visit, enclosing,
                       path + '.' + name if path else name)
def traverse(root, visit):
  """Recursively enumerate all members of `root`.

  Similar to the Python library function `os.path.walk`: walks the tree of
  Python objects starting with `root`, depth first. Parent-child
  relationships are defined by membership in modules or classes.

  For each module or class `parent` reached, `visit` is called as
  `visit(path, parent, children)`, where `path` is the dotted name by which
  `parent` is reachable from the starting context (e.g. a class `Y` nested
  in a starting class `X` yields `('Y', X.Y, children)`).

  If `root` is not a module or class, `visit` is never called. Built-in
  modules are never descended into.

  `children` is the list of `(name, object)` pairs from
  `tf_inspect.getmembers`; `visit` may prune it in place (via `del` or slice
  assignment) to skip parts of the tree.

  Cycles (detected by reference identity, `is`) stop the traversal: an
  object already on the traversal stack may appear in `children` but will
  not be visited as a `parent` again.

  Traversing system modules can take a long time; pass a `visit` callable
  that blacklists such modules.

  Args:
    root: A python object with which to start the traversal.
    visit: A function taking arguments `(path, parent, children)`. Will be
      called for each object found in the traversal.
  """
  _traverse_internal(root, visit, stack=[], path='')
# Tweepy
# Copyright 2010 Joshua Roesslein
# See LICENSE for details.
from datetime import datetime
import time
import htmlentitydefs
import re
import locale
from urllib import quote
from email.utils import parsedate
def parse_datetime(string):
    """Convert an RFC 2822 date string into a naive datetime (tz ignored)."""
    fields = parsedate(string)[:6]
    return datetime(*fields)
def parse_html_value(html):
    """Return the text between the first '>' and the last '<' in *html*."""
    start = html.find('>') + 1
    end = html.rfind('<')
    return html[start:end]
def parse_a_href(atag):
    """Extract the first double-quoted attribute value from an anchor tag.

    NOTE(review): this returns the *first* quoted attribute, which callers
    assume is href -- confirm tags always list href first.
    """
    open_quote = atag.find('"') + 1
    close_quote = atag.find('"', open_quote)
    return atag[open_quote:close_quote]
def convert_to_utf8_str(arg):
    """Coerce *arg* to a UTF-8 byte string (Python 2: uses `unicode`)."""
    # written by Michael Norton (http://docondev.blogspot.com/)
    if isinstance(arg, unicode):
        arg = arg.encode('utf-8')
    elif not isinstance(arg, str):
        # Non-string values are stringified with their default repr.
        arg = str(arg)
    return arg
def import_simplejson():
    """Return the first available json implementation.

    Tries, in order: the third-party ``simplejson``, the stdlib ``json``
    (Python 2.6+), and the copy bundled with Django (Google App Engine).

    Raises:
        ImportError: if no json implementation can be loaded.
    """
    try:
        import simplejson as json
    except ImportError:
        try:
            import json  # Python 2.6+
        except ImportError:
            try:
                from django.utils import simplejson as json  # Google App Engine
            except ImportError:
                # Parenthesized raise is valid on both Python 2 and 3;
                # the old `raise ImportError, "msg"` form is a Python 3
                # syntax error.
                raise ImportError("Can't load a json library")
    return json
def list_to_csv(item_list):
    """Join items into a comma-separated string; falsy input yields None."""
    if not item_list:
        return None
    return ','.join(str(item) for item in item_list)
def urlencode_noplus(query):
    """URL-encode a dict without '+' for spaces (safe='' percent-encodes
    everything, including spaces). NOTE: Python 2 only (iteritems)."""
    return '&'.join(['%s=%s' % (quote(str(k), ''), quote(str(v), '')) \
        for k, v in query.iteritems()])
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for /usr/include/GL/glu.h
Generated by tools/gengl.py.
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypes import *
from pyglet.gl.lib import link_GLU as _link_function
from pyglet.gl.lib import c_ptrdiff_t
# BEGIN GENERATED CONTENT (do not edit below this line)
# This content is generated by tools/gengl.py.
# Wrapper for /usr/include/GL/glu.h
GLU_EXT_object_space_tess = 1 # /usr/include/GL/glu.h:71
GLU_EXT_nurbs_tessellator = 1 # /usr/include/GL/glu.h:72
GLU_FALSE = 0 # /usr/include/GL/glu.h:75
GLU_TRUE = 1 # /usr/include/GL/glu.h:76
GLU_VERSION_1_1 = 1 # /usr/include/GL/glu.h:79
GLU_VERSION_1_2 = 1 # /usr/include/GL/glu.h:80
GLU_VERSION_1_3 = 1 # /usr/include/GL/glu.h:81
GLU_VERSION = 100800 # /usr/include/GL/glu.h:84
GLU_EXTENSIONS = 100801 # /usr/include/GL/glu.h:85
GLU_INVALID_ENUM = 100900 # /usr/include/GL/glu.h:88
GLU_INVALID_VALUE = 100901 # /usr/include/GL/glu.h:89
GLU_OUT_OF_MEMORY = 100902 # /usr/include/GL/glu.h:90
GLU_INCOMPATIBLE_GL_VERSION = 100903 # /usr/include/GL/glu.h:91
GLU_INVALID_OPERATION = 100904 # /usr/include/GL/glu.h:92
GLU_OUTLINE_POLYGON = 100240 # /usr/include/GL/glu.h:96
GLU_OUTLINE_PATCH = 100241 # /usr/include/GL/glu.h:97
GLU_NURBS_ERROR = 100103 # /usr/include/GL/glu.h:100
GLU_ERROR = 100103 # /usr/include/GL/glu.h:101
GLU_NURBS_BEGIN = 100164 # /usr/include/GL/glu.h:102
GLU_NURBS_BEGIN_EXT = 100164 # /usr/include/GL/glu.h:103
GLU_NURBS_VERTEX = 100165 # /usr/include/GL/glu.h:104
GLU_NURBS_VERTEX_EXT = 100165 # /usr/include/GL/glu.h:105
GLU_NURBS_NORMAL = 100166 # /usr/include/GL/glu.h:106
GLU_NURBS_NORMAL_EXT = 100166 # /usr/include/GL/glu.h:107
GLU_NURBS_COLOR = 100167 # /usr/include/GL/glu.h:108
GLU_NURBS_COLOR_EXT = 100167 # /usr/include/GL/glu.h:109
GLU_NURBS_TEXTURE_COORD = 100168 # /usr/include/GL/glu.h:110
GLU_NURBS_TEX_COORD_EXT = 100168 # /usr/include/GL/glu.h:111
GLU_NURBS_END = 100169 # /usr/include/GL/glu.h:112
GLU_NURBS_END_EXT = 100169 # /usr/include/GL/glu.h:113
GLU_NURBS_BEGIN_DATA = 100170 # /usr/include/GL/glu.h:114
GLU_NURBS_BEGIN_DATA_EXT = 100170 # /usr/include/GL/glu.h:115
GLU_NURBS_VERTEX_DATA = 100171 # /usr/include/GL/glu.h:116
GLU_NURBS_VERTEX_DATA_EXT = 100171 # /usr/include/GL/glu.h:117
GLU_NURBS_NORMAL_DATA = 100172 # /usr/include/GL/glu.h:118
GLU_NURBS_NORMAL_DATA_EXT = 100172 # /usr/include/GL/glu.h:119
GLU_NURBS_COLOR_DATA = 100173 # /usr/include/GL/glu.h:120
GLU_NURBS_COLOR_DATA_EXT = 100173 # /usr/include/GL/glu.h:121
GLU_NURBS_TEXTURE_COORD_DATA = 100174 # /usr/include/GL/glu.h:122
GLU_NURBS_TEX_COORD_DATA_EXT = 100174 # /usr/include/GL/glu.h:123
GLU_NURBS_END_DATA = 100175 # /usr/include/GL/glu.h:124
GLU_NURBS_END_DATA_EXT = 100175 # /usr/include/GL/glu.h:125
GLU_NURBS_ERROR1 = 100251 # /usr/include/GL/glu.h:128
GLU_NURBS_ERROR2 = 100252 # /usr/include/GL/glu.h:129
GLU_NURBS_ERROR3 = 100253 # /usr/include/GL/glu.h:130
GLU_NURBS_ERROR4 = 100254 # /usr/include/GL/glu.h:131
GLU_NURBS_ERROR5 = 100255 # /usr/include/GL/glu.h:132
GLU_NURBS_ERROR6 = 100256 # /usr/include/GL/glu.h:133
GLU_NURBS_ERROR7 = 100257 # /usr/include/GL/glu.h:134
GLU_NURBS_ERROR8 = 100258 # /usr/include/GL/glu.h:135
GLU_NURBS_ERROR9 = 100259 # /usr/include/GL/glu.h:136
GLU_NURBS_ERROR10 = 100260 # /usr/include/GL/glu.h:137
GLU_NURBS_ERROR11 = 100261 # /usr/include/GL/glu.h:138
GLU_NURBS_ERROR12 = 100262 # /usr/include/GL/glu.h:139
GLU_NURBS_ERROR13 = 100263 # /usr/include/GL/glu.h:140
GLU_NURBS_ERROR14 = 100264 # /usr/include/GL/glu.h:141
GLU_NURBS_ERROR15 = 100265 # /usr/include/GL/glu.h:142
GLU_NURBS_ERROR16 = 100266 # /usr/include/GL/glu.h:143
GLU_NURBS_ERROR17 = 100267 # /usr/include/GL/glu.h:144
GLU_NURBS_ERROR18 = 100268 # /usr/include/GL/glu.h:145
GLU_NURBS_ERROR19 = 100269 # /usr/include/GL/glu.h:146
GLU_NURBS_ERROR20 = 100270 # /usr/include/GL/glu.h:147
GLU_NURBS_ERROR21 = 100271 # /usr/include/GL/glu.h:148
GLU_NURBS_ERROR22 = 100272 # /usr/include/GL/glu.h:149
GLU_NURBS_ERROR23 = 100273 # /usr/include/GL/glu.h:150
GLU_NURBS_ERROR24 = 100274 # /usr/include/GL/glu.h:151
GLU_NURBS_ERROR25 = 100275 # /usr/include/GL/glu.h:152
GLU_NURBS_ERROR26 = 100276 # /usr/include/GL/glu.h:153
GLU_NURBS_ERROR27 = 100277 # /usr/include/GL/glu.h:154
GLU_NURBS_ERROR28 = 100278 # /usr/include/GL/glu.h:155
GLU_NURBS_ERROR29 = 100279 # /usr/include/GL/glu.h:156
GLU_NURBS_ERROR30 = 100280 # /usr/include/GL/glu.h:157
GLU_NURBS_ERROR31 = 100281 # /usr/include/GL/glu.h:158
GLU_NURBS_ERROR32 = 100282 # /usr/include/GL/glu.h:159
GLU_NURBS_ERROR33 = 100283 # /usr/include/GL/glu.h:160
GLU_NURBS_ERROR34 = 100284 # /usr/include/GL/glu.h:161
GLU_NURBS_ERROR35 = 100285 # /usr/include/GL/glu.h:162
GLU_NURBS_ERROR36 = 100286 # /usr/include/GL/glu.h:163
GLU_NURBS_ERROR37 = 100287 # /usr/include/GL/glu.h:164
GLU_AUTO_LOAD_MATRIX = 100200 # /usr/include/GL/glu.h:167
GLU_CULLING = 100201 # /usr/include/GL/glu.h:168
GLU_SAMPLING_TOLERANCE = 100203 # /usr/include/GL/glu.h:169
GLU_DISPLAY_MODE = 100204 # /usr/include/GL/glu.h:170
GLU_PARAMETRIC_TOLERANCE = 100202 # /usr/include/GL/glu.h:171
GLU_SAMPLING_METHOD = 100205 # /usr/include/GL/glu.h:172
GLU_U_STEP = 100206 # /usr/include/GL/glu.h:173
GLU_V_STEP = 100207 # /usr/include/GL/glu.h:174
GLU_NURBS_MODE = 100160 # /usr/include/GL/glu.h:175
GLU_NURBS_MODE_EXT = 100160 # /usr/include/GL/glu.h:176
GLU_NURBS_TESSELLATOR = 100161 # /usr/include/GL/glu.h:177
GLU_NURBS_TESSELLATOR_EXT = 100161 # /usr/include/GL/glu.h:178
GLU_NURBS_RENDERER = 100162 # /usr/include/GL/glu.h:179
GLU_NURBS_RENDERER_EXT = 100162 # /usr/include/GL/glu.h:180
GLU_OBJECT_PARAMETRIC_ERROR = 100208 # /usr/include/GL/glu.h:183
GLU_OBJECT_PARAMETRIC_ERROR_EXT = 100208 # /usr/include/GL/glu.h:184
GLU_OBJECT_PATH_LENGTH = 100209 # /usr/include/GL/glu.h:185
GLU_OBJECT_PATH_LENGTH_EXT = 100209 # /usr/include/GL/glu.h:186
GLU_PATH_LENGTH = 100215 # /usr/include/GL/glu.h:187
GLU_PARAMETRIC_ERROR = 100216 # /usr/include/GL/glu.h:188
GLU_DOMAIN_DISTANCE = 100217 # /usr/include/GL/glu.h:189
GLU_MAP1_TRIM_2 = 100210 # /usr/include/GL/glu.h:192
GLU_MAP1_TRIM_3 = 100211 # /usr/include/GL/glu.h:193
GLU_POINT = 100010 # /usr/include/GL/glu.h:196
GLU_LINE = 100011 # /usr/include/GL/glu.h:197
GLU_FILL = 100012 # /usr/include/GL/glu.h:198
GLU_SILHOUETTE = 100013 # /usr/include/GL/glu.h:199
GLU_SMOOTH = 100000 # /usr/include/GL/glu.h:205
GLU_FLAT = 100001 # /usr/include/GL/glu.h:206
GLU_NONE = 100002 # /usr/include/GL/glu.h:207
GLU_OUTSIDE = 100020 # /usr/include/GL/glu.h:210
GLU_INSIDE = 100021 # /usr/include/GL/glu.h:211
GLU_TESS_BEGIN = 100100 # /usr/include/GL/glu.h:214
GLU_BEGIN = 100100 # /usr/include/GL/glu.h:215
GLU_TESS_VERTEX = 100101 # /usr/include/GL/glu.h:216
GLU_VERTEX = 100101 # /usr/include/GL/glu.h:217
GLU_TESS_END = 100102 # /usr/include/GL/glu.h:218
GLU_END = 100102 # /usr/include/GL/glu.h:219
GLU_TESS_ERROR = 100103 # /usr/include/GL/glu.h:220
GLU_TESS_EDGE_FLAG = 100104 # /usr/include/GL/glu.h:221
GLU_EDGE_FLAG = 100104 # /usr/include/GL/glu.h:222
GLU_TESS_COMBINE = 100105 # /usr/include/GL/glu.h:223
GLU_TESS_BEGIN_DATA = 100106 # /usr/include/GL/glu.h:224
GLU_TESS_VERTEX_DATA = 100107 # /usr/include/GL/glu.h:225
GLU_TESS_END_DATA = 100108 # /usr/include/GL/glu.h:226
GLU_TESS_ERROR_DATA = 100109 # /usr/include/GL/glu.h:227
GLU_TESS_EDGE_FLAG_DATA = 100110 # /usr/include/GL/glu.h:228
GLU_TESS_COMBINE_DATA = 100111 # /usr/include/GL/glu.h:229
GLU_CW = 100120 # /usr/include/GL/glu.h:232
GLU_CCW = 100121 # /usr/include/GL/glu.h:233
GLU_INTERIOR = 100122 # /usr/include/GL/glu.h:234
GLU_EXTERIOR = 100123 # /usr/include/GL/glu.h:235
GLU_UNKNOWN = 100124 # /usr/include/GL/glu.h:236
GLU_TESS_WINDING_RULE = 100140 # /usr/include/GL/glu.h:239
GLU_TESS_BOUNDARY_ONLY = 100141 # /usr/include/GL/glu.h:240
GLU_TESS_TOLERANCE = 100142 # /usr/include/GL/glu.h:241
GLU_TESS_ERROR1 = 100151 # /usr/include/GL/glu.h:244
GLU_TESS_ERROR2 = 100152 # /usr/include/GL/glu.h:245
GLU_TESS_ERROR3 = 100153 # /usr/include/GL/glu.h:246
GLU_TESS_ERROR4 = 100154 # /usr/include/GL/glu.h:247
GLU_TESS_ERROR5 = 100155 # /usr/include/GL/glu.h:248
GLU_TESS_ERROR6 = 100156 # /usr/include/GL/glu.h:249
GLU_TESS_ERROR7 = 100157 # /usr/include/GL/glu.h:250
GLU_TESS_ERROR8 = 100158 # /usr/include/GL/glu.h:251
GLU_TESS_MISSING_BEGIN_POLYGON = 100151 # /usr/include/GL/glu.h:252
GLU_TESS_MISSING_BEGIN_CONTOUR = 100152 # /usr/include/GL/glu.h:253
GLU_TESS_MISSING_END_POLYGON = 100153 # /usr/include/GL/glu.h:254
GLU_TESS_MISSING_END_CONTOUR = 100154 # /usr/include/GL/glu.h:255
GLU_TESS_COORD_TOO_LARGE = 100155 # /usr/include/GL/glu.h:256
GLU_TESS_NEED_COMBINE_CALLBACK = 100156 # /usr/include/GL/glu.h:257
GLU_TESS_WINDING_ODD = 100130 # /usr/include/GL/glu.h:260
GLU_TESS_WINDING_NONZERO = 100131 # /usr/include/GL/glu.h:261
GLU_TESS_WINDING_POSITIVE = 100132 # /usr/include/GL/glu.h:262
GLU_TESS_WINDING_NEGATIVE = 100133 # /usr/include/GL/glu.h:263
GLU_TESS_WINDING_ABS_GEQ_TWO = 100134 # /usr/include/GL/glu.h:264
class struct_GLUnurbs(Structure):
    """Opaque ctypes stand-in for the C ``GLUnurbs`` struct.

    GLU never exposes the real layout; a single dummy int field is enough
    because instances are only ever handled through ``POINTER(GLUnurbs)``.
    The code generator emitted this identical class twice back-to-back;
    one definition suffices (the duplicate merely rebound the same name).
    """
    __slots__ = [
    ]
struct_GLUnurbs._fields_ = [
    ('_opaque_struct', c_int)
]
GLUnurbs = struct_GLUnurbs # /usr/include/GL/glu.h:274
class struct_GLUquadric(Structure):
    """Opaque ctypes stand-in for the C ``GLUquadric`` struct.

    GLU never exposes the real layout; a single dummy int field is enough
    because instances are only ever handled through ``POINTER(GLUquadric)``.
    The code generator emitted this identical class twice back-to-back;
    one definition suffices (the duplicate merely rebound the same name).
    """
    __slots__ = [
    ]
struct_GLUquadric._fields_ = [
    ('_opaque_struct', c_int)
]
GLUquadric = struct_GLUquadric # /usr/include/GL/glu.h:275
class struct_GLUtesselator(Structure):
    """Opaque ctypes stand-in for the C ``GLUtesselator`` struct.

    GLU never exposes the real layout; a single dummy int field is enough
    because instances are only ever handled through ``POINTER(GLUtesselator)``.
    The code generator emitted this identical class twice back-to-back;
    one definition suffices (the duplicate merely rebound the same name).
    """
    __slots__ = [
    ]
struct_GLUtesselator._fields_ = [
    ('_opaque_struct', c_int)
]
GLUtesselator = struct_GLUtesselator # /usr/include/GL/glu.h:276
GLUnurbsObj = GLUnurbs # /usr/include/GL/glu.h:279
GLUquadricObj = GLUquadric # /usr/include/GL/glu.h:280
GLUtesselatorObj = GLUtesselator # /usr/include/GL/glu.h:281
GLUtriangulatorObj = GLUtesselator # /usr/include/GL/glu.h:282
GLU_TESS_MAX_COORD = 9.9999999999999998e+149 # /usr/include/GL/glu.h:284
_GLUfuncptr = CFUNCTYPE(None) # /usr/include/GL/glu.h:287
# /usr/include/GL/glu.h:289
gluBeginCurve = _link_function('gluBeginCurve', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:290
gluBeginPolygon = _link_function('gluBeginPolygon', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:291
gluBeginSurface = _link_function('gluBeginSurface', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:292
gluBeginTrim = _link_function('gluBeginTrim', None, [POINTER(GLUnurbs)], None)
GLint = c_int # /usr/include/GL/gl.h:159
GLenum = c_uint # /usr/include/GL/gl.h:153
GLsizei = c_int # /usr/include/GL/gl.h:163
# /usr/include/GL/glu.h:293
gluBuild1DMipmapLevels = _link_function('gluBuild1DMipmapLevels', GLint, [GLenum, GLint, GLsizei, GLenum, GLenum, GLint, GLint, GLint, POINTER(None)], None)
# /usr/include/GL/glu.h:294
gluBuild1DMipmaps = _link_function('gluBuild1DMipmaps', GLint, [GLenum, GLint, GLsizei, GLenum, GLenum, POINTER(None)], None)
# /usr/include/GL/glu.h:295
gluBuild2DMipmapLevels = _link_function('gluBuild2DMipmapLevels', GLint, [GLenum, GLint, GLsizei, GLsizei, GLenum, GLenum, GLint, GLint, GLint, POINTER(None)], None)
# /usr/include/GL/glu.h:296
gluBuild2DMipmaps = _link_function('gluBuild2DMipmaps', GLint, [GLenum, GLint, GLsizei, GLsizei, GLenum, GLenum, POINTER(None)], None)
# /usr/include/GL/glu.h:297
gluBuild3DMipmapLevels = _link_function('gluBuild3DMipmapLevels', GLint, [GLenum, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, GLint, GLint, GLint, POINTER(None)], None)
# /usr/include/GL/glu.h:298
gluBuild3DMipmaps = _link_function('gluBuild3DMipmaps', GLint, [GLenum, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(None)], None)
GLboolean = c_ubyte # /usr/include/GL/gl.h:154
GLubyte = c_ubyte # /usr/include/GL/gl.h:160
# /usr/include/GL/glu.h:299
gluCheckExtension = _link_function('gluCheckExtension', GLboolean, [POINTER(GLubyte), POINTER(GLubyte)], None)
GLdouble = c_double # /usr/include/GL/gl.h:166
# /usr/include/GL/glu.h:300
gluCylinder = _link_function('gluCylinder', None, [POINTER(GLUquadric), GLdouble, GLdouble, GLdouble, GLint, GLint], None)
# /usr/include/GL/glu.h:301
gluDeleteNurbsRenderer = _link_function('gluDeleteNurbsRenderer', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:302
gluDeleteQuadric = _link_function('gluDeleteQuadric', None, [POINTER(GLUquadric)], None)
# /usr/include/GL/glu.h:303
gluDeleteTess = _link_function('gluDeleteTess', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:304
gluDisk = _link_function('gluDisk', None, [POINTER(GLUquadric), GLdouble, GLdouble, GLint, GLint], None)
# /usr/include/GL/glu.h:305
gluEndCurve = _link_function('gluEndCurve', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:306
gluEndPolygon = _link_function('gluEndPolygon', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:307
gluEndSurface = _link_function('gluEndSurface', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:308
gluEndTrim = _link_function('gluEndTrim', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:309
gluErrorString = _link_function('gluErrorString', POINTER(GLubyte), [GLenum], None)
GLfloat = c_float # /usr/include/GL/gl.h:164
# /usr/include/GL/glu.h:310
gluGetNurbsProperty = _link_function('gluGetNurbsProperty', None, [POINTER(GLUnurbs), GLenum, POINTER(GLfloat)], None)
# /usr/include/GL/glu.h:311
gluGetString = _link_function('gluGetString', POINTER(GLubyte), [GLenum], None)
# /usr/include/GL/glu.h:312
gluGetTessProperty = _link_function('gluGetTessProperty', None, [POINTER(GLUtesselator), GLenum, POINTER(GLdouble)], None)
# /usr/include/GL/glu.h:313
gluLoadSamplingMatrices = _link_function('gluLoadSamplingMatrices', None, [POINTER(GLUnurbs), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLint)], None)
# /usr/include/GL/glu.h:314
gluLookAt = _link_function('gluLookAt', None, [GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:315
gluNewNurbsRenderer = _link_function('gluNewNurbsRenderer', POINTER(GLUnurbs), [], None)
# /usr/include/GL/glu.h:316
gluNewQuadric = _link_function('gluNewQuadric', POINTER(GLUquadric), [], None)
# /usr/include/GL/glu.h:317
gluNewTess = _link_function('gluNewTess', POINTER(GLUtesselator), [], None)
# /usr/include/GL/glu.h:318
gluNextContour = _link_function('gluNextContour', None, [POINTER(GLUtesselator), GLenum], None)
# /usr/include/GL/glu.h:319
gluNurbsCallback = _link_function('gluNurbsCallback', None, [POINTER(GLUnurbs), GLenum, _GLUfuncptr], None)
GLvoid = None # /usr/include/GL/gl.h:156
# /usr/include/GL/glu.h:320
gluNurbsCallbackData = _link_function('gluNurbsCallbackData', None, [POINTER(GLUnurbs), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:321
gluNurbsCallbackDataEXT = _link_function('gluNurbsCallbackDataEXT', None, [POINTER(GLUnurbs), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:322
gluNurbsCurve = _link_function('gluNurbsCurve', None, [POINTER(GLUnurbs), GLint, POINTER(GLfloat), GLint, POINTER(GLfloat), GLint, GLenum], None)
# /usr/include/GL/glu.h:323
gluNurbsProperty = _link_function('gluNurbsProperty', None, [POINTER(GLUnurbs), GLenum, GLfloat], None)
# /usr/include/GL/glu.h:324
gluNurbsSurface = _link_function('gluNurbsSurface', None, [POINTER(GLUnurbs), GLint, POINTER(GLfloat), GLint, POINTER(GLfloat), GLint, GLint, POINTER(GLfloat), GLint, GLint, GLenum], None)
# /usr/include/GL/glu.h:325
gluOrtho2D = _link_function('gluOrtho2D', None, [GLdouble, GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:326
gluPartialDisk = _link_function('gluPartialDisk', None, [POINTER(GLUquadric), GLdouble, GLdouble, GLint, GLint, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:327
gluPerspective = _link_function('gluPerspective', None, [GLdouble, GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:328
gluPickMatrix = _link_function('gluPickMatrix', None, [GLdouble, GLdouble, GLdouble, GLdouble, POINTER(GLint)], None)
# /usr/include/GL/glu.h:329
gluProject = _link_function('gluProject', GLint, [GLdouble, GLdouble, GLdouble, POINTER(GLdouble), POINTER(GLdouble), POINTER(GLint), POINTER(GLdouble), POINTER(GLdouble), POINTER(GLdouble)], None)
# /usr/include/GL/glu.h:330
gluPwlCurve = _link_function('gluPwlCurve', None, [POINTER(GLUnurbs), GLint, POINTER(GLfloat), GLint, GLenum], None)
# /usr/include/GL/glu.h:331
gluQuadricCallback = _link_function('gluQuadricCallback', None, [POINTER(GLUquadric), GLenum, _GLUfuncptr], None)
# /usr/include/GL/glu.h:332
gluQuadricDrawStyle = _link_function('gluQuadricDrawStyle', None, [POINTER(GLUquadric), GLenum], None)
# /usr/include/GL/glu.h:333
gluQuadricNormals = _link_function('gluQuadricNormals', None, [POINTER(GLUquadric), GLenum], None)
# /usr/include/GL/glu.h:334
gluQuadricOrientation = _link_function('gluQuadricOrientation', None, [POINTER(GLUquadric), GLenum], None)
# /usr/include/GL/glu.h:335
gluQuadricTexture = _link_function('gluQuadricTexture', None, [POINTER(GLUquadric), GLboolean], None)
# /usr/include/GL/glu.h:336
gluScaleImage = _link_function('gluScaleImage', GLint, [GLenum, GLsizei, GLsizei, GLenum, POINTER(None), GLsizei, GLsizei, GLenum, POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:337
gluSphere = _link_function('gluSphere', None, [POINTER(GLUquadric), GLdouble, GLint, GLint], None)
# /usr/include/GL/glu.h:338
gluTessBeginContour = _link_function('gluTessBeginContour', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:339
gluTessBeginPolygon = _link_function('gluTessBeginPolygon', None, [POINTER(GLUtesselator), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:340
gluTessCallback = _link_function('gluTessCallback', None, [POINTER(GLUtesselator), GLenum, _GLUfuncptr], None)
# /usr/include/GL/glu.h:341
gluTessEndContour = _link_function('gluTessEndContour', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:342
gluTessEndPolygon = _link_function('gluTessEndPolygon', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:343
gluTessNormal = _link_function('gluTessNormal', None, [POINTER(GLUtesselator), GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:344
gluTessProperty = _link_function('gluTessProperty', None, [POINTER(GLUtesselator), GLenum, GLdouble], None)
# /usr/include/GL/glu.h:345
gluTessVertex = _link_function('gluTessVertex', None, [POINTER(GLUtesselator), POINTER(GLdouble), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:346
gluUnProject = _link_function('gluUnProject', GLint, [GLdouble, GLdouble, GLdouble, POINTER(GLdouble), POINTER(GLdouble), POINTER(GLint), POINTER(GLdouble), POINTER(GLdouble), POINTER(GLdouble)], None)
# /usr/include/GL/glu.h:347
gluUnProject4 = _link_function('gluUnProject4', GLint, [GLdouble, GLdouble, GLdouble, GLdouble, POINTER(GLdouble), POINTER(GLdouble), POINTER(GLint), GLdouble, GLdouble, POINTER(GLdouble), POINTER(GLdouble), POINTER(GLdouble), POINTER(GLdouble)], None)
__all__ = ['GLU_EXT_object_space_tess', 'GLU_EXT_nurbs_tessellator',
'GLU_FALSE', 'GLU_TRUE', 'GLU_VERSION_1_1', 'GLU_VERSION_1_2',
'GLU_VERSION_1_3', 'GLU_VERSION', 'GLU_EXTENSIONS', 'GLU_INVALID_ENUM',
'GLU_INVALID_VALUE', 'GLU_OUT_OF_MEMORY', 'GLU_INCOMPATIBLE_GL_VERSION',
'GLU_INVALID_OPERATION', 'GLU_OUTLINE_POLYGON', 'GLU_OUTLINE_PATCH',
'GLU_NURBS_ERROR', 'GLU_ERROR', 'GLU_NURBS_BEGIN', 'GLU_NURBS_BEGIN_EXT',
'GLU_NURBS_VERTEX', 'GLU_NURBS_VERTEX_EXT', 'GLU_NURBS_NORMAL',
'GLU_NURBS_NORMAL_EXT', 'GLU_NURBS_COLOR', 'GLU_NURBS_COLOR_EXT',
'GLU_NURBS_TEXTURE_COORD', 'GLU_NURBS_TEX_COORD_EXT', 'GLU_NURBS_END',
'GLU_NURBS_END_EXT', 'GLU_NURBS_BEGIN_DATA', 'GLU_NURBS_BEGIN_DATA_EXT',
'GLU_NURBS_VERTEX_DATA', 'GLU_NURBS_VERTEX_DATA_EXT', 'GLU_NURBS_NORMAL_DATA',
'GLU_NURBS_NORMAL_DATA_EXT', 'GLU_NURBS_COLOR_DATA',
'GLU_NURBS_COLOR_DATA_EXT', 'GLU_NURBS_TEXTURE_COORD_DATA',
'GLU_NURBS_TEX_COORD_DATA_EXT', 'GLU_NURBS_END_DATA',
'GLU_NURBS_END_DATA_EXT', 'GLU_NURBS_ERROR1', 'GLU_NURBS_ERROR2',
'GLU_NURBS_ERROR3', 'GLU_NURBS_ERROR4', 'GLU_NURBS_ERROR5',
'GLU_NURBS_ERROR6', 'GLU_NURBS_ERROR7', 'GLU_NURBS_ERROR8',
'GLU_NURBS_ERROR9', 'GLU_NURBS_ERROR10', 'GLU_NURBS_ERROR11',
'GLU_NURBS_ERROR12', 'GLU_NURBS_ERROR13', 'GLU_NURBS_ERROR14',
'GLU_NURBS_ERROR15', 'GLU_NURBS_ERROR16', 'GLU_NURBS_ERROR17',
'GLU_NURBS_ERROR18', 'GLU_NURBS_ERROR19', 'GLU_NURBS_ERROR20',
'GLU_NURBS_ERROR21', 'GLU_NURBS_ERROR22', 'GLU_NURBS_ERROR23',
'GLU_NURBS_ERROR24', 'GLU_NURBS_ERROR25', 'GLU_NURBS_ERROR26',
'GLU_NURBS_ERROR27', 'GLU_NURBS_ERROR28', 'GLU_NURBS_ERROR29',
'GLU_NURBS_ERROR30', 'GLU_NURBS_ERROR31', 'GLU_NURBS_ERROR32',
'GLU_NURBS_ERROR33', 'GLU_NURBS_ERROR34', 'GLU_NURBS_ERROR35',
'GLU_NURBS_ERROR36', 'GLU_NURBS_ERROR37', 'GLU_AUTO_LOAD_MATRIX',
'GLU_CULLING', 'GLU_SAMPLING_TOLERANCE', 'GLU_DISPLAY_MODE',
'GLU_PARAMETRIC_TOLERANCE', 'GLU_SAMPLING_METHOD', 'GLU_U_STEP', 'GLU_V_STEP',
'GLU_NURBS_MODE', 'GLU_NURBS_MODE_EXT', 'GLU_NURBS_TESSELLATOR',
'GLU_NURBS_TESSELLATOR_EXT', 'GLU_NURBS_RENDERER', 'GLU_NURBS_RENDERER_EXT',
'GLU_OBJECT_PARAMETRIC_ERROR', 'GLU_OBJECT_PARAMETRIC_ERROR_EXT',
'GLU_OBJECT_PATH_LENGTH', 'GLU_OBJECT_PATH_LENGTH_EXT', 'GLU_PATH_LENGTH',
'GLU_PARAMETRIC_ERROR', 'GLU_DOMAIN_DISTANCE', 'GLU_MAP1_TRIM_2',
'GLU_MAP1_TRIM_3', 'GLU_POINT', 'GLU_LINE', 'GLU_FILL', 'GLU_SILHOUETTE',
'GLU_SMOOTH', 'GLU_FLAT', 'GLU_NONE', 'GLU_OUTSIDE', 'GLU_INSIDE',
'GLU_TESS_BEGIN', 'GLU_BEGIN', 'GLU_TESS_VERTEX', 'GLU_VERTEX',
'GLU_TESS_END', 'GLU_END', 'GLU_TESS_ERROR', 'GLU_TESS_EDGE_FLAG',
'GLU_EDGE_FLAG', 'GLU_TESS_COMBINE', 'GLU_TESS_BEGIN_DATA',
'GLU_TESS_VERTEX_DATA', 'GLU_TESS_END_DATA', 'GLU_TESS_ERROR_DATA',
'GLU_TESS_EDGE_FLAG_DATA', 'GLU_TESS_COMBINE_DATA', 'GLU_CW', 'GLU_CCW',
'GLU_INTERIOR', 'GLU_EXTERIOR', 'GLU_UNKNOWN', 'GLU_TESS_WINDING_RULE',
'GLU_TESS_BOUNDARY_ONLY', 'GLU_TESS_TOLERANCE', 'GLU_TESS_ERROR1',
'GLU_TESS_ERROR2', 'GLU_TESS_ERROR3', 'GLU_TESS_ERROR4', 'GLU_TESS_ERROR5',
'GLU_TESS_ERROR6', 'GLU_TESS_ERROR7', 'GLU_TESS_ERROR8',
'GLU_TESS_MISSING_BEGIN_POLYGON', 'GLU_TESS_MISSING_BEGIN_CONTOUR',
'GLU_TESS_MISSING_END_POLYGON', 'GLU_TESS_MISSING_END_CONTOUR',
'GLU_TESS_COORD_TOO_LARGE', 'GLU_TESS_NEED_COMBINE_CALLBACK',
'GLU_TESS_WINDING_ODD', 'GLU_TESS_WINDING_NONZERO',
'GLU_TESS_WINDING_POSITIVE', 'GLU_TESS_WINDING_NEGATIVE',
'GLU_TESS_WINDING_ABS_GEQ_TWO', 'GLUnurbs', 'GLUquadric', 'GLUtesselator',
'GLUnurbsObj', 'GLUquadricObj', 'GLUtesselatorObj', 'GLUtriangulatorObj',
'GLU_TESS_MAX_COORD', '_GLUfuncptr', 'gluBeginCurve', 'gluBeginPolygon',
'gluBeginSurface', 'gluBeginTrim', 'gluBuild1DMipmapLevels',
'gluBuild1DMipmaps', 'gluBuild2DMipmapLevels', 'gluBuild2DMipmaps',
'gluBuild3DMipmapLevels', 'gluBuild3DMipmaps', 'gluCheckExtension',
'gluCylinder', 'gluDeleteNurbsRenderer', 'gluDeleteQuadric', 'gluDeleteTess',
'gluDisk', 'gluEndCurve', 'gluEndPolygon', 'gluEndSurface', 'gluEndTrim',
'gluErrorString', 'gluGetNurbsProperty', 'gluGetString', 'gluGetTessProperty',
'gluLoadSamplingMatrices', 'gluLookAt', 'gluNewNurbsRenderer',
'gluNewQuadric', 'gluNewTess', 'gluNextContour', 'gluNurbsCallback',
'gluNurbsCallbackData', 'gluNurbsCallbackDataEXT', 'gluNurbsCurve',
'gluNurbsProperty', 'gluNurbsSurface', 'gluOrtho2D', 'gluPartialDisk',
'gluPerspective', 'gluPickMatrix', 'gluProject', 'gluPwlCurve',
'gluQuadricCallback', 'gluQuadricDrawStyle', 'gluQuadricNormals',
'gluQuadricOrientation', 'gluQuadricTexture', 'gluScaleImage', 'gluSphere',
'gluTessBeginContour', 'gluTessBeginPolygon', 'gluTessCallback',
'gluTessEndContour', 'gluTessEndPolygon', 'gluTessNormal', 'gluTessProperty',
'gluTessVertex', 'gluUnProject', 'gluUnProject4']
# END GENERATED CONTENT (do not edit above this line) | unknown | codeparrot/codeparrot-clean | ||
import logging
import sys
import time
import unittest
from doekbase.data_api.util import MonitorMemory, get_logger, basic_config
# Module-level logger for this test module.
_log = get_logger(__name__)
# Fired alerts: name -> available memory (bytes) reported at alert time.
# Written by icanhaz_alert (called from the MonitorMemory thread),
# polled by the tests and by memory_hog.
alerted = {}
# Bytes per megabyte.
MB = 1024 * 1024
def icanhaz_alert(mm, avail, thresh, name):
    """Alert callback for MonitorMemory: record available memory for `name`.

    Parameters:
        mm:     the MonitorMemory instance that fired the alert (unused here).
        avail:  available memory, in bytes, at the time of the alert.
        thresh: alert threshold, in bytes; must be above `avail`.
        name:   key under which `avail` is stored in the module dict `alerted`.
    """
    assert avail < thresh
    alerted[name] = avail
    # Floor division: on Python 3 '/' yields a float, which the '{:d}'
    # format spec rejects with a ValueError.
    _log.debug('\nalerted: {:d} MB'.format(thresh // MB))
def memory_hog(name):
    """Allocate memory in a loop until the alert registered under `name` fires.

    Each iteration grows the process by roughly one million list slots; the
    loop exits once `alerted[name]` has been set by the monitor thread.
    """
    _log.debug('run memory hog until alert {}'.format(name))
    hog = []
    tick = 0
    while True:
        hog.append(['x'] * 1000000)
        if _log.isEnabledFor(logging.DEBUG):
            sys.stdout.write('{:12d} MB\r'.format(len(hog)))
            sys.stdout.flush()
        if alerted.get(name, False):
            break
        # yield the GIL periodically so the monitor thread can run
        tick += 1
        if tick == 100:
            tick = 0
            time.sleep(0.1)
class MyTestCase(unittest.TestCase):
    """Tests for MonitorMemory's low-memory alert thresholds.

    The two hog-based tests are skipped by default because they allocate
    memory until the configured thresholds are actually crossed.
    """
    def test_anyalert(self):
        """test that alerting for memory works at all"""
        # threshold far above any realistic amount of free memory, so the
        # alert fires on the monitor's first check without hogging memory
        num_mb = 1000000
        mm = MonitorMemory()
        # call 'alert' when memory goes below a really high amount
        mm.add_alert(num_mb, icanhaz_alert, 'key')
        # run in a thread
        mm.start()
        # poll until the callback has recorded the alert
        while 1:
            time.sleep(0.5)
            if alerted.get('key', False):
                break
        # print('stopping..')
        mm.stop()
        mm.join()
        # check that memory is at the right level
        self.assertLess(alerted['key'], num_mb * MB)
        # print('stopped. avail: {} MB'.format(alerted['key']/1024/1024))
    @unittest.skipIf(True, "skip memory test")
    def test_basic(self):
        """one memory alert"""
        # skipped by default: hogs memory until ~10 GB free is reached
        num_mb = 10000
        mm = MonitorMemory()
        # call 'alert' when memory goes below 10GB
        mm.add_alert(num_mb, icanhaz_alert, 'key')
        # run in a thread
        mm.start()
        memory_hog('key')
        # print('stopping..')
        mm.stop()
        mm.join()
        # check that memory is at the right level
        self.assertLess(alerted['key'], num_mb * MB)
        # print('stopped. avail: {} MB'.format(alerted['key']/1024/1024))
    @unittest.skipIf(True, "skip memory test")
    def test_multi(self):
        """multiple memory alerts"""
        # skipped by default: checks that every threshold fires as free
        # memory drops past each of them in turn
        num_mb = [10000, 9000, 8000]
        mm = MonitorMemory()
        # add multiple alerts
        for mb in num_mb:
            _log.debug('add alert for {} MB'.format(mb))
            mm.add_alert(mb, icanhaz_alert, 'a{}'.format(mb))
        # run in a thread
        mm.start()
        # hog until the lowest threshold (the last to fire) is crossed
        min_key = 'a{}'.format(min(num_mb))
        memory_hog(min_key)
        _log.debug('stopping..')
        mm.stop()
        mm.join()
        # check that all alerts fired
        for mb in num_mb:
            self.assertIn('a{}'.format(mb), alerted, 'missing key')
        # check that memory is at the right level
        self.assertLess(alerted[min_key], min(num_mb) * MB)
        _log.debug('stopped. avail: {} MB'.format(alerted[min_key] / MB))
if __name__ == '__main__':
    # set DEBUG if number of '-v' options is 2 or more
    # (counts every 'v' across all options starting with '-v', so both
    # '-vv' and '-v -v' enable debug logging)
    num_vb = sum([sum([1 if c == 'v' else 0 for c in list(opt)])
                  for opt in sys.argv if opt.startswith('-v')])
    if num_vb > 1:
        basic_config(logging.DEBUG)
        _log.setLevel(logging.DEBUG)
    # run
    unittest.main()
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# napiprojekt.pl API is used with napiproject administration consent
from __future__ import print_function
import os
import re
import sys
import time
from hashlib import md5
import struct
from six.moves import range
from six.moves import urllib
class GetFPS(object):
    """Context manager that reads the frame rate out of an AVI or MKV file.

    Usage: ``with GetFPS(path) as get_fps: rate = get_fps()`` — note that
    ``__enter__`` returns the bound ``fps`` method itself without calling
    it, so the caller must invoke the returned object.
    """
    def __init__(self, filename):
        # Path of the video file; opened lazily in fps().
        self.filename = filename
    def __enter__(self):
        # Returns the bound method, not its result (no call parentheses).
        return self.fps
    def fps(self):
        """Open the file and return its frame rate as a float (frames/sec)."""
        # NOTE(review): opened "r+b" (read/write) although only reads follow.
        self.file = open(self.filename, "r+b")
        s = self.file.read(4)
        if s == "\x1a\x45\xdf\xa3":
            # EBML magic number -> Matroska (MKV) container
            return self.get_mkv_fps()
        elif s == "RIFF":
            # AVI: read the 32-bit little-endian microseconds-per-frame
            # field at byte offset 32 and invert it into frames/sec
            self.file.seek(32)
            return 1000000.0 / float(struct.unpack('<I', self.file.read(4))[0])
        else:
            raise Exception('Error: Unknown file format not AVI/MKV')
    def __exit__(self, type, value, traceback):
        # self.file does not exist if fps() was never called; ignore that.
        try:
            self.file.close()
        except:
            pass
    def eblm(self, bits=0xf0):
        """Decode one EBML element header starting at the file position.

        With the default ``bits`` returns a ``(class_id, length)`` pair;
        the recursive call with ``bits=0xff`` decodes a single EBML
        variable-length integer and strips its length-marker bit.
        """
        suma = 0x00
        mask = 0x01
        # Accumulate bytes until the value contains the marker bit; the
        # marker shifts up 7 bits for each additional byte of the vint.
        while not (suma & mask):
            suma = (suma << 8) + ord(self.file.read(1))
            if (mask == 0x01) and not (suma & bits):
                raise Exception('Error: MKV stream is broken')
            mask <<= 7
        if bits == 0xf0:
            # class id keeps its marker bit; length (recursive call) does not
            return (suma, self.eblm(bits=0xff))
        else:
            return suma ^ mask
    def get_mkv_fps(self):
        """Walk the MKV element tree and convert track 1's per-frame
        duration (element 0x23E383, read below as nanoseconds per frame)
        into frames per second."""
        track = 0
        self.file.seek(0)
        while True:
            class_id, length = self.eblm()
            # print "class_id: %X length %i position:%i" % (class_id, length, self.file.tell())
            if (class_id == 0x83):
                # TrackType: remember which track we are inside
                track = ord(self.file.read(1))
            elif (class_id == 0x23E383 and track == 1):
                # per-frame duration of track 1 found; payload read below
                break
            elif (class_id not in [0x18538067, 0x1654AE6B, 0xAE, 0x83]): # Segment,Tracks,TrackEntry,TrackType
                # descend into the container elements above, skip the rest
                self.file.seek(length, 1)
        return (1000000000 / float(struct.unpack('>I', self.file.read(4))[0]))
def convert_to_unicode(sub):
    """Decode raw subtitle data, guessing its encoding.

    Parameters:
        sub: raw subtitle content (bytes on Python 3, str on Python 2).

    Returns a ``(decoded_text, encoding_name)`` pair. A UTF-8 BOM wins
    outright; otherwise the encoding (CP1250, utf-8, iso-8859-2) with the
    most characteristic Polish marker bytes is chosen, defaulting to
    iso-8859-2 on a tie. Decoding errors are ignored.

    Uses bytes literals instead of ``chr(i)`` so the same code runs on
    Python 2 (where str is bytes) and Python 3, matching this file's use
    of ``six`` for 2/3 compatibility.
    """
    if sub.startswith(b'\xef\xbb\xbf'):
        return sub.decode("utf-8-sig", 'ignore'), "utf-8-sig"
    # marker bytes: 161,166,172,177,182,188 (iso-8859-2);
    # 140,143,156,159,165,185 (CP1250); 195,196,197 (UTF-8 lead bytes)
    iso = sum(sub.count(m) for m in
              (b'\xa1', b'\xa6', b'\xac', b'\xb1', b'\xb6', b'\xbc'))
    win = sum(sub.count(m) for m in
              (b'\x8c', b'\x8f', b'\x9c', b'\x9f', b'\xa5', b'\xb9'))
    utf = sum(sub.count(m) for m in (b'\xc3', b'\xc4', b'\xc5'))
    if win > utf and win > iso:
        return sub.decode("CP1250", 'ignore'), "CP1250"
    if utf > iso and utf > win:
        return sub.decode("utf-8", 'ignore'), "utf-8"
    # if iso > utf and iso > win:
    return sub.decode("iso-8859-2", 'ignore'), "iso-8859-2"
def f(z):
    """Compute napiprojekt.pl's 5-character verification token from a
    32-character md5 hex digest `z`.

    For each (index, multiplier, addend) triple: the hex digit at the
    fixed index plus the addend selects a two-digit hex substring, which
    is multiplied and reduced to its last hex digit.
    """
    token = []
    for i, m, a in zip((0xe, 0x3, 0x6, 0x8, 0x2),
                       (2, 2, 5, 4, 3),
                       (0, 0xd, 0x10, 0xb, 0x5)):
        t = a + int(z[i], 16)
        v = int(z[t:t + 2], 16)
        token.append(("%x" % (v * m))[-1])
    return ''.join(token)
def get_subtitle(digest, lang="PL"):
    """Download a subtitle from napiprojekt.pl for a video's md5 digest.

    Parameters:
        digest: md5 hex digest identifying the video on napiprojekt.pl.
        lang:   two-letter subtitle language code (default "PL").

    Returns the raw subtitle payload (bytes on Python 3, str on Python 2).
    Raises Exception immediately when the server answers with the "NPc"
    not-found marker, or with the accumulated error text after three
    failed attempts.

    Fixes for Python 3: the not-found check uses b'NPc' (the payload is
    bytes, and bytes.startswith(str) raises TypeError), and emptiness is
    tested with ``not sub`` (``sub == ""`` is always False for bytes).
    """
    url = "http://napiprojekt.pl/unit_napisy/dl.php?l=%s&f=%s&t=%s&v=pynapi&kolejka=false&nick=&pass=&napios=%s" % \
        (lang, digest, f(digest), os.name)
    sub = None
    error = "Fetching subtitle failed:"
    for _ in range(3):  # up to three attempts
        try:
            resp = urllib.request.urlopen(url)
            http_code = resp.getcode() if hasattr(resp, 'getcode') else 200
            sub = resp.read()
        except (IOError, OSError) as e:
            error = error + " %s" % (e)
            time.sleep(0.5)
            continue
        if http_code != 200:
            error = error + ",HTTP code: %s" % (str(http_code))
            time.sleep(0.5)
            continue
        if sub.startswith(b'NPc'):  # server's "subtitle not found" marker
            raise Exception('Subtitle NOT FOUND')
        break
    if not sub:
        raise Exception(error)
    return sub
def detect_format(list):
    """
    Detect the format of input subtitles file.
    input: contents of a file as list
    returns: format ("mdvd", "srt", "tmp", "sub2", "mpl2") or "" if unknown

    The first line that matches any known pattern decides the format;
    checks are ordered so more specific patterns win.
    """
    # Raw strings: "\{" and "\," are invalid string escapes and warn on
    # modern Python; the compiled patterns are unchanged.
    re_mdvd = re.compile(r"^\{(\d+)\}\{(\d*)\}\s*(.*)")
    re_srt = re.compile(r"^(\d+):(\d+):(\d+),\d+\s*-->.*")
    re_tmp = re.compile(r"^(\d+):(\d+):(\d+):(.*)")
    re_sub2 = re.compile(r"^(\d+):(\d+):(\d+)\.\d+\s*\,.*")
    re_mpl2 = re.compile(r"^\[(\d+)\]\[(\d+)\]\s*(.*)")
    for line in list:
        if re_mdvd.match(line):
            return "mdvd"
        elif re_srt.match(line):
            return "srt"
        elif re_tmp.match(line):
            return "tmp"
        elif re_sub2.match(line):
            return "sub2"
        elif re_mpl2.match(line):
            return "mpl2"
    return ""
def read_mdvd(list, fps):
    """
    Read micro-dvd subtitles.
    input: contents of a file as list (consumed), fps as frames per second
    returns: list of subtitles in form: [[time_start in secs, time_end in secs, line1, ...],....]
    """
    line_re = re.compile(r"^\{(\d+)\}\{(\d*)\}\s*(.*)")
    result = []
    while len(list) > 0:
        match = line_re.match(list.pop(0), 0)
        if not match:
            continue
        start_frame = int(match.group(1))
        end_frame = match.group(2)
        if end_frame == '':
            # missing end frame: show for 20 frames
            end_frame = int(start_frame) + 20
        entry = [start_frame / fps, int(end_frame) / fps]
        lines = match.group(3).strip().split("|")
        for idx, text in enumerate(lines):
            # drop a leading {c:...}/{y:...} colour/style control code
            if text.lower().startswith('{c:') or text.lower().startswith('{y:'):
                close = text.index('}')
                if close:
                    text = text[close + 1:]
            lines[idx] = text
        entry.extend(lines)
        result.append(entry)
    return result
def read_mpl2(list):
    """
    Read mpl2 subtitles
    input: contents of a file as list (consumed)
    returns: list of subtitles in form: [[time_start in secs, time_end is secs, line1, ...],.....]
    """
    line_re = re.compile(r"^\[(\d+)\]\[(\d+)\]\s*(.*)")
    result = []
    while len(list) > 0:
        match = line_re.match(list.pop(0), 0)
        if not match:
            continue
        # mpl2 timestamps are in tenths of a second
        entry = [int(match.group(1)) * 0.1, int(match.group(2)) * 0.1]
        entry.extend(match.group(3).strip().split("|"))
        result.append(entry)
    return result
def read_sub2(list):
    """
    Reads subviewer 2.0 format subtitles, e.g.:
    00:01:54.75,00:01:58.54
    You shall not pass!
    input: contents of a file as list (consumed)
    returns: list of subtitles in form: [[time_dep, time_end, line1, ...],[time_dep, time_end, line1, ...],....]
    """
    header_re = re.compile(r"^(\d+):(\d+):(\d+)\.(\d+)\s*\,\s*(\d+):(\d+):(\d+)\.(\d+).*$")
    result = []
    try:
        while len(list) > 0:
            match = header_re.match(list.pop(0), 0)
            if not match:
                continue
            g = match.group
            start = int(g(1)) * 3600 + int(g(2)) * 60 + int(g(3)) + int(g(4)) / 100.0
            end = int(g(5)) * 3600 + int(g(6)) * 60 + int(g(7)) + int(g(8)) / 100.0
            entry = [start, end]
            # the text line follows the timing line; "[br]" separates rows
            entry.extend(list.pop(0).strip().split("[br]"))
            result.append(entry)
    except IndexError:
        # timing line without a following text line
        sys.stderr.write("Warning: it seems like input file is damaged or too short.\n")
    return result
def read_srt(list):
    """
    Reads srt subtitles.

    input: contents of a file as list of lines (consumed destructively)
    returns: list of subtitles in form:
             [[time_start, time_end, line1, ...], ...]  (times in seconds)
    """
    index_re = re.compile("^(\d+)\s*$")
    timing_re = re.compile("^(\d+):(\d+):(\d+),(\d+)\s*-->\s*(\d+):(\d+):(\d+),(\d+).*$")
    blank_re = re.compile("^\s*$")
    result = []
    try:
        while list:
            # an entry starts with a bare counter line ...
            if not index_re.match(list.pop(0), 0):
                continue
            # ... followed by the "HH:MM:SS,mmm --> HH:MM:SS,mmm" line
            match = timing_re.match(list.pop(0), 0)
            if not match:
                continue
            g = [int(v) for v in match.groups()]
            entry = [g[0] * 3600 + g[1] * 60 + g[2] + g[3] / 1000.0,
                     g[4] * 3600 + g[5] * 60 + g[6] + g[7] / 1000.0]
            # collect text lines until the first blank line
            line = list.pop(0)
            while not blank_re.match(line, 0):
                entry.append(line.strip())
                line = list.pop(0)
            result.append(entry)
    except IndexError:
        # e.g. a final entry without a terminating blank line
        sys.stderr.write("Warning: it seems like input file is damaged or too short.\n")
    return result
def read_tmp(list):
    """
    Reads tmplayer (tmp) subtitles ("H:MM:SS:text|text" lines, start time only).

    input: contents of a file as list of lines (consumed destructively)
    returns: list of subtitles in form:
             [[time_start, time_end, line1, ...], ...]  (times in seconds)
    End times are synthesized: the entry ends when the next entry starts
    (up to 3 seconds later), otherwise after 4 seconds.
    """
    re1 = re.compile("^(\d+):(\d+):(\d+):(.*)")
    subtitles = []
    subs = {}
    while len(list) > 0:
        m = re1.match(list.pop(0), 0)
        if m:
            time = int(m.group(1)) * 3600 + int(m.group(2)) * 60 + int(m.group(3))
            # several lines may share the same start time; merge their texts
            if time in subs:
                subs[time].extend(m.group(4).strip().split("|"))
            else:
                subs[time] = m.group(4).strip().split("|")
    # BUG FIX: dict.keys() has no .sort() on Python 3; sorted() works on both
    times = sorted(subs)
    for i in range(0, len(times)):
        next_time = 1
        while (times[i] + next_time) not in subs and next_time < 4:
            next_time = next_time + 1
        subt = [times[i], times[i] + next_time]
        subt.extend(subs[times[i]])
        subtitles.append(subt)
    return subtitles
def to_srt(list):
    """
    Converts list of subtitles (internal format: [start, end, line1, ...])
    to srt format, one formatted entry string per subtitle.
    """
    def split_time(secs):
        # decompose seconds into (hours, minutes, seconds, milliseconds)
        hours = int(secs / 3600)
        minutes = int(int(secs % 3600) / 60)
        seconds = int(secs % 60)
        millis = (secs - int(secs)) * 1000
        return hours, minutes, seconds, millis

    entries = []
    for counter, item in enumerate(list, 1):
        start = split_time(item[0])
        stop = split_time(item[1])
        body = "\n".join(item[2:])
        entries.append("%d\n%.2d:%.2d:%.2d,%.3d --> %.2d:%.2d:%.2d,%.3d\n%s\n\n"
                       % ((counter,) + start + stop + (body,)))
    return entries
def sub_fix_times(sub):
    """Heuristically repair display times of subtitles, in place.

    *sub* is the internal list format [[start, end, line1, ...], ...].
    Rules applied per entry: starts must be strictly increasing; an entry is
    shown at least ~1s and at most 15s; an entry never overlaps the next one.
    Display duration is estimated at roughly 10 characters per second.
    NOTE(review): the loop stops at len(sub) - 2, so the last two entries are
    never adjusted (keeps the sub[i + 1] lookups safe) — confirm intended.
    Returns the same (mutated) list.
    """
    for i in range(len(sub) - 2):
        # estimated display time, capped at 9.9s
        # NOTE(review): on Python 3 the /10 here is float division,
        # on Python 2 integer division — results differ slightly.
        approx = min(1 + (len(" ".join(sub[i][2:])) / 10), 9.9)  # 10 char per second
        # print sub[i][0],sub[i][1], sub[i][1] - sub[i][0], approx
        # next entry must start after this one
        if (sub[i + 1][0] <= sub[i][0]):
            sub[i + 1][0] = sub[i][0] + approx + 0.2
        # if less than 1 sec
        if sub[i][1] - sub[i][0] < 1:
            sub[i][1] = sub[i][0] + approx
        # end < start or end > start++ or displayed longer then 15s
        if (sub[i][1] < sub[i][0]) or (sub[i][1] > sub[i + 1][0]) or (sub[i][1] - sub[i][0] > 15):
            if (sub[i][0] + approx) < sub[i + 1][0]:
                sub[i][1] = sub[i][0] + approx
            else:
                # clamp just short of the next entry's start
                sub[i][1] = sub[i + 1][0] - 0.2
    return sub
def get_split_times(str):
    """
    Converts comma-separated string of "xx:yy:zz,xx:yy:zz,..." times to a
    list of times in seconds.

    input: string of comma-separated xx:yy:zz time positions
    returns: list of times (empty list on any malformed chunk)
    """
    pattern = re.compile("^(\d+):(\d+):(\d+)")
    times = []
    for chunk in str.split(","):
        match = pattern.match(chunk, 0)
        if not match:
            sys.stderr.write("Unknown time format\n")
            return []
        hours, minutes, seconds = (int(v) for v in match.groups())
        times.append(hours * 3600 + minutes * 60 + seconds)
    return times
def read_subs(file, fmt, fps):
    """
    Reads subtitles from *file*, using format *fmt*.

    input: file name, format ("srt", "mdvd", "tmp", "sub2", "mpl2" or "auto"),
           fps used for frame-based formats (-1 / falsy triggers detection)
    returns: list of subtitles in form:
             [[time in secs, line1, ...], [time in secs, line1, ...], ...]
    Exits the process when the format cannot be recognized.
    """
    # BUG FIX: the handle was opened without a context manager and therefore
    # never explicitly closed; 'with' closes it even if parsing fails
    with open(file, 'r') as src:
        subs = src.readlines()
    if fmt == "tmp":
        return read_tmp(subs)
    elif fmt == "srt":
        return read_srt(subs)
    elif fmt == "mdvd":
        # frame-based format: need a frame rate, detect it if not given
        if fps == -1:
            fps = detect_file_fps(file)
        if not fps:
            fps = detect_fps(subs)
        return read_mdvd(subs, fps)
    elif fmt == "auto":
        fmt = detect_format(subs)
        sys.stderr.write("Guessing subs format .. %s\n" % fmt)
        return read_subs(file, fmt, fps)
    elif fmt == "sub2":
        return read_sub2(subs)
    elif fmt == "mpl2":
        return read_mpl2(subs)
    else:
        sys.stderr.write("Input format not specified/recognized\n")
        sys.exit(1)
def napiprojekt_fps(digest):
    """
    Query napiprojekt.pl for the frame rate of the video identified by *digest*.

    Returns the fps reported in the <fps> tag of the XML reply, or the common
    default 23.976 when the service is unreachable or gives no usable value.
    """
    url = "http://napiprojekt.pl/api/api.php?mode=file_info&client=dreambox&id=%s" % (urllib.parse.quote(digest))
    # element = ET.parse(urllib.request.urlopen(url))
    # fps = element.find("video_info/fps").text
    try:
        fps = float([re.match(r".*<fps>(.*)</fps>.*", x).groups(0)[0] for x in urllib.request.urlopen(url) if x.find('<fps>') > 0][0])
    except Exception:
        # any network/parse failure falls back to the NTSC film rate
        fps = 23.976
    # BUG FIX: previously returned the undefined name 'floatfps' (NameError)
    return fps
def read_sub(fmt, subs):
    """Dispatch parsing of *subs* lines to the reader matching *fmt*.

    Supported formats: "tmp", "srt", "sub2", "mpl2".
    Returns the parsed subtitle list, or None for an unknown format
    (same as the original if/elif chain falling through).
    """
    readers = {
        "tmp": read_tmp,
        "srt": read_srt,
        "sub2": read_sub2,
        "mpl2": read_mpl2,
    }
    reader = readers.get(fmt)
    if reader is not None:
        return reader(subs)
def to_srt_utf8(subs_org, file, digest=0, info="", fps=0):
    """
    Convert raw subtitle text *subs_org* to SRT and save it next to the
    video *file* (same base name, .srt extension) encoded as UTF-8 with BOM.

    digest -- napiprojekt file hash, used as fps fallback for mdvd subs
    info   -- extra text appended to the progress message
    fps    -- frame rate hint; only trusted when within (22, 32)
    All errors are caught and reported on stdout (best-effort conversion).
    """
    p, f = os.path.split(file)
    print("Processing subtitle for:\n Path: %s\n File: %s %s" % (p, f, info))
    try:
        subs_org = subs_org.replace("\r", "")
        dest = file[:-4] + '.srt'
        subs_u, org_cod = convert_to_unicode(subs_org)
        subs = subs_u.split('\n')
        fmt = detect_format(subs)
        print(" Oryginal subtitle format: ", fmt, org_cod, end=' ')
        if fmt == "mdvd":
            # frame-based format: we need a plausible fps
            # BUG FIX: the old test "fps < 22 < 32" only checked fps < 22;
            # the intent is "fps outside the plausible (22, 32) range"
            if not 22 < fps < 32:
                f = GetFPS(file)
                fps = f.fps()
            if not 22 < fps < 32:
                print(" failback to napifps ", end=' ')
                fps = napiprojekt_fps(digest)
            print("FPS:", str(fps)[0:5], end=' ')
            subs = "".join(to_srt(sub_fix_times(read_mdvd(subs, fps))))
        elif fmt != "srt":
            subs = "".join(to_srt(sub_fix_times(read_sub(fmt, subs))))
        else:
            subs = subs_u
        print(" Saved as SRT utf8.")
        # BUG FIX: the file was opened in text mode but written encoded bytes
        # (TypeError on Python 3); binary mode passes the BOM bytes through
        with open(dest, 'wb') as dst:
            dst.write(subs.encode("utf-8-sig"))
        print(" Saved:", dest)
    except:
        print(" Error: %s" % (sys.exc_info()[1]))
def get_sub_from_napi(file, fps=0):
    """Fetch subtitles for *file* from napiprojekt (keyed by the file's
    'npb' md5 hash) and save them as UTF-8 SRT next to the video.

    NOTE(review): get_subtitle() is defined elsewhere in this project —
    presumably it performs the actual download for the given digest.
    """
    digest = hashFile(file)['npb']
    if digest:
        to_srt_utf8(get_subtitle(digest), file, digest, fps=fps)
def convert(file, src, fps=0):
    """
    Convert subtitle file *src* to UTF-8 SRT named after video *file*.

    Rejects files whose size is outside (100, 200000) bytes as suspicious.
    All errors are caught and reported on stdout (best-effort conversion).
    """
    try:
        if not 100 < os.path.getsize(src) < 200000:
            raise Exception('Suspicious file size: %s %i' % (src, os.path.getsize(src)))
        # BUG FIX: open(src).read() left the handle to be closed only by GC;
        # the context manager closes it deterministically
        with open(src) as fh:
            subs_org = fh.read()
        to_srt_utf8(subs_org=subs_org, file=file, info="\n Convert from: " + os.path.split(src)[1], fps=fps)
    except:
        print(" Error: %s" % (sys.exc_info()[1]))
# Normalisation passes applied to a file name before parsing: each pair is
# (regex, replacement). They turn separators into spaces and strip common
# release tags (resolution, codec, language, audio, ...) so that the title
# regexes below can match cleanly.
prere = (
    ("[^\w\d]", " "),
    ("[\.]", " "),
    ("[\[\]-_]", " "),
    ("^[^-\s]*-", " "),
    ("_", " "),
    (" (720p|1080i|1080p)( |$)+", " "),
    (" (x264|blu-ray|bluray|hdtv|xvid)( |$)+", " "),
    (" (eng|rus)( |$)+", " "),
    (" (oar)( |$)+", " "),
    (" (miniseries)( |$)+", " "),
    (" (dts|dd5|ac3|stereo)( |$)+", " "),
    (" (xbox)( |$)+", " "),
    (" [\[](720p|1080i|1080p)[\]]( |$)+", " ")
)
# "Show S01E02 teams", "Show 1x02 teams" and "Movie 1999 teams" patterns
tvshowRegex = re.compile('(?P<show>.*)S(?P<season>[0-9]{2})E(?P<episode>[0-9]{2}).(?P<teams>.*)', re.IGNORECASE)
tvshowRegex2 = re.compile('(?P<show>.*).(?P<season>[0-9]{1,2})x(?P<episode>[0-9]{1,2}).(?P<teams>.*)', re.IGNORECASE)
movieRegex = re.compile('(?P<movie>.*)[\.|\[|\(| ]{1}(?P<year>(?:(?:19|20)[0-9]{2}))(?P<teams>.*)', re.IGNORECASE)

def parse_name(name):
    """
    Classify a media file name.

    Returns one of:
      {'type': 'tvshow', 'name': ..., 'season': int, 'episode': int, 'teams': [...]}
      {'type': 'movie', 'name': ..., 'year': 'NNNN', 'teams': ...}
      {'type': 'unknown', 'name': <normalized name>, 'teams': []}
    """
    fn = name.lower()
    for co, naco in prere:
        fn = re.sub(co, naco, fn)
    res = {'type': 'unknown', 'name': fn, 'teams': []}
    matches_tvshow = tvshowRegex.match(fn)
    if matches_tvshow:
        (tvshow, season, episode, teams) = matches_tvshow.groups()
        # BUG FIX: these were "tvshow, tvshow.replace(...)" — tuples whose
        # results were discarded; teams stayed a plain string
        tvshow = tvshow.replace(".", " ").strip()
        teams = teams.split('.')
        res = {'type': 'tvshow', 'name': tvshow.strip(), 'season': int(season), 'episode': int(episode), 'teams': teams}
    else:
        matches_tvshow = tvshowRegex2.match(fn)
        if matches_tvshow:
            (tvshow, season, episode, teams) = matches_tvshow.groups()
            # BUG FIX: same discarded-tuple bug as above
            tvshow = tvshow.replace(".", " ").strip()
            teams = teams.split('.')
            res = {'type': 'tvshow', 'name': tvshow.strip(), 'season': int(season), 'episode': int(episode), 'teams': teams}
        else:
            matches_movie = movieRegex.match(fn)
            if matches_movie:
                (movie, year, teams) = matches_movie.groups()
                res = {'type': 'movie', 'name': movie.strip(), 'year': year, 'teams': teams}
    return res
def find_imdb(path):
    """
    Look for an IMDB title id (ttNNNNNNN) in .nfo files next to *path*.

    Prefers a sibling .nfo named after the video file; otherwise scans the
    whole directory for .nfo files. Returns the LAST id found (matching the
    original behaviour) or '' when none is found / on any error.
    """
    ImdbId = ''
    try:
        (dir, fname) = os.path.split(path)
        if os.path.exists(path[:-3] + 'nfo'):
            # .nfo named after the video file; already a full path
            nfof = [path[:-3] + 'nfo']
        else:
            # otherwise collect every .nfo in the directory, as full paths.
            # BUG FIX: the directory used to be joined onto entries again at
            # open() time, which broke the sibling-.nfo branch for relative
            # paths ("dir/dir/file.nfo"); normalize to full paths here instead
            nfof = [os.path.join(dir, f) for f in os.listdir(dir) if f.endswith('.nfo')]
        for f in nfof:
            for l in open(f):
                m = re.search(r'title\/(?P<imdbid>tt\d{7})', l)
                if m and m.group("imdbid"):
                    ImdbId = m.group("imdbid")
    except Exception:
        pass
    return ImdbId
def hashFile(name):
    """Compute identification hashes of the video file *name*.

    Returns a dict with:
      osb   -- 16-hex-digit OpenSubtitles-style hash: file size plus the sum
               of the first and last 64 KiB read as little-endian uint64s
      npb   -- hex md5 digest of the first 10 MiB (napiprojekt-style id)
      fsize -- file size in bytes
    On any error a stub dict (zero osb) is returned and a message printed.
    NOTE(review): requires the file to be larger than 131072 bytes (see the
    seek below); smaller files land in the except branch.
    """
    try:
        filesize = 0
        d = md5()
        longlongformat = 'Q'  # unsigned long long little endian
        bytesize = struct.calcsize(longlongformat)
        # unpack 64 KiB worth of uint64s at a time
        format = "<%d%s" % (65536 // bytesize, longlongformat)
        f = open(name, "rb")
        filesize = os.fstat(f.fileno()).st_size
        hash = filesize
        # first 10 MiB feeds the md5; its first 64 KiB feeds the osb hash
        buffer = f.read(10485760)
        longlongs = struct.unpack(format, buffer[0:65536])
        hash += sum(longlongs)
        d.update(buffer)
        f.seek(-65536, os.SEEK_END)  # size is always > 131072
        longlongs = struct.unpack(format, f.read(65536))
        hash += sum(longlongs)
        # keep the sum within 64 bits, as the osb format requires
        hash &= 0xFFFFFFFFFFFFFFFF
        f.close()
        ret = dict(osb="%016x" % hash, npb=d.hexdigest(), fsize=filesize)
        # print "[DMnapi] hashFile: ", name, ret
        return ret
    except:
        print("[DMnapi] Error hashFile: ", name)
        return dict(osb="%016x" % 0, npb=d.hexdigest(), fsize=filesize)
def get_sub_from_n24(file, id, fps=0):
    """Fetch subtitles for *file* from napisy24 (via the optional N24 helper
    module) and save them as UTF-8 SRT.

    Any failure (missing N24 module, bad *id*, network error) is silently
    ignored — this is a best-effort fallback source.
    """
    try:
        import N24
        to_srt_utf8(N24.get_n24(int(id)), file, fps=fps)
    except:
        pass
from nipype.pipeline.engine import MapNode, Node, Workflow
import nipype.interfaces.utility as util
import nipype.interfaces.fsl as fsl
'''
Workflow to apply all spatial transformations to each volume
of a time series in a single interpolation
'''
def create_transform_pipeline(name='transform_timeseries'):
    """
    Build a nipype workflow that applies motion-correction premats and a
    nonlinear warp to every volume of a time series in a single
    interpolation step.

    Parameters
    ----------
    name : str
        Name of the returned Workflow. BUG FIX: this argument used to be
        ignored (the name was hardcoded); the old default also misspelled
        it as 'transfrom_timeseries'. The new default reproduces the old
        hardcoded behaviour, so existing callers are unaffected.

    Inputs (inputnode): orig_ts, anat_head, mat_moco, fullwarp, resolution
    Outputs (outputnode): trans_ts, trans_ts_mean, resamp_brain
    """
    # set fsl output type
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    # initiate workflow using the caller-supplied name
    transform_ts = Workflow(name=name)
    # inputnode
    inputnode = Node(util.IdentityInterface(fields=['orig_ts',
                                                    'anat_head',
                                                    'mat_moco',
                                                    'fullwarp',
                                                    'resolution']),
                     name='inputnode')
    # outputnode
    outputnode = Node(util.IdentityInterface(fields=['trans_ts',
                                                     'trans_ts_mean',
                                                     'resamp_brain']),
                      name='outputnode')
    # resample anatomy to the requested isotropic resolution
    resample = Node(fsl.FLIRT(datatype='float',
                              out_file='T1_resampled.nii.gz'),
                    name='resample_anat')
    transform_ts.connect([(inputnode, resample, [('anat_head', 'in_file'),
                                                 ('anat_head', 'reference'),
                                                 ('resolution', 'apply_isoxfm')
                                                 ]),
                          (resample, outputnode, [('out_file', 'resamp_brain')])
                          ])
    # split timeseries in single volumes
    split = Node(fsl.Split(dimension='t',
                           out_base_name='timeseries'),
                 name='split')
    transform_ts.connect([(inputnode, split, [('orig_ts', 'in_file')])])
    # apply the per-volume moco premat and the fullwarp field in one
    # spline interpolation per volume
    applywarp = MapNode(fsl.ApplyWarp(interp='spline',
                                      relwarp=True,
                                      out_file='rest2anat.nii.gz',
                                      datatype='float'),
                        iterfield=['in_file', 'premat'],
                        name='applywarp')
    transform_ts.connect([(split, applywarp, [('out_files', 'in_file')]),
                          (inputnode, applywarp, [('mat_moco', 'premat'),
                                                  ('fullwarp', 'field_file')]),
                          (resample, applywarp, [('out_file', 'ref_file')])
                          ])
    # re-concatenate volumes
    merge = Node(fsl.Merge(dimension='t',
                           merged_file='rest2anat.nii.gz'),
                 name='merge')
    transform_ts.connect([(applywarp, merge, [('out_file', 'in_files')]),
                          (merge, outputnode, [('merged_file', 'trans_ts')])])
    # calculate new temporal mean of the transformed series
    tmean = Node(fsl.maths.MeanImage(dimension='T',
                                     out_file='rest_mean2anat_lowres.nii.gz'),
                 name='tmean')
    transform_ts.connect([(merge, tmean, [('merged_file', 'in_file')]),
                          (tmean, outputnode, [('out_file', 'trans_ts_mean')])
                          ])
    return transform_ts
# -*-mode: python; fill-column: 75; tab-width: 8; coding: iso-latin-1-unix -*-
#
# $Id: tixwidgets.py 36560 2004-07-18 06:16:08Z tim_one $
#
# tixwidgets.py --
#
# For Tix, see http://tix.sourceforge.net
#
# This is a demo program of some of the Tix widgets available in Python.
# If you have installed Python & Tix properly, you can execute this as
#
# % python tixwidgets.py
#
import os, os.path, sys, Tix
from Tkconstants import *
import traceback, tkMessageBox
# Tcl event-loop flags (mirroring tcl.h) for use with tk.dooneevent().
# TCL_ALL_EVENTS (0) means "process any event, block until one arrives".
TCL_DONT_WAIT = 1<<1
TCL_WINDOW_EVENTS = 1<<2
TCL_FILE_EVENTS = 1<<3
TCL_TIMER_EVENTS = 1<<4
TCL_IDLE_EVENTS = 1<<5
TCL_ALL_EVENTS = 0
class Demo:
    """Main application object for the Tix widget demonstration.

    Builds the menu bar, notebook of demo pages and status bar, and runs an
    explicit Tcl event loop (see loop()) instead of Tkinter's mainloop().
    NOTE: this module is Python 2 code (old-style raise, tkMessageBox).
    """
    def __init__(self, top):
        """Record widget/state slots and put the script directory (and its
        samples/ subdirectory) on sys.path so sample programs can be run."""
        self.root = top
        self.exit = -1

        self.dir = None                 # script directory
        self.balloon = None             # balloon widget
        self.useBalloons = Tix.StringVar()
        self.useBalloons.set('0')
        self.statusbar = None           # status bar widget
        self.welmsg = None              # Msg widget
        self.welfont = ''               # font name
        self.welsize = ''               # font size

        progname = sys.argv[0]
        dirname = os.path.dirname(progname)
        if dirname and dirname != os.curdir:
            self.dir = dirname
            index = -1
            # replace the ""/"." entry on sys.path with the script dir
            for i in range(len(sys.path)):
                p = sys.path[i]
                if p in ("", os.curdir):
                    index = i
            if index >= 0:
                sys.path[index] = dirname
            else:
                sys.path.insert(0, dirname)
        else:
            self.dir = os.getcwd()
        sys.path.insert(0, self.dir+'/samples')

    def MkMainMenu(self):
        """Build and return the File/Help menu bar frame."""
        top = self.root
        w = Tix.Frame(top, bd=2, relief=RAISED)
        file = Tix.Menubutton(w, text='File', underline=0, takefocus=0)
        help = Tix.Menubutton(w, text='Help', underline=0, takefocus=0)
        file.pack(side=LEFT)
        help.pack(side=RIGHT)
        fm = Tix.Menu(file, tearoff=0)
        file['menu'] = fm
        hm = Tix.Menu(help, tearoff=0)
        help['menu'] = hm

        fm.add_command(label='Exit', underline=1,
                       command = lambda self=self: self.quitcmd () )
        hm.add_checkbutton(label='BalloonHelp', underline=0, command=ToggleHelp,
                           variable=self.useBalloons)
        # The trace variable option doesn't seem to work, instead I use 'command'
        #apply(w.tk.call, ('trace', 'variable', self.useBalloons, 'w',
        #                  ToggleHelp))

        return w

    def MkMainNotebook(self):
        """Build and return the notebook holding all demo pages; pages are
        created lazily via their createcmd callbacks."""
        top = self.root
        w = Tix.NoteBook(top, ipadx=5, ipady=5, options="""
        tagPadX 6
        tagPadY 4
        borderWidth 2
        """)
        # This may be required if there is no *Background option
        top['bg'] = w['bg']

        w.add('wel', label='Welcome', underline=0,
              createcmd=lambda w=w, name='wel': MkWelcome(w, name))
        w.add('cho', label='Choosers', underline=0,
              createcmd=lambda w=w, name='cho': MkChoosers(w, name))
        w.add('scr', label='Scrolled Widgets', underline=0,
              createcmd=lambda w=w, name='scr': MkScroll(w, name))
        w.add('mgr', label='Manager Widgets', underline=0,
              createcmd=lambda w=w, name='mgr': MkManager(w, name))
        w.add('dir', label='Directory List', underline=0,
              createcmd=lambda w=w, name='dir': MkDirList(w, name))
        w.add('exp', label='Run Sample Programs', underline=0,
              createcmd=lambda w=w, name='exp': MkSample(w, name))
        return w

    def MkMainStatus(self):
        """Build and return the status bar frame (stored in demo.statusbar)."""
        global demo
        top = self.root

        w = Tix.Frame(top, relief=Tix.RAISED, bd=1)
        demo.statusbar = Tix.Label(w, relief=Tix.SUNKEN, bd=1)
        demo.statusbar.form(padx=3, pady=3, left=0, right='%70')
        return w

    def build(self):
        """Assemble the toplevel window: title, geometry, balloon help,
        menu bar, notebook and status bar; hook window-close to quitcmd."""
        root = self.root
        z = root.winfo_toplevel()
        z.wm_title('Tix Widget Demonstration')
        # pick a geometry that fits small screens
        if z.winfo_screenwidth() <= 800:
            z.geometry('790x590+10+10')
        else:
            z.geometry('890x640+10+10')
        demo.balloon = Tix.Balloon(root)
        frame1 = self.MkMainMenu()
        frame2 = self.MkMainNotebook()
        frame3 = self.MkMainStatus()
        frame1.pack(side=TOP, fill=X)
        frame3.pack(side=BOTTOM, fill=X)
        frame2.pack(side=TOP, expand=1, fill=BOTH, padx=4, pady=4)
        demo.balloon['statusbar'] = demo.statusbar
        z.wm_protocol("WM_DELETE_WINDOW", lambda self=self: self.quitcmd())

        # To show Tcl errors - uncomment this to see the listbox bug.
        # Tkinter defines a Tcl tkerror procedure that in effect
        # silences all background Tcl error reporting.
        # root.tk.eval('if {[info commands tkerror] != ""} {rename tkerror pytkerror}')

    def quitcmd (self):
        """Quit our mainloop. It is up to you to call root.destroy() after."""
        self.exit = 0

    def loop(self):
        """This is an explict replacement for _tkinter mainloop()
        It lets you catch keyboard interrupts easier, and avoids
        the 20 msec. dead sleep() which burns a constant CPU."""
        while self.exit < 0:
            # There are 2 whiles here. The outer one lets you continue
            # after a ^C interrupt.
            try:
                # This is the replacement for _tkinter mainloop()
                # It blocks waiting for the next Tcl event using select.
                while self.exit < 0:
                    self.root.tk.dooneevent(TCL_ALL_EVENTS)
            except SystemExit:
                # Tkinter uses SystemExit to exit
                #print 'Exit'
                self.exit = 1
                return
            except KeyboardInterrupt:
                if tkMessageBox.askquestion ('Interrupt', 'Really Quit?') == 'yes':
                    # self.tk.eval('exit')
                    self.exit = 1
                    return
                continue
            except:
                # Otherwise it's some other error - be nice and say why
                t, v, tb = sys.exc_info()
                text = ""
                for line in traceback.format_exception(t,v,tb):
                    text += line + '\n'
                try: tkMessageBox.showerror ('Error', text)
                except: pass
                self.exit = 1
                # Python 2 raise syntax (statement form)
                raise SystemExit, 1

    def destroy (self):
        """Destroy the root window; call after loop() returns."""
        self.root.destroy()
def RunMain(root):
    """Build the demo UI under *root*, run its event loop, then tear it down.

    Creates the module-global `demo` instance that the page-builder
    functions below rely on.
    """
    global demo

    demo = Demo(root)

    demo.build()
    demo.loop()
    demo.destroy()
# Tabs
def MkWelcome(nb, name):
    """Create the 'Welcome' notebook page: a font toolbar over a text area."""
    w = nb.page(name)
    bar = MkWelcomeBar(w)
    text = MkWelcomeText(w)
    bar.pack(side=TOP, fill=X, padx=2, pady=2)
    text.pack(side=TOP, fill=BOTH, expand=1)
def MkWelcomeBar(top):
    """Build the welcome-page toolbar: font-family and point-size combo
    boxes whose selection re-styles the welcome message (MainTextFont)."""
    global demo

    w = Tix.Frame(top, bd=2, relief=Tix.GROOVE)
    b1 = Tix.ComboBox(w, command=lambda w=top: MainTextFont(w))
    b2 = Tix.ComboBox(w, command=lambda w=top: MainTextFont(w))
    b1.entry['width'] = 15
    b1.slistbox.listbox['height'] = 3
    b2.entry['width'] = 4
    b2.slistbox.listbox['height'] = 3

    # stash the combo boxes so MainTextFont can read their values
    demo.welfont = b1
    demo.welsize = b2

    b1.insert(Tix.END, 'Courier')
    b1.insert(Tix.END, 'Helvetica')
    b1.insert(Tix.END, 'Lucida')
    b1.insert(Tix.END, 'Times Roman')

    b2.insert(Tix.END, '8')
    b2.insert(Tix.END, '10')
    b2.insert(Tix.END, '12')
    b2.insert(Tix.END, '14')
    b2.insert(Tix.END, '18')

    # preselect by insertion index: 1 -> 'Helvetica', 3 -> '14'
    b1.pick(1)
    b2.pick(3)

    b1.pack(side=Tix.LEFT, padx=4, pady=4)
    b2.pack(side=Tix.LEFT, padx=4, pady=4)

    demo.balloon.bind_widget(b1, msg='Choose\na font',
                             statusmsg='Choose a font for this page')
    demo.balloon.bind_widget(b2, msg='Point size',
                             statusmsg='Choose the font size for this page')
    return w
def MkWelcomeText(top):
    """Build the scrolled welcome message; the Message widget is stored in
    demo.welmsg so MainTextFont can change its font later."""
    global demo

    w = Tix.ScrolledWindow(top, scrollbar='auto')
    win = w.window
    text = 'Welcome to TIX in Python'
    title = Tix.Label(win,
                      bd=0, width=30, anchor=Tix.N, text=text)
    msg = Tix.Message(win,
                      bd=0, width=400, anchor=Tix.N,
                      text='Tix is a set of mega-widgets based on TK. This program \
demonstrates the widgets in the Tix widget set. You can choose the pages \
in this window to look at the corresponding widgets. \n\n\
To quit this program, choose the "File | Exit" command.\n\n\
For more information, see http://tix.sourceforge.net.')
    title.pack(expand=1, fill=Tix.BOTH, padx=10, pady=10)
    msg.pack(expand=1, fill=Tix.BOTH, padx=10, pady=10)
    demo.welmsg = msg
    return w
def MainTextFont(w):
    """Apply the font family/size chosen in the welcome-page combo boxes
    to the welcome message widget (no-op before it exists)."""
    global demo
    if demo.welmsg:
        family = demo.welfont['value']
        size = demo.welsize['value']
        # the combo box shows a friendly name; Tk wants the family 'times'
        if family == 'Times Roman':
            family = 'times'
        demo.welmsg['font'] = '%s %s' % (family, size)
def ToggleHelp():
    """Enable or disable balloon help according to the Help-menu checkbutton."""
    demo.balloon['state'] = 'both' if demo.useBalloons.get() == '1' else 'none'
def MkChoosers(nb, name):
    """Create the 'Choosers' page: one LabelFrame per chooser widget type,
    laid out in three columns with the Tix form geometry manager."""
    w = nb.page(name)
    options = "label.padX 4"

    til = Tix.LabelFrame(w, label='Chooser Widgets', options=options)
    cbx = Tix.LabelFrame(w, label='tixComboBox', options=options)
    ctl = Tix.LabelFrame(w, label='tixControl', options=options)
    sel = Tix.LabelFrame(w, label='tixSelect', options=options)
    opt = Tix.LabelFrame(w, label='tixOptionMenu', options=options)
    fil = Tix.LabelFrame(w, label='tixFileEntry', options=options)
    fbx = Tix.LabelFrame(w, label='tixFileSelectBox', options=options)
    tbr = Tix.LabelFrame(w, label='Tool Bar', options=options)

    MkTitle(til.frame)
    MkCombo(cbx.frame)
    MkControl(ctl.frame)
    MkSelect(sel.frame)
    MkOptMenu(opt.frame)
    MkFileEnt(fil.frame)
    MkFileBox(fbx.frame)
    MkToolBar(tbr.frame)

    # First column: comBox and selector
    cbx.form(top=0, left=0, right='%33')
    sel.form(left=0, right='&'+str(cbx), top=cbx)
    opt.form(left=0, right='&'+str(cbx), top=sel, bottom=-1)

    # Second column: title .. etc
    til.form(left=cbx, top=0,right='%66')
    ctl.form(left=cbx, right='&'+str(til), top=til)
    fil.form(left=cbx, right='&'+str(til), top=ctl)
    tbr.form(left=cbx, right='&'+str(til), top=fil, bottom=-1)
    #
    # Third column: file selection
    fbx.form(right=-1, top=0, left='%66')
def MkCombo(w):
    """Populate *w* with three ComboBox demos: static, editable and history."""
    options = "label.width %d label.anchor %s entry.width %d" % (10, Tix.E, 14)

    static = Tix.ComboBox(w, label='Static', editable=0, options=options)
    editable = Tix.ComboBox(w, label='Editable', editable=1, options=options)
    history = Tix.ComboBox(w, label='History', editable=1, history=1,
                           anchor=Tix.E, options=options)

    for month in ('January', 'February', 'March', 'April', 'May', 'June',
                  'July', 'August', 'September', 'October', 'November',
                  'December'):
        static.insert(Tix.END, month)

    for country in ('Angola', 'Bangladesh', 'China', 'Denmark', 'Ecuador'):
        editable.insert(Tix.END, country)

    for path in ('/usr/bin/ksh', '/usr/local/lib/python', '/var/adm'):
        history.insert(Tix.END, path)

    for box in (static, editable, history):
        box.pack(side=Tix.TOP, padx=5, pady=3)
# Values cycled through by the "States" Tix.Control spinner in MkControl.
states = ['Bengal', 'Delhi', 'Karnataka', 'Tamil Nadu']

def spin_cmd(w, inc):
    """Move the demo_spintxt value *inc* steps through `states`, wrapping
    around at either end. Used as incrcmd/decrcmd of the spinner.
    NOTE(review): raises ValueError if the entry text is not in `states`;
    spin_validate below guards against that case, this callback does not.
    """
    idx = states.index(demo_spintxt.get()) + inc
    if idx < 0:
        idx = len(states) - 1
    elif idx >= len(states):
        idx = 0
    # following doesn't work.
    # return states[idx]
    demo_spintxt.set(states[idx]) # this works

def spin_validate(w):
    """Return the spinner text if it is a known state, else the first state."""
    global states, demo_spintxt

    try:
        i = states.index(demo_spintxt.get())
    except ValueError:
        return states[0]
    return states[i]
    # why this procedure works as opposed to the previous one beats me.
def MkControl(w):
    """Demonstrate Tix.Control: a plain numeric spinner plus a text spinner
    driven by the spin_cmd/spin_validate callbacks above."""
    global demo_spintxt

    options = "label.width %d label.anchor %s entry.width %d" % (10, Tix.E, 13)

    demo_spintxt = Tix.StringVar()
    demo_spintxt.set(states[0])
    simple = Tix.Control(w, label='Numbers', options=options)
    spintxt = Tix.Control(w, label='States', variable=demo_spintxt,
                          options=options)
    spintxt['incrcmd'] = lambda w=spintxt: spin_cmd(w, 1)
    spintxt['decrcmd'] = lambda w=spintxt: spin_cmd(w, -1)
    spintxt['validatecmd'] = lambda w=spintxt: spin_validate(w)

    simple.pack(side=Tix.TOP, padx=5, pady=3)
    spintxt.pack(side=Tix.TOP, padx=5, pady=3)
def MkSelect(w):
    """Demonstrate Tix.Select: a single-choice (radio) column next to a
    multi-choice column."""
    options = "label.anchor %s" % Tix.CENTER

    sel1 = Tix.Select(w, label='Mere Mortals', allowzero=1, radio=1,
                      orientation=Tix.VERTICAL,
                      labelside=Tix.TOP,
                      options=options)
    sel2 = Tix.Select(w, label='Geeks', allowzero=1, radio=0,
                      orientation=Tix.VERTICAL,
                      labelside=Tix.TOP,
                      options=options)

    for key, text in (('eat', 'Eat'), ('work', 'Work'), ('play', 'Play'),
                      ('party', 'Party'), ('sleep', 'Sleep')):
        sel1.add(key, text=text)

    for key, text in (('eat', 'Eat'), ('prog1', 'Program'),
                      ('prog2', 'Program'), ('prog3', 'Program'),
                      ('sleep', 'Sleep')):
        sel2.add(key, text=text)

    for sel in (sel1, sel2):
        sel.pack(side=Tix.LEFT, padx=5, pady=3, fill=Tix.X)
def MkOptMenu(w):
    """Demonstrate Tix.OptionMenu with a file-format picker (None marks a
    separator entry)."""
    options = 'menubutton.width 15 label.anchor %s' % Tix.E

    m = Tix.OptionMenu(w, label='File Format : ', options=options)
    for key, label in (('text', 'Plain Text'),
                       ('post', 'PostScript'),
                       ('format', 'Formatted Text'),
                       ('html', 'HTML'),
                       ('sep', None),
                       ('tex', 'LaTeX'),
                       ('rtf', 'Rich Text Format')):
        if label is None:
            m.add_command(key)
        else:
            m.add_command(key, label=label)
    m.pack(fill=Tix.X, padx=5, pady=3)
def MkFileEnt(w):
    """Demonstrate Tix.FileEntry: an entry with a button that pops up a
    TixFileSelectDialog."""
    msg = Tix.Message(w,
                      relief=Tix.FLAT, width=240, anchor=Tix.N,
                      text='Press the "open file" icon button and a TixFileSelectDialog will popup.')
    ent = Tix.FileEntry(w, label='Select a file : ')
    msg.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=3, pady=3)
    ent.pack(side=Tix.TOP, fill=Tix.X, padx=3, pady=3)
def MkFileBox(w):
    """The FileSelectBox is a Motif-style box with various enhancements.
    For example, you can adjust the size of the two listboxes
    and your past selections are recorded.
    """
    msg = Tix.Message(w,
                      relief=Tix.FLAT, width=240, anchor=Tix.N,
                      text='The Tix FileSelectBox is a Motif-style box with various enhancements. For example, you can adjust the size of the two listboxes and your past selections are recorded.')
    box = Tix.FileSelectBox(w)
    msg.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=3, pady=3)
    box.pack(side=Tix.TOP, fill=Tix.X, padx=3, pady=3)
def MkToolBar(w):
    """The Select widget is also good for arranging buttons in a tool bar.
    """
    global demo

    options = 'frame.borderWidth 1'

    msg = Tix.Message(w,
                      relief=Tix.FLAT, width=240, anchor=Tix.N,
                      text='The Select widget is also good for arranging buttons in a tool bar.')
    bar = Tix.Frame(w, bd=2, relief=Tix.RAISED)
    font = Tix.Select(w, allowzero=1, radio=0, label='', options=options)
    para = Tix.Select(w, allowzero=0, radio=1, label='', options=options)

    # font-style toggles; each bitmap file is named after its entry
    for style in ('bold', 'italic', 'underline', 'capital'):
        font.add(style, bitmap='@%s/bitmaps/%s.xbm' % (demo.dir, style))

    # paragraph-justification radio buttons; bitmap names differ slightly
    for entry, bmp in (('left', 'leftj'), ('right', 'rightj'),
                       ('center', 'centerj'), ('justify', 'justify')):
        para.add(entry, bitmap='@%s/bitmaps/%s.xbm' % (demo.dir, bmp))

    msg.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=3, pady=3)
    bar.pack(side=Tix.TOP, fill=Tix.X, padx=3, pady=3)
    font.pack({'in': bar}, side=Tix.LEFT, padx=3, pady=3)
    para.pack({'in': bar}, side=Tix.LEFT, padx=3, pady=3)
def MkTitle(w):
    """Fill the title LabelFrame of the Choosers page with a short blurb."""
    msg = Tix.Message(w,
                      relief=Tix.FLAT, width=240, anchor=Tix.N,
                      text='There are many types of "chooser" widgets that allow the user to input different types of information')
    msg.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=3, pady=3)
def MkScroll(nb, name):
    """Create the 'Scrolled Widgets' page: three columns demonstrating
    ScrolledListBox, ScrolledWindow and ScrolledText."""
    w = nb.page(name)
    options = 'label.padX 4'

    sls = Tix.LabelFrame(w, label='Tix.ScrolledListBox', options=options)
    swn = Tix.LabelFrame(w, label='Tix.ScrolledWindow', options=options)
    stx = Tix.LabelFrame(w, label='Tix.ScrolledText', options=options)

    MkSList(sls.frame)
    MkSWindow(swn.frame)
    MkSText(stx.frame)

    # three equal-width columns via the form geometry manager
    sls.form(top=0, left=0, right='%33', bottom=-1)
    swn.form(top=0, left=sls, right='%66', bottom=-1)
    stx.form(top=0, left=swn, right=-1, bottom=-1)
def MkSList(w):
    """This TixScrolledListBox is configured so that it uses scrollbars
    only when it is necessary. Use the handles to resize the listbox and
    watch the scrollbars automatically appear and disappear. """
    top = Tix.Frame(w, width=300, height=330)
    bot = Tix.Frame(w)
    msg = Tix.Message(top,
                      relief=Tix.FLAT, width=200, anchor=Tix.N,
                      text='This TixScrolledListBox is configured so that it uses scrollbars only when it is necessary. Use the handles to resize the listbox and watch the scrollbars automatically appear and disappear.')

    list = Tix.ScrolledListBox(top, scrollbar='auto')
    list.place(x=50, y=150, width=120, height=80)
    list.listbox.insert(Tix.END, 'Alabama')
    list.listbox.insert(Tix.END, 'California')
    list.listbox.insert(Tix.END, 'Montana')
    list.listbox.insert(Tix.END, 'New Jersey')
    list.listbox.insert(Tix.END, 'New York')
    list.listbox.insert(Tix.END, 'Pennsylvania')
    list.listbox.insert(Tix.END, 'Washington')

    rh = Tix.ResizeHandle(top, bg='black',
                          relief=Tix.RAISED,
                          handlesize=8, gridded=1, minwidth=50, minheight=30)
    # Reset restores the listbox to its initial placement (SList_reset)
    btn = Tix.Button(bot, text='Reset', command=lambda w=rh, x=list: SList_reset(w,x))
    top.propagate(0)
    msg.pack(fill=Tix.X)
    btn.pack(anchor=Tix.CENTER)
    top.pack(expand=1, fill=Tix.BOTH)
    bot.pack(fill=Tix.BOTH)
    # attach the resize handle only once the listbox is actually mapped
    list.bind('<Map>', func=lambda arg=0, rh=rh, list=list:
              list.tk.call('tixDoWhenIdle', str(rh), 'attachwidget', str(list)))
def SList_reset(rh, list):
    """Restore the demo listbox to its initial position/size and re-attach
    the resize handle *rh* to it."""
    list.place(x=50, y=150, width=120, height=80)
    list.update()
    rh.attach_widget(list)
def MkSWindow(w):
    """The ScrolledWindow widget allows you to scroll any kind of Tk
    widget. It is more versatile than a scrolled canvas widget.
    """
    global demo

    text = 'The Tix ScrolledWindow widget allows you to scroll any kind of Tk widget. It is more versatile than a scrolled canvas widget.'

    # demo image shipped alongside the script; degrade gracefully if absent
    file = os.path.join(demo.dir, 'bitmaps', 'tix.gif')
    if not os.path.isfile(file):
        text += ' (Image missing)'

    top = Tix.Frame(w, width=330, height=330)
    bot = Tix.Frame(w)
    msg = Tix.Message(top,
                      relief=Tix.FLAT, width=200, anchor=Tix.N,
                      text=text)

    win = Tix.ScrolledWindow(top, scrollbar='auto')

    image1 = win.window.image_create('photo', file=file)
    lbl = Tix.Label(win.window, image=image1)
    lbl.pack(expand=1, fill=Tix.BOTH)

    win.place(x=30, y=150, width=190, height=120)

    rh = Tix.ResizeHandle(top, bg='black',
                          relief=Tix.RAISED,
                          handlesize=8, gridded=1, minwidth=50, minheight=30)
    # Reset restores the window to its initial placement (SWindow_reset)
    btn = Tix.Button(bot, text='Reset', command=lambda w=rh, x=win: SWindow_reset(w,x))
    top.propagate(0)
    msg.pack(fill=Tix.X)
    btn.pack(anchor=Tix.CENTER)
    top.pack(expand=1, fill=Tix.BOTH)
    bot.pack(fill=Tix.BOTH)
    # attach the resize handle only once the window is actually mapped
    win.bind('<Map>', func=lambda arg=0, rh=rh, win=win:
             win.tk.call('tixDoWhenIdle', str(rh), 'attachwidget', str(win)))
def SWindow_reset(rh, win):
    """Restore the demo scrolled window to its initial position/size and
    re-attach the resize handle *rh* to it."""
    win.place(x=30, y=150, width=190, height=120)
    win.update()
    rh.attach_widget(win)
def MkSText(w):
    """The TixScrolledWindow widget allows you to scroll any kind of Tk
    widget. It is more versatile than a scrolled canvas widget."""
    top = Tix.Frame(w, width=330, height=330)
    bot = Tix.Frame(w)
    msg = Tix.Message(top,
                      relief=Tix.FLAT, width=200, anchor=Tix.N,
                      text='The Tix ScrolledWindow widget allows you to scroll any kind of Tk widget. It is more versatile than a scrolled canvas widget.')

    win = Tix.ScrolledText(top, scrollbar='auto')
    win.text['wrap'] = 'none'
    win.text.insert(Tix.END, '''When -scrollbar is set to "auto", the
scrollbars are shown only when needed.
Additional modifiers can be used to force a
scrollbar to be shown or hidden. For example,
"auto -y" means the horizontal scrollbar
should be shown when needed but the vertical
scrollbar should always be hidden;
"auto +x" means the vertical scrollbar
should be shown when needed but the horizontal
scrollbar should always be shown, and so on.'''
)
    win.place(x=30, y=150, width=190, height=100)

    rh = Tix.ResizeHandle(top, bg='black',
                          relief=Tix.RAISED,
                          handlesize=8, gridded=1, minwidth=50, minheight=30)
    # Reset restores the text widget to its initial placement (SText_reset)
    btn = Tix.Button(bot, text='Reset', command=lambda w=rh, x=win: SText_reset(w,x))
    top.propagate(0)
    msg.pack(fill=Tix.X)
    btn.pack(anchor=Tix.CENTER)
    top.pack(expand=1, fill=Tix.BOTH)
    bot.pack(fill=Tix.BOTH)
    # attach the resize handle only once the widget is actually mapped
    win.bind('<Map>', func=lambda arg=0, rh=rh, win=win:
             win.tk.call('tixDoWhenIdle', str(rh), 'attachwidget', str(win)))
def SText_reset(rh, win):
    """Restore the demo scrolled text to its initial position/size and
    re-attach the resize handle *rh* to it.
    NOTE(review): resets height to 120 although MkSText places it at 100 —
    looks like a copy/paste of SWindow_reset; confirm intended.
    """
    win.place(x=30, y=150, width=190, height=120)
    win.update()
    rh.attach_widget(win)
def MkManager(nb, name):
    """Create the 'Manager Widgets' page: PanedWindow and NoteBook demos
    side by side."""
    w = nb.page(name)
    options = 'label.padX 4'

    pane = Tix.LabelFrame(w, label='Tix.PanedWindow', options=options)
    note = Tix.LabelFrame(w, label='Tix.NoteBook', options=options)

    MkPanedWindow(pane.frame)
    MkNoteBook(note.frame)

    pane.form(top=0, left=0, right=note, bottom=-1)
    note.form(top=0, right=-1, bottom=-1)
def MkPanedWindow(w):
    """The PanedWindow widget allows the user to interactively manipulate
    the sizes of several panes. The panes can be arranged either vertically
    or horizontally.
    """
    msg = Tix.Message(w,
                      relief=Tix.FLAT, width=240, anchor=Tix.N,
                      text='The PanedWindow widget allows the user to interactively manipulate the sizes of several panes. The panes can be arranged either vertically or horizontally.')

    group = Tix.LabelEntry(w, label='Newsgroup:', options='entry.width 25')
    group.entry.insert(0,'comp.lang.python')
    # fake newsreader layout: article list above the article body
    pane = Tix.PanedWindow(w, orientation='vertical')

    p1 = pane.add('list', min=70, size=100)
    p2 = pane.add('text', min=70)
    list = Tix.ScrolledListBox(p1)
    text = Tix.ScrolledText(p2)

    list.listbox.insert(Tix.END, " 12324 Re: Tkinter is good for your health")
    list.listbox.insert(Tix.END, "+ 12325 Re: Tkinter is good for your health")
    list.listbox.insert(Tix.END, "+ 12326 Re: Tix is even better for your health (Was: Tkinter is good...)")
    list.listbox.insert(Tix.END, " 12327 Re: Tix is even better for your health (Was: Tkinter is good...)")
    list.listbox.insert(Tix.END, "+ 12328 Re: Tix is even better for your health (Was: Tkinter is good...)")
    list.listbox.insert(Tix.END, " 12329 Re: Tix is even better for your health (Was: Tkinter is good...)")
    list.listbox.insert(Tix.END, "+ 12330 Re: Tix is even better for your health (Was: Tkinter is good...)")

    text.text['bg'] = list.listbox['bg']
    text.text['wrap'] = 'none'
    text.text.insert(Tix.END, """
Mon, 19 Jun 1995 11:39:52 comp.lang.python Thread 34 of 220
Lines 353 A new way to put text and bitmaps together iNo responses
ioi@blue.seas.upenn.edu Ioi K. Lam at University of Pennsylvania
Hi,
I have implemented a new image type called "compound". It allows you
to glue together a bunch of bitmaps, images and text strings together
to form a bigger image. Then you can use this image with widgets that
support the -image option. For example, you can display a text string string
together with a bitmap, at the same time, inside a TK button widget.
""")
    list.pack(expand=1, fill=Tix.BOTH, padx=4, pady=6)
    text.pack(expand=1, fill=Tix.BOTH, padx=4, pady=6)

    msg.pack(side=Tix.TOP, padx=3, pady=3, fill=Tix.BOTH)
    group.pack(side=Tix.TOP, padx=3, pady=3, fill=Tix.BOTH)
    pane.pack(side=Tix.TOP, padx=3, pady=3, fill=Tix.BOTH, expand=1)
def MkNoteBook(w):
    """Build the NoteBook demo: a two-tab notebook (Hard Disk / Network),
    each tab holding a column of Control spinners plus OK/Cancel buttons."""
    blurb = Tix.Message(w,
                        relief=Tix.FLAT, width=240, anchor=Tix.N,
                        text='The NoteBook widget allows you to layout a complex interface into individual pages.')
    opts = "entry.width %d label.width %d label.anchor %s" % (10, 18, Tix.E)
    nb = Tix.NoteBook(w, ipadx=6, ipady=6, options=opts)
    nb.add('hard_disk', label="Hard Disk", underline=0)
    nb.add('network', label="Network", underline=0)

    # "Hard Disk" page: common button column on the right, controls on top.
    btn_frame = Tix.Frame(nb.hard_disk)
    btn_frame.pack(side=Tix.RIGHT, padx=2, pady=2, fill=Tix.Y)
    CreateCommonButtons(btn_frame)
    for control in (Tix.Control(nb.hard_disk, value=12, label='Access Time: '),
                    Tix.Control(nb.hard_disk, value=400, label='Write Throughput: '),
                    Tix.Control(nb.hard_disk, value=400, label='Read Throughput: '),
                    Tix.Control(nb.hard_disk, value=1021, label='Capacity: ')):
        control.pack(side=Tix.TOP, padx=20, pady=2)

    # "Network" page: same layout with an extra "Users" control.
    btn_frame = Tix.Frame(nb.network)
    btn_frame.pack(side=Tix.RIGHT, padx=2, pady=2, fill=Tix.Y)
    CreateCommonButtons(btn_frame)
    for control in (Tix.Control(nb.network, value=12, label='Access Time: '),
                    Tix.Control(nb.network, value=400, label='Write Throughput: '),
                    Tix.Control(nb.network, value=400, label='Read Throughput: '),
                    Tix.Control(nb.network, value=1021, label='Capacity: '),
                    Tix.Control(nb.network, value=10, label='Users: ')):
        control.pack(side=Tix.TOP, padx=20, pady=2)

    blurb.pack(side=Tix.TOP, padx=3, pady=3, fill=Tix.BOTH)
    nb.pack(side=Tix.TOP, padx=5, pady=5, fill=Tix.BOTH, expand=1)
def CreateCommonButtons(f):
    """Pack an OK and a Cancel button (width 6) top-down into frame *f*."""
    for caption in ('OK', 'Cancel'):
        Tix.Button(f, text=caption, width=6).pack(side=Tix.TOP, padx=2, pady=2)
def MkDirList(nb, name):
    """Fill notebook page *name* with a DirList demo (left 40%) and an
    ExFileSelectBox demo (right 60%), laid out with the form manager."""
    page = nb.page(name)
    opts = "label.padX 4"
    dir_box = Tix.LabelFrame(page, label='Tix.DirList', options=opts)
    file_box = Tix.LabelFrame(page, label='Tix.ExFileSelectBox', options=opts)
    MkDirListWidget(dir_box.frame)
    MkExFileWidget(file_box.frame)
    dir_box.form(top=0, left=0, right='%40', bottom=-1)
    file_box.form(top=0, left='%40', right=-1, bottom=-1)
def MkDirListWidget(w):
    """Demonstrate TixDirList: a graphical view of the directory tree that
    lets the user choose and access directories."""
    blurb = Tix.Message(w,
                        relief=Tix.FLAT, width=240, anchor=Tix.N,
                        text='The Tix DirList widget gives a graphical representation of the file system directory and makes it easy for the user to choose and access directories.')
    listing = Tix.DirList(w, options='hlist.padY 1 hlist.width 25 hlist.height 16')
    blurb.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=3, pady=3)
    listing.pack(side=Tix.TOP, padx=3, pady=3)
def MkExFileWidget(w):
    """Demonstrate TixExFileSelectBox, the friendlier alternative to the
    Motif-style FileSelectBox."""
    blurb = Tix.Message(w,
                        relief=Tix.FLAT, width=240, anchor=Tix.N,
                        text='The Tix ExFileSelectBox widget is more user friendly than the Motif style FileSelectBox.')
    # There's a bug in the ComboBoxes - the scrolledlistbox is destroyed
    selector = Tix.ExFileSelectBox(w, bd=2, relief=Tix.RAISED)
    blurb.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=3, pady=3)
    selector.pack(side=Tix.TOP, padx=3, pady=3)
###
### List of all the demos we want to show off
# Category headings displayed (disabled) in the sample tree.
comments = {'widget' : 'Widget Demos', 'image' : 'Image Demos'}
# Display name -> module name under samples/ providing a RunSample() entry.
samples = {'Balloon'            : 'Balloon',
           'Button Box'         : 'BtnBox',
           'Combo Box'          : 'ComboBox',
           'Compound Image'     : 'CmpImg',
           'Directory List'     : 'DirList',
           'Directory Tree'     : 'DirTree',
           'Control'            : 'Control',
           'Notebook'           : 'NoteBook',
           'Option Menu'        : 'OptMenu',
           'Paned Window'       : 'PanedWin',
           'Popup Menu'         : 'PopMenu',
           'ScrolledHList (1)'  : 'SHList1',
           'ScrolledHList (2)'  : 'SHList2',
           'Tree (dynamic)'     : 'Tree'
}
# There are still a lot of demos to be translated:
## set root {
## {d "File Selectors" file }
## {d "Hierachical ListBox" hlist }
## {d "Tabular ListBox" tlist {c tixTList}}
## {d "Grid Widget" grid {c tixGrid}}
## {d "Manager Widgets" manager }
## {d "Scrolled Widgets" scroll }
## {d "Miscellaneous Widgets" misc }
## {d "Image Types" image }
## }
##
## set image {
## {d "Compound Image" cmpimg }
## {d "XPM Image" xpm {i pixmap}}
## }
##
## set cmpimg {
##done {f "In Buttons" CmpImg.tcl }
## {f "In NoteBook" CmpImg2.tcl }
## {f "Notebook Color Tabs" CmpImg4.tcl }
## {f "Icons" CmpImg3.tcl }
## }
##
## set xpm {
## {f "In Button" Xpm.tcl {i pixmap}}
## {f "In Menu" Xpm1.tcl {i pixmap}}
## }
##
## set file {
##added {f DirList DirList.tcl }
##added {f DirTree DirTree.tcl }
## {f DirSelectDialog DirDlg.tcl }
## {f ExFileSelectDialog EFileDlg.tcl }
## {f FileSelectDialog FileDlg.tcl }
## {f FileEntry FileEnt.tcl }
## }
##
## set hlist {
## {f HList HList1.tcl }
## {f CheckList ChkList.tcl {c tixCheckList}}
##done {f "ScrolledHList (1)" SHList.tcl }
##done {f "ScrolledHList (2)" SHList2.tcl }
##done {f Tree Tree.tcl }
##done {f "Tree (Dynamic)" DynTree.tcl {v win}}
## }
##
## set tlist {
## {f "ScrolledTList (1)" STList1.tcl {c tixTList}}
## {f "ScrolledTList (2)" STList2.tcl {c tixTList}}
## }
## global tcl_platform
## # This demo hangs windows
## if {$tcl_platform(platform) != "windows"} {
##na lappend tlist {f "TList File Viewer" STList3.tcl {c tixTList}}
## }
##
## set grid {
##na {f "Simple Grid" SGrid0.tcl {c tixGrid}}
##na {f "ScrolledGrid" SGrid1.tcl {c tixGrid}}
##na {f "Editable Grid" EditGrid.tcl {c tixGrid}}
## }
##
## set scroll {
## {f ScrolledListBox SListBox.tcl }
## {f ScrolledText SText.tcl }
## {f ScrolledWindow SWindow.tcl }
##na {f "Canvas Object View" CObjView.tcl {c tixCObjView}}
## }
##
## set manager {
## {f ListNoteBook ListNBK.tcl }
##done {f NoteBook NoteBook.tcl }
##done {f PanedWindow PanedWin.tcl }
## }
##
## set misc {
##done {f Balloon Balloon.tcl }
##done {f ButtonBox BtnBox.tcl }
##done {f ComboBox ComboBox.tcl }
##done {f Control Control.tcl }
## {f LabelEntry LabEntry.tcl }
## {f LabelFrame LabFrame.tcl }
## {f Meter Meter.tcl {c tixMeter}}
##done {f OptionMenu OptMenu.tcl }
##done {f PopupMenu PopMenu.tcl }
## {f Select Select.tcl }
## {f StdButtonBox StdBBox.tcl }
## }
##
# Category -> ordered list of demo display names (keys into `samples`).
stypes = {
    'widget': ['Balloon', 'Button Box', 'Combo Box', 'Control',
               'Directory List', 'Directory Tree',
               'Notebook', 'Option Menu', 'Popup Menu', 'Paned Window',
               'ScrolledHList (1)', 'ScrolledHList (2)', 'Tree (dynamic)'],
    'image': ['Compound Image'],
}
def MkSample(nb, name):
    """Build the "sample programs" page: a horizontal paned window with a
    Tree of demo names on the left and a read-only source preview plus
    Run / View Source buttons on the right."""
    w = nb.page(name)
    options = "label.padX 4"

    pane = Tix.PanedWindow(w, orientation='horizontal')
    pane.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH)
    # Source pane grows five times faster than the list pane on resize.
    f1 = pane.add('list', expand='1')
    f2 = pane.add('text', expand='5')
    f1['relief'] = 'flat'
    f2['relief'] = 'flat'

    lab = Tix.LabelFrame(f1, label='Select a sample program:')
    lab.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=5, pady=5)
    lab1 = Tix.LabelFrame(f2, label='Source:')
    lab1.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=5, pady=5)

    slb = Tix.Tree(lab.frame, options='hlist.width 20')
    slb.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=5)

    stext = Tix.ScrolledText(lab1.frame, name='stext')
    font = root.tk.eval('tix option get fixed_font')
    stext.text.config(font=font)

    frame = Tix.Frame(lab1.frame, name='frame')
    run = Tix.Button(frame, text='Run ...', name='run')
    view = Tix.Button(frame, text='View Source ...', name='view')
    run.pack(side=Tix.LEFT, expand=0, fill=Tix.NONE)
    view.pack(side=Tix.LEFT, expand=0, fill=Tix.NONE)

    # Preview widget is read-only and matches the list's background.
    stext.text['bg'] = slb.hlist['bg']
    stext.text['state'] = 'disabled'
    stext.text['wrap'] = 'none'
    stext.text['width'] = 80

    frame.pack(side=Tix.BOTTOM, expand=0, fill=Tix.X, padx=7)
    stext.pack(side=Tix.TOP, expand=0, fill=Tix.BOTH, padx=7)

    slb.hlist['separator'] = '.'
    slb.hlist['width'] = 25
    slb.hlist['drawbranch'] = 0
    slb.hlist['indent'] = 10
    slb.hlist['wideselect'] = 1
    # Default arguments freeze the current widget references into each
    # callback (late-binding closure workaround).
    slb.hlist['command'] = lambda args=0, w=w,slb=slb,stext=stext,run=run,view=view: Sample_Action(w, slb, stext, run, view, 'run')
    slb.hlist['browsecmd'] = lambda args=0, w=w,slb=slb,stext=stext,run=run,view=view: Sample_Action(w, slb, stext, run, view, 'browse')

    run['command'] = lambda args=0, w=w,slb=slb,stext=stext,run=run,view=view: Sample_Action(w, slb, stext, run, view, 'run')
    view['command'] = lambda args=0, w=w,slb=slb,stext=stext,run=run,view=view: Sample_Action(w, slb, stext, run, view, 'view')

    for type in ['widget', 'image']:
        if type != 'widget':
            # Sunken separator line before every category after the first.
            x = Tix.Frame(slb.hlist, bd=2, height=2, width=150,
                          relief=Tix.SUNKEN, bg=slb.hlist['bg'])
            slb.hlist.add_child(itemtype=Tix.WINDOW, window=x, state='disabled')
        x = slb.hlist.add_child(itemtype=Tix.TEXT, state='disabled',
                                text=comments[type])
        for key in stypes[type]:
            slb.hlist.add_child(x, itemtype=Tix.TEXT, data=key,
                                text=key)

    slb.hlist.selection_clear()

    run['state'] = 'disabled'
    view['state'] = 'disabled'
def Sample_Action(w, slb, stext, run, view, action):
    """React to selection/activation in the sample tree.

    *action* is 'run' (launch the demo in a new Toplevel), 'view' (open
    its source in a viewer window) or 'browse' (preview the source in
    *stext*).
    """
    global demo
    hlist = slb.hlist

    anchor = hlist.info_anchor()
    if not anchor:
        # Nothing selected: grey out the buttons and stop here. The old
        # code fell through and called info_data('') on an empty anchor.
        run['state'] = 'disabled'
        view['state'] = 'disabled'
        return
    elif not hlist.info_parent(anchor):
        # A category heading, not a runnable sample entry.
        return

    run['state'] = 'normal'
    view['state'] = 'normal'
    key = hlist.info_data(anchor)
    title = key
    prog = samples[key]

    if action == 'run':
        # Import the sample module by name and call its entry point;
        # avoids exec()/eval() on constructed code strings.
        module = __import__(prog)
        w = Tix.Toplevel()
        w.title(title)
        module.RunSample(w)
    elif action == 'view':
        w = Tix.Toplevel()
        w.title('Source view: ' + title)
        LoadFile(w, demo.dir + '/samples/' + prog + '.py')
    elif action == 'browse':
        ReadFile(stext.text, demo.dir + '/samples/' + prog + '.py')
def LoadFile(w, fname):
    """Show file *fname* read-only in window *w* with a Close button."""
    global root
    close_btn = Tix.Button(w, text='Close', command=w.destroy)
    viewer = Tix.ScrolledText(w)
    viewer.pack()
    close_btn.pack()
    fixed = root.tk.eval('tix option get fixed_font')
    viewer.text.config(font=fixed)
    viewer.text['bd'] = 2
    viewer.text['wrap'] = 'none'
    ReadFile(viewer.text, fname)
def ReadFile(w, fname):
    """Replace the contents of text widget *w* with the contents of *fname*.

    The widget's 'state' option is saved and restored, so read-only
    viewers stay read-only afterwards even when reading fails.
    """
    old_state = w['state']
    w['state'] = 'normal'
    w.delete('0.0', Tix.END)
    try:
        # 'with' guarantees the file is closed even if reading raises;
        # the previous open()/readlines()/close() leaked the handle on a
        # read error. Iterating the file yields the same lines.
        with open(fname) as f:
            for s in f:
                w.insert(Tix.END, s)
    finally:
        w['state'] = old_state
if __name__ == '__main__':
    # Launch the demo under the Tix root window (Tix.Tk, not Tkinter.Tk,
    # so the Tix Tcl extension gets loaded).
    root = Tix.Tk()
    RunMain(root)
from twisted.internet import defer
from twisted.web import http
from moira.api.request import delayed, check_json
from moira.api.resources.redis import RedisResource
class Stats(RedisResource):
    """GET endpoint returning per-tag statistics: triggers, subscriptions
    and stored data for every known tag."""

    def __init__(self, db):
        RedisResource.__init__(self, db)

    @delayed
    @defer.inlineCallbacks
    def render_GET(self, request):
        stats = []
        for tag in (yield self.db.getTags()):
            triggers = yield self.db.getTagTriggers(tag)
            subscriptions = yield self.db.getTagSubscriptions(tag)
            tag_info = yield self.db.getTag(tag)
            stats.append({"name": tag,
                          "triggers": triggers,
                          "subscriptions": subscriptions,
                          "data": tag_info})
        self.write_json(request, {"list": stats})
class Data(RedisResource):
    """PUT endpoint that stores the request's JSON body as one tag's data."""

    def __init__(self, db, tag):
        self.tag = tag
        RedisResource.__init__(self, db)

    @delayed
    @check_json
    @defer.inlineCallbacks
    def render_PUT(self, request):
        previous = yield self.db.getTag(self.tag)
        yield self.db.setTag(self.tag, request.body_json,
                             request=request, existing=previous)
        request.finish()
class Tag(RedisResource):
    """Resource for a single tag; DELETE removes it unless triggers still
    reference it. Child resource "data" handles the tag's stored data."""

    def __init__(self, db, tag):
        self.tag = tag
        RedisResource.__init__(self, db)
        self.putChild("data", Data(db, tag))

    @delayed
    @defer.inlineCallbacks
    def render_DELETE(self, request):
        triggers = yield self.db.getTagTriggers(self.tag)
        if not triggers:
            previous = yield self.db.getTag(self.tag)
            yield self.db.removeTag(self.tag, request=request, existing=previous)
            self.write_json(request, {"message": "tag deleted"})
        else:
            # Refuse to delete a tag that triggers still depend on.
            request.setResponseCode(http.BAD_REQUEST)
            request.write(
                "This tag is assigned to %s triggers. Remove tag from triggers first" %
                len(triggers))
            request.finish()
class Tags(RedisResource):
    """Collection resource: GET lists all tags; child paths dispatch to a
    per-tag Tag resource, and "stats" to the Stats resource."""

    def __init__(self, db):
        RedisResource.__init__(self, db)
        self.putChild("stats", Stats(db))

    def getChild(self, path, request):
        return self if not path else Tag(self.db, path)

    @delayed
    @defer.inlineCallbacks
    def render_GET(self, request):
        tags = yield self.db.getTags()
        self.write_json(request,
                        {"tags": tags, "list": [unicode(tag) for tag in tags]})
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse, HttpResponsePermanentRedirect
from django.middleware.locale import LocaleMiddleware
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
from django.test.client import RequestFactory
from django.test.utils import override_script_prefix
from django.urls import clear_url_caches, resolve, reverse, translate_url
from django.utils import translation
class PermanentRedirectLocaleMiddleWare(LocaleMiddleware):
    """LocaleMiddleware variant that issues 301 (permanent) redirects
    instead of the default 302."""

    response_redirect_class = HttpResponsePermanentRedirect
@override_settings(
    USE_I18N=True,
    LOCALE_PATHS=[
        os.path.join(os.path.dirname(__file__), "locale"),
    ],
    LANGUAGE_CODE="en-us",
    LANGUAGES=[
        ("nl", "Dutch"),
        ("en", "English"),
        ("pt-br", "Brazilian Portuguese"),
    ],
    MIDDLEWARE=[
        "django.middleware.locale.LocaleMiddleware",
        "django.middleware.common.CommonMiddleware",
    ],
    ROOT_URLCONF="i18n.patterns.urls.default",
    TEMPLATES=[
        {
            "BACKEND": "django.template.backends.django.DjangoTemplates",
            "DIRS": [os.path.join(os.path.dirname(__file__), "templates")],
            "OPTIONS": {
                "context_processors": [
                    "django.template.context_processors.i18n",
                ],
            },
        }
    ],
)
class URLTestCaseBase(SimpleTestCase):
    """
    TestCase base-class for the URL tests.

    Applies a three-language i18n configuration (nl / en / pt-br) with
    LocaleMiddleware enabled and this test app's locale/template dirs.
    """

    def setUp(self):
        # Make sure the cache is empty before we are doing our tests.
        clear_url_caches()
        # Make sure we will leave an empty cache for other testcases.
        self.addCleanup(clear_url_caches)
class URLPrefixTests(URLTestCaseBase):
    """Check that `i18n_patterns` adds (only) the expected language prefix."""

    def test_not_prefixed(self):
        # URLs outside i18n_patterns never receive a language prefix,
        # whatever the active language.
        for lang in ("en", "nl"):
            with translation.override(lang):
                self.assertEqual(reverse("not-prefixed"), "/not-prefixed/")
                self.assertEqual(
                    reverse("not-prefixed-included-url"), "/not-prefixed-include/foo/"
                )

    def test_prefixed(self):
        with translation.override("en"):
            self.assertEqual(reverse("prefixed"), "/en/prefixed/")
        with translation.override("nl"):
            self.assertEqual(reverse("prefixed"), "/nl/prefixed/")
        with translation.override(None):
            # No active language: the default LANGUAGE_CODE prefix is used.
            self.assertEqual(
                reverse("prefixed"), "/%s/prefixed/" % settings.LANGUAGE_CODE
            )

    @override_settings(ROOT_URLCONF="i18n.patterns.urls.wrong")
    def test_invalid_prefix_use(self):
        msg = "Using i18n_patterns in an included URLconf is not allowed."
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            reverse("account:register")
@override_settings(ROOT_URLCONF="i18n.patterns.urls.disabled")
class URLDisabledTests(URLTestCaseBase):
    """With USE_I18N off, i18n_patterns must not add any prefix."""

    @override_settings(USE_I18N=False)
    def test_prefixed_i18n_disabled(self):
        for lang in ("en", "nl"):
            with translation.override(lang):
                self.assertEqual(reverse("prefixed"), "/prefixed/")
class RequestURLConfTests(SimpleTestCase):
    """LocaleMiddleware must honour a per-request `urlconf` override."""

    @override_settings(ROOT_URLCONF="i18n.patterns.urls.path_unused")
    def test_request_urlconf_considered(self):
        def get_response(req):
            return HttpResponse()

        request = RequestFactory().get("/nl/")
        # The request-level urlconf (which uses i18n_patterns) must win
        # over ROOT_URLCONF (which does not).
        request.urlconf = "i18n.patterns.urls.default"
        with translation.override("nl"):
            LocaleMiddleware(get_response).process_request(request)
        self.assertEqual(request.LANGUAGE_CODE, "nl")
@override_settings(ROOT_URLCONF="i18n.patterns.urls.path_unused")
class PathUnusedTests(URLTestCaseBase):
    """
    If no i18n_patterns is used in root URLconfs, then no language
    activation happens based on url prefix.
    """

    def test_no_lang_activate(self):
        # "/nl/" here is part of the pattern itself, not a language prefix,
        # so the default language (en) stays active.
        response = self.client.get("/nl/foo/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers["content-language"], "en")
        self.assertEqual(response.context["LANGUAGE_CODE"], "en")
class URLTranslationTests(URLTestCaseBase):
    """
    Tests if the pattern-strings are translated correctly (within the
    `i18n_patterns` and the normal `patterns` function).
    """

    def test_no_prefix_translated(self):
        with translation.override("en"):
            self.assertEqual(reverse("no-prefix-translated"), "/translated/")
            self.assertEqual(
                reverse("no-prefix-translated-regex"), "/translated-regex/"
            )
            self.assertEqual(
                reverse("no-prefix-translated-slug", kwargs={"slug": "yeah"}),
                "/translated/yeah/",
            )

        with translation.override("nl"):
            self.assertEqual(reverse("no-prefix-translated"), "/vertaald/")
            self.assertEqual(reverse("no-prefix-translated-regex"), "/vertaald-regex/")
            self.assertEqual(
                reverse("no-prefix-translated-slug", kwargs={"slug": "yeah"}),
                "/vertaald/yeah/",
            )

        with translation.override("pt-br"):
            self.assertEqual(reverse("no-prefix-translated"), "/traduzidos/")
            self.assertEqual(
                reverse("no-prefix-translated-regex"), "/traduzidos-regex/"
            )
            self.assertEqual(
                reverse("no-prefix-translated-slug", kwargs={"slug": "yeah"}),
                "/traduzidos/yeah/",
            )

    def test_users_url(self):
        with translation.override("en"):
            self.assertEqual(reverse("users"), "/en/users/")

        with translation.override("nl"):
            self.assertEqual(reverse("users"), "/nl/gebruikers/")
            self.assertEqual(reverse("prefixed_xml"), "/nl/prefixed.xml")

        with translation.override("pt-br"):
            self.assertEqual(reverse("users"), "/pt-br/usuarios/")

    def test_translate_url_utility(self):
        with translation.override("en"):
            self.assertEqual(
                translate_url("/en/nonexistent/", "nl"), "/en/nonexistent/"
            )
            self.assertEqual(translate_url("/en/users/", "nl"), "/nl/gebruikers/")
            # Namespaced URL
            self.assertEqual(
                translate_url("/en/account/register/", "nl"), "/nl/profiel/registreren/"
            )
            # path() URL pattern
            self.assertEqual(
                translate_url("/en/account/register-as-path/", "nl"),
                "/nl/profiel/registreren-als-pad/",
            )
            # translate_url() must not leak the target language into the
            # active translation.
            self.assertEqual(translation.get_language(), "en")
            # re_path() URL with parameters.
            self.assertEqual(
                translate_url("/en/with-arguments/regular-argument/", "nl"),
                "/nl/with-arguments/regular-argument/",
            )
            self.assertEqual(
                translate_url(
                    "/en/with-arguments/regular-argument/optional.html", "nl"
                ),
                "/nl/with-arguments/regular-argument/optional.html",
            )
            # path() URL with parameter.
            self.assertEqual(
                translate_url("/en/path-with-arguments/regular-argument/", "nl"),
                "/nl/path-with-arguments/regular-argument/",
            )

        with translation.override("nl"):
            self.assertEqual(translate_url("/nl/gebruikers/", "en"), "/en/users/")
            self.assertEqual(translation.get_language(), "nl")

    def test_reverse_translated_with_captured_kwargs(self):
        with translation.override("en"):
            match = resolve("/translated/apo/")
        # Links to the same page in other languages.
        tests = [
            ("nl", "/vertaald/apo/"),
            ("pt-br", "/traduzidos/apo/"),
        ]
        for lang, expected_link in tests:
            with translation.override(lang):
                self.assertEqual(
                    reverse(
                        match.url_name, args=match.args, kwargs=match.captured_kwargs
                    ),
                    expected_link,
                )

    def test_locale_not_interepreted_as_regex(self):
        with translation.override("e("):
            # Would previously error:
            # re.error: missing ), unterminated subpattern at position 1
            reverse("users")
class URLNamespaceTests(URLTestCaseBase):
    """Translated URL patterns keep working inside URL namespaces."""

    def test_account_register(self):
        # Per language: expected URLs for the re_path- and path-based names.
        expected = {
            "en": ("/en/account/register/", "/en/account/register-as-path/"),
            "nl": ("/nl/profiel/registreren/", "/nl/profiel/registreren-als-pad/"),
        }
        for lang, (regex_url, path_url) in expected.items():
            with translation.override(lang):
                self.assertEqual(reverse("account:register"), regex_url)
                self.assertEqual(reverse("account:register-as-path"), path_url)
class URLRedirectTests(URLTestCaseBase):
    """
    Tests if the user gets redirected to the right URL when there is no
    language-prefix in the request URL.
    """

    def test_no_prefix_response(self):
        response = self.client.get("/not-prefixed/")
        self.assertEqual(response.status_code, 200)

    def test_en_redirect(self):
        response = self.client.get(
            "/account/register/", headers={"accept-language": "en"}
        )
        self.assertRedirects(response, "/en/account/register/")
        # Following the redirect resolves successfully.
        response = self.client.get(response.headers["location"])
        self.assertEqual(response.status_code, 200)

    def test_en_redirect_wrong_url(self):
        # The Dutch path does not exist under the English prefix.
        response = self.client.get(
            "/profiel/registreren/", headers={"accept-language": "en"}
        )
        self.assertEqual(response.status_code, 404)

    def test_nl_redirect(self):
        response = self.client.get(
            "/profiel/registreren/", headers={"accept-language": "nl"}
        )
        self.assertRedirects(response, "/nl/profiel/registreren/")
        response = self.client.get(response.headers["location"])
        self.assertEqual(response.status_code, 200)

    def test_nl_redirect_wrong_url(self):
        response = self.client.get(
            "/account/register/", headers={"accept-language": "nl"}
        )
        self.assertEqual(response.status_code, 404)

    def test_pt_br_redirect(self):
        response = self.client.get(
            "/conta/registre-se/", headers={"accept-language": "pt-br"}
        )
        self.assertRedirects(response, "/pt-br/conta/registre-se/")
        response = self.client.get(response.headers["location"])
        self.assertEqual(response.status_code, 200)

    def test_pl_pl_redirect(self):
        # language from outside of the supported LANGUAGES list
        response = self.client.get(
            "/account/register/", headers={"accept-language": "pl-pl"}
        )
        self.assertRedirects(response, "/en/account/register/")
        response = self.client.get(response.headers["location"])
        self.assertEqual(response.status_code, 200)

    @override_settings(
        MIDDLEWARE=[
            "i18n.patterns.tests.PermanentRedirectLocaleMiddleWare",
            "django.middleware.common.CommonMiddleware",
        ],
    )
    def test_custom_redirect_class(self):
        # A subclassed LocaleMiddleware can change the redirect status code.
        response = self.client.get(
            "/account/register/", headers={"accept-language": "en"}
        )
        self.assertRedirects(response, "/en/account/register/", 301)
class URLVaryAcceptLanguageTests(URLTestCaseBase):
    """
    'Accept-Language' is not added to the Vary header when using prefixed URLs.
    """

    def test_no_prefix_response(self):
        response = self.client.get("/not-prefixed/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get("Vary"), "Accept-Language")

    def test_en_redirect(self):
        """
        The redirect to a prefixed URL depends on 'Accept-Language' and
        'Cookie', but once prefixed no header is set.
        """
        response = self.client.get(
            "/account/register/", headers={"accept-language": "en"}
        )
        self.assertRedirects(response, "/en/account/register/")
        self.assertEqual(response.get("Vary"), "Accept-Language, Cookie")

        # The prefixed URL itself is language-determined by the path, so it
        # must not vary on headers.
        response = self.client.get(response.headers["location"])
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.get("Vary"))
class URLRedirectWithoutTrailingSlashTests(URLTestCaseBase):
    """
    Tests the redirect when the requested URL doesn't end with a slash
    (`settings.APPEND_SLASH=True`).
    """

    def test_not_prefixed_redirect(self):
        response = self.client.get("/not-prefixed", headers={"accept-language": "en"})
        self.assertRedirects(response, "/not-prefixed/", 301)

    def test_en_redirect(self):
        response = self.client.get(
            "/account/register", headers={"accept-language": "en"}, follow=True
        )
        # We only want one redirect, bypassing CommonMiddleware
        self.assertEqual(response.redirect_chain, [("/en/account/register/", 302)])
        self.assertRedirects(response, "/en/account/register/", 302)

        # A URL without a trailing slash by design must not get one appended.
        response = self.client.get(
            "/prefixed.xml", headers={"accept-language": "en"}, follow=True
        )
        self.assertRedirects(response, "/en/prefixed.xml", 302)
class URLRedirectWithoutTrailingSlashSettingTests(URLTestCaseBase):
    """
    Tests the redirect when the requested URL doesn't end with a slash
    (`settings.APPEND_SLASH=False`).
    """

    @override_settings(APPEND_SLASH=False)
    def test_not_prefixed_redirect(self):
        # Without APPEND_SLASH there is nothing to redirect to.
        response = self.client.get("/not-prefixed", headers={"accept-language": "en"})
        self.assertEqual(response.status_code, 404)

    @override_settings(APPEND_SLASH=False)
    def test_en_redirect(self):
        # Only the language-prefix redirect happens; no slash is appended.
        response = self.client.get(
            "/account/register-without-slash", headers={"accept-language": "en"}
        )
        self.assertRedirects(response, "/en/account/register-without-slash", 302)
        response = self.client.get(response.headers["location"])
        self.assertEqual(response.status_code, 200)
class URLResponseTests(URLTestCaseBase):
    """Tests if the response has the correct language code."""

    def assertLanguage(self, path, lang):
        # Helper: *path* resolves (200) and is served in language *lang*.
        response = self.client.get(path)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers["content-language"], lang)
        self.assertEqual(response.context["LANGUAGE_CODE"], lang)

    def test_not_prefixed_with_prefix(self):
        self.assertEqual(self.client.get("/en/not-prefixed/").status_code, 404)

    def test_en_url(self):
        self.assertLanguage("/en/account/register/", "en")

    def test_nl_url(self):
        self.assertLanguage("/nl/profiel/registreren/", "nl")

    def test_wrong_en_prefix(self):
        self.assertEqual(self.client.get("/en/profiel/registreren/").status_code, 404)

    def test_wrong_nl_prefix(self):
        self.assertEqual(self.client.get("/nl/account/register/").status_code, 404)

    def test_pt_br_url(self):
        self.assertLanguage("/pt-br/conta/registre-se/", "pt-br")

    def test_en_path(self):
        self.assertLanguage("/en/account/register-as-path/", "en")

    def test_nl_path(self):
        self.assertLanguage("/nl/profiel/registreren-als-pad/", "nl")
@override_settings(ROOT_URLCONF="i18n.urls_default_unprefixed", LANGUAGE_CODE="nl")
class URLPrefixedFalseTranslatedTests(URLTestCaseBase):
    """With prefix_default_language=False, the default language (nl) is
    served unprefixed while other languages keep their prefix, regardless
    of the Accept-Language header or language cookie."""

    def test_translated_path_unprefixed_language_other_than_accepted_header(self):
        response = self.client.get("/gebruikers/", headers={"accept-language": "en"})
        self.assertEqual(response.status_code, 200)

    def test_translated_path_unprefixed_language_other_than_cookie_language(self):
        self.client.cookies.load({settings.LANGUAGE_COOKIE_NAME: "en"})
        response = self.client.get("/gebruikers/")
        self.assertEqual(response.status_code, 200)

    def test_translated_path_prefixed_language_other_than_accepted_header(self):
        response = self.client.get("/en/users/", headers={"accept-language": "nl"})
        self.assertEqual(response.status_code, 200)

    def test_translated_path_prefixed_language_other_than_cookie_language(self):
        self.client.cookies.load({settings.LANGUAGE_COOKIE_NAME: "nl"})
        response = self.client.get("/en/users/")
        self.assertEqual(response.status_code, 200)
class URLRedirectWithScriptAliasTests(URLTestCaseBase):
    """
    #21579 - LocaleMiddleware should respect the script prefix.
    """

    def test_language_prefix_with_script_prefix(self):
        prefix = "/script_prefix"
        with override_script_prefix(prefix):
            response = self.client.get(
                "/prefixed/", headers={"accept-language": "en"}, SCRIPT_NAME=prefix
            )
            # The redirect keeps the script prefix in front of the language
            # prefix; the target 404s because the test URLconf has no view
            # there, which is fine — only the Location matters.
            self.assertRedirects(
                response, "%s/en/prefixed/" % prefix, target_status_code=404
            )
class URLTagTests(URLTestCaseBase):
    """
    Test if the language tag works.
    """

    def test_strings_only(self):
        t = Template("""{% load i18n %}
            {% language 'nl' %}{% url 'no-prefix-translated' %}{% endlanguage %}
            {% language 'pt-br' %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
        self.assertEqual(
            t.render(Context({})).strip().split(), ["/vertaald/", "/traduzidos/"]
        )

    def test_context(self):
        # Language codes coming from template context variables.
        ctx = Context({"lang1": "nl", "lang2": "pt-br"})
        tpl = Template("""{% load i18n %}
            {% language lang1 %}{% url 'no-prefix-translated' %}{% endlanguage %}
            {% language lang2 %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
        self.assertEqual(
            tpl.render(ctx).strip().split(), ["/vertaald/", "/traduzidos/"]
        )

    def test_args(self):
        tpl = Template("""
            {% load i18n %}
            {% language 'nl' %}
            {% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}
            {% language 'pt-br' %}
            {% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}
            """)
        self.assertEqual(
            tpl.render(Context({})).strip().split(),
            ["/vertaald/apo/", "/traduzidos/apo/"],
        )

    def test_kwargs(self):
        tpl = Template("""
            {% load i18n %}
            {% language 'nl' %}
            {% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}
            {% language 'pt-br' %}
            {% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}
            """)
        self.assertEqual(
            tpl.render(Context({})).strip().split(),
            ["/vertaald/apo/", "/traduzidos/apo/"],
        )
/*!
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import { QueryClient } from "@tanstack/react-query";
import { OpenAPI } from "openapi/requests/core/OpenAPI";
// Dynamically set the base URL for XHR requests based on the meta tag.
OpenAPI.BASE = document.querySelector("head>base")?.getAttribute("href") ?? "";
if (OpenAPI.BASE.endsWith("/")) {
  // Normalize away the trailing slash so request paths that start with
  // "/" can be appended without producing "//".
  OpenAPI.BASE = OpenAPI.BASE.slice(0, -1);
}

// Shared TanStack Query client; mutations retry once after 500 ms.
export const queryClient = new QueryClient({
  defaultOptions: {
    mutations: {
      retry: 1,
      retryDelay: 500,
    },
  },
});
package daemon
import (
// Importing packages here only to make sure their init gets called and
// therefore they register themselves to the logdriver factory.
_ "github.com/moby/moby/v2/daemon/logger/awslogs"
_ "github.com/moby/moby/v2/daemon/logger/etwlogs"
_ "github.com/moby/moby/v2/daemon/logger/fluentd"
_ "github.com/moby/moby/v2/daemon/logger/gcplogs"
_ "github.com/moby/moby/v2/daemon/logger/gelf"
_ "github.com/moby/moby/v2/daemon/logger/jsonfilelog"
_ "github.com/moby/moby/v2/daemon/logger/loggerutils/cache"
_ "github.com/moby/moby/v2/daemon/logger/splunk"
_ "github.com/moby/moby/v2/daemon/logger/syslog"
) | go | github | https://github.com/moby/moby | daemon/logdrivers_windows.go |
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th></th>
<th>0</th>
<th>1</th>
</tr>
<tr>
<th>foo</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<th>0</th>
<td>0</td>
<td>1</td>
</tr>
<tr>
<th>0</th>
<th>1</th>
<td>2</td>
<td>3</td>
</tr>
<tr>
<th>1</th>
<th>0</th>
<td>4</td>
<td>5</td>
</tr>
<tr>
<th>1</th>
<th>1</th>
<td>6</td>
<td>7</td>
</tr>
</tbody>
</table> | html | github | https://github.com/pandas-dev/pandas | pandas/tests/io/formats/data/html/multiindex_sparsify_false_multi_sparse_1.html |
from util import hook, http, web, text
from urllib import urlencode
import re
# (pattern, flags) pairs consumed by @hook.regex. Groups: (scheme-with-colon,
# optional "www.", host, rest-of-line). NOTE(review): the dots in "www." and
# the host names are unescaped, so they match any character — preexisting,
# harmless in practice but worth confirming before tightening.
sc_re = (r'(.*:)//(www.)?(soundcloud.com)(.*)', re.I)
# Base URL of the SoundCloud REST API (used with /resolve.json).
api_url = "http://api.soundcloud.com"
sndsc_re = (r'(.*:)//(www.)?(snd.sc)(.*)', re.I)
def soundcloud(url, api_key):
    """Resolve a SoundCloud track *url* via the API and return an IRC
    summary line (\\x02 is IRC bold).

    Uses the /resolve endpoint, truncates long descriptions to 50 chars,
    shortens the permalink via is.gd and reports play/download/comment
    counts.
    """
    data = http.get_json(api_url + '/resolve.json?' + urlencode({'url': url, 'client_id': api_key}))

    if data['description']:
        desc = u": {} ".format(text.truncate_str(data['description'], 50))
    else:
        desc = ""

    if data['genre']:
        genre = u"- Genre: \x02{}\x02 ".format(data['genre'])
    else:
        genre = ""

    url = web.try_isgd(data['permalink_url'])

    # Bug fix: a stray literal "user" followed the username placeholder,
    # rendering e.g. "by <name>user"; the uploader name is now shown alone.
    return u"SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}- {} plays, {} downloads, {} comments - {}".format(
        data['title'], data['user']['username'], desc, genre, data['playback_count'], data['download_count'],
        data['comment_count'], url)
@hook.regex(*sc_re)
def soundcloud_url(match, bot=None):
    """Regex hook: summarize soundcloud.com links seen in chat.

    Returns the formatted track summary, or None when no API key is set.
    """
    api_key = bot.config.get("api_keys", {}).get("soundcloud")
    if not api_key:
        # print() form works identically on Python 2 and 3; the old
        # "print ..." statement was Python-2-only syntax.
        print("Error: no api key set")
        return None
    # Rebuild the bare URL from the regex groups, trimming any surrounding
    # chat text captured by the greedy .* groups.
    url = match.group(1).split(' ')[-1] + "//" + (match.group(2) if match.group(2) else "") + match.group(3) + \
        match.group(4).split(' ')[0]
    return soundcloud(url, api_key)
@hook.regex(*sndsc_re)
def sndsc_url(match, bot=None):
    """Regex hook: summarize short snd.sc links seen in chat.

    The short URL is first followed (http.open) so the canonical
    soundcloud.com URL can be resolved through the API.
    """
    api_key = bot.config.get("api_keys", {}).get("soundcloud")
    if not api_key:
        # print() form works identically on Python 2 and 3; the old
        # "print ..." statement was Python-2-only syntax.
        print("Error: no api key set")
        return None
    # Rebuild the bare URL from the regex groups, trimming any surrounding
    # chat text captured by the greedy .* groups.
    url = match.group(1).split(' ')[-1] + "//" + (match.group(2) if match.group(2) else "") + match.group(3) + \
        match.group(4).split(' ')[0]
    return soundcloud(http.open(url).url, api_key)
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from proton import Condition, Message, Delivery, Url, symbol, Timeout
from system_test import TestCase, Qdrouterd, main_module, TIMEOUT, DIR, Process, unittest, QdManager, TestTimeout
from proton.handlers import MessagingHandler, TransactionHandler
from proton.reactor import Container, AtMostOnce, AtLeastOnce
from proton.utils import BlockingConnection, SyncRequestResponse
from proton import VERSION as PROTON_VERSION
from proton import Terminus
from proton import Data
from qpid_dispatch.management.client import Node, BadRequestStatus
import os, json
from subprocess import PIPE, STDOUT
from time import sleep
from test_broker import FakeBroker
# Connection properties used to tag test connections so individual tests can
# locate them later through management queries (see e.g. the
# connection-property tests in OneRouterTest).
CONNECTION_PROPERTIES_UNICODE_STRING = {u'connection': u'properties', u'int_property': 6451}
CONNECTION_PROPERTIES_SYMBOL = dict()
CONNECTION_PROPERTIES_SYMBOL[symbol("connection")] = symbol("properties")
CONNECTION_PROPERTIES_BINARY = {b'client_identifier': b'policy_server'}
class StandaloneRouterQdManageTest(TestCase):
    """Management (qdmanage) tests against a standalone router.

    A standalone router must not connect to or accept connections from other
    routers, so creating inter-router/edge listeners or connectors through
    management must be rejected with a BadRequestStatus error.
    """

    # Error text the router emits for any router-to-router endpoint
    # configured on a standalone router.
    _STANDALONE_ERROR = ("BadRequestStatus: role='standalone' not allowed to "
                         "connect to or accept connections from other routers.")

    @classmethod
    def setUpClass(cls):
        super(StandaloneRouterQdManageTest, cls).setUpClass()
        name = "test-router"
        config = Qdrouterd.Config([
            ('router', {'mode': 'standalone', 'id': 'QDR'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'})
        ])
        cls.router = cls.tester.qdrouterd(name, config, wait=True)

    def _expect_create_rejected(self, entity_type, attributes):
        """Try to create *entity_type* with *attributes* via management and
        assert the router rejects it with the standalone-role error."""
        mgmt = QdManager(self, address=self.router.addresses[0])
        try:
            mgmt.create(entity_type, attributes)
        except Exception as e:
            self.assertIn(self._STANDALONE_ERROR, str(e))
        else:
            self.fail("creation of %s was unexpectedly allowed" % entity_type)

    def test_49_add_interrouter_connector_to_standalone_router(self):
        """
        A standalone router can have a route container connector but never an
        inter-router connector; those are allowed only on interior routers.
        """
        self._expect_create_rejected("org.apache.qpid.dispatch.connector",
                                     {"host": "0.0.0.0",
                                      "port": "77777",
                                      "role": "inter-router"})

    def test_50_add_edge_listener_to_standalone_router(self):
        """
        A standalone router cannot accept connections from edge routers.
        """
        self._expect_create_rejected("org.apache.qpid.dispatch.listener",
                                     {"host": "0.0.0.0",
                                      "port": "77777",
                                      "role": "edge",
                                      "authenticatePeer": "no"})

    def test_51_add_interrouter_listener_to_standalone_router(self):
        """
        A standalone router cannot accept connections from other interior
        routers.
        """
        self._expect_create_rejected("org.apache.qpid.dispatch.listener",
                                     {"host": "0.0.0.0",
                                      "port": "77777",
                                      "role": "inter-router",
                                      "authenticatePeer": "no"})
class EdgeRouterQdManageTest(TestCase):
    """Management (qdmanage) tests against an edge router.

    An edge router may have edge or route-container connectors, but
    inter-router endpoints and edge listeners are interior-router-only, so
    creating them via management must be rejected.
    """

    @classmethod
    def setUpClass(cls):
        super(EdgeRouterQdManageTest, cls).setUpClass()
        name = "test-router"
        config = Qdrouterd.Config([
            ('router', {'mode': 'edge', 'id': 'QDR'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'normal', 'host': '0.0.0.0'})
        ])
        cls.router = cls.tester.qdrouterd(name, config, wait=True)

    def _expect_create_rejected(self, entity_type, attributes, expected_error):
        """Try to create *entity_type* with *attributes* via management and
        assert the router rejects it with *expected_error* in the message."""
        mgmt = QdManager(self, address=self.router.addresses[0])
        try:
            mgmt.create(entity_type, attributes)
        except Exception as e:
            self.assertIn(expected_error, str(e))
        else:
            self.fail("creation of %s was unexpectedly allowed" % entity_type)

    def test_52_add_interrouter_connector_to_edge_router(self):
        """
        Inter-router connectors are allowed only on interior routers, never
        on edge routers.
        """
        self._expect_create_rejected(
            "org.apache.qpid.dispatch.connector",
            {"host": "0.0.0.0", "port": "77777", "role": "inter-router"},
            "BadRequestStatus: role='inter-router' only allowed with router mode='interior'")

    def test_53_add_edge_listener_to_edge_router(self):
        """
        An edge listener would let another edge router connect to this edge
        router, which is not allowed.
        """
        self._expect_create_rejected(
            "org.apache.qpid.dispatch.listener",
            {"host": "0.0.0.0", "port": "77777", "role": "edge",
             "authenticatePeer": "no"},
            "BadRequestStatus: role='edge' only allowed with router mode='interior'")

    def test_54_add_interrouter_listener_to_edge_router(self):
        """
        An edge router cannot accept inter-router connections.
        """
        self._expect_create_rejected(
            "org.apache.qpid.dispatch.listener",
            {"host": "0.0.0.0", "port": "77777", "role": "inter-router",
             "authenticatePeer": "no"},
            "BadRequestStatus: role='inter-router' only allowed with router mode='interior'")
class StandaloneEdgeRouterConfigTest(TestCase):
    """
    Try to start the router with bad config and make sure the router
    does not start and scan the log files for appropriate error messages.
    """
    @classmethod
    def setUpClass(cls):
        super(StandaloneEdgeRouterConfigTest, cls).setUpClass()
        name = "test-router"
        # A standalone router cannot have an edge listener because it cannot accept edge connections.
        config = Qdrouterd.Config([
            ('router', {'mode': 'standalone', 'id': 'QDR'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'edge', 'host': '0.0.0.0'})
        ])
        cls.router = cls.tester.qdrouterd(name, config, wait=False, perform_teardown=False)
        # A standalone router cannot have inter-router connectors.
        name = "test-router-1"
        config_1 = Qdrouterd.Config([
            ('router', {'mode': 'standalone', 'id': 'QDR'}),
            ('connector', {'port': cls.tester.get_port(), 'role': 'inter-router', 'host': '0.0.0.0'})
        ])
        cls.router_1 = cls.tester.qdrouterd(name, config_1, wait=False, perform_teardown=False)
        # An edge router cannot have edge listeners.
        # Edge routers can have connectors that connect to interior routers
        # or route-containers. One edge router cannot connect to another edge router.
        name = "test-router-2"
        config_2 = Qdrouterd.Config([
            ('router', {'mode': 'edge', 'id': 'QDR'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'edge', 'host': '0.0.0.0'})
        ])
        cls.router_2 = cls.tester.qdrouterd(name, config_2, wait=False, perform_teardown=False)
        # Edge routers cannot have inter-router listeners. Only interior
        # routers can have inter-router listeners.
        name = "test-router-3"
        config_3 = Qdrouterd.Config([
            ('router', {'mode': 'edge', 'id': 'QDR'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'inter-router', 'host': '0.0.0.0'})
        ])
        cls.router_3 = cls.tester.qdrouterd(name, config_3, wait=False, perform_teardown=False)
        # Edge routers cannot have inter-router connectors.
        # Inter-router connectors are allowed only on interior routers.
        name = "test-router-4"
        config_4 = Qdrouterd.Config([
            ('router', {'mode': 'edge', 'id': 'QDR'}),
            ('connector', {'port': cls.tester.get_port(), 'role': 'inter-router', 'host': '0.0.0.0'})
        ])
        cls.router_4 = cls.tester.qdrouterd(name, config_4, wait=False, perform_teardown=False)
        # A standalone router cannot have an inter-router listener because
        # it cannot accept inter-router connections.
        name = "test-router-5"
        config_5 = Qdrouterd.Config([
            ('router', {'mode': 'standalone', 'id': 'QDR'}),
            ('listener', {'port': cls.tester.get_port(), 'role': 'inter-router', 'host': '0.0.0.0'})
        ])
        cls.router_5 = cls.tester.qdrouterd(name, config_5, wait=False, perform_teardown=False)
        # Give some time for the test to write to the .out file. Without
        # this sleep, the tests execute too fast and find that nothing has
        # yet been written to the .out files.
        sleep(3)

    def _assert_error_in_output(self, router, expected):
        """Assert that *expected* appears somewhere in *router*'s captured
        stdout file (<outfile>.out)."""
        with open(router.outfile + '.out', 'r') as out_file:
            for line in out_file:
                if expected in line:
                    return
        self.fail("did not find %r in %s.out" % (expected, router.outfile))

    def test_48_router_in_error(self):
        # Each of the six deliberately-broken routers from setUpClass must
        # have refused to start with the matching configuration error.
        standalone_err = ("role='standalone' not allowed to connect to or "
                          "accept connections from other routers.")
        self._assert_error_in_output(
            self.router,
            "Exception: Cannot load configuration file test-router.conf: " + standalone_err)
        self._assert_error_in_output(
            self.router_1,
            "Exception: Cannot load configuration file test-router-1.conf: " + standalone_err)
        self._assert_error_in_output(
            self.router_2,
            "Exception: Cannot load configuration file test-router-2.conf: role='edge' only allowed with router mode='interior'")
        self._assert_error_in_output(
            self.router_3,
            "Exception: Cannot load configuration file test-router-3.conf: role='inter-router' only allowed with router mode='interior'")
        self._assert_error_in_output(
            self.router_4,
            "Exception: Cannot load configuration file test-router-4.conf: role='inter-router' only allowed with router mode='interior'")
        self._assert_error_in_output(
            self.router_5,
            "Exception: Cannot load configuration file test-router-5.conf: " + standalone_err)
class OneRouterTest(TestCase):
    """System tests involving a single router"""
    @classmethod
    def setUpClass(cls):
        """Start a router and a messenger"""
        super(OneRouterTest, cls).setUpClass()
        name = "test-router"
        policy_config_path = os.path.join(DIR, 'one-router-policy')
        OneRouterTest.listen_port = cls.tester.get_port()
        config = Qdrouterd.Config([
            ('router', {'mode': 'standalone', 'id': 'QDR', 'allowUnsettledMulticast': 'yes'}),
            ('policy', {'policyDir': policy_config_path,
                        'enableVhostPolicy': 'true'}),
            # Setting the stripAnnotations to 'no' so that the existing tests will work.
            # Setting stripAnnotations to no will not strip the annotations and any tests that were already in this file
            # that were expecting the annotations to not be stripped will continue working.
            ('listener', {'port': OneRouterTest.listen_port, 'maxFrameSize': '2048', 'stripAnnotations': 'no'}),
            # The following listeners were exclusively added to test the stripAnnotations attribute in qdrouterd.conf file
            # Different listeners will be used to test all allowed values of stripAnnotations ('no', 'both', 'out', 'in')
            ('listener', {'port': cls.tester.get_port(), 'maxFrameSize': '2048', 'stripAnnotations': 'no'}),
            ('listener', {'port': cls.tester.get_port(), 'maxFrameSize': '2048', 'stripAnnotations': 'both'}),
            ('listener', {'port': cls.tester.get_port(), 'maxFrameSize': '2048', 'stripAnnotations': 'out'}),
            ('listener', {'port': cls.tester.get_port(), 'maxFrameSize': '2048', 'stripAnnotations': 'in'}),
            ('address', {'prefix': 'closest', 'distribution': 'closest'}),
            ('address', {'prefix': 'balanced', 'distribution': 'balanced'}),
            ('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
            ('address', {'prefix': 'unavailable', 'distribution': 'unavailable'})
        ])
        cls.router = cls.tester.qdrouterd(name, config)
        cls.router.wait_ready()
        cls.address = cls.router.addresses[0]
        cls.closest_count = 1
        # One address per stripAnnotations mode, matching the listener order above.
        cls.no_strip_addr = cls.router.addresses[1]
        cls.both_strip_addr = cls.router.addresses[2]
        cls.out_strip_addr = cls.router.addresses[3]
        cls.in_strip_addr = cls.router.addresses[4]
    def run_qdmanage(self, cmd, input=None, expect=Process.EXIT_OK, address=None):
        # Run the qdmanage CLI against this router (or *address*) and return
        # its stdout; raises with the captured output if teardown fails
        # (e.g. unexpected exit status).
        p = self.popen(
            ['qdmanage'] + cmd.split(' ') + ['--bus', address or self.address, '--indent=-1', '--timeout', str(TIMEOUT)],
            stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect,
            universal_newlines=True)
        out = p.communicate(input)[0]
        try:
            p.teardown()
        except Exception as e:
            raise Exception(out if out else str(e))
        return out
    def test_01_listen_error(self):
        # Make sure a router exits if a initial listener fails, doesn't hang.
        config = Qdrouterd.Config([
            ('router', {'mode': 'standalone', 'id': 'bad'}),
            ('listener', {'port': OneRouterTest.listen_port})])
        r = Qdrouterd(name="expect_fail", config=config, wait=False)
        self.assertEqual(1, r.wait())
    def test_02_pre_settled ( self ):
        addr = self.address + '/closest/' + str(OneRouterTest.closest_count)
        OneRouterTest.closest_count += 1
        test = PreSettled ( addr, n_messages = 10 )
        test.run ( )
        self.assertEqual ( None, test.error )
    def test_03_multicast_unsettled ( self ) :
        # NOTE(review): the local n_receivers is unused; the literal 5 is
        # passed to MulticastUnsettled below.
        n_receivers = 5
        addr = self.address + '/multicast/1'
        test = MulticastUnsettled ( addr, n_messages = 10, n_receivers = 5 )
        test.run ( )
        self.assertEqual ( None, test.error )
    # DISPATCH-1277. This test will fail with a policy but without the fix in policy_local.py
    # In other words, if the max-frame-size was 2147483647 and not 16384, this
    # test would fail.
    def test_04_disposition_returns_to_closed_connection ( self ) :
        addr = self.address + '/closest/' + str(OneRouterTest.closest_count)
        OneRouterTest.closest_count += 1
        test = DispositionReturnsToClosedConnection ( addr, n_messages = 100 )
        test.run ( )
        self.assertEqual ( None, test.error )
    def test_05_sender_settles_first ( self ) :
        addr = self.address + '/closest/' + str(OneRouterTest.closest_count)
        OneRouterTest.closest_count += 1
        test = SenderSettlesFirst ( addr, n_messages = 100 )
        test.run ( )
        self.assertEqual ( None, test.error )
    def test_06_propagated_disposition ( self ) :
        addr = self.address + '/closest/' + str(OneRouterTest.closest_count)
        OneRouterTest.closest_count += 1
        test = PropagatedDisposition ( addr, n_messages = 10 )
        test.run ( )
        self.assertEqual ( None, test.error )
    def test_07_unsettled_undeliverable ( self ) :
        addr = self.address + '/closest/' + str(OneRouterTest.closest_count)
        OneRouterTest.closest_count += 1
        test = UsettledUndeliverable ( addr, n_messages = 10 )
        test.run ( )
        self.assertEqual ( None, test.error )
    def test_08_three_ack ( self ) :
        addr = self.address + '/closest/' + str(OneRouterTest.closest_count)
        OneRouterTest.closest_count += 1
        test = ThreeAck ( addr, n_messages = 10 )
        test.run ( )
        self.assertEqual ( None, test.error )
    def test_09_message_annotations ( self ) :
        addr = self.address + '/closest/' + str(OneRouterTest.closest_count)
        OneRouterTest.closest_count += 1
        test = MessageAnnotations ( addr, n_messages = 10 )
        test.run ( )
        self.assertEqual ( None, test.error )
    # Tests stripping of ingress and egress annotations.
    # There is a property in qdrouter.json called stripAnnotations with possible values of ["in", "out", "both", "no"]
    # The default for stripAnnotations is "both" (which means strip annotations on both ingress and egress)
    # This test will test the stripAnnotations = no option - meaning no annotations must be stripped.
    # We will send in a custom annotation and make sure that we get back 3 annotations on the received message
    def test_10_strip_message_annotations_custom(self):
        addr = self.no_strip_addr + "/strip_message_annotations_no_custom/1"
        OneRouterTest.closest_count += 1
        test = StripMessageAnnotationsCustom ( addr, n_messages = 10 )
        test.run ( )
        self.assertEqual ( None, test.error )
    # stripAnnotations property is set to "no"
    def test_11_test_strip_message_annotations_no(self):
        addr = self.no_strip_addr + "/strip_message_annotations_no/1"
        test = StripMessageAnnotationsNo ( addr, n_messages = 10 )
        test.run ( )
        self.assertEqual ( None, test.error )
    # stripAnnotations property is set to "no"
    def test_12_test_strip_message_annotations_no_add_trace(self):
        addr = self.no_strip_addr + "/strip_message_annotations_no_add_trace/1"
        test = StripMessageAnnotationsNoAddTrace ( addr, n_messages = 10 )
        test.run ( )
        self.assertEqual ( None, test.error )
    # Dont send any pre-existing ingress or trace annotations. Make sure that there
    # are no outgoing message annotations stripAnnotations property is set to "both".
    # Custom annotations, however, are not stripped.
    def test_13_test_strip_message_annotations_both(self):
        addr = self.both_strip_addr + "/strip_message_annotations_both/1"
        test = StripMessageAnnotationsBoth ( addr, n_messages = 10 )
        test.run ( )
        self.assertEqual ( None, test.error )
    # Dont send any pre-existing ingress or trace annotations. Make sure that there
    # are no outgoing message annotations
    # stripAnnotations property is set to "out"
    def test_14_test_strip_message_annotations_out(self):
        addr = self.out_strip_addr + "/strip_message_annotations_out/1"
        test = StripMessageAnnotationsOut ( addr, n_messages = 10 )
        test.run ( )
        self.assertEqual ( None, test.error )
    # Send in pre-existing trace and ingress and annotations and make sure
    # that they are not in the outgoing annotations.
    # stripAnnotations property is set to "in"
    def test_15_test_strip_message_annotations_in(self):
        addr = self.in_strip_addr + "/strip_message_annotations_in/1"
        test = StripMessageAnnotationsIn ( addr, n_messages = 10 )
        test.run ( )
        self.assertEqual ( None, test.error )
    def test_16_management(self):
        test = ManagementTest(self.address)
        test.run()
        self.assertEqual(None, test.error)
    def test_17_management_get_operations(self):
        test = ManagementGetOperationsTest(self.address)
        test.run()
        self.assertEqual(None, test.error)
    def test_18_management_not_implemented(self):
        test = ManagementNotImplemented(self.address)
        test.run()
        self.assertEqual(None, test.error)
    def test_19_semantics_multicast(self):
        test = SemanticsMulticast(self.address)
        test.run()
        self.assertEqual(None, test.error)
    def test_20_semantics_closest(self):
        test = SemanticsClosest(self.address)
        test.run()
        self.assertEqual(None, test.error)
    def test_21_semantics_balanced(self):
        test = SemanticsBalanced(self.address)
        test.run()
        self.assertEqual(None, test.error)
    def test_22_to_override(self):
        # NOTE(review): unlike the surrounding tests, this one does not
        # assert on test.error after run() — confirm whether intentional.
        test = MessageAnnotaionsPreExistingOverride(self.address)
        test.run()
    def test_23_send_settle_mode_settled(self):
        """
        The receiver sets a snd-settle-mode of settle thus indicating that it wants to receive settled messages from
        the sender. This tests make sure that the delivery that comes to the receiver comes as already settled.
        """
        send_settle_mode_test = SndSettleModeTest(self.address)
        send_settle_mode_test.run()
        self.assertTrue(send_settle_mode_test.message_received)
        self.assertTrue(send_settle_mode_test.delivery_already_settled)
    def test_24_excess_deliveries_released(self):
        """
        Message-route a series of deliveries where the receiver provides credit for a subset and
        once received, closes the link. The remaining deliveries should be released back to the sender.
        """
        test = ExcessDeliveriesReleasedTest(self.address)
        test.run()
        self.assertEqual(None, test.error)
    def test_25_multicast_unsettled(self):
        test = MulticastUnsettledTest(self.address)
        test.run()
        self.assertEqual(None, test.error)
    # NOTE(review): numbered out of sequence — "test_17" also appears earlier
    # in this class (test_17_management_get_operations); the names differ so
    # there is no collision, but this was presumably meant to be test_26.
    def test_17_multiframe_presettled(self):
        test = MultiframePresettledTest(self.address)
        test.run()
        self.assertEqual(None, test.error)
    def test_27_released_vs_modified(self):
        test = ReleasedVsModifiedTest(self.address)
        test.run()
        self.assertEqual(None, test.error)
    def test_28_appearance_of_balance(self):
        test = AppearanceOfBalanceTest(self.address)
        test.run()
        self.assertEqual(None, test.error)
    def test_29_batched_settlement(self):
        test = BatchedSettlementTest(self.address)
        test.run()
        self.assertEqual(None, test.error)
        self.assertTrue(test.accepted_count_match)
    def test_30_presettled_overflow(self):
        test = PresettledOverflowTest(self.address)
        test.run()
        self.assertEqual(None, test.error)
    def test_31_create_unavailable_sender(self):
        test = UnavailableSender(self.address)
        test.run()
        self.assertTrue(test.passed)
    def test_32_create_unavailable_receiver(self):
        test = UnavailableReceiver(self.address)
        test.run()
        self.assertTrue(test.passed)
    def test_33_large_streaming_test(self):
        test = LargeMessageStreamTest(self.address)
        test.run()
        self.assertEqual(None, test.error)
    def test_34_reject_coordinator(self):
        test = RejectCoordinatorTest(self.address)
        test.run()
        self.assertTrue(test.passed)
    def test_35_reject_disposition(self):
        test = RejectDispositionTest(self.address)
        test.run()
        self.assertTrue(test.received_error)
        self.assertTrue(test.reject_count_match)
    def test_37_connection_properties_unicode_string(self):
        """
        Tests connection property that is a map of unicode strings and integers
        """
        connection = BlockingConnection(self.router.addresses[0],
                                        timeout=TIMEOUT,
                                        properties=CONNECTION_PROPERTIES_UNICODE_STRING)
        client = SyncRequestResponse(connection)
        node = Node.connect(self.router.addresses[0])
        results = node.query(type='org.apache.qpid.dispatch.connection', attribute_names=[u'properties']).results
        found = False
        for result in results:
            if u'connection' in result[0] and u'int_property' in result[0]:
                found = True
                self.assertEqual(result[0][u'connection'], u'properties')
                self.assertEqual(result[0][u'int_property'], 6451)
        self.assertTrue(found)
        client.connection.close()
    def test_38_connection_properties_symbols(self):
        """
        Tests connection property that is a map of symbols
        """
        connection = BlockingConnection(self.router.addresses[0],
                                        timeout=TIMEOUT,
                                        properties=CONNECTION_PROPERTIES_SYMBOL)
        client = SyncRequestResponse(connection)
        node = Node.connect(self.router.addresses[0])
        results = node.query(type='org.apache.qpid.dispatch.connection', attribute_names=[u'properties']).results
        found = False
        for result in results:
            if u'connection' in result[0]:
                if result[0][u'connection'] == u'properties':
                    found = True
                    break
        self.assertTrue(found)
        client.connection.close()
    def test_40_anonymous_sender_no_receiver(self):
        test = AnonymousSenderNoRecvLargeMessagedTest(self.address)
        test.run()
        self.assertEqual(None, test.error)
    def test_41_large_streaming_close_conn_test(self):
        test = LargeMessageStreamCloseConnTest(self.address)
        test.run()
        self.assertEqual(None, test.error)
    def test_42_unsettled_large_message_test(self):
        test = UnsettledLargeMessageTest(self.address, 250)
        test.run()
        self.assertEqual(None, test.error)
    def test_43_dropped_presettled_receiver_stops(self):
        # Snapshot the router's ingress delivery count before running so the
        # test can compute the delta it caused.
        local_node = Node.connect(self.address, timeout=TIMEOUT)
        res = local_node.query('org.apache.qpid.dispatch.router')
        deliveries_ingress = res.attribute_names.index(
            'deliveriesIngress')
        ingress_delivery_count = res.results[0][deliveries_ingress]
        test = DroppedPresettledTest(self.address, 200, ingress_delivery_count)
        test.run()
        self.assertEqual(None, test.error)
    def test_44_delete_connection_fail(self):
        """
        This test creates a blocking connection and tries to update the adminStatus on that connection to "deleted".
        Since the policy associated with this router set allowAdminStatusUpdate as false,
        the update operation will not be permitted.
        """
        # Create a connection with some properties so we can easily identify the connection
        connection = BlockingConnection(self.address,
                                        properties=CONNECTION_PROPERTIES_UNICODE_STRING)
        query_command = 'QUERY --type=connection'
        outputs = json.loads(self.run_qdmanage(query_command))
        identity = None
        passed = False
        for output in outputs:
            if output.get('properties'):
                conn_properties = output['properties']
                # Find the connection that has our properties - CONNECTION_PROPERTIES_UNICODE_STRING
                # Delete that connection and run another qdmanage to see
                # if the connection is gone.
                if conn_properties.get('int_property'):
                    identity = output.get("identity")
                    if identity:
                        update_command = 'UPDATE --type=connection adminStatus=deleted --id=' + identity
                        try:
                            outputs = json.loads(self.run_qdmanage(update_command))
                        except Exception as e:
                            if "Forbidden" in str(e):
                                passed = True
        # The test has passed since we were not allowed to delete a connection
        # because we do not have the policy permission to do so.
        self.assertTrue(passed)
    def test_45_q2_holdoff_drop_stalled_rx(self):
        """
        Verify that dropping a slow consumer while in Q2 flow control does
        not hang the router
        """
        test = Q2HoldoffDropTest(self.router)
        test.run()
        self.assertEqual(None, test.error)
    def test_48_connection_uptime_last_dlv(self):
        test = ConnectionUptimeLastDlvTest(self.address, "test_48")
        test.run()
        self.assertEqual(None, test.error)
    def test_49_unexpected_release_test(self):
        """
        Verify that the on_released function is only called once for every
        released message. Without the fix for DISPATCH-1626, the on_released
        might be called twice for the same delivery.
        This test will fail once in five runs without the fix for DISPATCH-1626
        """
        test = UnexpectedReleaseTest(self.address)
        test.run()
        self.assertEqual(None, test.error)
class Entity(object):
    """A single row of a management response.

    Holds the response status plus an attribute map; unknown attribute
    lookups fall through to the map, so columns read as plain attributes
    (a missing column raises KeyError, matching dict access).
    """
    def __init__(self, status_code, status_description, attrs):
        self.status_code = status_code
        self.status_description = status_description
        self.attrs = attrs
    def __getattr__(self, key):
        # Invoked only when normal attribute lookup fails: delegate to the
        # underlying attribute map.
        return self.__dict__['attrs'][key]
class RouterProxy(object):
    """Builds management request messages and unpacks management responses."""
    def __init__(self, reply_addr):
        self.reply_addr = reply_addr
    def response(self, msg):
        """Convert a management response message into Entity objects.

        A query response (dict body carrying 'attributeNames' and 'results')
        yields a list of Entity, one per result row; any other body yields a
        single Entity wrapping the raw body.
        """
        ap = msg.properties
        body = msg.body
        is_query = isinstance(body, dict) and 'results' in body and 'attributeNames' in body
        if not is_query:
            return Entity(ap['statusCode'], ap['statusDescription'], msg.body)
        names = body['attributeNames']
        rows = []
        for row in body['results']:
            cols = {names[i]: value for i, value in enumerate(row)}
            rows.append(Entity(ap['statusCode'], ap['statusDescription'], cols))
        return rows
    def _request(self, props):
        # Build a management request addressed back to our reply queue.
        return Message(properties=props, reply_to=self.reply_addr)
    def read_address(self, name):
        return self._request({'operation': 'READ',
                              'type': 'org.apache.qpid.dispatch.router.address',
                              'name': name})
    def query_addresses(self):
        return self._request({'operation': 'QUERY',
                              'type': 'org.apache.qpid.dispatch.router.address'})
    def query_links(self):
        return self._request({'operation': 'QUERY',
                              'type': 'org.apache.qpid.dispatch.router.link'})
class ReleasedChecker(object):
    """One-shot reactor timer callback that notifies its parent test."""
    def __init__(self, parent):
        self.parent = parent
    def on_timer_task(self, event):
        # Fired by the reactor when the scheduled delay elapses.
        self.parent.released_check_timeout()
class UnexpectedReleaseTest(MessagingHandler):
    # Regression test for DISPATCH-1626: on_released must fire exactly once
    # per released message.  The receiver settles recv_messages_max messages
    # then drops its connection; the remaining num_messages -
    # recv_messages_max deliveries must come back to the sender as released,
    # with no duplicate release notifications.
    def __init__(self, address):
        super(UnexpectedReleaseTest, self).__init__(auto_accept=False)
        self.address = address
        self.dest = "UnexpectedReleaseTest"
        self.timer = None
        self.sender_conn = None
        self.receiver_conn = None
        self.sender = None
        self.receiver = None
        self.num_messages = 250
        self.recv_messages_max = 200
        self.num_sent = 0
        self.num_received = 0
        self.num_released = 0
        self.num_accepted = 0
        # Send a large message
        self.body = "123456789" * 8192
        self.receiver_conn_closed = False
        self.error = None
        self.released_checker = None
    def timeout(self):
        # Overall test timeout: record counters for diagnosis and tear down
        # whatever connections are still open.
        self.error = "Timeout Expired: sent=%d accepted=%d released=%d number excpected to be released=%d" % \
            (self.num_sent, self.num_accepted, self.num_released, self.num_messages - self.recv_messages_max)
        if not self.receiver_conn_closed:
            self.receiver_conn.close()
        self.sender_conn.close()
    def released_check_timeout(self):
        # Fired by ReleasedChecker 3 seconds after the expected number of
        # releases was observed; if no extra on_released arrived in the
        # meantime (which would have set self.error), finish cleanly.
        if not self.receiver_conn_closed:
            self.receiver_conn.close()
        self.sender_conn.close()
        self.timer.cancel()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.sender_conn = event.container.connect(self.address)
        self.receiver_conn = event.container.connect(self.address)
        self.receiver = event.container.create_receiver(self.receiver_conn, self.dest)
    def on_link_opened(self, event):
        if event.receiver == self.receiver:
            # Wait for the receiver to be created and then create the sender.
            self.sender = event.container.create_sender(self.sender_conn, self.dest)
    def on_sendable(self, event):
        if self.num_sent < self.num_messages:
            msg = Message(body=self.body)
            self.sender.send(msg)
            self.num_sent += 1
    def on_released(self, event):
        self.num_released += 1
        if self.num_released == self.num_messages - self.recv_messages_max:
            # We have received the expected number of calls to on_released
            # but without the fix for DISPATCH-1626 we expect the on_released
            # to be called an additional one or more times. We will kick off
            # a 3 second timer after which we will check if we got more
            # calls to on_released.
            self.released_checker = event.reactor.schedule(3, ReleasedChecker(self))
        if self.num_released > self.num_messages - self.recv_messages_max:
            # This if statement will be true if the client receives a 2 part
            # dispostion from the router like the following
            #
            # [0x562a0083ed80]:0 <- @disposition(21) [role=true, first=981, state=@released(38) []]
            # [0x562a0083ed80]:0 <- @disposition(21) [role=true, first=981, last=982, settled=true, state=@released(38) []]
            #
            self.error = "Expected %d calls to on_released but got %d" % (self.num_messages - self.recv_messages_max, self.num_released)
    def on_accepted(self, event):
        self.num_accepted +=1
    def on_message(self, event):
        if event.receiver == self.receiver:
            self.num_received += 1
            if self.num_received <= self.recv_messages_max:
                event.delivery.settle()
            if self.num_received == self.recv_messages_max:
                # Drop the receiving connection; the unconsumed deliveries
                # should then be released back to the sender.
                self.receiver_conn.close()
                self.receiver_conn_closed = True
    def run(self):
        Container(self).run()
class SemanticsClosest(MessagingHandler):
    # Verifies 'closest' address semantics on a single router: messages may
    # be spread across the three receivers, but every message number
    # 0..num_messages-1 must be delivered exactly once, and receivers B and
    # C must each have received at least one message.
    def __init__(self, address):
        super(SemanticsClosest, self).__init__()
        self.address = address
        self.dest = "closest.1"
        self.timer = None
        self.conn = None
        self.sender = None
        self.receiver_a = None
        self.receiver_b = None
        self.receiver_c = None
        self.num_messages = 100
        self.n_received_a = 0
        self.n_received_b = 0
        self.n_received_c = 0
        self.error = None
        self.n_sent = 0
        self.rx_set = []
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(self.conn, self.dest)
        # All three receivers are attached over the same connection (and
        # therefore the same router) as the sender.
        self.receiver_a = event.container.create_receiver(self.conn, self.dest, name="A")
        self.receiver_b = event.container.create_receiver(self.conn, self.dest, name="B")
        self.receiver_c = event.container.create_receiver(self.conn, self.dest, name="C")
    def timeout(self):
        self.error = "Timeout Expired: sent=%d rcvd=%d/%d/%d" % \
            (self.n_sent, self.n_received_a, self.n_received_b, self.n_received_c)
        self.conn.close()
    def check_if_done(self):
        # Done when all messages are accounted for, receivers B and C each
        # got at least one, and the collected message numbers (sorted) form
        # the exact sequence 0..num_messages-1 with no gaps or duplicates.
        if self.n_received_a + self.n_received_b + self.n_received_c == self.num_messages\
                and self.n_received_b != 0 and self.n_received_c != 0:
            self.rx_set.sort()
            all_messages_received = True
            for i in range(self.num_messages):
                if not i == self.rx_set[i]:
                    all_messages_received = False
            if all_messages_received:
                self.timer.cancel()
                self.conn.close()
    def on_sendable(self, event):
        if self.n_sent < self.num_messages:
            msg = Message(body={'number': self.n_sent})
            self.sender.send(msg)
            self.n_sent += 1
    def on_message(self, event):
        # Record per-receiver counts and the message sequence number.
        if event.receiver == self.receiver_a:
            self.n_received_a += 1
            self.rx_set.append(event.message.body['number'])
        if event.receiver == self.receiver_b:
            self.n_received_b += 1
            self.rx_set.append(event.message.body['number'])
        if event.receiver == self.receiver_c:
            self.n_received_c += 1
            self.rx_set.append(event.message.body['number'])
    def on_accepted(self, event):
        self.check_if_done()
    def run(self):
        Container(self).run()
class MessageAnnotaionsPreExistingOverride(MessagingHandler):
    """
    Send a message that already carries an 'x-opt-qd.to' annotation and
    verify the router delivers it with that annotation intact (not
    stripped or replaced).  (Class name typo 'Annotaions' is historical;
    kept for caller compatibility.)
    """
    def __init__(self, address):
        super(MessageAnnotaionsPreExistingOverride, self).__init__()
        self.address = address
        self.dest = "toov/1"
        # Default to failure; cleared by bail(None) on success.
        self.error = "Pre-existing x-opt-qd.to has been stripped"
        self.timer = None
        self.conn = None
        self.sender = None
        self.receiver = None
        self.msg_not_sent = True
    def timeout(self):
        self.error = "Timeout Expired: Sent message not received"
        self.conn.close()
    def bail(self, message):
        # Record outcome (None == success), close, cancel safety timer.
        self.error = message
        self.conn.close()
        self.timer.cancel()
    def on_sendable(self, event):
        if self.msg_not_sent:
            msg = Message(body={'number': 0})
            msg.annotations = {'x-opt-qd.to': 'toov/1'}
            event.sender.send(msg)
            self.msg_not_sent = False
    def on_message(self, event):
        if 0 == event.message.body['number']:
            ma = event.message.annotations
            if ma['x-opt-qd.to'] == 'toov/1':
                self.bail(None)
            else:
                self.bail("Pre-existing x-opt-qd.to has been stripped")
        else:
            self.bail("body does not match with the sent message body")
    def run(self):
        Container(self).run()
class SemanticsMulticast(MessagingHandler):
    """Multicast semantics check on "multicast.2" -- see __init__ docstring."""
    def __init__(self, address):
        """
        Verify that for every 1 unsettled mcast message received, N messages are sent
        out (where N == number of receivers). Assert that multiple received
        dispositions are summarized to send out one disposition.
        """
        super(SemanticsMulticast, self).__init__(auto_accept=False)
        self.address = address
        self.dest = "multicast.2"
        self.error = None
        self.n_sent = 0
        self.n_settled = 0
        # Number of receivers; also the expected total message copies.
        self.count = 3
        self.n_received_a = 0
        self.n_received_b = 0
        self.n_received_c = 0
        self.n_accepts = 0
        self.n_recv_ready = 0
        self.timer = None
        self.conn_1 = None
        self.conn_2 = None
        self.sender = None
        self.receiver_a = None
        self.receiver_b = None
        self.receiver_c = None
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn_1 = event.container.connect(self.address)
        self.conn_2 = event.container.connect(self.address)
        # Receivers are spread across two connections to the same router.
        self.receiver_a = event.container.create_receiver(self.conn_2, self.dest, name="A")
        self.receiver_b = event.container.create_receiver(self.conn_1, self.dest, name="B")
        self.receiver_c = event.container.create_receiver(self.conn_2, self.dest, name="C")
    def timeout(self):
        self.error = "Timeout Expired: sent=%d rcvd=%d/%d/%d" % \
                     (self.n_sent, self.n_received_a, self.n_received_b, self.n_received_c)
        self.conn_1.close()
        self.conn_2.close()
    def check_if_done(self):
        # Success requires: one copy delivered to each receiver, exactly one
        # summarized accept back at the sender, and one settle per copy.
        c = self.n_received_a + self.n_received_b + self.n_received_c
        if (c == self.count
                and self.n_received_a == self.n_received_b
                and self.n_received_c == self.n_received_b
                and self.n_accepts == self.n_sent
                and self.n_settled == self.count):
            self.timer.cancel()
            self.conn_1.close()
            self.conn_2.close()
    def on_link_opened(self, event):
        if event.receiver:
            self.n_recv_ready += 1
            # Create the sender only after all receiver links are attached,
            # so no copy of the multicast is missed.
            if self.n_recv_ready == self.count:
                self.sender = event.container.create_sender(self.conn_1, self.dest)
    def on_sendable(self, event):
        if self.n_sent == 0:
            msg = Message(body="SemanticsMulticast-Test")
            self.sender.send(msg)
            self.n_sent += 1
    def on_message(self, event):
        if event.receiver == self.receiver_a:
            self.n_received_a += 1
        if event.receiver == self.receiver_b:
            self.n_received_b += 1
        if event.receiver == self.receiver_c:
            self.n_received_c += 1
        event.delivery.update(Delivery.ACCEPTED)
    def on_accepted(self, event):
        self.n_accepts += 1
        event.delivery.settle()
    def on_settled(self, event):
        self.n_settled += 1
        self.check_if_done()
    def run(self):
        Container(self).run()
class ManagementNotImplemented(MessagingHandler):
    """
    Send a $management request whose operation ('NOT-IMPL') the router does
    not support and verify the reply carries statusCode 501.
    """
    def __init__(self, address):
        super(ManagementNotImplemented, self).__init__()
        self.address = address
        self.timer = None
        self.conn = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.error = None
    def timeout(self):
        self.error = "No response received for management request"
        self.conn.close()
    def bail(self, message):
        # Record outcome (None == success), close, cancel safety timer.
        self.error = message
        self.conn.close()
        self.timer.cancel()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(self.conn)
        # Dynamic receiver provides the reply-to address for the request.
        self.receiver = event.container.create_receiver(self.conn, None, dynamic=True)
    def on_link_opened(self, event):
        if event.receiver == self.receiver:
            request = Message()
            request.address = "amqp:/_local/$management"
            request.reply_to = event.receiver.remote_source.address
            request.properties = {u'type': u'org.amqp.management',
                                  u'name': u'self',
                                  u'operation': u'NOT-IMPL'}
            self.sender.send(request)
    def run(self):
        Container(self).run()
    def on_message(self, event):
        if event.receiver == self.receiver:
            if event.message.properties['statusCode'] == 501:
                self.bail(None)
            else:
                self.bail("The return status code is %s. It should be 501" % str(event.message.properties['statusCode']))
class ManagementGetOperationsTest(MessagingHandler):
    """
    Send a GET-OPERATIONS management request and verify the reply has
    statusCode 200, includes the 'org.apache.qpid.dispatch.router' entity
    type among its keys, and reports more than two entity types overall.
    """
    def __init__(self, address):
        super(ManagementGetOperationsTest, self).__init__()
        self.address = address
        self.timer = None
        self.conn = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.error = None
    def timeout(self):
        self.error = "No response received for management request"
        self.conn.close()
    def bail(self, message):
        # Record outcome (None == success), close, cancel safety timer.
        self.error = message
        self.conn.close()
        self.timer.cancel()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(self.conn)
        # Dynamic receiver provides the reply-to address for the request.
        self.receiver = event.container.create_receiver(self.conn, None, dynamic=True)
    def on_link_opened(self, event):
        if self.receiver == event.receiver:
            request = Message()
            request.address = "amqp:/_local/$management"
            request.reply_to = self.receiver.remote_source.address
            request.properties = {u'type':u'org.amqp.management', u'name':u'self', u'operation':u'GET-OPERATIONS'}
            self.sender.send(request)
    def run(self):
        Container(self).run()
    def on_message(self, event):
        if event.receiver == self.receiver:
            if event.message.properties['statusCode'] == 200:
                if 'org.apache.qpid.dispatch.router' in event.message.body.keys():
                    if len(event.message.body.keys()) > 2:
                        self.bail(None)
                    else:
                        self.bail('size of keys in message body less than or equal 2')
                else:
                    self.bail('org.apache.qpid.dispatch.router is not in the keys')
            else:
                self.bail("The return status code is %s. It should be 200" % str(event.message.properties['statusCode']))
class ManagementTest(MessagingHandler):
    """
    Issue two GET-MGMT-NODES management requests: one to this router's own
    $management address (correlation id "C1") and one addressed to router
    QDR.B via the topological address (correlation id "C2").  Each reply
    must carry statusCode 200, a non-None statusDescription, and an empty
    body list.  The test only succeeds when BOTH replies validate.
    """
    def __init__(self, address):
        super(ManagementTest, self).__init__()
        self.address = address
        self.timer = None
        self.conn = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.msg_not_sent = True
        self.error = None
        # Set True once a valid reply for the matching correlation id is seen.
        self.response1 = False
        self.response2 = False

    def timeout(self):
        # Report exactly which request(s) never received a valid reply.
        # Bug fixes vs. the original: it tested response1 twice (a missing
        # C2 reply was never reported) and could concatenate onto a None
        # self.error.
        missing = []
        if not self.response1:
            missing.append("Incorrect response received for message with correlation id C1")
        if not self.response2:
            missing.append("Incorrect response received for message with correlation id C2")
        if missing:
            self.error = " and ".join(missing)
        self.conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(self.conn)
        # Dynamic receiver provides the reply-to address for both requests.
        self.receiver = event.container.create_receiver(self.conn, None, dynamic=True)

    def on_link_opened(self, event):
        # Once the dynamic reply link is attached, send both requests.
        if event.receiver == self.receiver:
            request = Message()
            request.address = "amqp:/$management"
            request.reply_to = self.receiver.remote_source.address
            request.correlation_id = "C1"
            request.properties = {u'type': u'org.amqp.management', u'name': u'self', u'operation': u'GET-MGMT-NODES'}
            self.sender.send(request)
            request = Message()
            request.address = "amqp:/_topo/0/QDR.B/$management"
            request.correlation_id = "C2"
            request.reply_to = self.receiver.remote_source.address
            request.properties = {u'type': u'org.amqp.management', u'name': u'self', u'operation': u'GET-MGMT-NODES'}
            self.sender.send(request)

    def on_message(self, event):
        if event.receiver == self.receiver:
            if event.message.correlation_id == "C1":
                if event.message.properties['statusCode'] == 200 and \
                        event.message.properties['statusDescription'] is not None \
                        and event.message.body == []:
                    self.response1 = True
            elif event.message.correlation_id == "C2":
                if event.message.properties['statusCode'] == 200 and \
                        event.message.properties['statusDescription'] is not None \
                        and event.message.body == []:
                    self.response2 = True
            # Bug fix: the original checked `self.error is None` (true from
            # the start) and therefore closed the connection on the first
            # reply, never waiting for -- or validating -- the C2 response.
            if self.response1 and self.response2:
                self.error = None
                self.timer.cancel()
                self.conn.close()

    def run(self):
        Container(self).run()
class CustomTimeout(object):
    """
    Reactor timer task that polls the router's address table until the
    address 'balanced.1' appears, then tells its parent test to create
    its sender; reschedules itself every 2 seconds until then.
    """
    def __init__(self, parent):
        self.parent = parent

    def addr_text(self, addr):
        # Strip the routing-class prefix from a router address record:
        # mobile addresses ('M' + phase digit) drop two characters,
        # every other class drops one.
        if not addr:
            return ""
        return addr[2:] if addr[0] == 'M' else addr[1:]

    def on_timer_task(self, event):
        local_node = Node.connect(self.parent.address, timeout=TIMEOUT)
        outcome = local_node.query('org.apache.qpid.dispatch.router.address')
        name_idx = outcome.attribute_names.index('name')
        address_present = any(
            "balanced.1" == self.addr_text(row[name_idx])
            for row in outcome.results)
        if address_present:
            self.parent.cancel_custom()
            self.parent.create_sender(event)
        else:
            event.reactor.schedule(2, self)
class SemanticsBalanced(MessagingHandler):
    """
    Verify 'balanced' address semantics on "balanced.1": 250 numbered
    messages from one sender must be spread across three receivers (each
    getting at least one) with every number delivered exactly once.  The
    sender is created only after a CustomTimeout poll confirms the router
    has learned the address.
    """
    def __init__(self, address):
        super(SemanticsBalanced, self).__init__(auto_accept=False, prefetch=0)
        self.address = address
        self.dest = "balanced.1"
        self.timer = None
        self.conn = None
        self.sender = None
        self.receiver_a = None
        self.receiver_b = None
        self.receiver_c = None
        self.num_messages = 250
        self.n_received_a = 0
        self.n_received_b = 0
        self.n_received_c = 0
        self.error = None
        self.n_sent = 0
        # Message-body numbers seen so far, across all three receivers.
        self.rx_set = []
        self.custom_timer = None
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        # Poll until the router advertises the address, then create sender.
        self.custom_timer = event.reactor.schedule(2, CustomTimeout(self))
        self.conn = event.container.connect(self.address)
        # This receiver is on the same router as the sender
        self.receiver_a = event.container.create_receiver(self.conn, self.dest, name="A")
        # These two receivers are connected to a different router than the sender
        self.receiver_b = event.container.create_receiver(self.conn, self.dest, name="B")
        self.receiver_c = event.container.create_receiver(self.conn, self.dest, name="C")
        self.receiver_a.flow(100)
        self.receiver_b.flow(100)
        self.receiver_c.flow(100)
    def cancel_custom(self):
        self.custom_timer.cancel()
    def create_sender(self, event):
        # Called by CustomTimeout once the address is known to the router.
        self.sender = event.container.create_sender(self.conn, self.dest)
    def timeout(self):
        self.error = "Timeout Expired: sent=%d rcvd=%d/%d/%d" % \
                     (self.n_sent, self.n_received_a, self.n_received_b, self.n_received_c)
        self.conn.close()
    def check_if_done(self):
        # Done when all messages were delivered, no receiver was starved,
        # and the sorted body numbers form 0..249 (no gaps/duplicates).
        if self.n_received_a + self.n_received_b + self.n_received_c == self.num_messages and \
                self.n_received_a > 0 and self.n_received_b > 0 and self.n_received_c > 0:
            self.rx_set.sort()
            all_messages_received = True
            for i in range(self.num_messages):
                if not i == self.rx_set[i]:
                    all_messages_received = False
            if all_messages_received:
                self.timer.cancel()
                self.conn.close()
    def on_sendable(self, event):
        if self.n_sent < self.num_messages:
            msg = Message(body={'number': self.n_sent})
            self.sender.send(msg)
            self.n_sent += 1
    def on_message(self, event):
        if event.receiver == self.receiver_a:
            self.n_received_a += 1
            self.rx_set.append(event.message.body['number'])
        elif event.receiver == self.receiver_b:
            self.n_received_b += 1
            self.rx_set.append(event.message.body['number'])
        elif event.receiver == self.receiver_c:
            self.n_received_c += 1
            self.rx_set.append(event.message.body['number'])
        self.check_if_done()
    def run(self):
        Container(self).run()
class PreSettled ( MessagingHandler ) :
    """
    Send n_messages presettled (settled-by-sender) messages and succeed
    once the receiver has counted them all.
    """
    def __init__ ( self,
                   addr,
                   n_messages
                 ) :
        super ( PreSettled, self ) . __init__ ( prefetch = n_messages )
        self.addr = addr
        self.n_messages = n_messages
        self.sender = None
        self.receiver = None
        self.n_sent = 0
        self.n_received = 0
        self.error = None
        self.test_timer = None
    def run ( self ) :
        Container(self).run()
    def bail ( self, travail ) :
        # Record outcome (None == success), close both connections,
        # cancel the safety timer.
        self.error = travail
        self.send_conn.close ( )
        self.recv_conn.close ( )
        self.test_timer.cancel ( )
    def timeout ( self ):
        self.bail ( "Timeout Expired: %d messages received, %d expected." % (self.n_received, self.n_messages) )
    def on_start ( self, event ):
        self.send_conn = event.container.connect ( self.addr )
        self.recv_conn = event.container.connect ( self.addr )
        self.sender = event.container.create_sender ( self.send_conn, self.addr )
        # NOTE(review): the receiver is created on send_conn, leaving
        # recv_conn unused except for the close in bail() -- possibly meant
        # to be recv_conn; confirm before changing.
        self.receiver = event.container.create_receiver ( self.send_conn, self.addr )
        self.receiver.flow ( self.n_messages )
        self.test_timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
    def on_sendable ( self, event ) :
        while self.n_sent < self.n_messages :
            if event.sender.credit < 1 :
                break
            msg = Message ( body = self.n_sent )
            # Presettle the delivery.
            dlv = self.sender.send ( msg )
            dlv.settle()
            self.n_sent += 1
    def on_message ( self, event ) :
        self.n_received += 1
        if self.n_received >= self.n_messages :
            self.bail ( None )
class PresettledCustomTimeout(object):
    """
    Timer task fired by DroppedPresettledTest after its receiver has gone
    away: queries the router's deliveriesIngress counter to verify the
    router kept consuming the sender's presettled stream instead of
    stalling (DISPATCH-1213).
    """
    def __init__(self, parent):
        self.parent = parent
    def on_timer_task(self, event):
        local_node = Node.connect(self.parent.addr, timeout=TIMEOUT)
        res = local_node.query('org.apache.qpid.dispatch.router')
        deliveries_ingress = res.attribute_names.index(
            'deliveriesIngress')
        ingress_delivery_count = res.results[0][deliveries_ingress]
        self.parent.cancel_custom()
        # Without the fix for DISPATCH--1213 the ingress count will be less than
        # 200 because the sender link has stalled. The q2_holdoff happened
        # and so all the remaining messages are still in the
        # proton buffers.
        # NOTE(review): success requires the delta to be STRICTLY greater
        # than n_messages -- presumably management traffic accounts for the
        # extra deliveries; confirm before tightening to >=.
        if ingress_delivery_count - self.parent.begin_ingress_count > self.parent.n_messages:
            self.parent.bail(None)
        else:
            self.parent.bail("Messages sent to the router is %d, "
                             "Messages processed by the router is %d" %
                             (self.parent.n_messages,
                              ingress_delivery_count - self.parent.begin_ingress_count))
class DroppedPresettledTest(MessagingHandler):
    """
    Stream n_messages large (~128 KB body) presettled messages at address
    "test_43" while the receiver quits after only max_receive messages,
    then use PresettledCustomTimeout to check (via the router's
    deliveriesIngress counter) that the router kept consuming the whole
    stream rather than stalling the sender link.
    """
    def __init__(self, addr, n_messages, begin_ingress_count):
        super (DroppedPresettledTest, self).__init__()
        self.addr = addr
        self.n_messages = n_messages
        self.sender = None
        self.receiver = None
        self.sender_conn = None
        self.recv_conn = None
        self.n_sent = 0
        self.n_received = 0
        self.error = None
        self.test_timer = None
        # Receiver closes its connection after this many messages.
        self.max_receive = 10
        self.custom_timer = None
        self.timer = None
        # deliveriesIngress snapshot taken by the caller before the run.
        self.begin_ingress_count = begin_ingress_count
        # Build a ~128 KB payload string (8192 * 16 chars).
        self.str1 = "0123456789abcdef"
        self.msg_str = ""
        for i in range(8192):
            self.msg_str += self.str1
    def run (self):
        Container(self).run()
    def bail(self, travail):
        # Record outcome (None == success), close connections, cancel timer.
        self.error = travail
        self.sender_conn.close()
        if self.recv_conn:
            self.recv_conn.close()
        self.timer.cancel()
    def timeout(self,):
        self.bail("Timeout Expired: %d messages received, %d expected." %
                  (self.n_received, self.n_messages))
    def on_start (self, event):
        self.sender_conn = event.container.connect(self.addr)
        self.recv_conn = event.container.connect(self.addr)
        self.receiver = event.container.create_receiver(self.recv_conn,
                                                        "test_43")
        self.sender = event.container.create_sender(self.sender_conn,
                                                    "test_43")
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
    def cancel_custom(self):
        self.custom_timer.cancel()
    def on_sendable(self, event):
        while self.n_sent < self.n_messages:
            msg = Message(id=(self.n_sent + 1),
                          body={'sequence': (self.n_sent + 1),
                                'msg_str': self.msg_str})
            # Presettle the delivery.
            dlv = self.sender.send (msg)
            dlv.settle()
            self.n_sent += 1
    def on_message(self, event):
        self.n_received += 1
        if self.n_received == self.max_receive:
            # Receiver bails after receiving max_receive messages.
            self.receiver.close()
            self.recv_conn.close()
            # The sender is only sending 200 large messages which is less
            # that the initial credit of 250 that the router gives.
            # Lets do a qdstat to find out if all 200 messages is handled
            # by the router.
            self.custom_timer = event.reactor.schedule(1,
                                                       PresettledCustomTimeout(
                                                           self))
class MulticastUnsettled ( MessagingHandler ) :
    """
    Send n_messages unsettled messages to a multicast address with
    n_receivers attached; each receiver must get every message, in order
    (bodies count 0..n_messages-1).  Receivers settle; the sender does not.
    """
    def __init__ ( self,
                   addr,
                   n_messages,
                   n_receivers
                 ) :
        super ( MulticastUnsettled, self ) . __init__ (auto_accept=False, prefetch=n_messages)
        self.addr = addr
        self.n_messages = n_messages
        self.n_receivers = n_receivers
        self.sender = None
        self.receivers = list ( )
        self.n_sent = 0
        # Per-receiver count of in-order messages received.
        self.n_received = list ( )
        self.error = None
        self.test_timer = None
        self.bailing = False
    def run ( self ) :
        Container(self).run()
    def bail ( self, travail ) :
        # Record outcome (None == success), close connections, cancel timer.
        self.bailing = True
        self.error = travail
        self.send_conn.close ( )
        self.recv_conn.close ( )
        self.test_timer.cancel ( )
    def timeout ( self ):
        self.bail ( "Timeout Expired" )
    def on_start ( self, event ):
        self.recv_conn = event.container.connect ( self.addr )
        for i in range ( self.n_receivers ) :
            rcvr = event.container.create_receiver ( self.recv_conn, self.addr, name = "receiver_" + str(i) )
            rcvr.flow ( self.n_messages )
        self.test_timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
    def on_link_opened(self, event):
        if event.receiver:
            self.receivers.append(event.receiver)
            self.n_received.append(0)
            # start the sender once all receivers links are up
            if len(self.receivers) == self.n_receivers:
                self.send_conn = event.container.connect(self.addr)
                self.sender = event.container.create_sender(self.send_conn, self.addr)
    def on_sendable ( self, event ) :
        # The inner for-loop sends the full batch in a single pass; the
        # outer while then terminates because n_sent == n_messages.
        while self.n_sent < self.n_messages :
            if event.sender.credit < 1 :
                break
            for i in range ( self.n_messages ) :
                msg = Message ( body = i )
                # The sender does not settle, but the
                # receivers will..
                self.sender.send ( msg )
                self.n_sent += 1
    def on_message ( self, event ) :
        if self.bailing :
            return
        event.delivery.settle()
        for i in range ( self.n_receivers ) :
            if event.receiver == self.receivers [ i ] :
                # Body conetnts of the messages count from 0 ... n,
                # so the contents of this message should be same as
                # the current number of messages received by this receiver.
                if self.n_received [ i ] != event.message.body :
                    self.bail ( "out of order or missed message: receiver %d got %d instead of %d" %
                                ( i, event.message.body, self.n_received [ i ] )
                              )
                    return
                self.n_received [ i ] += 1
                self.check_n_received ( )
    def check_n_received ( self ) :
        for i in range ( self.n_receivers ) :
            if self.n_received [ i ] < self.n_messages :
                return
        # All messages have been received by all receivers.
        self.bail ( None )
class DispositionReturnsToClosedConnection ( MessagingHandler ) :
    """
    Send all messages, then immediately close the sending connection so
    the returning dispositions find no one to deliver to.  Passing means
    the receiver still gets everything and the router survives; the only
    directly detectable failure is a test timeout.
    """
    def __init__ ( self,
                   addr,
                   n_messages
                 ) :
        super ( DispositionReturnsToClosedConnection, self ) . __init__ ( prefetch = n_messages )
        self.addr = addr
        self.n_messages = n_messages
        self.n_sent = 0
        self.n_received = 0
    def run ( self ) :
        Container(self).run()
    def bail ( self, travail ) :
        # Record outcome (None == success); the send connection may already
        # have been closed and nulled by on_sendable.
        self.bailing = True
        self.test_timer.cancel ( )
        self.error = travail
        if self.send_conn :
            self.send_conn.close ( )
        self.recv_conn.close ( )
    def timeout ( self ) :
        self.bail ( "Timeout Expired" )
    def on_start ( self, event ):
        self.send_conn = event.container.connect ( self.addr )
        self.recv_conn = event.container.connect ( self.addr )
        self.sender = event.container.create_sender ( self.send_conn, self.addr )
        self.receiver = event.container.create_receiver ( self.recv_conn, self.addr )
        self.test_timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
    def on_sendable ( self, event ) :
        if not self.send_conn :
            return
        while self.n_sent < self.n_messages :
            if event.sender.credit < 1 :
                break
            msg = Message ( body = self.n_sent )
            self.sender.send ( msg )
            self.n_sent += 1
        # Immediately upon finishing sending all the messages, the
        # sender closes its connection, so that when the dispositions
        # try to come back they will find no one who cares.
        # The only problem I can directly detect here is a test
        # timeout. And, indirectly, we are making sure that the router
        # does not blow sky high.
        if self.n_sent >= self.n_messages :
            self.send_conn.close()
            self.send_conn = None
    # On the receiver side, we keep accepting and settling
    # messages, tragically unaware that no one cares.
    def on_message ( self, event ) :
        event.delivery.update ( Delivery.ACCEPTED )
        event.delivery.settle ( )
        self.n_received += 1
        if self.n_received >= self.n_messages :
            self.bail ( None )
class SenderSettlesFirst(MessagingHandler):
    """
    Sender settles each delivery immediately after sending; the receiver
    settles on arrival.  Success once all n_messages are received.
    """

    def __init__(self, addr, n_messages):
        super(SenderSettlesFirst, self).__init__(prefetch=n_messages)
        self.addr = addr
        self.n_messages = n_messages
        self.test_timer = None
        self.sender = None
        self.receiver = None
        self.n_sent = 0
        self.n_received = 0

    def run(self):
        Container(self).run()

    def bail(self, travail):
        # Record outcome (None == success), tear everything down.
        self.bailing = True
        self.error = travail
        self.send_conn.close()
        self.recv_conn.close()
        self.test_timer.cancel()

    def timeout(self):
        self.bail("Timeout Expired")

    def on_start(self, event):
        self.send_conn = event.container.connect(self.addr)
        self.recv_conn = event.container.connect(self.addr)
        self.sender = event.container.create_sender(self.send_conn, self.addr)
        self.receiver = event.container.create_receiver(self.recv_conn, self.addr)
        self.test_timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))

    def on_sendable(self, event):
        # Send while credit lasts, settling each delivery right away.
        while self.n_sent < self.n_messages and event.sender.credit >= 1:
            delivery = self.sender.send(Message(body=self.n_sent))
            delivery.settle()
            self.n_sent += 1

    def on_message(self, event):
        self.n_received += 1
        event.delivery.settle()
        if self.n_received >= self.n_messages:
            self.bail(None)
class PropagatedDisposition ( MessagingHandler ) :
    """
    Verify that ACCEPTED dispositions set by the receiver propagate back
    to the sender: each delivery must be pending after send, and no longer
    pending (remote state ACCEPTED) once the sender sees the accept.
    """
    def __init__ ( self,
                   addr,
                   n_messages
                 ) :
        super ( PropagatedDisposition, self ) . __init__ ( prefetch = n_messages )
        self.addr = addr
        self.n_messages = n_messages
        self.test_timer = None
        self.sender = None
        self.receiver = None
        self.n_sent = 0
        self.n_received = 0
        self.n_accepted = 0
        self.bailing = False
    def run ( self ) :
        Container(self).run()
    def bail ( self, travail ) :
        # Record outcome (None == success), close connections, cancel timer.
        self.bailing = True
        self.error = travail
        self.send_conn.close ( )
        self.recv_conn.close ( )
        self.test_timer.cancel ( )
    def timeout ( self ):
        self.bail ( "Timeout Expired" )
    def on_start ( self, event ):
        self.send_conn = event.container.connect ( self.addr )
        self.recv_conn = event.container.connect ( self.addr )
        self.sender = event.container.create_sender ( self.send_conn, self.addr )
        self.receiver = event.container.create_receiver ( self.recv_conn, self.addr )
        self.test_timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
    # Sender Side ================================================
    def on_sendable ( self, event ) :
        if self.bailing :
            return
        while self.n_sent < self.n_messages :
            if event.sender.credit < 1 :
                break
            msg = Message ( body = self.n_sent )
            dlv = self.sender.send ( msg )
            # Freshly-sent, unsettled deliveries must be pending with no
            # remote state yet.
            if dlv.remote_state != 0 :
                self.bail ( "remote state nonzero on send." )
                break
            if not dlv.pending :
                self.bail ( "dlv not pending immediately after send." )
                break
            self.n_sent += 1
    def on_accepted ( self, event ) :
        if self.bailing :
            return
        dlv = event.delivery
        if dlv.pending :
            self.bail ( "Delivery still pending after accepted." )
            return
        if dlv.remote_state != Delivery.ACCEPTED :
            self.bail ( "Delivery remote state is not ACCEPTED after accept." )
            return
        self.n_accepted += 1
        if self.n_accepted >= self.n_messages :
            # Success!
            self.bail ( None )
    # Receiver Side ================================================
    def on_message ( self, event ) :
        if self.bailing :
            return
        self.n_received += 1
        dlv = event.delivery
        if dlv.pending :
            self.bail ( 'Delivery still pending at receiver.' )
            return
        if dlv.local_state != 0 :
            self.bail ( 'At receiver: delivery local state nonzero at receiver before accept.' )
            return
        dlv.update ( Delivery.ACCEPTED )
class UsettledUndeliverable ( MessagingHandler ) :
    """
    Deliberately create no receiver, then confirm the sender never gets
    credit to send: here the TIMEOUT expiring with n_sent == 0 is the
    SUCCESS path.  (Class name typo 'Usettled' is historical; kept for
    caller compatibility.)
    """
    def __init__ ( self,
                   addr,
                   n_messages
                 ) :
        super ( UsettledUndeliverable, self ) . __init__ ( prefetch = n_messages )
        self.addr = addr
        self.n_messages = n_messages
        self.test_timer = None
        self.sender = None
        self.n_sent = 0
        self.n_received = 0
        self.bailing = False
    def run ( self ) :
        Container(self).run()
    def bail ( self, travail ) :
        # Record outcome (None == success), close, cancel safety timer.
        self.bailing = True
        self.error = travail
        self.send_conn.close ( )
        self.test_timer.cancel ( )
    def timeout ( self ):
        # Inverted semantics: timing out WITHOUT having sent is success.
        if self.n_sent > 0 :
            self.bail ( "Messages sent with no receiver." )
        else :
            self.bail ( None )
    def on_start ( self, event ):
        self.send_conn = event.container.connect ( self.addr )
        self.sender = event.container.create_sender ( self.send_conn, self.addr )
        # Uh-oh. We are not creating a receiver!
        self.test_timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
    def on_sendable ( self, event ) :
        while self.n_sent < self.n_messages :
            msg = Message ( body = self.n_sent )
            dlv = self.sender.send ( msg )
            dlv.settle()
            self.n_sent += 1
    def on_message ( self, event ) :
        self.n_received += 1
class ThreeAck ( MessagingHandler ) :
    """
    Exercise the two-ack settlement pattern: receiver accepts each message
    in order; when the sender sees the accept it settles.  Success once
    all n_messages are accepted at the sender.
    """
    def __init__ ( self,
                   addr,
                   n_messages
                 ) :
        super ( ThreeAck, self ) . __init__ ( prefetch = n_messages )
        self.addr = addr
        self.n_messages = n_messages
        self.test_timer = None
        self.sender = None
        self.receiver = None
        self.n_sent = 0
        self.n_received = 0
        self.n_accepted = 0
        self.bailing = False
        # Holds the first delivery seen at the receiver (see note at end).
        self.tmp_dlv = None
    def run ( self ) :
        Container(self).run()
    def bail ( self, travail ) :
        # Record outcome (None == success), close connections, cancel timer.
        self.bailing = True
        self.error = travail
        self.send_conn.close ( )
        self.recv_conn.close ( )
        self.test_timer.cancel ( )
    def timeout ( self ):
        self.bail ( "Timeout Expired" )
    def on_start ( self, event ):
        self.send_conn = event.container.connect ( self.addr )
        self.recv_conn = event.container.connect ( self.addr )
        self.sender = event.container.create_sender ( self.send_conn, self.addr )
        self.receiver = event.container.create_receiver ( self.recv_conn, self.addr )
        self.test_timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
    # Sender Side ================================================
    def on_sendable ( self, event ) :
        if self.bailing :
            return
        while self.n_sent < self.n_messages :
            if event.sender.credit < 1 :
                break
            msg = Message ( body = self.n_sent )
            dlv = self.sender.send ( msg )
            self.n_sent += 1
    def on_accepted ( self, event ) :
        if self.bailing :
            return
        dlv = event.delivery
        if dlv.remote_state != Delivery.ACCEPTED :
            self.bail ( "Delivery remote state is not ACCEPTED in on_accepted." )
            return
        # When sender knows that receiver has accepted, we settle.
        # That's two-ack.
        dlv.settle()
        self.n_accepted += 1
        if self.n_accepted >= self.n_messages :
            # Success!
            self.bail ( None )
    # Receiver Side ================================================
    def on_message ( self, event ) :
        if self.bailing :
            return
        dlv = event.delivery
        dlv.update ( Delivery.ACCEPTED )
        if event.message.body != self.n_received :
            self.bail ( "out-of-order message" )
            return
        self.n_received += 1
        if self.tmp_dlv == None :
            self.tmp_dlv = dlv
    # We have no way, on receiver side, of tracking when sender settles.
    # See PROTON-395 .
class MessageAnnotations ( MessagingHandler ) :
    """
    Send four messages with different router-annotation scenarios (none,
    overridden ingress, invalid trace type, prepended trace entry) and
    verify the annotations the router attaches on each delivery.
    """
    def __init__ ( self,
                   addr,
                   n_messages
                 ) :
        super ( MessageAnnotations, self ) . __init__ ( prefetch = n_messages )
        self.addr = addr
        self.n_messages = n_messages
        self.test_timer = None
        self.sender = None
        self.receiver = None
        self.n_sent = 0
        self.n_received = 0
        self.bailing = False
    def run ( self ) :
        Container(self).run()
    def bail ( self, travail ) :
        # Record outcome (None == success), close connections, cancel timer.
        self.bailing = True
        self.error = travail
        self.send_conn.close ( )
        self.recv_conn.close ( )
        self.test_timer.cancel ( )
    def timeout ( self ):
        self.bail ( "Timeout Expired" )
    def on_start ( self, event ):
        self.send_conn = event.container.connect ( self.addr )
        self.recv_conn = event.container.connect ( self.addr )
        self.sender = event.container.create_sender ( self.send_conn, self.addr )
        self.receiver = event.container.create_receiver ( self.recv_conn, self.addr )
        self.test_timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
    def on_sendable ( self, event ) :
        if event.sender.credit < 1 :
            return
        # No added annotations.
        msg = Message ( body = self.n_sent )
        self.n_sent += 1
        self.sender.send ( msg )
        # Add an annotation.
        msg = Message ( body = self.n_sent )
        self.n_sent += 1
        msg.annotations = { 'x-opt-qd.ingress': 'i_changed_the_annotation' }
        self.sender.send ( msg )
        # Try to supply an invalid type for trace.
        msg = Message ( body = self.n_sent )
        self.n_sent += 1
        msg.annotations = { 'x-opt-qd.trace' : 45 }
        self.sender.send ( msg )
        # Add a value to the trace list.
        msg = Message ( body = self.n_sent )
        self.n_sent += 1
        msg.annotations = { 'x-opt-qd.trace' : [ '0/first-hop' ] }
        self.sender.send ( msg )
    def on_message ( self, event ) :
        ingress_router_name = '0/QDR'
        self.n_received += 1
        # NOTE(review): when n_received reaches n_messages this bails with
        # success BEFORE validating the final message's annotations -- the
        # branch for that message below is then unreachable; confirm
        # whether that is intended.
        if self.n_received >= self.n_messages :
            self.bail ( None )
            return
        annotations = event.message.annotations
        if self.n_received == 1 :
            if annotations [ 'x-opt-qd.ingress' ] != ingress_router_name :
                self.bail ( 'Bad ingress router name on msg %d' % self.n_received )
                return
            if annotations [ 'x-opt-qd.trace' ] != [ ingress_router_name ] :
                self.bail ( 'Bad trace on msg %d.' % self.n_received )
                return
        elif self.n_received == 2 :
            if annotations [ 'x-opt-qd.ingress' ] != 'i_changed_the_annotation' :
                self.bail ( 'Bad ingress router name on msg %d' % self.n_received )
                return
            if annotations [ 'x-opt-qd.trace' ] != [ ingress_router_name ] :
                self.bail ( 'Bad trace on msg %d .' % self.n_received )
                return
        elif self.n_received == 3 :
            # The invalid type for trace has no effect.
            if annotations [ 'x-opt-qd.ingress' ] != ingress_router_name :
                self.bail ( 'Bad ingress router name on msg %d ' % self.n_received )
                return
            if annotations [ 'x-opt-qd.trace' ] != [ ingress_router_name ] :
                self.bail ( 'Bad trace on msg %d' % self.n_received )
                return
        elif self.n_received == 4 :
            if annotations [ 'x-opt-qd.ingress' ] != ingress_router_name :
                self.bail ( 'Bad ingress router name on msg %d ' % self.n_received )
                return
            # The sender prepended a value to the trace list.
            if annotations [ 'x-opt-qd.trace' ] != [ '0/first-hop', ingress_router_name ] :
                self.bail ( 'Bad trace on msg %d' % self.n_received )
                return
            # success
            self.bail ( None )
class StripMessageAnnotationsCustom ( MessagingHandler ) :
    """
    Send messages carrying a user (non 'x-opt-qd.*') annotation and verify
    it arrives unaltered at the receiver.
    """
    def __init__ ( self,
                   addr,
                   n_messages
                 ) :
        super ( StripMessageAnnotationsCustom, self ) . __init__ ( prefetch = n_messages )
        self.addr = addr
        self.n_messages = n_messages
        self.test_timer = None
        self.sender = None
        self.receiver = None
        self.n_sent = 0
        self.n_received = 0
    def run ( self ) :
        Container(self).run()
    def bail ( self, travail ) :
        # Record outcome (None == success), close connections, cancel timer.
        self.bailing = True
        self.error = travail
        self.send_conn.close ( )
        self.recv_conn.close ( )
        self.test_timer.cancel ( )
    def timeout ( self ):
        self.bail ( "Timeout Expired" )
    def on_start ( self, event ):
        self.send_conn = event.container.connect ( self.addr )
        self.recv_conn = event.container.connect ( self.addr )
        self.sender = event.container.create_sender ( self.send_conn, self.addr )
        self.receiver = event.container.create_receiver ( self.recv_conn, self.addr )
        self.test_timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
    def on_sendable ( self, event ) :
        while self.n_sent < self.n_messages :
            if event.sender.credit < 1 :
                break
            msg = Message ( body = self.n_sent )
            self.n_sent += 1
            msg.annotations = { 'custom-annotation' : '1/Custom_Annotation' }
            self.sender.send ( msg )
    def on_message ( self, event ) :
        self.n_received += 1
        if not 'custom-annotation' in event.message.annotations :
            self.bail ( 'custom annotation not found' )
            return
        if event.message.annotations [ 'custom-annotation'] != '1/Custom_Annotation' :
            self.bail ( 'custom annotation bad value' )
            return
        if self.n_received >= self.n_messages :
            # success
            self.bail ( None )
class StripMessageAnnotationsNo(MessagingHandler):
    """
    Send messages with an explicitly empty annotation map through an
    address whose 'stripAnnotations' setting is 'no'.  The receiver must
    still see the standard router annotations (x-opt-qd.ingress and
    x-opt-qd.trace), since nothing is stripped on egress.
    """

    def __init__(self, addr, n_messages):
        super(StripMessageAnnotationsNo, self).__init__(prefetch=n_messages)
        self.addr = addr
        self.n_messages = n_messages
        self.test_timer = None
        self.sender = None
        self.receiver = None
        self.n_sent = 0
        self.n_received = 0

    def run(self):
        Container(self).run()

    def bail(self, travail):
        # travail is None on success, otherwise an error description.
        self.bailing = True
        self.error = travail
        self.send_conn.close()
        self.recv_conn.close()
        self.test_timer.cancel()

    def timeout(self):
        self.bail("Timeout Expired")

    def on_start(self, event):
        self.send_conn = event.container.connect(self.addr)
        self.recv_conn = event.container.connect(self.addr)
        self.sender = event.container.create_sender(self.send_conn, self.addr)
        self.receiver = event.container.create_receiver(self.recv_conn, self.addr)
        self.test_timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))

    def on_sendable(self, event):
        while self.n_sent < self.n_messages:
            if event.sender.credit < 1:
                break
            # Send with no annotations of our own; any standard
            # annotations seen downstream must come from the router.
            outgoing = Message(body=self.n_sent)
            outgoing.annotations = {}
            self.n_sent += 1
            self.sender.send(outgoing)

    def on_message(self, event):
        self.n_received += 1
        notes = event.message.annotations
        if notes['x-opt-qd.ingress'] != '0/QDR':
            self.bail("x-opt-qd.ingress annotation has been stripped!")
            return
        if notes['x-opt-qd.trace'] != ['0/QDR']:
            self.bail("x-opt-qd.trace annotations has been stripped!")
            return
        if self.n_received >= self.n_messages:
            # success
            self.bail(None)
class StripMessageAnnotationsNoAddTrace ( MessagingHandler ) :
    """
    Send messages that already carry a custom annotation plus pre-set
    standard router annotations (x-opt-qd.ingress and x-opt-qd.trace)
    through an address with 'stripAnnotations' set to 'no'.  Nothing may
    be stripped; the router appends its own id to the supplied trace
    list and the custom annotation passes through untouched.
    """
    def __init__ ( self,
                   addr,
                   n_messages
                 ) :
        super ( StripMessageAnnotationsNoAddTrace, self ) . __init__ ( prefetch = n_messages )
        self.addr = addr
        self.n_messages = n_messages
        self.test_timer = None
        self.sender = None
        self.receiver = None
        self.n_sent = 0
        self.n_received = 0

    def run ( self ) :
        Container(self).run()

    def bail ( self, travail ) :
        # travail is None on success, otherwise an error description.
        self.bailing = True
        self.error = travail
        self.send_conn.close ( )
        self.recv_conn.close ( )
        self.test_timer.cancel ( )

    def timeout ( self ):
        self.bail ( "Timeout Expired" )

    def on_start ( self, event ):
        self.send_conn = event.container.connect ( self.addr )
        self.recv_conn = event.container.connect ( self.addr )
        self.sender = event.container.create_sender ( self.send_conn, self.addr )
        self.receiver = event.container.create_receiver ( self.recv_conn, self.addr )
        self.test_timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))

    def on_sendable ( self, event ) :
        while self.n_sent < self.n_messages :
            if event.sender.credit < 1 :
                break
            msg = Message ( body = self.n_sent )
            annotations = {'Canis_meus' : 'id_comedit',
                           'x-opt-qd.ingress': 'ingress-router',
                           'x-opt-qd.trace': ['0/QDR.1']
                           }
            self.n_sent += 1
            # Attach a custom annotation and pre-set standard router
            # annotations.  Because 'stripAnnotations' is 'no', the
            # receiver must get all of them back: the supplied ingress
            # value preserved and the router's id appended to the trace.
            msg.annotations = annotations
            self.sender.send ( msg )

    def on_message ( self, event ) :
        self.n_received += 1
        notes = event.message.annotations
        if not isinstance(notes, dict):
            self.bail("annotations are not a dictionary")
            return
        # No annotations should get stripped -- neither the ones that
        # the router adds, nor the custom one that I added.
        if not 'x-opt-qd.ingress' in notes :
            self.bail ( 'x-opt-qd.ingress annotation missing' )
            return
        if not 'x-opt-qd.trace' in notes :
            self.bail ( 'x-opt-qd.trace annotation missing' )
            return
        if not 'Canis_meus' in notes :
            self.bail ( 'Canis_meus annotation missing' )
            return
        if notes [ 'x-opt-qd.ingress' ] != 'ingress-router' :
            self.bail ( "x-opt-qd.ingress bad value" )
            return
        # The router appends its own id ('0/QDR') to the trace we sent.
        if notes [ 'x-opt-qd.trace' ] != ['0/QDR.1', '0/QDR'] :
            self.bail ( "x-opt-qd.trace bad value" )
            return
        if notes [ 'Canis_meus' ] != 'id_comedit' :
            self.bail ( "Canis_meus bad value" )
            return
        if self.n_received >= self.n_messages :
            # success
            self.bail ( None )
class StripMessageAnnotationsBoth ( MessagingHandler ) :
    """
    Send messages carrying a custom annotation plus pre-set standard
    router annotations through an address with 'stripAnnotations' set to
    'both'.  The standard router annotations must be stripped in both
    directions, while the custom annotation must survive.
    """
    def __init__ ( self,
                   addr,
                   n_messages
                 ) :
        super ( StripMessageAnnotationsBoth, self ) . __init__ ( prefetch = n_messages )
        self.addr = addr
        self.n_messages = n_messages
        self.test_timer = None
        self.sender = None
        self.receiver = None
        self.n_sent = 0
        self.n_received = 0

    def run ( self ) :
        Container(self).run()

    def bail ( self, travail ) :
        # travail is None on success, otherwise an error description.
        self.bailing = True
        self.error = travail
        self.send_conn.close ( )
        self.recv_conn.close ( )
        self.test_timer.cancel ( )

    def timeout ( self ):
        self.bail ( "Timeout Expired" )

    def on_start ( self, event ):
        self.send_conn = event.container.connect ( self.addr )
        self.recv_conn = event.container.connect ( self.addr )
        self.sender = event.container.create_sender ( self.send_conn, self.addr )
        self.receiver = event.container.create_receiver ( self.recv_conn, self.addr )
        self.test_timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))

    def on_sendable ( self, event ) :
        while self.n_sent < self.n_messages :
            if event.sender.credit < 1 :
                break
            msg = Message ( body = self.n_sent )
            annotations = { 'Canis_meus' : 'id_comedit',
                            'x-opt-qd.ingress': 'ingress-router',
                            'x-opt-qd.trace': ['0/QDR.1'],
                            }
            self.n_sent += 1
            # Attach a custom annotation plus pre-set standard router
            # annotations.  With stripping in 'both' directions, the
            # standard ones must not reach the receiver, but the custom
            # one must.
            msg.annotations = annotations
            self.sender.send ( msg )

    def on_message ( self, event ) :
        self.n_received += 1
        # The annotations that the router adds should get stripped,
        # but not the custom one that I added.
        notes = event.message.annotations
        if 'x-opt-qd.ingress' in notes :
            self.bail ( 'x-opt-qd.ingress annotation not stripped' )
            return
        if 'x-opt-qd.trace' in notes :
            self.bail ( 'x-opt-qd.trace annotation not stripped' )
            return
        if not 'Canis_meus' in notes :
            self.bail ( 'Canis_meus annotation missing' )
            return
        if notes [ 'Canis_meus' ] != 'id_comedit' :
            self.bail ( "Canis_meus bad value" )
            return
        if self.n_received >= self.n_messages :
            # success
            self.bail ( None )
class StripMessageAnnotationsOut(MessagingHandler):
    """
    Send plain messages (no annotations of our own) through an address
    with 'stripAnnotations' set to 'out'.  Any annotations the router
    adds in transit must be stripped on egress, so the receiver must see
    messages with no annotations at all.
    """

    def __init__(self, addr, n_messages):
        super(StripMessageAnnotationsOut, self).__init__(prefetch=n_messages)
        self.addr = addr
        self.n_messages = n_messages
        self.test_timer = None
        self.sender = None
        self.receiver = None
        self.n_sent = 0
        self.n_received = 0

    def run(self):
        Container(self).run()

    def bail(self, travail):
        # travail is None on success, otherwise an error description.
        self.bailing = True
        self.error = travail
        self.send_conn.close()
        self.recv_conn.close()
        self.test_timer.cancel()

    def timeout(self):
        self.bail("Timeout Expired")

    def on_start(self, event):
        self.send_conn = event.container.connect(self.addr)
        self.recv_conn = event.container.connect(self.addr)
        self.sender = event.container.create_sender(self.send_conn, self.addr)
        self.receiver = event.container.create_receiver(self.recv_conn, self.addr)
        self.test_timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))

    def on_sendable(self, event):
        while self.n_sent < self.n_messages:
            if event.sender.credit < 1:
                break
            # Send bare messages: any annotations seen by the receiver
            # would have to come from the router, and 'out' stripping
            # must remove those before delivery.
            msg = Message(body=self.n_sent)
            self.n_sent += 1
            self.sender.send(msg)

    def on_message(self, event):
        self.n_received += 1
        # All router-added annotations must be gone on egress.
        # (Use identity comparison with None per Python idiom.)
        if event.message.annotations is not None:
            self.bail("An annotation was not stripped in egress message.")
            return
        if self.n_received >= self.n_messages:
            # success
            self.bail(None)
class StripMessageAnnotationsIn(MessagingHandler):
    """
    Send messages that already carry the standard router annotations
    (x-opt-qd.ingress and x-opt-qd.trace) through an address with
    'stripAnnotations' set to 'in'.  The router must strip the incoming
    values and substitute its own, so the receiver must never see the
    values supplied by the sender.
    """

    def __init__(self, addr, n_messages):
        super(StripMessageAnnotationsIn, self).__init__(prefetch=n_messages)
        self.addr = addr
        self.n_messages = n_messages
        self.test_timer = None
        self.sender = None
        self.receiver = None
        self.n_sent = 0
        self.n_received = 0

    def run(self):
        Container(self).run()

    def bail(self, travail):
        # travail is None on success, otherwise an error description.
        self.bailing = True
        self.error = travail
        self.send_conn.close()
        self.recv_conn.close()
        self.test_timer.cancel()

    def timeout(self):
        self.bail("Timeout Expired")

    def on_start(self, event):
        self.send_conn = event.container.connect(self.addr)
        self.recv_conn = event.container.connect(self.addr)
        self.sender = event.container.create_sender(self.send_conn, self.addr)
        self.receiver = event.container.create_receiver(self.recv_conn, self.addr)
        self.test_timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))

    def on_sendable(self, event):
        while self.n_sent < self.n_messages:
            if event.sender.credit < 1:
                break
            msg = Message(body=self.n_sent)
            # Attach some standard annotations to the message.
            # These are ingress annotations, and should get stripped;
            # the router then assigns its own values to these keys.
            notes = {'x-opt-qd.ingress': 'ingress-router', 'x-opt-qd.trace': ['0/QDR.1']}
            # Bug fix: 'notes' was previously built but never attached
            # to the message, which made this test pass vacuously.
            msg.annotations = notes
            self.sender.send(msg)
            self.n_sent += 1

    def on_message(self, event):
        self.n_received += 1
        # If either of the values we sent survives, stripping failed.
        if event.message.annotations['x-opt-qd.ingress'] == 'ingress-router':
            self.bail("x-opt-qd.ingress value was not stripped.")
            return
        if event.message.annotations['x-opt-qd.trace'] == ['0/QDR.1']:
            self.bail("x-opt-qd.trace value was not stripped.")
            return
        if self.n_received >= self.n_messages:
            # success
            self.bail(None)
# Canonical message payload used by the settle-mode test.
HELLO_WORLD = "Hello World!"
class SndSettleModeTest(MessagingHandler):
    """
    Verify sender-settle-mode handling: a receiver that requests
    at-most-once (pre-settled) delivery must receive messages that
    arrive already settled, even though the sending link itself is
    at-least-once (unsettled).
    """

    def __init__(self, address):
        super(SndSettleModeTest, self).__init__()
        self.address = address
        self.sender = None
        self.receiver = None
        self.message_received = False
        self.delivery_already_settled = False

    def on_start(self, event):
        conn = event.container.connect(self.address)
        # The receiver sets link.snd_settle_mode = Link.SND_SETTLED. It wants to receive settled messages
        self.receiver = event.container.create_receiver(conn, "org/apache/dev", options=AtMostOnce())
        # With AtLeastOnce, the sender will not settle.
        self.sender = event.container.create_sender(conn, "org/apache/dev", options=AtLeastOnce())

    def on_sendable(self, event):
        event.sender.send(Message(body=HELLO_WORLD))
        event.sender.close()

    def on_message(self, event):
        self.delivery_already_settled = event.delivery.settled
        # Record whether the payload arrived intact.
        self.message_received = event.message.body == HELLO_WORLD
        event.connection.close()

    def run(self):
        Container(self).run()
class ExcessDeliveriesReleasedTest(MessagingHandler):
    """
    Send 10 deliveries toward a receiver that grants only 6 credits and
    then closes its link.  The 6 in-credit deliveries must be accepted
    and the remaining 4 must come back to the sender as released.
    """

    def __init__(self, address):
        super(ExcessDeliveriesReleasedTest, self).__init__(prefetch=0)
        self.address = address
        self.dest = "closest.EDRtest"
        self.error = None
        self.sender = None
        self.receiver = None
        self.n_sent = 0
        self.n_received = 0
        self.n_accepted = 0
        self.n_released = 0

    def on_start(self, event):
        conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(conn, self.dest)
        self.receiver = event.container.create_receiver(conn, self.dest)
        # Grant only 6 credits for the 10 messages we are about to send.
        self.receiver.flow(6)

    def on_sendable(self, event):
        remaining = 10 - self.n_sent
        for seq in range(remaining):
            event.sender.send(Message(body=seq))
            self.n_sent += 1

    def on_accepted(self, event):
        self.n_accepted += 1

    def on_released(self, event):
        self.n_released += 1
        if self.n_released == 4:
            # All out-of-credit deliveries are back; verify the tallies.
            if self.n_accepted != 6:
                self.error = "Expected 6 accepted, got %d" % self.n_accepted
            if self.n_received != 6:
                self.error = "Expected 6 received, got %d" % self.n_received
            event.connection.close()

    def on_message(self, event):
        self.n_received += 1
        if self.n_received == 6:
            self.receiver.close()

    def run(self):
        Container(self).run()
class UnavailableBase(MessagingHandler):
    """
    Common machinery for the 'unavailable address' tests.  The test
    passes once the router has both reported a link error whose
    description is "Node not found" and remotely closed the link.
    """

    def __init__(self, address):
        super(UnavailableBase, self).__init__()
        self.address = address
        self.dest = "unavailable"
        self.conn = None
        self.sender = None
        self.receiver = None
        self.link_error = False
        self.link_closed = False
        self.passed = False
        self.timer = None
        self.link_name = "test_link"

    def check_if_done(self):
        # Both events must have been observed before declaring success.
        if self.link_error and self.link_closed:
            self.passed = True
            self.conn.close()
            self.timer.cancel()

    def on_link_error(self, event):
        failed_link = event.link
        if failed_link.name == self.link_name \
                and failed_link.remote_condition.description == "Node not found":
            self.link_error = True
            self.check_if_done()

    def on_link_remote_close(self, event):
        if event.link.name == self.link_name:
            self.link_closed = True
            self.check_if_done()

    def run(self):
        Container(self).run()
class UnavailableSender(UnavailableBase):
    """Attempt to attach a sender link to an 'unavailable' address."""

    def __init__(self, address):
        super(UnavailableSender, self).__init__(address)

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        # The router refuses links to an address with unavailable
        # distribution: it closes this link with a "Node not found" error.
        self.sender = event.container.create_sender(self.conn, self.dest, name=self.link_name)
class UnavailableReceiver(UnavailableBase):
    """Attempt to attach a receiver link to an 'unavailable' address."""

    def __init__(self, address):
        super(UnavailableReceiver, self).__init__(address)

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        # The router refuses links to an address with unavailable
        # distribution: it closes this link with a "Node not found" error.
        self.receiver = event.container.create_receiver(self.conn, self.dest, name=self.link_name)
class MulticastUnsettledTest(MessagingHandler):
    """
    Send N unsettled multicast messages to 2 receivers. Ensure sender is
    notified of settlement and disposition changes from the receivers.
    """

    def __init__(self, address):
        super(MulticastUnsettledTest, self).__init__(auto_accept=False, prefetch=0)
        self.address = address
        self.dest = "multicast.MUtest"
        self.error = None
        self.count = 10
        self.n_sent = 0
        self.n_received = 0
        self.n_accepted = 0
        self.n_receivers = 0

    def check_if_done(self):
        # Each of the 2 receivers gets every message (count * 2 receipts).
        # NOTE(review): only `count` accepted events are expected at the
        # sender — presumably the router consolidates the two receivers'
        # dispositions into one per delivery; confirm against router docs.
        if self.n_received == self.count * 2 and self.n_accepted == self.count:
            self.timer.cancel()
            self.conn.close()

    def timeout(self):
        self.error = "Timeout Expired: sent=%d, received=%d, accepted=%d" % (self.n_sent, self.n_received, self.n_accepted)
        self.conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        self.receiver1 = event.container.create_receiver(self.conn, self.dest,
                                                         name="A",
                                                         options=AtLeastOnce())
        self.receiver2 = event.container.create_receiver(self.conn, self.dest,
                                                         name="B",
                                                         options=AtLeastOnce());
        self.receiver1.flow(self.count)
        self.receiver2.flow(self.count)

    def on_link_opened(self, event):
        if event.receiver:
            self.n_receivers += 1
            # start the sender once all receivers links are up
            if self.n_receivers == 2:
                self.sender = event.container.create_sender(self.conn, self.dest,
                                                            options=AtLeastOnce())

    def on_sendable(self, event):
        for i in range(self.count - self.n_sent):
            msg = Message(body=i)
            event.sender.send(msg)
            self.n_sent += 1

    def on_accepted(self, event):
        self.n_accepted += 1
        self.check_if_done()

    def on_message(self, event):
        # Deliveries must arrive unsettled; accept and settle them manually
        # (auto_accept is disabled in __init__).
        if event.delivery.settled:
            self.error = "Received settled delivery"
        event.delivery.update(Delivery.ACCEPTED)
        event.delivery.settle()
        self.n_received += 1
        self.check_if_done()

    def run(self):
        Container(self).run()
class LargeMessageStreamCloseConnTest(MessagingHandler):
    """
    Regression test for DISPATCH-1085: stream a large (~440 KB) message
    and close the sending connection immediately afterward.  The message
    must still be delivered whole; the test fails if the delivery is
    aborted or never arrives.
    """

    def __init__(self, address):
        super(LargeMessageStreamCloseConnTest, self).__init__()
        self.address = address
        self.dest = "LargeMessageStreamCloseConnTest"
        self.error = None
        self.timer = None
        self.sender_conn = None
        self.receiver_conn = None
        self.sender = None
        self.receiver = None
        self.body = ""
        self.aborted = False
        # Build a large payload so the message spans multiple frames.
        for i in range(20000):
            self.body += "0123456789101112131415"

    def timeout(self):
        if self.aborted:
            self.error = "Message has been aborted. Test failed"
        else:
            self.error = "Message not received. test failed"
        self.receiver_conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.sender_conn = event.container.connect(self.address)
        self.receiver_conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(self.sender_conn, self.dest)
        self.receiver = event.container.create_receiver(self.receiver_conn,
                                                        self.dest, name="A")

    def on_sendable(self, event):
        msg = Message(body=self.body)
        # send(msg) calls the stream function which streams data
        # from sender to the router
        event.sender.send(msg)
        # Close the connection immediately after sending the message
        # Without the fix for DISPATCH-1085, this test will fail
        # one in five times with an abort
        # With the fix in place, this test will never fail (the
        # on_aborted will never be called).
        self.sender_conn.close()

    def on_message(self, event):
        # The complete message arrived: success.
        self.timer.cancel()
        self.receiver_conn.close()

    def on_aborted(self, event):
        self.aborted = True
        self.timer.cancel()
        self.timeout()

    def run(self):
        Container(self).run()
class LargeMessageStreamTest(MessagingHandler):
    """
    Send 10 large (~220 KB) messages through the router and verify that
    all of them are received, exercising multi-frame message streaming.
    """

    def __init__(self, address):
        super(LargeMessageStreamTest, self).__init__()
        self.address = address
        self.dest = "LargeMessageStreamTest"
        self.error = None
        self.count = 10
        self.n_sent = 0
        self.timer = None
        self.conn = None
        self.sender = None
        self.receiver = None
        self.n_received = 0
        # Build the large payload in one step instead of 10000 repeated
        # string concatenations (same resulting string).
        self.body = "0123456789101112131415" * 10000

    def check_if_done(self):
        if self.n_received == self.count:
            self.timer.cancel()
            self.conn.close()

    def timeout(self):
        self.error = "Timeout Expired: sent=%d, received=%d" % (self.n_sent, self.n_received)
        self.conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(self.conn, self.dest)
        self.receiver = event.container.create_receiver(self.conn, self.dest, name="A")
        self.receiver.flow(self.count)

    def on_sendable(self, event):
        for i in range(self.count):
            msg = Message(body=self.body)
            # send(msg) calls the stream function which streams data
            # from sender to the router.
            event.sender.send(msg)
            self.n_sent += 1

    def on_message(self, event):
        self.n_received += 1
        self.check_if_done()

    def run(self):
        Container(self).run()
class MultiframePresettledTest(MessagingHandler):
    """
    Send large (~100 KB) pre-settled messages and verify each arrives
    still settled, exercising multi-frame transfer of presettled
    deliveries.
    """

    def __init__(self, address):
        super(MultiframePresettledTest, self).__init__(prefetch=0)
        self.address = address
        self.dest = "closest.MFPtest"
        self.error = None
        self.count = 10
        self.n_sent = 0
        self.n_received = 0
        # Build the large payload in one step instead of 10000 repeated
        # string concatenations (same resulting string).
        self.body = "0123456789" * 10000

    def check_if_done(self):
        if self.n_received == self.count:
            self.timer.cancel()
            self.conn.close()

    def timeout(self):
        self.error = "Timeout Expired: sent=%d, received=%d" % (self.n_sent, self.n_received)
        self.conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(self.conn, self.dest)
        self.receiver = event.container.create_receiver(self.conn, self.dest, name="A")
        self.receiver.flow(self.count)

    def on_sendable(self, event):
        for i in range(self.count - self.n_sent):
            msg = Message(body=self.body)
            # Settle each delivery at the sender so it travels presettled.
            dlv = event.sender.send(msg)
            dlv.settle()
            self.n_sent += 1

    def on_message(self, event):
        # Presettled deliveries must arrive already settled.
        if not event.delivery.settled:
            self.error = "Received unsettled delivery"
        self.n_received += 1
        self.check_if_done()

    def run(self):
        Container(self).run()
class UptimeLastDlvChecker(object):
    """
    Timer callback that queries the router's connections and validates
    the 'uptimeSeconds' and 'lastDlvSeconds' attributes for the parent
    test's container id.  Results are reported through parent.error /
    parent.success, and parent.cancel_custom() is always invoked.
    """

    def __init__(self, parent, lastDlv=None, uptime=0):
        # parent:  the owning test handler.
        # lastDlv: minimum expected lastDlvSeconds, or None when no
        #          delivery is expected to have happened yet.
        # uptime:  minimum expected uptimeSeconds (0 disables the check).
        self.parent = parent
        self.uptime = uptime
        self.lastDlv = lastDlv
        self.expected_num_connections = 2
        self.num_connections = 0

    def on_timer_task(self, event):
        local_node = Node.connect(self.parent.address, timeout=TIMEOUT)
        result = local_node.query('org.apache.qpid.dispatch.connection')
        container_id_index = result.attribute_names.index('container')
        uptime_seconds_index = result.attribute_names.index('uptimeSeconds')
        last_dlv_seconds_index = result.attribute_names.index('lastDlvSeconds')
        for res in result.results:
            container_id = res[container_id_index]
            # We only care if the container_id is "UPTIME-TEST"
            if container_id == self.parent.container_id:
                uptime_seconds = res[uptime_seconds_index]
                if self.uptime != 0 and uptime_seconds < self.uptime:
                    self.parent.error = "The connection uptime should be greater than or equal to %d seconds but instead is %d seconds" % (self.uptime, uptime_seconds)
                last_dlv_seconds = res[last_dlv_seconds_index]
                if self.lastDlv is None:
                    if last_dlv_seconds is not None:
                        self.parent.error = "Expected lastDlvSeconds to be empty"
                else:
                    # NOTE(review): on Python 3 this comparison raises
                    # TypeError if last_dlv_seconds is None — assumes the
                    # router always reports a value once a delivery has
                    # occurred; confirm.
                    if not last_dlv_seconds >= self.lastDlv:
                        self.parent.error = "Connection lastDeliverySeconds must be greater than or equal to %d but is %d" % (self.lastDlv, last_dlv_seconds)
                    else:
                        self.parent.success = True
                self.num_connections += 1
        # Both the sender and receiver connections must have been seen.
        if self.expected_num_connections != self.num_connections:
            self.parent.error = "Number of client connections expected=%d, but got %d" % (self.expected_num_connections, self.num_connections)
        self.parent.cancel_custom()
class ConnectionUptimeLastDlvTest(MessagingHandler):
    """
    Verify the per-connection 'uptimeSeconds' and 'lastDlvSeconds'
    management statistics.  Phase 1 (after links open, before any
    delivery): lastDlvSeconds must be empty.  Phase 2 (after one message
    is sent): uptime and lastDlv must have advanced.  The checks are
    performed by scheduled UptimeLastDlvChecker instances.
    """

    def __init__(self, address, dest):
        super(ConnectionUptimeLastDlvTest, self).__init__()
        self.timer = None
        self.sender_conn = None
        self.receiver_conn = None
        self.address = address
        self.sender = None
        self.receiver = None
        self.error = None
        self.custom_timer = None
        self.container_id = "UPTIME-TEST"
        self.dest = dest
        self.reactor = None
        self.success = False
        self.receiver_link_opened = False
        self.sender_link_opened = False
        self.custom_timer_created = False

    def cancel_custom(self):
        # Called by UptimeLastDlvChecker after each check pass.
        self.custom_timer.cancel()
        if self.error or self.success:
            # Either phase failed or phase 2 succeeded: tear down.
            self.timer.cancel()
            self.sender_conn.close()
            self.receiver_conn.close()
        else:
            # Phase 1 passed: send one message and schedule phase 2.
            msg = Message(body=self.container_id)
            self.sender.send(msg)
            # We have now sent a message that the router must have sent to the
            # receiver. We will wait for 2 seconds and once again check
            # uptime and lastDlv.
            # Allow for some slop in the calculation of uptime and last delivery:
            # * reactor.schedule needs leeway in calculating the time delta and delivering the callback
            # * dispatch needs leeway rounding stats to whole seconds
            self.custom_timer = self.reactor.schedule(2, UptimeLastDlvChecker(self, uptime=2, lastDlv=1))

    def timeout(self):
        self.error = "Timeout Expired:, Test took too long to execute. "
        self.sender_conn.close()
        self.receiver_conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        # Create separate sender and receiver connections.
        self.sender_conn = event.container.connect(self.address)
        self.receiver_conn = event.container.connect(self.address)
        # Let's create a sender and receiver but not send any messages.
        self.sender = event.container.create_sender(self.sender_conn, self.dest)
        self.receiver = event.container.create_receiver(self.receiver_conn, self.dest)
        # Execute a management query for connections after 1 second
        # This will help us check the uptime and lastDlv time
        # No deliveries were sent on any link yet, so the lastDlv must be "-"
        self.reactor = event.reactor

    def on_link_opened(self, event):
        # Schedule the UptimeLastDlvChecker only after the sender and
        # receiver links have been opened on those connections. This
        # will help the test pass 100% of the time in slow systems.
        if self.receiver == event.receiver:
            self.receiver_link_opened = True
        if event.sender == self.sender:
            self.sender_link_opened = True
        if self.receiver_link_opened and self.sender_link_opened:
            if not self.custom_timer_created:
                self.custom_timer_created = True
                self.custom_timer = event.reactor.schedule(1, UptimeLastDlvChecker(self, uptime=1,
                                                                                   lastDlv=None))

    def run(self):
        # Use a fixed container id so the checker can identify our
        # connections in the management query results.
        container = Container(self)
        container.container_id = self.container_id
        container.run()
class AnonymousSenderNoRecvLargeMessagedTest(MessagingHandler):
    """
    Send one large (~440 KB) message from an anonymous sender to an
    address that has no receiver, and verify the delivery comes back
    released.
    """

    def __init__(self, address):
        super(AnonymousSenderNoRecvLargeMessagedTest, self).__init__(auto_accept=False)
        self.timer = None
        self.conn = None
        self.sender = None
        self.address = address
        self.released = False
        self.error = None
        # Build the large payload in one step instead of 20000 repeated
        # string concatenations (same resulting string).
        self.body = "0123456789101112131415" * 20000

    def timeout(self):
        self.error = "Timeout Expired:, delivery not released. "
        self.conn.close()

    def check_if_done(self):
        if self.released:
            self.sender.close()
            self.conn.close()
            self.timer.cancel()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        # This sender is an anonymous sender (no target address on the link).
        self.sender = event.container.create_sender(self.conn)

    def on_sendable(self, event):
        # The per-message address routes the delivery; with no receiver
        # attached there, the router must release it.
        msg = Message(body=self.body, address="someaddress")
        # send(msg) calls the stream function which streams data from sender to the router.
        event.sender.send(msg)

    def on_released(self, event):
        self.released = True
        self.check_if_done()

    def run(self):
        Container(self).run()
class ReleasedVsModifiedTest(MessagingHandler):
    """
    Send 10 messages to a receiver that only credits and accepts 6, then
    closes its link.  The 6 in-flight deliveries must come back as
    MODIFIED and the remaining 4 as RELEASED, and the router's
    'modifiedDeliveries' counter must grow by exactly 6.
    """

    def __init__(self, address):
        super(ReleasedVsModifiedTest, self).__init__(prefetch=0, auto_accept=False)
        self.address = address
        self.dest = "closest.RVMtest"
        self.error = None
        self.count = 10
        self.accept = 6
        self.n_sent = 0
        self.n_received = 0
        self.n_released = 0
        self.n_modified = 0
        self.node_modified_at_start = 0

    def get_modified_deliveries ( self ) :
        # Query the router's global modifiedDeliveries counter.
        local_node = Node.connect(self.address, timeout=TIMEOUT)
        outs = local_node.query(type='org.apache.qpid.dispatch.router')
        pos = outs.attribute_names.index("modifiedDeliveries")
        results = outs.results[0]
        n_modified_deliveries = results [ pos ]
        return n_modified_deliveries

    def check_if_done(self):
        if self.n_received == self.accept and self.n_released == self.count - self.accept and self.n_modified == self.accept:
            # Confirm the router's counter advanced by exactly the number
            # of deliveries modified during this test.
            node_modified_now = self.get_modified_deliveries ( )
            this_test_modified_deliveries = node_modified_now - self.node_modified_at_start
            if this_test_modified_deliveries == self.accept:
                self.timer.cancel()
                self.conn.close()

    def timeout(self):
        self.error = "Timeout Expired: sent=%d, received=%d, released=%d, modified=%d" % \
                     (self.n_sent, self.n_received, self.n_released, self.n_modified)
        self.conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(self.conn, self.dest)
        self.receiver = event.container.create_receiver(self.conn, self.dest, name="A")
        self.receiver.flow(self.accept)
        self.node_modified_at_start = self.get_modified_deliveries ( )

    def on_sendable(self, event):
        for i in range(self.count - self.n_sent):
            msg = Message(body="RvM-Test")
            event.sender.send(msg)
            self.n_sent += 1

    def on_message(self, event):
        self.n_received += 1
        # Close the link with `accept` deliveries still unsettled; those
        # come back MODIFIED, the never-credited remainder RELEASED.
        if self.n_received == self.accept:
            self.receiver.close()

    def on_released(self, event):
        # Proton reports MODIFIED through on_released; distinguish by
        # the delivery's remote state.
        if event.delivery.remote_state == Delivery.MODIFIED:
            self.n_modified += 1
        else:
            self.n_released += 1
        self.check_if_done()

    def run(self):
        Container(self).run()
class AppearanceOfBalanceTest(MessagingHandler):
    """
    Send 9 messages, one at a time, to a 'balanced' address with three
    attached receivers and confirm the router spreads them evenly:
    exactly 3 deliveries per receiver.
    """

    def __init__(self, address):
        super(AppearanceOfBalanceTest, self).__init__()
        self.address = address
        self.dest = "balanced.AppearanceTest"
        self.error = None
        self.count = 9
        self.n_sent = 0
        self.n_received_a = 0
        self.n_received_b = 0
        self.n_received_c = 0

    def check_if_done(self):
        total = self.n_received_a + self.n_received_b + self.n_received_c
        if total == self.count:
            if self.n_received_a != 3 or self.n_received_b != 3 or self.n_received_c != 3:
                self.error = "Incorrect Distribution: %d/%d/%d" % (self.n_received_a, self.n_received_b, self.n_received_c)
            self.timer.cancel()
            self.conn.close()

    def timeout(self):
        self.error = "Timeout Expired: sent=%d rcvd=%d/%d/%d" % \
                     (self.n_sent, self.n_received_a, self.n_received_b, self.n_received_c)
        self.conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(self.conn, self.dest)
        self.receiver_a = event.container.create_receiver(self.conn, self.dest, name="A")
        self.receiver_b = event.container.create_receiver(self.conn, self.dest, name="B")
        self.receiver_c = event.container.create_receiver(self.conn, self.dest, name="C")

    def send(self):
        # Only one message is in flight at a time: the next send is
        # triggered from on_accepted.
        if self.n_sent < self.count:
            self.sender.send(Message(body="Appearance-Test"))
            self.n_sent += 1

    def on_sendable(self, event):
        if self.n_sent == 0:
            self.send()

    def on_message(self, event):
        if event.receiver == self.receiver_a:
            self.n_received_a += 1
        if event.receiver == self.receiver_b:
            self.n_received_b += 1
        if event.receiver == self.receiver_c:
            self.n_received_c += 1

    def on_accepted(self, event):
        self.send()
        self.check_if_done()

    def run(self):
        Container(self).run()
class BatchedSettlementTest(MessagingHandler):
    """
    Send 200 unsettled messages and accept them in batches of 20 on the
    receive side, then verify via a management query that the router's
    acceptedDeliveries counter reached at least that total.
    """

    def __init__(self, address):
        super(BatchedSettlementTest, self).__init__(auto_accept=False)
        self.address = address
        self.dest = "balanced.BatchedSettlement"
        self.error = None
        self.count = 200
        self.batch_count = 20
        self.n_sent = 0
        self.n_received = 0
        self.n_settled = 0
        self.batch = []
        self.accepted_count_match = False

    def check_if_done(self):
        if self.n_settled == self.count:
            local_node = Node.connect(self.address, timeout=TIMEOUT)
            outs = local_node.query(type='org.apache.qpid.dispatch.router')
            pos = outs.attribute_names.index("acceptedDeliveries")
            results = outs.results[0]
            if results[pos] >= self.count:
                self.accepted_count_match = True
            self.timer.cancel()
            self.conn.close()

    def timeout(self):
        self.error = "Timeout Expired: sent=%d rcvd=%d settled=%d" % \
                     (self.n_sent, self.n_received, self.n_settled)
        self.conn.close()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(self.conn, self.dest)
        self.receiver = event.container.create_receiver(self.conn, self.dest)

    def send(self):
        while self.n_sent < self.count and self.sender.credit > 0:
            self.sender.send(Message(body="Batch-Test"))
            self.n_sent += 1

    def on_sendable(self, event):
        self.send()

    def on_message(self, event):
        self.n_received += 1
        # Collect deliveries; once a full batch has arrived, accept them
        # oldest-first.
        self.batch.append(event.delivery)
        if len(self.batch) == self.batch_count:
            while self.batch:
                self.accept(self.batch.pop(0))

    def on_accepted(self, event):
        self.n_settled += 1
        self.check_if_done()

    def run(self):
        Container(self).run()
class RejectCoordinatorTest(MessagingHandler, TransactionHandler):
    """
    Verify that the router rejects an attempt to declare a transaction:
    it must close the 'txn-ctrl' coordinator link with an
    amqp:precondition-failed error carrying the expected description.
    """

    def __init__(self, url):
        super(RejectCoordinatorTest, self).__init__(prefetch=0)
        self.url = Url(url)
        # Expected link-error description; compared verbatim against
        # remote_condition.description in on_link_error.
        self.error = "The router can't coordinate transactions by itself, a linkRoute to a coordinator must be " \
                     "configured to use transactions."
        self.container = None
        self.conn = None
        self.sender = None
        self.timer = None
        self.passed = False
        self.link_error = False
        self.link_remote_close = False

    def timeout(self):
        self.conn.close()

    def check_if_done(self):
        # Both the link error and the remote close must be observed.
        if self.link_remote_close and self.link_error:
            self.passed = True
            self.conn.close()
            self.timer.cancel()

    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.container = event.container
        self.conn = self.container.connect(self.url)
        self.sender = self.container.create_sender(self.conn, self.url.path)
        # declare_transaction tries to create a link with name "txn-ctrl" to the
        # transaction coordinator which has its own target, it has no address
        # The router cannot coordinate transactions itself and so there will be a link error when this
        # link is attempted to be created
        self.container.declare_transaction(self.conn, handler=self)

    def on_link_error(self, event):
        link = event.link
        # If the link name is 'txn-ctrl' and there is a link error and it matches self.error, then we know
        # that the router has rejected the link because it cannot coordinate transactions itself
        if link.name == "txn-ctrl" and link.remote_condition.description == self.error and \
                link.remote_condition.name == 'amqp:precondition-failed':
            self.link_error = True
            self.check_if_done()

    def on_link_remote_close(self, event):
        link = event.link
        if link.name == "txn-ctrl":
            self.link_remote_close = True
            self.check_if_done()

    def run(self):
        Container(self).run()
class PresettledOverflowTest(MessagingHandler):
def __init__(self, address):
super(PresettledOverflowTest, self).__init__(prefetch=0)
self.address = address
self.dest = "balanced.PresettledOverflow"
self.error = None
self.count = 500
self.n_sent = 0
self.n_received = 0
self.last_seq = -1
def timeout(self):
self.error = "Timeout Expired: sent=%d rcvd=%d last_seq=%d" % (self.n_sent, self.n_received, self.last_seq)
self.conn.close()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.conn = event.container.connect(self.address)
self.sender = event.container.create_sender(self.conn, self.dest)
self.receiver = event.container.create_receiver(self.conn, self.dest)
self.receiver.flow(10)
def send(self):
while self.n_sent < self.count and self.sender.credit > 0:
msg = Message(body={"seq": self.n_sent})
dlv = self.sender.send(msg)
dlv.settle()
self.n_sent += 1
if self.n_sent == self.count:
self.receiver.flow(self.count)
def on_sendable(self, event):
if self.n_sent < self.count:
self.send()
def on_message(self, event):
self.n_received += 1
self.last_seq = event.message.body["seq"]
if self.last_seq == self.count - 1:
if self.n_received == self.count:
self.error = "No deliveries were dropped"
if not self.error:
local_node = Node.connect(self.address, timeout=TIMEOUT)
out = local_node.query(type='org.apache.qpid.dispatch.router.link')
for result in out.results:
if result[5] == 'out' and 'balanced.PresettledOverflow' in result[6]:
if result[16] != 249:
self.error = "Expected 249 dropped presettled deliveries but got " + str(result[16])
self.conn.close()
self.timer.cancel()
def run(self):
Container(self).run()
class RejectDispositionTest(MessagingHandler):
def __init__(self, address):
super(RejectDispositionTest, self).__init__(auto_accept=False)
self.address = address
self.sent = False
self.received_error = False
self.dest = "rejectDispositionTest"
# explicitly convert to str due to
# https://issues.apache.org/jira/browse/PROTON-1843
self.error_description = str('you were out of luck this time!')
self.error_name = u'amqp:internal-error'
self.reject_count_match = False
self.rejects_at_start = 0
def count_rejects ( self ) :
local_node = Node.connect(self.address, timeout=TIMEOUT)
outs = local_node.query(type='org.apache.qpid.dispatch.router')
pos = outs.attribute_names.index("rejectedDeliveries")
results = outs.results[0]
return results[pos]
def on_start(self, event):
conn = event.container.connect(self.address)
event.container.create_sender(conn, self.dest)
event.container.create_receiver(conn, self.dest)
self.rejects_at_start = self.count_rejects ( )
def on_sendable(self, event):
if not self.sent:
event.sender.send(Message(body=u"Hello World!"))
self.sent = True
def on_rejected(self, event):
if event.delivery.remote.condition.description == self.error_description \
and event.delivery.remote.condition.name == self.error_name:
self.received_error = True
rejects_now = self.count_rejects ( )
rejects_for_this_test = rejects_now - self.rejects_at_start
if rejects_for_this_test == 1:
self.reject_count_match = True
event.connection.close()
def on_message(self, event):
event.delivery.local.condition = Condition(self.error_name, self.error_description)
self.reject(event.delivery)
def run(self):
Container(self).run()
class UnsettledLargeMessageTest(MessagingHandler):
def __init__(self, addr, n_messages):
super (UnsettledLargeMessageTest, self).__init__()
self.addr = addr
self.n_messages = n_messages
self.sender = None
self.receiver = None
self.sender_conn = None
self.recv_conn = None
self.n_sent = 0
self.n_received = 0
self.error = None
self.test_timer = None
self.max_receive = 1
self.custom_timer = None
self.timer = None
self.n_accepted = 0
self.n_modified = 0
self.n_released = 0
self.str1 = "0123456789abcdef"
self.msg_str = ""
for i in range(16384):
self.msg_str += self.str1
def run (self):
Container(self).run()
def check_if_done(self):
# self.n_accepted + self.n_modified + self.n_released will never
# equal self.n_messages without the fix for DISPATCH-1197 because
# the router will never pull the data from the proton buffers once
# the router hits q2_holdoff
if self.n_accepted + self.n_modified + \
self.n_released == self.n_messages:
self.timer.cancel()
self.sender_conn.close()
def timeout(self):
self.error = "Timeout Expired: sent=%d accepted=%d " \
"released=%d modified=%d" % (self.n_messages,
self.n_accepted,
self.n_released,
self.n_modified)
def on_start (self, event):
self.sender_conn = event.container.connect(self.addr)
self.recv_conn = event.container.connect(self.addr)
self.receiver = event.container.create_receiver(self.recv_conn,
"test_42")
self.sender = event.container.create_sender(self.sender_conn,
"test_42")
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
def on_accepted(self, event):
self.n_accepted += 1
def on_released(self, event):
if event.delivery.remote_state == Delivery.MODIFIED:
self.n_modified += 1
else:
self.n_released += 1
self.check_if_done()
def on_sendable(self, event):
while self.n_sent < self.n_messages:
msg = Message(id=(self.n_sent + 1),
body={'sequence': (self.n_sent + 1),
'msg_str': self.msg_str})
# Presettle the delivery.
self.sender.send (msg)
self.n_sent += 1
def on_message(self, event):
self.n_received += 1
if self.n_received == self.max_receive:
# Close the receiver connection after receiving just one message
# This will cause the release of multi-frame deliveries.
# Meanwhile the sender will keep sending but will run into
# the q2_holodd situation and never recover.
# The sender link will be stalled
# This test will NEVER pass without the fix to DISPATCH-1197
# Receiver bails after receiving max_receive messages.
self.receiver.close()
self.recv_conn.close()
class OneRouterUnavailableCoordinatorTest(TestCase):
@classmethod
def setUpClass(cls):
super(OneRouterUnavailableCoordinatorTest, cls).setUpClass()
name = "test-router"
OneRouterTest.listen_port = cls.tester.get_port()
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR', 'defaultDistribution': 'unavailable'}),
('listener', {'port': cls.tester.get_port() }),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'balanced', 'distribution': 'balanced'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
])
cls.router = cls.tester.qdrouterd(name, config)
cls.router.wait_ready()
cls.address = cls.router.addresses[0]
def test_46_coordinator_linkroute_unavailable_DISPATCH_1453(self):
# The defaultDistribution on the router is unavailable. We try to connect a tx sender
# to make sure a good detailed message saying "the link route to a coordinator must be
# configured" is sent back.
test = RejectCoordinatorGoodMessageTest(self.address)
test.run()
self.assertTrue(test.passed)
def test_47_coordinator_linkroute_available_DISPATCH_1453(self):
# The defaultDistribution on the router is unavailable. We create a link route with $coordinator address
# The link route is not attached to any broker. When the attach comes in, the reject message must be
# condition=:"qd:no-route-to-dest", description="No route to the destination node"
COORDINATOR = "$coordinator"
long_type = 'org.apache.qpid.dispatch.router.config.linkRoute'
qd_manager = QdManager(self, address=self.address)
args = {"prefix": COORDINATOR, "connection": "broker", "dir": "in"}
qd_manager.create(long_type, args)
link_route_created = False
# Verify that the link route was created by querying for it.
outs = qd_manager.query(long_type)[0]
if outs:
try:
if outs['prefix'] == COORDINATOR:
link_route_created = True
except:
pass
self.assertTrue(link_route_created)
# We have verified that the link route has been created but there is no broker connections.
# Now let's try to open a transaction. We should get a no route to destination error
test = RejectCoordinatorGoodMessageTest(self.address, link_route_present=True)
test.run()
self.assertTrue(test.passed)
class RejectCoordinatorGoodMessageTest(RejectCoordinatorTest):
def __init__(self, url, link_route_present=False):
super(RejectCoordinatorGoodMessageTest, self).__init__(url)
self.link_route_present = link_route_present
self.error_with_link_route = "No route to the destination node"
def on_link_error(self, event):
link = event.link
# If the link name is 'txn-ctrl' and there is a link error and it matches self.error, then we know
# that the router has rejected the link because it cannot coordinate transactions itself
if link.name == "txn-ctrl":
if self.link_route_present:
if link.remote_condition.description == self.error_with_link_route and link.remote_condition.name == 'qd:no-route-to-dest':
self.link_error = True
else:
if link.remote_condition.description == self.error and link.remote_condition.name == 'amqp:precondition-failed':
self.link_error = True
self.check_if_done()
def run(self):
Container(self).run()
class Q2HoldoffDropTest(MessagingHandler):
"""
Create 3 multicast receivers, two which grant 2 credits and one that grants
only one. Send enough data to force Q2 holdoff (since one rx is blocked)
Close the stalled rx connection, verify the remaining receivers get the
message (Q2 holdoff disabled)
"""
def __init__(self, router):
super(Q2HoldoffDropTest, self).__init__(prefetch=0,
auto_accept=False,
auto_settle=False)
self.router = router
self.rx_fast1_conn = None
self.rx_fast1 = None
self.rx_fast2_conn = None
self.rx_fast2 = None
self.rx_slow_conn = None
self.rx_slow = None
self.tx_conn = None
self.tx = None
self.timer = None
self.reactor = None
self.error = None
self.n_attached = 0
self.n_rx = 0
self.n_tx = 0
self.close_timer = 0
# currently the router buffer size is 512 bytes and the Q2 holdoff
# buffer chain high watermark is 256 buffers. We need to send a
# message that will be big enough to trigger Q2 holdoff
self.big_msg = Message(body=["DISPATCH-1330" * (512 * 256 * 4)])
def done(self):
if self.timer:
self.timer.cancel()
if self.close_timer:
self.close_timer.cancel()
if self.tx_conn:
self.tx_conn.close()
if self.rx_fast1_conn:
self.rx_fast1_conn.close()
if self.rx_fast2_conn:
self.rx_fast2_conn.close()
if self.rx_slow_conn:
self.rx_slow_conn.close()
def timeout(self):
self.error = "Timeout Expired"
self.done()
def on_start(self, event):
self.reactor = event.reactor
self.timer = self.reactor.schedule(TIMEOUT, TestTimeout(self))
self.rx_slow_conn = event.container.connect(self.router.addresses[0])
self.rx_fast1_conn = event.container.connect(self.router.addresses[0])
self.rx_fast2_conn = event.container.connect(self.router.addresses[0])
self.rx_slow = event.container.create_receiver(self.rx_slow_conn,
source="multicast.dispatch-1330",
name="rx_slow")
self.rx_fast1 = event.container.create_receiver(self.rx_fast1_conn,
source="multicast.dispatch-1330",
name="rx_fast1")
self.rx_fast2 = event.container.create_receiver(self.rx_fast2_conn,
source="multicast.dispatch-1330",
name="rx_fast2")
def on_link_opened(self, event):
if event.receiver:
self.n_attached += 1
if self.n_attached == 3:
self.rx_fast1.flow(2)
self.rx_fast2.flow(2)
self.rx_slow.flow(1) # stall on 2nd msg
self.tx_conn = event.container.connect(self.router.addresses[0])
self.tx = event.container.create_sender(self.tx_conn,
target="multicast.dispatch-1330",
name="tx")
def on_sendable(self, event):
if self.n_tx == 0:
# wait until all subscribers present
self.router.wait_address("multicast.dispatch-1330", subscribers=3)
for i in range(2):
dlv = self.tx.send(self.big_msg)
dlv.settle()
self.n_tx += 1
def close_rx_slow(self, event):
if self.rx_slow_conn:
self.rx_slow_conn.close()
self.rx_slow_conn = None
self.rx_slow = None
def on_message(self, event):
self.n_rx += 1
if self.n_rx == 3: # first will arrive, second is blocked
class CloseTimer(Timeout):
def __init__(self, parent):
self.parent = parent
def on_timer_task(self, event):
self.parent.close_rx_slow(event)
# 2 second wait for Q2 to fill up
self.close_timer = self.reactor.schedule(2.0, CloseTimer(self))
if self.n_rx == 5:
# succesfully received on last two receivers
self.done()
def run(self):
Container(self).run()
# wait until the router has cleaned up the route table
clean = False
while not clean:
clean = True
atype = 'org.apache.qpid.dispatch.router.address'
addrs = self.router.management.query(type=atype).get_dicts()
if list(filter(lambda a: a['name'].find("dispatch-1330") != -1, addrs)):
clean = False
break
if not clean:
sleep(0.1)
class OneRouterTransactionalAttachTest(TestCase):
"""
Verify that a transaction is properly forwarded through the router
"""
class FakeTxnBroker(FakeBroker):
"""
A FakeBroker that tracks Transaction declaration.
Note well: Proton python does not provide the ability to set a delivery
state to DECLARED (0x0033), so this broker cannot simulate a full
transactional delivery. At best we ensure that the router properly
forwards the target capabilities and the declare message.
"""
def __init__(self, url, container_id=None, **handler_kwargs):
super(OneRouterTransactionalAttachTest.FakeTxnBroker,
self).__init__(url, container_id, **handler_kwargs)
self.txn_link = None
self.remote_caps = None
self.declare_body = None
def on_link_opening(self, event):
if event.link.remote_target.type == Terminus.COORDINATOR:
self.txn_link = event.link
self.txn_link.source.copy(event.link.remote_source)
self.txn_link.target.copy(event.link.remote_target)
self.remote_caps = self.txn_link.remote_target.capabilities
self.txn_link.flow(1)
else:
super(OneRouterTransactionalAttachTest.FakeTxnBroker,
self).on_link_opening(event)
def on_message(self, event):
if event.link == self.txn_link:
self.declare_body = event.message.body
event.delivery.update(Delivery.REJECTED)
event.delivery.settle()
else:
super(OneRouterTransactionalAttachTest.FakeTxnBroker,
self).on_message(event)
class TxSender(MessagingHandler, TransactionHandler):
"""
Transactional publisher client. The transaction will fail since the
fake broker cannot declare the transaction properly
"""
def __init__(self, url, messages=1):
super(OneRouterTransactionalAttachTest.TxSender, self).__init__()
self.url = Url(url)
self.sent = 0
self.declare_failed = False
self.total = messages
def on_start(self, event):
self.container = event.container
self.conn = self.container.connect(self.url)
self.sender = self.container.create_sender(self.conn, self.url.path)
self.container.declare_transaction(self.conn, handler=self)
self.transaction = None
def on_transaction_declared(self, event):
self.transaction = event.transaction
self.declare_failed = False
self.send()
def on_sendable(self, event):
self.send()
def send(self):
if self.transaction and self.sender.credit > 0 and self.sent < self.total:
seq = self.sent
self.sent -= 1
msg = Message(id=seq, body={'sequence':seq})
self.transaction.send(self.sender, msg)
self.transaction.commit()
self.transaction = None
def on_transaction_declare_failed(self, event):
# expected to fail, since the FakeBroker cannot declare a transaction
self.declare_failed = True
self.conn.close()
@classmethod
def setUpClass(cls):
super(OneRouterTransactionalAttachTest, cls).setUpClass()
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'TxnRouter'}),
('listener', {'port': cls.tester.get_port() }),
('connector', {'port': cls.tester.get_port(),
'role': 'route-container'}),
('linkRoute', {'prefix': "$coordinator",
'containerId': "FakeBroker",
'direction': "in"}),
('linkRoute', {'prefix': 'closest/queue01',
'containerId': 'FakeBroker',
'direction': 'in'}),
('linkRoute', {'prefix': 'closest/queue01',
'containerId': 'FakeBroker',
'direction': 'out'}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'balanced', 'distribution': 'balanced'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
])
cls.router = cls.tester.qdrouterd('TxnRouter', config, wait=False)
cls.listener = cls.router.addresses[0]
cls.connector = cls.router.connector_addresses[0]
cls.broker = cls.FakeTxnBroker(url=cls.connector,
prefetch=0,
auto_accept=False,
auto_settle=False)
cls.router.wait_connectors()
cls.router.wait_address("closest/queue01")
def test_01_verify_attach(self):
"""
Verify the transaction link attach is correctly forwarded to the broker
"""
client = self.TxSender(url=self.listener)
Container(client).run()
self.assertTrue(client.declare_failed)
self.assertTrue(self.broker.txn_link is not None)
self.assertTrue(self.broker.declare_body is not None)
self.assertEqual(symbol('amqp:declare:list'),
self.broker.declare_body.descriptor)
if PROTON_VERSION >= (0, 30, 0):
# prior to proton 0.30.0 capabilities were not provided
# see PROTON-2138
self.assertTrue(self.broker.remote_caps is not None)
# capabilities should be a list with a txn-capability type
# verify router has forwarded this correctly:
rc = self.broker.remote_caps
rc.rewind()
count = 0
while rc.next() == Data.SYMBOL:
s = rc.get_symbol()
self.assertTrue(s in [symbol('amqp:local-transactions'),
symbol('amqp:distributed-transactions'),
symbol('amqp:promotable-transactions'),
symbol('amqp:multi-txns-per-ssn'),
symbol('amqp:multi-ssns-per-txn')])
count += 1
self.assertTrue(count > 0)
if __name__ == '__main__':
unittest.main(main_module()) | unknown | codeparrot/codeparrot-clean | ||
/* mips_init.c - MSA optimised filter functions
*
* Copyright (c) 2018-2024 Cosmin Truta
* Copyright (c) 2016 Glenn Randers-Pehrson
* Written by Mandar Sahastrabuddhe, 2016
* Updated by guxiwei, 2023
*
* This code is released under the libpng license.
* For conditions of distribution and use, see the disclaimer
* and license in png.h
*/
/* Below, after checking __linux__, various non-C90 POSIX 1003.1 functions are
* called.
*/
#define _POSIX_SOURCE 1
#include <stdio.h>
#include "../pngpriv.h"
#ifdef PNG_READ_SUPPORTED
#if PNG_MIPS_MSA_IMPLEMENTATION == 1 || PNG_MIPS_MMI_IMPLEMENTATION > 0
#ifdef PNG_MIPS_MSA_CHECK_SUPPORTED /* Do MIPS MSA run-time checks */
/* WARNING: it is strongly recommended that you do not build libpng with
* run-time checks for CPU features if at all possible. In the case of the MIPS
* MSA instructions there is no processor-specific way of detecting the
* presence of the required support, therefore run-time detection is extremely
* OS specific.
*
* You may set the macro PNG_MIPS_MSA_FILE to the file name of file containing
* a fragment of C source code which defines the png_have_msa function. There
* are a number of implementations in contrib/mips-msa, but the only one that
* has partial support is contrib/mips-msa/linux.c - a generic Linux
* implementation which reads /proc/cpufino.
*/
#ifndef PNG_MIPS_MSA_FILE
# ifdef __linux__
# define PNG_MIPS_MSA_FILE "contrib/mips-msa/linux.c"
# endif
#endif
#ifdef PNG_MIPS_MSA_FILE
#include <signal.h> /* for sig_atomic_t */
static int png_have_msa(png_structp png_ptr);
#include PNG_MIPS_MSA_FILE
#else /* PNG_MIPS_MSA_FILE */
# error PNG_MIPS_MSA_FILE undefined: no support for run-time MIPS MSA checks
#endif /* PNG_MIPS_MSA_FILE */
#endif /* PNG_MIPS_MSA_CHECK_SUPPORTED */
#ifdef PNG_MIPS_MMI_CHECK_SUPPORTED /* Do MIPS MMI run-times checks */
#ifndef PNG_MIPS_MMI_FILE
# ifdef __linux__
# define PNG_MIPS_MMI_FILE "contrib/mips-mmi/linux.c"
# endif
#endif
#ifdef PNG_MIPS_MMI_FILE
#include <signal.h> /* for sig_atomic_t */
static int png_have_mmi();
#include PNG_MIPS_MMI_FILE
#else /* PNG_MIPS_MMI_FILE */
# error PNG_MIPS_MMI_FILE undefined: no support for run-time MIPS MMI checks
#endif /* PNG_MIPS_MMI_FILE */
#endif /* PNG_MIPS_MMI_CHECK_SUPPORTED*/
#ifndef PNG_ALIGNED_MEMORY_SUPPORTED
# error ALIGNED_MEMORY is required; please define PNG_ALIGNED_MEMORY_SUPPORTED
#endif
/* MIPS supports two optimizations: MMI and MSA. The appropriate
* optimization is chosen at runtime
*/
void
png_init_filter_functions_mips(png_structp pp, unsigned int bpp)
{
#if PNG_MIPS_MMI_IMPLEMENTATION > 0
#ifdef PNG_MIPS_MMI_API_SUPPORTED
switch ((pp->options >> PNG_MIPS_MMI) & 3)
{
case PNG_OPTION_UNSET:
#endif /* PNG_MIPS_MMI_API_SUPPORTED */
#ifdef PNG_MIPS_MMI_CHECK_SUPPORTED
{
static volatile sig_atomic_t no_mmi = -1; /* not checked */
if (no_mmi < 0)
no_mmi = !png_have_mmi();
if (no_mmi)
goto MIPS_MSA_INIT;
}
#ifdef PNG_MIPS_MMI_API_SUPPORTED
break;
#endif
#endif /* PNG_MIPS_MMI_CHECK_SUPPORTED */
#ifdef PNG_MIPS_MMI_API_SUPPORTED
default: /* OFF or INVALID */
goto MIPS_MSA_INIT;
case PNG_OPTION_ON:
/* Option turned on */
break;
}
#endif
pp->read_filter[PNG_FILTER_VALUE_UP-1] = png_read_filter_row_up_mmi;
if (bpp == 3)
{
pp->read_filter[PNG_FILTER_VALUE_SUB-1] = png_read_filter_row_sub3_mmi;
pp->read_filter[PNG_FILTER_VALUE_AVG-1] = png_read_filter_row_avg3_mmi;
pp->read_filter[PNG_FILTER_VALUE_PAETH-1] =
png_read_filter_row_paeth3_mmi;
}
else if (bpp == 4)
{
pp->read_filter[PNG_FILTER_VALUE_SUB-1] = png_read_filter_row_sub4_mmi;
pp->read_filter[PNG_FILTER_VALUE_AVG-1] = png_read_filter_row_avg4_mmi;
pp->read_filter[PNG_FILTER_VALUE_PAETH-1] =
png_read_filter_row_paeth4_mmi;
}
#endif /* PNG_MIPS_MMI_IMPLEMENTATION > 0 */
MIPS_MSA_INIT:
#if PNG_MIPS_MSA_IMPLEMENTATION == 1
/* The switch statement is compiled in for MIPS_MSA_API, the call to
* png_have_msa is compiled in for MIPS_MSA_CHECK. If both are defined
* the check is only performed if the API has not set the MSA option on
* or off explicitly. In this case the check controls what happens.
*/
#ifdef PNG_MIPS_MSA_API_SUPPORTED
switch ((pp->options >> PNG_MIPS_MSA) & 3)
{
case PNG_OPTION_UNSET:
/* Allow the run-time check to execute if it has been enabled -
* thus both API and CHECK can be turned on. If it isn't supported
* this case will fall through to the 'default' below, which just
* returns.
*/
#endif /* PNG_MIPS_MSA_API_SUPPORTED */
#ifdef PNG_MIPS_MSA_CHECK_SUPPORTED
{
static volatile sig_atomic_t no_msa = -1; /* not checked */
if (no_msa < 0)
no_msa = !png_have_msa(pp);
if (no_msa)
return;
}
#ifdef PNG_MIPS_MSA_API_SUPPORTED
break;
#endif
#endif /* PNG_MIPS_MSA_CHECK_SUPPORTED */
#ifdef PNG_MIPS_MSA_API_SUPPORTED
default: /* OFF or INVALID */
return;
case PNG_OPTION_ON:
/* Option turned on */
break;
}
#endif
/* IMPORTANT: any new external functions used here must be declared using
* PNG_INTERNAL_FUNCTION in ../pngpriv.h. This is required so that the
* 'prefix' option to configure works:
*
* ./configure --with-libpng-prefix=foobar_
*
* Verify you have got this right by running the above command, doing a build
* and examining pngprefix.h; it must contain a #define for every external
* function you add. (Notice that this happens automatically for the
* initialization function.)
*/
pp->read_filter[PNG_FILTER_VALUE_UP-1] = png_read_filter_row_up_msa;
if (bpp == 3)
{
pp->read_filter[PNG_FILTER_VALUE_SUB-1] = png_read_filter_row_sub3_msa;
pp->read_filter[PNG_FILTER_VALUE_AVG-1] = png_read_filter_row_avg3_msa;
pp->read_filter[PNG_FILTER_VALUE_PAETH-1] = png_read_filter_row_paeth3_msa;
}
else if (bpp == 4)
{
pp->read_filter[PNG_FILTER_VALUE_SUB-1] = png_read_filter_row_sub4_msa;
pp->read_filter[PNG_FILTER_VALUE_AVG-1] = png_read_filter_row_avg4_msa;
pp->read_filter[PNG_FILTER_VALUE_PAETH-1] = png_read_filter_row_paeth4_msa;
}
#endif /* PNG_MIPS_MSA_IMPLEMENTATION == 1 */
return;
}
#endif /* PNG_MIPS_MSA_IMPLEMENTATION == 1 || PNG_MIPS_MMI_IMPLEMENTATION > 0 */
#endif /* READ */ | c | github | https://github.com/opencv/opencv | 3rdparty/libpng/mips/mips_init.c |
# Dockers for `transformers`
In this folder you will find various docker files, and some subfolders.
- dockerfiles (ex: `consistency.dockerfile`) present under `~/docker` are used for our "fast" CIs. You should be able to use them for tasks that only need CPU. For example `torch-light` is a very light weights container (703MiB).
- subfolders contain dockerfiles used for our `slow` CIs, which *can* be used for GPU tasks, but they are **BIG** as they were not specifically designed for a single model / single task. Thus the `~/docker/transformers-pytorch-gpu` includes additional dependencies to allow us to run ALL model tests (say `librosa` or `tesseract`, which you do not need to run LLMs)
Note that in both case, you need to run `uv pip install -e .`, which should take around 5 seconds. We do it outside the dockerfile for the need of our CI: we checkout a new branch each time, and the `transformers` code is thus updated.
We are open to contribution, and invite the community to create dockerfiles with potential arguments that properly choose extras depending on the model's dependencies! :hugs: | unknown | github | https://github.com/huggingface/transformers | docker/README.md |
# -*- coding: utf-8 -*-
#
# Copyright 2015-2017 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of fiware-cygnus (FIWARE project).
#
# fiware-cygnus is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
# later version.
# fiware-cygnus is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with fiware-cygnus. If not, see
# http://www.gnu.org/licenses/.
#
# For those usages not covered by the GNU Affero General Public License please contact:
# iot_support at tid.es
#
__author__ = 'Iván Arias León (ivan.ariasleon at telefonica dot com)'
import time
from lettuce import world, after, before
import tools.general_utils
@before.all
def before_all_scenarios():
"""
actions before all scenario
:param scenario:
"""
world.test_time_init = time.strftime("%c")
world.background_executed = False # used to that background will be executed only once in each feature
@before.each_scenario
def before_each_scenario(scenario):
"""
actions before each scenario
:param scenario:
"""
pass
@after.each_scenario
def after_each_scenario(scenario):
"""
actions after each scenario
:param scenario:
"""
pass
@after.all
def after_all_scenarios(scenario):
"""
Actions after all scenarios
Show the initial and final time of the tests completed
Delete all cygnus instances files and cygnus services is stopped
:param scenario:
"""
world.cygnus.cygnus_service("stop")
world.cygnus.delete_cygnus_instances_files()
tools.general_utils.show_times(world.test_time_init) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monitors instrument the training process.
@@get_default_monitors
@@BaseMonitor
@@CaptureVariable
@@CheckpointSaver
@@EveryN
@@ExportMonitor
@@GraphDump
@@LoggingTrainable
@@NanLoss
@@PrintTensor
@@StepCounter
@@StopAtStep
@@SummarySaver
@@ValidationMonitor
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import time
import numpy as np
import six
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import summary_io
from tensorflow.python.util import deprecation
# TODO(ptucker): Split each monitor class into a separate file.
# TODO(ptucker): Fail if epoch or step does not monotonically increase?
class BaseMonitor(object):
"""Base class for Monitors.
Defines basic interfaces of Monitors.
Monitors can either be run on all workers or, more commonly, restricted
to run exclusively on the elected chief worker.
"""
@deprecation.deprecated(
"2016-12-05",
"Monitors are deprecated. Please use tf.train.SessionRunHook.")
def __init__(self):
self._begun = False
self._current_epoch = None
self._current_step = None
self._max_steps = None
self._estimator = None
@property
def run_on_all_workers(self):
return False
def set_estimator(self, estimator):
"""A setter called automatically by the target estimator.
If the estimator is locked, this method does nothing.
Args:
estimator: the estimator that this monitor monitors.
Raises:
ValueError: if the estimator is None.
"""
if estimator is None:
raise ValueError("Missing estimator.")
# TODO(mdan): This should fail if called twice with the same estimator.
self._estimator = estimator
def begin(self, max_steps=None):
"""Called at the beginning of training.
When called, the default graph is the one we are executing.
Args:
max_steps: `int`, the maximum global step this training will run until.
Raises:
ValueError: if we've already begun a run.
"""
if self._begun:
raise ValueError("begin called twice without end.")
self._max_steps = max_steps
self._begun = True
def end(self, session=None):
"""Callback at the end of training/evaluation.
Args:
session: A `tf.Session` object that can be used to run ops.
Raises:
ValueError: if we've not begun a run.
"""
_ = session
if not self._begun:
raise ValueError("end called without begin.")
self._max_steps = None
self._begun = False
def epoch_begin(self, epoch):
"""Begin epoch.
Args:
epoch: `int`, the epoch number.
Raises:
ValueError: if we've already begun an epoch, or `epoch` < 0.
"""
if self._current_epoch is not None:
raise ValueError("epoch_begin called twice without epoch_end.")
if epoch < 0:
raise ValueError("Invalid epoch %s." % epoch)
self._current_epoch = epoch
def epoch_end(self, epoch):
"""End epoch.
Args:
epoch: `int`, the epoch number.
Raises:
ValueError: if we've not begun an epoch, or `epoch` number does not match.
"""
if self._current_epoch != epoch:
raise ValueError(
"epoch_end expected %s but got %s.", self._current_epoch, epoch)
self._current_epoch = None
def step_begin(self, step):
"""Callback before training step begins.
You may use this callback to request evaluation of additional tensors
in the graph.
Args:
step: `int`, the current value of the global step.
Returns:
List of `Tensor` objects or string tensor names to be run.
Raises:
ValueError: if we've already begun a step, or `step` < 0, or
`step` > `max_steps`.
"""
if (step < 0) or (
(self._max_steps is not None) and (step > self._max_steps)):
raise ValueError("Invalid step %s." % step)
self._current_step = step
return []
def step_end(self, step, output):  # pylint: disable=unused-argument
  """Callback after training step finished.

  This callback provides access to the tensors/ops evaluated at this step,
  including the additional tensors for which evaluation was requested in
  `step_begin`.

  In addition, the callback has the opportunity to stop training by returning
  `True`. This is useful for early stopping, for example.

  Note that this method is not called if the call to `Session.run()` that
  followed the last call to `step_begin()` failed.

  Args:
    step: `int`, the current value of the global step.
    output: `dict` mapping `string` values representing tensor names to
      the value resulted from running these tensors. Values may be either
      scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.

  Returns:
    `bool`. True if training should stop.

  Raises:
    ValueError: if we've not begun a step, or `step` number does not match.
  """
  if self._current_step != step:
    # Fix: the arguments were previously passed to ValueError as extra
    # positional args (logging style), so the message was never formatted.
    raise ValueError(
        "step_end expected %s but got %s." % (self._current_step, step))
  self._current_step = None
  return False
def post_step(self, step, session):  # pylint: disable=unused-argument
  """Callback after the step is finished.

  Called after step_end and receives session to perform extra session.run
  calls. If failure occurred in the process, will be called as well.

  Args:
    step: `int`, global step of the model.
    session: `Session` object.
  """
  _ = step, session  # No-op in the base class; subclasses may override.
def _extract_output(outputs, request):
if request in outputs:
return outputs[request]
return outputs[request.name]
class EveryN(BaseMonitor):
  """Base class for monitors that execute callbacks every N steps.

  This class adds three new callbacks:
    - every_n_step_begin
    - every_n_step_end
    - every_n_post_step

  The callbacks are executed every n steps, or optionally every step for the
  first m steps, where m and n can both be user-specified.

  When extending this class, note that if you wish to use any of the
  `BaseMonitor` callbacks, you must call their respective super implementation:

    def step_begin(self, step):
      super(ExampleMonitor, self).step_begin(step)
      return []

  Failing to call the super implementation will cause unpredictable behavior.

  The `every_n_post_step()` callback is also called after the last step if it
  was not already called through the regular conditions.  Note that
  `every_n_step_begin()` and `every_n_step_end()` do not receive that special
  treatment.
  """

  # TODO(ipolosukhin): Add also every n seconds.
  def __init__(self, every_n_steps=100, first_n_steps=1):
    """Initializes an `EveryN` monitor.

    Args:
      every_n_steps: `int`, the number of steps to allow between callbacks.
      first_n_steps: `int`, specifying the number of initial steps during
        which the callbacks will always be executed, regardless of the value
        of `every_n_steps`. Note that this value is relative to the global step
    """
    super(EveryN, self).__init__()
    self._every_n_steps = every_n_steps
    self._first_n_steps = first_n_steps
    # Last step in the model.
    self._last_successful_step = None
    # Last step at which we called one of the every_n methods
    self._last_active_step = 0
    # Whether the current step triggered the every_n callbacks; decided in
    # step_begin and consumed by step_end/post_step.
    self._every_n_step_begin_called = False

  def every_n_step_begin(self, step):  # pylint: disable=unused-argument
    """Callback before every n'th step begins.

    Args:
      step: `int`, the current value of the global step.

    Returns:
      A `list` of tensors that will be evaluated at this step.
    """
    return []

  def every_n_step_end(self, step, outputs):  # pylint: disable=unused-argument
    """Callback after every n'th step finished.

    This callback provides access to the tensors/ops evaluated at this step,
    including the additional tensors for which evaluation was requested in
    `step_begin`.

    In addition, the callback has the opportunity to stop training by returning
    `True`. This is useful for early stopping, for example.

    Args:
      step: `int`, the current value of the global step.
      outputs: `dict` mapping `string` values representing tensor names to
        the value resulted from running these tensors. Values may be either
        scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.

    Returns:
      `bool`. True if training should stop.
    """
    return False

  def every_n_post_step(self, step, session):
    """Callback after a step is finished or `end()` is called.

    Args:
      step: `int`, the current value of the global step.
      session: `Session` object.
    """
    pass

  def step_begin(self, step):
    """Overrides `BaseMonitor.step_begin`.

    When overriding this method, you must call the super implementation.

    Args:
      step: `int`, the current value of the global step.
    Returns:
      A `list`, the result of every_n_step_begin, if that was called this step,
      or an empty list otherwise.

    Raises:
      ValueError: if called more than once during a step.
    """
    super(EveryN, self).step_begin(step)
    # Activate on: initial first_n steps, every_n interval elapsed since the
    # last activation, or the final step of the run.
    if (step <= self._first_n_steps or
        step >= (self._every_n_steps + self._last_active_step) or
        step == self._max_steps):  # Note: max_steps can be None here.
      self._every_n_step_begin_called = True
      return self.every_n_step_begin(step)
    self._every_n_step_begin_called = False
    return []

  def step_end(self, step, output):
    """Overrides `BaseMonitor.step_end`.

    When overriding this method, you must call the super implementation.

    Args:
      step: `int`, the current value of the global step.
      output: `dict` mapping `string` values representing tensor names to
        the value resulted from running these tensors. Values may be either
        scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.
    Returns:
      `bool`, the result of every_n_step_end, if that was called this step,
      or `False` otherwise.
    """
    super(EveryN, self).step_end(step, output)
    if self._every_n_step_begin_called:
      return self.every_n_step_end(step, output)
    return False

  def post_step(self, step, session):
    super(EveryN, self).post_step(step, session)
    if self._every_n_step_begin_called:
      self.every_n_post_step(step, session)
      # Record the activation only here, so a failed Session.run (which skips
      # post_step) does not advance the every_n schedule.
      self._last_active_step = step
    self._last_successful_step = step

  def end(self, session=None):
    super(EveryN, self).end(session=session)
    # Guarantee every_n_post_step fires for the final successful step even if
    # it did not fall on an every_n boundary.
    if self._last_successful_step != self._last_active_step:
      self.every_n_post_step(self._last_successful_step, session)
class StopAtStep(BaseMonitor):
  """Monitor to request stop at a specified step."""

  def __init__(self, num_steps=None, last_step=None):
    """Create a StopAtStep monitor.

    Exactly one of the two arguments must be supplied. If `num_steps` is
    given, it is the number of steps to execute after `begin()` is called;
    if `last_step` is given, it is the absolute last step to execute, as
    passed to the `step_begin()` call.

    Args:
      num_steps: Number of steps to execute.
      last_step: Step after which to stop.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    super(StopAtStep, self).__init__()
    if num_steps is None and last_step is None:
      raise ValueError("One of num_steps or last_step must be specified.")
    if num_steps is not None and last_step is not None:
      raise ValueError("Only one of num_steps or last_step can be specified.")
    self._num_steps = num_steps
    self._last_step = last_step

  @property
  def run_on_all_workers(self):
    # Stopping must be observed by every worker, not just the chief.
    return True

  def step_begin(self, step):
    super(StopAtStep, self).step_begin(step)
    if self._last_step is None:
      # First call: resolve the relative step count into an absolute step.
      self._last_step = step + self._num_steps - 1
    return []

  def step_end(self, step, output):
    super(StopAtStep, self).step_end(step, output)
    return step >= self._last_step
# TODO(ptucker): Rename to LoggingTensor since it's not writing to stdout.
class PrintTensor(EveryN):
  """Prints given tensors every N steps.

  This is an `EveryN` monitor and has consistent semantic for `every_n`
  and `first_n`.

  The tensors will be printed to the log, with `INFO` severity.
  """

  def __init__(self, tensor_names, every_n=100, first_n=1):
    """Initializes a PrintTensor monitor.

    Args:
      tensor_names: `dict` of tag to tensor names or
          `iterable` of tensor names (strings).
      every_n: `int`, print every N steps. See `PrintN.`
      first_n: `int`, also print the first N steps. See `PrintN.`
    """
    super(PrintTensor, self).__init__(every_n, first_n)
    if not isinstance(tensor_names, dict):
      # Plain iterable of names: use each name as its own tag.
      tensor_names = {name: name for name in tensor_names}
    self._tensor_names = tensor_names

  def every_n_step_begin(self, step):
    super(PrintTensor, self).every_n_step_begin(step)
    return list(self._tensor_names.values())

  def every_n_step_end(self, step, outputs):
    super(PrintTensor, self).every_n_step_end(step, outputs)
    stats = ["%s = %s" % (tag, str(_extract_output(outputs, name)))
             for tag, name in six.iteritems(self._tensor_names)
             if name in outputs]
    logging.info("Step %d: %s", step, ", ".join(stats))
class LoggingTrainable(EveryN):
  """Writes trainable variable values into log every N steps.

  Write the tensors in trainable variables `every_n` steps,
  starting with the `first_n`th step.
  """

  def __init__(self, scope=None, every_n=100, first_n=1):
    """Initializes LoggingTrainable monitor.

    Args:
      scope: An optional string to match variable names using re.match.
      every_n: Print every N steps.
      first_n: Print first N steps.
    """
    super(LoggingTrainable, self).__init__(every_n, first_n)
    self._scope = scope

  def every_n_step_begin(self, step):
    super(LoggingTrainable, self).every_n_step_begin(step)
    # Look up the trainable variables lazily: they cannot be collected in
    # __init__ because train_op has not been generated at that point.
    trainables = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES,
                                    scope=self._scope)
    self._names = {var.name: var.value().name for var in trainables}
    return list(self._names.values())

  def every_n_step_end(self, step, outputs):
    super(LoggingTrainable, self).every_n_step_end(step, outputs)
    stats = ["%s = %s" % (tag, str(_extract_output(outputs, name)))
             for tag, name in six.iteritems(self._names)
             if name in outputs]
    logging.info("Logging Trainable: Step %d: %s", step, ", ".join(stats))
class SummarySaver(EveryN):
  """Saves summaries every N steps."""

  def __init__(self,
               summary_op,
               save_steps=100,
               output_dir=None,
               summary_writer=None,
               scaffold=None):
    """Initializes a `SummarySaver` monitor.

    Args:
      summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
          buffer, as output by TF summary methods like `summary.scalar` or
          `summary.merge_all`.
      save_steps: `int`, save summaries every N steps. See `EveryN`.
      output_dir: `string`, the directory to save the summaries to. Only used
          if no `summary_writer` is supplied.
      summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
          one will be created accordingly.
      scaffold: `Scaffold` to get summary_op if it's not provided.
    """
    # TODO(ipolosukhin): Implement every N seconds.
    super(SummarySaver, self).__init__(every_n_steps=save_steps)
    self._summary_op = summary_op
    self._scaffold = scaffold
    if summary_writer is None and output_dir:
      self._summary_writer = summary_io.SummaryWriter(output_dir)
    else:
      self._summary_writer = summary_writer
    # TODO(mdan): Throw an error if output_dir and summary_writer are None.

  def set_estimator(self, estimator):
    super(SummarySaver, self).set_estimator(estimator)
    # TODO(mdan): This line looks redundant.
    if self._summary_writer is None:
      self._summary_writer = summary_io.SummaryWriter(estimator.model_dir)

  def every_n_step_begin(self, step):
    super(SummarySaver, self).every_n_step_begin(step)
    if self._summary_op is None and self._scaffold is not None:
      # Fall back to the scaffold's summary op when none was given directly.
      self._summary_op = self._scaffold.summary_op
    if self._summary_op is None:
      return []
    return [self._summary_op]

  def every_n_step_end(self, step, outputs):
    super(SummarySaver, self).every_n_step_end(step, outputs)
    if self._summary_op is not None:
      summary_strs = _extract_output(outputs, self._summary_op)
      if self._summary_writer:
        self._summary_writer.add_summary(summary_strs, step)
    return False

  def end(self, session=None):
    super(SummarySaver, self).end(session=session)
    if self._summary_writer:
      self._summary_writer.flush()
class ValidationMonitor(EveryN):
  """Runs evaluation of a given estimator, at most every N steps.

  Note that the evaluation is done based on the saved checkpoint, which will
  usually be older than the current step.

  Can do early stopping on validation metrics if `early_stopping_rounds` is
  provided.
  """

  def __init__(self, x=None, y=None, input_fn=None, batch_size=None,
               eval_steps=None,
               every_n_steps=100, metrics=None, hooks=None,
               early_stopping_rounds=None,
               early_stopping_metric="loss",
               early_stopping_metric_minimize=True, name=None):
    """Initializes a ValidationMonitor.

    Args:
      x: See `BaseEstimator.evaluate`.
      y: See `BaseEstimator.evaluate`.
      input_fn: See `BaseEstimator.evaluate`.
      batch_size: See `BaseEstimator.evaluate`.
      eval_steps: See `BaseEstimator.evaluate`.
      every_n_steps: Check for new checkpoints to evaluate every N steps. If a
          new checkpoint is found, it is evaluated. See `EveryN`.
      metrics: See `BaseEstimator.evaluate`.
      hooks: A list of `SessionRunHook` hooks to pass to the
        `Estimator`'s `evaluate` function.
      early_stopping_rounds: `int`. If the metric indicated by
          `early_stopping_metric` does not change according to
          `early_stopping_metric_minimize` for this many steps, then training
          will be stopped.
      early_stopping_metric: `string`, name of the metric to check for early
          stopping.
      early_stopping_metric_minimize: `bool`, True if `early_stopping_metric` is
          expected to decrease (thus early stopping occurs when this metric
          stops decreasing), False if `early_stopping_metric` is expected to
          increase. Typically, `early_stopping_metric_minimize` is True for
          loss metrics like mean squared error, and False for performance
          metrics like accuracy.
      name: See `BaseEstimator.evaluate`.

    Raises:
      ValueError: If both x and input_fn are provided.
    """
    super(ValidationMonitor, self).__init__(every_n_steps=every_n_steps,
                                            first_n_steps=-1)
    # TODO(mdan): Checks like this are already done by evaluate.
    if x is None and input_fn is None:
      raise ValueError("Either x or input_fn should be provided.")
    self.x = x
    self.y = y
    self.input_fn = input_fn
    self.batch_size = batch_size
    self.eval_steps = eval_steps
    self.metrics = metrics
    self.hooks = hooks
    self.early_stopping_rounds = early_stopping_rounds
    self.early_stopping_metric = early_stopping_metric
    self.early_stopping_metric_minimize = early_stopping_metric_minimize
    self.name = name
    # Early-stopping bookkeeping.
    self._best_value_step = None
    self._best_value = None
    self._early_stopped = False
    # Tracks the last evaluated checkpoint so the same one is not re-run.
    self._latest_path = None
    self._latest_path_step = None

  @property
  def early_stopped(self):
    """Returns True if this monitor caused an early stop."""
    return self._early_stopped

  @property
  def best_step(self):
    """Returns the step at which the best early stopping metric was found."""
    return self._best_value_step

  @property
  def best_value(self):
    """Returns the best early stopping metric value found so far."""
    return self._best_value

  def every_n_step_end(self, step, outputs):
    super(ValidationMonitor, self).every_n_step_end(step, outputs)
    # TODO(mdan): The use of step below is probably misleading.
    # The code should probably use the step from the checkpoint, because
    # that's what is being evaluated.
    if self._estimator is None:
      raise ValueError("Missing call to set_estimator.")
    # Check that we are not running evaluation on the same checkpoint.
    latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
    if latest_path is None:
      logging.debug("Skipping evaluation since model has not been saved yet "
                    "at step %d.", step)
      return False
    if latest_path is not None and latest_path == self._latest_path:
      logging.debug("Skipping evaluation due to same checkpoint %s for step %d "
                    "as for step %d.", latest_path, step,
                    self._latest_path_step)
      return False
    self._latest_path = latest_path
    self._latest_path_step = step

    # Run evaluation and log it.
    validation_outputs = self._estimator.evaluate(
        x=self.x, y=self.y, input_fn=self.input_fn, batch_size=self.batch_size,
        steps=self.eval_steps, metrics=self.metrics, hooks=self.hooks,
        name=self.name)
    stats = []
    for name in validation_outputs:
      stats.append("%s = %s" % (name, str(validation_outputs[name])))
    logging.info("Validation (step %d): %s", step, ", ".join(stats))

    # Early stopping logic.
    if self.early_stopping_rounds is not None:
      if self.early_stopping_metric not in validation_outputs:
        raise ValueError("Metric %s missing from outputs %s." % (
            self.early_stopping_metric, set(validation_outputs.keys())))
      current_value = validation_outputs[self.early_stopping_metric]
      # Update the best value if the metric improved in the desired direction.
      if (self._best_value is None or (self.early_stopping_metric_minimize and
                                       (current_value < self._best_value)) or
          (not self.early_stopping_metric_minimize and
           (current_value > self._best_value))):
        self._best_value = current_value
        self._best_value_step = step
      # Stop when no improvement has been seen for early_stopping_rounds steps.
      stop_now = (step - self._best_value_step >= self.early_stopping_rounds)
      if stop_now:
        logging.info("Stopping. Best step: {} with {} = {}."
                     .format(self._best_value_step,
                             self.early_stopping_metric, self._best_value))
        self._early_stopped = True
        return True
    return False
# TODO(ptucker): This really reads any tensor, not just vars, and requires the
# ':0' suffix on var_name.
class CaptureVariable(EveryN):
  """Captures a variable's values into a collection.

  This monitor is useful for unit testing. You should exercise caution when
  using this monitor in production, since it never discards values.

  This is an `EveryN` monitor and has consistent semantic for `every_n`
  and `first_n`.
  """

  def __init__(self, var_name, every_n=100, first_n=1):
    """Initializes a CaptureVariable monitor.

    Args:
      var_name: `string`. The variable name, including suffix (typically ":0").
      every_n: `int`, print every N steps. See `PrintN.`
      first_n: `int`, also print the first N steps. See `PrintN.`
    """
    super(CaptureVariable, self).__init__(every_n, first_n)
    self._var_name = var_name
    # Maps step number -> captured value; grows without bound.
    self._var_values = {}

  @property
  def values(self):
    """Returns the values captured so far.

    Returns:
      `dict` mapping `int` step numbers to that values of the variable at the
          respective step.
    """
    return self._var_values

  def every_n_step_begin(self, step):
    super(CaptureVariable, self).every_n_step_begin(step)
    return [self._var_name]

  def every_n_step_end(self, step, outputs):
    super(CaptureVariable, self).every_n_step_end(step, outputs)
    self._var_values[step] = _extract_output(outputs, self._var_name)
def get_default_monitors(loss_op=None, summary_op=None, save_summary_steps=100,
                         output_dir=None, summary_writer=None):
  """Returns a default set of typically-used monitors.

  Args:
    loss_op: `Tensor`, the loss tensor. This will be printed using `PrintTensor`
        at the default interval.
    summary_op: See `SummarySaver`.
    save_summary_steps: See `SummarySaver`.
    output_dir: See `SummarySaver`.
    summary_writer: See `SummarySaver`.
  Returns:
    `list` of monitors.
  """
  default_monitors = []
  if loss_op is not None:
    default_monitors.append(PrintTensor(tensor_names={"loss": loss_op.name}))
  if summary_op is not None:
    saver_monitor = SummarySaver(summary_op,
                                 save_steps=save_summary_steps,
                                 output_dir=output_dir,
                                 summary_writer=summary_writer)
    default_monitors.append(saver_monitor)
  return default_monitors
class GraphDump(BaseMonitor):
  """Dumps almost all tensors in the graph at every step.

  Note, this is very expensive, prefer `PrintTensor` in production.
  """

  IGNORE_OPS = ["Const", "Assign", "Identity", "Placeholder",
                "RandomUniform", "Cast", "RestoreSlice"]

  def __init__(self, ignore_ops=None):
    """Initializes GraphDump monitor.

    Args:
      ignore_ops: `list` of `string`. Names of ops to ignore.
          If None, `GraphDump.IGNORE_OPS` is used.
    """
    super(GraphDump, self).__init__()
    self._ignore_ops = ignore_ops or GraphDump.IGNORE_OPS
    # Maps step number -> dict of tensor name to value.
    self._data = {}

  def begin(self, max_steps=None):
    super(GraphDump, self).begin(max_steps=max_steps)
    self._tensors = []
    graph = ops.get_default_graph()
    graph_def = graph.as_graph_def()
    for node in graph_def.node:
      if node.op in self._ignore_ops:
        continue
      logging.info("op=%s name=%s.", node.op, node.name)
      try:
        self._tensors.append(graph.get_tensor_by_name(node.name + ":0"))
      except KeyError:
        # Node has no output tensor named ":0"; skip it.
        pass

  def step_begin(self, step):
    super(GraphDump, self).step_begin(step)
    return self._tensors

  def step_end(self, step, output):
    super(GraphDump, self).step_end(step, output)
    self._data[step] = output

  @property
  def data(self):
    return self._data

  # TODO(ptucker): Handle keys that are in one but not the other.
  def compare(self, other_dump, step, atol=1e-06):
    """Compares two `GraphDump` monitors and returns differences.

    Args:
      other_dump: Another `GraphDump` monitor.
      step: `int`, step to compare on.
      atol: `float`, absolute tolerance in comparison of floating arrays.

    Returns:
      Returns tuple:
        matched: `list` of keys that matched.
        non_matched: `dict` of keys to tuple of 2 mismatched values.

    Raises:
      ValueError: if a key in `data` is missing from `other_dump` at `step`.
    """
    non_matched = {}
    matched = []
    this_output = self.data[step] if step in self.data else {}
    other_output = other_dump.data[step] if step in other_dump.data else {}
    for key in this_output:
      # Fix: use six.string_types instead of the Python-2-only `unicode`
      # name, so this also works under Python 3 (six covers both cases).
      if not isinstance(key, six.string_types):
        continue
      if key not in other_output:
        # Fix: interpolate the message; previously the args were passed to
        # ValueError as a tuple and the message was never formatted.
        raise ValueError("%s missing at step %s." % (key, step))
      value1 = _extract_output(this_output, key)
      value2 = _extract_output(other_output, key)
      if isinstance(value1, str):
        continue
      if isinstance(value1, np.ndarray):
        if not np.allclose(value1, value2, atol=atol):
          non_matched[key] = value1 - value2
        else:
          matched.append(key)
      else:
        if value1 != value2:
          non_matched[key] = (value1, value2)
        else:
          matched.append(key)
    return matched, non_matched
class ExportMonitor(EveryN):
  """Monitor that exports Estimator every N steps."""

  @deprecated("2017-03-25",
              "ExportMonitor is deprecated. Please pass an "
              "ExportStrategy to Experiment instead.")
  def __init__(self,
               every_n_steps,
               export_dir,
               input_fn=None,
               input_feature_key=None,
               exports_to_keep=5,
               signature_fn=None,
               default_batch_size=1):
    """Initializes ExportMonitor.

    Args:
      every_n_steps: Run monitor every N steps.
      export_dir: str, folder to export.
      input_fn: A function that takes no argument and returns a tuple of
        (features, labels), where features is a dict of string key to `Tensor`
        and labels is a `Tensor` that's currently not used (and so can be
        `None`).
      input_feature_key: String key into the features dict returned by
        `input_fn` that corresponds to the raw `Example` strings `Tensor` that
        the exported model will take as input. Should be `None` if and only if
        you're passing in a `signature_fn` that does not use the first arg
        (`Tensor` of `Example` strings).
      exports_to_keep: int, number of exports to keep.
      signature_fn: Function that returns a default signature and a named
        signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
        for features and `dict` of `Tensor`s for predictions.
      default_batch_size: Default batch size of the `Example` placeholder.

    Raises:
      ValueError: If `input_fn` and `input_feature_key` are not both defined or
        are not both `None`.
    """
    super(ExportMonitor, self).__init__(every_n_steps=every_n_steps)
    self._export_dir = export_dir
    self._input_fn = input_fn
    self._input_feature_key = input_feature_key
    # Legacy path: when no input_fn is given, fall back to deprecated export.
    self._use_deprecated_input_fn = input_fn is None
    self._exports_to_keep = exports_to_keep
    self._signature_fn = signature_fn
    self._default_batch_size = default_batch_size
    self._last_export_dir = None

  @property
  def export_dir(self):
    return self._export_dir

  @property
  def exports_to_keep(self):
    return self._exports_to_keep

  @property
  def signature_fn(self):
    return self._signature_fn

  @property
  def last_export_dir(self):
    """Returns the directory containing the last completed export.

    Returns:
      The string path to the exported directory. NB: this functionality was
        added on 2016/09/25; clients that depend on the return value may need
        to handle the case where this function returns None because the
        estimator being fitted does not yet return a value during export.
    """
    return self._last_export_dir

  def every_n_step_end(self, step, outputs):
    super(ExportMonitor, self).every_n_step_end(step, outputs)
    try:
      self._last_export_dir = self._estimator.export(
          self.export_dir,
          exports_to_keep=self.exports_to_keep,
          signature_fn=self.signature_fn,
          input_fn=self._input_fn,
          default_batch_size=self._default_batch_size,
          input_feature_key=self._input_feature_key,
          use_deprecated_input_fn=self._use_deprecated_input_fn)
    except RuntimeError:
      # Currently we are not syncronized with saving checkpoints, which leads to
      # runtime errors when we are calling export on the same global step.
      # Exports depend on saved checkpoints for constructing the graph and
      # getting the global step from the graph instance saved in the checkpoint.
      # If the checkpoint is stale with respect to current step, the global step
      # is taken to be the last saved checkpoint's global step and exporter
      # doesn't export the same checkpoint again with the following error.
      logging.info("Skipping exporting because the existing checkpoint has "
                   "already been exported. "
                   "Consider exporting less frequently.")

  def end(self, session=None):
    super(ExportMonitor, self).end(session=session)
    latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
    if latest_path is None:
      logging.info("Skipping export at the end since model has not been saved "
                   "yet.")
      return
    try:
      self._last_export_dir = self._estimator.export(
          self.export_dir,
          exports_to_keep=self.exports_to_keep,
          signature_fn=self.signature_fn,
          input_fn=self._input_fn,
          default_batch_size=self._default_batch_size,
          input_feature_key=self._input_feature_key,
          use_deprecated_input_fn=self._use_deprecated_input_fn)
    except RuntimeError:
      # See the comment in every_n_step_end: export and checkpointing are not
      # synchronized, so the same step may already have been exported.
      logging.info("Skipping exporting for the same step.")
class CheckpointSaver(BaseMonitor):
  """Saves checkpoints every N steps or N seconds."""

  def __init__(self,
               checkpoint_dir,
               save_secs=None,
               save_steps=None,
               saver=None,
               checkpoint_basename="model.ckpt",
               scaffold=None):
    """Initialize CheckpointSaver monitor.

    Args:
      checkpoint_dir: `str`, base directory for the checkpoint files.
      save_secs: `int`, save every N secs.
      save_steps: `int`, save every N steps.
      saver: `Saver` object, used for saving.
      checkpoint_basename: `str`, base name for the checkpoint files.
      scaffold: `Scaffold`, use to get saver object.

    Raises:
      ValueError: If both `save_steps` and `save_secs` are not `None`.
      ValueError: If both `save_steps` and `save_secs` are `None`.
    """
    logging.info("Create CheckpointSaver.")
    super(CheckpointSaver, self).__init__()
    self._saver = saver
    self._summary_writer = SummaryWriterCache.get(checkpoint_dir)
    self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
    self._scaffold = scaffold
    self._save_secs = save_secs
    self._save_steps = save_steps
    self._last_saved_time = None
    self._last_begin_step = None
    self._last_saved_step = None

    # Exactly one of the two cadences must be configured.
    if save_steps is None and save_secs is None:
      raise ValueError("Either save_steps or save_secs should be provided")
    if (save_steps is not None) and (save_secs is not None):
      raise ValueError("Can not provide both save_steps and save_secs.")

  def begin(self, max_steps=None):
    super(CheckpointSaver, self).begin(max_steps)
    # Reset save bookkeeping for a fresh run.
    self._last_saved_time = None
    self._last_begin_step = None
    self._last_saved_step = None

  def step_begin(self, step):
    super(CheckpointSaver, self).step_begin(step)
    self._last_begin_step = step

  def post_step(self, step, session):
    super(CheckpointSaver, self).post_step(step, session)
    if self._last_saved_time is None:
      # First step after begin(): always checkpoint.
      self._save(step, session)

    if self._save_steps is not None:
      if step >= self._last_saved_step + self._save_steps:
        self._save(step, session)

    if self._save_secs is not None:
      if time.time() >= self._last_saved_time + self._save_secs:
        self._save(step, session)

  def end(self, session=None):
    super(CheckpointSaver, self).end(session)
    self._save(self._last_begin_step, session)

  def _save(self, step, session):
    """Saves the latest checkpoint."""
    # Skip if this step was already checkpointed (e.g. end() after post_step).
    if step == self._last_saved_step:
      return
    logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
    self._last_saved_time = time.time()
    self._last_saved_step = step
    if self._saver is None:
      self._scaffold.saver.save(session, self._save_path, global_step=step)
    else:
      self._saver.save(session, self._save_path, global_step=step)
    self._summary_writer.add_session_log(
        SessionLog(
            status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
        step)
class StepCounter(EveryN):
  """Steps per second monitor."""

  def __init__(self, every_n_steps=100, output_dir=None,
               summary_writer=None):
    super(StepCounter, self).__init__(every_n_steps=every_n_steps)
    self._summary_tag = "global_step/sec"
    self._last_reported_step = None
    self._last_reported_time = None
    self._summary_writer = summary_writer
    if summary_writer is None and output_dir:
      self._summary_writer = SummaryWriterCache.get(output_dir)

  def set_estimator(self, estimator):
    super(StepCounter, self).set_estimator(estimator)
    if self._summary_writer is None:
      self._summary_writer = SummaryWriterCache.get(estimator.model_dir)

  def every_n_step_end(self, current_step, outputs):
    now = time.time()
    # Nothing to report on the very first activation (no previous timestamp).
    if self._last_reported_time is not None and self._summary_writer:
      elapsed_steps = current_step - self._last_reported_step
      elapsed_secs = now - self._last_reported_time
      steps_per_sec = elapsed_steps / elapsed_secs
      summary = Summary(value=[Summary.Value(tag=self._summary_tag,
                                             simple_value=steps_per_sec)])
      self._summary_writer.add_summary(summary, current_step)
    self._last_reported_step = current_step
    self._last_reported_time = now
class NanLossDuringTrainingError(RuntimeError):
  """Raised (or logged) by `NanLoss` when the training loss becomes NaN."""

  def __str__(self):
    return "NaN loss during training."
class NanLoss(EveryN):
  """NaN Loss monitor.

  Monitors loss and stops training if loss is NaN.
  Can either fail with exception or just stop training.
  """

  def __init__(self, loss_tensor, every_n_steps=100, fail_on_nan_loss=True):
    """Initializes NanLoss monitor.

    Args:
      loss_tensor: `Tensor`, the loss tensor.
      every_n_steps: `int`, run check every this many steps.
      fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
    """
    super(NanLoss, self).__init__(every_n_steps=every_n_steps)
    self._loss_tensor = loss_tensor
    self._fail_on_nan_loss = fail_on_nan_loss

  def every_n_step_begin(self, step):
    super(NanLoss, self).every_n_step_begin(step)
    return [self._loss_tensor]

  def every_n_step_end(self, step, outputs):
    super(NanLoss, self).every_n_step_end(step, outputs)
    loss_value = _extract_output(outputs, self._loss_tensor)
    if not np.isnan(loss_value):
      return
    failure_message = "Model diverged with loss = NaN."
    if self._fail_on_nan_loss:
      logging.error(failure_message)
      raise NanLossDuringTrainingError
    logging.warning(failure_message)
    # We don't raise an error but we return "should stop" so we stop, but
    # without an exception.
    return True
class RunHookAdapterForMonitors(session_run_hook.SessionRunHook):
  """Wraps monitors into a SessionRunHook."""

  def __init__(self, monitors):
    self._monitors = monitors

  def begin(self):
    self._last_step = None
    self._global_step_tensor = contrib_variables.get_global_step()
    for m in self._monitors:
      m.begin(max_steps=None)

  def before_run(self, run_context):
    if self._last_step is None:
      # Derive the upcoming step number from the current global step value.
      self._last_step = run_context.session.run(self._global_step_tensor) + 1

    request = {self._global_step_tensor: self._global_step_tensor}
    monitor_fetches = []
    for m in self._monitors:
      monitor_requests = m.step_begin(self._last_step)
      if monitor_requests:
        if not isinstance(monitor_requests, list):
          raise ValueError("Monitor.step_begin should return a list.")
        monitor_fetches.extend(monitor_requests)
    if monitor_fetches:
      # Resolve names/tensors to graph elements so they can be fetched.
      request["monitors"] = dict(
          zip(monitor_fetches, [_as_graph_element(f) for f in monitor_fetches]))

    return session_run_hook.SessionRunArgs(request)

  def after_run(self, run_context, run_values):
    result = run_values.results[
        "monitors"] if "monitors" in run_values.results else {}
    for m in self._monitors:
      induce_stop = m.step_end(self._last_step, result)
      if induce_stop:
        run_context.request_stop()

    for m in self._monitors:
      m.post_step(self._last_step, run_context.session)

    self._last_step = run_values.results[self._global_step_tensor] + 1

  def end(self, session):
    self._last_step = None
    for m in self._monitors:
      # NOTE(review): inspect.getargspec is removed in Python 3.11+; kept here
      # for Python 2 compatibility — confirm target runtime before changing.
      if "session" in inspect.getargspec(m.end).args:
        m.end(session=session)
      else:
        m.end()
def replace_monitors_with_hooks(monitors_or_hooks, estimator):
    """Wraps monitors with a hook.

    `Monitor` is deprecated in favor of `SessionRunHook`. If you're using a
    monitor, you can wrap it with a hook using function. It is recommended to
    implement hook version of your monitor.

    Args:
      monitors_or_hooks: A `list` may contain both monitors and hooks.
      estimator: An `Estimator` that monitor will be used with.

    Returns:
      Returns a list of hooks. If there is any monitor in the given list, it is
      replaced by a hook.
    """
    items = monitors_or_hooks or []

    # Partition the input into real hooks and legacy monitors.
    hooks = []
    legacy_monitors = []
    for item in items:
        if isinstance(item, session_run_hook.SessionRunHook):
            hooks.append(item)
        else:
            legacy_monitors.append(item)

    # Non-chief workers may only run monitors explicitly marked as safe for
    # all workers.
    if not estimator.config.is_chief:
        legacy_monitors = [m for m in legacy_monitors if m.run_on_all_workers]

    # Setup monitors.
    for monitor in legacy_monitors:
        monitor.set_estimator(estimator)

    if legacy_monitors:
        hooks.append(RunHookAdapterForMonitors(legacy_monitors))

    return hooks
def _as_graph_element(obj):
    """Resolve `obj` to an element of the current default graph.

    Strings are looked up by name ("name" means output 0); non-strings must
    already belong to the current graph. Raises ValueError for graph
    mismatches or ambiguous multi-output operation names.
    """
    graph = ops.get_default_graph()

    if not isinstance(obj, six.string_types):
        # Non-string objects must already be elements of the current graph.
        if not hasattr(obj, "graph") or obj.graph != graph:
            raise ValueError("Passed %s should have graph attribute that is equal "
                             "to current graph %s." % (obj, graph))
        return obj

    # A name without an output index refers to output 0.
    name = obj if ":" in obj else obj + ":0"
    element = graph.as_graph_element(name)

    # Reject bare names that are ambiguous because the op has multiple
    # outputs (":1" also resolving means there are at least two).
    try:
        graph.as_graph_element(obj + ":1")
    except (KeyError, ValueError):
        pass
    else:
        raise ValueError("Name %s is ambiguous, "
                         "as this `Operation` has multiple outputs "
                         "(at least 2)." % obj)
    return element
""" generic datetimelike tests """
import pytest
import pandas as pd
from .common import Base
import pandas.util.testing as tm
class DatetimeLike(Base):
    """Shared tests for datetime-like indexes (Datetime/Timedelta/Period);
    concrete fixtures (`create_index`, `index`, `_holder`) come from the
    `Base` subclass.
    """

    def test_shift_identity(self):
        # Shifting by zero periods must return an equal index.
        idx = self.create_index()
        tm.assert_index_equal(idx, idx.shift(0))

    def test_str(self):
        # test the string repr
        idx = self.create_index()
        idx.name = 'foo'
        # The repr must not include the length, but must include the name
        # and the concrete class name.
        assert not "length=%s" % len(idx) in str(idx)
        assert "'foo'" in str(idx)
        assert idx.__class__.__name__ in str(idx)

        # tz/freq only exist on some index types; include them when present.
        if hasattr(idx, 'tz'):
            if idx.tz is not None:
                assert idx.tz in str(idx)
        if hasattr(idx, 'freq'):
            assert "freq='%s'" % idx.freqstr in str(idx)

    def test_view(self, indices):
        super(DatetimeLike, self).test_view(indices)

        i = self.create_index()

        # Reconstructing from the index itself round-trips to an equal index,
        # regardless of an intervening i8 view.
        i_view = i.view('i8')
        result = self._holder(i)
        tm.assert_index_equal(result, i)

        # Viewing as the holder class must also compare equal.
        i_view = i.view(self._holder)
        result = self._holder(i)
        tm.assert_index_equal(result, i_view)

    def test_map_callable(self):
        expected = self.index + 1
        result = self.index.map(lambda x: x + 1)
        tm.assert_index_equal(result, expected)

        # map to NaT
        result = self.index.map(lambda x: pd.NaT if x == self.index[0] else x)
        expected = pd.Index([pd.NaT] + self.index[1:].tolist())
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize(
        "mapper",
        [
            lambda values, index: {i: e for e, i in zip(values, index)},
            lambda values, index: pd.Series(values, index)])
    def test_map_dictlike(self, mapper):
        # Mapping through a dict or a Series must behave like the callable
        # version above.
        expected = self.index + 1

        # don't compare the freqs
        if isinstance(expected, pd.DatetimeIndex):
            expected.freq = None

        result = self.index.map(mapper(expected, self.index))
        tm.assert_index_equal(result, expected)

        expected = pd.Index([pd.NaT] + self.index[1:].tolist())
        result = self.index.map(mapper(expected, self.index))
        tm.assert_index_equal(result, expected)

        # empty map; these map to np.nan because we cannot know
        # to re-infer things
        expected = pd.Index([pd.NaT] * len(self.index))
        result = self.index.map(mapper([], []))
        tm.assert_index_equal(result, expected)
class MyType {};
// CHECK: #include <a.h>
// CHECK-NEXT: #include <b.h>
// CHECK-NEXT: class MyType {};
// NOTE(review): lit/FileCheck input for clang-apply-replacements
// (ignore-conflict test). The CHECK block verifies the two headers get
// inserted directly above MyType; keep any new text below the CHECK
// block so the consecutive-match chain is not broken.
class Solution:
    def areConnected(self, n: int, threshold: int, queries: List[List[int]]) -> List[bool]:
        """For each query (u, v), report whether u and v are connected when
        every pair sharing a common divisor strictly greater than `threshold`
        is joined by an edge.

        Cities are grouped by sweeping each base in (threshold, n) and
        merging all of its multiples into one group; two cities are
        connected iff they ended up with the same non-zero group label.
        """
        label = [0] * (n + 1)   # 0 means "not yet in any group"
        members = {}            # group id -> list of member cities
        next_id = 1

        def merge(src, dst):
            # Relabel every city of group `src` and fold it into `dst`.
            if src == dst:
                return
            for city in members[src]:
                label[city] = dst
            members[dst].extend(members.pop(src))

        for base in range(threshold + 1, n):
            gid = next_id
            next_id += 1
            members[gid] = []
            for city in range(base, n + 1, base):
                if label[city] == 0:
                    label[city] = gid
                    members[gid].append(city)
                else:
                    merge(label[city], gid)

        # Chained comparison: same group AND that group is a real one.
        return [label[u] == label[v] != 0 for u, v in queries]
/* Center the tab widget inside the host and set the base font. */
:host {
  font-family: var(--inter-font);
  display: flex;
  justify-content: center;
}

/* Rounded container for the whole tab group. */
[ngTabs] {
  overflow: hidden;
  width: 600px;
  border-radius: 0.5rem;
  background-color: color-mix(in srgb, var(--bright-blue) 5%, transparent);
}

/* Horizontal tab strip; position: relative anchors the .bottom-border. */
[ngTabList] {
  padding: 0;
  display: flex;
  list-style: none;
  position: relative;
  border-bottom: 1px solid color-mix(in srgb, var(--primary-contrast) 20%, transparent);
}

/* Individual tab headers share the row equally. */
[ngTab] {
  flex: 1;
  outline: none;
  padding: 1rem 0;
  cursor: pointer;
  text-align: center;
  color: color-mix(in srgb, var(--primary-contrast) 60%, transparent);
}

/* Keyboard focus ring drawn inside the tab. */
[ngTab]:focus {
  outline-offset: -8px;
  border-radius: 0.7rem;
  outline: 2px solid var(--bright-blue);
}

[ngTab]:hover {
  background-color: color-mix(in srgb, var(--primary-contrast) 5%, transparent);
}

/* Selected state is driven by aria-selected, not a class. */
[ngTab][aria-selected='true'] {
  color: var(--bright-blue);
}

/* Animated underline; one third wide because there are three tabs. */
.bottom-border {
  position: absolute;
  pointer-events: none;
  left: 0;
  bottom: 0;
  height: 3px;
  width: calc(100% / 3);
  background-color: var(--bright-blue);
  transition: all 0.2s ease-in-out;
  transform: translateX(0%);
  border-top-left-radius: 2px;
  border-top-right-radius: 2px;
}

/* Slide the underline to whichever sibling tab is selected. */
[ngTab]:nth-child(1)[aria-selected='true'] ~ .bottom-border {
  transform: translateX(0%);
}
[ngTab]:nth-child(2)[aria-selected='true'] ~ .bottom-border {
  transform: translateX(100%);
}
[ngTab]:nth-child(3)[aria-selected='true'] ~ .bottom-border {
  transform: translateX(200%);
}

/* Panels sit side by side in a strip three times the container width. */
.sliding-window {
  width: 300%;
  display: flex;
  transition: all 0.2s ease-in-out;
}

/* Slide the panel strip based on which tab is selected (:has lookup). */
[ngTabList]:has([ngTab]:nth-child(1)[aria-selected='true']) ~ .sliding-window {
  transform: translateX(0%);
}
[ngTabList]:has([ngTab]:nth-child(2)[aria-selected='true']) ~ .sliding-window {
  transform: translateX(-33.333%);
}
[ngTabList]:has([ngTab]:nth-child(3)[aria-selected='true']) ~ .sliding-window {
  transform: translateX(-66.666%);
}

/* Each panel fills one third of the strip and centers its content. */
[ngTabPanel] {
  display: grid;
  place-items: center;
  padding: 1rem;
  min-height: 100px;
  flex: 1;
}

[ngTabPanel]:focus {
  outline-offset: -4px;
  border-radius: 0.5rem;
  outline: 2px solid var(--bright-blue);
}
it("should allow to export via exports", () => {
expect(require("./assign-exports-property?1").abc).toBe("abc");
expect(require("./assign-exports-property?2")).toEqual({
abc: "abc",
def: "def"
});
});
it("should allow to export via module.exports", () => {
expect(require("./assign-module-exports-property?1").abc).toBe("abc");
expect(require("./assign-module-exports-property?2")).toEqual({
abc: "abc",
def: "def"
});
});
it("should allow to export via this", () => {
expect(require("./assign-this-property?1").abc).toBe("abc");
expect(require("./assign-this-property?2")).toEqual({
abc: "abc",
def: "def"
});
});
it("should allow to export via define property on exports", () => {
expect(require("./define-exports-property?1").abc).toBe("abc");
expect(require("./define-exports-property?2")).toEqual({
abc: "abc",
def: "def"
});
});
it("should allow to export via define property on module.exports", () => {
expect(require("./define-module-exports-property?1").abc).toBe("abc");
expect(require("./define-module-exports-property?2")).toEqual({
abc: "abc",
def: "def"
});
});
it("should allow to export via define property on this", () => {
expect(require("./define-this-property?1").abc).toBe("abc");
expect(require("./define-this-property?2")).toEqual({
abc: "abc",
def: "def"
});
});
it("should allow to read own exports via exports", () => {
var test = require("./reading-self-from-exports").test;
expect(test()).toBe("abc");
});
it("should allow to read own exports via module.exports", () => {
var test = require("./reading-self-from-module-exports").test;
expect(test()).toBe("abc");
});
it("should allow to read own exports via this", () => {
var test = require("./reading-self-from-this").test;
expect(test()).toBe("abc");
});
it("should allow to attach exports to object", () => {
expect(require("./attach-to-object?1").abc).toBe("abc");
expect(require("./attach-to-object?2").def).toBe("def");
expect(require("./attach-to-object?3").abc).toBe("abc");
expect(require("./attach-to-object?3").def).toBe("def");
});
it("should allow to attach exports to function", () => {
expect(require("./attach-to-function?1")()).toBe("abc");
expect(require("./attach-to-function?2").def).toBe("def");
expect(require("./attach-to-function?3")()).toBe("abc");
expect(require("./attach-to-function?3").def).toBe("def");
});
it("should allow to attach exports to arrow function", () => {
expect(require("./attach-to-arrow-function?1")()).toBe("abc");
expect(require("./attach-to-arrow-function?2").def).toBe("def");
expect(require("./attach-to-arrow-function?3")()).toBe("abc");
expect(require("./attach-to-arrow-function?3").def).toBe("def");
});
it("should properly handle export / require `default`", () => {
expect(require("./require-default").moduleExportsDefault).toBe("hello");
expect(require("./require-default").hello1).toBe("hello");
expect(require("./require-default").hello2).toBe("hello");
expect(require("./require-default").hello3).toBe("hello");
expect(require("./require-default").hello4).toBe("hello");
expect(require("./require-default").hello5).toBe("hello");
expect(require("./require-default").hello6).toBe("hello");
expect(require("./require-default").hello7).toBe("hello");
expect(require("./require-default").hello8).toBe("hello");
}); | javascript | github | https://github.com/webpack/webpack | test/cases/cjs-tree-shaking/exports/index.js |
#!/usr/bin/env python
# -*- python-mode -*-
"""Emulate iostat for NFS mount points using /proc/self/mountstats
"""
__copyright__ = """
Copyright (C) 2005, Chuck Lever <cel@netapp.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, os, time
Iostats_version = '0.2'
def difference(x, y):
    """Return ``x - y``.

    Passed as the callable to ``map()`` when subtracting one counter list
    from another element-wise.
    """
    return x - y
# Counter names found on the 'events:' line of /proc/self/mountstats,
# listed in the order the kernel emits them (the parser indexes by
# position, so this order must not change).
NfsEventCounters = [
    'inoderevalidates',
    'dentryrevalidates',
    'datainvalidates',
    'attrinvalidates',
    'vfsopen',
    'vfslookup',
    'vfspermission',
    'vfsupdatepage',
    'vfsreadpage',
    'vfsreadpages',
    'vfswritepage',
    'vfswritepages',
    'vfsreaddir',
    'vfssetattr',
    'vfsflush',
    'vfsfsync',
    'vfslock',
    'vfsrelease',
    'congestionwait',
    'setattrtrunc',
    'extendwrite',
    'sillyrenames',
    'shortreads',
    'shortwrites',
    'delay'
]

# Counter names found on the 'bytes:' line, also in kernel emission order.
NfsByteCounters = [
    'normalreadbytes',
    'normalwritebytes',
    'directreadbytes',
    'directwritebytes',
    'serverreadbytes',
    'serverwritebytes',
    'readpages',
    'writepages'
]
class DeviceData:
    """DeviceData objects provide methods for parsing and displaying
    data for a single mount grabbed from /proc/self/mountstats

    NOTE(review): this is Python 2 code (print statements, `long`,
    `has_key`, `iteritems`); keep it on a py2 interpreter.
    """

    def __init__(self):
        # NFS-level stats (mount info, event/byte counters) and RPC-level
        # stats are kept in separate dicts; __rpc_data['ops'] lists the
        # per-op keys seen so far.
        self.__nfs_data = dict()
        self.__rpc_data = dict()
        self.__rpc_data['ops'] = []

    def __parse_nfs_line(self, words):
        # Dispatch on the line's leading keyword; each branch fills the
        # matching slice of __nfs_data. Field offsets follow the
        # /proc/self/mountstats layout.
        if words[0] == 'device':
            self.__nfs_data['export'] = words[1]
            self.__nfs_data['mountpoint'] = words[4]
            self.__nfs_data['fstype'] = words[7]
            if words[7] == 'nfs':
                self.__nfs_data['statvers'] = words[8]
        elif words[0] == 'age:':
            self.__nfs_data['age'] = long(words[1])
        elif words[0] == 'opts:':
            self.__nfs_data['mountoptions'] = ''.join(words[1:]).split(',')
        elif words[0] == 'caps:':
            self.__nfs_data['servercapabilities'] = ''.join(words[1:]).split(',')
        elif words[0] == 'nfsv4:':
            self.__nfs_data['nfsv4flags'] = ''.join(words[1:]).split(',')
        elif words[0] == 'sec:':
            keys = ''.join(words[1:]).split(',')
            self.__nfs_data['flavor'] = int(keys[0].split('=')[1])
            self.__nfs_data['pseudoflavor'] = 0
            # Flavor 6 carries an extra pseudoflavor field (presumably
            # RPCSEC_GSS) -- confirm against the kernel format.
            if self.__nfs_data['flavor'] == 6:
                self.__nfs_data['pseudoflavor'] = int(keys[1].split('=')[1])
        elif words[0] == 'events:':
            # Positional counters; order defined by NfsEventCounters.
            i = 1
            for key in NfsEventCounters:
                self.__nfs_data[key] = int(words[i])
                i += 1
        elif words[0] == 'bytes:':
            i = 1
            for key in NfsByteCounters:
                self.__nfs_data[key] = long(words[i])
                i += 1

    def __parse_rpc_line(self, words):
        if words[0] == 'RPC':
            self.__rpc_data['statsvers'] = float(words[3])
            self.__rpc_data['programversion'] = words[5]
        elif words[0] == 'xprt:':
            # Transport line; the field layout differs per protocol.
            self.__rpc_data['protocol'] = words[1]
            if words[1] == 'udp':
                self.__rpc_data['port'] = int(words[2])
                self.__rpc_data['bind_count'] = int(words[3])
                self.__rpc_data['rpcsends'] = int(words[4])
                self.__rpc_data['rpcreceives'] = int(words[5])
                self.__rpc_data['badxids'] = int(words[6])
                self.__rpc_data['inflightsends'] = long(words[7])
                self.__rpc_data['backlogutil'] = long(words[8])
            elif words[1] == 'tcp':
                self.__rpc_data['port'] = words[2]
                self.__rpc_data['bind_count'] = int(words[3])
                self.__rpc_data['connect_count'] = int(words[4])
                self.__rpc_data['connect_time'] = int(words[5])
                self.__rpc_data['idle_time'] = int(words[6])
                self.__rpc_data['rpcsends'] = int(words[7])
                self.__rpc_data['rpcreceives'] = int(words[8])
                self.__rpc_data['badxids'] = int(words[9])
                self.__rpc_data['inflightsends'] = long(words[10])
                self.__rpc_data['backlogutil'] = long(words[11])
            elif words[1] == 'rdma':
                self.__rpc_data['port'] = words[2]
                self.__rpc_data['bind_count'] = int(words[3])
                self.__rpc_data['connect_count'] = int(words[4])
                self.__rpc_data['connect_time'] = int(words[5])
                self.__rpc_data['idle_time'] = int(words[6])
                self.__rpc_data['rpcsends'] = int(words[7])
                self.__rpc_data['rpcreceives'] = int(words[8])
                self.__rpc_data['badxids'] = int(words[9])
                self.__rpc_data['backlogutil'] = int(words[10])
                self.__rpc_data['read_chunks'] = int(words[11])
                self.__rpc_data['write_chunks'] = int(words[12])
                self.__rpc_data['reply_chunks'] = int(words[13])
                self.__rpc_data['total_rdma_req'] = int(words[14])
                self.__rpc_data['total_rdma_rep'] = int(words[15])
                self.__rpc_data['pullup'] = int(words[16])
                self.__rpc_data['fixup'] = int(words[17])
                self.__rpc_data['hardway'] = int(words[18])
                self.__rpc_data['failed_marshal'] = int(words[19])
                self.__rpc_data['bad_reply'] = int(words[20])
        elif words[0] == 'per-op':
            self.__rpc_data['per-op'] = words
        else:
            # Any other line is a per-op counter row: "OPNAME: n n n ...".
            op = words[0][:-1]
            self.__rpc_data['ops'] += [op]
            self.__rpc_data[op] = [long(word) for word in words[1:]]

    def parse_stats(self, lines):
        """Turn a list of lines from a mount stat file into a
        dictionary full of stats, keyed by name
        """
        # Lines before the 'RPC' header are NFS-level; everything after
        # (including the header itself) is RPC-level.
        found = False
        for line in lines:
            words = line.split()
            if len(words) == 0:
                continue
            if (not found and words[0] != 'RPC'):
                self.__parse_nfs_line(words)
                continue

            found = True
            self.__parse_rpc_line(words)

    def is_nfs_mountpoint(self):
        """Return True if this is an NFS or NFSv4 mountpoint,
        otherwise return False
        """
        if self.__nfs_data['fstype'] == 'nfs':
            return True
        elif self.__nfs_data['fstype'] == 'nfs4':
            return True
        return False

    def compare_iostats(self, old_stats):
        """Return the difference between two sets of stats
        """
        # Accessing old_stats.__rpc_data works because name mangling is
        # per-class, and old_stats is the same class.
        result = DeviceData()

        # copy self into result
        for key, value in self.__nfs_data.iteritems():
            result.__nfs_data[key] = value
        for key, value in self.__rpc_data.iteritems():
            result.__rpc_data[key] = value

        # compute the difference of each item in the list
        # note the copy loop above does not copy the lists, just
        # the reference to them.  so we build new lists here
        # for the result object.
        for op in result.__rpc_data['ops']:
            result.__rpc_data[op] = map(difference, self.__rpc_data[op], old_stats.__rpc_data[op])

        # update the remaining keys we care about
        result.__rpc_data['rpcsends'] -= old_stats.__rpc_data['rpcsends']
        result.__rpc_data['backlogutil'] -= old_stats.__rpc_data['backlogutil']

        for key in NfsEventCounters:
            result.__nfs_data[key] -= old_stats.__nfs_data[key]
        for key in NfsByteCounters:
            result.__nfs_data[key] -= old_stats.__nfs_data[key]

        return result

    def __print_data_cache_stats(self):
        """Print the data cache hit rate
        """
        nfs_stats = self.__nfs_data
        app_bytes_read = float(nfs_stats['normalreadbytes'])
        if app_bytes_read != 0:
            client_bytes_read = float(nfs_stats['serverreadbytes'] - nfs_stats['directreadbytes'])
            ratio = ((app_bytes_read - client_bytes_read) * 100) / app_bytes_read

            print
            print 'app bytes: %f  client bytes %f' % (app_bytes_read, client_bytes_read)
            print 'Data cache hit ratio: %4.2f%%' % ratio

    def __print_attr_cache_stats(self, sample_time):
        """Print attribute cache efficiency stats
        """
        nfs_stats = self.__nfs_data
        getattr_stats = self.__rpc_data['GETATTR']

        if nfs_stats['inoderevalidates'] != 0:
            getattr_ops = float(getattr_stats[1])
            opens = float(nfs_stats['vfsopen'])
            # Opens force a GETATTR, so exclude them from revalidations.
            revalidates = float(nfs_stats['inoderevalidates']) - opens
            if revalidates != 0:
                ratio = ((revalidates - getattr_ops) * 100) / revalidates
            else:
                ratio = 0.0

            data_invalidates = float(nfs_stats['datainvalidates'])
            # NOTE(review): attr_invalidates is computed but never printed.
            attr_invalidates = float(nfs_stats['attrinvalidates'])

            print
            print '%d inode revalidations, hitting in cache %4.2f%% of the time' % \
                (revalidates, ratio)
            print '%d open operations (mandatory GETATTR requests)' % opens
            if getattr_ops != 0:
                print '%4.2f%% of GETATTRs resulted in data cache invalidations' % \
                   ((data_invalidates * 100) / getattr_ops)

    def __print_dir_cache_stats(self, sample_time):
        """Print directory stats
        """
        nfs_stats = self.__nfs_data
        lookup_ops = self.__rpc_data['LOOKUP'][0]
        readdir_ops = self.__rpc_data['READDIR'][0]
        # READDIRPLUS only exists on NFSv3 mounts.
        if self.__rpc_data.has_key('READDIRPLUS'):
            readdir_ops += self.__rpc_data['READDIRPLUS'][0]

        dentry_revals = nfs_stats['dentryrevalidates']
        opens = nfs_stats['vfsopen']
        lookups = nfs_stats['vfslookup']
        getdents = nfs_stats['vfsreaddir']

        print
        print '%d open operations (pathname lookups)' % opens
        print '%d dentry revalidates and %d vfs lookup requests' % \
            (dentry_revals, lookups),
        print 'resulted in %d LOOKUPs on the wire' % lookup_ops
        print '%d vfs getdents calls resulted in %d READDIRs on the wire' % \
            (getdents, readdir_ops)

    def __print_page_stats(self, sample_time):
        """Print page cache stats
        """
        nfs_stats = self.__nfs_data

        vfsreadpage = nfs_stats['vfsreadpage']
        vfsreadpages = nfs_stats['vfsreadpages']
        pages_read = nfs_stats['readpages']
        vfswritepage = nfs_stats['vfswritepage']
        vfswritepages = nfs_stats['vfswritepages']
        pages_written = nfs_stats['writepages']

        print
        # nfs_readpage reads one page per call, hence the same value twice.
        print '%d nfs_readpage() calls read %d pages' % \
            (vfsreadpage, vfsreadpage)
        print '%d nfs_readpages() calls read %d pages' % \
            (vfsreadpages, pages_read - vfsreadpage),
        if vfsreadpages != 0:
            print '(%.1f pages per call)' % \
                (float(pages_read - vfsreadpage) / vfsreadpages)
        else:
            print

        print
        print '%d nfs_updatepage() calls' % nfs_stats['vfsupdatepage']
        print '%d nfs_writepage() calls wrote %d pages' % \
            (vfswritepage, vfswritepage)
        print '%d nfs_writepages() calls wrote %d pages' % \
            (vfswritepages, pages_written - vfswritepage),
        if (vfswritepages) != 0:
            print '(%.1f pages per call)' % \
                (float(pages_written - vfswritepage) / vfswritepages)
        else:
            print

        congestionwaits = nfs_stats['congestionwait']
        if congestionwaits != 0:
            print
            print '%d congestion waits' % congestionwaits

    def __print_rpc_op_stats(self, op, sample_time):
        """Print generic stats for one RPC op
        """
        if not self.__rpc_data.has_key(op):
            return

        # Per-op counter layout (by index): 0 ops, 1 transmissions,
        # 3 bytes sent, 4 bytes received, 6 cumulative RTT ms,
        # 7 cumulative execute-time ms -- presumably matches the kernel's
        # per-op mountstats row; confirm against nfs-utils docs.
        rpc_stats = self.__rpc_data[op]
        ops = float(rpc_stats[0])
        retrans = float(rpc_stats[1] - rpc_stats[0])
        kilobytes = float(rpc_stats[3] + rpc_stats[4]) / 1024
        rtt = float(rpc_stats[6])
        exe = float(rpc_stats[7])

        # prevent floating point exceptions
        if ops != 0:
            kb_per_op = kilobytes / ops
            retrans_percent = (retrans * 100) / ops
            rtt_per_op = rtt / ops
            exe_per_op = exe / ops
        else:
            kb_per_op = 0.0
            retrans_percent = 0.0
            rtt_per_op = 0.0
            exe_per_op = 0.0

        op += ':'
        print '%s' % op.lower().ljust(15),
        print '  ops/s\t\t   kB/s\t\t  kB/op\t\tretrans\t\tavg RTT (ms)\tavg exe (ms)'

        print '\t\t%7.3f' % (ops / sample_time),
        print '\t%7.3f' % (kilobytes / sample_time),
        print '\t%7.3f' % kb_per_op,
        print ' %7d (%3.1f%%)' % (retrans, retrans_percent),
        print '\t%7.3f' % rtt_per_op,
        print '\t%7.3f' % exe_per_op

    def display_iostats(self, sample_time, which):
        """Display NFS and RPC stats in an iostat-like way
        """
        sends = float(self.__rpc_data['rpcsends'])
        # A zero sample_time means "since mount": use the mount's age.
        if sample_time == 0:
            sample_time = float(self.__nfs_data['age'])

        if sends != 0:
            backlog = (float(self.__rpc_data['backlogutil']) / sends) / sample_time
        else:
            backlog = 0.0

        print
        print '%s mounted on %s:' % \
            (self.__nfs_data['export'], self.__nfs_data['mountpoint'])
        print

        print '   op/s\t\trpc bklog'
        print '%7.2f' % (sends / sample_time),
        print '\t%7.2f' % backlog

        # `which` selects the report flavor: 0 I/O, 1 attr cache,
        # 2 directories, 3 page cache.
        if which == 0:
            self.__print_rpc_op_stats('READ', sample_time)
            self.__print_rpc_op_stats('WRITE', sample_time)
        elif which == 1:
            self.__print_rpc_op_stats('GETATTR', sample_time)
            self.__print_rpc_op_stats('ACCESS', sample_time)
            self.__print_attr_cache_stats(sample_time)
        elif which == 2:
            self.__print_rpc_op_stats('LOOKUP', sample_time)
            self.__print_rpc_op_stats('READDIR', sample_time)
            if self.__rpc_data.has_key('READDIRPLUS'):
                self.__print_rpc_op_stats('READDIRPLUS', sample_time)
            self.__print_dir_cache_stats(sample_time)
        elif which == 3:
            self.__print_rpc_op_stats('READ', sample_time)
            self.__print_rpc_op_stats('WRITE', sample_time)
            self.__print_page_stats(sample_time)
#
# Functions
#
def print_iostat_help(name):
    """Print the usage/help text for the iostat-like command.

    `name` is the program name shown in the usage line. (Python 2 print
    statements.)
    """
    print 'usage: %s [ <interval> [ <count> ] ] [ <options> ] [ <mount point> ] ' % name
    print
    print ' Version %s' % Iostats_version
    print
    print ' Sample iostat-like program to display NFS client per-mount statistics.'
    print
    print ' The <interval> parameter specifies the amount of time in seconds between'
    print ' each report.  The first report contains statistics for the time since each'
    print ' file system was mounted.  Each subsequent report contains statistics'
    print ' collected during the interval since the previous report.'
    print
    print ' If the <count> parameter is specified, the value of <count> determines the'
    print ' number of reports generated at <interval> seconds apart.  If the interval'
    print ' parameter is specified without the <count> parameter, the command generates'
    print ' reports continuously.'
    print
    print ' Options include "--attr", which displays statistics related to the attribute'
    print ' cache, "--dir", which displays statistics related to directory operations,'
    print ' and "--page", which displays statistics related to the page cache.'
    print ' By default, if no option is specified, statistics related to file I/O are'
    print ' displayed.'
    print
    print ' If one or more <mount point> names are specified, statistics for only these'
    print ' mount points will be displayed.  Otherwise, all NFS mount points on the'
    print ' client are listed.'
def parse_stats_file(filename):
    """Parse a mountstats file into a dictionary keyed by mount point.

    Each value is the list of stripped lines belonging to that mount
    point, starting with its 'device' header line.

    Bug fixes versus the original:
    - the original called ``f.close`` without parentheses, so the file
      handle was never actually closed; a ``with`` block now guarantees it;
    - ``new`` is initialized so a stray line before the first 'device'
      header no longer raises NameError (it is collected under the
      placeholder key '').
    """
    ms_dict = dict()
    key = ''
    new = []
    with open(filename) as f:
        for line in f.readlines():
            words = line.split()
            if len(words) == 0:
                continue
            if words[0] == 'device':
                # Start a new section; words[4] is the mount point in
                # "device <export> mounted on <mountpoint> with fstype ...".
                key = words[4]
                new = [line.strip()]
            else:
                new += [line.strip()]
            ms_dict[key] = new
    return ms_dict
def print_iostat_summary(old, new, devices, time, ac):
    """Emit one iostat report per device.

    When `old` is provided, each report shows the delta between the two
    samples; otherwise it shows cumulative stats since mount.
    """
    for device in devices:
        current = DeviceData()
        current.parse_stats(new[device])
        if old:
            previous = DeviceData()
            previous.parse_stats(old[device])
            current.compare_iostats(previous).display_iostats(time, ac)
        else:
            current.display_iostats(time, ac)
def iostat_command(name):
    """iostat-like command for NFS mount points

    Parses sys.argv directly. Positional arguments are interpreted in
    order: anything matching a known mount point is a device filter;
    otherwise the first unmatched arg is <interval> and the second is
    <count>. (Python 2 print statements.)
    """
    mountstats = parse_stats_file('/proc/self/mountstats')
    devices = []
    which = 0
    interval_seen = False
    count_seen = False

    for arg in sys.argv:
        if arg in ['-h', '--help', 'help', 'usage']:
            print_iostat_help(name)
            return

        if arg in ['-v', '--version', 'version']:
            print '%s version %s' % (name, Iostats_version)
            return

        # Report-flavor options map onto display_iostats()'s `which` arg.
        if arg in ['-a', '--attr']:
            which = 1
            continue

        if arg in ['-d', '--dir']:
            which = 2
            continue

        if arg in ['-p', '--page']:
            which = 3
            continue

        # Skip the program name itself.
        if arg == sys.argv[0]:
            continue

        if arg in mountstats:
            devices += [arg]
        elif not interval_seen:
            # NOTE(review): int() raises ValueError (uncaught) for a
            # non-numeric arg that is not a mount point.
            interval = int(arg)
            if interval > 0:
                interval_seen = True
            else:
                print 'Illegal <interval> value'
                return
        elif not count_seen:
            count = int(arg)
            if count > 0:
                count_seen = True
            else:
                print 'Illegal <count> value'
                return

    # make certain devices contains only NFS mount points
    if len(devices) > 0:
        check = []
        for device in devices:
            stats = DeviceData()
            stats.parse_stats(mountstats[device])
            if stats.is_nfs_mountpoint():
                check += [device]
        devices = check
    else:
        # No explicit devices: report on every NFS mount point found.
        for device, descr in mountstats.iteritems():
            stats = DeviceData()
            stats.parse_stats(descr)
            if stats.is_nfs_mountpoint():
                devices += [device]
    if len(devices) == 0:
        print 'No NFS mount points were found'
        return

    old_mountstats = None
    sample_time = 0.0

    # No interval: print one cumulative report and exit.
    if not interval_seen:
        print_iostat_summary(old_mountstats, mountstats, devices, sample_time, which)
        return

    if count_seen:
        while count != 0:
            print_iostat_summary(old_mountstats, mountstats, devices, sample_time, which)
            old_mountstats = mountstats
            time.sleep(interval)
            sample_time = interval
            mountstats = parse_stats_file('/proc/self/mountstats')
            count -= 1
    else:
        # No count: loop forever (until KeyboardInterrupt upstream).
        while True:
            print_iostat_summary(old_mountstats, mountstats, devices, sample_time, which)
            old_mountstats = mountstats
            time.sleep(interval)
            sample_time = interval
            mountstats = parse_stats_file('/proc/self/mountstats')
#
# Main
#
# Script entry point: run the command once, translating Ctrl-C into
# exit status 1 and a clean message instead of a traceback.
prog = os.path.basename(sys.argv[0])

try:
    iostat_command(prog)
except KeyboardInterrupt:
    print 'Caught ^C... exiting'
    sys.exit(1)

sys.exit(0)
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test the ability to create a rpm package from a explicit target name.
"""
import TestSCons

_python_ = TestSCons._python_

test = TestSCons.TestSCons()
scons = test.program

# The test needs the rpm tool; skip cleanly on hosts without it.
rpm = test.Environment().WhereIs('rpm')
if not rpm:
    test.skip_test('rpm not found, skipping test\n')

rpm_build_root = test.workpath('rpm_build_root')

test.subdir('src')

# Minimal program to package.
test.write( [ 'src', 'main.c' ], r"""
int main( int argc, char* argv[] )
{
return 0;
}
""")

# SConstruct passes an explicit `target` to Package() for an rpm package,
# which is expected to be rejected.
test.write('SConstruct', """
import os

env=Environment(tools=['default', 'packaging'])
env.Prepend(RPM = 'TAR_OPTIONS=--wildcards ')
env.Append(RPMFLAGS = r' --buildroot %(rpm_build_root)s')
prog = env.Install( '/bin/' , Program( 'src/main.c') )

env.Alias( 'install', prog )

env.Package( NAME        = 'foo',
             VERSION     = '1.2.3',
             PACKAGEVERSION = 0,
             PACKAGETYPE = 'rpm',
             LICENSE     = 'gpl',
             SUMMARY     = 'balalalalal',
             X_RPM_GROUP = 'Application/fu',
             X_RPM_INSTALL = r'%(_python_)s %(scons)s --debug=tree --install-sandbox="$RPM_BUILD_ROOT" "$RPM_BUILD_ROOT"',
             DESCRIPTION = 'this should be really really long',
             source      = [ prog ],
             target      = "my_rpm_package.rpm",
             SOURCE_URL  = 'http://foo.org/foo-1.2.3.tar.gz'
            )
""" % locals())

# The build must fail with exactly this diagnostic, pointing at the
# Package() call (line 24 of the generated SConstruct).
expect = """
scons: *** Setting target is not supported for rpm.
""" + test.python_file_line(test.workpath('SConstruct'), 24)

test.run(arguments='', status=2, stderr=expect)

test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
# coding: utf-8
# pylint: disable=too-many-arguments, no-member
"""Functions for constructing recurrent neural networks."""
import warnings
from ..model import save_checkpoint, load_checkpoint
from .rnn_cell import BaseRNNCell
def rnn_unroll(cell, length, inputs=None, begin_state=None, input_prefix='', layout='NTC'):
    """Deprecated. Please use cell.unroll instead"""
    warnings.warn('rnn_unroll is deprecated. Please call cell.unroll directly.')
    # Thin forwarding shim: delegate everything to the cell's own unroll.
    unroll_kwargs = dict(length=length, inputs=inputs, begin_state=begin_state,
                         input_prefix=input_prefix, layout=layout)
    return cell.unroll(**unroll_kwargs)
def save_rnn_checkpoint(cells, prefix, epoch, symbol, arg_params, aux_params):
    """Save checkpoint for model using RNN cells.
    Unpacks weight before saving.

    Parameters
    ----------
    cells : RNNCell or list of RNNCells
        The RNN cells used by this symbol.
    prefix : str
        Prefix of model name.
    epoch : int
        The epoch number of the model.
    symbol : Symbol
        The input symbol
    arg_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's weights.
    aux_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's auxiliary states.

    Notes
    -----
    - ``prefix-symbol.json`` will be saved for symbol.
    - ``prefix-epoch.params`` will be saved for parameters.
    """
    # Accept a single cell or a list; normalize to a list.
    cell_list = [cells] if isinstance(cells, BaseRNNCell) else cells
    unpacked = arg_params
    for cell in cell_list:
        unpacked = cell.unpack_weights(unpacked)
    save_checkpoint(prefix, epoch, symbol, unpacked, aux_params)
def load_rnn_checkpoint(cells, prefix, epoch):
    """Load model checkpoint from file.
    Pack weights after loading.

    Parameters
    ----------
    cells : RNNCell or list of RNNCells
        The RNN cells used by this symbol.
    prefix : str
        Prefix of model name.
    epoch : int
        Epoch number of model we would like to load.

    Returns
    -------
    symbol : Symbol
        The symbol configuration of computation network.
    arg_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's weights.
    aux_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's auxiliary states.

    Notes
    -----
    - symbol will be loaded from ``prefix-symbol.json``.
    - parameters will be loaded from ``prefix-epoch.params``.
    """
    sym, arg, aux = load_checkpoint(prefix, epoch)
    # Accept a single cell or a list; normalize to a list, then let each
    # cell re-pack its own weights.
    cell_list = [cells] if isinstance(cells, BaseRNNCell) else cells
    for cell in cell_list:
        arg = cell.pack_weights(arg)
    return sym, arg, aux
def do_rnn_checkpoint(cells, prefix, period=1):
    """Make a callback to checkpoint Module to prefix every epoch.
    unpacks weights used by cells before saving.

    Parameters
    ----------
    cells : RNNCell or list of RNNCells
        The RNN cells used by this symbol.
    prefix : str
        The file prefix to checkpoint to
    period : int
        How many epochs to wait before checkpointing. Default is 1.

    Returns
    -------
    callback : function
        The callback function that can be passed as iter_end_callback to fit.
    """
    # Clamp to at least one epoch between checkpoints.
    interval = int(max(1, period))

    # pylint: disable=unused-argument
    def _callback(iter_no, sym=None, arg=None, aux=None):
        """Checkpoint when the 1-based epoch index hits the interval."""
        epoch = iter_no + 1
        if epoch % interval == 0:
            save_rnn_checkpoint(cells, prefix, epoch, sym, arg, aux)
    return _callback
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: MPL-2.0
package dbplugin
import (
"context"
"errors"
"reflect"
"testing"
"time"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/pluginutil"
"github.com/hashicorp/vault/sdk/helper/wrapping"
"github.com/hashicorp/vault/sdk/logical"
"github.com/stretchr/testify/mock"
"google.golang.org/grpc"
)
// TestNewPluginClient exercises NewPluginClient against a mocked runner util:
// the happy-path wiring of the dispensed gRPC client, propagation of Dispense
// errors, and rejection of dispensed values that are not gRPC clients.
func TestNewPluginClient(t *testing.T) {
	type testCase struct {
		config       pluginutil.PluginClientConfig
		pluginClient pluginutil.PluginClient // value the mocked runner hands back
		expectedResp *DatabasePluginClient
		expectedErr  error
	}
	tests := map[string]testCase{
		"happy path": {
			config: testPluginClientConfig(),
			pluginClient: &fakePluginClient{
				connResp:     nil,
				dispenseResp: gRPCClient{client: fakeClient{}},
				dispenseErr:  nil,
			},
			expectedResp: &DatabasePluginClient{
				client: &fakePluginClient{
					connResp:     nil,
					dispenseResp: gRPCClient{client: fakeClient{}},
					dispenseErr:  nil,
				},
				// NewPluginClient is expected to rebuild the gRPC client over
				// the (nil) connection returned by the fake plugin client.
				Database: gRPCClient{client: proto.NewDatabaseClient(nil), versionClient: logical.NewPluginVersionClient(nil), doneCtx: context.Context(nil)},
			},
			expectedErr: nil,
		},
		"dispense error": {
			config: testPluginClientConfig(),
			pluginClient: &fakePluginClient{
				connResp:     nil,
				dispenseResp: gRPCClient{},
				dispenseErr:  errors.New("dispense error"),
			},
			expectedResp: nil,
			expectedErr:  errors.New("dispense error"),
		},
		"error unsupported client type": {
			config: testPluginClientConfig(),
			pluginClient: &fakePluginClient{
				connResp:     nil,
				dispenseResp: nil, // not a gRPCClient, so the type switch must fail
				dispenseErr:  nil,
			},
			expectedResp: nil,
			expectedErr:  errors.New("unsupported client type"),
		},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			ctx := context.Background()
			// No real plugin process is launched; the runner util is mocked to
			// return the per-case fake plugin client.
			mockWrapper := new(mockRunnerUtil)
			mockWrapper.On("NewPluginClient", ctx, mock.Anything).
				Return(test.pluginClient, nil)
			// The runner must be consulted exactly once per call.
			defer mockWrapper.AssertNumberOfCalls(t, "NewPluginClient", 1)
			resp, err := NewPluginClient(ctx, mockWrapper, test.config)
			if test.expectedErr != nil && err == nil {
				t.Fatalf("err expected, got nil")
			}
			if test.expectedErr == nil && err != nil {
				t.Fatalf("no error expected, got: %s", err)
			}
			// Response contents are only compared on the success path.
			if test.expectedErr == nil && !reflect.DeepEqual(resp, test.expectedResp) {
				t.Fatalf("Actual response: %#v\nExpected response: %#v", resp, test.expectedResp)
			}
		})
	}
}
// testPluginClientConfig returns a minimal PluginClientConfig shared by all
// test cases: metadata mode and AutoMTLS enabled, logging discarded.
func testPluginClientConfig() pluginutil.PluginClientConfig {
	return pluginutil.PluginClientConfig{
		Name:            "test-plugin",
		PluginSets:      PluginSets,
		PluginType:      consts.PluginTypeDatabase,
		HandshakeConfig: HandshakeConfig,
		Logger:          log.NewNullLogger(),
		IsMetadataMode:  true,
		AutoMTLS:        true,
	}
}
// fakePluginClient is a stub pluginutil.PluginClient whose responses are
// injected per test case; no plugin process is involved.
var _ pluginutil.PluginClient = &fakePluginClient{}

type fakePluginClient struct {
	connResp     grpc.ClientConnInterface // returned by Conn
	dispenseResp interface{}              // value returned by Dispense
	dispenseErr  error                    // error returned by Dispense
}

// Conn returns the injected connection. Previously this always returned nil,
// leaving the connResp field dead; returning the field lets tests control the
// connection while preserving current behavior (all cases set it to nil).
func (f *fakePluginClient) Conn() grpc.ClientConnInterface {
	return f.connResp
}

func (f *fakePluginClient) Reload() error {
	return nil
}

// Dispense ignores name and returns the injected response/error pair.
func (f *fakePluginClient) Dispense(name string) (interface{}, error) {
	return f.dispenseResp, f.dispenseErr
}

func (f *fakePluginClient) Ping() error {
	return nil
}

func (f *fakePluginClient) Close() error {
	return nil
}
// mockRunnerUtil is a testify mock of pluginutil.RunnerUtil. Only
// NewPluginClient, ResponseWrapData and MlockEnabled route through the mock
// framework; the remaining methods return fixed values.
var _ pluginutil.RunnerUtil = &mockRunnerUtil{}

type mockRunnerUtil struct {
	mock.Mock
}

func (m *mockRunnerUtil) VaultVersion(ctx context.Context) (string, error) {
	return "dummyversion", nil
}

// NewPluginClient returns whatever the test registered via mock.On.
func (m *mockRunnerUtil) NewPluginClient(ctx context.Context, config pluginutil.PluginClientConfig) (pluginutil.PluginClient, error) {
	args := m.Called(ctx, config)
	return args.Get(0).(pluginutil.PluginClient), args.Error(1)
}

func (m *mockRunnerUtil) ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
	args := m.Called(ctx, data, ttl, jwt)
	return args.Get(0).(*wrapping.ResponseWrapInfo), args.Error(1)
}

func (m *mockRunnerUtil) MlockEnabled() bool {
	args := m.Called()
	return args.Bool(0)
}

func (m *mockRunnerUtil) ClusterID(ctx context.Context) (string, error) {
	return "clusterid", nil
}

// DownloadExtractVerifyPlugin is a no-op stub for tests.
func (m *mockRunnerUtil) DownloadExtractVerifyPlugin(_ context.Context, _ *pluginutil.PluginRunner) error {
	return nil
}
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"compress/gzip"
"errors"
"fmt"
"io"
"math"
"net/http"
"slices"
"sort"
"sync"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/prometheus/common/model"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/prompb"
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/util/annotations"
)
const (
	// decodeReadLimit is the maximum size of a read request body in bytes.
	decodeReadLimit = 32 * 1024 * 1024

	// Content types accepted by DecodeOTLPWriteRequest.
	pbContentType   = "application/x-protobuf"
	jsonContentType = "application/json"
)
// HTTPError pairs an error message with the HTTP status code that should be
// reported to the caller (e.g. 400 when a sample limit is exceeded in
// ToQueryResult).
type HTTPError struct {
	msg    string
	status int
}

// Error implements the error interface.
func (e HTTPError) Error() string {
	return e.msg
}

// Status returns the HTTP status code associated with this error.
func (e HTTPError) Status() int {
	return e.status
}
// DecodeReadRequest reads a remote.Request from a http.Request.
// The body is capped at decodeReadLimit bytes, snappy-decompressed and then
// unmarshalled into a prompb.ReadRequest.
func DecodeReadRequest(r *http.Request) (*prompb.ReadRequest, error) {
	body, err := io.ReadAll(io.LimitReader(r.Body, decodeReadLimit))
	if err != nil {
		return nil, err
	}
	decompressed, err := snappy.Decode(nil, body)
	if err != nil {
		return nil, err
	}
	req := &prompb.ReadRequest{}
	if err := proto.Unmarshal(decompressed, req); err != nil {
		return nil, err
	}
	return req, nil
}
// EncodeReadResponse writes a remote.Response to a http.ResponseWriter,
// marshalling to protobuf and snappy-compressing the result.
func EncodeReadResponse(resp *prompb.ReadResponse, w http.ResponseWriter) error {
	marshalled, err := proto.Marshal(resp)
	if err != nil {
		return err
	}
	_, err = w.Write(snappy.Encode(nil, marshalled))
	return err
}
// ToQuery builds a Query proto from a time range, matchers and optional
// select hints.
func ToQuery(from, to int64, matchers []*labels.Matcher, hints *storage.SelectHints) (*prompb.Query, error) {
	pbMatchers, err := ToLabelMatchers(matchers)
	if err != nil {
		return nil, err
	}
	query := &prompb.Query{
		StartTimestampMs: from,
		EndTimestampMs:   to,
		Matchers:         pbMatchers,
	}
	// Hints are optional; leave query.Hints nil when none were supplied.
	if hints != nil {
		query.Hints = &prompb.ReadHints{
			StartMs:  hints.Start,
			EndMs:    hints.End,
			StepMs:   hints.Step,
			Func:     hints.Func,
			Grouping: hints.Grouping,
			By:       hints.By,
			RangeMs:  hints.Range,
		}
	}
	return query, nil
}
// ToQueryResult builds a QueryResult proto from a SeriesSet. A sampleLimit
// of 0 (or less) disables the limit; when a positive limit is exceeded the
// returned error is an HTTPError carrying http.StatusBadRequest.
func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, annotations.Annotations, error) {
	numSamples := 0
	resp := &prompb.QueryResult{}
	// The iterator is reused across series to avoid reallocation.
	var iter chunkenc.Iterator
	for ss.Next() {
		series := ss.At()
		iter = series.Iterator(iter)
		var (
			samples    []prompb.Sample
			histograms []prompb.Histogram
		)
		for valType := iter.Next(); valType != chunkenc.ValNone; valType = iter.Next() {
			numSamples++
			if sampleLimit > 0 && numSamples > sampleLimit {
				return nil, ss.Warnings(), HTTPError{
					msg:    fmt.Sprintf("exceeded sample limit (%d)", sampleLimit),
					status: http.StatusBadRequest,
				}
			}
			// Floats and (float) histograms are collected into separate slices
			// of the outgoing TimeSeries.
			switch valType {
			case chunkenc.ValFloat:
				ts, val := iter.At()
				samples = append(samples, prompb.Sample{
					Timestamp: ts,
					Value:     val,
				})
			case chunkenc.ValHistogram:
				ts, h := iter.AtHistogram(nil)
				histograms = append(histograms, prompb.FromIntHistogram(ts, h))
			case chunkenc.ValFloatHistogram:
				ts, fh := iter.AtFloatHistogram(nil)
				histograms = append(histograms, prompb.FromFloatHistogram(ts, fh))
			default:
				return nil, ss.Warnings(), fmt.Errorf("unrecognized value type: %s", valType)
			}
		}
		if err := iter.Err(); err != nil {
			return nil, ss.Warnings(), err
		}
		resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{
			Labels:     prompb.FromLabels(series.Labels(), nil),
			Samples:    samples,
			Histograms: histograms,
		})
	}
	return resp, ss.Warnings(), ss.Err()
}
// FromQueryResult unpacks and sorts a QueryResult proto. Invalid labels or
// metric names turn the whole result into an errSeriesSet. Sorting (by label
// set) is only performed when sortSeries is true.
func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet {
	b := labels.NewScratchBuilder(0)
	series := make([]storage.Series, 0, len(res.Timeseries))
	for _, ts := range res.Timeseries {
		if err := validateLabelsAndMetricName(ts.Labels); err != nil {
			return errSeriesSet{err: err}
		}
		lbls := ts.ToLabels(&b, nil)
		series = append(series, &concreteSeries{labels: lbls, floats: ts.Samples, histograms: ts.Histograms})
	}
	if sortSeries {
		slices.SortFunc(series, func(a, b storage.Series) int {
			return labels.Compare(a.Labels(), b.Labels())
		})
	}
	return &concreteSeriesSet{
		series: series,
	}
}
// NegotiateResponseType returns first accepted response type that this server supports.
// On the empty accepted list we assume that the SAMPLES response type was requested. This is to maintain backward compatibility.
func NegotiateResponseType(accepted []prompb.ReadRequest_ResponseType) (prompb.ReadRequest_ResponseType, error) {
	if len(accepted) == 0 {
		accepted = []prompb.ReadRequest_ResponseType{prompb.ReadRequest_SAMPLES}
	}
	supported := map[prompb.ReadRequest_ResponseType]struct{}{
		prompb.ReadRequest_SAMPLES:             {},
		prompb.ReadRequest_STREAMED_XOR_CHUNKS: {},
	}
	// Client preference order wins: return the first requested type we support.
	for _, resType := range accepted {
		if _, ok := supported[resType]; ok {
			return resType, nil
		}
	}
	return 0, fmt.Errorf("server does not support any of the requested response types: %v; supported: %v", accepted, supported)
}
// StreamChunkedReadResponses iterates over series, builds chunks and streams those to the caller.
// It expects Series set with populated chunks. Frames are flushed once the
// (approximate) maxBytesInFrame budget is spent or the series ends; marshal
// buffers are recycled through marshalPool.
func StreamChunkedReadResponses(
	stream io.Writer,
	queryIndex int64,
	ss storage.ChunkSeriesSet,
	sortedExternalLabels []prompb.Label,
	maxBytesInFrame int,
	marshalPool *sync.Pool,
) (annotations.Annotations, error) {
	var (
		chks []prompb.Chunk
		lbls []prompb.Label
		iter chunks.Iterator
	)
	for ss.Next() {
		series := ss.At()
		iter = series.Iterator(iter)
		lbls = MergeLabels(prompb.FromLabels(series.Labels(), lbls), sortedExternalLabels)
		// Labels are sent in every frame, so they count against the budget.
		maxDataLength := maxBytesInFrame
		for _, lbl := range lbls {
			maxDataLength -= lbl.Size()
		}
		frameBytesLeft := maxDataLength
		isNext := iter.Next()
		// Send at most one series per frame; series may be split over multiple frames according to maxBytesInFrame.
		for isNext {
			chk := iter.At()
			if chk.Chunk == nil {
				return ss.Warnings(), fmt.Errorf("StreamChunkedReadResponses: found not populated chunk returned by SeriesSet at ref: %v", chk.Ref)
			}
			// Cut the chunk.
			chks = append(chks, prompb.Chunk{
				MinTimeMs: chk.MinTime,
				MaxTimeMs: chk.MaxTime,
				Type:      prompb.Chunk_Encoding(chk.Chunk.Encoding()),
				Data:      chk.Chunk.Bytes(),
			})
			frameBytesLeft -= chks[len(chks)-1].Size()
			// We are fine with minor inaccuracy of max bytes per frame. The inaccuracy will be max of full chunk size.
			isNext = iter.Next()
			if frameBytesLeft > 0 && isNext {
				continue
			}
			// Budget exhausted (or series finished): flush the accumulated
			// chunks as one frame.
			resp := &prompb.ChunkedReadResponse{
				ChunkedSeries: []*prompb.ChunkedSeries{
					{Labels: lbls, Chunks: chks},
				},
				QueryIndex: queryIndex,
			}
			b, err := resp.PooledMarshal(marshalPool)
			if err != nil {
				return ss.Warnings(), fmt.Errorf("marshal ChunkedReadResponse: %w", err)
			}
			if _, err := stream.Write(b); err != nil {
				return ss.Warnings(), fmt.Errorf("write to stream: %w", err)
			}
			// We immediately flush the Write() so it is safe to return to the pool.
			marshalPool.Put(&b)
			chks = chks[:0]
			frameBytesLeft = maxDataLength
		}
		if err := iter.Err(); err != nil {
			return ss.Warnings(), err
		}
	}
	return ss.Warnings(), ss.Err()
}
// MergeLabels merges two sets of sorted proto labels, preferring those in
// primary to those in secondary when there is an overlap.
func MergeLabels(primary, secondary []prompb.Label) []prompb.Label {
	merged := make([]prompb.Label, 0, len(primary)+len(secondary))
	p, s := 0, 0
	for p < len(primary) || s < len(secondary) {
		switch {
		case s == len(secondary) || (p < len(primary) && primary[p].Name < secondary[s].Name):
			merged = append(merged, primary[p])
			p++
		case p == len(primary) || secondary[s].Name < primary[p].Name:
			merged = append(merged, secondary[s])
			s++
		default:
			// Same name on both sides: primary wins, drop the secondary entry.
			merged = append(merged, primary[p])
			p++
			s++
		}
	}
	return merged
}
// errSeriesSet implements storage.SeriesSet, just returning an error.
// It is the empty set: Next is immediately false and Err carries the error.
type errSeriesSet struct {
	err error
}

func (errSeriesSet) Next() bool {
	return false
}

func (errSeriesSet) At() storage.Series {
	return nil
}

func (e errSeriesSet) Err() error {
	return e.err
}

func (errSeriesSet) Warnings() annotations.Annotations { return nil }
// concreteSeriesSet implements storage.SeriesSet over an in-memory slice.
type concreteSeriesSet struct {
	// cur is one past the index returned by At; it starts at 0, so At is
	// only valid after a successful Next.
	cur    int
	series []storage.Series
}

func (c *concreteSeriesSet) Next() bool {
	c.cur++
	return c.cur-1 < len(c.series)
}

func (c *concreteSeriesSet) At() storage.Series {
	return c.series[c.cur-1]
}

func (*concreteSeriesSet) Err() error {
	return nil
}

func (*concreteSeriesSet) Warnings() annotations.Annotations { return nil }
// concreteSeries implements storage.Series backed by decoded protobuf
// samples (floats and histograms held in separate, timestamp-sorted slices).
type concreteSeries struct {
	labels     labels.Labels
	floats     []prompb.Sample
	histograms []prompb.Histogram
}

func (c *concreteSeries) Labels() labels.Labels {
	// Copy so callers cannot mutate the stored label set.
	return c.labels.Copy()
}

// Iterator reuses a previous concreteSeriesIterator when one is passed in.
func (c *concreteSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
	if csi, ok := it.(*concreteSeriesIterator); ok {
		csi.reset(c)
		return csi
	}
	return newConcreteSeriesIterator(c)
}
// concreteSeriesIterator implements storage.SeriesIterator, interleaving the
// float and histogram sample slices of a concreteSeries by timestamp.
type concreteSeriesIterator struct {
	// Cursors into series.floats / series.histograms; -1 means iteration has
	// not started yet.
	floatsCur     int
	histogramsCur int
	curValType    chunkenc.ValueType
	series        *concreteSeries
	err           error
	// These are pre-filled with the current model histogram if curValType
	// is ValHistogram or ValFloatHistogram, respectively.
	curH  *histogram.Histogram
	curFH *histogram.FloatHistogram
}

func newConcreteSeriesIterator(series *concreteSeries) chunkenc.Iterator {
	return &concreteSeriesIterator{
		floatsCur:     -1,
		histogramsCur: -1,
		curValType:    chunkenc.ValNone,
		series:        series,
	}
}

// reset re-targets the iterator at a new series, clearing all cursor state.
func (c *concreteSeriesIterator) reset(series *concreteSeries) {
	c.floatsCur = -1
	c.histogramsCur = -1
	c.curValType = chunkenc.ValNone
	c.series = series
	c.err = nil
}
// Seek implements storage.SeriesIterator. It advances both cursors to the
// first sample at or after t, preferring floats on timestamp ties.
func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
	if c.err != nil {
		return chunkenc.ValNone
	}
	// Promote the "not started" sentinel to the first sample.
	if c.floatsCur == -1 {
		c.floatsCur = 0
	}
	if c.histogramsCur == -1 {
		c.histogramsCur = 0
	}
	if c.floatsCur >= len(c.series.floats) && c.histogramsCur >= len(c.series.histograms) {
		return chunkenc.ValNone
	}
	// No-op check.
	if (c.curValType == chunkenc.ValFloat && c.series.floats[c.floatsCur].Timestamp >= t) ||
		((c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram) && c.series.histograms[c.histogramsCur].Timestamp >= t) {
		return c.curValType
	}
	c.curValType = chunkenc.ValNone
	// Binary search between current position and end for both float and histograms samples.
	c.floatsCur += sort.Search(len(c.series.floats)-c.floatsCur, func(n int) bool {
		return c.series.floats[n+c.floatsCur].Timestamp >= t
	})
	c.histogramsCur += sort.Search(len(c.series.histograms)-c.histogramsCur, func(n int) bool {
		return c.series.histograms[n+c.histogramsCur].Timestamp >= t
	})
	switch {
	case c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms):
		// If float samples and histogram samples have overlapping timestamps prefer the float samples.
		if c.series.floats[c.floatsCur].Timestamp <= c.series.histograms[c.histogramsCur].Timestamp {
			c.curValType = chunkenc.ValFloat
		} else {
			c.curValType = chunkenc.ValHistogram
		}
		// When the timestamps do not overlap the cursor for the non-selected sample type has advanced too
		// far; we decrement it back down here.
		if c.series.floats[c.floatsCur].Timestamp != c.series.histograms[c.histogramsCur].Timestamp {
			if c.curValType == chunkenc.ValFloat {
				c.histogramsCur--
			} else {
				c.floatsCur--
			}
		}
	case c.floatsCur < len(c.series.floats):
		c.curValType = chunkenc.ValFloat
	case c.histogramsCur < len(c.series.histograms):
		c.curValType = chunkenc.ValHistogram
	}
	// setCurrentHistogram may refine ValHistogram into ValFloatHistogram and
	// may set c.err on invalid histograms.
	if c.curValType == chunkenc.ValHistogram {
		c.setCurrentHistogram()
	}
	if c.err != nil {
		c.curValType = chunkenc.ValNone
	}
	return c.curValType
}
// setCurrentHistogram pre-fills either the curH or the curFH field with a
// converted model histogram and sets c.curValType accordingly. It validates the
// histogram and sets c.err accordingly. This all has to be done in Seek() and
// Next() already so that we know if the histogram we got from the remote-read
// source is valid or not before we allow the AtHistogram()/AtFloatHistogram()
// call.
func (c *concreteSeriesIterator) setCurrentHistogram() {
	pbH := c.series.histograms[c.histogramsCur]
	// Basic schema check first.
	schema := pbH.Schema
	if !histogram.IsKnownSchema(schema) {
		c.err = histogram.UnknownSchemaError(schema)
		return
	}
	if pbH.IsFloatHistogram() {
		c.curValType = chunkenc.ValFloatHistogram
		mFH := pbH.ToFloatHistogram()
		if mFH.Schema > histogram.ExponentialSchemaMax && mFH.Schema <= histogram.ExponentialSchemaMaxReserved {
			// This is a very slow path, but it should only happen if the
			// sample is from a newer Prometheus version that supports higher
			// resolution.
			if err := mFH.ReduceResolution(histogram.ExponentialSchemaMax); err != nil {
				c.err = err
				return
			}
		}
		if err := mFH.Validate(); err != nil {
			c.err = err
			return
		}
		c.curFH = mFH
		return
	}
	// Integer histogram path: same reserved-schema downscaling + validation.
	c.curValType = chunkenc.ValHistogram
	mH := pbH.ToIntHistogram()
	if mH.Schema > histogram.ExponentialSchemaMax && mH.Schema <= histogram.ExponentialSchemaMaxReserved {
		// This is a very slow path, but it should only happen if the
		// sample is from a newer Prometheus version that supports higher
		// resolution.
		if err := mH.ReduceResolution(histogram.ExponentialSchemaMax); err != nil {
			c.err = err
			return
		}
	}
	if err := mH.Validate(); err != nil {
		c.err = err
		return
	}
	c.curH = mH
}
// At implements chunkenc.Iterator. It panics unless the iterator is currently
// positioned on a float sample.
func (c *concreteSeriesIterator) At() (t int64, v float64) {
	if c.curValType != chunkenc.ValFloat {
		panic("iterator is not on a float sample")
	}
	s := c.series.floats[c.floatsCur]
	return s.Timestamp, s.Value
}

// AtHistogram implements chunkenc.Iterator. curH was validated and filled by
// setCurrentHistogram during Next/Seek.
func (c *concreteSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
	if c.curValType != chunkenc.ValHistogram {
		panic("iterator is not on an integer histogram sample")
	}
	return c.series.histograms[c.histogramsCur].Timestamp, c.curH
}

// AtFloatHistogram implements chunkenc.Iterator. An integer histogram is
// converted to its float form on the fly.
func (c *concreteSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
	switch c.curValType {
	case chunkenc.ValFloatHistogram:
		return c.series.histograms[c.histogramsCur].Timestamp, c.curFH
	case chunkenc.ValHistogram:
		return c.series.histograms[c.histogramsCur].Timestamp, c.curH.ToFloat(nil)
	default:
		panic("iterator is not on a histogram sample")
	}
}

// AtT implements chunkenc.Iterator.
func (c *concreteSeriesIterator) AtT() int64 {
	if c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram {
		return c.series.histograms[c.histogramsCur].Timestamp
	}
	return c.series.floats[c.floatsCur].Timestamp
}

// TODO(krajorama): implement AtST. Maybe. concreteSeriesIterator is used
// for turning query results into an iterable, but query results do not have ST.
func (*concreteSeriesIterator) AtST() int64 {
	return 0
}
// noTS marks an exhausted sample stream when peeking ahead in Next.
const noTS = int64(math.MaxInt64)

// Next implements chunkenc.Iterator. It peeks at the next float and next
// histogram timestamp and advances whichever is earlier; ties go to floats.
func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
	if c.err != nil {
		return chunkenc.ValNone
	}
	peekFloatTS := noTS
	if c.floatsCur+1 < len(c.series.floats) {
		peekFloatTS = c.series.floats[c.floatsCur+1].Timestamp
	}
	peekHistTS := noTS
	if c.histogramsCur+1 < len(c.series.histograms) {
		peekHistTS = c.series.histograms[c.histogramsCur+1].Timestamp
	}
	c.curValType = chunkenc.ValNone
	switch {
	case peekFloatTS < peekHistTS:
		c.floatsCur++
		c.curValType = chunkenc.ValFloat
	case peekHistTS < peekFloatTS:
		c.histogramsCur++
		c.curValType = chunkenc.ValHistogram
	case peekFloatTS == noTS && peekHistTS == noTS:
		// This only happens when the iterator is exhausted; we set the cursors off the end to prevent
		// Seek() from returning anything afterwards.
		c.floatsCur = len(c.series.floats)
		c.histogramsCur = len(c.series.histograms)
	default:
		// Prefer float samples to histogram samples if there's a conflict. We advance the cursor for histograms
		// anyway otherwise the histogram sample will get selected on the next call to Next().
		c.floatsCur++
		c.histogramsCur++
		c.curValType = chunkenc.ValFloat
	}
	// Validate/convert the histogram now so At* calls are safe.
	if c.curValType == chunkenc.ValHistogram {
		c.setCurrentHistogram()
	}
	if c.err != nil {
		c.curValType = chunkenc.ValNone
	}
	return c.curValType
}

// Err implements chunkenc.Iterator.
func (c *concreteSeriesIterator) Err() error {
	return c.err
}
// chunkedSeriesSet implements storage.SeriesSet over a streamed chunked
// remote-read response, decoding one ChunkedReadResponse per Next call.
type chunkedSeriesSet struct {
	chunkedReader *ChunkedReader
	respBody      io.ReadCloser
	mint, maxt    int64
	cancel        func(error)

	current   storage.Series
	err       error
	exhausted bool
}

func NewChunkedSeriesSet(chunkedReader *ChunkedReader, respBody io.ReadCloser, mint, maxt int64, cancel func(error)) storage.SeriesSet {
	return &chunkedSeriesSet{
		chunkedReader: chunkedReader,
		respBody:      respBody,
		mint:          mint,
		maxt:          maxt,
		cancel:        cancel,
	}
}

// Next return true if there is a next series and false otherwise. It will
// block until the next series is available.
func (s *chunkedSeriesSet) Next() bool {
	if s.exhausted {
		// Don't try to read the next series again.
		// This prevents errors like "http: read on closed response body" if Next() is called after it has already returned false.
		return false
	}

	res := &prompb.ChunkedReadResponse{}

	err := s.chunkedReader.NextProto(res)
	if err != nil {
		// io.EOF is the clean end of the stream; anything else is recorded
		// and the remaining body drained so the connection can be reused.
		if !errors.Is(err, io.EOF) {
			s.err = err
			_, _ = io.Copy(io.Discard, s.respBody)
		}

		_ = s.respBody.Close()
		s.cancel(err)
		s.exhausted = true

		return false
	}

	s.current = &chunkedSeries{
		ChunkedSeries: prompb.ChunkedSeries{
			Labels: res.ChunkedSeries[0].Labels,
			Chunks: res.ChunkedSeries[0].Chunks,
		},
		mint: s.mint,
		maxt: s.maxt,
	}

	return true
}

func (s *chunkedSeriesSet) At() storage.Series {
	return s.current
}

func (s *chunkedSeriesSet) Err() error {
	return s.err
}

func (*chunkedSeriesSet) Warnings() annotations.Annotations {
	return nil
}
// chunkedSeries is a storage.Series backed by raw protobuf chunks; mint/maxt
// bound the time range its iterators will surface.
type chunkedSeries struct {
	prompb.ChunkedSeries
	mint, maxt int64
}

var _ storage.Series = &chunkedSeries{}

func (s *chunkedSeries) Labels() labels.Labels {
	b := labels.NewScratchBuilder(0)
	return s.ToLabels(&b, nil)
}

// Iterator reuses a previous chunkedSeriesIterator when one is passed in.
func (s *chunkedSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
	csIt, ok := it.(*chunkedSeriesIterator)
	if ok {
		csIt.reset(s.Chunks, s.mint, s.maxt)
		return csIt
	}
	return newChunkedSeriesIterator(s.Chunks, s.mint, s.maxt)
}
// chunkedSeriesIterator iterates samples across a slice of encoded chunks,
// decoding them lazily and restricting output to [mint, maxt].
type chunkedSeriesIterator struct {
	chunks []prompb.Chunk
	idx    int          // index of the chunk currently being decoded
	cur    chunkenc.Iterator // iterator over chunks[idx]
	valType chunkenc.ValueType

	mint, maxt int64

	err error
}

var _ chunkenc.Iterator = &chunkedSeriesIterator{}

func newChunkedSeriesIterator(chunks []prompb.Chunk, mint, maxt int64) *chunkedSeriesIterator {
	it := &chunkedSeriesIterator{}
	it.reset(chunks, mint, maxt)
	return it
}
// Next advances to the next sample inside [mint, maxt], recursing into the
// next chunk when the current one is drained.
func (it *chunkedSeriesIterator) Next() chunkenc.ValueType {
	if it.err != nil {
		return chunkenc.ValNone
	}
	if len(it.chunks) == 0 {
		return chunkenc.ValNone
	}

	for it.valType = it.cur.Next(); it.valType != chunkenc.ValNone; it.valType = it.cur.Next() {
		atT := it.AtT()
		if atT > it.maxt {
			it.chunks = nil // Exhaust this iterator so follow-up calls to Next or Seek return fast.
			return chunkenc.ValNone
		}
		// Samples before mint are skipped silently.
		if atT >= it.mint {
			return it.valType
		}
	}

	if it.idx >= len(it.chunks)-1 {
		it.valType = chunkenc.ValNone
	} else {
		it.idx++
		it.resetIterator()
		it.valType = it.Next()
	}

	return it.valType
}
// Seek advances to the first sample at or after t (and at or after mint),
// binary-searching chunk boundaries before scanning within a chunk.
func (it *chunkedSeriesIterator) Seek(t int64) chunkenc.ValueType {
	if it.err != nil {
		return chunkenc.ValNone
	}
	if len(it.chunks) == 0 {
		return chunkenc.ValNone
	}

	startIdx := it.idx
	it.idx += sort.Search(len(it.chunks)-startIdx, func(i int) bool {
		return it.chunks[startIdx+i].MaxTimeMs >= t
	})
	if it.idx > startIdx {
		it.resetIterator()
	} else {
		// Still in the same chunk: maybe the current sample already satisfies t.
		ts := it.cur.AtT()
		if ts >= t {
			return it.valType
		}
	}

	for it.valType = it.cur.Next(); it.valType != chunkenc.ValNone; it.valType = it.cur.Next() {
		ts := it.cur.AtT()
		if ts > it.maxt {
			it.chunks = nil // Exhaust this iterator so follow-up calls to Next or Seek return fast.
			return chunkenc.ValNone
		}
		if ts >= t && ts >= it.mint {
			return it.valType
		}
	}

	it.valType = chunkenc.ValNone
	return it.valType
}
// resetIterator decodes chunks[idx] into cur, or installs a nop iterator when
// idx is past the end. Decoding errors are stored in it.err.
func (it *chunkedSeriesIterator) resetIterator() {
	if it.idx < len(it.chunks) {
		chunk := it.chunks[it.idx]

		decodedChunk, err := chunkenc.FromData(chunkenc.Encoding(chunk.Type), chunk.Data)
		if err != nil {
			it.err = err
			return
		}

		it.cur = decodedChunk.Iterator(nil)
	} else {
		it.cur = chunkenc.NewNopIterator()
	}
}

// reset re-targets the iterator at a new chunk slice and time range.
func (it *chunkedSeriesIterator) reset(chunks []prompb.Chunk, mint, maxt int64) {
	it.chunks = chunks
	it.mint = mint
	it.maxt = maxt
	it.idx = 0
	if len(chunks) > 0 {
		it.resetIterator()
	}
}
// The At* accessors and Err delegate to the iterator over the current chunk.
func (it *chunkedSeriesIterator) At() (ts int64, v float64) {
	return it.cur.At()
}

func (it *chunkedSeriesIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) {
	return it.cur.AtHistogram(h)
}

func (it *chunkedSeriesIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
	return it.cur.AtFloatHistogram(fh)
}

func (it *chunkedSeriesIterator) AtT() int64 {
	return it.cur.AtT()
}

// TODO(krajorama): test AtST once we have a chunk format that provides ST.
func (it *chunkedSeriesIterator) AtST() int64 {
	return it.cur.AtST()
}

func (it *chunkedSeriesIterator) Err() error {
	return it.err
}
// validateLabelsAndMetricName validates the label names/values and metric names returned from remote read,
// also making sure that there are no labels with duplicate names.
// The input is assumed sorted by name (duplicates are detected via adjacency).
func validateLabelsAndMetricName(ls []prompb.Label) error {
	for i, lbl := range ls {
		switch {
		case lbl.Name == labels.MetricName && !model.UTF8Validation.IsValidMetricName(lbl.Value):
			return fmt.Errorf("invalid metric name: %v", lbl.Value)
		case !model.UTF8Validation.IsValidLabelName(lbl.Name):
			return fmt.Errorf("invalid label name: %v", lbl.Name)
		case !model.LabelValue(lbl.Value).IsValid():
			return fmt.Errorf("invalid label value: %v", lbl.Value)
		case i > 0 && lbl.Name == ls[i-1].Name:
			return fmt.Errorf("duplicate label with name: %v", lbl.Name)
		}
	}
	return nil
}
// ToLabelMatchers converts Prometheus label matchers to protobuf label matchers.
func ToLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) {
	converted := make([]*prompb.LabelMatcher, 0, len(matchers))
	for _, m := range matchers {
		var pbType prompb.LabelMatcher_Type
		switch m.Type {
		case labels.MatchEqual:
			pbType = prompb.LabelMatcher_EQ
		case labels.MatchNotEqual:
			pbType = prompb.LabelMatcher_NEQ
		case labels.MatchRegexp:
			pbType = prompb.LabelMatcher_RE
		case labels.MatchNotRegexp:
			pbType = prompb.LabelMatcher_NRE
		default:
			return nil, errors.New("invalid matcher type")
		}
		converted = append(converted, &prompb.LabelMatcher{Type: pbType, Name: m.Name, Value: m.Value})
	}
	return converted, nil
}
// FromLabelMatchers converts protobuf label matchers to Prometheus label matchers.
func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, error) {
	converted := make([]*labels.Matcher, 0, len(matchers))
	for _, pbm := range matchers {
		var matchType labels.MatchType
		switch pbm.Type {
		case prompb.LabelMatcher_EQ:
			matchType = labels.MatchEqual
		case prompb.LabelMatcher_NEQ:
			matchType = labels.MatchNotEqual
		case prompb.LabelMatcher_RE:
			matchType = labels.MatchRegexp
		case prompb.LabelMatcher_NRE:
			matchType = labels.MatchNotRegexp
		default:
			return nil, errors.New("invalid matcher type")
		}
		// NewMatcher compiles regexes, so it can fail for RE/NRE matchers.
		m, err := labels.NewMatcher(matchType, pbm.Name, pbm.Value)
		if err != nil {
			return nil, err
		}
		converted = append(converted, m)
	}
	return converted, nil
}
// DecodeWriteRequest from an io.Reader into a prompb.WriteRequest, handling
// snappy decompression.
// Used also by documentation/examples/remote_storage.
func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) {
	compressed, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	raw, err := snappy.Decode(nil, compressed)
	if err != nil {
		return nil, err
	}
	req := &prompb.WriteRequest{}
	if err := proto.Unmarshal(raw, req); err != nil {
		return nil, err
	}
	return req, nil
}
// DecodeWriteV2Request from an io.Reader into a writev2.Request, handling
// snappy decompression.
// Used also by documentation/examples/remote_storage.
func DecodeWriteV2Request(r io.Reader) (*writev2.Request, error) {
	compressed, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	raw, err := snappy.Decode(nil, compressed)
	if err != nil {
		return nil, err
	}
	req := &writev2.Request{}
	if err := proto.Unmarshal(raw, req); err != nil {
		return nil, err
	}
	return req, nil
}
func DecodeOTLPWriteRequest(r *http.Request) (pmetricotlp.ExportRequest, error) {
contentType := r.Header.Get("Content-Type")
var decoderFunc func(buf []byte) (pmetricotlp.ExportRequest, error)
switch contentType {
case pbContentType:
decoderFunc = func(buf []byte) (pmetricotlp.ExportRequest, error) {
req := pmetricotlp.NewExportRequest()
return req, req.UnmarshalProto(buf)
}
case jsonContentType:
decoderFunc = func(buf []byte) (pmetricotlp.ExportRequest, error) {
req := pmetricotlp.NewExportRequest()
return req, req.UnmarshalJSON(buf)
}
default:
return pmetricotlp.NewExportRequest(), fmt.Errorf("unsupported content type: %s, supported: [%s, %s]", contentType, jsonContentType, pbContentType)
}
reader := r.Body
// Handle compression.
switch r.Header.Get("Content-Encoding") {
case "gzip":
gr, err := gzip.NewReader(reader)
if err != nil {
return pmetricotlp.NewExportRequest(), err
}
reader = gr
case "":
// No compression.
default:
return pmetricotlp.NewExportRequest(), fmt.Errorf("unsupported compression: %s. Only \"gzip\" or no compression supported", r.Header.Get("Content-Encoding"))
}
body, err := io.ReadAll(reader)
if err != nil {
r.Body.Close()
return pmetricotlp.NewExportRequest(), err
}
if err = r.Body.Close(); err != nil {
return pmetricotlp.NewExportRequest(), err
}
otlpReq, err := decoderFunc(body)
if err != nil {
return pmetricotlp.NewExportRequest(), err
}
return otlpReq, nil
} | go | github | https://github.com/prometheus/prometheus | storage/remote/codec.go |
# Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
send_fds() -- Send file descriptor to the socket.
recv_fds() -- Receive file descriptors from the socket.
fromshare() -- create a socket object from data received from socket.share() [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
create_server() -- create a TCP socket and bind it to a specified address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
IntEnum constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Integer constants:
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import io
import os
import sys
from enum import IntEnum, IntFlag
from functools import partial
try:
import errno
except ImportError:
errno = None
# Fallback numbers are the conventional Linux errno values, used when the
# errno module could not be imported above.
EBADF = getattr(errno, 'EBADF', 9)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
__all__ = ["fromfd", "getfqdn", "create_connection", "create_server",
           "has_dualstack_ipv6", "AddressFamily", "SocketKind"]
# Re-export every public name the C accelerator module exports.
__all__.extend(os._get_exports_list(_socket))
# Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for
# nicer string representations.
# Note that _socket only knows about the integer values. The public interface
# in this module understands the enums and translates them back from integers
# where needed (e.g. .family property of a socket object).
IntEnum._convert_(
        'AddressFamily',
        __name__,
        lambda C: C.isupper() and C.startswith('AF_'))
IntEnum._convert_(
        'SocketKind',
        __name__,
        lambda C: C.isupper() and C.startswith('SOCK_'))
IntFlag._convert_(
        'MsgFlag',
        __name__,
        lambda C: C.isupper() and C.startswith('MSG_'))
IntFlag._convert_(
        'AddressInfo',
        __name__,
        lambda C: C.isupper() and C.startswith('AI_'))
# Loopback addresses used by the socketpair() fallback below.
_LOCALHOST = '127.0.0.1'
_LOCALHOST_V6 = '::1'
def _intenum_converter(value, enum_klass):
"""Convert a numeric family value to an IntEnum member.
If it's not a known member, return the numeric value itself.
"""
try:
return enum_klass(value)
except ValueError:
return value
# WSA error codes
# Windows-only table mapping Winsock (WSA*) error numbers to human-readable
# messages; exported via __all__ below so user code can look errors up.
if sys.platform.lower().startswith("win"):
    errorTab = {
        6: "Specified event object handle is invalid.",
        8: "Insufficient memory available.",
        87: "One or more parameters are invalid.",
        995: "Overlapped operation aborted.",
        996: "Overlapped I/O event object not in signaled state.",
        997: "Overlapped operation will complete later.",
        10004: "The operation was interrupted.",
        10009: "A bad file handle was passed.",
        10013: "Permission denied.",
        10014: "A fault occurred on the network??",
        10022: "An invalid operation was attempted.",
        10024: "Too many open files.",
        10035: "The socket operation would block.",
        10036: "A blocking operation is already in progress.",
        10037: "Operation already in progress.",
        10038: "Socket operation on nonsocket.",
        10039: "Destination address required.",
        10040: "Message too long.",
        10041: "Protocol wrong type for socket.",
        10042: "Bad protocol option.",
        10043: "Protocol not supported.",
        10044: "Socket type not supported.",
        10045: "Operation not supported.",
        10046: "Protocol family not supported.",
        10047: "Address family not supported by protocol family.",
        10048: "The network address is in use.",
        10049: "Cannot assign requested address.",
        10050: "Network is down.",
        10051: "Network is unreachable.",
        10052: "Network dropped connection on reset.",
        10053: "Software caused connection abort.",
        10054: "The connection has been reset.",
        10055: "No buffer space available.",
        10056: "Socket is already connected.",
        10057: "Socket is not connected.",
        10058: "The network has been shut down.",
        10059: "Too many references.",
        10060: "The operation timed out.",
        10061: "Connection refused.",
        10062: "Cannot translate name.",
        10063: "The name is too long.",
        10064: "The host is down.",
        10065: "The host is unreachable.",
        10066: "Directory not empty.",
        10067: "Too many processes.",
        10068: "User quota exceeded.",
        10069: "Disk quota exceeded.",
        10070: "Stale file handle reference.",
        10071: "Item is remote.",
        10091: "Network subsystem is unavailable.",
        10092: "Winsock.dll version out of range.",
        10093: "Successful WSAStartup not yet performed.",
        10101: "Graceful shutdown in progress.",
        10102: "No more results from WSALookupServiceNext.",
        10103: "Call has been canceled.",
        10104: "Procedure call table is invalid.",
        10105: "Service provider is invalid.",
        10106: "Service provider failed to initialize.",
        10107: "System call failure.",
        10108: "Service not found.",
        10109: "Class type not found.",
        10110: "No more results from WSALookupServiceNext.",
        10111: "Call was canceled.",
        10112: "Database query was refused.",
        11001: "Host not found.",
        11002: "Nonauthoritative host not found.",
        11003: "This is a nonrecoverable error.",
        11004: "Valid name, no data record requested type.",
        11005: "QoS receivers.",
        11006: "QoS senders.",
        11007: "No QoS senders.",
        11008: "QoS no receivers.",
        11009: "QoS request confirmed.",
        11010: "QoS admission error.",
        11011: "QoS policy failure.",
        11012: "QoS bad style.",
        11013: "QoS bad object.",
        11014: "QoS traffic control error.",
        11015: "QoS generic error.",
        11016: "QoS service type error.",
        11017: "QoS flowspec error.",
        11018: "Invalid QoS provider buffer.",
        11019: "Invalid QoS filter style.",
        11020: "Invalid QoS filter style.",
        11021: "Incorrect QoS filter count.",
        11022: "Invalid QoS object length.",
        11023: "Incorrect QoS flow count.",
        11024: "Unrecognized QoS object.",
        11025: "Invalid QoS policy object.",
        11026: "Invalid QoS flow descriptor.",
        11027: "Invalid QoS provider-specific flowspec.",
        11028: "Invalid QoS provider-specific filterspec.",
        11029: "Invalid QoS shape discard mode object.",
        11030: "Invalid QoS shaping rate object.",
        11031: "Reserved policy QoS element type."
    }
    __all__.append("errorTab")
class _GiveupOnSendfile(Exception): pass
class socket(_socket.socket):
    """A subclass of _socket.socket adding the makefile() method."""
    # _io_refs counts live SocketIO objects handed out by makefile();
    # close() defers the real OS close until that count drops to zero.
    __slots__ = ["__weakref__", "_io_refs", "_closed"]
    def __init__(self, family=-1, type=-1, proto=-1, fileno=None):
        # For user code address family and type values are IntEnum members, but
        # for the underlying _socket.socket they're just integers. The
        # constructor of _socket.socket converts the given argument to an
        # integer automatically.
        if fileno is None:
            if family == -1:
                family = AF_INET
            if type == -1:
                type = SOCK_STREAM
            if proto == -1:
                proto = 0
        _socket.socket.__init__(self, family, type, proto, fileno)
        self._io_refs = 0
        self._closed = False
    def __enter__(self):
        return self
    def __exit__(self, *args):
        if not self._closed:
            self.close()
    def __repr__(self):
        """Wrap __repr__() to reveal the real class name and socket
        address(es).
        """
        closed = getattr(self, '_closed', False)
        s = "<%s.%s%s fd=%i, family=%s, type=%s, proto=%i" \
            % (self.__class__.__module__,
               self.__class__.__qualname__,
               " [closed]" if closed else "",
               self.fileno(),
               self.family,
               self.type,
               self.proto)
        if not closed:
            # getsockname and getpeername may not be available on WASI.
            try:
                laddr = self.getsockname()
                if laddr:
                    s += ", laddr=%s" % str(laddr)
            except (error, AttributeError):
                pass
            try:
                raddr = self.getpeername()
                if raddr:
                    s += ", raddr=%s" % str(raddr)
            except (error, AttributeError):
                pass
        s += '>'
        return s
    def __getstate__(self):
        # Sockets wrap OS resources and cannot meaningfully be pickled.
        raise TypeError(f"cannot pickle {self.__class__.__name__!r} object")
    def dup(self):
        """dup() -> socket object
        Duplicate the socket. Return a new socket object connected to the same
        system resource. The new socket is non-inheritable.
        """
        fd = dup(self.fileno())
        sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
        sock.settimeout(self.gettimeout())
        return sock
    def accept(self):
        """accept() -> (socket object, address info)
        Wait for an incoming connection. Return a new socket
        representing the connection, and the address of the client.
        For IP sockets, the address info is a pair (hostaddr, port).
        """
        fd, addr = self._accept()
        sock = socket(self.family, self.type, self.proto, fileno=fd)
        # Issue #7995: if no default timeout is set and the listening
        # socket had a (non-zero) timeout, force the new socket in blocking
        # mode to override platform-specific socket flags inheritance.
        if getdefaulttimeout() is None and self.gettimeout():
            sock.setblocking(True)
        return sock, addr
    def makefile(self, mode="r", buffering=None, *,
                 encoding=None, errors=None, newline=None):
        """makefile(...) -> an I/O stream connected to the socket
        The arguments are as for io.open() after the filename, except the only
        supported mode values are 'r' (default), 'w', 'b', or a combination of
        those.
        """
        # XXX refactor to share code?
        if not set(mode) <= {"r", "w", "b"}:
            raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
        writing = "w" in mode
        reading = "r" in mode or not writing
        assert reading or writing
        binary = "b" in mode
        rawmode = ""
        if reading:
            rawmode += "r"
        if writing:
            rawmode += "w"
        raw = SocketIO(self, rawmode)
        # Keep the socket alive until the SocketIO is closed (see close()).
        self._io_refs += 1
        if buffering is None:
            buffering = -1
        if buffering < 0:
            buffering = io.DEFAULT_BUFFER_SIZE
        if buffering == 0:
            if not binary:
                raise ValueError("unbuffered streams must be binary")
            return raw
        if reading and writing:
            buffer = io.BufferedRWPair(raw, raw, buffering)
        elif reading:
            buffer = io.BufferedReader(raw, buffering)
        else:
            assert writing
            buffer = io.BufferedWriter(raw, buffering)
        if binary:
            return buffer
        encoding = io.text_encoding(encoding)
        text = io.TextIOWrapper(buffer, encoding, errors, newline)
        text.mode = mode
        return text
    def _sendfile_zerocopy(self, zerocopy_func, giveup_exc_type, file,
                           offset=0, count=None):
        """
        Send a file using a zero-copy function.
        """
        import selectors
        self._check_sendfile_params(file, offset, count)
        sockno = self.fileno()
        try:
            fileno = file.fileno()
        except (AttributeError, io.UnsupportedOperation) as err:
            raise giveup_exc_type(err)  # not a regular file
        try:
            fsize = os.fstat(fileno).st_size
        except OSError as err:
            raise giveup_exc_type(err)  # not a regular file
        if not fsize:
            return 0  # empty file
        # Truncate to 1GiB to avoid OverflowError, see bpo-38319.
        blocksize = min(count or fsize, 2 ** 30)
        timeout = self.gettimeout()
        if timeout == 0:
            raise ValueError("non-blocking sockets are not supported")
        # poll/select have the advantage of not requiring any
        # extra file descriptor, contrarily to epoll/kqueue
        # (also, they require a single syscall).
        if hasattr(selectors, 'PollSelector'):
            selector = selectors.PollSelector()
        else:
            selector = selectors.SelectSelector()
        selector.register(sockno, selectors.EVENT_WRITE)
        total_sent = 0
        # localize variable access to minimize overhead
        selector_select = selector.select
        try:
            while True:
                if timeout and not selector_select(timeout):
                    raise TimeoutError('timed out')
                if count:
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        break
                try:
                    sent = zerocopy_func(fileno, offset, blocksize)
                except BlockingIOError:
                    if not timeout:
                        # Block until the socket is ready to send some
                        # data; avoids hogging CPU resources.
                        selector_select()
                    continue
                except OSError as err:
                    if total_sent == 0:
                        # We can get here for different reasons, the main
                        # one being 'file' is not a regular mmap(2)-like
                        # file, in which case we'll fall back on using
                        # plain send().
                        raise giveup_exc_type(err)
                    raise err from None
                else:
                    if sent == 0:
                        break  # EOF
                    offset += sent
                    total_sent += sent
            return total_sent
        finally:
            if total_sent > 0 and hasattr(file, 'seek'):
                file.seek(offset)
    if hasattr(os, 'sendfile'):
        def _sendfile_use_sendfile(self, file, offset=0, count=None):
            return self._sendfile_zerocopy(
                partial(os.sendfile, self.fileno()),
                _GiveupOnSendfile,
                file, offset, count,
            )
    else:
        def _sendfile_use_sendfile(self, file, offset=0, count=None):
            raise _GiveupOnSendfile(
                "os.sendfile() not available on this platform")
    def _sendfile_use_send(self, file, offset=0, count=None):
        # Portable fallback: plain read()/send() loop.
        self._check_sendfile_params(file, offset, count)
        if self.gettimeout() == 0:
            raise ValueError("non-blocking sockets are not supported")
        if offset:
            file.seek(offset)
        blocksize = min(count, 8192) if count else 8192
        total_sent = 0
        # localize variable access to minimize overhead
        file_read = file.read
        sock_send = self.send
        try:
            while True:
                if count:
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        break
                data = memoryview(file_read(blocksize))
                if not data:
                    break  # EOF
                while True:
                    try:
                        sent = sock_send(data)
                    except BlockingIOError:
                        continue
                    else:
                        total_sent += sent
                        if sent < len(data):
                            data = data[sent:]
                        else:
                            break
            return total_sent
        finally:
            if total_sent > 0 and hasattr(file, 'seek'):
                file.seek(offset + total_sent)
    def _check_sendfile_params(self, file, offset, count):
        if 'b' not in getattr(file, 'mode', 'b'):
            raise ValueError("file should be opened in binary mode")
        if not self.type & SOCK_STREAM:
            raise ValueError("only SOCK_STREAM type sockets are supported")
        if count is not None:
            if not isinstance(count, int):
                raise TypeError(
                    "count must be a positive integer (got {!r})".format(count))
            if count <= 0:
                raise ValueError(
                    "count must be a positive integer (got {!r})".format(count))
    def sendfile(self, file, offset=0, count=None):
        """sendfile(file[, offset[, count]]) -> sent
        Send a file until EOF is reached by using high-performance
        os.sendfile() and return the total number of bytes which
        were sent.
        *file* must be a regular file object opened in binary mode.
        If os.sendfile() is not available (e.g. Windows) or file is
        not a regular file socket.send() will be used instead.
        *offset* tells from where to start reading the file.
        If specified, *count* is the total number of bytes to transmit
        as opposed to sending the file until EOF is reached.
        File position is updated on return or also in case of error in
        which case file.tell() can be used to figure out the number of
        bytes which were sent.
        The socket must be of SOCK_STREAM type.
        Non-blocking sockets are not supported.
        """
        try:
            return self._sendfile_use_sendfile(file, offset, count)
        except _GiveupOnSendfile:
            return self._sendfile_use_send(file, offset, count)
    def _decref_socketios(self):
        # Called by SocketIO.close(); performs the deferred close once the
        # last makefile() stream is gone and close() was already requested.
        if self._io_refs > 0:
            self._io_refs -= 1
        if self._closed:
            self.close()
    def _real_close(self, _ss=_socket.socket):
        # This function should not reference any globals. See issue #808164.
        _ss.close(self)
    def close(self):
        # This function should not reference any globals. See issue #808164.
        self._closed = True
        if self._io_refs <= 0:
            self._real_close()
    def detach(self):
        """detach() -> file descriptor
        Close the socket object without closing the underlying file descriptor.
        The object cannot be used after this call, but the file descriptor
        can be reused for other purposes. The file descriptor is returned.
        """
        self._closed = True
        return super().detach()
    @property
    def family(self):
        """Read-only access to the address family for this socket.
        """
        return _intenum_converter(super().family, AddressFamily)
    @property
    def type(self):
        """Read-only access to the socket type.
        """
        return _intenum_converter(super().type, SocketKind)
    # On Windows the inheritable flag lives on the OS handle rather than
    # the C-runtime fd, hence the split implementations.
    if os.name == 'nt':
        def get_inheritable(self):
            return os.get_handle_inheritable(self.fileno())
        def set_inheritable(self, inheritable):
            os.set_handle_inheritable(self.fileno(), inheritable)
    else:
        def get_inheritable(self):
            return os.get_inheritable(self.fileno())
        def set_inheritable(self, inheritable):
            os.set_inheritable(self.fileno(), inheritable)
    get_inheritable.__doc__ = "Get the inheritable flag of the socket"
    set_inheritable.__doc__ = "Set the inheritable flag of the socket"
def fromfd(fd, family, type, proto=0):
    """ fromfd(fd, family, type[, proto]) -> socket object

    Duplicate the file descriptor *fd* and wrap the copy in a new socket
    object; the remaining arguments mirror the socket() constructor.
    """
    return socket(family, type, proto, dup(fd))
if hasattr(_socket.socket, "sendmsg"):
    def send_fds(sock, buffers, fds, flags=0, address=None):
        """ send_fds(sock, buffers, fds[, flags[, address]]) -> integer
        Send the list of file descriptors fds over an AF_UNIX socket.
        """
        # NOTE(review): *flags* and *address* are accepted but never
        # forwarded to sendmsg() -- confirm whether that is intentional.
        import array
        # SCM_RIGHTS ancillary data carries the descriptors as a C int array.
        return sock.sendmsg(buffers, [(_socket.SOL_SOCKET,
            _socket.SCM_RIGHTS, array.array("i", fds))])
    __all__.append("send_fds")
if hasattr(_socket.socket, "recvmsg"):
    def recv_fds(sock, bufsize, maxfds, flags=0):
        """ recv_fds(sock, bufsize, maxfds[, flags]) -> (data, list of file
        descriptors, msg_flags, address)
        Receive up to maxfds file descriptors returning the message
        data and a list containing the descriptors.
        """
        # NOTE(review): the *flags* parameter is never passed to recvmsg()
        # and is immediately shadowed by the returned msg_flags -- confirm.
        import array
        # Array of ints
        fds = array.array("i")
        msg, ancdata, flags, addr = sock.recvmsg(bufsize,
            _socket.CMSG_LEN(maxfds * fds.itemsize))
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            if (cmsg_level == _socket.SOL_SOCKET and cmsg_type == _socket.SCM_RIGHTS):
                # Truncate any partial trailing int before decoding.
                fds.frombytes(cmsg_data[:
                    len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        return msg, list(fds), flags, addr
    __all__.append("recv_fds")
# socket.share() exists only where the platform provides it (Windows),
# so fromshare() is defined conditionally.
if hasattr(_socket.socket, "share"):
    def fromshare(info):
        """ fromshare(info) -> socket object
        Create a socket object from the bytes object returned by
        socket.share(pid).
        """
        return socket(0, 0, 0, info)
    __all__.append("fromshare")
# Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain.
# This is used if _socket doesn't natively provide socketpair. It's
# always defined so that it can be patched in for testing purposes.
def _fallback_socketpair(family=AF_INET, type=SOCK_STREAM, proto=0):
    """Emulate socketpair() with a connected loopback TCP pair.
    Only AF_INET/AF_INET6, SOCK_STREAM and protocol 0 are supported;
    returns (server_side_socket, client_side_socket).
    """
    if family == AF_INET:
        host = _LOCALHOST
    elif family == AF_INET6:
        host = _LOCALHOST_V6
    else:
        raise ValueError("Only AF_INET and AF_INET6 socket address families "
                         "are supported")
    if type != SOCK_STREAM:
        raise ValueError("Only SOCK_STREAM socket type is supported")
    if proto != 0:
        raise ValueError("Only protocol zero is supported")
    # We create a connected TCP socket. Note the trick with
    # setblocking(False) that prevents us from having to create a thread.
    lsock = socket(family, type, proto)
    try:
        lsock.bind((host, 0))
        lsock.listen()
        # On IPv6, ignore flow_info and scope_id
        addr, port = lsock.getsockname()[:2]
        csock = socket(family, type, proto)
        try:
            # Non-blocking connect: it will complete once accept() runs.
            csock.setblocking(False)
            try:
                csock.connect((addr, port))
            except (BlockingIOError, InterruptedError):
                pass
            csock.setblocking(True)
            ssock, _ = lsock.accept()
        except:
            csock.close()
            raise
    finally:
        lsock.close()
    # Authenticating avoids using a connection from something else
    # able to connect to {host}:{port} instead of us.
    # We expect only AF_INET and AF_INET6 families.
    try:
        if (
            ssock.getsockname() != csock.getpeername()
            or csock.getsockname() != ssock.getpeername()
        ):
            raise ConnectionError("Unexpected peer connection")
    except:
        # getsockname() and getpeername() can fail
        # if either socket isn't connected.
        ssock.close()
        csock.close()
        raise
    return (ssock, csock)
if hasattr(_socket, "socketpair"):
    def socketpair(family=None, type=SOCK_STREAM, proto=0):
        if family is None:
            # AF_UNIX may be missing from the constant table on some
            # platforms; fall back to AF_INET in that case.
            try:
                family = AF_UNIX
            except NameError:
                family = AF_INET
        a, b = _socket.socketpair(family, type, proto)
        # Re-wrap the raw _socket.socket objects in this module's subclass.
        a = socket(family, type, proto, a.detach())
        b = socket(family, type, proto, b.detach())
        return a, b
else:
    socketpair = _fallback_socketpair
__all__.append("socketpair")
socketpair.__doc__ = """socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is AF_UNIX
if defined on the platform; otherwise, the default is AF_INET.
"""
# Errno values that merely signal "would block" on a non-blocking socket.
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
class SocketIO(io.RawIOBase):
    """Raw I/O implementation for stream sockets.
    This class supports the makefile() method on sockets. It provides
    the raw I/O interface on top of a socket object.
    """
    # One might wonder why not let FileIO do the job instead. There are two
    # main reasons why FileIO is not adapted:
    # - it wouldn't work under Windows (where you can't used read() and
    #   write() on a socket handle)
    # - it wouldn't work with socket timeouts (FileIO would ignore the
    #   timeout and consider the socket non-blocking)
    # XXX More docs
    def __init__(self, sock, mode):
        if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
            raise ValueError("invalid mode: %r" % mode)
        io.RawIOBase.__init__(self)
        self._sock = sock
        if "b" not in mode:
            mode += "b"
        self._mode = mode
        self._reading = "r" in mode
        self._writing = "w" in mode
        # Set once a recv times out; further reads are refused (see readinto).
        self._timeout_occurred = False
    def readinto(self, b):
        """Read up to len(b) bytes into the writable buffer *b* and return
        the number of bytes read.  If the socket is non-blocking and no bytes
        are available, None is returned.
        If *b* is non-empty, a 0 return value indicates that the connection
        was shutdown at the other end.
        """
        self._checkClosed()
        self._checkReadable()
        if self._timeout_occurred:
            raise OSError("cannot read from timed out object")
        try:
            return self._sock.recv_into(b)
        except timeout:
            self._timeout_occurred = True
            raise
        except error as e:
            # EAGAIN/EWOULDBLOCK mean "no data yet" on a non-blocking
            # socket; RawIOBase signals that with None.
            if e.errno in _blocking_errnos:
                return None
            raise
    def write(self, b):
        """Write the given bytes or bytearray object *b* to the socket
        and return the number of bytes written.  This can be less than
        len(b) if not all data could be written.  If the socket is
        non-blocking and no bytes could be written None is returned.
        """
        self._checkClosed()
        self._checkWritable()
        try:
            return self._sock.send(b)
        except error as e:
            # XXX what about EINTR?
            if e.errno in _blocking_errnos:
                return None
            raise
    def readable(self):
        """True if the SocketIO is open for reading.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return self._reading
    def writable(self):
        """True if the SocketIO is open for writing.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return self._writing
    def seekable(self):
        """True if the SocketIO is open for seeking.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return super().seekable()
    def fileno(self):
        """Return the file descriptor of the underlying socket.
        """
        self._checkClosed()
        return self._sock.fileno()
    @property
    def name(self):
        if not self.closed:
            return self.fileno()
        else:
            return -1
    @property
    def mode(self):
        return self._mode
    def close(self):
        """Close the SocketIO object.  This doesn't close the underlying
        socket, except if all references to it have disappeared.
        """
        if self.closed:
            return
        io.RawIOBase.close(self)
        # Drop our reference count on the socket; may trigger its real close.
        self._sock._decref_socketios()
        self._sock = None
def getfqdn(name=''):
    """Return a fully qualified domain name for *name*.

    An empty name, '0.0.0.0' or '::' means the local host, in which case
    gethostname() supplies the name to resolve.  The reverse-lookup result
    from gethostbyaddr() is scanned (canonical name first, then aliases)
    for an entry containing a dot; failing that the canonical name is
    returned, and if the lookup itself fails the stripped input is
    returned unchanged.
    """
    name = name.strip()
    if not name or name in ('0.0.0.0', '::'):
        name = gethostname()
    try:
        primary, alias_list, _ = gethostbyaddr(name)
    except error:
        return name
    for candidate in [primary] + alias_list:
        if '.' in candidate:
            return candidate
    return primary
# Sentinel distinguishing "no timeout argument given" from timeout=None
# (None means "block forever" when passed to settimeout()).
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None, *, all_errors=False):
    """Connect to *address* and return the socket object.
    Convenience function.  Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object.  Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect.  If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used.  If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default. When a connection
    cannot be created, raises the last error if *all_errors* is False,
    and an ExceptionGroup of all errors if *all_errors* is True.
    """
    host, port = address
    exceptions = []
    # Try every resolved address in order until one connects.
    for res in getaddrinfo(host, port, 0, SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket(af, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            # Break explicitly a reference cycle
            exceptions.clear()
            return sock
        except error as exc:
            if not all_errors:
                exceptions.clear()  # raise only the last error
            exceptions.append(exc)
            if sock is not None:
                sock.close()
    if len(exceptions):
        try:
            if not all_errors:
                raise exceptions[0]
            raise ExceptionGroup("create_connection failed", exceptions)
        finally:
            # Break explicitly a reference cycle
            exceptions.clear()
    else:
        raise error("getaddrinfo returns an empty list")
def has_dualstack_ipv6():
    """Report whether one SOCK_STREAM socket can serve both IPv4 and IPv6,
    i.e. an AF_INET6 socket with the IPV6_V6ONLY option switched off.
    """
    required = ('IPPROTO_IPV6', 'IPV6_V6ONLY')
    if not (has_ipv6 and all(hasattr(_socket, name) for name in required)):
        return False
    try:
        with socket(AF_INET6, SOCK_STREAM) as probe:
            probe.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0)
    except error:
        return False
    return True
def create_server(address, *, family=AF_INET, backlog=None, reuse_port=False,
                  dualstack_ipv6=False):
    """Convenience function which creates a SOCK_STREAM type socket
    bound to *address* (a 2-tuple (host, port)) and return the socket
    object.
    *family* should be either AF_INET or AF_INET6.
    *backlog* is the queue size passed to socket.listen().
    *reuse_port* dictates whether to use the SO_REUSEPORT socket option.
    *dualstack_ipv6*: if true and the platform supports it, it will
    create an AF_INET6 socket able to accept both IPv4 or IPv6
    connections. When false it will explicitly disable this option on
    platforms that enable it by default (e.g. Linux).
    >>> with create_server(('', 8000)) as server:
    ...     while True:
    ...         conn, addr = server.accept()
    ...         # handle new connection
    """
    if reuse_port and not hasattr(_socket, "SO_REUSEPORT"):
        raise ValueError("SO_REUSEPORT not supported on this platform")
    if dualstack_ipv6:
        if not has_dualstack_ipv6():
            raise ValueError("dualstack_ipv6 not supported on this platform")
        if family != AF_INET6:
            raise ValueError("dualstack_ipv6 requires AF_INET6 family")
    sock = socket(family, SOCK_STREAM)
    try:
        # Note about Windows. We don't set SO_REUSEADDR because:
        # 1) It's unnecessary: bind() will succeed even in case of a
        # previous closed socket on the same address and still in
        # TIME_WAIT state.
        # 2) If set, another socket is free to bind() on the same
        # address, effectively preventing this one from accepting
        # connections. Also, it may set the process in a state where
        # it'll no longer respond to any signals or graceful kills.
        # See: https://learn.microsoft.com/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse
        if os.name not in ('nt', 'cygwin') and \
                hasattr(_socket, 'SO_REUSEADDR'):
            try:
                sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
            except error:
                # Fail later on bind(), for platforms which may not
                # support this option.
                pass
        # Since Linux 6.12.9, SO_REUSEPORT is not allowed
        # on other address families than AF_INET/AF_INET6.
        if reuse_port and family in (AF_INET, AF_INET6):
            sock.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)
        if has_ipv6 and family == AF_INET6:
            if dualstack_ipv6:
                sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0)
            elif hasattr(_socket, "IPV6_V6ONLY") and \
                    hasattr(_socket, "IPPROTO_IPV6"):
                sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1)
        try:
            sock.bind(address)
        except error as err:
            # Re-raise with the address baked into the message for easier
            # debugging; "from None" keeps the traceback short.
            msg = '%s (while attempting to bind on address %r)' % \
                (err.strerror, address)
            raise error(err.errno, msg) from None
        if backlog is None:
            sock.listen()
        else:
            sock.listen(backlog)
        return sock
    except error:
        sock.close()
        raise
def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
    """Resolve host and port into list of address info entries.
    Translate the host/port argument into a sequence of 5-tuples that contain
    all the necessary arguments for creating a socket connected to that service.
    host is a domain name, a string representation of an IPv4/v6 address or
    None. port is a string service name such as 'http', a numeric port number or
    None. By passing None as the value of host and port, you can pass NULL to
    the underlying C API.
    The family, type and proto arguments can be optionally specified in order to
    narrow the list of addresses returned. Passing zero as a value for each of
    these arguments selects the full range of results.
    """
    # Identical to _socket.getaddrinfo() except that the numeric family
    # and socket-type fields are upgraded to their IntEnum equivalents.
    return [
        (_intenum_converter(af, AddressFamily),
         _intenum_converter(kind, SocketKind),
         proto_num, canonname, sockaddr)
        for af, kind, proto_num, canonname, sockaddr
        in _socket.getaddrinfo(host, port, family, type, proto, flags)
    ]
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/access-controllers/access-controllers.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Generic Domain Access Controllers
maintainers:
- Oleksii Moisieiev <oleksii_moisieiev@epam.com>
description: |+
Common access controllers properties
Access controllers are in charge of stating which of the hardware blocks under
  their responsibility (their domain) can be accessed by which compartment. A
compartment can be a cluster of CPUs (or coprocessors), a range of addresses
or a group of hardware blocks. An access controller's domain is the set of
resources covered by the access controller.
This device tree binding can be used to bind devices to their access
controller provided by access-controllers property. In this case, the device
is a consumer and the access controller is the provider.
An access controller can be represented by any node in the device tree and
can provide one or more configuration parameters, needed to control parameters
of the consumer device. A consumer node can refer to the provider by phandle
and a set of phandle arguments, specified by '#access-controller-cells'
property in the access controller node.
Access controllers are typically used to set/read the permissions of a
  hardware block and grant access to it. What exactly is possible depends on
  the access controller. The capabilities of each access controller are defined by the
binding of the access controller device.
  Each node can be a consumer of several access controllers.
# always select the core schema
select: true
properties:
"#access-controller-cells":
description:
Number of cells in an access-controllers specifier;
Can be any value as specified by device tree binding documentation
of a particular provider. The node is an access controller.
access-controller-names:
$ref: /schemas/types.yaml#/definitions/string-array
description:
A list of access-controllers names, sorted in the same order as
access-controllers entries. Consumer drivers will use
access-controller-names to match with existing access-controllers entries.
access-controllers:
$ref: /schemas/types.yaml#/definitions/phandle-array
description:
A list of access controller specifiers, as defined by the
bindings of the access-controllers provider.
additionalProperties: true
examples:
- |
clock_controller: access-controllers@50000 {
reg = <0x50000 0x400>;
#access-controller-cells = <2>;
};
bus_controller: bus@60000 {
reg = <0x60000 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
#access-controller-cells = <3>;
uart4: serial@60100 {
reg = <0x60100 0x400>;
clocks = <&clk_serial>;
access-controllers = <&clock_controller 1 2>,
<&bus_controller 1 3 5>;
access-controller-names = "clock", "bus";
};
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/access-controllers/access-controllers.yaml |
#!/usr/bin/python
# -*- coding: utf8 -*-
# lt_LT
################################################################################
#
# Lithuanian language support assembled from contributions provided by:
# Paulius Sladkevičius
#
################################################################################
from openerp.addons.report_aeroo.ctt_objects import ctt_language
class lt_LT(ctt_language):
    """Lithuanian (lt_LT) number-to-words support for Aeroo reports."""
    def _init_lang(self):
        """Register the Lithuanian word tables used by wordify()."""
        self.name = 'lt_LT'
        # digits - masculine, singular
        # (fixed: cardinal "five" is 'penki'; 'penkti' is the ordinal "fifth")
        self.number_sng_msc = [u'nulis', u'vienas', u'du', u'trys', u'keturi',
                               u'penki', u'šeši', u'septyni', u'aštuoni',
                               u'devyni']
        # tens - masculine, singular (prefix forms combined with 'dešimt')
        self.tens_sng_msc = [u'nulis', u'vienas', u'dvi', u'tris',
                             u'keturias', u'penkias', u'šešias', u'septynias',
                             u'aštuonias', u'devynias']
        # teens - masculine (fixed: eighteen is 'aštuoniolika')
        self.teens = [u'dešimt', u'vienuolika', u'dvylika', u'trylika',
                      u'keturiolika', u'penkiolika', u'šešiolika',
                      u'septyniolika', u'aštuoniolika', u'devyniolika']
        # multiplier - masculine, singular
        self.multi_sng_msc = [u' šimtas', u' tūkstantis', u' milijonas',
                              u' milijardas']
        # multiplier - masculine, plural
        self.multi_plr_msc = [u' šimtai', u' tūkstančiai', u' milijonai',
                              u' milijardai']
        # multiplier - masculine, plural genitive (used after 10..19 and
        # after round tens)
        self.multi_plr_msc_2 = [u' šimtų', u' tūkstančių', u' milijonų',
                                u' milijardų']
        # next line is needed for correct loading of currencies
        import currencies
        return currencies
    def wordify(self, chunk, chunknr, gender):
        """Spell out a 1-3 digit string *chunk* in Lithuanian.

        *chunknr* selects the scale word appended for thousands/millions/...
        (0 means no scale word); *gender* selects the digit table.
        Returns u'' for an empty contribution.
        """
        if gender == 'm':
            number = self.number_sng_msc
        elif gender == 'f':
            # NOTE(review): feminine/neuter digit tables are not defined in
            # _init_lang(), so these branches raise AttributeError -- confirm
            # whether they were ever meant to be reachable.
            number = self.number_sng_fem
        elif gender == 'n':
            number = self.number_sng_neu
        else:
            # Fixed: an unknown gender used to leave `number` unbound and
            # crash with NameError; fall back to the masculine table.
            number = self.number_sng_msc
        words = u''
        # digit1 = hundreds, digit2 = tens, digit3 = ones
        digit1 = u''
        digit2 = u''
        digit3 = u''
        chunklength = len(chunk)
        if chunklength == 1:
            digit3 = chunk[0:1]
        if chunklength == 2:
            digit2 = chunk[0:1]
            digit3 = chunk[1:2]
        if chunklength == 3:
            digit1 = chunk[0:1]
            digit2 = chunk[1:2]
            digit3 = chunk[-1]
        # a lone zero
        if chunklength == 1 and digit3 == '0':
            return number[0]
        # hundreds
        if chunklength == 3:
            if digit1 == '1':
                words += self.multi_sng_msc[0]
            elif int(digit1) > 1:
                words += number[int(digit1)] + self.multi_plr_msc[0]
        # tens (10-19 are single teen words; 20-90 are <prefix>dešimt)
        if chunklength > 1:
            spacer = ' ' if words else ''
            if digit2 == '1':
                # Fixed: the attribute is self.teens (self.number_teens
                # never existed and raised AttributeError).
                words += spacer + self.teens[int(digit3)]
            elif int(digit2) > 1:
                words += spacer + self.tens_sng_msc[int(digit2)] + u'dešimt'
        # ones (skipped for teens, which already include the unit)
        if chunklength > 0 and digit2 != '1':
            spacer = u' ' if words else ''
            if int(digit3) > 0:
                words += spacer + number[int(digit3)]
        # append the scale word (tūkstantis/milijonas/...) when requested
        if words:
            if chunknr > 0:
                if digit3 == '1':
                    return words + self.multi_sng_msc[chunknr]
                # Fixed: the plural multiplier tables are attributes of self;
                # the bare names raised NameError here.
                if chunklength >= 2 and ((int(chunk) % 10) == 0 or
                        (digit2 == '1' and int(digit3) > 0)):
                    return words + self.multi_plr_msc_2[chunknr]
                return words + self.multi_plr_msc[chunknr]
            return words
        return ''
# Instantiate at import time; presumably the ctt_language base class
# registers the instance on construction -- verify against ctt_objects.
lt_LT()
// boost/filesystem/directory.hpp ---------------------------------------------------//
// Copyright Beman Dawes 2002-2009
// Copyright Jan Langer 2002
// Copyright Dietmar Kuehl 2001
// Copyright Vladimir Prus 2002
// Copyright Andrey Semashev 2019, 2022
// Distributed under the Boost Software License, Version 1.0.
// See http://www.boost.org/LICENSE_1_0.txt
// Library home page: http://www.boost.org/libs/filesystem
//--------------------------------------------------------------------------------------//
#ifndef BOOST_FILESYSTEM_DIRECTORY_HPP
#define BOOST_FILESYSTEM_DIRECTORY_HPP
#include <boost/filesystem/config.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/filesystem/file_status.hpp>
#include <boost/filesystem/detail/path_traits.hpp>
#include <cstddef>
#include <string>
#include <vector>
#include <boost/assert.hpp>
#include <boost/detail/bitmask.hpp>
#include <boost/system/error_code.hpp>
#include <boost/smart_ptr/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <boost/iterator/iterator_facade.hpp>
#include <boost/iterator/iterator_categories.hpp>
#include <boost/filesystem/detail/header.hpp> // must be the last #include
//--------------------------------------------------------------------------------------//
namespace boost {
namespace filesystem {
// Options controlling directory_iterator and recursive_directory_iterator
// traversal. Values form a bitmask; the combining operators are generated
// by the BOOST_BITMASK macro below.
enum class directory_options : unsigned int
{
    none = 0u,
    skip_permission_denied = 1u,         // if a directory cannot be opened because of insufficient permissions, pretend that the directory is empty
    follow_directory_symlink = 1u << 1u, // recursive_directory_iterator: follow directory symlinks
    skip_dangling_symlinks = 1u << 2u,   // non-standard extension for recursive_directory_iterator: don't follow dangling directory symlinks,
    pop_on_error = 1u << 3u,             // non-standard extension for recursive_directory_iterator: instead of producing an end iterator on errors,
                                         // repeatedly invoke pop() until it succeeds or the iterator becomes equal to end iterator
    _detail_no_follow = 1u << 4u,        // internal use only
    _detail_no_push = 1u << 5u           // internal use only
};

BOOST_BITMASK(directory_options)
class directory_iterator;
class recursive_directory_iterator;
namespace detail {
struct directory_iterator_params;
BOOST_FILESYSTEM_DECL void directory_iterator_construct(directory_iterator& it, path const& p, directory_options opts, directory_iterator_params* params, system::error_code* ec);
BOOST_FILESYSTEM_DECL void directory_iterator_increment(directory_iterator& it, system::error_code* ec);
struct recur_dir_itr_imp;
BOOST_FILESYSTEM_DECL void recursive_directory_iterator_construct(recursive_directory_iterator& it, path const& dir_path, directory_options opts, system::error_code* ec);
BOOST_FILESYSTEM_DECL void recursive_directory_iterator_increment(recursive_directory_iterator& it, system::error_code* ec);
BOOST_FILESYSTEM_DECL void recursive_directory_iterator_pop(recursive_directory_iterator& it, system::error_code* ec);
} // namespace detail
//--------------------------------------------------------------------------------------//
// //
// directory_entry //
// //
//--------------------------------------------------------------------------------------//
// GCC has a problem with a member function named path within a namespace or
// sub-namespace that also has a class named path. The workaround is to always
// fully qualify the name path when it refers to the class name.
// A single directory element: a path plus lazily cached file_status and
// symlink_status. Mirrors std::filesystem::directory_entry. Observers
// refresh the cache on demand when the status is not yet known; the
// friend iterator machinery fills the cache directly during traversal.
class directory_entry
{
    friend BOOST_FILESYSTEM_DECL void detail::directory_iterator_construct(directory_iterator& it, path const& p, directory_options opts, detail::directory_iterator_params* params, system::error_code* ec);
    friend BOOST_FILESYSTEM_DECL void detail::directory_iterator_increment(directory_iterator& it, system::error_code* ec);
    friend BOOST_FILESYSTEM_DECL void detail::recursive_directory_iterator_increment(recursive_directory_iterator& it, system::error_code* ec);

public:
    typedef boost::filesystem::path::value_type value_type; // enables class path ctor taking directory_entry

    directory_entry() noexcept {}

    explicit directory_entry(boost::filesystem::path const& p);

#if BOOST_FILESYSTEM_VERSION >= 4
    // v4: construction refreshes the cached statuses immediately;
    // on failure *ec reports the error and the path is cleared.
    directory_entry(boost::filesystem::path const& p, system::error_code& ec) :
        m_path(p)
    {
        refresh_impl(&ec);
        if (ec)
            m_path.clear();
    }
#else
    // v3: caller supplies (possibly empty) statuses; no filesystem query.
    directory_entry(boost::filesystem::path const& p, file_status st, file_status symlink_st = file_status()) :
        m_path(p), m_status(st), m_symlink_status(symlink_st)
    {
    }
#endif

    directory_entry(directory_entry const& rhs) :
        m_path(rhs.m_path), m_status(rhs.m_status), m_symlink_status(rhs.m_symlink_status)
    {
    }

    directory_entry& operator=(directory_entry const& rhs)
    {
        m_path = rhs.m_path;
        m_status = rhs.m_status;
        m_symlink_status = rhs.m_symlink_status;
        return *this;
    }

    // Move operations transfer both the path and the cached statuses.
    directory_entry(directory_entry&& rhs) noexcept :
        m_path(static_cast< boost::filesystem::path&& >(rhs.m_path)),
        m_status(static_cast< file_status&& >(rhs.m_status)),
        m_symlink_status(static_cast< file_status&& >(rhs.m_symlink_status))
    {
    }

    directory_entry& operator=(directory_entry&& rhs) noexcept
    {
        m_path = static_cast< boost::filesystem::path&& >(rhs.m_path);
        m_status = static_cast< file_status&& >(rhs.m_status);
        m_symlink_status = static_cast< file_status&& >(rhs.m_symlink_status);
        return *this;
    }

    // Modifiers: replace the stored path. In v4 they re-query the
    // filesystem; in v3 they take caller-provided statuses instead.
    void assign(boost::filesystem::path&& p);

#if BOOST_FILESYSTEM_VERSION >= 4
    void assign(boost::filesystem::path&& p, system::error_code& ec)
    {
        m_path = static_cast< boost::filesystem::path&& >(p);
        refresh_impl(&ec);
    }
#else
    void assign(boost::filesystem::path&& p, file_status st, file_status symlink_st = file_status())
    {
        assign_with_status(static_cast< boost::filesystem::path&& >(p), st, symlink_st);
    }
#endif

    void assign(boost::filesystem::path const& p);

#if BOOST_FILESYSTEM_VERSION >= 4
    void assign(boost::filesystem::path const& p, system::error_code& ec)
    {
        m_path = p;
        refresh_impl(&ec);
    }
#else
    void assign(boost::filesystem::path const& p, file_status st, file_status symlink_st = file_status())
    {
        assign_with_status(p, st, symlink_st);
    }
#endif

    void replace_filename(boost::filesystem::path const& p);

#if BOOST_FILESYSTEM_VERSION >= 4
    void replace_filename(boost::filesystem::path const& p, system::error_code& ec)
    {
        m_path.replace_filename(p);
        refresh_impl(&ec);
    }
#else
    void replace_filename(boost::filesystem::path const& p, file_status st, file_status symlink_st = file_status())
    {
        replace_filename_with_status(p, st, symlink_st);
    }

    BOOST_FILESYSTEM_DETAIL_DEPRECATED("Use directory_entry::replace_filename() instead")
    void replace_leaf(boost::filesystem::path const& p, file_status st, file_status symlink_st)
    {
        replace_filename_with_status(p, st, symlink_st);
    }
#endif

    // Observers. path() is fully qualified throughout this class to work
    // around the GCC name-lookup issue described before the class.
    boost::filesystem::path const& path() const noexcept { return m_path; }
    operator boost::filesystem::path const&() const noexcept { return m_path; }

    // Re-query the filesystem and update both cached statuses.
    void refresh() { refresh_impl(); }
    void refresh(system::error_code& ec) noexcept { refresh_impl(&ec); }

    // Status observers: refresh lazily if the cache is empty.
    file_status status() const
    {
        if (!filesystem::status_known(m_status))
            refresh_impl();
        return m_status;
    }

    file_status status(system::error_code& ec) const noexcept
    {
        ec.clear();
        if (!filesystem::status_known(m_status))
            refresh_impl(&ec);
        return m_status;
    }

    file_status symlink_status() const
    {
        if (!filesystem::status_known(m_symlink_status))
            refresh_impl();
        return m_symlink_status;
    }

    file_status symlink_status(system::error_code& ec) const noexcept
    {
        ec.clear();
        if (!filesystem::status_known(m_symlink_status))
            refresh_impl(&ec);
        return m_symlink_status;
    }

    filesystem::file_type file_type() const
    {
        if (!filesystem::type_present(m_status))
            refresh_impl();
        return m_status.type();
    }

    filesystem::file_type file_type(system::error_code& ec) const noexcept
    {
        ec.clear();
        if (!filesystem::type_present(m_status))
            refresh_impl(&ec);
        return m_status.type();
    }

    filesystem::file_type symlink_file_type() const
    {
        if (!filesystem::type_present(m_symlink_status))
            refresh_impl();
        return m_symlink_status.type();
    }

    filesystem::file_type symlink_file_type(system::error_code& ec) const noexcept
    {
        ec.clear();
        if (!filesystem::type_present(m_symlink_status))
            refresh_impl(&ec);
        return m_symlink_status.type();
    }

    // File-type predicates, evaluated against the cached (stat-like)
    // status; the is_symlink/is_reparse_file pair uses the lstat-like one.
    bool exists() const
    {
        filesystem::file_type ft = this->file_type();
        return ft != filesystem::status_error && ft != filesystem::file_not_found;
    }

    bool exists(system::error_code& ec) const noexcept
    {
        filesystem::file_type ft = this->file_type(ec);
        return ft != filesystem::status_error && ft != filesystem::file_not_found;
    }

    bool is_regular_file() const
    {
        return this->file_type() == filesystem::regular_file;
    }

    bool is_regular_file(system::error_code& ec) const noexcept
    {
        return this->file_type(ec) == filesystem::regular_file;
    }

    bool is_directory() const
    {
        return this->file_type() == filesystem::directory_file;
    }

    bool is_directory(system::error_code& ec) const noexcept
    {
        return this->file_type(ec) == filesystem::directory_file;
    }

    bool is_symlink() const
    {
        return this->symlink_file_type() == filesystem::symlink_file;
    }

    bool is_symlink(system::error_code& ec) const noexcept
    {
        return this->symlink_file_type(ec) == filesystem::symlink_file;
    }

    bool is_block_file() const
    {
        return this->file_type() == filesystem::block_file;
    }

    bool is_block_file(system::error_code& ec) const noexcept
    {
        return this->file_type(ec) == filesystem::block_file;
    }

    bool is_character_file() const
    {
        return this->file_type() == filesystem::character_file;
    }

    bool is_character_file(system::error_code& ec) const noexcept
    {
        return this->file_type(ec) == filesystem::character_file;
    }

    bool is_fifo() const
    {
        return this->file_type() == filesystem::fifo_file;
    }

    bool is_fifo(system::error_code& ec) const noexcept
    {
        return this->file_type(ec) == filesystem::fifo_file;
    }

    bool is_socket() const
    {
        return this->file_type() == filesystem::socket_file;
    }

    bool is_socket(system::error_code& ec) const noexcept
    {
        return this->file_type(ec) == filesystem::socket_file;
    }

    bool is_reparse_file() const
    {
        return this->symlink_file_type() == filesystem::reparse_file;
    }

    bool is_reparse_file(system::error_code& ec) const noexcept
    {
        return this->symlink_file_type(ec) == filesystem::reparse_file;
    }

    // "Other": exists but is neither a regular file nor a directory.
    bool is_other() const
    {
        filesystem::file_type ft = this->file_type();
        return ft != filesystem::status_error && ft != filesystem::file_not_found &&
            ft != filesystem::regular_file && ft != filesystem::directory_file;
    }

    bool is_other(system::error_code& ec) const noexcept
    {
        filesystem::file_type ft = this->file_type(ec);
        return ft != filesystem::status_error && ft != filesystem::file_not_found &&
            ft != filesystem::regular_file && ft != filesystem::directory_file;
    }

    // Comparisons order by path only; cached statuses do not participate.
    bool operator==(directory_entry const& rhs) const { return m_path == rhs.m_path; }
    bool operator!=(directory_entry const& rhs) const { return m_path != rhs.m_path; }
    bool operator<(directory_entry const& rhs) const { return m_path < rhs.m_path; }
    bool operator<=(directory_entry const& rhs) const { return m_path <= rhs.m_path; }
    bool operator>(directory_entry const& rhs) const { return m_path > rhs.m_path; }
    bool operator>=(directory_entry const& rhs) const { return m_path >= rhs.m_path; }

private:
    // Queries the filesystem for m_path and updates the mutable caches
    // (hence usable from const observers).
    BOOST_FILESYSTEM_DECL void refresh_impl(system::error_code* ec = nullptr) const;

    void assign_with_status(boost::filesystem::path&& p, file_status st, file_status symlink_st)
    {
        m_path = static_cast< boost::filesystem::path&& >(p);
        m_status = static_cast< file_status&& >(st);
        m_symlink_status = static_cast< file_status&& >(symlink_st);
    }

    void assign_with_status(boost::filesystem::path const& p, file_status st, file_status symlink_st)
    {
        m_path = p;
        m_status = static_cast< file_status&& >(st);
        m_symlink_status = static_cast< file_status&& >(symlink_st);
    }

    void replace_filename_with_status(boost::filesystem::path const& p, file_status st, file_status symlink_st)
    {
        m_path.replace_filename(p);
        m_status = static_cast< file_status&& >(st);
        m_symlink_status = static_cast< file_status&& >(symlink_st);
    }

private:
    boost::filesystem::path m_path;
    mutable file_status m_status;         // stat()-like
    mutable file_status m_symlink_status; // lstat()-like
};
#if !defined(BOOST_FILESYSTEM_SOURCE)

// Out-of-class inline definitions (omitted when building the library
// itself). In v4 every modifier refreshes the cached statuses; in v3 it
// merely resets them to "unknown".
inline directory_entry::directory_entry(boost::filesystem::path const& p) :
    m_path(p)
{
#if BOOST_FILESYSTEM_VERSION >= 4
    refresh_impl();
#endif
}

inline void directory_entry::assign(boost::filesystem::path&& p)
{
    m_path = static_cast< boost::filesystem::path&& >(p);
#if BOOST_FILESYSTEM_VERSION >= 4
    refresh_impl();
#else
    m_status = file_status();
    m_symlink_status = file_status();
#endif
}

inline void directory_entry::assign(boost::filesystem::path const& p)
{
    m_path = p;
#if BOOST_FILESYSTEM_VERSION >= 4
    refresh_impl();
#else
    m_status = file_status();
    m_symlink_status = file_status();
#endif
}

inline void directory_entry::replace_filename(boost::filesystem::path const& p)
{
    m_path.replace_filename(p);
#if BOOST_FILESYSTEM_VERSION >= 4
    refresh_impl();
#else
    m_status = file_status();
    m_symlink_status = file_status();
#endif
}

#endif // !defined(BOOST_FILESYSTEM_SOURCE)
namespace detail {
namespace path_traits {

// Dispatch function for integration with the path class: lets a
// directory_entry act as a path source by handing its native string
// range to the callback.
template< typename Callback >
BOOST_FORCEINLINE typename Callback::result_type dispatch(directory_entry const& de, Callback cb, const codecvt_type* cvt, directory_entry_tag)
{
    boost::filesystem::path::string_type const& source = de.path().native();
    return cb(source.data(), source.data() + source.size(), cvt);
}

} // namespace path_traits
} // namespace detail
//--------------------------------------------------------------------------------------//
// //
// directory_entry overloads //
// //
//--------------------------------------------------------------------------------------//
// Without these functions, calling (for example) 'is_directory' with a 'directory_entry' results in:
// - a conversion to 'path' using 'operator boost::filesystem::path const&()',
// - then a call to 'is_directory(path const& p)' which recomputes the status with 'detail::status(p)'.
//
// These functions avoid a costly recomputation of the status if one calls 'is_directory(e)' instead of 'is_directory(e.status())'
// Status observers: forward to the directory_entry cache.
inline file_status status(directory_entry const& e)
{
    return e.status();
}

inline file_status status(directory_entry const& e, system::error_code& ec) noexcept
{
    return e.status(ec);
}

inline file_status symlink_status(directory_entry const& e)
{
    return e.symlink_status();
}

inline file_status symlink_status(directory_entry const& e, system::error_code& ec) noexcept
{
    return e.symlink_status(ec);
}

inline bool type_present(directory_entry const& e)
{
    return e.file_type() != filesystem::status_error;
}

inline bool type_present(directory_entry const& e, system::error_code& ec) noexcept
{
    return e.file_type(ec) != filesystem::status_error;
}

inline bool status_known(directory_entry const& e)
{
    return filesystem::status_known(e.status());
}

inline bool status_known(directory_entry const& e, system::error_code& ec) noexcept
{
    return filesystem::status_known(e.status(ec));
}

// Type predicates: forward to the members so the cached status is
// reused instead of re-querying the filesystem through the path overloads.
inline bool exists(directory_entry const& e)
{
    return e.exists();
}

inline bool exists(directory_entry const& e, system::error_code& ec) noexcept
{
    return e.exists(ec);
}

inline bool is_regular_file(directory_entry const& e)
{
    return e.is_regular_file();
}

inline bool is_regular_file(directory_entry const& e, system::error_code& ec) noexcept
{
    return e.is_regular_file(ec);
}

inline bool is_directory(directory_entry const& e)
{
    return e.is_directory();
}

inline bool is_directory(directory_entry const& e, system::error_code& ec) noexcept
{
    return e.is_directory(ec);
}

inline bool is_symlink(directory_entry const& e)
{
    return e.is_symlink();
}

inline bool is_symlink(directory_entry const& e, system::error_code& ec) noexcept
{
    return e.is_symlink(ec);
}

inline bool is_block_file(directory_entry const& e)
{
    return e.is_block_file();
}

inline bool is_block_file(directory_entry const& e, system::error_code& ec) noexcept
{
    return e.is_block_file(ec);
}

inline bool is_character_file(directory_entry const& e)
{
    return e.is_character_file();
}

inline bool is_character_file(directory_entry const& e, system::error_code& ec) noexcept
{
    return e.is_character_file(ec);
}

inline bool is_fifo(directory_entry const& e)
{
    return e.is_fifo();
}

inline bool is_fifo(directory_entry const& e, system::error_code& ec) noexcept
{
    return e.is_fifo(ec);
}

inline bool is_socket(directory_entry const& e)
{
    return e.is_socket();
}

inline bool is_socket(directory_entry const& e, system::error_code& ec) noexcept
{
    return e.is_socket(ec);
}

inline bool is_reparse_file(directory_entry const& e)
{
    return e.is_reparse_file();
}

inline bool is_reparse_file(directory_entry const& e, system::error_code& ec) noexcept
{
    return e.is_reparse_file(ec);
}

inline bool is_other(directory_entry const& e)
{
    return e.is_other();
}

inline bool is_other(directory_entry const& e, system::error_code& ec) noexcept
{
    return e.is_other(ec);
}
//--------------------------------------------------------------------------------------//
// //
// directory_iterator helpers //
// //
//--------------------------------------------------------------------------------------//
namespace detail {

// Reference-counted implementation state shared by copies of
// directory_iterator (shallow-copy semantics of an InputIterator).
// A null \c handle denotes the exhausted/end state.
struct dir_itr_imp :
    public boost::intrusive_ref_counter< dir_itr_imp >
{
#ifdef BOOST_WINDOWS_API
    bool close_handle;
    unsigned char extra_data_format;
    std::size_t current_offset;
#endif
    directory_entry dir_entry;
    void* handle;

    dir_itr_imp() noexcept :
#ifdef BOOST_WINDOWS_API
        close_handle(false),
        extra_data_format(0u),
        current_offset(0u),
#endif
        handle(nullptr)
    {
    }

    BOOST_FILESYSTEM_DECL ~dir_itr_imp() noexcept;

    // Custom allocation taking an extra_size argument -- presumably for
    // platform-specific data co-allocated with the object (defined in
    // the library source; confirm there).
    BOOST_FILESYSTEM_DECL static void* operator new(std::size_t class_size, std::size_t extra_size) noexcept;
    BOOST_FILESYSTEM_DECL static void operator delete(void* p, std::size_t extra_size) noexcept;
    BOOST_FILESYSTEM_DECL static void operator delete(void* p) noexcept;
};

} // namespace detail
//--------------------------------------------------------------------------------------//
// //
// directory_iterator //
// //
//--------------------------------------------------------------------------------------//
// Single-pass (input) iterator over the entries of one directory.
// Copies share state via intrusive_ptr; the default-constructed
// iterator is the end iterator.
class directory_iterator :
    public boost::iterator_facade<
        directory_iterator,
        directory_entry,
        boost::single_pass_traversal_tag
    >
{
    friend class boost::iterator_core_access;
    friend BOOST_FILESYSTEM_DECL void detail::directory_iterator_construct(directory_iterator& it, path const& p, directory_options opts, detail::directory_iterator_params* params, system::error_code* ec);
    friend BOOST_FILESYSTEM_DECL void detail::directory_iterator_increment(directory_iterator& it, system::error_code* ec);
    friend BOOST_FILESYSTEM_DECL void detail::recursive_directory_iterator_increment(recursive_directory_iterator& it, system::error_code* ec);

public:
    directory_iterator() noexcept {} // creates the "end" iterator

    // iterator_facade derived classes don't seem to like implementations in
    // separate translation unit dll's, so forward to detail functions
    explicit directory_iterator(path const& p, directory_options opts = directory_options::none)
    {
        detail::directory_iterator_construct(*this, p, opts, nullptr, nullptr);
    }

    directory_iterator(path const& p, system::error_code& ec) noexcept
    {
        detail::directory_iterator_construct(*this, p, directory_options::none, nullptr, &ec);
    }

    directory_iterator(path const& p, directory_options opts, system::error_code& ec) noexcept
    {
        detail::directory_iterator_construct(*this, p, opts, nullptr, &ec);
    }

    directory_iterator(directory_iterator const&) = default;
    directory_iterator& operator=(directory_iterator const&) = default;

    directory_iterator(directory_iterator&& that) noexcept :
        m_imp(static_cast< boost::intrusive_ptr< detail::dir_itr_imp >&& >(that.m_imp))
    {
    }

    directory_iterator& operator=(directory_iterator&& that) noexcept
    {
        m_imp = static_cast< boost::intrusive_ptr< detail::dir_itr_imp >&& >(that.m_imp);
        return *this;
    }

    // Non-throwing advance; errors are reported through ec.
    directory_iterator& increment(system::error_code& ec) noexcept
    {
        detail::directory_iterator_increment(*this, &ec);
        return *this;
    }

private:
    // iterator_facade hooks
    boost::iterator_facade<
        directory_iterator,
        directory_entry,
        boost::single_pass_traversal_tag
    >::reference dereference() const
    {
        BOOST_ASSERT_MSG(!is_end(), "attempt to dereference end directory iterator");
        return m_imp->dir_entry;
    }

    void increment() { detail::directory_iterator_increment(*this, nullptr); }

    bool equal(directory_iterator const& rhs) const noexcept
    {
        return m_imp == rhs.m_imp || (is_end() && rhs.is_end());
    }

    bool is_end() const noexcept
    {
        // Note: The check for handle is needed because the iterator can be copied and the copy
        // can be incremented to end while the original iterator still refers to the same dir_itr_imp.
        return !m_imp || !m_imp->handle;
    }

private:
    // intrusive_ptr provides the shallow-copy semantics required for single pass iterators
    // (i.e. InputIterators). The end iterator is indicated by is_end().
    boost::intrusive_ptr< detail::dir_itr_imp > m_imp;
};
// enable directory_iterator C++11 range-based for statement use --------------------//
// begin() and end() are only used by a range-based for statement in the context of
// auto - thus the top-level const is stripped - so returning const is harmless and
// emphasizes begin() is just a pass through.
// begin()/end() found by ADL enable range-based for over a
// directory_iterator; begin() passes the iterator through unchanged.
inline directory_iterator const& begin(directory_iterator const& iter) noexcept
{
    return iter;
}

inline directory_iterator end(directory_iterator const&) noexcept
{
    return directory_iterator();
}

// enable C++14 generic accessors for range const iterators
inline directory_iterator const& cbegin(directory_iterator const& iter) noexcept
{
    return iter;
}

inline directory_iterator cend(directory_iterator const&) noexcept
{
    return directory_iterator();
}

// enable directory_iterator BOOST_FOREACH -----------------------------------------//
inline directory_iterator& range_begin(directory_iterator& iter) noexcept
{
    return iter;
}

inline directory_iterator range_begin(directory_iterator const& iter) noexcept
{
    return iter;
}

inline directory_iterator range_end(directory_iterator&) noexcept
{
    return directory_iterator();
}

inline directory_iterator range_end(directory_iterator const&) noexcept
{
    return directory_iterator();
}
} // namespace filesystem
// namespace boost template specializations
// Forward-declare Boost.Range's customization points and specialize them
// for directory_iterator so it can be used where a Range is expected.
template< typename C, typename Enabler >
struct range_mutable_iterator;

template<>
struct range_mutable_iterator< boost::filesystem::directory_iterator, void >
{
    typedef boost::filesystem::directory_iterator type;
};

template< typename C, typename Enabler >
struct range_const_iterator;

template<>
struct range_const_iterator< boost::filesystem::directory_iterator, void >
{
    typedef boost::filesystem::directory_iterator type;
};
namespace filesystem {
//--------------------------------------------------------------------------------------//
// //
// recursive_directory_iterator helpers //
// //
//--------------------------------------------------------------------------------------//
namespace detail {

// Reference-counted state shared by copies of recursive_directory_iterator:
// one directory_iterator per nesting level on m_stack, plus the traversal
// options (which also carry the transient _detail_no_push/_detail_no_follow
// flags).
struct recur_dir_itr_imp :
    public boost::intrusive_ref_counter< recur_dir_itr_imp >
{
    typedef directory_iterator element_type;
    std::vector< element_type > m_stack;
    directory_options m_options;

    explicit recur_dir_itr_imp(directory_options opts) noexcept : m_options(opts) {}
};

} // namespace detail
//--------------------------------------------------------------------------------------//
// //
// recursive_directory_iterator //
// //
//--------------------------------------------------------------------------------------//
// Single-pass iterator that walks a directory tree depth-first. Keeps a
// stack of directory_iterators (one per level); recursion into
// subdirectories is controlled by directory_options and
// disable_recursion_pending().
class recursive_directory_iterator :
    public boost::iterator_facade<
        recursive_directory_iterator,
        directory_entry,
        boost::single_pass_traversal_tag
    >
{
    friend class boost::iterator_core_access;
    friend BOOST_FILESYSTEM_DECL void detail::recursive_directory_iterator_construct(recursive_directory_iterator& it, path const& dir_path, directory_options opts, system::error_code* ec);
    friend BOOST_FILESYSTEM_DECL void detail::recursive_directory_iterator_increment(recursive_directory_iterator& it, system::error_code* ec);
    friend BOOST_FILESYSTEM_DECL void detail::recursive_directory_iterator_pop(recursive_directory_iterator& it, system::error_code* ec);

public:
    recursive_directory_iterator() noexcept {} // creates the "end" iterator

    explicit recursive_directory_iterator(path const& dir_path)
    {
        detail::recursive_directory_iterator_construct(*this, dir_path, directory_options::none, nullptr);
    }

    recursive_directory_iterator(path const& dir_path, system::error_code& ec)
    {
        detail::recursive_directory_iterator_construct(*this, dir_path, directory_options::none, &ec);
    }

    recursive_directory_iterator(path const& dir_path, directory_options opts)
    {
        detail::recursive_directory_iterator_construct(*this, dir_path, opts, nullptr);
    }

    recursive_directory_iterator(path const& dir_path, directory_options opts, system::error_code& ec)
    {
        detail::recursive_directory_iterator_construct(*this, dir_path, opts, &ec);
    }

    recursive_directory_iterator(recursive_directory_iterator const&) = default;
    recursive_directory_iterator& operator=(recursive_directory_iterator const&) = default;

    recursive_directory_iterator(recursive_directory_iterator&& that) noexcept :
        m_imp(static_cast< boost::intrusive_ptr< detail::recur_dir_itr_imp >&& >(that.m_imp))
    {
    }

    recursive_directory_iterator& operator=(recursive_directory_iterator&& that) noexcept
    {
        m_imp = static_cast< boost::intrusive_ptr< detail::recur_dir_itr_imp >&& >(that.m_imp);
        return *this;
    }

    // Non-throwing advance; errors are reported through ec.
    recursive_directory_iterator& increment(system::error_code& ec) noexcept
    {
        detail::recursive_directory_iterator_increment(*this, &ec);
        return *this;
    }

    // Current nesting depth: 0 while iterating the start directory.
    int depth() const noexcept
    {
        BOOST_ASSERT_MSG(!is_end(), "depth() on end recursive_directory_iterator");
        return static_cast< int >(m_imp->m_stack.size() - 1u);
    }

    // True unless recursion was suppressed via disable_recursion_pending().
    bool recursion_pending() const noexcept
    {
        BOOST_ASSERT_MSG(!is_end(), "recursion_pending() on end recursive_directory_iterator");
        return (m_imp->m_options & directory_options::_detail_no_push) == directory_options::none;
    }

    // Leave the current directory level and continue in its parent.
    void pop()
    {
        detail::recursive_directory_iterator_pop(*this, nullptr);
    }

    void pop(system::error_code& ec) noexcept
    {
        detail::recursive_directory_iterator_pop(*this, &ec);
    }

    // Prevent (or re-enable) descending into the current entry on the
    // next increment; implemented via the internal _detail_no_push flag.
    void disable_recursion_pending(bool value = true) noexcept
    {
        BOOST_ASSERT_MSG(!is_end(), "disable_recursion_pending() on end recursive_directory_iterator");
        if (value)
            m_imp->m_options |= directory_options::_detail_no_push;
        else
            m_imp->m_options &= ~directory_options::_detail_no_push;
    }

    file_status status() const
    {
        BOOST_ASSERT_MSG(!is_end(), "status() on end recursive_directory_iterator");
        return m_imp->m_stack.back()->status();
    }

    file_status symlink_status() const
    {
        BOOST_ASSERT_MSG(!is_end(), "symlink_status() on end recursive_directory_iterator");
        return m_imp->m_stack.back()->symlink_status();
    }

private:
    // iterator_facade hooks
    boost::iterator_facade<
        recursive_directory_iterator,
        directory_entry,
        boost::single_pass_traversal_tag
    >::reference dereference() const
    {
        BOOST_ASSERT_MSG(!is_end(), "dereference of end recursive_directory_iterator");
        return *m_imp->m_stack.back();
    }

    void increment() { detail::recursive_directory_iterator_increment(*this, nullptr); }

    bool equal(recursive_directory_iterator const& rhs) const noexcept
    {
        return m_imp == rhs.m_imp || (is_end() && rhs.is_end());
    }

    bool is_end() const noexcept
    {
        // Note: The check for m_stack.empty() is needed because the iterator can be copied and the copy
        // can be incremented to end while the original iterator still refers to the same recur_dir_itr_imp.
        return !m_imp || m_imp->m_stack.empty();
    }

private:
    // intrusive_ptr provides the shallow-copy semantics required for single pass iterators
    // (i.e. InputIterators). The end iterator is indicated by is_end().
    boost::intrusive_ptr< detail::recur_dir_itr_imp > m_imp;
};
// enable recursive directory iterator C++11 range-base for statement use ----------//
// begin() and end() are only used by a range-based for statement in the context of
// auto - thus the top-level const is stripped - so returning const is harmless and
// emphasizes begin() is just a pass through.
// begin()/end() found by ADL enable range-based for over a
// recursive_directory_iterator; begin() passes the iterator through.
inline recursive_directory_iterator const& begin(recursive_directory_iterator const& iter) noexcept
{
    return iter;
}

inline recursive_directory_iterator end(recursive_directory_iterator const&) noexcept
{
    return recursive_directory_iterator();
}

// enable C++14 generic accessors for range const iterators
inline recursive_directory_iterator const& cbegin(recursive_directory_iterator const& iter) noexcept
{
    return iter;
}

inline recursive_directory_iterator cend(recursive_directory_iterator const&) noexcept
{
    return recursive_directory_iterator();
}

// enable recursive directory iterator BOOST_FOREACH -------------------------------//
inline recursive_directory_iterator& range_begin(recursive_directory_iterator& iter) noexcept
{
    return iter;
}

inline recursive_directory_iterator range_begin(recursive_directory_iterator const& iter) noexcept
{
    return iter;
}

inline recursive_directory_iterator range_end(recursive_directory_iterator&) noexcept
{
    return recursive_directory_iterator();
}

inline recursive_directory_iterator range_end(recursive_directory_iterator const&) noexcept
{
    return recursive_directory_iterator();
}
} // namespace filesystem
// namespace boost template specializations
// Boost.Range customization: a recursive_directory_iterator is its own
// range iterator type (the primary templates are declared above).
template<>
struct range_mutable_iterator< boost::filesystem::recursive_directory_iterator, void >
{
    typedef boost::filesystem::recursive_directory_iterator type;
};

template<>
struct range_const_iterator< boost::filesystem::recursive_directory_iterator, void >
{
    typedef boost::filesystem::recursive_directory_iterator type;
};
} // namespace boost
#include <boost/filesystem/detail/footer.hpp>
#endif // BOOST_FILESYSTEM_DIRECTORY_HPP | unknown | github | https://github.com/mysql/mysql-server | extra/boost/boost_1_87_0/boost/filesystem/directory.hpp |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.convolution.convolve import convolve, convolve_fft
from astropy.convolution.kernels import Gaussian2DKernel
from astropy.nddata import NDData
def test_basic_nddata():
    """Convolving an NDData delta image yields a Gaussian around the impulse."""
    grid_a, grid_b = np.mgrid[:11, :11]
    delta = np.zeros((11, 11))
    delta[5, 5] = 1
    ndd = NDData(delta)
    kernel = Gaussian2DKernel(1)
    out_direct = convolve(ndd, kernel)
    # Gaussian centred on the impulse, scaled to the convolved peak value.
    reference = out_direct[5, 5] * np.exp(-0.5 * ((grid_a - 5)**2 + (grid_b - 5)**2))
    np.testing.assert_allclose(out_direct, reference, atol=1e-6)
    # The FFT-based path must agree with the direct path.
    out_fft = convolve_fft(ndd, kernel)
    np.testing.assert_allclose(out_fft, reference, atol=1e-6)
@pytest.mark.parametrize('convfunc',
                         [lambda *args: convolve(*args, nan_treatment='interpolate', normalize_kernel=True),
                          lambda *args: convolve_fft(*args, nan_treatment='interpolate', normalize_kernel=True)])
def test_masked_nddata(convfunc):
    """An NDData mask must be honoured exactly like NaN values in the data."""
    base = np.zeros((11, 11))
    base[4, 5] = base[6, 5] = base[5, 4] = base[5, 6] = 0.2
    base[5, 5] = 1.5
    ndd_plain = NDData(base)
    # Mask only the central pixel (base < 0 starts as all-False).
    bad = base < 0
    bad[5, 5] = True
    ndd_masked = NDData(base, mask=bad)
    withnan = base.copy()
    withnan[5, 5] = np.nan
    ndd_withnan = NDData(withnan)
    kernel = Gaussian2DKernel(1)
    res_plain = convfunc(ndd_plain, kernel)
    res_nan = convfunc(ndd_withnan, kernel)
    res_masked = convfunc(ndd_masked, kernel)
    # Masked and NaN inputs interpolate identically; both differ from plain.
    assert np.allclose(res_nan, res_masked)
    assert not np.allclose(res_plain, res_masked)
    assert not np.allclose(res_plain, res_nan)
    # check to make sure the mask run doesn't talk back to the initial array
    assert np.sum(np.isnan(ndd_plain.data)) != np.sum(np.isnan(ndd_withnan.data))
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from itertools import groupby
import mock
import yaml
from nailgun.errors import errors
from nailgun.orchestrator import deployment_graph
from nailgun.orchestrator import graph_configuration
from nailgun.test import base
TASKS = """
- id: pre_deployment_start
type: stage
- id: pre_deployment
type: stage
requires: [pre_deployment_start]
- id: deploy_start
type: stage
requires: [pre_deployment]
- id: deploy_end
type: stage
requires: [deploy_start]
- id: primary-controller
type: group
role: [primary-controller]
required_for: [deploy_end]
requires: [deploy_start]
parameters:
strategy:
type: one_by_one
- id: controller
type: group
role: [controller]
requires: [primary-controller]
required_for: [deploy_end]
parameters:
strategy:
type: parallel
amount: 2
- id: cinder
type: group
role: [cinder]
requires: [controller]
required_for: [deploy_end]
parameters:
strategy:
type: parallel
- id: compute
type: group
role: [compute]
requires: [controller]
required_for: [deploy_end]
parameters:
strategy:
type: parallel
- id: network
type: group
role: [network]
requires: [controller]
required_for: [compute, deploy_end]
parameters:
strategy:
type: parallel
"""
SUBTASKS = """
- id: install_controller
type: puppet
requires: [setup_network]
groups: [controller, primary-controller]
required_for: [deploy_end]
parameters:
puppet_manifests: /etc/puppet/manifests/controller.pp
puppet_modules: /etc/puppet/modules
timeout: 360
- id: setup_network
type: puppet
groups: [controller, primary-controller]
required_for: [deploy_end]
requires: [deploy_start]
parameters:
puppet_manifest: run_setup_network.pp
puppet_modules: /etc/puppet
timeout: 120
- id: setup_anything
requires: [pre_deployment_start]
required_for: [pre_deployment]
type: shell
- id: setup_more_stuff
type: shell
requires_for: [pre_deployment]
requires: [setup_anything]
"""
class TestGraphDependencies(base.BaseTestCase):
def setUp(self):
super(TestGraphDependencies, self).setUp()
self.tasks = yaml.load(TASKS)
self.subtasks = yaml.load(SUBTASKS)
self.graph = deployment_graph.DeploymentGraph()
def test_build_deployment_graph(self):
self.graph.add_tasks(self.tasks)
roles = self.graph.get_groups_subgraph()
topology_by_id = [item['id'] for item in roles.topology]
self.assertEqual(
topology_by_id[:2], ['primary-controller', 'controller'])
network_pos = topology_by_id.index('network')
compute_pos = topology_by_id.index('compute')
cinder_pos = topology_by_id.index('cinder')
controller_pos = topology_by_id.index('controller')
# we dont have constraint on certain order between cinder and network
# therefore there should not be one
self.assertGreater(compute_pos, network_pos)
self.assertGreater(cinder_pos, controller_pos)
def test_subtasks_in_correct_order(self):
self.graph.add_tasks(self.tasks + self.subtasks)
subtasks = self.graph.get_group_tasks('controller')
topology_by_id = [item['id'] for item in subtasks]
self.assertItemsEqual(
topology_by_id,
['setup_network', 'install_controller'])
class TestAddDependenciesToNodes(base.BaseTestCase):
def setUp(self):
super(TestAddDependenciesToNodes, self).setUp()
self.cluster = mock.Mock()
self.cluster.deployment_tasks = yaml.load(TASKS + SUBTASKS)
self.graph = deployment_graph.AstuteGraph(self.cluster)
def test_priority_serilized_correctly_for_all_roles(self):
nodes = [{'uid': '3', 'role': 'primary-controller'},
{'uid': '1', 'role': 'controller'},
{'uid': '2', 'role': 'controller'},
{'uid': '4', 'role': 'controller'},
{'uid': '6', 'role': 'controller'},
{'uid': '7', 'role': 'cinder'},
{'uid': '8', 'role': 'cinder'},
{'uid': '9', 'role': 'network'},
{'uid': '10', 'role': 'compute'}]
self.graph.add_priorities(nodes)
by_priority = defaultdict(list)
for role, group in groupby(nodes, lambda node: node['priority']):
by_priority[role].extend(list(group))
self.assertEqual(
by_priority[100],
[{'uid': '3', 'role': 'primary-controller', 'priority': 100}])
self.assertEqual(
by_priority[200],
[{'uid': '1', 'role': 'controller', 'priority': 200},
{'uid': '2', 'role': 'controller', 'priority': 200}])
self.assertEqual(
by_priority[300],
[{'uid': '4', 'role': 'controller', 'priority': 300},
{'uid': '6', 'role': 'controller', 'priority': 300}])
self.assertEqual(
by_priority[400],
[{'uid': '7', 'role': 'cinder', 'priority': 400},
{'uid': '8', 'role': 'cinder', 'priority': 400},
{'uid': '9', 'role': 'network', 'priority': 400}])
self.assertEqual(
by_priority[500],
[{'uid': '10', 'role': 'compute', 'priority': 500}])
def test_serialize_priority_for_same_node_diff_roles(self):
nodes = [{'uid': '3', 'role': 'primary-controller'},
{'uid': '1', 'role': 'controller'},
{'uid': '2', 'role': 'controller'},
{'uid': '1', 'role': 'cinder'},
{'uid': '4', 'role': 'cinder'},
{'uid': '4', 'role': 'network'}]
self.graph.add_priorities(nodes)
by_uid = defaultdict(list)
for role, group in groupby(nodes, lambda node: node['uid']):
by_uid[role].extend(list(group))
self.assertItemsEqual(
by_uid['3'],
[{'uid': '3', 'role': 'primary-controller', 'priority': 100}])
self.assertItemsEqual(
by_uid['1'],
[{'uid': '1', 'role': 'cinder', 'priority': 300},
{'priority': 200, 'role': 'controller', 'uid': '1'}])
self.assertItemsEqual(
by_uid['2'],
[{'uid': '2', 'role': 'controller', 'priority': 200}])
# cinder and network roles are equal, so the only condition is that
# one of the roles should be deployed first
uid_4_priorities = [i['priority'] for i in by_uid['4']]
self.assertItemsEqual(uid_4_priorities, [300, 400])
class TestLegacyGraphSerialized(base.BaseTestCase):
def setUp(self):
super(TestLegacyGraphSerialized, self).setUp()
self.cluster = mock.Mock()
self.cluster.deployment_tasks = yaml.load(
graph_configuration.DEPLOYMENT_51_60)
self.graph = deployment_graph.AstuteGraph(self.cluster)
def test_serialized_with_tasks_and_priorities(self):
"""Test verifies that priorities and tasks."""
nodes = [{'uid': '3', 'role': 'primary-controller'},
{'uid': '1', 'role': 'controller'},
{'uid': '2', 'role': 'controller'},
{'uid': '7', 'role': 'cinder'},
{'uid': '8', 'role': 'compute'},
{'uid': '9', 'role': 'mongo'},
{'uid': '10', 'role': 'primary-mongo'},
{'uid': '11', 'role': 'ceph-osd'},
{'uid': '12', 'role': 'zabbix-server'}]
self.graph.add_priorities(nodes)
by_priority = defaultdict(list)
for role, group in groupby(nodes, lambda node: node['priority']):
by_priority[role].extend(list(group))
self.assertEqual(by_priority[100][0]['role'], 'zabbix-server')
self.assertEqual(by_priority[200][0]['role'], 'mongo')
self.assertEqual(by_priority[300][0]['role'], 'primary-mongo')
self.assertEqual(by_priority[400][0]['role'], 'primary-controller')
self.assertEqual(by_priority[500][0]['role'], 'controller')
self.assertEqual(by_priority[500][1]['role'], 'controller')
self.assertEqual(
set([i['role'] for i in by_priority[600]]),
set(['compute', 'cinder', 'ceph-osd']))
class TestTasksRemoval(base.BaseTestCase):
def setUp(self):
super(TestTasksRemoval, self).setUp()
self.cluster = mock.Mock()
self.cluster.deployment_tasks = yaml.load(TASKS + SUBTASKS)
self.astute = deployment_graph.AstuteGraph(self.cluster)
def test_only_tasks(self):
self.astute.only_tasks(['setup_network'])
tasks = self.astute.graph.get_group_tasks('controller')
self.assertEqual(len(tasks), 1)
self.assertItemsEqual(tasks[0]['id'], 'setup_network')
def test_full_graph_content(self):
self.astute.only_tasks([])
tasks = self.astute.graph.get_group_tasks('controller')
self.assertEqual(len(tasks), 2)
self.assertItemsEqual(
[t['id'] for t in tasks], ['setup_network', 'install_controller'])
class GroupsTraversalTest(base.BaseTestCase):
GROUPS = ""
def setUp(self):
super(GroupsTraversalTest, self).setUp()
self.cluster = mock.Mock()
self.cluster.deployment_tasks = yaml.load(self.GROUPS)
self.astute = deployment_graph.AstuteGraph(self.cluster)
self.nodes = []
def get_node_by_role(self, role):
return next(n for n in self.nodes if n['role'] == role)
class TestParallelGroupsTraversal(GroupsTraversalTest):
GROUPS = """
- id: a
type: group
role: [a]
parameters:
strategy:
type: parallel
- id: b
type: group
requires: [a]
role: [b]
parameters:
strategy:
type: parallel
- id: c
type: group
requires: [b]
role: [c]
parameters:
strategy:
type: parallel
- id: d
type: group
requires: [c]
role: [d]
parameters:
strategy:
type: parallel
"""
def test_with_all_nodes_present(self):
self.nodes = [{'uid': '3', 'role': 'a'},
{'uid': '1', 'role': 'b'},
{'uid': '2', 'role': 'c'},
{'uid': '4', 'role': 'd'}]
self.astute.add_priorities(self.nodes)
self.assertEqual(self.get_node_by_role('a')['priority'], 100)
self.assertEqual(self.get_node_by_role('b')['priority'], 200)
self.assertEqual(self.get_node_by_role('c')['priority'], 300)
self.assertEqual(self.get_node_by_role('d')['priority'], 400)
def test_middle_role_is_not_present(self):
self.nodes = [{'uid': '3', 'role': 'a'},
{'uid': '1', 'role': 'b'},
{'uid': '2', 'role': 'd'}]
self.astute.add_priorities(self.nodes)
self.assertEqual(self.get_node_by_role('a')['priority'], 100)
self.assertEqual(self.get_node_by_role('b')['priority'], 200)
self.assertEqual(self.get_node_by_role('d')['priority'], 300)
def test_two_middle_roles_is_not_present(self):
self.nodes = [{'uid': '3', 'role': 'a'},
{'uid': '2', 'role': 'd'}]
self.astute.add_priorities(self.nodes)
self.assertEqual(self.get_node_by_role('a')['priority'], 100)
self.assertEqual(self.get_node_by_role('d')['priority'], 200)
class TestMixedGroupsTraversal(GroupsTraversalTest):
GROUPS = """
- id: a
type: group
role: [a]
parameters:
strategy:
type: one_by_one
- id: b
type: group
role: [b]
parameters:
strategy:
type: parallel
- id: c
type: group
requires: [a]
role: [c]
parameters:
strategy:
type: parallel
- id: d
type: group
requires: [c]
role: [d]
parameters:
strategy:
type: parallel
- id: e
type: group
requires: [d, c]
role: [e]
parameters:
strategy:
type: one_by_one
"""
def test_one_by_one_will_be_earlier(self):
self.nodes = [{'uid': '3', 'role': 'a'},
{'uid': '1', 'role': 'b'}]
self.astute.add_priorities(self.nodes)
self.assertEqual(self.get_node_by_role('a')['priority'], 100)
self.assertEqual(self.get_node_by_role('b')['priority'], 200)
def test_couple_missed_without_last(self):
self.nodes = [{'uid': '3', 'role': 'a'},
{'uid': '1', 'role': 'c'},
{'uid': '4', 'role': 'd'}]
self.astute.add_priorities(self.nodes)
self.assertEqual(self.get_node_by_role('a')['priority'], 100)
self.assertEqual(self.get_node_by_role('c')['priority'], 200)
self.assertEqual(self.get_node_by_role('d')['priority'], 300)
def test_only_one_by_one(self):
self.nodes = [{'uid': '3', 'role': 'a'},
{'uid': '1', 'role': 'e'}]
self.astute.add_priorities(self.nodes)
self.assertEqual(self.get_node_by_role('a')['priority'], 100)
self.assertEqual(self.get_node_by_role('e')['priority'], 200)
COMPLEX_DEPENDENCIES = """
- id: pre_deployment_start
type: stage
- id: pre_deployment
type: stage
requires: [pre_deployment_start]
- id: deploy_start
type: stage
requires: [pre_deployment]
- id: deploy_end
type: stage
requires: [deploy_start]
- id: post_deployment_start
type: stage
requires: [deploy_end]
- id: post_deployment
type: stage
requires: [post_deployment_start]
- id: pre_a
requires: [pre_deployment_start]
type: shell
- id: pre_b
requires: [pre_a]
type: shell
- id: pre_c
required_for: [pre_deployment]
type: shell
- id: pre_d
required_for: [pre_deployment]
requires: [pre_b]
type: shell
- id: group_a
type: group
requires: [deploy_start]
required_for: [deploy_end]
- id: group_b
type: group
required_for: [deploy_end]
requires: [group_a]
- id: group_c
type: group
required_for: [deploy_end]
requires: [deploy_start]
- id: task_a
groups: [group_a, group_b]
required_for: [deploy_end]
requires: [deploy_start]
type: puppet
- id: task_b
requires: [task_a]
required_for: [deploy_end]
type: puppet
groups: [group_a, group_c]
- id: task_c
requires: [task_a]
type: puppet
required_for: [deploy_end]
groups: [group_a, group_b]
- id: task_d
requires: [task_b, task_c]
type: puppet
groups: [group_b]
required_for: [deploy_end]
- id: post_a
requires: [post_deployment_start]
required_for: [post_deployment]
type: shell
"""
class TestFindGraph(base.BaseTestCase):
def setUp(self):
super(TestFindGraph, self).setUp()
self.tasks = yaml.load(COMPLEX_DEPENDENCIES)
self.graph = deployment_graph.DeploymentGraph()
self.graph.add_tasks(self.tasks)
def test_end_at_pre_deployment(self):
"""Only pre_deployment tasks, groups and stages."""
subgraph = self.graph.find_subgraph(end="pre_deployment")
self.assertItemsEqual(
subgraph.nodes(),
['pre_d', 'pre_c', 'pre_b', 'pre_a',
'pre_deployment', 'pre_deployment_start'])
def test_end_at_task_in_pre_deployment(self):
"""Task pre_d doesnt requires pre_c, but requires pre_b."""
subgraph = self.graph.find_subgraph(end="pre_d")
self.assertItemsEqual(
subgraph.nodes(),
['pre_d', 'pre_b', 'pre_a', 'pre_deployment_start'])
def test_end_at_deploy(self):
"""All tasks should be included because deploy is last node
in this graph.
All tasks from pre_deployment and deploy stage will be added.
post_a not included
"""
subgraph = self.graph.find_subgraph(end="deploy_end")
self.assertItemsEqual(
subgraph.nodes(),
['pre_d', 'pre_c', 'pre_b', 'pre_a', 'deploy_end', 'deploy_start',
'pre_deployment_start',
'pre_deployment', 'group_c', 'group_b', 'group_a', 'task_a',
'task_b', 'task_c', 'task_d'])
def test_end_at_post_deployment(self):
"""All tasks will be included."""
subgraph = self.graph.find_subgraph(end="post_deployment")
self.assertItemsEqual(
subgraph.nodes(),
[t['id'] for t in self.tasks])
def test_end_at_group(self):
"""In general end_at group should be used only when tasks that are
specific for that group, and there is no deps between those groups
In current graph only task_a and task_b will be present, because
there is link between them
"""
subgraph = self.graph.find_subgraph(end="group_c")
self.assertItemsEqual(
subgraph.nodes(),
['pre_d', 'pre_c', 'pre_b', 'pre_a', 'pre_deployment',
'pre_deployment_start', 'deploy_start',
'group_c', 'task_a', 'task_b'])
def test_end_at_task_that_has_two_parents(self):
"""Both parents should be in the graph.
Parents are task_b and task_c, the only absent task is post_a.
"""
subgraph = self.graph.find_subgraph(end="task_d")
self.assertItemsEqual(
subgraph.nodes(),
['pre_d', 'pre_c', 'pre_b', 'pre_a', 'deploy_start',
'pre_deployment_start',
'pre_deployment', 'task_a',
'task_b', 'task_c', 'task_d'])
def test_end_at_first_task(self):
subgraph = self.graph.find_subgraph(end="task_a")
self.assertItemsEqual(
subgraph.nodes(),
['pre_d', 'pre_c', 'pre_b', 'pre_a',
'pre_deployment', 'task_a', 'pre_deployment_start',
'deploy_start'])
def test_start_at_task_a(self):
"""Everything except predeployment tasks will be included."""
subgraph = self.graph.find_subgraph(start="task_a")
self.assertItemsEqual(
subgraph.nodes(),
['deploy_end', 'post_deployment_start', 'group_c', 'group_b',
'group_a',
'task_a', 'task_b', 'task_c', 'task_d', 'post_deployment',
'post_a'])
def test_start_at_pre_deployment(self):
"""Everything except pre_deployment tasks."""
subgraph = self.graph.find_subgraph(start="pre_deployment")
self.assertItemsEqual(
subgraph.nodes(),
['deploy_end', 'pre_deployment', 'group_c', 'group_b', 'group_a',
'task_a', 'task_b', 'task_c', 'task_d', 'post_deployment',
'post_a', 'post_deployment_start', 'deploy_start'])
def test_start_at_post_a(self):
"""Only post_a task."""
subgraph = self.graph.find_subgraph(start="post_a")
self.assertItemsEqual(
subgraph.nodes(),
['post_deployment', 'post_a'])
def test_start_pre_a_end_at_pre_d(self):
"""pre_c will not be included, because this is not a dependency
for pre_d.
"""
subgraph = self.graph.find_subgraph(start="pre_a", end="pre_d")
self.assertItemsEqual(
subgraph.nodes(),
['pre_d', 'pre_b', 'pre_a'])
def test_start_pre_a_end_at_post_a(self):
subgraph = self.graph.find_subgraph(start="pre_a", end="post_a")
self.assertItemsEqual(
subgraph.nodes(),
['deploy_end', 'pre_deployment', 'group_c', 'group_b', 'group_a',
'task_a', 'task_b', 'task_c', 'task_d', 'post_deployment_start',
'post_a', 'pre_d', 'pre_b', 'pre_a', 'deploy_start'])
def test_start_task_a_end_at_task_d(self):
"""All tasks in deploy stage will be included."""
subgraph = self.graph.find_subgraph(start="task_a", end="task_d")
self.assertItemsEqual(
subgraph.nodes(),
['task_a', 'task_b', 'task_c', 'task_d'])
def test_preserve_ordering_when_task_skipped(self):
self.graph.only_tasks(['task_a', 'task_d'])
# we skipped both tasks that are predecessors for task_d
self.assertTrue(self.graph.node['task_b']['skipped'])
self.assertTrue(self.graph.node['task_c']['skipped'])
self.assertEqual(
[t['id'] for t in self.graph.get_group_tasks('group_b')],
['task_a', 'task_d'])
class TestOrdered(base.BaseTestCase):
TASKS = """
- id: a
- id: b
requires: [a]
- id: c
requires: [a]
- id: d
requires: [a]
- id: e
requires: [b,c,d]
- id: f
requires: [e]
"""
def setUp(self):
super(TestOrdered, self).setUp()
self.tasks = yaml.load(self.TASKS)
def test_always_same_order(self):
graph = deployment_graph.DeploymentGraph(tasks=self.tasks)
# (dshulyak) order should be static
self.assertEqual(
[n['id'] for n in graph.topology],
['a', 'b', 'c', 'd', 'e', 'f'])
class TestIncludeSkipped(base.BaseTestCase):
TASKS = """
- id: a
type: puppet
- id: b
requires: [a]
skipped: true
type: shell
- id: c
requires: [b]
type: puppet
"""
def setUp(self):
super(TestIncludeSkipped, self).setUp()
self.tasks = yaml.load(self.TASKS)
self.graph = deployment_graph.DeploymentGraph(tasks=self.tasks)
def test_filter_subgraph_will_not_return_skipped(self):
subgraph = self.graph.filter_subgraph(start='a', end='c')
self.assertItemsEqual(
subgraph.nodes(),
['a', 'c'])
def test_filter_subgraph_will_return_skipped_if_included(self):
subgraph = self.graph.filter_subgraph(
start='a', end='c', include=('b',))
self.assertItemsEqual(
subgraph.nodes(),
[t['id'] for t in self.tasks])
def test_include_task_with_only_tasks_routine(self):
self.graph.only_tasks(['a', 'b', 'c'])
subgraph = self.graph.filter_subgraph(start='a', end='c')
self.assertItemsEqual(
subgraph.nodes(),
[t['id'] for t in self.tasks])
class TestDeploymentGraphValidator(base.BaseTestCase):
def test_validation_pass_with_existing_dependencies(self):
yaml_tasks = """
- id: deploy_end
type: stage
- id: pre_deployment_start
type: stage
- id: test-controller
type: group
role: [test-controller]
requires: [pre_deployment_start]
required_for: [deploy_end]
parameters:
strategy:
type: parallel
amount: 2
"""
tasks = yaml.load(yaml_tasks)
graph_validator = deployment_graph.DeploymentGraphValidator(tasks)
graph_validator.check()
def test_validation_failed_with_not_existing_dependencies(self):
dependencies_types = ['requires', 'required_for', 'groups', 'tasks']
for dependency_type in dependencies_types:
yaml_tasks = """
- id: test-controller
type: group
role: [test-controlle]
{dependency_type}: [non_existing_stage]
parameters:
strategy:
type: one_by_one
""".format(dependency_type=dependency_type)
tasks = yaml.load(yaml_tasks)
graph_validator = deployment_graph.DeploymentGraphValidator(tasks)
with self.assertRaisesRegexp(
errors.InvalidData,
"Tasks 'non_existing_stage' can't be in requires|"
"required_for|groups|tasks for \['test-controller'\] "
"because they don't exist in the graph"):
graph_validator.check()
def test_validation_failed_with_cycling_dependencies(self):
yaml_tasks = """
- id: test-controller-1
type: role
requires: [test-controller-2]
- id: test-controller-2
type: role
requires: [test-controller-1]
"""
tasks = yaml.load(yaml_tasks)
graph_validator = deployment_graph.DeploymentGraphValidator(tasks)
with self.assertRaisesRegexp(
errors.InvalidData,
"Tasks can not be processed because it contains cycles in it"):
graph_validator.check() | unknown | codeparrot/codeparrot-clean | ||
from __future__ import unicode_literals
from django.db import connection
from django.test import TestCase
from .models import A01, A02, B01, B02, C01, C02, Unmanaged2, Managed1
class SimpleTests(TestCase):
def test_simple(self):
"""
The main test here is that the all the models can be created without
any database errors. We can also do some more simple insertion and
lookup tests whilst we're here to show that the second of models do
refer to the tables from the first set.
"""
# Insert some data into one set of models.
a = A01.objects.create(f_a="foo", f_b=42)
B01.objects.create(fk_a=a, f_a="fred", f_b=1729)
c = C01.objects.create(f_a="barney", f_b=1)
c.mm_a = [a]
# ... and pull it out via the other set.
a2 = A02.objects.all()[0]
self.assertIsInstance(a2, A02)
self.assertEqual(a2.f_a, "foo")
b2 = B02.objects.all()[0]
self.assertIsInstance(b2, B02)
self.assertEqual(b2.f_a, "fred")
self.assertIsInstance(b2.fk_a, A02)
self.assertEqual(b2.fk_a.f_a, "foo")
self.assertEqual(list(C02.objects.filter(f_a=None)), [])
resp = list(C02.objects.filter(mm_a=a.id))
self.assertEqual(len(resp), 1)
self.assertIsInstance(resp[0], C02)
self.assertEqual(resp[0].f_a, 'barney')
class ManyToManyUnmanagedTests(TestCase):
def test_many_to_many_between_unmanaged(self):
"""
The intermediary table between two unmanaged models should not be created.
"""
table = Unmanaged2._meta.get_field('mm').m2m_db_table()
tables = connection.introspection.table_names()
self.assertTrue(table not in tables, "Table '%s' should not exist, but it does." % table)
def test_many_to_many_between_unmanaged_and_managed(self):
"""
An intermediary table between a managed and an unmanaged model should be created.
"""
table = Managed1._meta.get_field('mm').m2m_db_table()
tables = connection.introspection.table_names()
self.assertTrue(table in tables, "Table '%s' does not exist." % table) | unknown | codeparrot/codeparrot-clean | ||
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
"go.opentelemetry.io/otel/trace"
grpcstatus "google.golang.org/grpc/status"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
utilversion "k8s.io/apimachinery/pkg/util/version"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
ref "k8s.io/client-go/tools/reference"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/component-base/logs/logreduction"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
crierror "k8s.io/cri-api/pkg/errors"
remote "k8s.io/cri-client/pkg"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/credentialprovider"
"k8s.io/kubernetes/pkg/credentialprovider/plugin"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/allocation"
"k8s.io/kubernetes/pkg/kubelet/allocation/state"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/images"
imagepullmanager "k8s.io/kubernetes/pkg/kubelet/images/pullmanager"
runtimeutil "k8s.io/kubernetes/pkg/kubelet/kuberuntime/util"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/logs"
"k8s.io/kubernetes/pkg/kubelet/metrics"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/runtimeclass"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/pkg/kubelet/token"
"k8s.io/kubernetes/pkg/kubelet/types"
kubeutil "k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/kubelet/util/cache"
"k8s.io/kubernetes/pkg/kubelet/util/format"
sc "k8s.io/kubernetes/pkg/securitycontext"
"k8s.io/utils/ptr"
)
const (
	// The api version of kubelet runtime api.
	kubeRuntimeAPIVersion = "0.1.0"
	// A minimal shutdown window for avoiding unnecessary SIGKILLs.
	minimumGracePeriodInSeconds = 2
	// The expiration time of version cache (backs the versionCache ObjectCache below).
	versionCacheTTL = 60 * time.Second
	// How frequently to report identical errors (used by the logReduction helper
	// to suppress repeated per-container error log lines).
	identicalErrorDelay = 1 * time.Minute
	// OpenTelemetry instrumentation scope name used when creating tracers.
	instrumentationScope = "k8s.io/kubernetes/pkg/kubelet/kuberuntime"
	// Name of the checkpoint file tracking actuated pod resources
	// (presumably stored under the kubelet root directory — confirm against
	// the state setup code).
	actuatedPodsStateFile = "actuated_pods_state"
)
var (
	// ErrVersionNotSupported is returned when the api version of runtime interface is not supported.
	// It is produced by NewKubeGenericRuntimeManager when the runtime reports a
	// version other than kubeRuntimeAPIVersion.
	ErrVersionNotSupported = errors.New("runtime api version is not supported")
)
// podStateProvider can determine if none of the elements are necessary to retain (pod content)
// or if none of the runtime elements are necessary to retain (containers).
type podStateProvider interface {
	// IsPodTerminationRequested reports whether termination has been requested
	// for the pod with the given UID.
	IsPodTerminationRequested(kubetypes.UID) bool
	// ShouldPodContentBeRemoved reports whether all pod content (e.g. logs and
	// on-disk state) for the given UID can be removed.
	ShouldPodContentBeRemoved(kubetypes.UID) bool
	// ShouldPodRuntimeBeRemoved reports whether the runtime elements
	// (sandboxes/containers) for the given UID can be removed.
	ShouldPodRuntimeBeRemoved(kubetypes.UID) bool
}
// kubeGenericRuntimeManager implements the kubelet's container runtime manager
// on top of the CRI runtime and image services.
type kubeGenericRuntimeManager struct {
	// Name reported by the runtime (from the CRI Version call).
	runtimeName string
	// Recorder for emitting Kubernetes events.
	recorder record.EventRecorderLogger
	// OS abstraction used for filesystem and host interactions.
	osInterface kubecontainer.OSInterface

	// machineInfo contains the machine information.
	machineInfo *cadvisorapi.MachineInfo

	// Container GC manager
	containerGC *containerGC

	// Runner of lifecycle events.
	runner kubecontainer.HandlerRunner

	// RuntimeHelper that wraps kubelet to generate runtime container options.
	runtimeHelper kubecontainer.RuntimeHelper

	// Health check results.
	livenessManager  proberesults.Manager
	readinessManager proberesults.Manager
	startupManager   proberesults.Manager

	// If false, pass "memory.oom.group" to container cgroups when using cgroups v2 to cause processes
	// in those cgroups to be killed as a unit by the OOM killer.
	// It must be nil except for linux
	singleProcessOOMKill *bool

	// If true, enforce container cpu limits with CFS quota support
	cpuCFSQuota bool

	// CPUCFSQuotaPeriod sets the CPU CFS quota period value, cpu.cfs_period_us, defaults to 100ms
	cpuCFSQuotaPeriod metav1.Duration

	// wrapped image puller.
	imagePuller images.ImageManager

	// gRPC service clients
	runtimeService internalapi.RuntimeService
	imageService   internalapi.ImageManagerService

	// The version cache of runtime daemon.
	versionCache *cache.ObjectCache

	// The directory path for seccomp profiles.
	seccompProfileRoot string

	// Container management interface for pod container.
	containerManager cm.ContainerManager

	// Internal lifecycle event handlers for container resource management.
	internalLifecycle cm.InternalContainerLifecycle

	// Manage container logs.
	logManager logs.ContainerLogManager

	// Manage RuntimeClass resources.
	runtimeClassManager *runtimeclass.Manager

	// actuatedState tracks actuated resources.
	actuatedState state.State

	// Cache last per-container error message to reduce log spam
	logReduction *logreduction.LogReduction

	// PodState provider instance
	podStateProvider podStateProvider

	// Use RuntimeDefault as the default seccomp profile for all workloads.
	seccompDefault bool

	// MemorySwapBehavior defines how swap is used
	memorySwapBehavior string

	// Function to get node allocatable resources
	getNodeAllocatable func() v1.ResourceList

	// Memory throttling factor for MemoryQoS
	memoryThrottlingFactor float64

	// Root directory used to store pod logs
	podLogsDirectory string

	// Swap controller availability check function (Linux only)
	// Uses sync.OnceValue for lazy initialization
	getSwapControllerAvailable func() bool
}
// KubeGenericRuntime is an interface that combines the container runtime,
// streaming runtime, and command-runner interfaces exposed by this package.
type KubeGenericRuntime interface {
	kubecontainer.Runtime
	kubecontainer.StreamingRuntime
	kubecontainer.CommandRunner
}
// NewKubeGenericRuntimeManager creates a new kubeGenericRuntimeManager
func NewKubeGenericRuntimeManager(
ctx context.Context,
recorder record.EventRecorderLogger,
livenessManager proberesults.Manager,
readinessManager proberesults.Manager,
startupManager proberesults.Manager,
rootDirectory string,
podLogsDirectory string,
machineInfo *cadvisorapi.MachineInfo,
podStateProvider podStateProvider,
maxPods int32,
osInterface kubecontainer.OSInterface,
runtimeHelper kubecontainer.RuntimeHelper,
insecureContainerLifecycleHTTPClient types.HTTPDoer,
imageBackOff *flowcontrol.Backoff,
serializeImagePulls bool,
maxParallelImagePulls *int32,
imagePullQPS float32,
imagePullBurst int,
imagePullsCredentialVerificationPolicy string,
preloadedImagesCredentialVerificationWhitelist []string,
imageCredentialProviderConfigPath string,
imageCredentialProviderBinDir string,
singleProcessOOMKill *bool,
cpuCFSQuota bool,
cpuCFSQuotaPeriod metav1.Duration,
runtimeService internalapi.RuntimeService,
imageService internalapi.ImageManagerService,
containerManager cm.ContainerManager,
logManager logs.ContainerLogManager,
runtimeClassManager *runtimeclass.Manager,
seccompDefault bool,
memorySwapBehavior string,
getNodeAllocatable func() v1.ResourceList,
memoryThrottlingFactor float64,
podPullingTimeRecorder images.ImagePodPullingTimeRecorder,
tracerProvider trace.TracerProvider,
tokenManager *token.Manager,
getServiceAccount plugin.GetServiceAccountFunc,
) (KubeGenericRuntime, []images.PostImageGCHook, error) {
logger := klog.FromContext(ctx)
runtimeService = newInstrumentedRuntimeService(runtimeService)
imageService = newInstrumentedImageManagerService(imageService)
tracer := tracerProvider.Tracer(instrumentationScope)
kubeRuntimeManager := &kubeGenericRuntimeManager{
recorder: recorder,
singleProcessOOMKill: singleProcessOOMKill,
cpuCFSQuota: cpuCFSQuota,
cpuCFSQuotaPeriod: cpuCFSQuotaPeriod,
seccompProfileRoot: filepath.Join(rootDirectory, "seccomp"),
livenessManager: livenessManager,
readinessManager: readinessManager,
startupManager: startupManager,
machineInfo: machineInfo,
osInterface: osInterface,
runtimeHelper: runtimeHelper,
runtimeService: runtimeService,
imageService: imageService,
containerManager: containerManager,
internalLifecycle: containerManager.InternalContainerLifecycle(),
logManager: logManager,
runtimeClassManager: runtimeClassManager,
logReduction: logreduction.NewLogReduction(identicalErrorDelay),
seccompDefault: seccompDefault,
memorySwapBehavior: memorySwapBehavior,
getNodeAllocatable: getNodeAllocatable,
memoryThrottlingFactor: memoryThrottlingFactor,
podLogsDirectory: podLogsDirectory,
}
// Initialize swap controller availability check with lazy evaluation
kubeRuntimeManager.getSwapControllerAvailable = initSwapControllerAvailabilityCheck(ctx)
typedVersion, err := kubeRuntimeManager.getTypedVersion(ctx)
if err != nil {
logger.Error(err, "Get runtime version failed")
return nil, nil, err
}
// Only matching kubeRuntimeAPIVersion is supported now
// TODO: Runtime API machinery is under discussion at https://github.com/kubernetes/kubernetes/issues/28642
if typedVersion.Version != kubeRuntimeAPIVersion {
logger.Error(err, "This runtime api version is not supported",
"apiVersion", typedVersion.Version,
"supportedAPIVersion", kubeRuntimeAPIVersion)
return nil, nil, ErrVersionNotSupported
}
kubeRuntimeManager.runtimeName = typedVersion.RuntimeName
logger.Info("Container runtime initialized",
"containerRuntime", typedVersion.RuntimeName,
"version", typedVersion.RuntimeVersion,
"apiVersion", typedVersion.RuntimeApiVersion)
if imageCredentialProviderConfigPath != "" || imageCredentialProviderBinDir != "" {
if err := plugin.RegisterCredentialProviderPlugins(imageCredentialProviderConfigPath, imageCredentialProviderBinDir, tokenManager.GetServiceAccountToken, getServiceAccount); err != nil {
logger.Error(err, "Failed to register CRI auth plugins")
os.Exit(1)
}
}
var imageGCHooks []images.PostImageGCHook
var imagePullManager imagepullmanager.ImagePullManager = &imagepullmanager.NoopImagePullManager{}
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletEnsureSecretPulledImages) {
imagePullCredentialsVerificationPolicy, err := imagepullmanager.NewImagePullCredentialVerificationPolicy(
kubeletconfiginternal.ImagePullCredentialsVerificationPolicy(imagePullsCredentialVerificationPolicy),
preloadedImagesCredentialVerificationWhitelist)
if err != nil {
return nil, nil, err
}
fsRecordAccessor, err := imagepullmanager.NewFSPullRecordsAccessor(logger, rootDirectory)
if err != nil {
return nil, nil, fmt.Errorf("failed to setup the FSPullRecordsAccessor: %w", err)
}
var ( // variables used to determine cache/lock set sizes
maxParallelPulls = ptr.Deref(maxParallelImagePulls, 0)
intentCacheSize = max(2*maxPods, 2*maxParallelPulls)
pullRecordsCacheSize = 5 * maxPods
)
memCacheRecordsAccessor := imagepullmanager.NewCachedPullRecordsAccessor(logger, fsRecordAccessor, intentCacheSize, pullRecordsCacheSize, maxParallelPulls)
imagePullManager, err = imagepullmanager.NewImagePullManager(ctx, memCacheRecordsAccessor, imagePullCredentialsVerificationPolicy, kubeRuntimeManager, maxParallelPulls)
if err != nil {
return nil, nil, fmt.Errorf("failed to create image pull manager: %w", err)
}
imageGCHooks = append(imageGCHooks, imagePullManager.PruneUnknownRecords)
}
nodeKeyring := credentialprovider.NewDefaultDockerKeyring()
kubeRuntimeManager.imagePuller = images.NewImageManager(
kubecontainer.FilterEventRecorder(recorder),
nodeKeyring,
kubeRuntimeManager,
imagePullManager,
imageBackOff,
serializeImagePulls,
maxParallelImagePulls,
imagePullQPS,
imagePullBurst,
podPullingTimeRecorder)
kubeRuntimeManager.runner = lifecycle.NewHandlerRunner(insecureContainerLifecycleHTTPClient, kubeRuntimeManager, kubeRuntimeManager, recorder)
kubeRuntimeManager.containerGC = newContainerGC(runtimeService, podStateProvider, kubeRuntimeManager, tracer)
kubeRuntimeManager.podStateProvider = podStateProvider
kubeRuntimeManager.versionCache = cache.NewObjectCache(
func() (interface{}, error) {
return kubeRuntimeManager.getTypedVersion(ctx)
},
versionCacheTTL,
)
kubeRuntimeManager.actuatedState, err = state.NewStateCheckpoint(rootDirectory, actuatedPodsStateFile)
if err != nil {
return nil, nil, fmt.Errorf("failed to initialize actuated state checkpoint: %w", err)
}
return kubeRuntimeManager, imageGCHooks, nil
}
// Type returns the name of the container runtime this manager talks to
// (as reported by the runtime during initialization).
func (m *kubeGenericRuntimeManager) Type() string {
	runtimeName := m.runtimeName
	return runtimeName
}
// newRuntimeVersion parses a runtime-reported version string, preferring
// strict semantic-version parsing and falling back to a generic parse for
// runtimes that do not report semver.
func newRuntimeVersion(version string) (*utilversion.Version, error) {
	ver, err := utilversion.ParseSemantic(version)
	if err != nil {
		// Not semver; try the looser generic format.
		return utilversion.ParseGeneric(version)
	}
	return ver, nil
}
// getTypedVersion queries the remote runtime service for its typed version
// information, requesting kubeRuntimeAPIVersion as the CRI API version.
// The underlying CRI error is wrapped so callers can inspect it.
func (m *kubeGenericRuntimeManager) getTypedVersion(ctx context.Context) (*runtimeapi.VersionResponse, error) {
	typedVersion, err := m.runtimeService.Version(ctx, kubeRuntimeAPIVersion)
	if err != nil {
		// Use %w (not %v) so errors.Is/errors.As can unwrap the CRI error.
		return nil, fmt.Errorf("get remote runtime typed version failed: %w", err)
	}
	return typedVersion, nil
}
// Version returns the version information of the container runtime.
func (m *kubeGenericRuntimeManager) Version(ctx context.Context) (kubecontainer.Version, error) {
	resp, err := m.getTypedVersion(ctx)
	if err != nil {
		return nil, err
	}
	// Parse the runtime engine version (not the CRI API version).
	return newRuntimeVersion(resp.RuntimeVersion)
}
// APIVersion returns the cached API version information of the container
// runtime. Implementation is expected to update this cache periodically.
// This may be different from the runtime engine's version.
func (m *kubeGenericRuntimeManager) APIVersion() (kubecontainer.Version, error) {
	cached, err := m.versionCache.Get(m.machineInfo.MachineID)
	if err != nil {
		return nil, err
	}
	// The cache only ever stores *runtimeapi.VersionResponse values.
	resp := cached.(*runtimeapi.VersionResponse)
	return newRuntimeVersion(resp.RuntimeApiVersion)
}
// Status returns the status of the runtime. An error is returned if the Status
// function itself fails, nil otherwise.
func (m *kubeGenericRuntimeManager) Status(ctx context.Context) (*kubecontainer.RuntimeStatus, error) {
	resp, err := m.runtimeService.Status(ctx, false)
	if err != nil {
		return nil, err
	}
	status := resp.GetStatus()
	if status == nil {
		return nil, errors.New("runtime status is nil")
	}
	return toKubeRuntimeStatus(status, resp.GetRuntimeHandlers(), resp.GetFeatures()), nil
}
// GetPods returns a list of containers grouped by pods. The boolean parameter
// specifies whether the runtime returns all containers including those already
// exited and dead containers (used for garbage collection).
//
// The result is ordered by pod creation time, newest first (see the comment
// before the sort below for why).
func (m *kubeGenericRuntimeManager) GetPods(ctx context.Context, all bool) ([]*kubecontainer.Pod, error) {
	logger := klog.FromContext(ctx)
	pods := make(map[kubetypes.UID]*kubecontainer.Pod)
	sandboxes, err := m.getKubeletSandboxes(ctx, all)
	if err != nil {
		return nil, err
	}
	// First pass: seed the pod map from sandboxes, so every pod known to the
	// runtime exists in the map even before any container is attached.
	// Sandboxes without metadata, or that fail conversion, are logged and
	// skipped rather than failing the whole listing.
	for i := range sandboxes {
		s := sandboxes[i]
		if s.Metadata == nil {
			logger.V(4).Info("Sandbox does not have metadata", "sandbox", s)
			continue
		}
		podUID := kubetypes.UID(s.Metadata.Uid)
		if _, ok := pods[podUID]; !ok {
			pods[podUID] = &kubecontainer.Pod{
				ID:        podUID,
				Name:      s.Metadata.Name,
				Namespace: s.Metadata.Namespace,
			}
		}
		p := pods[podUID]
		converted, err := m.sandboxToKubeContainer(s)
		if err != nil {
			logger.V(4).Info("Convert sandbox of pod failed", "runtimeName", m.runtimeName, "sandbox", s, "podUID", podUID, "err", err)
			continue
		}
		p.Sandboxes = append(p.Sandboxes, converted)
		// NOTE: if a pod has multiple sandboxes, CreatedAt ends up as the
		// timestamp of the last sandbox processed for that pod.
		p.CreatedAt = uint64(s.GetCreatedAt())
	}

	containers, err := m.getKubeletContainers(ctx, all)
	if err != nil {
		return nil, err
	}
	// Second pass: attach containers to their pods, keyed by the pod UID
	// recovered from the container labels. A pod entry is created here if the
	// container's sandbox was not seen above.
	for i := range containers {
		c := containers[i]
		if c.Metadata == nil {
			logger.V(4).Info("Container does not have metadata", "container", c)
			continue
		}

		labelledInfo := getContainerInfoFromLabels(ctx, c.Labels)
		pod, found := pods[labelledInfo.PodUID]
		if !found {
			pod = &kubecontainer.Pod{
				ID:        labelledInfo.PodUID,
				Name:      labelledInfo.PodName,
				Namespace: labelledInfo.PodNamespace,
			}
			pods[labelledInfo.PodUID] = pod
		}

		converted, err := m.toKubeContainer(ctx, c)
		if err != nil {
			logger.V(4).Info("Convert container of pod failed", "runtimeName", m.runtimeName, "container", c, "podUID", labelledInfo.PodUID, "err", err)
			continue
		}

		pod.Containers = append(pod.Containers, converted)
	}

	// Convert map to list.
	var result []*kubecontainer.Pod
	for _, pod := range pods {
		result = append(result, pod)
	}

	// There are scenarios where multiple pods are running in parallel having
	// the same name, because one of them have not been fully terminated yet.
	// To avoid unexpected behavior on container name based search (for example
	// by calling *Kubelet.findContainer() without specifying a pod ID), we now
	// return the list of pods ordered by their creation time.
	sort.SliceStable(result, func(i, j int) bool {
		return result[i].CreatedAt > result[j].CreatedAt
	})
	logger.V(4).Info("Retrieved pods from runtime", "all", all)
	return result, nil
}
// containerKillReason explains what killed a given container
type containerKillReason string

const (
	// reasonStartupProbe: the container failed its startup probe.
	reasonStartupProbe containerKillReason = "StartupProbe"
	// reasonLivenessProbe: the container failed its liveness probe.
	reasonLivenessProbe containerKillReason = "LivenessProbe"
	// reasonFailedPostStartHook: the container's PostStart lifecycle hook failed.
	reasonFailedPostStartHook containerKillReason = "FailedPostStartHook"
	// reasonRestartAllContainers: the container is being reset as part of a
	// restart of all containers in the pod.
	reasonRestartAllContainers containerKillReason = "RestartAllContainers"
	// reasonUnknown: no more specific reason is available.
	reasonUnknown containerKillReason = "Unknown"
)
// containerToKillInfo contains necessary information to kill a container.
type containerToKillInfo struct {
	// The spec of the container.
	container *v1.Container
	// The name of the container.
	name string
	// The message indicates why the container will be killed. Intended for
	// human-readable events/logs.
	message string
	// The reason is a clearer source of info on why a container will be killed
	// TODO: replace message with reason?
	reason containerKillReason
}
// containerResources holds the set of resources applicable to the running container.
// Memory values are in bytes; CPU values are in millicores (see
// containerResourcesFromRequirements, which populates them via Value() /
// MilliValue()).
type containerResources struct {
	// memoryLimit is the memory limit in bytes.
	memoryLimit int64
	// memoryRequest is the memory request in bytes.
	memoryRequest int64
	// cpuLimit is the CPU limit in millicores.
	cpuLimit int64
	// cpuRequest is the CPU request in millicores.
	cpuRequest int64
}
// containerToUpdateInfo contains necessary information to update a container's resources.
type containerToUpdateInfo struct {
	// The spec of the container.
	container *v1.Container
	// ID of the runtime container that needs resource update
	kubeContainerID kubecontainer.ContainerID
	// Desired resources for the running container
	desiredContainerResources containerResources
	// Most recently configured resources on the running container
	currentContainerResources *containerResources
}
// containerToRemoveInfo contains necessary information to remove a container
// from the runtime (and optionally kill it first).
// NOTE(review): the previous comment said "update a container's resources",
// which was a copy-paste from containerToUpdateInfo.
type containerToRemoveInfo struct {
	// The ID of the container.
	containerID kubecontainer.ContainerID
	// The spec of the container.
	container *v1.Container
	// Whether to kill the container before removal.
	kill bool
}
// podActions keeps information what to do for a pod. It is the plan computed
// by comparing the pod spec with the pod's current runtime status.
type podActions struct {
	// Stop all running (regular, init and ephemeral) containers and the sandbox for the pod.
	KillPod bool
	// Whether need to create a new sandbox. If needed to kill pod and create
	// a new pod sandbox, all init containers need to be purged (i.e., removed).
	CreateSandbox bool
	// The id of existing sandbox. It is used for starting containers in ContainersToStart.
	SandboxID string
	// The attempt number of creating sandboxes for the pod.
	Attempt uint32

	// InitContainersToStart keeps a list of indexes for the init containers to
	// start, where the index is the index of the specific init container in the
	// pod spec (pod.Spec.InitContainers).
	InitContainersToStart []int
	// ContainersToStart keeps a list of indexes for the containers to start,
	// where the index is the index of the specific container in the pod spec (
	// pod.Spec.Containers).
	ContainersToStart []int
	// ContainersToKill keeps a map of containers that need to be killed, note that
	// the key is the container ID of the container, while
	// the value contains necessary information to kill a container.
	ContainersToKill map[kubecontainer.ContainerID]containerToKillInfo
	// EphemeralContainersToStart is a list of indexes for the ephemeral containers to start,
	// where the index is the index of the specific container in pod.Spec.EphemeralContainers.
	EphemeralContainersToStart []int

	// ContainersToUpdate keeps a list of containers needing resource update.
	// Container resource update is applicable only for CPU and memory.
	ContainersToUpdate map[v1.ResourceName][]containerToUpdateInfo
	// UpdatePodResources is true if container(s) need resource update with restart
	UpdatePodResources bool

	// ContainersToReset is a list of containers to be killed (if running) and removed from
	// runtime for RestartAllContainers. The container that triggered RestartAllContainers
	// will be reset the last.
	ContainersToReset []containerToRemoveInfo

	// UpdatePodLevelResources is true if pod-level resources need to be updated
	UpdatePodLevelResources bool
}
// podLevelResources holds the set of resources applicable to the running pod.
// Memory values are in bytes; CPU values are in millicores (see
// podResourcesFromRequirements, which populates them via Value() /
// MilliValue()).
type podLevelResources struct {
	// memoryLimit is the pod-level memory limit in bytes.
	memoryLimit int64
	// memoryRequest is the pod-level memory request in bytes.
	memoryRequest int64
	// cpuLimit is the pod-level CPU limit in millicores.
	cpuLimit int64
	// cpuRequest is the pod-level CPU request in millicores.
	cpuRequest int64
}
// String renders the planned pod actions for debug logging.
// The last field is ContainersToReset; the previous format string labeled it
// "ContainersToRemove" (a stale name) and was missing a space after
// "EphemeralContainersToStart: %v,".
func (p podActions) String() string {
	return fmt.Sprintf("KillPod: %t, CreateSandbox: %t, UpdatePodResources: %t, UpdatePodLevelResources: %t, Attempt: %d, InitContainersToStart: %v, ContainersToStart: %v, EphemeralContainersToStart: %v, ContainersToUpdate: %v, ContainersToKill: %v, ContainersToReset: %v",
		p.KillPod, p.CreateSandbox, p.UpdatePodResources, p.UpdatePodLevelResources, p.Attempt, p.InitContainersToStart, p.ContainersToStart, p.EphemeralContainersToStart, p.ContainersToUpdate, p.ContainersToKill, p.ContainersToReset)
}
// containerChanged reports whether the container spec has changed relative to
// the running container, based on the fields folded into the container hash
// (currently `image` and `name`). The pod UID is irrelevant here: the
// containerStatus is already looked up by pod UID, so a UID change means no
// status would be found to compare against in the first place.
// Returns (expected hash, actual hash, changed).
func containerChanged(container *v1.Container, containerStatus *kubecontainer.Status) (uint64, uint64, bool) {
	expectedHash := kubecontainer.HashContainer(container)
	actualHash := containerStatus.Hash
	return expectedHash, actualHash, actualHash != expectedHash
}
// shouldRestartOnFailure reports whether the pod's restart policy allows
// restarting a failed container. Only RestartPolicyNever forbids it.
func shouldRestartOnFailure(pod *v1.Pod) bool {
	switch pod.Spec.RestartPolicy {
	case v1.RestartPolicyNever:
		return false
	default:
		return true
	}
}
// containerSucceeded reports whether container c has exited successfully
// (state Exited with exit code 0) according to podStatus. A container with no
// recorded status is not considered succeeded.
func containerSucceeded(c *v1.Container, podStatus *kubecontainer.PodStatus) bool {
	status := podStatus.FindContainerStatusByName(c.Name)
	return status != nil &&
		status.State == kubecontainer.ContainerStateExited &&
		status.ExitCode == 0
}
// containerResourcesFromRequirements converts v1 resource requirements into
// the internal containerResources form (memory in bytes, CPU in millicores).
// When the InPlacePodLevelResourcesVerticalScaling feature gate is enabled,
// the effective limits are derived taking pod-level resources into account.
func containerResourcesFromRequirements(podRequirements, containerRequirements *v1.ResourceRequirements) containerResources {
	res := containerResources{
		memoryLimit:   containerRequirements.Limits.Memory().Value(),
		memoryRequest: containerRequirements.Requests.Memory().Value(),
		cpuLimit:      containerRequirements.Limits.Cpu().MilliValue(),
		cpuRequest:    containerRequirements.Requests.Cpu().MilliValue(),
	}
	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodLevelResourcesVerticalScaling) {
		// Recompute the limits with pod-level resources factored in.
		effectiveLimits := kubeutil.GetLimits(&kubeutil.ResourceOpts{PodResources: podRequirements, ContainerResources: containerRequirements})
		res.memoryLimit = effectiveLimits.Memory().Value()
		res.cpuLimit = effectiveLimits.Cpu().MilliValue()
	}
	return res
}
// podResourcesFromRequirements converts pod-level v1 resource requirements
// into the internal podLevelResources form (memory in bytes, CPU in
// millicores). A nil requirements yields the zero value.
func podResourcesFromRequirements(requirements *v1.ResourceRequirements) podLevelResources {
	var result podLevelResources
	if requirements == nil {
		return result
	}
	result.memoryLimit = requirements.Limits.Memory().Value()
	result.memoryRequest = requirements.Requests.Memory().Value()
	result.cpuLimit = requirements.Limits.Cpu().MilliValue()
	result.cpuRequest = requirements.Requests.Cpu().MilliValue()
	return result
}
// computePodResizeAction determines the actions required (if any) to resize the given container.
// Returns whether to keep (true) or restart (false) the container.
// Side effects: mutates `changes` — appends to ContainersToUpdate /
// ContainersToKill / (Init)ContainersToStart and may set UpdatePodResources.
// TODO(vibansal): Make this function to be agnostic to whether it is dealing with a restartable init container or not (i.e. remove the argument `isRestartableInitContainer`).
func (m *kubeGenericRuntimeManager) computePodResizeAction(ctx context.Context, pod *v1.Pod, containerIdx int, isRestartableInitContainer bool, kubeContainerStatus *kubecontainer.Status, changes *podActions) (keepContainer bool) {
	logger := klog.FromContext(ctx)
	if resizable, _, _ := allocation.IsInPlacePodVerticalScalingAllowed(pod); !resizable {
		return true
	}
	var container v1.Container
	if isRestartableInitContainer {
		container = pod.Spec.InitContainers[containerIdx]
	} else {
		container = pod.Spec.Containers[containerIdx]
	}

	// Determine if the *running* container needs resource update by comparing v1.Spec.Resources (desired)
	// with v1.Status.Resources / runtime.Status.Resources (last known actual).
	// Proceed only when kubelet has accepted the resize a.k.a v1.Spec.Resources.Requests == v1.Status.AllocatedResources.
	// Skip if runtime containerID doesn't match pod.Status containerID (container is restarting)
	if kubeContainerStatus.State != kubecontainer.ContainerStateRunning {
		return true
	}

	actuatedContainerResources, found := m.actuatedState.GetContainerResources(pod.UID, container.Name)
	if !found {
		logger.Error(nil, "Missing actuated resource record", "pod", klog.KObj(pod), "container", container.Name)
		// Proceed with the zero-value actuated resources. For restart NotRequired, this may
		// result in an extra call to UpdateContainerResources, but that call should be idempotent.
		// For RestartContainer, this may trigger a container restart.
	}
	var actuatedPodResources *v1.ResourceRequirements
	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodLevelResourcesVerticalScaling) {
		actuatedPodResources, _ = m.actuatedState.GetPodLevelResources(pod.UID)
	}

	desiredResources := containerResourcesFromRequirements(pod.Spec.Resources, &container.Resources)
	currentResources := containerResourcesFromRequirements(actuatedPodResources, &actuatedContainerResources)
	if currentResources == desiredResources {
		// No resize required.
		return true
	}

	// determineContainerResize reports whether rName differs between desired
	// and current, and — per the container's resize policy — whether applying
	// the change requires a container restart.
	determineContainerResize := func(rName v1.ResourceName, desiredValue, currentValue int64) (resize, restart bool) {
		if desiredValue == currentValue {
			return false, false
		}

		for _, policy := range container.ResizePolicy {
			if policy.ResourceName == rName {
				return true, policy.RestartPolicy == v1.RestartContainer
			}
		}
		// If a resource policy isn't set, the implicit default is NotRequired.
		return true, false
	}
	// markContainerForUpdate records an in-place (no-restart) update,
	// ordering decreases before increases within the per-resource list.
	markContainerForUpdate := func(rName v1.ResourceName, desiredValue, currentValue int64) {
		cUpdateInfo := containerToUpdateInfo{
			container:                 &container,
			kubeContainerID:           kubeContainerStatus.ID,
			desiredContainerResources: desiredResources,
			currentContainerResources: &currentResources,
		}
		// Order the container updates such that resource decreases are applied before increases
		switch {
		case desiredValue > currentValue: // append
			changes.ContainersToUpdate[rName] = append(changes.ContainersToUpdate[rName], cUpdateInfo)
		case desiredValue < currentValue: // prepend
			changes.ContainersToUpdate[rName] = append(changes.ContainersToUpdate[rName], containerToUpdateInfo{})
			copy(changes.ContainersToUpdate[rName][1:], changes.ContainersToUpdate[rName])
			changes.ContainersToUpdate[rName][0] = cUpdateInfo
		}
	}
	resizeMemLim, restartMemLim := determineContainerResize(v1.ResourceMemory, desiredResources.memoryLimit, currentResources.memoryLimit)
	resizeMemReq, restartMemReq := determineContainerResize(v1.ResourceMemory, desiredResources.memoryRequest, currentResources.memoryRequest)
	resizeCPULim, restartCPULim := determineContainerResize(v1.ResourceCPU, desiredResources.cpuLimit, currentResources.cpuLimit)
	resizeCPUReq, restartCPUReq := determineContainerResize(v1.ResourceCPU, desiredResources.cpuRequest, currentResources.cpuRequest)
	if restartCPULim || restartCPUReq || restartMemLim || restartMemReq {
		// resize policy requires this container to restart
		changes.ContainersToKill[kubeContainerStatus.ID] = containerToKillInfo{
			name:      kubeContainerStatus.Name,
			container: &container,
			message:   fmt.Sprintf("Container %s resize requires restart", container.Name),
		}
		if isRestartableInitContainer {
			changes.InitContainersToStart = append(changes.InitContainersToStart, containerIdx)
		} else {
			changes.ContainersToStart = append(changes.ContainersToStart, containerIdx)
		}
		changes.UpdatePodResources = true
		return false
	} else {
		// In-place update path: mark at most one memory change and at most one
		// CPU change (limit takes precedence over request, since the update
		// info carries the full desired resources anyway).
		if resizeMemLim {
			markContainerForUpdate(v1.ResourceMemory, desiredResources.memoryLimit, currentResources.memoryLimit)
		} else if resizeMemReq {
			markContainerForUpdate(v1.ResourceMemory, desiredResources.memoryRequest, currentResources.memoryRequest)
		}
		if resizeCPULim {
			markContainerForUpdate(v1.ResourceCPU, desiredResources.cpuLimit, currentResources.cpuLimit)
		} else if resizeCPUReq {
			markContainerForUpdate(v1.ResourceCPU, desiredResources.cpuRequest, currentResources.cpuRequest)
		}
	}
	return true
}
// doPodResizeAction applies an in-place pod resize: it validates feasibility,
// then updates pod-level cgroup limits and per-container resources in a
// carefully ordered sequence (limits grow before requests on the way up,
// requests shrink before limits on the way down) so that "requests <= limits"
// holds throughout. Returns a SyncResult describing success or the failure.
func (m *kubeGenericRuntimeManager) doPodResizeAction(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, podContainerChanges podActions) *kubecontainer.SyncResult {
	logger := klog.FromContext(ctx)
	start := time.Now()
	success := false
	defer func() {
		// Record resize latency labeled by outcome.
		metrics.PodResizeDurationMilliseconds.WithLabelValues(strconv.FormatBool(success)).Observe(float64(time.Since(start).Milliseconds()))
	}()

	resizeResult := kubecontainer.NewSyncResult(kubecontainer.ResizePodInPlace, format.Pod(pod))

	pcm := m.containerManager.NewPodContainerManager()
	//TODO(vinaykul,InPlacePodVerticalScaling): Figure out best way to get enforceMemoryQoS value (parameter #4 below) in platform-agnostic way
	enforceCPULimits := m.cpuCFSQuota
	if utilfeature.DefaultFeatureGate.Enabled(features.DisableCPUQuotaWithExclusiveCPUs) && m.containerManager.PodHasExclusiveCPUs(pod) {
		enforceCPULimits = false
		logger.V(2).Info("Disabled CFS quota", "pod", klog.KObj(pod))
	}
	// Desired pod-level cgroup configuration derived from the pod spec.
	podResources := cm.ResourceConfigForPod(pod, enforceCPULimits, uint64((m.cpuCFSQuotaPeriod.Duration)/time.Microsecond), false)
	if podResources == nil {
		logger.Error(nil, "Unable to get resource configuration", "pod", klog.KObj(pod))
		resizeResult.Fail(kubecontainer.ErrResizePodInPlace, fmt.Sprintf("unable to get resource configuration processing resize for pod %q", format.Pod(pod)))
		return resizeResult
	}
	// Current cgroup configuration, read back from the pod cgroup.
	currentPodMemoryConfig, err := pcm.GetPodCgroupConfig(pod, v1.ResourceMemory)
	if err != nil {
		logger.Error(err, "Unable to get pod cgroup memory config", "pod", klog.KObj(pod))
		resizeResult.Fail(kubecontainer.ErrResizePodInPlace, fmt.Sprintf("unable to get pod cgroup memory config for pod %q", format.Pod(pod)))
		return resizeResult
	}
	currentPodCPUConfig, err := pcm.GetPodCgroupConfig(pod, v1.ResourceCPU)
	if err != nil {
		logger.Error(err, "Unable to get pod cgroup cpu config", "pod", klog.KObj(pod))
		resizeResult.Fail(kubecontainer.ErrResizePodInPlace, fmt.Sprintf("unable to get pod cgroup cpu config for pod %q", format.Pod(pod)))
		return resizeResult
	}

	// currentPodResources tracks the pod cgroup state as we mutate it below;
	// it starts from the desired config overlaid with the current mem/cpu.
	currentPodResources := podResources
	currentPodResources = mergeResourceConfig(currentPodResources, currentPodMemoryConfig)
	currentPodResources = mergeResourceConfig(currentPodResources, currentPodCPUConfig)

	// Before proceeding with the resize, perform a best-effort check to catch potential resize
	// errors in order to avoid a partial-resize state.
	if err := m.validatePodResizeAction(ctx, pod, podStatus, currentPodResources, podResources, podContainerChanges); err != nil {
		logger.Error(err, "Allocated pod resize is not currently feasible", "pod", klog.KObj(pod))
		resizeResult.Fail(kubecontainer.ErrResizePodInPlace, err.Error())
		return resizeResult
	}

	// updateActuatedPodLevelResources records the just-applied pod-level
	// values for resourceName into the actuated-state checkpoint.
	updateActuatedPodLevelResources := func(resourceName v1.ResourceName) error {
		allocatedResources := pod.Spec.Resources
		if allocatedResources == nil {
			return nil
		}

		// allocated resources will never be nil
		actuatedPodResources, found := m.actuatedState.GetPodLevelResources(pod.UID)
		if !found || actuatedPodResources == nil {
			logger.Error(nil, "Missing actuated resource record", "pod", klog.KObj(pod), "pod", pod.Name)
			// Proceed with the zero-value actuated resources.
			actuatedPodResources = &v1.ResourceRequirements{}
		}

		defaultResourceListIfNil := func(rl v1.ResourceList) v1.ResourceList {
			if rl == nil {
				return make(v1.ResourceList)
			}
			return rl
		}

		// Copy only the resized resource's request/limit into the record,
		// leaving the other resource's actuated values untouched.
		switch resourceName {
		case v1.ResourceMemory:
			if allocatedResources.Requests != nil {
				actuatedPodResources.Requests = defaultResourceListIfNil(actuatedPodResources.Requests)
				actuatedPodResources.Requests[v1.ResourceMemory] = allocatedResources.Requests[v1.ResourceMemory]
			}
			if allocatedResources.Limits != nil {
				actuatedPodResources.Limits = defaultResourceListIfNil(actuatedPodResources.Limits)
				actuatedPodResources.Limits[v1.ResourceMemory] = allocatedResources.Limits[v1.ResourceMemory]
			}
		case v1.ResourceCPU:
			if allocatedResources.Requests != nil {
				actuatedPodResources.Requests = defaultResourceListIfNil(actuatedPodResources.Requests)
				actuatedPodResources.Requests[v1.ResourceCPU] = allocatedResources.Requests[v1.ResourceCPU]
			}
			if allocatedResources.Limits != nil {
				actuatedPodResources.Limits = defaultResourceListIfNil(actuatedPodResources.Limits)
				actuatedPodResources.Limits[v1.ResourceCPU] = allocatedResources.Limits[v1.ResourceCPU]
			}
		}

		if err = m.actuatedState.SetPodLevelResources(pod.UID, actuatedPodResources); err != nil {
			logger.Error(err, "SetPodLevelResources failed", "pod", pod.Name, "UID", pod.UID,
				"pod", format.Pod(pod), "resourceName", resourceName)
			return err
		}
		return nil
	}

	// setPodCgroupConfig writes one dimension (limit or request) of rName to
	// the pod cgroup, best-effort notifies the runtime, and (behind the
	// feature gate) checkpoints the actuated pod-level resources.
	setPodCgroupConfig := func(logger klog.Logger, rName v1.ResourceName, setLimitValue bool) error {
		var err error
		resizedResources := &cm.ResourceConfig{}
		switch rName {
		case v1.ResourceCPU:
			if setLimitValue {
				resizedResources.CPUPeriod = podResources.CPUPeriod
				resizedResources.CPUQuota = podResources.CPUQuota
			} else {
				resizedResources.CPUShares = podResources.CPUShares
			}
		case v1.ResourceMemory:
			if !setLimitValue {
				// Memory requests aren't written to cgroups.
				return nil
			}
			resizedResources.Memory = podResources.Memory
		}
		err = pcm.SetPodCgroupConfig(logger, pod, resizedResources)
		if err != nil {
			logger.Error(err, "Failed to set cgroup config", "resource", rName, "pod", klog.KObj(pod))
			return err
		}
		currentPodResources = mergeResourceConfig(currentPodResources, resizedResources)
		if err = m.updatePodSandboxResources(ctx, podContainerChanges.SandboxID, pod, currentPodResources); err != nil {
			logger.Error(err, "Failed to notify runtime for UpdatePodSandboxResources", "resource", rName, "pod", klog.KObj(pod))
			// Don't propagate the error since the updatePodSandboxResources call is best-effort.
		}
		if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodLevelResourcesVerticalScaling) {
			if err = updateActuatedPodLevelResources(rName); err != nil {
				logger.Error(err, "Failed to update pod-level actuated resources", "resource", rName, "pod", klog.KObj(pod))
			}
		}
		return nil
	}

	// Memory and CPU are updated separately because memory resizes may be ordered differently than CPU resizes.
	// If resize results in net pod resource increase, set pod cgroup config before resizing containers.
	// If resize results in net pod resource decrease, set pod cgroup config after resizing containers.
	// If an error occurs at any point, abort. Let future syncpod iterations retry the unfinished stuff.
	resizeContainers := func(rName v1.ResourceName, currPodCgLimValue, newPodCgLimValue, currPodCgReqValue, newPodCgReqValue int64) error {
		var err error
		// At upsizing, limits should expand prior to requests in order to keep "requests <= limits".
		if newPodCgLimValue > currPodCgLimValue {
			// TODO: Pass logger from context once contextual logging migration is complete
			if err = setPodCgroupConfig(klog.TODO(), rName, true); err != nil {
				return err
			}
		}
		if newPodCgReqValue > currPodCgReqValue {
			// TODO: Pass logger from context once contextual logging migration is complete
			if err = setPodCgroupConfig(klog.TODO(), rName, false); err != nil {
				return err
			}
		}
		if len(podContainerChanges.ContainersToUpdate[rName]) > 0 {
			if err = m.updatePodContainerResources(ctx, pod, rName, podContainerChanges.ContainersToUpdate[rName]); err != nil {
				logger.Error(err, "updatePodContainerResources failed", "pod", format.Pod(pod), "resource", rName)
				return err
			}
		}
		// At downsizing, requests should shrink prior to limits in order to keep "requests <= limits".
		if newPodCgReqValue < currPodCgReqValue {
			// TODO: Pass logger from context once contextual logging migration is complete
			if err = setPodCgroupConfig(klog.TODO(), rName, false); err != nil {
				return err
			}
		}
		if newPodCgLimValue < currPodCgLimValue {
			// TODO(#127825): Pass logger from context once contextual logging migration is complete
			if err = setPodCgroupConfig(klog.TODO(), rName, true); err != nil {
				return err
			}
		}
		return err
	}

	// Always update the pod status once. Even if there was a resize error, the resize may have been
	// partially actuated.
	defer m.runtimeHelper.SetPodWatchCondition(pod.UID, "doPodResizeAction", func(*kubecontainer.PodStatus) bool { return true })

	if len(podContainerChanges.ContainersToUpdate[v1.ResourceMemory]) > 0 || podContainerChanges.UpdatePodResources || podContainerChanges.UpdatePodLevelResources {
		if podResources.Memory == nil {
			// Default pod memory limit to the current memory limit if unset to prevent it from updating.
			// TODO(#128675): This does not support removing limits.
			podResources.Memory = currentPodMemoryConfig.Memory
		}

		if errResize := resizeContainers(v1.ResourceMemory, int64(*currentPodMemoryConfig.Memory), *podResources.Memory, 0, 0); errResize != nil {
			resizeResult.Fail(kubecontainer.ErrResizePodInPlace, errResize.Error())
			return resizeResult
		}
	}
	if len(podContainerChanges.ContainersToUpdate[v1.ResourceCPU]) > 0 || podContainerChanges.UpdatePodResources || podContainerChanges.UpdatePodLevelResources {
		if podResources.CPUShares == nil {
			// This shouldn't happen: ResourceConfigForPod always returns a non-nil value for CPUShares.
			logger.Error(nil, "podResources.CPUShares is nil", "pod", pod.Name)
			resizeResult.Fail(kubecontainer.ErrResizePodInPlace, fmt.Sprintf("podResources.CPUShares is nil for pod %s", pod.Name))
			return resizeResult
		}

		// Default pod CPUQuota to the current CPUQuota if no limit is set to prevent the pod limit
		// from updating.
		// TODO(#128675): This does not support removing limits.
		if podResources.CPUQuota == nil {
			podResources.CPUQuota = currentPodCPUConfig.CPUQuota
		}
		if errResize := resizeContainers(v1.ResourceCPU, *currentPodCPUConfig.CPUQuota, *podResources.CPUQuota,
			int64(*currentPodCPUConfig.CPUShares), int64(*podResources.CPUShares)); errResize != nil {
			resizeResult.Fail(kubecontainer.ErrResizePodInPlace, errResize.Error())
			return resizeResult
		}
	}

	success = true
	return resizeResult
}
// validatePodResizeAction checks whether the proposed resize actions are
// currently viable. Only memory resizes are validated today; CPU resizes are
// always considered feasible.
func (m *kubeGenericRuntimeManager) validatePodResizeAction(
	ctx context.Context,
	pod *v1.Pod,
	podStatus *kubecontainer.PodStatus,
	currentPodResources, desiredPodResources *cm.ResourceConfig,
	podContainerChanges podActions,
) error {
	memoryChanging := len(podContainerChanges.ContainersToUpdate[v1.ResourceMemory]) > 0 ||
		podContainerChanges.UpdatePodResources
	if !memoryChanging {
		return nil
	}
	return m.validateMemoryResizeAction(ctx, pod, podStatus, currentPodResources, desiredPodResources, podContainerChanges)
}
// validateMemoryResizeAction verifies that any decreasing memory limit
// (pod-level or per-container) is not being set below the corresponding
// current memory usage. Returns nil when no memory limit is decreasing or
// when all decreases are safe; otherwise returns an aggregate error.
func (m *kubeGenericRuntimeManager) validateMemoryResizeAction(
	ctx context.Context,
	pod *v1.Pod,
	podStatus *kubecontainer.PodStatus,
	currentPodResources, desiredPodResources *cm.ResourceConfig,
	podContainerChanges podActions,
) error {
	// Determine which memory limits are decreasing.
	podLimitDecreasing := desiredPodResources.Memory != nil &&
		(currentPodResources.Memory == nil || // Pod memory limit added
			*desiredPodResources.Memory < *currentPodResources.Memory) // Pod memory limit decreasing

	decreasingContainerLimits := map[string]int64{} // Map of container name to desired memory limit.
	for _, cUpdate := range podContainerChanges.ContainersToUpdate[v1.ResourceMemory] {
		if cUpdate.desiredContainerResources.memoryLimit != 0 {
			if cUpdate.currentContainerResources == nil || cUpdate.currentContainerResources.memoryLimit == 0 || // Limit added
				cUpdate.desiredContainerResources.memoryLimit < cUpdate.currentContainerResources.memoryLimit { // Limit decreasing
				decreasingContainerLimits[cUpdate.container.Name] = cUpdate.desiredContainerResources.memoryLimit
			}
		}
	}

	if !podLimitDecreasing && len(decreasingContainerLimits) == 0 {
		// No memory limits are decreasing: nothing else to check here.
		return nil
	}

	// Check whether any of the new memory limits are below current memory usage.
	podUsageStats, err := m.runtimeHelper.PodCPUAndMemoryStats(ctx, pod, podStatus)
	if err != nil {
		return fmt.Errorf("unable to read memory usage for pod %q", format.Pod(pod))
	}

	var errs []error
	if podLimitDecreasing {
		if podUsageStats.Memory == nil || podUsageStats.Memory.UsageBytes == nil {
			errs = append(errs, fmt.Errorf("missing pod memory usage"))
		} else if *podUsageStats.Memory.UsageBytes >= uint64(*desiredPodResources.Memory) {
			errs = append(errs, fmt.Errorf("attempting to set pod memory limit (%d) below current usage (%d)",
				*desiredPodResources.Memory, *podUsageStats.Memory.UsageBytes))
		}
	}
	for _, cStats := range podUsageStats.Containers {
		if desiredLimit, ok := decreasingContainerLimits[cStats.Name]; ok {
			if cStats.Memory == nil || cStats.Memory.UsageBytes == nil {
				errs = append(errs, fmt.Errorf("missing container %q memory usage", cStats.Name))
			} else if *cStats.Memory.UsageBytes >= uint64(desiredLimit) {
				// BUGFIX: report the container's own usage here. The previous
				// code dereferenced podUsageStats.Memory.UsageBytes, which
				// printed the wrong value and could nil-panic when pod-level
				// memory stats were absent while container stats existed.
				errs = append(errs, fmt.Errorf("attempting to set container %q memory limit (%d) below current usage (%d)",
					cStats.Name, desiredLimit, *cStats.Memory.UsageBytes))
			}
		}
	}

	if len(errs) > 0 {
		agg := utilerrors.NewAggregate(errs)
		return fmt.Errorf("cannot decrease memory limits: %w", agg)
	}
	return nil
}
// updatePodContainerResources invokes the runtime's UpdateContainerResources
// for each container in containersToUpdate, for the single resource dimension
// named by resourceName (CPU or memory). Updates are applied in slice order and
// abort on the first failure; for each accepted update the desired values are
// copied into currentContainerResources so that a later call for the other
// resource dimension reuses the values the runtime most recently accepted.
func (m *kubeGenericRuntimeManager) updatePodContainerResources(ctx context.Context, pod *v1.Pod, resourceName v1.ResourceName, containersToUpdate []containerToUpdateInfo) error {
	logger := klog.FromContext(ctx)
	logger.V(5).Info("Updating container resources", "pod", klog.KObj(pod))
	for _, cInfo := range containersToUpdate {
		// Work on a copy so the pod spec's container object is never mutated.
		container := cInfo.container.DeepCopy()
		// If updating memory limit, use most recently configured CPU request and limit values.
		// If updating CPU request and limit, use most recently configured memory request and limit values.
		switch resourceName {
		case v1.ResourceMemory:
			container.Resources.Limits = v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(cInfo.currentContainerResources.cpuLimit, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(cInfo.desiredContainerResources.memoryLimit, resource.BinarySI),
			}
			container.Resources.Requests = v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(cInfo.currentContainerResources.cpuRequest, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(cInfo.desiredContainerResources.memoryRequest, resource.BinarySI),
			}
		case v1.ResourceCPU:
			container.Resources.Limits = v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(cInfo.desiredContainerResources.cpuLimit, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(cInfo.currentContainerResources.memoryLimit, resource.BinarySI),
			}
			container.Resources.Requests = v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(cInfo.desiredContainerResources.cpuRequest, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(cInfo.currentContainerResources.memoryRequest, resource.BinarySI),
			}
		}
		if err := m.updateContainerResources(ctx, pod, container, cInfo.kubeContainerID); err != nil {
			// Log error and abort as container updates need to succeed in the order determined by computePodResizeAction.
			// The recovery path is for SyncPod to keep retrying at later times until it succeeds.
			logger.Error(err, "updateContainerResources failed", "container", container.Name, "cID", cInfo.kubeContainerID,
				"pod", format.Pod(pod), "resourceName", resourceName)
			return err
		}
		// If UpdateContainerResources is error-free, it means desired values for 'resourceName' was accepted by runtime.
		// So we update currentContainerResources for 'resourceName', which is our view of most recently configured resources.
		// Note: We can't rely on GetPodStatus as runtime may lag in actuating the resource values it just accepted.
		switch resourceName {
		case v1.ResourceMemory:
			cInfo.currentContainerResources.memoryLimit = cInfo.desiredContainerResources.memoryLimit
			cInfo.currentContainerResources.memoryRequest = cInfo.desiredContainerResources.memoryRequest
		case v1.ResourceCPU:
			cInfo.currentContainerResources.cpuLimit = cInfo.desiredContainerResources.cpuLimit
			cInfo.currentContainerResources.cpuRequest = cInfo.desiredContainerResources.cpuRequest
		}
	}
	return nil
}
// computePodActions checks whether the pod spec has changed and returns the changes if true.
// It decides, for one sync iteration: whether the sandbox must be (re)created,
// which containers to kill/start/update/reset, and whether the whole pod should
// be killed. The returned podActions is consumed by SyncPod.
func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, restartAllContainers bool) podActions {
	logger := klog.FromContext(ctx)
	logger.V(5).Info("Syncing Pod", "pod", klog.KObj(pod))
	createPodSandbox, attempt, sandboxID := runtimeutil.PodSandboxChanged(pod, podStatus)
	changes := podActions{
		KillPod:           createPodSandbox,
		CreateSandbox:     createPodSandbox,
		SandboxID:         sandboxID,
		Attempt:           attempt,
		ContainersToStart: []int{},
		ContainersToKill:  make(map[kubecontainer.ContainerID]containerToKillInfo),
	}
	// Needs to kill and remove all containers in reverse order when the pod is marked for RestartAllContainers.
	if utilfeature.DefaultFeatureGate.Enabled(features.RestartAllContainersOnContainerExits) && restartAllContainers {
		logger.V(3).Info("Pod marked for RestartAllContainers", "pod", klog.KObj(pod))
		// Kill and remove containers in reverse order. Source containers (which exited and triggered
		// RestartAllContainers) are removed last.
		sourceInitContainers, targetInitContainers := m.getContainersToReset(pod.Spec.InitContainers, podStatus)
		sourceContainers, targetContainers := m.getContainersToReset(pod.Spec.Containers, podStatus)
		changes.ContainersToReset = append(changes.ContainersToReset, targetContainers...)
		changes.ContainersToReset = append(changes.ContainersToReset, targetInitContainers...)
		changes.ContainersToReset = append(changes.ContainersToReset, sourceContainers...)
		changes.ContainersToReset = append(changes.ContainersToReset, sourceInitContainers...)
		return changes
	}
	// If we need to (re-)create the pod sandbox, everything will need to be
	// killed and recreated, and init containers should be purged.
	if createPodSandbox {
		if !shouldRestartOnFailure(pod) && attempt != 0 && len(podStatus.ContainerStatuses) != 0 {
			// Should not restart the pod, just return.
			// we should not create a sandbox, and just kill the pod if it is already done.
			// if all containers are done and should not be started, there is no need to create a new sandbox.
			// this stops confusing logs on pods whose containers all have exit codes, but we recreate a sandbox before terminating it.
			//
			// If ContainerStatuses is empty, we assume that we've never
			// successfully created any containers. In this case, we should
			// retry creating the sandbox.
			changes.CreateSandbox = false
			return changes
		}
		// Get the containers to start, excluding the ones that succeeded if RestartPolicy is OnFailure.
		var containersToStart []int
		for idx, c := range pod.Spec.Containers {
			runOnce := pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure
			if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
				if c.RestartPolicy != nil {
					// Container-level restart policy overrides the pod-level one.
					runOnce = *c.RestartPolicy == v1.ContainerRestartPolicyOnFailure
				}
			}
			if runOnce && containerSucceeded(&c, podStatus) {
				continue
			}
			// NOTE(review): this second ContainerRestartRules check appears redundant —
			// the same container-level OnFailure + succeeded case is already covered by
			// the runOnce handling above. Confirm before removing.
			if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
				if c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyOnFailure && containerSucceeded(&c, podStatus) {
					continue
				}
			}
			containersToStart = append(containersToStart, idx)
		}
		// We should not create a sandbox, and just kill the pod if initialization
		// is done and there is no container to start.
		if len(containersToStart) == 0 {
			hasInitialized := false
			// If there is any regular container, it means all init containers have
			// been initialized.
			hasInitialized = HasAnyRegularContainerCreated(pod, podStatus)
			if hasInitialized {
				changes.CreateSandbox = false
				return changes
			}
		}
		// If we are creating a pod sandbox, we should restart from the initial
		// state.
		if len(pod.Spec.InitContainers) != 0 {
			// Pod has init containers, return the first one.
			changes.InitContainersToStart = []int{0}
			return changes
		}
		changes.ContainersToStart = containersToStart
		return changes
	}
	// Ephemeral containers may be started even if initialization is not yet complete.
	for i := range pod.Spec.EphemeralContainers {
		c := (*v1.Container)(&pod.Spec.EphemeralContainers[i].EphemeralContainerCommon)
		// Ephemeral Containers are never restarted
		if podStatus.FindContainerStatusByName(c.Name) == nil {
			changes.EphemeralContainersToStart = append(changes.EphemeralContainersToStart, i)
		}
	}
	if resizable, _, _ := allocation.IsInPlacePodVerticalScalingAllowed(pod); resizable {
		changes.ContainersToUpdate = make(map[v1.ResourceName][]containerToUpdateInfo)
	}
	// Check initialization progress.
	// TODO: Remove this code path as logically it is the subset of the next
	// code path.
	hasInitialized := m.computeInitContainerActions(ctx, pod, podStatus, &changes)
	if changes.KillPod || !hasInitialized {
		// Initialization failed or still in progress. Skip inspecting non-init
		// containers.
		return changes
	}
	// Number of running containers to keep.
	keepCount := 0
	// check the status of containers.
	for idx, container := range pod.Spec.Containers {
		containerStatus := podStatus.FindContainerStatusByName(container.Name)
		// Call internal container post-stop lifecycle hook for any non-running container so that any
		// allocated cpus are released immediately. If the container is restarted, cpus will be re-allocated
		// to it.
		if containerStatus != nil && containerStatus.State != kubecontainer.ContainerStateRunning {
			if err := m.internalLifecycle.PostStopContainer(logger, containerStatus.ID.ID); err != nil {
				logger.Error(err, "Internal container post-stop lifecycle hook failed for container in pod with error",
					"containerName", container.Name, "pod", klog.KObj(pod))
			}
		}
		// If container does not exist, or is not running, check whether we
		// need to restart it.
		if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {
			if kubecontainer.ShouldContainerBeRestarted(logger, &container, pod, podStatus) {
				logger.V(3).Info("Container of pod is not in the desired state and shall be started", "containerName", container.Name, "pod", klog.KObj(pod))
				changes.ContainersToStart = append(changes.ContainersToStart, idx)
				if containerStatus != nil && containerStatus.State == kubecontainer.ContainerStateUnknown {
					// If container is in unknown state, we don't know whether it
					// is actually running or not, always try killing it before
					// restart to avoid having 2 running instances of the same container.
					changes.ContainersToKill[containerStatus.ID] = containerToKillInfo{
						name:      containerStatus.Name,
						container: &pod.Spec.Containers[idx],
						message: fmt.Sprintf("Container is in %q state, try killing it before restart",
							containerStatus.State),
						reason: reasonUnknown,
					}
				}
			}
			continue
		}
		// The container is running, but kill the container if any of the following condition is met.
		var message string
		var reason containerKillReason
		restart := shouldRestartOnFailure(pod)
		if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
			// For probe failures, use container-level restart policy only. Container-level restart
			// rules are not evaluated because the container is still running.
			if container.RestartPolicy != nil {
				restart = *container.RestartPolicy != v1.ContainerRestartPolicyNever
			}
		}
		if _, _, changed := containerChanged(&container, containerStatus); changed {
			message = fmt.Sprintf("Container %s definition changed", container.Name)
			// Restart regardless of the restart policy because the container
			// spec changed.
			restart = true
		} else if liveness, found := m.livenessManager.Get(containerStatus.ID); found && liveness == proberesults.Failure {
			// If the container failed the liveness probe, we should kill it.
			message = fmt.Sprintf("Container %s failed liveness probe", container.Name)
			reason = reasonLivenessProbe
		} else if startup, found := m.startupManager.Get(containerStatus.ID); found && startup == proberesults.Failure {
			// If the container failed the startup probe, we should kill it.
			message = fmt.Sprintf("Container %s failed startup probe", container.Name)
			reason = reasonStartupProbe
		} else if !m.computePodResizeAction(ctx, pod, idx, false, containerStatus, &changes) {
			// computePodResizeAction updates 'changes' if resize policy requires restarting this container
			continue
		} else {
			// Keep the container.
			keepCount++
			continue
		}
		// We need to kill the container, but if we also want to restart the
		// container afterwards, make the intent clear in the message. Also do
		// not kill the entire pod since we expect container to be running eventually.
		if restart {
			message = fmt.Sprintf("%s, will be restarted", message)
			changes.ContainersToStart = append(changes.ContainersToStart, idx)
		}
		changes.ContainersToKill[containerStatus.ID] = containerToKillInfo{
			name:      containerStatus.Name,
			container: &pod.Spec.Containers[idx],
			message:   message,
			reason:    reason,
		}
		logger.V(2).Info("Message for Container of pod", "containerName", container.Name, "containerStatusID", containerStatus.ID, "pod", klog.KObj(pod), "containerMessage", message)
	}
	// If nothing is running and nothing will be started, the pod itself should
	// be terminated.
	if keepCount == 0 && len(changes.ContainersToStart) == 0 {
		changes.KillPod = true
		// To prevent the restartable init containers to keep pod alive, we should
		// not restart them.
		changes.InitContainersToStart = nil
	}
	changes.UpdatePodLevelResources = m.computePodLevelResourcesResizeAction(ctx, pod)
	return changes
}
// getContainersToReset returns container info about the containers to remove from the runtime.
// The first list are the containers that triggered the RestartAllContainers; the second list
// are the containers that are victim of the RestartAllContainers.
func (m *kubeGenericRuntimeManager) getContainersToReset(containers []v1.Container, podStatus *kubecontainer.PodStatus) (sources []containerToRemoveInfo, targets []containerToRemoveInfo) {
	for i := range containers {
		spec := &containers[i]
		// podStatus.FindContainerStatusByName cannot be used here: a container may
		// have multiple statuses, and RestartAllContainers requires every one of
		// them to be purged from the runtime.
		for _, status := range podStatus.ContainerStatuses {
			if status.Name != spec.Name {
				continue
			}
			info := containerToRemoveInfo{
				containerID: status.ID,
				container:   spec,
			}
			if status.State != kubecontainer.ContainerStateExited {
				// Not yet exited: mark it so it gets killed before removal.
				info.kill = true
				targets = append(targets, info)
				continue
			}
			rule, matched := podutil.FindMatchingContainerRestartRule(*spec, int32(status.ExitCode))
			if matched && rule.Action == v1.ContainerRestartRuleActionRestartAllContainers {
				// This exit triggered the restart-all: it is a source container.
				sources = append(sources, info)
			} else {
				targets = append(targets, info)
			}
		}
	}
	return
}
// computePodLevelResourcesResizeAction reports whether pod-level resources need
// to be resized: it compares the desired pod-level resources from the pod spec
// against the most recently actuated pod-level resources, and returns true when
// they differ. Always false when InPlacePodLevelResourcesVerticalScaling is off.
func (m *kubeGenericRuntimeManager) computePodLevelResourcesResizeAction(ctx context.Context, pod *v1.Pod) bool {
	if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodLevelResourcesVerticalScaling) {
		return false
	}
	logger := klog.FromContext(ctx)
	actuatedPodLevelResources, found := m.actuatedState.GetPodLevelResources(pod.UID)
	if !found {
		// Fix: the previous call passed the structured key "pod" twice
		// ("pod", klog.KObj(pod), "pod", pod.Name), which is invalid structured
		// logging; klog.KObj already includes the pod's name.
		logger.Error(nil, "Missing actuated pod level resource record", "pod", klog.KObj(pod))
		// Proceed with the zero-value actuated resources. For restart NotRequired, this may
		// result in an extra call to UpdateContainerResources, but that call should be idempotent.
		// For RestartContainer, this may trigger a container restart.
	}
	desiredPodLevelResources := podResourcesFromRequirements(pod.Spec.Resources)
	currentPodLevelResources := podResourcesFromRequirements(actuatedPodLevelResources)
	return currentPodLevelResources != desiredPodLevelResources
}
// SyncPod syncs the running pod into the desired pod by executing following steps:
//
//  1. Compute sandbox and container changes.
//  2. Kill pod sandbox if necessary.
//  3. Kill any containers that should not be running.
//  4. Create sandbox if necessary.
//  5. Create ephemeral containers.
//  6. Create init containers.
//  7. Resize running containers (if InPlacePodVerticalScaling==true)
//  8. Create normal containers.
//
// Each step records its outcome into the returned PodSyncResult; failures
// return early so the caller retries on a later sync.
func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff, restartAllContainers bool) (result kubecontainer.PodSyncResult) {
	logger := klog.FromContext(ctx)
	// Step 1: Compute sandbox and container changes.
	podContainerChanges := m.computePodActions(ctx, pod, podStatus, restartAllContainers)
	logger.V(3).Info("computePodActions got for pod", "podActions", podContainerChanges, "pod", klog.KObj(pod))
	if podContainerChanges.CreateSandbox {
		ref, err := ref.GetReference(legacyscheme.Scheme, pod)
		if err != nil {
			logger.Error(err, "Couldn't make a ref to pod", "pod", klog.KObj(pod))
		}
		// A non-empty SandboxID means an old sandbox existed and is being replaced.
		if podContainerChanges.SandboxID != "" {
			m.recorder.WithLogger(logger).Eventf(ref, v1.EventTypeNormal, events.SandboxChanged, "Pod sandbox changed, it will be killed and re-created.")
		} else {
			logger.V(4).Info("SyncPod received new pod, will create a sandbox for it", "pod", klog.KObj(pod))
		}
	}
	// Step 2: Kill the pod if the sandbox has changed.
	if podContainerChanges.KillPod {
		if podContainerChanges.CreateSandbox {
			logger.V(4).Info("Stopping PodSandbox for pod, will start new one", "pod", klog.KObj(pod))
		} else {
			logger.V(4).Info("Stopping PodSandbox for pod, because all other containers are dead", "pod", klog.KObj(pod))
		}
		killResult := m.killPodWithSyncResult(ctx, pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil)
		result.AddPodSyncResult(killResult)
		if killResult.Error() != nil {
			logger.Error(killResult.Error(), "killPodWithSyncResult failed")
			return
		}
		if podContainerChanges.CreateSandbox {
			m.purgeInitContainers(ctx, pod, podStatus)
		}
	} else {
		// Step 3: kill any running containers in this pod which are not to keep.
		for containerID, containerInfo := range podContainerChanges.ContainersToKill {
			logger.V(3).Info("Killing unwanted container for pod", "containerName", containerInfo.name, "containerID", containerID, "pod", klog.KObj(pod))
			killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerInfo.name)
			result.AddSyncResult(killContainerResult)
			if err := m.killContainer(ctx, pod, containerID, containerInfo.name, containerInfo.message, containerInfo.reason, nil, nil); err != nil {
				killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
				logger.Error(err, "killContainer for pod failed", "containerName", containerInfo.name, "containerID", containerID, "pod", klog.KObj(pod))
				return
			}
		}
		// Removes the containers if they are marked for removal (for in-place restart)
		if utilfeature.DefaultFeatureGate.Enabled(features.RestartAllContainersOnContainerExits) {
			for _, containerInfo := range podContainerChanges.ContainersToReset {
				cName := containerInfo.container.Name
				logger.V(3).Info("Removing container before pod restarts", "containerName", cName, "containerID", containerInfo.containerID, "pod", klog.KObj(pod))
				removeContainerResult := kubecontainer.NewSyncResult(kubecontainer.RemoveContainer, cName)
				result.AddSyncResult(removeContainerResult)
				if containerInfo.kill {
					logger.V(3).Info("Killing container before removal", "containerName", cName, "containerID", containerInfo.containerID, "pod", klog.KObj(pod))
					// Killing containers without grace period.
					var gracePeriod int64 = 0
					if err := m.killContainer(ctx, pod, containerInfo.containerID, cName, "killing", reasonRestartAllContainers, &gracePeriod, nil); err != nil {
						removeContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
						logger.Error(err, "killContainer for pod failed", "containerName", cName, "containerID", containerInfo.containerID, "pod", klog.KObj(pod))
						return
					}
				}
				// TODO(yuanwang04): Revisit whether container logs should be persisted.
				if err := m.removeContainer(ctx, containerInfo.containerID.ID); err != nil {
					removeContainerResult.Fail(kubecontainer.ErrRemoveContainer, err.Error())
					logger.Error(err, "removeContainer for pod failed", "containerName", cName, "containerID", containerInfo.containerID, "pod", klog.KObj(pod))
					return
				}
			}
		}
	}
	// Keep terminated init containers fairly aggressively controlled
	// This is an optimization because container removals are typically handled
	// by container garbage collector.
	m.pruneInitContainersBeforeStart(ctx, pod, podStatus)
	// We pass the value of the PRIMARY podIP and list of podIPs down to
	// generatePodSandboxConfig and generateContainerConfig, which in turn
	// passes it to various other functions, in order to facilitate functionality
	// that requires this value (hosts file and downward API) and avoid races determining
	// the pod IP in cases where a container requires restart but the
	// podIP isn't in the status manager yet. The list of podIPs is used to
	// generate the hosts file.
	//
	// We default to the IPs in the passed-in pod status, and overwrite them if the
	// sandbox needs to be (re)started.
	var podIPs []string
	if podStatus != nil {
		podIPs = podStatus.IPs
	}
	// Step 4: Create a sandbox for the pod if necessary.
	podSandboxID := podContainerChanges.SandboxID
	if podContainerChanges.CreateSandbox {
		var msg string
		var err error
		logger.V(4).Info("Creating PodSandbox for pod", "pod", klog.KObj(pod))
		metrics.StartedPodsTotal.Inc()
		if utilfeature.DefaultFeatureGate.Enabled(features.UserNamespacesSupport) && pod.Spec.HostUsers != nil && !*pod.Spec.HostUsers {
			metrics.StartedUserNamespacedPodsTotal.Inc()
			// Failures in user namespace creation could happen at any point in the pod lifecycle,
			// but usually will be caught in container creation.
			// To avoid specifically handling each error case, loop through the result after the sync finishes
			defer func() {
				// catch unhandled errors
				for _, res := range result.SyncResults {
					if res.Error != nil {
						metrics.StartedUserNamespacedPodsErrorsTotal.Inc()
						return
					}
				}
				// catch handled error
				if result.SyncError != nil {
					metrics.StartedUserNamespacedPodsErrorsTotal.Inc()
				}
			}()
		}
		createSandboxResult := kubecontainer.NewSyncResult(kubecontainer.CreatePodSandbox, format.Pod(pod))
		result.AddSyncResult(createSandboxResult)
		// ConvertPodSysctlsVariableToDotsSeparator converts sysctl variable
		// in the Pod.Spec.SecurityContext.Sysctls slice into a dot as a separator.
		// runc uses the dot as the separator to verify whether the sysctl variable
		// is correct in a separate namespace, so when using the slash as the sysctl
		// variable separator, runc returns an error: "sysctl is not in a separate kernel namespace"
		// and the podSandBox cannot be successfully created. Therefore, before calling runc,
		// we need to convert the sysctl variable, the dot is used as a separator to separate the kernel namespace.
		// When runc supports slash as sysctl separator, this function can no longer be used.
		sysctl.ConvertPodSysctlsVariableToDotsSeparator(pod.Spec.SecurityContext)
		// Prepare resources allocated by the Dynamic Resource Allocation feature for the pod
		if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
			if err := m.runtimeHelper.PrepareDynamicResources(ctx, pod); err != nil {
				ref, referr := ref.GetReference(legacyscheme.Scheme, pod)
				if referr != nil {
					logger.Error(referr, "Couldn't make a ref to pod", "pod", klog.KObj(pod))
					return
				}
				m.recorder.WithLogger(logger).Eventf(ref, v1.EventTypeWarning, events.FailedPrepareDynamicResources, "Failed to prepare dynamic resources: %v", err)
				logger.Error(err, "Failed to prepare dynamic resources", "pod", klog.KObj(pod))
				return
			}
		}
		podSandboxID, msg, err = m.createPodSandbox(ctx, pod, podContainerChanges.Attempt)
		if err != nil {
			// createPodSandbox can return an error from CNI, CSI,
			// or CRI if the Pod has been deleted while the POD is
			// being created. If the pod has been deleted then it's
			// not a real error.
			//
			// SyncPod can still be running when we get here, which
			// means the PodWorker has not acked the deletion.
			if m.podStateProvider.IsPodTerminationRequested(pod.UID) {
				logger.V(4).Info("Pod was deleted and sandbox failed to be created", "pod", klog.KObj(pod), "podUID", pod.UID)
				return
			}
			metrics.StartedPodsErrorsTotal.Inc()
			createSandboxResult.Fail(kubecontainer.ErrCreatePodSandbox, msg)
			logger.Error(err, "CreatePodSandbox for pod failed", "pod", klog.KObj(pod))
			ref, referr := ref.GetReference(legacyscheme.Scheme, pod)
			if referr != nil {
				logger.Error(referr, "Couldn't make a ref to pod", "pod", klog.KObj(pod))
			}
			m.recorder.WithLogger(logger).Eventf(ref, v1.EventTypeWarning, events.FailedCreatePodSandBox, "Failed to create pod sandbox: %v", err)
			return
		}
		logger.V(4).Info("Created PodSandbox for pod", "podSandboxID", podSandboxID, "pod", klog.KObj(pod))
		resp, err := m.runtimeService.PodSandboxStatus(ctx, podSandboxID, false)
		if err != nil {
			ref, referr := ref.GetReference(legacyscheme.Scheme, pod)
			if referr != nil {
				logger.Error(referr, "Couldn't make a ref to pod", "pod", klog.KObj(pod))
			}
			m.recorder.WithLogger(logger).Eventf(ref, v1.EventTypeWarning, events.FailedStatusPodSandBox, "Unable to get pod sandbox status: %v", err)
			logger.Error(err, "Failed to get pod sandbox status; Skipping pod", "pod", klog.KObj(pod))
			result.Fail(err)
			return
		}
		if resp.GetStatus() == nil {
			result.Fail(errors.New("pod sandbox status is nil"))
			return
		}
		// If we ever allow updating a pod from non-host-network to
		// host-network, we may use a stale IP.
		if !kubecontainer.IsHostNetworkPod(pod) {
			// Overwrite the podIPs passed in the pod status, since we just started the pod sandbox.
			podIPs = m.determinePodSandboxIPs(ctx, pod.Namespace, pod.Name, resp.GetStatus())
			logger.V(4).Info("Determined the ip for pod after sandbox changed", "IPs", podIPs, "pod", klog.KObj(pod))
		}
	}
	// the start containers routines depend on pod ip(as in primary pod ip)
	// instead of trying to figure out if we have 0 < len(podIPs)
	// everytime, we short circuit it here
	podIP := ""
	if len(podIPs) != 0 {
		podIP = podIPs[0]
	}
	// Get podSandboxConfig for containers to start.
	configPodSandboxResult := kubecontainer.NewSyncResult(kubecontainer.ConfigPodSandbox, podSandboxID)
	result.AddSyncResult(configPodSandboxResult)
	podSandboxConfig, err := m.generatePodSandboxConfig(ctx, pod, podContainerChanges.Attempt)
	if err != nil {
		message := fmt.Sprintf("GeneratePodSandboxConfig for pod %q failed: %v", format.Pod(pod), err)
		logger.Error(err, "GeneratePodSandboxConfig for pod failed", "pod", klog.KObj(pod))
		configPodSandboxResult.Fail(kubecontainer.ErrConfigPodSandbox, message)
		return
	}
	// Pre-pull all image volumes up front so per-container start can consume the results.
	imageVolumePullResults, err := m.getImageVolumes(ctx, pod, podSandboxConfig, pullSecrets)
	if err != nil {
		logger.Error(err, "Get image volumes for pod failed", "pod", klog.KObj(pod))
		configPodSandboxResult.Fail(kubecontainer.ErrConfigPodSandbox, err.Error())
		return
	}
	// Helper containing boilerplate common to starting all types of containers.
	// typeName is a description used to describe this type of container in log messages,
	// currently: "container", "init container" or "ephemeral container"
	// metricLabel is the label used to describe this type of container in monitoring metrics.
	// currently: "container", "init_container" or "ephemeral_container"
	start := func(ctx context.Context, typeName, metricLabel string, spec *startSpec) error {
		startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, spec.container.Name)
		result.AddSyncResult(startContainerResult)
		isInBackOff, msg, err := m.doBackOff(ctx, pod, spec.container, podStatus, backOff)
		if isInBackOff {
			startContainerResult.Fail(err, msg)
			logger.V(4).Info("Backing Off restarting container in pod", "containerType", typeName, "container", spec.container.Name, "pod", klog.KObj(pod))
			return err
		}
		metrics.StartedContainersTotal.WithLabelValues(metricLabel).Inc()
		if sc.HasWindowsHostProcessRequest(pod, spec.container) {
			metrics.StartedHostProcessContainersTotal.WithLabelValues(metricLabel).Inc()
		}
		logger.V(4).Info("Creating container in pod", "containerType", typeName, "container", spec.container.Name, "pod", klog.KObj(pod))
		// We fail late here to populate the "ErrImagePull" and "ImagePullBackOff" correctly to the end user.
		imageVolumes, err := m.toKubeContainerImageVolumes(ctx, imageVolumePullResults, spec.container, pod, startContainerResult)
		if err != nil {
			return err
		}
		// NOTE (aramase) podIPs are populated for single stack and dual stack clusters. Send only podIPs.
		msg, err = m.startContainer(ctx, podSandboxID, podSandboxConfig, spec, pod, podStatus, pullSecrets, podIP, podIPs, imageVolumes)
		incrementImageVolumeMetrics(err, msg, spec.container, imageVolumes)
		if err != nil {
			// startContainer() returns well-defined error codes that have reasonable cardinality for metrics and are
			// useful to cluster administrators to distinguish "server errors" from "user errors".
			metrics.StartedContainersErrorsTotal.WithLabelValues(metricLabel, err.Error()).Inc()
			if sc.HasWindowsHostProcessRequest(pod, spec.container) {
				metrics.StartedHostProcessContainersErrorsTotal.WithLabelValues(metricLabel, err.Error()).Inc()
			}
			startContainerResult.Fail(err, msg)
			// known errors that are logged in other places are logged at higher levels here to avoid
			// repetitive log spam
			switch {
			case err == images.ErrImagePullBackOff:
				logger.V(3).Info("Container start failed in pod", "containerType", typeName, "container", spec.container.Name, "pod", klog.KObj(pod), "containerMessage", msg, "err", err)
			default:
				utilruntime.HandleError(fmt.Errorf("%v %v start failed in pod %v: %w: %s", typeName, spec.container.Name, format.Pod(pod), err, msg))
			}
			return err
		}
		return nil
	}
	// Step 5: start ephemeral containers
	// These are started "prior" to init containers to allow running ephemeral containers even when there
	// are errors starting an init container. In practice init containers will start first since ephemeral
	// containers cannot be specified on pod creation.
	for _, idx := range podContainerChanges.EphemeralContainersToStart {
		start(ctx, "ephemeral container", metrics.EphemeralContainer, ephemeralContainerStartSpec(&pod.Spec.EphemeralContainers[idx]))
	}
	// Step 6: start init containers.
	for _, idx := range podContainerChanges.InitContainersToStart {
		container := &pod.Spec.InitContainers[idx]
		// Start the next init container.
		if err := start(ctx, "init container", metrics.InitContainer, containerStartSpec(container)); err != nil {
			if podutil.IsRestartableInitContainer(container) {
				logger.V(4).Info("Failed to start the restartable init container for the pod, skipping", "initContainerName", container.Name, "pod", klog.KObj(pod))
				continue
			}
			logger.V(4).Info("Failed to initialize the pod, as the init container failed to start, aborting", "initContainerName", container.Name, "pod", klog.KObj(pod))
			return
		}
		// Successfully started the container; clear the entry in the failure
		logger.V(4).Info("Completed init container for pod", "containerName", container.Name, "pod", klog.KObj(pod))
	}
	// Step 7: For containers in podContainerChanges.ContainersToUpdate[CPU,Memory] list, invoke UpdateContainerResources
	if resizable, _, _ := allocation.IsInPlacePodVerticalScalingAllowed(pod); resizable {
		if len(podContainerChanges.ContainersToUpdate) > 0 || podContainerChanges.UpdatePodResources || podContainerChanges.UpdatePodLevelResources {
			result.SyncResults = append(result.SyncResults, m.doPodResizeAction(ctx, pod, podStatus, podContainerChanges))
		}
	}
	// Step 8: start containers in podContainerChanges.ContainersToStart.
	for _, idx := range podContainerChanges.ContainersToStart {
		start(ctx, "container", metrics.Container, containerStartSpec(&pod.Spec.Containers[idx]))
	}
	return
}
// incrementImageVolumeMetrics increments the image volume mount metrics
// depending on the provided error and the usage of the image volume mount
// within the container.
func incrementImageVolumeMetrics(err error, msg string, container *v1.Container, imageVolumes kubecontainer.ImageVolumes) {
	if !utilfeature.DefaultFeatureGate.Enabled(features.ImageVolume) {
		return
	}
	metrics.ImageVolumeRequestedTotal.Add(float64(len(imageVolumes)))
	// The failure predicate is loop-invariant, so evaluate it once up front.
	mountFailed := errors.Is(err, ErrCreateContainer) && strings.HasPrefix(msg, crierror.ErrImageVolumeMountFailed.Error())
	for _, mount := range container.VolumeMounts {
		if _, usesImageVolume := imageVolumes[mount.Name]; !usesImageVolume {
			continue
		}
		if mountFailed {
			metrics.ImageVolumeMountedErrorsTotal.Inc()
		} else {
			metrics.ImageVolumeMountedSucceedTotal.Inc()
		}
	}
}
// imageVolumePulls are the pull results for each image volume name.
// The key is the volume name as referenced by container volume mounts.
type imageVolumePulls = map[string]imageVolumePullResult
// imageVolumePullResult is a pull result for a single image volume.
// If spec is nil, then err and msg should be set.
// If err is nil, then spec should be set.
type imageVolumePullResult struct {
	spec *runtimeapi.ImageSpec // resolved image spec on success
	err  error                 // pull failure, if any
	msg  string                // human-readable detail accompanying err
}
// toKubeContainerImageVolumes maps the container's volume mounts to the
// per-volume pull results. For every mount whose volume failed to pull, a
// warning event is recorded; after scanning all mounts, the *last* failure
// seen is used to fail the sync result and is returned. Returns (nil, nil)
// when there are no image volume pull results at all.
func (m *kubeGenericRuntimeManager) toKubeContainerImageVolumes(ctx context.Context, imageVolumePullResults imageVolumePulls, container *v1.Container, pod *v1.Pod, syncResult *kubecontainer.SyncResult) (kubecontainer.ImageVolumes, error) {
	if len(imageVolumePullResults) == 0 {
		return nil, nil
	}

	imageVolumes := kubecontainer.ImageVolumes{}
	var (
		lastErr error
		lastMsg string
	)

	for _, v := range container.VolumeMounts {
		res, ok := imageVolumePullResults[v.Name]
		if !ok {
			// Mount does not refer to an image volume; nothing to do.
			continue
		}

		if res.err != nil {
			// Record one event per failed mount, but keep scanning so every
			// failure is surfaced to the user; only the last error is returned.
			s, _ := grpcstatus.FromError(res.err)
			m.recordContainerEvent(ctx, pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
			lastErr = res.err
			lastMsg = res.msg
			continue
		}

		imageVolumes[v.Name] = res.spec
	}

	if lastErr != nil {
		syncResult.Fail(lastErr, lastMsg)
		return nil, lastErr
	}

	return imageVolumes, nil
}
// getImageVolumes resolves and pulls every image-backed volume declared in
// the pod spec. It returns a map from volume name to the pull result: a
// resolved ImageSpec on success, or the pull error and message on failure.
// Returns (nil, nil) when the ImageVolume feature gate is disabled.
func (m *kubeGenericRuntimeManager) getImageVolumes(ctx context.Context, pod *v1.Pod, podSandboxConfig *runtimeapi.PodSandboxConfig, pullSecrets []v1.Secret) (imageVolumePulls, error) {
	logger := klog.FromContext(ctx)
	if !utilfeature.DefaultFeatureGate.Enabled(features.ImageVolume) {
		return nil, nil
	}

	podRuntimeHandler, err := m.getPodRuntimeHandler(pod)
	if err != nil {
		logger.Error(err, "Failed to get pod runtime handler", "pod", klog.KObj(pod))
		return nil, err
	}

	res := make(imageVolumePulls)
	for _, volume := range pod.Spec.Volumes {
		if volume.Image == nil {
			continue
		}

		objectRef, _ := ref.GetReference(legacyscheme.Scheme, pod) // objectRef can be nil, no error check required
		// Named imageRef (not "ref") so it does not shadow the imported "ref"
		// package used on the line above.
		imageRef, msg, err := m.imagePuller.EnsureImageExists(
			ctx, objectRef, pod, volume.Image.Reference, pullSecrets, podSandboxConfig, podRuntimeHandler, volume.Image.PullPolicy,
		)
		if err != nil {
			// Record the per-volume failure and keep pulling the remaining
			// volumes; the caller decides how to surface individual errors.
			logger.Error(err, "Failed to ensure image", "pod", klog.KObj(pod))
			res[volume.Name] = imageVolumePullResult{err: err, msg: msg}
			continue
		}

		logger.V(4).Info("Pulled image", "ref", imageRef, "pod", klog.KObj(pod))
		res[volume.Name] = imageVolumePullResult{spec: &runtimeapi.ImageSpec{
			Image:              imageRef,
			UserSpecifiedImage: volume.Image.Reference,
			RuntimeHandler:     podRuntimeHandler,
			Annotations:        pod.Annotations,
		}}
	}

	return res, nil
}
// If a container is still in backoff, the function will return a brief backoff error and
// a detailed error message.
//
// Returns (true, msg, backoffErr) while the container is inside its backoff
// window; returns (false, "", nil) when no matching exited container exists
// or when the backoff window has passed (in which case the next backoff step
// is armed via backOff.Next).
func (m *kubeGenericRuntimeManager) doBackOff(ctx context.Context, pod *v1.Pod, container *v1.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, string, error) {
	logger := klog.FromContext(ctx)
	var cStatus *kubecontainer.Status
	// Find the status of the most recent *exited* incarnation of this container.
	for _, c := range podStatus.ContainerStatuses {
		if c.Name == container.Name && c.State == kubecontainer.ContainerStateExited {
			cStatus = c
			break
		}
	}

	if cStatus == nil {
		// No exited instance: nothing to back off from.
		return false, "", nil
	}

	logger.V(3).Info("Checking backoff for container in pod", "containerName", container.Name, "pod", klog.KObj(pod))
	// Use the finished time of the latest exited container as the start point to calculate whether to do back-off.
	ts := cStatus.FinishedAt
	// backOff requires a unique key to identify the container.
	key := GetBackoffKey(pod, container)
	if backOff.IsInBackOffSince(key, ts) {
		if containerRef, err := kubecontainer.GenerateContainerRef(pod, container); err == nil {
			m.recorder.WithLogger(logger).Eventf(containerRef, v1.EventTypeWarning, events.BackOffStartContainer,
				fmt.Sprintf("Back-off restarting failed container %s in pod %s", container.Name, format.Pod(pod)))
		}
		backoff := backOff.Get(key)
		err := fmt.Errorf("back-off %s restarting failed container=%s pod=%s", backoff, container.Name, format.Pod(pod))
		logger.V(3).Info("Back-off restarting failed container", "err", err.Error())
		// ts.Add(backoff) is the earliest time a restart may be attempted.
		return true, err.Error(), kubecontainer.NewBackoffError(kubecontainer.ErrCrashLoopBackOff, ts.Add(backoff))
	}

	// Not (or no longer) in backoff: advance the backoff state for next time.
	backOff.Next(key, ts)
	return false, "", nil
}
// KillPod kills all the containers of a pod. Pod may be nil, running pod must not be.
// gracePeriodOverride if specified allows the caller to override the pod default grace period.
// only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data.
// it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios.
func (m *kubeGenericRuntimeManager) KillPod(ctx context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
	// Collapse the detailed per-container sync results into a single error.
	result := m.killPodWithSyncResult(ctx, pod, runningPod, gracePeriodOverride)
	return result.Error()
}
// killPodWithSyncResult kills a runningPod and returns SyncResult.
// Note: The pod passed in could be *nil* when kubelet restarted.
func (m *kubeGenericRuntimeManager) killPodWithSyncResult(ctx context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) {
	logger := klog.FromContext(ctx)
	killContainerResults := m.killContainersWithSyncResult(ctx, pod, runningPod, gracePeriodOverride)
	for _, containerResult := range killContainerResults {
		result.AddSyncResult(containerResult)
	}

	// stop sandbox, the sandbox will be removed in GarbageCollect
	killSandboxResult := kubecontainer.NewSyncResult(kubecontainer.KillPodSandbox, runningPod.ID)
	result.AddSyncResult(killSandboxResult)
	// Stop all sandboxes belongs to same pod
	for _, podSandbox := range runningPod.Sandboxes {
		if err := m.runtimeService.StopPodSandbox(ctx, podSandbox.ID.ID); err != nil && !crierror.IsNotFound(err) {
			killSandboxResult.Fail(kubecontainer.ErrKillPodSandbox, err.Error())
			// Pass the actual error to the logger (it was previously logged as
			// nil and only captured in the sync result).
			logger.Error(err, "Failed to stop sandbox", "podSandboxID", podSandbox.ID)
		}
	}

	return
}
// GeneratePodStatus builds a kubecontainer.PodStatus from a CRI container
// event, converting each container status and sorting them by creation time.
func (m *kubeGenericRuntimeManager) GeneratePodStatus(event *runtimeapi.ContainerEventResponse) *kubecontainer.PodStatus {
	ctx := context.TODO() // This context will be passed as parameter in the future
	sandboxMeta := event.PodSandboxStatus.Metadata
	podUID := kubetypes.UID(sandboxMeta.Uid)

	podIPs := m.determinePodSandboxIPs(ctx, sandboxMeta.Namespace, sandboxMeta.Name, event.PodSandboxStatus)

	kubeContainerStatuses := []*kubecontainer.Status{}
	for _, status := range event.ContainersStatuses {
		kubeContainerStatuses = append(kubeContainerStatuses, m.convertToKubeContainerStatus(ctx, podUID, status))
	}

	sort.Sort(containerStatusByCreated(kubeContainerStatuses))

	return &kubecontainer.PodStatus{
		ID:                podUID,
		Name:              sandboxMeta.Name,
		Namespace:         sandboxMeta.Namespace,
		IPs:               podIPs,
		SandboxStatuses:   []*runtimeapi.PodSandboxStatus{event.PodSandboxStatus},
		ContainerStatuses: kubeContainerStatuses,
	}
}
// GetPodStatus retrieves the status of the pod, including the
// information of all containers in the pod that are visible in Runtime.
func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
	logger := klog.FromContext(ctx)
	// Now we retain restart count of container as a container label. Each time a container
	// restarts, pod will read the restart count from the registered dead container, increment
	// it to get the new restart count, and then add a label with the new restart count on
	// the newly started container.
	// However, there are some limitations of this method:
	//	1. When all dead containers were garbage collected, the container status could
	//	not get the historical value and would be *inaccurate*. Fortunately, the chance
	//	is really slim.
	//	2. When working with old version containers which have no restart count label,
	//	we can only assume their restart count is 0.
	// Anyhow, we only promised "best-effort" restart count reporting, we can just ignore
	// these limitations now.
	// TODO: move this comment to SyncPod.
	podSandboxIDs, err := m.getSandboxIDByPodUID(ctx, uid, nil)
	if err != nil {
		return nil, err
	}

	// Minimal pod object used only for log formatting / key derivation.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			UID:       uid,
		},
	}

	podFullName := format.Pod(pod)

	logger.V(4).Info("getSandboxIDByPodUID got sandbox IDs for pod", "podSandboxID", podSandboxIDs, "pod", klog.KObj(pod))

	sandboxStatuses := []*runtimeapi.PodSandboxStatus{}
	containerStatuses := []*kubecontainer.Status{}
	activeContainerStatuses := []*kubecontainer.Status{}
	timestamp := time.Now()

	podIPs := []string{}
	var activePodSandboxID string
	// Sandbox IDs are expected newest-first: idx == 0 is the latest sandbox.
	for idx, podSandboxID := range podSandboxIDs {
		resp, err := m.runtimeService.PodSandboxStatus(ctx, podSandboxID, false)
		// Between List (getSandboxIDByPodUID) and check (PodSandboxStatus) another thread might remove a container, and that is normal.
		// The previous call (getSandboxIDByPodUID) never fails due to a pod sandbox not existing.
		// Therefore, this method should not either, but instead act as if the previous call failed,
		// which means the error should be ignored.
		if crierror.IsNotFound(err) {
			continue
		}
		if err != nil {
			logger.Error(err, "PodSandboxStatus of sandbox for pod", "podSandboxID", podSandboxID, "pod", klog.KObj(pod))
			return nil, err
		}
		if resp.GetStatus() == nil {
			return nil, errors.New("pod sandbox status is nil")
		}
		sandboxStatuses = append(sandboxStatuses, resp.Status)
		// Only get pod IP from latest sandbox
		if idx == 0 && resp.Status.State == runtimeapi.PodSandboxState_SANDBOX_READY {
			podIPs = m.determinePodSandboxIPs(ctx, namespace, name, resp.Status)
			activePodSandboxID = podSandboxID
		}

		if idx == 0 && utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) {
			if resp.Timestamp == 0 {
				// If the Evented PLEG is enabled in the kubelet, but not in the runtime
				// then the pod status we get will not have the timestamp set.
				// e.g. CI job 'pull-kubernetes-e2e-gce-alpha-features' will runs with
				// features gate enabled, which includes Evented PLEG, but uses the
				// runtime without Evented PLEG support.
				logger.V(4).Info("Runtime does not set pod status timestamp", "pod", klog.KObj(pod))
				containerStatuses, activeContainerStatuses, err = m.getPodContainerStatuses(ctx, uid, name, namespace, activePodSandboxID)
				if err != nil {
					// logReduction suppresses repeated identical error lines for the same pod.
					if m.logReduction.ShouldMessageBePrinted(err.Error(), podFullName) {
						logger.Error(err, "getPodContainerStatuses for pod failed", "pod", klog.KObj(pod))
					}
					return nil, err
				}
			} else {
				// Get the statuses of all containers visible to the pod and
				// timestamp from sandboxStatus.
				timestamp = time.Unix(0, resp.Timestamp)
				for _, cs := range resp.ContainersStatuses {
					cStatus := m.convertToKubeContainerStatus(ctx, uid, cs)
					containerStatuses = append(containerStatuses, cStatus)
				}
			}
		}
	}

	if !utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) {
		// Get statuses of all containers visible in the pod.
		containerStatuses, activeContainerStatuses, err = m.getPodContainerStatuses(ctx, uid, name, namespace, activePodSandboxID)
		if err != nil {
			if m.logReduction.ShouldMessageBePrinted(err.Error(), podFullName) {
				logger.Error(err, "getPodContainerStatuses for pod failed", "pod", klog.KObj(pod))
			}
			return nil, err
		}
	}
	// Success: allow the next occurrence of a suppressed error to be logged again.
	m.logReduction.ClearID(podFullName)

	return &kubecontainer.PodStatus{
		ID:                      uid,
		Name:                    name,
		Namespace:               namespace,
		IPs:                     podIPs,
		SandboxStatuses:         sandboxStatuses,
		ContainerStatuses:       containerStatuses,
		ActiveContainerStatuses: activeContainerStatuses,
		TimeStamp:               timestamp,
	}, nil
}
// GetContainerStatus fetches the CRI status of a single container and
// converts it into the kubelet's internal representation.
func (m *kubeGenericRuntimeManager) GetContainerStatus(ctx context.Context, podUID kubetypes.UID, id kubecontainer.ContainerID) (*kubecontainer.Status, error) {
	statusResp, err := m.runtimeService.ContainerStatus(ctx, id.ID, false)
	if err != nil {
		return nil, fmt.Errorf("runtime container status: %w", err)
	}
	criStatus := statusResp.GetStatus()
	if criStatus == nil {
		// A nil status from the runtime is treated as an error, not as "unknown".
		return nil, remote.ErrContainerStatusNil
	}
	return m.convertToKubeContainerStatus(ctx, podUID, criStatus), nil
}
// GarbageCollect removes dead containers using the specified container gc policy.
func (m *kubeGenericRuntimeManager) GarbageCollect(ctx context.Context, gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
	logger := klog.FromContext(ctx)

	// Drop actuated-resource bookkeeping for pods whose content should be removed.
	for podUID := range m.actuatedState.GetPodResourceInfoMap() {
		if !m.podStateProvider.ShouldPodContentBeRemoved(podUID) {
			continue
		}
		if err := m.actuatedState.RemovePod(podUID); err != nil {
			// Best-effort cleanup: log and continue with the remaining pods.
			logger.Error(err, "Failed to remove pod from actuated state", "podUID", podUID)
		}
	}

	return m.containerGC.GarbageCollect(ctx, gcPolicy, allSourcesReady, evictNonDeletedPods)
}
// UpdatePodCIDR is just a passthrough method to update the runtimeConfig of the shim
// with the podCIDR supplied by the kubelet.
func (m *kubeGenericRuntimeManager) UpdatePodCIDR(ctx context.Context, podCIDR string) error {
	logger := klog.FromContext(ctx)
	// TODO(#35531): do we really want to write a method on this manager for each
	// field of the config?
	logger.Info("Updating runtime config through cri with podcidr", "CIDR", podCIDR)

	runtimeConfig := &runtimeapi.RuntimeConfig{
		NetworkConfig: &runtimeapi.NetworkConfig{PodCidr: podCIDR},
	}
	return m.runtimeService.UpdateRuntimeConfig(ctx, runtimeConfig)
}
// CheckpointContainer forwards a container checkpoint request to the runtime service.
func (m *kubeGenericRuntimeManager) CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error {
	return m.runtimeService.CheckpointContainer(ctx, options)
}

// ListMetricDescriptors returns the metric descriptors exposed by the runtime service.
func (m *kubeGenericRuntimeManager) ListMetricDescriptors(ctx context.Context) ([]*runtimeapi.MetricDescriptor, error) {
	return m.runtimeService.ListMetricDescriptors(ctx)
}

// ListPodSandboxMetrics returns the pod sandbox metrics reported by the runtime service.
func (m *kubeGenericRuntimeManager) ListPodSandboxMetrics(ctx context.Context) ([]*runtimeapi.PodSandboxMetrics, error) {
	return m.runtimeService.ListPodSandboxMetrics(ctx)
}
// UpdateActuatedPodLevelResources records the actuated pod's pod-level
// resources when in-place vertical scaling is enabled and the pod declares
// any requests or limits at the pod level; otherwise it is a no-op.
func (m *kubeGenericRuntimeManager) UpdateActuatedPodLevelResources(actuatedPod *v1.Pod) error {
	if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
		return nil
	}

	podResources := actuatedPod.Spec.Resources
	if podResources == nil || (podResources.Requests == nil && podResources.Limits == nil) {
		// Nothing declared at the pod level; nothing to record.
		return nil
	}

	return m.actuatedState.SetPodLevelResources(actuatedPod.UID, podResources)
}
// isPodResizeInProgress checks whether the actuated resizable resources differ from
// the resources allocated for:
// * any running containers - Specifically, the following differences are ignored:
//   - Non-resizable containers: non-restartable init containers, ephemeral containers
//   - Non-resizable resources: only CPU & memory are resizable
//   - Non-running containers: they will be sized correctly when (re)started
//
// * any running pod if InPlacePodLevelResourcesVerticalScaling is enabled.
func (m *kubeGenericRuntimeManager) IsPodResizeInProgress(allocatedPod *v1.Pod, podStatus *kubecontainer.PodStatus) bool {
	// Short-circuits on the first detected in-progress resize.
	return m.isContainerResourceResizeInProgress(allocatedPod, podStatus) ||
		m.isPodLevelResourcesResizeInProgress(allocatedPod, podStatus)
}
// isContainerResourceResizeInProgress reports whether any resizable, running
// container has actuated CPU/memory requests or limits that differ from its
// allocated values. VisitContainers returns false as soon as the visitor
// returns false, so the negation means "a mismatch was found".
func (m *kubeGenericRuntimeManager) isContainerResourceResizeInProgress(allocatedPod *v1.Pod, podStatus *kubecontainer.PodStatus) bool {
	return !podutil.VisitContainers(&allocatedPod.Spec, podutil.InitContainers|podutil.Containers,
		func(allocatedContainer *v1.Container, containerType podutil.ContainerType) (shouldContinue bool) {
			if !isResizableContainer(allocatedContainer, containerType) {
				return true
			}

			containerStatus := podStatus.FindContainerStatusByName(allocatedContainer.Name)
			if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {
				// If the container isn't running, it doesn't need to be resized.
				return true
			}

			actuatedResources, _ := m.actuatedState.GetContainerResources(allocatedPod.UID, allocatedContainer.Name)
			// Struct assignment copies ResourceRequirements, so overwriting
			// Limits below does not mutate the container in the pod spec.
			allocatedResources := allocatedContainer.Resources
			if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodLevelResourcesVerticalScaling) {
				// With pod-level resources, effective container limits may be
				// derived from the pod's resources rather than the container's.
				allocatedResources.Limits = kubeutil.GetLimits(&kubeutil.ResourceOpts{PodResources: allocatedPod.Spec.Resources, ContainerResources: &allocatedContainer.Resources})
			}

			// Only CPU and memory are resizable; other resources are ignored.
			return allocatedResources.Requests[v1.ResourceCPU].Equal(actuatedResources.Requests[v1.ResourceCPU]) &&
				allocatedResources.Limits[v1.ResourceCPU].Equal(actuatedResources.Limits[v1.ResourceCPU]) &&
				allocatedResources.Requests[v1.ResourceMemory].Equal(actuatedResources.Requests[v1.ResourceMemory]) &&
				allocatedResources.Limits[v1.ResourceMemory].Equal(actuatedResources.Limits[v1.ResourceMemory])
		})
}
// isPodLevelResourcesResizeInProgress reports whether the actuated pod-level
// CPU/memory resources differ from the allocated ones. Always false when the
// feature gate is off or the pod declares no pod-level resources.
func (m *kubeGenericRuntimeManager) isPodLevelResourcesResizeInProgress(allocatedPod *v1.Pod, podStatus *kubecontainer.PodStatus) bool {
	if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodLevelResourcesVerticalScaling) ||
		allocatedPod.Spec.Resources == nil {
		return false
	}

	actuated, _ := m.actuatedState.GetPodLevelResources(allocatedPod.UID)
	return !cpuMemoryResourcesEqual(actuated, allocatedPod.Spec.Resources)
}
// cpuMemoryResourcesEqual compares only the CPU and memory requests/limits of
// two (possibly nil) ResourceRequirements. A nil requirements struct is
// equivalent to one with empty resource lists: indexing a nil ResourceList
// yields a zero Quantity, which Equal treats as equal to another zero.
func cpuMemoryResourcesEqual(actuatedPodResources, allocatedPodResources *v1.ResourceRequirements) bool {
	sameCPUMem := func(a, b v1.ResourceList) bool {
		return a[v1.ResourceCPU].Equal(b[v1.ResourceCPU]) && a[v1.ResourceMemory].Equal(b[v1.ResourceMemory])
	}

	var actuatedReq, actuatedLim, allocatedReq, allocatedLim v1.ResourceList
	if actuatedPodResources != nil {
		actuatedReq, actuatedLim = actuatedPodResources.Requests, actuatedPodResources.Limits
	}
	if allocatedPodResources != nil {
		allocatedReq, allocatedLim = allocatedPodResources.Requests, allocatedPodResources.Limits
	}

	return sameCPUMem(actuatedReq, allocatedReq) && sameCPUMem(actuatedLim, allocatedLim)
}
func isResizableContainer(container *v1.Container, containerType podutil.ContainerType) bool {
switch containerType {
case podutil.InitContainers:
return podutil.IsRestartableInitContainer(container)
case podutil.Containers:
return true
default:
return false
}
} | go | github | https://github.com/kubernetes/kubernetes | pkg/kubelet/kuberuntime/kuberuntime_manager.go |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import paddle.fluid as fluid
import paddle
import sys
import numpy
import unittest
import math
import sys
import os
BATCH_SIZE = 64
def inference_program():
    """Build the MNIST classifier: two 200-unit tanh FC layers + softmax."""
    img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
    hidden1 = fluid.layers.fc(input=img, size=200, act='tanh')
    hidden2 = fluid.layers.fc(input=hidden1, size=200, act='tanh')
    return fluid.layers.fc(input=hidden2, size=10, act='softmax')
def train_program():
    """Attach label input, cross-entropy loss and accuracy to the network.

    Returns [avg_cost, acc]; the trainer minimizes the first element.
    """
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    predict = inference_program()
    avg_cost = fluid.layers.mean(
        fluid.layers.cross_entropy(input=predict, label=label))
    acc = fluid.layers.accuracy(input=predict, label=label)
    return [avg_cost, acc]
def train(use_cuda, train_program, params_dirname):
    """Train the MNIST model for one epoch and save parameters.

    :param use_cuda: run on GPU 0 when True, otherwise on CPU.
    :param train_program: callable building the training network
        (returns [avg_cost, acc]).
    :param params_dirname: directory to save parameters into once the
        test accuracy threshold is reached.
    """
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    optimizer = fluid.optimizer.Adam(learning_rate=0.001)
    trainer = fluid.Trainer(
        train_func=train_program, place=place, optimizer=optimizer)

    def event_handler(event):
        # Evaluate on the test set at the end of every epoch.
        if isinstance(event, fluid.EndEpochEvent):
            test_reader = paddle.batch(
                paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
            avg_cost, acc = trainer.test(
                reader=test_reader, feed_order=['img', 'label'])
            print("avg_cost: %s" % avg_cost)
            print("acc     : %s" % acc)

            if acc > 0.2:  # Smaller value to increase CI speed
                trainer.save_params(params_dirname)
            else:
                print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
                    event.epoch + 1, avg_cost, acc))
                # Abort early on divergence rather than training on NaNs.
                if math.isnan(avg_cost):
                    sys.exit("got NaN loss, training failed.")

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.mnist.train(), buf_size=500),
        batch_size=BATCH_SIZE)

    trainer.train(
        num_epochs=1,
        event_handler=event_handler,
        reader=train_reader,
        feed_order=['img', 'label'])
def infer(use_cuda, inference_program, params_dirname=None):
    """Load the saved parameters and run inference on one random image."""
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(
        infer_func=inference_program, param_path=params_dirname, place=place)

    # A single random MNIST-shaped image with values in [-1, 1).
    tensor_img = numpy.random.uniform(
        -1.0, 1.0, [1, 1, 28, 28]).astype("float32")
    results = inferencer.infer({'img': tensor_img})
    print("infer results: ", results[0])
def main(use_cuda):
    """Train the digit classifier, persist its parameters, then infer once."""
    params_dirname = "recognize_digits_mlp.inference.model"

    # call train() with is_local argument to run distributed train
    train(use_cuda=use_cuda,
          train_program=train_program,
          params_dirname=params_dirname)

    infer(use_cuda=use_cuda,
          inference_program=inference_program,
          params_dirname=params_dirname)
if __name__ == '__main__':
    # CUDA is disabled by default; restore the loop below to exercise both
    # the CPU and GPU code paths.
    # for use_cuda in (False, True):
    main(use_cuda=False)
/* Copyright 2022 - 2025 R. Thomas
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIEF_PDB_COMPILATION_UNIT_H
#define LIEF_PDB_COMPILATION_UNIT_H
#include <memory>
#include <string>
#include <vector>
#include <ostream>
#include "LIEF/iterators.hpp"
#include "LIEF/PDB/Function.hpp"
#include "LIEF/visibility.h"
namespace LIEF {
namespace pdb {
class BuildMetadata;
namespace details {
class CompilationUnit;
class CompilationUnitIt;
}
/// This class represents a CompilationUnit (or Module) in a PDB file
class LIEF_API CompilationUnit {
  public:
  /// Bidirectional iterator over compilation units.
  ///
  /// Dereferencing yields an owning std::unique_ptr<CompilationUnit>; the
  /// actual traversal is delegated to the pimpl type
  /// details::CompilationUnitIt.
  class LIEF_API Iterator {
    public:
    using iterator_category = std::bidirectional_iterator_tag;
    using value_type = std::unique_ptr<CompilationUnit>;
    using difference_type = std::ptrdiff_t;
    using pointer = CompilationUnit*;
    using reference = CompilationUnit&;
    using implementation = details::CompilationUnitIt;

    /// Proxy returned by operator-> so that `it->member` works even though
    /// operator* returns a temporary unique_ptr.
    class PointerProxy {
      // Inspired from LLVM's iterator_facade_base
      friend class Iterator;
      public:
      pointer operator->() const { return R.get(); }

      private:
      value_type R;

      template <typename RefT>
      PointerProxy(RefT &&R) : R(std::forward<RefT>(R)) {} // NOLINT(bugprone-forwarding-reference-overload)
    };

    Iterator(const Iterator&);
    Iterator(Iterator&&);
    Iterator(std::unique_ptr<details::CompilationUnitIt> impl);
    ~Iterator();

    friend LIEF_API bool operator==(const Iterator& LHS, const Iterator& RHS);
    friend LIEF_API bool operator!=(const Iterator& LHS, const Iterator& RHS) {
      return !(LHS == RHS);
    }

    Iterator& operator++();
    Iterator& operator--();

    // Post-increment/decrement are defined in terms of the prefix forms.
    Iterator operator--(int) {
      Iterator tmp = *static_cast<Iterator*>(this);
      --*static_cast<Iterator *>(this);
      return tmp;
    }

    Iterator operator++(int) {
      Iterator tmp = *static_cast<Iterator*>(this);
      ++*static_cast<Iterator *>(this);
      return tmp;
    }

    std::unique_ptr<CompilationUnit> operator*() const;

    PointerProxy operator->() const {
      return static_cast<const Iterator*>(this)->operator*();
    }

    private:
    std::unique_ptr<details::CompilationUnitIt> impl_;
  };

  /// Iterator over the sources file (std::string)
  using sources_iterator = iterator_range<std::vector<std::string>::const_iterator>;

  /// Iterator over the functions defined in this compilation unit.
  using function_iterator = iterator_range<Function::Iterator>;

  CompilationUnit(std::unique_ptr<details::CompilationUnit> impl);
  ~CompilationUnit();

  /// Name (or path) to the COFF object (`.obj`) associated with this
  /// compilation unit (e.g. `e:\obj.amd64fre\minkernel\ntos\hvl\mp\objfre\amd64\hvlp.obj`)
  std::string module_name() const;

  /// Name of path to the original binary object (COFF, Archive) in which
  /// the compilation unit was located before being linked.
  /// e.g. `e:\obj.amd64fre\minkernel\ntos\hvl\mp\objfre\amd64\hvl.lib`
  std::string object_filename() const;

  /// Iterator over the sources files that compose this compilation unit.
  /// These files also include **headers** (`.h, .hpp`, ...).
  sources_iterator sources() const;

  /// Return an iterator over the function defined in this compilation unit.
  /// If the PDB does not contain or has an empty DBI stream, it returns
  /// an empty iterator.
  function_iterator functions() const;

  /// Return build metadata such as the version of the compiler or
  /// the original source language of this compilation unit
  std::unique_ptr<BuildMetadata> build_metadata() const;

  /// Human-readable summary of this compilation unit.
  std::string to_string() const;

  LIEF_API friend
    std::ostream& operator<<(std::ostream& os, const CompilationUnit& CU)
  {
    os << CU.to_string();
    return os;
  }

  private:
  std::unique_ptr<details::CompilationUnit> impl_;
};
}
}
#endif | unknown | github | https://github.com/nodejs/node | deps/LIEF/include/LIEF/PDB/CompilationUnit.hpp |
from django.core.management import call_command
from django.test import override_settings
from .test_base import MigrationTestBase
class Tests(MigrationTestBase):
    """
    Deprecated model fields should still be usable in historic migrations.
    """

    @override_settings(
        MIGRATION_MODULES={"migrations": "migrations.deprecated_field_migrations"}
    )
    def test_migrate(self):
        """Apply and unapply migrations that use a removed/deprecated field."""
        # Make sure no tables are created
        self.assertTableNotExists("migrations_ipaddressfield")
        # Run migration
        call_command("migrate", verbosity=0)
        # Make sure the right tables exist
        self.assertTableExists("migrations_ipaddressfield")
        # Unmigrate everything
        call_command("migrate", "migrations", "zero", verbosity=0)
        # Make sure it's all gone
        self.assertTableNotExists("migrations_ipaddressfield")
# SPDX-License-Identifier: GPL-2.0-only
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/ilitek,ili9486.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Ilitek ILI9486 display panels
maintainers:
- Kamlesh Gurudasani <kamlesh.gurudasani@gmail.com>
description:
This binding is for display panels using an Ilitek ILI9486 controller in SPI
mode.
allOf:
- $ref: panel/panel-common.yaml#
properties:
compatible:
items:
- enum:
# Waveshare 3.5" 320x480 Color TFT LCD
- waveshare,rpi-lcd-35
# Ozzmaker 3.5" 320x480 Color TFT LCD
- ozzmaker,piscreen
- const: ilitek,ili9486
spi-max-frequency:
maximum: 32000000
dc-gpios:
maxItems: 1
description: Display data/command selection (D/CX)
backlight: true
reg: true
reset-gpios: true
rotation: true
required:
- compatible
- reg
- dc-gpios
- reset-gpios
additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
spi {
#address-cells = <1>;
#size-cells = <0>;
display@0{
compatible = "waveshare,rpi-lcd-35", "ilitek,ili9486";
reg = <0>;
spi-max-frequency = <32000000>;
dc-gpios = <&gpio0 24 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpio0 25 GPIO_ACTIVE_HIGH>;
rotation = <180>;
backlight = <&backlight>;
};
};
... | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/display/ilitek,ili9486.yaml |
"""
This module implements the Request class which is used to represent HTTP
requests in Scrapy.
See documentation in docs/topics/request-response.rst
"""
import six
from w3lib.url import safe_url_string
from scrapy.http.headers import Headers
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import escape_ajax
from scrapy.http.common import obsolete_setter
class Request(object_ref):
    """Represents an HTTP request.

    ``url`` and ``body`` are effectively read-only after construction: their
    property setters go through ``obsolete_setter``; use :meth:`replace` to
    derive a modified copy instead.
    """

    def __init__(self, url, callback=None, method='GET', headers=None, body=None,
                 cookies=None, meta=None, encoding='utf-8', priority=0,
                 dont_filter=False, errback=None):
        self._encoding = encoding  # this one has to be set first
        self.method = str(method).upper()
        self._set_url(url)
        self._set_body(body)
        assert isinstance(priority, int), "Request priority not an integer: %r" % priority
        self.priority = priority

        assert callback or not errback, "Cannot use errback without a callback"
        self.callback = callback
        self.errback = errback

        self.cookies = cookies or {}
        self.headers = Headers(headers or {}, encoding=encoding)
        self.dont_filter = dont_filter

        # Stored as None until first access (see the `meta` property) so that
        # requests without metadata don't allocate an empty dict.
        self._meta = dict(meta) if meta else None

    @property
    def meta(self):
        # Lazily create the metadata dict on first access.
        if self._meta is None:
            self._meta = {}
        return self._meta

    def _get_url(self):
        return self._url

    def _set_url(self, url):
        # Accept str directly; unicode is encoded with the request encoding
        # and re-dispatched through this setter.
        if isinstance(url, str):
            self._url = escape_ajax(safe_url_string(url))
        elif isinstance(url, six.text_type):
            if self.encoding is None:
                raise TypeError('Cannot convert unicode url - %s has no encoding' %
                    type(self).__name__)
            self._set_url(url.encode(self.encoding))
        else:
            raise TypeError('Request url must be str or unicode, got %s:' % type(url).__name__)
        # A URL without a scheme separator is rejected early.
        if ':' not in self._url:
            raise ValueError('Missing scheme in request url: %s' % self._url)

    url = property(_get_url, obsolete_setter(_set_url, 'url'))

    def _get_body(self):
        return self._body

    def _set_body(self, body):
        if isinstance(body, str):
            self._body = body
        elif isinstance(body, six.text_type):
            if self.encoding is None:
                raise TypeError('Cannot convert unicode body - %s has no encoding' %
                    type(self).__name__)
            self._body = body.encode(self.encoding)
        elif body is None:
            self._body = ''
        else:
            raise TypeError("Request body must either str or unicode. Got: '%s'" % type(body).__name__)

    body = property(_get_body, obsolete_setter(_set_body, 'body'))

    @property
    def encoding(self):
        return self._encoding

    def __str__(self):
        return "<%s %s>" % (self.method, self.url)

    __repr__ = __str__

    def copy(self):
        """Return a copy of this Request"""
        return self.replace()

    def replace(self, *args, **kwargs):
        """Create a new Request with the same attributes except for those
        given new values.
        """
        for x in ['url', 'method', 'headers', 'body', 'cookies', 'meta',
                  'encoding', 'priority', 'dont_filter', 'callback', 'errback']:
            kwargs.setdefault(x, getattr(self, x))
        # `cls` may be passed to build a Request subclass from this instance.
        cls = kwargs.pop('cls', self.__class__)
        return cls(*args, **kwargs)
# -*- coding: utf-8 -*-
#
import json
import logging
from django.http import HttpResponse
from django.views.generic import View
from codex.baseerror import BaseError, InputError
__author__ = "Epsirom"
class BaseView(View):
    """Common base view: remembers the request and delegates handling to
    ``do_dispatch()``, which concrete subclasses must implement."""

    logger = logging.getLogger('View')

    def dispatch(self, request, *args, **kwargs):
        # Store the request so helper methods can reach it without having to
        # thread it through every call.
        self.request = request
        return self.do_dispatch(*args, **kwargs)

    def do_dispatch(self, *args, **kwargs):
        raise NotImplementedError('You should implement do_dispatch() in sub-class of BaseView')

    def http_method_not_allowed(self, *args, **kwargs):
        # Django's implementation expects the request as its first argument.
        return super(BaseView, self).http_method_not_allowed(self.request, *args, **kwargs)
class APIView(BaseView):
    """JSON API view: parses the input, dispatches on HTTP method and wraps
    the handler result in a ``{code, msg, data}`` JSON envelope."""

    logger = logging.getLogger('API')

    def do_dispatch(self, *args, **kwargs):
        # Query/form parameters take precedence over the JSON body.
        self.input = self.query or self.body
        handler = getattr(self, self.request.method.lower(), None)
        if not callable(handler):
            return self.http_method_not_allowed()
        return self.api_wrapper(handler, *args, **kwargs)

    @property
    def body(self):
        """Request body parsed as JSON; an empty body yields ``{}``."""
        return json.loads(self.request.body.decode() or '{}')

    @property
    def query(self):
        """GET/POST parameters plus uploaded files, merged into a dict."""
        d = getattr(self.request, self.request.method, None)
        d = d.dict() if d else dict()
        d.update(self.request.FILES)
        return d

    def api_wrapper(self, func, *args, **kwargs):
        """Run *func* and serialize its result (or the error) as JSON.

        ``BaseError`` keeps its own code/msg; any other exception maps to
        ``code=-1`` with ``str(e)`` as the message.
        """
        code = 0
        msg = ''
        result = None
        try:
            result = func(*args, **kwargs)
        except BaseError as e:
            code = e.code
            msg = e.msg
            self.logger.exception('Error occurred when requesting %s: %s', self.request.path, e)
        except Exception as e:
            code = -1
            msg = str(e)
            self.logger.exception('Error occurred when requesting %s: %s', self.request.path, e)
        try:
            response = json.dumps({
                'code': code,
                'msg': msg,
                'data': result,
            })
        except (TypeError, ValueError):
            # json.dumps only raises TypeError/ValueError; the previous bare
            # `except:` would also have swallowed SystemExit/KeyboardInterrupt.
            self.logger.exception('JSON Serializing failed in requesting %s', self.request.path)
            code = -1
            msg = 'Internal Error'
            response = json.dumps({
                'code': code,
                'msg': msg,
                'data': None,
            })
        return HttpResponse(response, content_type='application/json')

    def check_input(self, *keys):
        """Raise InputError if any of *keys* is missing from ``self.input``."""
        for k in keys:
            if k not in self.input:
                raise InputError('Field "%s" required' % (k, ))
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Public surface of the parser package: label-matcher helpers, the PromQL
// Parser itself, and syntax-tree traversal utilities.
export { buildLabelMatchers, labelMatchersToString } from './matcher';
export { Parser } from './parser';
export { walkBackward, containsAtLeastOneChild, containsChild } from './path-finder';
# $HeadURL: $
""" Statistics
Module containing little helpers that extract information from the RSS databases
providing information for comparisons and plots.
"""
import datetime
# DIRAC
from DIRAC import S_ERROR, S_OK
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
#from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.ResourceStatusSystem.Utilities.RssConfiguration import getValidElements, getValidStatus
__RCSID__ = '$Id: $'
class Statistics( object ):
  """
  Statistics class that provides helpers to extract information from the database
  more easily.
  """

  def __init__( self ):
    """
    Constructor
    """
    self.rsClient = ResourceStatusClient()
    #self.rmClient = ResourceManagementClient()

  def getElementHistory( self, element, elementName, statusType,
                         oldAs = None, newAs = None ):
    """
    Returns the succession of statuses and the dates since they are effective. The
    values are comprised in the time interval [ oldAs, newAs ]. If not specified,
    all values up to the present are returned.
    It returns a list of tuples, of which the first element is the Status and the
    second one the time-stamp since it is effective. Note that the time-stamps will
    not necessarily match the time window.

    :Parameters:
      **element** - `str`
        element family ( either Site, Resource or Node )
      **elementName** - `str`
        element name
      **statusType** - `str`
        status type of the element <elementName> (e.g. 'all', 'ReadAccess',... )
      **oldAs** - [ None, `datetime` ]
        datetime with the start point for the time window. If not specified, it
        is used the oldest time in the history.
      **newAs** - [ None, `datetime` ]
        datetime with the end point for the time window. If not specified, it
        is used datetime.utcnow.

    :return: S_OK( [ (StatusA, datetimeA),(StatusB,datetimeB) ] ) | S_ERROR
    """

    # Checks we are not passing a silly element ( we only accept Site, Resource and Node )
    if element not in getValidElements():
      return S_ERROR( '"%s" is not a valid element' % element )

    # FIXME: read below
    # Gets all elements in history. If the history is long, this query is going to
    # be rather heavy...
    result = self.rsClient.selectStatusElement( element, 'History', name = elementName,
                                                statusType = statusType,
                                                meta = { 'columns' : [ 'Status', 'DateEffective' ] } )
    if not result[ 'OK' ]:
      return result
    result = result[ 'Value' ]

    if not result:
      return S_OK( [] )

    # To avoid making exceptions in the for-loop, we feed history with the first
    # item in the results
    history = [ result[ 0 ] ]

    # Sets defaults explicitly ( replaces the obsolete `( 1 and x ) or y` idiom ).
    # OldAs is as old as datetime.min if not defined.
    if oldAs is None:
      oldAs = datetime.datetime.min
    # NewAs is as new as set or datetime.utcnow
    if newAs is None:
      newAs = datetime.datetime.utcnow()

    # Sanity check: no funny time windows
    if oldAs > newAs:
      return S_ERROR( "oldAs (%s) > newAs (%s)" % ( oldAs, newAs ) )

    # This avoids that the window finishes before having the first point in the
    # history.
    if history[ 0 ][ 1 ] > newAs:
      return S_OK( [] )

    # Iterate starting from the second element in the list. The elements in the
    # list are SORTED. Otherwise, the break statement would be a mess. And same
    # applies for the elif
    for historyElement in result[ 1: ]:

      # If the point is newer than the superior limit of the window, we are done.
      if historyElement[ 1 ] > newAs:
        break
      # If the point is older than the window lower limit, we buffer it. We just
      # want the closest point to the lower limit.
      elif historyElement[ 1 ] <= oldAs:
        history = [ historyElement ]
      # Otherwise, we add it to the history
      else:
        history.append( historyElement )

    return S_OK( history )

  def getElementStatusAt( self, element, elementName, statusType, statusTime ):
    """
    Returns the status of the <element><elementName><statusType> at the given
    time <statusTime>. If not know, will return an empty list. If known, will
    return a tuple with two elements: Status and time since it is effective.

    :Parameters:
      **element** - `str`
        element family ( either Site, Resource or Node )
      **elementName** - `str`
        element name
      **statusType** - `str`
        status type of the element <elementName> (e.g. 'all', 'ReadAccess',... )
      **statusTime** - `datetime`
        datetime when we want to know the status of <element><elementName><statusType>

    :return: S_OK( (StatusA, datetimeA) ) | S_ERROR
    """

    # A point-in-time query is a degenerate window [ statusTime, statusTime ].
    result = self.getElementHistory( element, elementName, statusType, statusTime, statusTime )
    if not result[ 'OK' ]:
      return result
    result = result[ 'Value' ]

    if result:
      result = list( result[ 0 ] )
    return S_OK( result )

  def getElementStatusTotalTimes( self, element, elementName, statusType,
                                  oldAs = None, newAs = None ):
    """
    Returns a dictionary with all the possible statuses as keys and as values the
    number of seconds that <element><elementName><statusType> hold it for a time
    window between [ oldAs, newAs ]. If oldAs is not defined, it is considered
    as datetime.min. If newAs is not defined, it is considered datetime.utcnow.

    :Parameters:
      **element** - `str`
        element family ( either Site, Resource or Node )
      **elementName** - `str`
        element name
      **statusType** - `str`
        status type of the element <elementName> (e.g. 'all', 'ReadAccess',... )
      **oldAs** - [ None, `datetime` ]
        datetime with the start point for the time window. If not specified, it
        is used the oldest time in the history.
      **newAs** - [ None, `datetime` ]
        datetime with the end point for the time window. If not specified, it
        is used datetime.utcnow.

    :return: S_OK( [ { StatusA : secondsA },{ StatusB : secondsB } ] ) | S_ERROR
    """

    # Gets all history withing the window
    result = self.getElementHistory( element, elementName, statusType, oldAs, newAs )
    if not result[ 'OK' ]:
      return result
    result = result[ 'Value' ]

    # Dictionary to be returned
    statusCounter = dict.fromkeys( getValidStatus()[ 'Value' ], 0 )

    # If history is empty, return empty dictionary
    if not result:
      return S_OK( statusCounter )

    # Set defaults explicitly ( replaces the obsolete `( 1 and x ) or y` idiom ).
    if oldAs is None:
      oldAs = datetime.datetime.min
    if newAs is None:
      newAs = datetime.datetime.utcnow()
    # If users are not behaving well, we force newAs to not be in the future.
    newAs = min( newAs, datetime.datetime.utcnow() )

    # Iterate over the results in tuples.
    for statusTuple in zip( result, result[ 1: ] ):

      # Make sure the time taken as base is not older than the lower limit of
      # the window. In principle, this should be only checked on the first element,
      # but it is harmless anyway and cleaner than the if-else.
      startingPoint = max( statusTuple[ 0 ][ 1 ], oldAs )

      # Get number of seconds and add them
      statusCounter[ statusTuple[ 0 ][ 0 ] ] += timedelta_to_seconds( statusTuple[ 1 ][ 1 ] - startingPoint )

    # The method selected to iterate over the results does not take into account the
    # last one. Gets the time using as lower limit the window lower limit. This applies
    # when we have only one element in the list for example.
    statusCounter[ result[ -1 ][ 0 ] ] += timedelta_to_seconds( newAs - max( result[ -1 ][ 1 ], oldAs ) )

    return S_OK( statusCounter )
def timedelta_to_seconds( duration ):
  """
  As Python does not provide a function to transform a timedelta into seconds,
  here we go.

  :Parameters:
    **duration** - `datetime.timedelta`
      timedelta to be transformed into seconds

  :return: int ( seconds )
  """
  # timedelta normalizes so that 0 <= seconds < 86400, hence days and seconds
  # fully determine the whole-second count. Microseconds are ignored, exactly
  # as in the original hours/minutes expansion.
  return ( duration.days * 86400 ) + duration.seconds
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF | unknown | codeparrot/codeparrot-clean | ||
/*[clinic input]
preserve
[clinic start generated code]*/
#if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
# include "pycore_gc.h" // PyGC_Head
# include "pycore_runtime.h" // _Py_ID()
#endif
#include "pycore_critical_section.h"// Py_BEGIN_CRITICAL_SECTION()
#include "pycore_modsupport.h" // _PyArg_NoKeywords()
PyDoc_STRVAR(simplequeue_new__doc__,
"SimpleQueue()\n"
"--\n"
"\n"
"Simple, unbounded, reentrant FIFO queue.");

static PyObject *
simplequeue_new_impl(PyTypeObject *type);

/* Argument Clinic generated wrapper: rejects any positional or keyword
 * arguments (unless a subclass overrides tp_init) before calling the impl.
 * NOTE(review): generated code — regenerate via the [clinic input] block
 * rather than editing by hand. */
static PyObject *
simplequeue_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
{
    PyObject *return_value = NULL;
    PyTypeObject *base_tp = simplequeue_get_state_by_type(type)->SimpleQueueType;

    if ((type == base_tp || type->tp_init == base_tp->tp_init) &&
        !_PyArg_NoPositional("SimpleQueue", args)) {
        goto exit;
    }
    if ((type == base_tp || type->tp_init == base_tp->tp_init) &&
        !_PyArg_NoKeywords("SimpleQueue", kwargs)) {
        goto exit;
    }
    return_value = simplequeue_new_impl(type);

exit:
    return return_value;
}
PyDoc_STRVAR(_queue_SimpleQueue_put__doc__,
"put($self, /, item, block=True, timeout=None)\n"
"--\n"
"\n"
"Put the item on the queue.\n"
"\n"
"The optional \'block\' and \'timeout\' arguments are ignored, as this method\n"
"never blocks. They are provided for compatibility with the Queue class.");

#define _QUEUE_SIMPLEQUEUE_PUT_METHODDEF    \
    {"put", _PyCFunction_CAST(_queue_SimpleQueue_put), METH_FASTCALL|METH_KEYWORDS, _queue_SimpleQueue_put__doc__},

static PyObject *
_queue_SimpleQueue_put_impl(simplequeueobject *self, PyObject *item,
                            int block, PyObject *timeout);

/* Argument Clinic generated wrapper: parses (item, block=True, timeout=None)
 * and invokes the impl inside the object's critical section. */
static PyObject *
_queue_SimpleQueue_put(PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)

    /* Statically allocated keyword tuple, avoiding a runtime allocation
     * when built as part of the core. */
    #define NUM_KEYWORDS 3
    static struct {
        PyGC_Head _this_is_not_used;
        PyObject_VAR_HEAD
        Py_hash_t ob_hash;
        PyObject *ob_item[NUM_KEYWORDS];
    } _kwtuple = {
        .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
        .ob_hash = -1,
        .ob_item = { &_Py_ID(item), &_Py_ID(block), &_Py_ID(timeout), },
    };
    #undef NUM_KEYWORDS
    #define KWTUPLE (&_kwtuple.ob_base.ob_base)

    #else // !Py_BUILD_CORE
    # define KWTUPLE NULL
    #endif // !Py_BUILD_CORE

    static const char * const _keywords[] = {"item", "block", "timeout", NULL};
    static _PyArg_Parser _parser = {
        .keywords = _keywords,
        .fname = "put",
        .kwtuple = KWTUPLE,
    };
    #undef KWTUPLE
    PyObject *argsbuf[3];
    Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 1;
    PyObject *item;
    int block = 1;
    PyObject *timeout = Py_None;

    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser,
            /*minpos*/ 1, /*maxpos*/ 3, /*minkw*/ 0, /*varpos*/ 0, argsbuf);
    if (!args) {
        goto exit;
    }
    item = args[0];
    if (!noptargs) {
        goto skip_optional_pos;
    }
    if (args[1]) {
        block = PyObject_IsTrue(args[1]);
        if (block < 0) {
            goto exit;
        }
        if (!--noptargs) {
            goto skip_optional_pos;
        }
    }
    timeout = args[2];
skip_optional_pos:
    Py_BEGIN_CRITICAL_SECTION(self);
    return_value = _queue_SimpleQueue_put_impl((simplequeueobject *)self, item, block, timeout);
    Py_END_CRITICAL_SECTION();

exit:
    return return_value;
}
PyDoc_STRVAR(_queue_SimpleQueue_put_nowait__doc__,
"put_nowait($self, /, item)\n"
"--\n"
"\n"
"Put an item into the queue without blocking.\n"
"\n"
"This is exactly equivalent to `put(item)` and is only provided\n"
"for compatibility with the Queue class.");

#define _QUEUE_SIMPLEQUEUE_PUT_NOWAIT_METHODDEF    \
    {"put_nowait", _PyCFunction_CAST(_queue_SimpleQueue_put_nowait), METH_FASTCALL|METH_KEYWORDS, _queue_SimpleQueue_put_nowait__doc__},

static PyObject *
_queue_SimpleQueue_put_nowait_impl(simplequeueobject *self, PyObject *item);

/* Argument Clinic generated wrapper: parses a single required 'item'
 * argument and invokes the impl inside the object's critical section. */
static PyObject *
_queue_SimpleQueue_put_nowait(PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)

    #define NUM_KEYWORDS 1
    static struct {
        PyGC_Head _this_is_not_used;
        PyObject_VAR_HEAD
        Py_hash_t ob_hash;
        PyObject *ob_item[NUM_KEYWORDS];
    } _kwtuple = {
        .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
        .ob_hash = -1,
        .ob_item = { &_Py_ID(item), },
    };
    #undef NUM_KEYWORDS
    #define KWTUPLE (&_kwtuple.ob_base.ob_base)

    #else // !Py_BUILD_CORE
    # define KWTUPLE NULL
    #endif // !Py_BUILD_CORE

    static const char * const _keywords[] = {"item", NULL};
    static _PyArg_Parser _parser = {
        .keywords = _keywords,
        .fname = "put_nowait",
        .kwtuple = KWTUPLE,
    };
    #undef KWTUPLE
    PyObject *argsbuf[1];
    PyObject *item;

    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser,
            /*minpos*/ 1, /*maxpos*/ 1, /*minkw*/ 0, /*varpos*/ 0, argsbuf);
    if (!args) {
        goto exit;
    }
    item = args[0];
    Py_BEGIN_CRITICAL_SECTION(self);
    return_value = _queue_SimpleQueue_put_nowait_impl((simplequeueobject *)self, item);
    Py_END_CRITICAL_SECTION();

exit:
    return return_value;
}
PyDoc_STRVAR(_queue_SimpleQueue_get__doc__,
"get($self, /, block=True, timeout=None)\n"
"--\n"
"\n"
"Remove and return an item from the queue.\n"
"\n"
"If optional args \'block\' is true and \'timeout\' is None (the default),\n"
"block if necessary until an item is available. If \'timeout\' is\n"
"a non-negative number, it blocks at most \'timeout\' seconds and raises\n"
"the Empty exception if no item was available within that time.\n"
"Otherwise (\'block\' is false), return an item if one is immediately\n"
"available, else raise the Empty exception (\'timeout\' is ignored\n"
"in that case).");

#define _QUEUE_SIMPLEQUEUE_GET_METHODDEF    \
    {"get", _PyCFunction_CAST(_queue_SimpleQueue_get), METH_METHOD|METH_FASTCALL|METH_KEYWORDS, _queue_SimpleQueue_get__doc__},

static PyObject *
_queue_SimpleQueue_get_impl(simplequeueobject *self, PyTypeObject *cls,
                            int block, PyObject *timeout_obj);

/* Argument Clinic generated wrapper: parses (block=True, timeout=None),
 * receives the defining class (METH_METHOD), and invokes the impl inside
 * the object's critical section. */
static PyObject *
_queue_SimpleQueue_get(PyObject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;
    #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)

    #define NUM_KEYWORDS 2
    static struct {
        PyGC_Head _this_is_not_used;
        PyObject_VAR_HEAD
        Py_hash_t ob_hash;
        PyObject *ob_item[NUM_KEYWORDS];
    } _kwtuple = {
        .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
        .ob_hash = -1,
        .ob_item = { &_Py_ID(block), &_Py_ID(timeout), },
    };
    #undef NUM_KEYWORDS
    #define KWTUPLE (&_kwtuple.ob_base.ob_base)

    #else // !Py_BUILD_CORE
    # define KWTUPLE NULL
    #endif // !Py_BUILD_CORE

    static const char * const _keywords[] = {"block", "timeout", NULL};
    static _PyArg_Parser _parser = {
        .keywords = _keywords,
        .fname = "get",
        .kwtuple = KWTUPLE,
    };
    #undef KWTUPLE
    PyObject *argsbuf[2];
    Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 0;
    int block = 1;
    PyObject *timeout_obj = Py_None;

    args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser,
            /*minpos*/ 0, /*maxpos*/ 2, /*minkw*/ 0, /*varpos*/ 0, argsbuf);
    if (!args) {
        goto exit;
    }
    if (!noptargs) {
        goto skip_optional_pos;
    }
    if (args[0]) {
        block = PyObject_IsTrue(args[0]);
        if (block < 0) {
            goto exit;
        }
        if (!--noptargs) {
            goto skip_optional_pos;
        }
    }
    timeout_obj = args[1];
skip_optional_pos:
    Py_BEGIN_CRITICAL_SECTION(self);
    return_value = _queue_SimpleQueue_get_impl((simplequeueobject *)self, cls, block, timeout_obj);
    Py_END_CRITICAL_SECTION();

exit:
    return return_value;
}
PyDoc_STRVAR(_queue_SimpleQueue_get_nowait__doc__,
"get_nowait($self, /)\n"
"--\n"
"\n"
"Remove and return an item from the queue without blocking.\n"
"\n"
"Only get an item if one is immediately available. Otherwise\n"
"raise the Empty exception.");

#define _QUEUE_SIMPLEQUEUE_GET_NOWAIT_METHODDEF    \
    {"get_nowait", _PyCFunction_CAST(_queue_SimpleQueue_get_nowait), METH_METHOD|METH_FASTCALL|METH_KEYWORDS, _queue_SimpleQueue_get_nowait__doc__},

static PyObject *
_queue_SimpleQueue_get_nowait_impl(simplequeueobject *self,
                                   PyTypeObject *cls);

/* Argument Clinic generated wrapper: rejects all arguments, then invokes
 * the impl inside the object's critical section. */
static PyObject *
_queue_SimpleQueue_get_nowait(PyObject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    PyObject *return_value = NULL;

    if (nargs || (kwnames && PyTuple_GET_SIZE(kwnames))) {
        PyErr_SetString(PyExc_TypeError, "get_nowait() takes no arguments");
        goto exit;
    }
    Py_BEGIN_CRITICAL_SECTION(self);
    return_value = _queue_SimpleQueue_get_nowait_impl((simplequeueobject *)self, cls);
    Py_END_CRITICAL_SECTION();

exit:
    return return_value;
}
PyDoc_STRVAR(_queue_SimpleQueue_empty__doc__,
"empty($self, /)\n"
"--\n"
"\n"
"Return True if the queue is empty, False otherwise (not reliable!).");

#define _QUEUE_SIMPLEQUEUE_EMPTY_METHODDEF    \
    {"empty", (PyCFunction)_queue_SimpleQueue_empty, METH_NOARGS, _queue_SimpleQueue_empty__doc__},

static int
_queue_SimpleQueue_empty_impl(simplequeueobject *self);

/* Argument Clinic generated wrapper: converts the impl's int result
 * (-1 on error) into a Python bool. */
static PyObject *
_queue_SimpleQueue_empty(PyObject *self, PyObject *Py_UNUSED(ignored))
{
    PyObject *return_value = NULL;
    int _return_value;

    Py_BEGIN_CRITICAL_SECTION(self);
    _return_value = _queue_SimpleQueue_empty_impl((simplequeueobject *)self);
    Py_END_CRITICAL_SECTION();
    if ((_return_value == -1) && PyErr_Occurred()) {
        goto exit;
    }
    return_value = PyBool_FromLong((long)_return_value);

exit:
    return return_value;
}
PyDoc_STRVAR(_queue_SimpleQueue_qsize__doc__,
"qsize($self, /)\n"
"--\n"
"\n"
"Return the approximate size of the queue (not reliable!).");

#define _QUEUE_SIMPLEQUEUE_QSIZE_METHODDEF    \
    {"qsize", (PyCFunction)_queue_SimpleQueue_qsize, METH_NOARGS, _queue_SimpleQueue_qsize__doc__},

static Py_ssize_t
_queue_SimpleQueue_qsize_impl(simplequeueobject *self);

/* Argument Clinic generated wrapper: converts the impl's Py_ssize_t result
 * (-1 on error) into a Python int. */
static PyObject *
_queue_SimpleQueue_qsize(PyObject *self, PyObject *Py_UNUSED(ignored))
{
    PyObject *return_value = NULL;
    Py_ssize_t _return_value;

    Py_BEGIN_CRITICAL_SECTION(self);
    _return_value = _queue_SimpleQueue_qsize_impl((simplequeueobject *)self);
    Py_END_CRITICAL_SECTION();
    if ((_return_value == -1) && PyErr_Occurred()) {
        goto exit;
    }
    return_value = PyLong_FromSsize_t(_return_value);

exit:
    return return_value;
}
PyDoc_STRVAR(_queue_SimpleQueue___sizeof____doc__,
"__sizeof__($self, /)\n"
"--\n"
"\n"
"Returns size in memory, in bytes.");

#define _QUEUE_SIMPLEQUEUE___SIZEOF___METHODDEF    \
    {"__sizeof__", (PyCFunction)_queue_SimpleQueue___sizeof__, METH_NOARGS, _queue_SimpleQueue___sizeof____doc__},

static Py_ssize_t
_queue_SimpleQueue___sizeof___impl(simplequeueobject *self);

/* Argument Clinic generated wrapper: converts the impl's Py_ssize_t result
 * (-1 on error) into a Python int. */
static PyObject *
_queue_SimpleQueue___sizeof__(PyObject *self, PyObject *Py_UNUSED(ignored))
{
    PyObject *return_value = NULL;
    Py_ssize_t _return_value;

    Py_BEGIN_CRITICAL_SECTION(self);
    _return_value = _queue_SimpleQueue___sizeof___impl((simplequeueobject *)self);
    Py_END_CRITICAL_SECTION();
    if ((_return_value == -1) && PyErr_Occurred()) {
        goto exit;
    }
    return_value = PyLong_FromSsize_t(_return_value);

exit:
    return return_value;
}
/*[clinic end generated code: output=4af5d1b1ea31ac7d input=a9049054013a1b77]*/ | c | github | https://github.com/python/cpython | Modules/clinic/_queuemodule.c.h |
"""Generates common contexts"""
import logging
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.keys import CourseKey
from opaque_keys import InvalidKeyError
from util.request import COURSE_REGEX
log = logging.getLogger(__name__)
def course_context_from_url(url):
    """
    Extracts the course_context from the given `url` and passes it on to
    `course_context_from_course_id()`.
    """
    match = COURSE_REGEX.match(url or '')

    course_id = None
    if match is not None:
        raw_course_id = match.group('course_id')
        try:
            course_id = SlashSeparatedCourseKey.from_deprecated_string(raw_course_id)
        except InvalidKeyError:
            # An unparseable id falls through to the empty context below.
            log.warning(
                'unable to parse course_id "{course_id}"'.format(course_id=raw_course_id),
                exc_info=True
            )

    return course_context_from_course_id(course_id)
def course_context_from_course_id(course_id):
    """
    Creates a course context from a `course_id`.

    Example Returned Context::

        {
            'course_id': 'org/course/run',
            'org_id': 'org'
        }

    """
    if course_id is None:
        return {'course_id': '', 'org_id': ''}

    # TODO: Make this accept any CourseKey, and serialize it using .to_string
    assert isinstance(course_id, CourseKey)
    context = {
        'course_id': course_id.to_deprecated_string(),
        'org_id': course_id.org,
    }
    return context
// This code has been modified from its original form by The Cockroach Authors.
// All modifications are Copyright 2024 The Cockroach Authors.
//
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package raft
import (
"fmt"
"math"
"math/rand"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/raft/raftlogger"
pb "github.com/cockroachdb/cockroach/pkg/raft/raftpb"
"github.com/cockroachdb/cockroach/pkg/raft/raftstoreliveness"
"github.com/cockroachdb/cockroach/pkg/raft/tracker"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// nextEnts returns the appliable entries and updates the applied index.
//
// It simulates the storage round-trip a real node performs: persist the
// unstable entries, mark them stable, run the post-append message steps,
// and finally consume (and mark applied) the committed entries.
func nextEnts(r *raft, s *MemoryStorage) (ents []pb.Entry) {
	// Append unstable entries.
	s.Append(r.raftLog.nextUnstableEnts())
	r.raftLog.stableTo(r.raftLog.unstable.mark())
	// Run post-append steps.
	r.advanceMessagesAfterAppend()
	// Return committed entries.
	ents = r.raftLog.nextCommittedEnts(true)
	r.raftLog.appliedTo(r.raftLog.committed)
	return ents
}
// mustAppendEntry appends ents to r's log and panics if the append is
// rejected (appendEntry returning false).
func mustAppendEntry(r *raft, ents ...pb.Entry) {
	if ok := r.appendEntry(ents...); !ok {
		panic("entry unexpectedly dropped")
	}
}
// stateMachine is the minimal node interface used by the test network:
// step an incoming message and drain outgoing messages.
type stateMachine interface {
	Step(m pb.Message) error
	readMessages() []pb.Message
	advanceMessagesAfterAppend()
}
// readMessages drains and returns all outgoing messages, first running the
// post-append steps so self-addressed messages are folded in.
func (r *raft) readMessages() []pb.Message {
	r.advanceMessagesAfterAppend()
	taken := r.msgs
	r.msgs = nil
	return taken
}
// advanceMessagesAfterAppend repeatedly drains the after-append message
// queue, stepping/sending each batch, until no more messages are produced.
func (r *raft) advanceMessagesAfterAppend() {
	for msgs := r.takeMessagesAfterAppend(); len(msgs) > 0; msgs = r.takeMessagesAfterAppend() {
		r.stepOrSend(msgs)
	}
}
// takeMessagesAfterAppend moves the pending after-append messages out of
// the raft instance and returns them.
func (r *raft) takeMessagesAfterAppend() []pb.Message {
	taken := r.msgsAfterAppend
	r.msgsAfterAppend = nil
	return taken
}
// stepOrSend locally steps messages addressed to r itself and queues the
// rest into r.msgs for delivery. The first Step error aborts processing.
func (r *raft) stepOrSend(msgs []pb.Message) error {
	for _, m := range msgs {
		if m.To != r.id {
			r.msgs = append(r.msgs, m)
			continue
		}
		if err := r.Step(m); err != nil {
			return err
		}
	}
	return nil
}
// TestProgressLeader verifies that the leader's own Match/Next only advance
// once the appended entries are processed via advanceMessagesAfterAppend.
func TestProgressLeader(t *testing.T) {
	s := newTestMemoryStorage(withPeers(1, 2))
	r := newTestRaft(1, 5, 1, s)
	r.becomeCandidate()
	r.becomeLeader()
	r.trk.Progress(2).BecomeReplicate()

	// Send proposals to r1. The first 5 entries should be queued in the unstable log.
	propMsg := pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("foo")}}}
	for i := 0; i < 5; i++ {
		require.NoError(t, r.Step(propMsg), "#%d", i)
	}
	// Nothing has been processed yet, so the leader's own Match is still zero.
	require.Zero(t, r.trk.Progress(1).Match)

	// 6 unstable entries: the leader's empty entry plus the 5 proposals.
	ents := r.raftLog.nextUnstableEnts()
	require.Len(t, ents, 6)
	require.Len(t, ents[0].Data, 0)
	require.Equal(t, "foo", string(ents[5].Data))

	r.advanceMessagesAfterAppend()

	require.Equal(t, uint64(6), r.trk.Progress(1).Match)
	require.Equal(t, uint64(7), r.trk.Progress(1).Next)
}
// TestProgressResumeByHeartbeatResp checks how MsgAppProbesPaused reacts to
// heartbeat traffic: neither sending a heartbeat nor receiving a heartbeat
// response clears the pause, while entering StateReplicate does.
func TestProgressResumeByHeartbeatResp(t *testing.T) {
	r := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2)))
	r.becomeCandidate()
	r.becomeLeader()
	r.trk.Progress(2).MsgAppProbesPaused = true

	r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})
	assert.True(t, r.trk.Progress(2).MsgAppProbesPaused)

	r.trk.Progress(2).BecomeReplicate()
	assert.False(t, r.trk.Progress(2).MsgAppProbesPaused)

	r.trk.Progress(2).MsgAppProbesPaused = true
	r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeatResp})
	assert.True(t, r.trk.Progress(2).MsgAppProbesPaused)
}
// TestProgressPaused verifies that three identical proposals result in only
// a single outgoing message while the follower's progress is paused.
func TestProgressPaused(t *testing.T) {
	r := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2)))
	r.becomeCandidate()
	r.becomeLeader()

	r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
	r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
	r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})

	ms := r.readMessages()
	assert.Len(t, ms, 1)
}
// TestProgressFlowControl exercises the per-follower flow-control limits
// (MaxInflightMsgs, MaxSizePerMsg, MaxInflightBytes) as a follower moves
// from probe to replicate state under a mix of small and large proposals.
func TestProgressFlowControl(t *testing.T) {
	cfg := newTestConfig(1, 5, 1, newTestMemoryStorage(withPeers(1, 2)))
	cfg.MaxInflightMsgs = 3
	cfg.MaxSizePerMsg = 2048
	cfg.MaxInflightBytes = 9000 // A little over MaxInflightMsgs * MaxSizePerMsg.
	r := newRaft(cfg)
	r.becomeCandidate()
	r.becomeLeader()

	// Throw away all the messages relating to the initial election.
	r.readMessages()

	// While node 2 is in probe state, propose a bunch of entries.
	r.trk.Progress(2).BecomeProbe()
	blob := []byte(strings.Repeat("a", 1000))
	large := []byte(strings.Repeat("b", 5000))
	for i := 0; i < 22; i++ {
		blob := blob
		if i >= 10 && i < 16 { // Temporarily send large messages.
			blob = large
		}
		r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: blob}}})
	}

	ms := r.readMessages()
	// First append has two entries: the empty entry to confirm the
	// election, and the first proposal (only one proposal gets sent
	// because we're in probe state).
	require.Len(t, ms, 1)
	require.Equal(t, pb.MsgApp, ms[0].Type)
	require.Len(t, ms[0].Entries, 2)
	require.Empty(t, ms[0].Entries[0].Data)
	require.Len(t, ms[0].Entries[1].Data, 1000)

	// ackAndVerify acks the follower's progress up to index and asserts the
	// entry counts of the MsgApp batch the leader sends next.
	ackAndVerify := func(index uint64, expEntries ...int) uint64 {
		r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgAppResp, Index: index})
		ms := r.readMessages()
		require.Equal(t, len(expEntries), len(ms))
		for i, m := range ms {
			assert.Equal(t, pb.MsgApp, m.Type, "#%d", i)
			assert.Len(t, m.Entries, expEntries[i], "#%d", i)
		}
		last := ms[len(ms)-1].Entries
		if len(last) == 0 {
			return index
		}
		return last[len(last)-1].Index
	}

	// When this append is acked, we change to replicate state and can
	// send multiple messages at once.
	index := ackAndVerify(ms[0].Entries[1].Index, 2, 2, 2)
	// Ack all three of those messages together and get another 3 messages. The
	// third message contains a single large entry, in contrast to 2 before.
	index = ackAndVerify(index, 2, 1, 1)
	// All subsequent messages contain one large entry, and we cap at 2 messages
	// because it overflows MaxInflightBytes.
	index = ackAndVerify(index, 1, 1)
	index = ackAndVerify(index, 1, 1)
	// Start getting small messages again.
	index = ackAndVerify(index, 1, 2, 2)
	ackAndVerify(index, 2)
}
// TestUncommittedEntryLimit verifies that proposals beyond
// MaxUncommittedEntriesSize are dropped, that empty (no-Data) entries are
// always accepted, and that reducing the uncommitted size restores room.
func TestUncommittedEntryLimit(t *testing.T) {
	// Use a relatively large number of entries here to prevent regression of a
	// bug which computed the size before it was fixed. This test would fail
	// with the bug, either because we'd get dropped proposals earlier than we
	// expect them, or because the final tally ends up nonzero. (At the time of
	// writing, the former).
	const maxEntries = 1024
	testEntry := pb.Entry{Data: []byte("testdata")}
	maxEntrySize := maxEntries * payloadSize(testEntry)

	require.Zero(t, payloadSize(pb.Entry{Data: nil}))

	cfg := newTestConfig(1, 5, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
	cfg.MaxUncommittedEntriesSize = uint64(maxEntrySize)
	cfg.MaxInflightMsgs = 2 * 1024 // avoid interference
	r := newRaft(cfg)
	r.becomeCandidate()
	r.becomeLeader()
	require.Zero(t, r.uncommittedSize)

	// Set the two followers to the replicate state. Commit to tail of log.
	const numFollowers = 2
	r.trk.Progress(2).BecomeReplicate()
	r.trk.Progress(3).BecomeReplicate()
	r.uncommittedSize = 0

	// Send proposals to r1. The first 5 entries should be appended to the log.
	propMsg := pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{testEntry}}
	propEnts := make([]pb.Entry, maxEntries)
	for i := 0; i < maxEntries; i++ {
		require.NoError(t, r.Step(propMsg), "#%d", i)
		propEnts[i] = testEntry
	}

	// Send one more proposal to r1. It should be rejected.
	require.Equal(t, ErrProposalDropped, r.Step(propMsg))

	// Read messages and reduce the uncommitted size as if we had committed
	// these entries.
	ms := r.readMessages()
	require.Len(t, ms, maxEntries*numFollowers)
	r.reduceUncommittedSize(payloadsSize(propEnts))
	require.Zero(t, r.uncommittedSize)

	// Send a single large proposal to r1. Should be accepted even though it
	// pushes us above the limit because we were beneath it before the proposal.
	propEnts = make([]pb.Entry, 2*maxEntries)
	for i := range propEnts {
		propEnts[i] = testEntry
	}
	propMsgLarge := pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: propEnts}
	require.NoError(t, r.Step(propMsgLarge))

	// Send one more proposal to r1. It should be rejected, again.
	require.Equal(t, ErrProposalDropped, r.Step(propMsg))

	// But we can always append an entry with no Data. This is used both for the
	// leader's first empty entry and for auto-transitioning out of joint config
	// states.
	require.NoError(t, r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}}))

	// Read messages and reduce the uncommitted size as if we had committed
	// these entries.
	ms = r.readMessages()
	require.Len(t, ms, 2*numFollowers)
	r.reduceUncommittedSize(payloadsSize(propEnts))
	require.Zero(t, r.uncommittedSize)
}
// TestLeaderElection runs the election scenarios without pre-vote.
func TestLeaderElection(t *testing.T) {
	testLeaderElection(t, false)
}
// TestLeaderElectionPreVote runs the election scenarios with pre-vote enabled.
func TestLeaderElectionPreVote(t *testing.T) {
	testLeaderElection(t, true)
}
// testLeaderElection drives campaigns across networks of varying sizes and
// responsiveness and checks the resulting state and term of node 1.
func testLeaderElection(t *testing.T, preVote bool) {
	var cfg func(*Config)
	candState := pb.StateCandidate
	candTerm := uint64(1)
	if preVote {
		cfg = preVoteConfig
		// In pre-vote mode, an election that fails to complete
		// leaves the node in pre-candidate state without advancing
		// the term.
		candState = pb.StatePreCandidate
		candTerm = 0
	}
	tests := []struct {
		*network
		state   pb.StateType
		expTerm uint64
	}{
		{newNetworkWithConfig(cfg, nil, nil, nil), pb.StateLeader, 1},
		{newNetworkWithConfig(cfg, nil, nil, nopStepper), pb.StateLeader, 1},
		{newNetworkWithConfig(cfg, nil, nopStepper, nopStepper), candState, candTerm},
		{newNetworkWithConfig(cfg, nil, nopStepper, nopStepper, nil), candState, candTerm},
		{newNetworkWithConfig(cfg, nil, nopStepper, nopStepper, nil, nil), pb.StateLeader, 1},
		// three logs further along than 0, but in the same term so rejections
		// are returned instead of the votes being ignored.
		{newNetworkWithConfig(cfg,
			nil, entsWithConfig(cfg, 1), entsWithConfig(cfg, 1), entsWithConfig(cfg, 1, 1), nil),
			pb.StateFollower, 1},
	}

	for i, tt := range tests {
		tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
		sm := tt.network.peers[1].(*raft)
		assert.Equal(t, tt.state, sm.state, "#%d", i)
		assert.Equal(t, tt.expTerm, sm.Term, "#%d", i)
	}
}
// TestLearnerElectionTimeout verifies that a learner does not start an
// election even when its election timeout elapses.
func TestLearnerElectionTimeout(t *testing.T) {
	n1 := newTestLearnerRaft(1, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))
	n2 := newTestLearnerRaft(2, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))

	n1.becomeFollower(1, None)
	n2.becomeFollower(1, None)

	// n2 is learner. Learner should not start election even when times out.
	setRandomizedElectionTimeout(n2, n2.electionTimeout)
	for i := int64(0); i < n2.electionTimeout; i++ {
		n2.tick()
	}

	assert.Equal(t, pb.StateFollower, n2.state)
}
// TestLearnerPromotion verifies that a learner does not campaign until it
// is promoted to a voting peer. Runs under both store-liveness settings.
func TestLearnerPromotion(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testLearnerPromotion(t, storeLivenessEnabled)
		})
}
// testLearnerPromotion sets up a two-node cluster where n2 starts as a
// learner, elects n1, then promotes n2 to a voter via a conf change and
// verifies n2 can subsequently win an election.
func testLearnerPromotion(t *testing.T, storeLivenessEnabled bool) {
	var fabric *raftstoreliveness.LivenessFabric
	var n1, n2 *raft
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3)
		n1 = newTestLearnerRaft(1, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)),
			withStoreLiveness(fabric.GetStoreLiveness(1)))
		n2 = newTestLearnerRaft(2, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)),
			withStoreLiveness(fabric.GetStoreLiveness(2)))
	} else {
		n1 = newTestLearnerRaft(1, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n2 = newTestLearnerRaft(2, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
	}

	n1.becomeFollower(1, None)
	n2.becomeFollower(1, None)

	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, n1, n2)

	assert.NotEqual(t, pb.StateLeader, n1.state)

	// n1 should become leader.
	setRandomizedElectionTimeout(n1, n1.electionTimeout)
	for i := int64(0); i < n1.electionTimeout; i++ {
		n1.tick()
	}
	n1.advanceMessagesAfterAppend()

	assert.Equal(t, pb.StateLeader, n1.state)
	assert.Equal(t, pb.StateFollower, n2.state)

	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})

	// Promote n2 from learner to voter on both peers.
	n1.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeAddNode}.AsV2())
	n2.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeAddNode}.AsV2())
	assert.False(t, n2.isLearner)

	if storeLivenessEnabled {
		// We need to withdraw support of 1 to allow 2 to campaign and get elected.
		nt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
	}

	// n2 starts an election; it should become leader now that it votes.
	setRandomizedElectionTimeout(n2, n2.electionTimeout)
	for i := int64(0); i < n2.electionTimeout; i++ {
		n2.tick()
	}
	n2.advanceMessagesAfterAppend()

	nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})

	assert.Equal(t, pb.StateFollower, n1.state)
	assert.Equal(t, pb.StateLeader, n2.state)
}
// TestLearnerCanVote checks that a learner grants its vote when it receives
// a valid MsgVote request. See (*raft).Step for why this is necessary and
// correct behavior.
func TestLearnerCanVote(t *testing.T) {
	lnr := newTestLearnerRaft(2, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))
	lnr.becomeFollower(1, None)

	lnr.Step(pb.Message{From: 1, To: 2, Term: 2, Type: pb.MsgVote, LogTerm: 11, Index: 11})

	sent := lnr.readMessages()
	require.Len(t, sent, 1)
	resp := sent[0]
	require.Equal(t, resp.Type, pb.MsgVoteResp)
	require.False(t, resp.Reject, "expected learner to not reject vote")
}
// TestLeaderCycle runs the leader-cycle scenario with pre-vote disabled,
// under both store-liveness settings.
func TestLeaderCycle(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testLeaderCycle(t, false, storeLivenessEnabled)
		})
}
// TestLeaderCyclePreVote runs the leader-cycle scenario with pre-vote
// enabled, under both store-liveness settings.
func TestLeaderCyclePreVote(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testLeaderCycle(t, true, storeLivenessEnabled)
		})
}
// testLeaderCycle verifies that each node in a cluster can campaign
// and be elected in turn. This ensures that elections (including
// pre-vote) work when not starting from a clean slate (as they do in
// TestLeaderElection)
func testLeaderCycle(t *testing.T, preVote bool, storeLivenessEnabled bool) {
	// Pick the network configuration matching the (preVote,
	// storeLivenessEnabled) combination. The default configuration (cfg ==
	// nil) already has pre-vote disabled and fortification enabled, so the
	// (!preVote && storeLivenessEnabled) combination needs no override.
	//
	// Note: the original code first assigned cfg under `if preVote` and then
	// unconditionally overwrote it in an exhaustive if/else chain; the first
	// assignment was dead code and has been removed.
	var cfg func(c *Config)
	switch {
	case preVote && storeLivenessEnabled:
		cfg = preVoteConfig
	case preVote && !storeLivenessEnabled:
		cfg = preVoteConfigWithFortificationDisabled
	case !preVote && !storeLivenessEnabled:
		cfg = fortificationDisabledConfig
	}

	n := newNetworkWithConfig(cfg, nil, nil, nil)
	n.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})

	curLeader := pb.PeerID(1)
	for campaignerID := pb.PeerID(1); campaignerID <= 3; campaignerID++ {
		if storeLivenessEnabled {
			// We need to withdraw support of the current leader to allow the new peer
			// to campaign and get elected.
			n.livenessFabric.WithdrawSupportForPeerFromAllPeers(curLeader)
		}

		n.send(pb.Message{From: campaignerID, To: campaignerID, Type: pb.MsgHup})

		if storeLivenessEnabled {
			// Restore the support state.
			n.livenessFabric.GrantSupportForPeerFromAllPeers(curLeader)
		}

		// Update the current leader to prep for the next iteration.
		curLeader = campaignerID

		// The campaigner must be leader; everyone else must be a follower.
		for _, peer := range n.peers {
			sm := peer.(*raft)
			if sm.id == campaignerID {
				assert.Equal(t, pb.StateLeader, sm.state, "preVote=%v: campaigning node %d", preVote, sm.id)
			} else {
				assert.Equal(t, pb.StateFollower, sm.state, "preVote=%v: campaigning node %d, current node %d", preVote, campaignerID, sm.id)
			}
		}
	}
}
// TestLeaderElectionOverwriteNewerLogs tests a scenario in which a
// newly-elected leader does *not* have the newest (i.e. highest term)
// log entries, and must overwrite higher-term log entries with
// lower-term ones. Runs with pre-vote disabled.
func TestLeaderElectionOverwriteNewerLogs(t *testing.T) {
	testLeaderElectionOverwriteNewerLogs(t, false)
}
// TestLeaderElectionOverwriteNewerLogsPreVote is the pre-vote variant of
// TestLeaderElectionOverwriteNewerLogs.
func TestLeaderElectionOverwriteNewerLogsPreVote(t *testing.T) {
	testLeaderElectionOverwriteNewerLogs(t, true)
}
// testLeaderElectionOverwriteNewerLogs builds a five-node cluster with
// diverged logs (described below) and verifies that the eventual winner's
// log overwrites the higher-term uncommitted entries on the losers.
func testLeaderElectionOverwriteNewerLogs(t *testing.T, preVote bool) {
	var cfg func(*Config)
	if preVote {
		cfg = preVoteConfig
	}
	// This network represents the results of the following sequence of
	// events:
	// - Node 1 won the election in term 1.
	// - Node 1 replicated a log entry to node 2 but died before sending
	//   it to other nodes.
	// - Node 3 won the second election in term 2.
	// - Node 3 wrote an entry to its logs but died without sending it
	//   to any other nodes.
	//
	// At this point, nodes 1, 2, and 3 all have uncommitted entries in
	// their logs and could win an election at term 3. The winner's log
	// entry overwrites the losers'. (TestLeaderSyncFollowerLog tests
	// the case where older log entries are overwritten, so this test
	// focuses on the case where the newer entries are lost).
	n := newNetworkWithConfig(cfg,
		entsWithConfig(cfg, 1),     // Node 1: Won first election
		entsWithConfig(cfg, 1),     // Node 2: Got logs from node 1
		entsWithConfig(cfg, 2),     // Node 3: Won second election
		votedWithConfig(cfg, 3, 2), // Node 4: Voted but didn't get logs
		votedWithConfig(cfg, 3, 2)) // Node 5: Voted but didn't get logs

	// Node 1 campaigns. The election fails because a quorum of nodes
	// know about the election that already happened at term 2. Node 1's
	// term is pushed ahead to 2.
	n.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	sm1 := n.peers[1].(*raft)
	assert.Equal(t, pb.StateFollower, sm1.state)
	assert.Equal(t, uint64(2), sm1.Term)

	// Node 1 campaigns again with a higher term. This time it succeeds.
	n.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	assert.Equal(t, pb.StateLeader, sm1.state)
	assert.Equal(t, uint64(3), sm1.Term)

	// Now all nodes agree on a log entry with term 1 at index 1 (and
	// term 3 at index 2).
	for i := range n.peers {
		sm := n.peers[i].(*raft)
		entries := sm.raftLog.allEntries()
		require.Len(t, entries, 2)
		assert.Equal(t, uint64(1), entries[0].Term)
		assert.Equal(t, uint64(3), entries[1].Term)
	}
}
// TestVoteFromAnyState checks that MsgVote is handled from every raft state.
func TestVoteFromAnyState(t *testing.T) {
	testVoteFromAnyState(t, pb.MsgVote)
}
// TestPreVoteFromAnyState checks that MsgPreVote is handled from every raft
// state.
func TestPreVoteFromAnyState(t *testing.T) {
	testVoteFromAnyState(t, pb.MsgPreVote)
}
// testVoteFromAnyState puts a node into each possible state and sends it a
// (pre-)vote request at a higher term with an up-to-date log. The vote must
// be granted from every state; a real vote additionally resets the node to
// follower at the new term, while a pre-vote changes nothing.
func testVoteFromAnyState(t *testing.T, vt pb.MessageType) {
	for st := pb.StateType(0); st < pb.NumStates; st++ {
		r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
		r.Term = 1

		switch st {
		case pb.StateFollower:
			r.becomeFollower(r.Term, 3)
		case pb.StatePreCandidate:
			r.becomePreCandidate()
		case pb.StateCandidate:
			r.becomeCandidate()
		case pb.StateLeader:
			r.becomeCandidate()
			r.becomeLeader()
		}

		// Note that setting our state above may have advanced r.Term
		// past its initial value.
		origTerm := r.Term
		newTerm := r.Term + 1

		msg := pb.Message{
			From:    2,
			To:      1,
			Type:    vt,
			Term:    newTerm,
			LogTerm: newTerm,
			Index:   42,
		}
		assert.NoError(t, r.Step(msg), "%s,%s", vt, st)
		msgs := r.readMessages()
		if assert.Len(t, msgs, 1, "%s,%s", vt, st) {
			resp := msgs[0]
			assert.Equal(t, voteRespMsgType(vt), resp.Type, "%s,%s", vt, st)
			assert.False(t, resp.Reject, "%s,%s", vt, st)
		}

		// If this was a real vote, we reset our state and term.
		if vt == pb.MsgVote {
			assert.Equal(t, pb.StateFollower, r.state, "%s,%s", vt, st)
			assert.Equal(t, newTerm, r.Term, "%s,%s", vt, st)
			assert.Equal(t, pb.PeerID(2), r.Vote, "%s,%s", vt, st)
		} else {
			// In a prevote, nothing changes.
			assert.Equal(t, st, r.state, "%s,%s", vt, st)
			assert.Equal(t, origTerm, r.Term, "%s,%s", vt, st)
			// if st == StateFollower or StatePreCandidate, r hasn't voted yet.
			// In StateCandidate or StateLeader, it's voted for itself.
			assert.True(t, r.Vote == None || r.Vote == 1, "%s,%s: vote %d, want %d or 1", vt, st, r.Vote, None)
		}
	}
}
// TestLogReplication tests that the normal replication flow works, under
// both store-liveness settings.
func TestLogReplication(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testLogReplication(t, storeLivenessEnabled)
		})
}
// testLogReplication proposes entries (optionally across a leadership
// change) and verifies that every peer reaches the expected commit index
// and replicates every proposed payload in order.
func testLogReplication(t *testing.T, storeLivenessEnabled bool) {
	var cfg func(c *Config) = nil
	if !storeLivenessEnabled {
		cfg = fortificationDisabledConfig
	}

	tests := []struct {
		*network
		msgs       []pb.Message
		wcommitted uint64
	}{
		{
			newNetworkWithConfig(cfg, nil, nil, nil),
			[]pb.Message{
				{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}},
			},
			2,
		},
		{
			newNetworkWithConfig(cfg, nil, nil, nil),
			[]pb.Message{
				{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}},
				{From: 1, To: 2, Type: pb.MsgHup},
				{From: 1, To: 2, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}},
			},
			4,
		},
	}

	for i, tt := range tests {
		tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})

		for _, m := range tt.msgs {
			if m.Type == pb.MsgHup && storeLivenessEnabled {
				// We need to withdraw support of the current leader to allow the new peer
				// to campaign and get elected.
				tt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
			}

			tt.send(m)

			if m.Type == pb.MsgHup && storeLivenessEnabled {
				// Restore the support state.
				tt.livenessFabric.GrantSupportForPeerFromAllPeers(1)
			}
		}

		for j, x := range tt.network.peers {
			sm := x.(*raft)

			assert.Equal(t, tt.wcommitted, sm.raftLog.committed, "#%d.%d", i, j)

			// Collect the committed entries that carry payloads (skipping
			// empty term-change entries).
			var ents []pb.Entry
			for _, e := range nextEnts(sm, tt.network.storage[j]) {
				if e.Data != nil {
					ents = append(ents, e)
				}
			}
			var props []pb.Message
			for _, m := range tt.msgs {
				if m.Type == pb.MsgProp {
					props = append(props, m)
				}
			}
			for k, m := range props {
				assert.Equal(t, m.Entries[0].Data, ents[k].Data, "#%d.%d", i, j)
			}
		}
	}
}
// TestLearnerLogReplication tests that a learner can receive entries from
// the leader and that its commit index tracks the leader's.
func TestLearnerLogReplication(t *testing.T) {
	s1 := newTestMemoryStorage(withPeers(1), withLearners(2))
	n1 := newTestLearnerRaft(1, 10, 1, s1)
	n2 := newTestLearnerRaft(2, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))

	nt := newNetwork(n1, n2)
	nt.t = t

	n1.becomeFollower(1, None)
	n2.becomeFollower(1, None)

	setRandomizedElectionTimeout(n1, n1.electionTimeout)
	for i := int64(0); i < n1.electionTimeout; i++ {
		n1.tick()
	}
	n1.advanceMessagesAfterAppend()

	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})

	// n1 is leader and n2 is learner
	assert.Equal(t, pb.StateLeader, n1.state)
	assert.True(t, n2.isLearner)

	// Index 2 = the empty term-change entry plus the proposal below.
	nextCommitted := uint64(2)
	{
		nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
	}
	assert.Equal(t, nextCommitted, n1.raftLog.committed)
	assert.Equal(t, n1.raftLog.committed, n2.raftLog.committed)

	// The leader's view of the learner's replicated index must match the
	// learner's commit index.
	match := n1.trk.Progress(2).Match
	assert.Equal(t, n2.raftLog.committed, match)
}
// TestSingleNodeCommit verifies that a single-node cluster commits its own
// proposals: after electing itself and proposing two entries, the committed
// index covers the empty term-change entry plus both proposals.
func TestSingleNodeCommit(t *testing.T) {
	storage := newTestMemoryStorage(withPeers(1))
	node := newRaft(newTestConfig(1, 10, 1, storage))
	nt := newNetwork(node)

	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	for i := 0; i < 2; i++ {
		nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
	}

	leader := nt.peers[1].(*raft)
	assert.Equal(t, uint64(3), leader.raftLog.committed)
}
// TestCannotCommitWithoutNewTermEntry tests the entries cannot be committed
// when leader changes, no new proposal comes in and ChangeTerm proposal is
// filtered.
func TestCannotCommitWithoutNewTermEntry(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testCannotCommitWithoutNewTermEntry(t, storeLivenessEnabled)
		})
}
// testCannotCommitWithoutNewTermEntry partitions the leader, proposes
// entries that cannot commit, elects a new leader while dropping its
// empty term-change entry, and verifies that prior-term entries stay
// uncommitted until an entry from the current term is appended.
func testCannotCommitWithoutNewTermEntry(t *testing.T, storeLivenessEnabled bool) {
	var cfg func(c *Config) = nil
	if !storeLivenessEnabled {
		cfg = fortificationDisabledConfig
	}

	tt := newNetworkWithConfig(cfg, nil, nil, nil, nil, nil)
	tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})

	// Peer 1 cannot reach 3, 4, 5.
	tt.cut(1, 3)
	tt.cut(1, 4)
	tt.cut(1, 5)

	tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
	tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})

	sm := tt.peers[1].(*raft)
	assert.Equal(t, uint64(1), sm.raftLog.committed)

	// network recovery
	tt.recover()
	// avoid committing ChangeTerm proposal
	tt.ignore(pb.MsgApp)

	// Elect 2 as the new leader with term 2.
	if storeLivenessEnabled {
		// We need to withdraw support of the current leader to allow the new peer
		// to campaign and get elected.
		tt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
	}

	tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})

	if storeLivenessEnabled {
		// Restore the support state.
		tt.livenessFabric.GrantSupportForPeerFromAllPeers(1)
	}

	// no log entries from previous term should be committed
	sm = tt.peers[2].(*raft)
	assert.Equal(t, uint64(1), sm.raftLog.committed)

	tt.recover()
	// send heartbeat; reset wait
	tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgBeat})
	// append an entry at current term
	tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
	// expect the committed to be advanced
	assert.Equal(t, uint64(5), sm.raftLog.committed)
}
// TestCommitWithoutNewTermEntry tests the entries could be committed when
// leader changes, no new proposal comes in.
func TestCommitWithoutNewTermEntry(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testCommitWithoutNewTermEntry(t, storeLivenessEnabled)
		})
}
// testCommitWithoutNewTermEntry partitions the leader, proposes entries
// that cannot commit, then heals the network and elects a new leader. The
// new leader's empty term-change entry is replicated normally here, so all
// pending entries become committed.
func testCommitWithoutNewTermEntry(t *testing.T, storeLivenessEnabled bool) {
	var cfg func(c *Config) = nil
	if !storeLivenessEnabled {
		cfg = fortificationDisabledConfig
	}

	tt := newNetworkWithConfig(cfg, nil, nil, nil, nil, nil)
	tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})

	// Peer 1 cannot reach 3, 4, 5.
	tt.cut(1, 3)
	tt.cut(1, 4)
	tt.cut(1, 5)

	tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
	tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})

	sm := tt.peers[1].(*raft)
	assert.Equal(t, uint64(1), sm.raftLog.committed)

	// network recovery
	tt.recover()

	// elect 2 as the new leader with term 2
	// after append a ChangeTerm entry from the current term, all entries
	// should be committed
	if storeLivenessEnabled {
		// We need to withdraw support of the current leader to allow the new peer
		// to campaign and get elected.
		tt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
	}

	tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})

	if storeLivenessEnabled {
		// Restore the support state.
		tt.livenessFabric.GrantSupportForPeerFromAllPeers(1)
	}

	assert.Equal(t, uint64(4), sm.raftLog.committed)
}
// TestDuelingCandidates runs the dueling-candidates scenario under both
// store-liveness settings.
func TestDuelingCandidates(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testDuelingCandidates(t, storeLivenessEnabled)
		})
}
// testDuelingCandidates partitions peers 1 and 3 so both campaign at once:
// 1 wins with a quorum (1, 2) while 3 stays a candidate. After the
// partition heals, 3's higher-term campaign disrupts leader 1, but 3 still
// loses because its log is too short, leaving everyone a follower.
func testDuelingCandidates(t *testing.T, storeLivenessEnabled bool) {
	var fabric *raftstoreliveness.LivenessFabric
	var a, b, c *raft
	s1 := newTestMemoryStorage(withPeers(1, 2, 3))
	s2 := newTestMemoryStorage(withPeers(1, 2, 3))
	s3 := newTestMemoryStorage(withPeers(1, 2, 3))
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3)
		a = newTestRaft(1, 10, 1, s1, withStoreLiveness(fabric.GetStoreLiveness(1)))
		b = newTestRaft(2, 10, 1, s2, withStoreLiveness(fabric.GetStoreLiveness(2)))
		c = newTestRaft(3, 10, 1, s3, withStoreLiveness(fabric.GetStoreLiveness(3)))
	} else {
		a = newTestRaft(1, 10, 1, s1, withStoreLiveness(raftstoreliveness.Disabled{}))
		b = newTestRaft(2, 10, 1, s2, withStoreLiveness(raftstoreliveness.Disabled{}))
		c = newTestRaft(3, 10, 1, s3, withStoreLiveness(raftstoreliveness.Disabled{}))
	}

	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, a, b, c)
	nt.cut(1, 3)

	if storeLivenessEnabled {
		// We need to withdraw support for and from 1 and 3 to simulate a partition.
		nt.livenessFabric.WithdrawSupport(1, 3)
		nt.livenessFabric.WithdrawSupport(3, 1)
	}

	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})

	// 1 becomes leader since it receives votes from 1 and 2
	sm := nt.peers[1].(*raft)
	assert.Equal(t, pb.StateLeader, sm.state)

	// 3 stays as candidate since it receives a vote from 3 and a rejection from 2
	sm = nt.peers[3].(*raft)
	assert.Equal(t, pb.StateCandidate, sm.state)

	nt.recover()

	if storeLivenessEnabled {
		// Fix the network at the store liveness layer.
		nt.livenessFabric.GrantSupport(1, 3)
		nt.livenessFabric.GrantSupport(3, 1)
	}

	// candidate 3 now increases its term and tries to vote again
	// we expect it to disrupt the leader 1 since it has a higher term
	// 3 will be follower again since both 1 and 2 rejects its vote request since 3 does not have a long enough log
	if storeLivenessEnabled {
		// We need to withdraw support from 1 so 3 can be elected as leader.
		nt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
	}

	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
	assert.Equal(t, pb.StateFollower, sm.state)

	tests := []struct {
		sm        *raft
		state     pb.StateType
		term      uint64
		lastIndex uint64
	}{
		{a, pb.StateFollower, 2, 1},
		{b, pb.StateFollower, 2, 1},
		{c, pb.StateFollower, 2, 0},
	}

	for i, tt := range tests {
		assert.Equal(t, tt.state, tt.sm.state, "#%d", i)
		assert.Equal(t, tt.term, tt.sm.Term, "#%d", i)
		assert.Equal(t, tt.lastIndex, tt.sm.raftLog.lastIndex(), "#%d", i)
	}
}
// TestDuelingPreCandidates runs the dueling-pre-candidates scenario under
// both store-liveness settings.
func TestDuelingPreCandidates(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testDuelingPreCandidates(t, storeLivenessEnabled)
		})
}
// testDuelingPreCandidates is the pre-vote variant of testDuelingCandidates:
// with pre-vote enabled, peer 3's failed campaign reverts it to follower
// without bumping terms, so after the partition heals its renewed campaign
// does not disrupt leader 1.
func testDuelingPreCandidates(t *testing.T, storeLivenessEnabled bool) {
	var fabric *raftstoreliveness.LivenessFabric
	var cfgA, cfgB, cfgC *Config
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3)
		cfgA = newTestConfig(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(1)))
		cfgB = newTestConfig(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(2)))
		cfgC = newTestConfig(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(3)))
	} else {
		cfgA = newTestConfig(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		cfgB = newTestConfig(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		cfgC = newTestConfig(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
	}

	cfgA.PreVote = true
	cfgB.PreVote = true
	cfgC.PreVote = true
	a := newRaft(cfgA)
	b := newRaft(cfgB)
	c := newRaft(cfgC)

	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, a, b, c)
	nt.t = t
	nt.cut(1, 3)

	if storeLivenessEnabled {
		// Withdraw the support between 1 and 3 to simulate a network partition.
		nt.livenessFabric.WithdrawSupport(1, 3)
		nt.livenessFabric.WithdrawSupport(3, 1)
	}

	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})

	// 1 becomes leader since it receives votes from 1 and 2
	sm := nt.peers[1].(*raft)
	assert.Equal(t, pb.StateLeader, sm.state)

	// 3 campaigns then reverts to follower when its PreVote is rejected
	sm = nt.peers[3].(*raft)
	assert.Equal(t, pb.StateFollower, sm.state)

	nt.recover()

	// Candidate 3 now increases its term and tries to vote again.
	// With PreVote, it does not disrupt the leader.
	if storeLivenessEnabled {
		// We need to withdraw support from 1 so 3 can campaign and not get rejected
		// because of store liveness support.
		nt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
	}

	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})

	tests := []struct {
		sm        *raft
		state     pb.StateType
		term      uint64
		lastIndex uint64
	}{
		{a, pb.StateLeader, 1, 1},
		{b, pb.StateFollower, 1, 1},
		{c, pb.StateFollower, 1, 0},
	}
	for i, tt := range tests {
		assert.Equal(t, tt.state, tt.sm.state, "#%d", i)
		assert.Equal(t, tt.term, tt.sm.Term, "#%d", i)
		assert.Equal(t, tt.lastIndex, tt.sm.raftLog.lastIndex(), "#%d", i)
	}
}
// TestCandidateConcede verifies that an isolated candidate (peer 1) steps
// back down to follower once the partition heals and it hears from the
// leader (peer 3), and that its log converges to the leader's.
func TestCandidateConcede(t *testing.T) {
	tt := newNetwork(nil, nil, nil)
	tt.isolate(1)

	tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	tt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})

	// heal the partition
	tt.recover()
	// send heartbeat; reset wait
	p := tt.peers[pb.PeerID(3)].(*raft)
	for ticks := p.heartbeatTimeout; ticks > 0; ticks-- {
		tt.tick(p)
	}

	data := []byte("force follower")
	// send a proposal to 3 to flush out a MsgApp to 1
	tt.send(pb.Message{From: 3, To: 3, Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
	// send heartbeat; flush out commit
	for ticks := p.heartbeatTimeout; ticks > 0; ticks-- {
		tt.tick(p)
	}

	a := tt.peers[1].(*raft)
	assert.Equal(t, pb.StateFollower, a.state)
	assert.Equal(t, uint64(1), a.Term)

	wantLog := ltoa(newLog(&MemoryStorage{ls: LogSlice{
		entries: []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1, Data: data}},
	}}, nil))
	for i, p := range tt.peers {
		if sm, ok := p.(*raft); ok {
			l := ltoa(sm.raftLog)
			assert.Empty(t, diffu(wantLog, l), "#%d", i)
		} else {
			t.Logf("#%d: empty log", i)
		}
	}
}
// TestSingleNodeCandidate verifies that a lone node wins the election it
// starts and immediately becomes leader.
func TestSingleNodeCandidate(t *testing.T) {
	nt := newNetwork(nil)

	hup := pb.Message{From: 1, To: 1, Type: pb.MsgHup}
	nt.send(hup)

	node := nt.peers[1].(*raft)
	assert.Equal(t, pb.StateLeader, node.state)
}
// TestSingleNodePreCandidate verifies that a lone node with pre-vote
// enabled still wins its own election and becomes leader.
func TestSingleNodePreCandidate(t *testing.T) {
	nt := newNetworkWithConfig(preVoteConfig, nil)

	hup := pb.Message{From: 1, To: 1, Type: pb.MsgHup}
	nt.send(hup)

	node := nt.peers[1].(*raft)
	assert.Equal(t, pb.StateLeader, node.state)
}
// TestOldMessages runs the stale-message scenario under both store-liveness
// settings.
func TestOldMessages(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testOldMessages(t, storeLivenessEnabled)
		})
}
// testOldMessages cycles leadership 1 -> 2 -> 1 (reaching term 3), then
// injects a stale MsgApp from the term-2 leader and verifies it is ignored:
// all peers converge on the same log.
func testOldMessages(t *testing.T, storeLivenessEnabled bool) {
	var cfg func(c *Config) = nil
	if !storeLivenessEnabled {
		cfg = fortificationDisabledConfig
	}

	tt := newNetworkWithConfig(cfg, nil, nil, nil)
	// make 0 leader @ term 3
	tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	if storeLivenessEnabled {
		// We need to withdraw support of the current leader to allow the new peer
		// to campaign and get elected.
		tt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
		tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
		tt.livenessFabric.GrantSupportForPeerFromAllPeers(1)
		tt.livenessFabric.WithdrawSupportForPeerFromAllPeers(2)
		tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
		tt.livenessFabric.GrantSupportForPeerFromAllPeers(2)
	} else {
		tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
		tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	}

	// pretend we're an old leader trying to make progress; this entry is expected to be ignored.
	tt.send(pb.Message{From: 2, To: 1, Type: pb.MsgApp, Term: 2, Entries: index(3).terms(2)})
	// commit a new entry
	tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})

	// Expected log: term-change entries at terms 1, 2, 3 plus the proposal
	// at term 3.
	ents := index(1).terms(1, 2, 3, 3)
	ents[3].Data = []byte("somedata")
	ilog := newLog(&MemoryStorage{ls: LogSlice{entries: ents}}, nil)
	base := ltoa(ilog)
	for i, p := range tt.peers {
		if sm, ok := p.(*raft); ok {
			l := ltoa(sm.raftLog)
			assert.Empty(t, diffu(base, l), "#%d", i)
		} else {
			t.Logf("#%d: empty log", i)
		}
	}
}
// TODO: add TestOldMessagesReply — optimization: reply with the new term.
// TestProposal verifies that proposing with a quorum reachable succeeds and
// replicates the entry everywhere, while proposing without a quorum fails
// (panics, which the test recovers from) and leaves logs empty.
func TestProposal(t *testing.T) {
	tests := []struct {
		*network
		success bool
	}{
		{newNetwork(nil, nil, nil), true},
		{newNetwork(nil, nil, nopStepper), true},
		{newNetwork(nil, nopStepper, nopStepper), false},
		{newNetwork(nil, nopStepper, nopStepper, nil), false},
		{newNetwork(nil, nopStepper, nopStepper, nil, nil), true},
	}

	for j, tt := range tests {
		send := func(m pb.Message) {
			defer func() {
				// only recover if we expect it to panic (success==false)
				if !tt.success {
					e := recover()
					if e != nil {
						t.Logf("#%d: err: %s", j, e)
					}
				}
			}()
			tt.send(m)
		}

		data := []byte("somedata")

		// promote 1 to become leader
		send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
		send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
		r := tt.network.peers[1].(*raft)

		wantLog := newLog(NewMemoryStorage(), raftlogger.RaftLogger)
		if tt.success {
			wantLog = newLog(&MemoryStorage{ls: LogSlice{
				entries: []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1, Data: data}},
			}}, nil)
		}
		base := ltoa(wantLog)
		for i, p := range tt.peers {
			if sm, ok := p.(*raft); ok {
				l := ltoa(sm.raftLog)
				assert.Empty(t, diffu(base, l), "#%d, peer %d", j, i)
			} else {
				t.Logf("#%d: peer %d empty log", j, i)
			}
		}
		assert.Equal(t, uint64(1), r.Term, "#%d", j)
	}
}
// TestProposalByProxy verifies that a proposal sent to a follower is
// forwarded to the leader and committed cluster-wide.
func TestProposalByProxy(t *testing.T) {
	data := []byte("somedata")
	tests := []*network{
		newNetwork(nil, nil, nil),
		newNetwork(nil, nil, nopStepper),
	}

	for j, tt := range tests {
		// promote peer 1 to be the leader
		tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})

		// propose via follower
		tt.send(pb.Message{From: 2, To: 2, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})

		wantLog := newLog(&MemoryStorage{ls: LogSlice{
			entries: []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 1, Data: data}},
		}}, nil)
		base := ltoa(wantLog)
		for i, p := range tt.peers {
			if sm, ok := p.(*raft); ok {
				l := ltoa(sm.raftLog)
				assert.Empty(t, diffu(base, l), "#%d.%d", j, i)
			} else {
				t.Logf("#%d: peer %d empty log", j, i)
			}
		}
		sm := tt.peers[1].(*raft)
		assert.Equal(t, uint64(1), sm.Term, "#%d", j)
	}
}
// TestCommit checks the quorum-based commit rule: given a leader log, some
// appended entries, and the match indices reported by the peers, maybeCommit
// must advance the commit index to the highest index replicated on a quorum
// whose term is the leader's current term (entries from earlier terms are
// never committed by counting replicas).
func TestCommit(t *testing.T) {
	m := func(indices ...uint64) []uint64 { return indices }
	for _, tt := range []struct {
		term  uint64     // term before becoming leader
		log   []pb.Entry // log before becoming leader
		app   []pb.Entry // appended entries after becoming leader
		match []uint64   // match indices for all peers
		want  uint64     // expected commit index
	}{
		// single node
		{term: 0, match: m(0), want: 0},
		{term: 0, match: m(1), want: 1},
		{term: 1, log: index(1).terms(1), match: m(1), want: 0},
		{term: 1, log: index(1).terms(1), match: m(2), want: 2},
		{term: 1, log: index(1).terms(1), app: index(3).terms(2), match: m(1), want: 0},
		{term: 1, log: index(1).terms(1), app: index(3).terms(2), match: m(2), want: 2},
		{term: 1, log: index(1).terms(1), app: index(3).terms(2), match: m(3), want: 3},

		// odd number of nodes
		{term: 1, log: index(1).terms(1), match: m(1, 1, 1), want: 0},
		{term: 1, log: index(1).terms(1), match: m(2, 1, 1), want: 0},
		{term: 1, log: index(1).terms(1), match: m(2, 1, 2), want: 2},
		{term: 1, log: index(1).terms(1), match: m(2, 2, 2), want: 2},
		{term: 1, log: index(1).terms(1, 1), app: index(4).terms(2, 2), match: m(2, 2, 2), want: 0},
		{term: 1, log: index(1).terms(1, 1), app: index(4).terms(2, 2), match: m(3, 3, 2), want: 3},
		{term: 1, log: index(1).terms(1, 1), app: index(4).terms(2, 2), match: m(4, 4, 5), want: 4},
		{term: 1, log: index(1).terms(1, 1), app: index(4).terms(2, 2), match: m(5, 4, 5), want: 5},

		// even number of nodes
		{term: 1, log: index(1).terms(1), match: m(1, 1), want: 0},
		{term: 1, log: index(1).terms(1), match: m(2, 1, 1, 1), want: 0},
		{term: 1, log: index(1).terms(1), match: m(2, 1, 2, 1), want: 0},
		{term: 1, log: index(1).terms(1), match: m(2, 1, 2, 2), want: 2},
		{term: 1, log: index(1).terms(1, 1), app: index(4).terms(2, 2), match: m(2, 2, 2, 1), want: 0},
		{term: 1, log: index(1).terms(1, 1), app: index(4).terms(2, 2), match: m(3, 2, 2, 3), want: 0},
		{term: 1, log: index(1).terms(1, 1), app: index(4).terms(2, 2), match: m(3, 3, 1, 3), want: 3},
		{term: 1, log: index(1).terms(1, 1), app: index(4).terms(2, 2), match: m(4, 4, 4, 5), want: 4},
		{term: 1, log: index(1).terms(1, 1), app: index(4).terms(2, 2), match: m(5, 4, 5, 5), want: 5},
	} {
		t.Run("", func(t *testing.T) {
			storage := newTestMemoryStorage(withPeers(1))
			require.NoError(t, storage.Append(tt.log))
			require.NoError(t, storage.SetHardState(pb.HardState{Term: tt.term}))
			sm := newTestRaft(1, 10, 2, storage)
			sm.becomeCandidate()
			sm.becomeLeader()
			require.Equal(t, tt.term+1, sm.Term)
			require.True(t, sm.appendEntry(tt.app...))
			// Add peers 2..n and feed in their reported match indices.
			for i, match := range tt.match {
				id := pb.PeerID(i + 1)
				if id > 1 {
					sm.applyConfChange(pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: id}.AsV2())
				}
				require.LessOrEqual(t, match, sm.raftLog.lastIndex())
				pr := sm.trk.Progress(id)
				pr.MaybeUpdate(match)
			}
			sm.maybeCommit()
			assert.Equal(t, tt.want, sm.raftLog.committed)
		})
	}
}
// TestAtRandomizedElectionTimeout tests that the followers who call
// atRandomizedElectionTimeout() will campaign uniformly randomly between the
// range of [electionTimeout, 2 * electionTimeout - 1].
func TestAtRandomizedElectionTimeout(t *testing.T) {
	tests := []struct {
		electionElapsed int64
		// wprobability is the expected probability of an election at
		// the given electionElapsed.
		wprobability float64
		round        bool
	}{
		// randomizedElectionTimeout = [10,15) since we are setting the
		// electionTimeoutJitter field to 5 below.
		// electionElapsed less than the electionTimeout should never campaign.
		{0, 0, false},
		{5, 0, false},
		{9, 0, false},
		// Since there are 5 possible values for randomizedElectionTimeout, we
		// expect the probability to be 1/5 for each value.
		{10, 0.2, true},
		{11, 0.2, true},
		{14, 0.2, true},
		//
		// No possible value of randomizedElectionTimeout [10,15) would cause an
		// election at electionElapsed = [15,19].
		{15, 0, false},
		{16, 0, false},
		{17, 0, false},
		{18, 0, false},
		{19, 0, false},
		//
		// Only one of the five values of randomizedElectionTimeout (11) evenly
		// divides electionElapsed = 22, so the probability is 1/5.
		{22, 0.2, true},
		//
		// Two of the five values of randomizedElectionTimeout (10, 11) evenly
		// divide electionElapsed = 110, so the probability is 2/5.
		{110, 0.4, true},
	}

	for i, tt := range tests {
		sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)))
		sm.electionElapsed = tt.electionElapsed
		sm.electionTimeoutJitter = 5
		// Re-randomize the timeout many times and measure how often the
		// node would campaign at this electionElapsed value.
		c := 0
		for j := 0; j < 10000; j++ {
			sm.resetRandomizedElectionTimeout()
			if sm.atRandomizedElectionTimeout() {
				c++
			}
		}
		got := float64(c) / 10000.0
		if tt.round {
			got = math.Floor(got*10+0.5) / 10.0
		}
		assert.Equal(t, tt.wprobability, got, "#%d", i)
	}
}
// TestStepIgnoreOldTermMsg to ensure that the Step function ignores the message
// from old term and does not pass it to the actual stepX function.
func TestStepIgnoreOldTermMsg(t *testing.T) {
called := false
fakeStep := func(r *raft, m pb.Message) error {
called = true
return nil
}
sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)))
sm.step = fakeStep
sm.Term = 2
sm.Step(pb.Message{Type: pb.MsgApp, Term: sm.Term - 1})
assert.False(t, called)
}
// TestHandleMsgApp ensures:
// 1. Reply false if log doesn’t contain an entry at prevLogIndex whose term matches prevLogTerm.
// 2. If an existing entry conflicts with a new one (same index but different terms),
// delete the existing entry and all that follow it; append any new entries not already in the log.
// 3. If leaderCommit > commitIndex, set commitIndex = min(leaderCommit, index of last new entry).
func TestHandleMsgApp(t *testing.T) {
	const term = 2
	init := index(1).terms(1, 2) // the initial log: entries (1,term=1), (2,term=2)
	// msgApp constructs a MsgApp from peer 2 to peer 1 carrying the given
	// log slice (prev entry ID + entries) and the leader's commit index.
	msgApp := func(term uint64, ls LogSlice, commit uint64) pb.Message {
		return pb.Message{
			From: 2, To: 1, Type: pb.MsgApp, Term: term,
			Index: ls.prev.index, LogTerm: ls.prev.term, Entries: ls.Entries(),
			Commit: commit,
		}
	}
	for _, tt := range []struct {
		commit uint64 // the initial commit index
		m      pb.Message
		wIndex  uint64 // expected last log index after handling m
		wCommit uint64 // expected commit index after handling m
		wReject bool   // whether the append is expected to be rejected
	}{
		// Ensure 1
		{m: msgApp(3, entryID{index: 2, term: 3}.terms(), 3), wIndex: 2, wReject: true}, // previous log mismatch
		{m: msgApp(3, entryID{index: 3, term: 3}.terms(), 3), wIndex: 2, wReject: true}, // previous log non-exist
		// Ensure 2
		{m: msgApp(2, entryID{index: 1, term: 1}.terms(), 1), wIndex: 2, wCommit: 1},
		{m: msgApp(3, entryID{}.terms(3), 1), wIndex: 1, wCommit: 1},
		{m: msgApp(2, entryID{index: 2, term: 2}.terms(2, 2), 3), wIndex: 4, wCommit: 3},
		{m: msgApp(2, entryID{index: 2, term: 2}.terms(2), 3), wIndex: 3, wCommit: 3},
		{m: msgApp(2, entryID{index: 1, term: 1}.terms(2), 4), wIndex: 2, wCommit: 2},
		// Appends overlapping the commit index.
		{commit: 2, m: msgApp(2, entryID{index: 1, term: 1}.terms(2), 2), wIndex: 2, wCommit: 2},
		{commit: 2, m: msgApp(2, entryID{index: 1, term: 1}.terms(2, 2, 2), 4), wIndex: 4, wCommit: 4},
		{commit: 2, m: msgApp(2, entryID{index: 2, term: 2}.terms(2), 3), wIndex: 3, wCommit: 3},
		// Something is wrong with the appended slice. Entry at index 2 is already
		// committed with term = 2, but we are receiving an append which says entry
		// 2 has term 1 and is committed. This must be rejected.
		{commit: 2, m: msgApp(2, entryID{index: 1, term: 1}.terms(1, 1), 3), wIndex: 2, wCommit: 2, wReject: true},
		// Ensure 3
		{m: msgApp(1, entryID{index: 1, term: 1}.terms(), 3), wIndex: 2, wCommit: 1},  // match entry 1, commit up to last new entry 1
		{m: msgApp(2, entryID{index: 1, term: 1}.terms(2), 3), wIndex: 2, wCommit: 2}, // match entry 1, commit up to last new entry 2
		{m: msgApp(2, entryID{index: 2, term: 2}.terms(), 3), wIndex: 2, wCommit: 2},  // match entry 2, commit up to last new entry 2
		{m: msgApp(2, entryID{index: 2, term: 2}.terms(), 4), wIndex: 2, wCommit: 2},  // commit up to log.last()
	} {
		t.Run("", func(t *testing.T) {
			// Seed storage with the initial log and the per-case hard state.
			storage := newTestMemoryStorage(withPeers(1, 2))
			require.NoError(t, storage.Append(init))
			require.NoError(t, storage.SetHardState(pb.HardState{
				Term:   term,
				Commit: tt.commit,
			}))
			sm := newTestRaft(1, 10, 1, storage)
			sm.handleAppendEntries(tt.m)
			assert.Equal(t, tt.wIndex, sm.raftLog.lastIndex())
			assert.Equal(t, tt.wCommit, sm.raftLog.committed)
			// Exactly one MsgAppResp is expected; its Reject flag reflects
			// whether the append was accepted.
			m := sm.readMessages()
			require.Len(t, m, 1)
			assert.Equal(t, tt.wReject, m[0].Reject)
		})
	}
}
// TestHandleHeartbeat ensures that the follower handles heartbeats properly:
// the commit index only moves forward, only when the local log is known to be
// a prefix of the leader's log (accTerm == leader term), and never beyond the
// local last index.
func TestHandleHeartbeat(t *testing.T) {
	commit := uint64(2)
	tests := []struct {
		m       pb.Message
		accTerm uint64 // term of the last accepted (appended) log entry
		wCommit uint64 // expected commit index after handling m
	}{
		{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit + 1}, 2, commit + 1},
		{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit - 1}, 2, commit}, // do not decrease commit
		{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit - 1}, 1, commit},
		// Do not increase the commit index if the log is not guaranteed to be a
		// prefix of the leader's log.
		{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit + 1}, 1, commit},
		// Do not increase the commit index beyond our log size.
		{pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeat, Term: 2, Commit: commit + 10}, 2, commit + 1}, // commit capped at the last log index
	}
	for i, tt := range tests {
		storage := newTestMemoryStorage(withPeers(1, 2))
		// Log of 3 entries whose last term is tt.accTerm.
		init := entryID{}.append(1, 1, tt.accTerm)
		require.NoError(t, storage.Append(init.entries))
		sm := newTestRaft(1, 5, 1, storage)
		sm.becomeFollower(init.term, 2)
		sm.raftLog.commitTo(LogMark{Term: init.term, Index: commit})
		sm.handleHeartbeat(tt.m)
		// A heartbeat always produces exactly one response.
		m := sm.readMessages()
		require.Len(t, m, 1, "#%d", i)
		assert.Equal(t, pb.MsgHeartbeatResp, m[0].Type, "#%d", i)
	}
}
// TestHandleHeartbeatRespStoreLivenessDisabled ensures that we re-send log
// entries when we get a heartbeat response, until the follower acknowledges
// being caught up via MsgAppResp.
func TestHandleHeartbeatRespStoreLivenessDisabled(t *testing.T) {
	storage := newTestMemoryStorage(withPeers(1, 2))
	require.NoError(t, storage.SetHardState(pb.HardState{Term: 3}))
	require.NoError(t, storage.Append(index(1).terms(1, 2, 3)))
	sm := newTestRaft(1, 5, 1, storage, withStoreLiveness(raftstoreliveness.Disabled{}))
	sm.becomeCandidate()
	sm.becomeLeader()
	sm.raftLog.commitTo(sm.raftLog.unstable.mark())
	// A heartbeat response from a node that is behind; re-send MsgApp
	sm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})
	msgs := sm.readMessages()
	require.Len(t, msgs, 1)
	assert.Equal(t, pb.MsgApp, msgs[0].Type)
	// A second heartbeat response generates another MsgApp re-send
	sm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})
	msgs = sm.readMessages()
	require.Len(t, msgs, 1)
	assert.Equal(t, pb.MsgApp, msgs[0].Type)
	// Once we have an MsgAppResp, heartbeats no longer send MsgApp.
	sm.Step(pb.Message{
		From: 2,
		Type: pb.MsgAppResp,
		// Ack everything the last MsgApp carried.
		Index:  msgs[0].Index + uint64(len(msgs[0].Entries)),
		Commit: sm.raftLog.lastIndex(),
	})
	// Consume the message sent in response to MsgAppResp
	sm.readMessages()
	sm.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp})
	msgs = sm.readMessages()
	require.Empty(t, msgs)
}
// TestHandleHeatbeatTimeoutStoreLivenessEnabled ensures that we re-send log
// entries on heartbeat timeouts only if we need to.
// NOTE(review): "Heatbeat" in the test name is a typo for "Heartbeat"; the
// name is kept as-is here to avoid churning the test identifier.
func TestHandleHeatbeatTimeoutStoreLivenessEnabled(t *testing.T) {
	storage := newTestMemoryStorage(withPeers(1, 2))
	require.NoError(t, storage.SetHardState(pb.HardState{Term: 3}))
	require.NoError(t, storage.Append(index(1).terms(1, 2, 3)))
	sm := newTestRaft(1, 5, 1, storage)
	sm.becomeCandidate()
	sm.becomeLeader()
	// Record fortification from the other peers so the leader retains
	// store-liveness support.
	sm.fortificationTracker.RecordFortification(pb.PeerID(2), 1)
	sm.fortificationTracker.RecordFortification(pb.PeerID(3), 1)
	sm.raftLog.commitTo(sm.raftLog.unstable.mark())
	// On heartbeat timeout, the leader sends a MsgApp.
	for ticks := sm.heartbeatTimeout; ticks > 0; ticks-- {
		sm.tick()
	}
	msgs := sm.readMessages()
	require.Len(t, msgs, 1)
	assert.Equal(t, pb.MsgApp, msgs[0].Type)
	// On another heartbeat timeout, the leader sends a MsgApp.
	for ticks := sm.heartbeatTimeout; ticks > 0; ticks-- {
		sm.tick()
	}
	msgs = sm.readMessages()
	require.Len(t, msgs, 1)
	assert.Equal(t, pb.MsgApp, msgs[0].Type)
	// Once the leader receives a MsgAppResp, it doesn't send MsgApp.
	sm.Step(pb.Message{
		From: 2,
		Type: pb.MsgAppResp,
		// Ack everything the last MsgApp carried.
		Index:  msgs[0].Index + uint64(len(msgs[0].Entries)),
		Commit: sm.raftLog.lastIndex(),
	})
	// Consume the message sent in response to MsgAppResp
	sm.readMessages()
	// On heartbeat timeout, the leader doesn't send a MsgApp because the follower
	// is up-to-date.
	for ticks := sm.heartbeatTimeout; ticks > 0; ticks-- {
		sm.tick()
	}
	msgs = sm.readMessages()
	require.Len(t, msgs, 0)
}
// TestMsgAppRespWaitReset verifies the resume behavior of a leader
// MsgAppResp: a follower's ack releases it from the probing/wait state so
// subsequent proposals are replicated to it immediately.
func TestMsgAppRespWaitReset(t *testing.T) {
	s := newTestMemoryStorage(withPeers(1, 2, 3))
	sm := newTestRaft(1, 5, 1, s)
	sm.becomeCandidate()
	sm.becomeLeader()
	// Run n1 which includes sending a message like the below
	// one to n2, but also appending to its own log.
	nextEnts(sm, s)
	// Node 2 acks the first entry, making it committed.
	sm.Step(pb.Message{
		From:  2,
		Type:  pb.MsgAppResp,
		Index: 1,
	})
	require.Equal(t, uint64(1), sm.raftLog.committed)
	// Also consume the MsgApp messages that update Commit on the followers.
	sm.readMessages()
	// A new command is now proposed on node 1.
	sm.Step(pb.Message{
		From:    1,
		Type:    pb.MsgProp,
		Entries: []pb.Entry{{}},
	})
	// The command is broadcast to all nodes not in the wait state.
	// Node 2 left the wait state due to its MsgAppResp, but node 3 is still waiting.
	msgs := sm.readMessages()
	require.Len(t, msgs, 1)
	assert.Equal(t, pb.MsgApp, msgs[0].Type)
	assert.Equal(t, pb.PeerID(2), msgs[0].To)
	assert.Len(t, msgs[0].Entries, 1)
	assert.Equal(t, uint64(2), msgs[0].Entries[0].Index)
	// Now Node 3 acks the first entry. This releases the wait and entry 2 is sent.
	sm.Step(pb.Message{
		From:  3,
		Type:  pb.MsgAppResp,
		Index: 1,
	})
	msgs = sm.readMessages()
	require.Len(t, msgs, 1)
	assert.Equal(t, pb.MsgApp, msgs[0].Type)
	assert.Equal(t, pb.PeerID(3), msgs[0].To)
	assert.Len(t, msgs[0].Entries, 1)
	assert.Equal(t, uint64(2), msgs[0].Entries[0].Index)
}
// TestRecvMsgVote exercises vote-request handling using regular MsgVote.
func TestRecvMsgVote(t *testing.T) {
	testRecvMsgVote(t, pb.MsgVote)
}
// TestRecvMsgPreVote exercises vote-request handling using MsgPreVote.
func TestRecvMsgPreVote(t *testing.T) {
	testRecvMsgVote(t, pb.MsgPreVote)
}
// testRecvMsgVote checks whether a vote request is granted or rejected across
// combinations of the recipient's state, the candidate's log position
// (index/logTerm), and any vote already cast this term. The recipient's log
// is fixed at entries (1,term=2), (2,term=2), so a candidate needs
// logTerm > 2, or logTerm == 2 with index >= 2, to be considered up-to-date.
func testRecvMsgVote(t *testing.T, msgType pb.MessageType) {
	tests := []struct {
		state          pb.StateType
		index, logTerm uint64
		voteFor        pb.PeerID
		wreject        bool
	}{
		{pb.StateFollower, 0, 0, None, true},
		{pb.StateFollower, 0, 1, None, true},
		{pb.StateFollower, 0, 2, None, true},
		{pb.StateFollower, 0, 3, None, false},
		{pb.StateFollower, 1, 0, None, true},
		{pb.StateFollower, 1, 1, None, true},
		{pb.StateFollower, 1, 2, None, true},
		{pb.StateFollower, 1, 3, None, false},
		{pb.StateFollower, 2, 0, None, true},
		{pb.StateFollower, 2, 1, None, true},
		{pb.StateFollower, 2, 2, None, false},
		{pb.StateFollower, 2, 3, None, false},
		{pb.StateFollower, 3, 0, None, true},
		{pb.StateFollower, 3, 1, None, true},
		{pb.StateFollower, 3, 2, None, false},
		{pb.StateFollower, 3, 3, None, false},
		{pb.StateFollower, 3, 2, 2, false},
		{pb.StateFollower, 3, 2, 1, true},
		{pb.StateLeader, 3, 3, 1, true},
		{pb.StatePreCandidate, 3, 3, 1, true},
		{pb.StateCandidate, 3, 3, 1, true},
	}
	for i, tt := range tests {
		sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)))
		sm.state = tt.state
		// Install the step function matching the forced state.
		switch tt.state {
		case pb.StateFollower:
			sm.step = stepFollower
		case pb.StateCandidate, pb.StatePreCandidate:
			sm.step = stepCandidate
		case pb.StateLeader:
			sm.step = stepLeader
		}
		sm.Vote = tt.voteFor
		sm.raftLog = newLog(&MemoryStorage{ls: LogSlice{
			entries: index(1).terms(2, 2),
		}}, nil)
		// raft.Term is greater than or equal to raft.raftLog.lastTerm. In this
		// test we're only testing MsgVote responses when the campaigning node
		// has a different raft log compared to the recipient node.
		// Additionally we're verifying behaviour when the recipient node has
		// already given out its vote for its current term. We're not testing
		// what the recipient node does when receiving a message with a
		// different term number, so we simply initialize both term numbers to
		// be the same.
		term := max(sm.raftLog.lastEntryID().term, tt.logTerm)
		sm.Term = term
		sm.Step(pb.Message{Type: msgType, Term: term, From: 2, Index: tt.index, LogTerm: tt.logTerm})
		// Exactly one vote response of the matching type is expected.
		msgs := sm.readMessages()
		require.Len(t, msgs, 1, "#%d", i)
		assert.Equal(t, voteRespMsgType(msgType), msgs[0].Type, "#%d", i)
		assert.Equal(t, tt.wreject, msgs[0].Reject, "#%d", i)
	}
}
// TestStateTransition enumerates every from->to state transition and checks
// whether it is allowed, and if so, the resulting term and leader. Forbidden
// transitions are expected to panic inside the become* methods.
func TestStateTransition(t *testing.T) {
	tests := []struct {
		from   pb.StateType
		to     pb.StateType
		wallow bool       // whether the transition is permitted (no panic)
		wterm  uint64     // expected term after a permitted transition
		wlead  pb.PeerID  // expected leader after a permitted transition
	}{
		{pb.StateFollower, pb.StateFollower, true, 1, None},
		{pb.StateFollower, pb.StatePreCandidate, true, 0, None},
		{pb.StateFollower, pb.StateCandidate, true, 1, None},
		{pb.StateFollower, pb.StateLeader, false, 0, None},
		{pb.StatePreCandidate, pb.StateFollower, true, 0, None},
		{pb.StatePreCandidate, pb.StatePreCandidate, true, 0, None},
		{pb.StatePreCandidate, pb.StateCandidate, true, 1, None},
		{pb.StatePreCandidate, pb.StateLeader, true, 0, 1},
		{pb.StateCandidate, pb.StateFollower, true, 0, None},
		{pb.StateCandidate, pb.StatePreCandidate, true, 0, None},
		{pb.StateCandidate, pb.StateCandidate, true, 1, None},
		{pb.StateCandidate, pb.StateLeader, true, 0, 1},
		{pb.StateLeader, pb.StateFollower, true, 1, None},
		{pb.StateLeader, pb.StatePreCandidate, false, 0, None},
		{pb.StateLeader, pb.StateCandidate, false, 1, None},
		{pb.StateLeader, pb.StateLeader, true, 0, 1},
	}
	for i, tt := range tests {
		// Run each case in a closure so a recover()ed panic only aborts this
		// case; a panic on a transition marked wallow fails the test.
		func() {
			defer func() {
				if r := recover(); r != nil {
					assert.False(t, tt.wallow, "#%d", i)
				}
			}()
			sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)))
			sm.state = tt.from
			switch tt.to {
			case pb.StateFollower:
				sm.becomeFollower(tt.wterm, tt.wlead)
			case pb.StatePreCandidate:
				sm.becomePreCandidate()
			case pb.StateCandidate:
				sm.becomeCandidate()
			case pb.StateLeader:
				sm.becomeLeader()
			}
			assert.Equal(t, tt.wterm, sm.Term, "#%d", i)
			assert.Equal(t, tt.wlead, sm.lead, "#%d", i)
		}()
	}
}
// TestAllServerStepdown verifies that a node in any state reverts to follower
// when it receives a MsgVote or MsgApp carrying a higher term, adopting that
// term and recording the leader only for MsgApp (a vote request does not
// identify a leader).
func TestAllServerStepdown(t *testing.T) {
	tests := []struct {
		state pb.StateType

		wstate pb.StateType // expected state after the higher-term message
		wterm  uint64       // expected term after the higher-term message
		windex uint64       // expected last log index (leader has its empty entry)
	}{
		{pb.StateFollower, pb.StateFollower, 3, 0},
		{pb.StatePreCandidate, pb.StateFollower, 3, 0},
		{pb.StateCandidate, pb.StateFollower, 3, 0},
		{pb.StateLeader, pb.StateFollower, 3, 1},
	}
	tmsgTypes := [...]pb.MessageType{pb.MsgVote, pb.MsgApp}
	tterm := uint64(3)
	for i, tt := range tests {
		sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
		// Drive the node into the state under test.
		switch tt.state {
		case pb.StateFollower:
			sm.becomeFollower(1, None)
		case pb.StatePreCandidate:
			sm.becomePreCandidate()
		case pb.StateCandidate:
			sm.becomeCandidate()
		case pb.StateLeader:
			sm.becomeCandidate()
			sm.becomeLeader()
		}
		for j, msgType := range tmsgTypes {
			sm.Step(pb.Message{From: 2, Type: msgType, Term: tterm, LogTerm: tterm})
			assert.Equal(t, tt.wstate, sm.state, "#%d.%d", i, j)
			assert.Equal(t, tt.wterm, sm.Term, "#%d.%d", i, j)
			assert.Equal(t, tt.windex, sm.raftLog.lastIndex(), "#%d.%d", i, j)
			assert.Len(t, sm.raftLog.allEntries(), int(tt.windex), "#%d.%d", i, j)
			// A vote request does not reveal the leader; an append does.
			wlead := pb.PeerID(2)
			if msgType == pb.MsgVote {
				wlead = None
			}
			assert.Equal(t, wlead, sm.lead, "#%d.%d", i, j)
		}
	}
}
// TestCandidateResetTermMsgHeartbeat runs the candidate term-reset scenario
// using a leader MsgHeartbeat, with and without store liveness.
func TestCandidateResetTermMsgHeartbeat(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testCandidateResetTerm(t, pb.MsgHeartbeat, storeLivenessEnabled)
		})
}
// TestCandidateResetTermMsgApp runs the candidate term-reset scenario using a
// leader MsgApp, with and without store liveness.
func TestCandidateResetTermMsgApp(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testCandidateResetTerm(t, pb.MsgApp, storeLivenessEnabled)
		})
}
// testCandidateResetTerm tests when a candidate receives a
// MsgHeartbeat or MsgApp from leader, "Step" resets the term
// with leader's and reverts back to follower.
func testCandidateResetTerm(t *testing.T, mt pb.MessageType, storeLivenessEnabled bool) {
	// Build the three-node cluster, with a shared liveness fabric when store
	// liveness is enabled, and liveness disabled otherwise.
	var fabric *raftstoreliveness.LivenessFabric
	var a, b, c *raft
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3)
		a = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(1)))
		b = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(2)))
		c = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(3)))
	} else {
		a = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		b = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		c = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
	}
	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, a, b, c)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	assert.Equal(t, pb.StateLeader, a.state)
	assert.Equal(t, pb.StateFollower, b.state)
	assert.Equal(t, pb.StateFollower, c.state)
	// isolate 3 and increase term in rest
	nt.isolate(3)
	if storeLivenessEnabled {
		// We need to withdraw from 1 to allow 2 to campaign and get elected.
		nt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
	}
	nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
	if storeLivenessEnabled {
		// We need to grant support to 1, and withdraw it from 2 (the current
		// leader) to allow 1 to campaign and get elected.
		nt.livenessFabric.GrantSupportForPeerFromAllPeers(1)
		nt.livenessFabric.WithdrawSupportForPeerFromAllPeers(2)
	}
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	assert.Equal(t, pb.StateLeader, a.state)
	assert.Equal(t, pb.StateFollower, b.state)
	if storeLivenessEnabled {
		// We need to withdraw support from 1 to allow 3 to campaign.
		nt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
	}
	// trigger campaign in isolated c: let its full randomized election
	// timeout elapse while it hears nothing from the leader.
	c.resetRandomizedElectionTimeout()
	for i := int64(0); i < c.randomizedElectionTimeout; i++ {
		c.tick()
	}
	c.advanceMessagesAfterAppend()
	assert.Equal(t, pb.StateCandidate, c.state)
	nt.recover()
	// leader sends to isolated candidate
	// and expects candidate to revert to follower
	nt.send(pb.Message{From: 1, To: 3, Term: a.Term, Type: mt})
	assert.Equal(t, pb.StateFollower, c.state)
	// follower c term is reset with leader's
	assert.Equal(t, a.Term, c.Term)
}
// The following three tests exercise the behavior of a (pre-)candidate when its
// own self-vote is delivered back to itself after the peer has already learned
// that it has lost the election. The self-vote should be ignored in these cases.
func TestCandidateSelfVoteAfterLostElection(t *testing.T) {
	// preVote disabled: the node campaigns directly as a candidate.
	testCandidateSelfVoteAfterLostElection(t, false)
}
// TestCandidateSelfVoteAfterLostElectionPreVote is the pre-vote variant of
// TestCandidateSelfVoteAfterLostElection.
func TestCandidateSelfVoteAfterLostElectionPreVote(t *testing.T) {
	testCandidateSelfVoteAfterLostElection(t, true)
}
// testCandidateSelfVoteAfterLostElection checks that a delayed self-vote is a
// no-op once the node has already learned it lost the election.
func testCandidateSelfVoteAfterLostElection(t *testing.T, preVote bool) {
	n1 := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
	n1.preVote = preVote
	// n1 starts an election; hold back the messages (including its own
	// self-vote) that are waiting on the append/sync step.
	n1.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	pending := n1.takeMessagesAfterAppend()
	// Before n1 gets a chance to sync its vote to disk and count its
	// self-vote, it learns that n2 already won the election, so it reverts
	// to follower.
	n1.Step(pb.Message{From: 2, To: 1, Term: n1.Term, Type: pb.MsgHeartbeat})
	assert.Equal(t, pb.StateFollower, n1.state)
	// Delivering the stale self-vote now must change nothing.
	n1.stepOrSend(pending)
	assert.Equal(t, pb.StateFollower, n1.state)
	// The self-vote never makes it into the election tracker.
	granted, _, _ := n1.electionTracker.TallyVotes()
	assert.Zero(t, granted)
}
// TestCandidateDeliversPreCandidateSelfVoteAfterBecomingCandidate verifies
// that a stale pre-vote self-vote, delivered after the node has already moved
// on to a real candidacy, is ignored — while the real self-vote still counts
// toward quorum.
func TestCandidateDeliversPreCandidateSelfVoteAfterBecomingCandidate(t *testing.T) {
	sm := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
	sm.preVote = true
	// n1 calls an election.
	sm.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	assert.Equal(t, pb.StatePreCandidate, sm.state)
	steps := sm.takeMessagesAfterAppend()
	// n1 receives pre-candidate votes from both other peers before
	// voting for itself. n1 becomes a candidate.
	// NB: pre-vote messages carry the local term + 1.
	sm.Step(pb.Message{From: 2, To: 1, Term: sm.Term + 1, Type: pb.MsgPreVoteResp})
	sm.Step(pb.Message{From: 3, To: 1, Term: sm.Term + 1, Type: pb.MsgPreVoteResp})
	assert.Equal(t, pb.StateCandidate, sm.state)
	// n1 remains a candidate even after its delayed pre-vote self-vote is
	// delivered.
	sm.stepOrSend(steps)
	assert.Equal(t, pb.StateCandidate, sm.state)
	steps = sm.takeMessagesAfterAppend()
	// Its pre-vote self-vote does not make its way to its ProgressTracker.
	granted, _, _ := sm.electionTracker.TallyVotes()
	assert.Zero(t, granted)
	// A single vote from n2 does not move n1 to the leader.
	sm.Step(pb.Message{From: 2, To: 1, Term: sm.Term, Type: pb.MsgVoteResp})
	assert.Equal(t, pb.StateCandidate, sm.state)
	// n1 becomes the leader once its self-vote is received because now
	// quorum is reached.
	sm.stepOrSend(steps)
	assert.Equal(t, pb.StateLeader, sm.state)
}
// TestLeaderMsgAppSelfAckAfterTermChange verifies that a leader's pending
// self-ack of its own MsgApp is harmless after it has stepped down to
// follower at a higher term: the stale MsgAppResp carries the old term and is
// dropped.
func TestLeaderMsgAppSelfAckAfterTermChange(t *testing.T) {
	r := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
	r.becomeCandidate()
	r.becomeLeader()
	// Propose a write and hold back the messages waiting on the append step.
	r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
	deferred := r.takeMessagesAfterAppend()
	// A heartbeat at a higher term announces a new leader; r steps down.
	r.Step(pb.Message{From: 2, To: 1, Term: r.Term + 1, Type: pb.MsgHeartbeat})
	assert.Equal(t, pb.StateFollower, r.state)
	// Replaying the held-back messages delivers r's stale self-ack of its
	// MsgApp; its earlier term makes it a no-op and r stays a follower.
	r.stepOrSend(deferred)
	assert.Equal(t, pb.StateFollower, r.state)
}
// TestLeaderStepdownWhenQuorumActive checks that a check-quorum leader keeps
// its leadership as long as it keeps hearing from a quorum of followers.
func TestLeaderStepdownWhenQuorumActive(t *testing.T) {
	leader := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
	leader.checkQuorum = true
	leader.becomeCandidate()
	leader.becomeLeader()
	for ticks := int64(0); ticks <= leader.electionTimeout; ticks++ {
		// A fresh heartbeat response from node 2 arrives before every tick,
		// so a quorum (nodes 1 and 2) is always recently active.
		leader.Step(pb.Message{From: 2, Type: pb.MsgHeartbeatResp, Term: leader.Term})
		leader.tick()
	}
	assert.Equal(t, pb.StateLeader, leader.state)
}
// TestLeaderStepdownWhenQuorumLost runs the quorum-lost step-down scenario
// with and without store liveness.
func TestLeaderStepdownWhenQuorumLost(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testLeaderStepdownWhenQuorumLost(t, storeLivenessEnabled)
		})
}
func testLeaderStepdownWhenQuorumLost(t *testing.T, storeLivenessEnabled bool) {
var fabric *raftstoreliveness.LivenessFabric
var sm *raft
if storeLivenessEnabled {
fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3)
sm = newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
withStoreLiveness(fabric.GetStoreLiveness(1)))
} else {
sm = newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
withStoreLiveness(raftstoreliveness.Disabled{}))
}
sm.checkQuorum = true
sm.becomeCandidate()
sm.becomeLeader()
assert.Equal(t, pb.StateLeader, sm.state)
for i := int64(0); i < sm.electionTimeout; i++ {
sm.tick()
}
assert.Equal(t, pb.StateFollower, sm.state)
}
// TestLeaderSupersedingWithCheckQuorum runs the leader-superseding scenario
// with and without store liveness.
func TestLeaderSupersedingWithCheckQuorum(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testLeaderSupersedingWithCheckQuorum(t, storeLivenessEnabled)
		})
}
// testLeaderSupersedingWithCheckQuorum verifies that under check-quorum a
// new candidate can only supersede an established leader once the voters'
// election timers (or, with store liveness, their support for the leader)
// have expired.
func testLeaderSupersedingWithCheckQuorum(t *testing.T, storeLivenessEnabled bool) {
	// Build the three-node cluster, with or without a liveness fabric.
	var fabric *raftstoreliveness.LivenessFabric
	var n1, n2, n3 *raft
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3)
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(1)))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(2)))
		n3 = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(3)))
	} else {
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n3 = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
	}
	n1.checkQuorum = true
	n2.checkQuorum = true
	n3.checkQuorum = true
	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, n1, n2, n3)
	// Prevent campaigning from n2 while its timer advances below.
	setRandomizedElectionTimeout(n2, n2.electionTimeout+1)
	for i := int64(0); i < n2.electionTimeout; i++ {
		n2.tick()
	}
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	assert.Equal(t, pb.StateLeader, n1.state)
	assert.Equal(t, pb.StateFollower, n3.state)
	if storeLivenessEnabled {
		// We need to withdraw support from 1 so 3 can campaign and not get rejected
		// because other followers support 1.
		nt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
	}
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
	if storeLivenessEnabled {
		// 2 voted for 3 since its support for 1 was withdrawn.
		assert.Equal(t, pb.StateLeader, n3.state)
	} else {
		// 2 rejected 3's vote request since its electionElapsed had not reached to
		// electionTimeout.
		assert.Equal(t, pb.StateCandidate, n3.state)
	}
	// Letting b's electionElapsed reach to electionTimeout
	for i := int64(0); i < n2.electionTimeout; i++ {
		n2.tick()
	}
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
	assert.Equal(t, pb.StateLeader, n3.state)
}
// TestLeaderElectionWithCheckQuorum runs the check-quorum election scenario
// with and without store liveness.
func TestLeaderElectionWithCheckQuorum(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testLeaderElectionWithCheckQuorum(t, storeLivenessEnabled)
		})
}
// testLeaderElectionWithCheckQuorum verifies that with check-quorum enabled a
// fresh cluster can elect a leader immediately, and that a later candidate can
// win only after the voters' election timers (or leader support) have expired.
func testLeaderElectionWithCheckQuorum(t *testing.T, storeLivenessEnabled bool) {
	// Build the three-node cluster, with or without a liveness fabric.
	var fabric *raftstoreliveness.LivenessFabric
	var a, b, c *raft
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3)
		a = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(1)))
		b = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(2)))
		c = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(3)))
	} else {
		a = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		b = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		c = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
	}
	a.checkQuorum = true
	b.checkQuorum = true
	c.checkQuorum = true
	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, a, b, c)
	setRandomizedElectionTimeout(a, a.electionTimeout+1)
	setRandomizedElectionTimeout(b, b.electionTimeout+2)
	// Immediately after creation, votes are cast regardless of the
	// election timeout.
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	assert.Equal(t, pb.StateLeader, a.state)
	assert.Equal(t, pb.StateFollower, b.state)
	assert.Equal(t, pb.StateFollower, c.state)
	// need to reset randomizedElectionTimeout larger than electionTimeout again,
	// because the value might be reset to electionTimeout since the last state changes
	setRandomizedElectionTimeout(a, a.electionTimeout+1)
	setRandomizedElectionTimeout(b, b.electionTimeout+2)
	if storeLivenessEnabled {
		// We need to withdraw from 1 to allow 3 to campaign and get elected.
		nt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
	}
	for i := int64(0); i < a.electionTimeout; i++ {
		a.tick()
	}
	// Increment electionElapsed to electionTimeout. This will allow b to vote for
	// c when it campaigns.
	if storeLivenessEnabled {
		// Tick b once. This will allow it to realize that it no longer supports a
		// leader and will forward its electionElapsed to electionTimeout.
		b.tick()
	} else {
		for i := int64(0); i < b.electionTimeout; i++ {
			b.tick()
		}
	}
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
	assert.Equal(t, pb.StateFollower, a.state)
	assert.Equal(t, pb.StateLeader, c.state)
}
// TestFreeStuckCandidateWithCheckQuorum ensures that a candidate with a higher term
// can disrupt the leader even if the leader still "officially" holds the lease. The
// leader is expected to step down and adopt the candidate's term.
func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testFreeStuckCandidateWithCheckQuorum(t, storeLivenessEnabled)
		})
}
// testFreeStuckCandidateWithCheckQuorum drives a partitioned node into a
// "stuck candidate" state at a higher term, then checks how the leader frees
// it: without store liveness the leader steps down and adopts the higher term
// immediately; with store liveness it records a step-down intent and waits
// for its fortified support to expire first.
func testFreeStuckCandidateWithCheckQuorum(t *testing.T, storeLivenessEnabled bool) {
	// Build the three-node cluster, with or without a liveness fabric.
	var fabric *raftstoreliveness.LivenessFabric
	var a, b, c *raft
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3)
		a = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(1)))
		b = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(2)))
		c = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(3)))
	} else {
		a = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		b = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		c = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
	}
	a.checkQuorum = true
	b.checkQuorum = true
	c.checkQuorum = true
	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, a, b, c)
	// Elect node 1 as leader.
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	assert.Equal(t, pb.StateLeader, a.state)
	if storeLivenessEnabled {
		assert.Equal(t, hlc.MaxTimestamp, getBasicStatus(a).LeadSupportUntil)
	}
	nt.isolate(1)
	if storeLivenessEnabled {
		// For the purposes of this test, we want 3 to campaign and get rejected.
		// However, if we withdraw the support between 2 and 1, 2 will vote for 3
		// when it campaigns. Therefore, we only withdraw the support between
		// 1 and 3.
		nt.livenessFabric.WithdrawSupport(1, 3)
		nt.livenessFabric.WithdrawSupport(3, 1)
	}
	// Each failed campaign bumps c's term while leaving b untouched.
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
	assert.Equal(t, pb.StateFollower, b.state)
	assert.Equal(t, pb.StateCandidate, c.state)
	assert.Equal(t, b.Term+1, c.Term)
	// Vote again for safety.
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
	assert.Equal(t, pb.StateFollower, b.state)
	assert.Equal(t, pb.StateCandidate, c.state)
	assert.Equal(t, b.Term+2, c.Term)
	nt.recover()
	if storeLivenessEnabled {
		// Recover the store liveness layer as well.
		nt.livenessFabric.GrantSupportForPeerFromAllPeers(1)
	}
	// If the stuck candidate were to talk to the follower, it may be ignored,
	// depending on whether the follower is fortified by the leader.
	nt.send(pb.Message{From: 3, To: 2, Type: pb.MsgAppResp, Term: c.Term})
	if storeLivenessEnabled {
		assert.Equal(t, c.Term-2, b.Term)
	} else {
		assert.Equal(t, c.Term, b.Term)
	}
	// Disrupt the leader so that the stuck peer is freed. The leader steps down
	// immediately, but only changes its term if it was not fortified. If it was,
	// it waits for defortification.
	hbType := pb.MsgHeartbeat
	if storeLivenessEnabled {
		hbType = pb.MsgFortifyLeader
	}
	nt.send(pb.Message{From: 1, To: 3, Type: hbType, Term: a.Term})
	if storeLivenessEnabled {
		// Expect that we are still the leader since it's still not safe to step
		// down, however, the step-down intent is recorded.
		assert.Equal(t, pb.StateLeader, a.state)
		assert.Equal(t, true, a.fortificationTracker.SteppingDown())
		assert.Equal(t, c.Term, a.fortificationTracker.SteppingDownTerm())
		// The leader hasn't defortified yet, so 3 can't win an election.
		nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
		assert.Equal(t, pb.StateCandidate, c.state)
		assert.Equal(t, pb.StateLeader, a.state)
		assert.Equal(t, c.Term-3, a.Term)
		assert.Equal(t, a.id, a.lead)
		// Expire the support, and tick it once. It should step down.
		nt.livenessFabric.SetSupportExpired(1, true)
		a.tick()
	}
	assert.Equal(t, pb.StateFollower, a.state)
	// Node 1 doesn't remember that it was the leader.
	assert.Equal(t, None, a.lead)
	if storeLivenessEnabled {
		// Since node 3 campaigned one extra time above, it will have a term that is
		// higher than node 1 by one.
		assert.Equal(t, c.Term-1, a.Term)
	} else {
		assert.Equal(t, c.Term, a.Term)
	}
	// Vote again, should become leader this time.
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
	assert.Equal(t, pb.StateLeader, c.state)
}
// TestNonPromotableVoterWithCheckQuorum checks that a node removed from the
// configuration (hence non-promotable) never campaigns, even after its
// election timer fully elapses, and that it still follows the elected leader.
func TestNonPromotableVoterWithCheckQuorum(t *testing.T) {
	n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
	n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1)))
	n1.checkQuorum = true
	n2.checkQuorum = true
	nt := newNetwork(n1, n2)
	setRandomizedElectionTimeout(n2, n2.electionTimeout+1)
	// newNetwork overwrites some internal state, so remove node 2 again to
	// make it non-promotable.
	n2.applyConfChange(pb.ConfChange{Type: pb.ConfChangeRemoveNode, NodeID: 2}.AsV2())
	require.False(t, n2.promotable())
	// Let n2's election timer elapse; being non-promotable, it must not
	// start a campaign.
	for remaining := n2.electionTimeout; remaining > 0; remaining-- {
		n2.tick()
	}
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	assert.Equal(t, pb.StateLeader, n1.state)
	assert.Equal(t, pb.StateFollower, n2.state)
	assert.Equal(t, pb.PeerID(1), n2.lead)
}
// TestDisruptiveFollower tests isolated follower,
// with slow network incoming from leader, election times out
// to become a candidate with an increased term. Then, the
// candidate's response to late leader heartbeat forces the leader
// to step down.
func TestDisruptiveFollower(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testDisruptiveFollower(t, storeLivenessEnabled)
		})
}
// testDisruptiveFollower is the body of TestDisruptiveFollower. It sets up a
// 3-node cluster with checkQuorum, lets n3's election timeout elapse so it
// campaigns at a higher term, and then checks what a late, lower-term leader
// heartbeat does to the current leader.
func testDisruptiveFollower(t *testing.T, storeLivenessEnabled bool) {
	var fabric *raftstoreliveness.LivenessFabric
	var n1, n2, n3 *raft
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3)
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(1)))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(2)))
		n3 = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(3)))
	} else {
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n3 = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
	}

	n1.checkQuorum = true
	n2.checkQuorum = true
	n3.checkQuorum = true

	n1.becomeFollower(1, None)
	n2.becomeFollower(1, None)
	n3.becomeFollower(1, None)

	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, n1, n2, n3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})

	// check state
	require.Equal(t, pb.StateLeader, n1.state)
	require.Equal(t, pb.StateFollower, n2.state)
	require.Equal(t, pb.StateFollower, n3.state)

	if storeLivenessEnabled {
		// We need to withdraw support from 1 so 3 can campaign and not get rejected
		// because of store liveness support.
		nt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
	}

	// etcd server "advanceTicksForElection" on restart;
	// this is to expedite campaign trigger when given larger
	// election timeouts (e.g. multi-datacenter deploy)
	// Or leader messages are being delayed while ticks elapse
	setRandomizedElectionTimeout(n3, n3.electionTimeout+2)
	for i := int64(0); i < n3.randomizedElectionTimeout-1; i++ {
		n3.tick()
	}

	// ideally, before last election tick elapses,
	// the follower n3 receives "pb.MsgApp" or "pb.MsgHeartbeat"
	// from leader n1, and then resets its "electionElapsed"
	// however, last tick may elapse before receiving any
	// messages from leader, thus triggering campaign
	n3.tick()

	// n1 is still leader yet
	// while its heartbeat to candidate n3 is being delayed

	// check state
	require.Equal(t, pb.StateLeader, n1.state)
	require.Equal(t, pb.StateFollower, n2.state)
	require.Equal(t, pb.StateCandidate, n3.state)

	// check term: n3 has incremented its term by campaigning.
	require.Equal(t, uint64(2), n1.Term)
	require.Equal(t, uint64(2), n2.Term)
	require.Equal(t, uint64(3), n3.Term)

	// while outgoing vote requests are still queued in n3,
	// leader heartbeat finally arrives at candidate n3
	// however, due to delayed network from leader, leader
	// heartbeat was sent with lower term than candidate's
	nt.send(pb.Message{From: 1, To: 3, Term: n1.Term, Type: pb.MsgHeartbeat})

	// then candidate n3 responds with "pb.MsgAppResp" of higher term
	// and leader steps down from a message with higher term
	// this is to disrupt the current leader, so that candidate
	// with higher term can be freed with following election

	// check state
	require.Equal(t, pb.StateFollower, n1.state)
	require.Equal(t, pb.StateFollower, n2.state)

	if storeLivenessEnabled {
		// Since the support for 1 was withdrawn, the inFortifyLease is no longer
		// valid, and n3 will receive votes and become a leader.
		require.Equal(t, pb.StateLeader, n3.state)
		require.Equal(t, uint64(3), n1.Term)
		require.Equal(t, uint64(3), n2.Term)
		require.Equal(t, uint64(3), n3.Term)
	} else {
		// Since other peers still hold a valid inHeartbeatLease, n3 will not
		// receive enough votes to become a leader.
		require.Equal(t, pb.StateCandidate, n3.state)
		require.Equal(t, uint64(3), n1.Term)
		require.Equal(t, uint64(2), n2.Term)
		require.Equal(t, uint64(3), n3.Term)
	}
}
// TestDisruptiveFollowerPreVote tests an isolated follower:
// with slow network incoming from leader, election times out
// to become a pre-candidate with less log than current leader.
// Then pre-vote phase prevents this isolated node from forcing
// current leader to step down, thus fewer disruptions.
// Runs once with store liveness enabled and once disabled.
func TestDisruptiveFollowerPreVote(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testDisruptiveFollowerPreVote(t, storeLivenessEnabled)
		})
}
// testDisruptiveFollowerPreVote is the body of TestDisruptiveFollowerPreVote.
// It isolates n3 so it falls behind on the log, enables preVote, then has n3
// campaign and verifies the pre-vote phase keeps the current leader in place.
func testDisruptiveFollowerPreVote(t *testing.T, storeLivenessEnabled bool) {
	var fabric *raftstoreliveness.LivenessFabric
	var n1, n2, n3 *raft
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3)
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(1)))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(2)))
		n3 = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(3)))
	} else {
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n3 = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
	}

	n1.checkQuorum = true
	n2.checkQuorum = true
	n3.checkQuorum = true

	n1.becomeFollower(1, None)
	n2.becomeFollower(1, None)
	n3.becomeFollower(1, None)

	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, n1, n2, n3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})

	// check state
	require.Equal(t, pb.StateLeader, n1.state)
	require.Equal(t, pb.StateFollower, n2.state)
	require.Equal(t, pb.StateFollower, n3.state)

	// Isolate n3 and commit three proposals so n3's log falls behind.
	nt.isolate(3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})

	n1.preVote = true
	n2.preVote = true
	n3.preVote = true
	nt.recover()

	if storeLivenessEnabled {
		// We need to withdraw support from 1 so 3 can campaign and not get rejected
		// because of store liveness support.
		nt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
	}

	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})

	// check state
	require.Equal(t, pb.StateLeader, n1.state)
	require.Equal(t, pb.StateFollower, n2.state)
	if storeLivenessEnabled {
		// Since the peers no longer hold a valid inFortifyLease, 3 will receive
		// rejection votes and become a follower again.
		require.Equal(t, pb.StateFollower, n3.state)
	} else {
		// Peers will just ignore the MsgVoteRequest due to the inHeartbeatLease and
		// 3 will remain a preCandidate.
		require.Equal(t, pb.StatePreCandidate, n3.state)
	}

	// check term: pre-vote must not have bumped any real terms.
	require.Equal(t, uint64(2), n1.Term)
	require.Equal(t, uint64(2), n2.Term)
	require.Equal(t, uint64(2), n3.Term)

	// delayed leader heartbeat does not force current leader to step down
	nt.send(pb.Message{From: 1, To: 3, Term: n1.Term, Type: pb.MsgHeartbeat})
	require.Equal(t, pb.StateLeader, n1.state)
}
// TestPreCandidateIgnoresDefortification tests that a pre-candidate ignores
// MsgDeFortifyLeader and doesn't become a follower again, while any other
// leader message at the same term does demote it back to follower.
func TestPreCandidateIgnoresDefortification(t *testing.T) {
	var fabric *raftstoreliveness.LivenessFabric
	var n1, n2 *raft
	fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2)
	n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)),
		withStoreLiveness(fabric.GetStoreLiveness(1)))
	n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2)),
		withStoreLiveness(fabric.GetStoreLiveness(2)))

	n1.checkQuorum = true
	n2.checkQuorum = true
	n1.preVote = true
	n2.preVote = true

	n1.becomeFollower(1, None)
	n2.becomeFollower(1, None)

	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, n1, n2)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})

	// Check raft states.
	require.Equal(t, pb.StateLeader, n1.state)
	require.Equal(t, pb.StateFollower, n2.state)

	// The term is 2 for both nodes.
	require.Equal(t, uint64(2), n1.Term)
	require.Equal(t, uint64(2), n2.Term)

	// Withdraw 2's support for 1. This allows 2 to pre-campaign since it's not
	// supporting a fortified leader.
	nt.livenessFabric.WithdrawSupportFor(2, 1)

	// Isolate 1 so that it doesn't receive the MsgVoteRequest from 2, and
	// therefore it doesn't vote for it.
	nt.isolate(1)
	nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})

	// 2 is now a pre-candidate.
	require.Equal(t, pb.StatePreCandidate, n2.state)

	// 2 should remain a PreCandidate even if it receives a MsgDefortifyLeader.
	nt.send(pb.Message{From: 1, To: 2, Term: 2, Type: pb.MsgDeFortifyLeader})
	require.Equal(t, pb.StatePreCandidate, n2.state)

	// However, receiving another message from a leader would cause 2 to become
	// follower again.
	nt.send(pb.Message{From: 1, To: 2, Term: 2, Type: pb.MsgApp})
	require.Equal(t, pb.StateFollower, n2.state)
}
// TestLeaderAppResp verifies how the leader reacts to MsgAppResp from a
// follower: stale responses are dropped, rejections move the follower's Next
// back and re-probe, and accepted responses advance Match/Next and possibly
// the commit index. It also pins the number of storage term-lookups each
// response triggers.
func TestLeaderAppResp(t *testing.T) {
	// The test creates a leader node at term 2, with raft log [1 1 1 2 2 2].
	// Initial progress: match = 0, next = 4.
	for _, tt := range []struct {
		index  uint64
		reject bool
		// progress
		wmatch uint64
		// log index of the next entry to send to this follower
		wnext uint64
		// number of messages the leader sends out
		wmsgNum int
		// prevLogIndex in MsgApp from leader to followers
		windex uint64
		// leader's commit index
		wcommitted uint64
		// storage access counts for getting term number
		ctStgTerm int
	}{
		// stale resp; no replies
		{2, true, 0, 4, 0, 0, 0, 1},
		// stale resp; no replies
		{6, true, 0, 4, 0, 0, 0, 1},
		// denied resp; leader does not commit; decrease next and send probing msg
		// An additional term storage access is involved for an entry
		// that's already persisted since we are probing backwards.
		{3, true, 0, 3, 1, 2, 0, 2},
		// Follower 2 responds to leader, indicating log index 2 is replicated.
		// Leader tries to commit, but commit index doesn't advance since the index
		// is from a previous term.
		// We hit maybeCommit() and do term check comparison by using the
		// last "term flip" entryID stored in the termCache.
		// There is no storage access for term in the maybeCommit() code path
		{2, false, 2, 7, 1, 2, 0, 2},
		// Follower 2 responds to leader, indicating log index 3 is replicated.
		// Leader tries to commit, but commit index doesn't advance since the index
		// is from a previous term. Same as above.
		{3, false, 3, 7, 1, 3, 0, 1},
		// NB: For the following tests, we are skipping the MsgAppResp for the first
		// 3 entries, by directly processing MsgAppResp for later entries.
		//
		// Follower 2 is StateProbing at 4, it sends MsgAppResp for 4, and is moved
		// to StateReplicate and as many entries as possible are sent to it (5, 6).
		// Correspondingly the Next is then 7 (entry 7 does not exist, indicating
		// the follower will be up to date should it process the emitted MsgApp).
		// accept resp; leader commits; respond with commit index.
		// maybeCommit() is successful.
		{4, false, 4, 7, 1, 4, 4, 1},
		// Follower 2 says term2, index5 is already replicated.
		// The leader responds with the updated commit index to follower 2.
		// maybeCommit() is successful.
		{5, false, 5, 7, 1, 5, 5, 1},
		// Follower 2 says term2, index6 is already replicated.
		// The leader responds with the updated commit index to follower 2.
		// maybeCommit() is successful.
		{6, false, 6, 7, 1, 6, 6, 1},
	} {
		t.Run("", func(t *testing.T) {
			// Build a leader whose log is [1 1 1] from storage plus a dummy
			// entry and two appended entries at term 2.
			storage := newTestMemoryStorage(withPeers(1, 2, 3))
			require.NoError(t, storage.Append(index(1).terms(1, 1, 1)))
			require.NoError(t, storage.SetHardState(pb.HardState{Term: 1}))
			sm := newTestRaft(1, 10, 1, storage)
			sm.becomeCandidate()
			require.Equal(t, uint64(2), sm.Term)
			require.Equal(t, uint64(3), sm.raftLog.lastIndex())
			sm.becomeLeader()
			require.Equal(t, uint64(4), sm.raftLog.lastIndex()) // appended a dummy
			sm.appendEntry(index(5).terms(2, 2)...)
			require.Equal(t, uint64(0), sm.raftLog.committed)
			sm.bcastAppend()
			sm.readMessages()
			require.NoError(t, sm.Step(pb.Message{
				From:       2,
				Type:       pb.MsgAppResp,
				Index:      tt.index,
				Term:       sm.Term,
				Reject:     tt.reject,
				RejectHint: tt.index,
			}))

			p := sm.trk.Progress(2)
			require.Equal(t, tt.wmatch, p.Match)
			require.Equal(t, tt.wnext, p.Next)

			msgs := sm.readMessages()
			require.Len(t, msgs, tt.wmsgNum)
			for _, msg := range msgs {
				require.Equal(t, tt.windex, msg.Index, "%v", DescribeMessage(msg, nil))
				require.Equal(t, tt.wcommitted, msg.Commit, "%v", DescribeMessage(msg, nil))
			}
			assert.Equal(t, tt.ctStgTerm, storage.callStats.term)
		})
	}
}
// TestBcastBeat is when the leader receives a heartbeat tick, it should
// send a MsgHeartbeat with m.Index = 0, m.LogTerm=0 and empty entries if
// store liveness is disabled. On the other hand, if store liveness is enabled,
// the leader doesn't send a MsgHeartbeat but sends a MsgApp if the follower
// needs it to catch up.
func TestBcastBeat(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			offset := uint64(1000)
			// make a state machine with log.offset = 1000
			s := pb.Snapshot{
				Metadata: pb.SnapshotMetadata{
					Index:     offset,
					Term:      1,
					ConfState: pb.ConfState{Voters: []pb.PeerID{1, 2, 3}},
				},
			}
			storage := NewMemoryStorage()
			storage.ApplySnapshot(s)

			testOptions := emptyTestConfigModifierOpt()
			if !storeLivenessEnabled {
				testOptions = withStoreLiveness(raftstoreliveness.Disabled{})
			}

			sm := newTestRaft(1, 10, 1, storage, testOptions)
			sm.Term = 1

			sm.becomeCandidate()
			sm.becomeLeader()
			for i := 0; i < 10; i++ {
				mustAppendEntry(sm, pb.Entry{Index: uint64(i) + 1})
			}
			sm.advanceMessagesAfterAppend()

			// slow follower
			sm.trk.Progress(2).Match, sm.trk.Progress(2).Next = 5, 6
			// normal follower
			sm.trk.Progress(3).Match, sm.trk.Progress(3).Next = sm.raftLog.lastIndex(),
				sm.raftLog.lastIndex()+1

			// TODO(ibrahim): Create a test helper function that takes the number of
			// ticks and calls tick() that many times. Then we can refactor a lot of
			// tests that have this pattern.
			for ticks := sm.heartbeatTimeout; ticks > 0; ticks-- {
				sm.tick()
			}

			msgs := sm.readMessages()
			// If storeliveness is enabled, the heartbeat timeout will send a MsgApp
			// if it needs to. In this case since follower 2 is slow, we will send a
			// MsgApp to it.
			if storeLivenessEnabled {
				require.Len(t, msgs, 3)
				assert.Equal(t, []pb.Message{
					{From: 1, To: 2, Term: 2, Type: pb.MsgFortifyLeader},
					{From: 1, To: 3, Term: 2, Type: pb.MsgFortifyLeader},
					{From: 1, To: 3, Term: 2, Type: pb.MsgApp, LogTerm: 2, Index: 1011, Commit: 1000,
						Match: 1011},
				}, msgs)
			} else {
				require.Len(t, msgs, 2)
				assert.Equal(t, []pb.Message{
					{From: 1, To: 2, Term: 2, Type: pb.MsgHeartbeat, Match: 5},
					{From: 1, To: 3, Term: 2, Type: pb.MsgHeartbeat, Match: 1011},
				}, msgs)

				// Make sure that the heartbeat messages contain the expected fields.
				for i, m := range msgs {
					require.Equal(t, pb.MsgHeartbeat, m.Type, "#%d", i)
					require.Zero(t, m.Index, "#%d", i)
					require.Zero(t, m.LogTerm, "#%d", i)
					require.Empty(t, m.Entries, "#%d", i)
				}
			}
		})
}
// TestRecvMsgBeat verifies that only a leader reacts to a local MsgBeat by
// broadcasting heartbeats; candidates and followers ignore it entirely.
func TestRecvMsgBeat(t *testing.T) {
	testCases := []struct {
		state pb.StateType
		wMsg  int
	}{
		{pb.StateLeader, 2},
		// candidate and follower should ignore MsgBeat
		{pb.StateCandidate, 0},
		{pb.StateFollower, 0},
	}
	for i, tc := range testCases {
		sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
		sm.raftLog = newLog(&MemoryStorage{ls: LogSlice{
			entries: index(1).terms(1, 1),
		}}, nil)
		sm.Term = 1

		// Force the node into the state under test, wiring up the matching
		// step function by hand.
		sm.state = tc.state
		switch tc.state {
		case pb.StateFollower:
			sm.step = stepFollower
		case pb.StateCandidate:
			sm.step = stepCandidate
		case pb.StateLeader:
			sm.step = stepLeader
		}

		sm.Step(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})

		got := sm.readMessages()
		assert.Len(t, got, tc.wMsg, "#%d", i)
		for _, m := range got {
			assert.Equal(t, pb.MsgHeartbeat, m.Type, "#%d", i)
		}
	}
}
// TestLeaderIncreaseNext checks how a leader updates a follower's Next index
// after a proposal: optimistically in StateReplicate, unchanged in StateProbe.
func TestLeaderIncreaseNext(t *testing.T) {
	init := entryID{}.append(1, 2, 3)
	testCases := []struct {
		// progress
		state tracker.StateType
		next  uint64
		wnext uint64
	}{
		// state replicate, optimistically increase next
		// previous entries + noop entry + propose + 1
		{tracker.StateReplicate, 2, uint64(len(init.entries) + 1 + 1 + 1)},
		// state probe, not optimistically increase next
		{tracker.StateProbe, 2, 2},
	}
	for i, tc := range testCases {
		sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
		sm.becomeFollower(init.term, None)
		require.True(t, sm.raftLog.append(init))
		sm.becomeCandidate()
		sm.becomeLeader()

		// Install the progress state under test for follower 2, then propose.
		pr := sm.trk.Progress(2)
		pr.State = tc.state
		pr.Next = tc.next
		sm.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})

		assert.Equal(t, tc.wnext, sm.trk.Progress(2).Next, "#%d", i)
	}
}
// TestSendAppendForProgressProbeStoreLivenessDisabled verifies that, with
// store liveness disabled, a follower in StateProbe receives at most one
// in-flight MsgApp and is only unpaused by a MsgHeartbeatResp — heartbeat
// timeouts alone do not unpause it.
func TestSendAppendForProgressProbeStoreLivenessDisabled(t *testing.T) {
	r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)),
		withStoreLiveness(raftstoreliveness.Disabled{}))
	r.becomeCandidate()
	r.becomeLeader()

	// Initialize the log with some data.
	mustAppendEntry(r, pb.Entry{Data: []byte("init")})

	// Force set the match index to 1. This will make the leader use the index 1
	// when sending the MsgApp.
	r.trk.Progress(2).Match = 1
	r.trk.Progress(2).BecomeProbe()

	// each round is a heartbeat
	for i := 0; i < 3; i++ {
		if i == 0 {
			// We expect that raft will only send out one MsgApp on the first loop.
			// After that, the follower is paused until a heartbeat response is
			// received.
			mustAppendEntry(r, pb.Entry{Data: []byte("somedata")})
			r.maybeSendAppend(2)
			msg := r.readMessages()
			assert.Len(t, msg, 1)
			assert.Equal(t, pb.MsgApp, msg[0].Type)
			assert.Equal(t, msg[0].Index, uint64(1))
		}

		assert.True(t, r.trk.Progress(2).MsgAppProbesPaused)
		// While paused, further appends must produce no messages.
		for j := 0; j < 10; j++ {
			mustAppendEntry(r, pb.Entry{Data: []byte("somedata")})
			r.maybeSendAppend(2)
			assert.Empty(t, r.readMessages())
		}

		// do a heartbeat
		for j := int64(0); j < r.heartbeatTimeout; j++ {
			r.tick()
		}
		assert.True(t, r.trk.Progress(2).MsgAppProbesPaused)

		// No MsgApp gets sent since we haven't received a MsgHeartbeatResp.
		msg := r.readMessages()
		assert.Len(t, msg, 1)
		assert.Equal(t, pb.MsgHeartbeat, msg[0].Type)
	}

	// a MsgHeartbeatResp will allow another message to be sent
	r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgHeartbeatResp})
	msg := r.readMessages()
	assert.Len(t, msg, 1)
	assert.Equal(t, msg[0].Type, pb.MsgApp)
	assert.Equal(t, msg[0].Index, uint64(1))
	assert.True(t, r.trk.Progress(2).MsgAppProbesPaused)
}
// TestSendAppendForProgressProbeStoreLivenessEnabled verifies that, with
// store liveness enabled, a follower in StateProbe receives at most one
// in-flight MsgApp per heartbeat interval: the heartbeat timeout itself
// (accompanied by MsgFortifyLeader) unpauses the probe.
func TestSendAppendForProgressProbeStoreLivenessEnabled(t *testing.T) {
	r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
	r.becomeCandidate()
	r.becomeLeader()

	// Initialize the log with some data.
	mustAppendEntry(r, pb.Entry{Data: []byte("init")})

	// Force set the match index to 1. This will make the leader use the index 1
	// when sending the probe MsgApp.
	r.trk.Progress(2).Match = 1
	r.trk.Progress(2).BecomeProbe()
	r.readMessages()
	// NOTE(review): BecomeProbe is called a second time after draining
	// messages; presumably to reset the probe pause state — confirm whether
	// the first call above is redundant.
	r.trk.Progress(2).BecomeProbe()

	// each round is a heartbeat
	for i := 0; i < 3; i++ {
		if i == 0 {
			// We expect that raft will only send out one MsgApp on the first loop.
			// After that, the follower is paused until the next heartbeat timeout.
			mustAppendEntry(r, pb.Entry{Data: []byte("somedata")})
			r.maybeSendAppend(2)
			msg := r.readMessages()
			assert.Len(t, msg, 1)
			assert.Equal(t, pb.MsgApp, msg[0].Type)
			assert.Equal(t, msg[0].Index, uint64(1))
		}

		assert.True(t, r.trk.Progress(2).MsgAppProbesPaused)
		// While paused, further appends must produce no messages.
		for j := 0; j < 10; j++ {
			mustAppendEntry(r, pb.Entry{Data: []byte("somedata")})
			r.maybeSendAppend(2)
			assert.Empty(t, r.readMessages())
		}

		// The next heartbeat timeout will allow another message to be sent.
		for j := int64(0); j < r.heartbeatTimeout; j++ {
			r.tick()
		}
		assert.True(t, r.trk.Progress(2).MsgAppProbesPaused)

		msg := r.readMessages()
		assert.Len(t, msg, 2)
		assert.Equal(t, pb.MsgFortifyLeader, msg[0].Type)
		assert.Equal(t, pb.MsgApp, msg[1].Type)
		assert.Equal(t, msg[1].Index, uint64(1))
		assert.True(t, r.trk.Progress(2).MsgAppProbesPaused)
	}
}
// TestSendAppendForProgressReplicate verifies that a follower in
// StateReplicate is never paused: every appended entry yields a MsgApp.
func TestSendAppendForProgressReplicate(t *testing.T) {
	r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
	r.becomeCandidate()
	r.becomeLeader()
	r.readMessages()
	r.trk.Progress(2).BecomeReplicate()

	for iter := 0; iter < 10; iter++ {
		mustAppendEntry(r, pb.Entry{Data: []byte("somedata")})
		r.maybeSendAppend(2)
		assert.Len(t, r.readMessages(), 1, "#%d", iter)
	}
}
// TestSendAppendForProgressSnapshot verifies that a follower in
// StateSnapshot receives no MsgApp at all while the snapshot is pending.
func TestSendAppendForProgressSnapshot(t *testing.T) {
	r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
	r.becomeCandidate()
	r.becomeLeader()
	r.readMessages()
	r.trk.Progress(2).BecomeSnapshot(10)

	for iter := 0; iter < 10; iter++ {
		mustAppendEntry(r, pb.Entry{Data: []byte("somedata")})
		r.maybeSendAppend(2)
		assert.Empty(t, r.readMessages(), "#%d", iter)
	}
}
// TestRecvMsgUnreachable checks that a MsgUnreachable report moves a
// replicating follower back to StateProbe with Next reset to Match+1.
func TestRecvMsgUnreachable(t *testing.T) {
	previousEnts := index(1).terms(1, 2, 3)
	storage := newTestMemoryStorage(withPeers(1, 2))
	storage.SetHardState(pb.HardState{Term: 3})
	storage.Append(previousEnts)

	r := newTestRaft(1, 10, 1, storage)
	r.becomeCandidate()
	r.becomeLeader()
	r.readMessages()

	// Put node 2 into StateReplicate with an advanced Next.
	pr := r.trk.Progress(2)
	pr.Match = 3
	pr.BecomeReplicate()
	pr.Next = 6

	r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgUnreachable})

	pr = r.trk.Progress(2)
	assert.Equal(t, tracker.StateProbe, pr.State)
	assert.Equal(t, pr.Match+1, pr.Next)
}
// TestRestore verifies that restoring from a snapshot installs the
// snapshot's last entry and voter set, that a second restore of the same
// snapshot is a no-op, and that the node stays a follower afterwards.
func TestRestore(t *testing.T) {
	snap := snapshot{
		term: 11,
		snap: pb.Snapshot{Metadata: pb.SnapshotMetadata{
			Index:     11, // magic number
			Term:      11, // magic number
			ConfState: pb.ConfState{Voters: []pb.PeerID{1, 2, 3}},
		}},
	}

	storage := newTestMemoryStorage(withPeers(1, 2))
	sm := newTestRaft(1, 10, 1, storage)
	require.True(t, sm.restore(snap))
	assert.Equal(t, snap.lastEntryID(), sm.raftLog.lastEntryID())
	assert.Equal(t, snap.snap.Metadata.ConfState.Voters, sm.trk.VoterNodes())

	// Restoring the same snapshot again must be rejected.
	require.False(t, sm.restore(snap))

	// Ticking through an election timeout must not change the node's state.
	for tick := int64(0); tick < sm.randomizedElectionTimeout; tick++ {
		sm.tick()
	}
	assert.Equal(t, pb.StateFollower, sm.state)
}
// TestRestoreWithLearner restores a snapshot which contains learners and
// verifies the voter/learner split is applied to the tracker.
func TestRestoreWithLearner(t *testing.T) {
	s := snapshot{
		term: 11,
		snap: pb.Snapshot{Metadata: pb.SnapshotMetadata{
			Index:     11, // magic number
			Term:      11, // magic number
			ConfState: pb.ConfState{Voters: []pb.PeerID{1, 2}, Learners: []pb.PeerID{3}},
		}},
	}

	storage := newTestMemoryStorage(withPeers(1, 2), withLearners(3))
	sm := newTestLearnerRaft(3, 8, 2, storage)
	assert.True(t, sm.restore(s))
	assert.Equal(t, s.lastEntryID(), sm.raftLog.lastEntryID())

	// The tracker must reflect exactly the snapshot's voters and learners.
	sg := sm.trk.VoterNodes()
	assert.Len(t, sg, len(s.snap.Metadata.ConfState.Voters))
	lns := sm.trk.LearnerNodes()
	assert.Len(t, lns, len(s.snap.Metadata.ConfState.Learners))
	for _, n := range s.snap.Metadata.ConfState.Voters {
		assert.False(t, sm.trk.Progress(n).IsLearner)
	}
	for _, n := range s.snap.Metadata.ConfState.Learners {
		assert.True(t, sm.trk.Progress(n).IsLearner)
	}
	// Restoring the same snapshot again must be rejected.
	assert.False(t, sm.restore(s))
}
// TestRestoreWithVotersOutgoing tests if outgoing voter can receive and apply
// snapshot correctly: the resulting voter set is the union of incoming and
// outgoing voters while a joint configuration is in effect.
func TestRestoreWithVotersOutgoing(t *testing.T) {
	s := snapshot{
		term: 11,
		snap: pb.Snapshot{Metadata: pb.SnapshotMetadata{
			Index:     11, // magic number
			Term:      11, // magic number
			ConfState: pb.ConfState{Voters: []pb.PeerID{2, 3, 4}, VotersOutgoing: []pb.PeerID{1, 2, 3}},
		}},
	}

	storage := newTestMemoryStorage(withPeers(1, 2))
	sm := newTestRaft(1, 10, 1, storage)
	require.True(t, sm.restore(s))
	assert.Equal(t, s.lastEntryID(), sm.raftLog.lastEntryID())
	// Union of {2,3,4} and {1,2,3}.
	sg := sm.trk.VoterNodes()
	assert.Equal(t, []pb.PeerID{1, 2, 3, 4}, sg)
	require.False(t, sm.restore(s))

	// It should not campaign before actually applying data.
	for i := int64(0); i < sm.randomizedElectionTimeout; i++ {
		sm.tick()
	}
	assert.Equal(t, pb.StateFollower, sm.state)
}
// TestRestoreVoterToLearner verifies that a normal peer can be downgraded to a
// learner through a snapshot. At the time of writing, we don't allow
// configuration changes to do this directly, but note that the snapshot may
// compress multiple changes to the configuration into one: the voter could have
// been removed, then readded as a learner and the snapshot reflects both
// changes. In that case, a voter receives a snapshot telling it that it is now
// a learner. In fact, the node has to accept that snapshot, or it is
// permanently cut off from the Raft log.
func TestRestoreVoterToLearner(t *testing.T) {
	s := snapshot{
		term: 11,
		snap: pb.Snapshot{Metadata: pb.SnapshotMetadata{
			Index:     11, // magic number
			Term:      11, // magic number
			ConfState: pb.ConfState{Voters: []pb.PeerID{1, 2}, Learners: []pb.PeerID{3}},
		}},
	}

	storage := newTestMemoryStorage(withPeers(1, 2, 3))
	sm := newTestRaft(3, 10, 1, storage)
	// Node 3 starts as a voter and must accept the snapshot demoting it.
	assert.False(t, sm.isLearner)
	assert.True(t, sm.restore(s))
}
// TestRestoreLearnerPromotion checks that a learner becomes a voter (a
// regular follower) after restoring a snapshot that lists it as a voter.
func TestRestoreLearnerPromotion(t *testing.T) {
	snap := snapshot{
		term: 11,
		snap: pb.Snapshot{Metadata: pb.SnapshotMetadata{
			Index:     11, // magic number
			Term:      11, // magic number
			ConfState: pb.ConfState{Voters: []pb.PeerID{1, 2, 3}},
		}},
	}

	storage := newTestMemoryStorage(withPeers(1, 2), withLearners(3))
	sm := newTestLearnerRaft(3, 10, 1, storage)

	// Learner before the restore, voter after.
	assert.True(t, sm.isLearner)
	assert.True(t, sm.restore(snap))
	assert.False(t, sm.isLearner)
}
// TestLearnerReceiveSnapshot tests that a learner can receive a snapshot from
// the leader and catch up to the leader's commit index.
func TestLearnerReceiveSnapshot(t *testing.T) {
	// restore the state machine from a snapshot so it has a compacted log and a snapshot
	s := snapshot{
		term: 11,
		snap: pb.Snapshot{Metadata: pb.SnapshotMetadata{
			Index:     11, // magic number
			Term:      11, // magic number
			ConfState: pb.ConfState{Voters: []pb.PeerID{1}, Learners: []pb.PeerID{2}},
		}},
	}

	store := newTestMemoryStorage(withPeers(1), withLearners(2))
	store.SetHardState(pb.HardState{Term: 11})
	n1 := newTestLearnerRaft(1, 10, 1, store)
	n2 := newTestLearnerRaft(2, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))

	// Apply the restored snapshot to n1's storage so its log is compacted.
	n1.restore(s)
	snap := n1.raftLog.nextUnstableSnapshot()
	store.ApplySnapshot(*snap)
	n1.appliedSnap(snap.Metadata.Index)

	nt := newNetwork(n1, n2)

	// Elect n1 and heartbeat; n2 must end up at the same commit index.
	setRandomizedElectionTimeout(n1, n1.electionTimeout)
	for i := int64(0); i < n1.electionTimeout; i++ {
		n1.tick()
	}

	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})

	assert.Equal(t, n1.raftLog.committed, n2.raftLog.committed)
}
// TestRestoreIgnoreSnapshot verifies that a snapshot at or behind the local
// log is rejected by restore(), but may still fast-forward the commit index.
func TestRestoreIgnoreSnapshot(t *testing.T) {
	init := entryID{}.append(1, 1, 1)
	commit := uint64(1)
	storage := newTestMemoryStorage(withPeers(1, 2))
	sm := newTestRaft(1, 10, 1, storage)
	require.True(t, sm.raftLog.append(init))
	sm.raftLog.commitTo(LogMark{Term: init.term, Index: commit})

	s := snapshot{
		term: 1,
		snap: pb.Snapshot{Metadata: pb.SnapshotMetadata{
			Index:     commit,
			Term:      1,
			ConfState: pb.ConfState{Voters: []pb.PeerID{1, 2}},
		}},
	}

	// ignore snapshot
	assert.False(t, sm.restore(s))
	assert.Equal(t, sm.raftLog.committed, commit)

	// ignore snapshot and fast forward commit
	s.snap.Metadata.Index = commit + 1
	assert.False(t, sm.restore(s))
	assert.Equal(t, sm.raftLog.committed, commit+1)
}
// TestProvideSnap checks that when a follower's Next falls behind the
// compacted portion of the leader's log, the leader sends it a MsgSnap.
func TestProvideSnap(t *testing.T) {
	// Restore the state machine from a snapshot so it has a compacted log
	// and a snapshot on hand.
	snap := snapshot{
		term: 11,
		snap: pb.Snapshot{Metadata: pb.SnapshotMetadata{
			Index:     11, // magic number
			Term:      11, // magic number
			ConfState: pb.ConfState{Voters: []pb.PeerID{1, 2}},
		}},
	}
	storage := newTestMemoryStorage(withPeers(1))
	sm := newTestRaft(1, 10, 1, storage)
	sm.becomeFollower(snap.term, None)
	sm.restore(snap)

	sm.becomeCandidate()
	sm.becomeLeader()

	// Force node 2's Next behind the compaction point so it needs a snapshot,
	// then reject its probe to trigger the snapshot path.
	sm.trk.Progress(2).Next = sm.raftLog.compacted() + 1
	sm.Step(pb.Message{From: 2, To: 1, Type: pb.MsgAppResp, Index: sm.trk.Progress(2).Next - 1, Reject: true})

	outbox := sm.readMessages()
	require.Len(t, outbox, 1)
	assert.Equal(t, outbox[0].Type, pb.MsgSnap)
}
// TestIgnoreProvidingSnap verifies that the leader does not send a snapshot
// to a follower that is not recently active, even if its Next index falls
// behind the compacted log.
func TestIgnoreProvidingSnap(t *testing.T) {
	// restore the state machine from a snapshot so it has a compacted log and a snapshot
	s := snapshot{
		term: 11,
		snap: pb.Snapshot{Metadata: pb.SnapshotMetadata{
			Index:     11, // magic number
			Term:      11, // magic number
			ConfState: pb.ConfState{Voters: []pb.PeerID{1, 2}},
		}},
	}
	storage := newTestMemoryStorage(withPeers(1))
	sm := newTestRaft(1, 10, 1, storage)
	sm.becomeFollower(s.term, None)
	sm.restore(s)

	sm.becomeCandidate()
	sm.becomeLeader()

	// force set the next of node 2, so that node 2 needs a snapshot
	// change node 2 to be inactive, expect node 1 ignore sending snapshot to 2
	sm.trk.Progress(2).Next = sm.raftLog.compacted()
	sm.trk.Progress(2).RecentActive = false

	sm.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})

	msgs := sm.readMessages()
	assert.Empty(t, msgs)
}
// TestRestoreFromSnapMsg steps a MsgSnap into a fresh follower.
func TestRestoreFromSnapMsg(t *testing.T) {
	snap := snapshot{
		term: 11,
		snap: pb.Snapshot{Metadata: pb.SnapshotMetadata{
			Index:     11, // magic number
			Term:      11, // magic number
			ConfState: pb.ConfState{Voters: []pb.PeerID{1, 2}},
		}},
	}
	msg := pb.Message{Type: pb.MsgSnap, From: 1, Term: 2, Snapshot: &snap.snap}

	sm := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
	sm.Step(msg)

	assert.Equal(t, None, sm.lead)

	// TODO(bdarnell): what should this test?
}
// TestSlowNodeRestore verifies that a follower which was isolated while the
// leader compacted its log catches up via snapshot after the partition heals,
// ending at the leader's commit index.
func TestSlowNodeRestore(t *testing.T) {
	nt := newNetwork(nil, nil, nil)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})

	nt.isolate(3)
	// Propose enough entries while 3 is isolated that it falls well behind.
	for j := 0; j <= 100; j++ {
		nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
	}
	lead := nt.peers[1].(*raft)
	// Apply entries, then snapshot and compact the leader's log so that 3 can
	// only catch up via snapshot.
	nextEnts(lead, nt.storage[1])
	nt.storage[1].CreateSnapshot(lead.raftLog.applied, &pb.ConfState{Voters: lead.trk.VoterNodes()}, nil)
	nt.storage[1].Compact(lead.raftLog.applied)

	nt.recover()

	// send heartbeats so that the leader can learn everyone is active.
	// node 3 will only be considered as active when node 1 receives a reply from it.
	for {
		nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgBeat})
		if lead.trk.Progress(3).RecentActive {
			break
		}
	}

	// trigger a snapshot
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})

	follower := nt.peers[3].(*raft)

	// trigger a commit
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
	assert.Equal(t, lead.raftLog.committed, follower.raftLog.committed)
}
// TestStepConfig tests that when raft steps a msgProp carrying an
// EntryConfChange, it appends the entry to the log and records its index in
// pendingConfIndex.
func TestStepConfig(t *testing.T) {
	// a raft that cannot make progress
	r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
	r.becomeCandidate()
	r.becomeLeader()

	// Note: named prevLast rather than "index" to avoid shadowing the
	// package-level index() test helper.
	prevLast := r.raftLog.lastIndex()
	r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange}}})
	assert.Equal(t, prevLast+1, r.raftLog.lastIndex())
	assert.Equal(t, prevLast+1, r.pendingConfIndex)
}
// TestStepIgnoreConfig tests that if raft steps a second EntryConfChange
// proposal while the first is still uncommitted, the second is rewritten to
// a no-op entry and pendingConfIndex keeps its original value.
func TestStepIgnoreConfig(t *testing.T) {
	// a raft that cannot make progress
	r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
	r.becomeCandidate()
	r.becomeLeader()

	// First conf change is accepted as-is.
	r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange}}})
	lastIdx := r.raftLog.lastIndex()
	wantPending := r.pendingConfIndex

	// Second conf change while the first is uncommitted: downgraded to no-op.
	r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange}}})
	wents := []pb.Entry{{Type: pb.EntryNormal, Term: 1, Index: 3, Data: nil}}
	got, err := r.raftLog.entries(lastIdx, noLimit)
	require.NoError(t, err)
	assert.Equal(t, wents, got)
	assert.Equal(t, wantPending, r.pendingConfIndex)
}
// TestNewLeaderPendingConfig tests that a new leader sets pendingConfIndex
// based on the uncommitted tail of its log.
func TestNewLeaderPendingConfig(t *testing.T) {
	testCases := []struct {
		addEntry      bool
		wpendingIndex uint64
	}{
		{false, 0},
		{true, 1},
	}
	for i, tc := range testCases {
		r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
		if tc.addEntry {
			// Seed one uncommitted entry before the election.
			mustAppendEntry(r, pb.Entry{Type: pb.EntryNormal})
		}
		r.becomeCandidate()
		r.becomeLeader()
		assert.Equal(t, tc.wpendingIndex, r.pendingConfIndex, "#%d", i)
	}
}
// TestAddNode verifies that applying a ConfChangeAddNode updates the voter
// set correctly.
func TestAddNode(t *testing.T) {
	r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)))
	r.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeAddNode}.AsV2())
	assert.Equal(t, []pb.PeerID{1, 2}, r.trk.VoterNodes())
}
// TestAddLearner tests that addLearner could update nodes correctly.
// It exercises the full learner lifecycle: add a learner, promote it to
// voter, demote the local node to learner, and promote it back.
func TestAddLearner(t *testing.T) {
	r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)))
	// Add new learner peer.
	r.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeAddLearnerNode}.AsV2())
	require.False(t, r.isLearner)
	nodes := r.trk.LearnerNodes()
	assert.Equal(t, []pb.PeerID{2}, nodes)
	require.True(t, r.trk.Progress(2).IsLearner)
	// Promote peer to voter.
	r.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeAddNode}.AsV2())
	require.False(t, r.trk.Progress(2).IsLearner)
	// Demote r.
	r.applyConfChange(pb.ConfChange{NodeID: 1, Type: pb.ConfChangeAddLearnerNode}.AsV2())
	require.True(t, r.trk.Progress(1).IsLearner)
	require.True(t, r.isLearner)
	// Promote r again.
	r.applyConfChange(pb.ConfChange{NodeID: 1, Type: pb.ConfChangeAddNode}.AsV2())
	require.False(t, r.trk.Progress(1).IsLearner)
	require.False(t, r.isLearner)
}
// TestAddNodeCheckQuorum tests that addNode does not trigger a leader election
// immediately when checkQuorum is set.
//
// The scenario is run both with and without store liveness enabled.
func TestAddNodeCheckQuorum(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testAddNodeCheckQuorum(t, storeLivenessEnabled)
		})
}
// testAddNodeCheckQuorum is the body of TestAddNodeCheckQuorum, parameterized
// on whether store liveness is enabled. It verifies that adding a node right
// before an election-timeout quorum check does not immediately depose the
// leader, but that the leader does step down after a full electionTimeout
// without hearing from the new node.
func testAddNodeCheckQuorum(t *testing.T, storeLivenessEnabled bool) {
	var fabric *raftstoreliveness.LivenessFabric
	var r *raft
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2)
		r = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)),
			withStoreLiveness(fabric.GetStoreLiveness(1)))
	} else {
		r = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
	}
	r.checkQuorum = true
	r.becomeCandidate()
	r.becomeLeader()
	// Tick to one tick short of the election timeout, then add the node.
	for i := int64(0); i < r.electionTimeout-1; i++ {
		r.tick()
	}
	r.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeAddNode}.AsV2())
	// This tick will reach electionTimeout, which triggers a quorum check.
	r.tick()
	// Node 1 should still be the leader after a single tick.
	assert.Equal(t, pb.StateLeader, r.state)
	// After another electionTimeout ticks without hearing from node 2,
	// node 1 should step down.
	for i := int64(0); i < r.electionTimeout; i++ {
		r.tick()
	}
	assert.Equal(t, pb.StateFollower, r.state)
}
// TestRemoveNode tests that removeNode could update nodes and
// removed list correctly.
func TestRemoveNode(t *testing.T) {
	r := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
	r.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeRemoveNode}.AsV2())
	w := []pb.PeerID{1}
	assert.Equal(t, w, r.trk.VoterNodes())
	// Removing the remaining voter will panic.
	// The deferred recover asserts that the panic actually occurred.
	defer func() {
		assert.NotNil(t, recover(), "did not panic")
	}()
	r.applyConfChange(pb.ConfChange{NodeID: 1, Type: pb.ConfChangeRemoveNode}.AsV2())
}
// TestRemoveLearner tests that removeNode could update nodes and
// removed list correctly when the removed peer is a learner.
func TestRemoveLearner(t *testing.T) {
	r := newTestLearnerRaft(1, 10, 1, newTestMemoryStorage(withPeers(1), withLearners(2)))
	r.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeRemoveNode}.AsV2())
	w := []pb.PeerID{1}
	assert.Equal(t, w, r.trk.VoterNodes())
	// After removing the only learner, the learner set is empty (nil).
	w = nil
	assert.Equal(t, w, r.trk.LearnerNodes())
	// Removing the remaining voter will panic.
	// The deferred recover asserts that the panic actually occurred.
	defer func() {
		assert.NotNil(t, recover(), "did not panic")
	}()
	r.applyConfChange(pb.ConfChange{NodeID: 1, Type: pb.ConfChangeRemoveNode}.AsV2())
}
// TestPromotable verifies that a node considers itself promotable (eligible
// to campaign) exactly when its own ID is part of the configured voter set.
func TestPromotable(t *testing.T) {
	self := pb.PeerID(1)
	testCases := []struct {
		voters []pb.PeerID
		want   bool
	}{
		{voters: []pb.PeerID{1}, want: true},
		{voters: []pb.PeerID{1, 2, 3}, want: true},
		{voters: []pb.PeerID{}, want: false},
		{voters: []pb.PeerID{2, 3}, want: false},
	}
	for i, tc := range testCases {
		sm := newTestRaft(self, 5, 1, newTestMemoryStorage(withPeers(tc.voters...)))
		assert.Equal(t, tc.want, sm.promotable(), "#%d", i)
	}
}
// TestRaftNodes verifies that VoterNodes returns the configured voter IDs in
// ascending order, regardless of the order in which they were supplied.
func TestRaftNodes(t *testing.T) {
	testCases := []struct {
		peers  []pb.PeerID
		sorted []pb.PeerID
	}{
		{peers: []pb.PeerID{1, 2, 3}, sorted: []pb.PeerID{1, 2, 3}},
		{peers: []pb.PeerID{3, 2, 1}, sorted: []pb.PeerID{1, 2, 3}},
	}
	for i, tc := range testCases {
		sm := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(tc.peers...)))
		assert.Equal(t, tc.sorted, sm.trk.VoterNodes(), "#%d", i)
	}
}
// TestCampaignWhileLeader verifies that a leader ignores a campaign request
// (MsgHup) while already leading, with PreVote disabled.
func TestCampaignWhileLeader(t *testing.T) {
	testCampaignWhileLeader(t, false)
}
// TestPreCampaignWhileLeader verifies that a leader ignores a campaign request
// (MsgHup) while already leading, with PreVote enabled.
func TestPreCampaignWhileLeader(t *testing.T) {
	testCampaignWhileLeader(t, true)
}
// testCampaignWhileLeader checks that a single-node raft wins an election on
// MsgHup, and that a second MsgHup while already leader is a no-op: the state
// stays StateLeader and the term does not advance.
func testCampaignWhileLeader(t *testing.T, preVote bool) {
	cfg := newTestConfig(1, 5, 1, newTestMemoryStorage(withPeers(1)))
	cfg.PreVote = preVote
	r := newRaft(cfg)
	assert.Equal(t, pb.StateFollower, r.state)
	// We don't call campaign() directly because it comes after the check
	// for our current state.
	r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	r.advanceMessagesAfterAppend()
	assert.Equal(t, pb.StateLeader, r.state)
	term := r.Term
	// Campaigning again while leader must leave state and term unchanged.
	r.Step(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	r.advanceMessagesAfterAppend()
	assert.Equal(t, pb.StateLeader, r.state)
	assert.Equal(t, term, r.Term)
}
// TestCommitAfterRemoveNode verifies that pending commands can become
// committed when a config change reduces the quorum requirements.
//
// The test demotes voter 2 to a learner through a joint configuration: while
// in the joint config the quorum is still 2, so a proposal cannot commit with
// only node 1's acknowledgment; once the joint config is left, quorum drops
// to 1 and the pending proposal commits.
func TestCommitAfterRemoveNode(t *testing.T) {
	// Create a cluster with two nodes.
	s := newTestMemoryStorage(withPeers(1, 2))
	r := newTestRaft(1, 5, 1, s)
	r.becomeCandidate()
	r.becomeLeader()
	// Begin to demote the second node by entering a joint config.
	cc := pb.ConfChangeV2{
		Changes: []pb.ConfChangeSingle{
			{Type: pb.ConfChangeRemoveNode, NodeID: 2},
			{Type: pb.ConfChangeAddLearnerNode, NodeID: 2},
		},
	}
	ccData, err := cc.Marshal()
	require.NoError(t, err)
	r.Step(pb.Message{
		Type: pb.MsgProp,
		Entries: []pb.Entry{
			{Type: pb.EntryConfChangeV2, Data: ccData},
		},
	})
	// Stabilize the log and make sure nothing is committed yet.
	require.Empty(t, nextEnts(r, s))
	ccIndex := r.raftLog.lastIndex()
	// Node 2 acknowledges the config change, committing it.
	r.Step(pb.Message{
		Type:  pb.MsgAppResp,
		From:  2,
		Index: ccIndex,
	})
	// Two entries commit: the leader's empty entry and the config change.
	ents := nextEnts(r, s)
	require.Len(t, ents, 2)
	require.Equal(t, pb.EntryNormal, ents[0].Type)
	require.Nil(t, ents[0].Data)
	require.Equal(t, pb.EntryConfChangeV2, ents[1].Type)
	// Apply the config changes. This enters a joint config. At this point the
	// quorum requirement is 2, because node 2 remains a voter on the outgoing
	// side of the joint config.
	r.applyConfChange(cc.AsV2())
	// Immediately exit the joint config.
	cc = pb.ConfChangeV2{}
	ccData, err = cc.Marshal()
	require.NoError(t, err)
	r.Step(pb.Message{
		Type: pb.MsgProp,
		Entries: []pb.Entry{
			{Type: pb.EntryConfChangeV2, Data: ccData},
		},
	})
	// Stabilize the log and make sure nothing is committed yet.
	require.Empty(t, nextEnts(r, s))
	ccIndex = r.raftLog.lastIndex()
	// While the config change is pending, make another proposal.
	r.Step(pb.Message{
		Type: pb.MsgProp,
		Entries: []pb.Entry{
			{Type: pb.EntryNormal, Data: []byte("hello")},
		},
	})
	// Node 2 acknowledges the config change, committing it.
	r.Step(pb.Message{
		Type:  pb.MsgAppResp,
		From:  2,
		Index: ccIndex,
	})
	ents = nextEnts(r, s)
	require.Len(t, ents, 1)
	require.Equal(t, pb.EntryConfChangeV2, ents[0].Type)
	// Apply the config changes to exit the joint config. This reduces quorum
	// requirements so the pending command can now commit.
	r.applyConfChange(cc.AsV2())
	ents = nextEnts(r, s)
	require.Len(t, ents, 1)
	require.Equal(t, pb.EntryNormal, ents[0].Type)
	require.Equal(t, []byte("hello"), ents[0].Data)
}
// TestLeaderTransferToUpToDateNode verifies transferring should start
// immediately if the transferee has the most up-to-date log entries when
// transfer is requested.
func TestLeaderTransferToUpToDateNode(t *testing.T) {
	nt := newNetwork(nil, nil, nil)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	lead := nt.peers[1].(*raft)
	require.Equal(t, pb.PeerID(1), lead.lead)
	// Transfer leadership to 2.
	nt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})
	checkLeaderTransferState(t, lead, pb.StateFollower, 2)
	// After some log replication, transfer leadership back to 1.
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
	nt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})
	checkLeaderTransferState(t, lead, pb.StateLeader, 1)
}
// TestLeaderTransferToUpToDateNodeFromFollower verifies transferring should
// start immediately if the transferee has the most up-to-date log entries when
// transfer starts. Unlike TestLeaderTransferToUpToDateNode, where the leader
// transfer message is sent to the leader, in this test case every leader
// transfer message is sent to the follower and is redirected to the leader.
func TestLeaderTransferToUpToDateNodeFromFollower(t *testing.T) {
	nt := newNetwork(nil, nil, nil)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	lead := nt.peers[1].(*raft)
	require.Equal(t, pb.PeerID(1), lead.lead)
	// Transfer leadership to 2. Note the request is addressed to follower 2,
	// which forwards it to the leader.
	nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgTransferLeader})
	checkLeaderTransferState(t, lead, pb.StateFollower, 2)
	// After some log replication, transfer leadership back to 1.
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})
	checkLeaderTransferState(t, lead, pb.StateLeader, 1)
}
// TestLeaderTransferLeaderStepsDownImmediately verifies that the outgoing
// leader steps down to a follower as soon as it sends a MsgTimeoutNow to the
// transfer target, even before (and regardless of if) the target receives the
// MsgTimeoutNow and campaigns.
//
// The scenario is run both with and without store liveness enabled.
func TestLeaderTransferLeaderStepsDownImmediately(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testLeaderTransferLeaderStepsDownImmediately(t, storeLivenessEnabled)
		})
}
// testLeaderTransferLeaderStepsDownImmediately is the body of
// TestLeaderTransferLeaderStepsDownImmediately, parameterized on store
// liveness. The transfer target (node 3) is isolated so it never receives
// MsgTimeoutNow; the old leader still steps down immediately and later
// re-elects itself at the next term.
func testLeaderTransferLeaderStepsDownImmediately(t *testing.T, storeLivenessEnabled bool) {
	var fabric *raftstoreliveness.LivenessFabric
	var n1, n2, n3 *raft
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3)
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(1)))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(2)))
		n3 = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(3)))
	} else {
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n3 = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
	}
	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, n1, n2, n3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	// Isolate node 3. It is up-to-date, so the leadership transfer will be
	// initiated immediately, but node 3 will never receive the MsgTimeoutNow and
	// call an election.
	nt.isolate(3)
	lead := nt.peers[1].(*raft)
	require.Equal(t, uint64(1), lead.Term)
	require.Equal(t, pb.PeerID(1), lead.lead)
	// Transfer leadership to 3. The leader steps down immediately in the same
	// term, waiting for the transfer target to call an election.
	nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
	require.Equal(t, uint64(1), lead.Term)
	checkLeaderTransferState(t, lead, pb.StateFollower, None)
	// With leader leases, the ex-leader would send a MsgDefortifyLeader to
	// its followers when the support is expired.
	if storeLivenessEnabled {
		nt.livenessFabric.SetSupportExpired(1, true)
		lead.tick()
		nt.send(lead.readMessages()...)
		nt.livenessFabric.SetSupportExpired(1, false)
	}
	// Eventually, the previous leader gives up on waiting and calls an election
	// to reestablish leadership at the next term.
	for i := int64(0); i < lead.randomizedElectionTimeout; i++ {
		lead.tick()
	}
	nt.send(lead.readMessages()...)
	require.Equal(t, uint64(2), lead.Term)
	checkLeaderTransferState(t, lead, pb.StateLeader, 1)
}
// TestLeaderTransferWithCheckQuorum ensures transferring leader still works
// even the current leader is still under its leader lease.
func TestLeaderTransferWithCheckQuorum(t *testing.T) {
	nt := newNetwork(nil, nil, nil)
	// Enable checkQuorum on all peers and stagger their randomized election
	// timeouts so elections are deterministic.
	for i := int64(1); i < 4; i++ {
		r := nt.peers[pb.PeerID(i)].(*raft)
		r.checkQuorum = true
		setRandomizedElectionTimeout(r, r.electionTimeout+i)
	}
	// Letting peer 2 electionElapsed reach to timeout so that it can vote for peer 1
	f := nt.peers[2].(*raft)
	for i := int64(0); i < f.electionTimeout; i++ {
		f.tick()
	}
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	lead := nt.peers[1].(*raft)
	require.Equal(t, pb.StateLeader, lead.state)
	// Transfer leadership to 2.
	nt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})
	checkLeaderTransferState(t, lead, pb.StateFollower, 2)
	// After some log replication, transfer leadership back to 1.
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
	nt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTransferLeader})
	checkLeaderTransferState(t, lead, pb.StateLeader, 1)
}
// TestLeaderTransferToSlowFollower verifies that a transfer to a follower
// that is behind on its log is deferred: the leader first catches the target
// up with MsgApps and only then hands off leadership.
func TestLeaderTransferToSlowFollower(t *testing.T) {
	nt := newNetwork(nil, nil, nil)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	// Isolate node 3 and propose an entry on 1. This will cause node 3 to fall
	// behind on its log, so that the leadership transfer won't be initiated
	// immediately.
	nt.isolate(3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
	lead := nt.peers[1].(*raft)
	require.Equal(t, uint64(2), lead.trk.Progress(1).Match)
	require.Equal(t, uint64(1), lead.trk.Progress(3).Match)
	// Reconnect node 3 and initiate a transfer of leadership from node 1 to node
	// 3. The leader (node 1) will catch it up on log entries using MsgApps before
	// transferring it leadership using MsgTimeoutNow.
	nt.recover()
	nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
	checkLeaderTransferState(t, lead, pb.StateFollower, 3)
}
// TestLeaderTransferToCandidate verifies that leadership can be transferred
// to a node that is currently a pre-candidate: the MsgTimeoutNow instructs it
// to call a real election at the next term, which it wins.
func TestLeaderTransferToCandidate(t *testing.T) {
	nt := newNetworkWithConfig(preVoteConfigWithFortificationDisabled, nil, nil, nil)
	n3 := nt.peers[3].(*raft)
	// Elect node 1 as the leader of term 1.
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	require.Equal(t, uint64(1), n3.Term)
	// Isolate node 3 so that it decides to become a pre-candidate.
	nt.isolate(3)
	for i := int64(0); i < n3.randomizedElectionTimeout; i++ {
		nt.tick(n3)
	}
	require.Equal(t, pb.StatePreCandidate, n3.state)
	require.Equal(t, uint64(1), n3.Term)
	// Reconnect node 3 and initiate a transfer of leadership from node 1 to node
	// 3, all before node 3 steps back to a follower. This will instruct node 3 to
	// call an election at the next term, which it can and does win.
	nt.recover()
	nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
	require.Equal(t, pb.StateLeader, n3.state)
	require.Equal(t, uint64(2), n3.Term)
}
// TestLeaderTransferAfterSnapshot verifies that a leadership transfer to a
// follower that needs a snapshot waits until the follower has applied the
// snapshot and acknowledged it, and only then completes.
func TestLeaderTransferAfterSnapshot(t *testing.T) {
	nt := newNetwork(nil, nil, nil)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	// Isolate node 3 and advance/compact the leader's log so node 3 can only be
	// caught up via a snapshot.
	nt.isolate(3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
	lead := nt.peers[1].(*raft)
	nextEnts(lead, nt.storage[1])
	nt.storage[1].CreateSnapshot(lead.raftLog.applied, &pb.ConfState{Voters: lead.trk.VoterNodes()}, nil)
	nt.storage[1].Compact(lead.raftLog.applied)
	nt.recover()
	require.Equal(t, uint64(1), lead.trk.Progress(3).Match)
	filtered := pb.Message{}
	// Snapshot needs to be applied before sending MsgAppResp.
	// The hook captures node 3's MsgAppResp and holds it back.
	nt.msgHook = func(m pb.Message) bool {
		if m.Type != pb.MsgAppResp || m.From != 3 || m.Reject {
			return true
		}
		filtered = m
		return false
	}
	// Transfer leadership to 3 when node 3 is missing a snapshot.
	nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
	require.Equal(t, pb.StateLeader, lead.state)
	require.NotEqual(t, pb.Message{}, filtered)
	// Apply snapshot and resume progress.
	follower := nt.peers[3].(*raft)
	snap := follower.raftLog.nextUnstableSnapshot()
	nt.storage[3].ApplySnapshot(*snap)
	follower.appliedSnap(snap.Metadata.Index)
	nt.msgHook = nil
	// Deliver the held-back MsgAppResp; the transfer now completes.
	nt.send(filtered)
	checkLeaderTransferState(t, lead, pb.StateFollower, 3)
}
// TestLeaderTransferToSelf verifies that a leader asked to transfer
// leadership to itself treats the request as a no-op.
func TestLeaderTransferToSelf(t *testing.T) {
	nt := newNetwork(nil, nil, nil)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	lead := nt.peers[1].(*raft)
	// Transfer leadership to self, there will be noop.
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})
	checkLeaderTransferState(t, lead, pb.StateLeader, 1)
}
// TestLeaderTransferToNonExistingNode verifies that a transfer request naming
// a node that is not part of the configuration is ignored.
func TestLeaderTransferToNonExistingNode(t *testing.T) {
	nt := newNetwork(nil, nil, nil)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	lead := nt.peers[1].(*raft)
	// Transfer leadership to non-existing node, there will be noop.
	nt.send(pb.Message{From: 4, To: 1, Type: pb.MsgTransferLeader})
	checkLeaderTransferState(t, lead, pb.StateLeader, 1)
}
// TestLeaderTransferTimeout verifies that a pending leadership transfer to an
// unreachable target is abandoned after an election timeout, leaving the
// original leader in place.
func TestLeaderTransferTimeout(t *testing.T) {
	nt := newNetwork(nil, nil, nil)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	// Isolate node 3 and propose an entry on 1. This will cause node 3 to fall
	// behind on its log, so that the leadership transfer won't be initiated
	// immediately. If it were, we couldn't test the timeout.
	nt.isolate(3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
	lead := nt.peers[1].(*raft)
	// Transfer leadership to isolated node, wait for timeout.
	nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
	require.Equal(t, pb.PeerID(3), lead.leadTransferee)
	// The transfer is still pending after only heartbeatTimeout ticks...
	for i := int64(0); i < lead.heartbeatTimeout; i++ {
		lead.tick()
	}
	require.Equal(t, pb.PeerID(3), lead.leadTransferee)
	// ...but is aborted once a full electionTimeout has elapsed.
	for i := int64(0); i < lead.electionTimeout-lead.heartbeatTimeout; i++ {
		lead.tick()
	}
	checkLeaderTransferState(t, lead, pb.StateLeader, 1)
}
// TestLeaderTransferIgnoreProposal verifies that while a leadership transfer
// is in progress, new proposals are rejected with ErrProposalDropped and the
// leader's log does not advance.
func TestLeaderTransferIgnoreProposal(t *testing.T) {
	s := newTestMemoryStorage(withPeers(1, 2, 3))
	r := newTestRaft(1, 10, 1, s)
	nt := newNetwork(r, nil, nil)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	// Isolate node 3 and propose an entry on 1. This will cause node 3 to fall
	// behind on its log, so that the leadership transfer won't be initiated
	// immediately.
	nt.isolate(3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
	// Transfer leadership to the isolated, behind node. This will leave the
	// transfer in a pending state as the leader tries to catch up the target.
	nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
	lead := nt.peers[1].(*raft)
	require.Equal(t, pb.PeerID(3), lead.leadTransferee)
	// Then send proposal. This should be dropped.
	err := lead.Step(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
	require.Equal(t, ErrProposalDropped, err)
	require.Equal(t, pb.PeerID(3), lead.leadTransferee)
	require.Equal(t, uint64(2), lead.trk.Progress(1).Match)
}
// TestLeaderTransferReceiveHigherTermVote verifies that a pending leadership
// transfer is aborted when the leader observes an election at a higher term.
// The scenario is run both with and without store liveness enabled.
func TestLeaderTransferReceiveHigherTermVote(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testLeaderTransferReceiveHigherTermVote(t, storeLivenessEnabled)
		})
}
// testLeaderTransferReceiveHigherTermVote is the body of
// TestLeaderTransferReceiveHigherTermVote, parameterized on store liveness.
// While the leader is waiting to catch up a behind transfer target, node 2
// starts an election at a higher term; the pending transfer is abandoned and
// the old leader follows the new one.
func testLeaderTransferReceiveHigherTermVote(t *testing.T, storeLivenessEnabled bool) {
	// Fortification must be disabled when store liveness is off.
	var cfg func(c *Config)
	if !storeLivenessEnabled {
		cfg = fortificationDisabledConfig
	}
	nt := newNetworkWithConfig(cfg, nil, nil, nil)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	// Isolate node 3 and propose an entry on 1. This will cause node 3 to fall
	// behind on its log, so that the leadership transfer won't be initiated
	// immediately.
	nt.isolate(3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
	ldr := nt.peers[1].(*raft)
	// Transfer leadership to the isolated, behind node. This will leave the
	// transfer in a pending state as the leader tries to catch up the target.
	nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
	require.Equal(t, pb.PeerID(3), ldr.leadTransferee)
	if storeLivenessEnabled {
		// We need to withdraw support of the current leader to allow the new peer
		// to campaign and get elected.
		nt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
	}
	nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup, Index: 1, Term: 2})
	checkLeaderTransferState(t, ldr, pb.StateFollower, 2)
}
// TestLeaderTransferRemoveNode verifies that removing the transfer target
// from the configuration while a transfer is pending aborts the transfer and
// leaves the original leader in place.
func TestLeaderTransferRemoveNode(t *testing.T) {
	nt := newNetwork(nil, nil, nil)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	// Isolate node 3 and propose an entry on 1. This will cause node 3 to fall
	// behind on its log, so that the leadership transfer won't be initiated
	// immediately.
	nt.isolate(3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
	lead := nt.peers[1].(*raft)
	// The leadTransferee is removed with leadership transfer in progress.
	nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
	require.Equal(t, pb.PeerID(3), lead.leadTransferee)
	lead.applyConfChange(pb.ConfChange{NodeID: 3, Type: pb.ConfChangeRemoveNode}.AsV2())
	checkLeaderTransferState(t, lead, pb.StateLeader, 1)
}
// TestLeaderTransferDemoteNode verifies that demoting the transfer target to
// a learner while a transfer is pending aborts the transfer and leaves the
// original leader in place.
func TestLeaderTransferDemoteNode(t *testing.T) {
	nt := newNetwork(nil, nil, nil)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	// Isolate node 3 and propose an entry on 1. This will cause node 3 to fall
	// behind on its log, so that the leadership transfer won't be initiated
	// immediately.
	nt.isolate(3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
	lead := nt.peers[1].(*raft)
	// The leadTransferee is demoted with leadership transfer in progress.
	nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
	require.Equal(t, pb.PeerID(3), lead.leadTransferee)
	// Demote node 3 via a joint config: remove as voter, re-add as learner.
	lead.applyConfChange(pb.ConfChangeV2{
		Changes: []pb.ConfChangeSingle{
			{
				Type:   pb.ConfChangeRemoveNode,
				NodeID: 3,
			},
			{
				Type:   pb.ConfChangeAddLearnerNode,
				NodeID: 3,
			},
		},
	})
	// Make the Raft group commit the LeaveJoint entry.
	lead.applyConfChange(pb.ConfChangeV2{})
	checkLeaderTransferState(t, lead, pb.StateLeader, 1)
}
// TestLeaderTransferBack verifies leadership can transfer back to self when
// last transfer is pending, which cancels the transfer attempt.
func TestLeaderTransferBack(t *testing.T) {
	nt := newNetwork(nil, nil, nil)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	// Isolate node 3 and propose an entry on 1. This will cause node 3 to fall
	// behind on its log, so that the leadership transfer won't be initiated
	// immediately.
	nt.isolate(3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
	lead := nt.peers[1].(*raft)
	nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
	require.Equal(t, pb.PeerID(3), lead.leadTransferee)
	// Transfer leadership back to self.
	// This cancels the pending transfer to node 3.
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgTransferLeader})
	checkLeaderTransferState(t, lead, pb.StateLeader, 1)
}
// TestLeaderTransferSecondTransferToAnotherNode verifies leader can transfer to
// another node when last transfer is pending, which cancels the previous
// transfer attempt and starts a new one.
func TestLeaderTransferSecondTransferToAnotherNode(t *testing.T) {
	nt := newNetwork(nil, nil, nil)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	// Isolate node 3 and propose an entry on 1. This will cause node 3 to fall
	// behind on its log, so that the leadership transfer won't be initiated
	// immediately.
	nt.isolate(3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
	lead := nt.peers[1].(*raft)
	nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
	require.Equal(t, pb.PeerID(3), lead.leadTransferee)
	// Transfer leadership to another node.
	// Node 2 is up to date, so this transfer completes immediately.
	nt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})
	checkLeaderTransferState(t, lead, pb.StateFollower, 2)
}
// TestLeaderTransferSecondTransferToSameNode verifies second transfer leader
// request to the same node should not extend the timeout while the first one is
// pending.
func TestLeaderTransferSecondTransferToSameNode(t *testing.T) {
	nt := newNetwork(nil, nil, nil)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	// Isolate node 3 and propose an entry on 1. This will cause node 3 to fall
	// behind on its log, so that the leadership transfer won't be initiated
	// immediately.
	nt.isolate(3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{}}})
	lead := nt.peers[1].(*raft)
	nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
	require.Equal(t, pb.PeerID(3), lead.leadTransferee)
	for i := int64(0); i < lead.heartbeatTimeout; i++ {
		lead.tick()
	}
	// Second transfer leadership request to the same node.
	nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
	// Only electionTimeout-heartbeatTimeout more ticks are needed for the
	// transfer to time out, proving the second request did not reset the clock.
	for i := int64(0); i < lead.electionTimeout-lead.heartbeatTimeout; i++ {
		lead.tick()
	}
	checkLeaderTransferState(t, lead, pb.StateLeader, 1)
}
// checkLeaderTransferState asserts that a leadership transfer has fully
// resolved: r is in the expected state, knows the expected leader, and no
// transfer remains pending.
func checkLeaderTransferState(t *testing.T, r *raft, wantState pb.StateType, wantLead pb.PeerID) {
	require.Equal(t, wantState, r.state)
	require.Equal(t, wantLead, r.lead)
	require.Equal(t, None, r.leadTransferee)
}
// TestLeaderTransferNonMember verifies that when a MsgTimeoutNow arrives at a
// node that has been removed from the group, nothing happens. (previously, if
// the node also got votes, it would panic as it transitioned to StateLeader).
func TestLeaderTransferNonMember(t *testing.T) {
	// Node 1 is not in the peer set {2, 3, 4}.
	r := newTestRaft(1, 5, 1, newTestMemoryStorage(withPeers(2, 3, 4)))
	r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgTimeoutNow})
	// Even with enough vote responses for a quorum, node 1 must stay follower.
	r.Step(pb.Message{From: 2, To: 1, Type: pb.MsgVoteResp})
	r.Step(pb.Message{From: 3, To: 1, Type: pb.MsgVoteResp})
	require.Equal(t, pb.StateFollower, r.state)
}
// TestLeaderTransferDifferentTerms verifies that a MsgTimeoutNow will only be
// respected if it is from the current term or from a new term.
func TestLeaderTransferDifferentTerms(t *testing.T) {
	nt := newNetwork(nil, nil, nil)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	// Transfer leadership to node 2, then 3, to drive up the term.
	nt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})
	nt.send(pb.Message{From: 3, To: 2, Type: pb.MsgTransferLeader})
	// All nodes are now at term 3 with node 3 as leader.
	for i, p := range nt.peers {
		r := p.(*raft)
		expState := pb.StateFollower
		if i == 3 {
			expState = pb.StateLeader
		}
		require.Equal(t, expState, r.state)
		require.Equal(t, uint64(3), r.Term)
	}
	// Send a MsgTimeoutNow to node 1 from an old term. This should be ignored.
	// This is important, as a MsgTimeoutNow allows a follower to call a "force"
	// election, which bypasses pre-vote and leader support safeguards. We don't
	// want a stale MsgTimeoutNow sent from an old leader giving a follower
	// permission to overthrow a newer leader.
	nt.send(pb.Message{From: 2, To: 1, Term: 2, Type: pb.MsgTimeoutNow})
	n1 := nt.peers[1].(*raft)
	require.Equal(t, pb.StateFollower, n1.state)
	require.Equal(t, uint64(3), n1.Term)
	// Send a MsgTimeoutNow to node 1 from the current term. This should cause it
	// to call an election for the _next_ term, which it will win.
	nt.send(pb.Message{From: 3, To: 1, Term: 3, Type: pb.MsgTimeoutNow})
	require.Equal(t, pb.StateLeader, n1.state)
	require.Equal(t, uint64(4), n1.Term)
	// Send a MsgTimeoutNow to node 2 from a new term. This should advance the
	// term on node 2 and cause it to call an election for the _next_ term, which
	// it will win.
	nt.send(pb.Message{From: 1, To: 2, Term: 5, Type: pb.MsgTimeoutNow})
	n2 := nt.peers[2].(*raft)
	require.Equal(t, pb.StateLeader, n2.state)
	require.Equal(t, uint64(6), n2.Term)
}
// TestLeaderTransferStaleFollower verifies that a MsgTimeoutNow received by a
// stale follower (a follower still at an earlier term) will cause the follower
// to call an election which it can not win.
//
// The scenario is run both with and without store liveness enabled.
func TestLeaderTransferStaleFollower(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testLeaderTransferStaleFollower(t, storeLivenessEnabled)
		})
}
// testLeaderTransferStaleFollower is the body of TestLeaderTransferStaleFollower,
// parameterized on store liveness. Node 3 misses the MsgTimeoutNow due to a
// partition and stays at term 1 while node 1 re-elects itself at term 2; when
// the stale MsgTimeoutNow is finally delivered, node 3's forced election fails
// and it learns the current term instead.
func testLeaderTransferStaleFollower(t *testing.T, storeLivenessEnabled bool) {
	var fabric *raftstoreliveness.LivenessFabric
	var n1, n2, n3 *raft
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3)
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(1)))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(2)))
		n3 = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(3)))
	} else {
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n3 = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
	}
	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, n1, n2, n3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	nodes := []*raft{n1, n2, n3}
	// Attempt to transfer leadership to node 3. The MsgTimeoutNow is sent
	// immediately and node 1 steps down as leader, but node 3 does not receive
	// the message due to a network partition.
	nt.isolate(3)
	nt.send(pb.Message{From: 3, To: 1, Type: pb.MsgTransferLeader})
	for _, n := range nodes {
		require.Equal(t, pb.StateFollower, n.state)
		require.Equal(t, uint64(1), n.Term)
	}
	// With leader leases, the ex-leader would send a MsgDefortifyLeader to
	// its followers when the support is expired.
	if storeLivenessEnabled {
		nt.livenessFabric.SetSupportExpired(1, true)
		n1.tick()
		nt.send(nt.filter(n1.readMessages())...)
		nt.livenessFabric.SetSupportExpired(1, false)
	}
	// Eventually, the previous leader gives up on waiting and calls an election
	// to reestablish leadership at the next term. Node 3 does not hear about this
	// either.
	for i := int64(0); i < n1.randomizedElectionTimeout; i++ {
		n1.tick()
	}
	nt.send(nt.filter(n1.readMessages())...)
	for _, n := range nodes {
		expState := pb.StateFollower
		if n == n1 {
			expState = pb.StateLeader
		}
		// Node 3 is still partitioned off, so it remains at term 1.
		expTerm := uint64(2)
		if n == n3 {
			expTerm = 1
		}
		require.Equal(t, expState, n.state)
		require.Equal(t, expTerm, n.Term)
	}
	// The network partition heals and n3 receives the lost MsgTimeoutNow that n1
	// had previously tried to send to it back in term 1. It calls an unsuccessful
	// election, through which it learns about the new leadership term.
	nt.recover()
	nt.send(pb.Message{From: 1, To: 3, Term: 1, Type: pb.MsgTimeoutNow})
	for _, n := range nodes {
		expState := pb.StateFollower
		if n == n1 {
			expState = pb.StateLeader
		}
		require.Equal(t, expState, n.state)
		require.Equal(t, uint64(2), n.Term)
	}
}
// TestNodeWithSmallerTermCanCompleteElection tests the scenario where a node
// that has been partitioned away (and fallen behind) rejoins the cluster at
// about the same time the leader node gets partitioned away.
// Previously the cluster would come to a standstill when run with PreVote
// enabled.
//
// The scenario is run both with and without store liveness enabled.
func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testNodeWithSmallerTermCanCompleteElection(t, storeLivenessEnabled)
		})
}
// testNodeWithSmallerTermCanCompleteElection runs the scenario described on
// TestNodeWithSmallerTermCanCompleteElection, with store liveness either
// enabled or disabled.
func testNodeWithSmallerTermCanCompleteElection(t *testing.T, storeLivenessEnabled bool) {
	var fabric *raftstoreliveness.LivenessFabric
	var n1, n2, n3 *raft
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3)
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(1)))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(2)))
		n3 = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(3)))
	} else {
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n3 = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
	}
	n1.becomeFollower(1, None)
	n2.becomeFollower(1, None)
	n3.becomeFollower(1, None)
	n1.preVote = true
	n2.preVote = true
	n3.preVote = true
	// Cause a network partition to isolate node 3.
	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, n1, n2, n3)
	nt.cut(1, 3)
	nt.cut(2, 3)
	if storeLivenessEnabled {
		// We need to isolate node 3 in the store liveness layer as well.
		nt.livenessFabric.Isolate(3)
	}
	// Elect node 1 as the leader of the majority side of the partition.
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	sm := nt.peers[1].(*raft)
	assert.Equal(t, pb.StateLeader, sm.state)
	sm = nt.peers[2].(*raft)
	assert.Equal(t, pb.StateFollower, sm.state)
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
	sm = nt.peers[3].(*raft)
	if storeLivenessEnabled {
		// Since 3 isn't supported by a majority, it won't pre-campaign.
		assert.Equal(t, pb.StateFollower, sm.state)
	} else {
		assert.Equal(t, pb.StatePreCandidate, sm.state)
	}
	if storeLivenessEnabled {
		// Withdraw support from 1 so 2 can campaign and get elected.
		nt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
	}
	nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
	// check whether the term values are expected
	sm = nt.peers[1].(*raft)
	assert.Equal(t, uint64(3), sm.Term)
	sm = nt.peers[2].(*raft)
	assert.Equal(t, uint64(3), sm.Term)
	sm = nt.peers[3].(*raft)
	assert.Equal(t, uint64(1), sm.Term)
	// check state
	sm = nt.peers[1].(*raft)
	assert.Equal(t, pb.StateFollower, sm.state)
	sm = nt.peers[2].(*raft)
	assert.Equal(t, pb.StateLeader, sm.state)
	sm = nt.peers[3].(*raft)
	if storeLivenessEnabled {
		// Since 3 wasn't supported by a majority, it didn't pre-campaign.
		assert.Equal(t, pb.StateFollower, sm.state)
	} else {
		assert.Equal(t, pb.StatePreCandidate, sm.state)
	}
	sm.logger.Infof("going to bring back peer 3 and kill peer 2")
	// Recover the network, then immediately isolate node 2, which is currently
	// the leader; this emulates the crash of node 2.
	nt.recover()
	nt.cut(2, 1)
	nt.cut(2, 3)
	if storeLivenessEnabled {
		// Un-isolate 3 in store liveness as well.
		nt.livenessFabric.UnIsolate(3)
		// Re-grant support for 3 from all peers. This will allow 1 to campaign.
		nt.livenessFabric.GrantSupportForPeerFromAllPeers(1)
		// Isolate node 2 in store liveness as well.
		nt.livenessFabric.Isolate(2)
	}
	// call for election
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	// do we have a leader?
	sma := nt.peers[1].(*raft)
	smb := nt.peers[3].(*raft)
	assert.True(t, sma.state == pb.StateLeader || smb.state == pb.StateLeader)
}
// TestPreVoteWithSplitVote verifies that after split vote, cluster can complete
// election in next round.
func TestPreVoteWithSplitVote(t *testing.T) {
	// Run the scenario both with and without store liveness.
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testPreVoteWithSplitVote(t, storeLivenessEnabled)
		})
}
// testPreVoteWithSplitVote runs the scenario described on
// TestPreVoteWithSplitVote, with store liveness either enabled or disabled.
func testPreVoteWithSplitVote(t *testing.T, storeLivenessEnabled bool) {
	var fabric *raftstoreliveness.LivenessFabric
	var n1, n2, n3 *raft
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3)
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(1)))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(2)))
		n3 = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(3)))
	} else {
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n3 = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
	}
	n1.becomeFollower(1, None)
	n2.becomeFollower(1, None)
	n3.becomeFollower(1, None)
	n1.preVote = true
	n2.preVote = true
	n3.preVote = true
	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, n1, n2, n3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	// simulate leader down. followers start split vote.
	nt.isolate(1)
	if storeLivenessEnabled {
		// We need to isolate 1 in the store liveness layer as well.
		nt.livenessFabric.Isolate(1)
	}
	// Both remaining peers campaign simultaneously, splitting the vote.
	nt.send([]pb.Message{
		{From: 2, To: 2, Type: pb.MsgHup},
		{From: 3, To: 3, Type: pb.MsgHup},
	}...)
	// check whether the term values are expected
	sm := nt.peers[2].(*raft)
	assert.Equal(t, uint64(3), sm.Term)
	sm = nt.peers[3].(*raft)
	assert.Equal(t, uint64(3), sm.Term)
	// check state
	sm = nt.peers[2].(*raft)
	assert.Equal(t, pb.StateCandidate, sm.state)
	sm = nt.peers[3].(*raft)
	assert.Equal(t, pb.StateCandidate, sm.state)
	// node 2 election timeout first
	nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
	// check whether the term values are expected
	sm = nt.peers[2].(*raft)
	assert.Equal(t, uint64(4), sm.Term)
	sm = nt.peers[3].(*raft)
	assert.Equal(t, uint64(4), sm.Term)
	// check state
	sm = nt.peers[2].(*raft)
	assert.Equal(t, pb.StateLeader, sm.state)
	sm = nt.peers[3].(*raft)
	assert.Equal(t, pb.StateFollower, sm.state)
}
// TestPreVoteWithCheckQuorum ensures that after a node becomes pre-candidate,
// checkQuorum still behaves correctly: a follower with a current leader
// ignores pre-votes, yet an election can still complete.
func TestPreVoteWithCheckQuorum(t *testing.T) {
	n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
	n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
	n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)))
	n1.becomeFollower(1, None)
	n2.becomeFollower(1, None)
	n3.becomeFollower(1, None)
	n1.preVote = true
	n2.preVote = true
	n3.preVote = true
	n1.checkQuorum = true
	n2.checkQuorum = true
	n3.checkQuorum = true
	nt := newNetwork(n1, n2, n3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	// isolate node 1. node 2 and node 3 have leader info
	nt.isolate(1)
	// check state
	sm := nt.peers[1].(*raft)
	require.Equal(t, pb.StateLeader, sm.state)
	sm = nt.peers[2].(*raft)
	require.Equal(t, pb.StateFollower, sm.state)
	sm = nt.peers[3].(*raft)
	require.Equal(t, pb.StateFollower, sm.state)
	// node 2 will ignore node 3's PreVote
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
	nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
	// Do we have a leader?
	// NOTE(review): this assertion also passes if neither campaign succeeded
	// (n3 simply remaining a follower); confirm whether it should instead
	// require one of n2/n3 to be StateLeader.
	assert.True(t, n2.state == pb.StateLeader || n3.state == pb.StateFollower)
}
// TestLearnerCampaign verifies that a learner won't campaign even if it receives
// a MsgHup or MsgTimeoutNow.
func TestLearnerCampaign(t *testing.T) {
	n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1)))
	n1.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeAddLearnerNode}.AsV2())
	n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1)))
	n2.applyConfChange(pb.ConfChange{NodeID: 2, Type: pb.ConfChangeAddLearnerNode}.AsV2())
	nt := newNetwork(n1, n2)
	// MsgHup must be a no-op on the learner.
	nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
	require.True(t, n2.isLearner)
	require.Equal(t, pb.StateFollower, n2.state)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	require.True(t, n1.state == pb.StateLeader && n1.lead == 1)
	// NB: TransferLeader already checks that the recipient is not a learner, but
	// the check could have happened by the time the recipient becomes a learner,
	// in which case it will receive MsgTimeoutNow as in this test case and we
	// verify that it's ignored.
	nt.send(pb.Message{From: 1, To: 2, Type: pb.MsgTimeoutNow})
	require.Equal(t, pb.StateFollower, n2.state)
}
// newPreVoteMigrationCluster simulates rolling update of a cluster for
// Pre-Vote. The cluster has 3 nodes [n1, n2, n3] and is returned in the
// following state:
// n1 is leader with term 2
// n2 is follower with term 2
// n3 is partitioned, with term 4 and less log, state is candidate
func newPreVoteMigrationCluster(
	t *testing.T, storeLivenessEnabled bool, fabric *raftstoreliveness.LivenessFabric,
) *network {
	var n1, n2, n3 *raft
	if storeLivenessEnabled {
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(1)))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(2)))
		n3 = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(fabric.GetStoreLiveness(3)))
	} else {
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n3 = newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
	}
	n1.becomeFollower(1, None)
	n2.becomeFollower(1, None)
	n3.becomeFollower(1, None)
	n1.preVote = true
	n2.preVote = true
	// We intentionally do not enable PreVote for n3, this is done so in order
	// to simulate a rolling restart process where it's possible to have a mixed
	// version cluster with replicas with PreVote enabled, and replicas without.
	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, n1, n2, n3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	require.Equal(t, pb.StateLeader, n1.state)
	require.Equal(t, pb.StateFollower, n2.state)
	require.Equal(t, pb.StateFollower, n3.state)
	// Cause a network partition to isolate n3.
	nt.isolate(3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("some data")}}})
	if storeLivenessEnabled {
		// We need to withdraw support from 1 before 3 can campaign and get elected.
		nt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
	}
	// Two failed campaigns (no PreVote on n3) bump n3's term to 4.
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
	// check state
	require.Equal(t, pb.StateLeader, n1.state)
	require.Equal(t, pb.StateFollower, n2.state)
	require.Equal(t, pb.StateCandidate, n3.state)
	// check term
	require.Equal(t, uint64(2), n1.Term)
	require.Equal(t, uint64(2), n2.Term)
	require.Equal(t, uint64(4), n3.Term)
	if storeLivenessEnabled {
		// Restore the liveness support state to return a working cluster with all
		// nodes having support.
		nt.livenessFabric.GrantSupportForPeerFromAllPeers(1)
	}
	// Enable prevote on n3, then recover the network
	n3.preVote = true
	nt.recover()
	return nt
}
// TestPreVoteMigrationCanCompleteElection verifies that a cluster mid-way
// through a PreVote migration (see newPreVoteMigrationCluster) can still
// complete an election after the leader goes down.
func TestPreVoteMigrationCanCompleteElection(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testPreVoteMigrationCanCompleteElection(t, storeLivenessEnabled)
		})
}
// testPreVoteMigrationCanCompleteElection runs the scenario described on
// TestPreVoteMigrationCanCompleteElection, with store liveness either
// enabled or disabled.
func testPreVoteMigrationCanCompleteElection(t *testing.T, storeLivenessEnabled bool) {
	var fabric *raftstoreliveness.LivenessFabric
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3)
	}
	nt := newPreVoteMigrationCluster(t, storeLivenessEnabled, fabric)
	// n1 is leader with term 2
	// n2 is follower with term 2
	// n3 is pre-candidate with term 4, and less log
	n2 := nt.peers[2].(*raft)
	n3 := nt.peers[3].(*raft)
	// simulate leader down
	nt.isolate(1)
	if storeLivenessEnabled {
		// We need to withdraw support from 1 so 3 can campaign and get elected.
		nt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
	}
	// Call for elections from both n2 and n3.
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
	if storeLivenessEnabled {
		// We need to withdraw support from 3 so 2 can campaign and get elected.
		nt.livenessFabric.WithdrawSupportForPeerFromAllPeers(3)
	}
	nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
	// check state
	assert.Equal(t, pb.StateFollower, n2.state)
	assert.Equal(t, pb.StatePreCandidate, n3.state)
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
	nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup})
	// Do we have a leader?
	// NOTE(review): this assertion also passes when no leader was elected at
	// all (n3 merely remaining a follower); confirm whether it should require
	// one of n2/n3 to be StateLeader.
	assert.True(t, n2.state == pb.StateLeader || n3.state == pb.StateFollower)
}
// TestPreVoteMigrationWithFreeStuckPreCandidate verifies that a pre-candidate
// stranded at a higher term during a PreVote migration can eventually be
// freed and the cluster re-converges on a leader.
func TestPreVoteMigrationWithFreeStuckPreCandidate(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testPreVoteMigrationWithFreeStuckPreCandidate(t, storeLivenessEnabled)
		})
}
// testPreVoteMigrationWithFreeStuckPreCandidate runs the scenario described on
// TestPreVoteMigrationWithFreeStuckPreCandidate, with store liveness either
// enabled or disabled.
func testPreVoteMigrationWithFreeStuckPreCandidate(t *testing.T, storeLivenessEnabled bool) {
	var fabric *raftstoreliveness.LivenessFabric
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3)
	}
	nt := newPreVoteMigrationCluster(t, storeLivenessEnabled, fabric)
	// n1 is leader with term 2
	// n2 is follower with term 2
	// n3 is pre-candidate with term 4, and less log
	n1 := nt.peers[1].(*raft)
	n2 := nt.peers[2].(*raft)
	n3 := nt.peers[3].(*raft)
	assert.Equal(t, pb.StateLeader, n1.state)
	if storeLivenessEnabled {
		assert.Equal(t, hlc.MaxTimestamp, getBasicStatus(n1).LeadSupportUntil)
	}
	if storeLivenessEnabled {
		// 1 needs to withdraw support for 3 before it can become a preCandidate.
		nt.livenessFabric.WithdrawSupport(1, 3)
	}
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
	assert.Equal(t, pb.StateLeader, n1.state)
	assert.Equal(t, pb.StateFollower, n2.state)
	assert.Equal(t, pb.StatePreCandidate, n3.state)
	// Pre-Vote again for safety.
	nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
	assert.Equal(t, pb.StateLeader, n1.state)
	assert.Equal(t, pb.StateFollower, n2.state)
	assert.Equal(t, pb.StatePreCandidate, n3.state)
	// If the stuck candidate were to talk to the follower, it may be ignored,
	// depending on whether the follower is fortified by the leader.
	nt.send(pb.Message{From: 3, To: 2, Type: pb.MsgAppResp, Term: n3.Term})
	if storeLivenessEnabled {
		assert.Equal(t, n3.Term-2, n2.Term)
	} else {
		assert.Equal(t, n3.Term, n2.Term)
	}
	// Disrupt the leader so that the stuck peer is freed. The leader steps down
	// immediately if it's not fortified. However, if it was fortified, it will
	// only step down when a quorum stops supporting it.
	hbType := pb.MsgHeartbeat
	if storeLivenessEnabled {
		hbType = pb.MsgFortifyLeader
	}
	nt.send(pb.Message{From: 1, To: 3, Type: hbType, Term: n1.Term})
	if storeLivenessEnabled {
		// Expect that we are still the leader since it's still not safe to step
		// down, however, the step-down intent is recorded.
		assert.Equal(t, pb.StateLeader, n1.state)
		assert.Equal(t, true, n1.fortificationTracker.SteppingDown())
		// The leader still hasn't defortified, so the stranded peer still can't
		// win an election.
		nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})
		assert.Equal(t, pb.StatePreCandidate, n3.state)
		assert.Equal(t, pb.StateLeader, n1.state)
		assert.Equal(t, n3.Term-2, n1.Term)
		assert.Equal(t, n1.id, n1.lead)
		// Expire the support, and tick it once. It should step down.
		nt.livenessFabric.SetSupportExpired(1, true)
		n1.tick()
	}
	assert.Equal(t, pb.StateFollower, n1.state)
	// Node 1 doesn't remember that it was the leader.
	assert.Equal(t, None, n1.lead)
	assert.Equal(t, n3.Term, n1.Term)
	// Return the support back to node 1 so that it can call an election.
	if storeLivenessEnabled {
		fabric.SetSupportExpired(1, false)
	}
	// The ex-leader calls an election, which it wins.
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	assert.Equal(t, pb.StateLeader, n1.state)
}
// testConfChangeCheckBeforeCampaign checks that a leadership-transfer target
// refuses to campaign while it has a committed-but-unapplied conf change, and
// campaigns successfully once the conf change is applied. v2 selects between
// EntryConfChange and EntryConfChangeV2 encodings.
func testConfChangeCheckBeforeCampaign(t *testing.T, v2 bool, storeLivenessEnabled bool) {
	var cfg func(c *Config) = nil
	if !storeLivenessEnabled {
		cfg = fortificationDisabledConfig
	}
	// Three nil peers => the network constructs three fresh raft instances.
	nt := newNetworkWithConfig(cfg, nil, nil, nil)
	n1 := nt.peers[1].(*raft)
	n2 := nt.peers[2].(*raft)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	assert.Equal(t, pb.StateLeader, n1.state)
	// Begin to add a fourth node.
	cc := pb.ConfChange{
		Type:   pb.ConfChangeAddNode,
		NodeID: 4,
	}
	var ccData []byte
	var err error
	var ty pb.EntryType
	if v2 {
		ccv2 := cc.AsV2()
		ccData, err = ccv2.Marshal()
		ty = pb.EntryConfChangeV2
	} else {
		ccData, err = cc.Marshal()
		ty = pb.EntryConfChange
	}
	require.NoError(t, err)
	nt.send(pb.Message{
		From: 1,
		To:   1,
		Type: pb.MsgProp,
		Entries: []pb.Entry{
			{Type: ty, Data: ccData},
		},
	})
	if storeLivenessEnabled {
		// We need to withdraw support of the current leader to allow the new peer
		// to campaign and get elected.
		nt.livenessFabric.WithdrawSupportForPeerFromAllPeers(1)
	}
	// Trigger campaign in node 2
	for i := int64(0); i < n2.randomizedElectionTimeout; i++ {
		n2.tick()
	}
	// It's still follower because committed conf change is not applied.
	assert.Equal(t, pb.StateFollower, n2.state)
	// Transfer leadership to peer 2.
	nt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})
	// The outgoing leader steps down immediately.
	assert.Equal(t, pb.StateFollower, n1.state)
	// The transfer target does not campaign immediately because the committed
	// conf change is not applied.
	assert.Equal(t, pb.StateFollower, n2.state)
	if storeLivenessEnabled {
		// Restore the support state.
		nt.livenessFabric.GrantSupportForPeerFromAllPeers(1)
	}
	// Advance apply on node 1 and re-establish leadership.
	nextEnts(n1, nt.storage[1])
	for i := int64(0); i < n1.randomizedElectionTimeout; i++ {
		n1.tick()
	}
	nt.send(n1.readMessages()...)
	assert.Equal(t, pb.StateLeader, n1.state)
	// Advance apply on node 2.
	nextEnts(n2, nt.storage[2])
	// Transfer leadership to peer 2 again.
	nt.send(pb.Message{From: 2, To: 1, Type: pb.MsgTransferLeader})
	// The outgoing leader steps down immediately.
	assert.Equal(t, pb.StateFollower, n1.state)
	// The transfer target campaigns immediately now that the committed conf
	// change is applied.
	assert.Equal(t, pb.StateLeader, n2.state)
}
// TestConfChangeCheckBeforeCampaign tests if unapplied ConfChange is checked before campaign.
func TestConfChangeCheckBeforeCampaign(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testConfChangeCheckBeforeCampaign(t, false, storeLivenessEnabled)
		})
}
// TestConfChangeV2CheckBeforeCampaign tests if unapplied ConfChangeV2 is checked before campaign.
func TestConfChangeV2CheckBeforeCampaign(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testConfChangeCheckBeforeCampaign(t, true, storeLivenessEnabled)
		})
}
// TestFastLogRejection exercises the MsgApp fast-rejection path: for various
// leader/follower log shapes it checks the (term, index) hint the follower
// includes in its rejection, and the (term, index) at which the leader probes
// next after processing that rejection.
func TestFastLogRejection(t *testing.T) {
	tests := []struct {
		leaderLog       []pb.Entry // Logs on the leader
		followerLog     []pb.Entry // Logs on the follower
		followerCompact uint64     // Index at which the follower log is compacted.
		rejectHintTerm  uint64     // Expected term included in rejected MsgAppResp.
		rejectHintIndex uint64     // Expected index included in rejected MsgAppResp.
		nextAppendTerm  uint64     // Expected term when leader appends after rejected.
		nextAppendIndex uint64     // Expected index when leader appends after rejected.
	}{
		// This case tests that leader can find the conflict index quickly.
		// Firstly leader appends (type=MsgApp,index=7,logTerm=4, entries=...);
		// After rejected leader appends (type=MsgApp,index=3,logTerm=2).
		{
			leaderLog:       index(1).terms(1, 2, 2, 4, 4, 4, 4),
			followerLog:     index(1).terms(1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3),
			rejectHintTerm:  3,
			rejectHintIndex: 7,
			nextAppendTerm:  2,
			nextAppendIndex: 3,
		},
		// This case tests that leader can find the conflict index quickly.
		// Firstly leader appends (type=MsgApp,index=8,logTerm=5, entries=...);
		// After rejected leader appends (type=MsgApp,index=4,logTerm=3).
		{
			leaderLog:       index(1).terms(1, 2, 2, 3, 4, 4, 4, 5),
			followerLog:     index(1).terms(1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3),
			rejectHintTerm:  3,
			rejectHintIndex: 8,
			nextAppendTerm:  3,
			nextAppendIndex: 4,
		},
		// This case tests that follower can find the conflict index quickly.
		// Firstly leader appends (type=MsgApp,index=4,logTerm=1, entries=...);
		// After rejected leader appends (type=MsgApp,index=1,logTerm=1).
		{
			leaderLog:       index(1).terms(1, 1, 1, 1),
			followerLog:     index(1).terms(1, 2, 2, 4),
			rejectHintTerm:  1,
			rejectHintIndex: 1,
			nextAppendTerm:  1,
			nextAppendIndex: 1,
		},
		// This case is similar to the previous case. However, this time, the
		// leader has a longer uncommitted log tail than the follower.
		// Firstly leader appends (type=MsgApp,index=6,logTerm=1, entries=...);
		// After rejected leader appends (type=MsgApp,index=1,logTerm=1).
		{
			leaderLog:       index(1).terms(1, 1, 1, 1, 1, 1),
			followerLog:     index(1).terms(1, 2, 2, 4),
			rejectHintTerm:  1,
			rejectHintIndex: 1,
			nextAppendTerm:  1,
			nextAppendIndex: 1,
		},
		// This case is similar to the previous case. However, this time, the
		// follower has a longer uncommitted log tail than the leader.
		// Firstly leader appends (type=MsgApp,index=4,logTerm=1, entries=...);
		// After rejected leader appends (type=MsgApp,index=1,logTerm=1).
		{
			leaderLog:       index(1).terms(1, 1, 1, 1),
			followerLog:     index(1).terms(1, 2, 2, 4, 4, 4),
			rejectHintTerm:  1,
			rejectHintIndex: 1,
			nextAppendTerm:  1,
			nextAppendIndex: 1,
		},
		// A normal case that there are no log conflicts.
		// Firstly leader appends (type=MsgApp,index=5,logTerm=5, entries=...);
		// After rejected leader appends (type=MsgApp,index=4,logTerm=4).
		{
			leaderLog:       index(1).terms(1, 1, 1, 4, 5),
			followerLog:     index(1).terms(1, 1, 1, 4),
			rejectHintTerm:  4,
			rejectHintIndex: 4,
			nextAppendTerm:  4,
			nextAppendIndex: 4,
		},
		// Test case from example comment in stepLeader (on leader).
		{
			leaderLog:       index(1).terms(2, 5, 5, 5, 5, 5, 5, 5, 5),
			followerLog:     index(1).terms(2, 4, 4, 4, 4, 4),
			rejectHintTerm:  4,
			rejectHintIndex: 6,
			nextAppendTerm:  2,
			nextAppendIndex: 1,
		},
		// Test case from example comment in handleAppendEntries (on follower).
		{
			leaderLog:       index(1).terms(2, 2, 2, 2, 2),
			followerLog:     index(1).terms(2, 4, 4, 4, 4, 4, 4, 4),
			rejectHintTerm:  2,
			rejectHintIndex: 1,
			nextAppendTerm:  2,
			nextAppendIndex: 1,
		},
		// A case when a stale MsgApp from leader arrives after the corresponding
		// log index got compacted.
		// A stale (type=MsgApp,index=3,logTerm=3,entries=[(term=3,index=4)]) is
		// delivered to a follower who has already compacted beyond log index 3. The
		// MsgAppResp rejection will return same index=3, with logTerm=0. The leader
		// will rollback by one entry, and send MsgApp with index=2,logTerm=1.
		{
			leaderLog:       index(1).terms(1, 1, 3),
			followerLog:     index(1).terms(1, 1, 3, 3, 3),
			followerCompact: 5, // entries <= index 5 are compacted
			rejectHintTerm:  0,
			rejectHintIndex: 3,
			nextAppendTerm:  1,
			nextAppendIndex: 2,
		},
	}
	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			s1 := NewMemoryStorage()
			s1.snapshot.Metadata.ConfState = pb.ConfState{Voters: []pb.PeerID{1, 2, 3}}
			s1.Append(test.leaderLog)
			last := test.leaderLog[len(test.leaderLog)-1]
			// becomeCandidate below bumps the term back to last.Term.
			s1.SetHardState(pb.HardState{
				Term:   last.Term - 1,
				Commit: last.Index,
			})
			n1 := newTestRaft(1, 10, 1, s1)
			n1.becomeCandidate() // bumps Term to last.Term
			n1.becomeLeader()
			s2 := NewMemoryStorage()
			s2.snapshot.Metadata.ConfState = pb.ConfState{Voters: []pb.PeerID{1, 2, 3}}
			s2.Append(test.followerLog)
			s2.SetHardState(pb.HardState{
				Term:   last.Term,
				Vote:   1,
				Commit: 0,
			})
			n2 := newTestRaft(2, 10, 1, s2)
			if test.followerCompact != 0 {
				s2.Compact(test.followerCompact)
				// NB: the state of n2 after this compaction isn't realistic because the
				// commit index is still at 0. We do this to exercise a "doesn't happen"
				// edge case behaviour, in case it still does happen in some other way.
			}
			// Heartbeat exchange kicks off the leader's first MsgApp probe.
			require.NoError(t, n2.Step(pb.Message{From: 1, To: 2, Type: pb.MsgHeartbeat}))
			msgs := n2.readMessages()
			require.Len(t, msgs, 1, "can't read 1 message from peer 2")
			require.Equal(t, pb.MsgHeartbeatResp, msgs[0].Type)
			require.NoError(t, n1.Step(msgs[0]))
			msgs = n1.readMessages()
			require.Len(t, msgs, 1, "can't read 1 message from peer 1")
			require.Equal(t, pb.MsgApp, msgs[0].Type)
			require.NoError(t, n2.Step(msgs[0]), "peer 2 step append fail")
			msgs = n2.readMessages()
			require.Len(t, msgs, 1, "can't read 1 message from peer 2")
			require.Equal(t, pb.MsgAppResp, msgs[0].Type)
			require.True(t, msgs[0].Reject, "expected rejected append response from peer 2")
			require.Equal(t, test.rejectHintTerm, msgs[0].LogTerm, "hint log term mismatch")
			require.Equal(t, test.rejectHintIndex, msgs[0].RejectHint, "hint log index mismatch")
			require.NoError(t, n1.Step(msgs[0]), "peer 1 step append fail")
			msgs = n1.readMessages()
			require.Equal(t, test.nextAppendTerm, msgs[0].LogTerm)
			require.Equal(t, test.nextAppendIndex, msgs[0].Index)
		})
	}
}
// entsWithConfig creates a single-node raft instance whose log contains one
// entry per element of terms (entry i+1 carries terms[i]), with configFunc
// (if non-nil) applied to the test configuration before construction.
func entsWithConfig(configFunc func(*Config), terms ...uint64) *raft {
	st := NewMemoryStorage()
	for i := range terms {
		st.Append([]pb.Entry{{Index: uint64(i + 1), Term: terms[i]}})
	}
	c := newTestConfig(1, 5, 1, st)
	if configFunc != nil {
		configFunc(c)
	}
	r := newRaft(c)
	// Start the instance off at the term of its last log entry.
	r.reset(terms[len(terms)-1])
	return r
}
// votedWithConfig creates a raft state machine with Vote and Term set
// to the given value but no log entries (indicating that it voted in
// the given term but has not received any logs).
func votedWithConfig(configFunc func(*Config), vote pb.PeerID, term uint64) *raft {
	st := NewMemoryStorage()
	st.SetHardState(pb.HardState{Vote: vote, Term: term})
	c := newTestConfig(1, 5, 1, st)
	if configFunc != nil {
		configFunc(c)
	}
	r := newRaft(c)
	r.reset(term)
	return r
}
// TestLogReplicationWithReorderedMessage verifies that a delayed, reordered
// MsgApp rejection does not move the leader's probe point below the already
// established match index.
func TestLogReplicationWithReorderedMessage(t *testing.T) {
	r1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
	r1.becomeCandidate()
	r1.becomeLeader()
	r1.readMessages()
	r1.trk.Progress(2).BecomeReplicate()
	r2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2)))
	// r1 sends 2 MsgApp messages to r2.
	mustAppendEntry(r1, pb.Entry{Data: []byte("somedata")})
	r1.maybeSendAppend(2)
	req1 := expectOneMessage(t, r1)
	mustAppendEntry(r1, pb.Entry{Data: []byte("somedata")})
	r1.maybeSendAppend(2)
	req2 := expectOneMessage(t, r1)
	// r2 receives the second MsgApp first due to reordering.
	r2.Step(req2)
	resp2 := expectOneMessage(t, r2)
	// r2 rejects req2
	require.True(t, resp2.Reject)
	require.Zero(t, resp2.RejectHint)
	require.Equal(t, uint64(2), resp2.Index)
	// r2 handles the first MsgApp and responds to r1.
	// And r1 updates match index accordingly.
	r2.Step(req1)
	m := expectOneMessage(t, r2)
	require.False(t, m.Reject)
	require.Equal(t, uint64(2), m.Index)
	r1.Step(m)
	m = expectOneMessage(t, r1)
	require.Equal(t, uint64(2), r1.trk.Progress(2).Match)
	// r1 observes a transient network issue to r2, hence transits to probe state.
	r1.Step(pb.Message{From: 2, To: 1, Type: pb.MsgUnreachable})
	require.Equal(t, tracker.StateProbe, r1.trk.Progress(2).State)
	// now r1 receives the delayed resp2.
	r1.Step(resp2)
	m = expectOneMessage(t, r1)
	// r1 shall re-send MsgApp from match index even if resp2's reject hint is less than matching index.
	require.Equal(t, r1.trk.Progress(2).Match, m.Index)
}
// TestFortificationMetrics verifies the leader's fortification metrics: the
// counts of accepted MsgFortifyLeaderResp, rejected MsgFortifyLeaderResp, and
// fortification messages skipped due to lack of store-liveness support.
func TestFortificationMetrics(t *testing.T) {
	fabric := raftstoreliveness.NewLivenessFabricWithPeers(1, 2, 3, 4)
	n1 := newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3, 4)),
		withStoreLiveness(fabric.GetStoreLiveness(1)))
	n2 := newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3, 4)),
		withStoreLiveness(fabric.GetStoreLiveness(2)))
	n3 := newTestRaft(3, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3, 4)),
		withStoreLiveness(fabric.GetStoreLiveness(3)))
	// NB: previously this was constructed with id 3 (a duplicate of n3); the
	// network constructor papered over it by reassigning ids positionally.
	n4 := newTestRaft(4, 10, 1, newTestMemoryStorage(withPeers(1, 2, 3, 4)),
		withStoreLiveness(fabric.GetStoreLiveness(4)))
	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, n1, n2, n3, n4)
	// Withdraw 2's SupportFor() 1. This should cause 2 to reject the
	// fortification request.
	nt.livenessFabric.WithdrawSupportFor(2, 1)
	// Withdraw 1's SupportFrom() 3. This should cause 1 to skip sending the
	// fortification message to 3.
	nt.livenessFabric.WithdrawSupportFrom(1, 3)
	nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
	// The leader should receive an accepted MsgFortifyResp from itself and 4.
	require.Equal(t, int64(2), n1.metrics.AcceptedFortificationResponses.Count())
	// The leader should receive a rejected MsgFortifyResp from 2.
	require.Equal(t, int64(1), n1.metrics.RejectedFortificationResponses.Count())
	// The leader should skip sending a MsgFortify to 3.
	require.Equal(t, int64(1), n1.metrics.SkippedFortificationDueToLackOfSupport.Count())
}
// TestPeerForgetsLeaderWhenIsolated ensures that peers forget the leader when
// they are isolated.
func TestPeerForgetsLeaderWhenIsolated(t *testing.T) {
	testutils.RunTrueAndFalse(t, "store-liveness-enabled",
		func(t *testing.T, storeLivenessEnabled bool) {
			testPeerForgetsLeaderWhenIsolated(t, storeLivenessEnabled)
		})
}
// testPeerForgetsLeaderWhenIsolated runs the scenario described on
// TestPeerForgetsLeaderWhenIsolated, with store liveness either enabled or
// disabled.
func testPeerForgetsLeaderWhenIsolated(t *testing.T, storeLivenessEnabled bool) {
	var n1, n2 *raft
	var fabric *raftstoreliveness.LivenessFabric
	if storeLivenessEnabled {
		fabric = raftstoreliveness.NewLivenessFabricWithPeers(1, 2)
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)),
			withStoreLiveness(fabric.GetStoreLiveness(1)))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2)),
			withStoreLiveness(fabric.GetStoreLiveness(2)))
	} else {
		n1 = newTestRaft(1, 10, 1, newTestMemoryStorage(withPeers(1, 2)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
		n2 = newTestRaft(2, 10, 1, newTestMemoryStorage(withPeers(1, 2)),
			withStoreLiveness(raftstoreliveness.Disabled{}))
	}
	n1.becomeFollower(1, None)
	n2.becomeFollower(1, None)
	n1.preVote = true
	n2.preVote = true
	n1.checkQuorum = true
	n2.checkQuorum = true
	// Randomly select a leader between node 1, and 2.
	leader := pb.PeerID(rand.Intn(2) + 1)
	nt := newNetworkWithConfigAndLivenessFabric(nil, fabric, n1, n2)
	nt.send(pb.Message{From: leader, To: leader, Type: pb.MsgHup})
	// Iterate over all peers and check that they have the same leader.
	for _, peer := range nt.peers {
		sm := peer.(*raft)
		require.Equal(t, leader, sm.lead)
	}
	// Isolate node one from the quorum.
	nt.isolate(1)
	if storeLivenessEnabled {
		nt.livenessFabric.SetSupportExpired(1, true)
		nt.livenessFabric.Isolate(1)
	}
	// Tick node 1 until it attempts to campaign.
	for i := int64(0); i < 2*n1.randomizedElectionTimeout; i++ {
		n1.tick()
	}
	// If storeliveness is enabled, the follower will never become a
	// pre-candidate since it's not supported by a quorum. However, if
	// storeliveness is disabled, the follower may become a pre-candidate.
	if storeLivenessEnabled {
		require.Equal(t, pb.StateFollower, n1.state)
	} else {
		require.True(t, n1.state == pb.StateFollower || n1.state == pb.StatePreCandidate)
	}
	// Make sure that 1 has forgotten the leader.
	require.Equal(t, None, n1.lead)
}
// expectOneMessage drains r's outbox, requires that it contains exactly one
// message, and returns that message.
func expectOneMessage(t *testing.T, r *raft) pb.Message {
	out := r.readMessages()
	require.Len(t, out, 1, "expect one message")
	return out[0]
}
// network routes messages between a set of test state machines, keyed by peer
// id, and can drop or filter messages to emulate partitions and lossy links.
type network struct {
	t *testing.T // optional
	// peers maps each id to its state machine (*raft or blackHole).
	peers map[pb.PeerID]stateMachine
	// storage holds the MemoryStorage backing each network-constructed peer.
	storage map[pb.PeerID]*MemoryStorage
	// dropm maps a (from, to) pair to the probability of dropping a message.
	dropm map[connem]float64
	// ignorem marks message types that the network discards entirely.
	ignorem map[pb.MessageType]bool
	// msgHook is called for each message sent. It may inspect the
	// message and return true to send it or false to drop it.
	msgHook func(pb.Message) bool
	livenessFabric *raftstoreliveness.LivenessFabric
}
// newNetwork initializes a network from peers.
// A nil node will be replaced with a new *stateMachine.
// A *stateMachine will get its k, id.
// When using stateMachine, the address list is always [1, n].
func newNetwork(peers ...stateMachine) *network {
	return newNetworkWithConfig(nil, peers...)
}
// newNetworkWithConfig is like newNetwork but calls the given func to
// modify the configuration of any state machines it creates.
func newNetworkWithConfig(configFunc func(*Config), peers ...stateMachine) *network {
	return newNetworkWithConfigAndLivenessFabric(configFunc, nil /* fabric */, peers...)
}
// newNetworkWithConfigAndLivenessFabric is like newNetwork but calls the given
// func to modify the configuration of any state machines it creates, and uses
// the store liveness fabric if provided (otherwise it builds one covering all
// peers). A nil peer is replaced with a freshly constructed *raft; a *raft
// peer is re-wired to the network's positional id and progress tracking; a
// *blackHole swallows everything sent to it.
func newNetworkWithConfigAndLivenessFabric(
	configFunc func(*Config), fabric *raftstoreliveness.LivenessFabric, peers ...stateMachine,
) *network {
	size := len(peers)
	peerAddrs := idsBySize(size)
	npeers := make(map[pb.PeerID]stateMachine, size)
	nstorage := make(map[pb.PeerID]*MemoryStorage, size)
	// If the caller didn't supply a fabric, create one that knows about every
	// peer in this network.
	if fabric == nil {
		fabric = raftstoreliveness.NewLivenessFabric()
		for j := range peers {
			fabric.AddPeer(peerAddrs[j])
		}
	}
	for j, p := range peers {
		id := peerAddrs[j]
		switch v := p.(type) {
		case nil:
			nstorage[id] = newTestMemoryStorage(withPeers(peerAddrs...))
			cfg := newTestConfig(id, 10, 1, nstorage[id],
				withStoreLiveness(fabric.GetStoreLiveness(id)))
			if configFunc != nil {
				configFunc(cfg)
			}
			sm := newRaft(cfg)
			npeers[id] = sm
		case *raft:
			// TODO(tbg): this is all pretty confused. Clean this up.
			learners := make(map[pb.PeerID]bool, len(v.config.Learners))
			for i := range v.config.Learners {
				learners[i] = true
			}
			// Rewire the instance to its positional id and rebuild its
			// progress tracking from scratch for this network's membership.
			v.id = id
			v.trk = tracker.MakeProgressTracker(&v.config, tracker.MakeEmptyProgressMap())
			if len(learners) > 0 {
				v.config.Learners = map[pb.PeerID]struct{}{}
			}
			for i := 0; i < size; i++ {
				pr := &tracker.Progress{}
				if _, ok := learners[peerAddrs[i]]; ok {
					pr.IsLearner = true
					v.config.Learners[peerAddrs[i]] = struct{}{}
				} else {
					v.config.Voters[0][peerAddrs[i]] = struct{}{}
				}
				v.trk.TestingSetProgress(peerAddrs[i], pr)
			}
			v.reset(v.Term)
			npeers[id] = v
		case *blackHole:
			npeers[id] = v
		default:
			panic(fmt.Sprintf("unexpected state machine type: %T", p))
		}
	}
	return &network{
		peers: npeers,
		storage: nstorage,
		dropm: make(map[connem]float64),
		ignorem: make(map[pb.MessageType]bool),
		livenessFabric: fabric,
	}
}
// preVoteConfig enables the PreVote optimization.
func preVoteConfig(c *Config) {
	c.PreVote = true
}

// fortificationDisabledConfig disables store liveness, and with it leader
// fortification.
func fortificationDisabledConfig(c *Config) {
	c.StoreLiveness = raftstoreliveness.Disabled{}
}

// preVoteConfigWithFortificationDisabled enables PreVote and disables store
// liveness.
func preVoteConfigWithFortificationDisabled(c *Config) {
	c.PreVote = true
	c.StoreLiveness = raftstoreliveness.Disabled{}
}
// send drives the network until it is quiescent: messages are delivered in
// FIFO order; after each delivery the recipient's post-append messages are
// advanced, and any responses it produced are filtered through the network's
// drop/ignore rules and appended to the queue.
func (nw *network) send(msgs ...pb.Message) {
	for len(msgs) > 0 {
		m := msgs[0]
		p := nw.peers[m.To]
		if nw.t != nil {
			nw.t.Log(DescribeMessage(m, nil))
		}
		// Step errors are intentionally ignored; tests assert on state instead.
		_ = p.Step(m)
		p.advanceMessagesAfterAppend()
		msgs = append(msgs[1:], nw.filter(p.readMessages())...)
	}
}
// tick advances the given raft instance by one tick and, if that produced any
// messages that survive the network's filters, routes them via network.send.
func (nw *network) tick(p *raft) {
	p.tick()
	if out := nw.filter(p.readMessages()); len(out) > 0 {
		nw.send(out...)
	}
}
// drop configures messages on the directed link from->to to be dropped with
// the given probability (values >= 1 drop everything).
func (nw *network) drop(from, to pb.PeerID, perc float64) {
	nw.dropm[connem{from, to}] = perc
}

// cut severs the link between the two peers in both directions.
func (nw *network) cut(one, other pb.PeerID) {
	nw.drop(one, other, 2.0) // always drop
	nw.drop(other, one, 2.0) // always drop
}
// isolate severs connectivity between peer id and every other peer, in both
// directions, by configuring a 100% drop rate on each link.
func (nw *network) isolate(id pb.PeerID) {
	for other := pb.PeerID(1); other <= pb.PeerID(len(nw.peers)); other++ {
		if other == id {
			continue
		}
		nw.drop(id, other, 1.0) // always drop
		nw.drop(other, id, 1.0) // always drop
	}
}
// ignore discards all future messages of type t network-wide.
func (nw *network) ignore(t pb.MessageType) {
	nw.ignorem[t] = true
}

// recover clears all drop and ignore rules, restoring full connectivity.
func (nw *network) recover() {
	nw.dropm = make(map[connem]float64)
	nw.ignorem = make(map[pb.MessageType]bool)
}
// filter applies the network's ignore rules, per-link drop probabilities, and
// the optional msgHook to msgs, returning the messages that survive. MsgHup is
// node-local and must never appear on the wire; seeing one is a test bug.
func (nw *network) filter(msgs []pb.Message) []pb.Message {
	var mm []pb.Message
	for _, m := range msgs {
		if nw.ignorem[m.Type] {
			continue
		}
		switch m.Type {
		case pb.MsgHup:
			// hups never go over the network, so don't drop them but panic
			panic("unexpected msgHup")
		default:
			perc := nw.dropm[connem{m.From, m.To}]
			if n := rand.Float64(); n < perc {
				continue // dropped by this link's configured probability
			}
		}
		if nw.msgHook != nil {
			if !nw.msgHook(m) {
				continue // dropped by the test's message hook
			}
		}
		mm = append(mm, m)
	}
	return mm
}
// connem identifies a directed network link between two peers.
type connem struct {
	from, to pb.PeerID
}

// blackHole is a stateMachine that swallows every message and emits none,
// standing in for an unreachable or inert peer.
type blackHole struct{}

func (blackHole) Step(pb.Message) error       { return nil }
func (blackHole) readMessages() []pb.Message  { return nil }
func (blackHole) advanceMessagesAfterAppend() {}

var nopStepper = &blackHole{}
// idsBySize returns the consecutive peer IDs 1..size.
func idsBySize(size int) []pb.PeerID {
	ids := make([]pb.PeerID, size)
	for i := range ids {
		ids[i] = pb.PeerID(i + 1)
	}
	return ids
}
// setRandomizedElectionTimeout sets the randomized election timeout to a
// caller-chosen value instead of letting the system pick one at random, so
// tests that depend on a specific timeout behave deterministically.
func setRandomizedElectionTimeout(r *raft, v int64) {
	r.randomizedElectionTimeout = v
}

// SetRandomizedElectionTimeout is like setRandomizedElectionTimeout, but
// exported for use by tests that are not in the raft package, using RawNode.
func SetRandomizedElectionTimeout(r *RawNode, v int64) {
	setRandomizedElectionTimeout(r.raft, v)
}
// testConfigModifiers allows callers to optionally modify newTestConfig.
type testConfigModifiers struct {
	testingStoreLiveness raftstoreliveness.StoreLiveness
	testingLogger        raftlogger.Logger
}

// testConfigModifierOpt is the type of an optional parameter to newTestConfig
// that may be used to modify the config.
type testConfigModifierOpt func(*testConfigModifiers)

// emptyTestConfigModifierOpt returns a no-op testConfigModifierOpt.
func emptyTestConfigModifierOpt() testConfigModifierOpt {
	return func(modifier *testConfigModifiers) {}
}

// withStoreLiveness explicitly uses the supplied StoreLiveness implementation.
func withStoreLiveness(storeLiveness raftstoreliveness.StoreLiveness) testConfigModifierOpt {
	return func(modifier *testConfigModifiers) {
		modifier.testingStoreLiveness = storeLiveness
	}
}

// withLogger explicitly uses the supplied raft logger.
func withLogger(logger raftlogger.Logger) testConfigModifierOpt {
	return func(modifier *testConfigModifiers) {
		modifier.testingLogger = logger
	}
}
// newTestConfig builds a raft Config for peer id with the given election and
// heartbeat tick intervals, backed by storage. Optional modifiers may swap in
// a different store liveness implementation or logger; the defaults are
// AlwaysLive and the default raft logger.
func newTestConfig(
	id pb.PeerID, election, heartbeat int64, storage Storage, opts ...testConfigModifierOpt,
) *Config {
	var mods testConfigModifiers
	for _, apply := range opts {
		apply(&mods)
	}

	var storeLiveness raftstoreliveness.StoreLiveness = raftstoreliveness.AlwaysLive{}
	if mods.testingStoreLiveness != nil {
		storeLiveness = mods.testingStoreLiveness
	}

	var logger raftlogger.Logger = raftlogger.DefaultRaftLogger
	if mods.testingLogger != nil {
		logger = mods.testingLogger
	}

	return &Config{
		ID:                 id,
		ElectionTick:       election,
		ElectionJitterTick: election,
		HeartbeatTick:      heartbeat,
		Storage:            storage,
		MaxSizePerMsg:      noLimit,
		MaxInflightMsgs:    256,
		StoreLiveness:      storeLiveness,
		Logger:             logger,
		CRDBVersion:        cluster.MakeTestingClusterSettings().Version,
		Metrics:            NewMetrics(),
	}
}
// testMemoryStorageOptions mutates a MemoryStorage during construction.
type testMemoryStorageOptions func(*MemoryStorage)

// withPeers seeds the storage snapshot's conf state with the given voters.
func withPeers(peers ...pb.PeerID) testMemoryStorageOptions {
	return func(ms *MemoryStorage) {
		ms.snapshot.Metadata.ConfState.Voters = peers
	}
}

// withLearners seeds the storage snapshot's conf state with the given learners.
func withLearners(learners ...pb.PeerID) testMemoryStorageOptions {
	return func(ms *MemoryStorage) {
		ms.snapshot.Metadata.ConfState.Learners = learners
	}
}

// newTestMemoryStorage returns a MemoryStorage with all options applied.
func newTestMemoryStorage(opts ...testMemoryStorageOptions) *MemoryStorage {
	ms := NewMemoryStorage()
	for _, o := range opts {
		o(ms)
	}
	return ms
}
// newTestRaft constructs a raft instance from a test configuration built with
// the given parameters and optional modifiers.
func newTestRaft(
	id pb.PeerID, election, heartbeat int64, storage Storage, opts ...testConfigModifierOpt,
) *raft {
	return newRaft(newTestConfig(id, election, heartbeat, storage, opts...))
}

// newTestLearnerRaft constructs a raft instance the same way as newTestRaft;
// learner membership is expected to come from the supplied storage.
func newTestLearnerRaft(
	id pb.PeerID, election, heartbeat int64, storage Storage, opts ...testConfigModifierOpt,
) *raft {
	return newRaft(newTestConfig(id, election, heartbeat, storage, opts...))
}
// newTestRawNode sets up a RawNode with the given peers. The configuration will
// not be reflected in the Storage. Panics if the RawNode cannot be created,
// which is acceptable in tests.
func newTestRawNode(
	id pb.PeerID, election, heartbeat int64, storage Storage, opts ...testConfigModifierOpt,
) *RawNode {
	cfg := newTestConfig(id, election, heartbeat, storage, opts...)
	rn, err := NewRawNode(cfg)
	if err != nil {
		panic(err)
	}
	return rn
}
from twython import Twython, TwythonError, TwythonAuthError
from .config import app_key, app_secret, screen_name, unittest
class TwythonAuthTestCase(unittest.TestCase):
    """Exercises Twython's OAuth 1 and OAuth 2 authentication flows.

    Every test is currently decorated with @unittest.skip pending an update;
    the bodies document the expected success/failure behavior for valid and
    invalid application/user credentials.
    """

    def setUp(self):
        # OAuth 1 clients: valid app keys, bogus app keys, and bogus app keys
        # paired with bogus user tokens.
        self.api = Twython(app_key, app_secret)

        self.bad_api = Twython('BAD_APP_KEY', 'BAD_APP_SECRET')
        self.bad_api_invalid_tokens = Twython('BAD_APP_KEY', 'BAD_APP_SECRET',
                                              'BAD_OT', 'BAD_OTS')

        # OAuth 2 (application-only) clients.
        self.oauth2_api = Twython(app_key, app_secret, oauth_version=2)
        self.oauth2_bad_api = Twython('BAD_APP_KEY', 'BAD_APP_SECRET',
                                      oauth_version=2)

    @unittest.skip('skipping non-updated test')
    def test_get_authentication_tokens(self):
        """Test getting authentication tokens works"""
        self.api.get_authentication_tokens(callback_url='http://google.com/',
                                           force_login=True,
                                           screen_name=screen_name)

    @unittest.skip('skipping non-updated test')
    def test_get_authentication_tokens_bad_tokens(self):
        """Test getting authentication tokens with bad tokens
        raises TwythonAuthError"""
        self.assertRaises(TwythonAuthError, self.bad_api.get_authentication_tokens,
                          callback_url='http://google.com/')

    @unittest.skip('skipping non-updated test')
    def test_get_authorized_tokens_bad_tokens(self):
        """Test getting final tokens fails with wrong tokens"""
        self.assertRaises(TwythonError, self.bad_api.get_authorized_tokens,
                          'BAD_OAUTH_VERIFIER')

    @unittest.skip('skipping non-updated test')
    def test_get_authorized_tokens_invalid_or_expired_tokens(self):
        """Test getting final token fails when invalid or expired tokens have been passed"""
        self.assertRaises(TwythonError, self.bad_api_invalid_tokens.get_authorized_tokens,
                          'BAD_OAUTH_VERIFIER')

    @unittest.skip('skipping non-updated test')
    def test_get_authentication_tokens_raises_error_when_oauth2(self):
        """Test when API is set for OAuth 2, get_authentication_tokens raises
        a TwythonError"""
        self.assertRaises(TwythonError, self.oauth2_api.get_authentication_tokens)

    @unittest.skip('skipping non-updated test')
    def test_get_authorization_tokens_raises_error_when_oauth2(self):
        """Test when API is set for OAuth 2, get_authorized_tokens raises
        a TwythonError"""
        self.assertRaises(TwythonError, self.oauth2_api.get_authorized_tokens,
                          'BAD_OAUTH_VERIFIER')

    @unittest.skip('skipping non-updated test')
    def test_obtain_access_token(self):
        """Test obtaining an Application Only OAuth 2 access token succeeds"""
        self.oauth2_api.obtain_access_token()

    @unittest.skip('skipping non-updated test')
    def test_obtain_access_token_bad_tokens(self):
        """Test obtaining an Application Only OAuth 2 access token using bad app tokens fails"""
        self.assertRaises(TwythonAuthError,
                          self.oauth2_bad_api.obtain_access_token)

    @unittest.skip('skipping non-updated test')
    def test_obtain_access_token_raises_error_when_oauth1(self):
        """Test when API is set for OAuth 1, obtain_access_token raises a
        TwythonError"""
        self.assertRaises(TwythonError, self.api.obtain_access_token)
# -*- coding: utf-8 -*-
"""
httpbin.helpers
~~~~~~~~~~~~~~~
This module provides helper functions for httpbin.
"""
import json
from hashlib import md5
from werkzeug.http import parse_authorization_header
from flask import request, make_response
from .structures import CaseInsensitiveDict
# ASCII teapot rendered as the body of 418 responses.
ASCII_ART = """
    -=[ teapot ]=-

       _...._
     .'  _ _ `.
    | ."` ^ `". _,
    \_;`"---"`|//
      |       ;/
      \_     _/
        `\"\"\"`
"""

# Default Location target for redirect responses.
REDIRECT_LOCATION = '/redirect/1'

# Hosting-environment headers stripped from echoed responses unless the
# client passes ?show_env. Fixed: the tuple previously listed
# 'X-Forwarded-For' twice; the duplicate entry was redundant (deletion is
# idempotent) and has been removed.
ENV_HEADERS = (
    'X-Varnish',
    'X-Request-Start',
    'X-Heroku-Queue-Depth',
    'X-Real-Ip',
    'X-Forwarded-Proto',
    'X-Heroku-Queue-Wait-Time',
    'X-Forwarded-For',
    'X-Heroku-Dynos-In-Use',
    'X-Forwarded-Protocol',
    'X-Forwarded-Port'
)
def get_files():
    """Returns files dict from request context.

    Each uploaded file is read fully into memory and keyed by its form
    field name.
    """
    return {name: upload.read() for name, upload in request.files.items()}
def get_headers(hide_env=True):
    """Returns headers dict from request context.

    When *hide_env* is true and the client did not pass ?show_env, headers
    injected by the hosting environment (see ENV_HEADERS) are stripped from
    the result. Returned as a CaseInsensitiveDict, matching HTTP header
    semantics.
    """
    headers = dict(request.headers.items())

    if hide_env and ('show_env' not in request.args):
        for key in ENV_HEADERS:
            try:
                del headers[key]
            except KeyError:
                pass  # header not present; nothing to strip

    return CaseInsensitiveDict(headers.items())
def get_url():
    """Return the request URL with its scheme rewritten to whatever the
    X-Forwarded-Proto header reports (defaults to "http").

    Fixed: the previous implementation used request.url.replace("http",
    scheme), which substitutes *every* occurrence of the substring "http"
    in the URL, corrupting paths or query strings that contain it. Only the
    scheme portion before "://" is rewritten now.
    """
    scheme = request.headers.get("X-Forwarded-Proto", "http")
    parts = request.url.split("://", 1)
    parts[0] = scheme
    return "://".join(parts)
def get_dict(*keys, **extras):
    """Returns request dict of given keys.

    Each requested key must be one of the names in ``_keys``; *extras* are
    merged into the result verbatim.

    Fixed: the parsed JSON body (``json_input``) was computed but never
    placed into the source dict, so asking for the 'json' key always
    yielded None. It is now included.
    """
    _keys = ('url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json')

    assert all(map(_keys.__contains__, keys))

    data = request.data
    form = request.form

    # A single form field with an empty value means the client posted a raw
    # body that Werkzeug parsed as a lone form key; treat it as raw data.
    if (len(form) == 1) and (not data):
        if not form.values().pop():
            data = form.keys().pop()
            form = None

    if form:
        # Flatten single-valued multidict entries to plain values.
        nonflat_dict = form.to_dict(flat=False)
        for k, v in nonflat_dict.items():
            if len(v) == 1:
                nonflat_dict[k] = v[0]
        form = nonflat_dict

    try:
        json_input = json.loads(request.data)
    except ValueError:
        json_input = None  # body is not valid JSON

    d = dict(
        url=get_url(),
        args=request.args,
        form=form,
        data=data,
        origin=request.remote_addr,
        headers=get_headers(),
        files=get_files(),
        json=json_input
    )

    out_d = dict()

    for key in keys:
        out_d[key] = d.get(key)

    out_d.update(extras)

    return out_d
def status_code(code):
    """Returns response object of given status code.

    Redirect codes get a Location header pointing at REDIRECT_LOCATION,
    auth-challenge codes (401/407) get the matching challenge header, and
    418 gets the teapot ASCII art body. Any other code is returned bare.
    """
    redirect = dict(headers=dict(location=REDIRECT_LOCATION))

    code_map = {
        301: redirect,
        302: redirect,
        303: redirect,
        304: dict(data=''),
        305: redirect,
        307: redirect,
        401: dict(headers={'WWW-Authenticate': 'Basic realm="Fake Realm"'}),
        407: dict(headers={'Proxy-Authenticate': 'Basic realm="Fake Realm"'}),
        418: dict(  # I'm a teapot!
            data=ASCII_ART,
            headers={
                'x-more-info': 'http://tools.ietf.org/html/rfc2324'
            }
        ),
    }

    r = make_response()
    r.status_code = code

    if code in code_map:
        m = code_map[code]
        if 'data' in m:
            r.data = m['data']
        if 'headers' in m:
            r.headers = m['headers']

    return r
def check_basic_auth(user, passwd):
    """Checks user authentication using HTTP Basic Auth.

    Returns a truthy value only when credentials were supplied and both the
    username and the password match exactly.
    """
    auth = request.authorization
    return auth and auth.username == user and auth.password == passwd
# Digest auth helpers
# qop is a quality of protection
def H(data):
    """Return the hex MD5 digest of *data*.

    hashlib requires bytes input, so text is encoded as UTF-8 first. This
    fixes a TypeError under Python 3 while remaining a no-op for byte
    strings (including all Python 2 str input).
    """
    if not isinstance(data, bytes):
        data = data.encode('utf-8')
    return md5(data).hexdigest()
def HA1(realm, username, password):
    """Create HA1 hash by realm, username, password

    HA1 = md5(A1) = MD5(username:realm:password)

    All three arguments are interpolated with %s, so non-string values are
    stringified before hashing.
    """
    return H("%s:%s:%s" % (username,
                           realm,
                           password))
def HA2(credentails, request):
    """Create HA2 md5 hash

    If the qop directive's value is "auth" or is unspecified, then HA2:
        HA2 = md5(A2) = MD5(method:digestURI)
    If the qop directive's value is "auth-int" , then HA2 is
        HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody))

    Raises ValueError for any other qop value, or when a required request
    field (method/uri/body) is missing for qop="auth-int".
    """
    if credentails.get("qop") == "auth" or credentails.get('qop') is None:
        return H("%s:%s" % (request['method'], request['uri']))
    elif credentails.get("qop") == "auth-int":
        for k in 'method', 'uri', 'body':
            if k not in request:
                raise ValueError("%s required" % k)
        return H("%s:%s:%s" % (request['method'],
                               request['uri'],
                               H(request['body'])))
    raise ValueError
def response(credentails, password, request):
    """Compile digest auth response

    If the qop directive's value is "auth" or "auth-int" , then compute the response as follows:
        RESPONSE = MD5(HA1:nonce:nonceCount:clienNonce:qop:HA2)
    Else if the qop directive is unspecified, then compute the response as follows:
        RESPONSE = MD5(HA1:nonce:HA2)

    Arguments:
    - `credentails`: credentails dict parsed from the Authorization header
    - `password`: request user password
    - `request`: request dict with 'uri', 'body' and 'method' keys

    Raises ValueError when qop is an unsupported value or required
    credential fields (nonce/nc/cnonce/qop) are missing.
    """
    response = None
    HA1_value = HA1(credentails.get('realm'), credentails.get('username'), password)
    HA2_value = HA2(credentails, request)
    if credentails.get('qop') is None:
        # Legacy RFC 2069 form: no qop directive.
        response = H(":".join([HA1_value, credentails.get('nonce'), HA2_value]))
    elif credentails.get('qop') == 'auth' or credentails.get('qop') == 'auth-int':
        for k in 'nonce', 'nc', 'cnonce', 'qop':
            if k not in credentails:
                raise ValueError("%s required for response H" % k)
        response = H(":".join([HA1_value,
                               credentails.get('nonce'),
                               credentails.get('nc'),
                               credentails.get('cnonce'),
                               credentails.get('qop'),
                               HA2_value]))
    else:
        raise ValueError("qop value are wrong")

    return response
def check_digest_auth(user, passwd):
    """Check user authentication using HTTP Digest auth

    Recomputes the expected digest from the parsed Authorization header and
    the known password, and compares it with the digest the client sent.
    Returns True on a match, False on mismatch or missing header, and None
    (falsy) when the header cannot be parsed.
    """
    if request.headers.get('Authorization'):
        credentails = parse_authorization_header(request.headers.get('Authorization'))
        if not credentails:
            return
        response_hash = response(credentails, passwd, dict(uri=request.path,
                                                           body=request.data,
                                                           method=request.method))
        if credentails['response'] == response_hash:
            return True
    return False
"""
Class Dashboard API endpoint urls.
"""
from django.conf.urls import patterns, url
urlpatterns = patterns('', # nopep8
    # JSON metrics for an entire course (course_id is org/course/run).
    url(r'^(?P<course_id>[^/]+/[^/]+/[^/]+)/all_sequential_open_distrib$',
        'class_dashboard.views.all_sequential_open_distrib', name="all_sequential_open_distrib"),

    url(r'^(?P<course_id>[^/]+/[^/]+/[^/]+)/all_problem_grade_distribution$',
        'class_dashboard.views.all_problem_grade_distribution', name="all_problem_grade_distribution"),

    # JSON metrics for a particular section, identified by numeric index.
    url(r'^(?P<course_id>[^/]+/[^/]+/[^/]+)/problem_grade_distribution/(?P<section>\d+)$',
        'class_dashboard.views.section_problem_grade_distrib', name="section_problem_grade_distrib"),

    # For listing students that opened a sub-section
    url(r'^get_students_opened_subsection$',
        'class_dashboard.dashboard_data.get_students_opened_subsection', name="get_students_opened_subsection"),

    # For listing of students' grade per problem
    url(r'^get_students_problem_grades$',
        'class_dashboard.dashboard_data.get_students_problem_grades', name="get_students_problem_grades"),

    # For generating metrics data as a CSV download.
    url(r'^post_metrics_data_csv_url',
        'class_dashboard.dashboard_data.post_metrics_data_csv', name="post_metrics_data_csv"),
)
from __future__ import unicode_literals
from django.conf import settings
from django.db.backends import BaseDatabaseOperations
class DatabaseOperations(BaseDatabaseOperations):
    """PostgreSQL-specific SQL generation for the Django ORM backend.

    Every method returns SQL fragments/strings; none executes queries except
    where a cursor is passed in explicitly.
    """

    def __init__(self, connection):
        super(DatabaseOperations, self).__init__(connection)

    def date_extract_sql(self, lookup_type, field_name):
        """Return SQL extracting a date component from *field_name*."""
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
        if lookup_type == 'week_day':
            # For consistency across backends, we return Sunday=1, Saturday=7.
            return "EXTRACT('dow' FROM %s) + 1" % field_name
        else:
            return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)

    def date_interval_sql(self, sql, connector, timedelta):
        """
        implements the interval functionality for expressions
        format for Postgres:
        (datefield + interval '3 days 200 seconds 5 microseconds')
        """
        modifiers = []
        if timedelta.days:
            modifiers.append('%s days' % timedelta.days)
        if timedelta.seconds:
            modifiers.append('%s seconds' % timedelta.seconds)
        if timedelta.microseconds:
            modifiers.append('%s microseconds' % timedelta.microseconds)
        mods = ' '.join(modifiers)
        conn = ' %s ' % connector
        return '(%s)' % conn.join([sql, 'interval \'%s\'' % mods])

    def date_trunc_sql(self, lookup_type, field_name):
        """Return SQL truncating a date to the given precision."""
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
        return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)

    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        """Like date_extract_sql, but converts to *tzname* first when USE_TZ."""
        if settings.USE_TZ:
            field_name = "%s AT TIME ZONE %%s" % field_name
            params = [tzname]
        else:
            params = []
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
        if lookup_type == 'week_day':
            # For consistency across backends, we return Sunday=1, Saturday=7.
            sql = "EXTRACT('dow' FROM %s) + 1" % field_name
        else:
            sql = "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
        return sql, params

    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        """Like date_trunc_sql, but converts to *tzname* first when USE_TZ."""
        if settings.USE_TZ:
            field_name = "%s AT TIME ZONE %%s" % field_name
            params = [tzname]
        else:
            params = []
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
        sql = "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
        return sql, params

    def deferrable_sql(self):
        # Constraint checks are deferred to transaction commit.
        return " DEFERRABLE INITIALLY DEFERRED"

    def lookup_cast(self, lookup_type):
        """Return the SQL cast wrapper needed for the given lookup type."""
        lookup = '%s'

        # Cast text lookups to text to allow things like filter(x__contains=4)
        if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
                           'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
            lookup = "%s::text"

        # Use UPPER(x) for case-insensitive lookups; it's faster.
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            lookup = 'UPPER(%s)' % lookup

        return lookup

    def field_cast_sql(self, db_type, internal_type):
        # inet columns must be rendered via HOST() to compare as text.
        if internal_type == "GenericIPAddressField" or internal_type == "IPAddressField":
            return 'HOST(%s)'
        return '%s'

    def last_insert_id(self, cursor, table_name, pk_name):
        """Return the last auto-generated PK value for *table_name*."""
        # Use pg_get_serial_sequence to get the underlying sequence name
        # from the table name and column name (available since PostgreSQL 8)
        cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
            self.quote_name(table_name), pk_name))
        return cursor.fetchone()[0]

    def no_limit_value(self):
        # PostgreSQL supports LIMIT-less OFFSET, so no sentinel is needed.
        return None

    def prepare_sql_script(self, sql, _allow_fallback=False):
        # psycopg2 can execute multi-statement scripts in one call.
        return [sql]

    def quote_name(self, name):
        """Quote an SQL identifier, avoiding double quoting."""
        if name.startswith('"') and name.endswith('"'):
            return name  # Quoting once is enough.
        return '"%s"' % name

    def set_time_zone_sql(self):
        return "SET TIME ZONE %s"

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """Return statements that empty *tables* and reset *sequences*."""
        if tables:
            # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows
            # us to truncate tables referenced by a foreign key in any other
            # table.
            tables_sql = ', '.join(
                style.SQL_FIELD(self.quote_name(table)) for table in tables)
            if allow_cascade:
                sql = ['%s %s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    tables_sql,
                    style.SQL_KEYWORD('CASCADE'),
                )]
            else:
                sql = ['%s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    tables_sql,
                )]
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
            return sql
        else:
            return []

    def sequence_reset_by_name_sql(self, style, sequences):
        # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
        # to reset sequence indices
        sql = []
        for sequence_info in sequences:
            table_name = sequence_info['table']
            column_name = sequence_info['column']
            if not (column_name and len(column_name) > 0):
                # This will be the case if it's an m2m using an autogenerated
                # intermediate table (see BaseDatabaseIntrospection.sequence_list)
                column_name = 'id'
            sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" %
                       (style.SQL_KEYWORD('SELECT'),
                        style.SQL_TABLE(self.quote_name(table_name)),
                        style.SQL_FIELD(column_name))
                       )
        return sql

    def tablespace_sql(self, tablespace, inline=False):
        """Return the tablespace clause, inline form for index definitions."""
        if inline:
            return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
        else:
            return "TABLESPACE %s" % self.quote_name(tablespace)

    def sequence_reset_sql(self, style, model_list):
        """Return statements resetting sequences to each model's max pk."""
        from django.db import models
        output = []
        qn = self.quote_name
        for model in model_list:
            # Use `coalesce` to set the sequence for each model to the max pk value if there are records,
            # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
            # if there are records (as the max pk value is already in use), otherwise set it to false.
            # Use pg_get_serial_sequence to get the underlying sequence name from the table name
            # and column name (available since PostgreSQL 8)

            for f in model._meta.local_fields:
                if isinstance(f, models.AutoField):
                    output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" %
                                  (style.SQL_KEYWORD('SELECT'),
                                   style.SQL_TABLE(qn(model._meta.db_table)),
                                   style.SQL_FIELD(f.column),
                                   style.SQL_FIELD(qn(f.column)),
                                   style.SQL_FIELD(qn(f.column)),
                                   style.SQL_KEYWORD('IS NOT'),
                                   style.SQL_KEYWORD('FROM'),
                                   style.SQL_TABLE(qn(model._meta.db_table))))
                    break  # Only one AutoField is allowed per model, so don't bother continuing.
            for f in model._meta.many_to_many:
                if not f.rel.through:
                    output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" %
                                  (style.SQL_KEYWORD('SELECT'),
                                   style.SQL_TABLE(qn(f.m2m_db_table())),
                                   style.SQL_FIELD('id'),
                                   style.SQL_FIELD(qn('id')),
                                   style.SQL_FIELD(qn('id')),
                                   style.SQL_KEYWORD('IS NOT'),
                                   style.SQL_KEYWORD('FROM'),
                                   style.SQL_TABLE(qn(f.m2m_db_table()))))
        return output

    def prep_for_iexact_query(self, x):
        # No pre-processing needed; UPPER() is applied in lookup_cast instead.
        return x

    def max_name_length(self):
        """
        Returns the maximum length of an identifier.

        Note that the maximum length of an identifier is 63 by default, but can
        be changed by recompiling PostgreSQL after editing the NAMEDATALEN
        macro in src/include/pg_config_manual.h .

        This implementation simply returns 63, but can easily be overridden by a
        custom database backend that inherits most of its behavior from this one.
        """

        return 63

    def distinct_sql(self, fields):
        """Return DISTINCT or PostgreSQL's DISTINCT ON (fields) form."""
        if fields:
            return 'DISTINCT ON (%s)' % ', '.join(fields)
        else:
            return 'DISTINCT'

    def last_executed_query(self, cursor, sql, params):
        # http://initd.org/psycopg/docs/cursor.html#cursor.query
        # The query attribute is a Psycopg extension to the DB API 2.0.
        if cursor.query is not None:
            return cursor.query.decode('utf-8')
        return None

    def return_insert_id(self):
        # INSERT ... RETURNING pk avoids a separate currval() round trip.
        return "RETURNING %s", ()

    def bulk_insert_sql(self, fields, num_values):
        """Return a multi-row VALUES clause for a bulk INSERT."""
        items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
        return "VALUES " + ", ".join([items_sql] * num_values)
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron import context
from neutron.db import agents_db
from neutron.db import common_db_mixin
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_hamode_db
from neutron.extensions import l3
from neutron.extensions import l3_ext_ha_mode
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.scheduler import l3_agent_scheduler
from neutron.tests.unit import testlib_api
_uuid = uuidutils.generate_uuid
class FakeL3PluginWithAgents(common_db_mixin.CommonDbMixin,
                             l3_hamode_db.L3_HA_NAT_db_mixin,
                             l3_agentschedulers_db.L3AgentSchedulerDbMixin,
                             agents_db.AgentDbMixin):
    """Minimal L3 plugin composing the HA, scheduler and agent DB mixins.

    All behavior comes from the mixins; no overrides are needed for tests.
    """
    pass
class L3HATestFramework(testlib_api.SqlTestCase):
    """Shared fixture for L3 HA tests: plugin, two L3 agents, and helpers."""

    def setUp(self):
        super(L3HATestFramework, self).setUp()

        self.admin_ctx = context.get_admin_context()
        self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
        self.core_plugin = manager.NeutronManager.get_plugin()
        # Stub out agent notifications; tests only assert that they fire.
        notif_p = mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
                                    '_notify_ha_interfaces_updated')
        self.notif_m = notif_p.start()
        cfg.CONF.set_override('allow_overlapping_ips', True)

        self.plugin = FakeL3PluginWithAgents()
        self._register_agents()

    def _register_agents(self):
        # Register one legacy-mode and one dvr_snat-mode L3 agent on
        # different hosts, then capture both agent records.
        agent_status = {
            'agent_type': constants.AGENT_TYPE_L3,
            'binary': 'neutron-l3-agent',
            'host': 'l3host',
            'topic': 'N/A',
            'configurations': {'agent_mode': 'legacy'}
        }
        self.plugin.create_or_update_agent(self.admin_ctx, agent_status)
        agent_status['host'] = 'l3host_2'
        agent_status['configurations'] = {'agent_mode': 'dvr_snat'}
        self.plugin.create_or_update_agent(self.admin_ctx, agent_status)
        self.agent1, self.agent2 = self.plugin.get_agents(self.admin_ctx)

    def _create_router(self, ha=True, tenant_id='tenant1', distributed=None,
                       ctx=None):
        # ha=None omits the flag entirely so the configured default applies.
        if ctx is None:
            ctx = self.admin_ctx
        ctx.tenant_id = tenant_id
        router = {'name': 'router1', 'admin_state_up': True}
        if ha is not None:
            router['ha'] = ha
        if distributed is not None:
            router['distributed'] = distributed
        return self.plugin.create_router(ctx, {'router': router})

    def _update_router(self, router_id, ha=True, distributed=None, ctx=None):
        # Drives the DB-level update path directly (no API validation layer).
        if ctx is None:
            ctx = self.admin_ctx
        data = {'ha': ha} if ha is not None else {}
        if distributed is not None:
            data['distributed'] = distributed
        return self.plugin._update_router_db(ctx, router_id,
                                             data, None)

    def _bind_router(self, router_id):
        # Schedule the HA router onto all registered agents in one txn.
        with self.admin_ctx.session.begin(subtransactions=True):
            scheduler = l3_agent_scheduler.ChanceScheduler()
            agents_db = self.plugin.get_agents_db(self.admin_ctx)
            scheduler.bind_ha_router_to_agents(
                self.plugin,
                self.admin_ctx,
                router_id,
                agents_db)

    def test_get_ha_router_port_bindings(self):
        router = self._create_router()
        self._bind_router(router['id'])
        bindings = self.plugin.get_ha_router_port_bindings(
            self.admin_ctx, [router['id']])
        binding_dicts = [{'router_id': binding['router_id'],
                          'l3_agent_id': binding['l3_agent_id']}
                         for binding in bindings]
        self.assertIn({'router_id': router['id'],
                       'l3_agent_id': self.agent1['id']}, binding_dicts)
        self.assertIn({'router_id': router['id'],
                       'l3_agent_id': self.agent2['id']}, binding_dicts)

    def test_get_l3_bindings_hosting_router_with_ha_states_ha_router(self):
        router = self._create_router()
        self._bind_router(router['id'])
        self.plugin.update_routers_states(
            self.admin_ctx, {router['id']: 'active'}, self.agent1['host'])
        bindings = self.plugin.get_l3_bindings_hosting_router_with_ha_states(
            self.admin_ctx, router['id'])
        agent_ids = [(agent[0]['id'], agent[1]) for agent in bindings]
        self.assertIn((self.agent1['id'], 'active'), agent_ids)
        self.assertIn((self.agent2['id'], 'standby'), agent_ids)

    def test_get_l3_bindings_hosting_router_with_ha_states_not_scheduled(self):
        router = self._create_router(ha=False)
        # A non-HA router has no HA bindings at all.
        bindings = self.plugin.get_l3_bindings_hosting_router_with_ha_states(
            self.admin_ctx, router['id'])
        self.assertEqual([], bindings)
class L3HATestCase(L3HATestFramework):
def test_verify_configuration_succeed(self):
# Default configuration should pass
self.plugin._verify_configuration()
def test_verify_configuration_l3_ha_net_cidr_is_not_a_cidr(self):
cfg.CONF.set_override('l3_ha_net_cidr', 'not a cidr')
self.assertRaises(
l3_ext_ha_mode.HANetworkCIDRNotValid,
self.plugin._verify_configuration)
def test_verify_configuration_l3_ha_net_cidr_is_not_a_subnet(self):
cfg.CONF.set_override('l3_ha_net_cidr', '10.0.0.1/8')
self.assertRaises(
l3_ext_ha_mode.HANetworkCIDRNotValid,
self.plugin._verify_configuration)
def test_verify_configuration_min_l3_agents_per_router_below_minimum(self):
cfg.CONF.set_override('min_l3_agents_per_router', 0)
self.assertRaises(
l3_ext_ha_mode.HAMinimumAgentsNumberNotValid,
self.plugin._check_num_agents_per_router)
def test_verify_configuration_max_l3_agents_below_min_l3_agents(self):
cfg.CONF.set_override('max_l3_agents_per_router', 3)
cfg.CONF.set_override('min_l3_agents_per_router', 4)
self.assertRaises(
l3_ext_ha_mode.HAMaximumAgentsNumberNotValid,
self.plugin._check_num_agents_per_router)
def test_verify_configuration_max_l3_agents_unlimited(self):
cfg.CONF.set_override('max_l3_agents_per_router',
l3_hamode_db.UNLIMITED_AGENTS_PER_ROUTER)
self.plugin._check_num_agents_per_router()
def test_ha_router_create(self):
router = self._create_router()
self.assertTrue(router['ha'])
def test_ha_router_create_with_distributed(self):
self.assertRaises(l3_ext_ha_mode.DistributedHARouterNotSupported,
self._create_router,
distributed=True)
def test_no_ha_router_create(self):
router = self._create_router(ha=False)
self.assertFalse(router['ha'])
def test_router_create_with_ha_conf_enabled(self):
cfg.CONF.set_override('l3_ha', True)
router = self._create_router(ha=None)
self.assertTrue(router['ha'])
def test_migration_from_ha(self):
router = self._create_router()
self.assertTrue(router['ha'])
router = self._update_router(router['id'], ha=False)
self.assertFalse(router.extra_attributes['ha'])
self.assertIsNone(router.extra_attributes['ha_vr_id'])
def test_migration_to_ha(self):
router = self._create_router(ha=False)
self.assertFalse(router['ha'])
router = self._update_router(router['id'], ha=True)
self.assertTrue(router.extra_attributes['ha'])
self.assertIsNotNone(router.extra_attributes['ha_vr_id'])
def test_migrate_ha_router_to_distributed(self):
router = self._create_router()
self.assertTrue(router['ha'])
self.assertRaises(l3_ext_ha_mode.DistributedHARouterNotSupported,
self._update_router,
router['id'],
distributed=True)
def test_l3_agent_routers_query_interface(self):
router = self._create_router()
self._bind_router(router['id'])
routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
self.agent1['host'])
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNotNone(router.get('ha'))
interface = router.get(constants.HA_INTERFACE_KEY)
self.assertIsNotNone(interface)
self.assertEqual(constants.DEVICE_OWNER_ROUTER_HA_INTF,
interface['device_owner'])
subnets = interface['subnets']
self.assertEqual(1, len(subnets))
self.assertEqual(cfg.CONF.l3_ha_net_cidr, subnets[0]['cidr'])
def test_unique_ha_network_per_tenant(self):
    """Each tenant gets its own dedicated HA network."""
    tenant1 = _uuid()
    tenant2 = _uuid()
    self._create_router(tenant_id=tenant1)
    self._create_router(tenant_id=tenant2)
    ha_network1 = self.plugin.get_ha_network(self.admin_ctx, tenant1)
    ha_network2 = self.plugin.get_ha_network(self.admin_ctx, tenant2)
    self.assertNotEqual(
        ha_network1['network_id'], ha_network2['network_id'])

def _deployed_router_change_ha_flag(self, to_ha):
    """Flip the ha flag on an existing router and verify the HA interface
    appears/disappears in the agent sync data accordingly."""
    self._create_router(ha=not to_ha)
    routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx)
    router = routers[0]
    interface = router.get(constants.HA_INTERFACE_KEY)
    if to_ha:
        self.assertIsNone(interface)
    else:
        self.assertIsNotNone(interface)

    self._update_router(router['id'], to_ha)
    routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx)
    router = routers[0]
    interface = router.get(constants.HA_INTERFACE_KEY)
    if to_ha:
        self.assertIsNotNone(interface)
    else:
        self.assertIsNone(interface)

def test_deployed_router_can_have_ha_enabled(self):
    self._deployed_router_change_ha_flag(to_ha=True)

def test_deployed_router_can_have_ha_disabled(self):
    self._deployed_router_change_ha_flag(to_ha=False)

def test_create_ha_router_notifies_agent(self):
    """Creating an HA router triggers an agent notification."""
    self._create_router()
    self.assertTrue(self.notif_m.called)

def test_update_router_to_ha_notifies_agent(self):
    """Migrating a router to HA triggers an agent notification."""
    router = self._create_router(ha=False)
    self.notif_m.reset_mock()
    self._update_router(router['id'], ha=True)
    self.assertTrue(self.notif_m.called)

def test_unique_vr_id_between_routers(self):
    """Two routers on the same tenant's HA network get distinct VR ids."""
    self._create_router()
    self._create_router()
    routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx)
    self.assertEqual(2, len(routers))
    self.assertNotEqual(routers[0]['ha_vr_id'], routers[1]['ha_vr_id'])
@mock.patch('neutron.db.l3_hamode_db.VR_ID_RANGE', new=set(range(1, 1)))
def test_vr_id_depleted(self):
    """With an empty VR id range, creating an HA router must fail."""
    self.assertRaises(l3_ext_ha_mode.NoVRIDAvailable, self._create_router)

@mock.patch('neutron.db.l3_hamode_db.VR_ID_RANGE', new=set(range(1, 2)))
def test_vr_id_unique_range_per_tenant(self):
    """VR ids are scoped per tenant HA network, so a one-id range can
    still serve one router per tenant (same id on both networks)."""
    self._create_router()
    self._create_router(tenant_id=_uuid())
    routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx)
    self.assertEqual(2, len(routers))
    self.assertEqual(routers[0]['ha_vr_id'], routers[1]['ha_vr_id'])

@mock.patch('neutron.db.l3_hamode_db.MAX_ALLOCATION_TRIES', new=2)
def test_vr_id_allocation_contraint_conflict(self):
    """Allocation gives up after MAX_ALLOCATION_TRIES repeated conflicts."""
    router = self._create_router()
    network = self.plugin.get_ha_network(self.admin_ctx,
                                         router['tenant_id'])

    # Pretend nothing is allocated so every attempt picks a colliding id.
    with mock.patch.object(self.plugin, '_get_allocated_vr_id',
                           return_value=set()) as alloc:
        self.assertRaises(l3_ext_ha_mode.MaxVRIDAllocationTriesReached,
                          self.plugin._allocate_vr_id, self.admin_ctx,
                          network.network_id, router['id'])
        self.assertEqual(2, len(alloc.mock_calls))

def test_vr_id_allocation_delete_router(self):
    """Deleting a router returns its VR id to the pool."""
    router = self._create_router()
    network = self.plugin.get_ha_network(self.admin_ctx,
                                         router['tenant_id'])

    allocs_before = self.plugin._get_allocated_vr_id(self.admin_ctx,
                                                     network.network_id)
    router = self._create_router()
    allocs_current = self.plugin._get_allocated_vr_id(self.admin_ctx,
                                                      network.network_id)
    self.assertNotEqual(allocs_before, allocs_current)

    self.plugin.delete_router(self.admin_ctx, router['id'])
    allocs_after = self.plugin._get_allocated_vr_id(self.admin_ctx,
                                                    network.network_id)
    self.assertEqual(allocs_before, allocs_after)

def test_vr_id_allocation_router_migration(self):
    """Migrating a router to non-HA releases its VR id."""
    router = self._create_router()
    network = self.plugin.get_ha_network(self.admin_ctx,
                                         router['tenant_id'])

    allocs_before = self.plugin._get_allocated_vr_id(self.admin_ctx,
                                                     network.network_id)
    router = self._create_router()
    self._update_router(router['id'], ha=False)
    allocs_after = self.plugin._get_allocated_vr_id(self.admin_ctx,
                                                    network.network_id)
    self.assertEqual(allocs_before, allocs_after)
def test_one_ha_router_one_not(self):
    """HA and non-HA routers can coexist and report distinct ha flags."""
    self._create_router(ha=False)
    self._create_router()
    routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx)

    ha0 = routers[0]['ha']
    ha1 = routers[1]['ha']

    self.assertNotEqual(ha0, ha1)

def test_add_ha_port_binding_failure_rolls_back_port(self):
    """A failed HA port binding must not leave an orphan port behind."""
    router = self._create_router()
    device_filter = {'device_id': [router['id']]}
    ports_before = self.core_plugin.get_ports(
        self.admin_ctx, filters=device_filter)
    network = self.plugin.get_ha_network(self.admin_ctx,
                                         router['tenant_id'])

    with mock.patch.object(self.plugin, '_create_ha_port_binding',
                           side_effect=ValueError):
        self.assertRaises(ValueError, self.plugin.add_ha_port,
                          self.admin_ctx, router['id'], network.network_id,
                          router['tenant_id'])

    ports_after = self.core_plugin.get_ports(
        self.admin_ctx, filters=device_filter)
    self.assertEqual(ports_before, ports_after)

def test_create_ha_network_binding_failure_rolls_back_network(self):
    """A failed tenant binding must roll back the HA network itself."""
    networks_before = self.core_plugin.get_networks(self.admin_ctx)

    with mock.patch.object(self.plugin,
                           '_create_ha_network_tenant_binding',
                           side_effect=ValueError):
        self.assertRaises(ValueError, self.plugin._create_ha_network,
                          self.admin_ctx, _uuid())

    networks_after = self.core_plugin.get_networks(self.admin_ctx)
    self.assertEqual(networks_before, networks_after)

def test_create_ha_network_subnet_failure_rolls_back_network(self):
    """A failed HA subnet creation must roll back the HA network."""
    networks_before = self.core_plugin.get_networks(self.admin_ctx)

    with mock.patch.object(self.plugin, '_create_ha_subnet',
                           side_effect=ValueError):
        self.assertRaises(ValueError, self.plugin._create_ha_network,
                          self.admin_ctx, _uuid())

    networks_after = self.core_plugin.get_networks(self.admin_ctx)
    self.assertEqual(networks_before, networks_after)

def test_create_ha_interfaces_binding_failure_rolls_back_ports(self):
    """A binding failure during HA interface creation removes the ports."""
    router = self._create_router()
    network = self.plugin.get_ha_network(self.admin_ctx,
                                         router['tenant_id'])
    device_filter = {'device_id': [router['id']]}
    ports_before = self.core_plugin.get_ports(
        self.admin_ctx, filters=device_filter)

    router_db = self.plugin._get_router(self.admin_ctx, router['id'])
    with mock.patch.object(self.plugin, '_create_ha_port_binding',
                           side_effect=ValueError):
        self.assertRaises(ValueError, self.plugin._create_ha_interfaces,
                          self.admin_ctx, router_db, network)

    ports_after = self.core_plugin.get_ports(
        self.admin_ctx, filters=device_filter)
    self.assertEqual(ports_before, ports_after)

def test_create_router_db_ha_attribute_failure_rolls_back_router(self):
    """A failure in any HA setup step must roll back the whole router."""
    routers_before = self.plugin.get_routers(self.admin_ctx)

    for method in ('_set_vr_id',
                   '_create_ha_interfaces',
                   '_notify_ha_interfaces_updated'):
        with mock.patch.object(self.plugin, method,
                               side_effect=ValueError):
            self.assertRaises(ValueError, self._create_router)

    routers_after = self.plugin.get_routers(self.admin_ctx)
    self.assertEqual(routers_before, routers_after)
def test_update_routers_states(self):
    """update_routers_states() persists per-router keepalived states."""
    router1 = self._create_router()
    self._bind_router(router1['id'])
    router2 = self._create_router()
    self._bind_router(router2['id'])

    routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
                                                    self.agent1['host'])
    # Freshly bound routers start out as standby.
    for router in routers:
        self.assertEqual('standby', router[constants.HA_ROUTER_STATE_KEY])

    states = {router1['id']: 'active',
              router2['id']: 'standby'}
    self.plugin.update_routers_states(
        self.admin_ctx, states, self.agent1['host'])

    routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
                                                    self.agent1['host'])
    for router in routers:
        self.assertEqual(states[router['id']],
                         router[constants.HA_ROUTER_STATE_KEY])

def test_set_router_states_handles_concurrently_deleted_router(self):
    """A binding whose router vanished mid-update must not break the rest."""
    router1 = self._create_router()
    self._bind_router(router1['id'])
    router2 = self._create_router()
    self._bind_router(router2['id'])
    bindings = self.plugin.get_ha_router_port_bindings(
        self.admin_ctx, [router1['id'], router2['id']])
    # Delete router1 after its binding was fetched, simulating a race.
    self.plugin.delete_router(self.admin_ctx, router1['id'])
    self.plugin._set_router_states(
        self.admin_ctx, bindings, {router1['id']: 'active',
                                   router2['id']: 'active'})
    routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
                                                    self.agent1['host'])
    # The surviving router still had its state applied.
    self.assertEqual('active', routers[0][constants.HA_ROUTER_STATE_KEY])

def test_exclude_dvr_agents_for_ha_candidates(self):
    """Test dvr agents are not counted in the ha candidates.

    This test case tests that when get_number_of_agents_for_scheduling
    is called, it doesn't count dvr agents.
    """
    # Test setup registers two l3 agents.
    # Register another l3 agent with dvr mode and assert that
    # get_number_of_ha_agent_candidates return 2.
    dvr_agent_status = {
        'agent_type': constants.AGENT_TYPE_L3,
        'binary': 'neutron-l3-agent',
        'host': 'l3host_3',
        'topic': 'N/A',
        'configurations': {'agent_mode': 'dvr'}
    }
    self.plugin.create_or_update_agent(self.admin_ctx, dvr_agent_status)
    num_ha_candidates = self.plugin.get_number_of_agents_for_scheduling(
        self.admin_ctx)
    self.assertEqual(2, num_ha_candidates)
class L3HAModeDbTestCase(L3HATestFramework):
    """HA router behavior exercised together with core networks/subnets."""

    def _create_network(self, plugin, ctx, name='net',
                        tenant_id='tenant1'):
        # Helper: create a plain tenant network via the core plugin and
        # return its id.
        network = {'network': {'name': name,
                               'shared': False,
                               'admin_state_up': True,
                               'tenant_id': tenant_id}}
        return plugin.create_network(ctx, network)['id']

    def _create_subnet(self, plugin, ctx, network_id, cidr='10.0.0.0/8',
                       name='subnet', tenant_id='tenant1'):
        # Helper: create an IPv4 subnet, leaving optional attributes
        # unspecified so the plugin applies its defaults.
        subnet = {'subnet': {'name': name,
                  'ip_version': 4,
                  'network_id': network_id,
                  'cidr': cidr,
                  'gateway_ip': attributes.ATTR_NOT_SPECIFIED,
                  'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
                  'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
                  'host_routes': attributes.ATTR_NOT_SPECIFIED,
                  'tenant_id': tenant_id,
                  'enable_dhcp': True,
                  'ipv6_ra_mode': attributes.ATTR_NOT_SPECIFIED}}
        created_subnet = plugin.create_subnet(ctx, subnet)
        return created_subnet

    def test_remove_ha_in_use(self):
        """Deleting an HA router with an attached interface raises
        RouterInUse and leaves both HA port bindings intact."""
        router = self._create_router(ctx=self.admin_ctx)
        network_id = self._create_network(self.core_plugin, self.admin_ctx)
        subnet = self._create_subnet(self.core_plugin, self.admin_ctx,
                                     network_id)
        interface_info = {'subnet_id': subnet['id']}
        self.plugin.add_router_interface(self.admin_ctx,
                                         router['id'],
                                         interface_info)
        self.assertRaises(l3.RouterInUse, self.plugin.delete_router,
                          self.admin_ctx, router['id'])
        bindings = self.plugin.get_ha_router_port_bindings(
            self.admin_ctx, [router['id']])
        self.assertEqual(2, len(bindings))
class L3HAUserTestCase(L3HATestFramework):
    """HA router CRUD exercised with a plain (non-admin) tenant context."""

    def setUp(self):
        super(L3HAUserTestCase, self).setUp()
        # A non-admin context with a random tenant id.
        self.user_ctx = context.Context('', _uuid())

    def test_create_ha_router(self):
        """A regular user can create an HA router."""
        self._create_router(ctx=self.user_ctx)

    def test_update_router(self):
        """A regular user can migrate their router away from HA."""
        router = self._create_router(ctx=self.user_ctx)
        self._update_router(router['id'], ha=False, ctx=self.user_ctx)

    def test_delete_router(self):
        """A regular user can delete their HA router."""
        router = self._create_router(ctx=self.user_ctx)
        self.plugin.delete_router(self.user_ctx, router['id'])
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import operator
import warnings
from contextlib import nullcontext
from functools import reduce
from itertools import chain, islice
from weakref import ref as weak_ref
from asgiref.sync import sync_to_async
import django
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY,
IntegrityError,
NotSupportedError,
connections,
router,
transaction,
)
from django.db.models import AutoField, DateField, DateTimeField, Field, Max, sql
from django.db.models.constants import LOOKUP_SEP, OnConflict
from django.db.models.deletion import Collector
from django.db.models.expressions import Case, DatabaseDefault, F, OrderBy, Value, When
from django.db.models.fetch_modes import FETCH_ONE
from django.db.models.functions import Cast, Trunc
from django.db.models.query_utils import FilteredRelation, Q
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE, ROW_COUNT
from django.db.models.utils import (
AltersData,
create_namedtuple_class,
resolve_callables,
)
from django.utils import timezone
from django.utils.deprecation import RemovedInDjango70Warning
from django.utils.functional import cached_property
# The maximum number of results to fetch in a get() query.
MAX_GET_RESULTS = 21
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
PROHIBITED_FILTER_KWARGS = frozenset(["_connector", "_negated"])
class BaseIterable:
    """
    Base class for the per-row iterables used by QuerySet.

    Holds the queryset being iterated plus the chunking parameters used
    when fetching rows from the database cursor.
    """

    def __init__(
        self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
    ):
        self.queryset = queryset
        # Whether to use chunked (server-side cursor) fetching.
        self.chunked_fetch = chunked_fetch
        self.chunk_size = chunk_size

    async def _async_generator(self):
        # Generators don't actually start running until the first time you call
        # next() on them, so make the generator object in the async thread and
        # then repeatedly dispatch to it in a sync thread.
        sync_generator = self.__iter__()

        def next_slice(gen):
            return list(islice(gen, self.chunk_size))

        while True:
            chunk = await sync_to_async(next_slice)(sync_generator)
            for item in chunk:
                yield item
            # A short chunk means the sync generator is exhausted.
            if len(chunk) < self.chunk_size:
                break

    # __aiter__() is a *synchronous* method that has to then return an
    # *asynchronous* iterator/generator. Thus, nest an async generator inside
    # it.
    # This is a generic iterable converter for now, and is going to suffer a
    # performance penalty on large sets of items due to the cost of crossing
    # over the sync barrier for each chunk. Custom __aiter__() methods should
    # be added to each Iterable subclass, but that needs some work in the
    # Compiler first.
    def __aiter__(self):
        return self._async_generator()
class ModelIterable(BaseIterable):
    """Iterable that yields a model instance for each row."""

    def __iter__(self):
        queryset = self.queryset
        db = queryset.db
        compiler = queryset.query.get_compiler(using=db)
        fetch_mode = queryset._fetch_mode
        # Execute the query. This will also fill compiler.select, klass_info,
        # and annotations.
        results = compiler.execute_sql(
            chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
        )
        select, klass_info, annotation_col_map = (
            compiler.select,
            compiler.klass_info,
            compiler.annotation_col_map,
        )
        model_cls = klass_info["model"]
        select_fields = klass_info["select_fields"]
        # Columns belonging to the model itself occupy a contiguous slice
        # of each result row.
        model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
        init_list = [
            f[0].target.attname for f in select[model_fields_start:model_fields_end]
        ]
        related_populators = get_related_populators(klass_info, select, db, fetch_mode)
        # Precompute, per known related field, the local attnames holding
        # the FK value(s) and a getter that extracts them from an instance.
        known_related_objects = [
            (
                field,
                related_objs,
                attnames := [
                    (
                        field.attname
                        if from_field == "self"
                        else queryset.model._meta.get_field(from_field).attname
                    )
                    for from_field in field.from_fields
                ],
                operator.attrgetter(*attnames),
            )
            for field, related_objs in queryset._known_related_objects.items()
        ]
        peers = []
        for row in compiler.results_iter(results):
            obj = model_cls.from_db(
                db,
                init_list,
                row[model_fields_start:model_fields_end],
                fetch_mode=fetch_mode,
            )
            if fetch_mode.track_peers:
                # Keep weak references to sibling instances so the fetch
                # mode can operate on the whole result set later.
                peers.append(weak_ref(obj))
                obj._state.peers = peers
            for rel_populator in related_populators:
                rel_populator.populate(row, obj)
            if annotation_col_map:
                for attr_name, col_pos in annotation_col_map.items():
                    setattr(obj, attr_name, row[col_pos])

            # Add the known related objects to the model.
            for field, rel_objs, rel_attnames, rel_getter in known_related_objects:
                # Avoid overwriting objects loaded by, e.g., select_related().
                if field.is_cached(obj):
                    continue
                # Avoid fetching potentially deferred attributes that would
                # result in unexpected queries.
                if any(attname not in obj.__dict__ for attname in rel_attnames):
                    continue
                rel_obj_id = rel_getter(obj)
                try:
                    rel_obj = rel_objs[rel_obj_id]
                except KeyError:
                    pass  # May happen in qs1 | qs2 scenarios.
                else:
                    setattr(obj, field.name, rel_obj)

            yield obj
class RawModelIterable(BaseIterable):
    """
    Iterable that yields a model instance for each row from a raw queryset.
    """

    def __iter__(self):
        # Cache some things for performance reasons outside the loop.
        db = self.queryset.db
        query = self.queryset.query
        connection = connections[db]
        compiler = connection.ops.compiler("SQLCompiler")(query, connection, db)
        query_iterator = iter(query)
        try:
            (
                model_init_names,
                model_init_pos,
                annotation_fields,
            ) = self.queryset.resolve_model_init_order()
            model_cls = self.queryset.model
            # Raw SQL is opaque to the ORM, so the primary key column(s)
            # must appear explicitly in the selected columns.
            if any(
                f.attname not in model_init_names for f in model_cls._meta.pk_fields
            ):
                raise exceptions.FieldDoesNotExist(
                    "Raw query must include the primary key"
                )
            # Map each selected column back to a model field (None for
            # columns that don't correspond to a field).
            fields = [self.queryset.model_fields.get(c) for c in self.queryset.columns]
            cols = [f.get_col(f.model._meta.db_table) if f else None for f in fields]
            converters = compiler.get_converters(cols)
            if converters:
                query_iterator = compiler.apply_converters(query_iterator, converters)
            if compiler.has_composite_fields(cols):
                query_iterator = compiler.composite_fields_to_tuples(
                    query_iterator, cols
                )
            fetch_mode = self.queryset._fetch_mode
            peers = []
            for values in query_iterator:
                # Associate fields to values
                model_init_values = [values[pos] for pos in model_init_pos]
                instance = model_cls.from_db(
                    db, model_init_names, model_init_values, fetch_mode=fetch_mode
                )
                if fetch_mode.track_peers:
                    # Track sibling instances via weak references for the
                    # fetch mode machinery.
                    peers.append(weak_ref(instance))
                    instance._state.peers = peers
                if annotation_fields:
                    for column, pos in annotation_fields:
                        setattr(instance, column, values[pos])
                yield instance
        finally:
            # Done iterating the Query. If it has its own cursor, close it.
            if hasattr(query, "cursor") and query.cursor:
                query.cursor.close()
class ValuesIterable(BaseIterable):
    """
    Iterable returned by QuerySet.values() that yields a dict for each row.
    """

    def __iter__(self):
        queryset = self.queryset
        query = queryset.query
        compiler = query.get_compiler(queryset.db)

        if query.selected:
            # An explicit selection fixes both the keys and their order.
            names = list(query.selected)
        else:
            # extra(select=...) cols are always at the start of the row.
            names = [
                *query.extra_select,
                *query.values_select,
                *query.annotation_select,
            ]
        indexes = range(len(names))
        for row in compiler.results_iter(
            chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
        ):
            # Build a fresh {column_name: value} dict per row.
            yield {names[i]: row[i] for i in indexes}
class ValuesListIterable(BaseIterable):
    """Iterable for QuerySet.values_list(flat=False); yields one tuple per row."""

    def __iter__(self):
        qs = self.queryset
        sql_compiler = qs.query.get_compiler(qs.db)
        # Ask the compiler for tuple rows directly so no per-row conversion
        # is needed here.
        return sql_compiler.results_iter(
            tuple_expected=True,
            chunked_fetch=self.chunked_fetch,
            chunk_size=self.chunk_size,
        )
class NamedValuesListIterable(ValuesListIterable):
    """
    Iterable returned by QuerySet.values_list(named=True) that yields a
    namedtuple for each row.
    """

    def __iter__(self):
        queryset = self.queryset
        if queryset._fields:
            # Field names were given explicitly to values_list().
            names = queryset._fields
        else:
            query = queryset.query
            names = [
                *query.extra_select,
                *query.values_select,
                *query.annotation_select,
            ]
        tuple_class = create_namedtuple_class(*names)
        new = tuple.__new__
        # tuple.__new__ bypasses the namedtuple's per-field __new__,
        # building each row's namedtuple straight from the tuple.
        for row in super().__iter__():
            yield new(tuple_class, row)
class FlatValuesListIterable(BaseIterable):
    """Iterable for QuerySet.values_list(flat=True); yields bare values."""

    def __iter__(self):
        qs = self.queryset
        compiler = qs.query.get_compiler(qs.db)
        rows = compiler.results_iter(
            chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
        )
        # Every row holds the single selected column at index 0; unwrap it.
        yield from map(operator.itemgetter(0), rows)
class QuerySet(AltersData):
"""Represent a lazy database lookup for a set of objects."""
def __init__(self, model=None, query=None, using=None, hints=None):
    """
    Initialize a lazy QuerySet.

    model: the model class instances are built from.
    query: an existing sql.Query to wrap; a fresh one is created if omitted.
    using: database alias to run against (None defers to the router).
    hints: routing hints passed through to the database router.
    """
    self.model = model
    self._db = using
    self._hints = hints or {}
    self._query = query or sql.Query(self.model)
    # None means "not evaluated yet"; a list once fetched.
    self._result_cache = None
    self._sticky_filter = False
    self._for_write = False
    self._prefetch_related_lookups = ()
    self._prefetch_done = False
    self._known_related_objects = {}  # {rel_field: {pk: rel_obj}}
    self._iterable_class = ModelIterable
    self._fetch_mode = FETCH_ONE
    self._fields = None
    self._defer_next_filter = False
    self._deferred_filter = None

@property
def query(self):
    # Apply any filter that was deferred via _defer_next_filter before
    # exposing the underlying sql.Query.
    if self._deferred_filter:
        negate, args, kwargs = self._deferred_filter
        self._filter_or_exclude_inplace(negate, args, kwargs)
        self._deferred_filter = None
    return self._query

@query.setter
def query(self, value):
    # Restoring a values() query (e.g. from a pickle) must also restore
    # the matching iterable class so rows come back as dicts.
    if value.values_select:
        self._iterable_class = ValuesIterable
    self._query = value

def as_manager(cls):
    """Return a Manager instance whose class is built from this QuerySet."""
    # Address the circular dependency between `Queryset` and `Manager`.
    from django.db.models.manager import Manager

    manager = Manager.from_queryset(cls)()
    manager._built_with_as_manager = True
    return manager

as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################

def __deepcopy__(self, memo):
    """Don't populate the QuerySet's cache."""
    obj = self.__class__()
    for k, v in self.__dict__.items():
        if k == "_result_cache":
            # The copy starts out unevaluated.
            obj.__dict__[k] = None
        else:
            obj.__dict__[k] = copy.deepcopy(v, memo)
    return obj

def __getstate__(self):
    """Pickle the evaluated queryset, stamped with the Django version."""
    # Force the cache to be fully populated.
    self._fetch_all()
    return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: django.__version__}

def __setstate__(self, state):
    """Restore pickled state, warning if it came from another Django."""
    pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
    if pickled_version:
        if pickled_version != django.__version__:
            warnings.warn(
                "Pickled queryset instance's Django version %s does not "
                "match the current version %s."
                % (pickled_version, django.__version__),
                RuntimeWarning,
                stacklevel=2,
            )
    else:
        warnings.warn(
            "Pickled queryset instance's Django version is not specified.",
            RuntimeWarning,
            stacklevel=2,
        )
    self.__dict__.update(state)

def __repr__(self):
    # Fetch one row beyond the display limit so truncation can be flagged.
    data = list(self[: REPR_OUTPUT_SIZE + 1])
    if len(data) > REPR_OUTPUT_SIZE:
        data[-1] = "...(remaining elements truncated)..."
    return "<%s %r>" % (self.__class__.__name__, data)

def __len__(self):
    self._fetch_all()
    return len(self._result_cache)

def __iter__(self):
    """
    The queryset iterator protocol uses three nested iterators in the
    default case:
        1. sql.compiler.execute_sql()
           - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE)
             using cursor.fetchmany(). This part is responsible for
             doing some column masking, and returning the rows in chunks.
        2. sql.compiler.results_iter()
           - Returns one row at time. At this point the rows are still just
             tuples. In some cases the return values are converted to
             Python values at this location.
        3. self.iterator()
           - Responsible for turning the rows into model objects.
    """
    self._fetch_all()
    return iter(self._result_cache)

def __aiter__(self):
    # Remember, __aiter__ itself is synchronous, it's the thing it returns
    # that is async!
    async def generator():
        await sync_to_async(self._fetch_all)()
        for item in self._result_cache:
            yield item

    return generator()

def __bool__(self):
    self._fetch_all()
    return bool(self._result_cache)
def __getitem__(self, k):
    """Retrieve an item or slice from the set of results."""
    if not isinstance(k, (int, slice)):
        raise TypeError(
            "QuerySet indices must be integers or slices, not %s."
            % type(k).__name__
        )
    if (isinstance(k, int) and k < 0) or (
        isinstance(k, slice)
        and (
            (k.start is not None and k.start < 0)
            or (k.stop is not None and k.stop < 0)
        )
    ):
        raise ValueError("Negative indexing is not supported.")

    # Already evaluated: index straight into the cached results.
    if self._result_cache is not None:
        return self._result_cache[k]

    if isinstance(k, slice):
        # Slicing stays lazy by pushing LIMIT/OFFSET into the query; a
        # step forces evaluation since SQL has no equivalent.
        qs = self._chain()
        if k.start is not None:
            start = int(k.start)
        else:
            start = None
        if k.stop is not None:
            stop = int(k.stop)
        else:
            stop = None
        qs.query.set_limits(start, stop)
        return list(qs)[:: k.step] if k.step else qs

    # Single index: fetch exactly one row via LIMIT/OFFSET.
    qs = self._chain()
    qs.query.set_limits(k, k + 1)
    qs._fetch_all()
    return qs._result_cache[0]

def __class_getitem__(cls, *args, **kwargs):
    # Allow QuerySet[...] in type annotations; no runtime generics.
    return cls
def __and__(self, other):
    """Intersection (qs1 & qs2): combine both queries with AND."""
    self._check_operator_queryset(other, "&")
    self._merge_sanity_check(other)
    if isinstance(other, EmptyQuerySet):
        return other
    if isinstance(self, EmptyQuerySet):
        return self
    combined = self._chain()
    combined._merge_known_related_objects(other)
    combined.query.combine(other.query, sql.AND)
    return combined

def __or__(self, other):
    """Union (qs1 | qs2): combine both queries with OR."""
    self._check_operator_queryset(other, "|")
    self._merge_sanity_check(other)
    if isinstance(self, EmptyQuerySet):
        return other
    if isinstance(other, EmptyQuerySet):
        return self
    # A query that can't be filtered further (e.g. sliced) is replaced by
    # a pk__in subquery against the base manager.
    query = (
        self
        if self.query.can_filter()
        else self.model._base_manager.filter(pk__in=self.values("pk"))
    )
    combined = query._chain()
    combined._merge_known_related_objects(other)
    if not other.query.can_filter():
        other = other.model._base_manager.filter(pk__in=other.values("pk"))
    combined.query.combine(other.query, sql.OR)
    return combined

def __xor__(self, other):
    """Symmetric difference (qs1 ^ qs2): combine both queries with XOR."""
    self._check_operator_queryset(other, "^")
    self._merge_sanity_check(other)
    if isinstance(self, EmptyQuerySet):
        return other
    if isinstance(other, EmptyQuerySet):
        return self
    # Same unfilterable-query fallback as __or__.
    query = (
        self
        if self.query.can_filter()
        else self.model._base_manager.filter(pk__in=self.values("pk"))
    )
    combined = query._chain()
    combined._merge_known_related_objects(other)
    if not other.query.can_filter():
        other = other.model._base_manager.filter(pk__in=other.values("pk"))
    combined.query.combine(other.query, sql.XOR)
    return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################

def _iterator(self, use_chunked_fetch, chunk_size):
    # Inner generator backing iterator(): stream rows, optionally running
    # prefetch_related once per chunk so memory stays bounded.
    iterable = self._iterable_class(
        self,
        chunked_fetch=use_chunked_fetch,
        chunk_size=chunk_size or 2000,
    )
    if not self._prefetch_related_lookups or chunk_size is None:
        yield from iterable
        return

    iterator = iter(iterable)
    while results := list(islice(iterator, chunk_size)):
        prefetch_related_objects(results, *self._prefetch_related_lookups)
        yield from results

def iterator(self, chunk_size=None):
    """
    An iterator over the results from applying this QuerySet to the
    database. chunk_size must be provided for QuerySets that prefetch
    related objects. Otherwise, a default chunk_size of 2000 is supplied.
    """
    if chunk_size is None:
        if self._prefetch_related_lookups:
            raise ValueError(
                "chunk_size must be provided when using QuerySet.iterator() after "
                "prefetch_related()."
            )
    elif chunk_size <= 0:
        raise ValueError("Chunk size must be strictly positive.")
    use_chunked_fetch = not connections[self.db].settings_dict.get(
        "DISABLE_SERVER_SIDE_CURSORS"
    )
    return self._iterator(use_chunked_fetch, chunk_size)

async def aiterator(self, chunk_size=2000):
    """
    An asynchronous iterator over the results from applying this QuerySet
    to the database.
    """
    if chunk_size <= 0:
        raise ValueError("Chunk size must be strictly positive.")
    use_chunked_fetch = not connections[self.db].settings_dict.get(
        "DISABLE_SERVER_SIDE_CURSORS"
    )
    iterable = self._iterable_class(
        self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size
    )
    if self._prefetch_related_lookups:
        # Buffer up to chunk_size items, prefetch for the batch, then
        # yield them; repeat until the iterable is exhausted.
        results = []
        async for item in iterable:
            results.append(item)
            if len(results) >= chunk_size:
                await aprefetch_related_objects(
                    results, *self._prefetch_related_lookups
                )
                for result in results:
                    yield result
                results.clear()
        # Flush the final partial batch.
        if results:
            await aprefetch_related_objects(
                results, *self._prefetch_related_lookups
            )
            for result in results:
                yield result
    else:
        async for item in iterable:
            yield item
def aggregate(self, *args, **kwargs):
    """
    Return a dictionary containing the calculations (aggregation)
    over the current queryset.

    If args is present the expression is passed as a kwarg using
    the Aggregate object's default alias.
    """
    if self.query.distinct_fields:
        raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
    self._validate_values_are_expressions(
        (*args, *kwargs.values()), method_name="aggregate"
    )
    for arg in args:
        # The default_alias property raises TypeError if default_alias
        # can't be set automatically or AttributeError if it isn't an
        # attribute.
        try:
            arg.default_alias
        except (AttributeError, TypeError):
            raise TypeError("Complex aggregates require an alias")
        kwargs[arg.default_alias] = arg

    return self.query.chain().get_aggregation(self.db, kwargs)

async def aaggregate(self, *args, **kwargs):
    """Async version of aggregate()."""
    return await sync_to_async(self.aggregate)(*args, **kwargs)

def count(self):
    """
    Perform a SELECT COUNT() and return the number of records as an
    integer.

    If the QuerySet is already fully cached, return the length of the
    cached results set to avoid multiple SELECT COUNT(*) calls.
    """
    if self._result_cache is not None:
        return len(self._result_cache)

    return self.query.get_count(using=self.db)

async def acount(self):
    """Async version of count()."""
    return await sync_to_async(self.count)()
def get(self, *args, **kwargs):
    """
    Perform the query and return a single object matching the given
    keyword arguments.
    """
    if self.query.combinator and (args or kwargs):
        raise NotSupportedError(
            "Calling QuerySet.get(...) with filters after %s() is not "
            "supported." % self.query.combinator
        )
    clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs)
    # Ordering is irrelevant for a single-object lookup; clear it unless
    # distinct-on fields require it or the query can't be modified.
    if self.query.can_filter() and not self.query.distinct_fields:
        clone = clone.order_by()
    limit = None
    if (
        not clone.query.select_for_update
        or connections[clone.db].features.supports_select_for_update_with_limit
    ):
        # Cap the fetch so a wildly-matching get() doesn't pull the whole
        # table just to raise MultipleObjectsReturned.
        limit = MAX_GET_RESULTS
        clone.query.set_limits(high=limit)
    num = len(clone)
    if num == 1:
        return clone._result_cache[0]
    if not num:
        raise self.model.DoesNotExist(
            "%s matching query does not exist." % self.model._meta.object_name
        )
    raise self.model.MultipleObjectsReturned(
        "get() returned more than one %s -- it returned %s!"
        % (
            self.model._meta.object_name,
            num if not limit or num < limit else "more than %s" % (limit - 1),
        )
    )

async def aget(self, *args, **kwargs):
    """Async version of get()."""
    return await sync_to_async(self.get)(*args, **kwargs)
def create(self, **kwargs):
    """
    Create a new object with the given kwargs, saving it to the database
    and returning the created object.
    """
    # Reverse one-to-one names look like model attributes but can't be
    # assigned this way; reject them with a clear error.
    reverse_one_to_one_fields = frozenset(kwargs).intersection(
        self.model._meta._reverse_one_to_one_field_names
    )
    if reverse_one_to_one_fields:
        raise ValueError(
            "The following fields do not exist in this model: %s"
            % ", ".join(reverse_one_to_one_fields)
        )

    obj = self.model(**kwargs)
    self._for_write = True
    obj.save(force_insert=True, using=self.db)
    # Propagate this queryset's fetch mode to the new instance.
    obj._state.fetch_mode = self._fetch_mode
    return obj

create.alters_data = True

async def acreate(self, **kwargs):
    """Async version of create()."""
    return await sync_to_async(self.create)(**kwargs)

acreate.alters_data = True

def _prepare_for_bulk_create(self, objs):
    # Partition objs into those with a usable pk and those relying on the
    # database to assign one.
    objs_with_pk, objs_without_pk = [], []
    for obj in objs:
        if isinstance(obj.pk, DatabaseDefault):
            objs_without_pk.append(obj)
        elif obj._is_pk_set():
            objs_with_pk.append(obj)
        else:
            # Give the pk field a chance to produce a value on save
            # (e.g. a callable default).
            obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
            if obj._is_pk_set():
                objs_with_pk.append(obj)
            else:
                objs_without_pk.append(obj)
        obj._prepare_related_fields_for_save(operation_name="bulk_create")
    return objs_with_pk, objs_without_pk
def _check_bulk_create_options(
    self, ignore_conflicts, update_conflicts, update_fields, unique_fields
):
    """
    Validate bulk_create()'s conflict-handling arguments against the
    target database's capabilities and return the OnConflict strategy to
    use (or None for plain inserts).
    """
    if ignore_conflicts and update_conflicts:
        raise ValueError(
            "ignore_conflicts and update_conflicts are mutually exclusive."
        )
    db_features = connections[self.db].features
    if ignore_conflicts:
        if not db_features.supports_ignore_conflicts:
            raise NotSupportedError(
                "This database backend does not support ignoring conflicts."
            )
        return OnConflict.IGNORE
    elif update_conflicts:
        if not db_features.supports_update_conflicts:
            raise NotSupportedError(
                "This database backend does not support updating conflicts."
            )
        if not update_fields:
            raise ValueError(
                "Fields that will be updated when a row insertion fails "
                "on conflicts must be provided."
            )
        # Backends differ on whether a conflict target (unique fields) is
        # required, optional, or unsupported.
        if unique_fields and not db_features.supports_update_conflicts_with_target:
            raise NotSupportedError(
                "This database backend does not support updating "
                "conflicts with specifying unique fields that can trigger "
                "the upsert."
            )
        if not unique_fields and db_features.supports_update_conflicts_with_target:
            raise ValueError(
                "Unique fields that can trigger the upsert must be provided."
            )
        # Updating primary keys and non-concrete fields is forbidden.
        if any(not f.concrete for f in update_fields):
            raise ValueError(
                "bulk_create() can only be used with concrete fields in "
                "update_fields."
            )
        if any(f in self.model._meta.pk_fields for f in update_fields):
            raise ValueError(
                "bulk_create() cannot be used with primary keys in "
                "update_fields."
            )
        if unique_fields:
            if any(not f.concrete for f in unique_fields):
                raise ValueError(
                    "bulk_create() can only be used with concrete fields "
                    "in unique_fields."
                )
        return OnConflict.UPDATE
    return None
    def bulk_create(
        self,
        objs,
        batch_size=None,
        ignore_conflicts=False,
        update_conflicts=False,
        update_fields=None,
        unique_fields=None,
    ):
        """
        Insert each of the instances into the database. Do *not* call
        save() on each of the instances, do not send any pre/post_save
        signals, and do not set the primary key attribute if it is an
        autoincrement field (except if
        features.can_return_rows_from_bulk_insert=True).
        Multi-table models are not supported.

        With update_conflicts=True, rows conflicting on unique_fields have
        update_fields updated in place instead of raising IntegrityError;
        with ignore_conflicts=True conflicting rows are skipped. The
        combination of these options is validated by
        _check_bulk_create_options().
        """
        # When you bulk insert you don't get the primary keys back (if it's an
        # autoincrement, except if can_return_rows_from_bulk_insert=True), so
        # you can't insert into the child tables which references this. There
        # are two workarounds:
        # 1) This could be implemented if you didn't have an autoincrement pk
        # 2) You could do it by doing O(n) normal inserts into the parent
        #    tables to get the primary keys back and then doing a single bulk
        #    insert into the childmost table.
        # We currently set the primary keys on the objects when using
        # PostgreSQL via the RETURNING ID clause. It should be possible for
        # Oracle as well, but the semantics for extracting the primary keys is
        # trickier so it's not done yet.
        if batch_size is not None and batch_size <= 0:
            raise ValueError("Batch size must be a positive integer.")
        # Check that the parents share the same concrete model with the our
        # model to detect the inheritance pattern ConcreteGrandParent ->
        # MultiTableParent -> ProxyChild. Simply checking
        # self.model._meta.proxy would not identify that case as involving
        # multiple tables.
        for parent in self.model._meta.all_parents:
            if parent._meta.concrete_model is not self.model._meta.concrete_model:
                raise ValueError("Can't bulk create a multi-table inherited model")
        if not objs:
            return objs
        opts = self.model._meta
        if unique_fields:
            # Primary key is allowed in unique_fields.
            unique_fields = [
                self.model._meta.get_field(opts.pk.name if name == "pk" else name)
                for name in unique_fields
            ]
        if update_fields:
            update_fields = [self.model._meta.get_field(name) for name in update_fields]
        on_conflict = self._check_bulk_create_options(
            ignore_conflicts,
            update_conflicts,
            update_fields,
            unique_fields,
        )
        self._for_write = True
        # Generated columns are computed by the database, so they are
        # excluded from the insert.
        fields = [f for f in opts.concrete_fields if not f.generated]
        objs = list(objs)
        objs_with_pk, objs_without_pk = self._prepare_for_bulk_create(objs)
        # Two separate inserts are needed when both groups are non-empty;
        # wrap them in one atomic block so they succeed or fail together.
        if objs_with_pk and objs_without_pk:
            context = transaction.atomic(using=self.db, savepoint=False)
        else:
            context = nullcontext()
        with context:
            self._handle_order_with_respect_to(objs)
            if objs_with_pk:
                returned_columns = self._batched_insert(
                    objs_with_pk,
                    fields,
                    batch_size,
                    on_conflict=on_conflict,
                    update_fields=update_fields,
                    unique_fields=unique_fields,
                )
                # Copy any database-computed values (db_returning_fields)
                # back onto the in-memory instances.
                for obj_with_pk, results in zip(objs_with_pk, returned_columns):
                    for result, field in zip(results, opts.db_returning_fields):
                        setattr(obj_with_pk, field.attname, result)
                for obj_with_pk in objs_with_pk:
                    obj_with_pk._state.adding = False
                    obj_with_pk._state.db = self.db
            if objs_without_pk:
                # Let the database assign auto fields for these objects.
                fields = [f for f in fields if not isinstance(f, AutoField)]
                returned_columns = self._batched_insert(
                    objs_without_pk,
                    fields,
                    batch_size,
                    on_conflict=on_conflict,
                    update_fields=update_fields,
                    unique_fields=unique_fields,
                )
                connection = connections[self.db]
                if (
                    connection.features.can_return_rows_from_bulk_insert
                    and on_conflict is None
                ):
                    assert len(returned_columns) == len(objs_without_pk)
                for obj_without_pk, results in zip(objs_without_pk, returned_columns):
                    for result, field in zip(results, opts.db_returning_fields):
                        setattr(obj_without_pk, field.attname, result)
                    obj_without_pk._state.adding = False
                    obj_without_pk._state.db = self.db
        return objs
    def _handle_order_with_respect_to(self, objs):
        """
        Assign _order values to objs for models declaring
        Meta.order_with_respect_to.

        The current max _order per group is fetched in a single query, and
        each new object without an explicit _order continues its group's
        sequence.
        """
        if objs and (order_wrt := self.model._meta.order_with_respect_to):
            get_filter_kwargs_for_object = order_wrt.get_filter_kwargs_for_object
            attnames = list(get_filter_kwargs_for_object(objs[0]))
            group_keys = set()
            obj_groups = []
            for obj in objs:
                group_key = tuple(get_filter_kwargs_for_object(obj).values())
                group_keys.add(group_key)
                obj_groups.append((obj, group_key))
            # One OR'ed filter selects the existing rows of every group at
            # once.
            filters = [
                Q.create(list(zip(attnames, group_key))) for group_key in group_keys
            ]
            next_orders = (
                self.model._base_manager.using(self.db)
                .filter(reduce(operator.or_, filters))
                .values_list(*attnames)
                .annotate(_order__max=Max("_order") + 1)
            )
            # Create mapping of group values to max order.
            group_next_orders = dict.fromkeys(group_keys, 0)
            group_next_orders.update(
                (tuple(group_key), next_order) for *group_key, next_order in next_orders
            )
            # Assign _order values to new objects.
            for obj, group_key in obj_groups:
                if getattr(obj, "_order", None) is None:
                    group_next_order = group_next_orders[group_key]
                    obj._order = group_next_order
                    group_next_orders[group_key] += 1

    bulk_create.alters_data = True
async def abulk_create(
self,
objs,
batch_size=None,
ignore_conflicts=False,
update_conflicts=False,
update_fields=None,
unique_fields=None,
):
return await sync_to_async(self.bulk_create)(
objs=objs,
batch_size=batch_size,
ignore_conflicts=ignore_conflicts,
update_conflicts=update_conflicts,
update_fields=update_fields,
unique_fields=unique_fields,
)
abulk_create.alters_data = True
    def bulk_update(self, objs, fields, batch_size=None):
        """
        Update the given fields in each of the given objects in the database.

        Each batch becomes a single UPDATE with CASE/WHEN expressions keyed
        on the primary key. Return the total number of rows updated. All
        objects must have their primary key set, and primary key fields
        themselves cannot be updated.
        """
        if batch_size is not None and batch_size <= 0:
            raise ValueError("Batch size must be a positive integer.")
        if not fields:
            raise ValueError("Field names must be given to bulk_update().")
        objs = tuple(objs)
        if not all(obj._is_pk_set() for obj in objs):
            raise ValueError("All bulk_update() objects must have a primary key set.")
        opts = self.model._meta
        fields = [opts.get_field(name) for name in fields]
        if any(not f.concrete for f in fields):
            raise ValueError("bulk_update() can only be used with concrete fields.")
        # Collect pk fields from the model and all of its parents so
        # inherited primary keys are rejected too.
        all_pk_fields = set(opts.pk_fields)
        for parent in opts.all_parents:
            all_pk_fields.update(parent._meta.pk_fields)
        if any(f in all_pk_fields for f in fields):
            raise ValueError("bulk_update() cannot be used with primary key fields.")
        if not objs:
            return 0
        for obj in objs:
            obj._prepare_related_fields_for_save(
                operation_name="bulk_update", fields=fields
            )
        # PK is used twice in the resulting update query, once in the filter
        # and once in the WHEN. Each field will also have one CAST.
        self._for_write = True
        connection = connections[self.db]
        max_batch_size = connection.ops.bulk_batch_size(
            [opts.pk, opts.pk, *fields], objs
        )
        batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
        requires_casting = connection.features.requires_casted_case_in_updates
        batches = (objs[i : i + batch_size] for i in range(0, len(objs), batch_size))
        updates = []
        for batch_objs in batches:
            update_kwargs = {}
            for field in fields:
                when_statements = []
                for obj in batch_objs:
                    attr = getattr(obj, field.attname)
                    # Wrap plain values so they can participate in the Case
                    # expression; expressions are passed through as-is.
                    if not hasattr(attr, "resolve_expression"):
                        attr = Value(attr, output_field=field)
                    when_statements.append(When(pk=obj.pk, then=attr))
                case_statement = Case(*when_statements, output_field=field)
                if requires_casting:
                    case_statement = Cast(case_statement, output_field=field)
                update_kwargs[field.attname] = case_statement
            updates.append(([obj.pk for obj in batch_objs], update_kwargs))
        rows_updated = 0
        queryset = self.using(self.db)
        # Run all batches inside one transaction so a failure rolls back the
        # earlier batches as well.
        with transaction.atomic(using=self.db, savepoint=False):
            for pks, update_kwargs in updates:
                rows_updated += queryset.filter(pk__in=pks).update(**update_kwargs)
        return rows_updated

    bulk_update.alters_data = True
async def abulk_update(self, objs, fields, batch_size=None):
return await sync_to_async(self.bulk_update)(
objs=objs,
fields=fields,
batch_size=batch_size,
)
abulk_update.alters_data = True
    def get_or_create(self, defaults=None, **kwargs):
        """
        Look up an object with the given kwargs, creating one if necessary.
        Return a tuple of (object, created), where created is a boolean
        specifying whether an object was created.

        `defaults` supplies extra attributes used only when creating; its
        values may be callables, resolved just before the insert.
        """
        # The get() needs to be targeted at the write database in order
        # to avoid potential transaction consistency problems.
        self._for_write = True
        try:
            return self.get(**kwargs), False
        except self.model.DoesNotExist:
            params = self._extract_model_params(defaults, **kwargs)
            # Try to create an object using passed params.
            try:
                with transaction.atomic(using=self.db):
                    params = dict(resolve_callables(params))
                    return self.create(**params), True
            except IntegrityError:
                # The insert failed; a matching row may have been inserted
                # concurrently, so retry the lookup before surfacing the
                # IntegrityError.
                try:
                    return self.get(**kwargs), False
                except self.model.DoesNotExist:
                    pass
                raise

    get_or_create.alters_data = True
async def aget_or_create(self, defaults=None, **kwargs):
return await sync_to_async(self.get_or_create)(
defaults=defaults,
**kwargs,
)
aget_or_create.alters_data = True
    def update_or_create(self, defaults=None, create_defaults=None, **kwargs):
        """
        Look up an object with the given kwargs, updating one with defaults
        if it exists, otherwise create a new one. Optionally, an object can
        be created with different values than defaults by using
        create_defaults.
        Return a tuple (object, created), where created is a boolean
        specifying whether an object was created.
        """
        update_defaults = defaults or {}
        # When create_defaults isn't given, creation falls back to the same
        # values used for updating.
        if create_defaults is None:
            create_defaults = update_defaults
        self._for_write = True
        with transaction.atomic(using=self.db):
            # Lock the row so that a concurrent update is blocked until
            # update_or_create() has performed its save.
            obj, created = self.select_for_update().get_or_create(
                create_defaults, **kwargs
            )
            if created:
                return obj, created
            # Existing row: apply the update defaults (callables resolved).
            for k, v in resolve_callables(update_defaults):
                setattr(obj, k, v)
            update_fields = set(update_defaults)
            concrete_field_names = self.model._meta._non_pk_concrete_field_names
            # update_fields does not support non-concrete fields.
            if concrete_field_names.issuperset(update_fields):
                # Add fields which are set on pre_save(), e.g. auto_now fields.
                # This is to maintain backward compatibility as these fields
                # are not updated unless explicitly specified in the
                # update_fields list.
                pk_fields = self.model._meta.pk_fields
                for field in self.model._meta.local_concrete_fields:
                    if not (
                        field in pk_fields or field.__class__.pre_save is Field.pre_save
                    ):
                        update_fields.add(field.name)
                        if field.name != field.attname:
                            update_fields.add(field.attname)
                obj.save(using=self.db, update_fields=update_fields)
            else:
                obj.save(using=self.db)
        return obj, False

    update_or_create.alters_data = True
async def aupdate_or_create(self, defaults=None, create_defaults=None, **kwargs):
return await sync_to_async(self.update_or_create)(
defaults=defaults,
create_defaults=create_defaults,
**kwargs,
)
aupdate_or_create.alters_data = True
def _extract_model_params(self, defaults, **kwargs):
"""
Prepare `params` for creating a model instance based on the given
kwargs; for use by get_or_create().
"""
defaults = defaults or {}
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
property_names = self.model._meta._property_names
invalid_params = []
for param in params:
try:
self.model._meta.get_field(param)
except exceptions.FieldDoesNotExist:
# It's okay to use a model's property if it has a setter.
if not (param in property_names and getattr(self.model, param).fset):
invalid_params.append(param)
if invalid_params:
raise exceptions.FieldError(
"Invalid field name(s) for model %s: '%s'."
% (
self.model._meta.object_name,
"', '".join(sorted(invalid_params)),
)
)
return params
def _earliest(self, *fields):
"""
Return the earliest object according to fields (if given) or by the
model's Meta.get_latest_by.
"""
if fields:
order_by = fields
else:
order_by = getattr(self.model._meta, "get_latest_by")
if order_by and not isinstance(order_by, (tuple, list)):
order_by = (order_by,)
if order_by is None:
raise ValueError(
"earliest() and latest() require either fields as positional "
"arguments or 'get_latest_by' in the model's Meta."
)
obj = self._chain()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force=True)
obj.query.add_ordering(*order_by)
return obj.get()
    def earliest(self, *fields):
        """
        Return the earliest object according to fields (if given) or by the
        model's Meta.get_latest_by. See _earliest().
        """
        if self.query.is_sliced:
            raise TypeError("Cannot change a query once a slice has been taken.")
        return self._earliest(*fields)
async def aearliest(self, *fields):
return await sync_to_async(self.earliest)(*fields)
    def latest(self, *fields):
        """
        Return the latest object according to fields (if given) or by the
        model's Meta.get_latest_by.
        """
        if self.query.is_sliced:
            raise TypeError("Cannot change a query once a slice has been taken.")
        # latest() is simply earliest() applied to the reversed ordering.
        return self.reverse()._earliest(*fields)
async def alatest(self, *fields):
return await sync_to_async(self.latest)(*fields)
def first(self):
"""Return the first object of a query or None if no match is found."""
if self.ordered or not self.query.default_ordering:
queryset = self
else:
self._check_ordering_first_last_queryset_aggregation(method="first")
queryset = self.order_by("pk")
for obj in queryset[:1]:
return obj
async def afirst(self):
return await sync_to_async(self.first)()
def last(self):
"""Return the last object of a query or None if no match is found."""
if self.ordered or not self.query.default_ordering:
queryset = self.reverse()
else:
self._check_ordering_first_last_queryset_aggregation(method="last")
queryset = self.order_by("-pk")
for obj in queryset[:1]:
return obj
async def alast(self):
return await sync_to_async(self.last)()
    def in_bulk(self, id_list=None, *, field_name="pk"):
        """
        Return a dictionary mapping each of the given IDs to the object with
        that ID. If `id_list` isn't provided, evaluate the entire QuerySet.

        field_name must be "pk" or a unique field (including single-column
        unique constraints), unless the queryset is distinct on exactly that
        field.
        """
        if self.query.is_sliced:
            raise TypeError("Cannot use 'limit' or 'offset' with in_bulk().")
        if id_list is not None and not id_list:
            return {}
        opts = self.model._meta
        # Fields covered by a single-column UNIQUE constraint count as
        # unique for the purposes of the check below.
        unique_fields = [
            constraint.fields[0]
            for constraint in opts.total_unique_constraints
            if len(constraint.fields) == 1
        ]
        if (
            field_name != "pk"
            and not opts.get_field(field_name).unique
            and field_name not in unique_fields
            and self.query.distinct_fields != (field_name,)
        ):
            raise ValueError(
                "in_bulk()'s field_name must be a unique field but %r isn't."
                % field_name
            )
        qs = self

        def get_obj(obj):
            return obj

        # get_key extracts the mapping key and get_obj the mapping value;
        # both depend on how this queryset yields rows (model instances,
        # dicts, tuples, or flat values).
        if issubclass(self._iterable_class, ModelIterable):
            # Raise an AttributeError if field_name is deferred.
            get_key = operator.attrgetter(field_name)
        elif issubclass(self._iterable_class, ValuesIterable):
            if field_name not in self.query.values_select:
                qs = qs.values(field_name, *self.query.values_select)

            def get_obj(obj):  # noqa: F811
                # We can safely mutate the dictionaries returned by
                # ValuesIterable here, since they are limited to the scope
                # of this function, and get_key runs before get_obj.
                del obj[field_name]
                return obj

            get_key = operator.itemgetter(field_name)
        elif issubclass(self._iterable_class, ValuesListIterable):
            try:
                field_index = self.query.values_select.index(field_name)
            except ValueError:
                # field_name is missing from values_select, so add it.
                field_index = 0
                if issubclass(self._iterable_class, NamedValuesListIterable):
                    kwargs = {"named": True}
                else:
                    kwargs = {}
                get_obj = operator.itemgetter(slice(1, None))
                qs = qs.values_list(field_name, *self.query.values_select, **kwargs)
            get_key = operator.itemgetter(field_index)
        elif issubclass(self._iterable_class, FlatValuesListIterable):
            if self.query.values_select == (field_name,):
                # Mapping field_name to itself.
                get_key = get_obj
            else:
                # Transform it back into a non-flat values_list().
                qs = qs.values_list(field_name, *self.query.values_select)
                get_key = operator.itemgetter(0)
                get_obj = operator.itemgetter(1)
        else:
            raise TypeError(
                f"in_bulk() cannot be used with {self._iterable_class.__name__}."
            )
        if id_list is not None:
            filter_key = "{}__in".format(field_name)
            id_list = tuple(id_list)
            batch_size = connections[self.db].ops.bulk_batch_size([opts.pk], id_list)
            # If the database has a limit on the number of query parameters
            # (e.g. SQLite), retrieve objects in batches if necessary.
            if batch_size and batch_size < len(id_list):
                results = ()
                for offset in range(0, len(id_list), batch_size):
                    batch = id_list[offset : offset + batch_size]
                    results += tuple(qs.filter(**{filter_key: batch}))
                qs = results
            else:
                qs = qs.filter(**{filter_key: id_list})
        else:
            qs = qs._chain()
        return {get_key(obj): get_obj(obj) for obj in qs}
async def ain_bulk(self, id_list=None, *, field_name="pk"):
return await sync_to_async(self.in_bulk)(
id_list=id_list,
field_name=field_name,
)
    def delete(self):
        """
        Delete the records in the current QuerySet.

        Return a tuple (total_deleted, per_model_counts) as produced by the
        deletion Collector. Not allowed on sliced, distinct-on-fields, or
        values()/values_list() querysets.
        """
        self._not_support_combined_queries("delete")
        if self.query.is_sliced:
            raise TypeError("Cannot use 'limit' or 'offset' with delete().")
        if self.query.distinct_fields:
            raise TypeError("Cannot call delete() after .distinct(*fields).")
        if self._fields is not None:
            raise TypeError("Cannot call delete() after .values() or .values_list()")
        del_query = self._chain()
        # The delete is actually 2 queries - one to find related objects,
        # and one to delete. Make sure that the discovery of related
        # objects is performed on the same database as the deletion.
        del_query._for_write = True
        # Disable non-supported fields.
        del_query.query.select_for_update = False
        del_query.query.select_related = False
        del_query.query.clear_ordering(force=True)
        collector = Collector(using=del_query.db, origin=self)
        collector.collect(del_query)
        num_deleted, num_deleted_per_model = collector.delete()
        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None
        return num_deleted, num_deleted_per_model

    delete.alters_data = True
    delete.queryset_only = True
async def adelete(self):
return await sync_to_async(self.delete)()
adelete.alters_data = True
adelete.queryset_only = True
def _raw_delete(self, using):
"""
Delete objects found from the given queryset in single direct SQL
query. No signals are sent and there is no protection for cascades.
"""
query = self.query.clone()
query.__class__ = sql.DeleteQuery
return query.get_compiler(using).execute_sql(ROW_COUNT)
_raw_delete.alters_data = True
    def update(self, **kwargs):
        """
        Update all elements in the current QuerySet, setting all the given
        fields to the appropriate values.

        Return the row count reported by the database. Not allowed on sliced
        or distinct-on-fields querysets.
        """
        self._not_support_combined_queries("update")
        if self.query.is_sliced:
            raise TypeError("Cannot update a query once a slice has been taken.")
        if self.query.distinct_fields:
            raise TypeError("Cannot call update() after .distinct(*fields).")
        self._for_write = True
        query = self.query.chain(sql.UpdateQuery)
        query.add_update_values(kwargs)
        # Inline annotations in order_by(), if possible.
        new_order_by = []
        for col in query.order_by:
            alias = col
            descending = False
            # "-name" string orderings carry the direction in the prefix.
            if isinstance(alias, str) and alias.startswith("-"):
                alias = alias.removeprefix("-")
                descending = True
            if annotation := query.annotations.get(alias):
                if getattr(annotation, "contains_aggregate", False):
                    raise exceptions.FieldError(
                        f"Cannot update when ordering by an aggregate: {annotation}"
                    )
                if descending:
                    annotation = annotation.desc()
                new_order_by.append(annotation)
            else:
                new_order_by.append(col)
        query.order_by = tuple(new_order_by)

        # Clear SELECT clause as all annotation references were inlined by
        # add_update_values() already.
        query.clear_select_clause()
        with transaction.mark_for_rollback_on_error(using=self.db):
            rows = query.get_compiler(self.db).execute_sql(ROW_COUNT)
        # Invalidate any cached results since the rows have changed.
        self._result_cache = None
        return rows

    update.alters_data = True
async def aupdate(self, **kwargs):
return await sync_to_async(self.update)(**kwargs)
aupdate.alters_data = True
def _update(self, values, returning_fields=None):
"""
A version of update() that accepts field objects instead of field
names. Used primarily for model saving and not intended for use by
general code (it requires too much poking around at model internals to
be useful at that level).
"""
if self.query.is_sliced:
raise TypeError("Cannot update a query once a slice has been taken.")
query = self.query.chain(sql.UpdateQuery)
query.add_update_fields(values)
# Clear any annotations so that they won't be present in subqueries.
query.annotations = {}
self._result_cache = None
if returning_fields is None:
return query.get_compiler(self.db).execute_sql(ROW_COUNT)
return query.get_compiler(self.db).execute_returning_sql(returning_fields)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
"""
Return True if the QuerySet would have any results, False otherwise.
"""
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
async def aexists(self):
return await sync_to_async(self.exists)()
def contains(self, obj):
"""
Return True if the QuerySet contains the provided obj,
False otherwise.
"""
self._not_support_combined_queries("contains")
if self._fields is not None:
raise TypeError(
"Cannot call QuerySet.contains() after .values() or .values_list()."
)
try:
if obj._meta.concrete_model != self.model._meta.concrete_model:
return False
except AttributeError:
raise TypeError("'obj' must be a model instance.")
if not obj._is_pk_set():
raise ValueError("QuerySet.contains() cannot be used on unsaved objects.")
if self._result_cache is not None:
return obj in self._result_cache
return self.filter(pk=obj.pk).exists()
async def acontains(self, obj):
return await sync_to_async(self.contains)(obj=obj)
    def _prefetch_related_objects(self):
        """Run the configured prefetch lookups against the result cache."""
        # This method can only be called once the result cache has been filled.
        prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
        self._prefetch_done = True
    def explain(self, *, format=None, **options):
        """
        Runs an EXPLAIN on the SQL query this QuerySet would perform, and
        returns the results.

        ``format`` selects the backend's output format; any remaining
        keyword options are forwarded to the backend's EXPLAIN
        implementation.
        """
        return self.query.explain(using=self.db, format=format, **options)
async def aexplain(self, *, format=None, **options):
return await sync_to_async(self.explain)(format=format, **options)
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=(), translations=None, using=None):
if using is None:
using = self.db
qs = RawQuerySet(
raw_query,
model=self.model,
params=params,
translations=translations,
using=using,
fetch_mode=self._fetch_mode,
)
qs._prefetch_related_lookups = self._prefetch_related_lookups[:]
return qs
    def _values(self, *fields, **expressions):
        """
        Common implementation of values() and values_list(): annotate any
        given expressions, record the selected fields, and restrict the
        query to them.
        """
        clone = self._chain()
        if expressions:
            # RemovedInDjango70Warning: When the deprecation ends, deindent as:
            # clone = clone.annotate(**expressions)
            with warnings.catch_warnings(
                action="ignore", category=RemovedInDjango70Warning
            ):
                clone = clone.annotate(**expressions)
        clone._fields = fields
        clone.query.set_values(fields)
        return clone
def values(self, *fields, **expressions):
fields += tuple(expressions)
clone = self._values(*fields, **expressions)
clone._iterable_class = ValuesIterable
return clone
    def values_list(self, *fields, flat=False, named=False):
        """
        Return a QuerySet yielding tuples of the given fields instead of
        model instances. With flat=True (single field only), yield bare
        values; with named=True, yield named tuples.
        """
        if flat and named:
            raise TypeError("'flat' and 'named' can't be used together.")
        if flat:
            if len(fields) > 1:
                raise TypeError(
                    "'flat' is not valid when values_list is called with more than one "
                    "field."
                )
            elif not fields:
                # RemovedInDjango70Warning: When the deprecation ends, replace
                # with:
                # raise TypeError(
                #     "'flat' is not valid when values_list is called with no "
                #     "fields."
                # )
                warnings.warn(
                    "Calling values_list() with no field name and flat=True "
                    "is deprecated. Pass an explicit field name instead, like "
                    "'pk'.",
                    RemovedInDjango70Warning,
                )
                fields = [self.model._meta.concrete_fields[0].attname]
        # Track which plain field names are in use so duplicates and
        # expression aliases can be renamed with a numeric suffix below.
        field_names = {f: False for f in fields if not hasattr(f, "resolve_expression")}
        _fields = []
        expressions = {}
        counter = 1
        for field in fields:
            field_name = field
            expression = None
            if hasattr(field, "resolve_expression"):
                field_name = getattr(
                    field, "default_alias", field.__class__.__name__.lower()
                )
                expression = field
                # For backward compatibility reasons expressions are always
                # prefixed with the counter even if their default alias doesn't
                # collide with field names. Changing this logic could break
                # some usage of named=True.
                seen = True
            elif seen := field_names[field_name]:
                # A repeated field name is re-selected through an F() alias.
                expression = F(field_name)
            if seen:
                field_name_prefix = field_name
                while (field_name := f"{field_name_prefix}{counter}") in field_names:
                    counter += 1
            if expression is not None:
                expressions[field_name] = expression
            field_names[field_name] = True
            _fields.append(field_name)

        clone = self._values(*_fields, **expressions)
        clone._iterable_class = (
            NamedValuesListIterable
            if named
            else FlatValuesListIterable if flat else ValuesListIterable
        )
        return clone
def dates(self, field_name, kind, order="ASC"):
"""
Return a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
if kind not in ("year", "month", "week", "day"):
raise ValueError("'kind' must be one of 'year', 'month', 'week', or 'day'.")
if order not in ("ASC", "DESC"):
raise ValueError("'order' must be either 'ASC' or 'DESC'.")
return (
self.annotate(
datefield=Trunc(field_name, kind, output_field=DateField()),
plain_field=F(field_name),
)
.values_list("datefield", flat=True)
.distinct()
.filter(plain_field__isnull=False)
.order_by(("-" if order == "DESC" else "") + "datefield")
)
def datetimes(self, field_name, kind, order="ASC", tzinfo=None):
"""
Return a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
if kind not in ("year", "month", "week", "day", "hour", "minute", "second"):
raise ValueError(
"'kind' must be one of 'year', 'month', 'week', 'day', "
"'hour', 'minute', or 'second'."
)
if order not in ("ASC", "DESC"):
raise ValueError("'order' must be either 'ASC' or 'DESC'.")
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return (
self.annotate(
datetimefield=Trunc(
field_name,
kind,
output_field=DateTimeField(),
tzinfo=tzinfo,
),
plain_field=F(field_name),
)
.values_list("datetimefield", flat=True)
.distinct()
.filter(plain_field__isnull=False)
.order_by(("-" if order == "DESC" else "") + "datetimefield")
)
def none(self):
"""Return an empty QuerySet."""
clone = self._chain()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
    def all(self):
        """
        Return a new QuerySet that is a copy of the current one.

        This allows a QuerySet to proxy for a model manager in some cases.
        """
        return self._chain()
    def filter(self, *args, **kwargs):
        """
        Return a new QuerySet instance with the args ANDed to the existing
        set.

        Positional args are typically Q objects; keyword args are field
        lookups. Not supported on combined (union/intersection/difference)
        querysets.
        """
        self._not_support_combined_queries("filter")
        return self._filter_or_exclude(False, args, kwargs)
    def exclude(self, *args, **kwargs):
        """
        Return a new QuerySet instance with NOT (args) ANDed to the existing
        set.

        Accepts the same arguments as filter(); the combined condition is
        negated. Not supported on combined querysets.
        """
        self._not_support_combined_queries("exclude")
        return self._filter_or_exclude(True, args, kwargs)
def _filter_or_exclude(self, negate, args, kwargs):
if (args or kwargs) and self.query.is_sliced:
raise TypeError("Cannot filter a query once a slice has been taken.")
clone = self._chain()
if self._defer_next_filter:
self._defer_next_filter = False
clone._deferred_filter = negate, args, kwargs
else:
clone._filter_or_exclude_inplace(negate, args, kwargs)
return clone
def _filter_or_exclude_inplace(self, negate, args, kwargs):
if invalid_kwargs := PROHIBITED_FILTER_KWARGS.intersection(kwargs):
invalid_kwargs_str = ", ".join(f"'{k}'" for k in sorted(invalid_kwargs))
raise TypeError(f"The following kwargs are invalid: {invalid_kwargs_str}")
if negate:
self._query.add_q(~Q(*args, **kwargs))
else:
self._query.add_q(Q(*args, **kwargs))
def complex_filter(self, filter_obj):
"""
Return a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object or a dictionary of keyword lookup
arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q):
clone = self._chain()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(False, args=(), kwargs=filter_obj)
def _combinator_query(self, combinator, *other_qs, all=False):
# Clone the query to inherit the select list and everything
clone = self._chain()
# Clear limits and ordering so they can be reapplied
clone.query.clear_ordering(force=True)
clone.query.default_ordering = True
clone.query.clear_limits()
clone.query.combined_queries = (self.query, *(qs.query for qs in other_qs))
clone.query.combinator = combinator
clone.query.combinator_all = all
return clone
def union(self, *other_qs, all=False):
# If the query is an EmptyQuerySet, combine all nonempty querysets.
if isinstance(self, EmptyQuerySet):
qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)]
if not qs:
return self
if len(qs) == 1:
return qs[0]
return qs[0]._combinator_query("union", *qs[1:], all=all)
elif not other_qs:
return self
return self._combinator_query("union", *other_qs, all=all)
def intersection(self, *other_qs):
# If any query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
for other in other_qs:
if isinstance(other, EmptyQuerySet):
return other
return self._combinator_query("intersection", *other_qs)
    def difference(self, *other_qs):
        """
        Return a QuerySet combining self and other_qs with SQL's EXCEPT
        operator.
        """
        # If the query is an EmptyQuerySet, return it.
        if isinstance(self, EmptyQuerySet):
            return self
        return self._combinator_query("difference", *other_qs)
def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False):
"""
Return a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
if nowait and skip_locked:
raise ValueError("The nowait option cannot be used with skip_locked.")
obj = self._chain()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
obj.query.select_for_update_skip_locked = skip_locked
obj.query.select_for_update_of = of
obj.query.select_for_no_key_update = no_key
return obj
def select_related(self, *fields):
"""
Return a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, clear the list.
"""
self._not_support_combined_queries("select_related")
if self._fields is not None:
raise TypeError(
"Cannot call select_related() after .values() or .values_list()"
)
obj = self._chain()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
    def prefetch_related(self, *lookups):
        """
        Return a new QuerySet instance that will prefetch the specified
        Many-To-One and Many-To-Many related objects when the QuerySet is
        evaluated.

        When prefetch_related() is called more than once, append to the list
        of prefetch lookups. If prefetch_related(None) is called, clear the
        list.
        """
        self._not_support_combined_queries("prefetch_related")
        clone = self._chain()
        if lookups == (None,):
            clone._prefetch_related_lookups = ()
        else:
            for lookup in lookups:
                if isinstance(lookup, Prefetch):
                    lookup = lookup.prefetch_to
                # Validate only the first lookup component against filtered
                # relations.
                lookup = lookup.split(LOOKUP_SEP, 1)[0]
                if lookup in self.query._filtered_relations:
                    raise ValueError(
                        "prefetch_related() is not supported with FilteredRelation."
                    )
            clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
        return clone
    def annotate(self, *args, **kwargs):
        """
        Return a query set in which the returned objects have been annotated
        with extra data or aggregations.

        Positional args must provide a default_alias; keyword args map
        alias -> expression. See _annotate().
        """
        self._not_support_combined_queries("annotate")
        return self._annotate(args, kwargs, select=True)
    def alias(self, *args, **kwargs):
        """
        Return a query set with added aliases for extra data or aggregations.

        Same arguments as annotate(), but the aliases are not selected. See
        _annotate().
        """
        self._not_support_combined_queries("alias")
        return self._annotate(args, kwargs, select=False)
def _annotate(self, args, kwargs, select=True):
    """
    Shared implementation for annotate() and alias().

    ``args`` holds expressions whose alias comes from ``default_alias``;
    ``kwargs`` maps explicit aliases to expressions. When ``select`` is
    True the annotations are added to the SELECT clause; otherwise they
    are only referenceable from later clauses.
    """
    self._validate_values_are_expressions(
        args + tuple(kwargs.values()), method_name="annotate"
    )
    annotations = {}
    for arg in args:
        # The default_alias property raises TypeError if default_alias
        # can't be set automatically or AttributeError if it isn't an
        # attribute.
        try:
            if arg.default_alias in kwargs:
                raise ValueError(
                    "The named annotation '%s' conflicts with the "
                    "default name for another annotation." % arg.default_alias
                )
        except (TypeError, AttributeError):
            raise TypeError("Complex annotations require an alias")
        annotations[arg.default_alias] = arg
    annotations.update(kwargs)

    clone = self._chain()
    names = self._fields
    if names is None:
        # No values()/values_list() restriction in effect: guard the new
        # aliases against every field name/attname on the model.
        names = set(
            chain.from_iterable(
                (
                    (field.name, field.attname)
                    if hasattr(field, "attname")
                    else (field.name,)
                )
                for field in self.model._meta.get_fields()
            )
        )

    for alias, annotation in annotations.items():
        if alias in names:
            raise ValueError(
                "The annotation '%s' conflicts with a field on "
                "the model." % alias
            )
        if isinstance(annotation, FilteredRelation):
            clone.query.add_filtered_relation(annotation, alias)
        else:
            clone.query.add_annotation(
                annotation,
                alias,
                select=select,
            )
    # Adding an aggregate annotation forces grouping; one hit is enough.
    for alias, annotation in clone.query.annotations.items():
        if alias in annotations and annotation.contains_aggregate:
            if clone._fields is None:
                clone.query.group_by = True
            else:
                clone.query.set_group_by()
            break
    return clone
def order_by(self, *field_names):
    """Return a new QuerySet instance with the ordering changed."""
    if self.query.is_sliced:
        raise TypeError("Cannot reorder a query once a slice has been taken.")
    clone = self._chain()
    # Drop any prior explicit ordering, then install the new one.
    clone.query.clear_ordering(force=True, clear_default=False)
    clone.query.add_ordering(*field_names)
    return clone
def distinct(self, *field_names):
    """
    Return a new QuerySet instance that will select only distinct results.
    """
    self._not_support_combined_queries("distinct")
    if self.query.is_sliced:
        raise TypeError(
            "Cannot create distinct fields once a slice has been taken."
        )
    clone = self._chain()
    clone.query.add_distinct_fields(*field_names)
    return clone
def extra(
    self,
    select=None,
    where=None,
    params=None,
    tables=None,
    order_by=None,
    select_params=None,
):
    """Add extra SQL fragments to the query."""
    self._not_support_combined_queries("extra")
    if self.query.is_sliced:
        raise TypeError("Cannot change a query once a slice has been taken.")
    clone = self._chain()
    # Note the argument order expected by add_extra() differs from the
    # public signature.
    clone.query.add_extra(select, select_params, where, params, tables, order_by)
    return clone
def reverse(self):
    """Reverse the ordering of the QuerySet."""
    if self.query.is_sliced:
        raise TypeError("Cannot reverse a query once a slice has been taken.")
    clone = self._chain()
    # Flip the direction flag rather than rewriting the ORDER BY list.
    clone.query.standard_ordering = not clone.query.standard_ordering
    return clone
def defer(self, *fields):
    """
    Defer the loading of data for certain fields until they are accessed.
    Add the set of deferred fields to any existing set of deferred fields.
    The only exception to this is if None is passed in as the only
    parameter, in which case remove all deferrals.
    """
    self._not_support_combined_queries("defer")
    if self._fields is not None:
        raise TypeError("Cannot call defer() after .values() or .values_list()")
    clone = self._chain()
    if fields == (None,):
        clone.query.clear_deferred_loading()
        return clone
    clone.query.add_deferred_loading(fields)
    return clone
def only(self, *fields):
    """
    Essentially, the opposite of defer(). Only the fields passed into this
    method and that are not already specified as deferred are loaded
    immediately when the queryset is evaluated.
    """
    self._not_support_combined_queries("only")
    if self._fields is not None:
        raise TypeError("Cannot call only() after .values() or .values_list()")
    if fields == (None,):
        # Can only pass None to defer(), not only(), as the rest option.
        # That won't stop people trying to do this, so let's be explicit.
        raise TypeError("Cannot pass None as an argument to only().")
    for name in fields:
        head = name.split(LOOKUP_SEP, 1)[0]
        if head in self.query._filtered_relations:
            raise ValueError("only() is not supported with FilteredRelation.")
    clone = self._chain()
    clone.query.add_immediate_loading(fields)
    return clone
def using(self, alias):
    """Select which database this QuerySet should execute against."""
    clone = self._chain()
    clone._db = alias
    return clone
def fetch_mode(self, fetch_mode):
    """Set the fetch mode for the QuerySet."""
    clone = self._chain()
    clone._fetch_mode = fetch_mode
    return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
    """
    Return True if the QuerySet is ordered -- i.e. has an order_by()
    clause or a default ordering on the model (or is empty).
    """
    if isinstance(self, EmptyQuerySet):
        return True
    query = self.query
    if query.extra_order_by or query.order_by:
        return True
    # A model's default ordering only counts when default ordering is in
    # effect and the query has no GROUP BY (a default ordering doesn't
    # affect GROUP BY queries).
    return bool(
        query.default_ordering
        and query.get_meta().ordering
        and not query.group_by
    )
@property
def totally_ordered(self):
    """
    Returns True if the QuerySet is ordered and the ordering is
    deterministic. This requires that the ordering includes a field
    (or set of fields) that is unique and non-nullable.

    For queries involving a GROUP BY clause, the model's default
    ordering is ignored. Ordering specified via .extra(order_by=...)
    is also ignored.
    """
    if not self.ordered:
        return False
    ordering = self.query.order_by
    if not ordering and self.query.default_ordering:
        ordering = self.query.get_meta().ordering
    if not ordering:
        return False
    opts = self.model._meta
    # Attnames making up the primary key (one entry unless composite).
    pk_fields = {f.attname for f in opts.pk_fields}
    ordering_fields = set()
    for part in ordering:
        # Search for single field providing a total ordering.
        field_name = None
        if isinstance(part, str):
            field_name = part.lstrip("-")
        elif isinstance(part, F):
            field_name = part.name
        elif isinstance(part, OrderBy) and isinstance(part.expression, F):
            field_name = part.expression.name
        if field_name:
            if field_name == "pk":
                return True
            # Normalize attname references by using get_field().
            try:
                field = opts.get_field(field_name)
            except exceptions.FieldDoesNotExist:
                # Could be "?" for random ordering or a related field
                # lookup. Skip this part of introspection for now.
                continue
            # Ordering by a related field name orders by the referenced
            # model's ordering. Skip this part of introspection for now.
            if field.remote_field and field_name == field.name:
                continue
            if field.attname in pk_fields and len(pk_fields) == 1:
                return True
            if field.unique and not field.null:
                return True
            ordering_fields.add(field.attname)
    # Account for members of a CompositePrimaryKey.
    if ordering_fields.issuperset(pk_fields):
        return True
    # No single total ordering field, try unique_together and total
    # unique constraints.
    constraint_field_names = (
        *opts.unique_together,
        *(constraint.fields for constraint in opts.total_unique_constraints),
    )
    for field_names in constraint_field_names:
        # Normalize attname references by using get_field().
        try:
            fields = [opts.get_field(field_name) for field_name in field_names]
        except exceptions.FieldDoesNotExist:
            continue
        # Composite unique constraints containing a nullable column
        # cannot ensure total ordering.
        if any(field.null for field in fields):
            continue
        if ordering_fields.issuperset(field.attname for field in fields):
            return True
    return False
@property
def db(self):
    """Return the database used if this query is executed now."""
    if self._db:
        return self._db
    # Route through the configured database router, picking the write
    # router only when a write operation has been flagged.
    route = router.db_for_write if self._for_write else router.db_for_read
    return route(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(
    self,
    objs,
    fields,
    returning_fields=None,
    raw=False,
    using=None,
    on_conflict=None,
    update_fields=None,
    unique_fields=None,
):
    """
    Insert a new record for the given model. This provides an interface to
    the InsertQuery class and is how Model.save() is implemented.
    """
    # Mark as a write before resolving the database so routing picks the
    # write router.
    self._for_write = True
    if using is None:
        using = self.db
    insert_query = sql.InsertQuery(
        self.model,
        on_conflict=on_conflict,
        update_fields=update_fields,
        unique_fields=unique_fields,
    )
    insert_query.insert_values(fields, objs, raw=raw)
    compiler = insert_query.get_compiler(using=using)
    return compiler.execute_sql(returning_fields)

_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(
    self,
    objs,
    fields,
    batch_size,
    on_conflict=None,
    update_fields=None,
    unique_fields=None,
):
    """
    Helper method for bulk_create() to insert objs one batch at a time.
    """
    connection = connections[self.db]
    ops = connection.ops
    # Respect the backend's parameter limits; never go below one row.
    max_batch_size = max(ops.bulk_batch_size(fields, objs), 1)
    batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
    inserted_rows = []
    # Only request RETURNING values when the backend supports it for bulk
    # inserts and the conflict mode allows rows to come back.
    returning_fields = (
        self.model._meta.db_returning_fields
        if (
            connection.features.can_return_rows_from_bulk_insert
            and (on_conflict is None or on_conflict == OnConflict.UPDATE)
        )
        else None
    )
    batches = [objs[i : i + batch_size] for i in range(0, len(objs), batch_size)]
    # Wrap multi-batch inserts in a transaction so a failure in a later
    # batch doesn't leave earlier batches committed.
    if len(batches) > 1:
        context = transaction.atomic(using=self.db, savepoint=False)
    else:
        context = nullcontext()
    with context:
        for item in batches:
            inserted_rows.extend(
                self._insert(
                    item,
                    fields=fields,
                    using=self.db,
                    on_conflict=on_conflict,
                    update_fields=update_fields,
                    unique_fields=unique_fields,
                    returning_fields=returning_fields,
                )
            )
    return inserted_rows
def _chain(self):
    """
    Return a copy of the current QuerySet that's ready for another
    operation.
    """
    clone = self._clone()
    if clone._sticky_filter:
        # Transfer stickiness onto the query once, then consume the flag.
        clone.query.filter_is_sticky = True
        clone._sticky_filter = False
    return clone
def _clone(self):
    """
    Return a copy of the current QuerySet. A lightweight alternative
    to deepcopy().
    """
    clone = self.__class__(
        model=self.model,
        query=self.query.chain(),
        using=self._db,
        hints=self._hints,
    )
    # Copy bookkeeping state; the lookups tuple is sliced so mutating one
    # queryset's lookups never leaks into the other.
    clone._prefetch_related_lookups = self._prefetch_related_lookups[:]
    clone._known_related_objects = self._known_related_objects
    clone._iterable_class = self._iterable_class
    clone._sticky_filter = self._sticky_filter
    clone._for_write = self._for_write
    clone._fetch_mode = self._fetch_mode
    clone._fields = self._fields
    return clone
def _fetch_all(self):
    """Evaluate the queryset, filling the result cache and any prefetches."""
    if self._result_cache is None:
        self._result_cache = list(self._iterable_class(self))
    if self._prefetch_done or not self._prefetch_related_lookups:
        return
    self._prefetch_related_objects()
def _next_is_sticky(self):
    """
    Indicate that the next filter call and the one following that should
    be treated as a single filter. This is only important when it comes to
    determining when to reuse tables for many-to-many filters. Required so
    that we can filter naturally on the results of related managers.

    This doesn't return a clone of the current QuerySet (it returns
    "self"). The method is only used internally and should be immediately
    followed by a filter() that does create a clone.
    """
    self._sticky_filter = True
    return self
def _merge_sanity_check(self, other):
    """Check that two QuerySet classes may be merged."""
    if self._fields is None:
        # No values()/values_list() restriction: any merge is fine.
        return
    same_selection = (
        set(self.query.values_select) == set(other.query.values_select)
        and set(self.query.extra_select) == set(other.query.extra_select)
        and set(self.query.annotation_select) == set(other.query.annotation_select)
    )
    if not same_selection:
        raise TypeError(
            "Merging '%s' classes must involve the same values in each case."
            % self.__class__.__name__
        )
def _merge_known_related_objects(self, other):
    """
    Keep track of all known related objects from either QuerySet instance.
    """
    for field, objects in other._known_related_objects.items():
        known = self._known_related_objects.setdefault(field, {})
        known.update(objects)
def resolve_expression(self, *args, **kwargs):
    """Resolve the underlying query as an expression, keeping the db alias."""
    resolved = self.query.resolve_expression(*args, **kwargs)
    resolved._db = self._db
    return resolved

resolve_expression.queryset_only = True
def _add_hints(self, **hints):
    """
    Update hinting information for use by routers. Add new key/values or
    overwrite existing key/values.
    """
    self._hints.update(hints)
def _has_filters(self):
    """
    Check if this QuerySet has any filtering going on. This isn't
    equivalent with checking if all objects are present in results, for
    example, qs[1:]._has_filters() -> False.
    """
    return self.query.has_filters()
@staticmethod
def _validate_values_are_expressions(values, method_name):
invalid_args = sorted(
str(arg) for arg in values if not hasattr(arg, "resolve_expression")
)
if invalid_args:
raise TypeError(
"QuerySet.%s() received non-expression(s): %s."
% (
method_name,
", ".join(invalid_args),
)
)
def _not_support_combined_queries(self, operation_name):
    """Raise if called on a combined (union/intersection/difference) query."""
    combinator = self.query.combinator
    if combinator:
        raise NotSupportedError(
            "Calling QuerySet.%s() after %s() is not supported."
            % (operation_name, combinator)
        )
def _check_operator_queryset(self, other, operator_):
    """Reject set operators applied to already-combined querysets."""
    for qs in (self, other):
        if qs.query.combinator:
            raise TypeError(
                f"Cannot use {operator_} operator with combined queryset."
            )
def _check_ordering_first_last_queryset_aggregation(self, method):
    """
    Raise TypeError when first()/last() would be nondeterministic on an
    aggregating (GROUP BY) queryset that isn't grouped by the pk.
    """
    if (
        isinstance(self.query.group_by, tuple)
        # Raise if the pk fields are not in the group_by.
        and self.model._meta.pk
        not in {col.output_field for col in self.query.group_by}
        and set(self.model._meta.pk_fields).difference(
            {col.target for col in self.query.group_by}
        )
    ):
        raise TypeError(
            f"Cannot use QuerySet.{method}() on an unordered queryset performing "
            f"aggregation. Add an ordering with order_by()."
        )
class InstanceCheckMeta(type):
    """Metaclass whose isinstance() check tests for an empty QuerySet."""

    def __instancecheck__(self, instance):
        if not isinstance(instance, QuerySet):
            return False
        return instance.query.is_empty()
class EmptyQuerySet(metaclass=InstanceCheckMeta):
    """
    Marker class to checking if a queryset is empty by .none():
    isinstance(qs.none(), EmptyQuerySet) -> True
    """

    def __init__(self, *args, **kwargs):
        # Never meant to be constructed; only used via isinstance().
        raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet:
    """
    Provide an iterator which converts the results of raw SQL queries into
    annotated model instances.
    """

    def __init__(
        self,
        raw_query,
        model=None,
        query=None,
        params=(),
        translations=None,
        using=None,
        hints=None,
        fetch_mode=FETCH_ONE,
    ):
        self.raw_query = raw_query
        self.model = model
        self._db = using
        # Routing hints passed to the database router via self.db.
        self._hints = hints or {}
        self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
        self.params = params
        # Mapping of query column names to model field names.
        self.translations = translations or {}
        self._result_cache = None
        self._prefetch_related_lookups = ()
        self._prefetch_done = False
        self._fetch_mode = fetch_mode

    def resolve_model_init_order(self):
        """Resolve the init field names and value positions."""
        converter = connections[self.db].introspection.identifier_converter
        # Fields whose columns are actually present in the query result.
        model_init_fields = [
            field
            for column_name, field in self.model_fields.items()
            if column_name in self.columns
        ]
        # Extra selected columns with no matching model field become
        # (name, position) annotation pairs.
        annotation_fields = [
            (column, pos)
            for pos, column in enumerate(self.columns)
            if column not in self.model_fields
        ]
        model_init_order = [
            self.columns.index(converter(f.column)) for f in model_init_fields
        ]
        model_init_names = [f.attname for f in model_init_fields]
        return model_init_names, model_init_order, annotation_fields

    def prefetch_related(self, *lookups):
        """Same as QuerySet.prefetch_related()"""
        clone = self._clone()
        if lookups == (None,):
            clone._prefetch_related_lookups = ()
        else:
            clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
        return clone

    def _prefetch_related_objects(self):
        # Run the shared prefetch machinery over the cached results.
        prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
        self._prefetch_done = True

    def _clone(self):
        """Same as QuerySet._clone()"""
        c = self.__class__(
            self.raw_query,
            model=self.model,
            query=self.query,
            params=self.params,
            translations=self.translations,
            using=self._db,
            hints=self._hints,
        )
        # Slice so later lookups added to one copy don't affect the other.
        c._prefetch_related_lookups = self._prefetch_related_lookups[:]
        return c

    def _fetch_all(self):
        # Evaluate the raw query once and cache the instances.
        if self._result_cache is None:
            self._result_cache = list(self.iterator())
        if self._prefetch_related_lookups and not self._prefetch_done:
            self._prefetch_related_objects()

    def __len__(self):
        self._fetch_all()
        return len(self._result_cache)

    def __bool__(self):
        self._fetch_all()
        return bool(self._result_cache)

    def __iter__(self):
        self._fetch_all()
        return iter(self._result_cache)

    def __aiter__(self):
        # Remember, __aiter__ itself is synchronous, it's the thing it returns
        # that is async!
        async def generator():
            await sync_to_async(self._fetch_all)()
            for item in self._result_cache:
                yield item

        return generator()

    def iterator(self):
        # Stream model instances straight from the database cursor.
        yield from RawModelIterable(self)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.query)

    def __getitem__(self, k):
        # Raw querysets have no server-side slicing; index the full list.
        return list(self)[k]

    @property
    def db(self):
        """Return the database used if this query is executed now."""
        return self._db or router.db_for_read(self.model, **self._hints)

    def using(self, alias):
        """Select the database this RawQuerySet should execute against."""
        # NOTE(review): _hints is not forwarded here — presumably
        # intentional, but verify against QuerySet.using().
        return RawQuerySet(
            self.raw_query,
            model=self.model,
            query=self.query.chain(using=alias),
            params=self.params,
            translations=self.translations,
            using=alias,
            fetch_mode=self._fetch_mode,
        )

    @cached_property
    def columns(self):
        """
        A list of model field names in the order they'll appear in the
        query results.
        """
        columns = self.query.get_columns()
        # Adjust any column names which don't match field names
        for query_name, model_name in self.translations.items():
            # Ignore translations for nonexistent column names
            try:
                index = columns.index(query_name)
            except ValueError:
                pass
            else:
                columns[index] = model_name
        return columns

    @cached_property
    def model_fields(self):
        """A dict mapping column names to model field names."""
        converter = connections[self.db].introspection.identifier_converter
        return {
            converter(field.column): field
            for field in self.model._meta.fields
            # Fields with None "column" should be ignored
            # (e.g. CompositePrimaryKey).
            if field.column
        }
class Prefetch:
    """
    Describe a prefetch_related() lookup, optionally carrying a custom
    queryset and/or a target attribute name for the results.
    """

    def __init__(self, lookup, queryset=None, to_attr=None):
        # `prefetch_through` is the path we traverse to perform the prefetch.
        self.prefetch_through = lookup
        if queryset is not None:
            is_raw = isinstance(queryset, RawQuerySet)
            wrong_iterable = hasattr(queryset, "_iterable_class") and not issubclass(
                queryset._iterable_class, ModelIterable
            )
            if is_raw or wrong_iterable:
                raise ValueError(
                    "Prefetch querysets cannot use raw(), values(), and values_list()."
                )
        # `prefetch_to` is the path to the attribute that stores the result.
        if to_attr:
            parts = lookup.split(LOOKUP_SEP)
            self.prefetch_to = LOOKUP_SEP.join(parts[:-1] + [to_attr])
        else:
            self.prefetch_to = lookup
        self.queryset = queryset
        self.to_attr = to_attr

    def __getstate__(self):
        state = self.__dict__.copy()
        if self.queryset is not None:
            queryset = self.queryset._chain()
            # Mark the copy as already evaluated so pickling never hits
            # the database.
            queryset._result_cache = []
            queryset._prefetch_done = True
            state["queryset"] = queryset
        return state

    def add_prefix(self, prefix):
        """Prepend prefix to both the traversal and storage paths."""
        self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through
        self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to

    def get_current_prefetch_to(self, level):
        """Return the storage path truncated to the given depth."""
        parts = self.prefetch_to.split(LOOKUP_SEP)
        return LOOKUP_SEP.join(parts[: level + 1])

    def get_current_to_attr(self, level):
        """Return (attribute name, as_attr flag) for the given depth."""
        parts = self.prefetch_to.split(LOOKUP_SEP)
        is_leaf = level == len(parts) - 1
        return parts[level], self.to_attr and is_leaf

    def get_current_querysets(self, level):
        """Return the custom querysets, but only at the leaf level."""
        if (
            self.queryset is not None
            and self.get_current_prefetch_to(level) == self.prefetch_to
        ):
            return [self.queryset]
        return None

    def __eq__(self, other):
        if not isinstance(other, Prefetch):
            return NotImplemented
        return self.prefetch_to == other.prefetch_to

    def __hash__(self):
        return hash((self.__class__, self.prefetch_to))
def normalize_prefetch_lookups(lookups, prefix=None):
    """Normalize lookups into Prefetch objects."""
    normalized = []
    for lookup in lookups:
        prefetch = lookup if isinstance(lookup, Prefetch) else Prefetch(lookup)
        if prefix:
            prefetch.add_prefix(prefix)
        normalized.append(prefetch)
    return normalized
def prefetch_related_objects(model_instances, *related_lookups):
    """
    Populate prefetched object caches for an iterable of model instances based
    on the lookups/Prefetch instances given.
    """
    if not model_instances:
        return  # nothing to do

    # We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below).  So we need some book keeping to
    # ensure we don't do duplicate work.
    done_queries = {}  # dictionary of things like 'foo__bar': [results]

    auto_lookups = set()  # we add to this as we go through.
    followed_descriptors = set()  # recursion protection

    # Process as a LIFO worklist; reversing keeps the caller's order.
    all_lookups = normalize_prefetch_lookups(reversed(related_lookups))
    while all_lookups:
        lookup = all_lookups.pop()
        if lookup.prefetch_to in done_queries:
            if lookup.queryset is not None:
                raise ValueError(
                    "'%s' lookup was already seen with a different queryset. "
                    "You may need to adjust the ordering of your lookups."
                    % lookup.prefetch_to
                )

            continue

        # Top level, the list of objects to decorate is the result cache
        # from the primary QuerySet. It won't be for deeper levels.
        obj_list = model_instances

        through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
        for level, through_attr in enumerate(through_attrs):
            # Prepare main instances
            if not obj_list:
                break

            prefetch_to = lookup.get_current_prefetch_to(level)
            if prefetch_to in done_queries:
                # Skip any prefetching, and any object preparation
                obj_list = done_queries[prefetch_to]
                continue

            # Prepare objects:
            good_objects = True
            for obj in obj_list:
                # Since prefetching can re-use instances, it is possible to
                # have the same instance multiple times in obj_list, so obj
                # might already be prepared.
                if not hasattr(obj, "_prefetched_objects_cache"):
                    try:
                        obj._prefetched_objects_cache = {}
                    except (AttributeError, TypeError):
                        # Must be an immutable object from
                        # values_list(flat=True), for example (TypeError) or
                        # a QuerySet subclass that isn't returning Model
                        # instances (AttributeError), either in Django or a 3rd
                        # party. prefetch_related() doesn't make sense, so
                        # quit.
                        good_objects = False
                        break
            if not good_objects:
                break

            # Descend down tree

            # We assume that objects retrieved are homogeneous (which is the
            # premise of prefetch_related), so what applies to first object
            # applies to all.
            first_obj = next(iter(obj_list))
            to_attr = lookup.get_current_to_attr(level)[0]
            prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(
                first_obj, through_attr, to_attr
            )

            if not attr_found:
                raise AttributeError(
                    "Cannot find '%s' on %s object, '%s' is an invalid "
                    "parameter to prefetch_related()"
                    % (
                        through_attr,
                        first_obj.__class__.__name__,
                        lookup.prefetch_through,
                    )
                )

            if level == len(through_attrs) - 1 and prefetcher is None:
                # Last one, this *must* resolve to something that supports
                # prefetching, otherwise there is no point adding it and the
                # developer asking for it has made a mistake.
                raise ValueError(
                    "'%s' does not resolve to an item that supports "
                    "prefetching - this is an invalid parameter to "
                    "prefetch_related()." % lookup.prefetch_through
                )

            obj_to_fetch = None
            if prefetcher is not None:
                # Only fetch objects whose relation isn't already cached.
                obj_to_fetch = [obj for obj in obj_list if not is_fetched(obj)]

            if obj_to_fetch:
                obj_list, additional_lookups = prefetch_one_level(
                    obj_to_fetch,
                    prefetcher,
                    lookup,
                    level,
                )
                # We need to ensure we don't keep adding lookups from the
                # same relationships to stop infinite recursion. So, if we
                # are already on an automatically added lookup, don't add
                # the new lookups from relationships we've seen already.
                if not (
                    prefetch_to in done_queries
                    and lookup in auto_lookups
                    and descriptor in followed_descriptors
                ):
                    done_queries[prefetch_to] = obj_list
                    new_lookups = normalize_prefetch_lookups(
                        reversed(additional_lookups), prefetch_to
                    )
                    auto_lookups.update(new_lookups)
                    all_lookups.extend(new_lookups)
                followed_descriptors.add(descriptor)
            else:
                # Either a singly related object that has already been fetched
                # (e.g. via select_related), or hopefully some other property
                # that doesn't support prefetching but needs to be traversed.

                # We replace the current list of parent objects with the list
                # of related objects, filtering out empty or missing values so
                # that we can continue with nullable or reverse relations.
                new_obj_list = []
                for obj in obj_list:
                    if through_attr in getattr(obj, "_prefetched_objects_cache", ()):
                        # If related objects have been prefetched, use the
                        # cache rather than the object's through_attr.
                        new_obj = list(obj._prefetched_objects_cache.get(through_attr))
                    else:
                        try:
                            new_obj = getattr(obj, through_attr)
                        except exceptions.ObjectDoesNotExist:
                            continue
                    if new_obj is None:
                        continue
                    # We special-case `list` rather than something more generic
                    # like `Iterable` because we don't want to accidentally
                    # match user models that define __iter__.
                    if isinstance(new_obj, list):
                        new_obj_list.extend(new_obj)
                    else:
                        new_obj_list.append(new_obj)
                obj_list = new_obj_list
async def aprefetch_related_objects(model_instances, *related_lookups):
    """See prefetch_related_objects()."""
    # Run the synchronous implementation on a worker thread.
    prefetch_async = sync_to_async(prefetch_related_objects)
    return await prefetch_async(model_instances, *related_lookups)
def get_prefetcher(instance, through_attr, to_attr):
    """
    For the attribute 'through_attr' on the given instance, find
    an object that has a get_prefetch_querysets().
    Return a 4 tuple containing:
    (the object with get_prefetch_querysets (or None),
     the descriptor object representing this relationship (or None),
     a boolean that is False if the attribute was not found at all,
     a function that takes an instance and returns a boolean that is True if
     the attribute has already been fetched for that instance)
    """

    def is_to_attr_fetched(model, to_attr):
        # Special case cached_property instances because hasattr() triggers
        # attribute computation and assignment.
        if isinstance(getattr(model, to_attr, None), cached_property):

            def has_cached_property(instance):
                return to_attr in instance.__dict__

            return has_cached_property

        def has_to_attr_attribute(instance):
            return hasattr(instance, to_attr)

        return has_to_attr_attribute

    prefetcher = None
    # Default "already fetched" check; may be replaced below when the
    # descriptor provides a more precise one.
    is_fetched = is_to_attr_fetched(instance.__class__, to_attr)

    # For singly related objects, we have to avoid getting the attribute
    # from the object, as this will trigger the query. So we first try
    # on the class, in order to get the descriptor object.
    rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
    if rel_obj_descriptor is None:
        attr_found = hasattr(instance, through_attr)
    else:
        attr_found = True
        if rel_obj_descriptor:
            # singly related object, descriptor object has the
            # get_prefetch_querysets() method.
            if hasattr(rel_obj_descriptor, "get_prefetch_querysets"):
                prefetcher = rel_obj_descriptor
                # If to_attr is set, check if the value has already been set,
                # which is done with has_to_attr_attribute(). Do not use the
                # method from the descriptor, as the cache_name it defines
                # checks the field name, not the to_attr value.
                if through_attr == to_attr:
                    is_fetched = rel_obj_descriptor.is_cached
            else:
                # descriptor doesn't support prefetching, so we go ahead and
                # get the attribute on the instance rather than the class to
                # support many related managers
                rel_obj = getattr(instance, through_attr)
                if hasattr(rel_obj, "get_prefetch_querysets"):
                    prefetcher = rel_obj
                    if through_attr == to_attr:

                        def in_prefetched_cache(instance):
                            return through_attr in instance._prefetched_objects_cache

                        is_fetched = in_prefetched_cache
    return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
    """
    Helper function for prefetch_related_objects().

    Run prefetches on all instances using the prefetcher object,
    assigning results to relevant caches in instance.

    Return the prefetched objects along with any additional prefetches that
    must be done due to prefetch_related lookups found from default managers.
    """
    # prefetcher must have a method get_prefetch_querysets() which takes a list
    # of instances, and returns a tuple:

    # (queryset of instances of self.model that are related to passed in
    # instances,
    #  callable that gets value to be matched for returned instances,
    #  callable that gets value to be matched for passed in instances,
    #  boolean that is True for singly related objects,
    #  cache or field name to assign to,
    #  boolean that is True when the previous argument is a cache name vs a
    #  field name).

    # The 'values to be matched' must be hashable as they will be used
    # in a dictionary.

    (
        rel_qs,
        rel_obj_attr,
        instance_attr,
        single,
        cache_name,
        is_descriptor,
    ) = prefetcher.get_prefetch_querysets(
        instances, lookup.get_current_querysets(level)
    )
    # We have to handle the possibility that the QuerySet we just got back
    # contains some prefetch_related lookups. We don't want to trigger the
    # prefetch_related functionality by evaluating the query. Rather, we need
    # to merge in the prefetch_related lookups.
    # Copy the lookups in case it is a Prefetch object which could be reused
    # later (happens in nested prefetch_related).
    additional_lookups = [
        copy.copy(additional_lookup)
        for additional_lookup in getattr(rel_qs, "_prefetch_related_lookups", ())
    ]
    if additional_lookups:
        # Don't need to clone because the manager should have given us a fresh
        # instance, so we access an internal instead of using public interface
        # for performance reasons.
        rel_qs._prefetch_related_lookups = ()

    all_related_objects = list(rel_qs)

    # Bucket the fetched objects by the value used to match them back to
    # their parent instance.
    rel_obj_cache = {}
    for rel_obj in all_related_objects:
        rel_attr_val = rel_obj_attr(rel_obj)
        rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)

    to_attr, as_attr = lookup.get_current_to_attr(level)
    # Make sure `to_attr` does not conflict with a field.
    if as_attr and instances:
        # We assume that objects retrieved are homogeneous (which is the
        # premise of prefetch_related), so what applies to first object applies
        # to all.
        model = instances[0].__class__
        try:
            model._meta.get_field(to_attr)
        except exceptions.FieldDoesNotExist:
            pass
        else:
            msg = "to_attr={} conflicts with a field on the {} model."
            raise ValueError(msg.format(to_attr, model.__name__))

    # Whether or not we're prefetching the last part of the lookup.
    leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level

    for obj in instances:
        instance_attr_val = instance_attr(obj)
        vals = rel_obj_cache.get(instance_attr_val, [])

        if single:
            val = vals[0] if vals else None
            if as_attr:
                # A to_attr has been given for the prefetch.
                setattr(obj, to_attr, val)
            elif is_descriptor:
                # cache_name points to a field name in obj.
                # This field is a descriptor for a related object.
                setattr(obj, cache_name, val)
            else:
                # No to_attr has been given for this prefetch operation and the
                # cache_name does not point to a descriptor. Store the value of
                # the field in the object's field cache.
                obj._state.fields_cache[cache_name] = val
        else:
            if as_attr:
                setattr(obj, to_attr, vals)
            else:
                manager = getattr(obj, to_attr)
                if leaf and lookup.queryset is not None:
                    qs = manager._apply_rel_filters(lookup.queryset)
                else:
                    qs = manager.get_queryset()
                qs._result_cache = vals
                # We don't want the individual qs doing prefetch_related now,
                # since we have merged this into the current work.
                qs._prefetch_done = True
                obj._prefetched_objects_cache[cache_name] = qs
    return all_related_objects, additional_lookups
class RelatedPopulator:
    """
    RelatedPopulator is used for select_related() object instantiation.

    The idea is that each select_related() model will be populated by a
    different RelatedPopulator instance. The RelatedPopulator instances get
    klass_info and select (computed in SQLCompiler) plus the used db as
    input for initialization. That data is used to compute which columns
    to use, how to instantiate the model, and how to populate the links
    between the objects.

    The actual creation of the objects is done in populate() method. This
    method gets row and from_obj as input and populates the select_related()
    model instance.
    """

    def __init__(self, klass_info, select, db, fetch_mode):
        """Precompute everything needed to build one related instance per row."""
        self.db = db
        self.fetch_mode = fetch_mode
        # Pre-compute needed attributes. The attributes are:
        #  - model_cls: the possibly deferred model class to instantiate
        #  - either:
        #    - cols_start, cols_end: usually the columns in the row are
        #      in the same order model_cls.__init__ expects them, so we
        #      can instantiate by model_cls(*row[cols_start:cols_end])
        #    - reorder_for_init: When select_related descends to a child
        #      class, then we want to reuse the already selected parent
        #      data. However, in this case the parent data isn't necessarily
        #      in the same order that Model.__init__ expects it to be, so
        #      we have to reorder the parent data. The reorder_for_init
        #      attribute contains a function used to reorder the field data
        #      in the order __init__ expects it.
        #  - pk_idx: the index of the primary key field in the reordered
        #    model data. Used to check if a related object exists at all.
        #  - init_list: the field attnames fetched from the database. For
        #    deferred models this isn't the same as all attnames of the
        #    model's fields.
        #  - related_populators: a list of RelatedPopulator instances if
        #    select_related() descends to related models from this model.
        #  - local_setter, remote_setter: Methods to set cached values on
        #    the object being populated and on the remote object. Usually
        #    these are Field.set_cached_value() methods.
        select_fields = klass_info["select_fields"]
        from_parent = klass_info["from_parent"]
        if not from_parent:
            # Columns arrive in __init__ order: a contiguous slice suffices.
            self.cols_start = select_fields[0]
            self.cols_end = select_fields[-1] + 1
            self.init_list = [
                f[0].target.attname for f in select[self.cols_start : self.cols_end]
            ]
            self.reorder_for_init = None
        else:
            # Reuse already-selected parent columns; build a reorder function
            # mapping row positions into __init__ order.
            attname_indexes = {
                select[idx][0].target.attname: idx for idx in select_fields
            }
            model_init_attnames = (
                f.attname for f in klass_info["model"]._meta.concrete_fields
            )
            self.init_list = [
                attname for attname in model_init_attnames if attname in attname_indexes
            ]
            self.reorder_for_init = operator.itemgetter(
                *[attname_indexes[attname] for attname in self.init_list]
            )

        self.model_cls = klass_info["model"]
        # A primary key must have all of its constituents not-NULL as
        # NULL != NULL and thus NULL cannot be referenced through a foreign
        # relationship. Therefore checking for a single member of the primary
        # key is enough to determine if the referenced object exists or not.
        self.pk_idx = self.init_list.index(self.model_cls._meta.pk_fields[0].attname)
        self.related_populators = get_related_populators(
            klass_info, select, self.db, fetch_mode
        )
        self.local_setter = klass_info["local_setter"]
        self.remote_setter = klass_info["remote_setter"]

    def populate(self, row, from_obj):
        """Instantiate the related object from ``row`` and link it to ``from_obj``."""
        if self.reorder_for_init:
            obj_data = self.reorder_for_init(row)
        else:
            obj_data = row[self.cols_start : self.cols_end]
        # A NULL primary key means the related row does not exist.
        if obj_data[self.pk_idx] is None:
            obj = None
        else:
            obj = self.model_cls.from_db(
                self.db,
                self.init_list,
                obj_data,
                fetch_mode=self.fetch_mode,
            )
            # Recurse into deeper select_related() levels for this object.
            for rel_iter in self.related_populators:
                rel_iter.populate(row, obj)
        self.local_setter(from_obj, obj)
        if obj is not None:
            self.remote_setter(obj, from_obj)
def get_related_populators(klass_info, select, db, fetch_mode):
    """Return one RelatedPopulator per related klass_info entry (may be empty)."""
    iterators = []
    related_klass_infos = klass_info.get("related_klass_infos", [])
    for rel_klass_info in related_klass_infos:
        rel_cls = RelatedPopulator(rel_klass_info, select, db, fetch_mode)
        iterators.append(rel_cls)
    return iterators
"""
Expressions
-----------
Offer fast expression evaluation through numexpr
"""
import warnings
import numpy as np
import pandas.core.common as com
from pandas.core.computation.check import _NUMEXPR_INSTALLED
from pandas.core.config import get_option
if _NUMEXPR_INSTALLED:
import numexpr as ne
_TEST_MODE = None
_TEST_RESULT = None
_USE_NUMEXPR = _NUMEXPR_INSTALLED
_evaluate = None
_where = None
# the set of dtypes that we will allow pass to numexpr
_ALLOWED_DTYPES = {
'evaluate': {'int64', 'int32', 'float64', 'float32', 'bool'},
'where': {'int64', 'float64', 'bool'}
}
# the minimum prod shape that we will use numexpr
_MIN_ELEMENTS = 10000
def set_use_numexpr(v=True):
    """Enable or disable numexpr usage and rebind the module dispatchers."""
    # Flip the flag only when numexpr is actually importable.
    global _USE_NUMEXPR
    if _NUMEXPR_INSTALLED:
        _USE_NUMEXPR = v

    # Rebind the module-level dispatch targets to match the new setting.
    global _evaluate, _where
    _evaluate = _evaluate_numexpr if _USE_NUMEXPR else _evaluate_standard
    _where = _where_numexpr if _USE_NUMEXPR else _where_standard
def set_numexpr_threads(n=None):
    """Set numexpr's worker-thread count; auto-detect cores when ``n`` is None."""
    # Only meaningful when numexpr is installed and currently enabled.
    if not (_NUMEXPR_INSTALLED and _USE_NUMEXPR):
        return
    ne.set_num_threads(n if n is not None else ne.detect_number_of_cores())
def _evaluate_standard(op, op_str, a, b, **eval_kwargs):
    """ standard evaluation """
    # Record that numexpr was *not* used when test bookkeeping is enabled.
    if _TEST_MODE:
        _store_test_result(False)
    # Suppress numpy floating-point warnings (overflow, invalid, ...) for the op.
    with np.errstate(all='ignore'):
        return op(a, b)
def _can_use_numexpr(op, op_str, a, b, dtype_check):
    """ return a boolean if we WILL be using numexpr """
    if op_str is not None:

        # required min elements (otherwise we are adding overhead)
        if np.prod(a.shape) > _MIN_ELEMENTS:

            # check for dtype compatibility
            dtypes = set()
            for o in [a, b]:
                if hasattr(o, 'get_dtype_counts'):
                    s = o.get_dtype_counts()
                    # Mixed-dtype objects are not handed to numexpr.
                    if len(s) > 1:
                        return False
                    dtypes |= set(s.index)
                elif isinstance(o, np.ndarray):
                    dtypes |= {o.dtype.name}

            # allowed are a superset; an empty dtype set also passes
            if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:
                return True

    return False
def _evaluate_numexpr(op, op_str, a, b, truediv=True,
                      reversed=False, **eval_kwargs):
    """Evaluate ``a <op> b`` via numexpr when profitable, else fall back."""
    result = None

    if _can_use_numexpr(op, op_str, a, b, 'evaluate'):
        try:

            # we were originally called by a reversed op
            # method
            if reversed:
                a, b = b, a

            # Unwrap pandas objects to their ndarray values when present.
            a_value = getattr(a, "values", a)
            b_value = getattr(b, "values", b)
            result = ne.evaluate('a_value {op} b_value'.format(op=op_str),
                                 local_dict={'a_value': a_value,
                                             'b_value': b_value},
                                 casting='safe', truediv=truediv,
                                 **eval_kwargs)
        except ValueError as detail:
            # NOTE(review): any ValueError is swallowed here (result stays
            # None and the standard path runs below); the message check has
            # no effect — confirm whether non-matching ValueErrors should
            # propagate instead.
            if 'unknown type object' in str(detail):
                pass

    if _TEST_MODE:
        _store_test_result(result is not None)

    # Fall back to plain numpy evaluation when numexpr was skipped or failed.
    if result is None:
        result = _evaluate_standard(op, op_str, a, b)

    return result
def _where_standard(cond, a, b):
    # Plain numpy where(); values_from_object unwraps pandas objects first.
    return np.where(com.values_from_object(cond), com.values_from_object(a),
                    com.values_from_object(b))
def _where_numexpr(cond, a, b):
    """where() via numexpr when profitable, else the numpy fallback."""
    result = None

    if _can_use_numexpr(None, 'where', a, b, 'where'):
        try:
            # Unwrap pandas objects to their ndarray values when present.
            cond_value = getattr(cond, 'values', cond)
            a_value = getattr(a, 'values', a)
            b_value = getattr(b, 'values', b)
            result = ne.evaluate('where(cond_value, a_value, b_value)',
                                 local_dict={'cond_value': cond_value,
                                             'a_value': a_value,
                                             'b_value': b_value},
                                 casting='safe')
        except ValueError as detail:
            # NOTE(review): ValueError is swallowed regardless of the message
            # (result stays None; fallback below) — confirm intended.
            if 'unknown type object' in str(detail):
                pass
        except Exception as detail:
            # Any other numexpr failure is surfaced as a TypeError.
            raise TypeError(str(detail))

    if result is None:
        result = _where_standard(cond, a, b)

    return result
# turn myself on
set_use_numexpr(get_option('compute.use_numexpr'))
def _has_bool_dtype(x):
try:
return x.dtype == bool
except AttributeError:
try:
return 'bool' in x.dtypes
except AttributeError:
return isinstance(x, (bool, np.bool_))
def _bool_arith_check(op_str, a, b, not_allowed=frozenset(('/', '//', '**')),
                      unsupported=None):
    """Check whether ``op_str`` may be evaluated on boolean operands.

    Returns False (caller must evaluate in Python space) for operators that
    numexpr does not support on bools, and raises NotImplementedError for
    operators that are disallowed on bools entirely.
    """
    if unsupported is None:
        # Arithmetic ops and their logical equivalents for bool operands.
        unsupported = {'+': '|', '*': '&', '-': '^'}

    if _has_bool_dtype(a) and _has_bool_dtype(b):
        if op_str in unsupported:
            warnings.warn("evaluating in Python space because the {op!r} "
                          "operator is not supported by numexpr for "
                          "the bool dtype, use {alt_op!r} instead"
                          .format(op=op_str, alt_op=unsupported[op_str]))
            return False

        if op_str in not_allowed:
            raise NotImplementedError("operator {op!r} not implemented for "
                                      "bool dtypes".format(op=op_str))
    return True
def evaluate(op, op_str, a, b, use_numexpr=True,
             **eval_kwargs):
    """ evaluate and return the expression of the op on a and b

        Parameters
        ----------
        op :    the actual operand
        op_str: the string version of the op
        a :     left operand
        b :     right operand
        use_numexpr : whether to try to use numexpr (default True)
        """
    # Boolean operands may force Python-space evaluation (or raise).
    use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b)
    if use_numexpr:
        # _evaluate is bound to the numexpr or standard path by
        # set_use_numexpr().
        return _evaluate(op, op_str, a, b, **eval_kwargs)
    return _evaluate_standard(op, op_str, a, b)
def where(cond, a, b, use_numexpr=True):
    """ evaluate the where condition cond on a and b

        Parameters
        ----------
        cond : a boolean array
        a :    return if cond is True
        b :    return if cond is False
        use_numexpr : whether to try to use numexpr (default True)
        """
    # _where is bound to the numexpr or standard path by set_use_numexpr().
    if use_numexpr:
        return _where(cond, a, b)
    return _where_standard(cond, a, b)
def set_test_mode(v=True):
    """
    Keeps track of whether numexpr was used. Stores an additional ``True``
    for every successful use of evaluate with numexpr since the last
    ``get_test_result``
    """
    # Enabling test mode also resets the accumulated results.
    global _TEST_MODE, _TEST_RESULT
    _TEST_MODE = v
    _TEST_RESULT = []
def _store_test_result(used_numexpr):
    # Only successful numexpr evaluations are recorded (see set_test_mode).
    global _TEST_RESULT
    if used_numexpr:
        _TEST_RESULT.append(used_numexpr)
def get_test_result():
    """get test result and reset test_results"""
    # Swap-and-reset so concurrent appends go to the fresh list.
    global _TEST_RESULT
    res = _TEST_RESULT
    _TEST_RESULT = []
    return res
/*-------------------------------------------------------------------------
*
* pg_config.c
* Expose same output as pg_config except as an SRF
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/utils/misc/pg_config.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "common/config_info.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "utils/builtins.h"
/*
 * pg_config
 *		SQL-callable set-returning function exposing the same name/setting
 *		pairs that the pg_config command-line tool prints.
 */
Datum
pg_config(PG_FUNCTION_ARGS)
{
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	ConfigData *configdata;
	size_t		configdata_len;
	size_t		i;				/* size_t, matching configdata_len, to avoid
								 * a signed/unsigned comparison in the loop */

	/* initialize our tuplestore */
	InitMaterializedSRF(fcinfo, 0);

	configdata = get_configdata(my_exec_path, &configdata_len);
	for (i = 0; i < configdata_len; i++)
	{
		Datum		values[2];
		bool		nulls[2];

		memset(values, 0, sizeof(values));
		memset(nulls, 0, sizeof(nulls));

		/* one (name, setting) text pair per row */
		values[0] = CStringGetTextDatum(configdata[i].name);
		values[1] = CStringGetTextDatum(configdata[i].setting);

		tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
	}

	return (Datum) 0;
}
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.benchmark.swisshash;
import org.elasticsearch.common.breaker.NoopCircuitBreaker;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.swisshash.LongSwissHash;
import org.elasticsearch.swisshash.SwissHashFactory;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.function.LongConsumer;
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 1, jvmArgsPrepend = { "--add-modules=jdk.incubator.vector" })
@State(Scope.Thread)
public class LongSwissHashBenchmark {

    static {
        LogConfigurator.configureESLogging(); // native access requires logging to be initialized
    }

    /** Number of keys inserted per iteration. */
    @Param({ "1000", "10000", "100000", "1000000", "10000000" })
    int cardinality;

    /** Key distribution: uniform random, hot-set duplicates, or shared-high-bits collisions. */
    @Param({ "uniform", "duplicates", "collision" })
    String distribution;

    long[] keys;
    LongSwissHash swiss;
    LongHash legacy;

    @Setup(Level.Iteration)
    public void setup() {
        // Fresh keys and fresh (empty) tables each iteration so measurements
        // are not skewed by previously-filled tables.
        keys = null;
        keys = generate(distribution, cardinality);

        BigArrays bigArrays = BigArrays.NON_RECYCLING_INSTANCE;
        PageCacheRecycler recycler = PageCacheRecycler.NON_RECYCLING_INSTANCE;
        NoopCircuitBreaker breaker = new NoopCircuitBreaker("dummy");
        swiss = SwissHashFactory.getInstance().newLongSwissHash(recycler, breaker);
        legacy = new LongHash(1, bigArrays);
    }

    /**
     * Build Swiss table completely, then iterate.
     * Mirrors STATS build -> finalize -> output.
     */
    @Benchmark
    public long swissBuildThenIterate(Blackhole bh) {
        return swissBuildThenIterateImpl(bh::consume);
    }

    long swissBuildThenIterateImpl(LongConsumer bh) {
        for (long v : keys) {
            swiss.add(v);
        }
        // Feed every stored key through the consumer so the iteration
        // cannot be optimized away.
        for (int i = 0; i < swiss.size(); i++) {
            bh.accept(swiss.get(i));
        }
        return swiss.size();
    }

    /**
     * Same for legacy hash table.
     */
    @Benchmark
    public long legacyBuildThenIterate(Blackhole bh) {
        return legacyBuildThenIterateImpl(bh::consume);
    }

    long legacyBuildThenIterateImpl(LongConsumer bh) {
        for (long v : keys) {
            legacy.add(v);
        }
        for (int i = 0; i < legacy.size(); i++) {
            bh.accept(legacy.get(i));
        }
        return legacy.size();
    }

    /** Generate {@code size} keys following the named distribution. */
    private long[] generate(String dist, int size) {
        ThreadLocalRandom r = ThreadLocalRandom.current();
        long[] out = new long[size];
        switch (dist) {
            case "uniform":
                for (int i = 0; i < size; i++) {
                    out[i] = r.nextLong();
                }
                break;
            case "duplicates":
                // 80% of keys come from a small "hot" set
                int hotSet = Math.max(32, Math.min(1000, size / 50)); // ~2% of cardinality
                long[] hot = new long[hotSet];
                for (int i = 0; i < hotSet; i++) {
                    hot[i] = r.nextLong();
                }
                for (int i = 0; i < size; i++) {
                    if (r.nextInt(10) < 8) { // 80% duplicates
                        out[i] = hot[r.nextInt(hotSet)];
                    } else { // 20% random noise
                        out[i] = r.nextLong();
                    }
                }
                break;
            case "collision":
                // Force collisions by clamping top bits so BitMixer mixes poorly
                final long seed = 0xABCDEFL;
                for (int i = 0; i < size; i++) {
                    out[i] = seed | ((long) i & 0xFFFF); // all share same high bits
                }
                break;
            default:
                throw new IllegalArgumentException("unknown distribution: " + dist);
        }
        return out;
    }
}
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test ExistingCloud."""
import jsonschema
from rally import consts
from rally.deployment import engine as deploy_engine
from rally.deployment.engines import existing
from tests.unit import test
class TestExistingCloud(test.TestCase):
    """Unit tests for the ExistingCloud deployment engine."""

    def setUp(self):
        super(TestExistingCloud, self).setUp()
        # Minimal valid ExistingCloud deployment config shared by all tests.
        self.deployment = {
            "config": {
                "type": "ExistingCloud",
                "auth_url": "http://example.net:5000/v2.0/",
                "region_name": "RegionOne",
                "endpoint_type": consts.EndpointType.INTERNAL,
                "https_insecure": False,
                "https_cacert": None,
                "admin": {
                    "username": "admin",
                    "password": "myadminpass",
                    "tenant_name": "demo",
                    "domain_name": None,
                    "project_domain_name": "Default",
                    "user_domain_name": "Default",
                    "admin_domain_name": "Default",
                }
            }
        }

    def test_init(self):
        # Construction with a valid config must not raise.
        existing.ExistingCloud(self.deployment)

    def test_invalid_config(self):
        # "admin" must be a mapping; a scalar should fail schema validation.
        self.deployment["config"]["admin"] = 42
        engine = existing.ExistingCloud(self.deployment)
        self.assertRaises(jsonschema.ValidationError,
                          engine.validate)

    def test_deploy(self):
        engine = existing.ExistingCloud(self.deployment)
        endpoints = engine.deploy()
        # deploy() surfaces the admin credentials as the admin endpoint and
        # creates no per-user endpoints.
        admin_endpoint = self.deployment["config"].copy()
        admin_endpoint.pop("type")
        admin_endpoint["endpoint"] = None
        admin_endpoint.update(admin_endpoint.pop("admin"))
        self.assertEqual(admin_endpoint, endpoints["admin"].to_dict())
        self.assertEqual([], endpoints["users"])

    def test_cleanup(self):
        # Nothing to clean up for an existing cloud; must not raise.
        existing.ExistingCloud(self.deployment).cleanup()

    def test_is_in_factory(self):
        # The engine must be discoverable by name through the factory.
        name = self.deployment["config"]["type"]
        engine = deploy_engine.Engine.get_engine(name,
                                                 self.deployment)
        self.assertIsInstance(engine, existing.ExistingCloud)
# Copyright 2015 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`lib_packet_ext_traceroute_test` --- lib.packet.ext.traceroute unit tests
==============================================================================
"""
# Stdlib
from unittest.mock import patch, call
# External packages
import nose
import nose.tools as ntools
# SCION
from lib.packet.ext.traceroute import TracerouteExt
from test.testcommon import assert_these_calls, create_mock
class TestTracerouteExtParse(object):
    """
    Unit tests for lib.packet.ext.traceroute.TracerouteExt._parse
    """
    @patch("lib.packet.ext.traceroute.ISD_AS", autospec=True)
    @patch("lib.packet.ext.traceroute.HopByHopExtension._parse", autospec=True)
    @patch("lib.packet.ext.traceroute.Raw", autospec=True)
    def test(self, raw, super_parse, isd_as):
        inst = TracerouteExt()
        inst.append_hop = create_mock()
        data = create_mock(["pop"])
        # NOTE(review): first pop appears to consume header padding, then
        # two (raw ISD-AS, 2x uint16 bytes) pairs follow — confirm
        # against TracerouteExt._parse.
        data.pop.side_effect = (
            None,
            "isd as 1", bytes.fromhex('1111 2222'),
            "isd as 2", bytes.fromhex('3333 4444'),
        )
        raw.return_value = data
        isd_as.LEN = 4
        isd_as.side_effect = "1-11", "2-22"
        # Total length for a header carrying two hops.
        dlen = inst.MIN_LEN + 2 * inst.HOP_LEN
        arg = bytes([2]) + bytes(dlen - 1)
        # Call
        inst._parse(arg)
        # Tests
        raw.assert_called_once_with(arg, "TracerouteExt", dlen, min_=True)
        super_parse.assert_called_once_with(inst, data)
        assert_these_calls(isd_as, (call("isd as 1"), call("isd as 2")))
        # Each parsed hop is appended as (isd_as, value1, value2).
        assert_these_calls(inst.append_hop, (
            call("1-11", 0x1111, 0x2222),
            call("2-22", 0x3333, 0x4444),
        ))
class TestTracerouteExtPack(object):
    """
    Unit tests for lib.packet.ext.traceroute.TracerouteExt.pack
    """
    def test(self):
        inst = TracerouteExt()
        inst._check_len = create_mock()
        inst._hdr_len = 2
        # Two hops whose ISD-AS mocks pack to fixed byte strings.
        isd_as_1_2 = create_mock(["pack"])
        isd_as_1_2.pack.return_value = b"1-2"
        isd_as_5_6 = create_mock(["pack"])
        isd_as_5_6.pack.return_value = b"5-6"
        inst.hops = [(isd_as_1_2, 3, 4), (isd_as_5_6, 7, 8)]
        # Expected layout: hop-count byte, padding, then per hop the packed
        # ISD-AS followed by two big-endian uint16 values.
        expected = b"".join((
            b'\x02', bytes(inst.PADDING_LEN), b'1-2',
            bytes.fromhex('0003 0004'), b'5-6',
            bytes.fromhex('0007 0008')))
        # Call
        ntools.eq_(inst.pack(), expected)
        # Tests
        inst._check_len.assert_called_once_with(expected)
class TestTracerouteExtAppendHop(object):
    """
    Unit tests for lib.packet.ext.traceroute.TracerouteExt.append_hop
    """
    def test(self):
        inst = TracerouteExt()
        inst.hops = [1]
        inst._hdr_len = 2
        # Call
        inst.append_hop("3-4", 5, 6)
        # Tests
        # The new hop is appended as a single (isd_as, val1, val2) tuple.
        ntools.eq_(inst.hops, [1, ("3-4", 5, 6)])
if __name__ == "__main__":
nose.run(defaultTest=__name__) | unknown | codeparrot/codeparrot-clean | ||
# Tutorial script: apply gensim transformations (tf-idf, LSI, RP, LDA, HDP)
# to the dictionary/corpus persisted by the first gensim tutorial.
# NOTE(review): Python 2 syntax (`print doc` statements) — will not run
# unmodified on Python 3.
import os
from gensim import corpora, models, similarities
from pprint import pprint # pretty-printer
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',level=logging.INFO)

# Reuse the persisted artifacts from tutorial 1, if present.
if (os.path.exists("/tmp/deerwester.dict")):
    dictionary = corpora.Dictionary.load('/tmp/deerwester.dict')
    corpus = corpora.MmCorpus('/tmp/deerwester.mm')
    print("Used files generated from first tutorial")
else:
    print("Please run first tutorial to generate data set")

# NOTE(review): when the artifacts are missing, `corpus`/`dictionary` are
# undefined below and the script fails with NameError — confirm intended.
pprint(corpus.__dict__)

tfidf = models.TfidfModel(corpus) # step 1 -- initialize a model
pprint(tfidf.__dict__)

# Transform a single bag-of-words vector.
doc_bow = [(0, 1), (1, 1)]
print(tfidf[doc_bow]) # step 2 -- use the model to transform vectors

# Wrapping the whole corpus is lazy; transformation happens on iteration.
corpus_tfidf = tfidf[corpus]
for doc in corpus_tfidf:
    print doc

lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=2) # initialize an LSI transformation
pprint(lsi.__dict__)
pprint(lsi.id2word.__dict__)
corpus_lsi = lsi[corpus_tfidf] # create a double wrapper over the original corpus: bow->tfidf->fold-in-lsi
pprint(corpus_lsi.__dict__)
lsi.print_topics(2)
for doc in corpus_lsi: # both bow->tfidf and tfidf->lsi transformations are actually executed here, on the fly
    print doc

# Persist and reload the LSI model.
lsi.save('/tmp/model.lsi') # same for tfidf, lda, ...
lsi = models.LsiModel.load('/tmp/model.lsi')

# Showcase the other transformation models and their internal state.
model = models.TfidfModel(corpus, normalize=True)
print model.__dict__
model = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=300)
print model.__dict__
model = models.RpModel(corpus_tfidf, num_topics=500)
print model.__dict__
model = models.LdaModel(corpus, id2word=dictionary, num_topics=100)
print model.__dict__
import sys

from . import main

# Run the venv CLI; report any failure on stderr and exit non-zero.
try:
    main()
except Exception as err:
    print('Error:', err, file=sys.stderr)
    sys.exit(1)
sys.exit(0)
from __future__ import print_function
import IMP
import IMP.test
import IMP.core
import IMP.gsl
class WoodsFunc(IMP.Restraint):
    """Woods function for four input values, defined as an IMP restraint"""

    def __init__(self, model, particles):
        IMP.Restraint.__init__(self, model, "WoodsFunc%1%")
        self.particles = particles
        # Each particle stores its scalar coordinate under the "x" key.
        self.index = IMP.FloatKey("x")

    def do_show(self, junk):
        print("Woods function")

    def get_version_info(self):
        return IMP.VersionInfo("Daniel Russel", "0.5")

    def unprotected_evaluate(self, accum):
        """Return the Woods function value; add analytic derivatives to accum."""
        (x1, x2, x3, x4) = [p.get_value(self.index) for p in self.particles]
        a = x2 - x1 * x1
        b = x4 - x3 * x3
        e = 100.0 * a * a + (1.0 - x1) ** 2 + 90.0 * b * b + (1.0 - x3) ** 2 \
            + 10.1 * ((x2 - 1.0) ** 2 + (x4 - 1.0) ** 2) \
            + 19.8 * (x2 - 1.0) * (x4 - 1.0)
        if accum:
            # Partial derivatives of e with respect to x1..x4, in order.
            dx = [-2.0 * (200.0 * x1 * a + 1.0 - x1),
                  2.0 * (100.0 * a + 10.1 * (x2 - 1.0) + 9.9 * (x4 - 1.0)),
                  -2.0 * (180.0 * x3 * b + 1.0 - x3),
                  2.0 * (90.0 * b + 10.1 * (x4 - 1.0) + 9.9 * (x2 - 1.0))]
            for (p, d) in zip(self.particles, dx):
                p.add_to_derivative(self.index, d, accum)
        return e

    def do_get_inputs(self):
        return self.particles
class Tests(IMP.test.TestCase):

    def test_cg_woods_func(self):
        """Check that we can optimize the Woods function with CG"""
        # Two starting points: the classic Woods start and a farther one.
        self._test_starting_conditions((-3.0, -1.0, -3.0, -1.0))
        self._test_starting_conditions((2.0, 3.0, 8.0, -5.0))

    def _test_starting_conditions(self, starting_values):
        """Test the optimizer with given starting conditions"""
        model = IMP.Model()
        particles = []
        # One particle per coordinate, each holding an optimizable "x".
        for value in starting_values:
            p = IMP.Particle(model)
            particles.append(p)
            p.add_attribute(IMP.FloatKey("x"), value, True)
        rsr = WoodsFunc(model, particles)
        opt = IMP.gsl.ConjugateGradients(model)
        opt.set_scoring_function(rsr)
        e = opt.optimize(500)
        # The Woods function has its global minimum 0 at (1, 1, 1, 1).
        for p in particles:
            val = p.get_value(IMP.FloatKey("x"))
            self.assertAlmostEqual(val, 1.0, places=1)
        self.assertAlmostEqual(e, 0.0, places=2)
if __name__ == '__main__':
IMP.test.main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
import os
from future.moves.urllib.parse import urljoin
from django.db import models
import markupsafe
import gitlab
from addons.base import exceptions
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from addons.gitlab import utils
from addons.gitlab.api import GitLabClient
from addons.gitlab.serializer import GitLabSerializer
from addons.gitlab import settings as gitlab_settings
from addons.gitlab.exceptions import ApiError, NotFoundError, GitLabError
from framework.auth import Auth
from osf.models.files import File, Folder, BaseFileNode
from website import settings
from website.util import web_url_for
hook_domain = gitlab_settings.HOOK_DOMAIN or settings.DOMAIN
class GitLabFileNode(BaseFileNode):
    # Storage-provider identifier shared by all GitLab file nodes.
    _provider = 'gitlab'
class GitLabFolder(GitLabFileNode, Folder):
    # Folder variant; no GitLab-specific behavior beyond the bases.
    pass
class GitLabFile(GitLabFileNode, File):
    # File versions are addressed by commit SHA rather than a version id.
    version_identifier = 'commitSha'

    @property
    def _hashes(self):
        # Latest known commit SHA for this file, or None when there is no
        # usable history entry.
        try:
            return {'commit': self._history[-1]['extra']['commitSha']}
        except (IndexError, KeyError):
            return None

    def touch(self, auth_header, revision=None, ref=None, branch=None, **kwargs):
        # Accept revision/ref/branch interchangeably; first truthy value wins.
        revision = revision or ref or branch
        return super(GitLabFile, self).touch(auth_header, revision=revision, **kwargs)
class GitLabProvider(object):
    """Provider descriptor for the GitLab add-on (name, serializer, account)."""
    name = 'GitLab'
    short_name = 'gitlab'

    serializer = GitLabSerializer

    def __init__(self, account=None):
        super(GitLabProvider, self).__init__()  # this does exactly nothing...
        # provide an unauthenticated session by default
        self.account = account

    def __repr__(self):
        # e.g. "<GitLabProvider: gitlab.example.com>" or "<GitLabProvider: anonymous>"
        return '<{name}: {status}>'.format(
            name=self.__class__.__name__,
            status=self.account.display_name if self.account else 'anonymous'
        )
class UserSettings(BaseOAuthUserSettings):
    """User-level GitLab add-on settings (holds the user's external accounts)."""
    oauth_provider = GitLabProvider
    serializer = GitLabSerializer
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
oauth_provider = GitLabProvider
serializer = GitLabSerializer
user = models.TextField(blank=True, null=True)
repo = models.TextField(blank=True, null=True)
repo_id = models.TextField(blank=True, null=True)
hook_id = models.TextField(blank=True, null=True)
hook_secret = models.TextField(blank=True, null=True)
user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)
@property
def folder_id(self):
return self.repo or None
@property
def folder_name(self):
if self.complete:
return '{}/{}'.format(self.user, self.repo)
return None
@property
def folder_path(self):
return self.repo or None
@property
def has_auth(self):
return bool(self.user_settings and self.user_settings.has_auth)
@property
def complete(self):
return self.has_auth and self.repo is not None and self.user is not None
def authorize(self, user_settings, save=False):
self.user_settings = user_settings
self.owner.add_log(
action='gitlab_node_authorized',
params={
'project': self.owner.parent_id,
'node': self.owner._id,
},
auth=Auth(user_settings.owner),
)
if save:
self.save()
def clear_settings(self):
self.user = None
self.repo = None
self.repo_id = None
self.hook_id = None
self.hook_secret = None
def deauthorize(self, auth=None, log=True):
self.delete_hook(save=False)
self.clear_settings()
if log:
self.owner.add_log(
action='gitlab_node_deauthorized',
params={
'project': self.owner.parent_id,
'node': self.owner._id,
},
auth=auth,
)
self.clear_auth()
def delete(self, save=False):
super(NodeSettings, self).delete(save=False)
self.deauthorize(log=False)
if save:
self.save()
@property
def repo_url(self):
if self.repo:
return 'https://{0}/{1}'.format(self.external_account.display_name, self.repo)
@property
def short_url(self):
if self.repo:
return self.repo
@property
def is_private(self):
connection = GitLabClient(external_account=self.external_account)
return connection.repo(self.repo_id).visibility == 'private'
def to_json(self, user):
ret = super(NodeSettings, self).to_json(user)
user_settings = user.get_addon('gitlab')
ret.update({
'user_has_auth': user_settings and user_settings.has_auth,
'is_registration': self.owner.is_registration,
})
if self.user_settings and self.user_settings.has_auth:
owner = self.user_settings.owner
connection = GitLabClient(external_account=self.external_account)
valid_credentials = True
try:
repos = [repo.attributes for repo in connection.repos(all=True)]
except GitLabError:
valid_credentials = False
if owner == user:
ret.update({'repos': repos})
ret.update({
'node_has_auth': True,
'gitlab_user': self.user or '',
'gitlab_repo': self.repo or '',
'gitlab_repo_id': self.repo_id if self.repo_id is not None else '0',
'gitlab_repo_full_name': '{0} / {1}'.format(self.user, self.repo) if (self.user and self.repo) else '',
'auth_osf_name': owner.fullname,
'auth_osf_url': owner.url,
'auth_osf_id': owner._id,
'gitlab_host': self.external_account.display_name,
'gitlab_user_name': self.external_account.display_name,
'gitlab_user_url': self.external_account.profile_url,
'is_owner': owner == user,
'valid_credentials': valid_credentials,
'addons_url': web_url_for('user_addons'),
'files_url': self.owner.web_url_for('collect_file_trees')
})
return ret
def serialize_waterbutler_credentials(self):
if not self.complete or not self.repo:
raise exceptions.AddonError('Addon is not authorized')
return {'token': self.external_account.oauth_key}
def serialize_waterbutler_settings(self):
if not self.complete:
raise exceptions.AddonError('Repo is not configured')
return {
'host': self.external_account.oauth_secret,
'owner': self.user,
'repo': self.repo,
'repo_id': self.repo_id
}
def create_waterbutler_log(self, auth, action, metadata):
path = metadata['path']
url = self.owner.web_url_for('addon_view_or_download_file', path=path, provider='gitlab')
if not metadata.get('extra'):
sha = None
urls = {}
else:
sha = metadata['extra']['fileSha']
urls = {
'view': '{0}?branch={1}'.format(url, sha),
'download': '{0}?action=download&branch={1}'.format(url, sha)
}
self.owner.add_log(
'gitlab_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': path,
'urls': urls,
'gitlab': {
'host': 'https://{0}'.format(self.external_account.display_name),
'user': self.user,
'repo': self.repo,
'sha': sha,
},
},
)
#############
# Callbacks #
#############
def before_page_load(self, node, user):
"""
:param Node node:
:param User user:
:return str: Alert message
"""
messages = []
# Quit if not contributor
if not node.is_contributor_or_group_member(user):
return messages
# Quit if not configured
if self.user is None or self.repo is None:
return messages
# Quit if no user authorization
if self.user_settings is None:
return messages
connect = GitLabClient(external_account=self.external_account)
try:
repo = connect.repo(self.repo_id)
except (ApiError, GitLabError):
return
except gitlab.exceptions.GitlabError as exc:
if exc.response_code == 403 and 'must accept the Terms of Service' in exc.error_message:
return [('Your gitlab account does not have proper authentication. Ensure you have agreed to Gitlab\'s '
'current Terms of Service by disabling and re-enabling your account.')]
else:
raise exc
# GitLab has visibility types: public, private, internal.
node_permissions = 'public' if node.is_public else 'private'
if repo.visibility != node_permissions:
message = (
'Warning: This OSF {category} is {node_perm}, but the GitLab '
'repo {user} / {repo} has {repo_perm} visibility.'.format(
category=markupsafe.escape(node.project_or_component),
node_perm=markupsafe.escape(node_permissions),
repo_perm=markupsafe.escape(repo.visibility),
user=markupsafe.escape(self.user),
repo=markupsafe.escape(self.repo),
)
)
if repo.visibility == 'private':
message += (
' Users can view the contents of this private GitLab '
'repository through this public project.'
)
else:
message += (
' The files in this GitLab repo can be viewed on GitLab '
'<u><a href="{url}">here</a></u>.'
).format(url=repo.http_url_to_repo)
messages.append(message)
return messages
def before_remove_contributor_message(self, node, removed):
"""
:param Node node:
:param User removed:
:return str: Alert message
"""
try:
message = (super(NodeSettings, self).before_remove_contributor_message(node, removed) +
'You can download the contents of this repository before removing '
'this contributor <u><a href="{url}">here</a></u>.'.format(
url=node.api_url + 'gitlab/tarball/'
))
except TypeError:
# super call returned None due to lack of user auth
return None
else:
return message
# backwards compatibility -- TODO: is this necessary?
before_remove_contributor = before_remove_contributor_message
def after_remove_contributor(self, node, removed, auth=None):
"""
:param Node node:
:param User removed:
:return str: Alert message
"""
if self.user_settings and self.user_settings.owner == removed:
# Delete OAuth tokens
self.user_settings = None
self.save()
message = (
u'Because the GitLab add-on for {category} "{title}" was authenticated '
u'by {user}, authentication information has been deleted.'
).format(
category=markupsafe.escape(node.category_display),
title=markupsafe.escape(node.title),
user=markupsafe.escape(removed.fullname)
)
if not auth or auth.user != removed:
url = node.web_url_for('node_setting')
message += (
u' You can re-authenticate on the <u><a href="{url}">Settings</a></u> page.'
).format(url=url)
#
return message
def after_fork(self, node, fork, user, save=True):
"""
:param Node node: Original node
:param Node fork: Forked node
:param User user: User creating fork
:param bool save: Save settings after callback
:return tuple: Tuple of cloned settings and alert message
"""
clone = super(NodeSettings, self).after_fork(
node, fork, user, save=False
)
# Copy authentication if authenticated by forking user
if self.user_settings and self.user_settings.owner == user:
clone.user_settings = self.user_settings
if save:
clone.save()
return clone
def before_make_public(self, node):
try:
is_private = self.is_private
except NotFoundError:
return None
if is_private:
return (
'This {cat} is connected to a private GitLab repository. Users '
'(other than contributors) will not be able to see the '
'contents of this repo unless it is made public on GitLab.'
).format(
cat=node.project_or_component,
)
def after_delete(self, user):
self.deauthorize(Auth(user=user), log=True)
#########
# Hooks #
#########
# TODO: Should Events be added here?
# TODO: Move hook logic to service
def add_hook(self, save=True):
if self.user_settings:
connect = GitLabClient(external_account=self.external_account)
secret = utils.make_hook_secret()
hook = connect.add_hook(
self.user, self.repo,
'web',
{
'url': urljoin(
hook_domain,
os.path.join(
self.owner.api_url, 'gitlab', 'hook/'
)
),
'content_type': gitlab_settings.HOOK_CONTENT_TYPE,
'secret': secret,
},
events=gitlab_settings.HOOK_EVENTS,
)
if hook:
self.hook_id = hook.id
self.hook_secret = secret
if save:
self.save()
def delete_hook(self, save=True):
"""
:return bool: Hook was deleted
"""
if self.user_settings and self.hook_id:
connection = GitLabClient(external_account=self.external_account)
try:
response = connection.delete_hook(self.user, self.repo, self.hook_id)
except (GitLabError, NotFoundError):
return False
if response:
self.hook_id = None
if save:
self.save()
return True
return False | unknown | codeparrot/codeparrot-clean | ||
import numpy
from chainer.backends import cuda
from chainer import function
from chainer.utils import type_check
class Contrastive(function.Function):
"""Contrastive loss function."""
def __init__(self, margin, reduce='mean'):
if margin <= 0:
raise ValueError("margin should be positive value.")
self.margin = margin
if reduce not in ('mean', 'no'):
raise ValueError(
"only 'mean' and 'no' are valid for 'reduce', but '%s' is "
'given' % reduce)
self.reduce = reduce
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x0_type, x1_type, y_type = in_types
type_check.expect(
x0_type.dtype == numpy.float32,
x1_type.dtype == numpy.float32,
y_type.dtype.kind == 'i',
x0_type.shape == x1_type.shape,
x1_type.shape[0] == y_type.shape[0],
x1_type.shape[0] > 0,
x0_type.ndim == 2,
x1_type.ndim == 2,
y_type.ndim == 1
)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
x0, x1, y = inputs
self.diff = x0 - x1
self.dist_sq = xp.sum(self.diff ** 2, axis=1)
self.dist = xp.sqrt(self.dist_sq)
self.mdist = self.margin - self.dist
dist = xp.maximum(self.mdist, 0)
loss = (y * self.dist_sq + (1 - y) * dist * dist) * .5
if self.reduce == 'mean':
loss = xp.sum(loss) / x0.shape[0]
return xp.array(loss, dtype=xp.float32),
def backward(self, inputs, gy):
xp = cuda.get_array_module(*inputs)
x0, x1, y = inputs
x_dim = x0.shape[1]
y = xp.repeat(y[:, None], x_dim, axis=1)
if self.reduce == 'mean':
alpha = gy[0] / y.shape[0]
else:
alpha = gy[0][:, None]
dist = xp.repeat(self.dist[:, None], x_dim, axis=1)
# avoid division by zero
dist = xp.maximum(dist, 1e-8)
# similar pair
gx0 = alpha * y * self.diff
# dissimilar pair
mdist = xp.maximum(xp.repeat(self.mdist[:, None], x_dim, axis=1), 0)
gx0 += alpha * (1 - y) * mdist * -(self.diff / dist)
gx0 = gx0.astype(xp.float32)
return gx0, -gx0, None
def contrastive(x0, x1, y, margin=1, reduce='mean'):
"""Computes contrastive loss.
It takes a pair of samples and a label as inputs.
The label is :math:`1` when those samples are similar,
or :math:`0` when they are dissimilar.
Let :math:`N` and :math:`K` denote mini-batch size and the dimension
of input variables, respectively. The shape of both input variables
``x0`` and ``x1`` should be ``(N, K)``.
The loss value of the :math:`n`-th sample pair :math:`L_n` is
.. math::
L_n = \\frac{1}{2} \\left( y_n d_n^2
+ (1 - y_n) \\max ({\\rm margin} - d_n, 0)^2 \\right)
where :math:`d_n = \\| {\\bf x_0}_n - {\\bf x_1}_n \\|_2`,
:math:`{\\bf x_0}_n` and :math:`{\\bf x_1}_n` are :math:`n`-th
K-dimensional vectors of ``x0`` and ``x1``.
The output is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds the elementwise
loss values. If it is ``'mean'``, this function takes a mean of
loss values.
Args:
x0 (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): The first input variable. The shape should be
(N, K), where N denotes the mini-batch size, and K denotes the
dimension of ``x0``.
x1 (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): The second input variable. The shape should be
the same as ``x0``.
y (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Labels. All values should be 0 or 1. The shape
should be ``(N,)``, where N denotes the mini-batch size.
margin (float): A parameter for contrastive loss. It should be positive
value.
reduce (str): Reduction option. Its value must be either
``'mean'`` or ``'no'``. Otherwise, :class:`ValueError` is raised.
Returns:
~chainer.Variable:
A variable holding the loss value(s) calculated by the
above equation.
If ``reduce`` is ``'no'``, the output variable holds array
whose shape is same as one of (hence both of) input variables.
If it is ``'mean'``, the output variable holds a scalar value.
.. note::
This cost can be used to train siamese networks. See `Learning a
Similarity Metric Discriminatively, with Application to Face
Verification <http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf>`_
for details.
.. admonition:: Example
>>> x0 = np.array([[-2.0, 3.0, 0.5], [5.0, 2.0, -0.5]]).\
astype(np.float32)
>>> x1 = np.array([[-1.0, 3.0, 1.0], [3.5, 0.5, -2.0]]).\
astype(np.float32)
>>> y = np.array([1, 0]).astype(np.int32)
>>> F.contrastive(x0, x1, y)
variable(0.3125)
>>> F.contrastive(x0, x1, y, margin=3.0) # harder penalty
variable(0.3528857)
>>> z = F.contrastive(x0, x1, y, reduce='no')
>>> z.shape
(2,)
>>> z.data
array([0.625, 0. ], dtype=float32)
"""
return Contrastive(margin, reduce)(x0, x1, y) | unknown | codeparrot/codeparrot-clean | ||
"""
After years of study, scientists have discovered an alien language transmitted from a faraway planet. The alien
language is very unique in that every word consists of exactly L lowercase letters. Also, there are exactly D words in
this language.
Once the dictionary of all the words in the alien language was built, the next breakthrough was to discover that the
aliens have been transmitting messages to Earth for the past decade. Unfortunately, these signals are weakened due to
the distance between our two planets and some of the words may be misinterpreted. In order to help them decipher these
messages, the scientists have asked you to devise an algorithm that will determine the number of possible
interpretations for a given pattern.
A pattern consists of exactly L tokens. Each token is either a single lowercase letter (the scientists are very sure
that this is the letter) or a group of unique lowercase letters surrounded by parenthesis ( and ). For example:
(ab)d(dc) means the first letter is either a or b, the second letter is definitely d and the last letter is either d or
c. Therefore, the pattern (ab)d(dc) can stand for either one of these 4 possibilities: add, adc, bdd, bdc.
Please note that sample i/p and o/p is given in the link below
Link [https://code.google.com/codejam/contest/90101/dashboard#s=p0]
"""
def extract(d):
L = int(d[0])
D = int(d[1])
N = int(d[2])
d = d[3:]
w_list = d[:D]
inp = d[D:]
return L, D, N, w_list, inp
def separate(l):
"""
Sweeps through l from left to right and separates the format into a list of three.
If parens found, goes into collection mode before adding to result list
"""
tmp = ''
res = []
coll = False
for i in l:
# Collection mode enable/disable
if i == '(':
coll = True
tmp = ''
continue
elif i == ')':
coll = False
res.append(tmp)
continue
# if collection mode, add to temp, else directly append to result list
if coll:
tmp += i
else:
res.append(i)
return res
def compare(length, i_list, w_list):
n = 0
for w in w_list:
for m in range(length):
if w[m] not in i_list[m]:
break
else:
n += 1
return n
def main():
with open('A-large-practice.in') as f:
data = f.read().split()
L, D, N, w_list, inp = extract(data)
for n, i in enumerate(inp):
inp[n] = separate(i)
out = compare(L, inp[n], w_list)
print('Case #{}: {}'.format(n+1, out))
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
/* contrib/pageinspect/pageinspect--1.11--1.12.sql */
-- complain if script is sourced in psql, rather than via ALTER EXTENSION
\echo Use "ALTER EXTENSION pageinspect UPDATE TO '1.12'" to load this file. \quit
--
-- bt_multi_page_stats()
--
CREATE FUNCTION bt_multi_page_stats(IN relname text, IN blkno int8, IN blk_count int8,
OUT blkno int8,
OUT type "char",
OUT live_items int4,
OUT dead_items int4,
OUT avg_item_size int4,
OUT page_size int4,
OUT free_size int4,
OUT btpo_prev int8,
OUT btpo_next int8,
OUT btpo_level int8,
OUT btpo_flags int4)
RETURNS SETOF record
AS 'MODULE_PATHNAME', 'bt_multi_page_stats'
LANGUAGE C STRICT PARALLEL RESTRICTED;
--
-- add information about BRIN empty ranges
--
DROP FUNCTION brin_page_items(IN page bytea, IN index_oid regclass);
CREATE FUNCTION brin_page_items(IN page bytea, IN index_oid regclass,
OUT itemoffset int,
OUT blknum int8,
OUT attnum int,
OUT allnulls bool,
OUT hasnulls bool,
OUT placeholder bool,
OUT empty bool,
OUT value text)
RETURNS SETOF record
AS 'MODULE_PATHNAME', 'brin_page_items'
LANGUAGE C STRICT PARALLEL RESTRICTED; | sql | github | https://github.com/postgres/postgres | contrib/pageinspect/pageinspect--1.11--1.12.sql |
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"context"
"crypto/tls"
"errors"
"fmt"
"math"
"net"
"net/http"
"os"
"path/filepath"
sysruntime "runtime"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
inuserns "github.com/moby/sys/userns"
"github.com/opencontainers/selinux/go-selinux"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
"go.opentelemetry.io/otel/trace"
"k8s.io/client-go/informers"
ndf "k8s.io/component-helpers/nodedeclaredfeatures"
ndffeatures "k8s.io/component-helpers/nodedeclaredfeatures/features"
"k8s.io/mount-utils"
apiequality "k8s.io/apimachinery/pkg/api/equality"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
netutils "k8s.io/utils/net"
"k8s.io/utils/ptr"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
versionutil "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/server/flagz"
utilfeature "k8s.io/apiserver/pkg/util/feature"
coreinformersv1 "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/certificate"
"k8s.io/client-go/util/flowcontrol"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/component-base/version"
"k8s.io/component-helpers/apimachinery/lease"
resourcehelper "k8s.io/component-helpers/resource"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
remote "k8s.io/cri-client/pkg"
"k8s.io/klog/v2"
pluginwatcherapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/allocation"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1"
"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
kubeletcertificate "k8s.io/kubernetes/pkg/kubelet/certificate"
"k8s.io/kubernetes/pkg/kubelet/clustertrustbundle"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/configmap"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/eviction"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig"
"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/logs"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/metrics/collectors"
"k8s.io/kubernetes/pkg/kubelet/network/dns"
"k8s.io/kubernetes/pkg/kubelet/nodeshutdown"
oomwatcher "k8s.io/kubernetes/pkg/kubelet/oom"
"k8s.io/kubernetes/pkg/kubelet/pleg"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager"
plugincache "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/kubelet/podcertificate"
"k8s.io/kubernetes/pkg/kubelet/preemption"
"k8s.io/kubernetes/pkg/kubelet/prober"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/runtimeclass"
"k8s.io/kubernetes/pkg/kubelet/secret"
"k8s.io/kubernetes/pkg/kubelet/server"
servermetrics "k8s.io/kubernetes/pkg/kubelet/server/metrics"
serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
"k8s.io/kubernetes/pkg/kubelet/stats"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/pkg/kubelet/token"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/userns"
"k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/kubelet/util/manager"
"k8s.io/kubernetes/pkg/kubelet/util/queue"
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
"k8s.io/kubernetes/pkg/kubelet/volumemanager"
"k8s.io/kubernetes/pkg/kubelet/watchdog"
httpprobe "k8s.io/kubernetes/pkg/probe/http"
"k8s.io/kubernetes/pkg/security/apparmor"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
"k8s.io/utils/clock"
)
const (
	// maxWaitForContainerRuntime is the max amount of time to wait for the container runtime to come up.
	maxWaitForContainerRuntime = 30 * time.Second
	// nodeStatusUpdateRetry specifies how many times kubelet retries posting the node status when it failed.
	nodeStatusUpdateRetry = 5
	// nodeReadyGracePeriod is the period to allow for before fast status update is
	// terminated and container runtime not being ready is logged without verbosity guard.
	nodeReadyGracePeriod = 120 * time.Second
	// DefaultContainerLogsDir is the location of container logs.
	DefaultContainerLogsDir = "/var/log/containers"
	// MaxCrashLoopBackOff is the max backoff period for container restarts, exported for the e2e test.
	MaxCrashLoopBackOff = v1beta1.MaxContainerBackOff
	// reducedMaxCrashLoopBackOff is the default max backoff period for container restarts when the alpha feature
	// gate ReduceDefaultCrashLoopBackOffDecay is enabled.
	reducedMaxCrashLoopBackOff = 60 * time.Second
	// initialCrashLoopBackOff is the initial period for the exponential backoff for container restarts.
	initialCrashLoopBackOff = time.Second * 10
	// reducedInitialCrashLoopBackOff is the default initial backoff period for container restarts when the alpha feature
	// gate ReduceDefaultCrashLoopBackOffDecay is enabled.
	reducedInitialCrashLoopBackOff = 1 * time.Second
	// MaxImageBackOff is the max backoff period for image pulls, exported for the e2e test.
	MaxImageBackOff = 300 * time.Second
	// housekeepingPeriod is the period for performing global cleanup tasks.
	housekeepingPeriod = time.Second * 2
	// housekeepingWarningDuration is the duration at which housekeeping failed to satisfy the invariant that
	// housekeeping should be fast to avoid blocking pod config (while
	// housekeeping is running no new pods are started or deleted).
	housekeepingWarningDuration = time.Second * 1
	// runtimeCacheRefreshPeriod is the period after which the runtime cache expires - set to slightly longer than
	// the expected length between housekeeping periods, which explicitly refreshes
	// the cache.
	runtimeCacheRefreshPeriod = housekeepingPeriod + housekeepingWarningDuration
	// evictionMonitoringPeriod is the period for performing eviction monitoring.
	// Ensure this is kept in sync with internal cadvisor housekeeping.
	evictionMonitoringPeriod = time.Second * 10
	// linuxEtcHostsPath is the path in Linux containers' filesystems where the hosts file is mounted.
	linuxEtcHostsPath = "/etc/hosts"
	// windowsEtcHostsPath is the path in Windows containers' filesystems where the hosts file is mounted.
	windowsEtcHostsPath = "C:\\Windows\\System32\\drivers\\etc\\hosts"
	// plegChannelCapacity is the capacity of the channel for receiving pod lifecycle events. This number
	// is a bit arbitrary and may be adjusted in the future.
	plegChannelCapacity = 1000
	// Generic PLEG relies on relisting for discovering container events.
	// A longer period means that kubelet will take longer to detect container
	// changes and to update pod status. On the other hand, a shorter period
	// will cause more frequent relisting (e.g., container runtime operations),
	// leading to higher cpu usage.
	// Note that even though we set the period to 1s, the relisting itself can
	// take more than 1s to finish if the container runtime responds slowly
	// and/or when there are many container changes in one cycle.
	genericPlegRelistPeriod    = time.Second * 1
	genericPlegRelistThreshold = time.Minute * 3
	// Generic PLEG relist period and threshold when used with Evented PLEG.
	eventedPlegRelistPeriod     = time.Second * 300
	eventedPlegRelistThreshold  = time.Minute * 10
	eventedPlegMaxStreamRetries = 5
	// backOffPeriod is the period to back off when pod syncing results in an
	// error.
	backOffPeriod = time.Second * 10
	// imageBackOffPeriod is the initial period for the exponential backoff for image pulls.
	imageBackOffPeriod = time.Second * 10
	// ContainerGCPeriod is the period for performing container garbage collection.
	ContainerGCPeriod = time.Minute
	// ImageGCPeriod is the period for performing image garbage collection.
	ImageGCPeriod = 5 * time.Minute
	// minDeadContainerInPod is the minimum number of dead containers to keep in a pod.
	minDeadContainerInPod = 1
	// nodeLeaseRenewIntervalFraction is the fraction of lease duration to renew the lease.
	nodeLeaseRenewIntervalFraction = 0.25
	// instrumentationScope is the name of the OpenTelemetry instrumentation scope.
	instrumentationScope = "k8s.io/kubernetes/pkg/kubelet"
)
var (
	// ContainerLogsDir can be overwritten for testing usage.
	ContainerLogsDir = DefaultContainerLogsDir
	// etcHostsPath is the in-container mount path of the managed hosts file,
	// selected per-OS once at package initialization.
	etcHostsPath = getContainerEtcHostsPath()

	// admissionRejectionReasons is the set of known reasons for which pod
	// admission may be rejected.
	admissionRejectionReasons = sets.New[string](
		lifecycle.AppArmorNotAdmittedReason,
		lifecycle.PodOSSelectorNodeLabelDoesNotMatch,
		lifecycle.PodOSNotSupported,
		lifecycle.InvalidNodeInfo,
		lifecycle.InitContainerRestartPolicyForbidden,
		lifecycle.SupplementalGroupsPolicyNotSupported,
		lifecycle.UnexpectedAdmissionError,
		lifecycle.UnknownReason,
		lifecycle.UnexpectedPredicateFailureType,
		lifecycle.OutOfCPU,
		lifecycle.OutOfMemory,
		lifecycle.OutOfEphemeralStorage,
		lifecycle.OutOfPods,
		lifecycle.PodLevelResourcesNotAdmittedReason,
		lifecycle.PodFeatureUnsupported,
		tainttoleration.ErrReasonNotMatch,
		eviction.Reason,
		sysctl.ForbiddenReason,
		topologymanager.ErrorTopologyAffinity,
		nodeshutdown.NodeShutdownNotAdmittedReason,
		volumemanager.VolumeAttachmentLimitExceededReason,
	)

	// goos is the operating system this binary was built for.
	// This is exposed for unit tests.
	goos = sysruntime.GOOS
)
// getContainerEtcHostsPath returns the path inside a container's filesystem
// where the managed hosts file is mounted, which differs between Windows
// and Linux.
func getContainerEtcHostsPath() string {
	switch goos {
	case "windows":
		return windowsEtcHostsPath
	default:
		return linuxEtcHostsPath
	}
}
// SyncHandler is an interface implemented by Kubelet, for testability.
// Each method receives the batch of pods affected by one kind of pod
// configuration change.
type SyncHandler interface {
	// HandlePodAdditions processes pods that were added.
	HandlePodAdditions(ctx context.Context, pods []*v1.Pod)
	// HandlePodUpdates processes pods whose specs were updated.
	HandlePodUpdates(ctx context.Context, pods []*v1.Pod)
	// HandlePodRemoves processes pods that were removed.
	HandlePodRemoves(ctx context.Context, pods []*v1.Pod)
	// HandlePodReconcile processes pods that need to be reconciled.
	HandlePodReconcile(ctx context.Context, pods []*v1.Pod)
	// HandlePodSyncs processes pods that need to be (re)synced.
	HandlePodSyncs(ctx context.Context, pods []*v1.Pod)
	// HandlePodCleanups performs cleanup for pods; it takes no pod batch.
	HandlePodCleanups(ctx context.Context) error
}
// Option is a functional option type for Kubelet; each Option mutates the
// Kubelet instance it is applied to.
type Option func(*Kubelet)
// Bootstrap is a bootstrapping interface for kubelet, targets the initialization protocol.
type Bootstrap interface {
	// GetConfiguration returns the kubelet's effective configuration.
	GetConfiguration() kubeletconfiginternal.KubeletConfiguration
	// BirthCry announces that the kubelet has started.
	BirthCry()
	// StartGarbageCollection starts the background garbage-collection loops.
	StartGarbageCollection(ctx context.Context)
	// ListenAndServe starts the kubelet's main (authenticated) HTTP server.
	ListenAndServe(ctx context.Context, kubeCfg *kubeletconfiginternal.KubeletConfiguration, tlsOptions *server.TLSOptions, auth server.AuthInterface, tp trace.TracerProvider)
	// ListenAndServeReadOnly starts the read-only HTTP server on the given address/port.
	ListenAndServeReadOnly(ctx context.Context, address net.IP, port uint, tp trace.TracerProvider)
	// ListenAndServePodResources starts the pod-resources gRPC server.
	ListenAndServePodResources(ctx context.Context)
	// Run is the kubelet's main loop, consuming pod updates from the channel.
	Run(ctx context.Context, updates <-chan kubetypes.PodUpdate)
}
// Dependencies is a bin for things we might consider "injected dependencies" -- objects constructed
// at runtime that are necessary for running the Kubelet. This is a temporary solution for grouping
// these objects while we figure out a more comprehensive dependency injection story for the Kubelet.
type Dependencies struct {
	// Options holds functional Option hooks for the Kubelet.
	Options []Option

	// Injected Dependencies
	Flagz flagz.Reader
	Auth server.AuthInterface
	CAdvisorInterface cadvisor.Interface
	ContainerManager cm.ContainerManager
	EventClient v1core.EventsGetter
	HeartbeatClient clientset.Interface
	// OnHeartbeatFailure is wired to the kubelet's
	// onRepeatedHeartbeatFailure handler during construction.
	OnHeartbeatFailure func()
	KubeClient clientset.Interface
	Mounter mount.Interface
	HostUtil hostutil.HostUtils
	OOMAdjuster *oom.OOMAdjuster
	OSInterface kubecontainer.OSInterface
	PodConfig *config.PodConfig
	ProbeManager prober.Manager
	Recorder record.EventRecorderLogger
	Subpather subpath.Interface
	TracerProvider trace.TracerProvider
	VolumePlugins []volume.VolumePlugin
	DynamicPluginProber volume.DynamicPluginProber
	TLSOptions *server.TLSOptions
	// RemoteRuntimeService and RemoteImageService are the CRI clients,
	// populated by PreInitRuntimeService before RunKubelet.
	RemoteRuntimeService internalapi.RuntimeService
	RemoteImageService internalapi.ImageManagerService
	PodStartupLatencyTracker util.PodStartupLatencyTracker
	NodeStartupLatencyTracker util.NodeStartupLatencyTracker
	HealthChecker watchdog.HealthChecker
	// useLegacyCadvisorStats is derived from the container runtime endpoint;
	// remove it after cadvisor.UsingLegacyCadvisorStats dropped.
	useLegacyCadvisorStats bool
}
// newCrashLoopBackOff derives the (maximum, initial) backoff durations used
// for container-restart (CrashLoopBackOff) delays, honoring the relevant
// alpha feature gates and the operator-supplied kubelet configuration.
//
// An operator-configured maximum (behind the KubeletCrashLoopBackOffMax gate)
// always takes precedence; the initial period is clamped so it never exceeds
// the maximum.
func newCrashLoopBackOff(kubeCfg *kubeletconfiginternal.KubeletConfiguration) (time.Duration, time.Duration) {
	maximum, initial := MaxCrashLoopBackOff, initialCrashLoopBackOff
	if utilfeature.DefaultFeatureGate.Enabled(features.ReduceDefaultCrashLoopBackOffDecay) {
		maximum, initial = reducedMaxCrashLoopBackOff, reducedInitialCrashLoopBackOff
	}
	if utilfeature.DefaultFeatureGate.Enabled(features.KubeletCrashLoopBackOffMax) {
		// Operator-invoked configuration always has precedence if valid.
		maximum = kubeCfg.CrashLoopBackOff.MaxContainerRestartPeriod.Duration
		if initial > maximum {
			initial = maximum
		}
	}
	return maximum, initial
}
// makePodSourceConfig builds the config.PodConfig that merges every
// configured pod source (static-pod files, static-pod URLs, and the API
// server) for this node, or returns an error.
func makePodSourceConfig(ctx context.Context, kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName, nodeHasSynced func() bool) (*config.PodConfig, error) {
	logger := klog.FromContext(ctx)

	// Extra HTTP headers to send when fetching static pods by URL.
	urlHeader := make(http.Header)
	for key, values := range kubeCfg.StaticPodURLHeader {
		for _, value := range values {
			urlHeader.Add(key, value)
		}
	}

	// podCfg is the source of all configuration.
	podCfg := config.NewPodConfig(kubeDeps.Recorder, kubeDeps.PodStartupLatencyTracker)

	// Static-pod file source.
	if kubeCfg.StaticPodPath != "" {
		logger.Info("Adding static pod path", "path", kubeCfg.StaticPodPath)
		config.NewSourceFile(logger, kubeCfg.StaticPodPath, nodeName, kubeCfg.FileCheckFrequency.Duration, podCfg.Channel(ctx, kubetypes.FileSource))
	}
	// Static-pod URL source.
	if kubeCfg.StaticPodURL != "" {
		logger.Info("Adding pod URL with HTTP header", "URL", kubeCfg.StaticPodURL, "header", urlHeader)
		config.NewSourceURL(logger, kubeCfg.StaticPodURL, urlHeader, nodeName, kubeCfg.HTTPCheckFrequency.Duration, podCfg.Channel(ctx, kubetypes.HTTPSource))
	}
	// API-server source, when a client is available.
	if kubeDeps.KubeClient != nil {
		logger.Info("Adding apiserver pod source")
		config.NewSourceApiserver(logger, kubeDeps.KubeClient, nodeName, nodeHasSynced, podCfg.Channel(ctx, kubetypes.ApiserverSource))
	}
	return podCfg, nil
}
// PreInitRuntimeService initializes the CRI runtime and image service clients
// before RunKubelet is invoked, storing them on kubeDeps.
//
// When no dedicated image-service endpoint is configured, the container
// runtime endpoint is reused for image operations. It also records whether
// legacy cadvisor stats are in use for that endpoint.
func PreInitRuntimeService(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies) error {
	imageEndpoint := kubeCfg.ImageServiceEndpoint
	if imageEndpoint == "" && kubeCfg.ContainerRuntimeEndpoint != "" {
		// Fall back to the runtime endpoint for image operations.
		imageEndpoint = kubeCfg.ContainerRuntimeEndpoint
	}

	logger := klog.Background()
	timeout := kubeCfg.RuntimeRequestTimeout.Duration

	var err error
	kubeDeps.RemoteRuntimeService, err = remote.NewRemoteRuntimeService(kubeCfg.ContainerRuntimeEndpoint, timeout, kubeDeps.TracerProvider, &logger)
	if err != nil {
		return err
	}
	kubeDeps.RemoteImageService, err = remote.NewRemoteImageService(imageEndpoint, timeout, kubeDeps.TracerProvider, &logger)
	if err != nil {
		return err
	}

	kubeDeps.useLegacyCadvisorStats = cadvisor.UsingLegacyCadvisorStats(kubeCfg.ContainerRuntimeEndpoint)
	return nil
}
// NewMainKubelet instantiates a new Kubelet object along with all the required internal modules.
// No initialization of Kubelet and its modules should happen here.
func NewMainKubelet(ctx context.Context,
kubeCfg *kubeletconfiginternal.KubeletConfiguration,
kubeDeps *Dependencies,
crOptions *kubeletconfig.ContainerRuntimeOptions,
hostname string,
nodeName types.NodeName,
nodeIPs []net.IP,
providerID string,
cloudProvider string,
certDirectory string,
rootDirectory string,
podLogsDirectory string,
imageCredentialProviderConfigPath string,
imageCredentialProviderBinDir string,
registerNode bool,
registerWithTaints []v1.Taint,
allowedUnsafeSysctls []string,
experimentalMounterPath string,
kernelMemcgNotification bool,
experimentalNodeAllocatableIgnoreEvictionThreshold bool,
minimumGCAge metav1.Duration,
maxPerPodContainerCount int32,
maxContainerCount int32,
nodeLabels map[string]string,
nodeStatusMaxImages int32,
seccompDefault bool,
) (*Kubelet, error) {
logger := klog.FromContext(ctx)
if rootDirectory == "" {
return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
}
if podLogsDirectory == "" {
return nil, errors.New("pod logs root directory is empty")
}
if kubeCfg.SyncFrequency.Duration <= 0 {
return nil, fmt.Errorf("invalid sync frequency %d", kubeCfg.SyncFrequency.Duration)
}
if !cloudprovider.IsExternal(cloudProvider) && len(cloudProvider) != 0 {
cloudprovider.DisableWarningForProvider(cloudProvider)
return nil, cloudprovider.ErrorForDisabledProvider(cloudProvider)
}
var nodeHasSynced cache.InformerSynced
var nodeInformer coreinformersv1.NodeInformer
var nodeLister corelisters.NodeLister
// If kubeClient == nil, we are running in standalone mode (i.e. no API servers)
// If not nil, we are running as part of a cluster and should sync w/API
if kubeDeps.KubeClient != nil {
kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.FieldSelector = fields.Set{metav1.ObjectNameField: string(nodeName)}.String()
}))
nodeInformer = kubeInformers.Core().V1().Nodes()
nodeLister = nodeInformer.Lister()
nodeHasSynced = func() bool {
return kubeInformers.Core().V1().Nodes().Informer().HasSynced()
}
kubeInformers.Start(wait.NeverStop)
logger.Info("Attempting to sync node with API server")
} else {
// we don't have a client to sync!
nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
nodeLister = corelisters.NewNodeLister(nodeIndexer)
nodeHasSynced = func() bool { return true }
logger.Info("Kubelet is running in standalone mode, will skip API server sync")
}
if kubeDeps.PodConfig == nil {
var err error
kubeDeps.PodConfig, err = makePodSourceConfig(ctx, kubeCfg, kubeDeps, nodeName, nodeHasSynced)
if err != nil {
return nil, err
}
}
containerGCPolicy := kubecontainer.GCPolicy{
MinAge: minimumGCAge.Duration,
MaxPerPodContainer: int(maxPerPodContainerCount),
MaxContainers: int(maxContainerCount),
}
daemonEndpoints := &v1.NodeDaemonEndpoints{
KubeletEndpoint: v1.DaemonEndpoint{Port: kubeCfg.Port},
}
imageGCPolicy := images.ImageGCPolicy{
MinAge: kubeCfg.ImageMinimumGCAge.Duration,
HighThresholdPercent: int(kubeCfg.ImageGCHighThresholdPercent),
LowThresholdPercent: int(kubeCfg.ImageGCLowThresholdPercent),
}
imageGCPolicy.MaxAge = kubeCfg.ImageMaximumGCAge.Duration
enforceNodeAllocatable := kubeCfg.EnforceNodeAllocatable
if experimentalNodeAllocatableIgnoreEvictionThreshold {
// Do not provide kubeCfg.EnforceNodeAllocatable to eviction threshold parsing if we are not enforcing Evictions
enforceNodeAllocatable = []string{}
}
thresholds, err := eviction.ParseThresholdConfig(enforceNodeAllocatable, kubeCfg.EvictionHard, kubeCfg.EvictionSoft, kubeCfg.EvictionSoftGracePeriod, kubeCfg.EvictionMinimumReclaim)
if err != nil {
return nil, err
}
evictionConfig := eviction.Config{
PressureTransitionPeriod: kubeCfg.EvictionPressureTransitionPeriod.Duration,
MaxPodGracePeriodSeconds: int64(kubeCfg.EvictionMaxPodGracePeriod),
Thresholds: thresholds,
KernelMemcgNotification: kernelMemcgNotification,
PodCgroupRoot: kubeDeps.ContainerManager.GetPodCgroupRoot(),
}
var serviceLister corelisters.ServiceLister
var serviceHasSynced cache.InformerSynced
if kubeDeps.KubeClient != nil {
// don't watch headless services, they are not needed since this informer is only used to create the environment variables for pods.
// See https://issues.k8s.io/122394
kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.FieldSelector = fields.OneTermNotEqualSelector("spec.clusterIP", v1.ClusterIPNone).String()
}))
serviceLister = kubeInformers.Core().V1().Services().Lister()
serviceHasSynced = kubeInformers.Core().V1().Services().Informer().HasSynced
kubeInformers.Start(wait.NeverStop)
} else {
serviceIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
serviceLister = corelisters.NewServiceLister(serviceIndexer)
serviceHasSynced = func() bool { return true }
}
// construct a node reference used for events
nodeRef := &v1.ObjectReference{
APIVersion: "v1",
Kind: "Node",
Name: string(nodeName),
Namespace: "",
}
oomWatcher, err := oomwatcher.NewWatcher(kubeDeps.Recorder)
if err != nil {
if inuserns.RunningInUserNS() {
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletInUserNamespace) {
// oomwatcher.NewWatcher returns "open /dev/kmsg: operation not permitted" error,
// when running in a user namespace with sysctl value `kernel.dmesg_restrict=1`.
logger.V(2).Info("Failed to create an oomWatcher (running in UserNS, ignoring)", "err", err)
oomWatcher = nil
} else {
logger.Error(err, "Failed to create an oomWatcher (running in UserNS, Hint: enable KubeletInUserNamespace feature flag to ignore the error)")
return nil, err
}
} else {
return nil, err
}
}
clusterDNS := make([]net.IP, 0, len(kubeCfg.ClusterDNS))
for _, ipEntry := range kubeCfg.ClusterDNS {
ip := netutils.ParseIPSloppy(ipEntry)
if ip == nil {
logger.Info("Invalid clusterDNS IP", "IP", ipEntry)
} else {
clusterDNS = append(clusterDNS, ip)
}
}
// A TLS transport is needed to make HTTPS-based container lifecycle requests,
// but we do not have the information necessary to do TLS verification.
//
// This client must not be modified to include credentials, because it is
// critical that credentials not leak from the client to arbitrary hosts.
insecureContainerLifecycleHTTPClient := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
CheckRedirect: httpprobe.RedirectChecker(false),
}
tracer := kubeDeps.TracerProvider.Tracer(instrumentationScope)
klet := &Kubelet{
hostname: hostname,
nodeName: nodeName,
kubeClient: kubeDeps.KubeClient,
heartbeatClient: kubeDeps.HeartbeatClient,
onRepeatedHeartbeatFailure: kubeDeps.OnHeartbeatFailure,
rootDirectory: filepath.Clean(rootDirectory),
podLogsDirectory: podLogsDirectory,
resyncInterval: kubeCfg.SyncFrequency.Duration,
sourcesReady: config.NewSourcesReady(kubeDeps.PodConfig.SeenAllSources),
registerNode: registerNode,
registerWithTaints: registerWithTaints,
dnsConfigurer: dns.NewConfigurer(kubeDeps.Recorder, nodeRef, nodeIPs, clusterDNS, kubeCfg.ClusterDomain, kubeCfg.ResolverConfig),
serviceLister: serviceLister,
serviceHasSynced: serviceHasSynced,
nodeLister: nodeLister,
nodeHasSynced: nodeHasSynced,
recorder: kubeDeps.Recorder,
cadvisor: kubeDeps.CAdvisorInterface,
externalCloudProvider: cloudprovider.IsExternal(cloudProvider),
providerID: providerID,
nodeRef: nodeRef,
nodeLabels: nodeLabels,
nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration,
nodeStatusReportFrequency: kubeCfg.NodeStatusReportFrequency.Duration,
os: kubeDeps.OSInterface,
oomWatcher: oomWatcher,
cgroupsPerQOS: kubeCfg.CgroupsPerQOS,
cgroupRoot: kubeCfg.CgroupRoot,
mounter: kubeDeps.Mounter,
hostutil: kubeDeps.HostUtil,
subpather: kubeDeps.Subpather,
maxPods: int(kubeCfg.MaxPods),
podsPerCore: int(kubeCfg.PodsPerCore),
syncLoopMonitor: atomic.Value{},
daemonEndpoints: daemonEndpoints,
containerManager: kubeDeps.ContainerManager,
nodeIPs: nodeIPs,
nodeIPValidator: validateNodeIP,
clock: clock.RealClock{},
enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach,
makeIPTablesUtilChains: kubeCfg.MakeIPTablesUtilChains,
nodeStatusMaxImages: nodeStatusMaxImages,
tracer: tracer,
nodeStartupLatencyTracker: kubeDeps.NodeStartupLatencyTracker,
healthChecker: kubeDeps.HealthChecker,
flagz: kubeDeps.Flagz,
}
var secretManager secret.Manager
var configMapManager configmap.Manager
if klet.kubeClient != nil {
switch kubeCfg.ConfigMapAndSecretChangeDetectionStrategy {
case kubeletconfiginternal.WatchChangeDetectionStrategy:
secretManager = secret.NewWatchingSecretManager(klet.kubeClient, klet.resyncInterval)
configMapManager = configmap.NewWatchingConfigMapManager(klet.kubeClient, klet.resyncInterval)
case kubeletconfiginternal.TTLCacheChangeDetectionStrategy:
secretManager = secret.NewCachingSecretManager(
klet.kubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode))
configMapManager = configmap.NewCachingConfigMapManager(
klet.kubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode))
case kubeletconfiginternal.GetChangeDetectionStrategy:
secretManager = secret.NewSimpleSecretManager(klet.kubeClient)
configMapManager = configmap.NewSimpleConfigMapManager(klet.kubeClient)
default:
return nil, fmt.Errorf("unknown configmap and secret manager mode: %v", kubeCfg.ConfigMapAndSecretChangeDetectionStrategy)
}
klet.secretManager = secretManager
klet.configMapManager = configMapManager
}
machineInfo, err := klet.cadvisor.MachineInfo()
if err != nil {
return nil, err
}
// Avoid collector collects it as a timestamped metric
// See PR #95210 and #97006 for more details.
machineInfo.Timestamp = time.Time{}
klet.setCachedMachineInfo(machineInfo)
imageBackOff := flowcontrol.NewBackOff(imageBackOffPeriod, MaxImageBackOff)
klet.livenessManager = proberesults.NewManager()
klet.readinessManager = proberesults.NewManager()
klet.startupManager = proberesults.NewManager()
klet.podCache = kubecontainer.NewCache()
klet.mirrorPodClient = kubepod.NewBasicMirrorClient(klet.kubeClient, string(nodeName), nodeLister)
klet.podManager = kubepod.NewBasicPodManager()
klet.statusManager = status.NewManager(klet.kubeClient, klet.podManager, klet, kubeDeps.PodStartupLatencyTracker)
klet.allocationManager = allocation.NewManager(
klet.getRootDir(),
klet.statusManager,
func(pod *v1.Pod) { klet.HandlePodSyncs(ctx, []*v1.Pod{pod}) },
klet.GetActivePods,
klet.podManager.GetPodByUID,
klet.sourcesReady,
kubeDeps.Recorder,
)
klet.resourceAnalyzer = serverstats.NewResourceAnalyzer(ctx, klet, kubeCfg.VolumeStatsAggPeriod.Duration, kubeDeps.Recorder)
klet.runtimeService = kubeDeps.RemoteRuntimeService
if kubeDeps.KubeClient != nil {
klet.runtimeClassManager = runtimeclass.NewManager(kubeDeps.KubeClient)
}
// setup containerLogManager for CRI container runtime
containerLogManager, err := logs.NewContainerLogManager(
klet.runtimeService,
kubeDeps.OSInterface,
kubeCfg.ContainerLogMaxSize,
int(kubeCfg.ContainerLogMaxFiles),
int(kubeCfg.ContainerLogMaxWorkers),
kubeCfg.ContainerLogMonitorInterval,
)
if err != nil {
return nil, fmt.Errorf("failed to initialize container log manager: %v", err)
}
klet.containerLogManager = containerLogManager
klet.reasonCache = NewReasonCache()
klet.workQueue = queue.NewBasicWorkQueue(klet.clock)
klet.podWorkers = newPodWorkers(
klet,
kubeDeps.Recorder,
klet.workQueue,
klet.resyncInterval,
backOffPeriod,
klet.podCache,
klet.allocationManager,
)
var singleProcessOOMKill *bool
if sysruntime.GOOS == "linux" {
if !util.IsCgroup2UnifiedMode() {
// This is a default behavior for cgroups v1.
singleProcessOOMKill = ptr.To(true)
} else {
if kubeCfg.SingleProcessOOMKill == nil {
singleProcessOOMKill = ptr.To(false)
} else {
singleProcessOOMKill = kubeCfg.SingleProcessOOMKill
}
}
}
tokenManager := token.NewManager(kubeDeps.KubeClient)
getServiceAccount := func(namespace, name string) (*v1.ServiceAccount, error) {
return nil, fmt.Errorf("get service account is not implemented")
}
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletServiceAccountTokenForCredentialProviders) {
getServiceAccount = func(namespace, name string) (*v1.ServiceAccount, error) {
if klet.kubeClient == nil {
return nil, errors.New("cannot get ServiceAccounts when kubelet is in standalone mode")
}
return klet.kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, name, metav1.GetOptions{})
}
}
runtime, postImageGCHooks, err := kuberuntime.NewKubeGenericRuntimeManager(
ctx,
kubecontainer.FilterEventRecorder(kubeDeps.Recorder),
klet.livenessManager,
klet.readinessManager,
klet.startupManager,
rootDirectory,
podLogsDirectory,
machineInfo,
klet.podWorkers,
kubeCfg.MaxPods,
kubeDeps.OSInterface,
klet,
insecureContainerLifecycleHTTPClient,
imageBackOff,
kubeCfg.SerializeImagePulls,
kubeCfg.MaxParallelImagePulls,
float32(kubeCfg.RegistryPullQPS),
int(kubeCfg.RegistryBurst),
kubeCfg.ImagePullCredentialsVerificationPolicy,
kubeCfg.PreloadedImagesVerificationAllowlist,
imageCredentialProviderConfigPath,
imageCredentialProviderBinDir,
singleProcessOOMKill,
kubeCfg.CPUCFSQuota,
kubeCfg.CPUCFSQuotaPeriod,
kubeDeps.RemoteRuntimeService,
kubeDeps.RemoteImageService,
kubeDeps.ContainerManager,
klet.containerLogManager,
klet.runtimeClassManager,
seccompDefault,
kubeCfg.MemorySwap.SwapBehavior,
kubeDeps.ContainerManager.GetNodeAllocatableAbsolute,
*kubeCfg.MemoryThrottlingFactor,
kubeDeps.PodStartupLatencyTracker,
kubeDeps.TracerProvider,
tokenManager,
getServiceAccount,
)
if err != nil {
return nil, err
}
klet.containerRuntime = runtime
klet.streamingRuntime = runtime
klet.runner = runtime
klet.allocationManager.SetContainerRuntime(runtime)
resizeAdmitHandler := allocation.NewPodResizesAdmitHandler(klet.containerManager, runtime, klet.allocationManager, logger)
runtimeCache, err := kubecontainer.NewRuntimeCache(klet.containerRuntime, runtimeCacheRefreshPeriod)
if err != nil {
return nil, err
}
klet.runtimeCache = runtimeCache
// common provider to get host file system usage associated with a pod managed by kubelet
hostStatsProvider := stats.NewHostStatsProvider(kubecontainer.RealOS{}, func(podUID types.UID) string {
return getEtcHostsPath(klet.getPodDir(podUID))
}, podLogsDirectory)
cadvisorStatsProvider := stats.NewCadvisorStatsProvider(
klet.cadvisor,
klet.resourceAnalyzer,
klet.podManager,
klet.containerRuntime,
klet.statusManager,
hostStatsProvider,
kubeDeps.ContainerManager,
)
if kubeDeps.useLegacyCadvisorStats {
klet.StatsProvider = cadvisorStatsProvider
} else {
klet.StatsProvider = stats.NewCRIStatsProvider(
klet.cadvisor,
klet.resourceAnalyzer,
klet.podManager,
kubeDeps.RemoteRuntimeService,
kubeDeps.RemoteImageService,
hostStatsProvider,
utilfeature.DefaultFeatureGate.Enabled(features.PodAndContainerStatsFromCRI),
cadvisorStatsProvider,
)
}
eventChannel := make(chan *pleg.PodLifecycleEvent, plegChannelCapacity)
if utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) {
// adjust Generic PLEG relisting period and threshold to higher value when Evented PLEG is turned on
genericRelistDuration := &pleg.RelistDuration{
RelistPeriod: eventedPlegRelistPeriod,
RelistThreshold: eventedPlegRelistThreshold,
}
klet.pleg = pleg.NewGenericPLEG(logger, klet.containerRuntime, eventChannel, genericRelistDuration, klet.podCache, clock.RealClock{})
// In case Evented PLEG has to fall back on Generic PLEG due to an error,
// Evented PLEG should be able to reset the Generic PLEG relisting duration
// to the default value.
eventedRelistDuration := &pleg.RelistDuration{
RelistPeriod: genericPlegRelistPeriod,
RelistThreshold: genericPlegRelistThreshold,
}
klet.eventedPleg, err = pleg.NewEventedPLEG(logger, klet.containerRuntime, klet.runtimeService, eventChannel,
klet.podCache, klet.pleg, eventedPlegMaxStreamRetries, eventedRelistDuration, clock.RealClock{})
if err != nil {
return nil, err
}
} else {
genericRelistDuration := &pleg.RelistDuration{
RelistPeriod: genericPlegRelistPeriod,
RelistThreshold: genericPlegRelistThreshold,
}
klet.pleg = pleg.NewGenericPLEG(logger, klet.containerRuntime, eventChannel, genericRelistDuration, klet.podCache, clock.RealClock{})
}
klet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
klet.runtimeState.addHealthCheck("PLEG", klet.pleg.Healthy)
if utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) {
klet.runtimeState.addHealthCheck("EventedPLEG", klet.eventedPleg.Healthy)
}
if _, err := klet.updatePodCIDR(ctx, kubeCfg.PodCIDR); err != nil {
logger.Error(err, "Pod CIDR update failed")
}
// setup containerGC
containerGC, err := kubecontainer.NewContainerGC(klet.containerRuntime, containerGCPolicy, klet.sourcesReady)
if err != nil {
return nil, err
}
klet.containerGC = containerGC
klet.containerDeletor = newPodContainerDeletor(klet.containerRuntime, max(containerGCPolicy.MaxPerPodContainer, minDeadContainerInPod))
// setup imageManager
imageManager, err := images.NewImageGCManager(klet.containerRuntime, klet.StatsProvider, postImageGCHooks, kubeDeps.Recorder, nodeRef, imageGCPolicy, kubeDeps.TracerProvider)
if err != nil {
return nil, fmt.Errorf("failed to initialize image manager: %v", err)
}
klet.imageManager = imageManager
if kubeDeps.TLSOptions != nil {
if kubeCfg.ServerTLSBootstrap && utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) {
klet.serverCertificateManager, err = kubeletcertificate.NewKubeletServerCertificateManager(klet.kubeClient, kubeCfg, klet.nodeName, func() []v1.NodeAddress {
return klet.getLastObservedNodeAddresses(ctx)
}, certDirectory)
if err != nil {
return nil, fmt.Errorf("failed to initialize certificate manager: %w", err)
}
} else if kubeDeps.TLSOptions.CertFile != "" && kubeDeps.TLSOptions.KeyFile != "" && utilfeature.DefaultFeatureGate.Enabled(features.ReloadKubeletServerCertificateFile) {
klet.serverCertificateManager, err = kubeletcertificate.NewKubeletServerCertificateDynamicFileManager(kubeDeps.TLSOptions.CertFile, kubeDeps.TLSOptions.KeyFile)
if err != nil {
return nil, fmt.Errorf("failed to initialize file based certificate manager: %w", err)
}
}
if klet.serverCertificateManager != nil {
kubeDeps.TLSOptions.Config.GetCertificate = func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
cert := klet.serverCertificateManager.Current()
if cert == nil {
return nil, fmt.Errorf("no serving certificate available for the kubelet")
}
return cert, nil
}
// GetCertificate is only preferred over the certificate files by
// Golang TLS if the ClientHelloInfo.ServerName is set, which it is
// not when connecting to a host by IP address. Clear the files to
// force the use of GetCertificate.
kubeDeps.TLSOptions.CertFile = ""
kubeDeps.TLSOptions.KeyFile = ""
}
}
if kubeDeps.ProbeManager != nil {
klet.probeManager = kubeDeps.ProbeManager
} else {
klet.probeManager = prober.NewManager(
klet.statusManager,
klet.livenessManager,
klet.readinessManager,
klet.startupManager,
klet.runner,
kubeDeps.Recorder)
}
var clusterTrustBundleManager clustertrustbundle.Manager = &clustertrustbundle.NoopManager{}
if kubeDeps.KubeClient != nil && utilfeature.DefaultFeatureGate.Enabled(features.ClusterTrustBundleProjection) {
clusterTrustBundleManager = clustertrustbundle.NewLazyInformerManager(ctx, kubeDeps.KubeClient, 2*int(kubeCfg.MaxPods))
logger.Info("ClusterTrustBundle informer will be started eventually once a trust bundle is requested")
} else {
logger.Info("Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled")
}
if kubeDeps.KubeClient != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodCertificateRequest) {
kubeInformers := informers.NewSharedInformerFactoryWithOptions(
kubeDeps.KubeClient,
0,
informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.FieldSelector = fields.OneTermEqualSelector("spec.nodeName", string(nodeName)).String()
}),
)
podCertificateManager := podcertificate.NewIssuingManager(
kubeDeps.KubeClient,
klet.podManager,
kubeDeps.Recorder,
kubeInformers.Certificates().V1beta1().PodCertificateRequests(),
nodeInformer,
nodeName,
clock.RealClock{},
)
klet.podCertificateManager = podCertificateManager
kubeInformers.Start(ctx.Done())
go podCertificateManager.Run(ctx)
metrics.RegisterCollectors(collectors.PodCertificateCollectorFor(podCertificateManager))
} else {
klet.podCertificateManager = &podcertificate.NoOpManager{}
logger.Info("Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled")
}
// NewInitializedVolumePluginMgr initializes some storageErrors on the Kubelet runtimeState (in csi_plugin.go init)
// which affects node ready status. This function must be called before Kubelet is initialized so that the Node
// ReadyState is accurate with the storage state.
klet.volumePluginMgr, err = NewInitializedVolumePluginMgr(klet, secretManager, configMapManager, tokenManager, clusterTrustBundleManager, kubeDeps.VolumePlugins, kubeDeps.DynamicPluginProber)
if err != nil {
return nil, err
}
klet.pluginManager = pluginmanager.NewPluginManager(
klet.getPluginsRegistrationDir(), /* sockDir */
kubeDeps.Recorder,
)
// If the experimentalMounterPathFlag is set, we do not want to
// check node capabilities since the mount path is not the default
if len(experimentalMounterPath) != 0 {
// Replace the nameserver in containerized-mounter's rootfs/etc/resolv.conf with kubelet.ClusterDNS
// so that service name could be resolved
klet.dnsConfigurer.SetupDNSinContainerizedMounter(logger, experimentalMounterPath)
}
// setup volumeManager
klet.volumeManager = volumemanager.NewVolumeManager(
kubeCfg.EnableControllerAttachDetach,
nodeName,
klet.podManager,
klet.podWorkers,
klet.kubeClient,
klet.volumePluginMgr,
kubeDeps.Mounter,
kubeDeps.HostUtil,
klet.getPodsDir(),
kubeDeps.Recorder,
volumepathhandler.NewBlockVolumePathHandler())
boMax, base := newCrashLoopBackOff(kubeCfg)
klet.crashLoopBackOff = flowcontrol.NewBackOff(base, boMax)
klet.crashLoopBackOff.HasExpiredFunc = func(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
return eventTime.Sub(lastUpdate) > 600*time.Second
}
// setup eviction manager
evictionManager, evictionAdmitHandler := eviction.NewManager(klet.resourceAnalyzer, evictionConfig,
killPodNow(klet.podWorkers, kubeDeps.Recorder), klet.imageManager, klet.containerGC, kubeDeps.Recorder, nodeRef, klet.clock, kubeCfg.LocalStorageCapacityIsolation)
klet.evictionManager = evictionManager
handlers := []lifecycle.PodAdmitHandler{}
handlers = append(handlers, evictionAdmitHandler)
if utilfeature.DefaultFeatureGate.Enabled(features.NodeDeclaredFeatures) {
v, err := versionutil.Parse(version.Get().String())
if err != nil {
return nil, fmt.Errorf("failed to parse version: %w", err)
}
framework, err := ndf.New(ndffeatures.AllFeatures)
if err != nil {
return nil, fmt.Errorf("failed to create node feature helper: %w", err)
}
klet.version = v
klet.nodeDeclaredFeaturesFramework = framework
klet.nodeDeclaredFeatures = klet.discoverNodeDeclaredFeatures()
klet.nodeDeclaredFeaturesSet = ndf.NewFeatureSet(klet.nodeDeclaredFeatures...)
}
// Safe, allowed sysctls can always be used as unsafe sysctls in the spec.
// Hence, we concatenate those two lists.
safeAndUnsafeSysctls := append(sysctl.SafeSysctlAllowlist(ctx), allowedUnsafeSysctls...)
sysctlsAllowlist, err := sysctl.NewAllowlist(safeAndUnsafeSysctls)
if err != nil {
return nil, err
}
handlers = append(handlers, sysctlsAllowlist)
// enable active deadline handler
activeDeadlineHandler, err := newActiveDeadlineHandler(klet.statusManager, kubeDeps.Recorder, klet.clock)
if err != nil {
return nil, err
}
klet.AddPodSyncLoopHandler(activeDeadlineHandler)
klet.AddPodSyncHandler(activeDeadlineHandler)
handlers = append(handlers, klet.containerManager.GetAllocateResourcesPodAdmitHandler())
criticalPodAdmissionHandler := preemption.NewCriticalPodAdmissionHandler(klet.getAllocatedPods, killPodNow(klet.podWorkers, kubeDeps.Recorder), kubeDeps.Recorder)
handlers = append(handlers, lifecycle.NewPredicateAdmitHandler(klet.GetCachedNode, criticalPodAdmissionHandler, klet.containerManager.UpdatePluginResources))
// apply functional Option's
for _, opt := range kubeDeps.Options {
opt(klet)
}
if goos == "linux" {
// AppArmor is a Linux kernel security module and it does not support other operating systems.
klet.appArmorValidator = apparmor.NewValidator()
handlers = append(handlers, lifecycle.NewAppArmorAdmitHandler(klet.appArmorValidator))
}
handlers = append(handlers, lifecycle.NewPodFeaturesAdmitHandler())
if utilfeature.DefaultFeatureGate.Enabled(features.NodeDeclaredFeatures) {
handlers = append(handlers, lifecycle.NewDeclaredFeaturesAdmitHandler(klet.nodeDeclaredFeaturesFramework, klet.nodeDeclaredFeaturesSet, klet.version))
}
leaseDuration := time.Duration(kubeCfg.NodeLeaseDurationSeconds) * time.Second
renewInterval := time.Duration(float64(leaseDuration) * nodeLeaseRenewIntervalFraction)
klet.nodeLeaseController = lease.NewController(
klet.clock,
klet.heartbeatClient,
string(klet.nodeName),
kubeCfg.NodeLeaseDurationSeconds,
klet.onRepeatedHeartbeatFailure,
renewInterval,
string(klet.nodeName),
v1.NamespaceNodeLease,
util.SetNodeOwnerFunc(ctx, klet.heartbeatClient, string(klet.nodeName)))
// setup node shutdown manager
shutdownManager := nodeshutdown.NewManager(&nodeshutdown.Config{
Logger: logger,
VolumeManager: klet.volumeManager,
Recorder: kubeDeps.Recorder,
NodeRef: nodeRef,
GetPodsFunc: klet.GetActivePods,
KillPodFunc: killPodNow(klet.podWorkers, kubeDeps.Recorder),
SyncNodeStatusFunc: klet.syncNodeStatus,
ShutdownGracePeriodRequested: kubeCfg.ShutdownGracePeriod.Duration,
ShutdownGracePeriodCriticalPods: kubeCfg.ShutdownGracePeriodCriticalPods.Duration,
ShutdownGracePeriodByPodPriority: kubeCfg.ShutdownGracePeriodByPodPriority,
StateDirectory: rootDirectory,
})
klet.shutdownManager = shutdownManager
handlers = append(handlers, shutdownManager)
klet.allocationManager.AddPodAdmitHandlers(append([]lifecycle.PodAdmitHandler{resizeAdmitHandler}, handlers...))
var usernsIDsPerPod *int64
if kubeCfg.UserNamespaces != nil {
usernsIDsPerPod = kubeCfg.UserNamespaces.IDsPerPod
}
klet.usernsManager, err = userns.MakeUserNsManager(logger, klet, usernsIDsPerPod)
if err != nil {
return nil, fmt.Errorf("create user namespace manager: %w", err)
}
// Finally, put the most recent version of the config on the Kubelet, so
// people can see how it was configured.
klet.kubeletConfiguration = *kubeCfg
// Generating the status funcs should be the last thing we do,
// since this relies on the rest of the Kubelet having been constructed.
klet.setNodeStatusFuncs = klet.defaultNodeStatusFuncs()
return klet, nil
}
// serviceLister is the minimal read-only view of services that the Kubelet
// needs (satisfied by corelisters.ServiceLister). It exists so the Kubelet
// can be constructed either with a real informer-backed lister or, in
// standalone mode, with an empty indexer-backed one.
type serviceLister interface {
	// List returns the services matching the given label selector.
	List(labels.Selector) ([]*v1.Service, error)
}
// Kubelet is the main kubelet implementation.
type Kubelet struct {
kubeletConfiguration kubeletconfiginternal.KubeletConfiguration
// hostname is the hostname the kubelet detected or was given via flag/config
hostname string
nodeName types.NodeName
cachedNode *v1.Node
runtimeCache kubecontainer.RuntimeCache
kubeClient clientset.Interface
heartbeatClient clientset.Interface
// mirrorPodClient is used to create and delete mirror pods in the API for static
// pods.
mirrorPodClient kubepod.MirrorClient
rootDirectory string
podLogsDirectory string
// onRepeatedHeartbeatFailure is called when a heartbeat operation fails more than once. optional.
onRepeatedHeartbeatFailure func()
// podManager stores the desired set of admitted pods and mirror pods that the kubelet should be
// running. The actual set of running pods is stored on the podWorkers. The manager is populated
// by the kubelet config loops which abstracts receiving configuration from many different sources
// (api for regular pods, local filesystem or http for static pods). The manager may be consulted
// by other components that need to see the set of desired pods. Note that not all desired pods are
// running, and not all running pods are in the podManager - for instance, force deleting a pod
// from the apiserver will remove it from the podManager, but the pod may still be terminating and
// tracked by the podWorkers. Components that need to know the actual consumed resources of the
// node or are driven by podWorkers and the sync*Pod methods (status, volume, stats) should also
// consult the podWorkers when reconciling.
//
// TODO: review all kubelet components that need the actual set of pods (vs the desired set)
// and update them to use podWorkers instead of podManager. This may introduce latency in some
// methods, but avoids race conditions and correctly accounts for terminating pods that have
// been force deleted or static pods that have been updated.
// https://github.com/kubernetes/kubernetes/issues/116970
podManager kubepod.Manager
// podWorkers is responsible for driving the lifecycle state machine of each pod. The worker is
// notified of config changes, updates, periodic reconciliation, container runtime updates, and
// evictions of all desired pods and will invoke reconciliation methods per pod in separate
// goroutines. The podWorkers are authoritative in the kubelet for what pods are actually being
// run and their current state:
//
// * syncing: pod should be running (syncPod)
// * terminating: pod should be stopped (syncTerminatingPod)
// * terminated: pod should have all resources cleaned up (syncTerminatedPod)
//
// and invoke the handler methods that correspond to each state. Components within the
// kubelet that need to know the phase of the pod in order to correctly set up or tear down
// resources must consult the podWorkers.
//
// Once a pod has been accepted by the pod workers, no other pod with that same UID (and
// name+namespace, for static pods) will be started until the first pod has fully terminated
// and been cleaned up by SyncKnownPods. This means a pod may be desired (in API), admitted
// (in pod manager), and requested (by invoking UpdatePod) but not start for an arbitrarily
// long interval because a prior pod is still terminating.
//
// As an event-driven (by UpdatePod) controller, the podWorkers must periodically be resynced
// by the kubelet invoking SyncKnownPods with the desired state (admitted pods in podManager).
// Since the podManager may be unaware of some running pods due to force deletion, the
// podWorkers are responsible for triggering a sync of pods that are no longer desired but
// must still run to completion.
podWorkers PodWorkers
// evictionManager observes the state of the node for situations that could impact node stability
// and evicts pods (sets to phase Failed with reason Evicted) to reduce resource pressure. The
// eviction manager acts on the actual state of the node and considers the podWorker to be
// authoritative.
evictionManager eviction.Manager
// probeManager tracks the set of running pods and ensures any user-defined periodic checks are
// run to introspect the state of each pod. The probe manager acts on the actual state of the node
// and is notified of pods by the podWorker. The probe manager is the authoritative source of the
// most recent probe status and is responsible for notifying the status manager, which
// synthesizes them into the overall pod status.
probeManager prober.Manager
// secretManager caches the set of secrets used by running pods on this node. The podWorkers
// notify the secretManager when pods are started and terminated, and the secretManager must
// then keep the needed secrets up-to-date as they change.
secretManager secret.Manager
// configMapManager caches the set of config maps used by running pods on this node. The
// podWorkers notify the configMapManager when pods are started and terminated, and the
// configMapManager must then keep the needed config maps up-to-date as they change.
configMapManager configmap.Manager
// volumeManager observes the set of running pods and is responsible for attaching, mounting,
// unmounting, and detaching as those pods move through their lifecycle. It periodically
// synchronizes the set of known volumes to the set of actually desired volumes and cleans up
// any orphaned volumes. The volume manager considers the podWorker to be authoritative for
// which pods are running.
volumeManager volumemanager.VolumeManager
// statusManager receives updated pod status updates from the podWorker and updates the API
// status of those pods to match. The statusManager is authoritative for the synthesized
// status of the pod from the kubelet's perspective (other components own the individual
// elements of status) and should be consulted by components in preference to assembling
// that status themselves. Note that the status manager is downstream of the pod worker
// and components that need to check whether a pod is still running should instead directly
// consult the pod worker.
statusManager status.Manager
// allocationManager manages allocated resources for pods.
allocationManager allocation.Manager
// podCertificateManager is fed updates as pods are added and removed from
// the node, and requests certificates for them based on their configured
// pod certificate volumes.
podCertificateManager podcertificate.Manager
// resyncInterval is the interval between periodic full reconciliations of
// pods on this node.
resyncInterval time.Duration
// sourcesReady records the sources seen by the kubelet, it is thread-safe.
sourcesReady config.SourcesReady
// Optional, defaults to /logs/ from /var/log
logServer http.Handler
// Optional, defaults to simple Docker implementation
runner kubecontainer.CommandRunner
// cAdvisor used for container information.
cadvisor cadvisor.Interface
// Set to true to have the node register itself with the apiserver.
registerNode bool
// List of taints to add to a node object when the kubelet registers itself.
registerWithTaints []v1.Taint
// for internal book keeping; access only from within registerWithApiserver
registrationCompleted bool
// dnsConfigurer is used for setting up DNS resolver configuration when launching pods.
dnsConfigurer *dns.Configurer
// serviceLister knows how to list services
serviceLister serviceLister
// serviceHasSynced indicates whether services have been sync'd at least once.
// Check this before trusting a response from the lister.
serviceHasSynced cache.InformerSynced
// nodeLister knows how to list nodes
nodeLister corelisters.NodeLister
// nodeHasSynced indicates whether nodes have been sync'd at least once.
// Check this before trusting a response from the node lister.
nodeHasSynced cache.InformerSynced
// a list of node labels to register
nodeLabels map[string]string
// nodeDeclaredFeatures is the ordered static list of features that are determined at startup and declared in node status.
nodeDeclaredFeatures []string
// nodeDeclaredFeaturesSet provides the same features as nodeDeclaredFeatures, but as a set for faster inference.
nodeDeclaredFeaturesSet ndf.FeatureSet
// nodeDeclaredFeaturesFramework provides the shared logic for feature discovery and pod requirement inference.
nodeDeclaredFeaturesFramework *ndf.Framework
// kubelet version
version *versionutil.Version
// Last timestamp when runtime responded on ping.
// Mutex is used to protect this value.
runtimeState *runtimeState
// Volume plugins.
volumePluginMgr *volume.VolumePluginMgr
// Manages container health check results.
livenessManager proberesults.Manager
readinessManager proberesults.Manager
startupManager proberesults.Manager
// The EventRecorder to use
recorder record.EventRecorderLogger
// Policy for handling garbage collection of dead containers.
containerGC kubecontainer.GC
// Manager for image garbage collection.
imageManager images.ImageGCManager
// Manager for container logs.
containerLogManager logs.ContainerLogManager
// Cached MachineInfo returned by cadvisor.
machineInfoLock sync.RWMutex
machineInfo *cadvisorapi.MachineInfo
// Handles certificate rotations.
serverCertificateManager certificate.Manager
// Indicates that the node initialization happens in an external cloud controller
externalCloudProvider bool
// Reference to this node.
nodeRef *v1.ObjectReference
// Container runtime.
containerRuntime kubecontainer.Runtime
// Streaming runtime handles container streaming.
streamingRuntime kubecontainer.StreamingRuntime
// Container runtime service (needed by container runtime Start()).
runtimeService internalapi.RuntimeService
// reasonCache caches the failure reason of the last creation of all containers, which is
// used for generating ContainerStatus.
reasonCache *ReasonCache
// containerRuntimeReadyExpected indicates whether container runtime being ready is expected
// so errors are logged without verbosity guard, to avoid excessive error logs at node startup.
// It's false during the node initialization period of nodeReadyGracePeriod, and after that
// it's set to true by fastStatusUpdateOnce when it exits.
containerRuntimeReadyExpected bool
// nodeStatusUpdateFrequency specifies how often kubelet computes node status. If node lease
// feature is not enabled, it is also the frequency that kubelet posts node status to master.
// In that case, be cautious when changing the constant, it must work with nodeMonitorGracePeriod
// in nodecontroller. There are several constraints:
// 1. nodeMonitorGracePeriod must be N times more than nodeStatusUpdateFrequency, where
// N means number of retries allowed for kubelet to post node status. It is pointless
// to make nodeMonitorGracePeriod be less than nodeStatusUpdateFrequency, since there
// will only be fresh values from Kubelet at an interval of nodeStatusUpdateFrequency.
// The constant must be less than podEvictionTimeout.
// 2. nodeStatusUpdateFrequency needs to be large enough for kubelet to generate node
// status. Kubelet may fail to update node status reliably if the value is too small,
// as it takes time to gather all necessary node information.
nodeStatusUpdateFrequency time.Duration
// nodeStatusReportFrequency is the frequency that kubelet posts node
// status to master. It is only used when node lease feature is enabled.
nodeStatusReportFrequency time.Duration
// delayAfterNodeStatusChange is the one-time random duration that we add to the next node status report interval
// every time when there's an actual node status change or kubelet restart. But all future node status update that
// is not caused by real status change will stick with nodeStatusReportFrequency. The random duration is a uniform
// distribution over [-0.5*nodeStatusReportFrequency, 0.5*nodeStatusReportFrequency]
delayAfterNodeStatusChange time.Duration
// lastStatusReportTime is the time when node status was last reported.
lastStatusReportTime time.Time
// syncNodeStatusMux is a lock on updating the node status, because this path is not thread-safe.
// This lock is used by Kubelet.syncNodeStatus and Kubelet.fastNodeStatusUpdate functions and shouldn't be used anywhere else.
syncNodeStatusMux sync.Mutex
// updatePodCIDRMux is a lock on updating pod CIDR, because this path is not thread-safe.
// This lock is used by Kubelet.updatePodCIDR function and shouldn't be used anywhere else.
updatePodCIDRMux sync.Mutex
// updateRuntimeMux is a lock on updating runtime, because this path is not thread-safe.
// This lock is used by Kubelet.updateRuntimeUp, Kubelet.fastNodeStatusUpdate and
// Kubelet.HandlerSupportsUserNamespaces functions and shouldn't be used anywhere else.
updateRuntimeMux sync.Mutex
// nodeLeaseController claims and renews the node lease for this Kubelet
nodeLeaseController lease.Controller
// pleg observes the state of the container runtime and notifies the kubelet of changes to containers, which
// notifies the podWorkers to reconcile the state of the pod (for instance, if a container dies and needs to
// be restarted).
pleg pleg.PodLifecycleEventGenerator
// eventedPleg supplements the pleg to deliver edge-driven container changes with low-latency.
eventedPleg pleg.PodLifecycleEventGenerator
// Store kubecontainer.PodStatus for all pods.
podCache kubecontainer.Cache
// os is a facade for various syscalls that need to be mocked during testing.
os kubecontainer.OSInterface
// Watcher of out of memory events.
oomWatcher oomwatcher.Watcher
// Monitor resource usage
resourceAnalyzer serverstats.ResourceAnalyzer
// Whether or not we should have the QOS cgroup hierarchy for resource management
cgroupsPerQOS bool
// If non-empty, pass this to the container runtime as the root cgroup.
cgroupRoot string
// Mounter to use for volumes.
mounter mount.Interface
// hostutil to interact with filesystems
hostutil hostutil.HostUtils
// subpather to execute subpath actions
subpather subpath.Interface
// Manager of non-Runtime containers.
containerManager cm.ContainerManager
// Maximum Number of Pods which can be run by this Kubelet
maxPods int
// Monitor Kubelet's sync loop
syncLoopMonitor atomic.Value
// Container restart Backoff
crashLoopBackOff *flowcontrol.Backoff
// Information about the ports which are opened by daemons on Node running this Kubelet server.
daemonEndpoints *v1.NodeDaemonEndpoints
// A queue used to trigger pod workers.
workQueue queue.WorkQueue
// oneTimeInitializer is used to initialize modules that are dependent on the runtime to be up.
oneTimeInitializer sync.Once
// If set, use this IP address or addresses for the node
nodeIPs []net.IP
// use this function to validate the kubelet nodeIP
nodeIPValidator func(net.IP) error
// If non-nil, this is a unique identifier for the node in an external database, eg. cloudprovider
providerID string
// clock is an interface that provides time related functionality in a way that makes it
// easy to test the code.
clock clock.WithTicker
// handlers called during the tryUpdateNodeStatus cycle
setNodeStatusFuncs []func(context.Context, *v1.Node) error
lastNodeUnschedulableLock sync.Mutex
// maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
lastNodeUnschedulable bool
// the list of handlers to call during pod sync loop.
lifecycle.PodSyncLoopHandlers
// the list of handlers to call during pod sync.
lifecycle.PodSyncHandlers
// the number of allowed pods per core
podsPerCore int
// enableControllerAttachDetach indicates the Attach/Detach controller
// should manage attachment/detachment of volumes scheduled to this node,
// and disable kubelet from executing any attach/detach operations
enableControllerAttachDetach bool
// trigger deleting containers in a pod
containerDeletor *podContainerDeletor
// config iptables util rules
makeIPTablesUtilChains bool
// The AppArmor validator for checking whether AppArmor is supported.
appArmorValidator apparmor.Validator
// StatsProvider provides the node and the container stats.
StatsProvider *stats.Provider
// pluginmanager runs a set of asynchronous loops that figure out which
// plugins need to be registered/unregistered based on this node and makes it so.
pluginManager pluginmanager.PluginManager
// This flag sets a maximum number of images to report in the node status.
nodeStatusMaxImages int32
// Handles RuntimeClass objects for the Kubelet.
runtimeClassManager *runtimeclass.Manager
// Handles node shutdown events for the Node.
shutdownManager nodeshutdown.Manager
// Manage user namespaces
usernsManager *userns.UsernsManager
// OpenTelemetry Tracer
tracer trace.Tracer
// Track node startup latencies
nodeStartupLatencyTracker util.NodeStartupLatencyTracker
// Health check kubelet
healthChecker watchdog.HealthChecker
// flagz is the Reader interface to get flags for flagz page.
flagz flagz.Reader
}
// ListPodStats returns stats for all pods by delegating to the kubelet's
// StatsProvider (which implements the stats.Provider interface).
func (kl *Kubelet) ListPodStats(ctx context.Context) ([]statsapi.PodStats, error) {
	provider := kl.StatsProvider
	return provider.ListPodStats(ctx)
}
// ListPodCPUAndMemoryStats returns CPU and memory stats for all pods by
// delegating to the kubelet's StatsProvider (stats.Provider interface).
func (kl *Kubelet) ListPodCPUAndMemoryStats(ctx context.Context) ([]statsapi.PodStats, error) {
	provider := kl.StatsProvider
	return provider.ListPodCPUAndMemoryStats(ctx)
}
// PodCPUAndMemoryStats returns CPU and memory stats for a single pod by
// delegating to the kubelet's StatsProvider.
func (kl *Kubelet) PodCPUAndMemoryStats(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) (*statsapi.PodStats, error) {
	provider := kl.StatsProvider
	return provider.PodCPUAndMemoryStats(ctx, pod, podStatus)
}
// ListPodStatsAndUpdateCPUNanoCoreUsage returns stats for all pods, updating
// cached CPU nano-core usage as a side effect, by delegating to the kubelet's
// StatsProvider (stats.Provider interface).
func (kl *Kubelet) ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]statsapi.PodStats, error) {
	provider := kl.StatsProvider
	return provider.ListPodStatsAndUpdateCPUNanoCoreUsage(ctx)
}
// ImageFsStats returns the image and container filesystem stats by delegating
// to the kubelet's StatsProvider (stats.Provider interface).
func (kl *Kubelet) ImageFsStats(ctx context.Context) (*statsapi.FsStats, *statsapi.FsStats, error) {
	provider := kl.StatsProvider
	return provider.ImageFsStats(ctx)
}
// GetCgroupStats returns container and network stats for the named cgroup by
// delegating to the kubelet's StatsProvider (stats.Provider interface).
func (kl *Kubelet) GetCgroupStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) {
	provider := kl.StatsProvider
	return provider.GetCgroupStats(cgroupName, updateStats)
}
// GetCgroupCPUAndMemoryStats returns CPU and memory stats for the named cgroup
// by delegating to the kubelet's StatsProvider (stats.Provider interface).
func (kl *Kubelet) GetCgroupCPUAndMemoryStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, error) {
	provider := kl.StatsProvider
	return provider.GetCgroupCPUAndMemoryStats(cgroupName, updateStats)
}
// RootFsStats returns stats for the node's root filesystem by delegating to
// the kubelet's StatsProvider (stats.Provider interface).
func (kl *Kubelet) RootFsStats() (*statsapi.FsStats, error) {
	provider := kl.StatsProvider
	return provider.RootFsStats()
}
// RlimitStats returns process limit stats by delegating to the kubelet's
// StatsProvider (stats.Provider interface).
func (kl *Kubelet) RlimitStats() (*statsapi.RlimitStats, error) {
	provider := kl.StatsProvider
	return provider.RlimitStats()
}
// setupDataDirs creates:
// 1. the root directory
// 2. the pods directory
// 3. the plugins directory
// 4. the pod-resources directory
// 5. the checkpoint directory
// 6. the pod logs root directory
//
// It returns an error if the configured root directory is not in canonical
// form or if any directory cannot be created. All returned errors wrap the
// underlying cause with %w so callers can use errors.Is/errors.As.
// On SELinux-enabled hosts, failure to relabel the plugin directories is
// logged but not fatal.
func (kl *Kubelet) setupDataDirs(logger klog.Logger) error {
	// Reject a non-canonical root path (trailing slash, "..", etc.) up front
	// so every path derived from it below is canonical as well.
	if cleanedRoot := filepath.Clean(kl.rootDirectory); cleanedRoot != kl.rootDirectory {
		return fmt.Errorf("rootDirectory not in canonical form: expected %s, was %s", cleanedRoot, kl.rootDirectory)
	}
	pluginRegistrationDir := kl.getPluginsRegistrationDir()
	pluginsDir := kl.getPluginsDir()
	if err := os.MkdirAll(kl.getRootDir(), 0750); err != nil {
		return fmt.Errorf("error creating root directory: %w", err)
	}
	if err := utilfs.MkdirAll(kl.getPodLogsDir(), 0750); err != nil {
		return fmt.Errorf("error creating pod logs root directory %q: %w", kl.getPodLogsDir(), err)
	}
	// Make the root directory rshared so volume mounts propagate to containers.
	if err := kl.hostutil.MakeRShared(kl.getRootDir()); err != nil {
		return fmt.Errorf("error configuring root directory: %w", err)
	}
	if err := os.MkdirAll(kl.getPodsDir(), 0750); err != nil {
		return fmt.Errorf("error creating pods directory: %w", err)
	}
	if err := utilfs.MkdirAll(kl.getPluginsDir(), 0750); err != nil {
		return fmt.Errorf("error creating plugins directory: %w", err)
	}
	if err := utilfs.MkdirAll(kl.getPluginsRegistrationDir(), 0750); err != nil {
		return fmt.Errorf("error creating plugins registry directory: %w", err)
	}
	if err := os.MkdirAll(kl.getPodResourcesDir(), 0750); err != nil {
		return fmt.Errorf("error creating podresources directory: %w", err)
	}
	if utilfeature.DefaultFeatureGate.Enabled(features.ContainerCheckpoint) {
		// Checkpoints may contain sensitive container state; restrict to 0700.
		if err := utilfs.MkdirAll(kl.getCheckpointsDir(), 0700); err != nil {
			return fmt.Errorf("error creating checkpoint directory: %w", err)
		}
	}
	if selinux.GetEnabled() {
		// Best-effort relabeling so unprivileged containerized plugins can
		// access the plugin directories; a failure is logged, not returned.
		err := selinux.SetFileLabel(pluginRegistrationDir, kubeletconfig.KubeletPluginsDirSELinuxLabel)
		if err != nil {
			logger.Info("Unprivileged containerized plugins might not work, could not set selinux context on plugin registration dir", "path", pluginRegistrationDir, "err", err)
		}
		err = selinux.SetFileLabel(pluginsDir, kubeletconfig.KubeletPluginsDirSELinuxLabel)
		if err != nil {
			logger.Info("Unprivileged containerized plugins might not work, could not set selinux context on plugins dir", "path", pluginsDir, "err", err)
		}
	}
	return nil
}
// StartGarbageCollection starts garbage collection threads.
// It launches one goroutine for container GC (every ContainerGCPeriod) and,
// unless image GC is effectively disabled by configuration, one for image GC
// (every ImageGCPeriod). Both loops run until process exit (wait.NeverStop).
func (kl *Kubelet) StartGarbageCollection(ctx context.Context) {
	logger := klog.FromContext(ctx)
	// Tracks whether the previous container-GC pass failed, so that the first
	// success after a failure is logged more prominently (V(1) vs V(4)).
	loggedContainerGCFailure := false
	go wait.Until(func() {
		if err := kl.containerGC.GarbageCollect(ctx); err != nil {
			logger.Error(err, "Container garbage collection failed")
			kl.recorder.WithLogger(logger).Eventf(kl.nodeRef, v1.EventTypeWarning, events.ContainerGCFailed, err.Error())
			loggedContainerGCFailure = true
		} else {
			var vLevel klog.Level = 4
			if loggedContainerGCFailure {
				// Recovery after a failure: surface the success at V(1).
				vLevel = 1
				loggedContainerGCFailure = false
			}
			logger.V(int(vLevel)).Info("Container garbage collection succeeded")
		}
	}, ContainerGCPeriod, wait.NeverStop)
	// when the high threshold is set to 100, and the max age is 0 (or the max age feature is disabled)
	// stub the image GC manager
	if kl.kubeletConfiguration.ImageGCHighThresholdPercent == 100 && kl.kubeletConfiguration.ImageMaximumGCAge.Duration == 0 {
		logger.V(2).Info("ImageGCHighThresholdPercent is set 100 and ImageMaximumGCAge is 0, Disable image GC")
		return
	}
	// Tracks whether the previous image-GC pass failed; an event is only
	// recorded on repeated failures to avoid noise during startup, when stats
	// initialization may not have completed yet.
	prevImageGCFailed := false
	beganGC := time.Now()
	go wait.Until(func() {
		if err := kl.imageManager.GarbageCollect(ctx, beganGC); err != nil {
			if prevImageGCFailed {
				logger.Error(err, "Image garbage collection failed multiple times in a row")
				// Only create an event for repeated failures
				kl.recorder.WithLogger(logger).Event(kl.nodeRef, v1.EventTypeWarning, events.ImageGCFailed, err.Error())
			} else {
				logger.Error(err, "Image garbage collection failed once. Stats initialization may not have completed yet")
			}
			prevImageGCFailed = true
		} else {
			var vLevel klog.Level = 4
			if prevImageGCFailed {
				// Recovery after a failure: surface the success at V(1).
				vLevel = 1
				prevImageGCFailed = false
			}
			logger.V(int(vLevel)).Info("Image garbage collection succeeded")
		}
	}, ImageGCPeriod, wait.NeverStop)
}
// initializeModules will initialize internal modules that do not require the container runtime to be up.
// Note that the modules here must not depend on modules that are not initialized here.
// The initialization order is: metrics registration, data directories,
// container log directory, image manager, certificate manager (optional),
// OOM watcher (optional), resource analyzer. Any error aborts kubelet startup.
func (kl *Kubelet) initializeModules(ctx context.Context) error {
	logger := klog.FromContext(ctx)
	// Prometheus metrics.
	metrics.Register()
	metrics.RegisterCollectors(
		collectors.NewVolumeStatsCollector(kl),
		collectors.NewLogMetricsCollector(kl.StatsProvider.ListPodStats),
	)
	metrics.SetNodeName(kl.nodeName)
	servermetrics.Register()
	// Setup filesystem directories.
	if err := kl.setupDataDirs(logger); err != nil {
		return err
	}
	// If the container logs directory does not exist, create it.
	if _, err := os.Stat(ContainerLogsDir); err != nil {
		if err := kl.os.MkdirAll(ContainerLogsDir, 0755); err != nil {
			return fmt.Errorf("failed to create directory %q: %v", ContainerLogsDir, err)
		}
	}
	if goos == "windows" {
		// On Windows we should not allow other users to read the logs directory
		// to avoid allowing non-root containers from reading the logs of other containers.
		if err := utilfs.Chmod(ContainerLogsDir, 0750); err != nil {
			return fmt.Errorf("failed to set permissions on directory %q: %w", ContainerLogsDir, err)
		}
	}
	// Start the image manager.
	kl.imageManager.Start(ctx)
	// Start the certificate manager if it was enabled.
	if kl.serverCertificateManager != nil {
		kl.serverCertificateManager.Start()
	}
	// Start out of memory watcher.
	if kl.oomWatcher != nil {
		if err := kl.oomWatcher.Start(ctx, kl.nodeRef); err != nil {
			return fmt.Errorf("failed to start OOM watcher: %w", err)
		}
	}
	// Start resource analyzer
	kl.resourceAnalyzer.Start(ctx)
	return nil
}
// initializeRuntimeDependentModules will initialize internal modules that require the container runtime to be up.
// Failures in cAdvisor, node info retrieval, or the container manager are
// fatal (os.Exit(1)); the kubelet relies on its supervisor to restart it.
// A failure of the node shutdown manager is only logged.
func (kl *Kubelet) initializeRuntimeDependentModules(ctx context.Context) {
	logger := klog.FromContext(ctx)
	if err := kl.cadvisor.Start(); err != nil {
		// Fail kubelet and rely on the babysitter to retry starting kubelet.
		logger.Error(err, "Failed to start cAdvisor")
		os.Exit(1)
	}
	// trigger on-demand stats collection once so that we have capacity information for ephemeral storage.
	// ignore any errors, since if stats collection is not successful, the container manager will fail to start below.
	kl.StatsProvider.GetCgroupStats("/", true)
	// Start container manager.
	node, err := kl.getNodeAnyWay(ctx)
	if err != nil {
		// Fail kubelet and rely on the babysitter to retry starting kubelet.
		logger.Error(err, "Kubelet failed to get node info")
		os.Exit(1)
	}
	// containerManager must start after cAdvisor because it needs filesystem capacity information
	if err := kl.containerManager.Start(ctx, node, kl.GetActivePods, kl.getNodeAnyWay, kl.sourcesReady, kl.statusManager, kl.runtimeService, kl.supportLocalStorageCapacityIsolation()); err != nil {
		// Fail kubelet and rely on the babysitter to retry starting kubelet.
		logger.Error(err, "Failed to start ContainerManager")
		os.Exit(1)
	}
	// eviction manager must start after cadvisor because it needs to know if the container runtime has a dedicated imagefs
	// Eviction decisions are based on the allocated (rather than desired) pod resources.
	kl.evictionManager.Start(ctx, kl.StatsProvider, kl.getAllocatedPods, kl.PodIsFinished, evictionMonitoringPeriod)
	// container log manager must start after container runtime is up to retrieve information from container runtime
	// and inform container to reopen log file after log rotation.
	kl.containerLogManager.Start(ctx)
	// Adding Registration Callback function for CSI Driver
	kl.pluginManager.AddHandler(pluginwatcherapi.CSIPlugin, plugincache.PluginHandler(csi.PluginHandler))
	// Adding Registration Callback function for DRA Plugin and Device Plugin
	for name, handler := range kl.containerManager.GetPluginRegistrationHandlers() {
		kl.pluginManager.AddHandler(name, handler)
	}
	// Start the plugin manager
	logger.V(4).Info("Starting plugin manager")
	go kl.pluginManager.Run(ctx, kl.sourcesReady, wait.NeverStop)
	err = kl.shutdownManager.Start(ctx)
	if err != nil {
		// The shutdown manager is not critical for kubelet, so log failure, but don't block Kubelet startup if there was a failure starting it.
		logger.Error(err, "Failed to start node shutdown manager")
	}
}
// Run starts the kubelet reacting to config updates.
// It wires up the /logs endpoint, initializes runtime-independent modules,
// starts the node-status and lease goroutines (when an API client exists),
// the runtime health poller, the status manager, and the PLEG(s), and then
// blocks in syncLoop processing pod updates from the given channel.
func (kl *Kubelet) Run(ctx context.Context, updates <-chan kubetypes.PodUpdate) {
	logger := klog.FromContext(ctx)
	if kl.logServer == nil {
		file := http.FileServer(http.Dir(nodeLogDir))
		if utilfeature.DefaultFeatureGate.Enabled(features.NodeLogQuery) && kl.kubeletConfiguration.EnableSystemLogQuery {
			// Query-aware log handler: serve structured node log queries when
			// the request parses as one, otherwise fall back to plain files.
			kl.logServer = http.StripPrefix("/logs/", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
				if nlq, errs := newNodeLogQuery(req.URL.Query()); len(errs) > 0 {
					http.Error(w, errs.ToAggregate().Error(), http.StatusBadRequest)
					return
				} else if nlq != nil {
					if req.URL.Path != "/" && req.URL.Path != "" {
						http.Error(w, "path not allowed in query mode", http.StatusNotAcceptable)
						return
					}
					if errs := nlq.validate(); len(errs) > 0 {
						http.Error(w, errs.ToAggregate().Error(), http.StatusNotAcceptable)
						return
					}
					// Validation ensures that the request does not query services and files at the same time
					if len(nlq.Services) > 0 {
						journal.ServeHTTP(w, req)
						return
					}
					// Validation ensures that the request does not explicitly query multiple files at the same time
					if len(nlq.Files) == 1 {
						// Account for the \ being used on Windows clients
						req.URL.Path = filepath.ToSlash(nlq.Files[0])
					}
				}
				// Fall back in case the caller is directly trying to query a file
				// Example: kubectl get --raw /api/v1/nodes/$name/proxy/logs/foo.log
				file.ServeHTTP(w, req)
			}))
		} else {
			kl.logServer = http.StripPrefix("/logs/", file)
		}
	}
	if kl.kubeClient == nil {
		// Standalone mode: run pods but never report status to an API server.
		logger.Info("No API server defined - no node status update will be sent")
	}
	if err := kl.initializeModules(ctx); err != nil {
		kl.recorder.WithLogger(logger).Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error())
		logger.Error(err, "Failed to initialize internal modules")
		os.Exit(1)
	}
	if err := kl.cgroupVersionCheck(); err != nil {
		logger.V(2).Info("Warning: cgroup check", "error", err)
	}
	// Start the allocation manager
	if kl.allocationManager != nil {
		kl.allocationManager.Run(ctx)
	}
	// Start volume manager
	go kl.volumeManager.Run(ctx, kl.sourcesReady)
	if kl.kubeClient != nil {
		// Start two go-routines to update the status.
		//
		// The first will report to the apiserver every nodeStatusUpdateFrequency and is aimed to provide regular status intervals,
		// while the second is used to provide a more timely status update during initialization and runs an one-shot update to the apiserver
		// once the node becomes ready, then exits afterwards.
		//
		// Introduce some small jittering to ensure that over time the requests won't start
		// accumulating at approximately the same time from the set of nodes due to priority and
		// fairness effect.
		go func() {
			// Call updateRuntimeUp once before syncNodeStatus to make sure kubelet had already checked runtime state
			// otherwise when restart kubelet, syncNodeStatus will report node notReady in first report period
			kl.updateRuntimeUp(ctx)
			wait.JitterUntil(func() { kl.syncNodeStatus(ctx) }, kl.nodeStatusUpdateFrequency, 0.04, true, wait.NeverStop)
		}()
		go kl.fastStatusUpdateOnce()
		// start syncing lease
		go kl.nodeLeaseController.Run(context.Background())
		// Mirror pods for static pods may not be created immediately during node startup
		// due to node registration or informer sync delays. They will be created eventually
		// when static pods are resynced (every 1-1.5 minutes).
		// To ensure kube-scheduler is aware of static pod resource usage faster,
		// mirror pods are created as soon as the node registers.
		go kl.fastStaticPodsRegistration(ctx)
	}
	// Poll the runtime health every 5 seconds for the lifetime of the kubelet.
	go wait.UntilWithContext(ctx, kl.updateRuntimeUp, 5*time.Second)
	// Set up iptables util rules
	if kl.makeIPTablesUtilChains {
		kl.initNetworkUtil(logger)
	}
	// Start component sync loops.
	kl.statusManager.Start(ctx)
	// Start syncing RuntimeClasses if enabled.
	if kl.runtimeClassManager != nil {
		kl.runtimeClassManager.Start(wait.NeverStop)
	}
	// Start the pod lifecycle event generator.
	kl.pleg.Start()
	// Start eventedPLEG only if EventedPLEG feature gate is enabled.
	if utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) {
		kl.eventedPleg.Start()
	}
	if kl.healthChecker != nil {
		kl.healthChecker.SetHealthCheckers(kl, kl.containerManager.GetHealthCheckers())
	}
	// Blocks for the lifetime of the kubelet, dispatching pod updates.
	kl.syncLoop(ctx, updates, kl)
}
// SyncPod is the transaction script for the sync of a single pod (setting up)
// a pod. This method is reentrant and expected to converge a pod towards the
// desired state of the spec. The reverse (teardown) is handled in
// SyncTerminatingPod and SyncTerminatedPod. If SyncPod exits without error,
// then the pod runtime state is in sync with the desired configuration state
// (pod is running). If SyncPod exits with a transient error, the next
// invocation of SyncPod is expected to make progress towards reaching the
// desired state. SyncPod exits with isTerminal when the pod was detected to
// have reached a terminal lifecycle phase due to container exits (for
// RestartNever or RestartOnFailure) and the next method invoked will be
// SyncTerminatingPod. If the pod terminates for any other reason, SyncPod
// will receive a context cancellation and should exit as soon as possible.
//
// Arguments:
//
// updateType - whether this is a create (first time) or an update, should
// only be used for metrics since this method must be reentrant
//
// pod - the pod that is being set up
//
// mirrorPod - the mirror pod known to the kubelet for this pod, if any
//
// podStatus - the most recent pod status observed for this pod which can
// be used to determine the set of actions that should be taken during
// this loop of SyncPod
//
// The workflow is:
// - If the pod is being created, record pod worker start latency
// - Call generateAPIPodStatus to prepare an v1.PodStatus for the pod
// - If the pod is being seen as running for the first time, record pod
// start latency
// - Update the status of the pod in the status manager
// - Stop the pod's containers if it should not be running due to soft
// admission
// - Ensure any background tracking for a runnable pod is started
// - Create a mirror pod if the pod is a static pod, and does not
// already have a mirror pod
// - Create the data directories for the pod if they do not exist
// - Wait for volumes to attach/mount
// - Fetch the pull secrets for the pod
// - Call the container runtime's SyncPod callback
// - Update the traffic shaping for the pod's ingress and egress limits
//
// If any step of this workflow errors, the error is returned, and is repeated
// on the next SyncPod call.
//
// This operation writes all events that are dispatched in order to provide
// the most accurate information possible about an error situation to aid debugging.
// Callers should not write an event if this operation returns an error.
func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (isTerminal bool, err error) {
	// Open a tracing span for the whole sync; errors are recorded on it in
	// the deferred close below.
	ctx, otelSpan := kl.tracer.Start(ctx, "syncPod", trace.WithAttributes(
		semconv.K8SPodUIDKey.String(string(pod.UID)),
		attribute.String("k8s.pod", klog.KObj(pod).String()),
		semconv.K8SPodNameKey.String(pod.Name),
		attribute.String("k8s.pod.update_type", updateType.String()),
		semconv.K8SNamespaceNameKey.String(pod.Namespace),
	))
	logger := klog.FromContext(ctx)
	logger.V(4).Info("SyncPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
	defer func() {
		if err != nil {
			otelSpan.RecordError(err)
			otelSpan.SetStatus(codes.Error, err.Error())
		}
		logger.V(4).Info("SyncPod exit", "pod", klog.KObj(pod), "podUID", pod.UID, "isTerminal", isTerminal)
		otelSpan.End()
	}()
	// Latency measurements for the main workflow are relative to the
	// first time the pod was seen by kubelet.
	var firstSeenTime time.Time
	if firstSeenTimeStr, ok := pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey]; ok {
		firstSeenTime = kubetypes.ConvertToTimestamp(firstSeenTimeStr).Get()
	}
	// Record pod worker start latency if being created
	// TODO: make pod workers record their own latencies
	if updateType == kubetypes.SyncPodCreate {
		if !firstSeenTime.IsZero() {
			// This is the first time we are syncing the pod. Record the latency
			// since kubelet first saw the pod if firstSeenTime is set.
			metrics.PodWorkerStartDuration.Observe(metrics.SinceInSeconds(firstSeenTime))
		} else {
			logger.V(3).Info("First seen time not recorded for pod",
				"podUID", pod.UID,
				"pod", klog.KObj(pod))
		}
	}
	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
		// Check whether a resize is in progress so we can set the PodResizeInProgressCondition accordingly.
		if kl.containerRuntime.IsPodResizeInProgress(pod, podStatus) {
			kl.statusManager.SetPodResizeInProgressCondition(pod.UID, "", "", pod.Generation)
		} else if generation, cleared := kl.statusManager.ClearPodResizeInProgressCondition(pod.UID); cleared {
			// (Allocated == Actual) => clear the resize in-progress status.
			msg := events.PodResizeCompletedMsg(logger, pod, generation)
			kl.recorder.WithLogger(logger).Eventf(pod, v1.EventTypeNormal, events.ResizeCompleted, msg)
		}
		// TODO(natasha41575): There is a race condition here, where the goroutine in the
		// allocation manager may allocate a new resize and unconditionally set the
		// PodResizeInProgressCondition before we set the status below.
	}
	// Generate final API pod status with pod and status manager status
	apiPodStatus := kl.generateAPIPodStatus(ctx, pod, podStatus, false)
	// The pod IP may be changed in generateAPIPodStatus if the pod is using host network. (See #24576)
	// TODO(random-liu): After writing pod spec into container labels, check whether pod is using host network, and
	// set pod IP to hostIP directly in runtime.GetPodStatus
	podStatus.IPs = make([]string, 0, len(apiPodStatus.PodIPs))
	for _, ipInfo := range apiPodStatus.PodIPs {
		podStatus.IPs = append(podStatus.IPs, ipInfo.IP)
	}
	if len(podStatus.IPs) == 0 && len(apiPodStatus.PodIP) > 0 {
		podStatus.IPs = []string{apiPodStatus.PodIP}
	}
	// If the pod is terminal, we don't need to continue to setup the pod
	if apiPodStatus.Phase == v1.PodSucceeded || apiPodStatus.Phase == v1.PodFailed {
		kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
		isTerminal = true
		return isTerminal, nil
	}
	// Record the time it takes for the pod to become running
	// since kubelet first saw the pod if firstSeenTime is set.
	existingStatus, ok := kl.statusManager.GetPodStatus(pod.UID)
	if !ok || existingStatus.Phase == v1.PodPending && apiPodStatus.Phase == v1.PodRunning &&
		!firstSeenTime.IsZero() {
		metrics.PodStartDuration.Observe(metrics.SinceInSeconds(firstSeenTime))
	}
	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
	// If the network plugin is not ready, only start the pod if it uses the host network
	if err := kl.runtimeState.networkErrors(); err != nil && !kubecontainer.IsHostNetworkPod(pod) {
		kl.recorder.WithLogger(logger).Eventf(pod, v1.EventTypeWarning, events.NetworkNotReady, "%s: %v", NetworkNotReadyErrorMsg, err)
		return false, fmt.Errorf("%s: %v", NetworkNotReadyErrorMsg, err)
	}
	// ensure the kubelet knows about referenced secrets or configmaps used by the pod
	if !kl.podWorkers.IsPodTerminationRequested(pod.UID) {
		if kl.secretManager != nil {
			kl.secretManager.RegisterPod(pod)
		}
		if kl.configMapManager != nil {
			kl.configMapManager.RegisterPod(pod)
		}
	}
	// Create Cgroups for the pod and apply resource parameters
	// to them if cgroups-per-qos flag is enabled.
	pcm := kl.containerManager.NewPodContainerManager()
	// If pod has already been terminated then we need not create
	// or update the pod's cgroup
	// TODO: once context cancellation is added this check can be removed
	if !kl.podWorkers.IsPodTerminationRequested(pod.UID) {
		// When the kubelet is restarted with the cgroups-per-qos
		// flag enabled, all the pod's running containers
		// should be killed intermittently and brought back up
		// under the qos cgroup hierarchy.
		// Check if this is the pod's first sync
		firstSync := true
		for _, containerStatus := range apiPodStatus.ContainerStatuses {
			if containerStatus.State.Running != nil {
				firstSync = false
				break
			}
		}
		// Don't kill containers in pod if pod's cgroups already
		// exists or the pod is running for the first time
		podKilled := false
		if !pcm.Exists(pod) && !firstSync {
			p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus)
			if err := kl.killPod(ctx, pod, p, nil); err == nil {
				podKilled = true
			} else {
				if wait.Interrupted(err) {
					// Deliberate cancellation; not an error for this sync.
					return false, nil
				}
				logger.Error(err, "KillPod failed", "pod", klog.KObj(pod), "podStatus", podStatus)
			}
		}
		// Create and Update pod's Cgroups
		// Don't create cgroups for run once pod if it was killed above
		// The current policy is not to restart the run once pods when
		// the kubelet is restarted with the new flag as run once pods are
		// expected to run only once and if the kubelet is restarted then
		// they are not expected to run again.
		// We don't create and apply updates to cgroup if its a run once pod and was killed above
		runOnce := pod.Spec.RestartPolicy == v1.RestartPolicyNever
		// With ContainerRestartRules, if any container is restartable, the pod should be restarted.
		if utilfeature.DefaultFeatureGate.Enabled(features.ContainerRestartRules) {
			for _, c := range pod.Spec.Containers {
				if podutil.IsContainerRestartable(pod.Spec, c) {
					runOnce = false
				}
			}
		}
		if !podKilled || !runOnce {
			if !pcm.Exists(pod) {
				if err := kl.containerManager.UpdateQOSCgroups(logger); err != nil {
					logger.V(2).Info("Failed to update QoS cgroups while syncing pod", "pod", klog.KObj(pod), "err", err)
				}
				if err := pcm.EnsureExists(logger, pod); err != nil {
					kl.recorder.WithLogger(logger).Eventf(pod, v1.EventTypeWarning, events.FailedToCreatePodContainer, "unable to ensure pod container exists: %v", err)
					return false, fmt.Errorf("failed to ensure that the pod: %v cgroups exist and are correctly applied: %v", pod.UID, err)
				}
				if err = kl.containerRuntime.UpdateActuatedPodLevelResources(pod); err != nil {
					return false, fmt.Errorf("failed to update the state of pod-level resources for the pod %v : %w", pod.UID, err)
				}
			}
		}
	}
	// Create Mirror Pod for Static Pod if it doesn't already exist
	kl.tryReconcileMirrorPods(ctx, pod, mirrorPod)
	// Make data directories for the pod
	if err := kl.makePodDataDirs(pod); err != nil {
		kl.recorder.WithLogger(logger).Eventf(pod, v1.EventTypeWarning, events.FailedToMakePodDataDirectories, "error making pod data directories: %v", err)
		logger.Error(err, "Unable to make pod data directories for pod", "pod", klog.KObj(pod))
		return false, err
	}
	// Wait for volumes to attach/mount
	if err := kl.volumeManager.WaitForAttachAndMount(ctx, pod); err != nil {
		var volumeAttachLimitErr *volumemanager.VolumeAttachLimitExceededError
		if errors.As(err, &volumeAttachLimitErr) {
			// Attach limit exceeded: reject the pod outright (terminal).
			kl.rejectPod(ctx, pod, volumemanager.VolumeAttachmentLimitExceededReason, volumeAttachLimitErr.Error())
			recordAdmissionRejection(volumemanager.VolumeAttachmentLimitExceededReason)
			return true, nil
		}
		if !wait.Interrupted(err) {
			kl.recorder.WithLogger(logger).Eventf(pod, v1.EventTypeWarning, events.FailedMountVolume, "Unable to attach or mount volumes: %v", err)
			logger.Error(err, "Unable to attach or mount volumes for pod; skipping pod", "pod", klog.KObj(pod))
		}
		return false, err
	}
	// Fetch the pull secrets for the pod
	pullSecrets := kl.getPullSecretsForPod(logger, pod)
	// Ensure the pod is being probed
	kl.probeManager.AddPod(ctx, pod)
	// TODO(#113606): use cancellation from the incoming context parameter, which comes from the pod worker.
	// Currently, using cancellation from that context causes test failures. To remove this WithoutCancel,
	// any wait.Interrupted errors need to be filtered from result and bypass the reasonCache - cancelling
	// the context for SyncPod is a known and deliberate error, not a generic error.
	// Use WithoutCancel instead of a new context.TODO() to propagate trace context
	// Call the container runtime's SyncPod callback
	sctx := context.WithoutCancel(ctx)
	restartingAllContainers := false
	if utilfeature.DefaultFeatureGate.Enabled(features.RestartAllContainersOnContainerExits) {
		for _, cond := range apiPodStatus.Conditions {
			if cond.Type == v1.AllContainersRestarting && cond.Status == v1.ConditionTrue {
				restartingAllContainers = true
			}
		}
	}
	result := kl.containerRuntime.SyncPod(sctx, pod, podStatus, pullSecrets, kl.crashLoopBackOff, restartingAllContainers)
	kl.reasonCache.Update(pod.UID, result)
	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
		for _, r := range result.SyncResults {
			if r.Action == kubecontainer.ResizePodInPlace && r.Error != nil {
				// If the condition already exists, the observedGeneration does not get updated.
				if generation, updated := kl.statusManager.SetPodResizeInProgressCondition(pod.UID, v1.PodReasonError, r.Message, pod.Generation); updated {
					msg := events.PodResizeErrorMsg(logger, pod, generation, r.Message)
					kl.recorder.WithLogger(logger).Eventf(pod, v1.EventTypeWarning, events.ResizeError, msg)
				}
			}
		}
	}
	return false, result.Error()
}
// SyncTerminatingPod is expected to terminate all running containers in a pod. Once this method
// returns without error, the pod is considered to be terminated and it will be safe to clean up any
// pod state that is tied to the lifetime of running containers. The next method invoked will be
// SyncTerminatedPod. This method is expected to return with the grace period provided and the
// provided context may be cancelled if the duration is exceeded. The method may also be interrupted
// with a context cancellation if the grace period is shortened by the user or the kubelet (such as
// during eviction). This method is not guaranteed to be called if a pod is force deleted from the
// configuration and the kubelet is restarted - SyncTerminatingRuntimePod handles those orphaned
// pods.
func (kl *Kubelet) SyncTerminatingPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, gracePeriod *int64, podStatusFn func(*v1.PodStatus)) (err error) {
	// TODO(#113606): connect this with the incoming context parameter, which comes from the pod worker.
	// Currently, using that context causes test failures.
	logger := klog.FromContext(ctx)
	// Keep the caller's logger but detach from the caller's cancellation (see TODO above).
	ctx = klog.NewContext(context.TODO(), logger)
	logger.V(4).Info("SyncTerminatingPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
	ctx, otelSpan := kl.tracer.Start(ctx, "syncTerminatingPod", trace.WithAttributes(
		semconv.K8SPodUIDKey.String(string(pod.UID)),
		attribute.String("k8s.pod", klog.KObj(pod).String()),
		semconv.K8SPodNameKey.String(pod.Name),
		semconv.K8SNamespaceNameKey.String(pod.Namespace),
	))
	// Record any terminal error on the trace span before it is closed; err is the
	// named return value, so this observes whatever the method ultimately returns.
	defer func() {
		if err != nil {
			otelSpan.RecordError(err)
			otelSpan.SetStatus(codes.Error, err.Error())
		}
		otelSpan.End()
		logger.V(4).Info("SyncTerminatingPod exit", "pod", klog.KObj(pod), "podUID", pod.UID)
	}()
	// Publish the latest status — with any caller-supplied mutation applied —
	// before containers are killed. NOTE(review): the final bool argument
	// presumably marks the pod terminal (false here, true after the kill below)
	// — confirm against generateAPIPodStatus.
	apiPodStatus := kl.generateAPIPodStatus(ctx, pod, podStatus, false)
	if podStatusFn != nil {
		podStatusFn(&apiPodStatus)
	}
	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
	if gracePeriod != nil {
		logger.V(4).Info("Pod terminating with grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "gracePeriod", *gracePeriod)
	} else {
		logger.V(4).Info("Pod terminating with grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "gracePeriod", nil)
	}
	// Stop liveness and startup probes first so probe failures cannot trigger
	// container restarts while the pod is being killed.
	kl.probeManager.StopLivenessAndStartup(pod)
	p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus)
	if err := kl.killPod(ctx, pod, p, gracePeriod); err != nil {
		kl.recorder.WithLogger(logger).Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err)
		// there was an error killing the pod, so we return that error directly
		utilruntime.HandleError(err)
		return err
	}
	// Once the containers are stopped, we can stop probing for liveness and readiness.
	// TODO: once a pod is terminal, certain probes (liveness exec) could be stopped immediately after
	// the detection of a container shutdown or (for readiness) after the first failure. Tracked as
	// https://github.com/kubernetes/kubernetes/issues/107894 although may not be worth optimizing.
	kl.probeManager.RemovePod(pod)
	// Guard against consistency issues in KillPod implementations by checking that there are no
	// running containers. This method is invoked infrequently so this is effectively free and can
	// catch race conditions introduced by callers updating pod status out of order.
	// TODO: have KillPod return the terminal status of stopped containers and write that into the
	// cache immediately
	stoppedPodStatus, err := kl.containerRuntime.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
	if err != nil {
		logger.Error(err, "Unable to read pod status prior to final pod termination", "pod", klog.KObj(pod), "podUID", pod.UID)
		return err
	}
	// Carry fields (e.g. pod IPs) from the pre-stop status; the runtime no longer
	// reports them for a stopped pod.
	preserveDataFromBeforeStopping(stoppedPodStatus, podStatus)
	var runningContainers []string
	// container is a compact, loggable snapshot of a container's terminal state.
	type container struct {
		Name       string
		State      string
		ExitCode   int
		FinishedAt string
	}
	var containers []container
	// Evaluate verbosity once; the per-container snapshot below is only built
	// when V(4) logging is actually enabled.
	loggerV := logger.V(4)
	loggerVEnabled := loggerV.Enabled()
	for _, s := range stoppedPodStatus.ContainerStatuses {
		if s.State == kubecontainer.ContainerStateRunning {
			runningContainers = append(runningContainers, s.ID.String())
		}
		if loggerVEnabled {
			containers = append(containers, container{Name: s.Name, State: string(s.State), ExitCode: s.ExitCode, FinishedAt: s.FinishedAt.UTC().Format(time.RFC3339Nano)})
		}
	}
	if loggerVEnabled {
		// Sort by name for a deterministic log line.
		sort.Slice(containers, func(i, j int) bool { return containers[i].Name < containers[j].Name })
		logger.V(4).Info("Post-termination container state", "pod", klog.KObj(pod), "podUID", pod.UID, "containers", containers)
	}
	if len(runningContainers) > 0 {
		return fmt.Errorf("detected running containers after a successful KillPod, CRI violation: %v", runningContainers)
	}
	// NOTE: resources must be unprepared AFTER all containers have stopped
	// and BEFORE the pod status is changed on the API server
	// to avoid race conditions with the resource deallocation code in kubernetes core.
	if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
		if err := kl.UnprepareDynamicResources(ctx, pod); err != nil {
			return err
		}
	}
	// Compute and update the status in cache once the pods are no longer running.
	// The computation is done here to ensure the pod status used for it contains
	// information about the container end states (including exit codes) - when
	// SyncTerminatedPod is called the containers may already be removed.
	apiPodStatus = kl.generateAPIPodStatus(ctx, pod, stoppedPodStatus, true)
	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
	// we have successfully stopped all containers, the pod is terminating, our status is "done"
	logger.V(4).Info("Pod termination stopped all running containers", "pod", klog.KObj(pod), "podUID", pod.UID)
	return nil
}
// preserveDataFromBeforeStopping copies fields that must still be reported to
// the API server after termination — currently the pod IPs — from the status
// captured while the pod was running onto the status captured after it
// stopped, since containerRuntime.GetPodStatus no longer returns them for a
// stopped pod. A kubelet restart after the pod stops may still lose this data.
func preserveDataFromBeforeStopping(stopped, running *kubecontainer.PodStatus) {
	stopped.IPs = running.IPs
}
// SyncTerminatingRuntimePod kills the remaining running containers of a pod
// the kubelet has no configuration for. Once it returns without error, any
// remaining local state can be safely cleaned up by background processes in
// each subsystem. Because only the runtime's view of the pod is available, no
// lifecycle operations are performed and no status is written back — with the
// configuration source gone there is nowhere to send it.
func (kl *Kubelet) SyncTerminatingRuntimePod(ctx context.Context, runningPod *kubecontainer.Pod) error {
	// TODO(#113606): connect this with the incoming context parameter, which comes from the pod worker.
	// Currently, using that context causes test failures.
	log := klog.FromContext(ctx)
	ctx = klog.NewContext(context.TODO(), log)
	apiPod := runningPod.ToAPIPod()
	log.V(4).Info("SyncTerminatingRuntimePod enter", "pod", klog.KObj(apiPod), "podUID", apiPod.UID)
	defer log.V(4).Info("SyncTerminatingRuntimePod exit", "pod", klog.KObj(apiPod), "podUID", apiPod.UID)
	// With all other information about the pod lost, kill it directly.
	log.V(4).Info("Orphaned running pod terminating without grace period", "pod", klog.KObj(apiPod), "podUID", apiPod.UID)
	// TODO: this should probably be zero, to bypass any waiting (needs fixes in container runtime)
	grace := int64(1)
	if err := kl.killPod(ctx, apiPod, *runningPod, &grace); err != nil {
		kl.recorder.WithLogger(log).Eventf(apiPod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err)
		// Report the kill failure, then surface it to the caller.
		utilruntime.HandleError(err)
		return err
	}
	log.V(4).Info("Pod termination stopped all running orphaned containers", "pod", klog.KObj(apiPod), "podUID", apiPod.UID)
	return nil
}
// SyncTerminatedPod cleans up a pod that has terminated (has no running containers).
// The invocations in this call are expected to tear down all pod resources.
// When this method exits the pod is expected to be ready for cleanup. This method
// reduces the latency of pod cleanup but is not guaranteed to get called in all scenarios.
//
// Because the kubelet has no local store of information, all actions in this method that modify
// on-disk state must be reentrant and be garbage collected by HandlePodCleanups or a separate loop.
// This typically occurs when a pod is force deleted from configuration (local disk or API) and the
// kubelet restarts in the middle of the action.
func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) error {
	ctx, otelSpan := kl.tracer.Start(ctx, "syncTerminatedPod", trace.WithAttributes(
		semconv.K8SPodUIDKey.String(string(pod.UID)),
		attribute.String("k8s.pod", klog.KObj(pod).String()),
		semconv.K8SPodNameKey.String(pod.Name),
		semconv.K8SNamespaceNameKey.String(pod.Namespace),
	))
	logger := klog.FromContext(ctx)
	defer otelSpan.End()
	logger.V(4).Info("SyncTerminatedPod enter", "pod", klog.KObj(pod), "podUID", pod.UID)
	defer logger.V(4).Info("SyncTerminatedPod exit", "pod", klog.KObj(pod), "podUID", pod.UID)
	// generate the final status of the pod
	// TODO: should we simply fold this into TerminatePod? that would give a single pod update
	apiPodStatus := kl.generateAPIPodStatus(ctx, pod, podStatus, true)
	kl.statusManager.SetPodStatus(logger, pod, apiPodStatus)
	// volumes are unmounted after the pod worker reports ShouldPodRuntimeBeRemoved (which is satisfied
	// before syncTerminatedPod is invoked)
	if err := kl.volumeManager.WaitForUnmount(ctx, pod); err != nil {
		return err
	}
	logger.V(4).Info("Pod termination unmounted volumes", "pod", klog.KObj(pod), "podUID", pod.UID)
	// This waiting loop relies on the background cleanup which starts after pod workers respond
	// true for ShouldPodRuntimeBeRemoved, which happens after `SyncTerminatingPod` is completed.
	// Poll every 100ms until the pod's volume directories are gone (or ctx is cancelled).
	if err := wait.PollUntilContextCancel(ctx, 100*time.Millisecond, true, func(ctx context.Context) (bool, error) {
		volumesExist := kl.podVolumesExist(logger, pod.UID)
		if volumesExist {
			logger.V(3).Info("Pod is terminated, but some volumes have not been cleaned up", "pod", klog.KObj(pod), "podUID", pod.UID)
		}
		return !volumesExist, nil
	}); err != nil {
		return err
	}
	logger.V(3).Info("Pod termination cleaned up volume paths", "pod", klog.KObj(pod), "podUID", pod.UID)
	// After volume unmount is complete, let the secret and configmap managers know we're done with this pod
	if kl.secretManager != nil {
		kl.secretManager.UnregisterPod(pod)
	}
	if kl.configMapManager != nil {
		kl.configMapManager.UnregisterPod(pod)
	}
	// Note: we leave pod containers to be reclaimed in the background since dockershim requires the
	// container for retrieving logs and we want to make sure logs are available until the pod is
	// physically deleted.
	// remove any cgroups in the hierarchy for pods that are no longer running.
	if kl.cgroupsPerQOS {
		pcm := kl.containerManager.NewPodContainerManager()
		name, _ := pcm.GetPodContainerName(pod)
		if err := pcm.Destroy(logger, name); err != nil {
			return err
		}
		logger.V(4).Info("Pod termination removed cgroups", "pod", klog.KObj(pod), "podUID", pod.UID)
	}
	// Release the pod's user-namespace ID range allocation, if any.
	kl.usernsManager.Release(logger, pod.UID)
	// mark the final pod status
	kl.statusManager.TerminatePod(logger, pod)
	logger.V(4).Info("Pod is terminated and will need no more status updates", "pod", klog.KObj(pod), "podUID", pod.UID)
	return nil
}
// getPodsToSync returns the pods that should be resynchronized: pods whose
// work-queue entry has become ready, plus any pod that an internal
// PodSyncLoopHandler requests a sync for.
//
// Orphaned pods (known only to the pod worker after deletion from
// configuration) are not returned here; HandlePodCleanups syncs those as a
// consequence of driving the state machine to completion.
//
// TODO: Consider synchronizing all pods which have not recently been acted on
// to be resilient to bugs that might prevent updates from being delivered
// (such as the previous bug with orphaned pods). Instead of asking the work
// queue for pending work, consider asking the PodWorker which pods should be
// synced.
func (kl *Kubelet) getPodsToSync() []*v1.Pod {
	allPods := kl.podManager.GetPods()
	// Collect the UIDs whose queued work is due.
	ready := sets.New[string]()
	for _, uid := range kl.workQueue.GetWork() {
		ready.Insert(string(uid))
	}
	var out []*v1.Pod
	for _, pod := range allPods {
		if ready.Has(string(pod.UID)) {
			// The pod's queued work is ready.
			out = append(out, pod)
			continue
		}
		// Otherwise, sync if any internal module asks for it.
		for _, h := range kl.PodSyncLoopHandlers {
			if h.ShouldSync(pod) {
				out = append(out, pod)
				break
			}
		}
	}
	return out
}
// deletePod removes the pod from the kubelet's internal state by asking the
// associated pod worker to kill it asynchronously. Volume and directory
// cleanup is deliberately left to the periodic cleanup routine.
//
// It returns an error when the pod is nil or when not all configuration
// sources have reported yet.
func (kl *Kubelet) deletePod(logger klog.Logger, pod *v1.Pod) error {
	switch {
	case pod == nil:
		return fmt.Errorf("deletePod does not allow nil pod")
	case !kl.sourcesReady.AllReady():
		// Deleting before every source has reported could accidentally remove
		// pods owned by a source that has not checked in yet.
		return fmt.Errorf("skipping delete because sources aren't ready yet")
	}
	logger.V(3).Info("Pod has been deleted and must be killed", "pod", klog.KObj(pod), "podUID", pod.UID)
	kl.podWorkers.UpdatePod(UpdatePodOptions{
		Pod:        pod,
		UpdateType: kubetypes.SyncPodKill,
	})
	// We leave the volume/directory cleanup to the periodic cleanup routine.
	return nil
}
// rejectPod emits a warning event carrying the given reason and message, and
// moves the pod to the Failed phase in the status manager.
func (kl *Kubelet) rejectPod(ctx context.Context, pod *v1.Pod, reason, message string) {
	logger := klog.FromContext(ctx)
	kl.recorder.WithLogger(logger).Eventf(pod, v1.EventTypeWarning, reason, message)
	status := v1.PodStatus{
		QOSClass: v1qos.GetPodQOS(pod), // keep it as is
		Phase:    v1.PodFailed,
		Reason:   reason,
		Message:  "Pod was rejected: " + message,
	}
	kl.statusManager.SetPodStatus(logger, pod, status)
}
// recordAdmissionRejection increments the admission-rejection metric for the
// given reason. The raw reason could have high cardinality, so anything
// outside the known allowlist is collapsed into one of two aggregate labels
// to keep the metric bounded.
func recordAdmissionRejection(reason string) {
	switch {
	case admissionRejectionReasons.Has(reason):
		// Known reason: record it verbatim.
		metrics.AdmissionRejectionsTotal.WithLabelValues(reason).Inc()
	case strings.HasPrefix(reason, lifecycle.InsufficientResourcePrefix):
		// Non-extended resources (cpu, memory, ephemeral-storage, pods) are
		// already part of the allowlist above, so this bucket captures
		// extended-resource shortages.
		metrics.AdmissionRejectionsTotal.WithLabelValues("OutOfExtendedResources").Inc()
	default:
		metrics.AdmissionRejectionsTotal.WithLabelValues("Other").Inc()
	}
}
// syncLoop is the kubelet's main processing loop. It merges changes arriving
// from the three config channels (file, apiserver, http) and, for every change
// seen, reconciles running state against desired state. With no config
// changes, the last known desired state is still re-synced every
// sync-frequency seconds. The loop runs until syncLoopIteration reports that
// the update channel was closed.
func (kl *Kubelet) syncLoop(ctx context.Context, updates <-chan kubetypes.PodUpdate, handler SyncHandler) {
	logger := klog.FromContext(ctx)
	logger.Info("Starting kubelet main sync loop")
	// syncTicker wakes the loop to look for pod workers needing a sync; a
	// one-second period is ample given the 10s default sync interval.
	syncTicker := time.NewTicker(time.Second)
	defer syncTicker.Stop()
	housekeepingTicker := time.NewTicker(housekeepingPeriod)
	defer housekeepingTicker.Stop()
	plegCh := kl.pleg.Watch()
	// Exponential backoff parameters used while the runtime is unhealthy.
	const (
		base   = 100 * time.Millisecond
		max    = 5 * time.Second
		factor = 2
	)
	backoff := base
	// The resolv.conf limit check is node-wide rather than per-pod, so running
	// it once on loop entry is sufficient.
	if kl.dnsConfigurer != nil && kl.dnsConfigurer.ResolverConfig != "" {
		kl.dnsConfigurer.CheckLimitsForResolvConf(klog.FromContext(ctx))
	}
	for {
		if err := kl.runtimeState.runtimeErrors(); err != nil {
			logger.Error(err, "Skipping pod synchronization")
			// Back off exponentially (capped at max) while the runtime is broken.
			time.Sleep(backoff)
			backoff = time.Duration(math.Min(float64(max), factor*float64(backoff)))
			continue
		}
		// Healthy again: restart the backoff from its base value.
		backoff = base
		kl.syncLoopMonitor.Store(kl.clock.Now())
		if !kl.syncLoopIteration(ctx, updates, handler, syncTicker.C, housekeepingTicker.C, plegCh) {
			break
		}
		kl.syncLoopMonitor.Store(kl.clock.Now())
	}
}
// syncLoopIteration reads from various channels and dispatches pods to the
// given handler.
//
// Arguments:
// 1.  configCh:       a channel to read config events from
// 2.  handler:        the SyncHandler to dispatch pods to
// 3.  syncCh:         a channel to read periodic sync events from
// 4.  housekeepingCh: a channel to read housekeeping events from
// 5.  plegCh:         a channel to read PLEG updates from
//
// Events are also read from the kubelet liveness manager's update channel.
//
// The workflow is to read from one of the channels, handle that event, and
// update the timestamp in the sync loop monitor.
//
// Here is an appropriate place to note that despite the syntactical
// similarity to the switch statement, the case statements in a select are
// evaluated in a pseudorandom order if there are multiple channels ready to
// read from when the select is evaluated.  In other words, case statements
// are evaluated in random order, and you can not assume that the case
// statements evaluate in order if multiple channels have events.
//
// With that in mind, in truly no particular order, the different channels
// are handled as follows:
//
//   - configCh: dispatch the pods for the config change to the appropriate
//     handler callback for the event type
//   - plegCh: update the runtime cache; sync pod
//   - syncCh: sync all pods waiting for sync
//   - housekeepingCh: trigger cleanup of pods
//   - health manager: sync pods that have failed or in which one or more
//     containers have failed health checks
//
// It returns false only when the config channel has been closed (signaling
// the sync loop should exit); true otherwise.
func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubetypes.PodUpdate, handler SyncHandler,
	syncCh <-chan time.Time, housekeepingCh <-chan time.Time, plegCh <-chan *pleg.PodLifecycleEvent) bool {
	logger := klog.FromContext(ctx)
	select {
	case u, open := <-configCh:
		// Update from a config source; dispatch it to the right handler
		// callback.
		if !open {
			logger.Error(nil, "Update channel is closed, exiting the sync loop")
			return false
		}
		switch u.Op {
		case kubetypes.ADD:
			logger.V(2).Info("SyncLoop ADD", "source", u.Source, "pods", klog.KObjSlice(u.Pods))
			// After restarting, kubelet will get all existing pods through
			// ADD as if they are new pods. These pods will then go through the
			// admission process and *may* be rejected. This can be resolved
			// once we have checkpointing.
			handler.HandlePodAdditions(ctx, u.Pods)
		case kubetypes.UPDATE:
			logger.V(2).Info("SyncLoop UPDATE", "source", u.Source, "pods", klog.KObjSlice(u.Pods))
			handler.HandlePodUpdates(ctx, u.Pods)
		case kubetypes.REMOVE:
			logger.V(2).Info("SyncLoop REMOVE", "source", u.Source, "pods", klog.KObjSlice(u.Pods))
			handler.HandlePodRemoves(ctx, u.Pods)
		case kubetypes.RECONCILE:
			logger.V(4).Info("SyncLoop RECONCILE", "source", u.Source, "pods", klog.KObjSlice(u.Pods))
			handler.HandlePodReconcile(ctx, u.Pods)
		case kubetypes.DELETE:
			logger.V(2).Info("SyncLoop DELETE", "source", u.Source, "pods", klog.KObjSlice(u.Pods))
			// DELETE is treated as a UPDATE because of graceful deletion.
			handler.HandlePodUpdates(ctx, u.Pods)
		default:
			logger.Error(nil, "Invalid operation type received", "operation", u.Op)
		}
		// Mark this source as having delivered at least one event.
		kl.sourcesReady.AddSource(u.Source)
	case e := <-plegCh:
		if isSyncPodWorthy(e) {
			// PLEG event for a pod; sync it.
			if pod, ok := kl.podManager.GetPodByUID(e.ID); ok {
				logger.V(2).Info("SyncLoop (PLEG): event for pod", "pod", klog.KObj(pod), "event", e)
				handler.HandlePodSyncs(ctx, []*v1.Pod{pod})
			} else {
				// If the pod no longer exists, ignore the event.
				logger.V(4).Info("SyncLoop (PLEG): pod does not exist, ignore irrelevant event", "event", e)
			}
		}
		// Reclaim dead containers eagerly so their resources are freed.
		if e.Type == pleg.ContainerDied {
			if containerID, ok := e.Data.(string); ok {
				kl.cleanUpContainersInPod(e.ID, containerID)
			}
		}
	case <-syncCh:
		// Sync pods waiting for sync
		podsToSync := kl.getPodsToSync()
		if len(podsToSync) == 0 {
			// Nothing to do; break leaves the select (not a loop).
			break
		}
		logger.V(4).Info("SyncLoop (SYNC) pods", "total", len(podsToSync), "pods", klog.KObjSlice(podsToSync))
		handler.HandlePodSyncs(ctx, podsToSync)
	case update := <-kl.livenessManager.Updates():
		// Only liveness *failures* require action (a restart via sync).
		if update.Result == proberesults.Failure {
			handleProbeSync(ctx, kl, update, handler, "liveness", "unhealthy")
		}
	case update := <-kl.readinessManager.Updates():
		ready := update.Result == proberesults.Success
		kl.statusManager.SetContainerReadiness(logger, update.PodUID, update.ContainerID, ready)
		status := "not ready"
		if ready {
			status = "ready"
		}
		handleProbeSync(ctx, kl, update, handler, "readiness", status)
	case update := <-kl.startupManager.Updates():
		started := update.Result == proberesults.Success
		kl.statusManager.SetContainerStartup(logger, update.PodUID, update.ContainerID, started)
		status := "unhealthy"
		if started {
			status = "started"
		}
		handleProbeSync(ctx, kl, update, handler, "startup", status)
	case update := <-kl.containerManager.Updates():
		// Resolve the affected UIDs to pods that still exist, then sync them.
		pods := []*v1.Pod{}
		for _, p := range update.PodUIDs {
			if pod, ok := kl.podManager.GetPodByUID(types.UID(p)); ok {
				logger.V(3).Info("SyncLoop (containermanager): event for pod", "pod", klog.KObj(pod), "event", update)
				pods = append(pods, pod)
			} else {
				// If the pod no longer exists, ignore the event.
				logger.V(4).Info("SyncLoop (containermanager): pod does not exist, ignore devices updates", "event", update)
			}
		}
		if len(pods) > 0 {
			// Updating the pod by syncing it again
			// We do not apply the optimization by updating the status directly, but can do it later
			handler.HandlePodSyncs(ctx, pods)
		}
	case <-housekeepingCh:
		if !kl.sourcesReady.AllReady() {
			// If the sources aren't ready or volume manager has not yet synced the states,
			// skip housekeeping, as we may accidentally delete pods from unready sources.
			logger.V(4).Info("SyncLoop (housekeeping, skipped): sources aren't ready yet")
		} else {
			start := time.Now()
			logger.V(4).Info("SyncLoop (housekeeping)")
			if err := handler.HandlePodCleanups(ctx); err != nil {
				logger.Error(err, "Failed cleaning pods")
			}
			// Warn when housekeeping overruns its expected budget.
			duration := time.Since(start)
			if duration > housekeepingWarningDuration {
				logger.Error(fmt.Errorf("housekeeping took too long"), "Housekeeping took longer than expected", "expected", housekeepingWarningDuration, "actual", duration.Round(time.Millisecond))
			}
			logger.V(4).Info("SyncLoop (housekeeping) end", "duration", duration.Round(time.Millisecond))
		}
	}
	return true
}
// handleProbeSync resolves a probe update to its pod and triggers a sync for
// that pod. probe and status are only used for log output.
func handleProbeSync(ctx context.Context, kl *Kubelet, update proberesults.Update, handler SyncHandler, probe, status string) {
	logger := klog.FromContext(ctx)
	// Look the pod up fresh from the pod manager; the copy carried by the
	// probe machinery is never updated after initialization.
	pod, found := kl.podManager.GetPodByUID(update.PodUID)
	if !found {
		// The pod is gone; drop the update.
		logger.V(4).Info("SyncLoop (probe): ignore irrelevant update", "probe", probe, "status", status, "update", update)
		return
	}
	logger.V(1).Info("SyncLoop (probe)", "probe", probe, "status", status, "pod", klog.KObj(pod))
	handler.HandlePodSyncs(ctx, []*v1.Pod{pod})
}
// HandlePodAdditions is the callback in SyncHandler for pods being added from
// a config source.
func (kl *Kubelet) HandlePodAdditions(ctx context.Context, pods []*v1.Pod) {
	start := kl.clock.Now()
	logger := klog.FromContext(ctx)
	// Process pods oldest-first so earlier pods win resource admission.
	sort.Sort(sliceutils.PodsByCreationTime(pods))
	// UIDs of pods whose spec differs from their recorded allocation; their
	// resizes are queued only after all pods in this batch are added.
	var pendingResizes []types.UID
	for _, pod := range pods {
		// Always add the pod to the pod manager. Kubelet relies on the pod
		// manager as the source of truth for the desired state. If a pod does
		// not exist in the pod manager, it means that it has been deleted in
		// the apiserver and no action (other than cleanup) is required.
		kl.podManager.AddPod(pod)
		kl.podCertificateManager.TrackPod(ctx, pod)
		pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod)
		if wasMirror {
			// A mirror pod without its static pod counterpart has nothing to run.
			if pod == nil {
				logger.V(2).Info("Unable to find pod for mirror pod, skipping", "mirrorPod", klog.KObj(mirrorPod), "mirrorPodUID", mirrorPod.UID)
				continue
			}
			// Mirror-pod events are routed to the static pod as an update, not a create.
			kl.podWorkers.UpdatePod(UpdatePodOptions{
				Pod:        pod,
				MirrorPod:  mirrorPod,
				UpdateType: kubetypes.SyncPodUpdate,
				StartTime:  start,
			})
			continue
		}
		// Only go through the admission process if the pod is not requested
		// for termination by another part of the kubelet. If the pod is already
		// using resources (previously admitted), the pod worker is going to be
		// shutting it down. If the pod hasn't started yet, we know that when
		// the pod worker is invoked it will also avoid setting up the pod, so
		// we simply avoid doing any work.
		// We also do not try to admit the pod that is already in terminated state.
		if !kl.podWorkers.IsPodTerminationRequested(pod.UID) && !podutil.IsPodPhaseTerminal(pod.Status.Phase) {
			// Check if we can admit the pod; if not, reject it.
			// We failed pods that we rejected, so activePods include all admitted
			// pods that are alive.
			if ok, reason, message := kl.allocationManager.AddPod(kl.GetActivePods(), pod); !ok {
				kl.rejectPod(ctx, pod, reason, message)
				// We avoid recording the metric in canAdmitPod because it's called
				// repeatedly during a resize, which would inflate the metric.
				// Instead, we record the metric here in HandlePodAdditions for new pods
				// and capture resize events separately.
				recordAdmissionRejection(reason)
				continue
			}
			if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
				// Backfill the queue of pending resizes, but only after all the pods have
				// been added. This ensures that no resizes get resolved until all the
				// existing pods are added.
				_, updatedFromAllocation := kl.allocationManager.UpdatePodFromAllocation(pod)
				if updatedFromAllocation {
					pendingResizes = append(pendingResizes, pod.UID)
				}
			}
		}
		kl.podWorkers.UpdatePod(UpdatePodOptions{
			Pod:        pod,
			MirrorPod:  mirrorPod,
			UpdateType: kubetypes.SyncPodCreate,
			StartTime:  start,
		})
	}
	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
		kl.statusManager.BackfillPodResizeConditions(pods)
		// Queue the deferred resizes now that every pod in the batch is added.
		for _, uid := range pendingResizes {
			kl.allocationManager.PushPendingResize(uid)
		}
		if len(pendingResizes) > 0 {
			kl.allocationManager.RetryPendingResizes(allocation.TriggerReasonPodsAdded)
		}
	}
}
// HandlePodUpdates is the callback in the SyncHandler interface for pods
// being updated from a config source.
func (kl *Kubelet) HandlePodUpdates(ctx context.Context, pods []*v1.Pod) {
	start := kl.clock.Now()
	logger := klog.FromContext(ctx)
	for _, pod := range pods {
		// Capture the previous spec before the pod manager is overwritten, so
		// resize and feature checks below can diff old against new.
		oldPod, _ := kl.podManager.GetPodByUID(pod.UID)
		kl.podManager.UpdatePod(pod)
		pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod)
		if wasMirror {
			if pod == nil {
				logger.V(2).Info("Unable to find pod for mirror pod, skipping", "mirrorPod", klog.KObj(mirrorPod), "mirrorPodUID", mirrorPod.UID)
				continue
			}
		}
		if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
			// recordResizeOperations also emits resize metrics as a side effect.
			if recordResizeOperations(oldPod, pod) {
				_, updatedFromAllocation := kl.allocationManager.UpdatePodFromAllocation(pod)
				if updatedFromAllocation {
					kl.allocationManager.PushPendingResize(pod.UID)
					// TODO(natasha41575): If the resize is immediately actuated, it will trigger a pod sync
					// and we will end up calling UpdatePod twice. Figure out if there is a way to avoid this.
					kl.allocationManager.RetryPendingResizes(allocation.TriggerReasonPodUpdated)
				} else {
					// We can hit this case if a pending resize has been reverted,
					// so we need to clear the pending resize condition.
					kl.statusManager.ClearPodResizePendingCondition(pod.UID)
				}
			}
		}
		// Warn (event + log) when the update requires node features this node
		// does not declare; the update is still passed to the pod worker.
		if utilfeature.DefaultFeatureGate.Enabled(features.NodeDeclaredFeatures) && oldPod != nil {
			oldPodInfo := &ndf.PodInfo{Spec: &oldPod.Spec, Status: &oldPod.Status}
			newPodInfo := &ndf.PodInfo{Spec: &pod.Spec, Status: &pod.Status}
			reqs, err := kl.nodeDeclaredFeaturesFramework.InferForPodUpdate(oldPodInfo, newPodInfo, kl.version)
			if err != nil {
				logger.Error(err, "Failed to infer required features for pod update", "pod", klog.KObj(pod))
			}
			if reqs.Len() != 0 {
				matchResult, err := ndf.MatchNodeFeatureSet(reqs, kl.nodeDeclaredFeaturesSet)
				if err != nil {
					logger.Error(err, "Failed to match pod features with the node", "pod", klog.KObj(pod))
				}
				if !matchResult.IsMatch {
					missingNodeDeclaredFeatures := strings.Join(matchResult.UnsatisfiedRequirements, ", ")
					logger.Error(nil, "Pod requires node features that are not available", "missingFeatures", missingNodeDeclaredFeatures)
					kl.recorder.WithLogger(logger).Eventf(pod, v1.EventTypeWarning, events.FailedNodeDeclaredFeaturesCheck, "Pod requires node features that are not available: %s", missingNodeDeclaredFeatures)
				}
			}
		}
		kl.podWorkers.UpdatePod(UpdatePodOptions{
			Pod:        pod,
			MirrorPod:  mirrorPod,
			UpdateType: kubetypes.SyncPodUpdate,
			StartTime:  start,
		})
	}
}
// recordResizeOperations records if any of the pod level resources or
// containers need to be resized, and returns true if so.
//
// Both recorders are invoked unconditionally (no `||` short-circuit): the
// record* functions exist for their metric side effects, and short-circuiting
// would silently skip the pod-level recorder whenever a container resize is
// detected first.
func recordResizeOperations(oldPod, newPod *v1.Pod) bool {
	if oldPod == nil {
		// This should never happen: updates always carry the previous pod.
		return true
	}
	containerResize := recordContainerResizeOperations(oldPod, newPod)
	podLevelResize := recordPodLevelResourceResizeOperations(oldPod, newPod)
	return containerResize || podLevelResize
}
// recordPodLevelResourceResizeOperations reports whether the pod-level
// resources need to be resized, returning true if so. It is a no-op (false)
// when the InPlacePodVerticalScaling feature gate is disabled.
func recordPodLevelResourceResizeOperations(oldPod, newPod *v1.Pod) bool {
	// TODO(ndixita): add metrics for pod-level resources resize.
	return utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) &&
		!apiequality.Semantic.DeepEqual(oldPod.Spec.Resources, newPod.Spec.Resources)
}
// recordContainerResizeOperations records if any of the pod's containers needs
// to be resized, and returns true if so.
//
// For each resizable container present in both oldPod and newPod it compares
// cpu/memory requests and limits, and increments the
// metrics.ContainerRequestedResizes counter labelled with the resource,
// requirement, and operation ("add"/"remove"/"increase"/"decrease" as
// classified by resizeOperationForResources).
func recordContainerResizeOperations(oldPod, newPod *v1.Pod) bool {
	hasResize := false
	for oldContainer, containerType := range podutil.ContainerIter(&oldPod.Spec, podutil.InitContainers|podutil.Containers) {
		if !allocation.IsResizableContainer(oldContainer, containerType) {
			continue
		}
		// Find the matching container (same name and container type) in newPod.
		var newContainer *v1.Container
		for new, newType := range podutil.ContainerIter(&newPod.Spec, podutil.InitContainers|podutil.Containers) {
			if !allocation.IsResizableContainer(new, newType) {
				continue
			}
			if new.Name == oldContainer.Name && containerType == newType {
				newContainer = new
				// Container names are unique within a pod, so the first match
				// is the only one.
				break
			}
		}
		if newContainer == nil {
			// Defensive: containers should never disappear from a pod spec,
			// but skipping avoids a nil dereference if that invariant breaks.
			continue
		}
		newResources := newContainer.Resources
		oldResources := oldContainer.Resources
		if op := resizeOperationForResources(newResources.Requests.Memory(), oldResources.Requests.Memory()); op != "" {
			hasResize = true
			metrics.ContainerRequestedResizes.WithLabelValues("memory", "requests", op).Inc()
		}
		if op := resizeOperationForResources(newResources.Limits.Memory(), oldResources.Limits.Memory()); op != "" {
			hasResize = true
			metrics.ContainerRequestedResizes.WithLabelValues("memory", "limits", op).Inc()
		}
		if op := resizeOperationForResources(newResources.Requests.Cpu(), oldResources.Requests.Cpu()); op != "" {
			hasResize = true
			metrics.ContainerRequestedResizes.WithLabelValues("cpu", "requests", op).Inc()
		}
		if op := resizeOperationForResources(newResources.Limits.Cpu(), oldResources.Limits.Cpu()); op != "" {
			hasResize = true
			metrics.ContainerRequestedResizes.WithLabelValues("cpu", "limits", op).Inc()
		}
	}
	return hasResize
}
// resizeOperationForResources classifies the change from old to new as one of
// "add", "remove", "increase", or "decrease", or "" when the quantities are
// equal (no resize needed).
func resizeOperationForResources(new, old *resource.Quantity) string {
	switch {
	case new.IsZero() && !old.IsZero():
		return "remove"
	case old.IsZero() && !new.IsZero():
		return "add"
	}
	// Quantity.Cmp returns -1, 0, or 1.
	switch new.Cmp(*old) {
	case -1:
		return "decrease"
	case 1:
		return "increase"
	default:
		return ""
	}
}
// HandlePodRemoves is the callback in the SyncHandler interface for pods
// being removed from a config source.
func (kl *Kubelet) HandlePodRemoves(ctx context.Context, pods []*v1.Pod) {
	start := kl.clock.Now()
	logger := klog.FromContext(ctx)
	for _, pod := range pods {
		// Drop per-pod bookkeeping before attempting deletion: certificate
		// state, pod-manager registration, and the allocation record.
		kl.podCertificateManager.ForgetPod(ctx, pod)
		kl.podManager.RemovePod(pod)
		kl.allocationManager.RemovePod(pod.UID)
		pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod)
		if wasMirror {
			if pod == nil {
				logger.V(2).Info("Unable to find pod for mirror pod, skipping", "mirrorPod", klog.KObj(mirrorPod), "mirrorPodUID", mirrorPod.UID)
				continue
			}
			// A removed mirror pod triggers a re-sync of its static pod
			// rather than deletion of the static pod itself.
			kl.podWorkers.UpdatePod(UpdatePodOptions{
				Pod:        pod,
				MirrorPod:  mirrorPod,
				UpdateType: kubetypes.SyncPodUpdate,
				StartTime:  start,
			})
			continue
		}
		// Deletion is allowed to fail because the periodic cleanup routine
		// will trigger deletion again.
		if err := kl.deletePod(logger, pod); err != nil {
			logger.V(2).Info("Failed to delete pod", "pod", klog.KObj(pod), "err", err)
		}
	}
	// Pods were removed, so retry any resizes that were previously deferred.
	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
		kl.allocationManager.RetryPendingResizes(allocation.TriggerReasonPodsRemoved)
	}
}
// HandlePodReconcile is the callback in the SyncHandler interface for pods
// that should be reconciled. Pods are reconciled when only the status of the
// pod is updated in the API.
func (kl *Kubelet) HandlePodReconcile(ctx context.Context, pods []*v1.Pod) {
	start := kl.clock.Now()
	logger := klog.FromContext(ctx)
	// Set to true when any pod's aggregate cpu/memory requests shrank; acted
	// on exactly once after the loop.
	retryPendingResizes := false
	hasPendingResizes := kl.allocationManager.HasPendingResizes()
	for _, pod := range pods {
		// Update the pod in pod manager, status manager will do periodically reconcile according
		// to the pod manager.
		oldPod, _ := kl.podManager.GetPodByUID(pod.UID)
		kl.podManager.UpdatePod(pod)
		pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod)
		if wasMirror {
			if pod == nil {
				logger.V(2).Info("Unable to find pod for mirror pod, skipping", "mirrorPod", klog.KObj(mirrorPod), "mirrorPodUID", mirrorPod.UID)
				continue
			}
			// Static pods should be reconciled the same way as regular pods
		}
		if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
			// If there are pending resizes, check whether the requests shrank as a result of the status
			// resources changing.
			if hasPendingResizes && !retryPendingResizes && oldPod != nil {
				opts := resourcehelper.PodResourcesOptions{
					UseStatusResources:    true,
					SkipPodLevelResources: !utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources),
					InPlacePodLevelResourcesVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodLevelResourcesVerticalScaling),
				}
				// Ignore desired resources when aggregating the resources.
				allocatedOldPod, _ := kl.allocationManager.UpdatePodFromAllocation(oldPod)
				allocatedPod, _ := kl.allocationManager.UpdatePodFromAllocation(pod)
				oldRequest := resourcehelper.PodRequests(allocatedOldPod, opts)
				newRequest := resourcehelper.PodRequests(allocatedPod, opts)
				// If cpu or memory requests shrank, then retry the pending resizes.
				retryPendingResizes = newRequest.Memory().Cmp(*oldRequest.Memory()) < 0 ||
					newRequest.Cpu().Cmp(*oldRequest.Cpu()) < 0
			}
		}
		// TODO: reconcile being calculated in the config manager is questionable, and avoiding
		// extra syncs may no longer be necessary. Reevaluate whether Reconcile and Sync can be
		// merged (after resolving the next two TODOs).
		// Reconcile Pod "Ready" condition if necessary. Trigger sync pod for reconciliation.
		// TODO: this should be unnecessary today - determine what is the cause for this to
		// be different than Sync, or if there is a better place for it. For instance, we have
		// needsReconcile in kubelet/config, here, and in status_manager.
		if status.NeedToReconcilePodReadiness(pod) {
			kl.podWorkers.UpdatePod(UpdatePodOptions{
				Pod:        pod,
				MirrorPod:  mirrorPod,
				UpdateType: kubetypes.SyncPodSync,
				StartTime:  start,
			})
		}
		// After an evicted pod is synced, all dead containers in the pod can be removed.
		// TODO: this is questionable - status read is async and during eviction we already
		// expect to not have some container info. The pod worker knows whether a pod has
		// been evicted, so if this is about minimizing the time to react to an eviction we
		// can do better. If it's about preserving pod status info we can also do better.
		if eviction.PodIsEvicted(pod.Status) {
			if podStatus, err := kl.podCache.Get(pod.UID); err == nil {
				kl.containerDeletor.deleteContainersInPod("", podStatus, true)
			}
		}
	}
	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
		if retryPendingResizes {
			kl.allocationManager.RetryPendingResizes(allocation.TriggerReasonPodResized)
		}
	}
}
// HandlePodSyncs is the callback in the syncHandler interface for pods
// that should be dispatched to pod workers for sync.
func (kl *Kubelet) HandlePodSyncs(ctx context.Context, pods []*v1.Pod) {
	start := kl.clock.Now()
	logger := klog.FromContext(ctx)
	for _, pod := range pods {
		pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod)
		if wasMirror {
			if pod == nil {
				logger.V(2).Info("Unable to find pod for mirror pod, skipping", "mirrorPod", klog.KObj(mirrorPod), "mirrorPodUID", mirrorPod.UID)
				continue
			}
			// Syncing a mirror pod is a programmer error since the intent of sync is to
			// batch notify all pending work. We should make it impossible to double sync,
			// but for now log a programmer error to prevent accidental introduction.
			logger.V(3).Info("Programmer error, HandlePodSyncs does not expect to receive mirror pods", "podUID", pod.UID, "mirrorPodUID", mirrorPod.UID)
			continue
		}
		// Dispatch the sync to the per-pod worker goroutine.
		kl.podWorkers.UpdatePod(UpdatePodOptions{
			Pod:        pod,
			MirrorPod:  mirrorPod,
			UpdateType: kubetypes.SyncPodSync,
			StartTime:  start,
		})
	}
}
// LatestLoopEntryTime returns the time stored in the sync loop monitor, or the
// zero time when nothing has been recorded yet.
func (kl *Kubelet) LatestLoopEntryTime() time.Time {
	if v := kl.syncLoopMonitor.Load(); v != nil {
		return v.(time.Time)
	}
	return time.Time{}
}
// SyncLoopHealthCheck checks if kubelet's sync loop that updates containers is
// working. It fails when the loop has not been entered for twice the resync
// interval (with a floor of five minutes).
func (kl *Kubelet) SyncLoopHealthCheck(req *http.Request) error {
	const minDuration = 5 * time.Minute
	threshold := 2 * kl.resyncInterval
	if threshold < minDuration {
		threshold = minDuration
	}
	lastEntry := kl.LatestLoopEntryTime()
	if lastEntry.IsZero() {
		// Loop has not started yet; treat as healthy.
		return nil
	}
	if time.Now().After(lastEntry.Add(threshold)) {
		return fmt.Errorf("sync Loop took longer than expected")
	}
	return nil
}
// updateRuntimeUp calls the container runtime status callback, initializing
// the runtime dependent modules when the container runtime first comes up,
// and returns an error if the status check fails. If the status check is OK,
// update the container runtime uptime in the kubelet runtimeState.
func (kl *Kubelet) updateRuntimeUp(ctx context.Context) {
	logger := klog.FromContext(ctx)
	// Serialize concurrent status checks.
	kl.updateRuntimeMux.Lock()
	defer kl.updateRuntimeMux.Unlock()
	s, err := kl.containerRuntime.Status(ctx)
	if err != nil {
		logger.Error(err, "Container runtime sanity check failed")
		return
	}
	if s == nil {
		logger.Error(nil, "Container runtime status is nil")
		return
	}
	// Periodically log the whole runtime status for debugging.
	logger.V(4).Info("Container runtime status", "status", s)
	// Log not-ready conditions at V(4) instead of Info while the runtime is
	// not yet expected to be ready.
	loggerInfo := logger.Info
	if !kl.containerRuntimeReadyExpected {
		loggerInfo = logger.V(4).Info
	}
	networkReady := s.GetRuntimeCondition(kubecontainer.NetworkReady)
	if networkReady == nil || !networkReady.Status {
		loggerInfo("Container runtime network not ready", "networkReady", networkReady)
		kl.runtimeState.setNetworkState(fmt.Errorf("container runtime network not ready: %v", networkReady))
	} else {
		// Set nil if the container runtime network is ready.
		kl.runtimeState.setNetworkState(nil)
	}
	// information in RuntimeReady condition will be propagated to NodeReady condition.
	runtimeReady := s.GetRuntimeCondition(kubecontainer.RuntimeReady)
	// If RuntimeReady is not set or is false, report an error.
	if runtimeReady == nil || !runtimeReady.Status {
		loggerInfo("Container runtime not ready", "runtimeReady", runtimeReady)
		kl.runtimeState.setRuntimeState(fmt.Errorf("container runtime not ready: %v", runtimeReady))
		return
	}
	kl.runtimeState.setRuntimeState(nil)
	kl.runtimeState.setRuntimeHandlers(s.Handlers)
	kl.runtimeState.setRuntimeFeatures(s.Features)
	// Initialize runtime-dependent modules exactly once, the first time the
	// runtime reports ready.
	kl.oneTimeInitializer.Do(func() {
		kl.initializeRuntimeDependentModules(ctx)
	})
	kl.runtimeState.setRuntimeSync(kl.clock.Now())
}
// GetConfiguration returns the KubeletConfiguration used to configure the
// kubelet. The configuration is returned by value, so callers receive a copy.
func (kl *Kubelet) GetConfiguration() kubeletconfiginternal.KubeletConfiguration {
	return kl.kubeletConfiguration
}
// BirthCry sends an event that the kubelet has started up.
func (kl *Kubelet) BirthCry() {
	// Make an event that kubelet restarted.
	kl.recorder.Eventf(kl.nodeRef, v1.EventTypeNormal, events.StartingKubelet, "Starting kubelet.")
}
// ListenAndServe runs the kubelet HTTP server (the authenticated read/write
// endpoint), delegating to the server package.
func (kl *Kubelet) ListenAndServe(ctx context.Context, kubeCfg *kubeletconfiginternal.KubeletConfiguration, tlsOptions *server.TLSOptions,
	auth server.AuthInterface, tp trace.TracerProvider) {
	server.ListenAndServeKubeletServer(ctx, kl, kl.resourceAnalyzer, kl.containerManager.GetHealthCheckers(), kl.flagz, kubeCfg, tlsOptions, auth, tp)
}
// ListenAndServeReadOnly runs the kubelet HTTP server in read-only mode on the
// given address and port.
func (kl *Kubelet) ListenAndServeReadOnly(ctx context.Context, address net.IP, port uint, tp trace.TracerProvider) {
	server.ListenAndServeKubeletReadOnlyServer(ctx, kl, kl.resourceAnalyzer, kl.containerManager.GetHealthCheckers(), kl.flagz, address, port, tp)
}
// kubeletPodsProvider adapts the Kubelet to the pods-provider interface used
// by the podresources server (see ListenAndServePodResources).
type kubeletPodsProvider struct {
	kl *Kubelet
}

// GetActivePods returns the kubelet's currently active pods.
func (pp *kubeletPodsProvider) GetActivePods() []*v1.Pod {
	return pp.kl.GetActivePods()
}

// GetPods returns all pods known to the pod manager.
func (pp *kubeletPodsProvider) GetPods() []*v1.Pod {
	return pp.kl.podManager.GetPods()
}

// GetPodByName returns the pod with the given namespace/name and whether it
// was found.
func (pp *kubeletPodsProvider) GetPodByName(namespace, name string) (*v1.Pod, bool) {
	return pp.kl.podManager.GetPodByName(namespace, name)
}
// ListenAndServePodResources runs the kubelet podresources grpc service on a
// local endpoint under the pod-resources directory.
func (kl *Kubelet) ListenAndServePodResources(ctx context.Context) {
	endpoint, err := util.LocalEndpoint(kl.getPodResourcesDir(), podresources.Socket)
	if err != nil {
		klog.FromContext(ctx).V(2).Info("Failed to get local endpoint for PodResources endpoint", "err", err)
		return
	}
	// The container manager backs all resource views; pods come from the
	// kubeletPodsProvider adapter above.
	providers := podresources.PodResourcesProviders{
		Pods:             &kubeletPodsProvider{kl: kl},
		Devices:          kl.containerManager,
		Cpus:             kl.containerManager,
		Memory:           kl.containerManager,
		DynamicResources: kl.containerManager,
	}
	server.ListenAndServePodResources(ctx, endpoint, providers)
}
// cleanUpContainersInPod deletes the eligible dead container instances in a
// pod. Depending on the configuration, the latest dead containers may be kept
// around.
func (kl *Kubelet) cleanUpContainersInPod(podID types.UID, exitedContainerID string) {
	if podStatus, err := kl.podCache.Get(podID); err == nil {
		// When an evicted or deleted pod has already synced, all containers can be removed.
		removeAll := kl.podWorkers.ShouldPodContentBeRemoved(podID)
		kl.containerDeletor.deleteContainersInPod(exitedContainerID, podStatus, removeAll)
	}
}
// fastStatusUpdateOnce starts a loop that checks if the current state of kubelet + container runtime
// would be able to turn the node ready, and sync the ready state to the apiserver as soon as possible.
// Function returns after the node status update after such event, or when the node is already ready.
// Function is executed only during Kubelet start which improves latency to ready node by updating
// kubelet state, runtime status and node statuses ASAP.
func (kl *Kubelet) fastStatusUpdateOnce() {
	ctx := context.Background()
	start := kl.clock.Now()
	stopCh := make(chan struct{})
	// Keep trying to make fast node status update until either timeout is reached or an update is successful.
	// Polls every 100ms until fastNodeStatusUpdate signals completion.
	wait.Until(func() {
		// fastNodeStatusUpdate returns true when it succeeds or when the grace period has expired
		// (status was not updated within nodeReadyGracePeriod and the second argument below gets true),
		// then we close the channel and abort the loop.
		if kl.fastNodeStatusUpdate(ctx, kl.clock.Since(start) >= nodeReadyGracePeriod) {
			close(stopCh)
		}
	}, 100*time.Millisecond, stopCh)
}
// CheckpointContainer tries to checkpoint a container. The parameters are
// used to look up the specified container; an error is returned when it
// cannot be found. On success the container engine is asked to write the
// checkpoint archive into the kubelet's default checkpoint directory.
func (kl *Kubelet) CheckpointContainer(
	ctx context.Context,
	podUID types.UID,
	podFullName,
	containerName string,
	options *runtimeapi.CheckpointContainerRequest,
) error {
	container, err := kl.findContainer(ctx, podFullName, podUID, containerName)
	if err != nil {
		return err
	}
	if container == nil {
		return fmt.Errorf("container %v not found", containerName)
	}
	// Archive name embeds pod, container, and an RFC3339 timestamp so repeated
	// checkpoints of the same container don't collide.
	archiveName := fmt.Sprintf(
		"checkpoint-%s-%s-%s.tar",
		podFullName,
		containerName,
		time.Now().Format(time.RFC3339),
	)
	options.Location = filepath.Join(kl.getCheckpointsDir(), archiveName)
	options.ContainerId = string(container.ID.ID)
	return kl.containerRuntime.CheckpointContainer(ctx, options)
}
// ListMetricDescriptors gets the descriptors for the metrics that will be
// returned in ListPodSandboxMetrics. Delegates directly to the container
// runtime.
func (kl *Kubelet) ListMetricDescriptors(ctx context.Context) ([]*runtimeapi.MetricDescriptor, error) {
	return kl.containerRuntime.ListMetricDescriptors(ctx)
}
// ListPodSandboxMetrics retrieves the metrics for all pod sandboxes.
// Delegates directly to the container runtime.
func (kl *Kubelet) ListPodSandboxMetrics(ctx context.Context) ([]*runtimeapi.PodSandboxMetrics, error) {
	return kl.containerRuntime.ListPodSandboxMetrics(ctx)
}
// supportLocalStorageCapacityIsolation reports whether local ephemeral storage
// capacity isolation is enabled in the kubelet configuration.
func (kl *Kubelet) supportLocalStorageCapacityIsolation() bool {
	return kl.GetConfiguration().LocalStorageCapacityIsolation
}
// isSyncPodWorthy filters out events that are not worthy of pod syncing.
func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool {
	// ContainerRemoved doesn't affect pod state
	return event.Type != pleg.ContainerRemoved
}
// PrepareDynamicResources calls the container Manager PrepareDynamicResources API
// This method implements the RuntimeHelper interface
func (kl *Kubelet) PrepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
	return kl.containerManager.PrepareDynamicResources(ctx, pod)
}
// UnprepareDynamicResources calls the container Manager UnprepareDynamicResources API
// This method implements the RuntimeHelper interface
func (kl *Kubelet) UnprepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
	return kl.containerManager.UnprepareDynamicResources(ctx, pod)
}
// tryReconcileMirrorPods ensures the mirror pod for a static pod exists and
// matches the current pod definition. The function logs and ignores any
// errors. No-op for non-static pods.
func (kl *Kubelet) tryReconcileMirrorPods(ctx context.Context, staticPod, mirrorPod *v1.Pod) {
	logger := klog.FromContext(ctx)
	if !kubetypes.IsStaticPod(staticPod) {
		return
	}
	deleted := false
	if mirrorPod != nil {
		if mirrorPod.DeletionTimestamp != nil || !kubepod.IsMirrorPodOf(mirrorPod, staticPod) {
			// The mirror pod is semantically different from the static pod. Remove
			// it. The mirror pod will get recreated later.
			logger.Info("Trying to delete pod", "pod", klog.KObj(mirrorPod), "podUID", mirrorPod.UID)
			podFullName := kubecontainer.GetPodFullName(staticPod)
			if ok, err := kl.mirrorPodClient.DeleteMirrorPod(ctx, podFullName, &mirrorPod.UID); err != nil {
				logger.Error(err, "Failed deleting mirror pod", "pod", klog.KObj(mirrorPod))
			} else if ok {
				deleted = ok
				logger.Info("Deleted mirror pod as it didn't match the static Pod", "pod", klog.KObj(mirrorPod))
			}
		}
	}
	// Recreate the mirror pod when it is absent or was just deleted above,
	// unless the node itself is missing or being removed.
	if mirrorPod == nil || deleted {
		node, err := kl.GetNode(ctx)
		if err != nil {
			logger.Error(err, "No need to create a mirror pod, since failed to get node info from the cluster", "node", klog.KRef("", string(kl.nodeName)))
		} else if node.DeletionTimestamp != nil {
			logger.Info("No need to create a mirror pod, since node has been removed from the cluster", "node", klog.KRef("", string(kl.nodeName)))
		} else {
			logger.Info("Creating a mirror pod for static pod", "pod", klog.KObj(staticPod))
			if err := kl.mirrorPodClient.CreateMirrorPod(ctx, staticPod); err != nil {
				logger.Error(err, "Failed creating a mirror pod", "pod", klog.KObj(staticPod))
			}
		}
	}
}
// fastStaticPodsRegistration ensures mirror pods for static pods exist as soon
// as the node is registered: it polls every 100ms until the node object can be
// fetched, then reconciles every static pod's mirror pod.
func (kl *Kubelet) fastStaticPodsRegistration(ctx context.Context) {
	logger := klog.FromContext(ctx)
	if err := wait.PollUntilContextCancel(ctx, 100*time.Millisecond, true, func(ctx context.Context) (bool, error) {
		_, err := kl.GetNode(ctx)
		if err == nil {
			return true, nil
		}
		logger.Error(err, "Unable to register mirror pod because node is not registered yet", "node", klog.KRef("", string(kl.nodeName)))
		return false, nil
	}); err != nil {
		logger.Error(err, "Failed to wait until node is registered", "node", klog.KRef("", string(kl.nodeName)))
	}
	staticPodToMirrorPodMap := kl.podManager.GetStaticPodToMirrorPodMap()
	for staticPod, mirrorPod := range staticPodToMirrorPodMap {
		kl.tryReconcileMirrorPods(ctx, staticPod, mirrorPod)
	}
}
// SetPodWatchCondition forwards a per-pod watch condition to the PLEG under
// the given condition key.
func (kl *Kubelet) SetPodWatchCondition(podUID types.UID, conditionKey string, condition pleg.WatchCondition) {
	kl.pleg.SetPodWatchCondition(podUID, conditionKey, condition)
}
import {identity} from 'shared-runtime';
/**
 * Sums `item.current` over `data`, calling `identity(item.other)` per item,
 * and returns the sum wrapped in an array. Compiler fixture input: exercises
 * object destructuring inside a for-of loop.
 */
function Component({data}) {
  let x = 0;
  for (const item of data) {
    const {current, other} = item;
    x += current;
    identity(other);
  }
  return [x];
}
// Fixture harness entry point: runs Component on sample data
// (current values 2 + 4, so the expected result is [6]).
export const FIXTURE_ENTRYPOINT = {
  fn: Component,
  params: [
    {
      data: [
        {current: 2, other: 3},
        {current: 4, other: 5},
      ],
    },
  ],
};
#pragma once
#include <c10/macros/Export.h>
#ifndef _WIN32
#include <level_zero/ze_api.h>
#endif
namespace at::xpu {

// NOTE [ USE OF Level zero API ]
//
// XPU ATen does not directly link to Intel level_zero because it
// requires libze_loader to be installed. Following the design of PyTorch,
// we want our GPU build to work on CPU
// machines as long as XPU is not initialized.
//
// Normal XPU code in torch uses the sycl runtime libraries which can be
// installed even if the driver is not installed, but sometimes we specifically
// need to use the driver API (e.g., to load JIT compiled code).
// To accomplish this, we lazily link the level_zero_stub which provides a
// struct at::xpu::LevelZero that contains function pointers to all of the apis
// we need.
//
// IT IS AN ERROR TO TRY TO CALL ANY ze* FUNCTION DIRECTLY.
// INSTEAD USE, e.g.
//   detail::getXPUHooks().level_zero().zeModuleCreate(...)
// or
//   globalContext().getLevelZero().zeModuleCreate(...)
//
// If a function is missing add it to the list in
// ATen/xpu/level_zero_stub/ATenLevelZero.h and edit
// ATen/xpu/detail/LazyLevelZero.cpp accordingly (e.g., via one of the stub
// macros).

// X-macro list of every Level Zero entry point the stub exposes; `_` is
// applied once per API name.
#define AT_FORALL_ZE(_)          \
  _(zeModuleCreate)              \
  _(zeKernelCreate)              \
  _(zeKernelGetProperties)       \
  _(zeMemGetAllocProperties)     \
  _(zeModuleBuildLogGetString)   \
  _(zeModuleBuildLogDestroy)

// Table of lazily-resolved Level Zero entry points. Each member is a function
// pointer whose type is taken from the corresponding ze* declaration.
extern "C" typedef struct LevelZero {
  // Intel level zero is not available by default on Windows, so the table is
  // empty there.
#ifndef _WIN32
#define CREATE_MEMBER(name) decltype(&name) name;
  AT_FORALL_ZE(CREATE_MEMBER)
#undef CREATE_MEMBER
#endif // _WIN32
} LevelZero;

} // namespace at::xpu
from datetime import *
import django
from django.conf import settings
# Only shim when running on Django >= 1.4 with USE_TZ enabled; otherwise the
# plain stdlib datetime imported above is used unchanged.
if django.VERSION[:2] >= (1, 4) and getattr(settings, 'USE_TZ', False):
    from django.utils import timezone
    from datetime import datetime as _datetime

    class datetime(_datetime):
        """
        A custom datetime.datetime class which acts as a compatibility
        layer between South and Django 1.4's timezone aware datetime
        instances.

        It basically adds the default timezone (as configured in Django's
        settings) automatically if no tzinfo is given.
        """

        def __new__(cls, year, month, day,
                    hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
            # Note: this constructs the *base* _datetime class, so instances
            # returned here are plain stdlib datetimes, not this subclass.
            dt = _datetime(year, month, day,
                           hour, minute, second, microsecond,
                           tzinfo=tzinfo)
            if tzinfo is None:
                # Naive input: attach the project's configured default
                # timezone so the value is timezone-aware.
                default_timezone = timezone.get_default_timezone()
                dt = timezone.make_aware(dt, default_timezone)
            return dt
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.