repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | FileMixins.assertFileEncodingNotEqual | def assertFileEncodingNotEqual(self, filename, encoding, msg=None):
'''Fail if ``filename`` is encoded with the given ``encoding``
as determined by the '!=' operator.
Parameters
----------
filename : str, bytes, file-like
encoding : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fencoding = self._get_file_encoding(filename)
fname = self._get_file_name(filename)
standardMsg = '%s is %s encoded' % (fname, encoding)
self.assertNotEqual(fencoding.lower(),
encoding.lower(),
self._formatMessage(msg, standardMsg)) | python | def assertFileEncodingNotEqual(self, filename, encoding, msg=None):
'''Fail if ``filename`` is encoded with the given ``encoding``
as determined by the '!=' operator.
Parameters
----------
filename : str, bytes, file-like
encoding : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fencoding = self._get_file_encoding(filename)
fname = self._get_file_name(filename)
standardMsg = '%s is %s encoded' % (fname, encoding)
self.assertNotEqual(fencoding.lower(),
encoding.lower(),
self._formatMessage(msg, standardMsg)) | [
"def",
"assertFileEncodingNotEqual",
"(",
"self",
",",
"filename",
",",
"encoding",
",",
"msg",
"=",
"None",
")",
":",
"fencoding",
"=",
"self",
".",
"_get_file_encoding",
"(",
"filename",
")",
"fname",
"=",
"self",
".",
"_get_file_name",
"(",
"filename",
")... | Fail if ``filename`` is encoded with the given ``encoding``
as determined by the '!=' operator.
Parameters
----------
filename : str, bytes, file-like
encoding : str, bytes
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like. | [
"Fail",
"if",
"filename",
"is",
"encoded",
"with",
"the",
"given",
"encoding",
"as",
"determined",
"by",
"the",
"!",
"=",
"operator",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L815-L840 | train | 209,100 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | FileMixins.assertFileSizeEqual | def assertFileSizeEqual(self, filename, size, msg=None):
'''Fail if ``filename`` does not have the given ``size`` as
determined by the '==' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertEqual(fsize, size, msg=msg) | python | def assertFileSizeEqual(self, filename, size, msg=None):
'''Fail if ``filename`` does not have the given ``size`` as
determined by the '==' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertEqual(fsize, size, msg=msg) | [
"def",
"assertFileSizeEqual",
"(",
"self",
",",
"filename",
",",
"size",
",",
"msg",
"=",
"None",
")",
":",
"fsize",
"=",
"self",
".",
"_get_file_size",
"(",
"filename",
")",
"self",
".",
"assertEqual",
"(",
"fsize",
",",
"size",
",",
"msg",
"=",
"msg"... | Fail if ``filename`` does not have the given ``size`` as
determined by the '==' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like. | [
"Fail",
"if",
"filename",
"does",
"not",
"have",
"the",
"given",
"size",
"as",
"determined",
"by",
"the",
"==",
"operator",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L842-L861 | train | 209,101 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | FileMixins.assertFileSizeNotEqual | def assertFileSizeNotEqual(self, filename, size, msg=None):
'''Fail if ``filename`` has the given ``size`` as determined
by the '!=' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertNotEqual(fsize, size, msg=msg) | python | def assertFileSizeNotEqual(self, filename, size, msg=None):
'''Fail if ``filename`` has the given ``size`` as determined
by the '!=' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertNotEqual(fsize, size, msg=msg) | [
"def",
"assertFileSizeNotEqual",
"(",
"self",
",",
"filename",
",",
"size",
",",
"msg",
"=",
"None",
")",
":",
"fsize",
"=",
"self",
".",
"_get_file_size",
"(",
"filename",
")",
"self",
".",
"assertNotEqual",
"(",
"fsize",
",",
"size",
",",
"msg",
"=",
... | Fail if ``filename`` has the given ``size`` as determined
by the '!=' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like. | [
"Fail",
"if",
"filename",
"has",
"the",
"given",
"size",
"as",
"determined",
"by",
"the",
"!",
"=",
"operator",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L863-L882 | train | 209,102 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | FileMixins.assertFileSizeGreater | def assertFileSizeGreater(self, filename, size, msg=None):
'''Fail if ``filename``'s size is not greater than ``size`` as
determined by the '>' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertGreater(fsize, size, msg=msg) | python | def assertFileSizeGreater(self, filename, size, msg=None):
'''Fail if ``filename``'s size is not greater than ``size`` as
determined by the '>' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertGreater(fsize, size, msg=msg) | [
"def",
"assertFileSizeGreater",
"(",
"self",
",",
"filename",
",",
"size",
",",
"msg",
"=",
"None",
")",
":",
"fsize",
"=",
"self",
".",
"_get_file_size",
"(",
"filename",
")",
"self",
".",
"assertGreater",
"(",
"fsize",
",",
"size",
",",
"msg",
"=",
"... | Fail if ``filename``'s size is not greater than ``size`` as
determined by the '>' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like. | [
"Fail",
"if",
"filename",
"s",
"size",
"is",
"not",
"greater",
"than",
"size",
"as",
"determined",
"by",
"the",
">",
"operator",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L938-L957 | train | 209,103 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | FileMixins.assertFileSizeGreaterEqual | def assertFileSizeGreaterEqual(self, filename, size, msg=None):
'''Fail if ``filename``'s size is not greater than or equal to
``size`` as determined by the '>=' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertGreaterEqual(fsize, size, msg=msg) | python | def assertFileSizeGreaterEqual(self, filename, size, msg=None):
'''Fail if ``filename``'s size is not greater than or equal to
``size`` as determined by the '>=' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertGreaterEqual(fsize, size, msg=msg) | [
"def",
"assertFileSizeGreaterEqual",
"(",
"self",
",",
"filename",
",",
"size",
",",
"msg",
"=",
"None",
")",
":",
"fsize",
"=",
"self",
".",
"_get_file_size",
"(",
"filename",
")",
"self",
".",
"assertGreaterEqual",
"(",
"fsize",
",",
"size",
",",
"msg",
... | Fail if ``filename``'s size is not greater than or equal to
``size`` as determined by the '>=' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like. | [
"Fail",
"if",
"filename",
"s",
"size",
"is",
"not",
"greater",
"than",
"or",
"equal",
"to",
"size",
"as",
"determined",
"by",
"the",
">",
"=",
"operator",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L959-L978 | train | 209,104 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | FileMixins.assertFileSizeLess | def assertFileSizeLess(self, filename, size, msg=None):
'''Fail if ``filename``'s size is not less than ``size`` as
determined by the '<' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertLess(fsize, size, msg=msg) | python | def assertFileSizeLess(self, filename, size, msg=None):
'''Fail if ``filename``'s size is not less than ``size`` as
determined by the '<' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertLess(fsize, size, msg=msg) | [
"def",
"assertFileSizeLess",
"(",
"self",
",",
"filename",
",",
"size",
",",
"msg",
"=",
"None",
")",
":",
"fsize",
"=",
"self",
".",
"_get_file_size",
"(",
"filename",
")",
"self",
".",
"assertLess",
"(",
"fsize",
",",
"size",
",",
"msg",
"=",
"msg",
... | Fail if ``filename``'s size is not less than ``size`` as
determined by the '<' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like. | [
"Fail",
"if",
"filename",
"s",
"size",
"is",
"not",
"less",
"than",
"size",
"as",
"determined",
"by",
"the",
"<",
"operator",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L980-L999 | train | 209,105 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | FileMixins.assertFileSizeLessEqual | def assertFileSizeLessEqual(self, filename, size, msg=None):
'''Fail if ``filename``'s size is not less than or equal to
``size`` as determined by the '<=' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertLessEqual(fsize, size, msg=msg) | python | def assertFileSizeLessEqual(self, filename, size, msg=None):
'''Fail if ``filename``'s size is not less than or equal to
``size`` as determined by the '<=' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertLessEqual(fsize, size, msg=msg) | [
"def",
"assertFileSizeLessEqual",
"(",
"self",
",",
"filename",
",",
"size",
",",
"msg",
"=",
"None",
")",
":",
"fsize",
"=",
"self",
".",
"_get_file_size",
"(",
"filename",
")",
"self",
".",
"assertLessEqual",
"(",
"fsize",
",",
"size",
",",
"msg",
"=",... | Fail if ``filename``'s size is not less than or equal to
``size`` as determined by the '<=' operator.
Parameters
----------
filename : str, bytes, file-like
size : int, float
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like. | [
"Fail",
"if",
"filename",
"s",
"size",
"is",
"not",
"less",
"than",
"or",
"equal",
"to",
"size",
"as",
"determined",
"by",
"the",
"<",
"=",
"operator",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1001-L1020 | train | 209,106 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | CategoricalMixins.assertCategoricalLevelsEqual | def assertCategoricalLevelsEqual(self, levels1, levels2, msg=None):
'''Fail if ``levels1`` and ``levels2`` do not have the same
domain.
Parameters
----------
levels1 : iterable
levels2 : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If either ``levels1`` or ``levels2`` is not iterable.
'''
if not isinstance(levels1, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(levels2, collections.Iterable):
raise TypeError('Second argument is not iterable')
standardMsg = '%s levels != %s levels' % (levels1, levels2)
if not all(level in levels2 for level in levels1):
self.fail(self._formatMessage(msg, standardMsg))
if not all(level in levels1 for level in levels2):
self.fail(self._formatMessage(msg, standardMsg)) | python | def assertCategoricalLevelsEqual(self, levels1, levels2, msg=None):
'''Fail if ``levels1`` and ``levels2`` do not have the same
domain.
Parameters
----------
levels1 : iterable
levels2 : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If either ``levels1`` or ``levels2`` is not iterable.
'''
if not isinstance(levels1, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(levels2, collections.Iterable):
raise TypeError('Second argument is not iterable')
standardMsg = '%s levels != %s levels' % (levels1, levels2)
if not all(level in levels2 for level in levels1):
self.fail(self._formatMessage(msg, standardMsg))
if not all(level in levels1 for level in levels2):
self.fail(self._formatMessage(msg, standardMsg)) | [
"def",
"assertCategoricalLevelsEqual",
"(",
"self",
",",
"levels1",
",",
"levels2",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"levels1",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"TypeError",
"(",
"'First argument is not ... | Fail if ``levels1`` and ``levels2`` do not have the same
domain.
Parameters
----------
levels1 : iterable
levels2 : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If either ``levels1`` or ``levels2`` is not iterable. | [
"Fail",
"if",
"levels1",
"and",
"levels2",
"do",
"not",
"have",
"the",
"same",
"domain",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1129-L1157 | train | 209,107 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | CategoricalMixins.assertCategoricalLevelsNotEqual | def assertCategoricalLevelsNotEqual(self, levels1, levels2, msg=None):
'''Fail if ``levels1`` and ``levels2`` have the same domain.
Parameters
----------
levels1 : iterable
levels2 : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If either ``levels1`` or ``levels2`` is not iterable.
'''
if not isinstance(levels1, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(levels2, collections.Iterable):
raise TypeError('Second argument is not iterable')
standardMsg = '%s levels == %s levels' % (levels1, levels2)
unshared_levels = False
if not all(level in levels2 for level in levels1):
unshared_levels = True
if not all(level in levels1 for level in levels2):
unshared_levels = True
if not unshared_levels:
self.fail(self._formatMessage(msg, standardMsg)) | python | def assertCategoricalLevelsNotEqual(self, levels1, levels2, msg=None):
'''Fail if ``levels1`` and ``levels2`` have the same domain.
Parameters
----------
levels1 : iterable
levels2 : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If either ``levels1`` or ``levels2`` is not iterable.
'''
if not isinstance(levels1, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(levels2, collections.Iterable):
raise TypeError('Second argument is not iterable')
standardMsg = '%s levels == %s levels' % (levels1, levels2)
unshared_levels = False
if not all(level in levels2 for level in levels1):
unshared_levels = True
if not all(level in levels1 for level in levels2):
unshared_levels = True
if not unshared_levels:
self.fail(self._formatMessage(msg, standardMsg)) | [
"def",
"assertCategoricalLevelsNotEqual",
"(",
"self",
",",
"levels1",
",",
"levels2",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"levels1",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"TypeError",
"(",
"'First argument is n... | Fail if ``levels1`` and ``levels2`` have the same domain.
Parameters
----------
levels1 : iterable
levels2 : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If either ``levels1`` or ``levels2`` is not iterable. | [
"Fail",
"if",
"levels1",
"and",
"levels2",
"have",
"the",
"same",
"domain",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1159-L1190 | train | 209,108 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | CategoricalMixins.assertCategoricalLevelIn | def assertCategoricalLevelIn(self, level, levels, msg=None):
'''Fail if ``level`` is not in ``levels``.
This is equivalent to ``self.assertIn(level, levels)``.
Parameters
----------
level
levels : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``levels`` is not iterable.
'''
if not isinstance(levels, collections.Iterable):
raise TypeError('Second argument is not iterable')
self.assertIn(level, levels, msg=msg) | python | def assertCategoricalLevelIn(self, level, levels, msg=None):
'''Fail if ``level`` is not in ``levels``.
This is equivalent to ``self.assertIn(level, levels)``.
Parameters
----------
level
levels : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``levels`` is not iterable.
'''
if not isinstance(levels, collections.Iterable):
raise TypeError('Second argument is not iterable')
self.assertIn(level, levels, msg=msg) | [
"def",
"assertCategoricalLevelIn",
"(",
"self",
",",
"level",
",",
"levels",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"levels",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"TypeError",
"(",
"'Second argument is not iterabl... | Fail if ``level`` is not in ``levels``.
This is equivalent to ``self.assertIn(level, levels)``.
Parameters
----------
level
levels : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``levels`` is not iterable. | [
"Fail",
"if",
"level",
"is",
"not",
"in",
"levels",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1192-L1213 | train | 209,109 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | CategoricalMixins.assertCategoricalLevelNotIn | def assertCategoricalLevelNotIn(self, level, levels, msg=None):
'''Fail if ``level`` is in ``levels``.
This is equivalent to ``self.assertNotIn(level, levels)``.
Parameters
----------
level
levels : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``levels`` is not iterable.
'''
if not isinstance(levels, collections.Iterable):
raise TypeError('Second argument is not iterable')
self.assertNotIn(level, levels, msg=msg) | python | def assertCategoricalLevelNotIn(self, level, levels, msg=None):
'''Fail if ``level`` is in ``levels``.
This is equivalent to ``self.assertNotIn(level, levels)``.
Parameters
----------
level
levels : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``levels`` is not iterable.
'''
if not isinstance(levels, collections.Iterable):
raise TypeError('Second argument is not iterable')
self.assertNotIn(level, levels, msg=msg) | [
"def",
"assertCategoricalLevelNotIn",
"(",
"self",
",",
"level",
",",
"levels",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"levels",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"TypeError",
"(",
"'Second argument is not iter... | Fail if ``level`` is in ``levels``.
This is equivalent to ``self.assertNotIn(level, levels)``.
Parameters
----------
level
levels : iterable
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``levels`` is not iterable. | [
"Fail",
"if",
"level",
"is",
"in",
"levels",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1215-L1236 | train | 209,110 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | DateTimeMixins.assertDateTimesBefore | def assertDateTimesBefore(self, sequence, target, strict=True, msg=None):
'''Fail if any elements in ``sequence`` are not before
``target``.
If ``target`` is iterable, it must have the same length as
``sequence``
If ``strict=True``, fail unless all elements in ``sequence``
are strictly less than ``target``. If ``strict=False``, fail
unless all elements in ``sequence`` are less than or equal to
``target``.
Parameters
----------
sequence : iterable
target : datetime, date, iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
ValueError
If ``target`` is iterable but does not have the same length
as ``sequence``.
TypeError
If ``target`` is not a datetime or date object and is not
iterable.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if strict:
standardMsg = '%s is not strictly less than %s' % (sequence,
target)
op = operator.lt
else:
standardMsg = '%s is not less than %s' % (sequence, target)
op = operator.le
# Null date(time)s will always compare False, but
# we want to know about null date(time)s
if isinstance(target, collections.Iterable):
if len(target) != len(sequence):
raise ValueError(('Length mismatch: '
'first argument contains %s elements, '
'second argument contains %s elements' % (
len(sequence), len(target))))
if not all(op(i, j) for i, j in zip(sequence, target)):
self.fail(self._formatMessage(msg, standardMsg))
elif isinstance(target, (date, datetime)):
if not all(op(element, target) for element in sequence):
self.fail(self._formatMessage(msg, standardMsg))
else:
raise TypeError(
'Second argument is not a datetime or date object or iterable') | python | def assertDateTimesBefore(self, sequence, target, strict=True, msg=None):
'''Fail if any elements in ``sequence`` are not before
``target``.
If ``target`` is iterable, it must have the same length as
``sequence``
If ``strict=True``, fail unless all elements in ``sequence``
are strictly less than ``target``. If ``strict=False``, fail
unless all elements in ``sequence`` are less than or equal to
``target``.
Parameters
----------
sequence : iterable
target : datetime, date, iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
ValueError
If ``target`` is iterable but does not have the same length
as ``sequence``.
TypeError
If ``target`` is not a datetime or date object and is not
iterable.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if strict:
standardMsg = '%s is not strictly less than %s' % (sequence,
target)
op = operator.lt
else:
standardMsg = '%s is not less than %s' % (sequence, target)
op = operator.le
# Null date(time)s will always compare False, but
# we want to know about null date(time)s
if isinstance(target, collections.Iterable):
if len(target) != len(sequence):
raise ValueError(('Length mismatch: '
'first argument contains %s elements, '
'second argument contains %s elements' % (
len(sequence), len(target))))
if not all(op(i, j) for i, j in zip(sequence, target)):
self.fail(self._formatMessage(msg, standardMsg))
elif isinstance(target, (date, datetime)):
if not all(op(element, target) for element in sequence):
self.fail(self._formatMessage(msg, standardMsg))
else:
raise TypeError(
'Second argument is not a datetime or date object or iterable') | [
"def",
"assertDateTimesBefore",
"(",
"self",
",",
"sequence",
",",
"target",
",",
"strict",
"=",
"True",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"sequence",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"TypeError",
"... | Fail if any elements in ``sequence`` are not before
``target``.
If ``target`` is iterable, it must have the same length as
``sequence``
If ``strict=True``, fail unless all elements in ``sequence``
are strictly less than ``target``. If ``strict=False``, fail
unless all elements in ``sequence`` are less than or equal to
``target``.
Parameters
----------
sequence : iterable
target : datetime, date, iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
ValueError
If ``target`` is iterable but does not have the same length
as ``sequence``.
TypeError
If ``target`` is not a datetime or date object and is not
iterable. | [
"Fail",
"if",
"any",
"elements",
"in",
"sequence",
"are",
"not",
"before",
"target",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1260-L1318 | train | 209,111 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | DateTimeMixins.assertDateTimesPast | def assertDateTimesPast(self, sequence, strict=True, msg=None):
'''Fail if any elements in ``sequence`` are not in the past.
If the max element is a datetime, "past" is defined as anything
prior to ``datetime.now()``; if the max element is a date,
"past" is defined as anything prior to ``date.today()``.
If ``strict=True``, fail unless all elements in ``sequence``
are strictly less than ``date.today()`` (or ``datetime.now()``).
If ``strict=False``, fail unless all elements in ``sequence``
are less than or equal to ``date.today()`` (or
``datetime.now()``).
Parameters
----------
sequence : iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If max element in ``sequence`` is not a datetime or date
object.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
# Cannot compare datetime to date, so if dates are provided use
# date.today(), if datetimes are provided use datetime.today()
if isinstance(max(sequence), datetime):
target = datetime.today()
elif isinstance(max(sequence), date):
target = date.today()
else:
raise TypeError('Expected iterable of datetime or date objects')
self.assertDateTimesBefore(sequence, target, strict=strict, msg=msg) | python | def assertDateTimesPast(self, sequence, strict=True, msg=None):
'''Fail if any elements in ``sequence`` are not in the past.
If the max element is a datetime, "past" is defined as anything
prior to ``datetime.now()``; if the max element is a date,
"past" is defined as anything prior to ``date.today()``.
If ``strict=True``, fail unless all elements in ``sequence``
are strictly less than ``date.today()`` (or ``datetime.now()``).
If ``strict=False``, fail unless all elements in ``sequence``
are less than or equal to ``date.today()`` (or
``datetime.now()``).
Parameters
----------
sequence : iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If max element in ``sequence`` is not a datetime or date
object.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
# Cannot compare datetime to date, so if dates are provided use
# date.today(), if datetimes are provided use datetime.today()
if isinstance(max(sequence), datetime):
target = datetime.today()
elif isinstance(max(sequence), date):
target = date.today()
else:
raise TypeError('Expected iterable of datetime or date objects')
self.assertDateTimesBefore(sequence, target, strict=strict, msg=msg) | [
"def",
"assertDateTimesPast",
"(",
"self",
",",
"sequence",
",",
"strict",
"=",
"True",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"sequence",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"TypeError",
"(",
"'First argumen... | Fail if any elements in ``sequence`` are not in the past.
If the max element is a datetime, "past" is defined as anything
prior to ``datetime.now()``; if the max element is a date,
"past" is defined as anything prior to ``date.today()``.
If ``strict=True``, fail unless all elements in ``sequence``
are strictly less than ``date.today()`` (or ``datetime.now()``).
If ``strict=False``, fail unless all elements in ``sequence``
are less than or equal to ``date.today()`` (or
``datetime.now()``).
Parameters
----------
sequence : iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If max element in ``sequence`` is not a datetime or date
object. | [
"Fail",
"if",
"any",
"elements",
"in",
"sequence",
"are",
"not",
"in",
"the",
"past",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1381-L1422 | train | 209,112 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | DateTimeMixins.assertDateTimesFuture | def assertDateTimesFuture(self, sequence, strict=True, msg=None):
'''Fail if any elements in ``sequence`` are not in the future.
If the min element is a datetime, "future" is defined as
anything after ``datetime.now()``; if the min element is a date,
"future" is defined as anything after ``date.today()``.
If ``strict=True``, fail unless all elements in ``sequence``
are strictly greater than ``date.today()``
(or ``datetime.now()``). If ``strict=False``, fail all
elements in ``sequence`` are greater than or equal to
``date.today()`` (or ``datetime.now()``).
Parameters
----------
sequence : iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If min element in ``sequence`` is not a datetime or date
object.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
# Cannot compare datetime to date, so if dates are provided use
# date.today(), if datetimes are provided use datetime.today()
if isinstance(min(sequence), datetime):
target = datetime.today()
elif isinstance(min(sequence), date):
target = date.today()
else:
raise TypeError('Expected iterable of datetime or date objects')
self.assertDateTimesAfter(sequence, target, strict=strict, msg=msg) | python | def assertDateTimesFuture(self, sequence, strict=True, msg=None):
'''Fail if any elements in ``sequence`` are not in the future.
If the min element is a datetime, "future" is defined as
anything after ``datetime.now()``; if the min element is a date,
"future" is defined as anything after ``date.today()``.
If ``strict=True``, fail unless all elements in ``sequence``
are strictly greater than ``date.today()``
(or ``datetime.now()``). If ``strict=False``, fail all
elements in ``sequence`` are greater than or equal to
``date.today()`` (or ``datetime.now()``).
Parameters
----------
sequence : iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If min element in ``sequence`` is not a datetime or date
object.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
# Cannot compare datetime to date, so if dates are provided use
# date.today(), if datetimes are provided use datetime.today()
if isinstance(min(sequence), datetime):
target = datetime.today()
elif isinstance(min(sequence), date):
target = date.today()
else:
raise TypeError('Expected iterable of datetime or date objects')
self.assertDateTimesAfter(sequence, target, strict=strict, msg=msg) | [
"def",
"assertDateTimesFuture",
"(",
"self",
",",
"sequence",
",",
"strict",
"=",
"True",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"sequence",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"TypeError",
"(",
"'First argum... | Fail if any elements in ``sequence`` are not in the future.
If the min element is a datetime, "future" is defined as
anything after ``datetime.now()``; if the min element is a date,
"future" is defined as anything after ``date.today()``.
If ``strict=True``, fail unless all elements in ``sequence``
are strictly greater than ``date.today()``
(or ``datetime.now()``). If ``strict=False``, fail all
elements in ``sequence`` are greater than or equal to
``date.today()`` (or ``datetime.now()``).
Parameters
----------
sequence : iterable
strict : bool
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If min element in ``sequence`` is not a datetime or date
object. | [
"Fail",
"if",
"any",
"elements",
"in",
"sequence",
"are",
"not",
"in",
"the",
"future",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1424-L1465 | train | 209,113 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | DateTimeMixins.assertDateTimesFrequencyEqual | def assertDateTimesFrequencyEqual(self, sequence, frequency, msg=None):
'''Fail if any elements in ``sequence`` aren't separated by
the expected ``fequency``.
Parameters
----------
sequence : iterable
frequency : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``frequency`` is not a timedelta object.
'''
# TODO (jsa): check that elements in sequence are dates or
# datetimes, keeping in mind that sequence may contain null
# values
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(frequency, timedelta):
raise TypeError('Second argument is not a timedelta object')
standardMsg = 'unexpected frequencies found in %s' % sequence
s1 = pd.Series(sequence)
s2 = s1.shift(-1)
freq = s2 - s1
if not all(f == frequency for f in freq[:-1]):
self.fail(self._formatMessage(msg, standardMsg)) | python | def assertDateTimesFrequencyEqual(self, sequence, frequency, msg=None):
'''Fail if any elements in ``sequence`` aren't separated by
the expected ``fequency``.
Parameters
----------
sequence : iterable
frequency : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``frequency`` is not a timedelta object.
'''
# TODO (jsa): check that elements in sequence are dates or
# datetimes, keeping in mind that sequence may contain null
# values
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(frequency, timedelta):
raise TypeError('Second argument is not a timedelta object')
standardMsg = 'unexpected frequencies found in %s' % sequence
s1 = pd.Series(sequence)
s2 = s1.shift(-1)
freq = s2 - s1
if not all(f == frequency for f in freq[:-1]):
self.fail(self._formatMessage(msg, standardMsg)) | [
"def",
"assertDateTimesFrequencyEqual",
"(",
"self",
",",
"sequence",
",",
"frequency",
",",
"msg",
"=",
"None",
")",
":",
"# TODO (jsa): check that elements in sequence are dates or",
"# datetimes, keeping in mind that sequence may contain null",
"# values",
"if",
"not",
"isin... | Fail if any elements in ``sequence`` aren't separated by
the expected ``fequency``.
Parameters
----------
sequence : iterable
frequency : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``frequency`` is not a timedelta object. | [
"Fail",
"if",
"any",
"elements",
"in",
"sequence",
"aren",
"t",
"separated",
"by",
"the",
"expected",
"fequency",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1467-L1502 | train | 209,114 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | DateTimeMixins.assertDateTimesLagEqual | def assertDateTimesLagEqual(self, sequence, lag, msg=None):
'''Fail unless max element in ``sequence`` is separated from
the present by ``lag`` as determined by the '==' operator.
If the max element is a datetime, "present" is defined as
``datetime.now()``; if the max element is a date, "present"
is defined as ``date.today()``.
This is equivalent to
``self.assertEqual(present - max(sequence), lag)``.
Parameters
----------
sequence : iterable
lag : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``lag`` is not a timedelta object.
TypeError
If max element in ``sequence`` is not a datetime or date
object.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(lag, timedelta):
raise TypeError('Second argument is not a timedelta object')
# Cannot compare datetime to date, so if dates are provided use
# date.today(), if datetimes are provided use datetime.today()
if isinstance(max(sequence), datetime):
target = datetime.today()
elif isinstance(max(sequence), date):
target = date.today()
else:
raise TypeError('Expected iterable of datetime or date objects')
self.assertEqual(target - max(sequence), lag, msg=msg) | python | def assertDateTimesLagEqual(self, sequence, lag, msg=None):
'''Fail unless max element in ``sequence`` is separated from
the present by ``lag`` as determined by the '==' operator.
If the max element is a datetime, "present" is defined as
``datetime.now()``; if the max element is a date, "present"
is defined as ``date.today()``.
This is equivalent to
``self.assertEqual(present - max(sequence), lag)``.
Parameters
----------
sequence : iterable
lag : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``lag`` is not a timedelta object.
TypeError
If max element in ``sequence`` is not a datetime or date
object.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(lag, timedelta):
raise TypeError('Second argument is not a timedelta object')
# Cannot compare datetime to date, so if dates are provided use
# date.today(), if datetimes are provided use datetime.today()
if isinstance(max(sequence), datetime):
target = datetime.today()
elif isinstance(max(sequence), date):
target = date.today()
else:
raise TypeError('Expected iterable of datetime or date objects')
self.assertEqual(target - max(sequence), lag, msg=msg) | [
"def",
"assertDateTimesLagEqual",
"(",
"self",
",",
"sequence",
",",
"lag",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"sequence",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"TypeError",
"(",
"'First argument is not iterabl... | Fail unless max element in ``sequence`` is separated from
the present by ``lag`` as determined by the '==' operator.
If the max element is a datetime, "present" is defined as
``datetime.now()``; if the max element is a date, "present"
is defined as ``date.today()``.
This is equivalent to
``self.assertEqual(present - max(sequence), lag)``.
Parameters
----------
sequence : iterable
lag : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``lag`` is not a timedelta object.
TypeError
If max element in ``sequence`` is not a datetime or date
object. | [
"Fail",
"unless",
"max",
"element",
"in",
"sequence",
"is",
"separated",
"from",
"the",
"present",
"by",
"lag",
"as",
"determined",
"by",
"the",
"==",
"operator",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1504-L1547 | train | 209,115 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | DateTimeMixins.assertDateTimesLagLess | def assertDateTimesLagLess(self, sequence, lag, msg=None):
'''Fail if max element in ``sequence`` is separated from
the present by ``lag`` or more as determined by the '<'
operator.
If the max element is a datetime, "present" is defined as
``datetime.now()``; if the max element is a date, "present"
is defined as ``date.today()``.
This is equivalent to
``self.assertLess(present - max(sequence), lag)``.
Parameters
----------
sequence : iterable
lag : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``lag`` is not a timedelta object.
TypeError
If max element in ``sequence`` is not a datetime or date
object.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(lag, timedelta):
raise TypeError('Second argument is not a timedelta object')
# Cannot compare datetime to date, so if dates are provided use
# date.today(), if datetimes are provided use datetime.today()
if isinstance(max(sequence), datetime):
target = datetime.today()
elif isinstance(max(sequence), date):
target = date.today()
else:
raise TypeError('Expected iterable of datetime or date objects')
self.assertLess(target - max(sequence), lag, msg=msg) | python | def assertDateTimesLagLess(self, sequence, lag, msg=None):
'''Fail if max element in ``sequence`` is separated from
the present by ``lag`` or more as determined by the '<'
operator.
If the max element is a datetime, "present" is defined as
``datetime.now()``; if the max element is a date, "present"
is defined as ``date.today()``.
This is equivalent to
``self.assertLess(present - max(sequence), lag)``.
Parameters
----------
sequence : iterable
lag : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``lag`` is not a timedelta object.
TypeError
If max element in ``sequence`` is not a datetime or date
object.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(lag, timedelta):
raise TypeError('Second argument is not a timedelta object')
# Cannot compare datetime to date, so if dates are provided use
# date.today(), if datetimes are provided use datetime.today()
if isinstance(max(sequence), datetime):
target = datetime.today()
elif isinstance(max(sequence), date):
target = date.today()
else:
raise TypeError('Expected iterable of datetime or date objects')
self.assertLess(target - max(sequence), lag, msg=msg) | [
"def",
"assertDateTimesLagLess",
"(",
"self",
",",
"sequence",
",",
"lag",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"sequence",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"TypeError",
"(",
"'First argument is not iterable... | Fail if max element in ``sequence`` is separated from
the present by ``lag`` or more as determined by the '<'
operator.
If the max element is a datetime, "present" is defined as
``datetime.now()``; if the max element is a date, "present"
is defined as ``date.today()``.
This is equivalent to
``self.assertLess(present - max(sequence), lag)``.
Parameters
----------
sequence : iterable
lag : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``lag`` is not a timedelta object.
TypeError
If max element in ``sequence`` is not a datetime or date
object. | [
"Fail",
"if",
"max",
"element",
"in",
"sequence",
"is",
"separated",
"from",
"the",
"present",
"by",
"lag",
"or",
"more",
"as",
"determined",
"by",
"the",
"<",
"operator",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1549-L1593 | train | 209,116 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | DateTimeMixins.assertDateTimesLagLessEqual | def assertDateTimesLagLessEqual(self, sequence, lag, msg=None):
'''Fail if max element in ``sequence`` is separated from
the present by more than ``lag`` as determined by the '<='
operator.
If the max element is a datetime, "present" is defined as
``datetime.now()``; if the max element is a date, "present"
is defined as ``date.today()``.
This is equivalent to
``self.assertLessEqual(present - max(sequence), lag)``.
Parameters
----------
sequence : iterable
lag : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``lag`` is not a timedelta object.
TypeError
If max element in ``sequence`` is not a datetime or date
object.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(lag, timedelta):
raise TypeError('Second argument is not a timedelta object')
# Cannot compare datetime to date, so if dates are provided use
# date.today(), if datetimes are provided use datetime.today()
if isinstance(max(sequence), datetime):
target = datetime.today()
elif isinstance(max(sequence), date):
target = date.today()
else:
raise TypeError('Expected iterable of datetime or date objects')
self.assertLessEqual(target - max(sequence), lag, msg=msg) | python | def assertDateTimesLagLessEqual(self, sequence, lag, msg=None):
'''Fail if max element in ``sequence`` is separated from
the present by more than ``lag`` as determined by the '<='
operator.
If the max element is a datetime, "present" is defined as
``datetime.now()``; if the max element is a date, "present"
is defined as ``date.today()``.
This is equivalent to
``self.assertLessEqual(present - max(sequence), lag)``.
Parameters
----------
sequence : iterable
lag : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``lag`` is not a timedelta object.
TypeError
If max element in ``sequence`` is not a datetime or date
object.
'''
if not isinstance(sequence, collections.Iterable):
raise TypeError('First argument is not iterable')
if not isinstance(lag, timedelta):
raise TypeError('Second argument is not a timedelta object')
# Cannot compare datetime to date, so if dates are provided use
# date.today(), if datetimes are provided use datetime.today()
if isinstance(max(sequence), datetime):
target = datetime.today()
elif isinstance(max(sequence), date):
target = date.today()
else:
raise TypeError('Expected iterable of datetime or date objects')
self.assertLessEqual(target - max(sequence), lag, msg=msg) | [
"def",
"assertDateTimesLagLessEqual",
"(",
"self",
",",
"sequence",
",",
"lag",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"sequence",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"TypeError",
"(",
"'First argument is not ite... | Fail if max element in ``sequence`` is separated from
the present by more than ``lag`` as determined by the '<='
operator.
If the max element is a datetime, "present" is defined as
``datetime.now()``; if the max element is a date, "present"
is defined as ``date.today()``.
This is equivalent to
``self.assertLessEqual(present - max(sequence), lag)``.
Parameters
----------
sequence : iterable
lag : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``lag`` is not a timedelta object.
TypeError
If max element in ``sequence`` is not a datetime or date
object. | [
"Fail",
"if",
"max",
"element",
"in",
"sequence",
"is",
"separated",
"from",
"the",
"present",
"by",
"more",
"than",
"lag",
"as",
"determined",
"by",
"the",
"<",
"=",
"operator",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1595-L1639 | train | 209,117 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | DateTimeMixins.assertTimeZoneIsNone | def assertTimeZoneIsNone(self, dt, msg=None):
'''Fail if ``dt`` has a non-null ``tzinfo`` attribute.
Parameters
----------
dt : datetime
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
'''
if not isinstance(dt, datetime):
raise TypeError('First argument is not a datetime object')
self.assertIsNone(dt.tzinfo, msg=msg) | python | def assertTimeZoneIsNone(self, dt, msg=None):
'''Fail if ``dt`` has a non-null ``tzinfo`` attribute.
Parameters
----------
dt : datetime
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
'''
if not isinstance(dt, datetime):
raise TypeError('First argument is not a datetime object')
self.assertIsNone(dt.tzinfo, msg=msg) | [
"def",
"assertTimeZoneIsNone",
"(",
"self",
",",
"dt",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"dt",
",",
"datetime",
")",
":",
"raise",
"TypeError",
"(",
"'First argument is not a datetime object'",
")",
"self",
".",
"assertIsNone",... | Fail if ``dt`` has a non-null ``tzinfo`` attribute.
Parameters
----------
dt : datetime
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object. | [
"Fail",
"if",
"dt",
"has",
"a",
"non",
"-",
"null",
"tzinfo",
"attribute",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1641-L1659 | train | 209,118 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | DateTimeMixins.assertTimeZoneIsNotNone | def assertTimeZoneIsNotNone(self, dt, msg=None):
'''Fail unless ``dt`` has a non-null ``tzinfo`` attribute.
Parameters
----------
dt : datetime
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
'''
if not isinstance(dt, datetime):
raise TypeError('First argument is not a datetime object')
self.assertIsNotNone(dt.tzinfo, msg=msg) | python | def assertTimeZoneIsNotNone(self, dt, msg=None):
'''Fail unless ``dt`` has a non-null ``tzinfo`` attribute.
Parameters
----------
dt : datetime
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
'''
if not isinstance(dt, datetime):
raise TypeError('First argument is not a datetime object')
self.assertIsNotNone(dt.tzinfo, msg=msg) | [
"def",
"assertTimeZoneIsNotNone",
"(",
"self",
",",
"dt",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"dt",
",",
"datetime",
")",
":",
"raise",
"TypeError",
"(",
"'First argument is not a datetime object'",
")",
"self",
".",
"assertIsNot... | Fail unless ``dt`` has a non-null ``tzinfo`` attribute.
Parameters
----------
dt : datetime
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object. | [
"Fail",
"unless",
"dt",
"has",
"a",
"non",
"-",
"null",
"tzinfo",
"attribute",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1661-L1679 | train | 209,119 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | DateTimeMixins.assertTimeZoneEqual | def assertTimeZoneEqual(self, dt, tz, msg=None):
'''Fail unless ``dt``'s ``tzinfo`` attribute equals ``tz`` as
determined by the '==' operator.
Parameters
----------
dt : datetime
tz : timezone
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
TypeError
If ``tz`` is not a timezone object.
'''
if not isinstance(dt, datetime):
raise TypeError('First argument is not a datetime object')
if not isinstance(tz, timezone):
raise TypeError('Second argument is not a timezone object')
self.assertEqual(dt.tzinfo, tz, msg=msg) | python | def assertTimeZoneEqual(self, dt, tz, msg=None):
'''Fail unless ``dt``'s ``tzinfo`` attribute equals ``tz`` as
determined by the '==' operator.
Parameters
----------
dt : datetime
tz : timezone
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
TypeError
If ``tz`` is not a timezone object.
'''
if not isinstance(dt, datetime):
raise TypeError('First argument is not a datetime object')
if not isinstance(tz, timezone):
raise TypeError('Second argument is not a timezone object')
self.assertEqual(dt.tzinfo, tz, msg=msg) | [
"def",
"assertTimeZoneEqual",
"(",
"self",
",",
"dt",
",",
"tz",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"dt",
",",
"datetime",
")",
":",
"raise",
"TypeError",
"(",
"'First argument is not a datetime object'",
")",
"if",
"not",
"... | Fail unless ``dt``'s ``tzinfo`` attribute equals ``tz`` as
determined by the '==' operator.
Parameters
----------
dt : datetime
tz : timezone
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
TypeError
If ``tz`` is not a timezone object. | [
"Fail",
"unless",
"dt",
"s",
"tzinfo",
"attribute",
"equals",
"tz",
"as",
"determined",
"by",
"the",
"==",
"operator",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1681-L1705 | train | 209,120 |
twosigma/marbles | marbles/mixins/marbles/mixins/mixins.py | DateTimeMixins.assertTimeZoneNotEqual | def assertTimeZoneNotEqual(self, dt, tz, msg=None):
'''Fail if ``dt``'s ``tzinfo`` attribute equals ``tz`` as
determined by the '!=' operator.
Parameters
----------
dt : datetime
tz : timezone
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
TypeError
If ``tz`` is not a timezone object.
'''
if not isinstance(dt, datetime):
raise TypeError('First argument is not a datetime object')
if not isinstance(tz, timezone):
raise TypeError('Second argument is not a timezone object')
self.assertNotEqual(dt.tzinfo, tz, msg=msg) | python | def assertTimeZoneNotEqual(self, dt, tz, msg=None):
'''Fail if ``dt``'s ``tzinfo`` attribute equals ``tz`` as
determined by the '!=' operator.
Parameters
----------
dt : datetime
tz : timezone
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
TypeError
If ``tz`` is not a timezone object.
'''
if not isinstance(dt, datetime):
raise TypeError('First argument is not a datetime object')
if not isinstance(tz, timezone):
raise TypeError('Second argument is not a timezone object')
self.assertNotEqual(dt.tzinfo, tz, msg=msg) | [
"def",
"assertTimeZoneNotEqual",
"(",
"self",
",",
"dt",
",",
"tz",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"dt",
",",
"datetime",
")",
":",
"raise",
"TypeError",
"(",
"'First argument is not a datetime object'",
")",
"if",
"not",
... | Fail if ``dt``'s ``tzinfo`` attribute equals ``tz`` as
determined by the '!=' operator.
Parameters
----------
dt : datetime
tz : timezone
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
TypeError
If ``tz`` is not a timezone object. | [
"Fail",
"if",
"dt",
"s",
"tzinfo",
"attribute",
"equals",
"tz",
"as",
"determined",
"by",
"the",
"!",
"=",
"operator",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L1707-L1731 | train | 209,121 |
twosigma/marbles | marbles/core/marbles/core/log.py | _class_defining_method | def _class_defining_method(meth): # pragma: no cover
'''Gets the name of the class that defines meth.
Adapted from
http://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3/25959545#25959545.
'''
if inspect.ismethod(meth):
for cls in inspect.getmro(meth.__self__.__class__):
if cls.__dict__.get(meth.__name__) is meth:
return '{}.{}'.format(cls.__module__, cls.__name__)
meth = meth.__func__
if inspect.isfunction(meth):
module = meth.__qualname__.split('.<locals>', 1)[0]
cls = getattr(inspect.getmodule(meth), module.rsplit('.', 1)[0])
if isinstance(cls, type):
return '{}.{}'.format(cls.__module__, cls.__name__) | python | def _class_defining_method(meth): # pragma: no cover
'''Gets the name of the class that defines meth.
Adapted from
http://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3/25959545#25959545.
'''
if inspect.ismethod(meth):
for cls in inspect.getmro(meth.__self__.__class__):
if cls.__dict__.get(meth.__name__) is meth:
return '{}.{}'.format(cls.__module__, cls.__name__)
meth = meth.__func__
if inspect.isfunction(meth):
module = meth.__qualname__.split('.<locals>', 1)[0]
cls = getattr(inspect.getmodule(meth), module.rsplit('.', 1)[0])
if isinstance(cls, type):
return '{}.{}'.format(cls.__module__, cls.__name__) | [
"def",
"_class_defining_method",
"(",
"meth",
")",
":",
"# pragma: no cover",
"if",
"inspect",
".",
"ismethod",
"(",
"meth",
")",
":",
"for",
"cls",
"in",
"inspect",
".",
"getmro",
"(",
"meth",
".",
"__self__",
".",
"__class__",
")",
":",
"if",
"cls",
".... | Gets the name of the class that defines meth.
Adapted from
http://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3/25959545#25959545. | [
"Gets",
"the",
"name",
"of",
"the",
"class",
"that",
"defines",
"meth",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/core/marbles/core/log.py#L62-L77 | train | 209,122 |
twosigma/marbles | marbles/core/marbles/core/log.py | AssertionLogger.configure | def configure(self, **kwargs):
'''Configure what assertion logging is done.
Settings configured with this method are overridden by
environment variables.
Parameters
----------
logfile : str or bytes or file object
If a string or bytes object, we write to that filename.
If an open file object, we just write to it. If None,
disable logging. If we open the file, we open it in
``'w'`` mode, so any contents will be overwritten.
attrs : list of str
Capture these attributes on the TestCase being run when
logging an assertion. For example, if you are testing
multiple resources, make sure the resource name is a
member of your TestCase, and configure marbles logging
with that name. These are only captured on failure.
verbose_attrs : list of str
Similar to attrs, but these attrs are captured even on
success.
verbose : bool or list of str
Fields (within the set {msg, note, locals}) to capture
even when the test is successful. By default, those three
fields are only captured on failure.
'''
if 'logfile' in kwargs:
# Note that kwargs['logfile'] might be an open file
# object, not a string. We deal with this in
# _open_if_needed, but refactoring it so that in that case
# it gets set on another attribute would be tricky to
# handle the lazy opening semantics that let us override
# it with MARBLES_LOGFILE, so instead we choose to let
# self._logfilename do double-duty: sometimes it's a name,
# sometimes it's sneakily a file object.
self._logfilename = kwargs['logfile']
if 'attrs' in kwargs:
self._attrs = kwargs['attrs']
if 'verbose_attrs' in kwargs:
self._verbose_attrs = kwargs['verbose_attrs']
if 'verbose' in kwargs:
self._verbose = kwargs['verbose'] | python | def configure(self, **kwargs):
'''Configure what assertion logging is done.
Settings configured with this method are overridden by
environment variables.
Parameters
----------
logfile : str or bytes or file object
If a string or bytes object, we write to that filename.
If an open file object, we just write to it. If None,
disable logging. If we open the file, we open it in
``'w'`` mode, so any contents will be overwritten.
attrs : list of str
Capture these attributes on the TestCase being run when
logging an assertion. For example, if you are testing
multiple resources, make sure the resource name is a
member of your TestCase, and configure marbles logging
with that name. These are only captured on failure.
verbose_attrs : list of str
Similar to attrs, but these attrs are captured even on
success.
verbose : bool or list of str
Fields (within the set {msg, note, locals}) to capture
even when the test is successful. By default, those three
fields are only captured on failure.
'''
if 'logfile' in kwargs:
# Note that kwargs['logfile'] might be an open file
# object, not a string. We deal with this in
# _open_if_needed, but refactoring it so that in that case
# it gets set on another attribute would be tricky to
# handle the lazy opening semantics that let us override
# it with MARBLES_LOGFILE, so instead we choose to let
# self._logfilename do double-duty: sometimes it's a name,
# sometimes it's sneakily a file object.
self._logfilename = kwargs['logfile']
if 'attrs' in kwargs:
self._attrs = kwargs['attrs']
if 'verbose_attrs' in kwargs:
self._verbose_attrs = kwargs['verbose_attrs']
if 'verbose' in kwargs:
self._verbose = kwargs['verbose'] | [
"def",
"configure",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'logfile'",
"in",
"kwargs",
":",
"# Note that kwargs['logfile'] might be an open file",
"# object, not a string. We deal with this in",
"# _open_if_needed, but refactoring it so that in that case",
"# it g... | Configure what assertion logging is done.
Settings configured with this method are overridden by
environment variables.
Parameters
----------
logfile : str or bytes or file object
If a string or bytes object, we write to that filename.
If an open file object, we just write to it. If None,
disable logging. If we open the file, we open it in
``'w'`` mode, so any contents will be overwritten.
attrs : list of str
Capture these attributes on the TestCase being run when
logging an assertion. For example, if you are testing
multiple resources, make sure the resource name is a
member of your TestCase, and configure marbles logging
with that name. These are only captured on failure.
verbose_attrs : list of str
Similar to attrs, but these attrs are captured even on
success.
verbose : bool or list of str
Fields (within the set {msg, note, locals}) to capture
even when the test is successful. By default, those three
fields are only captured on failure. | [
"Configure",
"what",
"assertion",
"logging",
"is",
"done",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/core/marbles/core/log.py#L124-L169 | train | 209,123 |
twosigma/marbles | marbles/core/marbles/core/marbles.py | _find_msg_argument | def _find_msg_argument(signature):
'''Locates the ``msg`` argument in a function signature.
We need to determine where we expect to find ``msg`` if it's passed
positionally, so we can extract it if the user passed it.
Returns
-------
tuple
The index of the ``msg`` param, the default value for it,
and the number of non-``msg`` positional parameters we expect.
'''
names = signature.parameters.keys()
try:
msg_idx = list(names).index('msg')
default_msg = signature.parameters['msg'].default
except ValueError: # 'msg' is not in list
# It's likely that this is a custom assertion that's just
# passing all remaining args and kwargs through
# (e.g. tests.marbles.ReversingTestCaseMixin). Unfortunately,
# we can't inspect its code to find the assert it's wrapping,
# so we just have to assume it's of the standard form with msg
# in the last position with a default of None.
msg_idx = -1
default_msg = None
# We also don't want to steal any actually positional arguments if
# we can help it. Therefore, we leave the default msg if there are
# fewer than this many args passed. We stop counting at a
# parameter named 'msg' or when we hit a varargs or keyword-only
# parameter.
kinds = (inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD)
non_msg_params = itertools.takewhile(
lambda param: param.name != 'msg' and param.kind in kinds,
signature.parameters.values())
non_msg_params = sum(1 for _ in non_msg_params)
return msg_idx, default_msg, non_msg_params | python | def _find_msg_argument(signature):
'''Locates the ``msg`` argument in a function signature.
We need to determine where we expect to find ``msg`` if it's passed
positionally, so we can extract it if the user passed it.
Returns
-------
tuple
The index of the ``msg`` param, the default value for it,
and the number of non-``msg`` positional parameters we expect.
'''
names = signature.parameters.keys()
try:
msg_idx = list(names).index('msg')
default_msg = signature.parameters['msg'].default
except ValueError: # 'msg' is not in list
# It's likely that this is a custom assertion that's just
# passing all remaining args and kwargs through
# (e.g. tests.marbles.ReversingTestCaseMixin). Unfortunately,
# we can't inspect its code to find the assert it's wrapping,
# so we just have to assume it's of the standard form with msg
# in the last position with a default of None.
msg_idx = -1
default_msg = None
# We also don't want to steal any actually positional arguments if
# we can help it. Therefore, we leave the default msg if there are
# fewer than this many args passed. We stop counting at a
# parameter named 'msg' or when we hit a varargs or keyword-only
# parameter.
kinds = (inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD)
non_msg_params = itertools.takewhile(
lambda param: param.name != 'msg' and param.kind in kinds,
signature.parameters.values())
non_msg_params = sum(1 for _ in non_msg_params)
return msg_idx, default_msg, non_msg_params | [
"def",
"_find_msg_argument",
"(",
"signature",
")",
":",
"names",
"=",
"signature",
".",
"parameters",
".",
"keys",
"(",
")",
"try",
":",
"msg_idx",
"=",
"list",
"(",
"names",
")",
".",
"index",
"(",
"'msg'",
")",
"default_msg",
"=",
"signature",
".",
... | Locates the ``msg`` argument in a function signature.
We need to determine where we expect to find ``msg`` if it's passed
positionally, so we can extract it if the user passed it.
Returns
-------
tuple
The index of the ``msg`` param, the default value for it,
and the number of non-``msg`` positional parameters we expect. | [
"Locates",
"the",
"msg",
"argument",
"in",
"a",
"function",
"signature",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/core/marbles/core/marbles.py#L432-L469 | train | 209,124 |
twosigma/marbles | marbles/core/marbles/core/marbles.py | _extract_msg | def _extract_msg(args, kwargs, msg_idx, default_msg, non_msg_params):
'''Extracts the ``msg`` argument from the passed ``args``.
Returns
-------
tuple
The found ``msg``, the args and kwargs with that ``msg``
removed, and any remaining positional args after ``msg``.
'''
rem_args = []
if 'msg' in kwargs:
msg = kwargs.pop('msg')
elif len(args) > non_msg_params and msg_idx < len(args):
msg = args[msg_idx]
if 0 <= msg_idx:
rem_args = args[msg_idx + 1:]
args = args[:msg_idx]
else:
msg = default_msg
return msg, args, rem_args, kwargs | python | def _extract_msg(args, kwargs, msg_idx, default_msg, non_msg_params):
'''Extracts the ``msg`` argument from the passed ``args``.
Returns
-------
tuple
The found ``msg``, the args and kwargs with that ``msg``
removed, and any remaining positional args after ``msg``.
'''
rem_args = []
if 'msg' in kwargs:
msg = kwargs.pop('msg')
elif len(args) > non_msg_params and msg_idx < len(args):
msg = args[msg_idx]
if 0 <= msg_idx:
rem_args = args[msg_idx + 1:]
args = args[:msg_idx]
else:
msg = default_msg
return msg, args, rem_args, kwargs | [
"def",
"_extract_msg",
"(",
"args",
",",
"kwargs",
",",
"msg_idx",
",",
"default_msg",
",",
"non_msg_params",
")",
":",
"rem_args",
"=",
"[",
"]",
"if",
"'msg'",
"in",
"kwargs",
":",
"msg",
"=",
"kwargs",
".",
"pop",
"(",
"'msg'",
")",
"elif",
"len",
... | Extracts the ``msg`` argument from the passed ``args``.
Returns
-------
tuple
The found ``msg``, the args and kwargs with that ``msg``
removed, and any remaining positional args after ``msg``. | [
"Extracts",
"the",
"msg",
"argument",
"from",
"the",
"passed",
"args",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/core/marbles/core/marbles.py#L472-L491 | train | 209,125 |
twosigma/marbles | marbles/core/marbles/core/marbles.py | _NoteWrapper.wrap | def wrap(self, text, **kwargs):
'''Wraps each paragraph in ``text`` individually.
Parameters
----------
text : str
Returns
-------
str
Single string containing the wrapped paragraphs.
'''
pilcrow = re.compile(r'(\n\s*\n)', re.MULTILINE)
list_prefix = re.compile(r'\s*(?:\w|[0-9]+)[\.\)]\s+')
paragraphs = pilcrow.split(text)
wrapped_lines = []
for paragraph in paragraphs:
if paragraph.isspace():
wrapped_lines.append('')
else:
wrapper = textwrap.TextWrapper(**vars(self))
list_item = re.match(list_prefix, paragraph)
if list_item:
wrapper.subsequent_indent += ' ' * len(list_item.group(0))
wrapped_lines.extend(wrapper.wrap(paragraph))
return wrapped_lines | python | def wrap(self, text, **kwargs):
'''Wraps each paragraph in ``text`` individually.
Parameters
----------
text : str
Returns
-------
str
Single string containing the wrapped paragraphs.
'''
pilcrow = re.compile(r'(\n\s*\n)', re.MULTILINE)
list_prefix = re.compile(r'\s*(?:\w|[0-9]+)[\.\)]\s+')
paragraphs = pilcrow.split(text)
wrapped_lines = []
for paragraph in paragraphs:
if paragraph.isspace():
wrapped_lines.append('')
else:
wrapper = textwrap.TextWrapper(**vars(self))
list_item = re.match(list_prefix, paragraph)
if list_item:
wrapper.subsequent_indent += ' ' * len(list_item.group(0))
wrapped_lines.extend(wrapper.wrap(paragraph))
return wrapped_lines | [
"def",
"wrap",
"(",
"self",
",",
"text",
",",
"*",
"*",
"kwargs",
")",
":",
"pilcrow",
"=",
"re",
".",
"compile",
"(",
"r'(\\n\\s*\\n)'",
",",
"re",
".",
"MULTILINE",
")",
"list_prefix",
"=",
"re",
".",
"compile",
"(",
"r'\\s*(?:\\w|[0-9]+)[\\.\\)]\\s+'",
... | Wraps each paragraph in ``text`` individually.
Parameters
----------
text : str
Returns
-------
str
Single string containing the wrapped paragraphs. | [
"Wraps",
"each",
"paragraph",
"in",
"text",
"individually",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/core/marbles/core/marbles.py#L77-L104 | train | 209,126 |
twosigma/marbles | marbles/core/marbles/core/marbles.py | ContextualAssertionError.assert_stmt | def assert_stmt(self):
'''Returns a string displaying the whole statement that failed,
with a '>' indicator on the line starting the expression.
'''
# This will be used by linecache to read the source of this
# module. See the docstring for _find_assert_stmt below which
# explains how.
# We don't have a test for this because automating the
# creation of an egg, installation into an environment,
# running of tests, and verification that marbles found the
# right source and was able to print it is a lot of
# automation. We have tested manually, and marbles works with
# all check installation mechanisms we know of right now
# (setup.py install, setup.py develop, pip install, bdist_egg,
# bdist_wheel).
module_globals = vars(sys.modules[self.module])
line_range, lineno = self._find_assert_stmt(
self.filename, self.linenumber, module_globals=module_globals)
source = [linecache.getline(self.filename, x,
module_globals=module_globals)
for x in line_range]
# Dedent the source, removing the final newline added by dedent
dedented_lines = textwrap.dedent(''.join(source)).split('\n')[:-1]
formatted_lines = []
for i, line in zip(line_range, dedented_lines):
prefix = '>' if i == lineno else ' '
formatted_lines.append(' {0} {1:4d} {2}'.format(prefix, i, line))
return '\n'.join(formatted_lines) | python | def assert_stmt(self):
'''Returns a string displaying the whole statement that failed,
with a '>' indicator on the line starting the expression.
'''
# This will be used by linecache to read the source of this
# module. See the docstring for _find_assert_stmt below which
# explains how.
# We don't have a test for this because automating the
# creation of an egg, installation into an environment,
# running of tests, and verification that marbles found the
# right source and was able to print it is a lot of
# automation. We have tested manually, and marbles works with
# all check installation mechanisms we know of right now
# (setup.py install, setup.py develop, pip install, bdist_egg,
# bdist_wheel).
module_globals = vars(sys.modules[self.module])
line_range, lineno = self._find_assert_stmt(
self.filename, self.linenumber, module_globals=module_globals)
source = [linecache.getline(self.filename, x,
module_globals=module_globals)
for x in line_range]
# Dedent the source, removing the final newline added by dedent
dedented_lines = textwrap.dedent(''.join(source)).split('\n')[:-1]
formatted_lines = []
for i, line in zip(line_range, dedented_lines):
prefix = '>' if i == lineno else ' '
formatted_lines.append(' {0} {1:4d} {2}'.format(prefix, i, line))
return '\n'.join(formatted_lines) | [
"def",
"assert_stmt",
"(",
"self",
")",
":",
"# This will be used by linecache to read the source of this",
"# module. See the docstring for _find_assert_stmt below which",
"# explains how.",
"# We don't have a test for this because automating the",
"# creation of an egg, installation into an en... | Returns a string displaying the whole statement that failed,
with a '>' indicator on the line starting the expression. | [
"Returns",
"a",
"string",
"displaying",
"the",
"whole",
"statement",
"that",
"failed",
"with",
"a",
">",
"indicator",
"on",
"the",
"line",
"starting",
"the",
"expression",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/core/marbles/core/marbles.py#L286-L317 | train | 209,127 |
twosigma/marbles | marbles/core/marbles/core/marbles.py | ContextualAssertionError._find_assert_stmt | def _find_assert_stmt(filename, linenumber, leading=1, following=2,
module_globals=None):
'''Given a Python module name, filename and line number, find
the lines that are part of the statement containing that line.
Python stacktraces, when reporting which line they're on, always
show the last line of the statement. This can be confusing if
the statement spans multiple lines. This function helps
reconstruct the whole statement, and is used by
:meth:`marbles.core.ContextualAssertionError.assert_stmt`.
Returns a tuple of the range of lines spanned by the source
being returned, the number of the line on which the interesting
statement starts.
We may need the ``module_globals`` in order to tell
:mod:`linecache` how to find the file, if it comes from inside
an egg. In that case, ``module_globals`` should contain a key
``__loader__`` which knows how to read from that file.
'''
lines = linecache.getlines(
filename, module_globals=module_globals)
_source = ''.join(lines)
_tree = ast.parse(_source)
finder = _StatementFinder(linenumber)
finder.visit(_tree)
line_range = range(finder.found - leading, linenumber + following)
return line_range, finder.found | python | def _find_assert_stmt(filename, linenumber, leading=1, following=2,
module_globals=None):
'''Given a Python module name, filename and line number, find
the lines that are part of the statement containing that line.
Python stacktraces, when reporting which line they're on, always
show the last line of the statement. This can be confusing if
the statement spans multiple lines. This function helps
reconstruct the whole statement, and is used by
:meth:`marbles.core.ContextualAssertionError.assert_stmt`.
Returns a tuple of the range of lines spanned by the source
being returned, the number of the line on which the interesting
statement starts.
We may need the ``module_globals`` in order to tell
:mod:`linecache` how to find the file, if it comes from inside
an egg. In that case, ``module_globals`` should contain a key
``__loader__`` which knows how to read from that file.
'''
lines = linecache.getlines(
filename, module_globals=module_globals)
_source = ''.join(lines)
_tree = ast.parse(_source)
finder = _StatementFinder(linenumber)
finder.visit(_tree)
line_range = range(finder.found - leading, linenumber + following)
return line_range, finder.found | [
"def",
"_find_assert_stmt",
"(",
"filename",
",",
"linenumber",
",",
"leading",
"=",
"1",
",",
"following",
"=",
"2",
",",
"module_globals",
"=",
"None",
")",
":",
"lines",
"=",
"linecache",
".",
"getlines",
"(",
"filename",
",",
"module_globals",
"=",
"mo... | Given a Python module name, filename and line number, find
the lines that are part of the statement containing that line.
Python stacktraces, when reporting which line they're on, always
show the last line of the statement. This can be confusing if
the statement spans multiple lines. This function helps
reconstruct the whole statement, and is used by
:meth:`marbles.core.ContextualAssertionError.assert_stmt`.
Returns a tuple of the range of lines spanned by the source
being returned, the number of the line on which the interesting
statement starts.
We may need the ``module_globals`` in order to tell
:mod:`linecache` how to find the file, if it comes from inside
an egg. In that case, ``module_globals`` should contain a key
``__loader__`` which knows how to read from that file. | [
"Given",
"a",
"Python",
"module",
"name",
"filename",
"and",
"line",
"number",
"find",
"the",
"lines",
"that",
"are",
"part",
"of",
"the",
"statement",
"containing",
"that",
"line",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/core/marbles/core/marbles.py#L345-L373 | train | 209,128 |
twosigma/marbles | marbles/core/marbles/core/marbles.py | AnnotationContext._validate_annotation | def _validate_annotation(self, annotation):
'''Ensures that the annotation has the right fields.'''
required_keys = set(self._required_keys)
keys = set(key for key, val in annotation.items() if val)
missing_keys = required_keys.difference(keys)
if missing_keys:
error = 'Annotation missing required fields: {0}'.format(
missing_keys)
raise AnnotationError(error) | python | def _validate_annotation(self, annotation):
'''Ensures that the annotation has the right fields.'''
required_keys = set(self._required_keys)
keys = set(key for key, val in annotation.items() if val)
missing_keys = required_keys.difference(keys)
if missing_keys:
error = 'Annotation missing required fields: {0}'.format(
missing_keys)
raise AnnotationError(error) | [
"def",
"_validate_annotation",
"(",
"self",
",",
"annotation",
")",
":",
"required_keys",
"=",
"set",
"(",
"self",
".",
"_required_keys",
")",
"keys",
"=",
"set",
"(",
"key",
"for",
"key",
",",
"val",
"in",
"annotation",
".",
"items",
"(",
")",
"if",
"... | Ensures that the annotation has the right fields. | [
"Ensures",
"that",
"the",
"annotation",
"has",
"the",
"right",
"fields",
"."
] | f0c668be8344c70d4d63bc57e82c6f2da43c6925 | https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/core/marbles/core/marbles.py#L398-L406 | train | 209,129 |
ericjang/tdb | tdb/transitive_closure.py | _tchelper | def _tchelper(tc_deps,evals,deps):
"""
modifies graph in place
"""
for e in evals:
if e in tc_deps: # we've already included it
continue
else:
if e in deps: # has additional dependnecies
tc_deps[e]=deps[e]
# add to tc_deps the dependencies of the dependencies
_tchelper(tc_deps,deps[e],deps)
return tc_deps | python | def _tchelper(tc_deps,evals,deps):
"""
modifies graph in place
"""
for e in evals:
if e in tc_deps: # we've already included it
continue
else:
if e in deps: # has additional dependnecies
tc_deps[e]=deps[e]
# add to tc_deps the dependencies of the dependencies
_tchelper(tc_deps,deps[e],deps)
return tc_deps | [
"def",
"_tchelper",
"(",
"tc_deps",
",",
"evals",
",",
"deps",
")",
":",
"for",
"e",
"in",
"evals",
":",
"if",
"e",
"in",
"tc_deps",
":",
"# we've already included it",
"continue",
"else",
":",
"if",
"e",
"in",
"deps",
":",
"# has additional dependnecies",
... | modifies graph in place | [
"modifies",
"graph",
"in",
"place"
] | 5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/transitive_closure.py#L2-L14 | train | 209,130 |
ericjang/tdb | tdb/debug_session.py | DebugSession.run | def run(self, evals, feed_dict=None, breakpoints=None, break_immediately=False):
"""
starts the debug session
"""
if not isinstance(evals,list):
evals=[evals]
if feed_dict is None:
feed_dict={}
if breakpoints is None:
breakpoints=[]
self.state=RUNNING
self._original_evals=evals
self._original_feed_dict=feed_dict
self._exe_order=op_store.compute_exe_order(evals)
self._init_evals_bps(evals, breakpoints)
# convert cache keys to strings
for k,v in feed_dict.items():
if not isinstance(k,str):
k=k.name
self._cache[k]=v
op_store.register_dbsession(self)
if break_immediately:
return self._break()
else:
return self.c() | python | def run(self, evals, feed_dict=None, breakpoints=None, break_immediately=False):
"""
starts the debug session
"""
if not isinstance(evals,list):
evals=[evals]
if feed_dict is None:
feed_dict={}
if breakpoints is None:
breakpoints=[]
self.state=RUNNING
self._original_evals=evals
self._original_feed_dict=feed_dict
self._exe_order=op_store.compute_exe_order(evals)
self._init_evals_bps(evals, breakpoints)
# convert cache keys to strings
for k,v in feed_dict.items():
if not isinstance(k,str):
k=k.name
self._cache[k]=v
op_store.register_dbsession(self)
if break_immediately:
return self._break()
else:
return self.c() | [
"def",
"run",
"(",
"self",
",",
"evals",
",",
"feed_dict",
"=",
"None",
",",
"breakpoints",
"=",
"None",
",",
"break_immediately",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"evals",
",",
"list",
")",
":",
"evals",
"=",
"[",
"evals",
"]"... | starts the debug session | [
"starts",
"the",
"debug",
"session"
] | 5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/debug_session.py#L33-L61 | train | 209,131 |
ericjang/tdb | tdb/debug_session.py | DebugSession.s | def s(self):
"""
step to the next node in the execution order
"""
next_node=self._exe_order[self.step]
self._eval(next_node)
self.step+=1
if self.step==len(self._exe_order):
return self._finish()
else:
# if stepping, return the value of the node we just
# evaled
return self._break(value=self._cache.get(next_node.name)) | python | def s(self):
"""
step to the next node in the execution order
"""
next_node=self._exe_order[self.step]
self._eval(next_node)
self.step+=1
if self.step==len(self._exe_order):
return self._finish()
else:
# if stepping, return the value of the node we just
# evaled
return self._break(value=self._cache.get(next_node.name)) | [
"def",
"s",
"(",
"self",
")",
":",
"next_node",
"=",
"self",
".",
"_exe_order",
"[",
"self",
".",
"step",
"]",
"self",
".",
"_eval",
"(",
"next_node",
")",
"self",
".",
"step",
"+=",
"1",
"if",
"self",
".",
"step",
"==",
"len",
"(",
"self",
".",
... | step to the next node in the execution order | [
"step",
"to",
"the",
"next",
"node",
"in",
"the",
"execution",
"order"
] | 5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/debug_session.py#L63-L75 | train | 209,132 |
ericjang/tdb | tdb/debug_session.py | DebugSession.get_value | def get_value(self, node):
"""
retrieve a node value from the cache
"""
if isinstance(node,tf.Tensor):
return self._cache.get(node.name,None)
elif isinstance(node,tf.Operation):
return None
else: # handle ascii, unicode strings
return self._cache.get(node,None) | python | def get_value(self, node):
"""
retrieve a node value from the cache
"""
if isinstance(node,tf.Tensor):
return self._cache.get(node.name,None)
elif isinstance(node,tf.Operation):
return None
else: # handle ascii, unicode strings
return self._cache.get(node,None) | [
"def",
"get_value",
"(",
"self",
",",
"node",
")",
":",
"if",
"isinstance",
"(",
"node",
",",
"tf",
".",
"Tensor",
")",
":",
"return",
"self",
".",
"_cache",
".",
"get",
"(",
"node",
".",
"name",
",",
"None",
")",
"elif",
"isinstance",
"(",
"node",... | retrieve a node value from the cache | [
"retrieve",
"a",
"node",
"value",
"from",
"the",
"cache"
] | 5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/debug_session.py#L104-L113 | train | 209,133 |
ericjang/tdb | tdb/debug_session.py | DebugSession._init_evals_bps | def _init_evals_bps(self, evals, breakpoints):
# If an eval or bp is the tf.Placeholder output of a tdb.PythonOp, replace it with its respective PythonOp node
evals2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in evals]
breakpoints2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in breakpoints]
# compute execution order
self._exe_order=op_store.compute_exe_order(evals2) # list of nodes
# compute evaluation set
"""
HTOps may depend on tf.Tensors that are not in eval. We need to have all inputs to HTOps ready
upon evaluation.
1. all evals that were originally specified are added
2. each HTOp in the execution closure needs to be in eval (they won't be eval'ed automatically by Session.run)
3. if an input to an HTOp is a tf.Tensor (not a HT placeholder tensor), it needs to be in eval as well (it's not
tensorflow so we'll have to manually evaluate it). Remember, we don't track Placeholders because we instead
run the HTOps that generate their values.
"""
self._evalset=set([e.name for e in evals2])
for e in self._exe_order:
if isinstance(e,HTOp):
self._evalset.add(e.name)
for t in e.inputs:
if not op_store.is_htop_out(t):
self._evalset.add(t.name)
# compute breakpoint set
self._bpset=set([bp.name for bp in breakpoints2]) | python | def _init_evals_bps(self, evals, breakpoints):
# If an eval or bp is the tf.Placeholder output of a tdb.PythonOp, replace it with its respective PythonOp node
evals2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in evals]
breakpoints2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in breakpoints]
# compute execution order
self._exe_order=op_store.compute_exe_order(evals2) # list of nodes
# compute evaluation set
"""
HTOps may depend on tf.Tensors that are not in eval. We need to have all inputs to HTOps ready
upon evaluation.
1. all evals that were originally specified are added
2. each HTOp in the execution closure needs to be in eval (they won't be eval'ed automatically by Session.run)
3. if an input to an HTOp is a tf.Tensor (not a HT placeholder tensor), it needs to be in eval as well (it's not
tensorflow so we'll have to manually evaluate it). Remember, we don't track Placeholders because we instead
run the HTOps that generate their values.
"""
self._evalset=set([e.name for e in evals2])
for e in self._exe_order:
if isinstance(e,HTOp):
self._evalset.add(e.name)
for t in e.inputs:
if not op_store.is_htop_out(t):
self._evalset.add(t.name)
# compute breakpoint set
self._bpset=set([bp.name for bp in breakpoints2]) | [
"def",
"_init_evals_bps",
"(",
"self",
",",
"evals",
",",
"breakpoints",
")",
":",
"# If an eval or bp is the tf.Placeholder output of a tdb.PythonOp, replace it with its respective PythonOp node",
"evals2",
"=",
"[",
"op_store",
".",
"get_op",
"(",
"t",
")",
"if",
"op_stor... | HTOps may depend on tf.Tensors that are not in eval. We need to have all inputs to HTOps ready
upon evaluation.
1. all evals that were originally specified are added
2. each HTOp in the execution closure needs to be in eval (they won't be eval'ed automatically by Session.run)
3. if an input to an HTOp is a tf.Tensor (not a HT placeholder tensor), it needs to be in eval as well (it's not
tensorflow so we'll have to manually evaluate it). Remember, we don't track Placeholders because we instead
run the HTOps that generate their values. | [
"HTOps",
"may",
"depend",
"on",
"tf",
".",
"Tensors",
"that",
"are",
"not",
"in",
"eval",
".",
"We",
"need",
"to",
"have",
"all",
"inputs",
"to",
"HTOps",
"ready",
"upon",
"evaluation",
"."
] | 5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/debug_session.py#L125-L151 | train | 209,134 |
ericjang/tdb | tdb/debug_session.py | DebugSession._eval | def _eval(self, node):
"""
node is a TensorFlow Op or Tensor from self._exe_order
"""
# if node.name == 'Momentum':
# pdb.set_trace()
if isinstance(node,HTOp):
# All Tensors MUST be in the cache.
feed_dict=dict((t,self._cache[t.name]) for t in node.inputs)
node.run(feed_dict) # this will populate self._cache on its own
else: # is a TensorFlow node
if isinstance(node,tf.Tensor):
result=self.session.run(node,self._cache)
self._cache[node.name]=result
else:
# is an operation
if node.type =='Assign' or node.type == 'AssignAdd' or node.type == 'AssignSub':
# special operation that takes in a tensor ref and mutates it
# unfortunately, we end up having to execute nearly the full graph?
# alternatively, find a way to pass the tensor_ref thru the feed_dict
# rather than the tensor values.
self.session.run(node,self._original_feed_dict) | python | def _eval(self, node):
"""
node is a TensorFlow Op or Tensor from self._exe_order
"""
# if node.name == 'Momentum':
# pdb.set_trace()
if isinstance(node,HTOp):
# All Tensors MUST be in the cache.
feed_dict=dict((t,self._cache[t.name]) for t in node.inputs)
node.run(feed_dict) # this will populate self._cache on its own
else: # is a TensorFlow node
if isinstance(node,tf.Tensor):
result=self.session.run(node,self._cache)
self._cache[node.name]=result
else:
# is an operation
if node.type =='Assign' or node.type == 'AssignAdd' or node.type == 'AssignSub':
# special operation that takes in a tensor ref and mutates it
# unfortunately, we end up having to execute nearly the full graph?
# alternatively, find a way to pass the tensor_ref thru the feed_dict
# rather than the tensor values.
self.session.run(node,self._original_feed_dict) | [
"def",
"_eval",
"(",
"self",
",",
"node",
")",
":",
"# if node.name == 'Momentum':",
"# \tpdb.set_trace()",
"if",
"isinstance",
"(",
"node",
",",
"HTOp",
")",
":",
"# All Tensors MUST be in the cache.",
"feed_dict",
"=",
"dict",
"(",
"(",
"t",
",",
"self",
".",
... | node is a TensorFlow Op or Tensor from self._exe_order | [
"node",
"is",
"a",
"TensorFlow",
"Op",
"or",
"Tensor",
"from",
"self",
".",
"_exe_order"
] | 5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/debug_session.py#L158-L179 | train | 209,135 |
ericjang/tdb | tdb/examples/mnist.py | error_rate | def error_rate(predictions, labels):
"""Return the error rate based on dense predictions and 1-hot labels."""
return 100.0 - (
100.0 *
np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) /
predictions.shape[0]) | python | def error_rate(predictions, labels):
"""Return the error rate based on dense predictions and 1-hot labels."""
return 100.0 - (
100.0 *
np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) /
predictions.shape[0]) | [
"def",
"error_rate",
"(",
"predictions",
",",
"labels",
")",
":",
"return",
"100.0",
"-",
"(",
"100.0",
"*",
"np",
".",
"sum",
"(",
"np",
".",
"argmax",
"(",
"predictions",
",",
"1",
")",
"==",
"np",
".",
"argmax",
"(",
"labels",
",",
"1",
")",
"... | Return the error rate based on dense predictions and 1-hot labels. | [
"Return",
"the",
"error",
"rate",
"based",
"on",
"dense",
"predictions",
"and",
"1",
"-",
"hot",
"labels",
"."
] | 5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/examples/mnist.py#L194-L199 | train | 209,136 |
ericjang/tdb | tdb/op_store.py | get_node | def get_node(name):
"""
returns HTOp or tf graph element corresponding to requested node name
"""
if name in _ops:
return _ops[name]
else:
g=tf.get_default_graph()
return g.as_graph_element(name) | python | def get_node(name):
"""
returns HTOp or tf graph element corresponding to requested node name
"""
if name in _ops:
return _ops[name]
else:
g=tf.get_default_graph()
return g.as_graph_element(name) | [
"def",
"get_node",
"(",
"name",
")",
":",
"if",
"name",
"in",
"_ops",
":",
"return",
"_ops",
"[",
"name",
"]",
"else",
":",
"g",
"=",
"tf",
".",
"get_default_graph",
"(",
")",
"return",
"g",
".",
"as_graph_element",
"(",
"name",
")"
] | returns HTOp or tf graph element corresponding to requested node name | [
"returns",
"HTOp",
"or",
"tf",
"graph",
"element",
"corresponding",
"to",
"requested",
"node",
"name"
] | 5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/op_store.py#L27-L35 | train | 209,137 |
ericjang/tdb | tdb/python_op.py | PythonOp.cache_values | def cache_values(self, results):
"""
loads into DebugSession cache
"""
if results is None:
# self.fn was probably only used to compute side effects.
return
elif isinstance(results,np.ndarray):
# fn returns single np.ndarray.
# re-format it into a list
results=[results]
# check validity of fn output
elif isinstance(results,list):
if len(results) is not len(self.outputs):
raise ValueError('Number of output tensors does not match number of outputs produced by function')
elif isinstance(results,np.number):
if len(self.outputs) != 1:
raise ValueError('Fn produces scalar but %d outputs expected' % (len(self.outputs)))
results=[results]
# assign each element in ndarrays to corresponding output tensor
for i,ndarray in enumerate(results):
self.session._cache_value(self.outputs[i], ndarray) | python | def cache_values(self, results):
"""
loads into DebugSession cache
"""
if results is None:
# self.fn was probably only used to compute side effects.
return
elif isinstance(results,np.ndarray):
# fn returns single np.ndarray.
# re-format it into a list
results=[results]
# check validity of fn output
elif isinstance(results,list):
if len(results) is not len(self.outputs):
raise ValueError('Number of output tensors does not match number of outputs produced by function')
elif isinstance(results,np.number):
if len(self.outputs) != 1:
raise ValueError('Fn produces scalar but %d outputs expected' % (len(self.outputs)))
results=[results]
# assign each element in ndarrays to corresponding output tensor
for i,ndarray in enumerate(results):
self.session._cache_value(self.outputs[i], ndarray) | [
"def",
"cache_values",
"(",
"self",
",",
"results",
")",
":",
"if",
"results",
"is",
"None",
":",
"# self.fn was probably only used to compute side effects.",
"return",
"elif",
"isinstance",
"(",
"results",
",",
"np",
".",
"ndarray",
")",
":",
"# fn returns single n... | loads into DebugSession cache | [
"loads",
"into",
"DebugSession",
"cache"
] | 5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/python_op.py#L54-L75 | train | 209,138 |
ericjang/tdb | tdb/interface.py | debug | def debug(evals,feed_dict=None,breakpoints=None,break_immediately=False,session=None):
"""
spawns a new debug session
"""
global _dbsession
_dbsession=debug_session.DebugSession(session)
return _dbsession.run(evals,feed_dict,breakpoints,break_immediately) | python | def debug(evals,feed_dict=None,breakpoints=None,break_immediately=False,session=None):
"""
spawns a new debug session
"""
global _dbsession
_dbsession=debug_session.DebugSession(session)
return _dbsession.run(evals,feed_dict,breakpoints,break_immediately) | [
"def",
"debug",
"(",
"evals",
",",
"feed_dict",
"=",
"None",
",",
"breakpoints",
"=",
"None",
",",
"break_immediately",
"=",
"False",
",",
"session",
"=",
"None",
")",
":",
"global",
"_dbsession",
"_dbsession",
"=",
"debug_session",
".",
"DebugSession",
"(",... | spawns a new debug session | [
"spawns",
"a",
"new",
"debug",
"session"
] | 5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/interface.py#L11-L17 | train | 209,139 |
ericjang/tdb | tdb/app.py | connect | def connect():
"""
establish connection to frontend notebook
"""
if not is_notebook():
print('Python session is not running in a Notebook Kernel')
return
global _comm
kernel=get_ipython().kernel
kernel.comm_manager.register_target('tdb',handle_comm_opened)
# initiate connection to frontend.
_comm=Comm(target_name='tdb',data={})
# bind recv handler
_comm.on_msg(None) | python | def connect():
"""
establish connection to frontend notebook
"""
if not is_notebook():
print('Python session is not running in a Notebook Kernel')
return
global _comm
kernel=get_ipython().kernel
kernel.comm_manager.register_target('tdb',handle_comm_opened)
# initiate connection to frontend.
_comm=Comm(target_name='tdb',data={})
# bind recv handler
_comm.on_msg(None) | [
"def",
"connect",
"(",
")",
":",
"if",
"not",
"is_notebook",
"(",
")",
":",
"print",
"(",
"'Python session is not running in a Notebook Kernel'",
")",
"return",
"global",
"_comm",
"kernel",
"=",
"get_ipython",
"(",
")",
".",
"kernel",
"kernel",
".",
"comm_manage... | establish connection to frontend notebook | [
"establish",
"connection",
"to",
"frontend",
"notebook"
] | 5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/app.py#L15-L30 | train | 209,140 |
ericjang/tdb | tdb/app.py | send_action | def send_action(action, params=None):
"""
helper method for sending actions
"""
data={"msg_type":"action", "action":action}
if params is not None:
data['params']=params
_comm.send(data) | python | def send_action(action, params=None):
"""
helper method for sending actions
"""
data={"msg_type":"action", "action":action}
if params is not None:
data['params']=params
_comm.send(data) | [
"def",
"send_action",
"(",
"action",
",",
"params",
"=",
"None",
")",
":",
"data",
"=",
"{",
"\"msg_type\"",
":",
"\"action\"",
",",
"\"action\"",
":",
"action",
"}",
"if",
"params",
"is",
"not",
"None",
":",
"data",
"[",
"'params'",
"]",
"=",
"params"... | helper method for sending actions | [
"helper",
"method",
"for",
"sending",
"actions"
] | 5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/app.py#L32-L39 | train | 209,141 |
ericjang/tdb | tdb/app.py | send_fig | def send_fig(fig,name):
"""
sends figure to frontend
"""
imgdata = StringIO.StringIO()
fig.savefig(imgdata, format='png')
imgdata.seek(0) # rewind the data
uri = 'data:image/png;base64,' + urllib.quote(b64encode(imgdata.buf))
send_action("update_plot",params={"src":uri, "name":name}) | python | def send_fig(fig,name):
"""
sends figure to frontend
"""
imgdata = StringIO.StringIO()
fig.savefig(imgdata, format='png')
imgdata.seek(0) # rewind the data
uri = 'data:image/png;base64,' + urllib.quote(b64encode(imgdata.buf))
send_action("update_plot",params={"src":uri, "name":name}) | [
"def",
"send_fig",
"(",
"fig",
",",
"name",
")",
":",
"imgdata",
"=",
"StringIO",
".",
"StringIO",
"(",
")",
"fig",
".",
"savefig",
"(",
"imgdata",
",",
"format",
"=",
"'png'",
")",
"imgdata",
".",
"seek",
"(",
"0",
")",
"# rewind the data",
"uri",
"... | sends figure to frontend | [
"sends",
"figure",
"to",
"frontend"
] | 5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/app.py#L41-L49 | train | 209,142 |
treasure-data/pandas-td | pandas_td/td.py | create_engine | def create_engine(url, con=None, header=True, show_progress=5.0, clear_progress=True):
'''Create a handler for query engine based on a URL.
The following environment variables are used for default connection:
TD_API_KEY API key
TD_API_SERVER API server (default: api.treasuredata.com)
HTTP_PROXY HTTP proxy (optional)
Parameters
----------
url : string
Engine descriptor in the form "type://apikey@host/database?params..."
Use shorthand notation "type:database?params..." for the default connection.
con : Connection, optional
Handler returned by connect. If not given, default connection is used.
header : string or boolean, default True
Prepend comment strings, in the form "-- comment", as a header of queries.
Set False to disable header.
show_progress : double or boolean, default 5.0
Number of seconds to wait before printing progress.
Set False to disable progress entirely.
clear_progress : boolean, default True
If True, clear progress when query completed.
Returns
-------
QueryEngine
'''
url = urlparse(url)
engine_type = url.scheme if url.scheme else 'presto'
if con is None:
if url.netloc:
# create connection
apikey, host = url.netloc.split('@')
con = Connection(apikey=apikey, endpoint="https://{0}/".format(host))
else:
# default connection
con = Connection()
database = url.path[1:] if url.path.startswith('/') else url.path
params = {
'type': engine_type,
}
params.update(parse_qsl(url.query))
return QueryEngine(con, database, params,
header=header,
show_progress=show_progress,
clear_progress=clear_progress) | python | def create_engine(url, con=None, header=True, show_progress=5.0, clear_progress=True):
'''Create a handler for query engine based on a URL.
The following environment variables are used for default connection:
TD_API_KEY API key
TD_API_SERVER API server (default: api.treasuredata.com)
HTTP_PROXY HTTP proxy (optional)
Parameters
----------
url : string
Engine descriptor in the form "type://apikey@host/database?params..."
Use shorthand notation "type:database?params..." for the default connection.
con : Connection, optional
Handler returned by connect. If not given, default connection is used.
header : string or boolean, default True
Prepend comment strings, in the form "-- comment", as a header of queries.
Set False to disable header.
show_progress : double or boolean, default 5.0
Number of seconds to wait before printing progress.
Set False to disable progress entirely.
clear_progress : boolean, default True
If True, clear progress when query completed.
Returns
-------
QueryEngine
'''
url = urlparse(url)
engine_type = url.scheme if url.scheme else 'presto'
if con is None:
if url.netloc:
# create connection
apikey, host = url.netloc.split('@')
con = Connection(apikey=apikey, endpoint="https://{0}/".format(host))
else:
# default connection
con = Connection()
database = url.path[1:] if url.path.startswith('/') else url.path
params = {
'type': engine_type,
}
params.update(parse_qsl(url.query))
return QueryEngine(con, database, params,
header=header,
show_progress=show_progress,
clear_progress=clear_progress) | [
"def",
"create_engine",
"(",
"url",
",",
"con",
"=",
"None",
",",
"header",
"=",
"True",
",",
"show_progress",
"=",
"5.0",
",",
"clear_progress",
"=",
"True",
")",
":",
"url",
"=",
"urlparse",
"(",
"url",
")",
"engine_type",
"=",
"url",
".",
"scheme",
... | Create a handler for query engine based on a URL.
The following environment variables are used for default connection:
TD_API_KEY API key
TD_API_SERVER API server (default: api.treasuredata.com)
HTTP_PROXY HTTP proxy (optional)
Parameters
----------
url : string
Engine descriptor in the form "type://apikey@host/database?params..."
Use shorthand notation "type:database?params..." for the default connection.
con : Connection, optional
Handler returned by connect. If not given, default connection is used.
header : string or boolean, default True
Prepend comment strings, in the form "-- comment", as a header of queries.
Set False to disable header.
show_progress : double or boolean, default 5.0
Number of seconds to wait before printing progress.
Set False to disable progress entirely.
clear_progress : boolean, default True
If True, clear progress when query completed.
Returns
-------
QueryEngine | [
"Create",
"a",
"handler",
"for",
"query",
"engine",
"based",
"on",
"a",
"URL",
"."
] | 0f4e1ee726f593dbb9cc74312c286e62afdf7de2 | https://github.com/treasure-data/pandas-td/blob/0f4e1ee726f593dbb9cc74312c286e62afdf7de2/pandas_td/td.py#L439-L486 | train | 209,143 |
treasure-data/pandas-td | pandas_td/td.py | read_td_query | def read_td_query(query, engine, index_col=None, parse_dates=None, distributed_join=False, params=None):
'''Read Treasure Data query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query string.
Optionally provide an index_col parameter to use one of the columns as
the index, otherwise default integer index will be used.
Parameters
----------
query : string
Query string to be executed.
engine : QueryEngine
Handler returned by create_engine.
index_col : string, optional
Column name to use as index for the returned DataFrame object.
parse_dates : list or dict, optional
- List of column names to parse as dates
- Dict of {column_name: format string} where format string is strftime
compatible in case of parsing string times or is one of (D, s, ns, ms, us)
in case of parsing integer timestamps
distributed_join : boolean, default False
(Presto only) If True, distributed join is enabled. If False, broadcast join is used.
See https://prestodb.io/docs/current/release/release-0.77.html
params : dict, optional
Parameters to pass to execute method.
Available parameters:
- result_url (str): result output URL
- priority (int or str): priority (e.g. "NORMAL", "HIGH", etc.)
- retry_limit (int): retry limit
Returns
-------
DataFrame
'''
if params is None:
params = {}
# header
header = engine.create_header("read_td_query")
if engine.type == 'presto' and distributed_join is not None:
header += "-- set session distributed_join = '{0}'\n".format('true' if distributed_join else 'false')
# execute
r = engine.execute(header + query, **params)
return r.to_dataframe(index_col=index_col, parse_dates=parse_dates) | python | def read_td_query(query, engine, index_col=None, parse_dates=None, distributed_join=False, params=None):
'''Read Treasure Data query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query string.
Optionally provide an index_col parameter to use one of the columns as
the index, otherwise default integer index will be used.
Parameters
----------
query : string
Query string to be executed.
engine : QueryEngine
Handler returned by create_engine.
index_col : string, optional
Column name to use as index for the returned DataFrame object.
parse_dates : list or dict, optional
- List of column names to parse as dates
- Dict of {column_name: format string} where format string is strftime
compatible in case of parsing string times or is one of (D, s, ns, ms, us)
in case of parsing integer timestamps
distributed_join : boolean, default False
(Presto only) If True, distributed join is enabled. If False, broadcast join is used.
See https://prestodb.io/docs/current/release/release-0.77.html
params : dict, optional
Parameters to pass to execute method.
Available parameters:
- result_url (str): result output URL
- priority (int or str): priority (e.g. "NORMAL", "HIGH", etc.)
- retry_limit (int): retry limit
Returns
-------
DataFrame
'''
if params is None:
params = {}
# header
header = engine.create_header("read_td_query")
if engine.type == 'presto' and distributed_join is not None:
header += "-- set session distributed_join = '{0}'\n".format('true' if distributed_join else 'false')
# execute
r = engine.execute(header + query, **params)
return r.to_dataframe(index_col=index_col, parse_dates=parse_dates) | [
"def",
"read_td_query",
"(",
"query",
",",
"engine",
",",
"index_col",
"=",
"None",
",",
"parse_dates",
"=",
"None",
",",
"distributed_join",
"=",
"False",
",",
"params",
"=",
"None",
")",
":",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"{",
"}"... | Read Treasure Data query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query string.
Optionally provide an index_col parameter to use one of the columns as
the index, otherwise default integer index will be used.
Parameters
----------
query : string
Query string to be executed.
engine : QueryEngine
Handler returned by create_engine.
index_col : string, optional
Column name to use as index for the returned DataFrame object.
parse_dates : list or dict, optional
- List of column names to parse as dates
- Dict of {column_name: format string} where format string is strftime
compatible in case of parsing string times or is one of (D, s, ns, ms, us)
in case of parsing integer timestamps
distributed_join : boolean, default False
(Presto only) If True, distributed join is enabled. If False, broadcast join is used.
See https://prestodb.io/docs/current/release/release-0.77.html
params : dict, optional
Parameters to pass to execute method.
Available parameters:
- result_url (str): result output URL
- priority (int or str): priority (e.g. "NORMAL", "HIGH", etc.)
- retry_limit (int): retry limit
Returns
-------
DataFrame | [
"Read",
"Treasure",
"Data",
"query",
"into",
"a",
"DataFrame",
"."
] | 0f4e1ee726f593dbb9cc74312c286e62afdf7de2 | https://github.com/treasure-data/pandas-td/blob/0f4e1ee726f593dbb9cc74312c286e62afdf7de2/pandas_td/td.py#L488-L530 | train | 209,144 |
treasure-data/pandas-td | pandas_td/td.py | read_td_job | def read_td_job(job_id, engine, index_col=None, parse_dates=None):
'''Read Treasure Data job result into a DataFrame.
Returns a DataFrame corresponding to the result set of the job.
This method waits for job completion if the specified job is still running.
Optionally provide an index_col parameter to use one of the columns as
the index, otherwise default integer index will be used.
Parameters
----------
job_id : integer
Job ID.
engine : QueryEngine
Handler returned by create_engine.
index_col : string, optional
Column name to use as index for the returned DataFrame object.
parse_dates : list or dict, optional
- List of column names to parse as dates
- Dict of {column_name: format string} where format string is strftime
compatible in case of parsing string times or is one of (D, s, ns, ms, us)
in case of parsing integer timestamps
Returns
-------
DataFrame
'''
# get job
job = engine.connection.client.job(job_id)
# result
r = engine.get_result(job, wait=True)
return r.to_dataframe(index_col=index_col, parse_dates=parse_dates) | python | def read_td_job(job_id, engine, index_col=None, parse_dates=None):
'''Read Treasure Data job result into a DataFrame.
Returns a DataFrame corresponding to the result set of the job.
This method waits for job completion if the specified job is still running.
Optionally provide an index_col parameter to use one of the columns as
the index, otherwise default integer index will be used.
Parameters
----------
job_id : integer
Job ID.
engine : QueryEngine
Handler returned by create_engine.
index_col : string, optional
Column name to use as index for the returned DataFrame object.
parse_dates : list or dict, optional
- List of column names to parse as dates
- Dict of {column_name: format string} where format string is strftime
compatible in case of parsing string times or is one of (D, s, ns, ms, us)
in case of parsing integer timestamps
Returns
-------
DataFrame
'''
# get job
job = engine.connection.client.job(job_id)
# result
r = engine.get_result(job, wait=True)
return r.to_dataframe(index_col=index_col, parse_dates=parse_dates) | [
"def",
"read_td_job",
"(",
"job_id",
",",
"engine",
",",
"index_col",
"=",
"None",
",",
"parse_dates",
"=",
"None",
")",
":",
"# get job",
"job",
"=",
"engine",
".",
"connection",
".",
"client",
".",
"job",
"(",
"job_id",
")",
"# result",
"r",
"=",
"en... | Read Treasure Data job result into a DataFrame.
Returns a DataFrame corresponding to the result set of the job.
This method waits for job completion if the specified job is still running.
Optionally provide an index_col parameter to use one of the columns as
the index, otherwise default integer index will be used.
Parameters
----------
job_id : integer
Job ID.
engine : QueryEngine
Handler returned by create_engine.
index_col : string, optional
Column name to use as index for the returned DataFrame object.
parse_dates : list or dict, optional
- List of column names to parse as dates
- Dict of {column_name: format string} where format string is strftime
compatible in case of parsing string times or is one of (D, s, ns, ms, us)
in case of parsing integer timestamps
Returns
-------
DataFrame | [
"Read",
"Treasure",
"Data",
"job",
"result",
"into",
"a",
"DataFrame",
"."
] | 0f4e1ee726f593dbb9cc74312c286e62afdf7de2 | https://github.com/treasure-data/pandas-td/blob/0f4e1ee726f593dbb9cc74312c286e62afdf7de2/pandas_td/td.py#L532-L562 | train | 209,145 |
treasure-data/pandas-td | pandas_td/td.py | read_td_table | def read_td_table(table_name, engine, index_col=None, parse_dates=None, columns=None, time_range=None, limit=10000):
'''Read Treasure Data table into a DataFrame.
The number of returned rows is limited by "limit" (default 10,000).
Setting limit=None means all rows. Be careful when you set limit=None
because your table might be very large and the result does not fit into memory.
Parameters
----------
table_name : string
Name of Treasure Data table in database.
engine : QueryEngine
Handler returned by create_engine.
index_col : string, optional
Column name to use as index for the returned DataFrame object.
parse_dates : list or dict, optional
- List of column names to parse as dates
- Dict of {column_name: format string} where format string is strftime
compatible in case of parsing string times or is one of (D, s, ns, ms, us)
in case of parsing integer timestamps
columns : list, optional
List of column names to select from table.
time_range : tuple (start, end), optional
Limit time range to select. "start" and "end" are one of None, integers,
strings or datetime objects. "end" is exclusive, not included in the result.
limit : int, default 10,000
Maximum number of rows to select.
Returns
-------
DataFrame
'''
# header
query = engine.create_header("read_td_table('{0}')".format(table_name))
# SELECT
query += "SELECT {0}\n".format('*' if columns is None else ', '.join(columns))
# FROM
query += "FROM {0}\n".format(table_name)
# WHERE
if time_range is not None:
start, end = time_range
query += "WHERE td_time_range(time, {0}, {1})\n".format(_convert_time(start), _convert_time(end))
# LIMIT
if limit is not None:
query += "LIMIT {0}\n".format(limit)
# execute
r = engine.execute(query)
return r.to_dataframe(index_col=index_col, parse_dates=parse_dates) | python | def read_td_table(table_name, engine, index_col=None, parse_dates=None, columns=None, time_range=None, limit=10000):
'''Read Treasure Data table into a DataFrame.
The number of returned rows is limited by "limit" (default 10,000).
Setting limit=None means all rows. Be careful when you set limit=None
because your table might be very large and the result does not fit into memory.
Parameters
----------
table_name : string
Name of Treasure Data table in database.
engine : QueryEngine
Handler returned by create_engine.
index_col : string, optional
Column name to use as index for the returned DataFrame object.
parse_dates : list or dict, optional
- List of column names to parse as dates
- Dict of {column_name: format string} where format string is strftime
compatible in case of parsing string times or is one of (D, s, ns, ms, us)
in case of parsing integer timestamps
columns : list, optional
List of column names to select from table.
time_range : tuple (start, end), optional
Limit time range to select. "start" and "end" are one of None, integers,
strings or datetime objects. "end" is exclusive, not included in the result.
limit : int, default 10,000
Maximum number of rows to select.
Returns
-------
DataFrame
'''
# header
query = engine.create_header("read_td_table('{0}')".format(table_name))
# SELECT
query += "SELECT {0}\n".format('*' if columns is None else ', '.join(columns))
# FROM
query += "FROM {0}\n".format(table_name)
# WHERE
if time_range is not None:
start, end = time_range
query += "WHERE td_time_range(time, {0}, {1})\n".format(_convert_time(start), _convert_time(end))
# LIMIT
if limit is not None:
query += "LIMIT {0}\n".format(limit)
# execute
r = engine.execute(query)
return r.to_dataframe(index_col=index_col, parse_dates=parse_dates) | [
"def",
"read_td_table",
"(",
"table_name",
",",
"engine",
",",
"index_col",
"=",
"None",
",",
"parse_dates",
"=",
"None",
",",
"columns",
"=",
"None",
",",
"time_range",
"=",
"None",
",",
"limit",
"=",
"10000",
")",
":",
"# header",
"query",
"=",
"engine... | Read Treasure Data table into a DataFrame.
The number of returned rows is limited by "limit" (default 10,000).
Setting limit=None means all rows. Be careful when you set limit=None
because your table might be very large and the result does not fit into memory.
Parameters
----------
table_name : string
Name of Treasure Data table in database.
engine : QueryEngine
Handler returned by create_engine.
index_col : string, optional
Column name to use as index for the returned DataFrame object.
parse_dates : list or dict, optional
- List of column names to parse as dates
- Dict of {column_name: format string} where format string is strftime
compatible in case of parsing string times or is one of (D, s, ns, ms, us)
in case of parsing integer timestamps
columns : list, optional
List of column names to select from table.
time_range : tuple (start, end), optional
Limit time range to select. "start" and "end" are one of None, integers,
strings or datetime objects. "end" is exclusive, not included in the result.
limit : int, default 10,000
Maximum number of rows to select.
Returns
-------
DataFrame | [
"Read",
"Treasure",
"Data",
"table",
"into",
"a",
"DataFrame",
"."
] | 0f4e1ee726f593dbb9cc74312c286e62afdf7de2 | https://github.com/treasure-data/pandas-td/blob/0f4e1ee726f593dbb9cc74312c286e62afdf7de2/pandas_td/td.py#L564-L611 | train | 209,146 |
treasure-data/pandas-td | pandas_td/td.py | to_td | def to_td(frame, name, con, if_exists='fail', time_col=None, time_index=None, index=True, index_label=None, chunksize=10000, date_format=None):
'''Write a DataFrame to a Treasure Data table.
This method converts the dataframe into a series of key-value pairs
and send them using the Treasure Data streaming API. The data is divided
into chunks of rows (default 10,000) and uploaded separately. If upload
failed, the client retries the process for a certain amount of time
(max_cumul_retry_delay; default 600 secs). This method may fail and
raise an exception when retries did not success, in which case the data
may be partially inserted. Use the bulk import utility if you cannot
accept partial inserts.
Parameters
----------
frame : DataFrame
DataFrame to be written.
name : string
Name of table to be written, in the form 'database.table'.
con : Connection
Connection to a Treasure Data account.
if_exists: {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
time_col : string, optional
Column name to use as "time" column for the table. Column type must be
integer (unixtime), datetime, or string. If None is given (default),
then the current time is used as time values.
time_index : int, optional
Level of index to use as "time" column for the table. Set 0 for a single index.
This parameter implies index=False.
index : boolean, default True
Write DataFrame index as a column.
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and index is True,
then the index names are used. A sequence should be given if the DataFrame uses
MultiIndex.
chunksize : int, default 10,000
Number of rows to be inserted in each chunk from the dataframe.
date_format : string, default None
Format string for datetime objects
'''
database, table = name.split('.')
uploader = StreamingUploader(con.client, database, table, show_progress=True, clear_progress=True)
uploader.message('Streaming import into: {0}.{1}'.format(database, table))
# check existence
if if_exists == 'fail':
try:
con.client.table(database, table)
except tdclient.api.NotFoundError:
uploader.message('creating new table...')
con.client.create_log_table(database, table)
else:
raise RuntimeError('table "%s" already exists' % name)
elif if_exists == 'replace':
try:
con.client.table(database, table)
except tdclient.api.NotFoundError:
pass
else:
uploader.message('deleting old table...')
con.client.delete_table(database, table)
uploader.message('creating new table...')
con.client.create_log_table(database, table)
elif if_exists == 'append':
try:
con.client.table(database, table)
except tdclient.api.NotFoundError:
uploader.message('creating new table...')
con.client.create_log_table(database, table)
else:
raise ValueError('invalid value for if_exists: %s' % if_exists)
# "time_index" implies "index=False"
if time_index:
index = None
# convert
frame = frame.copy()
frame = _convert_time_column(frame, time_col, time_index)
frame = _convert_index_column(frame, index, index_label)
frame = _convert_date_format(frame, date_format)
# upload
uploader.upload_frame(frame, chunksize)
uploader.wait_for_import(len(frame)) | python | def to_td(frame, name, con, if_exists='fail', time_col=None, time_index=None, index=True, index_label=None, chunksize=10000, date_format=None):
'''Write a DataFrame to a Treasure Data table.
This method converts the dataframe into a series of key-value pairs
and send them using the Treasure Data streaming API. The data is divided
into chunks of rows (default 10,000) and uploaded separately. If upload
failed, the client retries the process for a certain amount of time
(max_cumul_retry_delay; default 600 secs). This method may fail and
raise an exception when retries did not success, in which case the data
may be partially inserted. Use the bulk import utility if you cannot
accept partial inserts.
Parameters
----------
frame : DataFrame
DataFrame to be written.
name : string
Name of table to be written, in the form 'database.table'.
con : Connection
Connection to a Treasure Data account.
if_exists: {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
time_col : string, optional
Column name to use as "time" column for the table. Column type must be
integer (unixtime), datetime, or string. If None is given (default),
then the current time is used as time values.
time_index : int, optional
Level of index to use as "time" column for the table. Set 0 for a single index.
This parameter implies index=False.
index : boolean, default True
Write DataFrame index as a column.
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and index is True,
then the index names are used. A sequence should be given if the DataFrame uses
MultiIndex.
chunksize : int, default 10,000
Number of rows to be inserted in each chunk from the dataframe.
date_format : string, default None
Format string for datetime objects
'''
database, table = name.split('.')
uploader = StreamingUploader(con.client, database, table, show_progress=True, clear_progress=True)
uploader.message('Streaming import into: {0}.{1}'.format(database, table))
# check existence
if if_exists == 'fail':
try:
con.client.table(database, table)
except tdclient.api.NotFoundError:
uploader.message('creating new table...')
con.client.create_log_table(database, table)
else:
raise RuntimeError('table "%s" already exists' % name)
elif if_exists == 'replace':
try:
con.client.table(database, table)
except tdclient.api.NotFoundError:
pass
else:
uploader.message('deleting old table...')
con.client.delete_table(database, table)
uploader.message('creating new table...')
con.client.create_log_table(database, table)
elif if_exists == 'append':
try:
con.client.table(database, table)
except tdclient.api.NotFoundError:
uploader.message('creating new table...')
con.client.create_log_table(database, table)
else:
raise ValueError('invalid value for if_exists: %s' % if_exists)
# "time_index" implies "index=False"
if time_index:
index = None
# convert
frame = frame.copy()
frame = _convert_time_column(frame, time_col, time_index)
frame = _convert_index_column(frame, index, index_label)
frame = _convert_date_format(frame, date_format)
# upload
uploader.upload_frame(frame, chunksize)
uploader.wait_for_import(len(frame)) | [
"def",
"to_td",
"(",
"frame",
",",
"name",
",",
"con",
",",
"if_exists",
"=",
"'fail'",
",",
"time_col",
"=",
"None",
",",
"time_index",
"=",
"None",
",",
"index",
"=",
"True",
",",
"index_label",
"=",
"None",
",",
"chunksize",
"=",
"10000",
",",
"da... | Write a DataFrame to a Treasure Data table.
This method converts the dataframe into a series of key-value pairs
and send them using the Treasure Data streaming API. The data is divided
into chunks of rows (default 10,000) and uploaded separately. If upload
failed, the client retries the process for a certain amount of time
(max_cumul_retry_delay; default 600 secs). This method may fail and
raise an exception when retries did not success, in which case the data
may be partially inserted. Use the bulk import utility if you cannot
accept partial inserts.
Parameters
----------
frame : DataFrame
DataFrame to be written.
name : string
Name of table to be written, in the form 'database.table'.
con : Connection
Connection to a Treasure Data account.
if_exists: {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
time_col : string, optional
Column name to use as "time" column for the table. Column type must be
integer (unixtime), datetime, or string. If None is given (default),
then the current time is used as time values.
time_index : int, optional
Level of index to use as "time" column for the table. Set 0 for a single index.
This parameter implies index=False.
index : boolean, default True
Write DataFrame index as a column.
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and index is True,
then the index names are used. A sequence should be given if the DataFrame uses
MultiIndex.
chunksize : int, default 10,000
Number of rows to be inserted in each chunk from the dataframe.
date_format : string, default None
Format string for datetime objects | [
"Write",
"a",
"DataFrame",
"to",
"a",
"Treasure",
"Data",
"table",
"."
] | 0f4e1ee726f593dbb9cc74312c286e62afdf7de2 | https://github.com/treasure-data/pandas-td/blob/0f4e1ee726f593dbb9cc74312c286e62afdf7de2/pandas_td/td.py#L629-L715 | train | 209,147 |
frictionlessdata/tableschema-py | tableschema/helpers.py | ensure_dir | def ensure_dir(path):
"""Ensure directory exists.
Args:
path(str): dir path
"""
dirpath = os.path.dirname(path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath) | python | def ensure_dir(path):
"""Ensure directory exists.
Args:
path(str): dir path
"""
dirpath = os.path.dirname(path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath) | [
"def",
"ensure_dir",
"(",
"path",
")",
":",
"dirpath",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"if",
"dirpath",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dirpath",
")",
":",
"os",
".",
"makedirs",
"(",
"dirpath",
")"
] | Ensure directory exists.
Args:
path(str): dir path | [
"Ensure",
"directory",
"exists",
"."
] | 9c5fa930319e7c5b10351f794091c5f9de5e8684 | https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/helpers.py#L67-L76 | train | 209,148 |
frictionlessdata/tableschema-py | tableschema/helpers.py | normalize_value | def normalize_value(value):
"""Convert value to string and make it lower cased.
"""
cast = str
if six.PY2:
cast = unicode # noqa
return cast(value).lower() | python | def normalize_value(value):
"""Convert value to string and make it lower cased.
"""
cast = str
if six.PY2:
cast = unicode # noqa
return cast(value).lower() | [
"def",
"normalize_value",
"(",
"value",
")",
":",
"cast",
"=",
"str",
"if",
"six",
".",
"PY2",
":",
"cast",
"=",
"unicode",
"# noqa",
"return",
"cast",
"(",
"value",
")",
".",
"lower",
"(",
")"
] | Convert value to string and make it lower cased. | [
"Convert",
"value",
"to",
"string",
"and",
"make",
"it",
"lower",
"cased",
"."
] | 9c5fa930319e7c5b10351f794091c5f9de5e8684 | https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/helpers.py#L79-L85 | train | 209,149 |
frictionlessdata/tableschema-py | tableschema/cli.py | infer | def infer(data, row_limit, confidence, encoding, to_file):
"""Infer a schema from data.
* data must be a local filepath
* data must be CSV
* the file encoding is assumed to be UTF-8 unless an encoding is passed
with --encoding
* the first line of data must be headers
* these constraints are just for the CLI
"""
descriptor = tableschema.infer(data,
encoding=encoding,
limit=row_limit,
confidence=confidence)
if to_file:
with io.open(to_file, mode='w+t', encoding='utf-8') as dest:
dest.write(json.dumps(descriptor, ensure_ascii=False, indent=4))
click.echo(descriptor) | python | def infer(data, row_limit, confidence, encoding, to_file):
"""Infer a schema from data.
* data must be a local filepath
* data must be CSV
* the file encoding is assumed to be UTF-8 unless an encoding is passed
with --encoding
* the first line of data must be headers
* these constraints are just for the CLI
"""
descriptor = tableschema.infer(data,
encoding=encoding,
limit=row_limit,
confidence=confidence)
if to_file:
with io.open(to_file, mode='w+t', encoding='utf-8') as dest:
dest.write(json.dumps(descriptor, ensure_ascii=False, indent=4))
click.echo(descriptor) | [
"def",
"infer",
"(",
"data",
",",
"row_limit",
",",
"confidence",
",",
"encoding",
",",
"to_file",
")",
":",
"descriptor",
"=",
"tableschema",
".",
"infer",
"(",
"data",
",",
"encoding",
"=",
"encoding",
",",
"limit",
"=",
"row_limit",
",",
"confidence",
... | Infer a schema from data.
* data must be a local filepath
* data must be CSV
* the file encoding is assumed to be UTF-8 unless an encoding is passed
with --encoding
* the first line of data must be headers
* these constraints are just for the CLI | [
"Infer",
"a",
"schema",
"from",
"data",
"."
] | 9c5fa930319e7c5b10351f794091c5f9de5e8684 | https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/cli.py#L36-L53 | train | 209,150 |
frictionlessdata/tableschema-py | tableschema/cli.py | validate | def validate(schema):
"""Validate that a supposed schema is in fact a Table Schema."""
try:
tableschema.validate(schema)
click.echo("Schema is valid")
sys.exit(0)
except tableschema.exceptions.ValidationError as exception:
click.echo("Schema is not valid")
click.echo(exception.errors)
sys.exit(1) | python | def validate(schema):
"""Validate that a supposed schema is in fact a Table Schema."""
try:
tableschema.validate(schema)
click.echo("Schema is valid")
sys.exit(0)
except tableschema.exceptions.ValidationError as exception:
click.echo("Schema is not valid")
click.echo(exception.errors)
sys.exit(1) | [
"def",
"validate",
"(",
"schema",
")",
":",
"try",
":",
"tableschema",
".",
"validate",
"(",
"schema",
")",
"click",
".",
"echo",
"(",
"\"Schema is valid\"",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"except",
"tableschema",
".",
"exceptions",
".",
"Valida... | Validate that a supposed schema is in fact a Table Schema. | [
"Validate",
"that",
"a",
"supposed",
"schema",
"is",
"in",
"fact",
"a",
"Table",
"Schema",
"."
] | 9c5fa930319e7c5b10351f794091c5f9de5e8684 | https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/cli.py#L58-L67 | train | 209,151 |
log2timeline/dfwinreg | dfwinreg/registry_searcher.py | FindSpec._CheckKeyPath | def _CheckKeyPath(self, registry_key, search_depth):
"""Checks the key path find specification.
Args:
registry_key (WinRegistryKey): Windows Registry key.
search_depth (int): number of key path segments to compare.
Returns:
bool: True if the Windows Registry key matches the find specification,
False if not.
"""
if self._key_path_segments is None:
return False
if search_depth < 0 or search_depth > self._number_of_key_path_segments:
return False
# Note that the root has no entry in the key path segments and
# no name to match.
if search_depth == 0:
segment_name = ''
else:
segment_name = self._key_path_segments[search_depth - 1]
if self._is_regex:
if isinstance(segment_name, py2to3.STRING_TYPES):
# Allow '\n' to be matched by '.' and make '\w', '\W', '\b', '\B',
# '\d', '\D', '\s' and '\S' Unicode safe.
flags = re.DOTALL | re.IGNORECASE | re.UNICODE
try:
segment_name = r'^{0:s}$'.format(segment_name)
segment_name = re.compile(segment_name, flags=flags)
except sre_constants.error:
# TODO: set self._key_path_segments[search_depth - 1] to None ?
return False
self._key_path_segments[search_depth - 1] = segment_name
else:
segment_name = segment_name.lower()
self._key_path_segments[search_depth - 1] = segment_name
if search_depth > 0:
if self._is_regex:
# pylint: disable=no-member
if not segment_name.match(registry_key.name):
return False
elif segment_name != registry_key.name.lower():
return False
return True | python | def _CheckKeyPath(self, registry_key, search_depth):
"""Checks the key path find specification.
Args:
registry_key (WinRegistryKey): Windows Registry key.
search_depth (int): number of key path segments to compare.
Returns:
bool: True if the Windows Registry key matches the find specification,
False if not.
"""
if self._key_path_segments is None:
return False
if search_depth < 0 or search_depth > self._number_of_key_path_segments:
return False
# Note that the root has no entry in the key path segments and
# no name to match.
if search_depth == 0:
segment_name = ''
else:
segment_name = self._key_path_segments[search_depth - 1]
if self._is_regex:
if isinstance(segment_name, py2to3.STRING_TYPES):
# Allow '\n' to be matched by '.' and make '\w', '\W', '\b', '\B',
# '\d', '\D', '\s' and '\S' Unicode safe.
flags = re.DOTALL | re.IGNORECASE | re.UNICODE
try:
segment_name = r'^{0:s}$'.format(segment_name)
segment_name = re.compile(segment_name, flags=flags)
except sre_constants.error:
# TODO: set self._key_path_segments[search_depth - 1] to None ?
return False
self._key_path_segments[search_depth - 1] = segment_name
else:
segment_name = segment_name.lower()
self._key_path_segments[search_depth - 1] = segment_name
if search_depth > 0:
if self._is_regex:
# pylint: disable=no-member
if not segment_name.match(registry_key.name):
return False
elif segment_name != registry_key.name.lower():
return False
return True | [
"def",
"_CheckKeyPath",
"(",
"self",
",",
"registry_key",
",",
"search_depth",
")",
":",
"if",
"self",
".",
"_key_path_segments",
"is",
"None",
":",
"return",
"False",
"if",
"search_depth",
"<",
"0",
"or",
"search_depth",
">",
"self",
".",
"_number_of_key_path... | Checks the key path find specification.
Args:
registry_key (WinRegistryKey): Windows Registry key.
search_depth (int): number of key path segments to compare.
Returns:
bool: True if the Windows Registry key matches the find specification,
False if not. | [
"Checks",
"the",
"key",
"path",
"find",
"specification",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/registry_searcher.py#L108-L160 | train | 209,152 |
log2timeline/dfwinreg | dfwinreg/registry_searcher.py | FindSpec.Matches | def Matches(self, registry_key, search_depth):
"""Determines if the Windows Registry key matches the find specification.
Args:
registry_key (WinRegistryKey): Windows Registry key.
search_depth (int): number of key path segments to compare.
Returns:
tuple: contains:
bool: True if the Windows Registry key matches the find specification,
False otherwise.
bool: True if the key path matches, False if not or None if no key path
specified.
"""
if self._key_path_segments is None:
key_path_match = None
else:
key_path_match = self._CheckKeyPath(registry_key, search_depth)
if not key_path_match:
return False, key_path_match
if search_depth != self._number_of_key_path_segments:
return False, key_path_match
return True, key_path_match | python | def Matches(self, registry_key, search_depth):
"""Determines if the Windows Registry key matches the find specification.
Args:
registry_key (WinRegistryKey): Windows Registry key.
search_depth (int): number of key path segments to compare.
Returns:
tuple: contains:
bool: True if the Windows Registry key matches the find specification,
False otherwise.
bool: True if the key path matches, False if not or None if no key path
specified.
"""
if self._key_path_segments is None:
key_path_match = None
else:
key_path_match = self._CheckKeyPath(registry_key, search_depth)
if not key_path_match:
return False, key_path_match
if search_depth != self._number_of_key_path_segments:
return False, key_path_match
return True, key_path_match | [
"def",
"Matches",
"(",
"self",
",",
"registry_key",
",",
"search_depth",
")",
":",
"if",
"self",
".",
"_key_path_segments",
"is",
"None",
":",
"key_path_match",
"=",
"None",
"else",
":",
"key_path_match",
"=",
"self",
".",
"_CheckKeyPath",
"(",
"registry_key",... | Determines if the Windows Registry key matches the find specification.
Args:
registry_key (WinRegistryKey): Windows Registry key.
search_depth (int): number of key path segments to compare.
Returns:
tuple: contains:
bool: True if the Windows Registry key matches the find specification,
False otherwise.
bool: True if the key path matches, False if not or None if no key path
specified. | [
"Determines",
"if",
"the",
"Windows",
"Registry",
"key",
"matches",
"the",
"find",
"specification",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/registry_searcher.py#L177-L202 | train | 209,153 |
log2timeline/dfwinreg | dfwinreg/registry_searcher.py | WinRegistrySearcher._FindInKey | def _FindInKey(self, registry_key, find_specs, search_depth):
"""Searches for matching keys within the Windows Registry key.
Args:
registry_key (WinRegistryKey): Windows Registry key.
find_specs (list[FindSpec]): find specifications.
search_depth (int): number of key path segments to compare.
Yields:
str: key path of a matching Windows Registry key.
"""
sub_find_specs = []
for find_spec in find_specs:
match, key_path_match = find_spec.Matches(registry_key, search_depth)
if match:
yield registry_key.path
# pylint: disable=singleton-comparison
if key_path_match != False and not find_spec.AtMaximumDepth(search_depth):
sub_find_specs.append(find_spec)
if sub_find_specs:
search_depth += 1
for sub_registry_key in registry_key.GetSubkeys():
for matching_path in self._FindInKey(
sub_registry_key, sub_find_specs, search_depth):
yield matching_path | python | def _FindInKey(self, registry_key, find_specs, search_depth):
"""Searches for matching keys within the Windows Registry key.
Args:
registry_key (WinRegistryKey): Windows Registry key.
find_specs (list[FindSpec]): find specifications.
search_depth (int): number of key path segments to compare.
Yields:
str: key path of a matching Windows Registry key.
"""
sub_find_specs = []
for find_spec in find_specs:
match, key_path_match = find_spec.Matches(registry_key, search_depth)
if match:
yield registry_key.path
# pylint: disable=singleton-comparison
if key_path_match != False and not find_spec.AtMaximumDepth(search_depth):
sub_find_specs.append(find_spec)
if sub_find_specs:
search_depth += 1
for sub_registry_key in registry_key.GetSubkeys():
for matching_path in self._FindInKey(
sub_registry_key, sub_find_specs, search_depth):
yield matching_path | [
"def",
"_FindInKey",
"(",
"self",
",",
"registry_key",
",",
"find_specs",
",",
"search_depth",
")",
":",
"sub_find_specs",
"=",
"[",
"]",
"for",
"find_spec",
"in",
"find_specs",
":",
"match",
",",
"key_path_match",
"=",
"find_spec",
".",
"Matches",
"(",
"reg... | Searches for matching keys within the Windows Registry key.
Args:
registry_key (WinRegistryKey): Windows Registry key.
find_specs (list[FindSpec]): find specifications.
search_depth (int): number of key path segments to compare.
Yields:
str: key path of a matching Windows Registry key. | [
"Searches",
"for",
"matching",
"keys",
"within",
"the",
"Windows",
"Registry",
"key",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/registry_searcher.py#L223-L249 | train | 209,154 |
log2timeline/dfwinreg | dfwinreg/registry_searcher.py | WinRegistrySearcher.Find | def Find(self, find_specs=None):
"""Searches for matching keys within the Windows Registry.
Args:
find_specs (list[FindSpec]): find specifications. where None
will return all allocated Windows Registry keys.
Yields:
str: key path of a matching Windows Registry key.
"""
if not find_specs:
find_specs = [FindSpec()]
registry_key = self._win_registry.GetRootKey()
for matching_path in self._FindInKey(registry_key, find_specs, 0):
yield matching_path | python | def Find(self, find_specs=None):
"""Searches for matching keys within the Windows Registry.
Args:
find_specs (list[FindSpec]): find specifications. where None
will return all allocated Windows Registry keys.
Yields:
str: key path of a matching Windows Registry key.
"""
if not find_specs:
find_specs = [FindSpec()]
registry_key = self._win_registry.GetRootKey()
for matching_path in self._FindInKey(registry_key, find_specs, 0):
yield matching_path | [
"def",
"Find",
"(",
"self",
",",
"find_specs",
"=",
"None",
")",
":",
"if",
"not",
"find_specs",
":",
"find_specs",
"=",
"[",
"FindSpec",
"(",
")",
"]",
"registry_key",
"=",
"self",
".",
"_win_registry",
".",
"GetRootKey",
"(",
")",
"for",
"matching_path... | Searches for matching keys within the Windows Registry.
Args:
find_specs (list[FindSpec]): find specifications. where None
will return all allocated Windows Registry keys.
Yields:
str: key path of a matching Windows Registry key. | [
"Searches",
"for",
"matching",
"keys",
"within",
"the",
"Windows",
"Registry",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/registry_searcher.py#L251-L266 | train | 209,155 |
log2timeline/dfwinreg | dfwinreg/interface.py | WinRegistryFile.RecurseKeys | def RecurseKeys(self):
"""Recurses the Windows Registry keys starting with the root key.
Yields:
WinRegistryKey: Windows Registry key.
"""
root_key = self.GetRootKey()
if root_key:
for registry_key in root_key.RecurseKeys():
yield registry_key | python | def RecurseKeys(self):
"""Recurses the Windows Registry keys starting with the root key.
Yields:
WinRegistryKey: Windows Registry key.
"""
root_key = self.GetRootKey()
if root_key:
for registry_key in root_key.RecurseKeys():
yield registry_key | [
"def",
"RecurseKeys",
"(",
"self",
")",
":",
"root_key",
"=",
"self",
".",
"GetRootKey",
"(",
")",
"if",
"root_key",
":",
"for",
"registry_key",
"in",
"root_key",
".",
"RecurseKeys",
"(",
")",
":",
"yield",
"registry_key"
] | Recurses the Windows Registry keys starting with the root key.
Yields:
WinRegistryKey: Windows Registry key. | [
"Recurses",
"the",
"Windows",
"Registry",
"keys",
"starting",
"with",
"the",
"root",
"key",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/interface.py#L66-L75 | train | 209,156 |
log2timeline/dfwinreg | dfwinreg/interface.py | WinRegistryFile.SetKeyPathPrefix | def SetKeyPathPrefix(self, key_path_prefix):
"""Sets the Window Registry key path prefix.
Args:
key_path_prefix (str): Windows Registry key path prefix.
"""
self._key_path_prefix = key_path_prefix
self._key_path_prefix_length = len(key_path_prefix)
self._key_path_prefix_upper = key_path_prefix.upper() | python | def SetKeyPathPrefix(self, key_path_prefix):
"""Sets the Window Registry key path prefix.
Args:
key_path_prefix (str): Windows Registry key path prefix.
"""
self._key_path_prefix = key_path_prefix
self._key_path_prefix_length = len(key_path_prefix)
self._key_path_prefix_upper = key_path_prefix.upper() | [
"def",
"SetKeyPathPrefix",
"(",
"self",
",",
"key_path_prefix",
")",
":",
"self",
".",
"_key_path_prefix",
"=",
"key_path_prefix",
"self",
".",
"_key_path_prefix_length",
"=",
"len",
"(",
"key_path_prefix",
")",
"self",
".",
"_key_path_prefix_upper",
"=",
"key_path_... | Sets the Window Registry key path prefix.
Args:
key_path_prefix (str): Windows Registry key path prefix. | [
"Sets",
"the",
"Window",
"Registry",
"key",
"path",
"prefix",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/interface.py#L77-L85 | train | 209,157 |
log2timeline/dfwinreg | dfwinreg/interface.py | WinRegistryKey.RecurseKeys | def RecurseKeys(self):
"""Recurses the subkeys starting with the key.
Yields:
WinRegistryKey: Windows Registry key.
"""
yield self
for subkey in self.GetSubkeys():
for key in subkey.RecurseKeys():
yield key | python | def RecurseKeys(self):
"""Recurses the subkeys starting with the key.
Yields:
WinRegistryKey: Windows Registry key.
"""
yield self
for subkey in self.GetSubkeys():
for key in subkey.RecurseKeys():
yield key | [
"def",
"RecurseKeys",
"(",
"self",
")",
":",
"yield",
"self",
"for",
"subkey",
"in",
"self",
".",
"GetSubkeys",
"(",
")",
":",
"for",
"key",
"in",
"subkey",
".",
"RecurseKeys",
"(",
")",
":",
"yield",
"key"
] | Recurses the subkeys starting with the key.
Yields:
WinRegistryKey: Windows Registry key. | [
"Recurses",
"the",
"subkeys",
"starting",
"with",
"the",
"key",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/interface.py#L219-L228 | train | 209,158 |
log2timeline/dfwinreg | dfwinreg/interface.py | WinRegistryValue.DataIsInteger | def DataIsInteger(self):
"""Determines, based on the data type, if the data is an integer.
The data types considered strings are: REG_DWORD (REG_DWORD_LITTLE_ENDIAN),
REG_DWORD_BIG_ENDIAN and REG_QWORD.
Returns:
bool: True if the data is an integer, False otherwise.
"""
return self.data_type in (
definitions.REG_DWORD, definitions.REG_DWORD_BIG_ENDIAN,
definitions.REG_QWORD) | python | def DataIsInteger(self):
"""Determines, based on the data type, if the data is an integer.
The data types considered strings are: REG_DWORD (REG_DWORD_LITTLE_ENDIAN),
REG_DWORD_BIG_ENDIAN and REG_QWORD.
Returns:
bool: True if the data is an integer, False otherwise.
"""
return self.data_type in (
definitions.REG_DWORD, definitions.REG_DWORD_BIG_ENDIAN,
definitions.REG_QWORD) | [
"def",
"DataIsInteger",
"(",
"self",
")",
":",
"return",
"self",
".",
"data_type",
"in",
"(",
"definitions",
".",
"REG_DWORD",
",",
"definitions",
".",
"REG_DWORD_BIG_ENDIAN",
",",
"definitions",
".",
"REG_QWORD",
")"
] | Determines, based on the data type, if the data is an integer.
The data types considered strings are: REG_DWORD (REG_DWORD_LITTLE_ENDIAN),
REG_DWORD_BIG_ENDIAN and REG_QWORD.
Returns:
bool: True if the data is an integer, False otherwise. | [
"Determines",
"based",
"on",
"the",
"data",
"type",
"if",
"the",
"data",
"is",
"an",
"integer",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/interface.py#L291-L302 | train | 209,159 |
log2timeline/dfwinreg | dfwinreg/fake.py | FakeWinRegistryFile.AddKeyByPath | def AddKeyByPath(self, key_path, registry_key):
"""Adds a Windows Registry key for a specific key path.
Args:
key_path (str): Windows Registry key path to add the key.
registry_key (WinRegistryKey): Windows Registry key.
Raises:
KeyError: if the subkey already exists.
ValueError: if the Windows Registry key cannot be added.
"""
if not key_path.startswith(definitions.KEY_PATH_SEPARATOR):
raise ValueError('Key path does not start with: {0:s}'.format(
definitions.KEY_PATH_SEPARATOR))
if not self._root_key:
self._root_key = FakeWinRegistryKey(self._key_path_prefix)
path_segments = key_paths.SplitKeyPath(key_path)
parent_key = self._root_key
for path_segment in path_segments:
try:
subkey = FakeWinRegistryKey(path_segment)
parent_key.AddSubkey(subkey)
except KeyError:
subkey = parent_key.GetSubkeyByName(path_segment)
parent_key = subkey
parent_key.AddSubkey(registry_key) | python | def AddKeyByPath(self, key_path, registry_key):
"""Adds a Windows Registry key for a specific key path.
Args:
key_path (str): Windows Registry key path to add the key.
registry_key (WinRegistryKey): Windows Registry key.
Raises:
KeyError: if the subkey already exists.
ValueError: if the Windows Registry key cannot be added.
"""
if not key_path.startswith(definitions.KEY_PATH_SEPARATOR):
raise ValueError('Key path does not start with: {0:s}'.format(
definitions.KEY_PATH_SEPARATOR))
if not self._root_key:
self._root_key = FakeWinRegistryKey(self._key_path_prefix)
path_segments = key_paths.SplitKeyPath(key_path)
parent_key = self._root_key
for path_segment in path_segments:
try:
subkey = FakeWinRegistryKey(path_segment)
parent_key.AddSubkey(subkey)
except KeyError:
subkey = parent_key.GetSubkeyByName(path_segment)
parent_key = subkey
parent_key.AddSubkey(registry_key) | [
"def",
"AddKeyByPath",
"(",
"self",
",",
"key_path",
",",
"registry_key",
")",
":",
"if",
"not",
"key_path",
".",
"startswith",
"(",
"definitions",
".",
"KEY_PATH_SEPARATOR",
")",
":",
"raise",
"ValueError",
"(",
"'Key path does not start with: {0:s}'",
".",
"form... | Adds a Windows Registry key for a specific key path.
Args:
key_path (str): Windows Registry key path to add the key.
registry_key (WinRegistryKey): Windows Registry key.
Raises:
KeyError: if the subkey already exists.
ValueError: if the Windows Registry key cannot be added. | [
"Adds",
"a",
"Windows",
"Registry",
"key",
"for",
"a",
"specific",
"key",
"path",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/fake.py#L34-L63 | train | 209,160 |
log2timeline/dfwinreg | dfwinreg/fake.py | FakeWinRegistryKey._BuildKeyHierarchy | def _BuildKeyHierarchy(self, subkeys, values):
"""Builds the Windows Registry key hierarchy.
Args:
subkeys (list[FakeWinRegistryKey]): list of subkeys.
values (list[FakeWinRegistryValue]): list of values.
"""
if subkeys:
for registry_key in subkeys:
name = registry_key.name.upper()
if name in self._subkeys:
continue
self._subkeys[name] = registry_key
# pylint: disable=protected-access
registry_key._key_path = key_paths.JoinKeyPath([
self._key_path, registry_key.name])
if values:
for registry_value in values:
name = registry_value.name.upper()
if name in self._values:
continue
self._values[name] = registry_value | python | def _BuildKeyHierarchy(self, subkeys, values):
"""Builds the Windows Registry key hierarchy.
Args:
subkeys (list[FakeWinRegistryKey]): list of subkeys.
values (list[FakeWinRegistryValue]): list of values.
"""
if subkeys:
for registry_key in subkeys:
name = registry_key.name.upper()
if name in self._subkeys:
continue
self._subkeys[name] = registry_key
# pylint: disable=protected-access
registry_key._key_path = key_paths.JoinKeyPath([
self._key_path, registry_key.name])
if values:
for registry_value in values:
name = registry_value.name.upper()
if name in self._values:
continue
self._values[name] = registry_value | [
"def",
"_BuildKeyHierarchy",
"(",
"self",
",",
"subkeys",
",",
"values",
")",
":",
"if",
"subkeys",
":",
"for",
"registry_key",
"in",
"subkeys",
":",
"name",
"=",
"registry_key",
".",
"name",
".",
"upper",
"(",
")",
"if",
"name",
"in",
"self",
".",
"_s... | Builds the Windows Registry key hierarchy.
Args:
subkeys (list[FakeWinRegistryKey]): list of subkeys.
values (list[FakeWinRegistryValue]): list of values. | [
"Builds",
"the",
"Windows",
"Registry",
"key",
"hierarchy",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/fake.py#L183-L206 | train | 209,161 |
log2timeline/dfwinreg | dfwinreg/fake.py | FakeWinRegistryKey.AddValue | def AddValue(self, registry_value):
"""Adds a value.
Args:
registry_value (WinRegistryValue): Windows Registry value.
Raises:
KeyError: if the value already exists.
"""
name = registry_value.name.upper()
if name in self._values:
raise KeyError(
'Value: {0:s} already exists.'.format(registry_value.name))
self._values[name] = registry_value | python | def AddValue(self, registry_value):
"""Adds a value.
Args:
registry_value (WinRegistryValue): Windows Registry value.
Raises:
KeyError: if the value already exists.
"""
name = registry_value.name.upper()
if name in self._values:
raise KeyError(
'Value: {0:s} already exists.'.format(registry_value.name))
self._values[name] = registry_value | [
"def",
"AddValue",
"(",
"self",
",",
"registry_value",
")",
":",
"name",
"=",
"registry_value",
".",
"name",
".",
"upper",
"(",
")",
"if",
"name",
"in",
"self",
".",
"_values",
":",
"raise",
"KeyError",
"(",
"'Value: {0:s} already exists.'",
".",
"format",
... | Adds a value.
Args:
registry_value (WinRegistryValue): Windows Registry value.
Raises:
KeyError: if the value already exists. | [
"Adds",
"a",
"value",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/fake.py#L227-L241 | train | 209,162 |
log2timeline/dfwinreg | dfwinreg/registry.py | WinRegistry._GetCachedFileByPath | def _GetCachedFileByPath(self, key_path_upper):
"""Retrieves a cached Windows Registry file for a key path.
Args:
key_path_upper (str): Windows Registry key path, in upper case with
a resolved root key alias.
Returns:
tuple: consist:
str: key path prefix
WinRegistryFile: corresponding Windows Registry file or None if not
available.
"""
longest_key_path_prefix_upper = ''
longest_key_path_prefix_length = len(longest_key_path_prefix_upper)
for key_path_prefix_upper in self._registry_files:
if key_path_upper.startswith(key_path_prefix_upper):
key_path_prefix_length = len(key_path_prefix_upper)
if key_path_prefix_length > longest_key_path_prefix_length:
longest_key_path_prefix_upper = key_path_prefix_upper
longest_key_path_prefix_length = key_path_prefix_length
if not longest_key_path_prefix_upper:
return None, None
registry_file = self._registry_files.get(
longest_key_path_prefix_upper, None)
return longest_key_path_prefix_upper, registry_file | python | def _GetCachedFileByPath(self, key_path_upper):
"""Retrieves a cached Windows Registry file for a key path.
Args:
key_path_upper (str): Windows Registry key path, in upper case with
a resolved root key alias.
Returns:
tuple: consist:
str: key path prefix
WinRegistryFile: corresponding Windows Registry file or None if not
available.
"""
longest_key_path_prefix_upper = ''
longest_key_path_prefix_length = len(longest_key_path_prefix_upper)
for key_path_prefix_upper in self._registry_files:
if key_path_upper.startswith(key_path_prefix_upper):
key_path_prefix_length = len(key_path_prefix_upper)
if key_path_prefix_length > longest_key_path_prefix_length:
longest_key_path_prefix_upper = key_path_prefix_upper
longest_key_path_prefix_length = key_path_prefix_length
if not longest_key_path_prefix_upper:
return None, None
registry_file = self._registry_files.get(
longest_key_path_prefix_upper, None)
return longest_key_path_prefix_upper, registry_file | [
"def",
"_GetCachedFileByPath",
"(",
"self",
",",
"key_path_upper",
")",
":",
"longest_key_path_prefix_upper",
"=",
"''",
"longest_key_path_prefix_length",
"=",
"len",
"(",
"longest_key_path_prefix_upper",
")",
"for",
"key_path_prefix_upper",
"in",
"self",
".",
"_registry_... | Retrieves a cached Windows Registry file for a key path.
Args:
key_path_upper (str): Windows Registry key path, in upper case with
a resolved root key alias.
Returns:
tuple: consist:
str: key path prefix
WinRegistryFile: corresponding Windows Registry file or None if not
available. | [
"Retrieves",
"a",
"cached",
"Windows",
"Registry",
"file",
"for",
"a",
"key",
"path",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/registry.py#L139-L167 | train | 209,163 |
log2timeline/dfwinreg | dfwinreg/registry.py | WinRegistry._GetCurrentControlSet | def _GetCurrentControlSet(self, key_path_suffix):
"""Virtual key callback to determine the current control set.
Args:
key_path_suffix (str): current control set Windows Registry key path
suffix with leading path separator.
Returns:
WinRegistryKey: the current control set Windows Registry key or None
if not available.
"""
select_key_path = 'HKEY_LOCAL_MACHINE\\System\\Select'
select_key = self.GetKeyByPath(select_key_path)
if not select_key:
return None
# To determine the current control set check:
# 1. The "Current" value.
# 2. The "Default" value.
# 3. The "LastKnownGood" value.
control_set = None
for value_name in ('Current', 'Default', 'LastKnownGood'):
value = select_key.GetValueByName(value_name)
if not value or not value.DataIsInteger():
continue
control_set = value.GetDataAsObject()
# If the control set is 0 then we need to check the other values.
if control_set > 0 or control_set <= 999:
break
if not control_set or control_set <= 0 or control_set > 999:
return None
control_set_path = 'HKEY_LOCAL_MACHINE\\System\\ControlSet{0:03d}'.format(
control_set)
key_path = ''.join([control_set_path, key_path_suffix])
return self.GetKeyByPath(key_path) | python | def _GetCurrentControlSet(self, key_path_suffix):
"""Virtual key callback to determine the current control set.
Args:
key_path_suffix (str): current control set Windows Registry key path
suffix with leading path separator.
Returns:
WinRegistryKey: the current control set Windows Registry key or None
if not available.
"""
select_key_path = 'HKEY_LOCAL_MACHINE\\System\\Select'
select_key = self.GetKeyByPath(select_key_path)
if not select_key:
return None
# To determine the current control set check:
# 1. The "Current" value.
# 2. The "Default" value.
# 3. The "LastKnownGood" value.
control_set = None
for value_name in ('Current', 'Default', 'LastKnownGood'):
value = select_key.GetValueByName(value_name)
if not value or not value.DataIsInteger():
continue
control_set = value.GetDataAsObject()
# If the control set is 0 then we need to check the other values.
if control_set > 0 or control_set <= 999:
break
if not control_set or control_set <= 0 or control_set > 999:
return None
control_set_path = 'HKEY_LOCAL_MACHINE\\System\\ControlSet{0:03d}'.format(
control_set)
key_path = ''.join([control_set_path, key_path_suffix])
return self.GetKeyByPath(key_path) | [
"def",
"_GetCurrentControlSet",
"(",
"self",
",",
"key_path_suffix",
")",
":",
"select_key_path",
"=",
"'HKEY_LOCAL_MACHINE\\\\System\\\\Select'",
"select_key",
"=",
"self",
".",
"GetKeyByPath",
"(",
"select_key_path",
")",
"if",
"not",
"select_key",
":",
"return",
"N... | Virtual key callback to determine the current control set.
Args:
key_path_suffix (str): current control set Windows Registry key path
suffix with leading path separator.
Returns:
WinRegistryKey: the current control set Windows Registry key or None
if not available. | [
"Virtual",
"key",
"callback",
"to",
"determine",
"the",
"current",
"control",
"set",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/registry.py#L181-L219 | train | 209,164 |
log2timeline/dfwinreg | dfwinreg/registry.py | WinRegistry._GetUsers | def _GetUsers(self, key_path_suffix):
"""Virtual key callback to determine the users sub keys.
Args:
key_path_suffix (str): users Windows Registry key path suffix with
leading path separator.
Returns:
WinRegistryKey: the users Windows Registry key or None if not available.
"""
user_key_name, _, key_path_suffix = key_path_suffix.partition(
definitions.KEY_PATH_SEPARATOR)
# HKEY_USERS\.DEFAULT is an alias for HKEY_USERS\S-1-5-18 which is
# the Local System account.
if user_key_name == '.DEFAULT':
search_key_name = 'S-1-5-18'
else:
search_key_name = user_key_name
user_profile_list_key = self.GetKeyByPath(self._USER_PROFILE_LIST_KEY_PATH)
if not user_profile_list_key:
return None
for user_profile_key in user_profile_list_key.GetSubkeys():
if search_key_name == user_profile_key.name:
profile_path_value = user_profile_key.GetValueByName('ProfileImagePath')
if not profile_path_value:
break
profile_path = profile_path_value.GetDataAsObject()
if not profile_path:
break
key_name_upper = user_profile_key.name.upper()
if key_name_upper.endswith('_CLASSES'):
profile_path = '\\'.join([
profile_path, 'AppData', 'Local', 'Microsoft', 'Windows',
'UsrClass.dat'])
else:
profile_path = '\\'.join([profile_path, 'NTUSER.DAT'])
profile_path_upper = profile_path.upper()
registry_file = self._GetCachedUserFileByPath(profile_path_upper)
if not registry_file:
break
key_path_prefix = definitions.KEY_PATH_SEPARATOR.join([
'HKEY_USERS', user_key_name])
key_path = ''.join([key_path_prefix, key_path_suffix])
registry_file.SetKeyPathPrefix(key_path_prefix)
return registry_file.GetKeyByPath(key_path)
return None | python | def _GetUsers(self, key_path_suffix):
"""Virtual key callback to determine the users sub keys.
Args:
key_path_suffix (str): users Windows Registry key path suffix with
leading path separator.
Returns:
WinRegistryKey: the users Windows Registry key or None if not available.
"""
user_key_name, _, key_path_suffix = key_path_suffix.partition(
definitions.KEY_PATH_SEPARATOR)
# HKEY_USERS\.DEFAULT is an alias for HKEY_USERS\S-1-5-18 which is
# the Local System account.
if user_key_name == '.DEFAULT':
search_key_name = 'S-1-5-18'
else:
search_key_name = user_key_name
user_profile_list_key = self.GetKeyByPath(self._USER_PROFILE_LIST_KEY_PATH)
if not user_profile_list_key:
return None
for user_profile_key in user_profile_list_key.GetSubkeys():
if search_key_name == user_profile_key.name:
profile_path_value = user_profile_key.GetValueByName('ProfileImagePath')
if not profile_path_value:
break
profile_path = profile_path_value.GetDataAsObject()
if not profile_path:
break
key_name_upper = user_profile_key.name.upper()
if key_name_upper.endswith('_CLASSES'):
profile_path = '\\'.join([
profile_path, 'AppData', 'Local', 'Microsoft', 'Windows',
'UsrClass.dat'])
else:
profile_path = '\\'.join([profile_path, 'NTUSER.DAT'])
profile_path_upper = profile_path.upper()
registry_file = self._GetCachedUserFileByPath(profile_path_upper)
if not registry_file:
break
key_path_prefix = definitions.KEY_PATH_SEPARATOR.join([
'HKEY_USERS', user_key_name])
key_path = ''.join([key_path_prefix, key_path_suffix])
registry_file.SetKeyPathPrefix(key_path_prefix)
return registry_file.GetKeyByPath(key_path)
return None | [
"def",
"_GetUsers",
"(",
"self",
",",
"key_path_suffix",
")",
":",
"user_key_name",
",",
"_",
",",
"key_path_suffix",
"=",
"key_path_suffix",
".",
"partition",
"(",
"definitions",
".",
"KEY_PATH_SEPARATOR",
")",
"# HKEY_USERS\\.DEFAULT is an alias for HKEY_USERS\\S-1-5-18... | Virtual key callback to determine the users sub keys.
Args:
key_path_suffix (str): users Windows Registry key path suffix with
leading path separator.
Returns:
WinRegistryKey: the users Windows Registry key or None if not available. | [
"Virtual",
"key",
"callback",
"to",
"determine",
"the",
"users",
"sub",
"keys",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/registry.py#L221-L275 | train | 209,165 |
log2timeline/dfwinreg | dfwinreg/registry.py | WinRegistry._GetFileByPath | def _GetFileByPath(self, key_path_upper):
"""Retrieves a Windows Registry file for a specific path.
Args:
key_path_upper (str): Windows Registry key path, in upper case with
a resolved root key alias.
Returns:
tuple: consists:
str: upper case key path prefix
WinRegistryFile: corresponding Windows Registry file or None if not
available.
"""
# TODO: handle HKEY_USERS in both 9X and NT.
key_path_prefix, registry_file = self._GetCachedFileByPath(key_path_upper)
if not registry_file:
for mapping in self._GetFileMappingsByPath(key_path_upper):
try:
registry_file = self._OpenFile(mapping.windows_path)
except IOError:
registry_file = None
if not registry_file:
continue
if not key_path_prefix:
key_path_prefix = mapping.key_path_prefix
self.MapFile(key_path_prefix, registry_file)
key_path_prefix = key_path_prefix.upper()
break
return key_path_prefix, registry_file | python | def _GetFileByPath(self, key_path_upper):
"""Retrieves a Windows Registry file for a specific path.
Args:
key_path_upper (str): Windows Registry key path, in upper case with
a resolved root key alias.
Returns:
tuple: consists:
str: upper case key path prefix
WinRegistryFile: corresponding Windows Registry file or None if not
available.
"""
# TODO: handle HKEY_USERS in both 9X and NT.
key_path_prefix, registry_file = self._GetCachedFileByPath(key_path_upper)
if not registry_file:
for mapping in self._GetFileMappingsByPath(key_path_upper):
try:
registry_file = self._OpenFile(mapping.windows_path)
except IOError:
registry_file = None
if not registry_file:
continue
if not key_path_prefix:
key_path_prefix = mapping.key_path_prefix
self.MapFile(key_path_prefix, registry_file)
key_path_prefix = key_path_prefix.upper()
break
return key_path_prefix, registry_file | [
"def",
"_GetFileByPath",
"(",
"self",
",",
"key_path_upper",
")",
":",
"# TODO: handle HKEY_USERS in both 9X and NT.",
"key_path_prefix",
",",
"registry_file",
"=",
"self",
".",
"_GetCachedFileByPath",
"(",
"key_path_upper",
")",
"if",
"not",
"registry_file",
":",
"for"... | Retrieves a Windows Registry file for a specific path.
Args:
key_path_upper (str): Windows Registry key path, in upper case with
a resolved root key alias.
Returns:
tuple: consists:
str: upper case key path prefix
WinRegistryFile: corresponding Windows Registry file or None if not
available. | [
"Retrieves",
"a",
"Windows",
"Registry",
"file",
"for",
"a",
"specific",
"path",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/registry.py#L277-L311 | train | 209,166 |
log2timeline/dfwinreg | dfwinreg/registry.py | WinRegistry._GetFileMappingsByPath | def _GetFileMappingsByPath(self, key_path_upper):
"""Retrieves the Windows Registry file mappings for a specific path.
Args:
key_path_upper (str): Windows Registry key path, in upper case with
a resolved root key alias.
Yields:
WinRegistryFileMapping: Windows Registry file mapping.
"""
candidate_mappings = []
for mapping in self._REGISTRY_FILE_MAPPINGS_NT:
if key_path_upper.startswith(mapping.key_path_prefix.upper()):
candidate_mappings.append(mapping)
# Sort the candidate mappings by longest (most specific) match first.
candidate_mappings.sort(
key=lambda mapping: len(mapping.key_path_prefix), reverse=True)
for mapping in candidate_mappings:
yield mapping | python | def _GetFileMappingsByPath(self, key_path_upper):
"""Retrieves the Windows Registry file mappings for a specific path.
Args:
key_path_upper (str): Windows Registry key path, in upper case with
a resolved root key alias.
Yields:
WinRegistryFileMapping: Windows Registry file mapping.
"""
candidate_mappings = []
for mapping in self._REGISTRY_FILE_MAPPINGS_NT:
if key_path_upper.startswith(mapping.key_path_prefix.upper()):
candidate_mappings.append(mapping)
# Sort the candidate mappings by longest (most specific) match first.
candidate_mappings.sort(
key=lambda mapping: len(mapping.key_path_prefix), reverse=True)
for mapping in candidate_mappings:
yield mapping | [
"def",
"_GetFileMappingsByPath",
"(",
"self",
",",
"key_path_upper",
")",
":",
"candidate_mappings",
"=",
"[",
"]",
"for",
"mapping",
"in",
"self",
".",
"_REGISTRY_FILE_MAPPINGS_NT",
":",
"if",
"key_path_upper",
".",
"startswith",
"(",
"mapping",
".",
"key_path_pr... | Retrieves the Windows Registry file mappings for a specific path.
Args:
key_path_upper (str): Windows Registry key path, in upper case with
a resolved root key alias.
Yields:
WinRegistryFileMapping: Windows Registry file mapping. | [
"Retrieves",
"the",
"Windows",
"Registry",
"file",
"mappings",
"for",
"a",
"specific",
"path",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/registry.py#L313-L332 | train | 209,167 |
log2timeline/dfwinreg | dfwinreg/registry.py | WinRegistry._OpenFile | def _OpenFile(self, path):
"""Opens a Windows Registry file.
Args:
path (str): path of the Windows Registry file.
Returns:
WinRegistryFile: Windows Registry file or None if not available.
"""
if not self._registry_file_reader:
return None
return self._registry_file_reader.Open(
path, ascii_codepage=self._ascii_codepage) | python | def _OpenFile(self, path):
"""Opens a Windows Registry file.
Args:
path (str): path of the Windows Registry file.
Returns:
WinRegistryFile: Windows Registry file or None if not available.
"""
if not self._registry_file_reader:
return None
return self._registry_file_reader.Open(
path, ascii_codepage=self._ascii_codepage) | [
"def",
"_OpenFile",
"(",
"self",
",",
"path",
")",
":",
"if",
"not",
"self",
".",
"_registry_file_reader",
":",
"return",
"None",
"return",
"self",
".",
"_registry_file_reader",
".",
"Open",
"(",
"path",
",",
"ascii_codepage",
"=",
"self",
".",
"_ascii_codep... | Opens a Windows Registry file.
Args:
path (str): path of the Windows Registry file.
Returns:
WinRegistryFile: Windows Registry file or None if not available. | [
"Opens",
"a",
"Windows",
"Registry",
"file",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/registry.py#L334-L347 | train | 209,168 |
log2timeline/dfwinreg | dfwinreg/registry.py | WinRegistry.GetRegistryFileMapping | def GetRegistryFileMapping(self, registry_file):
"""Determines the Registry file mapping based on the content of the file.
Args:
registry_file (WinRegistyFile): Windows Registry file.
Returns:
str: key path prefix or an empty string.
Raises:
RuntimeError: if there are multiple matching mappings and
the correct mapping cannot be resolved.
"""
if not registry_file:
return ''
candidate_mappings = []
for mapping in self._REGISTRY_FILE_MAPPINGS_NT:
if not mapping.unique_key_paths:
continue
# If all unique key paths are found consider the file to match.
match = True
for key_path in mapping.unique_key_paths:
registry_key = registry_file.GetKeyByPath(key_path)
if not registry_key:
match = False
if match:
candidate_mappings.append(mapping)
if not candidate_mappings:
return ''
if len(candidate_mappings) == 1:
return candidate_mappings[0].key_path_prefix
key_path_prefixes = frozenset([
mapping.key_path_prefix for mapping in candidate_mappings])
expected_key_path_prefixes = frozenset([
'HKEY_CURRENT_USER',
'HKEY_CURRENT_USER\\Software\\Classes'])
if key_path_prefixes == expected_key_path_prefixes:
return 'HKEY_CURRENT_USER'
raise RuntimeError('Unable to resolve Windows Registry file mapping.') | python | def GetRegistryFileMapping(self, registry_file):
"""Determines the Registry file mapping based on the content of the file.
Args:
registry_file (WinRegistyFile): Windows Registry file.
Returns:
str: key path prefix or an empty string.
Raises:
RuntimeError: if there are multiple matching mappings and
the correct mapping cannot be resolved.
"""
if not registry_file:
return ''
candidate_mappings = []
for mapping in self._REGISTRY_FILE_MAPPINGS_NT:
if not mapping.unique_key_paths:
continue
# If all unique key paths are found consider the file to match.
match = True
for key_path in mapping.unique_key_paths:
registry_key = registry_file.GetKeyByPath(key_path)
if not registry_key:
match = False
if match:
candidate_mappings.append(mapping)
if not candidate_mappings:
return ''
if len(candidate_mappings) == 1:
return candidate_mappings[0].key_path_prefix
key_path_prefixes = frozenset([
mapping.key_path_prefix for mapping in candidate_mappings])
expected_key_path_prefixes = frozenset([
'HKEY_CURRENT_USER',
'HKEY_CURRENT_USER\\Software\\Classes'])
if key_path_prefixes == expected_key_path_prefixes:
return 'HKEY_CURRENT_USER'
raise RuntimeError('Unable to resolve Windows Registry file mapping.') | [
"def",
"GetRegistryFileMapping",
"(",
"self",
",",
"registry_file",
")",
":",
"if",
"not",
"registry_file",
":",
"return",
"''",
"candidate_mappings",
"=",
"[",
"]",
"for",
"mapping",
"in",
"self",
".",
"_REGISTRY_FILE_MAPPINGS_NT",
":",
"if",
"not",
"mapping",
... | Determines the Registry file mapping based on the content of the file.
Args:
registry_file (WinRegistyFile): Windows Registry file.
Returns:
str: key path prefix or an empty string.
Raises:
RuntimeError: if there are multiple matching mappings and
the correct mapping cannot be resolved. | [
"Determines",
"the",
"Registry",
"file",
"mapping",
"based",
"on",
"the",
"content",
"of",
"the",
"file",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/registry.py#L398-L445 | train | 209,169 |
log2timeline/dfwinreg | dfwinreg/registry.py | WinRegistry.GetRootKey | def GetRootKey(self):
"""Retrieves the Windows Registry root key.
Returns:
WinRegistryKey: Windows Registry root key.
Raises:
RuntimeError: if there are multiple matching mappings and
the correct mapping cannot be resolved.
"""
root_registry_key = virtual.VirtualWinRegistryKey('')
for mapped_key in self._MAPPED_KEYS:
key_path_segments = key_paths.SplitKeyPath(mapped_key)
if not key_path_segments:
continue
registry_key = root_registry_key
for name in key_path_segments[:-1]:
sub_registry_key = registry_key.GetSubkeyByName(name)
if not sub_registry_key:
sub_registry_key = virtual.VirtualWinRegistryKey(name)
registry_key.AddSubkey(sub_registry_key)
registry_key = sub_registry_key
sub_registry_key = registry_key.GetSubkeyByName(key_path_segments[-1])
if (not sub_registry_key and
isinstance(registry_key, virtual.VirtualWinRegistryKey)):
sub_registry_key = virtual.VirtualWinRegistryKey(
key_path_segments[-1], registry=self)
registry_key.AddSubkey(sub_registry_key)
return root_registry_key | python | def GetRootKey(self):
"""Retrieves the Windows Registry root key.
Returns:
WinRegistryKey: Windows Registry root key.
Raises:
RuntimeError: if there are multiple matching mappings and
the correct mapping cannot be resolved.
"""
root_registry_key = virtual.VirtualWinRegistryKey('')
for mapped_key in self._MAPPED_KEYS:
key_path_segments = key_paths.SplitKeyPath(mapped_key)
if not key_path_segments:
continue
registry_key = root_registry_key
for name in key_path_segments[:-1]:
sub_registry_key = registry_key.GetSubkeyByName(name)
if not sub_registry_key:
sub_registry_key = virtual.VirtualWinRegistryKey(name)
registry_key.AddSubkey(sub_registry_key)
registry_key = sub_registry_key
sub_registry_key = registry_key.GetSubkeyByName(key_path_segments[-1])
if (not sub_registry_key and
isinstance(registry_key, virtual.VirtualWinRegistryKey)):
sub_registry_key = virtual.VirtualWinRegistryKey(
key_path_segments[-1], registry=self)
registry_key.AddSubkey(sub_registry_key)
return root_registry_key | [
"def",
"GetRootKey",
"(",
"self",
")",
":",
"root_registry_key",
"=",
"virtual",
".",
"VirtualWinRegistryKey",
"(",
"''",
")",
"for",
"mapped_key",
"in",
"self",
".",
"_MAPPED_KEYS",
":",
"key_path_segments",
"=",
"key_paths",
".",
"SplitKeyPath",
"(",
"mapped_k... | Retrieves the Windows Registry root key.
Returns:
WinRegistryKey: Windows Registry root key.
Raises:
RuntimeError: if there are multiple matching mappings and
the correct mapping cannot be resolved. | [
"Retrieves",
"the",
"Windows",
"Registry",
"root",
"key",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/registry.py#L447-L481 | train | 209,170 |
log2timeline/dfwinreg | dfwinreg/registry.py | WinRegistry.MapFile | def MapFile(self, key_path_prefix, registry_file):
"""Maps the Windows Registry file to a specific key path prefix.
Args:
key_path_prefix (str): key path prefix.
registry_file (WinRegistryFile): Windows Registry file.
"""
self._registry_files[key_path_prefix.upper()] = registry_file
registry_file.SetKeyPathPrefix(key_path_prefix) | python | def MapFile(self, key_path_prefix, registry_file):
"""Maps the Windows Registry file to a specific key path prefix.
Args:
key_path_prefix (str): key path prefix.
registry_file (WinRegistryFile): Windows Registry file.
"""
self._registry_files[key_path_prefix.upper()] = registry_file
registry_file.SetKeyPathPrefix(key_path_prefix) | [
"def",
"MapFile",
"(",
"self",
",",
"key_path_prefix",
",",
"registry_file",
")",
":",
"self",
".",
"_registry_files",
"[",
"key_path_prefix",
".",
"upper",
"(",
")",
"]",
"=",
"registry_file",
"registry_file",
".",
"SetKeyPathPrefix",
"(",
"key_path_prefix",
")... | Maps the Windows Registry file to a specific key path prefix.
Args:
key_path_prefix (str): key path prefix.
registry_file (WinRegistryFile): Windows Registry file. | [
"Maps",
"the",
"Windows",
"Registry",
"file",
"to",
"a",
"specific",
"key",
"path",
"prefix",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/registry.py#L483-L491 | train | 209,171 |
log2timeline/dfwinreg | dfwinreg/regf.py | REGFWinRegistryFile.GetRootKey | def GetRootKey(self):
"""Retrieves the root key.
Returns:
WinRegistryKey: Windows Registry root key or None if not available.
"""
regf_key = self._regf_file.get_root_key()
if not regf_key:
return None
return REGFWinRegistryKey(regf_key, key_path=self._key_path_prefix) | python | def GetRootKey(self):
"""Retrieves the root key.
Returns:
WinRegistryKey: Windows Registry root key or None if not available.
"""
regf_key = self._regf_file.get_root_key()
if not regf_key:
return None
return REGFWinRegistryKey(regf_key, key_path=self._key_path_prefix) | [
"def",
"GetRootKey",
"(",
"self",
")",
":",
"regf_key",
"=",
"self",
".",
"_regf_file",
".",
"get_root_key",
"(",
")",
"if",
"not",
"regf_key",
":",
"return",
"None",
"return",
"REGFWinRegistryKey",
"(",
"regf_key",
",",
"key_path",
"=",
"self",
".",
"_key... | Retrieves the root key.
Returns:
WinRegistryKey: Windows Registry root key or None if not available. | [
"Retrieves",
"the",
"root",
"key",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/regf.py#L65-L75 | train | 209,172 |
log2timeline/dfwinreg | dfwinreg/regf.py | REGFWinRegistryFile.Open | def Open(self, file_object):
"""Opens the Windows Registry file using a file-like object.
Args:
file_object (file): file-like object.
Returns:
bool: True if successful or False if not.
"""
self._file_object = file_object
self._regf_file.open_file_object(self._file_object)
return True | python | def Open(self, file_object):
"""Opens the Windows Registry file using a file-like object.
Args:
file_object (file): file-like object.
Returns:
bool: True if successful or False if not.
"""
self._file_object = file_object
self._regf_file.open_file_object(self._file_object)
return True | [
"def",
"Open",
"(",
"self",
",",
"file_object",
")",
":",
"self",
".",
"_file_object",
"=",
"file_object",
"self",
".",
"_regf_file",
".",
"open_file_object",
"(",
"self",
".",
"_file_object",
")",
"return",
"True"
] | Opens the Windows Registry file using a file-like object.
Args:
file_object (file): file-like object.
Returns:
bool: True if successful or False if not. | [
"Opens",
"the",
"Windows",
"Registry",
"file",
"using",
"a",
"file",
"-",
"like",
"object",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/regf.py#L77-L88 | train | 209,173 |
log2timeline/dfwinreg | dfwinreg/key_paths.py | SplitKeyPath | def SplitKeyPath(key_path, path_separator=definitions.KEY_PATH_SEPARATOR):
"""Splits the key path into path segments.
Args:
key_path (str): key path.
path_separator (Optional[str]): path separator.
Returns:
list[str]: key path segments without the root path segment, which is an
empty string.
"""
# Split the path with the path separator and remove empty path segments.
return list(filter(None, key_path.split(path_separator))) | python | def SplitKeyPath(key_path, path_separator=definitions.KEY_PATH_SEPARATOR):
"""Splits the key path into path segments.
Args:
key_path (str): key path.
path_separator (Optional[str]): path separator.
Returns:
list[str]: key path segments without the root path segment, which is an
empty string.
"""
# Split the path with the path separator and remove empty path segments.
return list(filter(None, key_path.split(path_separator))) | [
"def",
"SplitKeyPath",
"(",
"key_path",
",",
"path_separator",
"=",
"definitions",
".",
"KEY_PATH_SEPARATOR",
")",
":",
"# Split the path with the path separator and remove empty path segments.",
"return",
"list",
"(",
"filter",
"(",
"None",
",",
"key_path",
".",
"split",... | Splits the key path into path segments.
Args:
key_path (str): key path.
path_separator (Optional[str]): path separator.
Returns:
list[str]: key path segments without the root path segment, which is an
empty string. | [
"Splits",
"the",
"key",
"path",
"into",
"path",
"segments",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/key_paths.py#L39-L51 | train | 209,174 |
log2timeline/dfwinreg | dfwinreg/virtual.py | VirtualWinRegistryKey._GetKeyFromRegistry | def _GetKeyFromRegistry(self):
"""Determines the key from the Windows Registry."""
if not self._registry:
return
try:
self._registry_key = self._registry.GetKeyByPath(self._key_path)
except RuntimeError:
pass
if not self._registry_key:
return
for sub_registry_key in self._registry_key.GetSubkeys():
self.AddSubkey(sub_registry_key)
if self._key_path == 'HKEY_LOCAL_MACHINE\\System':
sub_registry_key = VirtualWinRegistryKey(
'CurrentControlSet', registry=self._registry)
self.AddSubkey(sub_registry_key)
self._registry = None | python | def _GetKeyFromRegistry(self):
"""Determines the key from the Windows Registry."""
if not self._registry:
return
try:
self._registry_key = self._registry.GetKeyByPath(self._key_path)
except RuntimeError:
pass
if not self._registry_key:
return
for sub_registry_key in self._registry_key.GetSubkeys():
self.AddSubkey(sub_registry_key)
if self._key_path == 'HKEY_LOCAL_MACHINE\\System':
sub_registry_key = VirtualWinRegistryKey(
'CurrentControlSet', registry=self._registry)
self.AddSubkey(sub_registry_key)
self._registry = None | [
"def",
"_GetKeyFromRegistry",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_registry",
":",
"return",
"try",
":",
"self",
".",
"_registry_key",
"=",
"self",
".",
"_registry",
".",
"GetKeyByPath",
"(",
"self",
".",
"_key_path",
")",
"except",
"RuntimeE... | Determines the key from the Windows Registry. | [
"Determines",
"the",
"key",
"from",
"the",
"Windows",
"Registry",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/virtual.py#L94-L115 | train | 209,175 |
log2timeline/dfwinreg | dfwinreg/virtual.py | VirtualWinRegistryKey.GetValues | def GetValues(self):
"""Retrieves all values within the key.
Returns:
generator[WinRegistryValue]: Windows Registry value generator.
"""
if not self._registry_key and self._registry:
self._GetKeyFromRegistry()
if self._registry_key:
return self._registry_key.GetValues()
return iter([]) | python | def GetValues(self):
"""Retrieves all values within the key.
Returns:
generator[WinRegistryValue]: Windows Registry value generator.
"""
if not self._registry_key and self._registry:
self._GetKeyFromRegistry()
if self._registry_key:
return self._registry_key.GetValues()
return iter([]) | [
"def",
"GetValues",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_registry_key",
"and",
"self",
".",
"_registry",
":",
"self",
".",
"_GetKeyFromRegistry",
"(",
")",
"if",
"self",
".",
"_registry_key",
":",
"return",
"self",
".",
"_registry_key",
".",
... | Retrieves all values within the key.
Returns:
generator[WinRegistryValue]: Windows Registry value generator. | [
"Retrieves",
"all",
"values",
"within",
"the",
"key",
"."
] | 9d488bb1db562197dbfb48de9613d6b29dea056e | https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/virtual.py#L246-L258 | train | 209,176 |
binarydud/pyres | pyres/job.py | Job.perform | def perform(self):
"""This method converts payload into args and calls the ``perform``
method on the payload class.
Before calling ``perform``, a ``before_perform`` class method
is called, if it exists. It takes a dictionary as an argument;
currently the only things stored on the dictionary are the
args passed into ``perform`` and a timestamp of when the job
was enqueued.
Similarly, an ``after_perform`` class method is called after
``perform`` is finished. The metadata dictionary contains the
same data, plus a timestamp of when the job was performed, a
``failed`` boolean value, and if it did fail, a ``retried``
boolean value. This method is called after retry, and is
called regardless of whether an exception is ultimately thrown
by the perform method.
"""
payload_class_str = self._payload["class"]
payload_class = self.safe_str_to_class(payload_class_str)
payload_class.resq = self.resq
args = self._payload.get("args")
metadata = dict(args=args)
if self.enqueue_timestamp:
metadata["enqueue_timestamp"] = self.enqueue_timestamp
before_perform = getattr(payload_class, "before_perform", None)
metadata["failed"] = False
metadata["perform_timestamp"] = time.time()
check_after = True
try:
if before_perform:
payload_class.before_perform(metadata)
return payload_class.perform(*args)
except Exception as e:
metadata["failed"] = True
metadata["exception"] = e
if not self.retry(payload_class, args):
metadata["retried"] = False
raise
else:
metadata["retried"] = True
logging.exception("Retry scheduled after error in %s", self._payload)
finally:
after_perform = getattr(payload_class, "after_perform", None)
if after_perform:
payload_class.after_perform(metadata)
delattr(payload_class,'resq') | python | def perform(self):
"""This method converts payload into args and calls the ``perform``
method on the payload class.
Before calling ``perform``, a ``before_perform`` class method
is called, if it exists. It takes a dictionary as an argument;
currently the only things stored on the dictionary are the
args passed into ``perform`` and a timestamp of when the job
was enqueued.
Similarly, an ``after_perform`` class method is called after
``perform`` is finished. The metadata dictionary contains the
same data, plus a timestamp of when the job was performed, a
``failed`` boolean value, and if it did fail, a ``retried``
boolean value. This method is called after retry, and is
called regardless of whether an exception is ultimately thrown
by the perform method.
"""
payload_class_str = self._payload["class"]
payload_class = self.safe_str_to_class(payload_class_str)
payload_class.resq = self.resq
args = self._payload.get("args")
metadata = dict(args=args)
if self.enqueue_timestamp:
metadata["enqueue_timestamp"] = self.enqueue_timestamp
before_perform = getattr(payload_class, "before_perform", None)
metadata["failed"] = False
metadata["perform_timestamp"] = time.time()
check_after = True
try:
if before_perform:
payload_class.before_perform(metadata)
return payload_class.perform(*args)
except Exception as e:
metadata["failed"] = True
metadata["exception"] = e
if not self.retry(payload_class, args):
metadata["retried"] = False
raise
else:
metadata["retried"] = True
logging.exception("Retry scheduled after error in %s", self._payload)
finally:
after_perform = getattr(payload_class, "after_perform", None)
if after_perform:
payload_class.after_perform(metadata)
delattr(payload_class,'resq') | [
"def",
"perform",
"(",
"self",
")",
":",
"payload_class_str",
"=",
"self",
".",
"_payload",
"[",
"\"class\"",
"]",
"payload_class",
"=",
"self",
".",
"safe_str_to_class",
"(",
"payload_class_str",
")",
"payload_class",
".",
"resq",
"=",
"self",
".",
"resq",
... | This method converts payload into args and calls the ``perform``
method on the payload class.
Before calling ``perform``, a ``before_perform`` class method
is called, if it exists. It takes a dictionary as an argument;
currently the only things stored on the dictionary are the
args passed into ``perform`` and a timestamp of when the job
was enqueued.
Similarly, an ``after_perform`` class method is called after
``perform`` is finished. The metadata dictionary contains the
same data, plus a timestamp of when the job was performed, a
``failed`` boolean value, and if it did fail, a ``retried``
boolean value. This method is called after retry, and is
called regardless of whether an exception is ultimately thrown
by the perform method. | [
"This",
"method",
"converts",
"payload",
"into",
"args",
"and",
"calls",
"the",
"perform",
"method",
"on",
"the",
"payload",
"class",
"."
] | 4f4b28257afe5b7a08fd38a063fad7ce62c03ae2 | https://github.com/binarydud/pyres/blob/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/pyres/job.py#L46-L99 | train | 209,177 |
binarydud/pyres | pyres/job.py | Job.fail | def fail(self, exception):
"""This method provides a way to fail a job and will use whatever
failure backend you've provided. The default is the ``RedisBackend``.
"""
fail = failure.create(exception, self._queue, self._payload,
self._worker)
fail.save(self.resq)
return fail | python | def fail(self, exception):
"""This method provides a way to fail a job and will use whatever
failure backend you've provided. The default is the ``RedisBackend``.
"""
fail = failure.create(exception, self._queue, self._payload,
self._worker)
fail.save(self.resq)
return fail | [
"def",
"fail",
"(",
"self",
",",
"exception",
")",
":",
"fail",
"=",
"failure",
".",
"create",
"(",
"exception",
",",
"self",
".",
"_queue",
",",
"self",
".",
"_payload",
",",
"self",
".",
"_worker",
")",
"fail",
".",
"save",
"(",
"self",
".",
"res... | This method provides a way to fail a job and will use whatever
failure backend you've provided. The default is the ``RedisBackend``. | [
"This",
"method",
"provides",
"a",
"way",
"to",
"fail",
"a",
"job",
"and",
"will",
"use",
"whatever",
"failure",
"backend",
"you",
"ve",
"provided",
".",
"The",
"default",
"is",
"the",
"RedisBackend",
"."
] | 4f4b28257afe5b7a08fd38a063fad7ce62c03ae2 | https://github.com/binarydud/pyres/blob/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/pyres/job.py#L101-L109 | train | 209,178 |
binarydud/pyres | pyres/job.py | Job.retry | def retry(self, payload_class, args):
"""This method provides a way to retry a job after a failure.
If the jobclass defined by the payload containes a ``retry_every`` attribute then pyres
will attempt to retry the job until successful or until timeout defined by ``retry_timeout`` on the payload class.
"""
retry_every = getattr(payload_class, 'retry_every', None)
retry_timeout = getattr(payload_class, 'retry_timeout', 0)
if retry_every:
now = ResQ._current_time()
first_attempt = self._payload.get("first_attempt", now)
retry_until = first_attempt + timedelta(seconds=retry_timeout)
retry_at = now + timedelta(seconds=retry_every)
if retry_at < retry_until:
self.resq.enqueue_at(retry_at, payload_class, *args,
**{'first_attempt':first_attempt})
return True
return False | python | def retry(self, payload_class, args):
"""This method provides a way to retry a job after a failure.
If the jobclass defined by the payload containes a ``retry_every`` attribute then pyres
will attempt to retry the job until successful or until timeout defined by ``retry_timeout`` on the payload class.
"""
retry_every = getattr(payload_class, 'retry_every', None)
retry_timeout = getattr(payload_class, 'retry_timeout', 0)
if retry_every:
now = ResQ._current_time()
first_attempt = self._payload.get("first_attempt", now)
retry_until = first_attempt + timedelta(seconds=retry_timeout)
retry_at = now + timedelta(seconds=retry_every)
if retry_at < retry_until:
self.resq.enqueue_at(retry_at, payload_class, *args,
**{'first_attempt':first_attempt})
return True
return False | [
"def",
"retry",
"(",
"self",
",",
"payload_class",
",",
"args",
")",
":",
"retry_every",
"=",
"getattr",
"(",
"payload_class",
",",
"'retry_every'",
",",
"None",
")",
"retry_timeout",
"=",
"getattr",
"(",
"payload_class",
",",
"'retry_timeout'",
",",
"0",
")... | This method provides a way to retry a job after a failure.
If the jobclass defined by the payload containes a ``retry_every`` attribute then pyres
will attempt to retry the job until successful or until timeout defined by ``retry_timeout`` on the payload class. | [
"This",
"method",
"provides",
"a",
"way",
"to",
"retry",
"a",
"job",
"after",
"a",
"failure",
".",
"If",
"the",
"jobclass",
"defined",
"by",
"the",
"payload",
"containes",
"a",
"retry_every",
"attribute",
"then",
"pyres",
"will",
"attempt",
"to",
"retry",
... | 4f4b28257afe5b7a08fd38a063fad7ce62c03ae2 | https://github.com/binarydud/pyres/blob/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/pyres/job.py#L111-L129 | train | 209,179 |
binarydud/pyres | pyres/job.py | Job.reserve | def reserve(cls, queues, res, worker=None, timeout=10):
"""Reserve a job on one of the queues. This marks this job so
that other workers will not pick it up.
"""
if isinstance(queues, string_types):
queues = [queues]
queue, payload = res.pop(queues, timeout=timeout)
if payload:
return cls(queue, payload, res, worker) | python | def reserve(cls, queues, res, worker=None, timeout=10):
"""Reserve a job on one of the queues. This marks this job so
that other workers will not pick it up.
"""
if isinstance(queues, string_types):
queues = [queues]
queue, payload = res.pop(queues, timeout=timeout)
if payload:
return cls(queue, payload, res, worker) | [
"def",
"reserve",
"(",
"cls",
",",
"queues",
",",
"res",
",",
"worker",
"=",
"None",
",",
"timeout",
"=",
"10",
")",
":",
"if",
"isinstance",
"(",
"queues",
",",
"string_types",
")",
":",
"queues",
"=",
"[",
"queues",
"]",
"queue",
",",
"payload",
... | Reserve a job on one of the queues. This marks this job so
that other workers will not pick it up. | [
"Reserve",
"a",
"job",
"on",
"one",
"of",
"the",
"queues",
".",
"This",
"marks",
"this",
"job",
"so",
"that",
"other",
"workers",
"will",
"not",
"pick",
"it",
"up",
"."
] | 4f4b28257afe5b7a08fd38a063fad7ce62c03ae2 | https://github.com/binarydud/pyres/blob/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/pyres/job.py#L132-L141 | train | 209,180 |
binarydud/pyres | pyres/__init__.py | my_import | def my_import(name):
"""Helper function for walking import calls when searching for classes by
string names.
"""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod | python | def my_import(name):
"""Helper function for walking import calls when searching for classes by
string names.
"""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod | [
"def",
"my_import",
"(",
"name",
")",
":",
"mod",
"=",
"__import__",
"(",
"name",
")",
"components",
"=",
"name",
".",
"split",
"(",
"'.'",
")",
"for",
"comp",
"in",
"components",
"[",
"1",
":",
"]",
":",
"mod",
"=",
"getattr",
"(",
"mod",
",",
"... | Helper function for walking import calls when searching for classes by
string names. | [
"Helper",
"function",
"for",
"walking",
"import",
"calls",
"when",
"searching",
"for",
"classes",
"by",
"string",
"names",
"."
] | 4f4b28257afe5b7a08fd38a063fad7ce62c03ae2 | https://github.com/binarydud/pyres/blob/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/pyres/__init__.py#L77-L85 | train | 209,181 |
binarydud/pyres | pyres/__init__.py | safe_str_to_class | def safe_str_to_class(s):
"""Helper function to map string class names to module classes."""
lst = s.split(".")
klass = lst[-1]
mod_list = lst[:-1]
module = ".".join(mod_list)
# ruby compatibility kludge: resque sends just a class name and
# not a module name so if I use resque to queue a ruby class
# called "Worker" then pyres will throw a "ValueError: Empty
# module name" exception. To avoid that, if there's no module in
# the json then we'll use the classname as a module name.
if not module:
module = klass
mod = my_import(module)
if hasattr(mod, klass):
return getattr(mod, klass)
else:
raise ImportError('') | python | def safe_str_to_class(s):
"""Helper function to map string class names to module classes."""
lst = s.split(".")
klass = lst[-1]
mod_list = lst[:-1]
module = ".".join(mod_list)
# ruby compatibility kludge: resque sends just a class name and
# not a module name so if I use resque to queue a ruby class
# called "Worker" then pyres will throw a "ValueError: Empty
# module name" exception. To avoid that, if there's no module in
# the json then we'll use the classname as a module name.
if not module:
module = klass
mod = my_import(module)
if hasattr(mod, klass):
return getattr(mod, klass)
else:
raise ImportError('') | [
"def",
"safe_str_to_class",
"(",
"s",
")",
":",
"lst",
"=",
"s",
".",
"split",
"(",
"\".\"",
")",
"klass",
"=",
"lst",
"[",
"-",
"1",
"]",
"mod_list",
"=",
"lst",
"[",
":",
"-",
"1",
"]",
"module",
"=",
"\".\"",
".",
"join",
"(",
"mod_list",
")... | Helper function to map string class names to module classes. | [
"Helper",
"function",
"to",
"map",
"string",
"class",
"names",
"to",
"module",
"classes",
"."
] | 4f4b28257afe5b7a08fd38a063fad7ce62c03ae2 | https://github.com/binarydud/pyres/blob/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/pyres/__init__.py#L87-L106 | train | 209,182 |
binarydud/pyres | pyres/__init__.py | str_to_class | def str_to_class(s):
"""Alternate helper function to map string class names to module classes."""
lst = s.split(".")
klass = lst[-1]
mod_list = lst[:-1]
module = ".".join(mod_list)
try:
mod = __import__(module)
if hasattr(mod, klass):
return getattr(mod, klass)
else:
return None
except ImportError:
return None | python | def str_to_class(s):
"""Alternate helper function to map string class names to module classes."""
lst = s.split(".")
klass = lst[-1]
mod_list = lst[:-1]
module = ".".join(mod_list)
try:
mod = __import__(module)
if hasattr(mod, klass):
return getattr(mod, klass)
else:
return None
except ImportError:
return None | [
"def",
"str_to_class",
"(",
"s",
")",
":",
"lst",
"=",
"s",
".",
"split",
"(",
"\".\"",
")",
"klass",
"=",
"lst",
"[",
"-",
"1",
"]",
"mod_list",
"=",
"lst",
"[",
":",
"-",
"1",
"]",
"module",
"=",
"\".\"",
".",
"join",
"(",
"mod_list",
")",
... | Alternate helper function to map string class names to module classes. | [
"Alternate",
"helper",
"function",
"to",
"map",
"string",
"class",
"names",
"to",
"module",
"classes",
"."
] | 4f4b28257afe5b7a08fd38a063fad7ce62c03ae2 | https://github.com/binarydud/pyres/blob/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/pyres/__init__.py#L108-L121 | train | 209,183 |
binarydud/pyres | pyres/__init__.py | ResQ.info | def info(self):
"""Returns a dictionary of the current status of the pending jobs,
processed, no. of queues, no. of workers, no. of failed jobs.
"""
pending = 0
for q in self.queues():
pending += self.size(q)
return {
'pending' : pending,
'processed' : Stat('processed',self).get(),
'queues' : len(self.queues()),
'workers' : len(self.workers()),
#'working' : len(self.working()),
'failed' : Stat('failed',self).get(),
'servers' : ['%s:%s' % (self.host, self.port)]
} | python | def info(self):
"""Returns a dictionary of the current status of the pending jobs,
processed, no. of queues, no. of workers, no. of failed jobs.
"""
pending = 0
for q in self.queues():
pending += self.size(q)
return {
'pending' : pending,
'processed' : Stat('processed',self).get(),
'queues' : len(self.queues()),
'workers' : len(self.workers()),
#'working' : len(self.working()),
'failed' : Stat('failed',self).get(),
'servers' : ['%s:%s' % (self.host, self.port)]
} | [
"def",
"info",
"(",
"self",
")",
":",
"pending",
"=",
"0",
"for",
"q",
"in",
"self",
".",
"queues",
"(",
")",
":",
"pending",
"+=",
"self",
".",
"size",
"(",
"q",
")",
"return",
"{",
"'pending'",
":",
"pending",
",",
"'processed'",
":",
"Stat",
"... | Returns a dictionary of the current status of the pending jobs,
processed, no. of queues, no. of workers, no. of failed jobs. | [
"Returns",
"a",
"dictionary",
"of",
"the",
"current",
"status",
"of",
"the",
"pending",
"jobs",
"processed",
"no",
".",
"of",
"queues",
"no",
".",
"of",
"workers",
"no",
".",
"of",
"failed",
"jobs",
"."
] | 4f4b28257afe5b7a08fd38a063fad7ce62c03ae2 | https://github.com/binarydud/pyres/blob/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/pyres/__init__.py#L241-L257 | train | 209,184 |
binarydud/pyres | pyres/horde.py | Khan._shutdown_minions | def _shutdown_minions(self):
"""
send the SIGNINT signal to each worker in the pool.
"""
setproctitle('pyres_manager: Waiting on children to shutdown.')
for minion in self._workers.values():
minion.terminate()
minion.join() | python | def _shutdown_minions(self):
"""
send the SIGNINT signal to each worker in the pool.
"""
setproctitle('pyres_manager: Waiting on children to shutdown.')
for minion in self._workers.values():
minion.terminate()
minion.join() | [
"def",
"_shutdown_minions",
"(",
"self",
")",
":",
"setproctitle",
"(",
"'pyres_manager: Waiting on children to shutdown.'",
")",
"for",
"minion",
"in",
"self",
".",
"_workers",
".",
"values",
"(",
")",
":",
"minion",
".",
"terminate",
"(",
")",
"minion",
".",
... | send the SIGNINT signal to each worker in the pool. | [
"send",
"the",
"SIGNINT",
"signal",
"to",
"each",
"worker",
"in",
"the",
"pool",
"."
] | 4f4b28257afe5b7a08fd38a063fad7ce62c03ae2 | https://github.com/binarydud/pyres/blob/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/pyres/horde.py#L339-L346 | train | 209,185 |
binarydud/pyres | pyres/worker.py | Worker.work | def work(self, interval=5):
"""Invoked by ``run`` method. ``work`` listens on a list of queues and sleeps
for ``interval`` time.
``interval`` -- Number of seconds the worker will wait until processing the next job. Default is "5".
Whenever a worker finds a job on the queue it first calls ``reserve`` on
that job to make sure another worker won't run it, then *forks* itself to
work on that job.
"""
self._setproctitle("Starting")
logger.info("starting")
self.startup()
while True:
if self._shutdown:
logger.info('shutdown scheduled')
break
self.register_worker()
job = self.reserve(interval)
if job:
self.fork_worker(job)
else:
if interval == 0:
break
#procline @paused ? "Paused" : "Waiting for #{@queues.join(',')}"
self._setproctitle("Waiting")
#time.sleep(interval)
self.unregister_worker() | python | def work(self, interval=5):
"""Invoked by ``run`` method. ``work`` listens on a list of queues and sleeps
for ``interval`` time.
``interval`` -- Number of seconds the worker will wait until processing the next job. Default is "5".
Whenever a worker finds a job on the queue it first calls ``reserve`` on
that job to make sure another worker won't run it, then *forks* itself to
work on that job.
"""
self._setproctitle("Starting")
logger.info("starting")
self.startup()
while True:
if self._shutdown:
logger.info('shutdown scheduled')
break
self.register_worker()
job = self.reserve(interval)
if job:
self.fork_worker(job)
else:
if interval == 0:
break
#procline @paused ? "Paused" : "Waiting for #{@queues.join(',')}"
self._setproctitle("Waiting")
#time.sleep(interval)
self.unregister_worker() | [
"def",
"work",
"(",
"self",
",",
"interval",
"=",
"5",
")",
":",
"self",
".",
"_setproctitle",
"(",
"\"Starting\"",
")",
"logger",
".",
"info",
"(",
"\"starting\"",
")",
"self",
".",
"startup",
"(",
")",
"while",
"True",
":",
"if",
"self",
".",
"_shu... | Invoked by ``run`` method. ``work`` listens on a list of queues and sleeps
for ``interval`` time.
``interval`` -- Number of seconds the worker will wait until processing the next job. Default is "5".
Whenever a worker finds a job on the queue it first calls ``reserve`` on
that job to make sure another worker won't run it, then *forks* itself to
work on that job. | [
"Invoked",
"by",
"run",
"method",
".",
"work",
"listens",
"on",
"a",
"list",
"of",
"queues",
"and",
"sleeps",
"for",
"interval",
"time",
"."
] | 4f4b28257afe5b7a08fd38a063fad7ce62c03ae2 | https://github.com/binarydud/pyres/blob/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/pyres/worker.py#L121-L153 | train | 209,186 |
binarydud/pyres | pyres/worker.py | Worker.fork_worker | def fork_worker(self, job):
"""Invoked by ``work`` method. ``fork_worker`` does the actual forking to create the child
process that will process the job. It's also responsible for monitoring the child process
and handling hangs and crashes.
Finally, the ``process`` method actually processes the job by eventually calling the Job
instance's ``perform`` method.
"""
logger.debug('picked up job')
logger.debug('job details: %s' % job)
self.before_fork(job)
self.child = os.fork()
if self.child:
self._setproctitle("Forked %s at %s" %
(self.child,
datetime.datetime.now()))
logger.info('Forked %s at %s' % (self.child,
datetime.datetime.now()))
try:
start = datetime.datetime.now()
# waits for the result or times out
while True:
pid, status = os.waitpid(self.child, os.WNOHANG)
if pid != 0:
if os.WIFEXITED(status) and os.WEXITSTATUS(status) == 0:
break
if os.WIFSTOPPED(status):
logger.warning("Process stopped by signal %d" % os.WSTOPSIG(status))
else:
if os.WIFSIGNALED(status):
raise CrashError("Unexpected exit by signal %d" % os.WTERMSIG(status))
raise CrashError("Unexpected exit status %d" % os.WEXITSTATUS(status))
time.sleep(0.5)
now = datetime.datetime.now()
if self.timeout and ((now - start).seconds > self.timeout):
os.kill(self.child, signal.SIGKILL)
os.waitpid(-1, os.WNOHANG)
raise TimeoutError("Timed out after %d seconds" % self.timeout)
except OSError as ose:
import errno
if ose.errno != errno.EINTR:
raise ose
except JobError:
self._handle_job_exception(job)
finally:
# If the child process' job called os._exit manually we need to
# finish the clean up here.
if self.job():
self.done_working(job)
logger.debug('done waiting')
else:
self._setproctitle("Processing %s since %s" %
(job,
datetime.datetime.now()))
logger.info('Processing %s since %s' %
(job, datetime.datetime.now()))
self.after_fork(job)
# re-seed the Python PRNG after forking, otherwise
# all job process will share the same sequence of
# random numbers
random.seed()
self.process(job)
os._exit(0)
self.child = None | python | def fork_worker(self, job):
"""Invoked by ``work`` method. ``fork_worker`` does the actual forking to create the child
process that will process the job. It's also responsible for monitoring the child process
and handling hangs and crashes.
Finally, the ``process`` method actually processes the job by eventually calling the Job
instance's ``perform`` method.
"""
logger.debug('picked up job')
logger.debug('job details: %s' % job)
self.before_fork(job)
self.child = os.fork()
if self.child:
self._setproctitle("Forked %s at %s" %
(self.child,
datetime.datetime.now()))
logger.info('Forked %s at %s' % (self.child,
datetime.datetime.now()))
try:
start = datetime.datetime.now()
# waits for the result or times out
while True:
pid, status = os.waitpid(self.child, os.WNOHANG)
if pid != 0:
if os.WIFEXITED(status) and os.WEXITSTATUS(status) == 0:
break
if os.WIFSTOPPED(status):
logger.warning("Process stopped by signal %d" % os.WSTOPSIG(status))
else:
if os.WIFSIGNALED(status):
raise CrashError("Unexpected exit by signal %d" % os.WTERMSIG(status))
raise CrashError("Unexpected exit status %d" % os.WEXITSTATUS(status))
time.sleep(0.5)
now = datetime.datetime.now()
if self.timeout and ((now - start).seconds > self.timeout):
os.kill(self.child, signal.SIGKILL)
os.waitpid(-1, os.WNOHANG)
raise TimeoutError("Timed out after %d seconds" % self.timeout)
except OSError as ose:
import errno
if ose.errno != errno.EINTR:
raise ose
except JobError:
self._handle_job_exception(job)
finally:
# If the child process' job called os._exit manually we need to
# finish the clean up here.
if self.job():
self.done_working(job)
logger.debug('done waiting')
else:
self._setproctitle("Processing %s since %s" %
(job,
datetime.datetime.now()))
logger.info('Processing %s since %s' %
(job, datetime.datetime.now()))
self.after_fork(job)
# re-seed the Python PRNG after forking, otherwise
# all job process will share the same sequence of
# random numbers
random.seed()
self.process(job)
os._exit(0)
self.child = None | [
"def",
"fork_worker",
"(",
"self",
",",
"job",
")",
":",
"logger",
".",
"debug",
"(",
"'picked up job'",
")",
"logger",
".",
"debug",
"(",
"'job details: %s'",
"%",
"job",
")",
"self",
".",
"before_fork",
"(",
"job",
")",
"self",
".",
"child",
"=",
"os... | Invoked by ``work`` method. ``fork_worker`` does the actual forking to create the child
process that will process the job. It's also responsible for monitoring the child process
and handling hangs and crashes.
Finally, the ``process`` method actually processes the job by eventually calling the Job
instance's ``perform`` method. | [
"Invoked",
"by",
"work",
"method",
".",
"fork_worker",
"does",
"the",
"actual",
"forking",
"to",
"create",
"the",
"child",
"process",
"that",
"will",
"process",
"the",
"job",
".",
"It",
"s",
"also",
"responsible",
"for",
"monitoring",
"the",
"child",
"proces... | 4f4b28257afe5b7a08fd38a063fad7ce62c03ae2 | https://github.com/binarydud/pyres/blob/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/pyres/worker.py#L155-L228 | train | 209,187 |
binarydud/pyres | pyres/failure/redis.py | RedisBackend.save | def save(self, resq=None):
"""Saves the failed Job into a "failed" Redis queue preserving all its original enqueud info."""
if not resq:
resq = ResQ()
data = {
'failed_at' : datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'),
'payload' : self._payload,
'exception' : self._exception.__class__.__name__,
'error' : self._parse_message(self._exception),
'backtrace' : self._parse_traceback(self._traceback),
'queue' : self._queue
}
if self._worker:
data['worker'] = self._worker
data = ResQ.encode(data)
resq.redis.rpush('resque:failed', data) | python | def save(self, resq=None):
"""Saves the failed Job into a "failed" Redis queue preserving all its original enqueud info."""
if not resq:
resq = ResQ()
data = {
'failed_at' : datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'),
'payload' : self._payload,
'exception' : self._exception.__class__.__name__,
'error' : self._parse_message(self._exception),
'backtrace' : self._parse_traceback(self._traceback),
'queue' : self._queue
}
if self._worker:
data['worker'] = self._worker
data = ResQ.encode(data)
resq.redis.rpush('resque:failed', data) | [
"def",
"save",
"(",
"self",
",",
"resq",
"=",
"None",
")",
":",
"if",
"not",
"resq",
":",
"resq",
"=",
"ResQ",
"(",
")",
"data",
"=",
"{",
"'failed_at'",
":",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%Y/%m/%d %H:%M... | Saves the failed Job into a "failed" Redis queue preserving all its original enqueud info. | [
"Saves",
"the",
"failed",
"Job",
"into",
"a",
"failed",
"Redis",
"queue",
"preserving",
"all",
"its",
"original",
"enqueud",
"info",
"."
] | 4f4b28257afe5b7a08fd38a063fad7ce62c03ae2 | https://github.com/binarydud/pyres/blob/4f4b28257afe5b7a08fd38a063fad7ce62c03ae2/pyres/failure/redis.py#L10-L25 | train | 209,188 |
pyamg/pyamg | pyamg/blackbox.py | make_csr | def make_csr(A):
"""
Convert A to CSR, if A is not a CSR or BSR matrix already.
Parameters
----------
A : array, matrix, sparse matrix
(n x n) matrix to convert to CSR
Returns
-------
A : csr_matrix, bsr_matrix
If A is csr_matrix or bsr_matrix, then do nothing and return A.
Else, convert A to CSR if possible and return.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.blackbox import make_csr
>>> A = poisson((40,40),format='csc')
>>> Acsr = make_csr(A)
Implicit conversion of A to CSR in pyamg.blackbox.make_csr
"""
# Convert to CSR or BSR if necessary
if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
try:
A = csr_matrix(A)
print('Implicit conversion of A to CSR in pyamg.blackbox.make_csr')
except BaseException:
raise TypeError('Argument A must have type csr_matrix or\
bsr_matrix, or be convertible to csr_matrix')
if A.shape[0] != A.shape[1]:
raise TypeError('Argument A must be a square')
A = A.asfptype()
return A | python | def make_csr(A):
"""
Convert A to CSR, if A is not a CSR or BSR matrix already.
Parameters
----------
A : array, matrix, sparse matrix
(n x n) matrix to convert to CSR
Returns
-------
A : csr_matrix, bsr_matrix
If A is csr_matrix or bsr_matrix, then do nothing and return A.
Else, convert A to CSR if possible and return.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.blackbox import make_csr
>>> A = poisson((40,40),format='csc')
>>> Acsr = make_csr(A)
Implicit conversion of A to CSR in pyamg.blackbox.make_csr
"""
# Convert to CSR or BSR if necessary
if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
try:
A = csr_matrix(A)
print('Implicit conversion of A to CSR in pyamg.blackbox.make_csr')
except BaseException:
raise TypeError('Argument A must have type csr_matrix or\
bsr_matrix, or be convertible to csr_matrix')
if A.shape[0] != A.shape[1]:
raise TypeError('Argument A must be a square')
A = A.asfptype()
return A | [
"def",
"make_csr",
"(",
"A",
")",
":",
"# Convert to CSR or BSR if necessary",
"if",
"not",
"(",
"isspmatrix_csr",
"(",
"A",
")",
"or",
"isspmatrix_bsr",
"(",
"A",
")",
")",
":",
"try",
":",
"A",
"=",
"csr_matrix",
"(",
"A",
")",
"print",
"(",
"'Implicit... | Convert A to CSR, if A is not a CSR or BSR matrix already.
Parameters
----------
A : array, matrix, sparse matrix
(n x n) matrix to convert to CSR
Returns
-------
A : csr_matrix, bsr_matrix
If A is csr_matrix or bsr_matrix, then do nothing and return A.
Else, convert A to CSR if possible and return.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.blackbox import make_csr
>>> A = poisson((40,40),format='csc')
>>> Acsr = make_csr(A)
Implicit conversion of A to CSR in pyamg.blackbox.make_csr | [
"Convert",
"A",
"to",
"CSR",
"if",
"A",
"is",
"not",
"a",
"CSR",
"or",
"BSR",
"matrix",
"already",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/blackbox.py#L14-L52 | train | 209,189 |
pyamg/pyamg | pyamg/blackbox.py | solver | def solver(A, config):
"""Generate an SA solver given matrix A and a configuration.
Parameters
----------
A : array, matrix, csr_matrix, bsr_matrix
Matrix to invert, CSR or BSR format preferred for efficiency
config : dict
A dictionary of solver configuration parameters that is used to
generate a smoothed aggregation solver
Returns
-------
ml : smoothed_aggregation_solver
smoothed aggregation hierarchy
Notes
-----
config must contain the following parameter entries for
smoothed_aggregation_solver: symmetry, smooth, presmoother, postsmoother,
B, strength, max_levels, max_coarse, coarse_solver, aggregate, keep
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg import solver_configuration,solver
>>> A = poisson((40,40),format='csr')
>>> config = solver_configuration(A,verb=False)
>>> ml = solver(A,config)
"""
# Convert A to acceptable format
A = make_csr(A)
# Generate smoothed aggregation solver
try:
return \
smoothed_aggregation_solver(A,
B=config['B'],
BH=config['BH'],
smooth=config['smooth'],
strength=config['strength'],
max_levels=config['max_levels'],
max_coarse=config['max_coarse'],
coarse_solver=config['coarse_solver'],
symmetry=config['symmetry'],
aggregate=config['aggregate'],
presmoother=config['presmoother'],
postsmoother=config['postsmoother'],
keep=config['keep'])
except BaseException:
raise TypeError('Failed generating smoothed_aggregation_solver') | python | def solver(A, config):
"""Generate an SA solver given matrix A and a configuration.
Parameters
----------
A : array, matrix, csr_matrix, bsr_matrix
Matrix to invert, CSR or BSR format preferred for efficiency
config : dict
A dictionary of solver configuration parameters that is used to
generate a smoothed aggregation solver
Returns
-------
ml : smoothed_aggregation_solver
smoothed aggregation hierarchy
Notes
-----
config must contain the following parameter entries for
smoothed_aggregation_solver: symmetry, smooth, presmoother, postsmoother,
B, strength, max_levels, max_coarse, coarse_solver, aggregate, keep
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg import solver_configuration,solver
>>> A = poisson((40,40),format='csr')
>>> config = solver_configuration(A,verb=False)
>>> ml = solver(A,config)
"""
# Convert A to acceptable format
A = make_csr(A)
# Generate smoothed aggregation solver
try:
return \
smoothed_aggregation_solver(A,
B=config['B'],
BH=config['BH'],
smooth=config['smooth'],
strength=config['strength'],
max_levels=config['max_levels'],
max_coarse=config['max_coarse'],
coarse_solver=config['coarse_solver'],
symmetry=config['symmetry'],
aggregate=config['aggregate'],
presmoother=config['presmoother'],
postsmoother=config['postsmoother'],
keep=config['keep'])
except BaseException:
raise TypeError('Failed generating smoothed_aggregation_solver') | [
"def",
"solver",
"(",
"A",
",",
"config",
")",
":",
"# Convert A to acceptable format",
"A",
"=",
"make_csr",
"(",
"A",
")",
"# Generate smoothed aggregation solver",
"try",
":",
"return",
"smoothed_aggregation_solver",
"(",
"A",
",",
"B",
"=",
"config",
"[",
"'... | Generate an SA solver given matrix A and a configuration.
Parameters
----------
A : array, matrix, csr_matrix, bsr_matrix
Matrix to invert, CSR or BSR format preferred for efficiency
config : dict
A dictionary of solver configuration parameters that is used to
generate a smoothed aggregation solver
Returns
-------
ml : smoothed_aggregation_solver
smoothed aggregation hierarchy
Notes
-----
config must contain the following parameter entries for
smoothed_aggregation_solver: symmetry, smooth, presmoother, postsmoother,
B, strength, max_levels, max_coarse, coarse_solver, aggregate, keep
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg import solver_configuration,solver
>>> A = poisson((40,40),format='csr')
>>> config = solver_configuration(A,verb=False)
>>> ml = solver(A,config) | [
"Generate",
"an",
"SA",
"solver",
"given",
"matrix",
"A",
"and",
"a",
"configuration",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/blackbox.py#L158-L209 | train | 209,190 |
def solve(A, b, x0=None, tol=1e-5, maxiter=400, return_solver=False,
          existing_solver=None, verb=True, residuals=None):
    """Solve Ax=b.

    Solve the arbitrary system Ax=b with the best out-of-the box choice for a
    solver. The matrix A can be non-Hermitian, indefinite, Hermitian
    positive-definite, complex, etc... Generic and robust settings for
    smoothed_aggregation_solver(..) are used to invert A.

    Parameters
    ----------
    A : array, matrix, csr_matrix, bsr_matrix
        Matrix to invert, CSR or BSR format preferred for efficiency
    b : array
        Right hand side.
    x0 : array
        Initial guess (default random vector)
    tol : float
        Stopping criteria: relative residual r[k]/r[0] tolerance
    maxiter : int
        Stopping criteria: maximum number of allowable iterations
    return_solver : bool
        True: return the solver generated
    existing_solver : smoothed_aggregation_solver
        If instance of a multilevel solver, then existing_solver is used
        to invert A, thus saving time on setup cost.
    verb : bool
        If True, print verbose output during runtime
    residuals : list
        List to contain residual norms at each iteration.
        The preconditioned norm is used, namely
        ||r||_M = (M r, r)^(1/2) = (r, r)^(1/2)

    Returns
    -------
    x : array
        Solution to Ax = b
    ml : multilevel_solver
        Optional return of the multilevel structure used for the solve

    Raises
    ------
    TypeError
        If existing_solver's level-0 matrix does not match the size of A.

    Notes
    -----
    If calling solve(...) multiple times for the same matrix, A, solver reuse
    is easy and efficient. Set "return_solver=True", and the return value will
    be a tuple, (x,ml), where ml is the solver used to invert A, and x is the
    solution to Ax=b. Then, the next time solve(...) is called, set
    "existing_solver=ml".

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg import solve
    >>> from pyamg.gallery import poisson
    >>> from pyamg.util.linalg import norm
    >>> A = poisson((40,40),format='csr')
    >>> b = np.array(np.arange(A.shape[0]), dtype=float)
    >>> x = solve(A,b,verb=False)
    >>> print("%1.2e" % (norm(b - A*x)/norm(b)))
    6.28e-06

    """
    # Convert A to acceptable CSR/BSR format
    A = make_csr(A)

    # Generate solver if necessary
    if existing_solver is None:
        # Parameter dictionary for smoothed_aggregation_solver
        config = solver_configuration(A, B=None, verb=verb)
        existing_solver = solver(A, config)
    else:
        if existing_solver.levels[0].A.shape[0] != A.shape[0]:
            raise TypeError('Argument existing_solver must have level 0 '
                            'matrix of same size as A')

    # Krylov acceleration depends on symmetry of A
    if existing_solver.levels[0].A.symmetry == 'hermitian':
        accel = 'cg'
    else:
        accel = 'gmres'

    # Random initial guess if the caller supplied none.
    # np.random.rand replaces the deprecated scipy.rand alias, which was
    # removed from SciPy's top-level namespace.
    if x0 is None:
        x0 = np.array(np.random.rand(A.shape[0]), dtype=A.dtype)

    # Callback function to print iteration number
    if verb:
        print(" maxiter = %d" % maxiter)
        iteration = [0]  # mutable counter captured by the closure

        def callback2(_):
            iteration[0] += 1
            print(" iteration %d" % iteration[0])
    else:
        callback2 = None

    # Solve with accelerated Krylov method
    x = existing_solver.solve(b, x0=x0, accel=accel, tol=tol, maxiter=maxiter,
                              callback=callback2, residuals=residuals)
    if verb:
        # Report the M-norm of the initial and final residuals, where M is
        # the AMG preconditioner.
        r0 = b - A * x0
        rk = b - A * x
        M = existing_solver.aspreconditioner()
        nr0 = np.sqrt(np.inner(np.conjugate(M * r0), r0))
        nrk = np.sqrt(np.inner(np.conjugate(M * rk), rk))
        print(" Residuals ||r_k||_M, ||r_0||_M = %1.2e, %1.2e" % (nrk, nr0))
        if np.abs(nr0) > 1e-15:  # avoid division by a (near-)zero residual
            print(" Residual reduction ||r_k||_M/||r_0||_M = %1.2e"
                  % (nrk / nr0))

    if return_solver:
        return (x.reshape(b.shape), existing_solver)
    return x.reshape(b.shape)
existing_solver=None, verb=True, residuals=None):
"""Solve Ax=b.
Solve the arbitrary system Ax=b with the best out-of-the box choice for a
solver. The matrix A can be non-Hermitian, indefinite, Hermitian
positive-definite, complex, etc... Generic and robust settings for
smoothed_aggregation_solver(..) are used to invert A.
Parameters
----------
A : array, matrix, csr_matrix, bsr_matrix
Matrix to invert, CSR or BSR format preferred for efficiency
b : array
Right hand side.
x0 : array
Initial guess (default random vector)
tol : float
Stopping criteria: relative residual r[k]/r[0] tolerance
maxiter : int
Stopping criteria: maximum number of allowable iterations
return_solver : bool
True: return the solver generated
existing_solver : smoothed_aggregation_solver
If instance of a multilevel solver, then existing_solver is used
to invert A, thus saving time on setup cost.
verb : bool
If True, print verbose output during runtime
residuals : list
List to contain residual norms at each iteration.
The preconditioned norm is used, namely
||r||_M = (M r, r)^(1/2) = (r, r)^(1/2)
Returns
-------
x : array
Solution to Ax = b
ml : multilevel_solver
Optional return of the multilevel structure used for the solve
Notes
-----
If calling solve(...) multiple times for the same matrix, A, solver reuse
is easy and efficient. Set "return_solver=True", and the return value will
be a tuple, (x,ml), where ml is the solver used to invert A, and x is the
solution to Ax=b. Then, the next time solve(...) is called, set
"existing_solver=ml".
Examples
--------
>>> import numpy as np
>>> from pyamg import solve
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> A = poisson((40,40),format='csr')
>>> b = np.array(np.arange(A.shape[0]), dtype=float)
>>> x = solve(A,b,verb=False)
>>> print "%1.2e"%(norm(b - A*x)/norm(b))
6.28e-06
"""
# Convert A to acceptable CSR/BSR format
A = make_csr(A)
# Generate solver if necessary
if existing_solver is None:
# Parameter dictionary for smoothed_aggregation_solver
config = solver_configuration(A, B=None, verb=verb)
# Generate solver
existing_solver = solver(A, config)
else:
if existing_solver.levels[0].A.shape[0] != A.shape[0]:
raise TypeError('Argument existing_solver must have level 0 matrix\
of same size as A')
# Krylov acceleration depends on symmetry of A
if existing_solver.levels[0].A.symmetry == 'hermitian':
accel = 'cg'
else:
accel = 'gmres'
# Initial guess
if x0 is None:
x0 = np.array(sp.rand(A.shape[0],), dtype=A.dtype)
# Callback function to print iteration number
if verb:
iteration = np.zeros((1,))
print(" maxiter = %d" % maxiter)
def callback(x, iteration):
iteration[0] = iteration[0] + 1
print(" iteration %d" % iteration[0])
def callback2(x):
return callback(x, iteration)
else:
callback2 = None
# Solve with accelerated Krylov method
x = existing_solver.solve(b, x0=x0, accel=accel, tol=tol, maxiter=maxiter,
callback=callback2, residuals=residuals)
if verb:
r0 = b - A * x0
rk = b - A * x
M = existing_solver.aspreconditioner()
nr0 = np.sqrt(np.inner(np.conjugate(M * r0), r0))
nrk = np.sqrt(np.inner(np.conjugate(M * rk), rk))
print(" Residuals ||r_k||_M, ||r_0||_M = %1.2e, %1.2e" % (nrk, nr0))
if np.abs(nr0) > 1e-15:
print(" Residual reduction ||r_k||_M/||r_0||_M = %1.2e"
% (nrk / nr0))
if return_solver:
return (x.reshape(b.shape), existing_solver)
else:
return x.reshape(b.shape) | [
"def",
"solve",
"(",
"A",
",",
"b",
",",
"x0",
"=",
"None",
",",
"tol",
"=",
"1e-5",
",",
"maxiter",
"=",
"400",
",",
"return_solver",
"=",
"False",
",",
"existing_solver",
"=",
"None",
",",
"verb",
"=",
"True",
",",
"residuals",
"=",
"None",
")",
... | Solve Ax=b.
Solve the arbitrary system Ax=b with the best out-of-the box choice for a
solver. The matrix A can be non-Hermitian, indefinite, Hermitian
positive-definite, complex, etc... Generic and robust settings for
smoothed_aggregation_solver(..) are used to invert A.
Parameters
----------
A : array, matrix, csr_matrix, bsr_matrix
Matrix to invert, CSR or BSR format preferred for efficiency
b : array
Right hand side.
x0 : array
Initial guess (default random vector)
tol : float
Stopping criteria: relative residual r[k]/r[0] tolerance
maxiter : int
Stopping criteria: maximum number of allowable iterations
return_solver : bool
True: return the solver generated
existing_solver : smoothed_aggregation_solver
If instance of a multilevel solver, then existing_solver is used
to invert A, thus saving time on setup cost.
verb : bool
If True, print verbose output during runtime
residuals : list
List to contain residual norms at each iteration.
The preconditioned norm is used, namely
||r||_M = (M r, r)^(1/2) = (r, r)^(1/2)
Returns
-------
x : array
Solution to Ax = b
ml : multilevel_solver
Optional return of the multilevel structure used for the solve
Notes
-----
If calling solve(...) multiple times for the same matrix, A, solver reuse
is easy and efficient. Set "return_solver=True", and the return value will
be a tuple, (x,ml), where ml is the solver used to invert A, and x is the
solution to Ax=b. Then, the next time solve(...) is called, set
"existing_solver=ml".
Examples
--------
>>> import numpy as np
>>> from pyamg import solve
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> A = poisson((40,40),format='csr')
>>> b = np.array(np.arange(A.shape[0]), dtype=float)
>>> x = solve(A,b,verb=False)
>>> print "%1.2e"%(norm(b - A*x)/norm(b))
6.28e-06 | [
"Solve",
"Ax",
"=",
"b",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/blackbox.py#L212-L332 | train | 209,191 |
def find_comments(fname, ch):
    """Collect the comment block that precedes each function in a C++ header.

    fname: filename of the header that was parsed
    ch: CppHeaderParser parse tree for that header

    The function must look like

        /*
         * comments
         * comments
         */
        template<class I, ...>
        void somefunc(...){

    or the same without the template line, or with // style comments.
    The first three characters of every comment line (the marker plus one
    space) are stripped.

    Returns a dict mapping function name -> comment text.
    """
    with open(fname, 'r') as src:
        lines = src.readlines()

    docs = {}
    markers = ('//', '/*', ' *')
    for func in ch.functions:
        # Last line of the comment block sits just above the signature
        # (one extra line up when a template<...> line precedes it).
        end = func['line_number'] - 2
        if func['template']:
            end -= 1
        # Scan upward while lines still look like comment lines.
        begin = end
        while lines[begin].startswith(markers):
            begin -= 1
        begin += 1
        # Drop the 3-character comment prefix and trailing whitespace.
        stripped = [text[3:].rstrip() for text in lines[begin:end + 1]]
        docs[func['name']] = '\n'.join(stripped).strip()
    return docs
"""
Find the comments for a function.
fname: filename
ch: CppHeaderParser parse tree
The function must look like
/*
* comments
* comments
*/
template<class I, ...>
void somefunc(...){
-or-
/*
* comments
* comments
*/
void somefunc(...){
-or-
with // style comments
Then, take off the first three spaces
"""
with open(fname, 'r') as inf:
fdata = inf.readlines()
comments = {}
for f in ch.functions:
lineno = f['line_number'] - 1 # zero based indexing
# set starting position
lineptr = lineno - 1
if f['template']:
lineptr -= 1
start = lineptr
# find the top of the comment block
while fdata[lineptr].startswith('//') or\
fdata[lineptr].startswith('/*') or\
fdata[lineptr].startswith(' *'):
lineptr -= 1
lineptr += 1
comment = fdata[lineptr:(start + 1)]
comment = [c[3:].rstrip() for c in comment]
comments[f['name']] = '\n'.join(comment).strip()
return comments | [
"def",
"find_comments",
"(",
"fname",
",",
"ch",
")",
":",
"with",
"open",
"(",
"fname",
",",
"'r'",
")",
"as",
"inf",
":",
"fdata",
"=",
"inf",
".",
"readlines",
"(",
")",
"comments",
"=",
"{",
"}",
"for",
"f",
"in",
"ch",
".",
"functions",
":",... | Find the comments for a function.
fname: filename
ch: CppHeaderParser parse tree
The function must look like
/*
* comments
* comments
*/
template<class I, ...>
void somefunc(...){
-or-
/*
* comments
* comments
*/
void somefunc(...){
-or-
with // style comments
Then, take off the first three spaces | [
"Find",
"the",
"comments",
"for",
"a",
"function",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/amg_core/bindthem.py#L17-L69 | train | 209,192 |
def fit_candidates(AggOp, B, tol=1e-10):
    """Fit near-nullspace candidates to form the tentative prolongator.

    Parameters
    ----------
    AggOp : csr_matrix
        Describes the sparsity pattern of the tentative prolongator.
        Has dimension (#blocks, #aggregates)
    B : array
        The near-nullspace candidates stored in column-wise fashion.
        Has dimension (#blocks * blocksize, #candidates)
    tol : scalar
        Threshold for eliminating local basis functions.
        If after orthogonalization a local basis function Q[:, j] is small,
        i.e. ||Q[:, j]|| < tol, then Q[:, j] is set to zero.

    Returns
    -------
    (Q, R) : (bsr_matrix, array)
        The tentative prolongator Q is a sparse block matrix with dimensions
        (#blocks * blocksize, #aggregates * #candidates) formed by dense blocks
        of size (blocksize, #candidates). The coarse level candidates are
        stored in R which has dimensions (#aggregates * #candidates,
        #candidates).

    Raises
    ------
    TypeError
        If AggOp is not a csr_matrix.
    ValueError
        If B is not 2d, or if its row count is not a multiple of AggOp's.

    See Also
    --------
    amg_core.fit_candidates

    Notes
    -----
    Assuming that each row of AggOp contains exactly one non-zero entry,
    i.e. all unknowns belong to an aggregate, then Q and R satisfy the
    relationship B = Q*R. In other words, the near-nullspace candidates
    are represented exactly by the tentative prolongator.

    If AggOp contains rows with no non-zero entries, then the range of the
    tentative prolongator will not include those degrees of freedom. This
    situation is illustrated in the examples below.

    References
    ----------
    .. [1] Vanek, P. and Mandel, J. and Brezina, M.,
       "Algebraic Multigrid by Smoothed Aggregation for
       Second and Fourth Order Elliptic Problems",
       Computing, vol. 56, no. 3, pp. 179--196, 1996.
       http://citeseer.ist.psu.edu/vanek96algebraic.html

    Examples
    --------
    >>> from scipy.sparse import csr_matrix
    >>> from pyamg.aggregation.tentative import fit_candidates
    >>> # four nodes divided into two aggregates
    ... AggOp = csr_matrix( [[1, 0],
    ...                      [1, 0],
    ...                      [0, 1],
    ...                      [0, 1]] )
    >>> # B contains one candidate, the constant vector
    ... B = [[1],
    ...      [1],
    ...      [1],
    ...      [1]]
    >>> Q, R = fit_candidates(AggOp, B)
    >>> Q.todense()
    matrix([[ 0.70710678,  0.        ],
            [ 0.70710678,  0.        ],
            [ 0.        ,  0.70710678],
            [ 0.        ,  0.70710678]])
    >>> R
    array([[ 1.41421356],
           [ 1.41421356]])
    >>> # Two candidates, the constant vector and a linear function
    ... B = [[1, 0],
    ...      [1, 1],
    ...      [1, 2],
    ...      [1, 3]]
    >>> Q, R = fit_candidates(AggOp, B)
    >>> Q.todense()
    matrix([[ 0.70710678, -0.70710678,  0.        ,  0.        ],
            [ 0.70710678,  0.70710678,  0.        ,  0.        ],
            [ 0.        ,  0.        ,  0.70710678, -0.70710678],
            [ 0.        ,  0.        ,  0.70710678,  0.70710678]])
    >>> R
    array([[ 1.41421356,  0.70710678],
           [ 0.        ,  0.70710678],
           [ 1.41421356,  3.53553391],
           [ 0.        ,  0.70710678]])
    >>> # aggregation excludes the third node
    ... AggOp = csr_matrix( [[1, 0],
    ...                      [1, 0],
    ...                      [0, 0],
    ...                      [0, 1]] )
    >>> B = [[1],
    ...      [1],
    ...      [1],
    ...      [1]]
    >>> Q, R = fit_candidates(AggOp, B)
    >>> Q.todense()
    matrix([[ 0.70710678,  0.        ],
            [ 0.70710678,  0.        ],
            [ 0.        ,  0.        ],
            [ 0.        ,  1.        ]])
    >>> R
    array([[ 1.41421356],
           [ 1.        ]])

    """
    if not isspmatrix_csr(AggOp):
        raise TypeError('expected csr_matrix for argument AggOp')
    B = np.asarray(B)
    # Promote anything that is not single/double real or complex to float64;
    # presumably the amg_core kernel is only instantiated for these dtypes.
    if B.dtype not in ['float32', 'float64', 'complex64', 'complex128']:
        B = np.asarray(B, dtype='float64')
    if len(B.shape) != 2:
        raise ValueError('expected 2d array for argument B')
    # Each supernode (row of AggOp) must own a whole number of rows of B.
    if B.shape[0] % AggOp.shape[0] != 0:
        raise ValueError('dimensions of AggOp %s and B %s are \
            incompatible' % (AggOp.shape, B.shape))
    N_fine, N_coarse = AggOp.shape
    K1 = int(B.shape[0] / N_fine)  # dof per supernode (e.g. 3 for 3d vectors)
    K2 = B.shape[1]  # candidates
    # the first two dimensions of R and Qx are collapsed later
    R = np.empty((N_coarse, K2, K2), dtype=B.dtype)  # coarse candidates
    Qx = np.empty((AggOp.nnz, K1, K2), dtype=B.dtype)  # BSR data array
    # CSC form is handed to the kernel — NOTE(review): presumably because it
    # processes one aggregate (column of AggOp) at a time; confirm in amg_core.
    AggOp_csc = AggOp.tocsc()
    fn = amg_core.fit_candidates
    fn(N_fine, N_coarse, K1, K2,
       AggOp_csc.indptr, AggOp_csc.indices, Qx.ravel(),
       B.ravel(), R.ravel(), tol)
    # Assemble the transposed blocks into a BSR matrix, then transpose back so
    # Q has shape (K1*N_fine, K2*N_coarse) with (K1, K2) blocks.
    Q = bsr_matrix((Qx.swapaxes(1, 2).copy(), AggOp_csc.indices,
                    AggOp_csc.indptr), shape=(K2*N_coarse, K1*N_fine))
    Q = Q.T.tobsr()
    # Collapse (N_coarse, K2, K2) into the 2d coarse-candidate array.
    R = R.reshape(-1, K2)
    return Q, R
"""Fit near-nullspace candidates to form the tentative prolongator.
Parameters
----------
AggOp : csr_matrix
Describes the sparsity pattern of the tentative prolongator.
Has dimension (#blocks, #aggregates)
B : array
The near-nullspace candidates stored in column-wise fashion.
Has dimension (#blocks * blocksize, #candidates)
tol : scalar
Threshold for eliminating local basis functions.
If after orthogonalization a local basis function Q[:, j] is small,
i.e. ||Q[:, j]|| < tol, then Q[:, j] is set to zero.
Returns
-------
(Q, R) : (bsr_matrix, array)
The tentative prolongator Q is a sparse block matrix with dimensions
(#blocks * blocksize, #aggregates * #candidates) formed by dense blocks
of size (blocksize, #candidates). The coarse level candidates are
stored in R which has dimensions (#aggregates * #candidates,
#candidates).
See Also
--------
amg_core.fit_candidates
Notes
-----
Assuming that each row of AggOp contains exactly one non-zero entry,
i.e. all unknowns belong to an aggregate, then Q and R satisfy the
relationship B = Q*R. In other words, the near-nullspace candidates
are represented exactly by the tentative prolongator.
If AggOp contains rows with no non-zero entries, then the range of the
tentative prolongator will not include those degrees of freedom. This
situation is illustrated in the examples below.
References
----------
.. [1] Vanek, P. and Mandel, J. and Brezina, M.,
"Algebraic Multigrid by Smoothed Aggregation for
Second and Fourth Order Elliptic Problems",
Computing, vol. 56, no. 3, pp. 179--196, 1996.
http://citeseer.ist.psu.edu/vanek96algebraic.html
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> from pyamg.aggregation.tentative import fit_candidates
>>> # four nodes divided into two aggregates
... AggOp = csr_matrix( [[1, 0],
... [1, 0],
... [0, 1],
... [0, 1]] )
>>> # B contains one candidate, the constant vector
... B = [[1],
... [1],
... [1],
... [1]]
>>> Q, R = fit_candidates(AggOp, B)
>>> Q.todense()
matrix([[ 0.70710678, 0. ],
[ 0.70710678, 0. ],
[ 0. , 0.70710678],
[ 0. , 0.70710678]])
>>> R
array([[ 1.41421356],
[ 1.41421356]])
>>> # Two candidates, the constant vector and a linear function
... B = [[1, 0],
... [1, 1],
... [1, 2],
... [1, 3]]
>>> Q, R = fit_candidates(AggOp, B)
>>> Q.todense()
matrix([[ 0.70710678, -0.70710678, 0. , 0. ],
[ 0.70710678, 0.70710678, 0. , 0. ],
[ 0. , 0. , 0.70710678, -0.70710678],
[ 0. , 0. , 0.70710678, 0.70710678]])
>>> R
array([[ 1.41421356, 0.70710678],
[ 0. , 0.70710678],
[ 1.41421356, 3.53553391],
[ 0. , 0.70710678]])
>>> # aggregation excludes the third node
... AggOp = csr_matrix( [[1, 0],
... [1, 0],
... [0, 0],
... [0, 1]] )
>>> B = [[1],
... [1],
... [1],
... [1]]
>>> Q, R = fit_candidates(AggOp, B)
>>> Q.todense()
matrix([[ 0.70710678, 0. ],
[ 0.70710678, 0. ],
[ 0. , 0. ],
[ 0. , 1. ]])
>>> R
array([[ 1.41421356],
[ 1. ]])
"""
if not isspmatrix_csr(AggOp):
raise TypeError('expected csr_matrix for argument AggOp')
B = np.asarray(B)
if B.dtype not in ['float32', 'float64', 'complex64', 'complex128']:
B = np.asarray(B, dtype='float64')
if len(B.shape) != 2:
raise ValueError('expected 2d array for argument B')
if B.shape[0] % AggOp.shape[0] != 0:
raise ValueError('dimensions of AggOp %s and B %s are \
incompatible' % (AggOp.shape, B.shape))
N_fine, N_coarse = AggOp.shape
K1 = int(B.shape[0] / N_fine) # dof per supernode (e.g. 3 for 3d vectors)
K2 = B.shape[1] # candidates
# the first two dimensions of R and Qx are collapsed later
R = np.empty((N_coarse, K2, K2), dtype=B.dtype) # coarse candidates
Qx = np.empty((AggOp.nnz, K1, K2), dtype=B.dtype) # BSR data array
AggOp_csc = AggOp.tocsc()
fn = amg_core.fit_candidates
fn(N_fine, N_coarse, K1, K2,
AggOp_csc.indptr, AggOp_csc.indices, Qx.ravel(),
B.ravel(), R.ravel(), tol)
Q = bsr_matrix((Qx.swapaxes(1, 2).copy(), AggOp_csc.indices,
AggOp_csc.indptr), shape=(K2*N_coarse, K1*N_fine))
Q = Q.T.tobsr()
R = R.reshape(-1, K2)
return Q, R | [
"def",
"fit_candidates",
"(",
"AggOp",
",",
"B",
",",
"tol",
"=",
"1e-10",
")",
":",
"if",
"not",
"isspmatrix_csr",
"(",
"AggOp",
")",
":",
"raise",
"TypeError",
"(",
"'expected csr_matrix for argument AggOp'",
")",
"B",
"=",
"np",
".",
"asarray",
"(",
"B"... | Fit near-nullspace candidates to form the tentative prolongator.
Parameters
----------
AggOp : csr_matrix
Describes the sparsity pattern of the tentative prolongator.
Has dimension (#blocks, #aggregates)
B : array
The near-nullspace candidates stored in column-wise fashion.
Has dimension (#blocks * blocksize, #candidates)
tol : scalar
Threshold for eliminating local basis functions.
If after orthogonalization a local basis function Q[:, j] is small,
i.e. ||Q[:, j]|| < tol, then Q[:, j] is set to zero.
Returns
-------
(Q, R) : (bsr_matrix, array)
The tentative prolongator Q is a sparse block matrix with dimensions
(#blocks * blocksize, #aggregates * #candidates) formed by dense blocks
of size (blocksize, #candidates). The coarse level candidates are
stored in R which has dimensions (#aggregates * #candidates,
#candidates).
See Also
--------
amg_core.fit_candidates
Notes
-----
Assuming that each row of AggOp contains exactly one non-zero entry,
i.e. all unknowns belong to an aggregate, then Q and R satisfy the
relationship B = Q*R. In other words, the near-nullspace candidates
are represented exactly by the tentative prolongator.
If AggOp contains rows with no non-zero entries, then the range of the
tentative prolongator will not include those degrees of freedom. This
situation is illustrated in the examples below.
References
----------
.. [1] Vanek, P. and Mandel, J. and Brezina, M.,
"Algebraic Multigrid by Smoothed Aggregation for
Second and Fourth Order Elliptic Problems",
Computing, vol. 56, no. 3, pp. 179--196, 1996.
http://citeseer.ist.psu.edu/vanek96algebraic.html
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> from pyamg.aggregation.tentative import fit_candidates
>>> # four nodes divided into two aggregates
... AggOp = csr_matrix( [[1, 0],
... [1, 0],
... [0, 1],
... [0, 1]] )
>>> # B contains one candidate, the constant vector
... B = [[1],
... [1],
... [1],
... [1]]
>>> Q, R = fit_candidates(AggOp, B)
>>> Q.todense()
matrix([[ 0.70710678, 0. ],
[ 0.70710678, 0. ],
[ 0. , 0.70710678],
[ 0. , 0.70710678]])
>>> R
array([[ 1.41421356],
[ 1.41421356]])
>>> # Two candidates, the constant vector and a linear function
... B = [[1, 0],
... [1, 1],
... [1, 2],
... [1, 3]]
>>> Q, R = fit_candidates(AggOp, B)
>>> Q.todense()
matrix([[ 0.70710678, -0.70710678, 0. , 0. ],
[ 0.70710678, 0.70710678, 0. , 0. ],
[ 0. , 0. , 0.70710678, -0.70710678],
[ 0. , 0. , 0.70710678, 0.70710678]])
>>> R
array([[ 1.41421356, 0.70710678],
[ 0. , 0.70710678],
[ 1.41421356, 3.53553391],
[ 0. , 0.70710678]])
>>> # aggregation excludes the third node
... AggOp = csr_matrix( [[1, 0],
... [1, 0],
... [0, 0],
... [0, 1]] )
>>> B = [[1],
... [1],
... [1],
... [1]]
>>> Q, R = fit_candidates(AggOp, B)
>>> Q.todense()
matrix([[ 0.70710678, 0. ],
[ 0.70710678, 0. ],
[ 0. , 0. ],
[ 0. , 1. ]])
>>> R
array([[ 1.41421356],
[ 1. ]]) | [
"Fit",
"near",
"-",
"nullspace",
"candidates",
"to",
"form",
"the",
"tentative",
"prolongator",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/aggregation/tentative.py#L11-L154 | train | 209,193 |
def _rand_sparse(m, n, density, format='csr'):
    """Construct a random m-by-n sparse matrix (base for sprand, sprandn).

    Parameters
    ----------
    m, n : int
        shape of the result
    density : float
        target a matrix with nnz(A) = m*n*density, 0<=density<=1
    format : string
        sparse matrix format to return, e.g. 'csr', 'coo', etc.

    Returns
    -------
    A : sparse matrix
        m x n sparse matrix with unit entries; duplicate (i, j) draws are
        summed together, so some values may exceed 1 and the actual nnz may
        be below the target.
    """
    # Clamp the target entry count to [0, m*n].
    nnz = max(min(int(m*n*density), m*n), 0)
    # np.random.randint's `high` bound is exclusive, so use m and n here;
    # the previous `high=m-1` could never place an entry in the last
    # row/column (and raised for m == 1 or n == 1).
    row = np.random.randint(low=0, high=m, size=nnz)
    col = np.random.randint(low=0, high=n, size=nnz)
    data = np.ones(nnz, dtype=float)
    # duplicate (i,j) entries will be summed together by the COO->CSR build
    A = sp.sparse.csr_matrix((data, (row, col)), shape=(m, n))
    # Honor the requested format (it was previously accepted but ignored).
    return A.asformat(format)
"""Construct base function for sprand, sprandn."""
nnz = max(min(int(m*n*density), m*n), 0)
row = np.random.randint(low=0, high=m-1, size=nnz)
col = np.random.randint(low=0, high=n-1, size=nnz)
data = np.ones(nnz, dtype=float)
# duplicate (i,j) entries will be summed together
return sp.sparse.csr_matrix((data, (row, col)), shape=(m, n)) | [
"def",
"_rand_sparse",
"(",
"m",
",",
"n",
",",
"density",
",",
"format",
"=",
"'csr'",
")",
":",
"nnz",
"=",
"max",
"(",
"min",
"(",
"int",
"(",
"m",
"*",
"n",
"*",
"density",
")",
",",
"m",
"*",
"n",
")",
",",
"0",
")",
"row",
"=",
"np",
... | Construct base function for sprand, sprandn. | [
"Construct",
"base",
"function",
"for",
"sprand",
"sprandn",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/gallery/random_sparse.py#L11-L20 | train | 209,194 |
def sprand(m, n, density, format='csr'):
    """Return a random sparse matrix.

    Parameters
    ----------
    m, n : int
        shape of the result
    density : float
        target a matrix with nnz(A) = m*n*density, 0<=density<=1
    format : string
        sparse matrix format to return, e.g. 'csr', 'coo', etc.

    Returns
    -------
    A : sparse matrix
        m x n sparse matrix with uniformly distributed random values

    Examples
    --------
    >>> from pyamg.gallery import sprand
    >>> A = sprand(5,5,3/5.0)

    """
    m, n = int(m), int(n)

    # get sparsity pattern
    A = _rand_sparse(m, n, density, format='csr')

    # Replace the placeholder data with uniform random values in [0, 1).
    # np.random.rand replaces the deprecated scipy.rand alias, which was
    # removed from SciPy's top-level namespace.
    A.data = np.random.rand(A.nnz)
    return A.asformat(format)
"""Return a random sparse matrix.
Parameters
----------
m, n : int
shape of the result
density : float
target a matrix with nnz(A) = m*n*density, 0<=density<=1
format : string
sparse matrix format to return, e.g. 'csr', 'coo', etc.
Return
------
A : sparse matrix
m x n sparse matrix
Examples
--------
>>> from pyamg.gallery import sprand
>>> A = sprand(5,5,3/5.0)
"""
m, n = int(m), int(n)
# get sparsity pattern
A = _rand_sparse(m, n, density, format='csr')
# replace data with random values
A.data = sp.rand(A.nnz)
return A.asformat(format) | [
"def",
"sprand",
"(",
"m",
",",
"n",
",",
"density",
",",
"format",
"=",
"'csr'",
")",
":",
"m",
",",
"n",
"=",
"int",
"(",
"m",
")",
",",
"int",
"(",
"n",
")",
"# get sparsity pattern",
"A",
"=",
"_rand_sparse",
"(",
"m",
",",
"n",
",",
"densi... | Return a random sparse matrix.
Parameters
----------
m, n : int
shape of the result
density : float
target a matrix with nnz(A) = m*n*density, 0<=density<=1
format : string
sparse matrix format to return, e.g. 'csr', 'coo', etc.
Return
------
A : sparse matrix
m x n sparse matrix
Examples
--------
>>> from pyamg.gallery import sprand
>>> A = sprand(5,5,3/5.0) | [
"Return",
"a",
"random",
"sparse",
"matrix",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/gallery/random_sparse.py#L23-L54 | train | 209,195 |
def linear_elasticity(grid, spacing=None, E=1e5, nu=0.3, format=None):
    """Linear elasticity problem discretized with Q1 finite elements on a regular rectangular grid.

    Parameters
    ----------
    grid : tuple
        length 2 tuple of grid sizes, e.g. (10, 10)
    spacing : tuple
        length 2 tuple of grid spacings, e.g. (1.0, 0.1)
    E : float
        Young's modulus
    nu : float
        Poisson's ratio
    format : string
        Format of the returned sparse matrix (eg. 'csr', 'bsr', etc.)

    Returns
    -------
    A : csr_matrix
        FE Q1 stiffness matrix
    B : array
        rigid body modes

    Raises
    ------
    NotImplementedError
        If grid is not 2-dimensional.

    See Also
    --------
    linear_elasticity_p1

    Notes
    -----
    - only 2d for now

    Examples
    --------
    >>> from pyamg.gallery import linear_elasticity
    >>> A, B = linear_elasticity((4, 4))

    References
    ----------
    .. [1] J. Alberty, C. Carstensen, S. A. Funken, and R. KloseDOI
       "Matlab implementation of the finite element method in elasticity"
       Computing, Volume 69, Issue 3 (November 2002) Pages: 239 - 263
       http://www.math.hu-berlin.de/~cc/

    """
    if len(grid) == 2:
        return q12d(grid, spacing=spacing, E=E, nu=nu, format=format)
    # The original `raise NotImplemented(...)` raised the non-exception
    # constant NotImplemented, which itself fails with
    # "TypeError: exceptions must derive from BaseException".
    raise NotImplementedError('no support for grid=%s' % str(grid))
"""Linear elasticity problem discretizes with Q1 finite elements on a regular rectangular grid.
Parameters
----------
grid : tuple
length 2 tuple of grid sizes, e.g. (10, 10)
spacing : tuple
length 2 tuple of grid spacings, e.g. (1.0, 0.1)
E : float
Young's modulus
nu : float
Poisson's ratio
format : string
Format of the returned sparse matrix (eg. 'csr', 'bsr', etc.)
Returns
-------
A : csr_matrix
FE Q1 stiffness matrix
B : array
rigid body modes
See Also
--------
linear_elasticity_p1
Notes
-----
- only 2d for now
Examples
--------
>>> from pyamg.gallery import linear_elasticity
>>> A, B = linear_elasticity((4, 4))
References
----------
.. [1] J. Alberty, C. Carstensen, S. A. Funken, and R. KloseDOI
"Matlab implementation of the finite element method in elasticity"
Computing, Volume 69, Issue 3 (November 2002) Pages: 239 - 263
http://www.math.hu-berlin.de/~cc/
"""
if len(grid) == 2:
return q12d(grid, spacing=spacing, E=E, nu=nu, format=format)
else:
raise NotImplemented('no support for grid=%s' % str(grid)) | [
"def",
"linear_elasticity",
"(",
"grid",
",",
"spacing",
"=",
"None",
",",
"E",
"=",
"1e5",
",",
"nu",
"=",
"0.3",
",",
"format",
"=",
"None",
")",
":",
"if",
"len",
"(",
"grid",
")",
"==",
"2",
":",
"return",
"q12d",
"(",
"grid",
",",
"spacing",... | Linear elasticity problem discretizes with Q1 finite elements on a regular rectangular grid.
Parameters
----------
grid : tuple
length 2 tuple of grid sizes, e.g. (10, 10)
spacing : tuple
length 2 tuple of grid spacings, e.g. (1.0, 0.1)
E : float
Young's modulus
nu : float
Poisson's ratio
format : string
Format of the returned sparse matrix (eg. 'csr', 'bsr', etc.)
Returns
-------
A : csr_matrix
FE Q1 stiffness matrix
B : array
rigid body modes
See Also
--------
linear_elasticity_p1
Notes
-----
- only 2d for now
Examples
--------
>>> from pyamg.gallery import linear_elasticity
>>> A, B = linear_elasticity((4, 4))
References
----------
.. [1] J. Alberty, C. Carstensen, S. A. Funken, and R. KloseDOI
"Matlab implementation of the finite element method in elasticity"
Computing, Volume 69, Issue 3 (November 2002) Pages: 239 - 263
http://www.math.hu-berlin.de/~cc/ | [
"Linear",
"elasticity",
"problem",
"discretizes",
"with",
"Q1",
"finite",
"elements",
"on",
"a",
"regular",
"rectangular",
"grid",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/gallery/elasticity.py#L10-L58 | train | 209,196 |
pyamg/pyamg | pyamg/gallery/elasticity.py | q12d_local | def q12d_local(vertices, lame, mu):
"""Local stiffness matrix for two dimensional elasticity on a square element.
Parameters
----------
lame : Float
Lame's first parameter
mu : Float
shear modulus
See Also
--------
linear_elasticity
Notes
-----
Vertices should be listed in counter-clockwise order::
[3]----[2]
| |
| |
[0]----[1]
Degrees of freedom are enumerated as follows::
[x=6,y=7]----[x=4,y=5]
| |
| |
[x=0,y=1]----[x=2,y=3]
"""
M = lame + 2*mu # P-wave modulus
R_11 = np.matrix([[2, -2, -1, 1],
[-2, 2, 1, -1],
[-1, 1, 2, -2],
[1, -1, -2, 2]]) / 6.0
R_12 = np.matrix([[1, 1, -1, -1],
[-1, -1, 1, 1],
[-1, -1, 1, 1],
[1, 1, -1, -1]]) / 4.0
R_22 = np.matrix([[2, 1, -1, -2],
[1, 2, -2, -1],
[-1, -2, 2, 1],
[-2, -1, 1, 2]]) / 6.0
F = inv(np.vstack((vertices[1] - vertices[0], vertices[3] - vertices[0])))
K = np.zeros((8, 8)) # stiffness matrix
E = F.T * np.matrix([[M, 0], [0, mu]]) * F
K[0::2, 0::2] = E[0, 0] * R_11 + E[0, 1] * R_12 +\
E[1, 0] * R_12.T + E[1, 1] * R_22
E = F.T * np.matrix([[mu, 0], [0, M]]) * F
K[1::2, 1::2] = E[0, 0] * R_11 + E[0, 1] * R_12 +\
E[1, 0] * R_12.T + E[1, 1] * R_22
E = F.T * np.matrix([[0, mu], [lame, 0]]) * F
K[1::2, 0::2] = E[0, 0] * R_11 + E[0, 1] * R_12 +\
E[1, 0] * R_12.T + E[1, 1] * R_22
K[0::2, 1::2] = K[1::2, 0::2].T
K /= det(F)
return K | python | def q12d_local(vertices, lame, mu):
"""Local stiffness matrix for two dimensional elasticity on a square element.
Parameters
----------
lame : Float
Lame's first parameter
mu : Float
shear modulus
See Also
--------
linear_elasticity
Notes
-----
Vertices should be listed in counter-clockwise order::
[3]----[2]
| |
| |
[0]----[1]
Degrees of freedom are enumerated as follows::
[x=6,y=7]----[x=4,y=5]
| |
| |
[x=0,y=1]----[x=2,y=3]
"""
M = lame + 2*mu # P-wave modulus
R_11 = np.matrix([[2, -2, -1, 1],
[-2, 2, 1, -1],
[-1, 1, 2, -2],
[1, -1, -2, 2]]) / 6.0
R_12 = np.matrix([[1, 1, -1, -1],
[-1, -1, 1, 1],
[-1, -1, 1, 1],
[1, 1, -1, -1]]) / 4.0
R_22 = np.matrix([[2, 1, -1, -2],
[1, 2, -2, -1],
[-1, -2, 2, 1],
[-2, -1, 1, 2]]) / 6.0
F = inv(np.vstack((vertices[1] - vertices[0], vertices[3] - vertices[0])))
K = np.zeros((8, 8)) # stiffness matrix
E = F.T * np.matrix([[M, 0], [0, mu]]) * F
K[0::2, 0::2] = E[0, 0] * R_11 + E[0, 1] * R_12 +\
E[1, 0] * R_12.T + E[1, 1] * R_22
E = F.T * np.matrix([[mu, 0], [0, M]]) * F
K[1::2, 1::2] = E[0, 0] * R_11 + E[0, 1] * R_12 +\
E[1, 0] * R_12.T + E[1, 1] * R_22
E = F.T * np.matrix([[0, mu], [lame, 0]]) * F
K[1::2, 0::2] = E[0, 0] * R_11 + E[0, 1] * R_12 +\
E[1, 0] * R_12.T + E[1, 1] * R_22
K[0::2, 1::2] = K[1::2, 0::2].T
K /= det(F)
return K | [
"def",
"q12d_local",
"(",
"vertices",
",",
"lame",
",",
"mu",
")",
":",
"M",
"=",
"lame",
"+",
"2",
"*",
"mu",
"# P-wave modulus",
"R_11",
"=",
"np",
".",
"matrix",
"(",
"[",
"[",
"2",
",",
"-",
"2",
",",
"-",
"1",
",",
"1",
"]",
",",
"[",
... | Local stiffness matrix for two dimensional elasticity on a square element.
Parameters
----------
lame : Float
Lame's first parameter
mu : Float
shear modulus
See Also
--------
linear_elasticity
Notes
-----
Vertices should be listed in counter-clockwise order::
[3]----[2]
| |
| |
[0]----[1]
Degrees of freedom are enumerated as follows::
[x=6,y=7]----[x=4,y=5]
| |
| |
[x=0,y=1]----[x=2,y=3] | [
"Local",
"stiffness",
"matrix",
"for",
"two",
"dimensional",
"elasticity",
"on",
"a",
"square",
"element",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/gallery/elasticity.py#L139-L207 | train | 209,197 |
pyamg/pyamg | pyamg/gallery/elasticity.py | p12d_local | def p12d_local(vertices, lame, mu):
"""Local stiffness matrix for P1 elements in 2d."""
assert(vertices.shape == (3, 2))
A = np.vstack((np.ones((1, 3)), vertices.T))
PhiGrad = inv(A)[:, 1:] # gradients of basis functions
R = np.zeros((3, 6))
R[[[0], [2]], [0, 2, 4]] = PhiGrad.T
R[[[2], [1]], [1, 3, 5]] = PhiGrad.T
C = mu*np.array([[2, 0, 0], [0, 2, 0], [0, 0, 1]]) +\
lame*np.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]])
K = det(A)/2.0*np.dot(np.dot(R.T, C), R)
return K | python | def p12d_local(vertices, lame, mu):
"""Local stiffness matrix for P1 elements in 2d."""
assert(vertices.shape == (3, 2))
A = np.vstack((np.ones((1, 3)), vertices.T))
PhiGrad = inv(A)[:, 1:] # gradients of basis functions
R = np.zeros((3, 6))
R[[[0], [2]], [0, 2, 4]] = PhiGrad.T
R[[[2], [1]], [1, 3, 5]] = PhiGrad.T
C = mu*np.array([[2, 0, 0], [0, 2, 0], [0, 0, 1]]) +\
lame*np.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]])
K = det(A)/2.0*np.dot(np.dot(R.T, C), R)
return K | [
"def",
"p12d_local",
"(",
"vertices",
",",
"lame",
",",
"mu",
")",
":",
"assert",
"(",
"vertices",
".",
"shape",
"==",
"(",
"3",
",",
"2",
")",
")",
"A",
"=",
"np",
".",
"vstack",
"(",
"(",
"np",
".",
"ones",
"(",
"(",
"1",
",",
"3",
")",
"... | Local stiffness matrix for P1 elements in 2d. | [
"Local",
"stiffness",
"matrix",
"for",
"P1",
"elements",
"in",
"2d",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/gallery/elasticity.py#L319-L331 | train | 209,198 |
pyamg/pyamg | pyamg/vis/vtk_writer.py | write_basic_mesh | def write_basic_mesh(Verts, E2V=None, mesh_type='tri',
pdata=None, pvdata=None,
cdata=None, cvdata=None, fname='output.vtk'):
"""Write mesh file for basic types of elements.
Parameters
----------
fname : {string}
file to be written, e.g. 'mymesh.vtu'
Verts : {array}
coordinate array (N x D)
E2V : {array}
element index array (Nel x Nelnodes)
mesh_type : {string}
type of elements: tri, quad, tet, hex (all 3d)
pdata : {array}
scalar data on vertices (N x Nfields)
pvdata : {array}
vector data on vertices (3*Nfields x N)
cdata : {array}
scalar data on cells (Nfields x Nel)
cvdata : {array}
vector data on cells (3*Nfields x Nel)
Returns
-------
writes a .vtu file for use in Paraview
Notes
-----
The difference between write_basic_mesh and write_vtu is that write_vtu is
more general and requires dictionaries of cell information.
write_basic_mesh calls write_vtu
Examples
--------
>>> import numpy as np
>>> from pyamg.vis import write_basic_mesh
>>> Verts = np.array([[0.0,0.0],
... [1.0,0.0],
... [2.0,0.0],
... [0.0,1.0],
... [1.0,1.0],
... [2.0,1.0],
... [0.0,2.0],
... [1.0,2.0],
... [2.0,2.0],
... [0.0,3.0],
... [1.0,3.0],
... [2.0,3.0]])
>>> E2V = np.array([[0,4,3],
... [0,1,4],
... [1,5,4],
... [1,2,5],
... [3,7,6],
... [3,4,7],
... [4,8,7],
... [4,5,8],
... [6,10,9],
... [6,7,10],
... [7,11,10],
... [7,8,11]])
>>> pdata=np.ones((12,2))
>>> pvdata=np.ones((12*3,2))
>>> cdata=np.ones((12,2))
>>> cvdata=np.ones((3*12,2))
>>> write_basic_mesh(Verts, E2V=E2V, mesh_type='tri',pdata=pdata,
pvdata=pvdata, cdata=cdata, cvdata=cvdata,
fname='test.vtu')
See Also
--------
write_vtu
"""
if E2V is None:
mesh_type = 'vertex'
map_type_to_key = {'vertex': 1, 'tri': 5, 'quad': 9, 'tet': 10, 'hex': 12}
if mesh_type not in map_type_to_key:
raise ValueError('unknown mesh_type=%s' % mesh_type)
key = map_type_to_key[mesh_type]
if mesh_type == 'vertex':
uidx = np.arange(0, Verts.shape[0]).reshape((Verts.shape[0], 1))
E2V = {key: uidx}
else:
E2V = {key: E2V}
if cdata is not None:
cdata = {key: cdata}
if cvdata is not None:
cvdata = {key: cvdata}
write_vtu(Verts=Verts, Cells=E2V, pdata=pdata, pvdata=pvdata,
cdata=cdata, cvdata=cvdata, fname=fname) | python | def write_basic_mesh(Verts, E2V=None, mesh_type='tri',
pdata=None, pvdata=None,
cdata=None, cvdata=None, fname='output.vtk'):
"""Write mesh file for basic types of elements.
Parameters
----------
fname : {string}
file to be written, e.g. 'mymesh.vtu'
Verts : {array}
coordinate array (N x D)
E2V : {array}
element index array (Nel x Nelnodes)
mesh_type : {string}
type of elements: tri, quad, tet, hex (all 3d)
pdata : {array}
scalar data on vertices (N x Nfields)
pvdata : {array}
vector data on vertices (3*Nfields x N)
cdata : {array}
scalar data on cells (Nfields x Nel)
cvdata : {array}
vector data on cells (3*Nfields x Nel)
Returns
-------
writes a .vtu file for use in Paraview
Notes
-----
The difference between write_basic_mesh and write_vtu is that write_vtu is
more general and requires dictionaries of cell information.
write_basic_mesh calls write_vtu
Examples
--------
>>> import numpy as np
>>> from pyamg.vis import write_basic_mesh
>>> Verts = np.array([[0.0,0.0],
... [1.0,0.0],
... [2.0,0.0],
... [0.0,1.0],
... [1.0,1.0],
... [2.0,1.0],
... [0.0,2.0],
... [1.0,2.0],
... [2.0,2.0],
... [0.0,3.0],
... [1.0,3.0],
... [2.0,3.0]])
>>> E2V = np.array([[0,4,3],
... [0,1,4],
... [1,5,4],
... [1,2,5],
... [3,7,6],
... [3,4,7],
... [4,8,7],
... [4,5,8],
... [6,10,9],
... [6,7,10],
... [7,11,10],
... [7,8,11]])
>>> pdata=np.ones((12,2))
>>> pvdata=np.ones((12*3,2))
>>> cdata=np.ones((12,2))
>>> cvdata=np.ones((3*12,2))
>>> write_basic_mesh(Verts, E2V=E2V, mesh_type='tri',pdata=pdata,
pvdata=pvdata, cdata=cdata, cvdata=cvdata,
fname='test.vtu')
See Also
--------
write_vtu
"""
if E2V is None:
mesh_type = 'vertex'
map_type_to_key = {'vertex': 1, 'tri': 5, 'quad': 9, 'tet': 10, 'hex': 12}
if mesh_type not in map_type_to_key:
raise ValueError('unknown mesh_type=%s' % mesh_type)
key = map_type_to_key[mesh_type]
if mesh_type == 'vertex':
uidx = np.arange(0, Verts.shape[0]).reshape((Verts.shape[0], 1))
E2V = {key: uidx}
else:
E2V = {key: E2V}
if cdata is not None:
cdata = {key: cdata}
if cvdata is not None:
cvdata = {key: cvdata}
write_vtu(Verts=Verts, Cells=E2V, pdata=pdata, pvdata=pvdata,
cdata=cdata, cvdata=cvdata, fname=fname) | [
"def",
"write_basic_mesh",
"(",
"Verts",
",",
"E2V",
"=",
"None",
",",
"mesh_type",
"=",
"'tri'",
",",
"pdata",
"=",
"None",
",",
"pvdata",
"=",
"None",
",",
"cdata",
"=",
"None",
",",
"cvdata",
"=",
"None",
",",
"fname",
"=",
"'output.vtk'",
")",
":... | Write mesh file for basic types of elements.
Parameters
----------
fname : {string}
file to be written, e.g. 'mymesh.vtu'
Verts : {array}
coordinate array (N x D)
E2V : {array}
element index array (Nel x Nelnodes)
mesh_type : {string}
type of elements: tri, quad, tet, hex (all 3d)
pdata : {array}
scalar data on vertices (N x Nfields)
pvdata : {array}
vector data on vertices (3*Nfields x N)
cdata : {array}
scalar data on cells (Nfields x Nel)
cvdata : {array}
vector data on cells (3*Nfields x Nel)
Returns
-------
writes a .vtu file for use in Paraview
Notes
-----
The difference between write_basic_mesh and write_vtu is that write_vtu is
more general and requires dictionaries of cell information.
write_basic_mesh calls write_vtu
Examples
--------
>>> import numpy as np
>>> from pyamg.vis import write_basic_mesh
>>> Verts = np.array([[0.0,0.0],
... [1.0,0.0],
... [2.0,0.0],
... [0.0,1.0],
... [1.0,1.0],
... [2.0,1.0],
... [0.0,2.0],
... [1.0,2.0],
... [2.0,2.0],
... [0.0,3.0],
... [1.0,3.0],
... [2.0,3.0]])
>>> E2V = np.array([[0,4,3],
... [0,1,4],
... [1,5,4],
... [1,2,5],
... [3,7,6],
... [3,4,7],
... [4,8,7],
... [4,5,8],
... [6,10,9],
... [6,7,10],
... [7,11,10],
... [7,8,11]])
>>> pdata=np.ones((12,2))
>>> pvdata=np.ones((12*3,2))
>>> cdata=np.ones((12,2))
>>> cvdata=np.ones((3*12,2))
>>> write_basic_mesh(Verts, E2V=E2V, mesh_type='tri',pdata=pdata,
pvdata=pvdata, cdata=cdata, cvdata=cvdata,
fname='test.vtu')
See Also
--------
write_vtu | [
"Write",
"mesh",
"file",
"for",
"basic",
"types",
"of",
"elements",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/vis/vtk_writer.py#L377-L475 | train | 209,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.