repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
nickjj/ansigenome | ansigenome/export.py | Export.dump | def dump(self):
"""
Dump the output to json.
"""
report_as_json_string = utils.dict_to_json(self.report)
if self.out_file:
utils.string_to_file(self.out_file, report_as_json_string)
else:
print report_as_json_string | python | def dump(self):
"""
Dump the output to json.
"""
report_as_json_string = utils.dict_to_json(self.report)
if self.out_file:
utils.string_to_file(self.out_file, report_as_json_string)
else:
print report_as_json_string | [
"def",
"dump",
"(",
"self",
")",
":",
"report_as_json_string",
"=",
"utils",
".",
"dict_to_json",
"(",
"self",
".",
"report",
")",
"if",
"self",
".",
"out_file",
":",
"utils",
".",
"string_to_file",
"(",
"self",
".",
"out_file",
",",
"report_as_json_string",... | Dump the output to json. | [
"Dump",
"the",
"output",
"to",
"json",
"."
] | 70cd98d7a23d36c56f4e713ea820cfb4c485c81c | https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/export.py#L258-L266 | train | 34,900 |
SethMMorton/natsort | natsort/compat/fake_fastnumbers.py | fast_float | def fast_float(
x,
key=lambda x: x,
nan=None,
_uni=unicodedata.numeric,
_nan_inf=NAN_INF,
_first_char=POTENTIAL_FIRST_CHAR,
):
"""
Convert a string to a float quickly, return input as-is if not possible.
We don't need to accept all input that the real fast_int accepts because
natsort is controlling what is passed to this function.
Parameters
----------
x : str
String to attempt to convert to a float.
key : callable
Single-argument function to apply to *x* if conversion fails.
nan : object
Value to return instead of NaN if NaN would be returned.
Returns
-------
*str* or *float*
"""
if x[0] in _first_char or x.lstrip()[:3] in _nan_inf:
try:
x = float(x)
return nan if nan is not None and x != x else x
except ValueError:
try:
return _uni(x, key(x)) if len(x) == 1 else key(x)
except TypeError: # pragma: no cover
return key(x)
else:
try:
return _uni(x, key(x)) if len(x) == 1 else key(x)
except TypeError: # pragma: no cover
return key(x) | python | def fast_float(
x,
key=lambda x: x,
nan=None,
_uni=unicodedata.numeric,
_nan_inf=NAN_INF,
_first_char=POTENTIAL_FIRST_CHAR,
):
"""
Convert a string to a float quickly, return input as-is if not possible.
We don't need to accept all input that the real fast_int accepts because
natsort is controlling what is passed to this function.
Parameters
----------
x : str
String to attempt to convert to a float.
key : callable
Single-argument function to apply to *x* if conversion fails.
nan : object
Value to return instead of NaN if NaN would be returned.
Returns
-------
*str* or *float*
"""
if x[0] in _first_char or x.lstrip()[:3] in _nan_inf:
try:
x = float(x)
return nan if nan is not None and x != x else x
except ValueError:
try:
return _uni(x, key(x)) if len(x) == 1 else key(x)
except TypeError: # pragma: no cover
return key(x)
else:
try:
return _uni(x, key(x)) if len(x) == 1 else key(x)
except TypeError: # pragma: no cover
return key(x) | [
"def",
"fast_float",
"(",
"x",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
",",
"nan",
"=",
"None",
",",
"_uni",
"=",
"unicodedata",
".",
"numeric",
",",
"_nan_inf",
"=",
"NAN_INF",
",",
"_first_char",
"=",
"POTENTIAL_FIRST_CHAR",
",",
")",
":",
"if",
... | Convert a string to a float quickly, return input as-is if not possible.
We don't need to accept all input that the real fast_int accepts because
natsort is controlling what is passed to this function.
Parameters
----------
x : str
String to attempt to convert to a float.
key : callable
Single-argument function to apply to *x* if conversion fails.
nan : object
Value to return instead of NaN if NaN would be returned.
Returns
-------
*str* or *float* | [
"Convert",
"a",
"string",
"to",
"a",
"float",
"quickly",
"return",
"input",
"as",
"-",
"is",
"if",
"not",
"possible",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/compat/fake_fastnumbers.py#L44-L85 | train | 34,901 |
SethMMorton/natsort | natsort/compat/fake_fastnumbers.py | fast_int | def fast_int(
x,
key=lambda x: x,
_uni=unicodedata.digit,
_first_char=POTENTIAL_FIRST_CHAR,
):
"""
Convert a string to a int quickly, return input as-is if not possible.
We don't need to accept all input that the real fast_int accepts because
natsort is controlling what is passed to this function.
Parameters
----------
x : str
String to attempt to convert to an int.
key : callable
Single-argument function to apply to *x* if conversion fails.
Returns
-------
*str* or *int*
"""
if x[0] in _first_char:
try:
return long(x)
except ValueError:
try:
return _uni(x, key(x)) if len(x) == 1 else key(x)
except TypeError: # pragma: no cover
return key(x)
else:
try:
return _uni(x, key(x)) if len(x) == 1 else key(x)
except TypeError: # pragma: no cover
return key(x) | python | def fast_int(
x,
key=lambda x: x,
_uni=unicodedata.digit,
_first_char=POTENTIAL_FIRST_CHAR,
):
"""
Convert a string to a int quickly, return input as-is if not possible.
We don't need to accept all input that the real fast_int accepts because
natsort is controlling what is passed to this function.
Parameters
----------
x : str
String to attempt to convert to an int.
key : callable
Single-argument function to apply to *x* if conversion fails.
Returns
-------
*str* or *int*
"""
if x[0] in _first_char:
try:
return long(x)
except ValueError:
try:
return _uni(x, key(x)) if len(x) == 1 else key(x)
except TypeError: # pragma: no cover
return key(x)
else:
try:
return _uni(x, key(x)) if len(x) == 1 else key(x)
except TypeError: # pragma: no cover
return key(x) | [
"def",
"fast_int",
"(",
"x",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
",",
"_uni",
"=",
"unicodedata",
".",
"digit",
",",
"_first_char",
"=",
"POTENTIAL_FIRST_CHAR",
",",
")",
":",
"if",
"x",
"[",
"0",
"]",
"in",
"_first_char",
":",
"try",
":",
"r... | Convert a string to a int quickly, return input as-is if not possible.
We don't need to accept all input that the real fast_int accepts because
natsort is controlling what is passed to this function.
Parameters
----------
x : str
String to attempt to convert to an int.
key : callable
Single-argument function to apply to *x* if conversion fails.
Returns
-------
*str* or *int* | [
"Convert",
"a",
"string",
"to",
"a",
"int",
"quickly",
"return",
"input",
"as",
"-",
"is",
"if",
"not",
"possible",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/compat/fake_fastnumbers.py#L89-L125 | train | 34,902 |
SethMMorton/natsort | natsort/__main__.py | check_filters | def check_filters(filters):
"""
Execute range_check for every element of an iterable.
Parameters
----------
filters : iterable
The collection of filters to check. Each element
must be a two-element tuple of floats or ints.
Returns
-------
The input as-is, or None if it evaluates to False.
Raises
------
ValueError
Low is greater than or equal to high for any element.
"""
if not filters:
return None
try:
return [range_check(f[0], f[1]) for f in filters]
except ValueError as err:
raise ValueError("Error in --filter: " + py23_str(err)) | python | def check_filters(filters):
"""
Execute range_check for every element of an iterable.
Parameters
----------
filters : iterable
The collection of filters to check. Each element
must be a two-element tuple of floats or ints.
Returns
-------
The input as-is, or None if it evaluates to False.
Raises
------
ValueError
Low is greater than or equal to high for any element.
"""
if not filters:
return None
try:
return [range_check(f[0], f[1]) for f in filters]
except ValueError as err:
raise ValueError("Error in --filter: " + py23_str(err)) | [
"def",
"check_filters",
"(",
"filters",
")",
":",
"if",
"not",
"filters",
":",
"return",
"None",
"try",
":",
"return",
"[",
"range_check",
"(",
"f",
"[",
"0",
"]",
",",
"f",
"[",
"1",
"]",
")",
"for",
"f",
"in",
"filters",
"]",
"except",
"ValueErro... | Execute range_check for every element of an iterable.
Parameters
----------
filters : iterable
The collection of filters to check. Each element
must be a two-element tuple of floats or ints.
Returns
-------
The input as-is, or None if it evaluates to False.
Raises
------
ValueError
Low is greater than or equal to high for any element. | [
"Execute",
"range_check",
"for",
"every",
"element",
"of",
"an",
"iterable",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/__main__.py#L169-L194 | train | 34,903 |
SethMMorton/natsort | natsort/__main__.py | keep_entry_range | def keep_entry_range(entry, lows, highs, converter, regex):
"""
Check if an entry falls into a desired range.
Every number in the entry will be extracted using *regex*,
if any are within a given low to high range the entry will
be kept.
Parameters
----------
entry : str
lows : iterable
Collection of low values against which to compare the entry.
highs : iterable
Collection of high values against which to compare the entry.
converter : callable
Function to convert a string to a number.
regex : regex object
Regular expression to locate numbers in a string.
Returns
-------
True if the entry should be kept, False otherwise.
"""
return any(
low <= converter(num) <= high
for num in regex.findall(entry)
for low, high in zip(lows, highs)
) | python | def keep_entry_range(entry, lows, highs, converter, regex):
"""
Check if an entry falls into a desired range.
Every number in the entry will be extracted using *regex*,
if any are within a given low to high range the entry will
be kept.
Parameters
----------
entry : str
lows : iterable
Collection of low values against which to compare the entry.
highs : iterable
Collection of high values against which to compare the entry.
converter : callable
Function to convert a string to a number.
regex : regex object
Regular expression to locate numbers in a string.
Returns
-------
True if the entry should be kept, False otherwise.
"""
return any(
low <= converter(num) <= high
for num in regex.findall(entry)
for low, high in zip(lows, highs)
) | [
"def",
"keep_entry_range",
"(",
"entry",
",",
"lows",
",",
"highs",
",",
"converter",
",",
"regex",
")",
":",
"return",
"any",
"(",
"low",
"<=",
"converter",
"(",
"num",
")",
"<=",
"high",
"for",
"num",
"in",
"regex",
".",
"findall",
"(",
"entry",
")... | Check if an entry falls into a desired range.
Every number in the entry will be extracted using *regex*,
if any are within a given low to high range the entry will
be kept.
Parameters
----------
entry : str
lows : iterable
Collection of low values against which to compare the entry.
highs : iterable
Collection of high values against which to compare the entry.
converter : callable
Function to convert a string to a number.
regex : regex object
Regular expression to locate numbers in a string.
Returns
-------
True if the entry should be kept, False otherwise. | [
"Check",
"if",
"an",
"entry",
"falls",
"into",
"a",
"desired",
"range",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/__main__.py#L197-L226 | train | 34,904 |
SethMMorton/natsort | natsort/__main__.py | keep_entry_value | def keep_entry_value(entry, values, converter, regex):
"""
Check if an entry does not match a given value.
Every number in the entry will be extracted using *regex*,
if any match a given value the entry will not be kept.
Parameters
----------
entry : str
values : iterable
Collection of values against which to compare the entry.
converter : callable
Function to convert a string to a number.
regex : regex object
Regular expression to locate numbers in a string.
Returns
-------
True if the entry should be kept, False otherwise.
"""
return not any(converter(num) in values for num in regex.findall(entry)) | python | def keep_entry_value(entry, values, converter, regex):
"""
Check if an entry does not match a given value.
Every number in the entry will be extracted using *regex*,
if any match a given value the entry will not be kept.
Parameters
----------
entry : str
values : iterable
Collection of values against which to compare the entry.
converter : callable
Function to convert a string to a number.
regex : regex object
Regular expression to locate numbers in a string.
Returns
-------
True if the entry should be kept, False otherwise.
"""
return not any(converter(num) in values for num in regex.findall(entry)) | [
"def",
"keep_entry_value",
"(",
"entry",
",",
"values",
",",
"converter",
",",
"regex",
")",
":",
"return",
"not",
"any",
"(",
"converter",
"(",
"num",
")",
"in",
"values",
"for",
"num",
"in",
"regex",
".",
"findall",
"(",
"entry",
")",
")"
] | Check if an entry does not match a given value.
Every number in the entry will be extracted using *regex*,
if any match a given value the entry will not be kept.
Parameters
----------
entry : str
values : iterable
Collection of values against which to compare the entry.
converter : callable
Function to convert a string to a number.
regex : regex object
Regular expression to locate numbers in a string.
Returns
-------
True if the entry should be kept, False otherwise. | [
"Check",
"if",
"an",
"entry",
"does",
"not",
"match",
"a",
"given",
"value",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/__main__.py#L229-L251 | train | 34,905 |
SethMMorton/natsort | natsort/__main__.py | sort_and_print_entries | def sort_and_print_entries(entries, args):
"""Sort the entries, applying the filters first if necessary."""
# Extract the proper number type.
is_float = args.number_type in ("float", "real", "f", "r")
signed = args.signed or args.number_type in ("real", "r")
alg = (
natsort.ns.FLOAT * is_float
| natsort.ns.SIGNED * signed
| natsort.ns.NOEXP * (not args.exp)
| natsort.ns.PATH * args.paths
| natsort.ns.LOCALE * args.locale
)
# Pre-remove entries that don't pass the filtering criteria
# Make sure we use the same searching algorithm for filtering
# as for sorting.
do_filter = args.filter is not None or args.reverse_filter is not None
if do_filter or args.exclude:
inp_options = (
natsort.ns.FLOAT * is_float
| natsort.ns.SIGNED * signed
| natsort.ns.NOEXP * (not args.exp)
)
regex = regex_chooser(inp_options)
if args.filter is not None:
lows, highs = ([f[0] for f in args.filter], [f[1] for f in args.filter])
entries = [
entry
for entry in entries
if keep_entry_range(entry, lows, highs, float, regex)
]
if args.reverse_filter is not None:
lows, highs = (
[f[0] for f in args.reverse_filter],
[f[1] for f in args.reverse_filter],
)
entries = [
entry
for entry in entries
if not keep_entry_range(entry, lows, highs, float, regex)
]
if args.exclude:
exclude = set(args.exclude)
entries = [
entry
for entry in entries
if keep_entry_value(entry, exclude, float, regex)
]
# Print off the sorted results
for entry in natsort.natsorted(entries, reverse=args.reverse, alg=alg):
print(entry) | python | def sort_and_print_entries(entries, args):
"""Sort the entries, applying the filters first if necessary."""
# Extract the proper number type.
is_float = args.number_type in ("float", "real", "f", "r")
signed = args.signed or args.number_type in ("real", "r")
alg = (
natsort.ns.FLOAT * is_float
| natsort.ns.SIGNED * signed
| natsort.ns.NOEXP * (not args.exp)
| natsort.ns.PATH * args.paths
| natsort.ns.LOCALE * args.locale
)
# Pre-remove entries that don't pass the filtering criteria
# Make sure we use the same searching algorithm for filtering
# as for sorting.
do_filter = args.filter is not None or args.reverse_filter is not None
if do_filter or args.exclude:
inp_options = (
natsort.ns.FLOAT * is_float
| natsort.ns.SIGNED * signed
| natsort.ns.NOEXP * (not args.exp)
)
regex = regex_chooser(inp_options)
if args.filter is not None:
lows, highs = ([f[0] for f in args.filter], [f[1] for f in args.filter])
entries = [
entry
for entry in entries
if keep_entry_range(entry, lows, highs, float, regex)
]
if args.reverse_filter is not None:
lows, highs = (
[f[0] for f in args.reverse_filter],
[f[1] for f in args.reverse_filter],
)
entries = [
entry
for entry in entries
if not keep_entry_range(entry, lows, highs, float, regex)
]
if args.exclude:
exclude = set(args.exclude)
entries = [
entry
for entry in entries
if keep_entry_value(entry, exclude, float, regex)
]
# Print off the sorted results
for entry in natsort.natsorted(entries, reverse=args.reverse, alg=alg):
print(entry) | [
"def",
"sort_and_print_entries",
"(",
"entries",
",",
"args",
")",
":",
"# Extract the proper number type.",
"is_float",
"=",
"args",
".",
"number_type",
"in",
"(",
"\"float\"",
",",
"\"real\"",
",",
"\"f\"",
",",
"\"r\"",
")",
"signed",
"=",
"args",
".",
"sig... | Sort the entries, applying the filters first if necessary. | [
"Sort",
"the",
"entries",
"applying",
"the",
"filters",
"first",
"if",
"necessary",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/__main__.py#L254-L306 | train | 34,906 |
SethMMorton/natsort | natsort/utils.py | regex_chooser | def regex_chooser(alg):
"""
Select an appropriate regex for the type of number of interest.
Parameters
----------
alg : ns enum
Used to indicate the regular expression to select.
Returns
-------
regex : compiled regex object
Regular expression object that matches the desired number type.
"""
if alg & ns.FLOAT:
alg &= ns.FLOAT | ns.SIGNED | ns.NOEXP
else:
alg &= ns.INT | ns.SIGNED
return {
ns.INT: NumericalRegularExpressions.int_nosign(),
ns.FLOAT: NumericalRegularExpressions.float_nosign_exp(),
ns.INT | ns.SIGNED: NumericalRegularExpressions.int_sign(),
ns.FLOAT | ns.SIGNED: NumericalRegularExpressions.float_sign_exp(),
ns.FLOAT | ns.NOEXP: NumericalRegularExpressions.float_nosign_noexp(),
ns.FLOAT | ns.SIGNED | ns.NOEXP: NumericalRegularExpressions.float_sign_noexp(),
}[alg] | python | def regex_chooser(alg):
"""
Select an appropriate regex for the type of number of interest.
Parameters
----------
alg : ns enum
Used to indicate the regular expression to select.
Returns
-------
regex : compiled regex object
Regular expression object that matches the desired number type.
"""
if alg & ns.FLOAT:
alg &= ns.FLOAT | ns.SIGNED | ns.NOEXP
else:
alg &= ns.INT | ns.SIGNED
return {
ns.INT: NumericalRegularExpressions.int_nosign(),
ns.FLOAT: NumericalRegularExpressions.float_nosign_exp(),
ns.INT | ns.SIGNED: NumericalRegularExpressions.int_sign(),
ns.FLOAT | ns.SIGNED: NumericalRegularExpressions.float_sign_exp(),
ns.FLOAT | ns.NOEXP: NumericalRegularExpressions.float_nosign_noexp(),
ns.FLOAT | ns.SIGNED | ns.NOEXP: NumericalRegularExpressions.float_sign_noexp(),
}[alg] | [
"def",
"regex_chooser",
"(",
"alg",
")",
":",
"if",
"alg",
"&",
"ns",
".",
"FLOAT",
":",
"alg",
"&=",
"ns",
".",
"FLOAT",
"|",
"ns",
".",
"SIGNED",
"|",
"ns",
".",
"NOEXP",
"else",
":",
"alg",
"&=",
"ns",
".",
"INT",
"|",
"ns",
".",
"SIGNED",
... | Select an appropriate regex for the type of number of interest.
Parameters
----------
alg : ns enum
Used to indicate the regular expression to select.
Returns
-------
regex : compiled regex object
Regular expression object that matches the desired number type. | [
"Select",
"an",
"appropriate",
"regex",
"for",
"the",
"type",
"of",
"number",
"of",
"interest",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L127-L154 | train | 34,907 |
SethMMorton/natsort | natsort/utils.py | _normalize_input_factory | def _normalize_input_factory(alg):
"""
Create a function that will normalize unicode input data.
Parameters
----------
alg : ns enum
Used to indicate how to normalize unicode.
Returns
-------
func : callable
A function that accepts string (unicode) input and returns the
the input normalized with the desired normalization scheme.
"""
normalization_form = "NFKD" if alg & ns.COMPATIBILITYNORMALIZE else "NFD"
wrapped = partial(normalize, normalization_form)
if NEWPY:
return wrapped
else:
return lambda x, _f=wrapped: _f(x) if isinstance(x, py23_str) else x | python | def _normalize_input_factory(alg):
"""
Create a function that will normalize unicode input data.
Parameters
----------
alg : ns enum
Used to indicate how to normalize unicode.
Returns
-------
func : callable
A function that accepts string (unicode) input and returns the
the input normalized with the desired normalization scheme.
"""
normalization_form = "NFKD" if alg & ns.COMPATIBILITYNORMALIZE else "NFD"
wrapped = partial(normalize, normalization_form)
if NEWPY:
return wrapped
else:
return lambda x, _f=wrapped: _f(x) if isinstance(x, py23_str) else x | [
"def",
"_normalize_input_factory",
"(",
"alg",
")",
":",
"normalization_form",
"=",
"\"NFKD\"",
"if",
"alg",
"&",
"ns",
".",
"COMPATIBILITYNORMALIZE",
"else",
"\"NFD\"",
"wrapped",
"=",
"partial",
"(",
"normalize",
",",
"normalization_form",
")",
"if",
"NEWPY",
... | Create a function that will normalize unicode input data.
Parameters
----------
alg : ns enum
Used to indicate how to normalize unicode.
Returns
-------
func : callable
A function that accepts string (unicode) input and returns the
the input normalized with the desired normalization scheme. | [
"Create",
"a",
"function",
"that",
"will",
"normalize",
"unicode",
"input",
"data",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L162-L183 | train | 34,908 |
SethMMorton/natsort | natsort/utils.py | natsort_key | def natsort_key(val, key, string_func, bytes_func, num_func):
"""
Key to sort strings and numbers naturally.
It works by splitting the string into components of strings and numbers,
and then converting the numbers into actual ints or floats.
Parameters
----------
val : str | unicode | bytes | int | float | iterable
key : callable | None
A key to apply to the *val* before any other operations are performed.
string_func : callable
If *val* (or the output of *key* if given) is of type *str*, this
function will be applied to it. The function must return
a tuple.
bytes_func : callable
If *val* (or the output of *key* if given) is of type *bytes*, this
function will be applied to it. The function must return
a tuple.
num_func : callable
If *val* (or the output of *key* if given) is not of type *bytes*,
*str*, nor is iterable, this function will be applied to it.
The function must return a tuple.
Returns
-------
out : tuple
The string split into its string and numeric components.
It *always* starts with a string, and then alternates
between numbers and strings (unless it was applied
recursively, in which case it will return tuples of tuples,
but the lowest-level tuples will then *always* start with
a string etc.).
See Also
--------
parse_string_factory
parse_bytes_factory
parse_number_factory
"""
# Apply key if needed
if key is not None:
val = key(val)
# Assume the input are strings, which is the most common case
try:
return string_func(val)
except (TypeError, AttributeError):
# If bytes type, use the bytes_func
if type(val) in (bytes,):
return bytes_func(val)
# Otherwise, assume it is an iterable that must be parsed recursively.
# Do not apply the key recursively.
try:
return tuple(
natsort_key(x, None, string_func, bytes_func, num_func) for x in val
)
# If that failed, it must be a number.
except TypeError:
return num_func(val) | python | def natsort_key(val, key, string_func, bytes_func, num_func):
"""
Key to sort strings and numbers naturally.
It works by splitting the string into components of strings and numbers,
and then converting the numbers into actual ints or floats.
Parameters
----------
val : str | unicode | bytes | int | float | iterable
key : callable | None
A key to apply to the *val* before any other operations are performed.
string_func : callable
If *val* (or the output of *key* if given) is of type *str*, this
function will be applied to it. The function must return
a tuple.
bytes_func : callable
If *val* (or the output of *key* if given) is of type *bytes*, this
function will be applied to it. The function must return
a tuple.
num_func : callable
If *val* (or the output of *key* if given) is not of type *bytes*,
*str*, nor is iterable, this function will be applied to it.
The function must return a tuple.
Returns
-------
out : tuple
The string split into its string and numeric components.
It *always* starts with a string, and then alternates
between numbers and strings (unless it was applied
recursively, in which case it will return tuples of tuples,
but the lowest-level tuples will then *always* start with
a string etc.).
See Also
--------
parse_string_factory
parse_bytes_factory
parse_number_factory
"""
# Apply key if needed
if key is not None:
val = key(val)
# Assume the input are strings, which is the most common case
try:
return string_func(val)
except (TypeError, AttributeError):
# If bytes type, use the bytes_func
if type(val) in (bytes,):
return bytes_func(val)
# Otherwise, assume it is an iterable that must be parsed recursively.
# Do not apply the key recursively.
try:
return tuple(
natsort_key(x, None, string_func, bytes_func, num_func) for x in val
)
# If that failed, it must be a number.
except TypeError:
return num_func(val) | [
"def",
"natsort_key",
"(",
"val",
",",
"key",
",",
"string_func",
",",
"bytes_func",
",",
"num_func",
")",
":",
"# Apply key if needed",
"if",
"key",
"is",
"not",
"None",
":",
"val",
"=",
"key",
"(",
"val",
")",
"# Assume the input are strings, which is the most... | Key to sort strings and numbers naturally.
It works by splitting the string into components of strings and numbers,
and then converting the numbers into actual ints or floats.
Parameters
----------
val : str | unicode | bytes | int | float | iterable
key : callable | None
A key to apply to the *val* before any other operations are performed.
string_func : callable
If *val* (or the output of *key* if given) is of type *str*, this
function will be applied to it. The function must return
a tuple.
bytes_func : callable
If *val* (or the output of *key* if given) is of type *bytes*, this
function will be applied to it. The function must return
a tuple.
num_func : callable
If *val* (or the output of *key* if given) is not of type *bytes*,
*str*, nor is iterable, this function will be applied to it.
The function must return a tuple.
Returns
-------
out : tuple
The string split into its string and numeric components.
It *always* starts with a string, and then alternates
between numbers and strings (unless it was applied
recursively, in which case it will return tuples of tuples,
but the lowest-level tuples will then *always* start with
a string etc.).
See Also
--------
parse_string_factory
parse_bytes_factory
parse_number_factory | [
"Key",
"to",
"sort",
"strings",
"and",
"numbers",
"naturally",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L186-L251 | train | 34,909 |
SethMMorton/natsort | natsort/utils.py | parse_number_factory | def parse_number_factory(alg, sep, pre_sep):
"""
Create a function that will format a number into a tuple.
Parameters
----------
alg : ns enum
Indicate how to format the *bytes*.
sep : str
The string character to be inserted before the number
in the returned tuple.
pre_sep : str
In the event that *alg* contains ``UNGROUPLETTERS``, this
string will be placed in a single-element tuple at the front
of the returned nested tuple.
Returns
-------
func : callable
A function that accepts numeric input (e.g. *int* or *float*)
and returns a tuple containing the number with the leading string
*sep*. Intended to be used as the *num_func* argument to
*natsort_key*.
See Also
--------
natsort_key
"""
nan_replace = float("+inf") if alg & ns.NANLAST else float("-inf")
def func(val, _nan_replace=nan_replace, _sep=sep):
"""Given a number, place it in a tuple with a leading null string."""
return _sep, _nan_replace if val != val else val
# Return the function, possibly wrapping in tuple if PATH is selected.
if alg & ns.PATH and alg & ns.UNGROUPLETTERS and alg & ns.LOCALEALPHA:
return lambda x: (((pre_sep,), func(x)),)
elif alg & ns.UNGROUPLETTERS and alg & ns.LOCALEALPHA:
return lambda x: ((pre_sep,), func(x))
elif alg & ns.PATH:
return lambda x: (func(x),)
else:
return func | python | def parse_number_factory(alg, sep, pre_sep):
"""
Create a function that will format a number into a tuple.
Parameters
----------
alg : ns enum
Indicate how to format the *bytes*.
sep : str
The string character to be inserted before the number
in the returned tuple.
pre_sep : str
In the event that *alg* contains ``UNGROUPLETTERS``, this
string will be placed in a single-element tuple at the front
of the returned nested tuple.
Returns
-------
func : callable
A function that accepts numeric input (e.g. *int* or *float*)
and returns a tuple containing the number with the leading string
*sep*. Intended to be used as the *num_func* argument to
*natsort_key*.
See Also
--------
natsort_key
"""
nan_replace = float("+inf") if alg & ns.NANLAST else float("-inf")
def func(val, _nan_replace=nan_replace, _sep=sep):
"""Given a number, place it in a tuple with a leading null string."""
return _sep, _nan_replace if val != val else val
# Return the function, possibly wrapping in tuple if PATH is selected.
if alg & ns.PATH and alg & ns.UNGROUPLETTERS and alg & ns.LOCALEALPHA:
return lambda x: (((pre_sep,), func(x)),)
elif alg & ns.UNGROUPLETTERS and alg & ns.LOCALEALPHA:
return lambda x: ((pre_sep,), func(x))
elif alg & ns.PATH:
return lambda x: (func(x),)
else:
return func | [
"def",
"parse_number_factory",
"(",
"alg",
",",
"sep",
",",
"pre_sep",
")",
":",
"nan_replace",
"=",
"float",
"(",
"\"+inf\"",
")",
"if",
"alg",
"&",
"ns",
".",
"NANLAST",
"else",
"float",
"(",
"\"-inf\"",
")",
"def",
"func",
"(",
"val",
",",
"_nan_rep... | Create a function that will format a number into a tuple.
Parameters
----------
alg : ns enum
Indicate how to format the *bytes*.
sep : str
The string character to be inserted before the number
in the returned tuple.
pre_sep : str
In the event that *alg* contains ``UNGROUPLETTERS``, this
string will be placed in a single-element tuple at the front
of the returned nested tuple.
Returns
-------
func : callable
A function that accepts numeric input (e.g. *int* or *float*)
and returns a tuple containing the number with the leading string
*sep*. Intended to be used as the *num_func* argument to
*natsort_key*.
See Also
--------
natsort_key | [
"Create",
"a",
"function",
"that",
"will",
"format",
"a",
"number",
"into",
"a",
"tuple",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L287-L330 | train | 34,910 |
SethMMorton/natsort | natsort/utils.py | sep_inserter | def sep_inserter(iterable, sep):
"""
Insert '' between numbers in an iterable.
Parameters
----------
iterable
sep : str
The string character to be inserted between adjacent numeric objects.
Yields
------
The values of *iterable* in order, with *sep* inserted where adjacent
elements are numeric. If the first element in the input is numeric
then *sep* will be the first value yielded.
"""
try:
# Get the first element. A StopIteration indicates an empty iterable.
# Since we are controlling the types of the input, 'type' is used
# instead of 'isinstance' for the small speed advantage it offers.
types = (int, float, long)
first = next(iterable)
if type(first) in types:
yield sep
yield first
# Now, check if pair of elements are both numbers. If so, add ''.
second = next(iterable)
if type(first) in types and type(second) in types:
yield sep
yield second
# Now repeat in a loop.
for x in iterable:
first, second = second, x
if type(first) in types and type(second) in types:
yield sep
yield second
except StopIteration:
# Catch StopIteration per deprecation in PEP 479:
# "Change StopIteration handling inside generators"
return | python | def sep_inserter(iterable, sep):
"""
Insert '' between numbers in an iterable.
Parameters
----------
iterable
sep : str
The string character to be inserted between adjacent numeric objects.
Yields
------
The values of *iterable* in order, with *sep* inserted where adjacent
elements are numeric. If the first element in the input is numeric
then *sep* will be the first value yielded.
"""
try:
# Get the first element. A StopIteration indicates an empty iterable.
# Since we are controlling the types of the input, 'type' is used
# instead of 'isinstance' for the small speed advantage it offers.
types = (int, float, long)
first = next(iterable)
if type(first) in types:
yield sep
yield first
# Now, check if pair of elements are both numbers. If so, add ''.
second = next(iterable)
if type(first) in types and type(second) in types:
yield sep
yield second
# Now repeat in a loop.
for x in iterable:
first, second = second, x
if type(first) in types and type(second) in types:
yield sep
yield second
except StopIteration:
# Catch StopIteration per deprecation in PEP 479:
# "Change StopIteration handling inside generators"
return | [
"def",
"sep_inserter",
"(",
"iterable",
",",
"sep",
")",
":",
"try",
":",
"# Get the first element. A StopIteration indicates an empty iterable.",
"# Since we are controlling the types of the input, 'type' is used",
"# instead of 'isinstance' for the small speed advantage it offers.",
"typ... | Insert '' between numbers in an iterable.
Parameters
----------
iterable
sep : str
The string character to be inserted between adjacent numeric objects.
Yields
------
The values of *iterable* in order, with *sep* inserted where adjacent
elements are numeric. If the first element in the input is numeric
then *sep* will be the first value yielded. | [
"Insert",
"between",
"numbers",
"in",
"an",
"iterable",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L428-L470 | train | 34,911 |
SethMMorton/natsort | natsort/utils.py | input_string_transform_factory | def input_string_transform_factory(alg):
"""
Create a function to transform a string.
Parameters
----------
alg : ns enum
Indicate how to format the *str*.
Returns
-------
func : callable
A function to be used as the *input_transform* argument to
*parse_string_factory*.
See Also
--------
parse_string_factory
"""
# Shortcuts.
lowfirst = alg & ns.LOWERCASEFIRST
dumb = alg & NS_DUMB
# Build the chain of functions to execute in order.
function_chain = []
if (dumb and not lowfirst) or (lowfirst and not dumb):
function_chain.append(methodcaller("swapcase"))
if alg & ns.IGNORECASE:
if NEWPY:
function_chain.append(methodcaller("casefold"))
else:
function_chain.append(methodcaller("lower"))
if alg & ns.LOCALENUM:
# Create a regular expression that will remove thousands separators.
strip_thousands = r"""
(?<=[0-9]{{1}}) # At least 1 number
(?<![0-9]{{4}}) # No more than 3 numbers
{nodecimal} # Cannot follow decimal
{thou} # The thousands separator
(?=[0-9]{{3}} # Three numbers must follow
([^0-9]|$) # But a non-number after that
)
"""
nodecimal = r""
if alg & ns.FLOAT:
# Make a regular expression component that will ensure no
# separators are removed after a decimal point.
d = get_decimal_point()
d = r"\." if d == r"." else d
nodecimal += r"(?<!" + d + r"[0-9])"
nodecimal += r"(?<!" + d + r"[0-9]{2})"
nodecimal += r"(?<!" + d + r"[0-9]{3})"
strip_thousands = strip_thousands.format(
thou=get_thousands_sep(), nodecimal=nodecimal
)
strip_thousands = re.compile(strip_thousands, flags=re.VERBOSE)
function_chain.append(partial(strip_thousands.sub, ""))
# Create a regular expression that will change the decimal point to
# a period if not already a period.
decimal = get_decimal_point()
if alg & ns.FLOAT and decimal != ".":
switch_decimal = r"(?<=[0-9]){decimal}|{decimal}(?=[0-9])"
switch_decimal = switch_decimal.format(decimal=decimal)
switch_decimal = re.compile(switch_decimal)
function_chain.append(partial(switch_decimal.sub, "."))
# Return the chained functions.
return chain_functions(function_chain) | python | def input_string_transform_factory(alg):
"""
Create a function to transform a string.
Parameters
----------
alg : ns enum
Indicate how to format the *str*.
Returns
-------
func : callable
A function to be used as the *input_transform* argument to
*parse_string_factory*.
See Also
--------
parse_string_factory
"""
# Shortcuts.
lowfirst = alg & ns.LOWERCASEFIRST
dumb = alg & NS_DUMB
# Build the chain of functions to execute in order.
function_chain = []
if (dumb and not lowfirst) or (lowfirst and not dumb):
function_chain.append(methodcaller("swapcase"))
if alg & ns.IGNORECASE:
if NEWPY:
function_chain.append(methodcaller("casefold"))
else:
function_chain.append(methodcaller("lower"))
if alg & ns.LOCALENUM:
# Create a regular expression that will remove thousands separators.
strip_thousands = r"""
(?<=[0-9]{{1}}) # At least 1 number
(?<![0-9]{{4}}) # No more than 3 numbers
{nodecimal} # Cannot follow decimal
{thou} # The thousands separator
(?=[0-9]{{3}} # Three numbers must follow
([^0-9]|$) # But a non-number after that
)
"""
nodecimal = r""
if alg & ns.FLOAT:
# Make a regular expression component that will ensure no
# separators are removed after a decimal point.
d = get_decimal_point()
d = r"\." if d == r"." else d
nodecimal += r"(?<!" + d + r"[0-9])"
nodecimal += r"(?<!" + d + r"[0-9]{2})"
nodecimal += r"(?<!" + d + r"[0-9]{3})"
strip_thousands = strip_thousands.format(
thou=get_thousands_sep(), nodecimal=nodecimal
)
strip_thousands = re.compile(strip_thousands, flags=re.VERBOSE)
function_chain.append(partial(strip_thousands.sub, ""))
# Create a regular expression that will change the decimal point to
# a period if not already a period.
decimal = get_decimal_point()
if alg & ns.FLOAT and decimal != ".":
switch_decimal = r"(?<=[0-9]){decimal}|{decimal}(?=[0-9])"
switch_decimal = switch_decimal.format(decimal=decimal)
switch_decimal = re.compile(switch_decimal)
function_chain.append(partial(switch_decimal.sub, "."))
# Return the chained functions.
return chain_functions(function_chain) | [
"def",
"input_string_transform_factory",
"(",
"alg",
")",
":",
"# Shortcuts.",
"lowfirst",
"=",
"alg",
"&",
"ns",
".",
"LOWERCASEFIRST",
"dumb",
"=",
"alg",
"&",
"NS_DUMB",
"# Build the chain of functions to execute in order.",
"function_chain",
"=",
"[",
"]",
"if",
... | Create a function to transform a string.
Parameters
----------
alg : ns enum
Indicate how to format the *str*.
Returns
-------
func : callable
A function to be used as the *input_transform* argument to
*parse_string_factory*.
See Also
--------
parse_string_factory | [
"Create",
"a",
"function",
"to",
"transform",
"a",
"string",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L473-L544 | train | 34,912 |
SethMMorton/natsort | natsort/utils.py | string_component_transform_factory | def string_component_transform_factory(alg):
"""
Create a function to either transform a string or convert to a number.
Parameters
----------
alg : ns enum
Indicate how to format the *str*.
Returns
-------
func : callable
A function to be used as the *component_transform* argument to
*parse_string_factory*.
See Also
--------
parse_string_factory
"""
# Shortcuts.
use_locale = alg & ns.LOCALEALPHA
dumb = alg & NS_DUMB
group_letters = (alg & ns.GROUPLETTERS) or (use_locale and dumb)
nan_val = float("+inf") if alg & ns.NANLAST else float("-inf")
# Build the chain of functions to execute in order.
func_chain = []
if group_letters:
func_chain.append(groupletters)
if use_locale:
func_chain.append(get_strxfrm())
kwargs = {"key": chain_functions(func_chain)} if func_chain else {}
# Return the correct chained functions.
if alg & ns.FLOAT:
# noinspection PyTypeChecker
kwargs["nan"] = nan_val
return partial(fast_float, **kwargs)
else:
return partial(fast_int, **kwargs) | python | def string_component_transform_factory(alg):
"""
Create a function to either transform a string or convert to a number.
Parameters
----------
alg : ns enum
Indicate how to format the *str*.
Returns
-------
func : callable
A function to be used as the *component_transform* argument to
*parse_string_factory*.
See Also
--------
parse_string_factory
"""
# Shortcuts.
use_locale = alg & ns.LOCALEALPHA
dumb = alg & NS_DUMB
group_letters = (alg & ns.GROUPLETTERS) or (use_locale and dumb)
nan_val = float("+inf") if alg & ns.NANLAST else float("-inf")
# Build the chain of functions to execute in order.
func_chain = []
if group_letters:
func_chain.append(groupletters)
if use_locale:
func_chain.append(get_strxfrm())
kwargs = {"key": chain_functions(func_chain)} if func_chain else {}
# Return the correct chained functions.
if alg & ns.FLOAT:
# noinspection PyTypeChecker
kwargs["nan"] = nan_val
return partial(fast_float, **kwargs)
else:
return partial(fast_int, **kwargs) | [
"def",
"string_component_transform_factory",
"(",
"alg",
")",
":",
"# Shortcuts.",
"use_locale",
"=",
"alg",
"&",
"ns",
".",
"LOCALEALPHA",
"dumb",
"=",
"alg",
"&",
"NS_DUMB",
"group_letters",
"=",
"(",
"alg",
"&",
"ns",
".",
"GROUPLETTERS",
")",
"or",
"(",
... | Create a function to either transform a string or convert to a number.
Parameters
----------
alg : ns enum
Indicate how to format the *str*.
Returns
-------
func : callable
A function to be used as the *component_transform* argument to
*parse_string_factory*.
See Also
--------
parse_string_factory | [
"Create",
"a",
"function",
"to",
"either",
"transform",
"a",
"string",
"or",
"convert",
"to",
"a",
"number",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L547-L587 | train | 34,913 |
SethMMorton/natsort | natsort/utils.py | final_data_transform_factory | def final_data_transform_factory(alg, sep, pre_sep):
"""
Create a function to transform a tuple.
Parameters
----------
alg : ns enum
Indicate how to format the *str*.
sep : str
Separator that was passed to *parse_string_factory*.
pre_sep : str
String separator to insert at the at the front
of the return tuple in the case that the first element
is *sep*.
Returns
-------
func : callable
A function to be used as the *final_transform* argument to
*parse_string_factory*.
See Also
--------
parse_string_factory
"""
if alg & ns.UNGROUPLETTERS and alg & ns.LOCALEALPHA:
swap = alg & NS_DUMB and alg & ns.LOWERCASEFIRST
transform = methodcaller("swapcase") if swap else _no_op
def func(split_val, val, _transform=transform, _sep=sep, _pre_sep=pre_sep):
"""
Return a tuple with the first character of the first element
of the return value as the first element, and the return value
as the second element. This will be used to perform gross sorting
by the first letter.
"""
split_val = tuple(split_val)
if not split_val:
return (), ()
elif split_val[0] == _sep:
return (_pre_sep,), split_val
else:
return (_transform(val[0]),), split_val
return func
else:
return lambda split_val, val: tuple(split_val) | python | def final_data_transform_factory(alg, sep, pre_sep):
"""
Create a function to transform a tuple.
Parameters
----------
alg : ns enum
Indicate how to format the *str*.
sep : str
Separator that was passed to *parse_string_factory*.
pre_sep : str
String separator to insert at the at the front
of the return tuple in the case that the first element
is *sep*.
Returns
-------
func : callable
A function to be used as the *final_transform* argument to
*parse_string_factory*.
See Also
--------
parse_string_factory
"""
if alg & ns.UNGROUPLETTERS and alg & ns.LOCALEALPHA:
swap = alg & NS_DUMB and alg & ns.LOWERCASEFIRST
transform = methodcaller("swapcase") if swap else _no_op
def func(split_val, val, _transform=transform, _sep=sep, _pre_sep=pre_sep):
"""
Return a tuple with the first character of the first element
of the return value as the first element, and the return value
as the second element. This will be used to perform gross sorting
by the first letter.
"""
split_val = tuple(split_val)
if not split_val:
return (), ()
elif split_val[0] == _sep:
return (_pre_sep,), split_val
else:
return (_transform(val[0]),), split_val
return func
else:
return lambda split_val, val: tuple(split_val) | [
"def",
"final_data_transform_factory",
"(",
"alg",
",",
"sep",
",",
"pre_sep",
")",
":",
"if",
"alg",
"&",
"ns",
".",
"UNGROUPLETTERS",
"and",
"alg",
"&",
"ns",
".",
"LOCALEALPHA",
":",
"swap",
"=",
"alg",
"&",
"NS_DUMB",
"and",
"alg",
"&",
"ns",
".",
... | Create a function to transform a tuple.
Parameters
----------
alg : ns enum
Indicate how to format the *str*.
sep : str
Separator that was passed to *parse_string_factory*.
pre_sep : str
String separator to insert at the at the front
of the return tuple in the case that the first element
is *sep*.
Returns
-------
func : callable
A function to be used as the *final_transform* argument to
*parse_string_factory*.
See Also
--------
parse_string_factory | [
"Create",
"a",
"function",
"to",
"transform",
"a",
"tuple",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L590-L637 | train | 34,914 |
SethMMorton/natsort | natsort/utils.py | groupletters | def groupletters(x, _low=lower_function):
"""
Double all characters, making doubled letters lowercase.
Parameters
----------
x : str
Returns
-------
str
Examples
--------
>>> groupletters("Apple")
{u}'aAppppllee'
"""
return "".join(ichain.from_iterable((_low(y), y) for y in x)) | python | def groupletters(x, _low=lower_function):
"""
Double all characters, making doubled letters lowercase.
Parameters
----------
x : str
Returns
-------
str
Examples
--------
>>> groupletters("Apple")
{u}'aAppppllee'
"""
return "".join(ichain.from_iterable((_low(y), y) for y in x)) | [
"def",
"groupletters",
"(",
"x",
",",
"_low",
"=",
"lower_function",
")",
":",
"return",
"\"\"",
".",
"join",
"(",
"ichain",
".",
"from_iterable",
"(",
"(",
"_low",
"(",
"y",
")",
",",
"y",
")",
"for",
"y",
"in",
"x",
")",
")"
] | Double all characters, making doubled letters lowercase.
Parameters
----------
x : str
Returns
-------
str
Examples
--------
>>> groupletters("Apple")
{u}'aAppppllee' | [
"Double",
"all",
"characters",
"making",
"doubled",
"letters",
"lowercase",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L645-L664 | train | 34,915 |
SethMMorton/natsort | natsort/utils.py | chain_functions | def chain_functions(functions):
"""
Chain a list of single-argument functions together and return.
The functions are applied in list order, and the output of the
previous functions is passed to the next function.
Parameters
----------
functions : list
A list of single-argument functions to chain together.
Returns
-------
func : callable
A single argument function.
Examples
--------
Chain several functions together!
>>> funcs = [lambda x: x * 4, len, lambda x: x + 5]
>>> func = chain_functions(funcs)
>>> func('hey')
17
"""
functions = list(functions)
if not functions:
return _no_op
elif len(functions) == 1:
return functions[0]
else:
# See https://stackoverflow.com/a/39123400/1399279
return partial(reduce, lambda res, f: f(res), functions) | python | def chain_functions(functions):
"""
Chain a list of single-argument functions together and return.
The functions are applied in list order, and the output of the
previous functions is passed to the next function.
Parameters
----------
functions : list
A list of single-argument functions to chain together.
Returns
-------
func : callable
A single argument function.
Examples
--------
Chain several functions together!
>>> funcs = [lambda x: x * 4, len, lambda x: x + 5]
>>> func = chain_functions(funcs)
>>> func('hey')
17
"""
functions = list(functions)
if not functions:
return _no_op
elif len(functions) == 1:
return functions[0]
else:
# See https://stackoverflow.com/a/39123400/1399279
return partial(reduce, lambda res, f: f(res), functions) | [
"def",
"chain_functions",
"(",
"functions",
")",
":",
"functions",
"=",
"list",
"(",
"functions",
")",
"if",
"not",
"functions",
":",
"return",
"_no_op",
"elif",
"len",
"(",
"functions",
")",
"==",
"1",
":",
"return",
"functions",
"[",
"0",
"]",
"else",
... | Chain a list of single-argument functions together and return.
The functions are applied in list order, and the output of the
previous functions is passed to the next function.
Parameters
----------
functions : list
A list of single-argument functions to chain together.
Returns
-------
func : callable
A single argument function.
Examples
--------
Chain several functions together!
>>> funcs = [lambda x: x * 4, len, lambda x: x + 5]
>>> func = chain_functions(funcs)
>>> func('hey')
17 | [
"Chain",
"a",
"list",
"of",
"single",
"-",
"argument",
"functions",
"together",
"and",
"return",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L667-L701 | train | 34,916 |
SethMMorton/natsort | natsort/utils.py | path_splitter | def path_splitter(s, _d_match=re.compile(r"\.\d").match):
"""
Split a string into its path components.
Assumes a string is a path or is path-like.
Parameters
----------
s : str | pathlib.Path
Returns
-------
split : tuple
The path split by directory components and extensions.
Examples
--------
>>> tuple(path_splitter("this/thing.ext"))
({u}'this', {u}'thing', {u}'.ext')
"""
if has_pathlib and isinstance(s, PurePath):
s = py23_str(s)
path_parts = deque()
p_appendleft = path_parts.appendleft
# Continue splitting the path from the back until we have reached
# '..' or '.', or until there is nothing left to split.
path_location = s
while path_location != os_curdir and path_location != os_pardir:
parent_path = path_location
path_location, child_path = path_split(parent_path)
if path_location == parent_path:
break
p_appendleft(child_path)
# This last append is the base path.
# Only append if the string is non-empty.
# Make sure the proper path separator for this OS is used
# no matter what was actually given.
if path_location:
p_appendleft(py23_str(os_sep))
# Now, split off the file extensions using a similar method to above.
# Continue splitting off file extensions until we reach a decimal number
# or there are no more extensions.
# We are not using built-in functionality of PathLib here because of
# the recursive splitting up to a decimal.
base = path_parts.pop()
base_parts = deque()
b_appendleft = base_parts.appendleft
while True:
front = base
base, ext = path_splitext(front)
if _d_match(ext) or not ext:
# Reset base to before the split if the split is invalid.
base = front
break
b_appendleft(ext)
b_appendleft(base)
# Return the split parent paths and then the split basename.
return ichain(path_parts, base_parts) | python | def path_splitter(s, _d_match=re.compile(r"\.\d").match):
"""
Split a string into its path components.
Assumes a string is a path or is path-like.
Parameters
----------
s : str | pathlib.Path
Returns
-------
split : tuple
The path split by directory components and extensions.
Examples
--------
>>> tuple(path_splitter("this/thing.ext"))
({u}'this', {u}'thing', {u}'.ext')
"""
if has_pathlib and isinstance(s, PurePath):
s = py23_str(s)
path_parts = deque()
p_appendleft = path_parts.appendleft
# Continue splitting the path from the back until we have reached
# '..' or '.', or until there is nothing left to split.
path_location = s
while path_location != os_curdir and path_location != os_pardir:
parent_path = path_location
path_location, child_path = path_split(parent_path)
if path_location == parent_path:
break
p_appendleft(child_path)
# This last append is the base path.
# Only append if the string is non-empty.
# Make sure the proper path separator for this OS is used
# no matter what was actually given.
if path_location:
p_appendleft(py23_str(os_sep))
# Now, split off the file extensions using a similar method to above.
# Continue splitting off file extensions until we reach a decimal number
# or there are no more extensions.
# We are not using built-in functionality of PathLib here because of
# the recursive splitting up to a decimal.
base = path_parts.pop()
base_parts = deque()
b_appendleft = base_parts.appendleft
while True:
front = base
base, ext = path_splitext(front)
if _d_match(ext) or not ext:
# Reset base to before the split if the split is invalid.
base = front
break
b_appendleft(ext)
b_appendleft(base)
# Return the split parent paths and then the split basename.
return ichain(path_parts, base_parts) | [
"def",
"path_splitter",
"(",
"s",
",",
"_d_match",
"=",
"re",
".",
"compile",
"(",
"r\"\\.\\d\"",
")",
".",
"match",
")",
":",
"if",
"has_pathlib",
"and",
"isinstance",
"(",
"s",
",",
"PurePath",
")",
":",
"s",
"=",
"py23_str",
"(",
"s",
")",
"path_p... | Split a string into its path components.
Assumes a string is a path or is path-like.
Parameters
----------
s : str | pathlib.Path
Returns
-------
split : tuple
The path split by directory components and extensions.
Examples
--------
>>> tuple(path_splitter("this/thing.ext"))
({u}'this', {u}'thing', {u}'.ext') | [
"Split",
"a",
"string",
"into",
"its",
"path",
"components",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L729-L791 | train | 34,917 |
SethMMorton/natsort | natsort/utils.py | NumericalRegularExpressions._construct_regex | def _construct_regex(cls, fmt):
"""Given a format string, construct the regex with class attributes."""
return re.compile(fmt.format(**vars(cls)), flags=re.U) | python | def _construct_regex(cls, fmt):
"""Given a format string, construct the regex with class attributes."""
return re.compile(fmt.format(**vars(cls)), flags=re.U) | [
"def",
"_construct_regex",
"(",
"cls",
",",
"fmt",
")",
":",
"return",
"re",
".",
"compile",
"(",
"fmt",
".",
"format",
"(",
"*",
"*",
"vars",
"(",
"cls",
")",
")",
",",
"flags",
"=",
"re",
".",
"U",
")"
] | Given a format string, construct the regex with class attributes. | [
"Given",
"a",
"format",
"string",
"construct",
"the",
"regex",
"with",
"class",
"attributes",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L92-L94 | train | 34,918 |
SethMMorton/natsort | natsort/natsort.py | natsort_keygen | def natsort_keygen(key=None, alg=ns.DEFAULT):
"""
Generate a key to sort strings and numbers naturally.
This key is designed for use as the `key` argument to
functions such as the `sorted` builtin.
The user may customize the generated function with the
arguments to `natsort_keygen`, including an optional
`key` function.
Parameters
----------
key : callable, optional
A key used to manipulate the input value before parsing for
numbers. It is **not** applied recursively.
It should accept a single argument and return a single value.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out : function
A function that parses input for natural sorting that is
suitable for passing as the `key` argument to functions
such as `sorted`.
See Also
--------
natsorted
natsort_key
Examples
--------
`natsort_keygen` is a convenient way to create a custom key
to sort lists in-place (for example).::
>>> a = ['num5.10', 'num-3', 'num5.3', 'num2']
>>> a.sort(key=natsort_keygen(alg=ns.REAL))
>>> a
[{u}'num-3', {u}'num2', {u}'num5.10', {u}'num5.3']
"""
try:
ns.DEFAULT | alg
except TypeError:
msg = "natsort_keygen: 'alg' argument must be from the enum 'ns'"
raise ValueError(msg + ", got {}".format(py23_str(alg)))
# Add the NS_DUMB option if the locale library is broken.
if alg & ns.LOCALEALPHA and natsort.compat.locale.dumb_sort():
alg |= NS_DUMB
# Set some variables that will be passed to the factory functions
if alg & ns.NUMAFTER:
if alg & ns.LOCALEALPHA:
sep = natsort.compat.locale.null_string_locale_max
else:
sep = natsort.compat.locale.null_string_max
pre_sep = natsort.compat.locale.null_string_max
else:
if alg & ns.LOCALEALPHA:
sep = natsort.compat.locale.null_string_locale
else:
sep = natsort.compat.locale.null_string
pre_sep = natsort.compat.locale.null_string
regex = utils.regex_chooser(alg)
# Create the functions that will be used to split strings.
input_transform = utils.input_string_transform_factory(alg)
component_transform = utils.string_component_transform_factory(alg)
final_transform = utils.final_data_transform_factory(alg, sep, pre_sep)
# Create the high-level parsing functions for strings, bytes, and numbers.
string_func = utils.parse_string_factory(
alg, sep, regex.split, input_transform, component_transform, final_transform
)
if alg & ns.PATH:
string_func = utils.parse_path_factory(string_func)
bytes_func = utils.parse_bytes_factory(alg)
num_func = utils.parse_number_factory(alg, sep, pre_sep)
# Return the natsort key with the parsing path pre-chosen.
return partial(
utils.natsort_key,
key=key,
string_func=string_func,
bytes_func=bytes_func,
num_func=num_func,
) | python | def natsort_keygen(key=None, alg=ns.DEFAULT):
"""
Generate a key to sort strings and numbers naturally.
This key is designed for use as the `key` argument to
functions such as the `sorted` builtin.
The user may customize the generated function with the
arguments to `natsort_keygen`, including an optional
`key` function.
Parameters
----------
key : callable, optional
A key used to manipulate the input value before parsing for
numbers. It is **not** applied recursively.
It should accept a single argument and return a single value.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out : function
A function that parses input for natural sorting that is
suitable for passing as the `key` argument to functions
such as `sorted`.
See Also
--------
natsorted
natsort_key
Examples
--------
`natsort_keygen` is a convenient way to create a custom key
to sort lists in-place (for example).::
>>> a = ['num5.10', 'num-3', 'num5.3', 'num2']
>>> a.sort(key=natsort_keygen(alg=ns.REAL))
>>> a
[{u}'num-3', {u}'num2', {u}'num5.10', {u}'num5.3']
"""
try:
ns.DEFAULT | alg
except TypeError:
msg = "natsort_keygen: 'alg' argument must be from the enum 'ns'"
raise ValueError(msg + ", got {}".format(py23_str(alg)))
# Add the NS_DUMB option if the locale library is broken.
if alg & ns.LOCALEALPHA and natsort.compat.locale.dumb_sort():
alg |= NS_DUMB
# Set some variables that will be passed to the factory functions
if alg & ns.NUMAFTER:
if alg & ns.LOCALEALPHA:
sep = natsort.compat.locale.null_string_locale_max
else:
sep = natsort.compat.locale.null_string_max
pre_sep = natsort.compat.locale.null_string_max
else:
if alg & ns.LOCALEALPHA:
sep = natsort.compat.locale.null_string_locale
else:
sep = natsort.compat.locale.null_string
pre_sep = natsort.compat.locale.null_string
regex = utils.regex_chooser(alg)
# Create the functions that will be used to split strings.
input_transform = utils.input_string_transform_factory(alg)
component_transform = utils.string_component_transform_factory(alg)
final_transform = utils.final_data_transform_factory(alg, sep, pre_sep)
# Create the high-level parsing functions for strings, bytes, and numbers.
string_func = utils.parse_string_factory(
alg, sep, regex.split, input_transform, component_transform, final_transform
)
if alg & ns.PATH:
string_func = utils.parse_path_factory(string_func)
bytes_func = utils.parse_bytes_factory(alg)
num_func = utils.parse_number_factory(alg, sep, pre_sep)
# Return the natsort key with the parsing path pre-chosen.
return partial(
utils.natsort_key,
key=key,
string_func=string_func,
bytes_func=bytes_func,
num_func=num_func,
) | [
"def",
"natsort_keygen",
"(",
"key",
"=",
"None",
",",
"alg",
"=",
"ns",
".",
"DEFAULT",
")",
":",
"try",
":",
"ns",
".",
"DEFAULT",
"|",
"alg",
"except",
"TypeError",
":",
"msg",
"=",
"\"natsort_keygen: 'alg' argument must be from the enum 'ns'\"",
"raise",
"... | Generate a key to sort strings and numbers naturally.
This key is designed for use as the `key` argument to
functions such as the `sorted` builtin.
The user may customize the generated function with the
arguments to `natsort_keygen`, including an optional
`key` function.
Parameters
----------
key : callable, optional
A key used to manipulate the input value before parsing for
numbers. It is **not** applied recursively.
It should accept a single argument and return a single value.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out : function
A function that parses input for natural sorting that is
suitable for passing as the `key` argument to functions
such as `sorted`.
See Also
--------
natsorted
natsort_key
Examples
--------
`natsort_keygen` is a convenient way to create a custom key
to sort lists in-place (for example).::
>>> a = ['num5.10', 'num-3', 'num5.3', 'num2']
>>> a.sort(key=natsort_keygen(alg=ns.REAL))
>>> a
[{u}'num-3', {u}'num2', {u}'num5.10', {u}'num5.3'] | [
"Generate",
"a",
"key",
"to",
"sort",
"strings",
"and",
"numbers",
"naturally",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/natsort.py#L111-L203 | train | 34,919 |
SethMMorton/natsort | natsort/natsort.py | natsorted | def natsorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
Sorts an iterable naturally.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the iterable.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out: list
The sorted input.
See Also
--------
natsort_keygen : Generates the key that makes natural sorting possible.
realsorted : A wrapper for ``natsorted(seq, alg=ns.REAL)``.
humansorted : A wrapper for ``natsorted(seq, alg=ns.LOCALE)``.
index_natsorted : Returns the sorted indexes from `natsorted`.
Examples
--------
Use `natsorted` just like the builtin `sorted`::
>>> a = ['num3', 'num5', 'num2']
>>> natsorted(a)
[{u}'num2', {u}'num3', {u}'num5']
"""
key = natsort_keygen(key, alg)
return sorted(seq, reverse=reverse, key=key) | python | def natsorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
Sorts an iterable naturally.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the iterable.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out: list
The sorted input.
See Also
--------
natsort_keygen : Generates the key that makes natural sorting possible.
realsorted : A wrapper for ``natsorted(seq, alg=ns.REAL)``.
humansorted : A wrapper for ``natsorted(seq, alg=ns.LOCALE)``.
index_natsorted : Returns the sorted indexes from `natsorted`.
Examples
--------
Use `natsorted` just like the builtin `sorted`::
>>> a = ['num3', 'num5', 'num2']
>>> natsorted(a)
[{u}'num2', {u}'num3', {u}'num5']
"""
key = natsort_keygen(key, alg)
return sorted(seq, reverse=reverse, key=key) | [
"def",
"natsorted",
"(",
"seq",
",",
"key",
"=",
"None",
",",
"reverse",
"=",
"False",
",",
"alg",
"=",
"ns",
".",
"DEFAULT",
")",
":",
"key",
"=",
"natsort_keygen",
"(",
"key",
",",
"alg",
")",
"return",
"sorted",
"(",
"seq",
",",
"reverse",
"=",
... | Sorts an iterable naturally.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the iterable.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out: list
The sorted input.
See Also
--------
natsort_keygen : Generates the key that makes natural sorting possible.
realsorted : A wrapper for ``natsorted(seq, alg=ns.REAL)``.
humansorted : A wrapper for ``natsorted(seq, alg=ns.LOCALE)``.
index_natsorted : Returns the sorted indexes from `natsorted`.
Examples
--------
Use `natsorted` just like the builtin `sorted`::
>>> a = ['num3', 'num5', 'num2']
>>> natsorted(a)
[{u}'num2', {u}'num3', {u}'num5'] | [
"Sorts",
"an",
"iterable",
"naturally",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/natsort.py#L222-L267 | train | 34,920 |
SethMMorton/natsort | natsort/natsort.py | humansorted | def humansorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
Convenience function to properly sort non-numeric characters.
This is a wrapper around ``natsorted(seq, alg=ns.LOCALE)``.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the sequence.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.LOCALE`.
Returns
-------
out : list
The sorted input.
See Also
--------
index_humansorted : Returns the sorted indexes from `humansorted`.
Notes
-----
Please read :ref:`locale_issues` before using `humansorted`.
Examples
--------
Use `humansorted` just like the builtin `sorted`::
>>> a = ['Apple', 'Banana', 'apple', 'banana']
>>> natsorted(a)
[{u}'Apple', {u}'Banana', {u}'apple', {u}'banana']
>>> humansorted(a)
[{u}'apple', {u}'Apple', {u}'banana', {u}'Banana']
"""
return natsorted(seq, key, reverse, alg | ns.LOCALE) | python | def humansorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
Convenience function to properly sort non-numeric characters.
This is a wrapper around ``natsorted(seq, alg=ns.LOCALE)``.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the sequence.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.LOCALE`.
Returns
-------
out : list
The sorted input.
See Also
--------
index_humansorted : Returns the sorted indexes from `humansorted`.
Notes
-----
Please read :ref:`locale_issues` before using `humansorted`.
Examples
--------
Use `humansorted` just like the builtin `sorted`::
>>> a = ['Apple', 'Banana', 'apple', 'banana']
>>> natsorted(a)
[{u}'Apple', {u}'Banana', {u}'apple', {u}'banana']
>>> humansorted(a)
[{u}'apple', {u}'Apple', {u}'banana', {u}'Banana']
"""
return natsorted(seq, key, reverse, alg | ns.LOCALE) | [
"def",
"humansorted",
"(",
"seq",
",",
"key",
"=",
"None",
",",
"reverse",
"=",
"False",
",",
"alg",
"=",
"ns",
".",
"DEFAULT",
")",
":",
"return",
"natsorted",
"(",
"seq",
",",
"key",
",",
"reverse",
",",
"alg",
"|",
"ns",
".",
"LOCALE",
")"
] | Convenience function to properly sort non-numeric characters.
This is a wrapper around ``natsorted(seq, alg=ns.LOCALE)``.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the sequence.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.LOCALE`.
Returns
-------
out : list
The sorted input.
See Also
--------
index_humansorted : Returns the sorted indexes from `humansorted`.
Notes
-----
Please read :ref:`locale_issues` before using `humansorted`.
Examples
--------
Use `humansorted` just like the builtin `sorted`::
>>> a = ['Apple', 'Banana', 'apple', 'banana']
>>> natsorted(a)
[{u}'Apple', {u}'Banana', {u}'apple', {u}'banana']
>>> humansorted(a)
[{u}'apple', {u}'Apple', {u}'banana', {u}'Banana'] | [
"Convenience",
"function",
"to",
"properly",
"sort",
"non",
"-",
"numeric",
"characters",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/natsort.py#L271-L320 | train | 34,921 |
SethMMorton/natsort | natsort/natsort.py | realsorted | def realsorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
Convenience function to properly sort signed floats.
A signed float in a string could be "a-5.7". This is a wrapper around
``natsorted(seq, alg=ns.REAL)``.
The behavior of :func:`realsorted` for `natsort` version >= 4.0.0
was the default behavior of :func:`natsorted` for `natsort`
version < 4.0.0.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the sequence.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.REAL`.
Returns
-------
out : list
The sorted input.
See Also
--------
index_realsorted : Returns the sorted indexes from `realsorted`.
Examples
--------
Use `realsorted` just like the builtin `sorted`::
>>> a = ['num5.10', 'num-3', 'num5.3', 'num2']
>>> natsorted(a)
[{u}'num2', {u}'num5.3', {u}'num5.10', {u}'num-3']
>>> realsorted(a)
[{u}'num-3', {u}'num2', {u}'num5.10', {u}'num5.3']
"""
return natsorted(seq, key, reverse, alg | ns.REAL) | python | def realsorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
Convenience function to properly sort signed floats.
A signed float in a string could be "a-5.7". This is a wrapper around
``natsorted(seq, alg=ns.REAL)``.
The behavior of :func:`realsorted` for `natsort` version >= 4.0.0
was the default behavior of :func:`natsorted` for `natsort`
version < 4.0.0.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the sequence.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.REAL`.
Returns
-------
out : list
The sorted input.
See Also
--------
index_realsorted : Returns the sorted indexes from `realsorted`.
Examples
--------
Use `realsorted` just like the builtin `sorted`::
>>> a = ['num5.10', 'num-3', 'num5.3', 'num2']
>>> natsorted(a)
[{u}'num2', {u}'num5.3', {u}'num5.10', {u}'num-3']
>>> realsorted(a)
[{u}'num-3', {u}'num2', {u}'num5.10', {u}'num5.3']
"""
return natsorted(seq, key, reverse, alg | ns.REAL) | [
"def",
"realsorted",
"(",
"seq",
",",
"key",
"=",
"None",
",",
"reverse",
"=",
"False",
",",
"alg",
"=",
"ns",
".",
"DEFAULT",
")",
":",
"return",
"natsorted",
"(",
"seq",
",",
"key",
",",
"reverse",
",",
"alg",
"|",
"ns",
".",
"REAL",
")"
] | Convenience function to properly sort signed floats.
A signed float in a string could be "a-5.7". This is a wrapper around
``natsorted(seq, alg=ns.REAL)``.
The behavior of :func:`realsorted` for `natsort` version >= 4.0.0
was the default behavior of :func:`natsorted` for `natsort`
version < 4.0.0.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the sequence.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.REAL`.
Returns
-------
out : list
The sorted input.
See Also
--------
index_realsorted : Returns the sorted indexes from `realsorted`.
Examples
--------
Use `realsorted` just like the builtin `sorted`::
>>> a = ['num5.10', 'num-3', 'num5.3', 'num2']
>>> natsorted(a)
[{u}'num2', {u}'num5.3', {u}'num5.10', {u}'num-3']
>>> realsorted(a)
[{u}'num-3', {u}'num2', {u}'num5.10', {u}'num5.3'] | [
"Convenience",
"function",
"to",
"properly",
"sort",
"signed",
"floats",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/natsort.py#L324-L374 | train | 34,922 |
SethMMorton/natsort | natsort/natsort.py | index_natsorted | def index_natsorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
Determine the list of the indexes used to sort the input sequence.
Sorts a sequence naturally, but returns a list of sorted the
indexes and not the sorted list itself. This list of indexes
can be used to sort multiple lists by the sorted order of the
given sequence.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the sequence.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out : tuple
The ordered indexes of the input.
See Also
--------
natsorted
order_by_index
Examples
--------
Use index_natsorted if you want to sort multiple lists by the
sorted order of one list::
>>> a = ['num3', 'num5', 'num2']
>>> b = ['foo', 'bar', 'baz']
>>> index = index_natsorted(a)
>>> index
[2, 0, 1]
>>> # Sort both lists by the sort order of a
>>> order_by_index(a, index)
[{u}'num2', {u}'num3', {u}'num5']
>>> order_by_index(b, index)
[{u}'baz', {u}'foo', {u}'bar']
"""
if key is None:
newkey = itemgetter(1)
else:
def newkey(x):
return key(itemgetter(1)(x))
# Pair the index and sequence together, then sort by element
index_seq_pair = [[x, y] for x, y in enumerate(seq)]
index_seq_pair.sort(reverse=reverse, key=natsort_keygen(newkey, alg))
return [x for x, _ in index_seq_pair] | python | def index_natsorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
Determine the list of the indexes used to sort the input sequence.
Sorts a sequence naturally, but returns a list of sorted the
indexes and not the sorted list itself. This list of indexes
can be used to sort multiple lists by the sorted order of the
given sequence.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the sequence.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out : tuple
The ordered indexes of the input.
See Also
--------
natsorted
order_by_index
Examples
--------
Use index_natsorted if you want to sort multiple lists by the
sorted order of one list::
>>> a = ['num3', 'num5', 'num2']
>>> b = ['foo', 'bar', 'baz']
>>> index = index_natsorted(a)
>>> index
[2, 0, 1]
>>> # Sort both lists by the sort order of a
>>> order_by_index(a, index)
[{u}'num2', {u}'num3', {u}'num5']
>>> order_by_index(b, index)
[{u}'baz', {u}'foo', {u}'bar']
"""
if key is None:
newkey = itemgetter(1)
else:
def newkey(x):
return key(itemgetter(1)(x))
# Pair the index and sequence together, then sort by element
index_seq_pair = [[x, y] for x, y in enumerate(seq)]
index_seq_pair.sort(reverse=reverse, key=natsort_keygen(newkey, alg))
return [x for x, _ in index_seq_pair] | [
"def",
"index_natsorted",
"(",
"seq",
",",
"key",
"=",
"None",
",",
"reverse",
"=",
"False",
",",
"alg",
"=",
"ns",
".",
"DEFAULT",
")",
":",
"if",
"key",
"is",
"None",
":",
"newkey",
"=",
"itemgetter",
"(",
"1",
")",
"else",
":",
"def",
"newkey",
... | Determine the list of the indexes used to sort the input sequence.
Sorts a sequence naturally, but returns a list of sorted the
indexes and not the sorted list itself. This list of indexes
can be used to sort multiple lists by the sorted order of the
given sequence.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the sequence.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out : tuple
The ordered indexes of the input.
See Also
--------
natsorted
order_by_index
Examples
--------
Use index_natsorted if you want to sort multiple lists by the
sorted order of one list::
>>> a = ['num3', 'num5', 'num2']
>>> b = ['foo', 'bar', 'baz']
>>> index = index_natsorted(a)
>>> index
[2, 0, 1]
>>> # Sort both lists by the sort order of a
>>> order_by_index(a, index)
[{u}'num2', {u}'num3', {u}'num5']
>>> order_by_index(b, index)
[{u}'baz', {u}'foo', {u}'bar'] | [
"Determine",
"the",
"list",
"of",
"the",
"indexes",
"used",
"to",
"sort",
"the",
"input",
"sequence",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/natsort.py#L378-L444 | train | 34,923 |
SethMMorton/natsort | natsort/natsort.py | order_by_index | def order_by_index(seq, index, iter=False):
"""
Order a given sequence by an index sequence.
The output of `index_natsorted` is a
sequence of integers (index) that correspond to how its input
sequence **would** be sorted. The idea is that this index can
be used to reorder multiple sequences by the sorted order of the
first sequence. This function is a convenient wrapper to
apply this ordering to a sequence.
Parameters
----------
seq : sequence
The sequence to order.
index : iterable
The iterable that indicates how to order `seq`.
It should be the same length as `seq` and consist
of integers only.
iter : {{True, False}}, optional
If `True`, the ordered sequence is returned as a
iterator; otherwise it is returned as a
list. The default is `False`.
Returns
-------
out : {{list, iterator}}
The sequence ordered by `index`, as a `list` or as an
iterator (depending on the value of `iter`).
See Also
--------
index_natsorted
index_humansorted
index_realsorted
Examples
--------
`order_by_index` is a convenience function that helps you apply
the result of `index_natsorted`::
>>> a = ['num3', 'num5', 'num2']
>>> b = ['foo', 'bar', 'baz']
>>> index = index_natsorted(a)
>>> index
[2, 0, 1]
>>> # Sort both lists by the sort order of a
>>> order_by_index(a, index)
[{u}'num2', {u}'num3', {u}'num5']
>>> order_by_index(b, index)
[{u}'baz', {u}'foo', {u}'bar']
"""
return (seq[i] for i in index) if iter else [seq[i] for i in index] | python | def order_by_index(seq, index, iter=False):
"""
Order a given sequence by an index sequence.
The output of `index_natsorted` is a
sequence of integers (index) that correspond to how its input
sequence **would** be sorted. The idea is that this index can
be used to reorder multiple sequences by the sorted order of the
first sequence. This function is a convenient wrapper to
apply this ordering to a sequence.
Parameters
----------
seq : sequence
The sequence to order.
index : iterable
The iterable that indicates how to order `seq`.
It should be the same length as `seq` and consist
of integers only.
iter : {{True, False}}, optional
If `True`, the ordered sequence is returned as a
iterator; otherwise it is returned as a
list. The default is `False`.
Returns
-------
out : {{list, iterator}}
The sequence ordered by `index`, as a `list` or as an
iterator (depending on the value of `iter`).
See Also
--------
index_natsorted
index_humansorted
index_realsorted
Examples
--------
`order_by_index` is a convenience function that helps you apply
the result of `index_natsorted`::
>>> a = ['num3', 'num5', 'num2']
>>> b = ['foo', 'bar', 'baz']
>>> index = index_natsorted(a)
>>> index
[2, 0, 1]
>>> # Sort both lists by the sort order of a
>>> order_by_index(a, index)
[{u}'num2', {u}'num3', {u}'num5']
>>> order_by_index(b, index)
[{u}'baz', {u}'foo', {u}'bar']
"""
return (seq[i] for i in index) if iter else [seq[i] for i in index] | [
"def",
"order_by_index",
"(",
"seq",
",",
"index",
",",
"iter",
"=",
"False",
")",
":",
"return",
"(",
"seq",
"[",
"i",
"]",
"for",
"i",
"in",
"index",
")",
"if",
"iter",
"else",
"[",
"seq",
"[",
"i",
"]",
"for",
"i",
"in",
"index",
"]"
] | Order a given sequence by an index sequence.
The output of `index_natsorted` is a
sequence of integers (index) that correspond to how its input
sequence **would** be sorted. The idea is that this index can
be used to reorder multiple sequences by the sorted order of the
first sequence. This function is a convenient wrapper to
apply this ordering to a sequence.
Parameters
----------
seq : sequence
The sequence to order.
index : iterable
The iterable that indicates how to order `seq`.
It should be the same length as `seq` and consist
of integers only.
iter : {{True, False}}, optional
If `True`, the ordered sequence is returned as a
iterator; otherwise it is returned as a
list. The default is `False`.
Returns
-------
out : {{list, iterator}}
The sequence ordered by `index`, as a `list` or as an
iterator (depending on the value of `iter`).
See Also
--------
index_natsorted
index_humansorted
index_realsorted
Examples
--------
`order_by_index` is a convenience function that helps you apply
the result of `index_natsorted`::
>>> a = ['num3', 'num5', 'num2']
>>> b = ['foo', 'bar', 'baz']
>>> index = index_natsorted(a)
>>> index
[2, 0, 1]
>>> # Sort both lists by the sort order of a
>>> order_by_index(a, index)
[{u}'num2', {u}'num3', {u}'num5']
>>> order_by_index(b, index)
[{u}'baz', {u}'foo', {u}'bar'] | [
"Order",
"a",
"given",
"sequence",
"by",
"an",
"index",
"sequence",
"."
] | ea0d37ef790b42c424a096e079edd9ea0d5717e3 | https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/natsort.py#L545-L601 | train | 34,924 |
vpelletier/pprofile | zpprofile.py | ZopeMixIn._iterOutFiles | def _iterOutFiles(self):
"""
Yields path, data, mimetype for each file involved on or produced by
profiling.
"""
out = StringIO()
self.callgrind(out, relative_path=True)
yield (
'cachegrind.out.pprofile',
out.getvalue(),
'application/x-kcachegrind',
)
for name, lines in self.iterSource():
lines = ''.join(lines)
if lines:
if isinstance(lines, unicode):
lines = lines.encode('utf-8')
yield (
os.path.normpath(
os.path.splitdrive(name)[1]
).lstrip(_ALLSEP),
lines,
'text/x-python',
)
sql_name_template = 'query_%%0%ii-%%i_hits_%%6fs.sql' % len(
str(len(self.sql_dict)),
)
for index, (query, time_list) in enumerate(
sorted(
self.sql_dict.iteritems(),
key=lambda x: (sum(x[1]), len(x[1])),
reverse=True,
),
):
yield (
sql_name_template % (
index,
len(time_list),
sum(time_list),
),
b'\n'.join(b'-- %10.6fs' % x for x in time_list) + b'\n' + query,
'application/sql',
)
if self.zodb_dict:
yield (
'ZODB_setstate.txt',
'\n\n'.join(
(
'%s (%fs)\n' % (
db_name,
sum(sum(x) for x in oid_dict.itervalues()),
)
) + '\n'.join(
'%s (%i): %s' % (
oid.encode('hex'),
len(time_list),
', '.join('%fs' % x for x in time_list),
)
for oid, time_list in oid_dict.iteritems()
)
for db_name, oid_dict in self.zodb_dict.iteritems()
),
'text/plain',
)
if self.traverse_dict:
yield (
'unrestrictedTraverse_pathlist.txt',
tabulate(
('self', 'path', 'hit', 'total duration'),
sorted(
(
(context, path, len(duration_list), sum(duration_list))
for (context, path), duration_list in self.traverse_dict.iteritems()
),
key=lambda x: x[3],
reverse=True,
),
),
'text/plain',
) | python | def _iterOutFiles(self):
"""
Yields path, data, mimetype for each file involved on or produced by
profiling.
"""
out = StringIO()
self.callgrind(out, relative_path=True)
yield (
'cachegrind.out.pprofile',
out.getvalue(),
'application/x-kcachegrind',
)
for name, lines in self.iterSource():
lines = ''.join(lines)
if lines:
if isinstance(lines, unicode):
lines = lines.encode('utf-8')
yield (
os.path.normpath(
os.path.splitdrive(name)[1]
).lstrip(_ALLSEP),
lines,
'text/x-python',
)
sql_name_template = 'query_%%0%ii-%%i_hits_%%6fs.sql' % len(
str(len(self.sql_dict)),
)
for index, (query, time_list) in enumerate(
sorted(
self.sql_dict.iteritems(),
key=lambda x: (sum(x[1]), len(x[1])),
reverse=True,
),
):
yield (
sql_name_template % (
index,
len(time_list),
sum(time_list),
),
b'\n'.join(b'-- %10.6fs' % x for x in time_list) + b'\n' + query,
'application/sql',
)
if self.zodb_dict:
yield (
'ZODB_setstate.txt',
'\n\n'.join(
(
'%s (%fs)\n' % (
db_name,
sum(sum(x) for x in oid_dict.itervalues()),
)
) + '\n'.join(
'%s (%i): %s' % (
oid.encode('hex'),
len(time_list),
', '.join('%fs' % x for x in time_list),
)
for oid, time_list in oid_dict.iteritems()
)
for db_name, oid_dict in self.zodb_dict.iteritems()
),
'text/plain',
)
if self.traverse_dict:
yield (
'unrestrictedTraverse_pathlist.txt',
tabulate(
('self', 'path', 'hit', 'total duration'),
sorted(
(
(context, path, len(duration_list), sum(duration_list))
for (context, path), duration_list in self.traverse_dict.iteritems()
),
key=lambda x: x[3],
reverse=True,
),
),
'text/plain',
) | [
"def",
"_iterOutFiles",
"(",
"self",
")",
":",
"out",
"=",
"StringIO",
"(",
")",
"self",
".",
"callgrind",
"(",
"out",
",",
"relative_path",
"=",
"True",
")",
"yield",
"(",
"'cachegrind.out.pprofile'",
",",
"out",
".",
"getvalue",
"(",
")",
",",
"'applic... | Yields path, data, mimetype for each file involved on or produced by
profiling. | [
"Yields",
"path",
"data",
"mimetype",
"for",
"each",
"file",
"involved",
"on",
"or",
"produced",
"by",
"profiling",
"."
] | 51a36896727565faf23e5abccc9204e5f935fe1e | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/zpprofile.py#L407-L486 | train | 34,925 |
vpelletier/pprofile | pprofile.py | run | def run(cmd, filename=None, threads=True, verbose=False):
"""Similar to profile.run ."""
_run(threads, verbose, 'run', filename, cmd) | python | def run(cmd, filename=None, threads=True, verbose=False):
"""Similar to profile.run ."""
_run(threads, verbose, 'run', filename, cmd) | [
"def",
"run",
"(",
"cmd",
",",
"filename",
"=",
"None",
",",
"threads",
"=",
"True",
",",
"verbose",
"=",
"False",
")",
":",
"_run",
"(",
"threads",
",",
"verbose",
",",
"'run'",
",",
"filename",
",",
"cmd",
")"
] | Similar to profile.run . | [
"Similar",
"to",
"profile",
".",
"run",
"."
] | 51a36896727565faf23e5abccc9204e5f935fe1e | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1182-L1184 | train | 34,926 |
vpelletier/pprofile | pprofile.py | runctx | def runctx(cmd, globals, locals, filename=None, threads=True, verbose=False):
"""Similar to profile.runctx ."""
_run(threads, verbose, 'runctx', filename, cmd, globals, locals) | python | def runctx(cmd, globals, locals, filename=None, threads=True, verbose=False):
"""Similar to profile.runctx ."""
_run(threads, verbose, 'runctx', filename, cmd, globals, locals) | [
"def",
"runctx",
"(",
"cmd",
",",
"globals",
",",
"locals",
",",
"filename",
"=",
"None",
",",
"threads",
"=",
"True",
",",
"verbose",
"=",
"False",
")",
":",
"_run",
"(",
"threads",
",",
"verbose",
",",
"'runctx'",
",",
"filename",
",",
"cmd",
",",
... | Similar to profile.runctx . | [
"Similar",
"to",
"profile",
".",
"runctx",
"."
] | 51a36896727565faf23e5abccc9204e5f935fe1e | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1186-L1188 | train | 34,927 |
vpelletier/pprofile | pprofile.py | runfile | def runfile(fd, argv, fd_name='<unknown>', compile_flags=0, dont_inherit=1,
filename=None, threads=True, verbose=False):
"""
Run code from given file descriptor with profiling enabled.
Closes fd before executing contained code.
"""
_run(threads, verbose, 'runfile', filename, fd, argv, fd_name,
compile_flags, dont_inherit) | python | def runfile(fd, argv, fd_name='<unknown>', compile_flags=0, dont_inherit=1,
filename=None, threads=True, verbose=False):
"""
Run code from given file descriptor with profiling enabled.
Closes fd before executing contained code.
"""
_run(threads, verbose, 'runfile', filename, fd, argv, fd_name,
compile_flags, dont_inherit) | [
"def",
"runfile",
"(",
"fd",
",",
"argv",
",",
"fd_name",
"=",
"'<unknown>'",
",",
"compile_flags",
"=",
"0",
",",
"dont_inherit",
"=",
"1",
",",
"filename",
"=",
"None",
",",
"threads",
"=",
"True",
",",
"verbose",
"=",
"False",
")",
":",
"_run",
"(... | Run code from given file descriptor with profiling enabled.
Closes fd before executing contained code. | [
"Run",
"code",
"from",
"given",
"file",
"descriptor",
"with",
"profiling",
"enabled",
".",
"Closes",
"fd",
"before",
"executing",
"contained",
"code",
"."
] | 51a36896727565faf23e5abccc9204e5f935fe1e | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1190-L1197 | train | 34,928 |
vpelletier/pprofile | pprofile.py | runpath | def runpath(path, argv, filename=None, threads=True, verbose=False):
"""
Run code from open-accessible file path with profiling enabled.
"""
_run(threads, verbose, 'runpath', filename, path, argv) | python | def runpath(path, argv, filename=None, threads=True, verbose=False):
"""
Run code from open-accessible file path with profiling enabled.
"""
_run(threads, verbose, 'runpath', filename, path, argv) | [
"def",
"runpath",
"(",
"path",
",",
"argv",
",",
"filename",
"=",
"None",
",",
"threads",
"=",
"True",
",",
"verbose",
"=",
"False",
")",
":",
"_run",
"(",
"threads",
",",
"verbose",
",",
"'runpath'",
",",
"filename",
",",
"path",
",",
"argv",
")"
] | Run code from open-accessible file path with profiling enabled. | [
"Run",
"code",
"from",
"open",
"-",
"accessible",
"file",
"path",
"with",
"profiling",
"enabled",
"."
] | 51a36896727565faf23e5abccc9204e5f935fe1e | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1199-L1203 | train | 34,929 |
vpelletier/pprofile | pprofile.py | pprofile | def pprofile(line, cell=None):
"""
Profile line execution.
"""
if cell is None:
# TODO: detect and use arguments (statistical profiling, ...) ?
return run(line)
return _main(
['%%pprofile', '-m', '-'] + shlex.split(line),
io.StringIO(cell),
) | python | def pprofile(line, cell=None):
"""
Profile line execution.
"""
if cell is None:
# TODO: detect and use arguments (statistical profiling, ...) ?
return run(line)
return _main(
['%%pprofile', '-m', '-'] + shlex.split(line),
io.StringIO(cell),
) | [
"def",
"pprofile",
"(",
"line",
",",
"cell",
"=",
"None",
")",
":",
"if",
"cell",
"is",
"None",
":",
"# TODO: detect and use arguments (statistical profiling, ...) ?",
"return",
"run",
"(",
"line",
")",
"return",
"_main",
"(",
"[",
"'%%pprofile'",
",",
"'-m'",
... | Profile line execution. | [
"Profile",
"line",
"execution",
"."
] | 51a36896727565faf23e5abccc9204e5f935fe1e | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1388-L1398 | train | 34,930 |
vpelletier/pprofile | pprofile.py | _FileTiming.hit | def hit(self, code, line, duration):
"""
A line has finished executing.
code (code)
container function's code object
line (int)
line number of just executed line
duration (float)
duration of the line, in seconds
"""
entry = self.line_dict[line][code]
entry[0] += 1
entry[1] += duration | python | def hit(self, code, line, duration):
"""
A line has finished executing.
code (code)
container function's code object
line (int)
line number of just executed line
duration (float)
duration of the line, in seconds
"""
entry = self.line_dict[line][code]
entry[0] += 1
entry[1] += duration | [
"def",
"hit",
"(",
"self",
",",
"code",
",",
"line",
",",
"duration",
")",
":",
"entry",
"=",
"self",
".",
"line_dict",
"[",
"line",
"]",
"[",
"code",
"]",
"entry",
"[",
"0",
"]",
"+=",
"1",
"entry",
"[",
"1",
"]",
"+=",
"duration"
] | A line has finished executing.
code (code)
container function's code object
line (int)
line number of just executed line
duration (float)
duration of the line, in seconds | [
"A",
"line",
"has",
"finished",
"executing",
"."
] | 51a36896727565faf23e5abccc9204e5f935fe1e | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L192-L205 | train | 34,931 |
vpelletier/pprofile | pprofile.py | _FileTiming.call | def call(self, code, line, callee_file_timing, callee, duration, frame):
"""
A call originating from this file returned.
code (code)
caller's code object
line (int)
caller's line number
callee_file_timing (FileTiming)
callee's FileTiming
callee (code)
callee's code object
duration (float)
duration of the call, in seconds
frame (frame)
calle's entire frame as of its return
"""
try:
entry = self.call_dict[(code, line, callee)]
except KeyError:
self.call_dict[(code, line, callee)] = [callee_file_timing, 1, duration]
else:
entry[1] += 1
entry[2] += duration | python | def call(self, code, line, callee_file_timing, callee, duration, frame):
"""
A call originating from this file returned.
code (code)
caller's code object
line (int)
caller's line number
callee_file_timing (FileTiming)
callee's FileTiming
callee (code)
callee's code object
duration (float)
duration of the call, in seconds
frame (frame)
calle's entire frame as of its return
"""
try:
entry = self.call_dict[(code, line, callee)]
except KeyError:
self.call_dict[(code, line, callee)] = [callee_file_timing, 1, duration]
else:
entry[1] += 1
entry[2] += duration | [
"def",
"call",
"(",
"self",
",",
"code",
",",
"line",
",",
"callee_file_timing",
",",
"callee",
",",
"duration",
",",
"frame",
")",
":",
"try",
":",
"entry",
"=",
"self",
".",
"call_dict",
"[",
"(",
"code",
",",
"line",
",",
"callee",
")",
"]",
"ex... | A call originating from this file returned.
code (code)
caller's code object
line (int)
caller's line number
callee_file_timing (FileTiming)
callee's FileTiming
callee (code)
callee's code object
duration (float)
duration of the call, in seconds
frame (frame)
calle's entire frame as of its return | [
"A",
"call",
"originating",
"from",
"this",
"file",
"returned",
"."
] | 51a36896727565faf23e5abccc9204e5f935fe1e | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L207-L230 | train | 34,932 |
vpelletier/pprofile | pprofile.py | ProfileBase.dump_stats | def dump_stats(self, filename):
"""
Similar to profile.Profile.dump_stats - but different output format !
"""
if _isCallgrindName(filename):
with open(filename, 'w') as out:
self.callgrind(out)
else:
with io.open(filename, 'w', errors='replace') as out:
self.annotate(out) | python | def dump_stats(self, filename):
"""
Similar to profile.Profile.dump_stats - but different output format !
"""
if _isCallgrindName(filename):
with open(filename, 'w') as out:
self.callgrind(out)
else:
with io.open(filename, 'w', errors='replace') as out:
self.annotate(out) | [
"def",
"dump_stats",
"(",
"self",
",",
"filename",
")",
":",
"if",
"_isCallgrindName",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"out",
":",
"self",
".",
"callgrind",
"(",
"out",
")",
"else",
":",
"with",
"io"... | Similar to profile.Profile.dump_stats - but different output format ! | [
"Similar",
"to",
"profile",
".",
"Profile",
".",
"dump_stats",
"-",
"but",
"different",
"output",
"format",
"!"
] | 51a36896727565faf23e5abccc9204e5f935fe1e | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L723-L732 | train | 34,933 |
vpelletier/pprofile | pprofile.py | ProfileRunnerBase.runctx | def runctx(self, cmd, globals, locals):
"""Similar to profile.Profile.runctx ."""
with self():
exec(cmd, globals, locals)
return self | python | def runctx(self, cmd, globals, locals):
"""Similar to profile.Profile.runctx ."""
with self():
exec(cmd, globals, locals)
return self | [
"def",
"runctx",
"(",
"self",
",",
"cmd",
",",
"globals",
",",
"locals",
")",
":",
"with",
"self",
"(",
")",
":",
"exec",
"(",
"cmd",
",",
"globals",
",",
"locals",
")",
"return",
"self"
] | Similar to profile.Profile.runctx . | [
"Similar",
"to",
"profile",
".",
"Profile",
".",
"runctx",
"."
] | 51a36896727565faf23e5abccc9204e5f935fe1e | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L752-L756 | train | 34,934 |
vpelletier/pprofile | pprofile.py | Profile.enable | def enable(self):
"""
Enable profiling.
"""
if self.enabled_start:
warn('Duplicate "enable" call')
else:
self._enable()
sys.settrace(self._global_trace) | python | def enable(self):
"""
Enable profiling.
"""
if self.enabled_start:
warn('Duplicate "enable" call')
else:
self._enable()
sys.settrace(self._global_trace) | [
"def",
"enable",
"(",
"self",
")",
":",
"if",
"self",
".",
"enabled_start",
":",
"warn",
"(",
"'Duplicate \"enable\" call'",
")",
"else",
":",
"self",
".",
"_enable",
"(",
")",
"sys",
".",
"settrace",
"(",
"self",
".",
"_global_trace",
")"
] | Enable profiling. | [
"Enable",
"profiling",
"."
] | 51a36896727565faf23e5abccc9204e5f935fe1e | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L843-L851 | train | 34,935 |
vpelletier/pprofile | pprofile.py | Profile._disable | def _disable(self):
"""
Overload this method when subclassing. Called after actually disabling
trace.
"""
self.total_time += time() - self.enabled_start
self.enabled_start = None
del self.stack | python | def _disable(self):
"""
Overload this method when subclassing. Called after actually disabling
trace.
"""
self.total_time += time() - self.enabled_start
self.enabled_start = None
del self.stack | [
"def",
"_disable",
"(",
"self",
")",
":",
"self",
".",
"total_time",
"+=",
"time",
"(",
")",
"-",
"self",
".",
"enabled_start",
"self",
".",
"enabled_start",
"=",
"None",
"del",
"self",
".",
"stack"
] | Overload this method when subclassing. Called after actually disabling
trace. | [
"Overload",
"this",
"method",
"when",
"subclassing",
".",
"Called",
"after",
"actually",
"disabling",
"trace",
"."
] | 51a36896727565faf23e5abccc9204e5f935fe1e | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L853-L860 | train | 34,936 |
rsinger86/drf-flex-fields | rest_flex_fields/serializers.py | FlexFieldsSerializerMixin._make_expanded_field_serializer | def _make_expanded_field_serializer(
self, name, nested_expand, nested_fields, nested_omit
):
"""
Returns an instance of the dynamically created nested serializer.
"""
field_options = self.expandable_fields[name]
serializer_class = field_options[0]
serializer_settings = copy.deepcopy(field_options[1])
if name in nested_expand:
serializer_settings["expand"] = nested_expand[name]
if name in nested_fields:
serializer_settings["fields"] = nested_fields[name]
if name in nested_omit:
serializer_settings["omit"] = nested_omit[name]
if serializer_settings.get("source") == name:
del serializer_settings["source"]
if type(serializer_class) == str:
serializer_class = self._import_serializer_class(serializer_class)
return serializer_class(**serializer_settings) | python | def _make_expanded_field_serializer(
self, name, nested_expand, nested_fields, nested_omit
):
"""
Returns an instance of the dynamically created nested serializer.
"""
field_options = self.expandable_fields[name]
serializer_class = field_options[0]
serializer_settings = copy.deepcopy(field_options[1])
if name in nested_expand:
serializer_settings["expand"] = nested_expand[name]
if name in nested_fields:
serializer_settings["fields"] = nested_fields[name]
if name in nested_omit:
serializer_settings["omit"] = nested_omit[name]
if serializer_settings.get("source") == name:
del serializer_settings["source"]
if type(serializer_class) == str:
serializer_class = self._import_serializer_class(serializer_class)
return serializer_class(**serializer_settings) | [
"def",
"_make_expanded_field_serializer",
"(",
"self",
",",
"name",
",",
"nested_expand",
",",
"nested_fields",
",",
"nested_omit",
")",
":",
"field_options",
"=",
"self",
".",
"expandable_fields",
"[",
"name",
"]",
"serializer_class",
"=",
"field_options",
"[",
"... | Returns an instance of the dynamically created nested serializer. | [
"Returns",
"an",
"instance",
"of",
"the",
"dynamically",
"created",
"nested",
"serializer",
"."
] | 56495f15977d76697972acac571792e8fd67003d | https://github.com/rsinger86/drf-flex-fields/blob/56495f15977d76697972acac571792e8fd67003d/rest_flex_fields/serializers.py#L50-L75 | train | 34,937 |
rsinger86/drf-flex-fields | rest_flex_fields/serializers.py | FlexFieldsSerializerMixin._clean_fields | def _clean_fields(self, omit_fields, sparse_fields, next_level_omits):
"""
Remove fields that are found in omit list, and if sparse names
are passed, remove any fields not found in that list.
"""
sparse = len(sparse_fields) > 0
to_remove = []
if not sparse and len(omit_fields) == 0:
return
for field_name in self.fields:
is_present = self._should_field_exist(
field_name, omit_fields, sparse_fields, next_level_omits
)
if not is_present:
to_remove.append(field_name)
for remove_field in to_remove:
self.fields.pop(remove_field) | python | def _clean_fields(self, omit_fields, sparse_fields, next_level_omits):
"""
Remove fields that are found in omit list, and if sparse names
are passed, remove any fields not found in that list.
"""
sparse = len(sparse_fields) > 0
to_remove = []
if not sparse and len(omit_fields) == 0:
return
for field_name in self.fields:
is_present = self._should_field_exist(
field_name, omit_fields, sparse_fields, next_level_omits
)
if not is_present:
to_remove.append(field_name)
for remove_field in to_remove:
self.fields.pop(remove_field) | [
"def",
"_clean_fields",
"(",
"self",
",",
"omit_fields",
",",
"sparse_fields",
",",
"next_level_omits",
")",
":",
"sparse",
"=",
"len",
"(",
"sparse_fields",
")",
">",
"0",
"to_remove",
"=",
"[",
"]",
"if",
"not",
"sparse",
"and",
"len",
"(",
"omit_fields"... | Remove fields that are found in omit list, and if sparse names
are passed, remove any fields not found in that list. | [
"Remove",
"fields",
"that",
"are",
"found",
"in",
"omit",
"list",
"and",
"if",
"sparse",
"names",
"are",
"passed",
"remove",
"any",
"fields",
"not",
"found",
"in",
"that",
"list",
"."
] | 56495f15977d76697972acac571792e8fd67003d | https://github.com/rsinger86/drf-flex-fields/blob/56495f15977d76697972acac571792e8fd67003d/rest_flex_fields/serializers.py#L92-L112 | train | 34,938 |
rsinger86/drf-flex-fields | rest_flex_fields/serializers.py | FlexFieldsSerializerMixin._can_access_request | def _can_access_request(self):
"""
Can access current request object if all are true
- The serializer is the root.
- A request context was passed in.
- The request method is GET.
"""
if self.parent:
return False
if not hasattr(self, "context") or not self.context.get("request", None):
return False
return self.context["request"].method == "GET" | python | def _can_access_request(self):
"""
Can access current request object if all are true
- The serializer is the root.
- A request context was passed in.
- The request method is GET.
"""
if self.parent:
return False
if not hasattr(self, "context") or not self.context.get("request", None):
return False
return self.context["request"].method == "GET" | [
"def",
"_can_access_request",
"(",
"self",
")",
":",
"if",
"self",
".",
"parent",
":",
"return",
"False",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"context\"",
")",
"or",
"not",
"self",
".",
"context",
".",
"get",
"(",
"\"request\"",
",",
"None",
")... | Can access current request object if all are true
- The serializer is the root.
- A request context was passed in.
- The request method is GET. | [
"Can",
"access",
"current",
"request",
"object",
"if",
"all",
"are",
"true",
"-",
"The",
"serializer",
"is",
"the",
"root",
".",
"-",
"A",
"request",
"context",
"was",
"passed",
"in",
".",
"-",
"The",
"request",
"method",
"is",
"GET",
"."
] | 56495f15977d76697972acac571792e8fd67003d | https://github.com/rsinger86/drf-flex-fields/blob/56495f15977d76697972acac571792e8fd67003d/rest_flex_fields/serializers.py#L158-L171 | train | 34,939 |
rsinger86/drf-flex-fields | rest_flex_fields/serializers.py | FlexFieldsSerializerMixin._get_expand_input | def _get_expand_input(self, passed_settings):
"""
If expand value is explicitliy passed, just return it.
If parsing from request, ensure that the value complies with
the "permitted_expands" list passed into the context from the
FlexFieldsMixin.
"""
value = passed_settings.get("expand")
if len(value) > 0:
return value
if not self._can_access_request:
return []
expand = self._parse_request_list_value("expand")
if "permitted_expands" in self.context:
permitted_expands = self.context["permitted_expands"]
if "~all" in expand or "*" in expand:
return permitted_expands
else:
return list(set(expand) & set(permitted_expands))
return expand | python | def _get_expand_input(self, passed_settings):
"""
If expand value is explicitliy passed, just return it.
If parsing from request, ensure that the value complies with
the "permitted_expands" list passed into the context from the
FlexFieldsMixin.
"""
value = passed_settings.get("expand")
if len(value) > 0:
return value
if not self._can_access_request:
return []
expand = self._parse_request_list_value("expand")
if "permitted_expands" in self.context:
permitted_expands = self.context["permitted_expands"]
if "~all" in expand or "*" in expand:
return permitted_expands
else:
return list(set(expand) & set(permitted_expands))
return expand | [
"def",
"_get_expand_input",
"(",
"self",
",",
"passed_settings",
")",
":",
"value",
"=",
"passed_settings",
".",
"get",
"(",
"\"expand\"",
")",
"if",
"len",
"(",
"value",
")",
">",
"0",
":",
"return",
"value",
"if",
"not",
"self",
".",
"_can_access_request... | If expand value is explicitliy passed, just return it.
If parsing from request, ensure that the value complies with
the "permitted_expands" list passed into the context from the
FlexFieldsMixin. | [
"If",
"expand",
"value",
"is",
"explicitliy",
"passed",
"just",
"return",
"it",
".",
"If",
"parsing",
"from",
"request",
"ensure",
"that",
"the",
"value",
"complies",
"with",
"the",
"permitted_expands",
"list",
"passed",
"into",
"the",
"context",
"from",
"the"... | 56495f15977d76697972acac571792e8fd67003d | https://github.com/rsinger86/drf-flex-fields/blob/56495f15977d76697972acac571792e8fd67003d/rest_flex_fields/serializers.py#L196-L221 | train | 34,940 |
rsinger86/drf-flex-fields | rest_flex_fields/utils.py | is_expanded | def is_expanded(request, key):
""" Examines request object to return boolean of whether
passed field is expanded.
"""
expand = request.query_params.get("expand", "")
expand_fields = []
for e in expand.split(","):
expand_fields.extend([e for e in e.split(".")])
return "~all" in expand_fields or key in expand_fields | python | def is_expanded(request, key):
""" Examines request object to return boolean of whether
passed field is expanded.
"""
expand = request.query_params.get("expand", "")
expand_fields = []
for e in expand.split(","):
expand_fields.extend([e for e in e.split(".")])
return "~all" in expand_fields or key in expand_fields | [
"def",
"is_expanded",
"(",
"request",
",",
"key",
")",
":",
"expand",
"=",
"request",
".",
"query_params",
".",
"get",
"(",
"\"expand\"",
",",
"\"\"",
")",
"expand_fields",
"=",
"[",
"]",
"for",
"e",
"in",
"expand",
".",
"split",
"(",
"\",\"",
")",
"... | Examines request object to return boolean of whether
passed field is expanded. | [
"Examines",
"request",
"object",
"to",
"return",
"boolean",
"of",
"whether",
"passed",
"field",
"is",
"expanded",
"."
] | 56495f15977d76697972acac571792e8fd67003d | https://github.com/rsinger86/drf-flex-fields/blob/56495f15977d76697972acac571792e8fd67003d/rest_flex_fields/utils.py#L1-L11 | train | 34,941 |
chinuno-usami/CuteR | CuteR/CuteR.py | color_replace | def color_replace(image, color):
"""Replace black with other color
:color: custom color (r,g,b,a)
:image: image to replace color
:returns: TODO
"""
pixels = image.load()
size = image.size[0]
for width in range(size):
for height in range(size):
r, g, b, a = pixels[width, height]
if (r, g, b, a) == (0,0,0,255):
pixels[width,height] = color
else:
pixels[width,height] = (r,g,b,color[3]) | python | def color_replace(image, color):
"""Replace black with other color
:color: custom color (r,g,b,a)
:image: image to replace color
:returns: TODO
"""
pixels = image.load()
size = image.size[0]
for width in range(size):
for height in range(size):
r, g, b, a = pixels[width, height]
if (r, g, b, a) == (0,0,0,255):
pixels[width,height] = color
else:
pixels[width,height] = (r,g,b,color[3]) | [
"def",
"color_replace",
"(",
"image",
",",
"color",
")",
":",
"pixels",
"=",
"image",
".",
"load",
"(",
")",
"size",
"=",
"image",
".",
"size",
"[",
"0",
"]",
"for",
"width",
"in",
"range",
"(",
"size",
")",
":",
"for",
"height",
"in",
"range",
"... | Replace black with other color
:color: custom color (r,g,b,a)
:image: image to replace color
:returns: TODO | [
"Replace",
"black",
"with",
"other",
"color"
] | ba4e017d3460bda9c1ccaf90723ddbfd4cc5426c | https://github.com/chinuno-usami/CuteR/blob/ba4e017d3460bda9c1ccaf90723ddbfd4cc5426c/CuteR/CuteR.py#L10-L26 | train | 34,942 |
maroba/findiff | findiff/vector.py | wrap_in_ndarray | def wrap_in_ndarray(value):
"""Wraps the argument in a numpy.ndarray.
If value is a scalar, it is converted in a list first.
If value is array-like, the shape is conserved.
"""
if hasattr(value, "__len__"):
return np.array(value)
else:
return np.array([value]) | python | def wrap_in_ndarray(value):
"""Wraps the argument in a numpy.ndarray.
If value is a scalar, it is converted in a list first.
If value is array-like, the shape is conserved.
"""
if hasattr(value, "__len__"):
return np.array(value)
else:
return np.array([value]) | [
"def",
"wrap_in_ndarray",
"(",
"value",
")",
":",
"if",
"hasattr",
"(",
"value",
",",
"\"__len__\"",
")",
":",
"return",
"np",
".",
"array",
"(",
"value",
")",
"else",
":",
"return",
"np",
".",
"array",
"(",
"[",
"value",
"]",
")"
] | Wraps the argument in a numpy.ndarray.
If value is a scalar, it is converted in a list first.
If value is array-like, the shape is conserved. | [
"Wraps",
"the",
"argument",
"in",
"a",
"numpy",
".",
"ndarray",
"."
] | 5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac | https://github.com/maroba/findiff/blob/5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac/findiff/vector.py#L264-L275 | train | 34,943 |
maroba/findiff | findiff/coefs.py | coefficients_non_uni | def coefficients_non_uni(deriv, acc, coords, idx):
"""
Calculates the finite difference coefficients for given derivative order and accuracy order.
Assumes that the underlying grid is non-uniform.
:param deriv: int > 0: The derivative order.
:param acc: even int > 0: The accuracy order.
:param coords: 1D numpy.ndarray: the coordinates of the axis for the partial derivative
:param idx: int: index of the grid position where to calculate the coefficients
:return: dict with the finite difference coefficients and corresponding offsets
"""
if acc % 2 == 1:
acc += 1
num_central = 2 * math.floor((deriv + 1) / 2) - 1 + acc
num_side = num_central // 2
if deriv % 2 == 0:
num_coef = num_central + 1
else:
num_coef = num_central
if idx < num_side:
matrix = _build_matrix_non_uniform(0, num_coef - 1, coords, idx)
rhs = _build_rhs(0, num_coef - 1, deriv)
ret = {
"coefficients": np.linalg.solve(matrix, rhs),
"offsets": np.array([p for p in range(num_coef)])
}
elif idx >= len(coords) - num_side:
matrix = _build_matrix_non_uniform(num_coef - 1, 0, coords, idx)
rhs = _build_rhs(num_coef - 1, 0, deriv)
ret = {
"coefficients": np.linalg.solve(matrix, rhs),
"offsets": np.array([p for p in range(-num_coef + 1, 1)])
}
else:
matrix = _build_matrix_non_uniform(num_side, num_side, coords, idx)
rhs = _build_rhs(num_side, num_side, deriv)
ret = {
"coefficients": np.linalg.solve(matrix, rhs),
"offsets": np.array([p for p in range(-num_side, num_side + 1)])
}
return ret | python | def coefficients_non_uni(deriv, acc, coords, idx):
"""
Calculates the finite difference coefficients for given derivative order and accuracy order.
Assumes that the underlying grid is non-uniform.
:param deriv: int > 0: The derivative order.
:param acc: even int > 0: The accuracy order.
:param coords: 1D numpy.ndarray: the coordinates of the axis for the partial derivative
:param idx: int: index of the grid position where to calculate the coefficients
:return: dict with the finite difference coefficients and corresponding offsets
"""
if acc % 2 == 1:
acc += 1
num_central = 2 * math.floor((deriv + 1) / 2) - 1 + acc
num_side = num_central // 2
if deriv % 2 == 0:
num_coef = num_central + 1
else:
num_coef = num_central
if idx < num_side:
matrix = _build_matrix_non_uniform(0, num_coef - 1, coords, idx)
rhs = _build_rhs(0, num_coef - 1, deriv)
ret = {
"coefficients": np.linalg.solve(matrix, rhs),
"offsets": np.array([p for p in range(num_coef)])
}
elif idx >= len(coords) - num_side:
matrix = _build_matrix_non_uniform(num_coef - 1, 0, coords, idx)
rhs = _build_rhs(num_coef - 1, 0, deriv)
ret = {
"coefficients": np.linalg.solve(matrix, rhs),
"offsets": np.array([p for p in range(-num_coef + 1, 1)])
}
else:
matrix = _build_matrix_non_uniform(num_side, num_side, coords, idx)
rhs = _build_rhs(num_side, num_side, deriv)
ret = {
"coefficients": np.linalg.solve(matrix, rhs),
"offsets": np.array([p for p in range(-num_side, num_side + 1)])
}
return ret | [
"def",
"coefficients_non_uni",
"(",
"deriv",
",",
"acc",
",",
"coords",
",",
"idx",
")",
":",
"if",
"acc",
"%",
"2",
"==",
"1",
":",
"acc",
"+=",
"1",
"num_central",
"=",
"2",
"*",
"math",
".",
"floor",
"(",
"(",
"deriv",
"+",
"1",
")",
"/",
"2... | Calculates the finite difference coefficients for given derivative order and accuracy order.
Assumes that the underlying grid is non-uniform.
:param deriv: int > 0: The derivative order.
:param acc: even int > 0: The accuracy order.
:param coords: 1D numpy.ndarray: the coordinates of the axis for the partial derivative
:param idx: int: index of the grid position where to calculate the coefficients
:return: dict with the finite difference coefficients and corresponding offsets | [
"Calculates",
"the",
"finite",
"difference",
"coefficients",
"for",
"given",
"derivative",
"order",
"and",
"accuracy",
"order",
".",
"Assumes",
"that",
"the",
"underlying",
"grid",
"is",
"non",
"-",
"uniform",
"."
] | 5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac | https://github.com/maroba/findiff/blob/5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac/findiff/coefs.py#L63-L120 | train | 34,944 |
maroba/findiff | findiff/coefs.py | _build_matrix | def _build_matrix(p, q, deriv):
"""Constructs the equation system matrix for the finite difference coefficients"""
A = [([1 for _ in range(-p, q+1)])]
for i in range(1, p + q + 1):
A.append([j**i for j in range(-p, q+1)])
return np.array(A) | python | def _build_matrix(p, q, deriv):
"""Constructs the equation system matrix for the finite difference coefficients"""
A = [([1 for _ in range(-p, q+1)])]
for i in range(1, p + q + 1):
A.append([j**i for j in range(-p, q+1)])
return np.array(A) | [
"def",
"_build_matrix",
"(",
"p",
",",
"q",
",",
"deriv",
")",
":",
"A",
"=",
"[",
"(",
"[",
"1",
"for",
"_",
"in",
"range",
"(",
"-",
"p",
",",
"q",
"+",
"1",
")",
"]",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"p",
"+",
"q",... | Constructs the equation system matrix for the finite difference coefficients | [
"Constructs",
"the",
"equation",
"system",
"matrix",
"for",
"the",
"finite",
"difference",
"coefficients"
] | 5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac | https://github.com/maroba/findiff/blob/5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac/findiff/coefs.py#L123-L128 | train | 34,945 |
maroba/findiff | findiff/coefs.py | _build_rhs | def _build_rhs(p, q, deriv):
"""The right hand side of the equation system matrix"""
b = [0 for _ in range(p+q+1)]
b[deriv] = math.factorial(deriv)
return np.array(b) | python | def _build_rhs(p, q, deriv):
"""The right hand side of the equation system matrix"""
b = [0 for _ in range(p+q+1)]
b[deriv] = math.factorial(deriv)
return np.array(b) | [
"def",
"_build_rhs",
"(",
"p",
",",
"q",
",",
"deriv",
")",
":",
"b",
"=",
"[",
"0",
"for",
"_",
"in",
"range",
"(",
"p",
"+",
"q",
"+",
"1",
")",
"]",
"b",
"[",
"deriv",
"]",
"=",
"math",
".",
"factorial",
"(",
"deriv",
")",
"return",
"np"... | The right hand side of the equation system matrix | [
"The",
"right",
"hand",
"side",
"of",
"the",
"equation",
"system",
"matrix"
] | 5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac | https://github.com/maroba/findiff/blob/5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac/findiff/coefs.py#L131-L136 | train | 34,946 |
maroba/findiff | findiff/coefs.py | _build_matrix_non_uniform | def _build_matrix_non_uniform(p, q, coords, k):
"""Constructs the equation matrix for the finite difference coefficients of non-uniform grids at location k"""
A = [[1] * (p+q+1)]
for i in range(1, p + q + 1):
line = [(coords[k+j] - coords[k])**i for j in range(-p, q+1)]
A.append(line)
return np.array(A) | python | def _build_matrix_non_uniform(p, q, coords, k):
"""Constructs the equation matrix for the finite difference coefficients of non-uniform grids at location k"""
A = [[1] * (p+q+1)]
for i in range(1, p + q + 1):
line = [(coords[k+j] - coords[k])**i for j in range(-p, q+1)]
A.append(line)
return np.array(A) | [
"def",
"_build_matrix_non_uniform",
"(",
"p",
",",
"q",
",",
"coords",
",",
"k",
")",
":",
"A",
"=",
"[",
"[",
"1",
"]",
"*",
"(",
"p",
"+",
"q",
"+",
"1",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"p",
"+",
"q",
"+",
"1",
")",
... | Constructs the equation matrix for the finite difference coefficients of non-uniform grids at location k | [
"Constructs",
"the",
"equation",
"matrix",
"for",
"the",
"finite",
"difference",
"coefficients",
"of",
"non",
"-",
"uniform",
"grids",
"at",
"location",
"k"
] | 5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac | https://github.com/maroba/findiff/blob/5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac/findiff/coefs.py#L139-L145 | train | 34,947 |
maroba/findiff | findiff/operators.py | FinDiff.set_accuracy | def set_accuracy(self, acc):
""" Sets the accuracy order of the finite difference scheme.
If the FinDiff object is not a raw partial derivative but a composition of derivatives
the accuracy order will be propagated to the child operators.
"""
self.acc = acc
if self.child:
self.child.set_accuracy(acc) | python | def set_accuracy(self, acc):
""" Sets the accuracy order of the finite difference scheme.
If the FinDiff object is not a raw partial derivative but a composition of derivatives
the accuracy order will be propagated to the child operators.
"""
self.acc = acc
if self.child:
self.child.set_accuracy(acc) | [
"def",
"set_accuracy",
"(",
"self",
",",
"acc",
")",
":",
"self",
".",
"acc",
"=",
"acc",
"if",
"self",
".",
"child",
":",
"self",
".",
"child",
".",
"set_accuracy",
"(",
"acc",
")"
] | Sets the accuracy order of the finite difference scheme.
If the FinDiff object is not a raw partial derivative but a composition of derivatives
the accuracy order will be propagated to the child operators. | [
"Sets",
"the",
"accuracy",
"order",
"of",
"the",
"finite",
"difference",
"scheme",
".",
"If",
"the",
"FinDiff",
"object",
"is",
"not",
"a",
"raw",
"partial",
"derivative",
"but",
"a",
"composition",
"of",
"derivatives",
"the",
"accuracy",
"order",
"will",
"b... | 5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac | https://github.com/maroba/findiff/blob/5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac/findiff/operators.py#L166-L173 | train | 34,948 |
maroba/findiff | findiff/operators.py | FinDiff.diff | def diff(self, y, h, deriv, dim, coefs):
"""The core function to take a partial derivative on a uniform grid.
"""
try:
npts = y.shape[dim]
except AttributeError as err:
raise ValueError("FinDiff objects can only be applied to arrays or evaluated(!) functions returning arrays") from err
scheme = "center"
weights = coefs[scheme]["coefficients"]
offsets = coefs[scheme]["offsets"]
nbndry = len(weights) // 2
ref_slice = slice(nbndry, npts - nbndry, 1)
off_slices = [self._shift_slice(ref_slice, offsets[k], npts) for k in range(len(offsets))]
yd = np.zeros_like(y)
self._apply_to_array(yd, y, weights, off_slices, ref_slice, dim)
scheme = "forward"
weights = coefs[scheme]["coefficients"]
offsets = coefs[scheme]["offsets"]
ref_slice = slice(0, nbndry, 1)
off_slices = [self._shift_slice(ref_slice, offsets[k], npts) for k in range(len(offsets))]
self._apply_to_array(yd, y, weights, off_slices, ref_slice, dim)
scheme = "backward"
weights = coefs[scheme]["coefficients"]
offsets = coefs[scheme]["offsets"]
ref_slice = slice(npts - nbndry, npts, 1)
off_slices = [self._shift_slice(ref_slice, offsets[k], npts) for k in range(len(offsets))]
self._apply_to_array(yd, y, weights, off_slices, ref_slice, dim)
h_inv = 1. / h ** deriv
return yd * h_inv | python | def diff(self, y, h, deriv, dim, coefs):
"""The core function to take a partial derivative on a uniform grid.
"""
try:
npts = y.shape[dim]
except AttributeError as err:
raise ValueError("FinDiff objects can only be applied to arrays or evaluated(!) functions returning arrays") from err
scheme = "center"
weights = coefs[scheme]["coefficients"]
offsets = coefs[scheme]["offsets"]
nbndry = len(weights) // 2
ref_slice = slice(nbndry, npts - nbndry, 1)
off_slices = [self._shift_slice(ref_slice, offsets[k], npts) for k in range(len(offsets))]
yd = np.zeros_like(y)
self._apply_to_array(yd, y, weights, off_slices, ref_slice, dim)
scheme = "forward"
weights = coefs[scheme]["coefficients"]
offsets = coefs[scheme]["offsets"]
ref_slice = slice(0, nbndry, 1)
off_slices = [self._shift_slice(ref_slice, offsets[k], npts) for k in range(len(offsets))]
self._apply_to_array(yd, y, weights, off_slices, ref_slice, dim)
scheme = "backward"
weights = coefs[scheme]["coefficients"]
offsets = coefs[scheme]["offsets"]
ref_slice = slice(npts - nbndry, npts, 1)
off_slices = [self._shift_slice(ref_slice, offsets[k], npts) for k in range(len(offsets))]
self._apply_to_array(yd, y, weights, off_slices, ref_slice, dim)
h_inv = 1. / h ** deriv
return yd * h_inv | [
"def",
"diff",
"(",
"self",
",",
"y",
",",
"h",
",",
"deriv",
",",
"dim",
",",
"coefs",
")",
":",
"try",
":",
"npts",
"=",
"y",
".",
"shape",
"[",
"dim",
"]",
"except",
"AttributeError",
"as",
"err",
":",
"raise",
"ValueError",
"(",
"\"FinDiff obje... | The core function to take a partial derivative on a uniform grid. | [
"The",
"core",
"function",
"to",
"take",
"a",
"partial",
"derivative",
"on",
"a",
"uniform",
"grid",
"."
] | 5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac | https://github.com/maroba/findiff/blob/5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac/findiff/operators.py#L215-L255 | train | 34,949 |
maroba/findiff | findiff/operators.py | FinDiff.diff_non_uni | def diff_non_uni(self, y, coords, dim, coefs):
"""The core function to take a partial derivative on a non-uniform grid"""
yd = np.zeros_like(y)
ndims = len(y.shape)
multi_slice = [slice(None, None)] * ndims
ref_multi_slice = [slice(None, None)] * ndims
for i, x in enumerate(coords):
weights = coefs[i]["coefficients"]
offsets = coefs[i]["offsets"]
ref_multi_slice[dim] = i
for off, w in zip(offsets, weights):
multi_slice[dim] = i + off
yd[ref_multi_slice] += w * y[multi_slice]
return yd | python | def diff_non_uni(self, y, coords, dim, coefs):
"""The core function to take a partial derivative on a non-uniform grid"""
yd = np.zeros_like(y)
ndims = len(y.shape)
multi_slice = [slice(None, None)] * ndims
ref_multi_slice = [slice(None, None)] * ndims
for i, x in enumerate(coords):
weights = coefs[i]["coefficients"]
offsets = coefs[i]["offsets"]
ref_multi_slice[dim] = i
for off, w in zip(offsets, weights):
multi_slice[dim] = i + off
yd[ref_multi_slice] += w * y[multi_slice]
return yd | [
"def",
"diff_non_uni",
"(",
"self",
",",
"y",
",",
"coords",
",",
"dim",
",",
"coefs",
")",
":",
"yd",
"=",
"np",
".",
"zeros_like",
"(",
"y",
")",
"ndims",
"=",
"len",
"(",
"y",
".",
"shape",
")",
"multi_slice",
"=",
"[",
"slice",
"(",
"None",
... | The core function to take a partial derivative on a non-uniform grid | [
"The",
"core",
"function",
"to",
"take",
"a",
"partial",
"derivative",
"on",
"a",
"non",
"-",
"uniform",
"grid"
] | 5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac | https://github.com/maroba/findiff/blob/5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac/findiff/operators.py#L257-L275 | train | 34,950 |
maroba/findiff | findiff/operators.py | FinDiff._apply_to_array | def _apply_to_array(self, yd, y, weights, off_slices, ref_slice, dim):
"""Applies the finite differences only to slices along a given axis"""
ndims = len(y.shape)
all = slice(None, None, 1)
ref_multi_slice = [all] * ndims
ref_multi_slice[dim] = ref_slice
for w, s in zip(weights, off_slices):
off_multi_slice = [all] * ndims
off_multi_slice[dim] = s
if abs(1 - w) < 1.E-14:
yd[ref_multi_slice] += y[off_multi_slice]
else:
yd[ref_multi_slice] += w * y[off_multi_slice] | python | def _apply_to_array(self, yd, y, weights, off_slices, ref_slice, dim):
"""Applies the finite differences only to slices along a given axis"""
ndims = len(y.shape)
all = slice(None, None, 1)
ref_multi_slice = [all] * ndims
ref_multi_slice[dim] = ref_slice
for w, s in zip(weights, off_slices):
off_multi_slice = [all] * ndims
off_multi_slice[dim] = s
if abs(1 - w) < 1.E-14:
yd[ref_multi_slice] += y[off_multi_slice]
else:
yd[ref_multi_slice] += w * y[off_multi_slice] | [
"def",
"_apply_to_array",
"(",
"self",
",",
"yd",
",",
"y",
",",
"weights",
",",
"off_slices",
",",
"ref_slice",
",",
"dim",
")",
":",
"ndims",
"=",
"len",
"(",
"y",
".",
"shape",
")",
"all",
"=",
"slice",
"(",
"None",
",",
"None",
",",
"1",
")",... | Applies the finite differences only to slices along a given axis | [
"Applies",
"the",
"finite",
"differences",
"only",
"to",
"slices",
"along",
"a",
"given",
"axis"
] | 5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac | https://github.com/maroba/findiff/blob/5d1ccfa966ce2bd556b4425583f8b9bbcbf183ac/findiff/operators.py#L277-L293 | train | 34,951 |
def get_current_user_info(anchore_auth):
    """
    Return the metadata about the current user as supplied by the anchore.io service. Includes permissions and tier access.

    :param anchore_auth: dict with 'client_info_url', 'username', and 'password' keys
    :return: dict of user metadata parsed from the service's JSON response
    :raises requests.HTTPError: on any non-200 response from the service
    """
    user_url = anchore_auth['client_info_url'] + '/' + anchore_auth['username']
    user_timeout = 60
    # Actually apply the timeout: previously it was declared but never passed,
    # so an unresponsive service could hang this call forever. The unused
    # 'retries' local has been dropped.
    result = requests.get(user_url,
                          headers={'x-anchore-password': anchore_auth['password']},
                          timeout=user_timeout)
    if result.status_code == 200:
        user_data = json.loads(result.content)
    else:
        raise requests.HTTPError('Error response from service: {}'.format(result.status_code))
    return user_data
"""
Return the metadata about the current user as supplied by the anchore.io service. Includes permissions and tier access.
:return: Dict of user metadata
"""
user_url = anchore_auth['client_info_url'] + '/' + anchore_auth['username']
user_timeout = 60
retries = 3
result = requests.get(user_url, headers={'x-anchore-password': anchore_auth['password']})
if result.status_code == 200:
user_data = json.loads(result.content)
else:
raise requests.HTTPError('Error response from service: {}'.format(result.status_code))
return user_data | [
"def",
"get_current_user_info",
"(",
"anchore_auth",
")",
":",
"user_url",
"=",
"anchore_auth",
"[",
"'client_info_url'",
"]",
"+",
"'/'",
"+",
"anchore_auth",
"[",
"'username'",
"]",
"user_timeout",
"=",
"60",
"retries",
"=",
"3",
"result",
"=",
"requests",
"... | Return the metadata about the current user as supplied by the anchore.io service. Includes permissions and tier access.
:return: Dict of user metadata | [
"Return",
"the",
"metadata",
"about",
"the",
"current",
"user",
"as",
"supplied",
"by",
"the",
"anchore",
".",
"io",
"service",
".",
"Includes",
"permissions",
"and",
"tier",
"access",
"."
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/anchore_auth.py#L47-L62 | train | 34,952 |
def show(details):
    """
    Show list of Anchore data policies.
    """
    exit_code = 0
    try:
        policymeta = anchore_policy.load_policymeta()
        if details:
            # Full bundle dump.
            payload = policymeta
        else:
            # Condensed view keyed by bundle name.
            summary = {}
            for field in ('id', 'policies', 'whitelists', 'mappings'):
                summary[field] = policymeta[field]
            payload = {policymeta['name']: summary}
        anchore_print(payload, do_formatting=True)
    except Exception as err:
        anchore_print_err('operation failed')
        exit_code = 1
    sys.exit(exit_code)
"""
Show list of Anchore data policies.
"""
ecode = 0
try:
policymeta = anchore_policy.load_policymeta()
if details:
anchore_print(policymeta, do_formatting=True)
else:
output = {}
name = policymeta['name']
output[name] = {}
output[name]['id'] = policymeta['id']
output[name]['policies'] = policymeta['policies']
output[name]['whitelists'] = policymeta['whitelists']
output[name]['mappings'] = policymeta['mappings']
anchore_print(output, do_formatting=True)
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | [
"def",
"show",
"(",
"details",
")",
":",
"ecode",
"=",
"0",
"try",
":",
"policymeta",
"=",
"anchore_policy",
".",
"load_policymeta",
"(",
")",
"if",
"details",
":",
"anchore_print",
"(",
"policymeta",
",",
"do_formatting",
"=",
"True",
")",
"else",
":",
... | Show list of Anchore data policies. | [
"Show",
"list",
"of",
"Anchore",
"data",
"policies",
"."
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/policybundle.py#L30-L58 | train | 34,953 |
def extended_help_option(extended_help=None, *param_decls, **attrs):
    """
    Decorator adding a ``--extended-help`` option to a click command.

    Based on the click.help_option code. When the flag is given, the command's
    help text is replaced by *extended_help* (or, if not provided, the command
    callback's docstring), the help page is printed, and the program exits --
    a third level of help verbosity suitable for use as a manpage.
    Implemented as an eager option, like :func:`version_option`.
    All arguments are forwarded to :func:`option`.
    """
    def decorator(f):
        def callback(ctx, param, value):
            # Only fire when the flag was actually supplied.
            if not value or ctx.resilient_parsing:
                return
            ctx.command.help = extended_help if extended_help else ctx.command.callback.__doc__
            click.echo(ctx.get_help(), color=ctx.color)
            ctx.exit()

        option_defaults = {
            'is_flag': True,
            'expose_value': False,
            'help': 'Show extended help content, similar to manpage, and exit.',
            'is_eager': True,
        }
        for key, value in option_defaults.items():
            attrs.setdefault(key, value)
        attrs['callback'] = callback
        return click.option(*(param_decls or ('--extended-help',)), **attrs)(f)
    return decorator
"""
Based on the click.help_option code.
Adds a ``--extended-help`` option which immediately ends the program
printing out the extended extended-help page. Defaults to using the
callback's doc string, but can be given an explicit value as well.
This is intended for use as a decorator on a command to provide a 3rd level
of help verbosity suitable for use as a manpage (though not formatted as such explicitly).
Like :func:`version_option`, this is implemented as eager option that
prints in the callback and exits.
All arguments are forwarded to :func:`option`.
"""
def decorator(f):
def callback(ctx, param, value):
if value and not ctx.resilient_parsing:
if not extended_help:
ctx.command.help = ctx.command.callback.__doc__
click.echo(ctx.get_help(), color=ctx.color)
else:
ctx.command.help = extended_help
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit()
attrs.setdefault('is_flag', True)
attrs.setdefault('expose_value', False)
attrs.setdefault('help', 'Show extended help content, similar to manpage, and exit.')
attrs.setdefault('is_eager', True)
attrs['callback'] = callback
return click.option(*(param_decls or ('--extended-help',)), **attrs)(f)
return decorator | [
"def",
"extended_help_option",
"(",
"extended_help",
"=",
"None",
",",
"*",
"param_decls",
",",
"*",
"*",
"attrs",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"def",
"callback",
"(",
"ctx",
",",
"param",
",",
"value",
")",
":",
"if",
"value",
"... | Based on the click.help_option code.
Adds a ``--extended-help`` option which immediately ends the program
printing out the extended extended-help page. Defaults to using the
callback's doc string, but can be given an explicit value as well.
This is intended for use as a decorator on a command to provide a 3rd level
of help verbosity suitable for use as a manpage (though not formatted as such explicitly).
Like :func:`version_option`, this is implemented as eager option that
prints in the callback and exits.
All arguments are forwarded to :func:`option`. | [
"Based",
"on",
"the",
"click",
".",
"help_option",
"code",
"."
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/common.py#L14-L49 | train | 34,954 |
def anchore_print(msg, do_formatting=False):
    """
    Print to stdout using the proper formatting for the command.

    :param msg: output to be printed, either an object or a string; objects
        are serialized by the configured formatter when formatting is requested
    :param do_formatting: when True, run *msg* through the formatter first
    :return: None
    """
    output = formatter(msg) if do_formatting else msg
    click.echo(output)
"""
Print to stdout using the proper formatting for the command.
:param msg: output to be printed, either an object or a string. Objects will be serialized according to config
:return:
"""
if do_formatting:
click.echo(formatter(msg))
else:
click.echo(msg) | [
"def",
"anchore_print",
"(",
"msg",
",",
"do_formatting",
"=",
"False",
")",
":",
"if",
"do_formatting",
":",
"click",
".",
"echo",
"(",
"formatter",
"(",
"msg",
")",
")",
"else",
":",
"click",
".",
"echo",
"(",
"msg",
")"
] | Print to stdout using the proper formatting for the command.
:param msg: output to be printed, either an object or a string. Objects will be serialized according to config
:return: | [
"Print",
"to",
"stdout",
"using",
"the",
"proper",
"formatting",
"for",
"the",
"command",
"."
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/common.py#L106-L116 | train | 34,955 |
def build_image_list(config, image, imagefile, all_local, include_allanchore, dockerfile=None, exclude_file=None):
    """
    Given option inputs from the cli, construct a dict of image ids. Includes all found, then applies exclusions.

    :param config: anchore configuration object (kept for interface compatibility)
    :param image: single image id/name to include
    :param imagefile: path to a file listing image ids, optionally with a dockerfile per line
    :param all_local: if truthy, include all non-dangling images on the local docker host
    :param include_allanchore: if truthy, include all images already known to the anchore DB
    :param dockerfile: dockerfile path to associate with *image*
    :param exclude_file: path to a file of image names to exclude from the result
    :return: dict mapping image id -> {'dockerfile': path-or-None}
    :raises click.BadOptionUsage: on invalid combinations of input options
    :raises Exception: if docker is required but unreachable
    """
    if not image and not (imagefile or all_local):
        raise click.BadOptionUsage('No input found for image source. One of <image>, <imagefile>, or <all> must be specified')
    if image and imagefile:
        raise click.BadOptionUsage('Only one of <image> and <imagefile> can be specified')

    # Names that will be filtered out of the final result.
    filter_images = []
    if exclude_file:
        with open(exclude_file) as f:
            for line in f.readlines():
                filter_images.append(line.strip())

    imagelist = {}
    if image:
        imagelist[image] = {'dockerfile': dockerfile}

    if imagefile:
        # Each entry is [imageId] or [imageId, dockerfile]; iterate directly
        # instead of indexing by range(len(...)), and use a length check
        # rather than a bare except for the optional dockerfile field.
        for entry in anchore_utils.read_kvfile_tolist(imagefile):
            imageId = entry[0]
            dfile = entry[1] if len(entry) > 1 else None
            imagelist[imageId] = {'dockerfile': dfile}

    if all_local:
        docker_cli = contexts['docker_cli']
        if docker_cli:
            for img_id in docker_cli.images(all=True, quiet=True, filters={'dangling': False}):
                if img_id not in imagelist and img_id not in filter_images:
                    imagelist[img_id] = {'dockerfile': None}
        else:
            raise Exception("Could not load any images from local docker host - is docker running?")

    if include_allanchore:
        ret = contexts['anchore_db'].load_all_images().keys()
        if ret and len(ret) > 0:
            # NOTE: matches original behavior -- every id in the union gets its
            # dockerfile reset to None, including ids already in imagelist.
            for img_id in list(set(imagelist.keys()) | set(ret)):
                imagelist[img_id] = {'dockerfile': None}

    # Remove excluded items; docker_cli is fetched/validated once (only when
    # there is actually something to exclude, preserving original behavior
    # for an empty exclusion list).
    if filter_images:
        docker_cli = contexts['docker_cli']
        if not docker_cli:
            raise Exception("Could not query docker - is docker running?")
        for excluded in filter_images:
            for img in docker_cli.images(name=excluded, quiet=True):
                imagelist.pop(img, None)

    return imagelist
"""Given option inputs from the cli, construct a list of image ids. Includes all found with no exclusion logic"""
if not image and not (imagefile or all_local):
raise click.BadOptionUsage('No input found for image source. One of <image>, <imagefile>, or <all> must be specified')
if image and imagefile:
raise click.BadOptionUsage('Only one of <image> and <imagefile> can be specified')
filter_images = []
if exclude_file:
with open(exclude_file) as f:
for line in f.readlines():
filter_images.append(line.strip())
imagelist = {}
if image:
imagelist[image] = {'dockerfile':dockerfile}
if imagefile:
filelist = anchore_utils.read_kvfile_tolist(imagefile)
for i in range(len(filelist)):
l = filelist[i]
imageId = l[0]
try:
dfile = l[1]
except:
dfile = None
imagelist[imageId] = {'dockerfile':dfile}
if all_local:
docker_cli = contexts['docker_cli']
if docker_cli:
for f in docker_cli.images(all=True, quiet=True, filters={'dangling': False}):
if f not in imagelist and f not in filter_images:
imagelist[f] = {'dockerfile':None}
else:
raise Exception("Could not load any images from local docker host - is docker running?")
if include_allanchore:
ret = contexts['anchore_db'].load_all_images().keys()
if ret and len(ret) > 0:
for l in list(set(imagelist.keys()) | set(ret)):
imagelist[l] = {'dockerfile':None}
# Remove excluded items
for excluded in filter_images:
docker_cli = contexts['docker_cli']
if not docker_cli:
raise Exception("Could not query docker - is docker running?")
for img in docker_cli.images(name=excluded, quiet=True):
imagelist.pop(img, None)
return imagelist | [
"def",
"build_image_list",
"(",
"config",
",",
"image",
",",
"imagefile",
",",
"all_local",
",",
"include_allanchore",
",",
"dockerfile",
"=",
"None",
",",
"exclude_file",
"=",
"None",
")",
":",
"if",
"not",
"image",
"and",
"not",
"(",
"imagefile",
"or",
"... | Given option inputs from the cli, construct a list of image ids. Includes all found with no exclusion logic | [
"Given",
"option",
"inputs",
"from",
"the",
"cli",
"construct",
"a",
"list",
"of",
"image",
"ids",
".",
"Includes",
"all",
"found",
"with",
"no",
"exclusion",
"logic"
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/common.py#L119-L172 | train | 34,956 |
def init_output_formatters(output_verbosity='normal', stderr=sys.stderr, logfile=None, debug_logfile=None):
    """
    Initialize the CLI logging scheme.
    :param output_verbosity: 'quiet','normal','verbose', or 'debug' controls the output to stdout and its format
    :param stderr: stream for stderr output, default=stderr, pass a file path/string to have stderr in a file
    :param logfile: optional path; when set, INFO-level output is also written there
    :param debug_logfile: optional path; when set, full DEBUG output (with tracebacks) is also written there
    :return:
    """
    if output_verbosity not in console_verbosity_options:
        raise ValueError('output_verbosity must be one of: %s' % console_verbosity_options.keys())
    # Initialize debug log file, 'anchore-debug.log'. This log has stack-traces and is expected to be human read
    # and intended for developers and debugging, not an operational log.
    # Configure stderr behavior. All errors go to screen
    stderr_handler = logging.StreamHandler(stderr)
    if output_verbosity == 'quiet':
        # Quiet: only errors reach the console, with tracebacks suppressed.
        stderr_handler.setLevel(level='ERROR')
        stderr_handler.setFormatter(NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT))
        logging.root.setLevel('ERROR') # Allow all at top level, filter specifics for each handler
    elif output_verbosity == 'normal':
        # The specific console logger
        # Normal: INFO from 'anchore*' loggers; other loggers only at ERROR.
        stderr_handler.setLevel('INFO')
        stderr_formatter = NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT)
        stderr_handler.setFormatter(stderr_formatter)
        stderr_handler.addFilter(LoggerNamePrefixFilter(prefix='anchore', non_match_loglevel='ERROR'))
        logging.root.setLevel('INFO')
    elif output_verbosity == 'verbose':
        # Verbose: INFO from all loggers, tracebacks still suppressed.
        stderr_handler.setLevel('INFO')
        stderr_handler.setFormatter(NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT))
        logging.root.setLevel('INFO')
    elif output_verbosity == 'debug':
        # Debug: everything, with full tracebacks (plain Formatter).
        stderr_handler.setLevel(level='DEBUG')
        stderr_handler.setFormatter(logging.Formatter(fmt=DEBUG_FORMAT))
        logging.root.setLevel('DEBUG')
    logging.root.addHandler(stderr_handler)
    if debug_logfile:
        # Debug file gets everything regardless of console verbosity; note the
        # root level is forced to DEBUG, which also affects other handlers.
        debug_filehandler = logging.FileHandler(debug_logfile)
        debug_filehandler.setLevel('DEBUG')
        formatter = logging.Formatter(fmt=DEBUG_LOGFILE_FORMAT)
        debug_filehandler.setFormatter(formatter)
        logging.root.addHandler(debug_filehandler)
        logging.root.setLevel('DEBUG')
    if logfile:
        # Operational log file: INFO and above, no tracebacks.
        filehandler = logging.FileHandler(logfile)
        filehandler.setLevel('INFO')
        filehandler.setFormatter(NoTracebackFormatter(fmt=LOGFILE_FORMAT, err_fmt=LOGFILE_FORMAT))
        logging.root.addHandler(filehandler)
"""
Initialize the CLI logging scheme.
:param output_verbosity: 'quiet','normal','verbose', or 'debug' controls the output to stdout and its format
:param stderr: stream for stderr output, default=stderr, pass a file path/string to have stderr in a file
:return:
"""
if output_verbosity not in console_verbosity_options:
raise ValueError('output_verbosity must be one of: %s' % console_verbosity_options.keys())
# Initialize debug log file, 'anchore-debug.log'. This log has stack-traces and is expected to be human read
# and intended for developers and debugging, not an operational log.
# Configure stderr behavior. All errors go to screen
stderr_handler = logging.StreamHandler(stderr)
if output_verbosity == 'quiet':
stderr_handler.setLevel(level='ERROR')
stderr_handler.setFormatter(NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT))
logging.root.setLevel('ERROR') # Allow all at top level, filter specifics for each handler
elif output_verbosity == 'normal':
# The specific console logger
stderr_handler.setLevel('INFO')
stderr_formatter = NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT)
stderr_handler.setFormatter(stderr_formatter)
stderr_handler.addFilter(LoggerNamePrefixFilter(prefix='anchore', non_match_loglevel='ERROR'))
logging.root.setLevel('INFO')
elif output_verbosity == 'verbose':
stderr_handler.setLevel('INFO')
stderr_handler.setFormatter(NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT))
logging.root.setLevel('INFO')
elif output_verbosity == 'debug':
stderr_handler.setLevel(level='DEBUG')
stderr_handler.setFormatter(logging.Formatter(fmt=DEBUG_FORMAT))
logging.root.setLevel('DEBUG')
logging.root.addHandler(stderr_handler)
if debug_logfile:
debug_filehandler = logging.FileHandler(debug_logfile)
debug_filehandler.setLevel('DEBUG')
formatter = logging.Formatter(fmt=DEBUG_LOGFILE_FORMAT)
debug_filehandler.setFormatter(formatter)
logging.root.addHandler(debug_filehandler)
logging.root.setLevel('DEBUG')
if logfile:
filehandler = logging.FileHandler(logfile)
filehandler.setLevel('INFO')
filehandler.setFormatter(NoTracebackFormatter(fmt=LOGFILE_FORMAT, err_fmt=LOGFILE_FORMAT))
logging.root.addHandler(filehandler) | [
"def",
"init_output_formatters",
"(",
"output_verbosity",
"=",
"'normal'",
",",
"stderr",
"=",
"sys",
".",
"stderr",
",",
"logfile",
"=",
"None",
",",
"debug_logfile",
"=",
"None",
")",
":",
"if",
"output_verbosity",
"not",
"in",
"console_verbosity_options",
":"... | Initialize the CLI logging scheme.
:param output_verbosity: 'quiet','normal','verbose', or 'debug' controls the output to stdout and its format
:param stderr: stream for stderr output, default=stderr, pass a file path/string to have stderr in a file
:return: | [
"Initialize",
"the",
"CLI",
"logging",
"scheme",
"."
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/logs.py#L105-L160 | train | 34,957 |
def main_entry(ctx, verbose, debug, quiet, json, plain, html, config_override):
    """
    Anchore is a tool to analyze, query, and curate container images. The options at this top level
    control stdout and stderr verbosity and format.
    After installation, the first command run should be: 'anchore feeds
    list' to initialize the system and load feed data.
    High-level example flows:
    Initialize the system and sync the by-default subscribed feed 'vulnerabilties':
    \b
    anchore feeds list
    anchore feeds sync
    Analyze an image
    docker pull nginx:latest
    anchore analyze --image nginx:latest --imagetype base
    Generate a summary report on all analyzed images
    anchore audit report
    Check gate output for nginx:latest:
    anchore gate --image nginx:latest
    """
    # Load the config into the context object
    logfile = None
    debug_logfile = None
    try:
        try:
            # Parse each --config_override entry of the form <config_opt>=<value>.
            config_overrides = {}
            if config_override:
                for el in config_override:
                    try:
                        (key, val) = el.split('=')
                        if not key or not val:
                            raise Exception("could not split by '='")
                        config_overrides[key] = val
                    except:
                        click.echo("Error: specified --config_override param cannot be parsed (should be <config_opt>=<value>): " + str(el))
                        exit(1)
            # Build the configuration from the CLI flags plus any overrides.
            args = {'verbose': verbose, 'debug': debug, 'json': json, 'plain': plain, 'html': html, 'quiet': quiet, 'config_overrides':config_overrides}
            anchore_conf = AnchoreConfiguration(cliargs=args)
        except Exception as err:
            # Any configuration failure is fatal; print the traceback to help debugging.
            click.echo("Error setting up/reading Anchore configuration", err=True)
            click.echo("Info: "+str(err), err=True)
            import traceback
            traceback.print_exc()
            sys.exit(1)
        try:
            # Optional log file locations supplied by the loaded configuration.
            logfile = anchore_conf.data['log_file'] if 'log_file' in anchore_conf.data else None
            debug_logfile = anchore_conf.data['debug_log_file'] if 'debug_log_file' in anchore_conf.data else None
        except Exception, e:
            click.echo(str(e))
        # Make the configuration available to subcommands via the click context.
        ctx.obj = anchore_conf
    except:
        # No usable configuration: only the 'system' subcommand may proceed without one.
        if ctx.invoked_subcommand != 'system':
            click.echo('Expected, but did not find configuration file at %s' % os.path.join(AnchoreConfiguration.DEFAULT_CONFIG_FILE), err=True)
            exit(1)
    try:
        # Configure stdout/stderr verbosity, formatting, and any log files.
        init_output_format(json, plain, debug, verbose, quiet, log_filepath=logfile, debug_log_filepath=debug_logfile)
    except Exception, e:
        click.echo('Error initializing logging: %s' % str(e))
        exit(2)
    if not anchore_pre_flight_check(ctx):
        anchore_print_err("Error running pre-flight checks")
        exit(1)
    try:
        # Initialize shared runtime context used by subcommands (presumably
        # populates contexts['docker_cli'] / contexts['anchore_db'] -- see
        # build_image_list usage; confirm in anchore_utils).
        if not anchore.anchore_utils.anchore_common_context_setup(ctx.obj):
            anchore_print_err("Error setting up common data based on configuration")
            exit(1)
    except ValueError as err:
        print "ERROR: " + str(err)
        exit(1)
"""
Anchore is a tool to analyze, query, and curate container images. The options at this top level
control stdout and stderr verbosity and format.
After installation, the first command run should be: 'anchore feeds
list' to initialize the system and load feed data.
High-level example flows:
Initialize the system and sync the by-default subscribed feed 'vulnerabilties':
\b
anchore feeds list
anchore feeds sync
Analyze an image
docker pull nginx:latest
anchore analyze --image nginx:latest --imagetype base
Generate a summary report on all analyzed images
anchore audit report
Check gate output for nginx:latest:
anchore gate --image nginx:latest
"""
# Load the config into the context object
logfile = None
debug_logfile = None
try:
try:
config_overrides = {}
if config_override:
for el in config_override:
try:
(key, val) = el.split('=')
if not key or not val:
raise Exception("could not split by '='")
config_overrides[key] = val
except:
click.echo("Error: specified --config_override param cannot be parsed (should be <config_opt>=<value>): " + str(el))
exit(1)
args = {'verbose': verbose, 'debug': debug, 'json': json, 'plain': plain, 'html': html, 'quiet': quiet, 'config_overrides':config_overrides}
anchore_conf = AnchoreConfiguration(cliargs=args)
except Exception as err:
click.echo("Error setting up/reading Anchore configuration", err=True)
click.echo("Info: "+str(err), err=True)
import traceback
traceback.print_exc()
sys.exit(1)
try:
logfile = anchore_conf.data['log_file'] if 'log_file' in anchore_conf.data else None
debug_logfile = anchore_conf.data['debug_log_file'] if 'debug_log_file' in anchore_conf.data else None
except Exception, e:
click.echo(str(e))
ctx.obj = anchore_conf
except:
if ctx.invoked_subcommand != 'system':
click.echo('Expected, but did not find configuration file at %s' % os.path.join(AnchoreConfiguration.DEFAULT_CONFIG_FILE), err=True)
exit(1)
try:
init_output_format(json, plain, debug, verbose, quiet, log_filepath=logfile, debug_log_filepath=debug_logfile)
except Exception, e:
click.echo('Error initializing logging: %s' % str(e))
exit(2)
if not anchore_pre_flight_check(ctx):
anchore_print_err("Error running pre-flight checks")
exit(1)
try:
if not anchore.anchore_utils.anchore_common_context_setup(ctx.obj):
anchore_print_err("Error setting up common data based on configuration")
exit(1)
except ValueError as err:
print "ERROR: " + str(err)
exit(1) | [
"def",
"main_entry",
"(",
"ctx",
",",
"verbose",
",",
"debug",
",",
"quiet",
",",
"json",
",",
"plain",
",",
"html",
",",
"config_override",
")",
":",
"# Load the config into the context object",
"logfile",
"=",
"None",
"debug_logfile",
"=",
"None",
"try",
":"... | Anchore is a tool to analyze, query, and curate container images. The options at this top level
control stdout and stderr verbosity and format.
After installation, the first command run should be: 'anchore feeds
list' to initialize the system and load feed data.
High-level example flows:
Initialize the system and sync the by-default subscribed feed 'vulnerabilties':
\b
anchore feeds list
anchore feeds sync
Analyze an image
docker pull nginx:latest
anchore analyze --image nginx:latest --imagetype base
Generate a summary report on all analyzed images
anchore audit report
Check gate output for nginx:latest:
anchore gate --image nginx:latest | [
"Anchore",
"is",
"a",
"tool",
"to",
"analyze",
"query",
"and",
"curate",
"container",
"images",
".",
"The",
"options",
"at",
"this",
"top",
"level",
"control",
"stdout",
"and",
"stderr",
"verbosity",
"and",
"format",
"."
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/__init__.py#L101-L186 | train | 34,958 |
def get_user_agent():
    """
    Construct an informative user-agent string.
    Includes OS version and docker version info.
    Not thread-safe.
    :return: user-agent string, or None if system info could not be gathered
    """
    global user_agent_string
    if user_agent_string is None:
        # Gather OS release and docker version; cache the result at module level.
        try:
            sysinfo = subprocess.check_output('lsb_release -i -r -s'.split(' '))
            dockerinfo = subprocess.check_output('docker --version'.split(' '))
            user_agent_string = ' '.join([sysinfo.replace('\t', '/'),
                                          dockerinfo.replace(" version ", "/").replace(", ", "/").replace(' ', '/')]).replace('\n', '')
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are not swallowed. Best-effort: leave as None so a later call
            # can retry.
            user_agent_string = None
    return user_agent_string
"""
Construct and informative user-agent string.
Includes OS version and docker version info
Not thread-safe
:return:
"""
global user_agent_string
if user_agent_string is None:
# get OS type
try:
sysinfo = subprocess.check_output('lsb_release -i -r -s'.split(' '))
dockerinfo = subprocess.check_output('docker --version'.split(' '))
user_agent_string = ' '.join([sysinfo.replace('\t','/'),
dockerinfo.replace(" version ", "/").replace(", ", "/").replace(' ', '/')]).replace('\n','')
except:
user_agent_string = None
return user_agent_string | [
"def",
"get_user_agent",
"(",
")",
":",
"global",
"user_agent_string",
"if",
"user_agent_string",
"is",
"None",
":",
"# get OS type",
"try",
":",
"sysinfo",
"=",
"subprocess",
".",
"check_output",
"(",
"'lsb_release -i -r -s'",
".",
"split",
"(",
"' '",
")",
")"... | Construct and informative user-agent string.
Includes OS version and docker version info
Not thread-safe
:return: | [
"Construct",
"and",
"informative",
"user",
"-",
"agent",
"string",
".",
"Includes",
"OS",
"version",
"and",
"docker",
"version",
"info"
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/util/resources.py#L36-L57 | train | 34,959 |
def normalize_headers(headers):
    """
    Convert useful headers to a normalized type and return in a new dict.
    Only processes content-type, content-length, etag, and last-modified.
    The input mapping is also rewritten in place with lowercased keys.
    :param headers: dict of HTTP-style headers (mutated: keys are lowercased)
    :return: new dict with any of 'content-length', 'content-type', 'last-modified', 'etag'
    """
    # Lowercase keys in place. Snapshot items() first: mutating a dict while
    # iterating its items() view raises RuntimeError on python 3.
    for (k, v) in list(headers.items()):
        headers.pop(k)
        headers[k.lower()] = v

    result = {}
    if 'content-length' in headers:
        result['content-length'] = headers['content-length']
    elif 'contentlength' in headers:
        result['content-length'] = headers['contentlength']

    if 'content-type' in headers:
        result['content-type'] = headers['content-type']
    elif 'contenttype' in headers:
        result['content-type'] = headers['contenttype']

    if 'last-modified' in headers:
        result['last-modified'] = headers['last-modified']
    elif 'lastmodified' in headers:
        result['last-modified'] = headers['lastmodified']
    else:
        # No value supplied: default to "now" in RFC 1123 form.
        result['last-modified'] = datetime.datetime.utcnow().strftime(RFC1123_TIME_FORMAT)

    # Normalize datetime objects to an RFC 1123 string.
    if isinstance(result['last-modified'], datetime.datetime):
        result['last-modified'] = result['last-modified'].strftime(RFC1123_TIME_FORMAT)

    # Keys were lowercased above, so only 'etag' can ever match here; the
    # original "'ETag' in headers" branch was dead code and has been removed.
    if 'etag' in headers:
        result['etag'] = headers['etag'].strip('"')

    return result
"""
Convert useful headers to a normalized type and return in a new dict
Only processes content-type, content-length, etag, and last-modified
:param headers:
:return:
"""
for (k, v) in headers.items():
headers.pop(k)
headers[k.lower()] = v
result = {}
if 'content-length' in headers:
result['content-length'] = headers['content-length']
elif 'contentlength' in headers:
result['content-length'] = headers['contentlength']
if 'content-type' in headers:
result['content-type'] = headers['content-type']
elif 'contenttype' in headers:
result['content-type'] = headers['contenttype']
if 'last-modified' in headers:
result['last-modified'] = headers['last-modified']
elif 'lastmodified' in headers:
result['last-modified'] = headers['lastmodified']
else:
result['last-modified'] = datetime.datetime.utcnow().strftime(RFC1123_TIME_FORMAT)
# Normalize date
if isinstance(result['last-modified'], datetime.datetime):
result['last-modified'] = result['last-modified'].strftime(RFC1123_TIME_FORMAT)
if 'ETag' in headers:
result['etag'] = headers['etag'].strip('"')
elif 'etag' in headers:
result['etag'] = headers['etag'].strip('"')
return result | [
"def",
"normalize_headers",
"(",
"headers",
")",
":",
"for",
"(",
"k",
",",
"v",
")",
"in",
"headers",
".",
"items",
"(",
")",
":",
"headers",
".",
"pop",
"(",
"k",
")",
"headers",
"[",
"k",
".",
"lower",
"(",
")",
"]",
"=",
"v",
"result",
"=",... | Convert useful headers to a normalized type and return in a new dict
Only processes content-type, content-length, etag, and last-modified
:param headers:
:return: | [
"Convert",
"useful",
"headers",
"to",
"a",
"normalized",
"type",
"and",
"return",
"in",
"a",
"new",
"dict"
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/util/resources.py#L117-L156 | train | 34,960 |
anchore/anchore | anchore/util/resources.py | get_resource_retriever | def get_resource_retriever(url):
"""
Get the appropriate retriever object for the specified url based on url scheme.
Makes assumption that HTTP urls do not require any special authorization.
For HTTP urls: returns HTTPResourceRetriever
For s3:// urls returns S3ResourceRetriever
:param url: url of the resource to be retrieved
:return: ResourceRetriever object
"""
if url.startswith('http://') or url.startswith('https://'):
return HttpResourceRetriever(url)
else:
raise ValueError('Unsupported scheme in url: %s' % url) | python | def get_resource_retriever(url):
"""
Get the appropriate retriever object for the specified url based on url scheme.
Makes assumption that HTTP urls do not require any special authorization.
For HTTP urls: returns HTTPResourceRetriever
For s3:// urls returns S3ResourceRetriever
:param url: url of the resource to be retrieved
:return: ResourceRetriever object
"""
if url.startswith('http://') or url.startswith('https://'):
return HttpResourceRetriever(url)
else:
raise ValueError('Unsupported scheme in url: %s' % url) | [
"def",
"get_resource_retriever",
"(",
"url",
")",
":",
"if",
"url",
".",
"startswith",
"(",
"'http://'",
")",
"or",
"url",
".",
"startswith",
"(",
"'https://'",
")",
":",
"return",
"HttpResourceRetriever",
"(",
"url",
")",
"else",
":",
"raise",
"ValueError",... | Get the appropriate retriever object for the specified url based on url scheme.
Makes assumption that HTTP urls do not require any special authorization.
For HTTP urls: returns HTTPResourceRetriever
For s3:// urls returns S3ResourceRetriever
:param url: url of the resource to be retrieved
:return: ResourceRetriever object | [
"Get",
"the",
"appropriate",
"retriever",
"object",
"for",
"the",
"specified",
"url",
"based",
"on",
"url",
"scheme",
".",
"Makes",
"assumption",
"that",
"HTTP",
"urls",
"do",
"not",
"require",
"any",
"special",
"authorization",
"."
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/util/resources.py#L158-L173 | train | 34,961 |
anchore/anchore | anchore/util/resources.py | ResourceCache.get | def get(self, url):
"""
Lookup the given url and return an entry if found. Else return None.
The returned entry is a dict with metadata about the content and a 'content' key with a file path value
:param url:
:return:
"""
self._load(url)
self._flush()
return self.metadata[url] | python | def get(self, url):
"""
Lookup the given url and return an entry if found. Else return None.
The returned entry is a dict with metadata about the content and a 'content' key with a file path value
:param url:
:return:
"""
self._load(url)
self._flush()
return self.metadata[url] | [
"def",
"get",
"(",
"self",
",",
"url",
")",
":",
"self",
".",
"_load",
"(",
"url",
")",
"self",
".",
"_flush",
"(",
")",
"return",
"self",
".",
"metadata",
"[",
"url",
"]"
] | Lookup the given url and return an entry if found. Else return None.
The returned entry is a dict with metadata about the content and a 'content' key with a file path value
:param url:
:return: | [
"Lookup",
"the",
"given",
"url",
"and",
"return",
"an",
"entry",
"if",
"found",
".",
"Else",
"return",
"None",
".",
"The",
"returned",
"entry",
"is",
"a",
"dict",
"with",
"metadata",
"about",
"the",
"content",
"and",
"a",
"content",
"key",
"with",
"a",
... | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/util/resources.py#L240-L251 | train | 34,962 |
anchore/anchore | anchore/util/resources.py | ResourceCache._load | def _load(self, url):
"""
Load from remote, but check local file content to identify duplicate content. If local file is found with
same hash then it is used with metadata from remote object to avoid fetching full content.
:param url:
:return:
"""
self._logger.debug('Loading url %s into resource cache' % url)
retriever = get_resource_retriever(url)
content_path = os.path.join(self.path, self._hash_path(url))
try:
if url in self.metadata:
headers = retriever.fetch(content_path, last_etag=self.metadata[url]['etag'], progress_bar=self.progress_bars)
if headers is None:
# no update, return what is already loaded
self._logger.info('Cached %s is up-to-date. No data download needed' % url)
return self.metadata[url]
else:
headers = retriever.fetch(content_path, progress_bar=self.progress_bars)
if headers is None:
raise Exception('Fetch of %s failed' % url)
if 'etag' not in headers:
# No Etag returned, so generate it
headers['etag'] = fs_util.calc_file_md5(content_path)
# Populate metadata from the headers
self.metadata[url] = headers.copy()
self.metadata[url]['content'] = content_path
return self.metadata[url]
except:
self._logger.error('Failed getting resource: %s')
# forcibly flush local entry if found
if url in self.metadata:
self.metadata.pop(url)
raise
finally:
if url not in self.metadata:
self._logger.debug('Cleaning up on failed load of %s' % url)
# Cleanup on failure
if content_path is not None and os.path.exists(content_path):
os.remove(content_path) | python | def _load(self, url):
"""
Load from remote, but check local file content to identify duplicate content. If local file is found with
same hash then it is used with metadata from remote object to avoid fetching full content.
:param url:
:return:
"""
self._logger.debug('Loading url %s into resource cache' % url)
retriever = get_resource_retriever(url)
content_path = os.path.join(self.path, self._hash_path(url))
try:
if url in self.metadata:
headers = retriever.fetch(content_path, last_etag=self.metadata[url]['etag'], progress_bar=self.progress_bars)
if headers is None:
# no update, return what is already loaded
self._logger.info('Cached %s is up-to-date. No data download needed' % url)
return self.metadata[url]
else:
headers = retriever.fetch(content_path, progress_bar=self.progress_bars)
if headers is None:
raise Exception('Fetch of %s failed' % url)
if 'etag' not in headers:
# No Etag returned, so generate it
headers['etag'] = fs_util.calc_file_md5(content_path)
# Populate metadata from the headers
self.metadata[url] = headers.copy()
self.metadata[url]['content'] = content_path
return self.metadata[url]
except:
self._logger.error('Failed getting resource: %s')
# forcibly flush local entry if found
if url in self.metadata:
self.metadata.pop(url)
raise
finally:
if url not in self.metadata:
self._logger.debug('Cleaning up on failed load of %s' % url)
# Cleanup on failure
if content_path is not None and os.path.exists(content_path):
os.remove(content_path) | [
"def",
"_load",
"(",
"self",
",",
"url",
")",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"'Loading url %s into resource cache'",
"%",
"url",
")",
"retriever",
"=",
"get_resource_retriever",
"(",
"url",
")",
"content_path",
"=",
"os",
".",
"path",
".",
... | Load from remote, but check local file content to identify duplicate content. If local file is found with
same hash then it is used with metadata from remote object to avoid fetching full content.
:param url:
:return: | [
"Load",
"from",
"remote",
"but",
"check",
"local",
"file",
"content",
"to",
"identify",
"duplicate",
"content",
".",
"If",
"local",
"file",
"is",
"found",
"with",
"same",
"hash",
"then",
"it",
"is",
"used",
"with",
"metadata",
"from",
"remote",
"object",
"... | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/util/resources.py#L271-L315 | train | 34,963 |
anchore/anchore | anchore/util/fs_util.py | calc_file_md5 | def calc_file_md5(filepath, chunk_size=None):
"""
Calculate a file's md5 checksum. Use the specified chunk_size for IO or the
default 256KB
:param filepath:
:param chunk_size:
:return:
"""
if chunk_size is None:
chunk_size = 256 * 1024
md5sum = hashlib.md5()
with io.open(filepath, 'r+b') as f:
datachunk = f.read(chunk_size)
while datachunk is not None and len(datachunk) > 0:
md5sum.update(datachunk)
datachunk = f.read(chunk_size)
return md5sum.hexdigest() | python | def calc_file_md5(filepath, chunk_size=None):
"""
Calculate a file's md5 checksum. Use the specified chunk_size for IO or the
default 256KB
:param filepath:
:param chunk_size:
:return:
"""
if chunk_size is None:
chunk_size = 256 * 1024
md5sum = hashlib.md5()
with io.open(filepath, 'r+b') as f:
datachunk = f.read(chunk_size)
while datachunk is not None and len(datachunk) > 0:
md5sum.update(datachunk)
datachunk = f.read(chunk_size)
return md5sum.hexdigest() | [
"def",
"calc_file_md5",
"(",
"filepath",
",",
"chunk_size",
"=",
"None",
")",
":",
"if",
"chunk_size",
"is",
"None",
":",
"chunk_size",
"=",
"256",
"*",
"1024",
"md5sum",
"=",
"hashlib",
".",
"md5",
"(",
")",
"with",
"io",
".",
"open",
"(",
"filepath",... | Calculate a file's md5 checksum. Use the specified chunk_size for IO or the
default 256KB
:param filepath:
:param chunk_size:
:return: | [
"Calculate",
"a",
"file",
"s",
"md5",
"checksum",
".",
"Use",
"the",
"specified",
"chunk_size",
"for",
"IO",
"or",
"the",
"default",
"256KB"
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/util/fs_util.py#L59-L77 | train | 34,964 |
anchore/anchore | anchore/cli/toolbox.py | toolbox | def toolbox(anchore_config, ctx, image, imageid):
"""
A collection of tools for operating on images and containers and building anchore modules.
Subcommands operate on the specified image passed in as --image <imgid>
"""
global config, imagelist, nav
config = anchore_config
ecode = 0
try:
# set up imagelist of imageIds
if image:
imagelist = [image]
try:
result = anchore_utils.discover_imageIds(imagelist)
except ValueError as err:
raise err
else:
imagelist = result
elif imageid:
if len(imageid) != 64 or re.findall("[^0-9a-fA-F]+",imageid):
raise Exception("input is not a valid imageId (64 characters, a-f, A-F, 0-9)")
imagelist = [imageid]
else:
imagelist = []
if ctx.invoked_subcommand not in ['import', 'delete', 'kubesync', 'images', 'show']:
if not imagelist:
raise Exception("for this operation, you must specify an image with '--image' or '--imageid'")
else:
try:
nav = navigator.Navigator(anchore_config=config, imagelist=imagelist, allimages=contexts['anchore_allimages'])
except Exception as err:
nav = None
raise err
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
if ecode:
sys.exit(ecode) | python | def toolbox(anchore_config, ctx, image, imageid):
"""
A collection of tools for operating on images and containers and building anchore modules.
Subcommands operate on the specified image passed in as --image <imgid>
"""
global config, imagelist, nav
config = anchore_config
ecode = 0
try:
# set up imagelist of imageIds
if image:
imagelist = [image]
try:
result = anchore_utils.discover_imageIds(imagelist)
except ValueError as err:
raise err
else:
imagelist = result
elif imageid:
if len(imageid) != 64 or re.findall("[^0-9a-fA-F]+",imageid):
raise Exception("input is not a valid imageId (64 characters, a-f, A-F, 0-9)")
imagelist = [imageid]
else:
imagelist = []
if ctx.invoked_subcommand not in ['import', 'delete', 'kubesync', 'images', 'show']:
if not imagelist:
raise Exception("for this operation, you must specify an image with '--image' or '--imageid'")
else:
try:
nav = navigator.Navigator(anchore_config=config, imagelist=imagelist, allimages=contexts['anchore_allimages'])
except Exception as err:
nav = None
raise err
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
if ecode:
sys.exit(ecode) | [
"def",
"toolbox",
"(",
"anchore_config",
",",
"ctx",
",",
"image",
",",
"imageid",
")",
":",
"global",
"config",
",",
"imagelist",
",",
"nav",
"config",
"=",
"anchore_config",
"ecode",
"=",
"0",
"try",
":",
"# set up imagelist of imageIds",
"if",
"image",
":... | A collection of tools for operating on images and containers and building anchore modules.
Subcommands operate on the specified image passed in as --image <imgid> | [
"A",
"collection",
"of",
"tools",
"for",
"operating",
"on",
"images",
"and",
"containers",
"and",
"building",
"anchore",
"modules",
"."
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/toolbox.py#L24-L71 | train | 34,965 |
anchore/anchore | anchore/cli/toolbox.py | unpack | def unpack(destdir):
"""Unpack and Squash image to local filesystem"""
if not nav:
sys.exit(1)
ecode = 0
try:
anchore_print("Unpacking images: " + ' '.join(nav.get_images()))
result = nav.unpack(destdir=destdir)
if not result:
anchore_print_err("no images unpacked")
ecode = 1
else:
for imageId in result:
anchore_print("Unpacked image: " + imageId)
anchore_print("Unpack directory: "+ result[imageId])
except:
anchore_print_err("operation failed")
ecode = 1
contexts['anchore_allimages'].clear()
sys.exit(ecode) | python | def unpack(destdir):
"""Unpack and Squash image to local filesystem"""
if not nav:
sys.exit(1)
ecode = 0
try:
anchore_print("Unpacking images: " + ' '.join(nav.get_images()))
result = nav.unpack(destdir=destdir)
if not result:
anchore_print_err("no images unpacked")
ecode = 1
else:
for imageId in result:
anchore_print("Unpacked image: " + imageId)
anchore_print("Unpack directory: "+ result[imageId])
except:
anchore_print_err("operation failed")
ecode = 1
contexts['anchore_allimages'].clear()
sys.exit(ecode) | [
"def",
"unpack",
"(",
"destdir",
")",
":",
"if",
"not",
"nav",
":",
"sys",
".",
"exit",
"(",
"1",
")",
"ecode",
"=",
"0",
"try",
":",
"anchore_print",
"(",
"\"Unpacking images: \"",
"+",
"' '",
".",
"join",
"(",
"nav",
".",
"get_images",
"(",
")",
... | Unpack and Squash image to local filesystem | [
"Unpack",
"and",
"Squash",
"image",
"to",
"local",
"filesystem"
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/toolbox.py#L117-L140 | train | 34,966 |
anchore/anchore | anchore/cli/toolbox.py | show_analyzer_status | def show_analyzer_status():
"""Show analyzer status for specified image"""
ecode = 0
try:
image=contexts['anchore_allimages'][imagelist[0]]
analyzer_status = contexts['anchore_db'].load_analyzer_manifest(image.meta['imageId'])
result = {image.meta['imageId']:{'result':{'header':['Analyzer', 'Status', '*Type', 'LastExec', 'Exitcode', 'Checksum'], 'rows':[]}}}
for script in analyzer_status.keys():
adata = analyzer_status[script]
nicetime = datetime.datetime.fromtimestamp(adata['timestamp']).strftime('%Y-%m-%d %H:%M:%S')
try:
row = [script.split('/')[-1], adata['status'], adata['atype'], nicetime, str(adata['returncode']), adata['csum']]
result[image.meta['imageId']]['result']['rows'].append(row)
except:
pass
if result:
anchore_utils.print_result(config, result)
except:
anchore_print_err("operation failed")
ecode = 1
contexts['anchore_allimages'].clear()
sys.exit(ecode) | python | def show_analyzer_status():
"""Show analyzer status for specified image"""
ecode = 0
try:
image=contexts['anchore_allimages'][imagelist[0]]
analyzer_status = contexts['anchore_db'].load_analyzer_manifest(image.meta['imageId'])
result = {image.meta['imageId']:{'result':{'header':['Analyzer', 'Status', '*Type', 'LastExec', 'Exitcode', 'Checksum'], 'rows':[]}}}
for script in analyzer_status.keys():
adata = analyzer_status[script]
nicetime = datetime.datetime.fromtimestamp(adata['timestamp']).strftime('%Y-%m-%d %H:%M:%S')
try:
row = [script.split('/')[-1], adata['status'], adata['atype'], nicetime, str(adata['returncode']), adata['csum']]
result[image.meta['imageId']]['result']['rows'].append(row)
except:
pass
if result:
anchore_utils.print_result(config, result)
except:
anchore_print_err("operation failed")
ecode = 1
contexts['anchore_allimages'].clear()
sys.exit(ecode) | [
"def",
"show_analyzer_status",
"(",
")",
":",
"ecode",
"=",
"0",
"try",
":",
"image",
"=",
"contexts",
"[",
"'anchore_allimages'",
"]",
"[",
"imagelist",
"[",
"0",
"]",
"]",
"analyzer_status",
"=",
"contexts",
"[",
"'anchore_db'",
"]",
".",
"load_analyzer_ma... | Show analyzer status for specified image | [
"Show",
"analyzer",
"status",
"for",
"specified",
"image"
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/toolbox.py#L333-L356 | train | 34,967 |
anchore/anchore | anchore/cli/toolbox.py | export | def export(outfile):
"""Export image anchore data to a JSON file."""
if not nav:
sys.exit(1)
ecode = 0
savelist = list()
for imageId in imagelist:
try:
record = {}
record['image'] = {}
record['image']['imageId'] = imageId
record['image']['imagedata'] = contexts['anchore_db'].load_image_new(imageId)
savelist.append(record)
except Exception as err:
anchore_print_err("could not find record for image ("+str(imageId)+")")
ecode = 1
if ecode == 0:
try:
if outfile == '-':
print json.dumps(savelist, indent=4)
else:
with open(outfile, 'w') as OFH:
OFH.write(json.dumps(savelist))
except Exception as err:
anchore_print_err("operation failed: " + str(err))
ecode = 1
sys.exit(ecode) | python | def export(outfile):
"""Export image anchore data to a JSON file."""
if not nav:
sys.exit(1)
ecode = 0
savelist = list()
for imageId in imagelist:
try:
record = {}
record['image'] = {}
record['image']['imageId'] = imageId
record['image']['imagedata'] = contexts['anchore_db'].load_image_new(imageId)
savelist.append(record)
except Exception as err:
anchore_print_err("could not find record for image ("+str(imageId)+")")
ecode = 1
if ecode == 0:
try:
if outfile == '-':
print json.dumps(savelist, indent=4)
else:
with open(outfile, 'w') as OFH:
OFH.write(json.dumps(savelist))
except Exception as err:
anchore_print_err("operation failed: " + str(err))
ecode = 1
sys.exit(ecode) | [
"def",
"export",
"(",
"outfile",
")",
":",
"if",
"not",
"nav",
":",
"sys",
".",
"exit",
"(",
"1",
")",
"ecode",
"=",
"0",
"savelist",
"=",
"list",
"(",
")",
"for",
"imageId",
"in",
"imagelist",
":",
"try",
":",
"record",
"=",
"{",
"}",
"record",
... | Export image anchore data to a JSON file. | [
"Export",
"image",
"anchore",
"data",
"to",
"a",
"JSON",
"file",
"."
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/toolbox.py#L360-L392 | train | 34,968 |
anchore/anchore | anchore/cli/toolbox.py | image_import | def image_import(infile, force):
"""Import image anchore data from a JSON file."""
ecode = 0
try:
with open(infile, 'r') as FH:
savelist = json.loads(FH.read())
except Exception as err:
anchore_print_err("could not load input file: " + str(err))
ecode = 1
if ecode == 0:
for record in savelist:
try:
imageId = record['image']['imageId']
if contexts['anchore_db'].is_image_present(imageId) and not force:
anchore_print("image ("+str(imageId)+") already exists in DB, skipping import.")
else:
imagedata = record['image']['imagedata']
try:
rc = contexts['anchore_db'].save_image_new(imageId, report=imagedata)
if not rc:
contexts['anchore_db'].delete_image(imageId)
raise Exception("save to anchore DB failed")
except Exception as err:
contexts['anchore_db'].delete_image(imageId)
raise err
except Exception as err:
anchore_print_err("could not store image ("+str(imageId)+") from import file: "+ str(err))
ecode = 1
sys.exit(ecode) | python | def image_import(infile, force):
"""Import image anchore data from a JSON file."""
ecode = 0
try:
with open(infile, 'r') as FH:
savelist = json.loads(FH.read())
except Exception as err:
anchore_print_err("could not load input file: " + str(err))
ecode = 1
if ecode == 0:
for record in savelist:
try:
imageId = record['image']['imageId']
if contexts['anchore_db'].is_image_present(imageId) and not force:
anchore_print("image ("+str(imageId)+") already exists in DB, skipping import.")
else:
imagedata = record['image']['imagedata']
try:
rc = contexts['anchore_db'].save_image_new(imageId, report=imagedata)
if not rc:
contexts['anchore_db'].delete_image(imageId)
raise Exception("save to anchore DB failed")
except Exception as err:
contexts['anchore_db'].delete_image(imageId)
raise err
except Exception as err:
anchore_print_err("could not store image ("+str(imageId)+") from import file: "+ str(err))
ecode = 1
sys.exit(ecode) | [
"def",
"image_import",
"(",
"infile",
",",
"force",
")",
":",
"ecode",
"=",
"0",
"try",
":",
"with",
"open",
"(",
"infile",
",",
"'r'",
")",
"as",
"FH",
":",
"savelist",
"=",
"json",
".",
"loads",
"(",
"FH",
".",
"read",
"(",
")",
")",
"except",
... | Import image anchore data from a JSON file. | [
"Import",
"image",
"anchore",
"data",
"from",
"a",
"JSON",
"file",
"."
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/toolbox.py#L421-L452 | train | 34,969 |
anchore/anchore | anchore/cli/system.py | status | def status(conf):
"""
Show anchore system status.
"""
ecode = 0
try:
if conf:
if config.cliargs['json']:
anchore_print(config.data, do_formatting=True)
else:
anchore_print(yaml.safe_dump(config.data, indent=True, default_flow_style=False))
else:
result = {}
if contexts['anchore_db'].check():
result["anchore_db"] = "OK"
else:
result["anchore_db"] = "NOTINITIALIZED"
if anchore_feeds.check():
result["anchore_feeds"] = "OK"
else:
result["anchore_feeds"] = "NOTSYNCED"
afailed = False
latest = 0
for imageId in contexts['anchore_db'].load_all_images().keys():
amanifest = anchore_utils.load_analyzer_manifest(imageId)
for module_name in amanifest.keys():
try:
if amanifest[module_name]['timestamp'] > latest:
latest = amanifest[module_name]['timestamp']
if amanifest[module_name]['status'] != 'SUCCESS':
analyzer_failed_imageId = imageId
analyzer_failed_name = module_name
afailed = True
except:
pass
if latest == 0:
result["analyzer_status"] = "NODATA"
elif afailed:
result["analyzer_status"] = "FAIL ("+analyzer_failed_imageId+")"
result["analyzer_latest_run"] = time.ctime(latest)
else:
result["analyzer_status"] = "OK"
result["analyzer_latest_run"] = time.ctime(latest)
anchore_print(result, do_formatting=True)
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | python | def status(conf):
"""
Show anchore system status.
"""
ecode = 0
try:
if conf:
if config.cliargs['json']:
anchore_print(config.data, do_formatting=True)
else:
anchore_print(yaml.safe_dump(config.data, indent=True, default_flow_style=False))
else:
result = {}
if contexts['anchore_db'].check():
result["anchore_db"] = "OK"
else:
result["anchore_db"] = "NOTINITIALIZED"
if anchore_feeds.check():
result["anchore_feeds"] = "OK"
else:
result["anchore_feeds"] = "NOTSYNCED"
afailed = False
latest = 0
for imageId in contexts['anchore_db'].load_all_images().keys():
amanifest = anchore_utils.load_analyzer_manifest(imageId)
for module_name in amanifest.keys():
try:
if amanifest[module_name]['timestamp'] > latest:
latest = amanifest[module_name]['timestamp']
if amanifest[module_name]['status'] != 'SUCCESS':
analyzer_failed_imageId = imageId
analyzer_failed_name = module_name
afailed = True
except:
pass
if latest == 0:
result["analyzer_status"] = "NODATA"
elif afailed:
result["analyzer_status"] = "FAIL ("+analyzer_failed_imageId+")"
result["analyzer_latest_run"] = time.ctime(latest)
else:
result["analyzer_status"] = "OK"
result["analyzer_latest_run"] = time.ctime(latest)
anchore_print(result, do_formatting=True)
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | [
"def",
"status",
"(",
"conf",
")",
":",
"ecode",
"=",
"0",
"try",
":",
"if",
"conf",
":",
"if",
"config",
".",
"cliargs",
"[",
"'json'",
"]",
":",
"anchore_print",
"(",
"config",
".",
"data",
",",
"do_formatting",
"=",
"True",
")",
"else",
":",
"an... | Show anchore system status. | [
"Show",
"anchore",
"system",
"status",
"."
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/system.py#L25-L79 | train | 34,970 |
anchore/anchore | anchore/cli/system.py | show_schemas | def show_schemas(schemaname):
"""
Show anchore document schemas.
"""
ecode = 0
try:
schemas = {}
schema_dir = os.path.join(contexts['anchore_config']['pkg_dir'], 'schemas')
for f in os.listdir(schema_dir):
sdata = {}
try:
with open(os.path.join(schema_dir, f), 'r') as FH:
sdata = json.loads(FH.read())
except:
anchore_print_err('found schema file but failed to parse: ' + os.path.join(schema_dir, f))
if sdata and (not schemaname or f in schemaname):
schemas[f] = sdata
if not schemas:
anchore_print_err("no specified schemas were found to show")
else:
anchore_print(json.dumps(schemas, indent=4))
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | python | def show_schemas(schemaname):
"""
Show anchore document schemas.
"""
ecode = 0
try:
schemas = {}
schema_dir = os.path.join(contexts['anchore_config']['pkg_dir'], 'schemas')
for f in os.listdir(schema_dir):
sdata = {}
try:
with open(os.path.join(schema_dir, f), 'r') as FH:
sdata = json.loads(FH.read())
except:
anchore_print_err('found schema file but failed to parse: ' + os.path.join(schema_dir, f))
if sdata and (not schemaname or f in schemaname):
schemas[f] = sdata
if not schemas:
anchore_print_err("no specified schemas were found to show")
else:
anchore_print(json.dumps(schemas, indent=4))
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | [
"def",
"show_schemas",
"(",
"schemaname",
")",
":",
"ecode",
"=",
"0",
"try",
":",
"schemas",
"=",
"{",
"}",
"schema_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"contexts",
"[",
"'anchore_config'",
"]",
"[",
"'pkg_dir'",
"]",
",",
"'schemas'",
")",... | Show anchore document schemas. | [
"Show",
"anchore",
"document",
"schemas",
"."
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/system.py#L83-L112 | train | 34,971 |
anchore/anchore | anchore/cli/system.py | backup | def backup(outputdir):
"""
Backup an anchore installation to a tarfile.
"""
ecode = 0
try:
anchore_print('Backing up anchore system to directory '+str(outputdir)+' ...')
backupfile = config.backup(outputdir)
anchore_print({"anchore_backup_tarball":str(backupfile)}, do_formatting=True)
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | python | def backup(outputdir):
"""
Backup an anchore installation to a tarfile.
"""
ecode = 0
try:
anchore_print('Backing up anchore system to directory '+str(outputdir)+' ...')
backupfile = config.backup(outputdir)
anchore_print({"anchore_backup_tarball":str(backupfile)}, do_formatting=True)
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | [
"def",
"backup",
"(",
"outputdir",
")",
":",
"ecode",
"=",
"0",
"try",
":",
"anchore_print",
"(",
"'Backing up anchore system to directory '",
"+",
"str",
"(",
"outputdir",
")",
"+",
"' ...'",
")",
"backupfile",
"=",
"config",
".",
"backup",
"(",
"outputdir",
... | Backup an anchore installation to a tarfile. | [
"Backup",
"an",
"anchore",
"installation",
"to",
"a",
"tarfile",
"."
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/system.py#L116-L130 | train | 34,972 |
anchore/anchore | anchore/cli/system.py | restore | def restore(inputfile, destination_root):
"""
Restore an anchore installation from a previously backed up tar file.
"""
ecode = 0
try:
anchore_print('Restoring anchore system from backup file %s ...' % (str(inputfile.name)))
restoredir = config.restore(destination_root, inputfile)
anchore_print("Anchore restored.")
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | python | def restore(inputfile, destination_root):
"""
Restore an anchore installation from a previously backed up tar file.
"""
ecode = 0
try:
anchore_print('Restoring anchore system from backup file %s ...' % (str(inputfile.name)))
restoredir = config.restore(destination_root, inputfile)
anchore_print("Anchore restored.")
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | [
"def",
"restore",
"(",
"inputfile",
",",
"destination_root",
")",
":",
"ecode",
"=",
"0",
"try",
":",
"anchore_print",
"(",
"'Restoring anchore system from backup file %s ...'",
"%",
"(",
"str",
"(",
"inputfile",
".",
"name",
")",
")",
")",
"restoredir",
"=",
... | Restore an anchore installation from a previously backed up tar file. | [
"Restore",
"an",
"anchore",
"installation",
"from",
"a",
"previously",
"backed",
"up",
"tar",
"file",
"."
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/system.py#L135-L149 | train | 34,973 |
anchore/anchore | anchore/cli/system.py | exportdb | def exportdb(outdir):
"""Export all anchore images to JSON files"""
ecode = 0
try:
imgdir = os.path.join(outdir, "images")
feeddir = os.path.join(outdir, "feeds")
storedir = os.path.join(outdir, "storedfiles")
for d in [outdir, imgdir, feeddir, storedir]:
if not os.path.exists(d):
os.makedirs(d)
anchore_print("exporting images...")
imagelist = anchore_utils.get_image_list().keys()
for imageId in imagelist:
thefile = os.path.join(imgdir, imageId+".json")
if not os.path.exists(thefile):
with open(thefile, 'w') as OFH:
OFH.write(json.dumps(contexts['anchore_db'].load_image_new(imageId)))
stored_namespaces = contexts['anchore_db'].load_files_namespaces(imageId)
for namespace in stored_namespaces:
stored_files = contexts['anchore_db'].load_files_tarfile(imageId, namespace)
if os.path.exists(stored_files):
thedir = os.path.join(storedir, imageId, namespace)
if not os.path.exists(thedir):
os.makedirs(thedir)
thefile = os.path.join(thedir, "stored_files.tar.gz")
shutil.copy(stored_files, thefile)
anchore_print("exporting feeds...")
feedmeta = contexts['anchore_db'].load_feedmeta()
thefile = os.path.join(feeddir, "feedmeta.json")
with open(thefile, 'w') as OFH:
OFH.write(json.dumps(feedmeta))
for feed in feedmeta:
feedobj = feedmeta[feed]
for group in feedobj['groups']:
groupobj = feedobj['groups'][group]
datafiles = groupobj.pop('datafiles', [])
for datafile in datafiles:
thedir = os.path.join(feeddir, feed, group)
if not os.path.exists(thedir):
os.makedirs(thedir)
thefile = os.path.join(thedir, datafile)
if not os.path.exists(thefile):
with open(thefile, 'w') as OFH:
OFH.write(json.dumps(contexts['anchore_db'].load_feed_group_data(feed, group, datafile)))
except Exception as err:
anchore_print_err("operation failed: " + str(err))
ecode = 1
sys.exit(ecode) | python | def exportdb(outdir):
"""Export all anchore images to JSON files"""
ecode = 0
try:
imgdir = os.path.join(outdir, "images")
feeddir = os.path.join(outdir, "feeds")
storedir = os.path.join(outdir, "storedfiles")
for d in [outdir, imgdir, feeddir, storedir]:
if not os.path.exists(d):
os.makedirs(d)
anchore_print("exporting images...")
imagelist = anchore_utils.get_image_list().keys()
for imageId in imagelist:
thefile = os.path.join(imgdir, imageId+".json")
if not os.path.exists(thefile):
with open(thefile, 'w') as OFH:
OFH.write(json.dumps(contexts['anchore_db'].load_image_new(imageId)))
stored_namespaces = contexts['anchore_db'].load_files_namespaces(imageId)
for namespace in stored_namespaces:
stored_files = contexts['anchore_db'].load_files_tarfile(imageId, namespace)
if os.path.exists(stored_files):
thedir = os.path.join(storedir, imageId, namespace)
if not os.path.exists(thedir):
os.makedirs(thedir)
thefile = os.path.join(thedir, "stored_files.tar.gz")
shutil.copy(stored_files, thefile)
anchore_print("exporting feeds...")
feedmeta = contexts['anchore_db'].load_feedmeta()
thefile = os.path.join(feeddir, "feedmeta.json")
with open(thefile, 'w') as OFH:
OFH.write(json.dumps(feedmeta))
for feed in feedmeta:
feedobj = feedmeta[feed]
for group in feedobj['groups']:
groupobj = feedobj['groups'][group]
datafiles = groupobj.pop('datafiles', [])
for datafile in datafiles:
thedir = os.path.join(feeddir, feed, group)
if not os.path.exists(thedir):
os.makedirs(thedir)
thefile = os.path.join(thedir, datafile)
if not os.path.exists(thefile):
with open(thefile, 'w') as OFH:
OFH.write(json.dumps(contexts['anchore_db'].load_feed_group_data(feed, group, datafile)))
except Exception as err:
anchore_print_err("operation failed: " + str(err))
ecode = 1
sys.exit(ecode) | [
"def",
"exportdb",
"(",
"outdir",
")",
":",
"ecode",
"=",
"0",
"try",
":",
"imgdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"\"images\"",
")",
"feeddir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"\"feeds\"",
")",
... | Export all anchore images to JSON files | [
"Export",
"all",
"anchore",
"images",
"to",
"JSON",
"files"
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/system.py#L153-L207 | train | 34,974 |
anchore/anchore | anchore/apk.py | next_token | def next_token(expected_type, data):
"""
Based on the expected next type, consume the next token returning the type found and an updated buffer with the found token
removed
:param expected_type:
:param data:
:return: (TokenType, str) tuple where TokenType is the type of the next token expected
"""
next_data = copy.copy(data)
next_type = TokenType.INVALID
if len(next_data) == 0 or next_data[0] == None:
next_type = TokenType.END
elif (expected_type == TokenType.DIGIT or expected_type == TokenType.DIGIT_OR_ZERO) and next_data[0].isalpha():
next_type = TokenType.LETTER
elif expected_type == TokenType.LETTER and next_data[0].isdigit():
next_type = TokenType.DIGIT
elif expected_type == TokenType.SUFFIX and next_data[0].isdigit():
next_type = TokenType.SUFFIX_NO
else:
if next_data[0] == '.':
next_type = TokenType.DIGIT_OR_ZERO
elif next_data[0] == '_':
next_type = TokenType.SUFFIX
elif next_data[0] == '-':
if len(next_data) > 1 and next_data[1] == 'r':
next_type = TokenType.REVISION_NO
# Pop leading char off
next_data = next_data[1:]
else:
next_type = TokenType.INVALID
next_data = next_data[1:]
if next_type < expected_type:
if not ((next_type == TokenType.DIGIT_OR_ZERO and expected_type == TokenType.DIGIT) or
(next_type == TokenType.SUFFIX and expected_type == TokenType.SUFFIX_NO) or
(next_type == TokenType.DIGIT and expected_type == TokenType.LETTER)):
next_type = TokenType.INVALID
return next_type, next_data | python | def next_token(expected_type, data):
"""
Based on the expected next type, consume the next token returning the type found and an updated buffer with the found token
removed
:param expected_type:
:param data:
:return: (TokenType, str) tuple where TokenType is the type of the next token expected
"""
next_data = copy.copy(data)
next_type = TokenType.INVALID
if len(next_data) == 0 or next_data[0] == None:
next_type = TokenType.END
elif (expected_type == TokenType.DIGIT or expected_type == TokenType.DIGIT_OR_ZERO) and next_data[0].isalpha():
next_type = TokenType.LETTER
elif expected_type == TokenType.LETTER and next_data[0].isdigit():
next_type = TokenType.DIGIT
elif expected_type == TokenType.SUFFIX and next_data[0].isdigit():
next_type = TokenType.SUFFIX_NO
else:
if next_data[0] == '.':
next_type = TokenType.DIGIT_OR_ZERO
elif next_data[0] == '_':
next_type = TokenType.SUFFIX
elif next_data[0] == '-':
if len(next_data) > 1 and next_data[1] == 'r':
next_type = TokenType.REVISION_NO
# Pop leading char off
next_data = next_data[1:]
else:
next_type = TokenType.INVALID
next_data = next_data[1:]
if next_type < expected_type:
if not ((next_type == TokenType.DIGIT_OR_ZERO and expected_type == TokenType.DIGIT) or
(next_type == TokenType.SUFFIX and expected_type == TokenType.SUFFIX_NO) or
(next_type == TokenType.DIGIT and expected_type == TokenType.LETTER)):
next_type = TokenType.INVALID
return next_type, next_data | [
"def",
"next_token",
"(",
"expected_type",
",",
"data",
")",
":",
"next_data",
"=",
"copy",
".",
"copy",
"(",
"data",
")",
"next_type",
"=",
"TokenType",
".",
"INVALID",
"if",
"len",
"(",
"next_data",
")",
"==",
"0",
"or",
"next_data",
"[",
"0",
"]",
... | Based on the expected next type, consume the next token returning the type found and an updated buffer with the found token
removed
:param expected_type:
:param data:
:return: (TokenType, str) tuple where TokenType is the type of the next token expected | [
"Based",
"on",
"the",
"expected",
"next",
"type",
"consume",
"the",
"next",
"token",
"returning",
"the",
"type",
"found",
"and",
"an",
"updated",
"buffer",
"with",
"the",
"found",
"token",
"removed"
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/apk.py#L31-L74 | train | 34,975 |
anchore/anchore | anchore/cli/feeds.py | show | def show(feed):
"""
Show detailed feed information
"""
ecode = 0
try:
feedmeta = anchore_feeds.load_anchore_feedmeta()
if feed in feedmeta:
result = {}
groups = feedmeta[feed].get('groups',{}).values()
result['name'] = feed
result['access_tier'] = int(feedmeta[feed].get('access_tier'))
result['description'] = feedmeta[feed].get('description')
result['groups'] = {}
if 'subscribed' not in feedmeta[feed]:
result['subscribed'] = False
else:
result['subscribed'] = feedmeta[feed]['subscribed']
for g in groups:
result['groups'][g['name']] = {
'access_tier': int(g.get('access_tier')),
'description': g.get('description'),
'last_sync': datetime.datetime.fromtimestamp(g.get('last_update')).isoformat() if 'last_update' in g else 'None'
}
anchore_print(result, do_formatting=True)
else:
anchore_print_err('Unknown feed name. Valid feeds can be seen withe the "list" command')
ecode = 1
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | python | def show(feed):
"""
Show detailed feed information
"""
ecode = 0
try:
feedmeta = anchore_feeds.load_anchore_feedmeta()
if feed in feedmeta:
result = {}
groups = feedmeta[feed].get('groups',{}).values()
result['name'] = feed
result['access_tier'] = int(feedmeta[feed].get('access_tier'))
result['description'] = feedmeta[feed].get('description')
result['groups'] = {}
if 'subscribed' not in feedmeta[feed]:
result['subscribed'] = False
else:
result['subscribed'] = feedmeta[feed]['subscribed']
for g in groups:
result['groups'][g['name']] = {
'access_tier': int(g.get('access_tier')),
'description': g.get('description'),
'last_sync': datetime.datetime.fromtimestamp(g.get('last_update')).isoformat() if 'last_update' in g else 'None'
}
anchore_print(result, do_formatting=True)
else:
anchore_print_err('Unknown feed name. Valid feeds can be seen withe the "list" command')
ecode = 1
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | [
"def",
"show",
"(",
"feed",
")",
":",
"ecode",
"=",
"0",
"try",
":",
"feedmeta",
"=",
"anchore_feeds",
".",
"load_anchore_feedmeta",
"(",
")",
"if",
"feed",
"in",
"feedmeta",
":",
"result",
"=",
"{",
"}",
"groups",
"=",
"feedmeta",
"[",
"feed",
"]",
... | Show detailed feed information | [
"Show",
"detailed",
"feed",
"information"
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/feeds.py#L42-L77 | train | 34,976 |
anchore/anchore | anchore/cli/feeds.py | list | def list(showgroups):
"""
Show list of Anchore data feeds.
"""
ecode = 0
try:
result = {}
subscribed = {}
available = {}
unavailable = {}
current_user_data = contexts['anchore_auth']['user_info']
feedmeta = anchore_feeds.load_anchore_feedmeta()
for feed in feedmeta.keys():
if feedmeta[feed]['subscribed']:
subscribed[feed] = {}
subscribed[feed]['description'] = feedmeta[feed]['description']
if showgroups:
subscribed[feed]['groups'] = feedmeta[feed]['groups'].keys()
else:
if current_user_data:
tier = int(current_user_data['tier'])
else:
tier = 0
if int(feedmeta[feed]['access_tier']) > tier:
collection = unavailable
else:
collection = available
collection[feed] = {}
collection[feed]['description'] = feedmeta[feed]['description']
if showgroups and collection == available:
collection[feed]['groups'] = feedmeta[feed]['groups'].keys()
if available:
result['Available'] = available
if subscribed:
result['Subscribed'] = subscribed
if unavailable:
result['Unavailable/Insufficient Access Tier'] = unavailable
anchore_print(result, do_formatting=True)
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | python | def list(showgroups):
"""
Show list of Anchore data feeds.
"""
ecode = 0
try:
result = {}
subscribed = {}
available = {}
unavailable = {}
current_user_data = contexts['anchore_auth']['user_info']
feedmeta = anchore_feeds.load_anchore_feedmeta()
for feed in feedmeta.keys():
if feedmeta[feed]['subscribed']:
subscribed[feed] = {}
subscribed[feed]['description'] = feedmeta[feed]['description']
if showgroups:
subscribed[feed]['groups'] = feedmeta[feed]['groups'].keys()
else:
if current_user_data:
tier = int(current_user_data['tier'])
else:
tier = 0
if int(feedmeta[feed]['access_tier']) > tier:
collection = unavailable
else:
collection = available
collection[feed] = {}
collection[feed]['description'] = feedmeta[feed]['description']
if showgroups and collection == available:
collection[feed]['groups'] = feedmeta[feed]['groups'].keys()
if available:
result['Available'] = available
if subscribed:
result['Subscribed'] = subscribed
if unavailable:
result['Unavailable/Insufficient Access Tier'] = unavailable
anchore_print(result, do_formatting=True)
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | [
"def",
"list",
"(",
"showgroups",
")",
":",
"ecode",
"=",
"0",
"try",
":",
"result",
"=",
"{",
"}",
"subscribed",
"=",
"{",
"}",
"available",
"=",
"{",
"}",
"unavailable",
"=",
"{",
"}",
"current_user_data",
"=",
"contexts",
"[",
"'anchore_auth'",
"]",... | Show list of Anchore data feeds. | [
"Show",
"list",
"of",
"Anchore",
"data",
"feeds",
"."
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/feeds.py#L81-L131 | train | 34,977 |
anchore/anchore | anchore/analyzer.py | SelectionStrategy.evaluate_familytree | def evaluate_familytree(self, family_tree, image_set):
"""
Evaluate strategy for the given family tree and return a dict of images to analyze that match the strategy
:param family_tree: the family tree to traverse and evaluate
:param image_set: list of all images in the context
:return:
"""
if family_tree is None or image_set is None:
raise ValueError('Cannot execute analysis strategy on None image or image with no familytree data')
toanalyze = OrderedDict()
tree_len = len(family_tree)
for i in family_tree:
image = image_set[i]
if self._should_analyze_image(image, family_tree.index(i), tree_len):
toanalyze[image.meta['imageId']] = image
return toanalyze | python | def evaluate_familytree(self, family_tree, image_set):
"""
Evaluate strategy for the given family tree and return a dict of images to analyze that match the strategy
:param family_tree: the family tree to traverse and evaluate
:param image_set: list of all images in the context
:return:
"""
if family_tree is None or image_set is None:
raise ValueError('Cannot execute analysis strategy on None image or image with no familytree data')
toanalyze = OrderedDict()
tree_len = len(family_tree)
for i in family_tree:
image = image_set[i]
if self._should_analyze_image(image, family_tree.index(i), tree_len):
toanalyze[image.meta['imageId']] = image
return toanalyze | [
"def",
"evaluate_familytree",
"(",
"self",
",",
"family_tree",
",",
"image_set",
")",
":",
"if",
"family_tree",
"is",
"None",
"or",
"image_set",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Cannot execute analysis strategy on None image or image with no familytree dat... | Evaluate strategy for the given family tree and return a dict of images to analyze that match the strategy
:param family_tree: the family tree to traverse and evaluate
:param image_set: list of all images in the context
:return: | [
"Evaluate",
"strategy",
"for",
"the",
"given",
"family",
"tree",
"and",
"return",
"a",
"dict",
"of",
"images",
"to",
"analyze",
"that",
"match",
"the",
"strategy"
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/analyzer.py#L23-L42 | train | 34,978 |
anchore/anchore | anchore/cli/login.py | logout | def logout(anchore_config):
"""
Log out of Anchore service
"""
ecode = 0
try:
aa = contexts['anchore_auth']
if aa:
anchore_auth.anchore_auth_invalidate(aa)
if 'auth_file' in aa:
os.remove(aa['auth_file'])
print "Logout successful."
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | python | def logout(anchore_config):
"""
Log out of Anchore service
"""
ecode = 0
try:
aa = contexts['anchore_auth']
if aa:
anchore_auth.anchore_auth_invalidate(aa)
if 'auth_file' in aa:
os.remove(aa['auth_file'])
print "Logout successful."
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | [
"def",
"logout",
"(",
"anchore_config",
")",
":",
"ecode",
"=",
"0",
"try",
":",
"aa",
"=",
"contexts",
"[",
"'anchore_auth'",
"]",
"if",
"aa",
":",
"anchore_auth",
".",
"anchore_auth_invalidate",
"(",
"aa",
")",
"if",
"'auth_file'",
"in",
"aa",
":",
"os... | Log out of Anchore service | [
"Log",
"out",
"of",
"Anchore",
"service"
] | 8a4d5b9708e27856312d303aae3f04f3c72039d6 | https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/login.py#L81-L97 | train | 34,979 |
joanvila/aioredlock | aioredlock/redis.py | Instance.connect | async def connect(self):
"""
Get an connection for the self instance
"""
if isinstance(self.connection, dict):
# a dict like {'host': 'localhost', 'port': 6379,
# 'db': 0, 'password': 'pass'}
kwargs = self.connection.copy()
address = (
kwargs.pop('host', 'localhost'),
kwargs.pop('port', 6379)
)
redis_kwargs = kwargs
elif isinstance(self.connection, aioredis.Redis):
self._pool = self.connection
else:
# a tuple or list ('localhost', 6379)
# a string "redis://host:6379/0?encoding=utf-8" or
# a unix domain socket path "/path/to/redis.sock"
address = self.connection
redis_kwargs = {}
if self._pool is None:
async with self._lock:
if self._pool is None:
self.log.debug('Connecting %s', repr(self))
self._pool = await self._create_redis_pool(
address, **redis_kwargs,
minsize=1, maxsize=100)
return await self._pool | python | async def connect(self):
"""
Get an connection for the self instance
"""
if isinstance(self.connection, dict):
# a dict like {'host': 'localhost', 'port': 6379,
# 'db': 0, 'password': 'pass'}
kwargs = self.connection.copy()
address = (
kwargs.pop('host', 'localhost'),
kwargs.pop('port', 6379)
)
redis_kwargs = kwargs
elif isinstance(self.connection, aioredis.Redis):
self._pool = self.connection
else:
# a tuple or list ('localhost', 6379)
# a string "redis://host:6379/0?encoding=utf-8" or
# a unix domain socket path "/path/to/redis.sock"
address = self.connection
redis_kwargs = {}
if self._pool is None:
async with self._lock:
if self._pool is None:
self.log.debug('Connecting %s', repr(self))
self._pool = await self._create_redis_pool(
address, **redis_kwargs,
minsize=1, maxsize=100)
return await self._pool | [
"async",
"def",
"connect",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"connection",
",",
"dict",
")",
":",
"# a dict like {'host': 'localhost', 'port': 6379,",
"# 'db': 0, 'password': 'pass'}",
"kwargs",
"=",
"self",
".",
"connection",
"... | Get an connection for the self instance | [
"Get",
"an",
"connection",
"for",
"the",
"self",
"instance"
] | 6c62f0895c93b26b87ca8e3fe36bc024c81be421 | https://github.com/joanvila/aioredlock/blob/6c62f0895c93b26b87ca8e3fe36bc024c81be421/aioredlock/redis.py#L85-L116 | train | 34,980 |
joanvila/aioredlock | aioredlock/redis.py | Instance.close | async def close(self):
"""
Closes connection and resets pool
"""
if self._pool is not None and not isinstance(self.connection, aioredis.Redis):
self._pool.close()
await self._pool.wait_closed()
self._pool = None | python | async def close(self):
"""
Closes connection and resets pool
"""
if self._pool is not None and not isinstance(self.connection, aioredis.Redis):
self._pool.close()
await self._pool.wait_closed()
self._pool = None | [
"async",
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_pool",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"self",
".",
"connection",
",",
"aioredis",
".",
"Redis",
")",
":",
"self",
".",
"_pool",
".",
"close",
"(",
")",
"awa... | Closes connection and resets pool | [
"Closes",
"connection",
"and",
"resets",
"pool"
] | 6c62f0895c93b26b87ca8e3fe36bc024c81be421 | https://github.com/joanvila/aioredlock/blob/6c62f0895c93b26b87ca8e3fe36bc024c81be421/aioredlock/redis.py#L118-L125 | train | 34,981 |
joanvila/aioredlock | aioredlock/redis.py | Redis.set_lock | async def set_lock(self, resource, lock_identifier):
"""
Tries to set the lock to all the redis instances
:param resource: The resource string name to lock
:param lock_identifier: The id of the lock. A unique string
:return float: The elapsed time that took to lock the instances
in seconds
:raises: LockError if the lock has not been set to at least (N/2 + 1)
instances
"""
start_time = time.time()
lock_timeout = self.lock_timeout
successes = await asyncio.gather(*[
i.set_lock(resource, lock_identifier, lock_timeout) for
i in self.instances
], return_exceptions=True)
successful_sets = sum(s is None for s in successes)
elapsed_time = time.time() - start_time
locked = True if successful_sets >= int(len(self.instances) / 2) + 1 else False
self.log.debug('Lock "%s" is set on %d/%d instances in %s seconds',
resource, successful_sets, len(self.instances), elapsed_time)
if not locked:
raise LockError('Can not acquire the lock "%s"' % resource)
return elapsed_time | python | async def set_lock(self, resource, lock_identifier):
"""
Tries to set the lock to all the redis instances
:param resource: The resource string name to lock
:param lock_identifier: The id of the lock. A unique string
:return float: The elapsed time that took to lock the instances
in seconds
:raises: LockError if the lock has not been set to at least (N/2 + 1)
instances
"""
start_time = time.time()
lock_timeout = self.lock_timeout
successes = await asyncio.gather(*[
i.set_lock(resource, lock_identifier, lock_timeout) for
i in self.instances
], return_exceptions=True)
successful_sets = sum(s is None for s in successes)
elapsed_time = time.time() - start_time
locked = True if successful_sets >= int(len(self.instances) / 2) + 1 else False
self.log.debug('Lock "%s" is set on %d/%d instances in %s seconds',
resource, successful_sets, len(self.instances), elapsed_time)
if not locked:
raise LockError('Can not acquire the lock "%s"' % resource)
return elapsed_time | [
"async",
"def",
"set_lock",
"(",
"self",
",",
"resource",
",",
"lock_identifier",
")",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"lock_timeout",
"=",
"self",
".",
"lock_timeout",
"successes",
"=",
"await",
"asyncio",
".",
"gather",
"(",
"*",
... | Tries to set the lock to all the redis instances
:param resource: The resource string name to lock
:param lock_identifier: The id of the lock. A unique string
:return float: The elapsed time that took to lock the instances
in seconds
:raises: LockError if the lock has not been set to at least (N/2 + 1)
instances | [
"Tries",
"to",
"set",
"the",
"lock",
"to",
"all",
"the",
"redis",
"instances"
] | 6c62f0895c93b26b87ca8e3fe36bc024c81be421 | https://github.com/joanvila/aioredlock/blob/6c62f0895c93b26b87ca8e3fe36bc024c81be421/aioredlock/redis.py#L227-L256 | train | 34,982 |
joanvila/aioredlock | aioredlock/redis.py | Redis.unset_lock | async def unset_lock(self, resource, lock_identifier):
"""
Tries to unset the lock to all the redis instances
:param resource: The resource string name to lock
:param lock_identifier: The id of the lock. A unique string
:return float: The elapsed time that took to lock the instances in iseconds
:raises: LockError if the lock has not matching identifier in more then
(N/2 - 1) instances
"""
start_time = time.time()
successes = await asyncio.gather(*[
i.unset_lock(resource, lock_identifier) for
i in self.instances
], return_exceptions=True)
successful_remvoes = sum(s is None for s in successes)
elapsed_time = time.time() - start_time
unlocked = True if successful_remvoes >= int(len(self.instances) / 2) + 1 else False
self.log.debug('Lock "%s" is unset on %d/%d instances in %s seconds',
resource, successful_remvoes, len(self.instances), elapsed_time)
if not unlocked:
raise LockError('Can not release the lock')
return elapsed_time | python | async def unset_lock(self, resource, lock_identifier):
"""
Tries to unset the lock to all the redis instances
:param resource: The resource string name to lock
:param lock_identifier: The id of the lock. A unique string
:return float: The elapsed time that took to lock the instances in iseconds
:raises: LockError if the lock has not matching identifier in more then
(N/2 - 1) instances
"""
start_time = time.time()
successes = await asyncio.gather(*[
i.unset_lock(resource, lock_identifier) for
i in self.instances
], return_exceptions=True)
successful_remvoes = sum(s is None for s in successes)
elapsed_time = time.time() - start_time
unlocked = True if successful_remvoes >= int(len(self.instances) / 2) + 1 else False
self.log.debug('Lock "%s" is unset on %d/%d instances in %s seconds',
resource, successful_remvoes, len(self.instances), elapsed_time)
if not unlocked:
raise LockError('Can not release the lock')
return elapsed_time | [
"async",
"def",
"unset_lock",
"(",
"self",
",",
"resource",
",",
"lock_identifier",
")",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"successes",
"=",
"await",
"asyncio",
".",
"gather",
"(",
"*",
"[",
"i",
".",
"unset_lock",
"(",
"resource",
... | Tries to unset the lock to all the redis instances
:param resource: The resource string name to lock
:param lock_identifier: The id of the lock. A unique string
:return float: The elapsed time that took to lock the instances in iseconds
:raises: LockError if the lock has not matching identifier in more then
(N/2 - 1) instances | [
"Tries",
"to",
"unset",
"the",
"lock",
"to",
"all",
"the",
"redis",
"instances"
] | 6c62f0895c93b26b87ca8e3fe36bc024c81be421 | https://github.com/joanvila/aioredlock/blob/6c62f0895c93b26b87ca8e3fe36bc024c81be421/aioredlock/redis.py#L258-L285 | train | 34,983 |
nanoporetech/ont_fast5_api | ont_fast5_api/fast5_info.py | _clean | def _clean(value):
""" Convert numpy numeric types to their python equivalents. """
if isinstance(value, np.ndarray):
if value.dtype.kind == 'S':
return np.char.decode(value).tolist()
else:
return value.tolist()
elif type(value).__module__ == np.__name__:
# h5py==2.8.0 on windows sometimes fails to cast this from an np.float64 to a python.float
# We have explicitly cast in Albacore (merge 488) to avoid this bug, since casting here could be dangerous
# https://github.com/h5py/h5py/issues/1051
conversion = np.asscalar(value)
if sys.version_info.major == 3 and isinstance(conversion, bytes):
conversion = conversion.decode()
return conversion
elif sys.version_info.major == 3 and isinstance(value, bytes):
return value.decode()
else:
return value | python | def _clean(value):
""" Convert numpy numeric types to their python equivalents. """
if isinstance(value, np.ndarray):
if value.dtype.kind == 'S':
return np.char.decode(value).tolist()
else:
return value.tolist()
elif type(value).__module__ == np.__name__:
# h5py==2.8.0 on windows sometimes fails to cast this from an np.float64 to a python.float
# We have explicitly cast in Albacore (merge 488) to avoid this bug, since casting here could be dangerous
# https://github.com/h5py/h5py/issues/1051
conversion = np.asscalar(value)
if sys.version_info.major == 3 and isinstance(conversion, bytes):
conversion = conversion.decode()
return conversion
elif sys.version_info.major == 3 and isinstance(value, bytes):
return value.decode()
else:
return value | [
"def",
"_clean",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"np",
".",
"ndarray",
")",
":",
"if",
"value",
".",
"dtype",
".",
"kind",
"==",
"'S'",
":",
"return",
"np",
".",
"char",
".",
"decode",
"(",
"value",
")",
".",
"tolis... | Convert numpy numeric types to their python equivalents. | [
"Convert",
"numpy",
"numeric",
"types",
"to",
"their",
"python",
"equivalents",
"."
] | 352b3903155fcf4f19234c4f429dcefaa6d6bc4a | https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_info.py#L169-L187 | train | 34,984 |
nanoporetech/ont_fast5_api | ont_fast5_api/analysis_tools/basecall_2d.py | Basecall2DTools.get_prior_alignment | def get_prior_alignment(self):
""" Return the prior alignment that was used for 2D basecalling.
:return: Alignment data table.
"""
data_group = '{}/HairpinAlign'.format(self.group_name)
data = self.handle.get_analysis_dataset(data_group, 'Alignment')
return data | python | def get_prior_alignment(self):
""" Return the prior alignment that was used for 2D basecalling.
:return: Alignment data table.
"""
data_group = '{}/HairpinAlign'.format(self.group_name)
data = self.handle.get_analysis_dataset(data_group, 'Alignment')
return data | [
"def",
"get_prior_alignment",
"(",
"self",
")",
":",
"data_group",
"=",
"'{}/HairpinAlign'",
".",
"format",
"(",
"self",
".",
"group_name",
")",
"data",
"=",
"self",
".",
"handle",
".",
"get_analysis_dataset",
"(",
"data_group",
",",
"'Alignment'",
")",
"retur... | Return the prior alignment that was used for 2D basecalling.
:return: Alignment data table. | [
"Return",
"the",
"prior",
"alignment",
"that",
"was",
"used",
"for",
"2D",
"basecalling",
"."
] | 352b3903155fcf4f19234c4f429dcefaa6d6bc4a | https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/analysis_tools/basecall_2d.py#L17-L24 | train | 34,985 |
nanoporetech/ont_fast5_api | ont_fast5_api/analysis_tools/basecall_2d.py | Basecall2DTools.get_2d_call_alignment | def get_2d_call_alignment(self):
""" Return the alignment and model_states from the 2D basecall.
:return: Alignment data table.
"""
data_group = '{}/BaseCalled_2D'.format(self.group_name)
data = self.handle.get_analysis_dataset(data_group, 'Alignment')
return data | python | def get_2d_call_alignment(self):
""" Return the alignment and model_states from the 2D basecall.
:return: Alignment data table.
"""
data_group = '{}/BaseCalled_2D'.format(self.group_name)
data = self.handle.get_analysis_dataset(data_group, 'Alignment')
return data | [
"def",
"get_2d_call_alignment",
"(",
"self",
")",
":",
"data_group",
"=",
"'{}/BaseCalled_2D'",
".",
"format",
"(",
"self",
".",
"group_name",
")",
"data",
"=",
"self",
".",
"handle",
".",
"get_analysis_dataset",
"(",
"data_group",
",",
"'Alignment'",
")",
"re... | Return the alignment and model_states from the 2D basecall.
:return: Alignment data table. | [
"Return",
"the",
"alignment",
"and",
"model_states",
"from",
"the",
"2D",
"basecall",
"."
] | 352b3903155fcf4f19234c4f429dcefaa6d6bc4a | https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/analysis_tools/basecall_2d.py#L26-L33 | train | 34,986 |
nanoporetech/ont_fast5_api | ont_fast5_api/analysis_tools/alignment.py | AlignmentTools.get_results | def get_results(self):
""" Get details about the alignments that have been performed.
:return: A dict of dicts.
The keys of the top level are 'template', 'complement' and '2d'.
Each of these dicts contains the following fields:
* status: Can be 'no data', 'no match found', or 'match found'.
* direction: Can be 'forward', 'reverse'.
* ref_name: Name of reference.
* ref_span: Section of reference aligned to, as a tuple (start, end).
* seq_span: Section of the called sequence that aligned, as a tuple (start, end).
* seq_len: Total length of the called sequence.
* num_aligned: Number of bases that aligned to bases in the reference.
* num_correct: Number of aligned bases that match the reference.
* num_deletions: Number of bases in the aligned section of the
reference that are not aligned to bases in the called sequence.
* num_insertions: Number of bases in the aligned section of the called
sequence that are not aligned to bases in the reference.
* identity: The fraction of aligned bases that are correct (num_correct /
num_aligned).
* accuracy: The overall basecall accuracy, according to the alignment.
(num_correct / (num_aligned + num_deletions + num_insertions)).
Note that if the status field is not 'match found', then all the other
fields will be absent.
"""
summary = self.handle.get_summary_data(self.group_name)
results = {'template': {'status': 'no data'},
'complement': {'status': 'no data'},
'2d': {'status': 'no data'}}
if 'genome_mapping_template' in summary:
results['template'] = self._get_results(summary['genome_mapping_template'])
if 'genome_mapping_complement' in summary:
results['complement'] = self._get_results(summary['genome_mapping_complement'])
if 'genome_mapping_2d' in summary:
results['2d'] = self._get_results(summary['genome_mapping_2d'])
return results | python | def get_results(self):
""" Get details about the alignments that have been performed.
:return: A dict of dicts.
The keys of the top level are 'template', 'complement' and '2d'.
Each of these dicts contains the following fields:
* status: Can be 'no data', 'no match found', or 'match found'.
* direction: Can be 'forward', 'reverse'.
* ref_name: Name of reference.
* ref_span: Section of reference aligned to, as a tuple (start, end).
* seq_span: Section of the called sequence that aligned, as a tuple (start, end).
* seq_len: Total length of the called sequence.
* num_aligned: Number of bases that aligned to bases in the reference.
* num_correct: Number of aligned bases that match the reference.
* num_deletions: Number of bases in the aligned section of the
reference that are not aligned to bases in the called sequence.
* num_insertions: Number of bases in the aligned section of the called
sequence that are not aligned to bases in the reference.
* identity: The fraction of aligned bases that are correct (num_correct /
num_aligned).
* accuracy: The overall basecall accuracy, according to the alignment.
(num_correct / (num_aligned + num_deletions + num_insertions)).
Note that if the status field is not 'match found', then all the other
fields will be absent.
"""
summary = self.handle.get_summary_data(self.group_name)
results = {'template': {'status': 'no data'},
'complement': {'status': 'no data'},
'2d': {'status': 'no data'}}
if 'genome_mapping_template' in summary:
results['template'] = self._get_results(summary['genome_mapping_template'])
if 'genome_mapping_complement' in summary:
results['complement'] = self._get_results(summary['genome_mapping_complement'])
if 'genome_mapping_2d' in summary:
results['2d'] = self._get_results(summary['genome_mapping_2d'])
return results | [
"def",
"get_results",
"(",
"self",
")",
":",
"summary",
"=",
"self",
".",
"handle",
".",
"get_summary_data",
"(",
"self",
".",
"group_name",
")",
"results",
"=",
"{",
"'template'",
":",
"{",
"'status'",
":",
"'no data'",
"}",
",",
"'complement'",
":",
"{... | Get details about the alignments that have been performed.
:return: A dict of dicts.
The keys of the top level are 'template', 'complement' and '2d'.
Each of these dicts contains the following fields:
* status: Can be 'no data', 'no match found', or 'match found'.
* direction: Can be 'forward', 'reverse'.
* ref_name: Name of reference.
* ref_span: Section of reference aligned to, as a tuple (start, end).
* seq_span: Section of the called sequence that aligned, as a tuple (start, end).
* seq_len: Total length of the called sequence.
* num_aligned: Number of bases that aligned to bases in the reference.
* num_correct: Number of aligned bases that match the reference.
* num_deletions: Number of bases in the aligned section of the
reference that are not aligned to bases in the called sequence.
* num_insertions: Number of bases in the aligned section of the called
sequence that are not aligned to bases in the reference.
* identity: The fraction of aligned bases that are correct (num_correct /
num_aligned).
* accuracy: The overall basecall accuracy, according to the alignment.
(num_correct / (num_aligned + num_deletions + num_insertions)).
Note that if the status field is not 'match found', then all the other
fields will be absent. | [
"Get",
"details",
"about",
"the",
"alignments",
"that",
"have",
"been",
"performed",
"."
] | 352b3903155fcf4f19234c4f429dcefaa6d6bc4a | https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/analysis_tools/alignment.py#L62-L100 | train | 34,987 |
nanoporetech/ont_fast5_api | ont_fast5_api/analysis_tools/alignment.py | AlignmentTools.calculate_speed | def calculate_speed(self, section, alignment_results=None):
""" Calculate speed using alignment information.
:param section: The section (template or complement) we're calculating
speed for.
:param alignment_results: Optional dictionary of the alignment summary,
so that speed can be calculated without having to write the summary
out to the fast5 file first.
:return: Speed in bases per second or zero if the speed could not be
calculated.
The only reliable way we have of finding out how many bases have gone through the pore is by
looking at how much of the reference the sequence aligned to. This takes that information and
uses it to calculate speed in reference-bases-per-second.
"""
speed = 0.0
if alignment_results:
results = self._get_results(alignment_results)
else:
results = self.get_results()[section]
if results['status'] != 'match found':
return 0.0
ref_span = results['ref_span']
ref_len = ref_span[1] - ref_span[0]
seq_span = results['seq_span']
seq_len = seq_span[1] - seq_span[0]
total_len = results['seq_len']
sample_rate = self.handle.get_channel_info()['sampling_rate']
# We need the duration from the segmentation results
chain = self.handle.get_chain(self.group_name)
if chain is not None:
segmentation_group = dict(chain).get('segmentation')
else:
segmentation_group = None
duration = 0
if segmentation_group is not None:
with SegmentationTools(self.handle, group_name=segmentation_group) as seg:
summary = seg.get_results()
if summary is not None:
duration = summary['duration_{}'.format(section)]
if duration == 0:
return 0.0
normalized_duration = duration * seq_len / float(total_len)
speed = sample_rate * ref_len / normalized_duration
return speed | python | def calculate_speed(self, section, alignment_results=None):
""" Calculate speed using alignment information.
:param section: The section (template or complement) we're calculating
speed for.
:param alignment_results: Optional dictionary of the alignment summary,
so that speed can be calculated without having to write the summary
out to the fast5 file first.
:return: Speed in bases per second or zero if the speed could not be
calculated.
The only reliable way we have of finding out how many bases have gone through the pore is by
looking at how much of the reference the sequence aligned to. This takes that information and
uses it to calculate speed in reference-bases-per-second.
"""
speed = 0.0
if alignment_results:
results = self._get_results(alignment_results)
else:
results = self.get_results()[section]
if results['status'] != 'match found':
return 0.0
ref_span = results['ref_span']
ref_len = ref_span[1] - ref_span[0]
seq_span = results['seq_span']
seq_len = seq_span[1] - seq_span[0]
total_len = results['seq_len']
sample_rate = self.handle.get_channel_info()['sampling_rate']
# We need the duration from the segmentation results
chain = self.handle.get_chain(self.group_name)
if chain is not None:
segmentation_group = dict(chain).get('segmentation')
else:
segmentation_group = None
duration = 0
if segmentation_group is not None:
with SegmentationTools(self.handle, group_name=segmentation_group) as seg:
summary = seg.get_results()
if summary is not None:
duration = summary['duration_{}'.format(section)]
if duration == 0:
return 0.0
normalized_duration = duration * seq_len / float(total_len)
speed = sample_rate * ref_len / normalized_duration
return speed | [
"def",
"calculate_speed",
"(",
"self",
",",
"section",
",",
"alignment_results",
"=",
"None",
")",
":",
"speed",
"=",
"0.0",
"if",
"alignment_results",
":",
"results",
"=",
"self",
".",
"_get_results",
"(",
"alignment_results",
")",
"else",
":",
"results",
"... | Calculate speed using alignment information.
:param section: The section (template or complement) we're calculating
speed for.
:param alignment_results: Optional dictionary of the alignment summary,
so that speed can be calculated without having to write the summary
out to the fast5 file first.
:return: Speed in bases per second or zero if the speed could not be
calculated.
The only reliable way we have of finding out how many bases have gone through the pore is by
looking at how much of the reference the sequence aligned to. This takes that information and
uses it to calculate speed in reference-bases-per-second. | [
"Calculate",
"speed",
"using",
"alignment",
"information",
"."
] | 352b3903155fcf4f19234c4f429dcefaa6d6bc4a | https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/analysis_tools/alignment.py#L134-L181 | train | 34,988 |
nanoporetech/ont_fast5_api | ont_fast5_api/fast5_read.py | Fast5Read.add_raw_data | def add_raw_data(self, data, attrs):
""" Add raw data for a read.
:param data: The raw data DAQ values (16 bit integers).
The read must already exist in the file. It must not already
have raw data.
"""
self.assert_writeable()
if "Raw" not in self.handle:
self.handle.create_group("Raw")
if "Signal" in self.handle['Raw']:
msg = "Fast5 file already has raw data for read '{}' in: {}"
raise KeyError(msg.format(self.read_id, self.filename))
self.handle['Raw'].create_dataset('Signal', data=data, compression='gzip', shuffle=True, dtype='i2')
self._add_attributes("Raw", attrs, clear=True) | python | def add_raw_data(self, data, attrs):
""" Add raw data for a read.
:param data: The raw data DAQ values (16 bit integers).
The read must already exist in the file. It must not already
have raw data.
"""
self.assert_writeable()
if "Raw" not in self.handle:
self.handle.create_group("Raw")
if "Signal" in self.handle['Raw']:
msg = "Fast5 file already has raw data for read '{}' in: {}"
raise KeyError(msg.format(self.read_id, self.filename))
self.handle['Raw'].create_dataset('Signal', data=data, compression='gzip', shuffle=True, dtype='i2')
self._add_attributes("Raw", attrs, clear=True) | [
"def",
"add_raw_data",
"(",
"self",
",",
"data",
",",
"attrs",
")",
":",
"self",
".",
"assert_writeable",
"(",
")",
"if",
"\"Raw\"",
"not",
"in",
"self",
".",
"handle",
":",
"self",
".",
"handle",
".",
"create_group",
"(",
"\"Raw\"",
")",
"if",
"\"Sign... | Add raw data for a read.
:param data: The raw data DAQ values (16 bit integers).
The read must already exist in the file. It must not already
have raw data. | [
"Add",
"raw",
"data",
"for",
"a",
"read",
"."
] | 352b3903155fcf4f19234c4f429dcefaa6d6bc4a | https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_read.py#L26-L41 | train | 34,989 |
nanoporetech/ont_fast5_api | ont_fast5_api/fast5_read.py | Fast5Read.add_channel_info | def add_channel_info(self, attrs, clear=False):
""" Add channel info data to the channel_id group.
:param data: A dictionary of key/value pairs. Keys must be strings.
Values can be strings or numeric values.
:param clear: If set, any existing channel info data will be removed.
"""
self.assert_writeable()
if 'channel_id' not in self.handle:
self.handle.create_group('channel_id')
self._add_attributes('channel_id', attrs, clear) | python | def add_channel_info(self, attrs, clear=False):
""" Add channel info data to the channel_id group.
:param data: A dictionary of key/value pairs. Keys must be strings.
Values can be strings or numeric values.
:param clear: If set, any existing channel info data will be removed.
"""
self.assert_writeable()
if 'channel_id' not in self.handle:
self.handle.create_group('channel_id')
self._add_attributes('channel_id', attrs, clear) | [
"def",
"add_channel_info",
"(",
"self",
",",
"attrs",
",",
"clear",
"=",
"False",
")",
":",
"self",
".",
"assert_writeable",
"(",
")",
"if",
"'channel_id'",
"not",
"in",
"self",
".",
"handle",
":",
"self",
".",
"handle",
".",
"create_group",
"(",
"'chann... | Add channel info data to the channel_id group.
:param data: A dictionary of key/value pairs. Keys must be strings.
Values can be strings or numeric values.
:param clear: If set, any existing channel info data will be removed. | [
"Add",
"channel",
"info",
"data",
"to",
"the",
"channel_id",
"group",
"."
] | 352b3903155fcf4f19234c4f429dcefaa6d6bc4a | https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_read.py#L43-L53 | train | 34,990 |
nanoporetech/ont_fast5_api | ont_fast5_api/fast5_read.py | Fast5Read.add_analysis | def add_analysis(self, component, group_name, attrs, config=None):
""" Add a new analysis group to the file.
:param component: The component name.
:param group_name: The name to use for the group. Must not already
exist in the file e.g. 'Test_000'.
:param attrs: A dictionary containing the key-value pairs to
put in the analysis group as attributes. Keys must be strings,
and values must be strings or numeric types.
:param config: A dictionary of dictionaries. The top level keys
should be the name of analysis steps, and should contain
key value pairs for analysis parameters used.
"""
if "Analyses" not in self.handle:
self.handle.create_group("Analyses")
super(Fast5Read, self).add_analysis(component, group_name, attrs, config) | python | def add_analysis(self, component, group_name, attrs, config=None):
""" Add a new analysis group to the file.
:param component: The component name.
:param group_name: The name to use for the group. Must not already
exist in the file e.g. 'Test_000'.
:param attrs: A dictionary containing the key-value pairs to
put in the analysis group as attributes. Keys must be strings,
and values must be strings or numeric types.
:param config: A dictionary of dictionaries. The top level keys
should be the name of analysis steps, and should contain
key value pairs for analysis parameters used.
"""
if "Analyses" not in self.handle:
self.handle.create_group("Analyses")
super(Fast5Read, self).add_analysis(component, group_name, attrs, config) | [
"def",
"add_analysis",
"(",
"self",
",",
"component",
",",
"group_name",
",",
"attrs",
",",
"config",
"=",
"None",
")",
":",
"if",
"\"Analyses\"",
"not",
"in",
"self",
".",
"handle",
":",
"self",
".",
"handle",
".",
"create_group",
"(",
"\"Analyses\"",
"... | Add a new analysis group to the file.
:param component: The component name.
:param group_name: The name to use for the group. Must not already
exist in the file e.g. 'Test_000'.
:param attrs: A dictionary containing the key-value pairs to
put in the analysis group as attributes. Keys must be strings,
and values must be strings or numeric types.
:param config: A dictionary of dictionaries. The top level keys
should be the name of analysis steps, and should contain
key value pairs for analysis parameters used. | [
"Add",
"a",
"new",
"analysis",
"group",
"to",
"the",
"file",
"."
] | 352b3903155fcf4f19234c4f429dcefaa6d6bc4a | https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_read.py#L61-L76 | train | 34,991 |
nanoporetech/ont_fast5_api | ont_fast5_api/fast5_writer.py | Fast5Writer.write_strand | def write_strand(self, strand):
""" Writes a Strand object to the stream. """
if strand['channel'] != self._current_channel \
or self._strand_counter == self._reads_per_file:
self._start_new_file(strand)
fname = self._write_strand(strand)
self._index.write('{}\t{}\t{}\t{}\n'.format(strand['channel'],
strand['read_attrs']['read_number'],
self._current_file, fname))
return | python | def write_strand(self, strand):
""" Writes a Strand object to the stream. """
if strand['channel'] != self._current_channel \
or self._strand_counter == self._reads_per_file:
self._start_new_file(strand)
fname = self._write_strand(strand)
self._index.write('{}\t{}\t{}\t{}\n'.format(strand['channel'],
strand['read_attrs']['read_number'],
self._current_file, fname))
return | [
"def",
"write_strand",
"(",
"self",
",",
"strand",
")",
":",
"if",
"strand",
"[",
"'channel'",
"]",
"!=",
"self",
".",
"_current_channel",
"or",
"self",
".",
"_strand_counter",
"==",
"self",
".",
"_reads_per_file",
":",
"self",
".",
"_start_new_file",
"(",
... | Writes a Strand object to the stream. | [
"Writes",
"a",
"Strand",
"object",
"to",
"the",
"stream",
"."
] | 352b3903155fcf4f19234c4f429dcefaa6d6bc4a | https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_writer.py#L53-L62 | train | 34,992 |
nanoporetech/ont_fast5_api | ont_fast5_api/fast5_file.py | AbstractFast5File.close | def close(self):
""" Closes the object.
"""
if self._is_open:
self.mode = None
if self.handle:
self.handle.close()
self.handle = None
self.filename = None
self._is_open = False
self.status = None | python | def close(self):
""" Closes the object.
"""
if self._is_open:
self.mode = None
if self.handle:
self.handle.close()
self.handle = None
self.filename = None
self._is_open = False
self.status = None | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_is_open",
":",
"self",
".",
"mode",
"=",
"None",
"if",
"self",
".",
"handle",
":",
"self",
".",
"handle",
".",
"close",
"(",
")",
"self",
".",
"handle",
"=",
"None",
"self",
".",
"filena... | Closes the object. | [
"Closes",
"the",
"object",
"."
] | 352b3903155fcf4f19234c4f429dcefaa6d6bc4a | https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_file.py#L72-L82 | train | 34,993 |
nanoporetech/ont_fast5_api | ont_fast5_api/fast5_file.py | Fast5File.add_chain | def add_chain(self, group_name, component_map):
"""
Adds the component chain to ``group_name`` in the fast5.
These are added as attributes to the group.
:param group_name: The group name you wish to add chaining data to,
e.g. ``Test_000``
:param component_map: The set of components and corresponding
group names or group paths that contribute data to the analysis.
If group names are provided, these will be converted into group
paths.
If ``Test_000`` uses data from the results of
``first_component`` stored at ``Analyses/First_000/``
the component_map could be ``{'first_component': 'First_000'}`` or
``{'first_component': 'Analyses/First_000'}``.
"""
self.assert_writeable()
for component, path in component_map.items():
if not path.startswith('Analyses/'):
path = 'Analyses/{}'.format(path)
component_map[component] = path
self.add_analysis_attributes(group_name, component_map) | python | def add_chain(self, group_name, component_map):
"""
Adds the component chain to ``group_name`` in the fast5.
These are added as attributes to the group.
:param group_name: The group name you wish to add chaining data to,
e.g. ``Test_000``
:param component_map: The set of components and corresponding
group names or group paths that contribute data to the analysis.
If group names are provided, these will be converted into group
paths.
If ``Test_000`` uses data from the results of
``first_component`` stored at ``Analyses/First_000/``
the component_map could be ``{'first_component': 'First_000'}`` or
``{'first_component': 'Analyses/First_000'}``.
"""
self.assert_writeable()
for component, path in component_map.items():
if not path.startswith('Analyses/'):
path = 'Analyses/{}'.format(path)
component_map[component] = path
self.add_analysis_attributes(group_name, component_map) | [
"def",
"add_chain",
"(",
"self",
",",
"group_name",
",",
"component_map",
")",
":",
"self",
".",
"assert_writeable",
"(",
")",
"for",
"component",
",",
"path",
"in",
"component_map",
".",
"items",
"(",
")",
":",
"if",
"not",
"path",
".",
"startswith",
"(... | Adds the component chain to ``group_name`` in the fast5.
These are added as attributes to the group.
:param group_name: The group name you wish to add chaining data to,
e.g. ``Test_000``
:param component_map: The set of components and corresponding
group names or group paths that contribute data to the analysis.
If group names are provided, these will be converted into group
paths.
If ``Test_000`` uses data from the results of
``first_component`` stored at ``Analyses/First_000/``
the component_map could be ``{'first_component': 'First_000'}`` or
``{'first_component': 'Analyses/First_000'}``. | [
"Adds",
"the",
"component",
"chain",
"to",
"group_name",
"in",
"the",
"fast5",
".",
"These",
"are",
"added",
"as",
"attributes",
"to",
"the",
"group",
"."
] | 352b3903155fcf4f19234c4f429dcefaa6d6bc4a | https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_file.py#L288-L313 | train | 34,994 |
nanoporetech/ont_fast5_api | ont_fast5_api/fast5_file.py | Fast5File.add_read | def add_read(self, read_number, read_id, start_time, duration, mux, median_before):
""" Add a new read to the file.
:param read_number: The read number to assign to the read.
:param read_id: The unique read-id for the read.
:param start_time: The start time (in samples) of the read.
:param duration: The duration (in samples) of the read.
:param mux: The mux set at the time of the read.
:param median_before: The median level of the data before the read.
Note that most tools assume a file contains only one read.
Putting multiple reads into a file severely limits the
ability to operate on those reads with standard tools.
"""
self.assert_writeable()
read_info = ReadInfo(read_number, read_id, start_time, duration, mux=mux, median_before=median_before)
self.status.read_info.append(read_info)
n = len(self.status.read_info) - 1
self.status.read_number_map[read_number] = n
self.status.read_id_map[read_id] = n
group_name = 'Raw/Reads/Read_{}'.format(read_number)
attrs = {'read_number': read_number,
'read_id': read_id,
'start_time': start_time,
'duration': duration,
'start_mux': mux,
'median_before': median_before}
self._add_group(group_name, attrs) | python | def add_read(self, read_number, read_id, start_time, duration, mux, median_before):
""" Add a new read to the file.
:param read_number: The read number to assign to the read.
:param read_id: The unique read-id for the read.
:param start_time: The start time (in samples) of the read.
:param duration: The duration (in samples) of the read.
:param mux: The mux set at the time of the read.
:param median_before: The median level of the data before the read.
Note that most tools assume a file contains only one read.
Putting multiple reads into a file severely limits the
ability to operate on those reads with standard tools.
"""
self.assert_writeable()
read_info = ReadInfo(read_number, read_id, start_time, duration, mux=mux, median_before=median_before)
self.status.read_info.append(read_info)
n = len(self.status.read_info) - 1
self.status.read_number_map[read_number] = n
self.status.read_id_map[read_id] = n
group_name = 'Raw/Reads/Read_{}'.format(read_number)
attrs = {'read_number': read_number,
'read_id': read_id,
'start_time': start_time,
'duration': duration,
'start_mux': mux,
'median_before': median_before}
self._add_group(group_name, attrs) | [
"def",
"add_read",
"(",
"self",
",",
"read_number",
",",
"read_id",
",",
"start_time",
",",
"duration",
",",
"mux",
",",
"median_before",
")",
":",
"self",
".",
"assert_writeable",
"(",
")",
"read_info",
"=",
"ReadInfo",
"(",
"read_number",
",",
"read_id",
... | Add a new read to the file.
:param read_number: The read number to assign to the read.
:param read_id: The unique read-id for the read.
:param start_time: The start time (in samples) of the read.
:param duration: The duration (in samples) of the read.
:param mux: The mux set at the time of the read.
:param median_before: The median level of the data before the read.
Note that most tools assume a file contains only one read.
Putting multiple reads into a file severely limits the
ability to operate on those reads with standard tools. | [
"Add",
"a",
"new",
"read",
"to",
"the",
"file",
"."
] | 352b3903155fcf4f19234c4f429dcefaa6d6bc4a | https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_file.py#L348-L375 | train | 34,995 |
nanoporetech/ont_fast5_api | ont_fast5_api/fast5_file.py | Fast5File.set_summary_data | def set_summary_data(self, group_name, section_name, data):
""" Set the summary data for an analysis group.
:param group_name: The name of the analysis group.
:param section_name: The analysis step. This will become a
subfolder in the Summary section.
:param data: A dictionary containing keys which are the summary
fields, and values which are the summary values.
"""
self.assert_writeable()
group = 'Analyses/{}/Summary/{}'.format(group_name, section_name)
self._add_group(group, data) | python | def set_summary_data(self, group_name, section_name, data):
""" Set the summary data for an analysis group.
:param group_name: The name of the analysis group.
:param section_name: The analysis step. This will become a
subfolder in the Summary section.
:param data: A dictionary containing keys which are the summary
fields, and values which are the summary values.
"""
self.assert_writeable()
group = 'Analyses/{}/Summary/{}'.format(group_name, section_name)
self._add_group(group, data) | [
"def",
"set_summary_data",
"(",
"self",
",",
"group_name",
",",
"section_name",
",",
"data",
")",
":",
"self",
".",
"assert_writeable",
"(",
")",
"group",
"=",
"'Analyses/{}/Summary/{}'",
".",
"format",
"(",
"group_name",
",",
"section_name",
")",
"self",
".",... | Set the summary data for an analysis group.
:param group_name: The name of the analysis group.
:param section_name: The analysis step. This will become a
subfolder in the Summary section.
:param data: A dictionary containing keys which are the summary
fields, and values which are the summary values. | [
"Set",
"the",
"summary",
"data",
"for",
"an",
"analysis",
"group",
"."
] | 352b3903155fcf4f19234c4f429dcefaa6d6bc4a | https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_file.py#L413-L424 | train | 34,996 |
nanoporetech/ont_fast5_api | ont_fast5_api/fast5_file.py | Fast5File.get_analysis_config | def get_analysis_config(self, group_name):
""" Gets any config data saved for the analysis.
:param group_name: The name of the analysis group.
:returns: A dictionary of dictionaries. Each key represents
an analysis step. Each value is a dictionary containing the
analysis parameters as key/value pairs. Returns None if no
configuration exists for the analysis.
"""
self.assert_open()
group = 'Analyses/{}/Configuration'.format(group_name)
config = None
if group in self.handle:
config = self._parse_attribute_tree(group)
return config | python | def get_analysis_config(self, group_name):
""" Gets any config data saved for the analysis.
:param group_name: The name of the analysis group.
:returns: A dictionary of dictionaries. Each key represents
an analysis step. Each value is a dictionary containing the
analysis parameters as key/value pairs. Returns None if no
configuration exists for the analysis.
"""
self.assert_open()
group = 'Analyses/{}/Configuration'.format(group_name)
config = None
if group in self.handle:
config = self._parse_attribute_tree(group)
return config | [
"def",
"get_analysis_config",
"(",
"self",
",",
"group_name",
")",
":",
"self",
".",
"assert_open",
"(",
")",
"group",
"=",
"'Analyses/{}/Configuration'",
".",
"format",
"(",
"group_name",
")",
"config",
"=",
"None",
"if",
"group",
"in",
"self",
".",
"handle... | Gets any config data saved for the analysis.
:param group_name: The name of the analysis group.
:returns: A dictionary of dictionaries. Each key represents
an analysis step. Each value is a dictionary containing the
analysis parameters as key/value pairs. Returns None if no
configuration exists for the analysis. | [
"Gets",
"any",
"config",
"data",
"saved",
"for",
"the",
"analysis",
"."
] | 352b3903155fcf4f19234c4f429dcefaa6d6bc4a | https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_file.py#L454-L468 | train | 34,997 |
nanoporetech/ont_fast5_api | ont_fast5_api/fast5_file.py | Fast5File.read_summary_data | def read_summary_data(fname, component):
""" Read summary data suitable to encode as a json packet.
:param fname: The fast5 file to pull the summary data from.
:param component: The component name to pull summary data for.
:returns: A dictionary containing the summary data.
"""
summary = {}
with Fast5File(fname, mode='r') as fh:
summary['tracking_id'] = fh.get_tracking_id()
summary['channel_id'] = fh.get_channel_info()
read_info = fh.status.read_info
read_summary = []
for read in read_info:
read_summary.append({'read_number': read.read_number,
'read_id': read.read_id,
'start_time': read.start_time,
'duration': read.duration,
'start_mux': read.start_mux})
summary['reads'] = read_summary
analyses_list = fh.list_analyses(component)
_, group_names = zip(*analyses_list)
group_names = sorted(group_names)
group = group_names[-1]
summary['software'] = fh.get_analysis_attributes(group)
summary['software']['component'] = group[:-4]
summary['data'] = fh.get_summary_data(group)
summary['filename'] = os.path.basename(fname)
return summary | python | def read_summary_data(fname, component):
""" Read summary data suitable to encode as a json packet.
:param fname: The fast5 file to pull the summary data from.
:param component: The component name to pull summary data for.
:returns: A dictionary containing the summary data.
"""
summary = {}
with Fast5File(fname, mode='r') as fh:
summary['tracking_id'] = fh.get_tracking_id()
summary['channel_id'] = fh.get_channel_info()
read_info = fh.status.read_info
read_summary = []
for read in read_info:
read_summary.append({'read_number': read.read_number,
'read_id': read.read_id,
'start_time': read.start_time,
'duration': read.duration,
'start_mux': read.start_mux})
summary['reads'] = read_summary
analyses_list = fh.list_analyses(component)
_, group_names = zip(*analyses_list)
group_names = sorted(group_names)
group = group_names[-1]
summary['software'] = fh.get_analysis_attributes(group)
summary['software']['component'] = group[:-4]
summary['data'] = fh.get_summary_data(group)
summary['filename'] = os.path.basename(fname)
return summary | [
"def",
"read_summary_data",
"(",
"fname",
",",
"component",
")",
":",
"summary",
"=",
"{",
"}",
"with",
"Fast5File",
"(",
"fname",
",",
"mode",
"=",
"'r'",
")",
"as",
"fh",
":",
"summary",
"[",
"'tracking_id'",
"]",
"=",
"fh",
".",
"get_tracking_id",
"... | Read summary data suitable to encode as a json packet.
:param fname: The fast5 file to pull the summary data from.
:param component: The component name to pull summary data for.
:returns: A dictionary containing the summary data. | [
"Read",
"summary",
"data",
"suitable",
"to",
"encode",
"as",
"a",
"json",
"packet",
"."
] | 352b3903155fcf4f19234c4f429dcefaa6d6bc4a | https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_file.py#L644-L674 | train | 34,998 |
mrjoes/sockjs-tornado | sockjs/tornado/transports/base.py | BaseTransportMixin.get_conn_info | def get_conn_info(self):
"""Return `ConnectionInfo` object from current transport"""
return session.ConnectionInfo(self.request.remote_ip,
self.request.cookies,
self.request.arguments,
self.request.headers,
self.request.path) | python | def get_conn_info(self):
"""Return `ConnectionInfo` object from current transport"""
return session.ConnectionInfo(self.request.remote_ip,
self.request.cookies,
self.request.arguments,
self.request.headers,
self.request.path) | [
"def",
"get_conn_info",
"(",
"self",
")",
":",
"return",
"session",
".",
"ConnectionInfo",
"(",
"self",
".",
"request",
".",
"remote_ip",
",",
"self",
".",
"request",
".",
"cookies",
",",
"self",
".",
"request",
".",
"arguments",
",",
"self",
".",
"reque... | Return `ConnectionInfo` object from current transport | [
"Return",
"ConnectionInfo",
"object",
"from",
"current",
"transport"
] | bd3a99b407f1181f054b3b1730f438dde375ca1c | https://github.com/mrjoes/sockjs-tornado/blob/bd3a99b407f1181f054b3b1730f438dde375ca1c/sockjs/tornado/transports/base.py#L12-L18 | train | 34,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.