id int32 0 252k | repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 51 19.8k | code_tokens list | docstring stringlengths 3 17.3k | docstring_tokens list | sha stringlengths 40 40 | url stringlengths 87 242 |
|---|---|---|---|---|---|---|---|---|---|---|---|
18,400 | daler/gffutils | gffutils/feature.py | feature_from_line | def feature_from_line(line, dialect=None, strict=True, keep_order=False):
"""
Given a line from a GFF file, return a Feature object
Parameters
----------
line : string
strict : bool
If True (default), assume `line` is a single, tab-delimited string that
has at least 9 fields.
If False, then the input can have a more flexible format, useful for
creating single ad hoc features or for writing tests. In this case,
`line` can be a multi-line string (as long as it has a single non-empty
line), and, as long as there are only 9 fields (standard GFF/GTF), then
it's OK to use spaces instead of tabs to separate fields in `line`.
But if >9 fields are to be used, then tabs must be used.
keep_order, dialect
Passed directly to :class:`Feature`; see docstring for that class for
description
Returns
-------
A new :class:`Feature` object.
"""
if not strict:
lines = line.splitlines(False)
_lines = []
for i in lines:
i = i.strip()
if len(i) > 0:
_lines.append(i)
assert len(_lines) == 1, _lines
line = _lines[0]
if '\t' in line:
fields = line.rstrip('\n\r').split('\t')
else:
fields = line.rstrip('\n\r').split(None, 8)
else:
fields = line.rstrip('\n\r').split('\t')
try:
attr_string = fields[8]
except IndexError:
attr_string = ""
attrs, _dialect = parser._split_keyvals(attr_string, dialect=dialect)
d = dict(list(zip(constants._gffkeys, fields)))
d['attributes'] = attrs
d['extra'] = fields[9:]
d['keep_order'] = keep_order
if dialect is None:
dialect = _dialect
return Feature(dialect=dialect, **d) | python | def feature_from_line(line, dialect=None, strict=True, keep_order=False):
if not strict:
lines = line.splitlines(False)
_lines = []
for i in lines:
i = i.strip()
if len(i) > 0:
_lines.append(i)
assert len(_lines) == 1, _lines
line = _lines[0]
if '\t' in line:
fields = line.rstrip('\n\r').split('\t')
else:
fields = line.rstrip('\n\r').split(None, 8)
else:
fields = line.rstrip('\n\r').split('\t')
try:
attr_string = fields[8]
except IndexError:
attr_string = ""
attrs, _dialect = parser._split_keyvals(attr_string, dialect=dialect)
d = dict(list(zip(constants._gffkeys, fields)))
d['attributes'] = attrs
d['extra'] = fields[9:]
d['keep_order'] = keep_order
if dialect is None:
dialect = _dialect
return Feature(dialect=dialect, **d) | [
"def",
"feature_from_line",
"(",
"line",
",",
"dialect",
"=",
"None",
",",
"strict",
"=",
"True",
",",
"keep_order",
"=",
"False",
")",
":",
"if",
"not",
"strict",
":",
"lines",
"=",
"line",
".",
"splitlines",
"(",
"False",
")",
"_lines",
"=",
"[",
"... | Given a line from a GFF file, return a Feature object
Parameters
----------
line : string
strict : bool
If True (default), assume `line` is a single, tab-delimited string that
has at least 9 fields.
If False, then the input can have a more flexible format, useful for
creating single ad hoc features or for writing tests. In this case,
`line` can be a multi-line string (as long as it has a single non-empty
line), and, as long as there are only 9 fields (standard GFF/GTF), then
it's OK to use spaces instead of tabs to separate fields in `line`.
But if >9 fields are to be used, then tabs must be used.
keep_order, dialect
Passed directly to :class:`Feature`; see docstring for that class for
description
Returns
-------
A new :class:`Feature` object. | [
"Given",
"a",
"line",
"from",
"a",
"GFF",
"file",
"return",
"a",
"Feature",
"object"
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/feature.py#L356-L411 |
18,401 | daler/gffutils | gffutils/feature.py | Feature.calc_bin | def calc_bin(self, _bin=None):
"""
Calculate the smallest UCSC genomic bin that will contain this feature.
"""
if _bin is None:
try:
_bin = bins.bins(self.start, self.end, one=True)
except TypeError:
_bin = None
return _bin | python | def calc_bin(self, _bin=None):
if _bin is None:
try:
_bin = bins.bins(self.start, self.end, one=True)
except TypeError:
_bin = None
return _bin | [
"def",
"calc_bin",
"(",
"self",
",",
"_bin",
"=",
"None",
")",
":",
"if",
"_bin",
"is",
"None",
":",
"try",
":",
"_bin",
"=",
"bins",
".",
"bins",
"(",
"self",
".",
"start",
",",
"self",
".",
"end",
",",
"one",
"=",
"True",
")",
"except",
"Type... | Calculate the smallest UCSC genomic bin that will contain this feature. | [
"Calculate",
"the",
"smallest",
"UCSC",
"genomic",
"bin",
"that",
"will",
"contain",
"this",
"feature",
"."
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/feature.py#L182-L191 |
18,402 | daler/gffutils | gffutils/feature.py | Feature.astuple | def astuple(self, encoding=None):
"""
Return a tuple suitable for import into a database.
Attributes field and extra field jsonified into strings. The order of
fields is such that they can be supplied as arguments for the query
defined in :attr:`gffutils.constants._INSERT`.
If `encoding` is not None, then convert string fields to unicode using
the provided encoding.
Returns
-------
Tuple
"""
if not encoding:
return (
self.id, self.seqid, self.source, self.featuretype, self.start,
self.end, self.score, self.strand, self.frame,
helpers._jsonify(self.attributes),
helpers._jsonify(self.extra), self.calc_bin()
)
return (
self.id.decode(encoding), self.seqid.decode(encoding),
self.source.decode(encoding), self.featuretype.decode(encoding),
self.start, self.end, self.score.decode(encoding),
self.strand.decode(encoding), self.frame.decode(encoding),
helpers._jsonify(self.attributes).decode(encoding),
helpers._jsonify(self.extra).decode(encoding), self.calc_bin()
) | python | def astuple(self, encoding=None):
if not encoding:
return (
self.id, self.seqid, self.source, self.featuretype, self.start,
self.end, self.score, self.strand, self.frame,
helpers._jsonify(self.attributes),
helpers._jsonify(self.extra), self.calc_bin()
)
return (
self.id.decode(encoding), self.seqid.decode(encoding),
self.source.decode(encoding), self.featuretype.decode(encoding),
self.start, self.end, self.score.decode(encoding),
self.strand.decode(encoding), self.frame.decode(encoding),
helpers._jsonify(self.attributes).decode(encoding),
helpers._jsonify(self.extra).decode(encoding), self.calc_bin()
) | [
"def",
"astuple",
"(",
"self",
",",
"encoding",
"=",
"None",
")",
":",
"if",
"not",
"encoding",
":",
"return",
"(",
"self",
".",
"id",
",",
"self",
".",
"seqid",
",",
"self",
".",
"source",
",",
"self",
".",
"featuretype",
",",
"self",
".",
"start"... | Return a tuple suitable for import into a database.
Attributes field and extra field jsonified into strings. The order of
fields is such that they can be supplied as arguments for the query
defined in :attr:`gffutils.constants._INSERT`.
If `encoding` is not None, then convert string fields to unicode using
the provided encoding.
Returns
-------
Tuple | [
"Return",
"a",
"tuple",
"suitable",
"for",
"import",
"into",
"a",
"database",
"."
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/feature.py#L293-L322 |
18,403 | daler/gffutils | gffutils/feature.py | Feature.sequence | def sequence(self, fasta, use_strand=True):
"""
Retrieves the sequence of this feature as a string.
Uses the pyfaidx package.
Parameters
----------
fasta : str
If str, then it's a FASTA-format filename; otherwise assume it's
a pyfaidx.Fasta object.
use_strand : bool
If True (default), the sequence returned will be
reverse-complemented for minus-strand features.
Returns
-------
string
"""
if isinstance(fasta, six.string_types):
fasta = Fasta(fasta, as_raw=False)
# recall GTF/GFF is 1-based closed; pyfaidx uses Python slice notation
# and is therefore 0-based half-open.
seq = fasta[self.chrom][self.start-1:self.stop]
if use_strand and self.strand == '-':
seq = seq.reverse.complement
return seq.seq | python | def sequence(self, fasta, use_strand=True):
if isinstance(fasta, six.string_types):
fasta = Fasta(fasta, as_raw=False)
# recall GTF/GFF is 1-based closed; pyfaidx uses Python slice notation
# and is therefore 0-based half-open.
seq = fasta[self.chrom][self.start-1:self.stop]
if use_strand and self.strand == '-':
seq = seq.reverse.complement
return seq.seq | [
"def",
"sequence",
"(",
"self",
",",
"fasta",
",",
"use_strand",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"fasta",
",",
"six",
".",
"string_types",
")",
":",
"fasta",
"=",
"Fasta",
"(",
"fasta",
",",
"as_raw",
"=",
"False",
")",
"# recall GTF/GF... | Retrieves the sequence of this feature as a string.
Uses the pyfaidx package.
Parameters
----------
fasta : str
If str, then it's a FASTA-format filename; otherwise assume it's
a pyfaidx.Fasta object.
use_strand : bool
If True (default), the sequence returned will be
reverse-complemented for minus-strand features.
Returns
-------
string | [
"Retrieves",
"the",
"sequence",
"of",
"this",
"feature",
"as",
"a",
"string",
"."
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/feature.py#L324-L353 |
18,404 | daler/gffutils | gffutils/helpers.py | infer_dialect | def infer_dialect(attributes):
"""
Infer the dialect based on the attributes.
Parameters
----------
attributes : str or iterable
A single attributes string from a GTF or GFF line, or an iterable of
such strings.
Returns
-------
Dictionary representing the inferred dialect
"""
if isinstance(attributes, six.string_types):
attributes = [attributes]
dialects = [parser._split_keyvals(i)[1] for i in attributes]
return _choose_dialect(dialects) | python | def infer_dialect(attributes):
if isinstance(attributes, six.string_types):
attributes = [attributes]
dialects = [parser._split_keyvals(i)[1] for i in attributes]
return _choose_dialect(dialects) | [
"def",
"infer_dialect",
"(",
"attributes",
")",
":",
"if",
"isinstance",
"(",
"attributes",
",",
"six",
".",
"string_types",
")",
":",
"attributes",
"=",
"[",
"attributes",
"]",
"dialects",
"=",
"[",
"parser",
".",
"_split_keyvals",
"(",
"i",
")",
"[",
"... | Infer the dialect based on the attributes.
Parameters
----------
attributes : str or iterable
A single attributes string from a GTF or GFF line, or an iterable of
such strings.
Returns
-------
Dictionary representing the inferred dialect | [
"Infer",
"the",
"dialect",
"based",
"on",
"the",
"attributes",
"."
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L25-L42 |
18,405 | daler/gffutils | gffutils/helpers.py | _choose_dialect | def _choose_dialect(dialects):
"""
Given a list of dialects, choose the one to use as the "canonical" version.
If `dialects` is an empty list, then use the default GFF3 dialect
Parameters
----------
dialects : iterable
iterable of dialect dictionaries
Returns
-------
dict
"""
# NOTE: can use helpers.dialect_compare if you need to make this more
# complex....
# For now, this function favors the first dialect, and then appends the
# order of additional fields seen in the attributes of other lines giving
# priority to dialects that come first in the iterable.
if len(dialects) == 0:
return constants.dialect
final_order = []
for dialect in dialects:
for o in dialect['order']:
if o not in final_order:
final_order.append(o)
dialect = dialects[0]
dialect['order'] = final_order
return dialect | python | def _choose_dialect(dialects):
# NOTE: can use helpers.dialect_compare if you need to make this more
# complex....
# For now, this function favors the first dialect, and then appends the
# order of additional fields seen in the attributes of other lines giving
# priority to dialects that come first in the iterable.
if len(dialects) == 0:
return constants.dialect
final_order = []
for dialect in dialects:
for o in dialect['order']:
if o not in final_order:
final_order.append(o)
dialect = dialects[0]
dialect['order'] = final_order
return dialect | [
"def",
"_choose_dialect",
"(",
"dialects",
")",
":",
"# NOTE: can use helpers.dialect_compare if you need to make this more",
"# complex....",
"# For now, this function favors the first dialect, and then appends the",
"# order of additional fields seen in the attributes of other lines giving",
"... | Given a list of dialects, choose the one to use as the "canonical" version.
If `dialects` is an empty list, then use the default GFF3 dialect
Parameters
----------
dialects : iterable
iterable of dialect dictionaries
Returns
-------
dict | [
"Given",
"a",
"list",
"of",
"dialects",
"choose",
"the",
"one",
"to",
"use",
"as",
"the",
"canonical",
"version",
"."
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L45-L75 |
18,406 | daler/gffutils | gffutils/helpers.py | _bin_from_dict | def _bin_from_dict(d):
"""
Given a dictionary yielded by the parser, return the genomic "UCSC" bin
"""
try:
start = int(d['start'])
end = int(d['end'])
return bins.bins(start, end, one=True)
# e.g., if "."
except ValueError:
return None | python | def _bin_from_dict(d):
try:
start = int(d['start'])
end = int(d['end'])
return bins.bins(start, end, one=True)
# e.g., if "."
except ValueError:
return None | [
"def",
"_bin_from_dict",
"(",
"d",
")",
":",
"try",
":",
"start",
"=",
"int",
"(",
"d",
"[",
"'start'",
"]",
")",
"end",
"=",
"int",
"(",
"d",
"[",
"'end'",
"]",
")",
"return",
"bins",
".",
"bins",
"(",
"start",
",",
"end",
",",
"one",
"=",
"... | Given a dictionary yielded by the parser, return the genomic "UCSC" bin | [
"Given",
"a",
"dictionary",
"yielded",
"by",
"the",
"parser",
"return",
"the",
"genomic",
"UCSC",
"bin"
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L242-L253 |
18,407 | daler/gffutils | gffutils/helpers.py | _jsonify | def _jsonify(x):
"""Use most compact form of JSON"""
if isinstance(x, dict_class):
return json.dumps(x._d, separators=(',', ':'))
return json.dumps(x, separators=(',', ':')) | python | def _jsonify(x):
if isinstance(x, dict_class):
return json.dumps(x._d, separators=(',', ':'))
return json.dumps(x, separators=(',', ':')) | [
"def",
"_jsonify",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"dict_class",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"x",
".",
"_d",
",",
"separators",
"=",
"(",
"','",
",",
"':'",
")",
")",
"return",
"json",
".",
"dumps",
"(",
... | Use most compact form of JSON | [
"Use",
"most",
"compact",
"form",
"of",
"JSON"
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L256-L260 |
18,408 | daler/gffutils | gffutils/helpers.py | _unjsonify | def _unjsonify(x, isattributes=False):
"""Convert JSON string to an ordered defaultdict."""
if isattributes:
obj = json.loads(x)
return dict_class(obj)
return json.loads(x) | python | def _unjsonify(x, isattributes=False):
if isattributes:
obj = json.loads(x)
return dict_class(obj)
return json.loads(x) | [
"def",
"_unjsonify",
"(",
"x",
",",
"isattributes",
"=",
"False",
")",
":",
"if",
"isattributes",
":",
"obj",
"=",
"json",
".",
"loads",
"(",
"x",
")",
"return",
"dict_class",
"(",
"obj",
")",
"return",
"json",
".",
"loads",
"(",
"x",
")"
] | Convert JSON string to an ordered defaultdict. | [
"Convert",
"JSON",
"string",
"to",
"an",
"ordered",
"defaultdict",
"."
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L263-L268 |
18,409 | daler/gffutils | gffutils/helpers.py | _feature_to_fields | def _feature_to_fields(f, jsonify=True):
"""
Convert feature to tuple, for faster sqlite3 import
"""
x = []
for k in constants._keys:
v = getattr(f, k)
if jsonify and (k in ('attributes', 'extra')):
x.append(_jsonify(v))
else:
x.append(v)
return tuple(x) | python | def _feature_to_fields(f, jsonify=True):
x = []
for k in constants._keys:
v = getattr(f, k)
if jsonify and (k in ('attributes', 'extra')):
x.append(_jsonify(v))
else:
x.append(v)
return tuple(x) | [
"def",
"_feature_to_fields",
"(",
"f",
",",
"jsonify",
"=",
"True",
")",
":",
"x",
"=",
"[",
"]",
"for",
"k",
"in",
"constants",
".",
"_keys",
":",
"v",
"=",
"getattr",
"(",
"f",
",",
"k",
")",
"if",
"jsonify",
"and",
"(",
"k",
"in",
"(",
"'att... | Convert feature to tuple, for faster sqlite3 import | [
"Convert",
"feature",
"to",
"tuple",
"for",
"faster",
"sqlite3",
"import"
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L271-L282 |
18,410 | daler/gffutils | gffutils/helpers.py | _dict_to_fields | def _dict_to_fields(d, jsonify=True):
"""
Convert dict to tuple, for faster sqlite3 import
"""
x = []
for k in constants._keys:
v = d[k]
if jsonify and (k in ('attributes', 'extra')):
x.append(_jsonify(v))
else:
x.append(v)
return tuple(x) | python | def _dict_to_fields(d, jsonify=True):
x = []
for k in constants._keys:
v = d[k]
if jsonify and (k in ('attributes', 'extra')):
x.append(_jsonify(v))
else:
x.append(v)
return tuple(x) | [
"def",
"_dict_to_fields",
"(",
"d",
",",
"jsonify",
"=",
"True",
")",
":",
"x",
"=",
"[",
"]",
"for",
"k",
"in",
"constants",
".",
"_keys",
":",
"v",
"=",
"d",
"[",
"k",
"]",
"if",
"jsonify",
"and",
"(",
"k",
"in",
"(",
"'attributes'",
",",
"'e... | Convert dict to tuple, for faster sqlite3 import | [
"Convert",
"dict",
"to",
"tuple",
"for",
"faster",
"sqlite3",
"import"
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L285-L296 |
18,411 | daler/gffutils | gffutils/helpers.py | merge_attributes | def merge_attributes(attr1, attr2):
"""
Merges two attribute dictionaries into a single dictionary.
Parameters
----------
`attr1`, `attr2` : dict
Returns
-------
dict
"""
new_d = copy.deepcopy(attr1)
new_d.update(attr2)
#all of attr2 key : values just overwrote attr1, fix it
for k, v in new_d.items():
if not isinstance(v, list):
new_d[k] = [v]
for k, v in six.iteritems(attr1):
if k in attr2:
if not isinstance(v, list):
v = [v]
new_d[k].extend(v)
return dict((k, sorted(set(v))) for k, v in new_d.items()) | python | def merge_attributes(attr1, attr2):
new_d = copy.deepcopy(attr1)
new_d.update(attr2)
#all of attr2 key : values just overwrote attr1, fix it
for k, v in new_d.items():
if not isinstance(v, list):
new_d[k] = [v]
for k, v in six.iteritems(attr1):
if k in attr2:
if not isinstance(v, list):
v = [v]
new_d[k].extend(v)
return dict((k, sorted(set(v))) for k, v in new_d.items()) | [
"def",
"merge_attributes",
"(",
"attr1",
",",
"attr2",
")",
":",
"new_d",
"=",
"copy",
".",
"deepcopy",
"(",
"attr1",
")",
"new_d",
".",
"update",
"(",
"attr2",
")",
"#all of attr2 key : values just overwrote attr1, fix it",
"for",
"k",
",",
"v",
"in",
"new_d"... | Merges two attribute dictionaries into a single dictionary.
Parameters
----------
`attr1`, `attr2` : dict
Returns
-------
dict | [
"Merges",
"two",
"attribute",
"dictionaries",
"into",
"a",
"single",
"dictionary",
"."
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L307-L333 |
18,412 | daler/gffutils | gffutils/helpers.py | dialect_compare | def dialect_compare(dialect1, dialect2):
"""
Compares two dialects.
"""
orig = set(dialect1.items())
new = set(dialect2.items())
return dict(
added=dict(list(new.difference(orig))),
removed=dict(list(orig.difference(new)))
) | python | def dialect_compare(dialect1, dialect2):
orig = set(dialect1.items())
new = set(dialect2.items())
return dict(
added=dict(list(new.difference(orig))),
removed=dict(list(orig.difference(new)))
) | [
"def",
"dialect_compare",
"(",
"dialect1",
",",
"dialect2",
")",
":",
"orig",
"=",
"set",
"(",
"dialect1",
".",
"items",
"(",
")",
")",
"new",
"=",
"set",
"(",
"dialect2",
".",
"items",
"(",
")",
")",
"return",
"dict",
"(",
"added",
"=",
"dict",
"(... | Compares two dialects. | [
"Compares",
"two",
"dialects",
"."
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L336-L345 |
18,413 | daler/gffutils | gffutils/helpers.py | sanitize_gff_db | def sanitize_gff_db(db, gid_field="gid"):
"""
Sanitize given GFF db. Returns a sanitized GFF db.
Sanitizing means:
- Ensuring that start < stop for all features
- Standardizing gene units by adding a 'gid' attribute
that makes the file grep-able
TODO: Do something with negative coordinates?
"""
def sanitized_iterator():
# Iterate through the database by each gene's records
for gene_recs in db.iter_by_parent_childs():
# The gene's ID
gene_id = gene_recs[0].id
for rec in gene_recs:
# Fixup coordinates if necessary
if rec.start > rec.stop:
rec.start, rec.stop = rec.stop, rec.start
# Add a gene id field to each gene's records
rec.attributes[gid_field] = [gene_id]
yield rec
# Return sanitized GFF database
sanitized_db = \
gffutils.create_db(sanitized_iterator(), ":memory:",
verbose=False)
return sanitized_db | python | def sanitize_gff_db(db, gid_field="gid"):
def sanitized_iterator():
# Iterate through the database by each gene's records
for gene_recs in db.iter_by_parent_childs():
# The gene's ID
gene_id = gene_recs[0].id
for rec in gene_recs:
# Fixup coordinates if necessary
if rec.start > rec.stop:
rec.start, rec.stop = rec.stop, rec.start
# Add a gene id field to each gene's records
rec.attributes[gid_field] = [gene_id]
yield rec
# Return sanitized GFF database
sanitized_db = \
gffutils.create_db(sanitized_iterator(), ":memory:",
verbose=False)
return sanitized_db | [
"def",
"sanitize_gff_db",
"(",
"db",
",",
"gid_field",
"=",
"\"gid\"",
")",
":",
"def",
"sanitized_iterator",
"(",
")",
":",
"# Iterate through the database by each gene's records",
"for",
"gene_recs",
"in",
"db",
".",
"iter_by_parent_childs",
"(",
")",
":",
"# The ... | Sanitize given GFF db. Returns a sanitized GFF db.
Sanitizing means:
- Ensuring that start < stop for all features
- Standardizing gene units by adding a 'gid' attribute
that makes the file grep-able
TODO: Do something with negative coordinates? | [
"Sanitize",
"given",
"GFF",
"db",
".",
"Returns",
"a",
"sanitized",
"GFF",
"db",
"."
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L348-L376 |
18,414 | daler/gffutils | gffutils/helpers.py | sanitize_gff_file | def sanitize_gff_file(gff_fname,
in_memory=True,
in_place=False):
"""
Sanitize a GFF file.
"""
db = None
if is_gff_db(gff_fname):
# It's a database filename, so load it
db = gffutils.FeatureDB(gff_fname)
else:
# Need to create a database for file
if in_memory:
db = gffutils.create_db(gff_fname, ":memory:",
verbose=False)
else:
db = get_gff_db(gff_fname)
if in_place:
gff_out = gffwriter.GFFWriter(gff_fname,
in_place=in_place)
else:
gff_out = gffwriter.GFFWriter(sys.stdout)
sanitized_db = sanitize_gff_db(db)
for gene_rec in sanitized_db.all_features(featuretype="gene"):
gff_out.write_gene_recs(sanitized_db, gene_rec.id)
gff_out.close() | python | def sanitize_gff_file(gff_fname,
in_memory=True,
in_place=False):
db = None
if is_gff_db(gff_fname):
# It's a database filename, so load it
db = gffutils.FeatureDB(gff_fname)
else:
# Need to create a database for file
if in_memory:
db = gffutils.create_db(gff_fname, ":memory:",
verbose=False)
else:
db = get_gff_db(gff_fname)
if in_place:
gff_out = gffwriter.GFFWriter(gff_fname,
in_place=in_place)
else:
gff_out = gffwriter.GFFWriter(sys.stdout)
sanitized_db = sanitize_gff_db(db)
for gene_rec in sanitized_db.all_features(featuretype="gene"):
gff_out.write_gene_recs(sanitized_db, gene_rec.id)
gff_out.close() | [
"def",
"sanitize_gff_file",
"(",
"gff_fname",
",",
"in_memory",
"=",
"True",
",",
"in_place",
"=",
"False",
")",
":",
"db",
"=",
"None",
"if",
"is_gff_db",
"(",
"gff_fname",
")",
":",
"# It's a database filename, so load it",
"db",
"=",
"gffutils",
".",
"Featu... | Sanitize a GFF file. | [
"Sanitize",
"a",
"GFF",
"file",
"."
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L379-L404 |
18,415 | daler/gffutils | gffutils/helpers.py | is_gff_db | def is_gff_db(db_fname):
"""
Return True if the given filename is a GFF database.
For now, rely on .db extension.
"""
if not os.path.isfile(db_fname):
return False
if db_fname.endswith(".db"):
return True
return False | python | def is_gff_db(db_fname):
if not os.path.isfile(db_fname):
return False
if db_fname.endswith(".db"):
return True
return False | [
"def",
"is_gff_db",
"(",
"db_fname",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"db_fname",
")",
":",
"return",
"False",
"if",
"db_fname",
".",
"endswith",
"(",
"\".db\"",
")",
":",
"return",
"True",
"return",
"False"
] | Return True if the given filename is a GFF database.
For now, rely on .db extension. | [
"Return",
"True",
"if",
"the",
"given",
"filename",
"is",
"a",
"GFF",
"database",
"."
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L415-L425 |
18,416 | daler/gffutils | gffutils/helpers.py | get_gff_db | def get_gff_db(gff_fname,
ext=".db"):
"""
Get db for GFF file. If the database has a .db file,
load that. Otherwise, create a named temporary file,
serialize the db to that, and return the loaded database.
"""
if not os.path.isfile(gff_fname):
# Not sure how we should deal with errors normally in
# gffutils -- Ryan?
raise ValueError("GFF %s does not exist." % (gff_fname))
candidate_db_fname = "%s.%s" % (gff_fname, ext)
if os.path.isfile(candidate_db_fname):
# Standard .db file found, so return it
return candidate_db_fname
# Otherwise, we need to create a temporary but non-deleted
# file to store the db in. It'll be up to the user
# of the function the delete the file when done.
## NOTE: Ryan must have a good scheme for dealing with this
## since pybedtools does something similar under the hood, i.e.
## creating temporary files as needed without over proliferation
db_fname = tempfile.NamedTemporaryFile(delete=False)
# Create the database for the gff file (suppress output
# when using function internally)
print("Creating db for %s" % (gff_fname))
t1 = time.time()
db = gffutils.create_db(gff_fname, db_fname.name,
merge_strategy="merge",
verbose=False)
t2 = time.time()
print(" - Took %.2f seconds" % (t2 - t1))
return db | python | def get_gff_db(gff_fname,
ext=".db"):
if not os.path.isfile(gff_fname):
# Not sure how we should deal with errors normally in
# gffutils -- Ryan?
raise ValueError("GFF %s does not exist." % (gff_fname))
candidate_db_fname = "%s.%s" % (gff_fname, ext)
if os.path.isfile(candidate_db_fname):
# Standard .db file found, so return it
return candidate_db_fname
# Otherwise, we need to create a temporary but non-deleted
# file to store the db in. It'll be up to the user
# of the function the delete the file when done.
## NOTE: Ryan must have a good scheme for dealing with this
## since pybedtools does something similar under the hood, i.e.
## creating temporary files as needed without over proliferation
db_fname = tempfile.NamedTemporaryFile(delete=False)
# Create the database for the gff file (suppress output
# when using function internally)
print("Creating db for %s" % (gff_fname))
t1 = time.time()
db = gffutils.create_db(gff_fname, db_fname.name,
merge_strategy="merge",
verbose=False)
t2 = time.time()
print(" - Took %.2f seconds" % (t2 - t1))
return db | [
"def",
"get_gff_db",
"(",
"gff_fname",
",",
"ext",
"=",
"\".db\"",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"gff_fname",
")",
":",
"# Not sure how we should deal with errors normally in",
"# gffutils -- Ryan?",
"raise",
"ValueError",
"(",
"\"G... | Get db for GFF file. If the database has a .db file,
load that. Otherwise, create a named temporary file,
serialize the db to that, and return the loaded database. | [
"Get",
"db",
"for",
"GFF",
"file",
".",
"If",
"the",
"database",
"has",
"a",
".",
"db",
"file",
"load",
"that",
".",
"Otherwise",
"create",
"a",
"named",
"temporary",
"file",
"serialize",
"the",
"db",
"to",
"that",
"and",
"return",
"the",
"loaded",
"da... | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L475-L506 |
18,417 | daler/gffutils | gffutils/parser.py | _reconstruct | def _reconstruct(keyvals, dialect, keep_order=False,
sort_attribute_values=False):
"""
Reconstructs the original attributes string according to the dialect.
Parameters
==========
keyvals : dict
Attributes from a GFF/GTF feature
dialect : dict
Dialect containing info on how to reconstruct a string version of the
attributes
keep_order : bool
If True, then perform sorting of attribute keys to ensure they are in
the same order as those provided in the original file. Default is
False, which saves time especially on large data sets.
sort_attribute_values : bool
If True, then sort values to ensure they will always be in the same
order. Mostly only useful for testing; default is False.
"""
if not dialect:
raise AttributeStringError()
if not keyvals:
return ""
parts = []
# Re-encode when reconstructing attributes
if constants.ignore_url_escape_characters or dialect['fmt'] != 'gff3':
attributes = keyvals
else:
attributes = {}
for k, v in keyvals.items():
attributes[k] = []
for i in v:
attributes[k].append(''.join([quoter[j] for j in i]))
# May need to split multiple values into multiple key/val pairs
if dialect['repeated keys']:
items = []
for key, val in attributes.items():
if len(val) > 1:
for v in val:
items.append((key, [v]))
else:
items.append((key, val))
else:
items = list(attributes.items())
def sort_key(x):
# sort keys by their order in the dialect; anything not in there will
# be in arbitrary order at the end.
try:
return dialect['order'].index(x[0])
except ValueError:
return 1e6
if keep_order:
items.sort(key=sort_key)
for key, val in items:
# Multival sep is usually a comma:
if val:
if sort_attribute_values:
val = sorted(val)
val_str = dialect['multival separator'].join(val)
if val_str:
# Surround with quotes if needed
if dialect['quoted GFF2 values']:
val_str = '"%s"' % val_str
# Typically "=" for GFF3 or " " otherwise
part = dialect['keyval separator'].join([key, val_str])
else:
if dialect['fmt'] == 'gtf':
part = dialect['keyval separator'].join([key, '""'])
else:
part = key
parts.append(part)
# Typically ";" or "; "
parts_str = dialect['field separator'].join(parts)
# Sometimes need to add this
if dialect['trailing semicolon']:
parts_str += ';'
return parts_str | python | def _reconstruct(keyvals, dialect, keep_order=False,
sort_attribute_values=False):
if not dialect:
raise AttributeStringError()
if not keyvals:
return ""
parts = []
# Re-encode when reconstructing attributes
if constants.ignore_url_escape_characters or dialect['fmt'] != 'gff3':
attributes = keyvals
else:
attributes = {}
for k, v in keyvals.items():
attributes[k] = []
for i in v:
attributes[k].append(''.join([quoter[j] for j in i]))
# May need to split multiple values into multiple key/val pairs
if dialect['repeated keys']:
items = []
for key, val in attributes.items():
if len(val) > 1:
for v in val:
items.append((key, [v]))
else:
items.append((key, val))
else:
items = list(attributes.items())
def sort_key(x):
# sort keys by their order in the dialect; anything not in there will
# be in arbitrary order at the end.
try:
return dialect['order'].index(x[0])
except ValueError:
return 1e6
if keep_order:
items.sort(key=sort_key)
for key, val in items:
# Multival sep is usually a comma:
if val:
if sort_attribute_values:
val = sorted(val)
val_str = dialect['multival separator'].join(val)
if val_str:
# Surround with quotes if needed
if dialect['quoted GFF2 values']:
val_str = '"%s"' % val_str
# Typically "=" for GFF3 or " " otherwise
part = dialect['keyval separator'].join([key, val_str])
else:
if dialect['fmt'] == 'gtf':
part = dialect['keyval separator'].join([key, '""'])
else:
part = key
parts.append(part)
# Typically ";" or "; "
parts_str = dialect['field separator'].join(parts)
# Sometimes need to add this
if dialect['trailing semicolon']:
parts_str += ';'
return parts_str | [
"def",
"_reconstruct",
"(",
"keyvals",
",",
"dialect",
",",
"keep_order",
"=",
"False",
",",
"sort_attribute_values",
"=",
"False",
")",
":",
"if",
"not",
"dialect",
":",
"raise",
"AttributeStringError",
"(",
")",
"if",
"not",
"keyvals",
":",
"return",
"\"\"... | Reconstructs the original attributes string according to the dialect.
Parameters
==========
keyvals : dict
Attributes from a GFF/GTF feature
dialect : dict
Dialect containing info on how to reconstruct a string version of the
attributes
keep_order : bool
If True, then perform sorting of attribute keys to ensure they are in
the same order as those provided in the original file. Default is
False, which saves time especially on large data sets.
sort_attribute_values : bool
If True, then sort values to ensure they will always be in the same
order. Mostly only useful for testing; default is False. | [
"Reconstructs",
"the",
"original",
"attributes",
"string",
"according",
"to",
"the",
"dialect",
"."
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/parser.py#L76-L169 |
18,418 | daler/gffutils | gffutils/create.py | create_db | def create_db(data, dbfn, id_spec=None, force=False, verbose=False,
checklines=10, merge_strategy='error', transform=None,
gtf_transcript_key='transcript_id', gtf_gene_key='gene_id',
gtf_subfeature='exon', force_gff=False,
force_dialect_check=False, from_string=False, keep_order=False,
text_factory=sqlite3.OptimizedUnicode, force_merge_fields=None,
pragmas=constants.default_pragmas, sort_attribute_values=False,
dialect=None, _keep_tempfiles=False, infer_gene_extent=True,
disable_infer_genes=False, disable_infer_transcripts=False,
**kwargs):
"""
Create a database from a GFF or GTF file.
For more details on when and how to use the kwargs below, see the examples
in the online documentation (:ref:`examples`).
Parameters
----------
data : string or iterable
If a string (and `from_string` is False), then `data` is the path to
the original GFF or GTF file.
If a string and `from_string` is True, then assume `data` is the actual
data to use.
Otherwise, it's an iterable of Feature objects.
dbfn : string
Path to the database that will be created. Can be the special string
":memory:" to create an in-memory database.
id_spec : string, list, dict, callable, or None
This parameter guides what will be used as the primary key for the
database, which in turn determines how you will access individual
features by name from the database.
If `id_spec=None`, then auto-increment primary keys based on the
feature type (e.g., "gene_1", "gene_2"). This is also the fallback
behavior for the other values below.
If `id_spec` is a string, then look for this key in the attributes. If
it exists, then use its value as the primary key, otherwise
autoincrement based on the feature type. For many GFF3 files, "ID"
usually works well.
If `id_spec` is a list or tuple of keys, then check for each one in
order, using the first one found. For GFF3, this might be ["ID",
"Name"], which would use the ID if it exists, otherwise the Name,
otherwise autoincrement based on the feature type.
If `id_spec` is a dictionary, then it is a mapping of feature types to
what should be used as the ID. For example, for GTF files, `{'gene':
'gene_id', 'transcript': 'transcript_id'}` may be useful. The values
of this dictionary can also be a list, e.g., `{'gene': ['gene_id',
'geneID']}`
If `id_spec` is a callable object, then it accepts a dictionary from
the iterator and returns one of the following:
* None (in which case the feature type will be auto-incremented)
* string (which will be used as the primary key)
* special string starting with "autoincrement:X", where "X" is
a string that will be used for auto-incrementing. For example,
if "autoincrement:chr10", then the first feature will be
"chr10_1", the second "chr10_2", and so on.
force : bool
If `False` (default), then raise an exception if `dbfn` already exists.
Use `force=True` to overwrite any existing databases.
verbose : bool
Report percent complete and other feedback on how the db creation is
progressing.
In order to report percent complete, the entire file needs to be read
once to see how many items there are; for large files you may want to
use `verbose=False` to avoid this.
checklines : int
Number of lines to check the dialect.
merge_strategy : str
One of {merge, create_unique, error, warning, replace}.
This parameter specifies the behavior when two items have an identical
primary key.
Using `merge_strategy="merge"`, then there will be a single entry in
the database, but the attributes of all features with the same primary
key will be merged.
Using `merge_strategy="create_unique"`, then the first entry will use
the original primary key, but the second entry will have a unique,
autoincremented primary key assigned to it
Using `merge_strategy="error"`, a :class:`gffutils.DuplicateID`
exception will be raised. This means you will have to edit the file
yourself to fix the duplicated IDs.
Using `merge_strategy="warning"`, a warning will be printed to the
logger, and the duplicate feature will be skipped.
Using `merge_strategy="replace"` will replace the entire existing
feature with the new feature.
transform : callable
Function (or other callable object) that accepts a `Feature` object and
returns a (possibly modified) `Feature` object.
gtf_transcript_key, gtf_gene_key : string
Which attribute to use as the transcript ID and gene ID respectively
for GTF files. Default is `transcript_id` and `gene_id` according to
the GTF spec.
gtf_subfeature : string
Feature type to use as a "gene component" when inferring gene and
transcript extents for GTF files. Default is `exon` according to the
GTF spec.
force_gff : bool
If True, do not do automatic format detection -- only use GFF.
force_dialect_check : bool
If True, the dialect will be checkef for every feature (instead of just
`checklines` features). This can be slow, but may be necessary for
inconsistently-formatted input files.
from_string : bool
If True, then treat `data` as actual data (rather than the path to
a file).
keep_order : bool
If True, all features returned from this instance will have the
order of their attributes maintained. This can be turned on or off
database-wide by setting the `keep_order` attribute or with this
kwarg, or on a feature-by-feature basis by setting the `keep_order`
attribute of an individual feature.
Note that a single order of attributes will be used for all features.
Specifically, the order will be determined by the order of attribute
keys in the first `checklines` of the input data. See
helpers._choose_dialect for more information on this.
Default is False, since this includes a sorting step that can get
time-consuming for many features.
infer_gene_extent : bool
DEPRECATED in version 0.8.4. See `disable_infer_transcripts` and
`disable_infer_genes` for more granular control.
disable_infer_transcripts, disable_infer_genes : bool
Only used for GTF files. By default -- and according to the GTF spec --
we assume that there are no transcript or gene features in the file.
gffutils then infers the extent of each transcript based on its
constituent exons and infers the extent of each gene bases on its
constituent transcripts.
This default behavior is problematic if the input file already contains
transcript or gene features (like recent GENCODE GTF files for human),
since 1) the work to infer extents is unnecessary, and 2)
trying to insert an inferred feature back into the database triggers
gffutils' feature-merging routines, which can get time consuming.
The solution is to use `disable_infer_transcripts=True` if your GTF
already has transcripts in it, and/or `disable_infer_genes=True` if it
already has genes in it. This can result in dramatic (100x) speedup.
Prior to version 0.8.4, setting `infer_gene_extents=False` would
disable both transcript and gene inference simultaneously. As of
version 0.8.4, these argument allow more granular control.
force_merge_fields : list
If merge_strategy="merge", then features will only be merged if their
non-attribute values are identical (same chrom, source, start, stop,
score, strand, phase). Using `force_merge_fields`, you can override
this behavior to allow merges even when fields are different. This
list can contain one or more of ['seqid', 'source', 'featuretype',
'score', 'strand', 'frame']. The resulting merged fields will be
strings of comma-separated values. Note that 'start' and 'end' are not
available, since these fields need to be integers.
text_factory : callable
Text factory to use for the sqlite3 database. See
https://docs.python.org/2/library/\
sqlite3.html#sqlite3.Connection.text_factory
for details. The default sqlite3.OptimizedUnicode will return Unicode
objects only for non-ASCII data, and bytestrings otherwise.
pragmas : dict
Dictionary of pragmas used when creating the sqlite3 database. See
http://www.sqlite.org/pragma.html for a list of available pragmas. The
defaults are stored in constants.default_pragmas, which can be used as
a template for supplying a custom dictionary.
sort_attribute_values : bool
All features returned from the database will have their attribute
values sorted. Typically this is only useful for testing, since this
can get time-consuming for large numbers of features.
_keep_tempfiles : bool or string
False by default to clean up intermediate tempfiles created during GTF
import. If True, then keep these tempfile for testing or debugging.
If string, then keep the tempfile for testing, but also use the string
as the suffix fo the tempfile. This can be useful for testing in
parallel environments.
Returns
-------
New :class:`FeatureDB` object.
"""
_locals = locals()
# Check if any older kwargs made it in
deprecation_handler(kwargs)
kwargs = dict((i, _locals[i]) for i in constants._iterator_kwargs)
# First construct an iterator so that we can identify the file format.
# DataIterator figures out what kind of data was provided (string of lines,
# filename, or iterable of Features) and checks `checklines` lines to
# identify the dialect.
iterator = iterators.DataIterator(**kwargs)
kwargs.update(**_locals)
if dialect is None:
dialect = iterator.dialect
# However, a side-effect of this is that if `data` was a generator, then
# we've just consumed `checklines` items (see
# iterators.BaseIterator.__init__, which calls iterators.peek).
#
# But it also chains those consumed items back onto the beginning, and the
# result is available as as iterator._iter.
#
# That's what we should be using now for `data:
kwargs['data'] = iterator._iter
kwargs['directives'] = iterator.directives
# Since we've already checked lines, we don't want to do it again
kwargs['checklines'] = 0
if force_gff or (dialect['fmt'] == 'gff3'):
cls = _GFFDBCreator
id_spec = id_spec or 'ID'
add_kwargs = dict(
id_spec=id_spec,
)
elif dialect['fmt'] == 'gtf':
cls = _GTFDBCreator
id_spec = id_spec or {'gene': 'gene_id', 'transcript': 'transcript_id'}
add_kwargs = dict(
transcript_key=gtf_transcript_key,
gene_key=gtf_gene_key,
subfeature=gtf_subfeature,
id_spec=id_spec,
)
kwargs.update(**add_kwargs)
kwargs['dialect'] = dialect
c = cls(**kwargs)
c.create()
if dbfn == ':memory:':
db = interface.FeatureDB(c.conn,
keep_order=keep_order,
pragmas=pragmas,
sort_attribute_values=sort_attribute_values,
text_factory=text_factory)
else:
db = interface.FeatureDB(c,
keep_order=keep_order,
pragmas=pragmas,
sort_attribute_values=sort_attribute_values,
text_factory=text_factory)
return db | python | def create_db(data, dbfn, id_spec=None, force=False, verbose=False,
checklines=10, merge_strategy='error', transform=None,
gtf_transcript_key='transcript_id', gtf_gene_key='gene_id',
gtf_subfeature='exon', force_gff=False,
force_dialect_check=False, from_string=False, keep_order=False,
text_factory=sqlite3.OptimizedUnicode, force_merge_fields=None,
pragmas=constants.default_pragmas, sort_attribute_values=False,
dialect=None, _keep_tempfiles=False, infer_gene_extent=True,
disable_infer_genes=False, disable_infer_transcripts=False,
**kwargs):
_locals = locals()
# Check if any older kwargs made it in
deprecation_handler(kwargs)
kwargs = dict((i, _locals[i]) for i in constants._iterator_kwargs)
# First construct an iterator so that we can identify the file format.
# DataIterator figures out what kind of data was provided (string of lines,
# filename, or iterable of Features) and checks `checklines` lines to
# identify the dialect.
iterator = iterators.DataIterator(**kwargs)
kwargs.update(**_locals)
if dialect is None:
dialect = iterator.dialect
# However, a side-effect of this is that if `data` was a generator, then
# we've just consumed `checklines` items (see
# iterators.BaseIterator.__init__, which calls iterators.peek).
#
# But it also chains those consumed items back onto the beginning, and the
# result is available as as iterator._iter.
#
# That's what we should be using now for `data:
kwargs['data'] = iterator._iter
kwargs['directives'] = iterator.directives
# Since we've already checked lines, we don't want to do it again
kwargs['checklines'] = 0
if force_gff or (dialect['fmt'] == 'gff3'):
cls = _GFFDBCreator
id_spec = id_spec or 'ID'
add_kwargs = dict(
id_spec=id_spec,
)
elif dialect['fmt'] == 'gtf':
cls = _GTFDBCreator
id_spec = id_spec or {'gene': 'gene_id', 'transcript': 'transcript_id'}
add_kwargs = dict(
transcript_key=gtf_transcript_key,
gene_key=gtf_gene_key,
subfeature=gtf_subfeature,
id_spec=id_spec,
)
kwargs.update(**add_kwargs)
kwargs['dialect'] = dialect
c = cls(**kwargs)
c.create()
if dbfn == ':memory:':
db = interface.FeatureDB(c.conn,
keep_order=keep_order,
pragmas=pragmas,
sort_attribute_values=sort_attribute_values,
text_factory=text_factory)
else:
db = interface.FeatureDB(c,
keep_order=keep_order,
pragmas=pragmas,
sort_attribute_values=sort_attribute_values,
text_factory=text_factory)
return db | [
"def",
"create_db",
"(",
"data",
",",
"dbfn",
",",
"id_spec",
"=",
"None",
",",
"force",
"=",
"False",
",",
"verbose",
"=",
"False",
",",
"checklines",
"=",
"10",
",",
"merge_strategy",
"=",
"'error'",
",",
"transform",
"=",
"None",
",",
"gtf_transcript_... | Create a database from a GFF or GTF file.
For more details on when and how to use the kwargs below, see the examples
in the online documentation (:ref:`examples`).
Parameters
----------
data : string or iterable
If a string (and `from_string` is False), then `data` is the path to
the original GFF or GTF file.
If a string and `from_string` is True, then assume `data` is the actual
data to use.
Otherwise, it's an iterable of Feature objects.
dbfn : string
Path to the database that will be created. Can be the special string
":memory:" to create an in-memory database.
id_spec : string, list, dict, callable, or None
This parameter guides what will be used as the primary key for the
database, which in turn determines how you will access individual
features by name from the database.
If `id_spec=None`, then auto-increment primary keys based on the
feature type (e.g., "gene_1", "gene_2"). This is also the fallback
behavior for the other values below.
If `id_spec` is a string, then look for this key in the attributes. If
it exists, then use its value as the primary key, otherwise
autoincrement based on the feature type. For many GFF3 files, "ID"
usually works well.
If `id_spec` is a list or tuple of keys, then check for each one in
order, using the first one found. For GFF3, this might be ["ID",
"Name"], which would use the ID if it exists, otherwise the Name,
otherwise autoincrement based on the feature type.
If `id_spec` is a dictionary, then it is a mapping of feature types to
what should be used as the ID. For example, for GTF files, `{'gene':
'gene_id', 'transcript': 'transcript_id'}` may be useful. The values
of this dictionary can also be a list, e.g., `{'gene': ['gene_id',
'geneID']}`
If `id_spec` is a callable object, then it accepts a dictionary from
the iterator and returns one of the following:
* None (in which case the feature type will be auto-incremented)
* string (which will be used as the primary key)
* special string starting with "autoincrement:X", where "X" is
a string that will be used for auto-incrementing. For example,
if "autoincrement:chr10", then the first feature will be
"chr10_1", the second "chr10_2", and so on.
force : bool
If `False` (default), then raise an exception if `dbfn` already exists.
Use `force=True` to overwrite any existing databases.
verbose : bool
Report percent complete and other feedback on how the db creation is
progressing.
In order to report percent complete, the entire file needs to be read
once to see how many items there are; for large files you may want to
use `verbose=False` to avoid this.
checklines : int
Number of lines to check the dialect.
merge_strategy : str
One of {merge, create_unique, error, warning, replace}.
This parameter specifies the behavior when two items have an identical
primary key.
Using `merge_strategy="merge"`, then there will be a single entry in
the database, but the attributes of all features with the same primary
key will be merged.
Using `merge_strategy="create_unique"`, then the first entry will use
the original primary key, but the second entry will have a unique,
autoincremented primary key assigned to it
Using `merge_strategy="error"`, a :class:`gffutils.DuplicateID`
exception will be raised. This means you will have to edit the file
yourself to fix the duplicated IDs.
Using `merge_strategy="warning"`, a warning will be printed to the
logger, and the duplicate feature will be skipped.
Using `merge_strategy="replace"` will replace the entire existing
feature with the new feature.
transform : callable
Function (or other callable object) that accepts a `Feature` object and
returns a (possibly modified) `Feature` object.
gtf_transcript_key, gtf_gene_key : string
Which attribute to use as the transcript ID and gene ID respectively
for GTF files. Default is `transcript_id` and `gene_id` according to
the GTF spec.
gtf_subfeature : string
Feature type to use as a "gene component" when inferring gene and
transcript extents for GTF files. Default is `exon` according to the
GTF spec.
force_gff : bool
If True, do not do automatic format detection -- only use GFF.
force_dialect_check : bool
If True, the dialect will be checkef for every feature (instead of just
`checklines` features). This can be slow, but may be necessary for
inconsistently-formatted input files.
from_string : bool
If True, then treat `data` as actual data (rather than the path to
a file).
keep_order : bool
If True, all features returned from this instance will have the
order of their attributes maintained. This can be turned on or off
database-wide by setting the `keep_order` attribute or with this
kwarg, or on a feature-by-feature basis by setting the `keep_order`
attribute of an individual feature.
Note that a single order of attributes will be used for all features.
Specifically, the order will be determined by the order of attribute
keys in the first `checklines` of the input data. See
helpers._choose_dialect for more information on this.
Default is False, since this includes a sorting step that can get
time-consuming for many features.
infer_gene_extent : bool
DEPRECATED in version 0.8.4. See `disable_infer_transcripts` and
`disable_infer_genes` for more granular control.
disable_infer_transcripts, disable_infer_genes : bool
Only used for GTF files. By default -- and according to the GTF spec --
we assume that there are no transcript or gene features in the file.
gffutils then infers the extent of each transcript based on its
constituent exons and infers the extent of each gene bases on its
constituent transcripts.
This default behavior is problematic if the input file already contains
transcript or gene features (like recent GENCODE GTF files for human),
since 1) the work to infer extents is unnecessary, and 2)
trying to insert an inferred feature back into the database triggers
gffutils' feature-merging routines, which can get time consuming.
The solution is to use `disable_infer_transcripts=True` if your GTF
already has transcripts in it, and/or `disable_infer_genes=True` if it
already has genes in it. This can result in dramatic (100x) speedup.
Prior to version 0.8.4, setting `infer_gene_extents=False` would
disable both transcript and gene inference simultaneously. As of
version 0.8.4, these argument allow more granular control.
force_merge_fields : list
If merge_strategy="merge", then features will only be merged if their
non-attribute values are identical (same chrom, source, start, stop,
score, strand, phase). Using `force_merge_fields`, you can override
this behavior to allow merges even when fields are different. This
list can contain one or more of ['seqid', 'source', 'featuretype',
'score', 'strand', 'frame']. The resulting merged fields will be
strings of comma-separated values. Note that 'start' and 'end' are not
available, since these fields need to be integers.
text_factory : callable
Text factory to use for the sqlite3 database. See
https://docs.python.org/2/library/\
sqlite3.html#sqlite3.Connection.text_factory
for details. The default sqlite3.OptimizedUnicode will return Unicode
objects only for non-ASCII data, and bytestrings otherwise.
pragmas : dict
Dictionary of pragmas used when creating the sqlite3 database. See
http://www.sqlite.org/pragma.html for a list of available pragmas. The
defaults are stored in constants.default_pragmas, which can be used as
a template for supplying a custom dictionary.
sort_attribute_values : bool
All features returned from the database will have their attribute
values sorted. Typically this is only useful for testing, since this
can get time-consuming for large numbers of features.
_keep_tempfiles : bool or string
False by default to clean up intermediate tempfiles created during GTF
import. If True, then keep these tempfile for testing or debugging.
If string, then keep the tempfile for testing, but also use the string
as the suffix fo the tempfile. This can be useful for testing in
parallel environments.
Returns
-------
New :class:`FeatureDB` object. | [
"Create",
"a",
"database",
"from",
"a",
"GFF",
"or",
"GTF",
"file",
"."
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/create.py#L1025-L1312 |
18,419 | daler/gffutils | gffutils/create.py | _DBCreator._id_handler | def _id_handler(self, f):
"""
Given a Feature from self.iterator, figure out what the ID should be.
This uses `self.id_spec` identify the ID.
"""
# If id_spec is a string, convert to iterable for later
if isinstance(self.id_spec, six.string_types):
id_key = [self.id_spec]
elif hasattr(self.id_spec, '__call__'):
id_key = [self.id_spec]
# If dict, then assume it's a feature -> attribute mapping, e.g.,
# {'gene': 'gene_id'} for GTF
elif isinstance(self.id_spec, dict):
try:
id_key = self.id_spec[f.featuretype]
if isinstance(id_key, six.string_types):
id_key = [id_key]
# Otherwise, use default auto-increment.
except KeyError:
return self._increment_featuretype_autoid(f.featuretype)
# Otherwise assume it's an iterable.
else:
id_key = self.id_spec
# Then try them in order, returning the first one that works:
for k in id_key:
if hasattr(k, '__call__'):
_id = k(f)
if _id:
if _id.startswith('autoincrement:'):
return self._increment_featuretype_autoid(_id[14:])
return _id
else:
# use GFF fields rather than attributes for cases like :seqid:
# or :strand:
if (len(k) > 3) and (k[0] == ':') and (k[-1] == ':'):
# No [0] here -- only attributes key/vals are forced into
# lists, not standard GFF fields.
return getattr(f, k[1:-1])
else:
try:
return f.attributes[k][0]
except (KeyError, IndexError):
pass
# If we get here, then default autoincrement
return self._increment_featuretype_autoid(f.featuretype) | python | def _id_handler(self, f):
# If id_spec is a string, convert to iterable for later
if isinstance(self.id_spec, six.string_types):
id_key = [self.id_spec]
elif hasattr(self.id_spec, '__call__'):
id_key = [self.id_spec]
# If dict, then assume it's a feature -> attribute mapping, e.g.,
# {'gene': 'gene_id'} for GTF
elif isinstance(self.id_spec, dict):
try:
id_key = self.id_spec[f.featuretype]
if isinstance(id_key, six.string_types):
id_key = [id_key]
# Otherwise, use default auto-increment.
except KeyError:
return self._increment_featuretype_autoid(f.featuretype)
# Otherwise assume it's an iterable.
else:
id_key = self.id_spec
# Then try them in order, returning the first one that works:
for k in id_key:
if hasattr(k, '__call__'):
_id = k(f)
if _id:
if _id.startswith('autoincrement:'):
return self._increment_featuretype_autoid(_id[14:])
return _id
else:
# use GFF fields rather than attributes for cases like :seqid:
# or :strand:
if (len(k) > 3) and (k[0] == ':') and (k[-1] == ':'):
# No [0] here -- only attributes key/vals are forced into
# lists, not standard GFF fields.
return getattr(f, k[1:-1])
else:
try:
return f.attributes[k][0]
except (KeyError, IndexError):
pass
# If we get here, then default autoincrement
return self._increment_featuretype_autoid(f.featuretype) | [
"def",
"_id_handler",
"(",
"self",
",",
"f",
")",
":",
"# If id_spec is a string, convert to iterable for later",
"if",
"isinstance",
"(",
"self",
".",
"id_spec",
",",
"six",
".",
"string_types",
")",
":",
"id_key",
"=",
"[",
"self",
".",
"id_spec",
"]",
"elif... | Given a Feature from self.iterator, figure out what the ID should be.
This uses `self.id_spec` identify the ID. | [
"Given",
"a",
"Feature",
"from",
"self",
".",
"iterator",
"figure",
"out",
"what",
"the",
"ID",
"should",
"be",
"."
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/create.py#L153-L205 |
18,420 | daler/gffutils | gffutils/create.py | _DBCreator.create | def create(self):
"""
Calls various methods sequentially in order to fully build the
database.
"""
# Calls each of these methods in order. _populate_from_lines and
# _update_relations must be implemented in subclasses.
self._init_tables()
self._populate_from_lines(self.iterator)
self._update_relations()
self._finalize() | python | def create(self):
# Calls each of these methods in order. _populate_from_lines and
# _update_relations must be implemented in subclasses.
self._init_tables()
self._populate_from_lines(self.iterator)
self._update_relations()
self._finalize() | [
"def",
"create",
"(",
"self",
")",
":",
"# Calls each of these methods in order. _populate_from_lines and",
"# _update_relations must be implemented in subclasses.",
"self",
".",
"_init_tables",
"(",
")",
"self",
".",
"_populate_from_lines",
"(",
"self",
".",
"iterator",
")"... | Calls various methods sequentially in order to fully build the
database. | [
"Calls",
"various",
"methods",
"sequentially",
"in",
"order",
"to",
"fully",
"build",
"the",
"database",
"."
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/create.py#L507-L517 |
18,421 | daler/gffutils | gffutils/create.py | _DBCreator.execute | def execute(self, query):
"""
Execute a query directly on the database.
"""
c = self.conn.cursor()
result = c.execute(query)
for i in result:
yield i | python | def execute(self, query):
c = self.conn.cursor()
result = c.execute(query)
for i in result:
yield i | [
"def",
"execute",
"(",
"self",
",",
"query",
")",
":",
"c",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"result",
"=",
"c",
".",
"execute",
"(",
"query",
")",
"for",
"i",
"in",
"result",
":",
"yield",
"i"
] | Execute a query directly on the database. | [
"Execute",
"a",
"query",
"directly",
"on",
"the",
"database",
"."
] | 6f7f547cad898738a1bd0a999fd68ba68db2c524 | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/create.py#L523-L530 |
18,422 | edx/bok-choy | bok_choy/javascript.py | wait_for_js | def wait_for_js(function):
"""
Method decorator that waits for JavaScript dependencies before executing `function`.
If the function is not a method, the decorator has no effect.
Args:
function (callable): Method to decorate.
Returns:
Decorated method
"""
@functools.wraps(function)
def wrapper(*args, **kwargs): # pylint: disable=missing-docstring
# If not a method, then just call the function
if len(args) < 1:
return function(*args, **kwargs)
# Otherwise, retrieve `self` as the first arg
else:
self = args[0]
# If the class has been decorated by one of the
# JavaScript dependency decorators, it should have
# a `wait_for_js` method
if hasattr(self, 'wait_for_js'):
self.wait_for_js()
# Call the function
return function(*args, **kwargs)
return wrapper | python | def wait_for_js(function):
@functools.wraps(function)
def wrapper(*args, **kwargs): # pylint: disable=missing-docstring
# If not a method, then just call the function
if len(args) < 1:
return function(*args, **kwargs)
# Otherwise, retrieve `self` as the first arg
else:
self = args[0]
# If the class has been decorated by one of the
# JavaScript dependency decorators, it should have
# a `wait_for_js` method
if hasattr(self, 'wait_for_js'):
self.wait_for_js()
# Call the function
return function(*args, **kwargs)
return wrapper | [
"def",
"wait_for_js",
"(",
"function",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"function",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=missing-docstring",
"# If not a method, then just call the function",
"... | Method decorator that waits for JavaScript dependencies before executing `function`.
If the function is not a method, the decorator has no effect.
Args:
function (callable): Method to decorate.
Returns:
Decorated method | [
"Method",
"decorator",
"that",
"waits",
"for",
"JavaScript",
"dependencies",
"before",
"executing",
"function",
".",
"If",
"the",
"function",
"is",
"not",
"a",
"method",
"the",
"decorator",
"has",
"no",
"effect",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/javascript.py#L45-L77 |
18,423 | edx/bok-choy | bok_choy/javascript.py | _wait_for_js | def _wait_for_js(self):
"""
Class method added by the decorators to allow
decorated classes to manually re-check JavaScript
dependencies.
Expect that `self` is a class that:
1) Has been decorated with either `js_defined` or `requirejs`
2) Has a `browser` property
If either (1) or (2) is not satisfied, then do nothing.
"""
# No Selenium browser available, so return without doing anything
if not hasattr(self, 'browser'):
return
# pylint: disable=protected-access
# Wait for JavaScript variables to be defined
if hasattr(self, '_js_vars') and self._js_vars:
EmptyPromise(
lambda: _are_js_vars_defined(self.browser, self._js_vars),
u"JavaScript variables defined: {0}".format(", ".join(self._js_vars))
).fulfill()
# Wait for RequireJS dependencies to load
if hasattr(self, '_requirejs_deps') and self._requirejs_deps:
EmptyPromise(
lambda: _are_requirejs_deps_loaded(self.browser, self._requirejs_deps),
u"RequireJS dependencies loaded: {0}".format(", ".join(self._requirejs_deps)),
try_limit=5
).fulfill() | python | def _wait_for_js(self):
# No Selenium browser available, so return without doing anything
if not hasattr(self, 'browser'):
return
# pylint: disable=protected-access
# Wait for JavaScript variables to be defined
if hasattr(self, '_js_vars') and self._js_vars:
EmptyPromise(
lambda: _are_js_vars_defined(self.browser, self._js_vars),
u"JavaScript variables defined: {0}".format(", ".join(self._js_vars))
).fulfill()
# Wait for RequireJS dependencies to load
if hasattr(self, '_requirejs_deps') and self._requirejs_deps:
EmptyPromise(
lambda: _are_requirejs_deps_loaded(self.browser, self._requirejs_deps),
u"RequireJS dependencies loaded: {0}".format(", ".join(self._requirejs_deps)),
try_limit=5
).fulfill() | [
"def",
"_wait_for_js",
"(",
"self",
")",
":",
"# No Selenium browser available, so return without doing anything",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'browser'",
")",
":",
"return",
"# pylint: disable=protected-access",
"# Wait for JavaScript variables to be defined",
"... | Class method added by the decorators to allow
decorated classes to manually re-check JavaScript
dependencies.
Expect that `self` is a class that:
1) Has been decorated with either `js_defined` or `requirejs`
2) Has a `browser` property
If either (1) or (2) is not satisfied, then do nothing. | [
"Class",
"method",
"added",
"by",
"the",
"decorators",
"to",
"allow",
"decorated",
"classes",
"to",
"manually",
"re",
"-",
"check",
"JavaScript",
"dependencies",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/javascript.py#L104-L135 |
18,424 | edx/bok-choy | bok_choy/javascript.py | _are_js_vars_defined | def _are_js_vars_defined(browser, js_vars):
"""
Return a boolean indicating whether all the JavaScript
variables `js_vars` are defined on the current page.
`browser` is a Selenium webdriver instance.
"""
# This script will evaluate to True iff all of
# the required vars are defined.
script = u" && ".join([
u"!(typeof {0} === 'undefined')".format(var)
for var in js_vars
])
try:
return browser.execute_script(u"return {}".format(script))
except WebDriverException as exc:
if "is not defined" in exc.msg or "is undefined" in exc.msg:
return False
else:
raise | python | def _are_js_vars_defined(browser, js_vars):
# This script will evaluate to True iff all of
# the required vars are defined.
script = u" && ".join([
u"!(typeof {0} === 'undefined')".format(var)
for var in js_vars
])
try:
return browser.execute_script(u"return {}".format(script))
except WebDriverException as exc:
if "is not defined" in exc.msg or "is undefined" in exc.msg:
return False
else:
raise | [
"def",
"_are_js_vars_defined",
"(",
"browser",
",",
"js_vars",
")",
":",
"# This script will evaluate to True iff all of",
"# the required vars are defined.",
"script",
"=",
"u\" && \"",
".",
"join",
"(",
"[",
"u\"!(typeof {0} === 'undefined')\"",
".",
"format",
"(",
"var",... | Return a boolean indicating whether all the JavaScript
variables `js_vars` are defined on the current page.
`browser` is a Selenium webdriver instance. | [
"Return",
"a",
"boolean",
"indicating",
"whether",
"all",
"the",
"JavaScript",
"variables",
"js_vars",
"are",
"defined",
"on",
"the",
"current",
"page",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/javascript.py#L138-L158 |
18,425 | edx/bok-choy | bok_choy/javascript.py | _are_requirejs_deps_loaded | def _are_requirejs_deps_loaded(browser, deps):
"""
Return a boolean indicating whether all the RequireJS
dependencies `deps` have loaded on the current page.
`browser` is a WebDriver instance.
"""
# This is a little complicated
#
# We're going to use `execute_async_script` to give control to
# the browser. The browser indicates that it wants to return
# control to us by calling `callback`, which is the last item
# in the global `arguments` array.
#
# We install a RequireJS module with the dependencies we want
# to ensure are loaded. When our module loads, we return
# control to the test suite.
script = dedent(u"""
// Retrieve the callback function used to return control to the test suite
var callback = arguments[arguments.length - 1];
// If RequireJS isn't defined, then return immediately
if (!window.require) {{
callback("RequireJS not defined");
}}
// Otherwise, install a RequireJS module that depends on the modules
// we're waiting for.
else {{
// Catch errors reported by RequireJS
requirejs.onError = callback;
// Install our module
require({deps}, function() {{
callback('Success');
}});
}}
""").format(deps=json.dumps(list(deps)))
# Set a timeout to ensure we get control back
browser.set_script_timeout(30)
# Give control to the browser
# `result` will be the argument passed to the callback function
try:
result = browser.execute_async_script(script)
return result == 'Success'
except TimeoutException:
return False | python | def _are_requirejs_deps_loaded(browser, deps):
# This is a little complicated
#
# We're going to use `execute_async_script` to give control to
# the browser. The browser indicates that it wants to return
# control to us by calling `callback`, which is the last item
# in the global `arguments` array.
#
# We install a RequireJS module with the dependencies we want
# to ensure are loaded. When our module loads, we return
# control to the test suite.
script = dedent(u"""
// Retrieve the callback function used to return control to the test suite
var callback = arguments[arguments.length - 1];
// If RequireJS isn't defined, then return immediately
if (!window.require) {{
callback("RequireJS not defined");
}}
// Otherwise, install a RequireJS module that depends on the modules
// we're waiting for.
else {{
// Catch errors reported by RequireJS
requirejs.onError = callback;
// Install our module
require({deps}, function() {{
callback('Success');
}});
}}
""").format(deps=json.dumps(list(deps)))
# Set a timeout to ensure we get control back
browser.set_script_timeout(30)
# Give control to the browser
# `result` will be the argument passed to the callback function
try:
result = browser.execute_async_script(script)
return result == 'Success'
except TimeoutException:
return False | [
"def",
"_are_requirejs_deps_loaded",
"(",
"browser",
",",
"deps",
")",
":",
"# This is a little complicated",
"#",
"# We're going to use `execute_async_script` to give control to",
"# the browser. The browser indicates that it wants to return",
"# control to us by calling `callback`, which ... | Return a boolean indicating whether all the RequireJS
dependencies `deps` have loaded on the current page.
`browser` is a WebDriver instance. | [
"Return",
"a",
"boolean",
"indicating",
"whether",
"all",
"the",
"RequireJS",
"dependencies",
"deps",
"have",
"loaded",
"on",
"the",
"current",
"page",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/javascript.py#L161-L212 |
18,426 | edx/bok-choy | bok_choy/page_object.py | no_selenium_errors | def no_selenium_errors(func):
"""
Decorator to create an `EmptyPromise` check function that is satisfied
only when `func` executes without a Selenium error.
This protects against many common test failures due to timing issues.
For example, accessing an element after it has been modified by JavaScript
ordinarily results in a `StaleElementException`. Methods decorated
with `no_selenium_errors` will simply retry if that happens, which makes tests
more robust.
Args:
func (callable): The function to execute, with retries if an error occurs.
Returns:
Decorated function
"""
def _inner(*args, **kwargs): # pylint: disable=missing-docstring
try:
return_val = func(*args, **kwargs)
except WebDriverException:
LOGGER.warning(u'Exception ignored during retry loop:', exc_info=True)
return False
else:
return return_val
return _inner | python | def no_selenium_errors(func):
def _inner(*args, **kwargs): # pylint: disable=missing-docstring
try:
return_val = func(*args, **kwargs)
except WebDriverException:
LOGGER.warning(u'Exception ignored during retry loop:', exc_info=True)
return False
else:
return return_val
return _inner | [
"def",
"no_selenium_errors",
"(",
"func",
")",
":",
"def",
"_inner",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=missing-docstring",
"try",
":",
"return_val",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"excep... | Decorator to create an `EmptyPromise` check function that is satisfied
only when `func` executes without a Selenium error.
This protects against many common test failures due to timing issues.
For example, accessing an element after it has been modified by JavaScript
ordinarily results in a `StaleElementException`. Methods decorated
with `no_selenium_errors` will simply retry if that happens, which makes tests
more robust.
Args:
func (callable): The function to execute, with retries if an error occurs.
Returns:
Decorated function | [
"Decorator",
"to",
"create",
"an",
"EmptyPromise",
"check",
"function",
"that",
"is",
"satisfied",
"only",
"when",
"func",
"executes",
"without",
"a",
"Selenium",
"error",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/page_object.py#L64-L90 |
18,427 | edx/bok-choy | bok_choy/a11y/axs_ruleset.py | AxsAuditConfig.set_rules | def set_rules(self, rules):
"""
Sets the rules to be run or ignored for the audit.
Args:
rules: a dictionary of the format `{"ignore": [], "apply": []}`.
See https://github.com/GoogleChrome/accessibility-developer-tools/tree/master/src/audits
Passing `{"apply": []}` or `{}` means to check for all available rules.
Passing `{"apply": None}` means that no audit should be done for this page.
Passing `{"ignore": []}` means to run all otherwise enabled rules.
Any rules in the "ignore" list will be ignored even if they were also
specified in the "apply".
Examples:
To check only `badAriaAttributeValue`::
page.a11y_audit.config.set_rules({
"apply": ['badAriaAttributeValue']
})
To check all rules except `badAriaAttributeValue`::
page.a11y_audit.config.set_rules({
"ignore": ['badAriaAttributeValue'],
})
"""
self.rules_to_ignore = rules.get("ignore", [])
self.rules_to_run = rules.get("apply", []) | python | def set_rules(self, rules):
self.rules_to_ignore = rules.get("ignore", [])
self.rules_to_run = rules.get("apply", []) | [
"def",
"set_rules",
"(",
"self",
",",
"rules",
")",
":",
"self",
".",
"rules_to_ignore",
"=",
"rules",
".",
"get",
"(",
"\"ignore\"",
",",
"[",
"]",
")",
"self",
".",
"rules_to_run",
"=",
"rules",
".",
"get",
"(",
"\"apply\"",
",",
"[",
"]",
")"
] | Sets the rules to be run or ignored for the audit.
Args:
rules: a dictionary of the format `{"ignore": [], "apply": []}`.
See https://github.com/GoogleChrome/accessibility-developer-tools/tree/master/src/audits
Passing `{"apply": []}` or `{}` means to check for all available rules.
Passing `{"apply": None}` means that no audit should be done for this page.
Passing `{"ignore": []}` means to run all otherwise enabled rules.
Any rules in the "ignore" list will be ignored even if they were also
specified in the "apply".
Examples:
To check only `badAriaAttributeValue`::
page.a11y_audit.config.set_rules({
"apply": ['badAriaAttributeValue']
})
To check all rules except `badAriaAttributeValue`::
page.a11y_audit.config.set_rules({
"ignore": ['badAriaAttributeValue'],
}) | [
"Sets",
"the",
"rules",
"to",
"be",
"run",
"or",
"ignored",
"for",
"the",
"audit",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/a11y/axs_ruleset.py#L36-L69 |
18,428 | edx/bok-choy | bok_choy/a11y/axs_ruleset.py | AxsAuditConfig.set_scope | def set_scope(self, include=None, exclude=None):
"""
Sets `scope`, the "start point" for the audit.
Args:
include: A list of css selectors specifying the elements that
contain the portion of the page that should be audited.
Defaults to auditing the entire document.
exclude: This arg is not implemented in this ruleset.
Examples:
To check only the `div` with id `foo`::
page.a11y_audit.config.set_scope(["div#foo"])
To reset the scope to check the whole document::
page.a11y_audit.config.set_scope()
"""
if include:
self.scope = u"document.querySelector(\"{}\")".format(
u', '.join(include)
)
else:
self.scope = "null"
if exclude is not None:
raise NotImplementedError(
"The argument `exclude` has not been implemented in "
"AxsAuditConfig.set_scope method."
) | python | def set_scope(self, include=None, exclude=None):
if include:
self.scope = u"document.querySelector(\"{}\")".format(
u', '.join(include)
)
else:
self.scope = "null"
if exclude is not None:
raise NotImplementedError(
"The argument `exclude` has not been implemented in "
"AxsAuditConfig.set_scope method."
) | [
"def",
"set_scope",
"(",
"self",
",",
"include",
"=",
"None",
",",
"exclude",
"=",
"None",
")",
":",
"if",
"include",
":",
"self",
".",
"scope",
"=",
"u\"document.querySelector(\\\"{}\\\")\"",
".",
"format",
"(",
"u', '",
".",
"join",
"(",
"include",
")",
... | Sets `scope`, the "start point" for the audit.
Args:
include: A list of css selectors specifying the elements that
contain the portion of the page that should be audited.
Defaults to auditing the entire document.
exclude: This arg is not implemented in this ruleset.
Examples:
To check only the `div` with id `foo`::
page.a11y_audit.config.set_scope(["div#foo"])
To reset the scope to check the whole document::
page.a11y_audit.config.set_scope() | [
"Sets",
"scope",
"the",
"start",
"point",
"for",
"the",
"audit",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/a11y/axs_ruleset.py#L71-L103 |
18,429 | edx/bok-choy | bok_choy/a11y/axs_ruleset.py | AxsAudit._check_rules | def _check_rules(browser, rules_js, config):
"""
Check the page for violations of the configured rules. By default,
all rules in the ruleset will be checked.
Args:
browser: a browser instance.
rules_js: the ruleset JavaScript as a string.
config: an AxsAuditConfig instance.
Returns:
A namedtuple with 'errors' and 'warnings' fields whose values are
the errors and warnings returned from the audit.
None if config has rules_to_run set to None.
__Caution__: You probably don't really want to call this method
directly! It will be used by `A11yAudit.do_audit` if using this ruleset.
"""
if config.rules_to_run is None:
msg = 'No accessibility rules were specified to check.'
log.warning(msg)
return None
# This line will only be included in the script if rules to check on
# this page are specified, as the default behavior of the js is to
# run all rules.
rules = config.rules_to_run
if rules:
rules_config = u"auditConfig.auditRulesToRun = {rules};".format(
rules=rules)
else:
rules_config = ""
ignored_rules = config.rules_to_ignore
if ignored_rules:
rules_config += (
u"\nauditConfig.auditRulesToIgnore = {rules};".format(
rules=ignored_rules
)
)
script = dedent(u"""
{rules_js}
var auditConfig = new axs.AuditConfiguration();
{rules_config}
auditConfig.scope = {scope};
var run_results = axs.Audit.run(auditConfig);
var audit_results = axs.Audit.auditResults(run_results)
return audit_results;
""".format(rules_js=rules_js, rules_config=rules_config, scope=config.scope))
result = browser.execute_script(script)
# audit_results is report of accessibility errors for that session
audit_results = AuditResults(
errors=result.get('errors_'),
warnings=result.get('warnings_')
)
return audit_results | python | def _check_rules(browser, rules_js, config):
if config.rules_to_run is None:
msg = 'No accessibility rules were specified to check.'
log.warning(msg)
return None
# This line will only be included in the script if rules to check on
# this page are specified, as the default behavior of the js is to
# run all rules.
rules = config.rules_to_run
if rules:
rules_config = u"auditConfig.auditRulesToRun = {rules};".format(
rules=rules)
else:
rules_config = ""
ignored_rules = config.rules_to_ignore
if ignored_rules:
rules_config += (
u"\nauditConfig.auditRulesToIgnore = {rules};".format(
rules=ignored_rules
)
)
script = dedent(u"""
{rules_js}
var auditConfig = new axs.AuditConfiguration();
{rules_config}
auditConfig.scope = {scope};
var run_results = axs.Audit.run(auditConfig);
var audit_results = axs.Audit.auditResults(run_results)
return audit_results;
""".format(rules_js=rules_js, rules_config=rules_config, scope=config.scope))
result = browser.execute_script(script)
# audit_results is report of accessibility errors for that session
audit_results = AuditResults(
errors=result.get('errors_'),
warnings=result.get('warnings_')
)
return audit_results | [
"def",
"_check_rules",
"(",
"browser",
",",
"rules_js",
",",
"config",
")",
":",
"if",
"config",
".",
"rules_to_run",
"is",
"None",
":",
"msg",
"=",
"'No accessibility rules were specified to check.'",
"log",
".",
"warning",
"(",
"msg",
")",
"return",
"None",
... | Check the page for violations of the configured rules. By default,
all rules in the ruleset will be checked.
Args:
browser: a browser instance.
rules_js: the ruleset JavaScript as a string.
config: an AxsAuditConfig instance.
Returns:
A namedtuple with 'errors' and 'warnings' fields whose values are
the errors and warnings returned from the audit.
None if config has rules_to_run set to None.
__Caution__: You probably don't really want to call this method
directly! It will be used by `A11yAudit.do_audit` if using this ruleset. | [
"Check",
"the",
"page",
"for",
"violations",
"of",
"the",
"configured",
"rules",
".",
"By",
"default",
"all",
"rules",
"in",
"the",
"ruleset",
"will",
"be",
"checked",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/a11y/axs_ruleset.py#L134-L193 |
18,430 | edx/bok-choy | bok_choy/promise.py | Promise.fulfill | def fulfill(self):
"""
Evaluate the promise and return the result.
Returns:
The result of the `Promise` (second return value from the `check_func`)
Raises:
BrokenPromise: the `Promise` was not satisfied within the time or attempt limits.
"""
is_fulfilled, result = self._check_fulfilled()
if is_fulfilled:
return result
else:
raise BrokenPromise(self) | python | def fulfill(self):
is_fulfilled, result = self._check_fulfilled()
if is_fulfilled:
return result
else:
raise BrokenPromise(self) | [
"def",
"fulfill",
"(",
"self",
")",
":",
"is_fulfilled",
",",
"result",
"=",
"self",
".",
"_check_fulfilled",
"(",
")",
"if",
"is_fulfilled",
":",
"return",
"result",
"else",
":",
"raise",
"BrokenPromise",
"(",
"self",
")"
] | Evaluate the promise and return the result.
Returns:
The result of the `Promise` (second return value from the `check_func`)
Raises:
BrokenPromise: the `Promise` was not satisfied within the time or attempt limits. | [
"Evaluate",
"the",
"promise",
"and",
"return",
"the",
"result",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/promise.py#L91-L106 |
18,431 | edx/bok-choy | docs/code/round_3/pages.py | GitHubSearchPage.search | def search(self):
"""
Click on the Search button and wait for the
results page to be displayed
"""
self.q(css='button.btn').click()
GitHubSearchResultsPage(self.browser).wait_for_page() | python | def search(self):
self.q(css='button.btn').click()
GitHubSearchResultsPage(self.browser).wait_for_page() | [
"def",
"search",
"(",
"self",
")",
":",
"self",
".",
"q",
"(",
"css",
"=",
"'button.btn'",
")",
".",
"click",
"(",
")",
"GitHubSearchResultsPage",
"(",
"self",
".",
"browser",
")",
".",
"wait_for_page",
"(",
")"
] | Click on the Search button and wait for the
results page to be displayed | [
"Click",
"on",
"the",
"Search",
"button",
"and",
"wait",
"for",
"the",
"results",
"page",
"to",
"be",
"displayed"
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/docs/code/round_3/pages.py#L43-L49 |
18,432 | edx/bok-choy | bok_choy/a11y/axe_core_ruleset.py | AxeCoreAuditConfig.set_rules | def set_rules(self, rules):
"""
Set rules to ignore XOR limit to when checking for accessibility
errors on the page.
Args:
rules: a dictionary one of the following formats.
If you want to run all of the rules except for some::
{"ignore": []}
If you want to run only a specific set of rules::
{"apply": []}
If you want to run only rules of a specific standard::
{"tags": []}
Examples:
To run only "bad-link" and "color-contrast" rules::
page.a11y_audit.config.set_rules({
"apply": ["bad-link", "color-contrast"],
})
To run all rules except for "bad-link" and "color-contrast"::
page.a11y_audit.config.set_rules({
"ignore": ["bad-link", "color-contrast"],
})
To run only WCAG 2.0 Level A rules::
page.a11y_audit.config.set_rules({
"tags": ["wcag2a"],
})
To run all rules:
page.a11y_audit.config.set_rules({})
Related documentation:
* https://github.com/dequelabs/axe-core/blob/master/doc/API.md#options-parameter-examples
* https://github.com/dequelabs/axe-core/doc/rule-descriptions.md
"""
options = {}
if rules:
if rules.get("ignore"):
options["rules"] = {}
for rule in rules.get("ignore"):
options["rules"][rule] = {"enabled": False}
elif rules.get("apply"):
options["runOnly"] = {
"type": "rule",
"values": rules.get("apply"),
}
elif rules.get("tags"):
options["runOnly"] = {
"type": "tag",
"values": rules.get("tags"),
}
self.rules = json.dumps(options) | python | def set_rules(self, rules):
options = {}
if rules:
if rules.get("ignore"):
options["rules"] = {}
for rule in rules.get("ignore"):
options["rules"][rule] = {"enabled": False}
elif rules.get("apply"):
options["runOnly"] = {
"type": "rule",
"values": rules.get("apply"),
}
elif rules.get("tags"):
options["runOnly"] = {
"type": "tag",
"values": rules.get("tags"),
}
self.rules = json.dumps(options) | [
"def",
"set_rules",
"(",
"self",
",",
"rules",
")",
":",
"options",
"=",
"{",
"}",
"if",
"rules",
":",
"if",
"rules",
".",
"get",
"(",
"\"ignore\"",
")",
":",
"options",
"[",
"\"rules\"",
"]",
"=",
"{",
"}",
"for",
"rule",
"in",
"rules",
".",
"ge... | Set rules to ignore XOR limit to when checking for accessibility
errors on the page.
Args:
rules: a dictionary one of the following formats.
If you want to run all of the rules except for some::
{"ignore": []}
If you want to run only a specific set of rules::
{"apply": []}
If you want to run only rules of a specific standard::
{"tags": []}
Examples:
To run only "bad-link" and "color-contrast" rules::
page.a11y_audit.config.set_rules({
"apply": ["bad-link", "color-contrast"],
})
To run all rules except for "bad-link" and "color-contrast"::
page.a11y_audit.config.set_rules({
"ignore": ["bad-link", "color-contrast"],
})
To run only WCAG 2.0 Level A rules::
page.a11y_audit.config.set_rules({
"tags": ["wcag2a"],
})
To run all rules:
page.a11y_audit.config.set_rules({})
Related documentation:
* https://github.com/dequelabs/axe-core/blob/master/doc/API.md#options-parameter-examples
* https://github.com/dequelabs/axe-core/doc/rule-descriptions.md | [
"Set",
"rules",
"to",
"ignore",
"XOR",
"limit",
"to",
"when",
"checking",
"for",
"accessibility",
"errors",
"on",
"the",
"page",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/a11y/axe_core_ruleset.py#L37-L101 |
18,433 | edx/bok-choy | bok_choy/a11y/axe_core_ruleset.py | AxeCoreAuditConfig.customize_ruleset | def customize_ruleset(self, custom_ruleset_file=None):
"""
Updates the ruleset to include a set of custom rules. These rules will
be _added_ to the existing ruleset or replace the existing rule with
the same ID.
Args:
custom_ruleset_file (optional): The filepath to the custom rules.
Defaults to `None`. If `custom_ruleset_file` isn't passed, the
environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` will be
checked. If a filepath isn't specified by either of these
methods, the ruleset will not be updated.
Raises:
`IOError` if the specified file does not exist.
Examples:
To include the rules defined in `axe-core-custom-rules.js`::
page.a11y_audit.config.customize_ruleset(
"axe-core-custom-rules.js"
)
Alternatively, use the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE`
to specify the path to the file containing the custom rules.
Documentation for how to write rules:
https://github.com/dequelabs/axe-core/blob/master/doc/developer-guide.md
An example of a custom rules file can be found at
https://github.com/edx/bok-choy/tree/master/tests/a11y_custom_rules.js
"""
custom_file = custom_ruleset_file or os.environ.get(
"BOKCHOY_A11Y_CUSTOM_RULES_FILE"
)
if not custom_file:
return
with open(custom_file, "r") as additional_rules:
custom_rules = additional_rules.read()
if "var customRules" not in custom_rules:
raise A11yAuditConfigError(
"Custom rules file must include \"var customRules\""
)
self.custom_rules = custom_rules | python | def customize_ruleset(self, custom_ruleset_file=None):
custom_file = custom_ruleset_file or os.environ.get(
"BOKCHOY_A11Y_CUSTOM_RULES_FILE"
)
if not custom_file:
return
with open(custom_file, "r") as additional_rules:
custom_rules = additional_rules.read()
if "var customRules" not in custom_rules:
raise A11yAuditConfigError(
"Custom rules file must include \"var customRules\""
)
self.custom_rules = custom_rules | [
"def",
"customize_ruleset",
"(",
"self",
",",
"custom_ruleset_file",
"=",
"None",
")",
":",
"custom_file",
"=",
"custom_ruleset_file",
"or",
"os",
".",
"environ",
".",
"get",
"(",
"\"BOKCHOY_A11Y_CUSTOM_RULES_FILE\"",
")",
"if",
"not",
"custom_file",
":",
"return"... | Updates the ruleset to include a set of custom rules. These rules will
be _added_ to the existing ruleset or replace the existing rule with
the same ID.
Args:
custom_ruleset_file (optional): The filepath to the custom rules.
Defaults to `None`. If `custom_ruleset_file` isn't passed, the
environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` will be
checked. If a filepath isn't specified by either of these
methods, the ruleset will not be updated.
Raises:
`IOError` if the specified file does not exist.
Examples:
To include the rules defined in `axe-core-custom-rules.js`::
page.a11y_audit.config.customize_ruleset(
"axe-core-custom-rules.js"
)
Alternatively, use the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE`
to specify the path to the file containing the custom rules.
Documentation for how to write rules:
https://github.com/dequelabs/axe-core/blob/master/doc/developer-guide.md
An example of a custom rules file can be found at
https://github.com/edx/bok-choy/tree/master/tests/a11y_custom_rules.js | [
"Updates",
"the",
"ruleset",
"to",
"include",
"a",
"set",
"of",
"custom",
"rules",
".",
"These",
"rules",
"will",
"be",
"_added_",
"to",
"the",
"existing",
"ruleset",
"or",
"replace",
"the",
"existing",
"rule",
"with",
"the",
"same",
"ID",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/a11y/axe_core_ruleset.py#L156-L207 |
18,434 | edx/bok-choy | bok_choy/a11y/axe_core_ruleset.py | AxeCoreAudit._check_rules | def _check_rules(browser, rules_js, config):
"""
Run an accessibility audit on the page using the axe-core ruleset.
Args:
browser: a browser instance.
rules_js: the ruleset JavaScript as a string.
config: an AxsAuditConfig instance.
Returns:
A list of violations.
Related documentation:
https://github.com/dequelabs/axe-core/blob/master/doc/API.md#results-object
__Caution__: You probably don't really want to call this method
directly! It will be used by `AxeCoreAudit.do_audit`.
"""
audit_run_script = dedent(u"""
{rules_js}
{custom_rules}
axe.configure(customRules);
var callback = function(err, results) {{
if (err) throw err;
window.a11yAuditResults = JSON.stringify(results);
window.console.log(window.a11yAuditResults);
}}
axe.run({context}, {options}, callback);
""").format(
rules_js=rules_js,
custom_rules=config.custom_rules,
context=config.context,
options=config.rules
)
audit_results_script = dedent(u"""
window.console.log(window.a11yAuditResults);
return window.a11yAuditResults;
""")
browser.execute_script(audit_run_script)
def audit_results_check_func():
"""
A method to check that the audit has completed.
Returns:
(True, results) if the results are available.
(False, None) if the results aren't available.
"""
unicode_results = browser.execute_script(audit_results_script)
try:
results = json.loads(unicode_results)
except (TypeError, ValueError):
results = None
if results:
return True, results
return False, None
result = Promise(
audit_results_check_func,
"Timed out waiting for a11y audit results.",
timeout=5,
).fulfill()
# audit_results is report of accessibility violations for that session
# Note that this ruleset doesn't have distinct error/warning levels.
audit_results = result.get('violations')
return audit_results | python | def _check_rules(browser, rules_js, config):
audit_run_script = dedent(u"""
{rules_js}
{custom_rules}
axe.configure(customRules);
var callback = function(err, results) {{
if (err) throw err;
window.a11yAuditResults = JSON.stringify(results);
window.console.log(window.a11yAuditResults);
}}
axe.run({context}, {options}, callback);
""").format(
rules_js=rules_js,
custom_rules=config.custom_rules,
context=config.context,
options=config.rules
)
audit_results_script = dedent(u"""
window.console.log(window.a11yAuditResults);
return window.a11yAuditResults;
""")
browser.execute_script(audit_run_script)
def audit_results_check_func():
"""
A method to check that the audit has completed.
Returns:
(True, results) if the results are available.
(False, None) if the results aren't available.
"""
unicode_results = browser.execute_script(audit_results_script)
try:
results = json.loads(unicode_results)
except (TypeError, ValueError):
results = None
if results:
return True, results
return False, None
result = Promise(
audit_results_check_func,
"Timed out waiting for a11y audit results.",
timeout=5,
).fulfill()
# audit_results is report of accessibility violations for that session
# Note that this ruleset doesn't have distinct error/warning levels.
audit_results = result.get('violations')
return audit_results | [
"def",
"_check_rules",
"(",
"browser",
",",
"rules_js",
",",
"config",
")",
":",
"audit_run_script",
"=",
"dedent",
"(",
"u\"\"\"\n {rules_js}\n {custom_rules}\n axe.configure(customRules);\n var callback = function(err, results) {{\n ... | Run an accessibility audit on the page using the axe-core ruleset.
Args:
browser: a browser instance.
rules_js: the ruleset JavaScript as a string.
config: an AxsAuditConfig instance.
Returns:
A list of violations.
Related documentation:
https://github.com/dequelabs/axe-core/blob/master/doc/API.md#results-object
__Caution__: You probably don't really want to call this method
directly! It will be used by `AxeCoreAudit.do_audit`. | [
"Run",
"an",
"accessibility",
"audit",
"on",
"the",
"page",
"using",
"the",
"axe",
"-",
"core",
"ruleset",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/a11y/axe_core_ruleset.py#L227-L300 |
18,435 | edx/bok-choy | bok_choy/browser.py | save_source | def save_source(driver, name):
"""
Save the rendered HTML of the browser.
The location of the source can be configured
by the environment variable `SAVED_SOURCE_DIR`. If not set,
this defaults to the current working directory.
Args:
driver (selenium.webdriver): The Selenium-controlled browser.
name (str): A name to use in the output file name.
Note that ".html" is appended automatically
Returns:
None
"""
source = driver.page_source
file_name = os.path.join(os.environ.get('SAVED_SOURCE_DIR'),
'{name}.html'.format(name=name))
try:
with open(file_name, 'wb') as output_file:
output_file.write(source.encode('utf-8'))
except Exception: # pylint: disable=broad-except
msg = u"Could not save the browser page source to {}.".format(file_name)
LOGGER.warning(msg) | python | def save_source(driver, name):
source = driver.page_source
file_name = os.path.join(os.environ.get('SAVED_SOURCE_DIR'),
'{name}.html'.format(name=name))
try:
with open(file_name, 'wb') as output_file:
output_file.write(source.encode('utf-8'))
except Exception: # pylint: disable=broad-except
msg = u"Could not save the browser page source to {}.".format(file_name)
LOGGER.warning(msg) | [
"def",
"save_source",
"(",
"driver",
",",
"name",
")",
":",
"source",
"=",
"driver",
".",
"page_source",
"file_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'SAVED_SOURCE_DIR'",
")",
",",
"'{name}.html'",
".",
... | Save the rendered HTML of the browser.
The location of the source can be configured
by the environment variable `SAVED_SOURCE_DIR`. If not set,
this defaults to the current working directory.
Args:
driver (selenium.webdriver): The Selenium-controlled browser.
name (str): A name to use in the output file name.
Note that ".html" is appended automatically
Returns:
None | [
"Save",
"the",
"rendered",
"HTML",
"of",
"the",
"browser",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L79-L104 |
18,436 | edx/bok-choy | bok_choy/browser.py | save_screenshot | def save_screenshot(driver, name):
"""
Save a screenshot of the browser.
The location of the screenshot can be configured
by the environment variable `SCREENSHOT_DIR`. If not set,
this defaults to the current working directory.
Args:
driver (selenium.webdriver): The Selenium-controlled browser.
name (str): A name for the screenshot, which will be used in the output file name.
Returns:
None
"""
if hasattr(driver, 'save_screenshot'):
screenshot_dir = os.environ.get('SCREENSHOT_DIR')
if not screenshot_dir:
LOGGER.warning('The SCREENSHOT_DIR environment variable was not set; not saving a screenshot')
return
elif not os.path.exists(screenshot_dir):
os.makedirs(screenshot_dir)
image_name = os.path.join(screenshot_dir, name + '.png')
driver.save_screenshot(image_name)
else:
msg = (
u"Browser does not support screenshots. "
u"Could not save screenshot '{name}'"
).format(name=name)
LOGGER.warning(msg) | python | def save_screenshot(driver, name):
if hasattr(driver, 'save_screenshot'):
screenshot_dir = os.environ.get('SCREENSHOT_DIR')
if not screenshot_dir:
LOGGER.warning('The SCREENSHOT_DIR environment variable was not set; not saving a screenshot')
return
elif not os.path.exists(screenshot_dir):
os.makedirs(screenshot_dir)
image_name = os.path.join(screenshot_dir, name + '.png')
driver.save_screenshot(image_name)
else:
msg = (
u"Browser does not support screenshots. "
u"Could not save screenshot '{name}'"
).format(name=name)
LOGGER.warning(msg) | [
"def",
"save_screenshot",
"(",
"driver",
",",
"name",
")",
":",
"if",
"hasattr",
"(",
"driver",
",",
"'save_screenshot'",
")",
":",
"screenshot_dir",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'SCREENSHOT_DIR'",
")",
"if",
"not",
"screenshot_dir",
":",
"... | Save a screenshot of the browser.
The location of the screenshot can be configured
by the environment variable `SCREENSHOT_DIR`. If not set,
this defaults to the current working directory.
Args:
driver (selenium.webdriver): The Selenium-controlled browser.
name (str): A name for the screenshot, which will be used in the output file name.
Returns:
None | [
"Save",
"a",
"screenshot",
"of",
"the",
"browser",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L107-L138 |
18,437 | edx/bok-choy | bok_choy/browser.py | save_driver_logs | def save_driver_logs(driver, prefix):
"""
Save the selenium driver logs.
The location of the driver log files can be configured
by the environment variable `SELENIUM_DRIVER_LOG_DIR`. If not set,
this defaults to the current working directory.
Args:
driver (selenium.webdriver): The Selenium-controlled browser.
prefix (str): A prefix which will be used in the output file names for the logs.
Returns:
None
"""
browser_name = os.environ.get('SELENIUM_BROWSER', 'firefox')
log_dir = os.environ.get('SELENIUM_DRIVER_LOG_DIR')
if not log_dir:
LOGGER.warning('The SELENIUM_DRIVER_LOG_DIR environment variable was not set; not saving logs')
return
elif not os.path.exists(log_dir):
os.makedirs(log_dir)
if browser_name == 'firefox':
# Firefox doesn't yet provide logs to Selenium, but does log to a separate file
# https://github.com/mozilla/geckodriver/issues/284
# https://firefox-source-docs.mozilla.org/testing/geckodriver/geckodriver/TraceLogs.html
log_path = os.path.join(os.getcwd(), 'geckodriver.log')
if os.path.exists(log_path):
dest_path = os.path.join(log_dir, '{}_geckodriver.log'.format(prefix))
copyfile(log_path, dest_path)
return
log_types = driver.log_types
for log_type in log_types:
try:
log = driver.get_log(log_type)
file_name = os.path.join(
log_dir, '{}_{}.log'.format(prefix, log_type)
)
with open(file_name, 'w') as output_file:
for line in log:
output_file.write("{}{}".format(dumps(line), '\n'))
except: # pylint: disable=bare-except
msg = (
u"Could not save browser log of type '{log_type}'. "
u"It may be that the browser does not support it."
).format(log_type=log_type)
LOGGER.warning(msg, exc_info=True) | python | def save_driver_logs(driver, prefix):
browser_name = os.environ.get('SELENIUM_BROWSER', 'firefox')
log_dir = os.environ.get('SELENIUM_DRIVER_LOG_DIR')
if not log_dir:
LOGGER.warning('The SELENIUM_DRIVER_LOG_DIR environment variable was not set; not saving logs')
return
elif not os.path.exists(log_dir):
os.makedirs(log_dir)
if browser_name == 'firefox':
# Firefox doesn't yet provide logs to Selenium, but does log to a separate file
# https://github.com/mozilla/geckodriver/issues/284
# https://firefox-source-docs.mozilla.org/testing/geckodriver/geckodriver/TraceLogs.html
log_path = os.path.join(os.getcwd(), 'geckodriver.log')
if os.path.exists(log_path):
dest_path = os.path.join(log_dir, '{}_geckodriver.log'.format(prefix))
copyfile(log_path, dest_path)
return
log_types = driver.log_types
for log_type in log_types:
try:
log = driver.get_log(log_type)
file_name = os.path.join(
log_dir, '{}_{}.log'.format(prefix, log_type)
)
with open(file_name, 'w') as output_file:
for line in log:
output_file.write("{}{}".format(dumps(line), '\n'))
except: # pylint: disable=bare-except
msg = (
u"Could not save browser log of type '{log_type}'. "
u"It may be that the browser does not support it."
).format(log_type=log_type)
LOGGER.warning(msg, exc_info=True) | [
"def",
"save_driver_logs",
"(",
"driver",
",",
"prefix",
")",
":",
"browser_name",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'SELENIUM_BROWSER'",
",",
"'firefox'",
")",
"log_dir",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'SELENIUM_DRIVER_LOG_DIR'",
")... | Save the selenium driver logs.
The location of the driver log files can be configured
by the environment variable `SELENIUM_DRIVER_LOG_DIR`. If not set,
this defaults to the current working directory.
Args:
driver (selenium.webdriver): The Selenium-controlled browser.
prefix (str): A prefix which will be used in the output file names for the logs.
Returns:
None | [
"Save",
"the",
"selenium",
"driver",
"logs",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L141-L189 |
18,438 | edx/bok-choy | bok_choy/browser.py | browser | def browser(tags=None, proxy=None, other_caps=None):
"""
Interpret environment variables to configure Selenium.
Performs validation, logging, and sensible defaults.
There are three cases:
1. Local browsers: If the proper environment variables are not all set for the second case,
then we use a local browser.
* The environment variable `SELENIUM_BROWSER` can be set to specify which local browser to use. The default is \
Firefox.
* Additionally, if a proxy instance is passed and the browser choice is either Chrome or Firefox, then the \
browser will be initialized with the proxy server set.
* The environment variable `SELENIUM_FIREFOX_PATH` can be used for specifying a path to the Firefox binary. \
Default behavior is to use the system location.
* The environment variable `FIREFOX_PROFILE_PATH` can be used for specifying a path to the Firefox profile. \
Default behavior is to use a barebones default profile with a few useful preferences set.
2. Remote browser (not SauceLabs): Set all of the following environment variables, but not all of
the ones needed for SauceLabs:
* SELENIUM_BROWSER
* SELENIUM_HOST
* SELENIUM_PORT
3. SauceLabs: Set all of the following environment variables:
* SELENIUM_BROWSER
* SELENIUM_VERSION
* SELENIUM_PLATFORM
* SELENIUM_HOST
* SELENIUM_PORT
* SAUCE_USER_NAME
* SAUCE_API_KEY
**NOTE:** these are the environment variables set by the SauceLabs
Jenkins plugin.
Optionally provide Jenkins info, used to identify jobs to Sauce:
* JOB_NAME
* BUILD_NUMBER
`tags` is a list of string tags to apply to the SauceLabs
job. If not using SauceLabs, these will be ignored.
Keyword Args:
tags (list of str): Tags to apply to the SauceLabs job. If not using SauceLabs, these will be ignored.
proxy: A proxy instance.
other_caps (dict of str): Additional desired capabilities to provide to remote WebDriver instances. Note
that these values will be overwritten by environment variables described above. This is only used for
remote driver instances, where such info is usually used by services for additional configuration and
metadata.
Returns:
selenium.webdriver: The configured browser object used to drive tests
Raises:
BrowserConfigError: The environment variables are not correctly specified.
"""
browser_name = os.environ.get('SELENIUM_BROWSER', 'firefox')
def browser_check_func():
""" Instantiate the browser and return the browser instance """
# See https://openedx.atlassian.net/browse/TE-701
try:
# Get the class and kwargs required to instantiate the browser based on
# whether we are using a local or remote one.
if _use_remote_browser(SAUCE_ENV_VARS):
browser_class, browser_args, browser_kwargs = _remote_browser_class(
SAUCE_ENV_VARS, tags)
elif _use_remote_browser(REMOTE_ENV_VARS):
browser_class, browser_args, browser_kwargs = _remote_browser_class(
REMOTE_ENV_VARS, tags)
else:
browser_class, browser_args, browser_kwargs = _local_browser_class(
browser_name)
# If we are using a proxy, we need extra kwargs passed on intantiation.
if proxy:
browser_kwargs = _proxy_kwargs(browser_name, proxy, browser_kwargs)
# Load in user given desired caps but override with derived caps from above. This is to retain existing
# behavior. Only for remote drivers, where various testing services use this info for configuration.
if browser_class == webdriver.Remote:
desired_caps = other_caps or {}
desired_caps.update(browser_kwargs.get('desired_capabilities', {}))
browser_kwargs['desired_capabilities'] = desired_caps
return True, browser_class(*browser_args, **browser_kwargs)
except (socket.error, WebDriverException) as err:
msg = str(err)
LOGGER.debug('Failed to instantiate browser: ' + msg)
return False, None
browser_instance = Promise(
# There are cases where selenium takes 30s to return with a failure, so in order to try 3
# times, we set a long timeout. If there is a hang on the first try, the timeout will
# be enforced.
browser_check_func, "Browser is instantiated successfully.", try_limit=3, timeout=95).fulfill()
return browser_instance | python | def browser(tags=None, proxy=None, other_caps=None):
browser_name = os.environ.get('SELENIUM_BROWSER', 'firefox')
def browser_check_func():
""" Instantiate the browser and return the browser instance """
# See https://openedx.atlassian.net/browse/TE-701
try:
# Get the class and kwargs required to instantiate the browser based on
# whether we are using a local or remote one.
if _use_remote_browser(SAUCE_ENV_VARS):
browser_class, browser_args, browser_kwargs = _remote_browser_class(
SAUCE_ENV_VARS, tags)
elif _use_remote_browser(REMOTE_ENV_VARS):
browser_class, browser_args, browser_kwargs = _remote_browser_class(
REMOTE_ENV_VARS, tags)
else:
browser_class, browser_args, browser_kwargs = _local_browser_class(
browser_name)
# If we are using a proxy, we need extra kwargs passed on intantiation.
if proxy:
browser_kwargs = _proxy_kwargs(browser_name, proxy, browser_kwargs)
# Load in user given desired caps but override with derived caps from above. This is to retain existing
# behavior. Only for remote drivers, where various testing services use this info for configuration.
if browser_class == webdriver.Remote:
desired_caps = other_caps or {}
desired_caps.update(browser_kwargs.get('desired_capabilities', {}))
browser_kwargs['desired_capabilities'] = desired_caps
return True, browser_class(*browser_args, **browser_kwargs)
except (socket.error, WebDriverException) as err:
msg = str(err)
LOGGER.debug('Failed to instantiate browser: ' + msg)
return False, None
browser_instance = Promise(
# There are cases where selenium takes 30s to return with a failure, so in order to try 3
# times, we set a long timeout. If there is a hang on the first try, the timeout will
# be enforced.
browser_check_func, "Browser is instantiated successfully.", try_limit=3, timeout=95).fulfill()
return browser_instance | [
"def",
"browser",
"(",
"tags",
"=",
"None",
",",
"proxy",
"=",
"None",
",",
"other_caps",
"=",
"None",
")",
":",
"browser_name",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'SELENIUM_BROWSER'",
",",
"'firefox'",
")",
"def",
"browser_check_func",
"(",
")... | Interpret environment variables to configure Selenium.
Performs validation, logging, and sensible defaults.
There are three cases:
1. Local browsers: If the proper environment variables are not all set for the second case,
then we use a local browser.
* The environment variable `SELENIUM_BROWSER` can be set to specify which local browser to use. The default is \
Firefox.
* Additionally, if a proxy instance is passed and the browser choice is either Chrome or Firefox, then the \
browser will be initialized with the proxy server set.
* The environment variable `SELENIUM_FIREFOX_PATH` can be used for specifying a path to the Firefox binary. \
Default behavior is to use the system location.
* The environment variable `FIREFOX_PROFILE_PATH` can be used for specifying a path to the Firefox profile. \
Default behavior is to use a barebones default profile with a few useful preferences set.
2. Remote browser (not SauceLabs): Set all of the following environment variables, but not all of
the ones needed for SauceLabs:
* SELENIUM_BROWSER
* SELENIUM_HOST
* SELENIUM_PORT
3. SauceLabs: Set all of the following environment variables:
* SELENIUM_BROWSER
* SELENIUM_VERSION
* SELENIUM_PLATFORM
* SELENIUM_HOST
* SELENIUM_PORT
* SAUCE_USER_NAME
* SAUCE_API_KEY
**NOTE:** these are the environment variables set by the SauceLabs
Jenkins plugin.
Optionally provide Jenkins info, used to identify jobs to Sauce:
* JOB_NAME
* BUILD_NUMBER
`tags` is a list of string tags to apply to the SauceLabs
job. If not using SauceLabs, these will be ignored.
Keyword Args:
tags (list of str): Tags to apply to the SauceLabs job. If not using SauceLabs, these will be ignored.
proxy: A proxy instance.
other_caps (dict of str): Additional desired capabilities to provide to remote WebDriver instances. Note
that these values will be overwritten by environment variables described above. This is only used for
remote driver instances, where such info is usually used by services for additional configuration and
metadata.
Returns:
selenium.webdriver: The configured browser object used to drive tests
Raises:
BrowserConfigError: The environment variables are not correctly specified. | [
"Interpret",
"environment",
"variables",
"to",
"configure",
"Selenium",
".",
"Performs",
"validation",
"logging",
"and",
"sensible",
"defaults",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L192-L296 |
18,439 | edx/bok-choy | bok_choy/browser.py | _firefox_profile | def _firefox_profile():
"""Configure the Firefox profile, respecting FIREFOX_PROFILE_PATH if set"""
profile_dir = os.environ.get(FIREFOX_PROFILE_ENV_VAR)
if profile_dir:
LOGGER.info(u"Using firefox profile: %s", profile_dir)
try:
firefox_profile = webdriver.FirefoxProfile(profile_dir)
except OSError as err:
if err.errno == errno.ENOENT:
raise BrowserConfigError(
u"Firefox profile directory {env_var}={profile_dir} does not exist".format(
env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir))
elif err.errno == errno.EACCES:
raise BrowserConfigError(
u"Firefox profile directory {env_var}={profile_dir} has incorrect permissions. It must be \
readable and executable.".format(env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir))
else:
# Some other OSError:
raise BrowserConfigError(
u"Problem with firefox profile directory {env_var}={profile_dir}: {msg}"
.format(env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir, msg=str(err)))
else:
LOGGER.info("Using default firefox profile")
firefox_profile = webdriver.FirefoxProfile()
# Bypasses the security prompt displayed by the browser when it attempts to
# access a media device (e.g., a webcam)
firefox_profile.set_preference('media.navigator.permission.disabled', True)
# Disable the initial url fetch to 'learn more' from mozilla (so you don't have to
# be online to run bok-choy on firefox)
firefox_profile.set_preference('browser.startup.homepage', 'about:blank')
firefox_profile.set_preference('startup.homepage_welcome_url', 'about:blank')
firefox_profile.set_preference('startup.homepage_welcome_url.additional', 'about:blank')
# Disable fetching an updated version of firefox
firefox_profile.set_preference('app.update.enabled', False)
# Disable plugin checking
firefox_profile.set_preference('plugins.hide_infobar_for_outdated_plugin', True)
# Disable health reporter
firefox_profile.set_preference('datareporting.healthreport.service.enabled', False)
# Disable all data upload (Telemetry and FHR)
firefox_profile.set_preference('datareporting.policy.dataSubmissionEnabled', False)
# Disable crash reporter
firefox_profile.set_preference('toolkit.crashreporter.enabled', False)
# Disable the JSON Viewer
firefox_profile.set_preference('devtools.jsonview.enabled', False)
# Grant OS focus to the launched browser so focus-related tests function correctly
firefox_profile.set_preference('focusmanager.testmode', True)
for function in FIREFOX_PROFILE_CUSTOMIZERS:
function(firefox_profile)
return firefox_profile | python | def _firefox_profile():
profile_dir = os.environ.get(FIREFOX_PROFILE_ENV_VAR)
if profile_dir:
LOGGER.info(u"Using firefox profile: %s", profile_dir)
try:
firefox_profile = webdriver.FirefoxProfile(profile_dir)
except OSError as err:
if err.errno == errno.ENOENT:
raise BrowserConfigError(
u"Firefox profile directory {env_var}={profile_dir} does not exist".format(
env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir))
elif err.errno == errno.EACCES:
raise BrowserConfigError(
u"Firefox profile directory {env_var}={profile_dir} has incorrect permissions. It must be \
readable and executable.".format(env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir))
else:
# Some other OSError:
raise BrowserConfigError(
u"Problem with firefox profile directory {env_var}={profile_dir}: {msg}"
.format(env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir, msg=str(err)))
else:
LOGGER.info("Using default firefox profile")
firefox_profile = webdriver.FirefoxProfile()
# Bypasses the security prompt displayed by the browser when it attempts to
# access a media device (e.g., a webcam)
firefox_profile.set_preference('media.navigator.permission.disabled', True)
# Disable the initial url fetch to 'learn more' from mozilla (so you don't have to
# be online to run bok-choy on firefox)
firefox_profile.set_preference('browser.startup.homepage', 'about:blank')
firefox_profile.set_preference('startup.homepage_welcome_url', 'about:blank')
firefox_profile.set_preference('startup.homepage_welcome_url.additional', 'about:blank')
# Disable fetching an updated version of firefox
firefox_profile.set_preference('app.update.enabled', False)
# Disable plugin checking
firefox_profile.set_preference('plugins.hide_infobar_for_outdated_plugin', True)
# Disable health reporter
firefox_profile.set_preference('datareporting.healthreport.service.enabled', False)
# Disable all data upload (Telemetry and FHR)
firefox_profile.set_preference('datareporting.policy.dataSubmissionEnabled', False)
# Disable crash reporter
firefox_profile.set_preference('toolkit.crashreporter.enabled', False)
# Disable the JSON Viewer
firefox_profile.set_preference('devtools.jsonview.enabled', False)
# Grant OS focus to the launched browser so focus-related tests function correctly
firefox_profile.set_preference('focusmanager.testmode', True)
for function in FIREFOX_PROFILE_CUSTOMIZERS:
function(firefox_profile)
return firefox_profile | [
"def",
"_firefox_profile",
"(",
")",
":",
"profile_dir",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"FIREFOX_PROFILE_ENV_VAR",
")",
"if",
"profile_dir",
":",
"LOGGER",
".",
"info",
"(",
"u\"Using firefox profile: %s\"",
",",
"profile_dir",
")",
"try",
":",
"f... | Configure the Firefox profile, respecting FIREFOX_PROFILE_PATH if set | [
"Configure",
"the",
"Firefox",
"profile",
"respecting",
"FIREFOX_PROFILE_PATH",
"if",
"set"
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L309-L367 |
18,440 | edx/bok-choy | bok_choy/browser.py | _local_browser_class | def _local_browser_class(browser_name):
"""
Returns class, kwargs, and args needed to instantiate the local browser.
"""
# Log name of local browser
LOGGER.info(u"Using local browser: %s [Default is firefox]", browser_name)
# Get class of local browser based on name
browser_class = BROWSERS.get(browser_name)
headless = os.environ.get('BOKCHOY_HEADLESS', 'false').lower() == 'true'
if browser_class is None:
raise BrowserConfigError(
u"Invalid browser name {name}. Options are: {options}".format(
name=browser_name, options=", ".join(list(BROWSERS.keys()))))
else:
if browser_name == 'firefox':
# Remove geckodriver log data from previous test cases
log_path = os.path.join(os.getcwd(), 'geckodriver.log')
if os.path.exists(log_path):
os.remove(log_path)
firefox_options = FirefoxOptions()
firefox_options.log.level = 'trace'
if headless:
firefox_options.headless = True
browser_args = []
browser_kwargs = {
'firefox_profile': _firefox_profile(),
'options': firefox_options,
}
firefox_path = os.environ.get('SELENIUM_FIREFOX_PATH')
firefox_log = os.environ.get('SELENIUM_FIREFOX_LOG')
if firefox_path and firefox_log:
browser_kwargs.update({
'firefox_binary': FirefoxBinary(
firefox_path=firefox_path, log_file=firefox_log)
})
elif firefox_path:
browser_kwargs.update({
'firefox_binary': FirefoxBinary(firefox_path=firefox_path)
})
elif firefox_log:
browser_kwargs.update({
'firefox_binary': FirefoxBinary(log_file=firefox_log)
})
elif browser_name == 'chrome':
chrome_options = ChromeOptions()
if headless:
chrome_options.headless = True
# Emulate webcam and microphone for testing purposes
chrome_options.add_argument('--use-fake-device-for-media-stream')
# Bypasses the security prompt displayed by the browser when it attempts to
# access a media device (e.g., a webcam)
chrome_options.add_argument('--use-fake-ui-for-media-stream')
browser_args = []
browser_kwargs = {
'options': chrome_options,
}
else:
browser_args, browser_kwargs = [], {}
return browser_class, browser_args, browser_kwargs | python | def _local_browser_class(browser_name):
# Log name of local browser
LOGGER.info(u"Using local browser: %s [Default is firefox]", browser_name)
# Get class of local browser based on name
browser_class = BROWSERS.get(browser_name)
headless = os.environ.get('BOKCHOY_HEADLESS', 'false').lower() == 'true'
if browser_class is None:
raise BrowserConfigError(
u"Invalid browser name {name}. Options are: {options}".format(
name=browser_name, options=", ".join(list(BROWSERS.keys()))))
else:
if browser_name == 'firefox':
# Remove geckodriver log data from previous test cases
log_path = os.path.join(os.getcwd(), 'geckodriver.log')
if os.path.exists(log_path):
os.remove(log_path)
firefox_options = FirefoxOptions()
firefox_options.log.level = 'trace'
if headless:
firefox_options.headless = True
browser_args = []
browser_kwargs = {
'firefox_profile': _firefox_profile(),
'options': firefox_options,
}
firefox_path = os.environ.get('SELENIUM_FIREFOX_PATH')
firefox_log = os.environ.get('SELENIUM_FIREFOX_LOG')
if firefox_path and firefox_log:
browser_kwargs.update({
'firefox_binary': FirefoxBinary(
firefox_path=firefox_path, log_file=firefox_log)
})
elif firefox_path:
browser_kwargs.update({
'firefox_binary': FirefoxBinary(firefox_path=firefox_path)
})
elif firefox_log:
browser_kwargs.update({
'firefox_binary': FirefoxBinary(log_file=firefox_log)
})
elif browser_name == 'chrome':
chrome_options = ChromeOptions()
if headless:
chrome_options.headless = True
# Emulate webcam and microphone for testing purposes
chrome_options.add_argument('--use-fake-device-for-media-stream')
# Bypasses the security prompt displayed by the browser when it attempts to
# access a media device (e.g., a webcam)
chrome_options.add_argument('--use-fake-ui-for-media-stream')
browser_args = []
browser_kwargs = {
'options': chrome_options,
}
else:
browser_args, browser_kwargs = [], {}
return browser_class, browser_args, browser_kwargs | [
"def",
"_local_browser_class",
"(",
"browser_name",
")",
":",
"# Log name of local browser",
"LOGGER",
".",
"info",
"(",
"u\"Using local browser: %s [Default is firefox]\"",
",",
"browser_name",
")",
"# Get class of local browser based on name",
"browser_class",
"=",
"BROWSERS",
... | Returns class, kwargs, and args needed to instantiate the local browser. | [
"Returns",
"class",
"kwargs",
"and",
"args",
"needed",
"to",
"instantiate",
"the",
"local",
"browser",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L370-L437 |
18,441 | edx/bok-choy | bok_choy/browser.py | _remote_browser_class | def _remote_browser_class(env_vars, tags=None):
"""
Returns class, kwargs, and args needed to instantiate the remote browser.
"""
if tags is None:
tags = []
# Interpret the environment variables, raising an exception if they're
# invalid
envs = _required_envs(env_vars)
envs.update(_optional_envs())
# Turn the environment variables into a dictionary of desired capabilities
caps = _capabilities_dict(envs, tags)
if 'accessKey' in caps:
LOGGER.info(u"Using SauceLabs: %s %s %s", caps['platform'], caps['browserName'], caps['version'])
else:
LOGGER.info(u"Using Remote Browser: %s", caps['browserName'])
# Create and return a new Browser
# We assume that the WebDriver end-point is running locally (e.g. using
# SauceConnect)
url = u"http://{0}:{1}/wd/hub".format(
envs['SELENIUM_HOST'], envs['SELENIUM_PORT'])
browser_args = []
browser_kwargs = {
'command_executor': url,
'desired_capabilities': caps,
}
if caps['browserName'] == 'firefox':
browser_kwargs['browser_profile'] = _firefox_profile()
return webdriver.Remote, browser_args, browser_kwargs | python | def _remote_browser_class(env_vars, tags=None):
if tags is None:
tags = []
# Interpret the environment variables, raising an exception if they're
# invalid
envs = _required_envs(env_vars)
envs.update(_optional_envs())
# Turn the environment variables into a dictionary of desired capabilities
caps = _capabilities_dict(envs, tags)
if 'accessKey' in caps:
LOGGER.info(u"Using SauceLabs: %s %s %s", caps['platform'], caps['browserName'], caps['version'])
else:
LOGGER.info(u"Using Remote Browser: %s", caps['browserName'])
# Create and return a new Browser
# We assume that the WebDriver end-point is running locally (e.g. using
# SauceConnect)
url = u"http://{0}:{1}/wd/hub".format(
envs['SELENIUM_HOST'], envs['SELENIUM_PORT'])
browser_args = []
browser_kwargs = {
'command_executor': url,
'desired_capabilities': caps,
}
if caps['browserName'] == 'firefox':
browser_kwargs['browser_profile'] = _firefox_profile()
return webdriver.Remote, browser_args, browser_kwargs | [
"def",
"_remote_browser_class",
"(",
"env_vars",
",",
"tags",
"=",
"None",
")",
":",
"if",
"tags",
"is",
"None",
":",
"tags",
"=",
"[",
"]",
"# Interpret the environment variables, raising an exception if they're",
"# invalid",
"envs",
"=",
"_required_envs",
"(",
"e... | Returns class, kwargs, and args needed to instantiate the remote browser. | [
"Returns",
"class",
"kwargs",
"and",
"args",
"needed",
"to",
"instantiate",
"the",
"remote",
"browser",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L440-L474 |
18,442 | edx/bok-choy | bok_choy/browser.py | _proxy_kwargs | def _proxy_kwargs(browser_name, proxy, browser_kwargs={}): # pylint: disable=dangerous-default-value
"""
Determines the kwargs needed to set up a proxy based on the
browser type.
Returns: a dictionary of arguments needed to pass when
instantiating the WebDriver instance.
"""
proxy_dict = {
"httpProxy": proxy.proxy,
"proxyType": 'manual',
}
if browser_name == 'firefox' and 'desired_capabilities' not in browser_kwargs:
# This one works for firefox locally
wd_proxy = webdriver.common.proxy.Proxy(proxy_dict)
browser_kwargs['proxy'] = wd_proxy
else:
# This one works with chrome, both locally and remote
# This one works with firefox remote, but not locally
if 'desired_capabilities' not in browser_kwargs:
browser_kwargs['desired_capabilities'] = {}
browser_kwargs['desired_capabilities']['proxy'] = proxy_dict
return browser_kwargs | python | def _proxy_kwargs(browser_name, proxy, browser_kwargs={}): # pylint: disable=dangerous-default-value
proxy_dict = {
"httpProxy": proxy.proxy,
"proxyType": 'manual',
}
if browser_name == 'firefox' and 'desired_capabilities' not in browser_kwargs:
# This one works for firefox locally
wd_proxy = webdriver.common.proxy.Proxy(proxy_dict)
browser_kwargs['proxy'] = wd_proxy
else:
# This one works with chrome, both locally and remote
# This one works with firefox remote, but not locally
if 'desired_capabilities' not in browser_kwargs:
browser_kwargs['desired_capabilities'] = {}
browser_kwargs['desired_capabilities']['proxy'] = proxy_dict
return browser_kwargs | [
"def",
"_proxy_kwargs",
"(",
"browser_name",
",",
"proxy",
",",
"browser_kwargs",
"=",
"{",
"}",
")",
":",
"# pylint: disable=dangerous-default-value",
"proxy_dict",
"=",
"{",
"\"httpProxy\"",
":",
"proxy",
".",
"proxy",
",",
"\"proxyType\"",
":",
"'manual'",
",",... | Determines the kwargs needed to set up a proxy based on the
browser type.
Returns: a dictionary of arguments needed to pass when
instantiating the WebDriver instance. | [
"Determines",
"the",
"kwargs",
"needed",
"to",
"set",
"up",
"a",
"proxy",
"based",
"on",
"the",
"browser",
"type",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L477-L503 |
18,443 | edx/bok-choy | bok_choy/browser.py | _required_envs | def _required_envs(env_vars):
"""
Parse environment variables for required values,
raising a `BrowserConfig` error if they are not found.
Returns a `dict` of environment variables.
"""
envs = {
key: os.environ.get(key)
for key in env_vars
}
# Check for missing keys
missing = [key for key, val in list(envs.items()) if val is None]
if missing:
msg = (
u"These environment variables must be set: " + u", ".join(missing)
)
raise BrowserConfigError(msg)
# Check that we support this browser
if envs['SELENIUM_BROWSER'] not in BROWSERS:
msg = u"Unsuppported browser: {0}".format(envs['SELENIUM_BROWSER'])
raise BrowserConfigError(msg)
return envs | python | def _required_envs(env_vars):
envs = {
key: os.environ.get(key)
for key in env_vars
}
# Check for missing keys
missing = [key for key, val in list(envs.items()) if val is None]
if missing:
msg = (
u"These environment variables must be set: " + u", ".join(missing)
)
raise BrowserConfigError(msg)
# Check that we support this browser
if envs['SELENIUM_BROWSER'] not in BROWSERS:
msg = u"Unsuppported browser: {0}".format(envs['SELENIUM_BROWSER'])
raise BrowserConfigError(msg)
return envs | [
"def",
"_required_envs",
"(",
"env_vars",
")",
":",
"envs",
"=",
"{",
"key",
":",
"os",
".",
"environ",
".",
"get",
"(",
"key",
")",
"for",
"key",
"in",
"env_vars",
"}",
"# Check for missing keys",
"missing",
"=",
"[",
"key",
"for",
"key",
",",
"val",
... | Parse environment variables for required values,
raising a `BrowserConfig` error if they are not found.
Returns a `dict` of environment variables. | [
"Parse",
"environment",
"variables",
"for",
"required",
"values",
"raising",
"a",
"BrowserConfig",
"error",
"if",
"they",
"are",
"not",
"found",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L519-L544 |
18,444 | edx/bok-choy | bok_choy/browser.py | _optional_envs | def _optional_envs():
"""
Parse environment variables for optional values,
raising a `BrowserConfig` error if they are insufficiently specified.
Returns a `dict` of environment variables.
"""
envs = {
key: os.environ.get(key)
for key in OPTIONAL_ENV_VARS
if key in os.environ
}
# If we're using Jenkins, check that we have all the required info
if 'JOB_NAME' in envs and 'BUILD_NUMBER' not in envs:
raise BrowserConfigError("Missing BUILD_NUMBER environment var")
if 'BUILD_NUMBER' in envs and 'JOB_NAME' not in envs:
raise BrowserConfigError("Missing JOB_NAME environment var")
return envs | python | def _optional_envs():
envs = {
key: os.environ.get(key)
for key in OPTIONAL_ENV_VARS
if key in os.environ
}
# If we're using Jenkins, check that we have all the required info
if 'JOB_NAME' in envs and 'BUILD_NUMBER' not in envs:
raise BrowserConfigError("Missing BUILD_NUMBER environment var")
if 'BUILD_NUMBER' in envs and 'JOB_NAME' not in envs:
raise BrowserConfigError("Missing JOB_NAME environment var")
return envs | [
"def",
"_optional_envs",
"(",
")",
":",
"envs",
"=",
"{",
"key",
":",
"os",
".",
"environ",
".",
"get",
"(",
"key",
")",
"for",
"key",
"in",
"OPTIONAL_ENV_VARS",
"if",
"key",
"in",
"os",
".",
"environ",
"}",
"# If we're using Jenkins, check that we have all ... | Parse environment variables for optional values,
raising a `BrowserConfig` error if they are insufficiently specified.
Returns a `dict` of environment variables. | [
"Parse",
"environment",
"variables",
"for",
"optional",
"values",
"raising",
"a",
"BrowserConfig",
"error",
"if",
"they",
"are",
"insufficiently",
"specified",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L547-L567 |
18,445 | edx/bok-choy | bok_choy/browser.py | _capabilities_dict | def _capabilities_dict(envs, tags):
"""
Convert the dictionary of environment variables to
a dictionary of desired capabilities to send to the
Remote WebDriver.
`tags` is a list of string tags to apply to the SauceLabs job.
"""
capabilities = {
'browserName': envs['SELENIUM_BROWSER'],
'acceptInsecureCerts': bool(envs.get('SELENIUM_INSECURE_CERTS', False)),
'video-upload-on-pass': False,
'sauce-advisor': False,
'capture-html': True,
'record-screenshots': True,
'max-duration': 600,
'public': 'public restricted',
'tags': tags,
}
# Add SauceLabs specific environment vars if they are set.
if _use_remote_browser(SAUCE_ENV_VARS):
sauce_capabilities = {
'platform': envs['SELENIUM_PLATFORM'],
'version': envs['SELENIUM_VERSION'],
'username': envs['SAUCE_USER_NAME'],
'accessKey': envs['SAUCE_API_KEY'],
}
capabilities.update(sauce_capabilities)
# Optional: Add in Jenkins-specific environment variables
# to link Sauce output with the Jenkins job
if 'JOB_NAME' in envs:
jenkins_vars = {
'build': envs['BUILD_NUMBER'],
'name': envs['JOB_NAME'],
}
capabilities.update(jenkins_vars)
return capabilities | python | def _capabilities_dict(envs, tags):
capabilities = {
'browserName': envs['SELENIUM_BROWSER'],
'acceptInsecureCerts': bool(envs.get('SELENIUM_INSECURE_CERTS', False)),
'video-upload-on-pass': False,
'sauce-advisor': False,
'capture-html': True,
'record-screenshots': True,
'max-duration': 600,
'public': 'public restricted',
'tags': tags,
}
# Add SauceLabs specific environment vars if they are set.
if _use_remote_browser(SAUCE_ENV_VARS):
sauce_capabilities = {
'platform': envs['SELENIUM_PLATFORM'],
'version': envs['SELENIUM_VERSION'],
'username': envs['SAUCE_USER_NAME'],
'accessKey': envs['SAUCE_API_KEY'],
}
capabilities.update(sauce_capabilities)
# Optional: Add in Jenkins-specific environment variables
# to link Sauce output with the Jenkins job
if 'JOB_NAME' in envs:
jenkins_vars = {
'build': envs['BUILD_NUMBER'],
'name': envs['JOB_NAME'],
}
capabilities.update(jenkins_vars)
return capabilities | [
"def",
"_capabilities_dict",
"(",
"envs",
",",
"tags",
")",
":",
"capabilities",
"=",
"{",
"'browserName'",
":",
"envs",
"[",
"'SELENIUM_BROWSER'",
"]",
",",
"'acceptInsecureCerts'",
":",
"bool",
"(",
"envs",
".",
"get",
"(",
"'SELENIUM_INSECURE_CERTS'",
",",
... | Convert the dictionary of environment variables to
a dictionary of desired capabilities to send to the
Remote WebDriver.
`tags` is a list of string tags to apply to the SauceLabs job. | [
"Convert",
"the",
"dictionary",
"of",
"environment",
"variables",
"to",
"a",
"dictionary",
"of",
"desired",
"capabilities",
"to",
"send",
"to",
"the",
"Remote",
"WebDriver",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/browser.py#L570-L611 |
18,446 | edx/bok-choy | bok_choy/query.py | Query.replace | def replace(self, **kwargs):
"""
Return a copy of this `Query`, but with attributes specified
as keyword arguments replaced by the keyword values.
Keyword Args:
Attributes/values to replace in the copy.
Returns:
A copy of the query that has its attributes updated with the specified values.
Raises:
TypeError: The `Query` does not have the specified attribute.
"""
clone = copy(self)
clone.transforms = list(clone.transforms)
for key, value in kwargs.items():
if not hasattr(clone, key):
raise TypeError(u'replace() got an unexpected keyword argument {!r}'.format(key))
setattr(clone, key, value)
return clone | python | def replace(self, **kwargs):
clone = copy(self)
clone.transforms = list(clone.transforms)
for key, value in kwargs.items():
if not hasattr(clone, key):
raise TypeError(u'replace() got an unexpected keyword argument {!r}'.format(key))
setattr(clone, key, value)
return clone | [
"def",
"replace",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"clone",
"=",
"copy",
"(",
"self",
")",
"clone",
".",
"transforms",
"=",
"list",
"(",
"clone",
".",
"transforms",
")",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
... | Return a copy of this `Query`, but with attributes specified
as keyword arguments replaced by the keyword values.
Keyword Args:
Attributes/values to replace in the copy.
Returns:
A copy of the query that has its attributes updated with the specified values.
Raises:
TypeError: The `Query` does not have the specified attribute. | [
"Return",
"a",
"copy",
"of",
"this",
"Query",
"but",
"with",
"attributes",
"specified",
"as",
"keyword",
"arguments",
"replaced",
"by",
"the",
"keyword",
"values",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L81-L103 |
18,447 | edx/bok-choy | bok_choy/query.py | Query.transform | def transform(self, transform, desc=None):
"""
Create a copy of this query, transformed by `transform`.
Args:
transform (callable): Callable that takes an iterable of values and
returns an iterable of transformed values.
Keyword Args:
desc (str): A description of the transform, to use in log messages.
Defaults to the name of the `transform` function.
Returns:
Query
"""
if desc is None:
desc = u'transform({})'.format(getattr(transform, '__name__', ''))
return self.replace(
transforms=self.transforms + [transform],
desc_stack=self.desc_stack + [desc]
) | python | def transform(self, transform, desc=None):
if desc is None:
desc = u'transform({})'.format(getattr(transform, '__name__', ''))
return self.replace(
transforms=self.transforms + [transform],
desc_stack=self.desc_stack + [desc]
) | [
"def",
"transform",
"(",
"self",
",",
"transform",
",",
"desc",
"=",
"None",
")",
":",
"if",
"desc",
"is",
"None",
":",
"desc",
"=",
"u'transform({})'",
".",
"format",
"(",
"getattr",
"(",
"transform",
",",
"'__name__'",
",",
"''",
")",
")",
"return",
... | Create a copy of this query, transformed by `transform`.
Args:
transform (callable): Callable that takes an iterable of values and
returns an iterable of transformed values.
Keyword Args:
desc (str): A description of the transform, to use in log messages.
Defaults to the name of the `transform` function.
Returns:
Query | [
"Create",
"a",
"copy",
"of",
"this",
"query",
"transformed",
"by",
"transform",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L105-L126 |
18,448 | edx/bok-choy | bok_choy/query.py | Query.map | def map(self, map_fn, desc=None):
"""
Return a copy of this query, with the values mapped through `map_fn`.
Args:
map_fn (callable): A callable that takes a single argument and returns a new value.
Keyword Args:
desc (str): A description of the mapping transform, for use in log message.
Defaults to the name of the map function.
Returns:
Query
"""
if desc is None:
desc = getattr(map_fn, '__name__', '')
desc = u'map({})'.format(desc)
return self.transform(lambda xs: (map_fn(x) for x in xs), desc=desc) | python | def map(self, map_fn, desc=None):
if desc is None:
desc = getattr(map_fn, '__name__', '')
desc = u'map({})'.format(desc)
return self.transform(lambda xs: (map_fn(x) for x in xs), desc=desc) | [
"def",
"map",
"(",
"self",
",",
"map_fn",
",",
"desc",
"=",
"None",
")",
":",
"if",
"desc",
"is",
"None",
":",
"desc",
"=",
"getattr",
"(",
"map_fn",
",",
"'__name__'",
",",
"''",
")",
"desc",
"=",
"u'map({})'",
".",
"format",
"(",
"desc",
")",
"... | Return a copy of this query, with the values mapped through `map_fn`.
Args:
map_fn (callable): A callable that takes a single argument and returns a new value.
Keyword Args:
desc (str): A description of the mapping transform, for use in log message.
Defaults to the name of the map function.
Returns:
Query | [
"Return",
"a",
"copy",
"of",
"this",
"query",
"with",
"the",
"values",
"mapped",
"through",
"map_fn",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L128-L146 |
18,449 | edx/bok-choy | bok_choy/query.py | Query.filter | def filter(self, filter_fn=None, desc=None, **kwargs):
"""
Return a copy of this query, with some values removed.
Example usages:
.. code:: python
# Returns a query that matches even numbers
q.filter(filter_fn=lambda x: x % 2)
# Returns a query that matches elements with el.description == "foo"
q.filter(description="foo")
Keyword Args:
filter_fn (callable): If specified, a function that accepts one argument (the element)
and returns a boolean indicating whether to include that element in the results.
kwargs: Specify attribute values that an element must have to be included in the results.
desc (str): A description of the filter, for use in log messages.
Defaults to the name of the filter function or attribute.
Raises:
TypeError: neither or both of `filter_fn` and `kwargs` are provided.
"""
if filter_fn is not None and kwargs:
raise TypeError('Must supply either a filter_fn or attribute filter parameters to filter(), but not both.')
if filter_fn is None and not kwargs:
raise TypeError('Must supply one of filter_fn or one or more attribute filter parameters to filter().')
if desc is None:
if filter_fn is not None:
desc = getattr(filter_fn, '__name__', '')
elif kwargs:
desc = u", ".join([u"{}={!r}".format(key, value) for key, value in kwargs.items()])
desc = u"filter({})".format(desc)
if kwargs:
def filter_fn(elem): # pylint: disable=function-redefined, missing-docstring
return all(
getattr(elem, filter_key) == filter_value
for filter_key, filter_value
in kwargs.items()
)
return self.transform(lambda xs: (x for x in xs if filter_fn(x)), desc=desc) | python | def filter(self, filter_fn=None, desc=None, **kwargs):
if filter_fn is not None and kwargs:
raise TypeError('Must supply either a filter_fn or attribute filter parameters to filter(), but not both.')
if filter_fn is None and not kwargs:
raise TypeError('Must supply one of filter_fn or one or more attribute filter parameters to filter().')
if desc is None:
if filter_fn is not None:
desc = getattr(filter_fn, '__name__', '')
elif kwargs:
desc = u", ".join([u"{}={!r}".format(key, value) for key, value in kwargs.items()])
desc = u"filter({})".format(desc)
if kwargs:
def filter_fn(elem): # pylint: disable=function-redefined, missing-docstring
return all(
getattr(elem, filter_key) == filter_value
for filter_key, filter_value
in kwargs.items()
)
return self.transform(lambda xs: (x for x in xs if filter_fn(x)), desc=desc) | [
"def",
"filter",
"(",
"self",
",",
"filter_fn",
"=",
"None",
",",
"desc",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"filter_fn",
"is",
"not",
"None",
"and",
"kwargs",
":",
"raise",
"TypeError",
"(",
"'Must supply either a filter_fn or attribute f... | Return a copy of this query, with some values removed.
Example usages:
.. code:: python
# Returns a query that matches even numbers
q.filter(filter_fn=lambda x: x % 2)
# Returns a query that matches elements with el.description == "foo"
q.filter(description="foo")
Keyword Args:
filter_fn (callable): If specified, a function that accepts one argument (the element)
and returns a boolean indicating whether to include that element in the results.
kwargs: Specify attribute values that an element must have to be included in the results.
desc (str): A description of the filter, for use in log messages.
Defaults to the name of the filter function or attribute.
Raises:
TypeError: neither or both of `filter_fn` and `kwargs` are provided. | [
"Return",
"a",
"copy",
"of",
"this",
"query",
"with",
"some",
"values",
"removed",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L148-L194 |
18,450 | edx/bok-choy | bok_choy/query.py | Query._execute | def _execute(self):
"""
Run the query, generating data from the `seed_fn` and performing transforms on the results.
"""
data = self.seed_fn()
for transform in self.transforms:
data = transform(data)
return list(data) | python | def _execute(self):
data = self.seed_fn()
for transform in self.transforms:
data = transform(data)
return list(data) | [
"def",
"_execute",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"seed_fn",
"(",
")",
"for",
"transform",
"in",
"self",
".",
"transforms",
":",
"data",
"=",
"transform",
"(",
"data",
")",
"return",
"list",
"(",
"data",
")"
] | Run the query, generating data from the `seed_fn` and performing transforms on the results. | [
"Run",
"the",
"query",
"generating",
"data",
"from",
"the",
"seed_fn",
"and",
"performing",
"transforms",
"on",
"the",
"results",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L196-L203 |
18,451 | edx/bok-choy | bok_choy/query.py | Query.execute | def execute(self, try_limit=5, try_interval=0.5, timeout=30):
"""
Execute this query, retrying based on the supplied parameters.
Keyword Args:
try_limit (int): The number of times to retry the query.
try_interval (float): The number of seconds to wait between each try (float).
timeout (float): The maximum number of seconds to spend retrying (float).
Returns:
The transformed results of the query.
Raises:
BrokenPromise: The query did not execute without a Selenium error after one or more attempts.
"""
return Promise(
no_error(self._execute),
u"Executing {!r}".format(self),
try_limit=try_limit,
try_interval=try_interval,
timeout=timeout,
).fulfill() | python | def execute(self, try_limit=5, try_interval=0.5, timeout=30):
return Promise(
no_error(self._execute),
u"Executing {!r}".format(self),
try_limit=try_limit,
try_interval=try_interval,
timeout=timeout,
).fulfill() | [
"def",
"execute",
"(",
"self",
",",
"try_limit",
"=",
"5",
",",
"try_interval",
"=",
"0.5",
",",
"timeout",
"=",
"30",
")",
":",
"return",
"Promise",
"(",
"no_error",
"(",
"self",
".",
"_execute",
")",
",",
"u\"Executing {!r}\"",
".",
"format",
"(",
"s... | Execute this query, retrying based on the supplied parameters.
Keyword Args:
try_limit (int): The number of times to retry the query.
try_interval (float): The number of seconds to wait between each try (float).
timeout (float): The maximum number of seconds to spend retrying (float).
Returns:
The transformed results of the query.
Raises:
BrokenPromise: The query did not execute without a Selenium error after one or more attempts. | [
"Execute",
"this",
"query",
"retrying",
"based",
"on",
"the",
"supplied",
"parameters",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L205-L226 |
18,452 | edx/bok-choy | bok_choy/query.py | Query.first | def first(self):
"""
Return a Query that selects only the first element of this Query.
If no elements are available, returns a query with no results.
Example usage:
.. code:: python
>> q = Query(lambda: list(range(5)))
>> q.first.results
[0]
Returns:
Query
"""
def _transform(xs): # pylint: disable=missing-docstring, invalid-name
try:
return [six.next(iter(xs))]
except StopIteration:
return []
return self.transform(_transform, 'first') | python | def first(self):
def _transform(xs): # pylint: disable=missing-docstring, invalid-name
try:
return [six.next(iter(xs))]
except StopIteration:
return []
return self.transform(_transform, 'first') | [
"def",
"first",
"(",
"self",
")",
":",
"def",
"_transform",
"(",
"xs",
")",
":",
"# pylint: disable=missing-docstring, invalid-name",
"try",
":",
"return",
"[",
"six",
".",
"next",
"(",
"iter",
"(",
"xs",
")",
")",
"]",
"except",
"StopIteration",
":",
"ret... | Return a Query that selects only the first element of this Query.
If no elements are available, returns a query with no results.
Example usage:
.. code:: python
>> q = Query(lambda: list(range(5)))
>> q.first.results
[0]
Returns:
Query | [
"Return",
"a",
"Query",
"that",
"selects",
"only",
"the",
"first",
"element",
"of",
"this",
"Query",
".",
"If",
"no",
"elements",
"are",
"available",
"returns",
"a",
"query",
"with",
"no",
"results",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L258-L280 |
18,453 | edx/bok-choy | bok_choy/query.py | BrowserQuery.attrs | def attrs(self, attribute_name):
"""
Retrieve HTML attribute values from the elements matched by the query.
Example usage:
.. code:: python
# Assume that the query matches html elements:
# <div class="foo"> and <div class="bar">
>> q.attrs('class')
['foo', 'bar']
Args:
attribute_name (str): The name of the attribute values to retrieve.
Returns:
A list of attribute values for `attribute_name`.
"""
desc = u'attrs({!r})'.format(attribute_name)
return self.map(lambda el: el.get_attribute(attribute_name), desc).results | python | def attrs(self, attribute_name):
desc = u'attrs({!r})'.format(attribute_name)
return self.map(lambda el: el.get_attribute(attribute_name), desc).results | [
"def",
"attrs",
"(",
"self",
",",
"attribute_name",
")",
":",
"desc",
"=",
"u'attrs({!r})'",
".",
"format",
"(",
"attribute_name",
")",
"return",
"self",
".",
"map",
"(",
"lambda",
"el",
":",
"el",
".",
"get_attribute",
"(",
"attribute_name",
")",
",",
"... | Retrieve HTML attribute values from the elements matched by the query.
Example usage:
.. code:: python
# Assume that the query matches html elements:
# <div class="foo"> and <div class="bar">
>> q.attrs('class')
['foo', 'bar']
Args:
attribute_name (str): The name of the attribute values to retrieve.
Returns:
A list of attribute values for `attribute_name`. | [
"Retrieve",
"HTML",
"attribute",
"values",
"from",
"the",
"elements",
"matched",
"by",
"the",
"query",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L356-L376 |
18,454 | edx/bok-choy | bok_choy/query.py | BrowserQuery.selected | def selected(self):
"""
Check whether all the matched elements are selected.
Returns:
bool
"""
query_results = self.map(lambda el: el.is_selected(), 'selected').results
if query_results:
return all(query_results)
return False | python | def selected(self):
query_results = self.map(lambda el: el.is_selected(), 'selected').results
if query_results:
return all(query_results)
return False | [
"def",
"selected",
"(",
"self",
")",
":",
"query_results",
"=",
"self",
".",
"map",
"(",
"lambda",
"el",
":",
"el",
".",
"is_selected",
"(",
")",
",",
"'selected'",
")",
".",
"results",
"if",
"query_results",
":",
"return",
"all",
"(",
"query_results",
... | Check whether all the matched elements are selected.
Returns:
bool | [
"Check",
"whether",
"all",
"the",
"matched",
"elements",
"are",
"selected",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L417-L427 |
18,455 | edx/bok-choy | bok_choy/query.py | BrowserQuery.visible | def visible(self):
"""
Check whether all matched elements are visible.
Returns:
bool
"""
query_results = self.map(lambda el: el.is_displayed(), 'visible').results
if query_results:
return all(query_results)
return False | python | def visible(self):
query_results = self.map(lambda el: el.is_displayed(), 'visible').results
if query_results:
return all(query_results)
return False | [
"def",
"visible",
"(",
"self",
")",
":",
"query_results",
"=",
"self",
".",
"map",
"(",
"lambda",
"el",
":",
"el",
".",
"is_displayed",
"(",
")",
",",
"'visible'",
")",
".",
"results",
"if",
"query_results",
":",
"return",
"all",
"(",
"query_results",
... | Check whether all matched elements are visible.
Returns:
bool | [
"Check",
"whether",
"all",
"matched",
"elements",
"are",
"visible",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L430-L440 |
18,456 | edx/bok-choy | bok_choy/query.py | BrowserQuery.fill | def fill(self, text):
"""
Set the text value of each matched element to `text`.
Example usage:
.. code:: python
# Set the text of the first element matched by the query to "Foo"
q.first.fill('Foo')
Args:
text (str): The text used to fill the element (usually a text field or text area).
Returns:
None
"""
def _fill(elem): # pylint: disable=missing-docstring
elem.clear()
elem.send_keys(text)
self.map(_fill, u'fill({!r})'.format(text)).execute() | python | def fill(self, text):
def _fill(elem): # pylint: disable=missing-docstring
elem.clear()
elem.send_keys(text)
self.map(_fill, u'fill({!r})'.format(text)).execute() | [
"def",
"fill",
"(",
"self",
",",
"text",
")",
":",
"def",
"_fill",
"(",
"elem",
")",
":",
"# pylint: disable=missing-docstring",
"elem",
".",
"clear",
"(",
")",
"elem",
".",
"send_keys",
"(",
"text",
")",
"self",
".",
"map",
"(",
"_fill",
",",
"u'fill(... | Set the text value of each matched element to `text`.
Example usage:
.. code:: python
# Set the text of the first element matched by the query to "Foo"
q.first.fill('Foo')
Args:
text (str): The text used to fill the element (usually a text field or text area).
Returns:
None | [
"Set",
"the",
"text",
"value",
"of",
"each",
"matched",
"element",
"to",
"text",
"."
] | cdd0d423419fc0c49d56a9226533aa1490b60afc | https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L486-L507 |
18,457 | metakermit/django-spa | spa/storage.py | PatchedManifestStaticFilesStorage.url_converter | def url_converter(self, *args, **kwargs):
"""
Return the custom URL converter for the given file name.
"""
upstream_converter = super(PatchedManifestStaticFilesStorage, self).url_converter(*args, **kwargs)
def converter(matchobj):
try:
upstream_converter(matchobj)
except ValueError:
# e.g. a static file 'static/media/logo.6a30f15f.svg' could not be found
# because the upstream converter stripped 'static/' from the path
matched, url = matchobj.groups()
return matched
return converter | python | def url_converter(self, *args, **kwargs):
upstream_converter = super(PatchedManifestStaticFilesStorage, self).url_converter(*args, **kwargs)
def converter(matchobj):
try:
upstream_converter(matchobj)
except ValueError:
# e.g. a static file 'static/media/logo.6a30f15f.svg' could not be found
# because the upstream converter stripped 'static/' from the path
matched, url = matchobj.groups()
return matched
return converter | [
"def",
"url_converter",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"upstream_converter",
"=",
"super",
"(",
"PatchedManifestStaticFilesStorage",
",",
"self",
")",
".",
"url_converter",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
... | Return the custom URL converter for the given file name. | [
"Return",
"the",
"custom",
"URL",
"converter",
"for",
"the",
"given",
"file",
"name",
"."
] | dbdfa6d06c1077fade729db25b3b137d44299db6 | https://github.com/metakermit/django-spa/blob/dbdfa6d06c1077fade729db25b3b137d44299db6/spa/storage.py#L12-L27 |
18,458 | TriOptima/tri.table | lib/tri/table/__init__.py | order_by_on_list | def order_by_on_list(objects, order_field, is_desc=False):
"""
Utility function to sort objects django-style even for non-query set collections
:param objects: list of objects to sort
:param order_field: field name, follows django conventions, so "foo__bar" means `foo.bar`, can be a callable.
:param is_desc: reverse the sorting
:return:
"""
if callable(order_field):
objects.sort(key=order_field, reverse=is_desc)
return
def order_key(x):
v = getattr_path(x, order_field)
if v is None:
return MIN
return v
objects.sort(key=order_key, reverse=is_desc) | python | def order_by_on_list(objects, order_field, is_desc=False):
if callable(order_field):
objects.sort(key=order_field, reverse=is_desc)
return
def order_key(x):
v = getattr_path(x, order_field)
if v is None:
return MIN
return v
objects.sort(key=order_key, reverse=is_desc) | [
"def",
"order_by_on_list",
"(",
"objects",
",",
"order_field",
",",
"is_desc",
"=",
"False",
")",
":",
"if",
"callable",
"(",
"order_field",
")",
":",
"objects",
".",
"sort",
"(",
"key",
"=",
"order_field",
",",
"reverse",
"=",
"is_desc",
")",
"return",
... | Utility function to sort objects django-style even for non-query set collections
:param objects: list of objects to sort
:param order_field: field name, follows django conventions, so "foo__bar" means `foo.bar`, can be a callable.
:param is_desc: reverse the sorting
:return: | [
"Utility",
"function",
"to",
"sort",
"objects",
"django",
"-",
"style",
"even",
"for",
"non",
"-",
"query",
"set",
"collections"
] | fc38c02098a80a3fb336ac4cf502954d74e31484 | https://github.com/TriOptima/tri.table/blob/fc38c02098a80a3fb336ac4cf502954d74e31484/lib/tri/table/__init__.py#L153-L172 |
18,459 | TriOptima/tri.table | lib/tri/table/__init__.py | render_table | def render_table(request,
table,
links=None,
context=None,
template='tri_table/list.html',
blank_on_empty=False,
paginate_by=40, # pragma: no mutate
page=None,
paginator=None,
show_hits=False,
hit_label='Items',
post_bulk_edit=lambda table, queryset, updates: None):
"""
Render a table. This automatically handles pagination, sorting, filtering and bulk operations.
:param request: the request object. This is set on the table object so that it is available for lambda expressions.
:param table: an instance of Table
:param links: a list of instances of Link
:param context: dict of extra context parameters
:param template: if you need to render the table differently you can override this parameter with either a name of a template to load or a `Template` instance.
:param blank_on_empty: turn off the displaying of `{{ empty_message }}` in the template when the list is empty
:param show_hits: Display how many items there are total in the paginator.
:param hit_label: Label for the show_hits display.
:return: a string with the rendered HTML table
"""
if not context:
context = {}
if isinstance(table, Namespace):
table = table()
assert isinstance(table, Table), table
table.request = request
should_return, dispatch_result = handle_dispatch(request=request, obj=table)
if should_return:
return dispatch_result
context['bulk_form'] = table.bulk_form
context['query_form'] = table.query_form
context['tri_query_error'] = table.query_error
if table.bulk_form and request.method == 'POST':
if table.bulk_form.is_valid():
queryset = table.bulk_queryset()
updates = {
field.name: field.value
for field in table.bulk_form.fields
if field.value is not None and field.value != '' and field.attr is not None
}
queryset.update(**updates)
post_bulk_edit(table=table, queryset=queryset, updates=updates)
return HttpResponseRedirect(request.META['HTTP_REFERER'])
table.context = table_context(
request,
table=table,
links=links,
paginate_by=paginate_by,
page=page,
extra_context=context,
paginator=paginator,
show_hits=show_hits,
hit_label=hit_label,
)
if not table.data and blank_on_empty:
return ''
if table.query_form and not table.query_form.is_valid():
table.data = None
table.context['invalid_form_message'] = mark_safe('<i class="fa fa-meh-o fa-5x" aria-hidden="true"></i>')
return render_template(request, template, table.context) | python | def render_table(request,
table,
links=None,
context=None,
template='tri_table/list.html',
blank_on_empty=False,
paginate_by=40, # pragma: no mutate
page=None,
paginator=None,
show_hits=False,
hit_label='Items',
post_bulk_edit=lambda table, queryset, updates: None):
if not context:
context = {}
if isinstance(table, Namespace):
table = table()
assert isinstance(table, Table), table
table.request = request
should_return, dispatch_result = handle_dispatch(request=request, obj=table)
if should_return:
return dispatch_result
context['bulk_form'] = table.bulk_form
context['query_form'] = table.query_form
context['tri_query_error'] = table.query_error
if table.bulk_form and request.method == 'POST':
if table.bulk_form.is_valid():
queryset = table.bulk_queryset()
updates = {
field.name: field.value
for field in table.bulk_form.fields
if field.value is not None and field.value != '' and field.attr is not None
}
queryset.update(**updates)
post_bulk_edit(table=table, queryset=queryset, updates=updates)
return HttpResponseRedirect(request.META['HTTP_REFERER'])
table.context = table_context(
request,
table=table,
links=links,
paginate_by=paginate_by,
page=page,
extra_context=context,
paginator=paginator,
show_hits=show_hits,
hit_label=hit_label,
)
if not table.data and blank_on_empty:
return ''
if table.query_form and not table.query_form.is_valid():
table.data = None
table.context['invalid_form_message'] = mark_safe('<i class="fa fa-meh-o fa-5x" aria-hidden="true"></i>')
return render_template(request, template, table.context) | [
"def",
"render_table",
"(",
"request",
",",
"table",
",",
"links",
"=",
"None",
",",
"context",
"=",
"None",
",",
"template",
"=",
"'tri_table/list.html'",
",",
"blank_on_empty",
"=",
"False",
",",
"paginate_by",
"=",
"40",
",",
"# pragma: no mutate",
"page",
... | Render a table. This automatically handles pagination, sorting, filtering and bulk operations.
:param request: the request object. This is set on the table object so that it is available for lambda expressions.
:param table: an instance of Table
:param links: a list of instances of Link
:param context: dict of extra context parameters
:param template: if you need to render the table differently you can override this parameter with either a name of a template to load or a `Template` instance.
:param blank_on_empty: turn off the displaying of `{{ empty_message }}` in the template when the list is empty
:param show_hits: Display how many items there are total in the paginator.
:param hit_label: Label for the show_hits display.
:return: a string with the rendered HTML table | [
"Render",
"a",
"table",
".",
"This",
"automatically",
"handles",
"pagination",
"sorting",
"filtering",
"and",
"bulk",
"operations",
"."
] | fc38c02098a80a3fb336ac4cf502954d74e31484 | https://github.com/TriOptima/tri.table/blob/fc38c02098a80a3fb336ac4cf502954d74e31484/lib/tri/table/__init__.py#L1595-L1671 |
18,460 | infobloxopen/infoblox-client | infoblox_client/utils.py | generate_duid | def generate_duid(mac):
"""DUID is consisted of 10 hex numbers.
0x00 + mac with last 3 hex + mac with 6 hex
"""
valid = mac and isinstance(mac, six.string_types)
if not valid:
raise ValueError("Invalid argument was passed")
return "00:" + mac[9:] + ":" + mac | python | def generate_duid(mac):
valid = mac and isinstance(mac, six.string_types)
if not valid:
raise ValueError("Invalid argument was passed")
return "00:" + mac[9:] + ":" + mac | [
"def",
"generate_duid",
"(",
"mac",
")",
":",
"valid",
"=",
"mac",
"and",
"isinstance",
"(",
"mac",
",",
"six",
".",
"string_types",
")",
"if",
"not",
"valid",
":",
"raise",
"ValueError",
"(",
"\"Invalid argument was passed\"",
")",
"return",
"\"00:\"",
"+",... | DUID is consisted of 10 hex numbers.
0x00 + mac with last 3 hex + mac with 6 hex | [
"DUID",
"is",
"consisted",
"of",
"10",
"hex",
"numbers",
"."
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/utils.py#L41-L49 |
18,461 | infobloxopen/infoblox-client | infoblox_client/utils.py | try_value_to_bool | def try_value_to_bool(value, strict_mode=True):
"""Tries to convert value into boolean.
strict_mode is True:
- Only string representation of str(True) and str(False)
are converted into booleans;
- Otherwise unchanged incoming value is returned;
strict_mode is False:
- Anything that looks like True or False is converted into booleans.
Values accepted as True:
- 'true', 'on', 'yes' (case independent)
Values accepted as False:
- 'false', 'off', 'no' (case independent)
- all other values are returned unchanged
"""
if strict_mode:
true_list = ('True',)
false_list = ('False',)
val = value
else:
true_list = ('true', 'on', 'yes')
false_list = ('false', 'off', 'no')
val = str(value).lower()
if val in true_list:
return True
elif val in false_list:
return False
return value | python | def try_value_to_bool(value, strict_mode=True):
if strict_mode:
true_list = ('True',)
false_list = ('False',)
val = value
else:
true_list = ('true', 'on', 'yes')
false_list = ('false', 'off', 'no')
val = str(value).lower()
if val in true_list:
return True
elif val in false_list:
return False
return value | [
"def",
"try_value_to_bool",
"(",
"value",
",",
"strict_mode",
"=",
"True",
")",
":",
"if",
"strict_mode",
":",
"true_list",
"=",
"(",
"'True'",
",",
")",
"false_list",
"=",
"(",
"'False'",
",",
")",
"val",
"=",
"value",
"else",
":",
"true_list",
"=",
"... | Tries to convert value into boolean.
strict_mode is True:
- Only string representation of str(True) and str(False)
are converted into booleans;
- Otherwise unchanged incoming value is returned;
strict_mode is False:
- Anything that looks like True or False is converted into booleans.
Values accepted as True:
- 'true', 'on', 'yes' (case independent)
Values accepted as False:
- 'false', 'off', 'no' (case independent)
- all other values are returned unchanged | [
"Tries",
"to",
"convert",
"value",
"into",
"boolean",
"."
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/utils.py#L85-L114 |
18,462 | infobloxopen/infoblox-client | infoblox_client/object_manager.py | InfobloxObjectManager.create_network | def create_network(self, net_view_name, cidr, nameservers=None,
members=None, gateway_ip=None, dhcp_trel_ip=None,
network_extattrs=None):
"""Create NIOS Network and prepare DHCP options.
Some DHCP options are valid for IPv4 only, so just skip processing
them for IPv6 case.
:param net_view_name: network view name
:param cidr: network to allocate, example '172.23.23.0/24'
:param nameservers: list of name servers hosts/ip
:param members: list of objects.AnyMember objects that are expected
to serve dhcp for created network
:param gateway_ip: gateway ip for the network (valid for IPv4 only)
:param dhcp_trel_ip: ip address of dhcp relay (valid for IPv4 only)
:param network_extattrs: extensible attributes for network (instance of
objects.EA)
:returns: created network (instance of objects.Network)
"""
ipv4 = ib_utils.determine_ip_version(cidr) == 4
options = []
if nameservers:
options.append(obj.DhcpOption(name='domain-name-servers',
value=",".join(nameservers)))
if ipv4 and gateway_ip:
options.append(obj.DhcpOption(name='routers',
value=gateway_ip))
if ipv4 and dhcp_trel_ip:
options.append(obj.DhcpOption(name='dhcp-server-identifier',
num=54,
value=dhcp_trel_ip))
return obj.Network.create(self.connector,
network_view=net_view_name,
cidr=cidr,
members=members,
options=options,
extattrs=network_extattrs,
check_if_exists=False) | python | def create_network(self, net_view_name, cidr, nameservers=None,
members=None, gateway_ip=None, dhcp_trel_ip=None,
network_extattrs=None):
ipv4 = ib_utils.determine_ip_version(cidr) == 4
options = []
if nameservers:
options.append(obj.DhcpOption(name='domain-name-servers',
value=",".join(nameservers)))
if ipv4 and gateway_ip:
options.append(obj.DhcpOption(name='routers',
value=gateway_ip))
if ipv4 and dhcp_trel_ip:
options.append(obj.DhcpOption(name='dhcp-server-identifier',
num=54,
value=dhcp_trel_ip))
return obj.Network.create(self.connector,
network_view=net_view_name,
cidr=cidr,
members=members,
options=options,
extattrs=network_extattrs,
check_if_exists=False) | [
"def",
"create_network",
"(",
"self",
",",
"net_view_name",
",",
"cidr",
",",
"nameservers",
"=",
"None",
",",
"members",
"=",
"None",
",",
"gateway_ip",
"=",
"None",
",",
"dhcp_trel_ip",
"=",
"None",
",",
"network_extattrs",
"=",
"None",
")",
":",
"ipv4",... | Create NIOS Network and prepare DHCP options.
Some DHCP options are valid for IPv4 only, so just skip processing
them for IPv6 case.
:param net_view_name: network view name
:param cidr: network to allocate, example '172.23.23.0/24'
:param nameservers: list of name servers hosts/ip
:param members: list of objects.AnyMember objects that are expected
to serve dhcp for created network
:param gateway_ip: gateway ip for the network (valid for IPv4 only)
:param dhcp_trel_ip: ip address of dhcp relay (valid for IPv4 only)
:param network_extattrs: extensible attributes for network (instance of
objects.EA)
:returns: created network (instance of objects.Network) | [
"Create",
"NIOS",
"Network",
"and",
"prepare",
"DHCP",
"options",
"."
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/object_manager.py#L58-L96 |
18,463 | infobloxopen/infoblox-client | infoblox_client/object_manager.py | InfobloxObjectManager.create_ip_range | def create_ip_range(self, network_view, start_ip, end_ip, network,
disable, range_extattrs):
"""Creates IPRange or fails if already exists."""
return obj.IPRange.create(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip,
cidr=network,
disable=disable,
extattrs=range_extattrs,
check_if_exists=False) | python | def create_ip_range(self, network_view, start_ip, end_ip, network,
disable, range_extattrs):
return obj.IPRange.create(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip,
cidr=network,
disable=disable,
extattrs=range_extattrs,
check_if_exists=False) | [
"def",
"create_ip_range",
"(",
"self",
",",
"network_view",
",",
"start_ip",
",",
"end_ip",
",",
"network",
",",
"disable",
",",
"range_extattrs",
")",
":",
"return",
"obj",
".",
"IPRange",
".",
"create",
"(",
"self",
".",
"connector",
",",
"network_view",
... | Creates IPRange or fails if already exists. | [
"Creates",
"IPRange",
"or",
"fails",
"if",
"already",
"exists",
"."
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/object_manager.py#L103-L113 |
18,464 | infobloxopen/infoblox-client | infoblox_client/connector.py | Connector._parse_options | def _parse_options(self, options):
"""Copy needed options to self"""
attributes = ('host', 'wapi_version', 'username', 'password',
'ssl_verify', 'http_request_timeout', 'max_retries',
'http_pool_connections', 'http_pool_maxsize',
'silent_ssl_warnings', 'log_api_calls_as_info',
'max_results', 'paging')
for attr in attributes:
if isinstance(options, dict) and attr in options:
setattr(self, attr, options[attr])
elif hasattr(options, attr):
value = getattr(options, attr)
setattr(self, attr, value)
elif attr in self.DEFAULT_OPTIONS:
setattr(self, attr, self.DEFAULT_OPTIONS[attr])
else:
msg = "WAPI config error. Option %s is not defined" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
for attr in ('host', 'username', 'password'):
if not getattr(self, attr):
msg = "WAPI config error. Option %s can not be blank" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
self.wapi_url = "https://%s/wapi/v%s/" % (self.host,
self.wapi_version)
self.cloud_api_enabled = self.is_cloud_wapi(self.wapi_version) | python | def _parse_options(self, options):
attributes = ('host', 'wapi_version', 'username', 'password',
'ssl_verify', 'http_request_timeout', 'max_retries',
'http_pool_connections', 'http_pool_maxsize',
'silent_ssl_warnings', 'log_api_calls_as_info',
'max_results', 'paging')
for attr in attributes:
if isinstance(options, dict) and attr in options:
setattr(self, attr, options[attr])
elif hasattr(options, attr):
value = getattr(options, attr)
setattr(self, attr, value)
elif attr in self.DEFAULT_OPTIONS:
setattr(self, attr, self.DEFAULT_OPTIONS[attr])
else:
msg = "WAPI config error. Option %s is not defined" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
for attr in ('host', 'username', 'password'):
if not getattr(self, attr):
msg = "WAPI config error. Option %s can not be blank" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
self.wapi_url = "https://%s/wapi/v%s/" % (self.host,
self.wapi_version)
self.cloud_api_enabled = self.is_cloud_wapi(self.wapi_version) | [
"def",
"_parse_options",
"(",
"self",
",",
"options",
")",
":",
"attributes",
"=",
"(",
"'host'",
",",
"'wapi_version'",
",",
"'username'",
",",
"'password'",
",",
"'ssl_verify'",
",",
"'http_request_timeout'",
",",
"'max_retries'",
",",
"'http_pool_connections'",
... | Copy needed options to self | [
"Copy",
"needed",
"options",
"to",
"self"
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L89-L115 |
18,465 | infobloxopen/infoblox-client | infoblox_client/connector.py | Connector._parse_reply | def _parse_reply(request):
"""Tries to parse reply from NIOS.
Raises exception with content if reply is not in json format
"""
try:
return jsonutils.loads(request.content)
except ValueError:
raise ib_ex.InfobloxConnectionError(reason=request.content) | python | def _parse_reply(request):
try:
return jsonutils.loads(request.content)
except ValueError:
raise ib_ex.InfobloxConnectionError(reason=request.content) | [
"def",
"_parse_reply",
"(",
"request",
")",
":",
"try",
":",
"return",
"jsonutils",
".",
"loads",
"(",
"request",
".",
"content",
")",
"except",
"ValueError",
":",
"raise",
"ib_ex",
".",
"InfobloxConnectionError",
"(",
"reason",
"=",
"request",
".",
"content... | Tries to parse reply from NIOS.
Raises exception with content if reply is not in json format | [
"Tries",
"to",
"parse",
"reply",
"from",
"NIOS",
"."
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L212-L220 |
18,466 | infobloxopen/infoblox-client | infoblox_client/connector.py | Connector.get_object | def get_object(self, obj_type, payload=None, return_fields=None,
extattrs=None, force_proxy=False, max_results=None,
paging=False):
"""Retrieve a list of Infoblox objects of type 'obj_type'
Some get requests like 'ipv4address' should be always
proxied to GM on Hellfire
If request is cloud and proxy is not forced yet,
then plan to do 2 request:
- the first one is not proxied to GM
- the second is proxied to GM
Args:
obj_type (str): Infoblox object type, e.g. 'network',
'range', etc.
payload (dict): Payload with data to send
return_fields (list): List of fields to be returned
extattrs (dict): List of Extensible Attributes
force_proxy (bool): Set _proxy_search flag
to process requests on GM
max_results (int): Maximum number of objects to be returned.
If set to a negative number the appliance will return an error
when the number of returned objects would exceed the setting.
The default is -1000. If this is set to a positive number,
the results will be truncated when necessary.
paging (bool): Enables paging to wapi calls if paging = True,
it uses _max_results to set paging size of the wapi calls.
If _max_results is negative it will take paging size as 1000.
Returns:
A list of the Infoblox objects requested
Raises:
InfobloxObjectNotFound
"""
self._validate_obj_type_or_die(obj_type, obj_type_expected=False)
# max_results passed to get_object has priority over
# one defined as connector option
if max_results is None and self.max_results:
max_results = self.max_results
if paging is False and self.paging:
paging = self.paging
query_params = self._build_query_params(payload=payload,
return_fields=return_fields,
max_results=max_results,
paging=paging)
# Clear proxy flag if wapi version is too old (non-cloud)
proxy_flag = self.cloud_api_enabled and force_proxy
ib_object = self._handle_get_object(obj_type, query_params, extattrs,
proxy_flag)
if ib_object:
return ib_object
# Do second get call with force_proxy if not done yet
if self.cloud_api_enabled and not force_proxy:
ib_object = self._handle_get_object(obj_type, query_params,
extattrs, proxy_flag=True)
if ib_object:
return ib_object
return None | python | def get_object(self, obj_type, payload=None, return_fields=None,
extattrs=None, force_proxy=False, max_results=None,
paging=False):
self._validate_obj_type_or_die(obj_type, obj_type_expected=False)
# max_results passed to get_object has priority over
# one defined as connector option
if max_results is None and self.max_results:
max_results = self.max_results
if paging is False and self.paging:
paging = self.paging
query_params = self._build_query_params(payload=payload,
return_fields=return_fields,
max_results=max_results,
paging=paging)
# Clear proxy flag if wapi version is too old (non-cloud)
proxy_flag = self.cloud_api_enabled and force_proxy
ib_object = self._handle_get_object(obj_type, query_params, extattrs,
proxy_flag)
if ib_object:
return ib_object
# Do second get call with force_proxy if not done yet
if self.cloud_api_enabled and not force_proxy:
ib_object = self._handle_get_object(obj_type, query_params,
extattrs, proxy_flag=True)
if ib_object:
return ib_object
return None | [
"def",
"get_object",
"(",
"self",
",",
"obj_type",
",",
"payload",
"=",
"None",
",",
"return_fields",
"=",
"None",
",",
"extattrs",
"=",
"None",
",",
"force_proxy",
"=",
"False",
",",
"max_results",
"=",
"None",
",",
"paging",
"=",
"False",
")",
":",
"... | Retrieve a list of Infoblox objects of type 'obj_type'
Some get requests like 'ipv4address' should be always
proxied to GM on Hellfire
If request is cloud and proxy is not forced yet,
then plan to do 2 request:
- the first one is not proxied to GM
- the second is proxied to GM
Args:
obj_type (str): Infoblox object type, e.g. 'network',
'range', etc.
payload (dict): Payload with data to send
return_fields (list): List of fields to be returned
extattrs (dict): List of Extensible Attributes
force_proxy (bool): Set _proxy_search flag
to process requests on GM
max_results (int): Maximum number of objects to be returned.
If set to a negative number the appliance will return an error
when the number of returned objects would exceed the setting.
The default is -1000. If this is set to a positive number,
the results will be truncated when necessary.
paging (bool): Enables paging to wapi calls if paging = True,
it uses _max_results to set paging size of the wapi calls.
If _max_results is negative it will take paging size as 1000.
Returns:
A list of the Infoblox objects requested
Raises:
InfobloxObjectNotFound | [
"Retrieve",
"a",
"list",
"of",
"Infoblox",
"objects",
"of",
"type",
"obj_type"
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L231-L293 |
18,467 | infobloxopen/infoblox-client | infoblox_client/connector.py | Connector.create_object | def create_object(self, obj_type, payload, return_fields=None):
"""Create an Infoblox object of type 'obj_type'
Args:
obj_type (str): Infoblox object type,
e.g. 'network', 'range', etc.
payload (dict): Payload with data to send
return_fields (list): List of fields to be returned
Returns:
The object reference of the newly create object
Raises:
InfobloxException
"""
self._validate_obj_type_or_die(obj_type)
query_params = self._build_query_params(return_fields=return_fields)
url = self._construct_url(obj_type, query_params)
opts = self._get_request_options(data=payload)
self._log_request('post', url, opts)
if(self.session.cookies):
# the first 'get' or 'post' action will generate a cookie
# after that, we don't need to re-authenticate
self.session.auth = None
r = self.session.post(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.CREATED:
response = utils.safe_json_load(r.content)
already_assigned = 'is assigned to another network view'
if response and already_assigned in response.get('text'):
exception = ib_ex.InfobloxMemberAlreadyAssigned
else:
exception = ib_ex.InfobloxCannotCreateObject
raise exception(
response=response,
obj_type=obj_type,
content=r.content,
args=payload,
code=r.status_code)
return self._parse_reply(r) | python | def create_object(self, obj_type, payload, return_fields=None):
self._validate_obj_type_or_die(obj_type)
query_params = self._build_query_params(return_fields=return_fields)
url = self._construct_url(obj_type, query_params)
opts = self._get_request_options(data=payload)
self._log_request('post', url, opts)
if(self.session.cookies):
# the first 'get' or 'post' action will generate a cookie
# after that, we don't need to re-authenticate
self.session.auth = None
r = self.session.post(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.CREATED:
response = utils.safe_json_load(r.content)
already_assigned = 'is assigned to another network view'
if response and already_assigned in response.get('text'):
exception = ib_ex.InfobloxMemberAlreadyAssigned
else:
exception = ib_ex.InfobloxCannotCreateObject
raise exception(
response=response,
obj_type=obj_type,
content=r.content,
args=payload,
code=r.status_code)
return self._parse_reply(r) | [
"def",
"create_object",
"(",
"self",
",",
"obj_type",
",",
"payload",
",",
"return_fields",
"=",
"None",
")",
":",
"self",
".",
"_validate_obj_type_or_die",
"(",
"obj_type",
")",
"query_params",
"=",
"self",
".",
"_build_query_params",
"(",
"return_fields",
"=",... | Create an Infoblox object of type 'obj_type'
Args:
obj_type (str): Infoblox object type,
e.g. 'network', 'range', etc.
payload (dict): Payload with data to send
return_fields (list): List of fields to be returned
Returns:
The object reference of the newly create object
Raises:
InfobloxException | [
"Create",
"an",
"Infoblox",
"object",
"of",
"type",
"obj_type"
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L345-L387 |
18,468 | infobloxopen/infoblox-client | infoblox_client/connector.py | Connector.update_object | def update_object(self, ref, payload, return_fields=None):
"""Update an Infoblox object
Args:
ref (str): Infoblox object reference
payload (dict): Payload with data to send
Returns:
The object reference of the updated object
Raises:
InfobloxException
"""
query_params = self._build_query_params(return_fields=return_fields)
opts = self._get_request_options(data=payload)
url = self._construct_url(ref, query_params)
self._log_request('put', url, opts)
r = self.session.put(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
self._check_service_availability('update', r, ref)
raise ib_ex.InfobloxCannotUpdateObject(
response=jsonutils.loads(r.content),
ref=ref,
content=r.content,
code=r.status_code)
return self._parse_reply(r) | python | def update_object(self, ref, payload, return_fields=None):
query_params = self._build_query_params(return_fields=return_fields)
opts = self._get_request_options(data=payload)
url = self._construct_url(ref, query_params)
self._log_request('put', url, opts)
r = self.session.put(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
self._check_service_availability('update', r, ref)
raise ib_ex.InfobloxCannotUpdateObject(
response=jsonutils.loads(r.content),
ref=ref,
content=r.content,
code=r.status_code)
return self._parse_reply(r) | [
"def",
"update_object",
"(",
"self",
",",
"ref",
",",
"payload",
",",
"return_fields",
"=",
"None",
")",
":",
"query_params",
"=",
"self",
".",
"_build_query_params",
"(",
"return_fields",
"=",
"return_fields",
")",
"opts",
"=",
"self",
".",
"_get_request_opti... | Update an Infoblox object
Args:
ref (str): Infoblox object reference
payload (dict): Payload with data to send
Returns:
The object reference of the updated object
Raises:
InfobloxException | [
"Update",
"an",
"Infoblox",
"object"
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L424-L453 |
18,469 | infobloxopen/infoblox-client | infoblox_client/connector.py | Connector.delete_object | def delete_object(self, ref, delete_arguments=None):
"""Remove an Infoblox object
Args:
ref (str): Object reference
delete_arguments (dict): Extra delete arguments
Returns:
The object reference of the removed object
Raises:
InfobloxException
"""
opts = self._get_request_options()
if not isinstance(delete_arguments, dict):
delete_arguments = {}
url = self._construct_url(ref, query_params=delete_arguments)
self._log_request('delete', url, opts)
r = self.session.delete(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
self._check_service_availability('delete', r, ref)
raise ib_ex.InfobloxCannotDeleteObject(
response=jsonutils.loads(r.content),
ref=ref,
content=r.content,
code=r.status_code)
return self._parse_reply(r) | python | def delete_object(self, ref, delete_arguments=None):
opts = self._get_request_options()
if not isinstance(delete_arguments, dict):
delete_arguments = {}
url = self._construct_url(ref, query_params=delete_arguments)
self._log_request('delete', url, opts)
r = self.session.delete(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
self._check_service_availability('delete', r, ref)
raise ib_ex.InfobloxCannotDeleteObject(
response=jsonutils.loads(r.content),
ref=ref,
content=r.content,
code=r.status_code)
return self._parse_reply(r) | [
"def",
"delete_object",
"(",
"self",
",",
"ref",
",",
"delete_arguments",
"=",
"None",
")",
":",
"opts",
"=",
"self",
".",
"_get_request_options",
"(",
")",
"if",
"not",
"isinstance",
"(",
"delete_arguments",
",",
"dict",
")",
":",
"delete_arguments",
"=",
... | Remove an Infoblox object
Args:
ref (str): Object reference
delete_arguments (dict): Extra delete arguments
Returns:
The object reference of the removed object
Raises:
InfobloxException | [
"Remove",
"an",
"Infoblox",
"object"
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L456-L485 |
18,470 | infobloxopen/infoblox-client | infoblox_client/objects.py | BaseObject._remap_fields | def _remap_fields(cls, kwargs):
"""Map fields from kwargs into dict acceptable by NIOS"""
mapped = {}
for key in kwargs:
if key in cls._remap:
mapped[cls._remap[key]] = kwargs[key]
else:
mapped[key] = kwargs[key]
return mapped | python | def _remap_fields(cls, kwargs):
mapped = {}
for key in kwargs:
if key in cls._remap:
mapped[cls._remap[key]] = kwargs[key]
else:
mapped[key] = kwargs[key]
return mapped | [
"def",
"_remap_fields",
"(",
"cls",
",",
"kwargs",
")",
":",
"mapped",
"=",
"{",
"}",
"for",
"key",
"in",
"kwargs",
":",
"if",
"key",
"in",
"cls",
".",
"_remap",
":",
"mapped",
"[",
"cls",
".",
"_remap",
"[",
"key",
"]",
"]",
"=",
"kwargs",
"[",
... | Map fields from kwargs into dict acceptable by NIOS | [
"Map",
"fields",
"from",
"kwargs",
"into",
"dict",
"acceptable",
"by",
"NIOS"
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L87-L95 |
18,471 | infobloxopen/infoblox-client | infoblox_client/objects.py | EA.from_dict | def from_dict(cls, eas_from_nios):
"""Converts extensible attributes from the NIOS reply."""
if not eas_from_nios:
return
return cls({name: cls._process_value(ib_utils.try_value_to_bool,
eas_from_nios[name]['value'])
for name in eas_from_nios}) | python | def from_dict(cls, eas_from_nios):
if not eas_from_nios:
return
return cls({name: cls._process_value(ib_utils.try_value_to_bool,
eas_from_nios[name]['value'])
for name in eas_from_nios}) | [
"def",
"from_dict",
"(",
"cls",
",",
"eas_from_nios",
")",
":",
"if",
"not",
"eas_from_nios",
":",
"return",
"return",
"cls",
"(",
"{",
"name",
":",
"cls",
".",
"_process_value",
"(",
"ib_utils",
".",
"try_value_to_bool",
",",
"eas_from_nios",
"[",
"name",
... | Converts extensible attributes from the NIOS reply. | [
"Converts",
"extensible",
"attributes",
"from",
"the",
"NIOS",
"reply",
"."
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L141-L147 |
18,472 | infobloxopen/infoblox-client | infoblox_client/objects.py | EA.to_dict | def to_dict(self):
"""Converts extensible attributes into the format suitable for NIOS."""
return {name: {'value': self._process_value(str, value)}
for name, value in self._ea_dict.items()
if not (value is None or value == "" or value == [])} | python | def to_dict(self):
return {name: {'value': self._process_value(str, value)}
for name, value in self._ea_dict.items()
if not (value is None or value == "" or value == [])} | [
"def",
"to_dict",
"(",
"self",
")",
":",
"return",
"{",
"name",
":",
"{",
"'value'",
":",
"self",
".",
"_process_value",
"(",
"str",
",",
"value",
")",
"}",
"for",
"name",
",",
"value",
"in",
"self",
".",
"_ea_dict",
".",
"items",
"(",
")",
"if",
... | Converts extensible attributes into the format suitable for NIOS. | [
"Converts",
"extensible",
"attributes",
"into",
"the",
"format",
"suitable",
"for",
"NIOS",
"."
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L149-L153 |
18,473 | infobloxopen/infoblox-client | infoblox_client/objects.py | EA._process_value | def _process_value(func, value):
"""Applies processing method for value or each element in it.
:param func: method to be called with value
:param value: value to process
:return: if 'value' is list/tupe, returns iterable with func results,
else func result is returned
"""
if isinstance(value, (list, tuple)):
return [func(item) for item in value]
return func(value) | python | def _process_value(func, value):
if isinstance(value, (list, tuple)):
return [func(item) for item in value]
return func(value) | [
"def",
"_process_value",
"(",
"func",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"[",
"func",
"(",
"item",
")",
"for",
"item",
"in",
"value",
"]",
"return",
"func",
"(",
"value... | Applies processing method for value or each element in it.
:param func: method to be called with value
:param value: value to process
:return: if 'value' is list/tupe, returns iterable with func results,
else func result is returned | [
"Applies",
"processing",
"method",
"for",
"value",
"or",
"each",
"element",
"in",
"it",
"."
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L156-L166 |
18,474 | infobloxopen/infoblox-client | infoblox_client/objects.py | InfobloxObject.from_dict | def from_dict(cls, connector, ip_dict):
"""Build dict fields as SubObjects if needed.
Checks if lambda for building object from dict exists.
_global_field_processing and _custom_field_processing rules
are checked.
"""
mapping = cls._global_field_processing.copy()
mapping.update(cls._custom_field_processing)
# Process fields that require building themselves as objects
for field in mapping:
if field in ip_dict:
ip_dict[field] = mapping[field](ip_dict[field])
return cls(connector, **ip_dict) | python | def from_dict(cls, connector, ip_dict):
mapping = cls._global_field_processing.copy()
mapping.update(cls._custom_field_processing)
# Process fields that require building themselves as objects
for field in mapping:
if field in ip_dict:
ip_dict[field] = mapping[field](ip_dict[field])
return cls(connector, **ip_dict) | [
"def",
"from_dict",
"(",
"cls",
",",
"connector",
",",
"ip_dict",
")",
":",
"mapping",
"=",
"cls",
".",
"_global_field_processing",
".",
"copy",
"(",
")",
"mapping",
".",
"update",
"(",
"cls",
".",
"_custom_field_processing",
")",
"# Process fields that require ... | Build dict fields as SubObjects if needed.
Checks if lambda for building object from dict exists.
_global_field_processing and _custom_field_processing rules
are checked. | [
"Build",
"dict",
"fields",
"as",
"SubObjects",
"if",
"needed",
"."
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L243-L256 |
def field_to_dict(self, field):
    """Return the value of *field*, converted to dict form when possible.

    List/tuple values are converted element-wise; anything else is
    passed to value_to_dict directly.
    """
    raw = getattr(self, field)
    if not isinstance(raw, (list, tuple)):
        return self.value_to_dict(raw)
    return [self.value_to_dict(item) for item in raw]
value = getattr(self, field)
if isinstance(value, (list, tuple)):
return [self.value_to_dict(val) for val in value]
return self.value_to_dict(value) | [
"def",
"field_to_dict",
"(",
"self",
",",
"field",
")",
":",
"value",
"=",
"getattr",
"(",
"self",
",",
"field",
")",
"if",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"[",
"self",
".",
"value_to_dict",
"(",
... | Read field value and converts to dict if possible | [
"Read",
"field",
"value",
"and",
"converts",
"to",
"dict",
"if",
"possible"
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L262-L267 |
def to_dict(self, search_fields=None):
    """Serialize all non-None fields into a plain dict.

    search_fields selects which declared field list is serialized:
    'update' (search-for-update fields), 'all' (all searchable fields),
    'exclude' (everything except update-search fields, unless they are
    explicitly updateable), or None for every declared field.
    """
    if search_fields == 'update':
        selected = self._search_for_update_fields
    elif search_fields == 'all':
        selected = self._all_searchable_fields
    elif search_fields == 'exclude':
        # Exclude search fields for update actions,
        # but include updateable_search_fields.
        selected = [name for name in self._fields
                    if name in self._updateable_search_fields
                    or name not in self._search_for_update_fields]
    else:
        selected = self._fields

    serialized = {}
    for name in selected:
        if getattr(self, name, None) is not None:
            serialized[name] = self.field_to_dict(name)
    return serialized
fields = self._fields
if search_fields == 'update':
fields = self._search_for_update_fields
elif search_fields == 'all':
fields = self._all_searchable_fields
elif search_fields == 'exclude':
# exclude search fields for update actions,
# but include updateable_search_fields
fields = [field for field in self._fields
if field in self._updateable_search_fields or
field not in self._search_for_update_fields]
return {field: self.field_to_dict(field) for field in fields
if getattr(self, field, None) is not None} | [
"def",
"to_dict",
"(",
"self",
",",
"search_fields",
"=",
"None",
")",
":",
"fields",
"=",
"self",
".",
"_fields",
"if",
"search_fields",
"==",
"'update'",
":",
"fields",
"=",
"self",
".",
"_search_for_update_fields",
"elif",
"search_fields",
"==",
"'all'",
... | Builds dict without None object fields | [
"Builds",
"dict",
"without",
"None",
"object",
"fields"
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L269-L284 |
def fetch(self, only_ref=False):
    """Fetch the object from NIOS by _ref or by its search fields.

    Updates this object in place with the fields returned from NIOS.
    Returns True when the object was found and updated.
    """
    if self.ref:
        data = self.connector.get_object(
            self.ref, return_fields=self.return_fields)
        if data:
            self.update_from_dict(data)
            return True

    search_dict = self.to_dict(search_fields='update')
    wanted_fields = [] if only_ref else self.return_fields
    data = self.connector.get_object(self.infoblox_type,
                                     search_dict,
                                     return_fields=wanted_fields)
    if not data:
        return False
    self.update_from_dict(data[0], only_ref=only_ref)
    return True
if self.ref:
reply = self.connector.get_object(
self.ref, return_fields=self.return_fields)
if reply:
self.update_from_dict(reply)
return True
search_dict = self.to_dict(search_fields='update')
return_fields = [] if only_ref else self.return_fields
reply = self.connector.get_object(self.infoblox_type,
search_dict,
return_fields=return_fields)
if reply:
self.update_from_dict(reply[0], only_ref=only_ref)
return True
return False | [
"def",
"fetch",
"(",
"self",
",",
"only_ref",
"=",
"False",
")",
":",
"if",
"self",
".",
"ref",
":",
"reply",
"=",
"self",
".",
"connector",
".",
"get_object",
"(",
"self",
".",
"ref",
",",
"return_fields",
"=",
"self",
".",
"return_fields",
")",
"if... | Fetch object from NIOS by _ref or searchfields
Update existent object with fields returned from NIOS
Return True on successful object fetch | [
"Fetch",
"object",
"from",
"NIOS",
"by",
"_ref",
"or",
"searchfields"
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L378-L399 |
def _ip_setter(self, ipaddr_name, ipaddrs_name, ips):
    """Populate the ip address fields from a string, IP, or list of IPs.

    A plain string fills only ipvXaddr, which is enough to perform a
    host record search by ip. An IP instance or a list of IP instances
    fills ipvXaddrs with the full objects (so create can be issued) and
    ipvXaddr with the first address (so NIOS can be searched to verify
    that no such host record exists yet).
    """
    if isinstance(ips, six.string_types):
        setattr(self, ipaddr_name, ips)
    elif isinstance(ips, IP):
        setattr(self, ipaddr_name, ips.ip)
        setattr(self, ipaddrs_name, [ips])
    elif isinstance(ips, (list, tuple)) and isinstance(ips[0], IP):
        setattr(self, ipaddr_name, ips[0].ip)
        setattr(self, ipaddrs_name, ips)
    elif ips is None:
        setattr(self, ipaddr_name, None)
        setattr(self, ipaddrs_name, None)
    else:
        raise ValueError(
            "Invalid format of ip passed in: %s."
            "Should be string or list of NIOS IP objects." % ips)
if isinstance(ips, six.string_types):
setattr(self, ipaddr_name, ips)
elif isinstance(ips, (list, tuple)) and isinstance(ips[0], IP):
setattr(self, ipaddr_name, ips[0].ip)
setattr(self, ipaddrs_name, ips)
elif isinstance(ips, IP):
setattr(self, ipaddr_name, ips.ip)
setattr(self, ipaddrs_name, [ips])
elif ips is None:
setattr(self, ipaddr_name, None)
setattr(self, ipaddrs_name, None)
else:
raise ValueError(
"Invalid format of ip passed in: %s."
"Should be string or list of NIOS IP objects." % ips) | [
"def",
"_ip_setter",
"(",
"self",
",",
"ipaddr_name",
",",
"ipaddrs_name",
",",
"ips",
")",
":",
"if",
"isinstance",
"(",
"ips",
",",
"six",
".",
"string_types",
")",
":",
"setattr",
"(",
"self",
",",
"ipaddr_name",
",",
"ips",
")",
"elif",
"isinstance",... | Setter for ip fields
Accept as input string or list of IP instances.
String case:
only ipvXaddr is going to be filled, that is enough to perform
host record search using ip
List of IP instances case:
ipvXaddrs is going to be filled with ips content,
so create can be issues, since fully prepared IP objects in place.
ipXaddr is also filled to be able perform search on NIOS
and verify that no such host record exists yet. | [
"Setter",
"for",
"ip",
"fields"
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L527-L554 |
def mac(self, mac):
    """Set the mac field, deriving duid from it as a side effect.

    Keeps a common interface with FixedAddress: accepts a mac address
    and computes duid from it. 'mac' lives in _shadow_fields so it is
    never sent out over wapi.
    """
    self._mac = mac
    if not mac:
        # Keep an already-computed duid; only default it when missing.
        if not hasattr(self, 'duid'):
            self.duid = None
        return
    self.duid = ib_utils.generate_duid(mac)
self._mac = mac
if mac:
self.duid = ib_utils.generate_duid(mac)
elif not hasattr(self, 'duid'):
self.duid = None | [
"def",
"mac",
"(",
"self",
",",
"mac",
")",
":",
"self",
".",
"_mac",
"=",
"mac",
"if",
"mac",
":",
"self",
".",
"duid",
"=",
"ib_utils",
".",
"generate_duid",
"(",
"mac",
")",
"elif",
"not",
"hasattr",
"(",
"self",
",",
"'duid'",
")",
":",
"self... | Set mac and duid fields
To have common interface with FixedAddress accept mac address
and set duid as a side effect.
'mac' was added to _shadow_fields to prevent sending it out over wapi. | [
"Set",
"mac",
"and",
"duid",
"fields"
] | edeec62db1935784c728731b2ae7cf0fcc9bf84d | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L821-L832 |
def render_property(property):
    """Render a property for the bosh manifest, according to its type.

    Typed properties listed in PROPERTY_FIELDS expand into a dict of
    sub-field references (.properties.<name>.<field>); reference
    properties render their literal default; everything else renders
    a single .properties.<name>.value reference.
    See https://docs.pivotal.io/partners/product-template-reference.html
    for the list of property types.
    """
    name = property['name']
    if 'type' in property and property['type'] in PROPERTY_FIELDS:
        rendered_fields = {}
        for spec in PROPERTY_FIELDS[property['type']]:
            # A tuple maps an output key to a differently-named source field.
            if type(spec) is tuple:
                out_key, source = spec
            else:
                out_key = source = spec
            rendered_fields[out_key] = '(( .properties.{}.{} ))'.format(name, source)
        return {name: rendered_fields}
    if property.get('is_reference', False):
        return {name: property['default']}
    return {name: '(( .properties.{}.value ))'.format(name)}
# This ain't the prettiest thing, but it should get the job done.
# I don't think we have anything more elegant available at bosh-manifest-generation time.
# See https://docs.pivotal.io/partners/product-template-reference.html for list.
if 'type' in property and property['type'] in PROPERTY_FIELDS:
fields = {}
for field in PROPERTY_FIELDS[property['type']]:
if type(field) is tuple:
fields[field[0]] = '(( .properties.{}.{} ))'.format(property['name'], field[1])
else:
fields[field] = '(( .properties.{}.{} ))'.format(property['name'], field)
out = { property['name']: fields }
else:
if property.get('is_reference', False):
out = { property['name']: property['default'] }
else:
out = { property['name']: '(( .properties.{}.value ))'.format(property['name']) }
return out | [
"def",
"render_property",
"(",
"property",
")",
":",
"# This ain't the prettiest thing, but it should get the job done.",
"# I don't think we have anything more elegant available at bosh-manifest-generation time.",
"# See https://docs.pivotal.io/partners/product-template-reference.html for list.",
... | Render a property for bosh manifest, according to its type. | [
"Render",
"a",
"property",
"for",
"bosh",
"manifest",
"according",
"to",
"its",
"type",
"."
] | 56b602334edb38639bc7e01b1e9e68e43f9e6828 | https://github.com/cf-platform-eng/tile-generator/blob/56b602334edb38639bc7e01b1e9e68e43f9e6828/tile_generator/template.py#L152-L170 |
def match(obj, matchers=TYPES):
    """
    Matches the given input against the available
    file type matchers.

    Args:
        obj: path to file, bytes or bytearray.

    Returns:
        Type instance if a matcher accepts the input. Otherwise None.

    Raises:
        TypeError: if obj is not a supported type.
    """
    buf = get_bytes(obj)
    return next((kind for kind in matchers if kind.match(buf)), None)
buf = get_bytes(obj)
for matcher in matchers:
if matcher.match(buf):
return matcher
return None | [
"def",
"match",
"(",
"obj",
",",
"matchers",
"=",
"TYPES",
")",
":",
"buf",
"=",
"get_bytes",
"(",
"obj",
")",
"for",
"matcher",
"in",
"matchers",
":",
"if",
"matcher",
".",
"match",
"(",
"buf",
")",
":",
"return",
"matcher",
"return",
"None"
] | Matches the given input againts the available
file type matchers.
Args:
obj: path to file, bytes or bytearray.
Returns:
Type instance if type matches. Otherwise None.
Raises:
TypeError: if obj is not a supported type. | [
"Matches",
"the",
"given",
"input",
"againts",
"the",
"available",
"file",
"type",
"matchers",
"."
] | 37e7fd1a9eed1a9eab55ac43f62da98f10970675 | https://github.com/h2non/filetype.py/blob/37e7fd1a9eed1a9eab55ac43f62da98f10970675/filetype/match.py#L14-L34 |
def signature(array):
    """
    Returns the header-signature prefix of the given bytearray.

    Args:
        array: bytearray to extract the header signature.

    Returns:
        At most the first 262 bytes of the file content, same type as
        the input.
    """
    cutoff = min(len(array), _NUM_SIGNATURE_BYTES)
    return array[:cutoff]
length = len(array)
index = _NUM_SIGNATURE_BYTES if length > _NUM_SIGNATURE_BYTES else length
return array[:index] | [
"def",
"signature",
"(",
"array",
")",
":",
"length",
"=",
"len",
"(",
"array",
")",
"index",
"=",
"_NUM_SIGNATURE_BYTES",
"if",
"length",
">",
"_NUM_SIGNATURE_BYTES",
"else",
"length",
"return",
"array",
"[",
":",
"index",
"]"
] | Returns the first 262 bytes of the given bytearray
as part of the file header signature.
Args:
array: bytearray to extract the header signature.
Returns:
First 262 bytes of the file content as bytearray type. | [
"Returns",
"the",
"first",
"262",
"bytes",
"of",
"the",
"given",
"bytearray",
"as",
"part",
"of",
"the",
"file",
"header",
"signature",
"."
] | 37e7fd1a9eed1a9eab55ac43f62da98f10970675 | https://github.com/h2non/filetype.py/blob/37e7fd1a9eed1a9eab55ac43f62da98f10970675/filetype/utils.py#L21-L35 |
def get_bytes(obj):
    """
    Infers the input type and reads the first 262 bytes,
    returning a sliced bytearray.

    Args:
        obj: path to readable, file, bytes or bytearray.

    Returns:
        First 262 bytes of the file content (a list of ints for
        memoryview input).

    Raises:
        TypeError: if obj is not a supported type.
    """
    try:
        # File-like duck typing: read a signature-sized chunk first.
        obj = obj.read(_NUM_SIGNATURE_BYTES)
    except AttributeError:
        # Not readable - fall through to the direct-type handlers.
        pass

    dispatch = {
        bytearray: signature,
        bytes: signature,
        str: get_signature_bytes,
        memoryview: lambda view: signature(view).tolist(),
    }

    kind = type(obj)
    handler = dispatch.get(kind)
    if handler is None:
        raise TypeError('Unsupported type as file input: %s' % kind)
    return handler(obj)
try:
obj = obj.read(_NUM_SIGNATURE_BYTES)
except AttributeError:
# duck-typing as readable failed - we'll try the other options
pass
kind = type(obj)
if kind is bytearray:
return signature(obj)
if kind is str:
return get_signature_bytes(obj)
if kind is bytes:
return signature(obj)
if kind is memoryview:
return signature(obj).tolist()
raise TypeError('Unsupported type as file input: %s' % kind) | [
"def",
"get_bytes",
"(",
"obj",
")",
":",
"try",
":",
"obj",
"=",
"obj",
".",
"read",
"(",
"_NUM_SIGNATURE_BYTES",
")",
"except",
"AttributeError",
":",
"# duck-typing as readable failed - we'll try the other options",
"pass",
"kind",
"=",
"type",
"(",
"obj",
")",... | Infers the input type and reads the first 262 bytes,
returning a sliced bytearray.
Args:
obj: path to readable, file, bytes or bytearray.
Returns:
First 262 bytes of the file content as bytearray type.
Raises:
TypeError: if obj is not a supported type. | [
"Infers",
"the",
"input",
"type",
"and",
"reads",
"the",
"first",
"262",
"bytes",
"returning",
"a",
"sliced",
"bytearray",
"."
] | 37e7fd1a9eed1a9eab55ac43f62da98f10970675 | https://github.com/h2non/filetype.py/blob/37e7fd1a9eed1a9eab55ac43f62da98f10970675/filetype/utils.py#L38-L72 |
def get_type(mime=None, ext=None):
    """
    Returns the file type instance searching by
    MIME type or file extension.

    Args:
        ext: file extension string. E.g: jpg, png, mp4, mp3
        mime: MIME string. E.g: image/jpeg, video/mpeg

    Returns:
        The matched file type instance. Otherwise None.
    """
    for kind in types:
        # Compare by equality, not identity: `is` on strings only works
        # by CPython interning coincidence and misses equal-but-distinct
        # string objects (e.g. values built at runtime).
        if kind.extension == ext or kind.mime == mime:
            return kind
    return None
for kind in types:
if kind.extension is ext or kind.mime is mime:
return kind
return None | [
"def",
"get_type",
"(",
"mime",
"=",
"None",
",",
"ext",
"=",
"None",
")",
":",
"for",
"kind",
"in",
"types",
":",
"if",
"kind",
".",
"extension",
"is",
"ext",
"or",
"kind",
".",
"mime",
"is",
"mime",
":",
"return",
"kind",
"return",
"None"
] | Returns the file type instance searching by
MIME type or file extension.
Args:
ext: file extension string. E.g: jpg, png, mp4, mp3
mime: MIME string. E.g: image/jpeg, video/mpeg
Returns:
The matched file type instance. Otherwise None. | [
"Returns",
"the",
"file",
"type",
"instance",
"searching",
"by",
"MIME",
"type",
"or",
"file",
"extension",
"."
] | 37e7fd1a9eed1a9eab55ac43f62da98f10970675 | https://github.com/h2non/filetype.py/blob/37e7fd1a9eed1a9eab55ac43f62da98f10970675/filetype/filetype.py#L67-L82 |
def open(self, encoding=None):
    """Open the tailed file with the appropriate call.

    Gzipped files are opened in binary via gzip; anything else goes
    through io.open with the explicit *encoding* argument, the
    instance's configured encoding, or the platform default - always
    replacing undecodable bytes.

    Returns the open file object, or None (after closing the tail)
    when the file could not be opened.
    """
    try:
        if IS_GZIPPED_FILE.search(self._filename):
            _file = gzip.open(self._filename, 'rb')
        else:
            if encoding:
                _file = io.open(self._filename, 'r', encoding=encoding, errors='replace')
            elif self._encoding:
                _file = io.open(self._filename, 'r', encoding=self._encoding, errors='replace')
            else:
                _file = io.open(self._filename, 'r', errors='replace')
    except IOError as e:  # 'except IOError, e' is invalid syntax in Python 3
        self._log_warning(str(e))
        _file = None
        self.close()

    return _file
try:
if IS_GZIPPED_FILE.search(self._filename):
_file = gzip.open(self._filename, 'rb')
else:
if encoding:
_file = io.open(self._filename, 'r', encoding=encoding, errors='replace')
elif self._encoding:
_file = io.open(self._filename, 'r', encoding=self._encoding, errors='replace')
else:
_file = io.open(self._filename, 'r', errors='replace')
except IOError, e:
self._log_warning(str(e))
_file = None
self.close()
return _file | [
"def",
"open",
"(",
"self",
",",
"encoding",
"=",
"None",
")",
":",
"try",
":",
"if",
"IS_GZIPPED_FILE",
".",
"search",
"(",
"self",
".",
"_filename",
")",
":",
"_file",
"=",
"gzip",
".",
"open",
"(",
"self",
".",
"_filename",
",",
"'rb'",
")",
"el... | Opens the file with the appropriate call | [
"Opens",
"the",
"file",
"with",
"the",
"appropriate",
"call"
] | 93941e968016c5a962dffed9e7a9f6dc1d23236c | https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L79-L96 |
def close(self):
    """Close any open file pointer and flush pending state.

    No-op when the tail is already inactive. Forces a sincedb position
    write and emits any partially-accumulated multiline event before
    shutting down.
    """
    if not self.active:
        return
    self.active = False

    open_file = self._file
    if open_file:
        open_file.close()
    self._sincedb_update_position(force_update=True)

    pending = self._current_event
    if pending:
        flushed = '\n'.join(pending)
        pending.clear()
        self._callback_wrapper([flushed])
if not self.active:
return
self.active = False
if self._file:
self._file.close()
self._sincedb_update_position(force_update=True)
if self._current_event:
event = '\n'.join(self._current_event)
self._current_event.clear()
self._callback_wrapper([event]) | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"active",
":",
"return",
"self",
".",
"active",
"=",
"False",
"if",
"self",
".",
"_file",
":",
"self",
".",
"_file",
".",
"close",
"(",
")",
"self",
".",
"_sincedb_update_position",
"("... | Closes all currently open file pointers | [
"Closes",
"all",
"currently",
"open",
"file",
"pointers"
] | 93941e968016c5a962dffed9e7a9f6dc1d23236c | https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L98-L111 |
def _ensure_file_is_good(self, current_time):
    """Every N seconds, ensure that the file being tailed is the expected one.

    Detects removal (stat ENOENT), rotation (file id change) and
    truncation (current offset beyond the new size), closing or
    reopening the file accordingly. Throttled by self._stat_interval.
    """
    if self._last_file_mapping_update and current_time - self._last_file_mapping_update <= self._stat_interval:
        return

    self._last_file_mapping_update = time.time()

    try:
        st = os.stat(self._filename)
    except EnvironmentError as err:  # 'except E, err' is invalid in Python 3
        if err.errno == errno.ENOENT:
            self._log_info('file removed')
            self.close()
            return
        raise

    fid = self.get_file_id(st)
    if fid != self._fid:
        self._log_info('file rotated')
        self.close()
    elif self._file.tell() > st.st_size:
        if st.st_size == 0 and self._ignore_truncate:
            self._logger.info("[{0}] - file size is 0 {1}. ".format(fid, self._filename) +
                              "If you use another tool (i.e. logrotate) to truncate " +
                              "the file, your application may continue to write to " +
                              "the offset it last wrote later. In such a case, we'd " +
                              "better do nothing here")
            return
        self._log_info('file truncated')
        self._update_file(seek_to_end=False)
    elif REOPEN_FILES:
        self._log_debug('file reloaded (non-linux)')
        position = self._file.tell()
        self._update_file(seek_to_end=False)
        if self.active:
            self._file.seek(position, os.SEEK_SET)
if self._last_file_mapping_update and current_time - self._last_file_mapping_update <= self._stat_interval:
return
self._last_file_mapping_update = time.time()
try:
st = os.stat(self._filename)
except EnvironmentError, err:
if err.errno == errno.ENOENT:
self._log_info('file removed')
self.close()
return
raise
fid = self.get_file_id(st)
if fid != self._fid:
self._log_info('file rotated')
self.close()
elif self._file.tell() > st.st_size:
if st.st_size == 0 and self._ignore_truncate:
self._logger.info("[{0}] - file size is 0 {1}. ".format(fid, self._filename) +
"If you use another tool (i.e. logrotate) to truncate " +
"the file, your application may continue to write to " +
"the offset it last wrote later. In such a case, we'd " +
"better do nothing here")
return
self._log_info('file truncated')
self._update_file(seek_to_end=False)
elif REOPEN_FILES:
self._log_debug('file reloaded (non-linux)')
position = self._file.tell()
self._update_file(seek_to_end=False)
if self.active:
self._file.seek(position, os.SEEK_SET) | [
"def",
"_ensure_file_is_good",
"(",
"self",
",",
"current_time",
")",
":",
"if",
"self",
".",
"_last_file_mapping_update",
"and",
"current_time",
"-",
"self",
".",
"_last_file_mapping_update",
"<=",
"self",
".",
"_stat_interval",
":",
"return",
"self",
".",
"_last... | Every N seconds, ensures that the file we are tailing is the file we expect to be tailing | [
"Every",
"N",
"seconds",
"ensures",
"that",
"the",
"file",
"we",
"are",
"tailing",
"is",
"the",
"file",
"we",
"expect",
"to",
"be",
"tailing"
] | 93941e968016c5a962dffed9e7a9f6dc1d23236c | https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L197-L232 |
18,488 | python-beaver/python-beaver | beaver/worker/tail.py | Tail._run_pass | def _run_pass(self):
"""Read lines from a file and performs a callback against them"""
while True:
try:
data = self._file.read(4096)
except IOError, e:
if e.errno == errno.ESTALE:
self.active = False
return False
lines = self._buffer_extract(data)
if not lines:
# Before returning, check if an event (maybe partial) is waiting for too long.
if self._current_event and time.time() - self._last_activity > 1:
event = '\n'.join(self._current_event)
self._current_event.clear()
self._callback_wrapper([event])
break
self._last_activity = time.time()
if self._multiline_regex_after or self._multiline_regex_before:
# Multiline is enabled for this file.
events = multiline_merge(
lines,
self._current_event,
self._multiline_regex_after,
self._multiline_regex_before)
else:
events = lines
if events:
self._callback_wrapper(events)
if self._sincedb_path:
current_line_count = len(lines)
self._sincedb_update_position(lines=current_line_count)
self._sincedb_update_position() | python | def _run_pass(self):
while True:
try:
data = self._file.read(4096)
except IOError, e:
if e.errno == errno.ESTALE:
self.active = False
return False
lines = self._buffer_extract(data)
if not lines:
# Before returning, check if an event (maybe partial) is waiting for too long.
if self._current_event and time.time() - self._last_activity > 1:
event = '\n'.join(self._current_event)
self._current_event.clear()
self._callback_wrapper([event])
break
self._last_activity = time.time()
if self._multiline_regex_after or self._multiline_regex_before:
# Multiline is enabled for this file.
events = multiline_merge(
lines,
self._current_event,
self._multiline_regex_after,
self._multiline_regex_before)
else:
events = lines
if events:
self._callback_wrapper(events)
if self._sincedb_path:
current_line_count = len(lines)
self._sincedb_update_position(lines=current_line_count)
self._sincedb_update_position() | [
"def",
"_run_pass",
"(",
"self",
")",
":",
"while",
"True",
":",
"try",
":",
"data",
"=",
"self",
".",
"_file",
".",
"read",
"(",
"4096",
")",
"except",
"IOError",
",",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"ESTALE",
":",
"self",... | Read lines from a file and performs a callback against them | [
"Read",
"lines",
"from",
"a",
"file",
"and",
"performs",
"a",
"callback",
"against",
"them"
] | 93941e968016c5a962dffed9e7a9f6dc1d23236c | https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L234-L273 |
18,489 | python-beaver/python-beaver | beaver/worker/tail.py | Tail._sincedb_init | def _sincedb_init(self):
"""Initializes the sincedb schema in an sqlite db"""
if not self._sincedb_path:
return
if not os.path.exists(self._sincedb_path):
self._log_debug('initializing sincedb sqlite schema')
conn = sqlite3.connect(self._sincedb_path, isolation_level=None)
conn.execute("""
create table sincedb (
fid text primary key,
filename text,
position integer default 1
);
""")
conn.close() | python | def _sincedb_init(self):
if not self._sincedb_path:
return
if not os.path.exists(self._sincedb_path):
self._log_debug('initializing sincedb sqlite schema')
conn = sqlite3.connect(self._sincedb_path, isolation_level=None)
conn.execute("""
create table sincedb (
fid text primary key,
filename text,
position integer default 1
);
""")
conn.close() | [
"def",
"_sincedb_init",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_sincedb_path",
":",
"return",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"_sincedb_path",
")",
":",
"self",
".",
"_log_debug",
"(",
"'initializing sincedb sqli... | Initializes the sincedb schema in an sqlite db | [
"Initializes",
"the",
"sincedb",
"schema",
"in",
"an",
"sqlite",
"db"
] | 93941e968016c5a962dffed9e7a9f6dc1d23236c | https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L381-L396 |
18,490 | python-beaver/python-beaver | beaver/worker/tail.py | Tail._sincedb_update_position | def _sincedb_update_position(self, lines=0, force_update=False):
"""Retrieves the starting position from the sincedb sql db for a given file
Returns a boolean representing whether or not it updated the record
"""
if not self._sincedb_path:
return False
self._line_count = self._line_count + lines
old_count = self._line_count_sincedb
lines = self._line_count
current_time = int(time.time())
if not force_update:
if self._last_sincedb_write and current_time - self._last_sincedb_write <= self._sincedb_write_interval:
return False
if old_count == lines:
return False
self._sincedb_init()
self._last_sincedb_write = current_time
self._log_debug('updating sincedb to {0}'.format(lines))
conn = sqlite3.connect(self._sincedb_path, isolation_level=None)
cursor = conn.cursor()
query = 'insert or replace into sincedb (fid, filename) values (:fid, :filename);'
cursor.execute(query, {
'fid': self._fid,
'filename': self._filename
})
query = 'update sincedb set position = :position where fid = :fid and filename = :filename'
cursor.execute(query, {
'fid': self._fid,
'filename': self._filename,
'position': lines,
})
conn.close()
self._line_count_sincedb = lines
return True | python | def _sincedb_update_position(self, lines=0, force_update=False):
if not self._sincedb_path:
return False
self._line_count = self._line_count + lines
old_count = self._line_count_sincedb
lines = self._line_count
current_time = int(time.time())
if not force_update:
if self._last_sincedb_write and current_time - self._last_sincedb_write <= self._sincedb_write_interval:
return False
if old_count == lines:
return False
self._sincedb_init()
self._last_sincedb_write = current_time
self._log_debug('updating sincedb to {0}'.format(lines))
conn = sqlite3.connect(self._sincedb_path, isolation_level=None)
cursor = conn.cursor()
query = 'insert or replace into sincedb (fid, filename) values (:fid, :filename);'
cursor.execute(query, {
'fid': self._fid,
'filename': self._filename
})
query = 'update sincedb set position = :position where fid = :fid and filename = :filename'
cursor.execute(query, {
'fid': self._fid,
'filename': self._filename,
'position': lines,
})
conn.close()
self._line_count_sincedb = lines
return True | [
"def",
"_sincedb_update_position",
"(",
"self",
",",
"lines",
"=",
"0",
",",
"force_update",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"_sincedb_path",
":",
"return",
"False",
"self",
".",
"_line_count",
"=",
"self",
".",
"_line_count",
"+",
"lines... | Retrieves the starting position from the sincedb sql db for a given file
Returns a boolean representing whether or not it updated the record | [
"Retrieves",
"the",
"starting",
"position",
"from",
"the",
"sincedb",
"sql",
"db",
"for",
"a",
"given",
"file",
"Returns",
"a",
"boolean",
"representing",
"whether",
"or",
"not",
"it",
"updated",
"the",
"record"
] | 93941e968016c5a962dffed9e7a9f6dc1d23236c | https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L398-L441 |
18,491 | python-beaver/python-beaver | beaver/worker/tail.py | Tail._sincedb_start_position | def _sincedb_start_position(self):
"""Retrieves the starting position from the sincedb sql db
for a given file
"""
if not self._sincedb_path:
return None
self._sincedb_init()
self._log_debug('retrieving start_position from sincedb')
conn = sqlite3.connect(self._sincedb_path, isolation_level=None)
cursor = conn.cursor()
cursor.execute('select position from sincedb where fid = :fid and filename = :filename', {
'fid': self._fid,
'filename': self._filename
})
start_position = None
for row in cursor.fetchall():
start_position, = row
return start_position | python | def _sincedb_start_position(self):
if not self._sincedb_path:
return None
self._sincedb_init()
self._log_debug('retrieving start_position from sincedb')
conn = sqlite3.connect(self._sincedb_path, isolation_level=None)
cursor = conn.cursor()
cursor.execute('select position from sincedb where fid = :fid and filename = :filename', {
'fid': self._fid,
'filename': self._filename
})
start_position = None
for row in cursor.fetchall():
start_position, = row
return start_position | [
"def",
"_sincedb_start_position",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_sincedb_path",
":",
"return",
"None",
"self",
".",
"_sincedb_init",
"(",
")",
"self",
".",
"_log_debug",
"(",
"'retrieving start_position from sincedb'",
")",
"conn",
"=",
"sqli... | Retrieves the starting position from the sincedb sql db
for a given file | [
"Retrieves",
"the",
"starting",
"position",
"from",
"the",
"sincedb",
"sql",
"db",
"for",
"a",
"given",
"file"
] | 93941e968016c5a962dffed9e7a9f6dc1d23236c | https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L443-L463 |
18,492 | python-beaver/python-beaver | beaver/worker/tail.py | Tail._update_file | def _update_file(self, seek_to_end=True):
"""Open the file for tailing"""
try:
self.close()
self._file = self.open()
except IOError:
pass
else:
if not self._file:
return
self.active = True
try:
st = os.stat(self._filename)
except EnvironmentError, err:
if err.errno == errno.ENOENT:
self._log_info('file removed')
self.close()
fid = self.get_file_id(st)
if not self._fid:
self._fid = fid
if fid != self._fid:
self._log_info('file rotated')
self.close()
elif seek_to_end:
self._seek_to_end() | python | def _update_file(self, seek_to_end=True):
try:
self.close()
self._file = self.open()
except IOError:
pass
else:
if not self._file:
return
self.active = True
try:
st = os.stat(self._filename)
except EnvironmentError, err:
if err.errno == errno.ENOENT:
self._log_info('file removed')
self.close()
fid = self.get_file_id(st)
if not self._fid:
self._fid = fid
if fid != self._fid:
self._log_info('file rotated')
self.close()
elif seek_to_end:
self._seek_to_end() | [
"def",
"_update_file",
"(",
"self",
",",
"seek_to_end",
"=",
"True",
")",
":",
"try",
":",
"self",
".",
"close",
"(",
")",
"self",
".",
"_file",
"=",
"self",
".",
"open",
"(",
")",
"except",
"IOError",
":",
"pass",
"else",
":",
"if",
"not",
"self",... | Open the file for tailing | [
"Open",
"the",
"file",
"for",
"tailing"
] | 93941e968016c5a962dffed9e7a9f6dc1d23236c | https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L465-L492 |
18,493 | python-beaver/python-beaver | beaver/worker/tail.py | Tail.tail | def tail(self, fname, encoding, window, position=None):
"""Read last N lines from file fname."""
if window <= 0:
raise ValueError('invalid window %r' % window)
encodings = ENCODINGS
if encoding:
encodings = [encoding] + ENCODINGS
for enc in encodings:
try:
f = self.open(encoding=enc)
if f:
return self.tail_read(f, window, position=position)
return False
except IOError, err:
if err.errno == errno.ENOENT:
return []
raise
except UnicodeDecodeError:
pass | python | def tail(self, fname, encoding, window, position=None):
if window <= 0:
raise ValueError('invalid window %r' % window)
encodings = ENCODINGS
if encoding:
encodings = [encoding] + ENCODINGS
for enc in encodings:
try:
f = self.open(encoding=enc)
if f:
return self.tail_read(f, window, position=position)
return False
except IOError, err:
if err.errno == errno.ENOENT:
return []
raise
except UnicodeDecodeError:
pass | [
"def",
"tail",
"(",
"self",
",",
"fname",
",",
"encoding",
",",
"window",
",",
"position",
"=",
"None",
")",
":",
"if",
"window",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'invalid window %r'",
"%",
"window",
")",
"encodings",
"=",
"ENCODINGS",
"if",
... | Read last N lines from file fname. | [
"Read",
"last",
"N",
"lines",
"from",
"file",
"fname",
"."
] | 93941e968016c5a962dffed9e7a9f6dc1d23236c | https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L494-L515 |
18,494 | python-beaver/python-beaver | beaver/transports/__init__.py | create_transport | def create_transport(beaver_config, logger):
"""Creates and returns a transport object"""
transport_str = beaver_config.get('transport')
if '.' not in transport_str:
# allow simple names like 'redis' to load a beaver built-in transport
module_path = 'beaver.transports.%s_transport' % transport_str.lower()
class_name = '%sTransport' % transport_str.title()
else:
# allow dotted path names to load a custom transport class
try:
module_path, class_name = transport_str.rsplit('.', 1)
except ValueError:
raise Exception('Invalid transport {0}'.format(beaver_config.get('transport')))
_module = __import__(module_path, globals(), locals(), class_name, -1)
transport_class = getattr(_module, class_name)
transport = transport_class(beaver_config=beaver_config, logger=logger)
return transport | python | def create_transport(beaver_config, logger):
transport_str = beaver_config.get('transport')
if '.' not in transport_str:
# allow simple names like 'redis' to load a beaver built-in transport
module_path = 'beaver.transports.%s_transport' % transport_str.lower()
class_name = '%sTransport' % transport_str.title()
else:
# allow dotted path names to load a custom transport class
try:
module_path, class_name = transport_str.rsplit('.', 1)
except ValueError:
raise Exception('Invalid transport {0}'.format(beaver_config.get('transport')))
_module = __import__(module_path, globals(), locals(), class_name, -1)
transport_class = getattr(_module, class_name)
transport = transport_class(beaver_config=beaver_config, logger=logger)
return transport | [
"def",
"create_transport",
"(",
"beaver_config",
",",
"logger",
")",
":",
"transport_str",
"=",
"beaver_config",
".",
"get",
"(",
"'transport'",
")",
"if",
"'.'",
"not",
"in",
"transport_str",
":",
"# allow simple names like 'redis' to load a beaver built-in transport",
... | Creates and returns a transport object | [
"Creates",
"and",
"returns",
"a",
"transport",
"object"
] | 93941e968016c5a962dffed9e7a9f6dc1d23236c | https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/transports/__init__.py#L4-L22 |
18,495 | python-beaver/python-beaver | beaver/worker/tail_manager.py | TailManager.update_files | def update_files(self):
"""Ensures all files are properly loaded.
Detects new files, file removals, file rotation, and truncation.
On non-linux platforms, it will also manually reload the file for tailing.
Note that this hack is necessary because EOF is cached on BSD systems.
"""
if self._update_time and int(time.time()) - self._update_time < self._discover_interval:
return
self._update_time = int(time.time())
possible_files = []
files = []
if len(self._beaver_config.get('globs')) > 0:
extend_files = files.extend
for name, exclude in self._beaver_config.get('globs').items():
globbed = [os.path.realpath(filename) for filename in eglob(name, exclude)]
extend_files(globbed)
self._beaver_config.addglob(name, globbed)
self._callback(("addglob", (name, globbed)))
else:
append_files = files.append
for name in self.listdir():
append_files(os.path.realpath(os.path.join(self._folder, name)))
for absname in files:
try:
st = os.stat(absname)
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
if not stat.S_ISREG(st.st_mode):
continue
append_possible_files = possible_files.append
fid = self.get_file_id(st)
append_possible_files((fid, absname))
# add new ones
new_files = [fname for fid, fname in possible_files if fid not in self._tails]
self.watch(new_files) | python | def update_files(self):
if self._update_time and int(time.time()) - self._update_time < self._discover_interval:
return
self._update_time = int(time.time())
possible_files = []
files = []
if len(self._beaver_config.get('globs')) > 0:
extend_files = files.extend
for name, exclude in self._beaver_config.get('globs').items():
globbed = [os.path.realpath(filename) for filename in eglob(name, exclude)]
extend_files(globbed)
self._beaver_config.addglob(name, globbed)
self._callback(("addglob", (name, globbed)))
else:
append_files = files.append
for name in self.listdir():
append_files(os.path.realpath(os.path.join(self._folder, name)))
for absname in files:
try:
st = os.stat(absname)
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
if not stat.S_ISREG(st.st_mode):
continue
append_possible_files = possible_files.append
fid = self.get_file_id(st)
append_possible_files((fid, absname))
# add new ones
new_files = [fname for fid, fname in possible_files if fid not in self._tails]
self.watch(new_files) | [
"def",
"update_files",
"(",
"self",
")",
":",
"if",
"self",
".",
"_update_time",
"and",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"-",
"self",
".",
"_update_time",
"<",
"self",
".",
"_discover_interval",
":",
"return",
"self",
".",
"_update_time",
... | Ensures all files are properly loaded.
Detects new files, file removals, file rotation, and truncation.
On non-linux platforms, it will also manually reload the file for tailing.
Note that this hack is necessary because EOF is cached on BSD systems. | [
"Ensures",
"all",
"files",
"are",
"properly",
"loaded",
".",
"Detects",
"new",
"files",
"file",
"removals",
"file",
"rotation",
"and",
"truncation",
".",
"On",
"non",
"-",
"linux",
"platforms",
"it",
"will",
"also",
"manually",
"reload",
"the",
"file",
"for"... | 93941e968016c5a962dffed9e7a9f6dc1d23236c | https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail_manager.py#L85-L125 |
18,496 | python-beaver/python-beaver | beaver/worker/tail_manager.py | TailManager.close | def close(self, signalnum=None, frame=None):
self._running = False
"""Closes all currently open Tail objects"""
self._log_debug("Closing all tail objects")
self._active = False
for fid in self._tails:
self._tails[fid].close()
for n in range(0,self._number_of_consumer_processes):
if self._proc[n] is not None and self._proc[n].is_alive():
self._logger.debug("Terminate Process: " + str(n))
self._proc[n].terminate()
self._proc[n].join() | python | def close(self, signalnum=None, frame=None):
self._running = False
self._log_debug("Closing all tail objects")
self._active = False
for fid in self._tails:
self._tails[fid].close()
for n in range(0,self._number_of_consumer_processes):
if self._proc[n] is not None and self._proc[n].is_alive():
self._logger.debug("Terminate Process: " + str(n))
self._proc[n].terminate()
self._proc[n].join() | [
"def",
"close",
"(",
"self",
",",
"signalnum",
"=",
"None",
",",
"frame",
"=",
"None",
")",
":",
"self",
".",
"_running",
"=",
"False",
"self",
".",
"_log_debug",
"(",
"\"Closing all tail objects\"",
")",
"self",
".",
"_active",
"=",
"False",
"for",
"fid... | Closes all currently open Tail objects | [
"Closes",
"all",
"currently",
"open",
"Tail",
"objects"
] | 93941e968016c5a962dffed9e7a9f6dc1d23236c | https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail_manager.py#L127-L138 |
18,497 | python-beaver/python-beaver | beaver/utils.py | expand_paths | def expand_paths(path):
"""When given a path with brackets, expands it to return all permutations
of the path with expanded brackets, similar to ant.
>>> expand_paths('../{a,b}/{c,d}')
['../a/c', '../a/d', '../b/c', '../b/d']
>>> expand_paths('../{a,b}/{a,b}.py')
['../a/a.py', '../a/b.py', '../b/a.py', '../b/b.py']
>>> expand_paths('../{a,b,c}/{a,b,c}')
['../a/a', '../a/b', '../a/c', '../b/a', '../b/b', '../b/c', '../c/a', '../c/b', '../c/c']
>>> expand_paths('test')
['test']
>>> expand_paths('')
"""
pr = itertools.product
parts = MAGIC_BRACKETS.findall(path)
if not path:
return
if not parts:
return [path]
permutations = [[(p[0], i, 1) for i in p[1].split(',')] for p in parts]
return [_replace_all(path, i) for i in pr(*permutations)] | python | def expand_paths(path):
pr = itertools.product
parts = MAGIC_BRACKETS.findall(path)
if not path:
return
if not parts:
return [path]
permutations = [[(p[0], i, 1) for i in p[1].split(',')] for p in parts]
return [_replace_all(path, i) for i in pr(*permutations)] | [
"def",
"expand_paths",
"(",
"path",
")",
":",
"pr",
"=",
"itertools",
".",
"product",
"parts",
"=",
"MAGIC_BRACKETS",
".",
"findall",
"(",
"path",
")",
"if",
"not",
"path",
":",
"return",
"if",
"not",
"parts",
":",
"return",
"[",
"path",
"]",
"permutat... | When given a path with brackets, expands it to return all permutations
of the path with expanded brackets, similar to ant.
>>> expand_paths('../{a,b}/{c,d}')
['../a/c', '../a/d', '../b/c', '../b/d']
>>> expand_paths('../{a,b}/{a,b}.py')
['../a/a.py', '../a/b.py', '../b/a.py', '../b/b.py']
>>> expand_paths('../{a,b,c}/{a,b,c}')
['../a/a', '../a/b', '../a/c', '../b/a', '../b/b', '../b/c', '../c/a', '../c/b', '../c/c']
>>> expand_paths('test')
['test']
>>> expand_paths('') | [
"When",
"given",
"a",
"path",
"with",
"brackets",
"expands",
"it",
"to",
"return",
"all",
"permutations",
"of",
"the",
"path",
"with",
"expanded",
"brackets",
"similar",
"to",
"ant",
"."
] | 93941e968016c5a962dffed9e7a9f6dc1d23236c | https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/utils.py#L147-L171 |
18,498 | python-beaver/python-beaver | beaver/utils.py | multiline_merge | def multiline_merge(lines, current_event, re_after, re_before):
""" Merge multi-line events based.
Some event (like Python trackback or Java stracktrace) spawn
on multiple line. This method will merge them using two
regular expression: regex_after and regex_before.
If a line match re_after, it will be merged with next line.
If a line match re_before, it will be merged with previous line.
This function return a list of complet event. Note that because
we don't know if an event is complet before another new event
start, the last event will not be returned but stored in
current_event. You should pass the same current_event to
successive call to multiline_merge. current_event is a list
of lines whose belong to the same event.
"""
events = []
for line in lines:
if re_before and re_before.match(line):
current_event.append(line)
elif re_after and current_event and re_after.match(current_event[-1]):
current_event.append(line)
else:
if current_event:
events.append('\n'.join(current_event))
current_event.clear()
current_event.append(line)
return events | python | def multiline_merge(lines, current_event, re_after, re_before):
events = []
for line in lines:
if re_before and re_before.match(line):
current_event.append(line)
elif re_after and current_event and re_after.match(current_event[-1]):
current_event.append(line)
else:
if current_event:
events.append('\n'.join(current_event))
current_event.clear()
current_event.append(line)
return events | [
"def",
"multiline_merge",
"(",
"lines",
",",
"current_event",
",",
"re_after",
",",
"re_before",
")",
":",
"events",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"if",
"re_before",
"and",
"re_before",
".",
"match",
"(",
"line",
")",
":",
"current_ev... | Merge multi-line events based.
Some event (like Python trackback or Java stracktrace) spawn
on multiple line. This method will merge them using two
regular expression: regex_after and regex_before.
If a line match re_after, it will be merged with next line.
If a line match re_before, it will be merged with previous line.
This function return a list of complet event. Note that because
we don't know if an event is complet before another new event
start, the last event will not be returned but stored in
current_event. You should pass the same current_event to
successive call to multiline_merge. current_event is a list
of lines whose belong to the same event. | [
"Merge",
"multi",
"-",
"line",
"events",
"based",
"."
] | 93941e968016c5a962dffed9e7a9f6dc1d23236c | https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/utils.py#L180-L210 |
18,499 | python-beaver/python-beaver | beaver/ssh_tunnel.py | create_ssh_tunnel | def create_ssh_tunnel(beaver_config, logger=None):
"""Returns a BeaverSshTunnel object if the current config requires us to"""
if not beaver_config.use_ssh_tunnel():
return None
logger.info("Proxying transport using through local ssh tunnel")
return BeaverSshTunnel(beaver_config, logger=logger) | python | def create_ssh_tunnel(beaver_config, logger=None):
if not beaver_config.use_ssh_tunnel():
return None
logger.info("Proxying transport using through local ssh tunnel")
return BeaverSshTunnel(beaver_config, logger=logger) | [
"def",
"create_ssh_tunnel",
"(",
"beaver_config",
",",
"logger",
"=",
"None",
")",
":",
"if",
"not",
"beaver_config",
".",
"use_ssh_tunnel",
"(",
")",
":",
"return",
"None",
"logger",
".",
"info",
"(",
"\"Proxying transport using through local ssh tunnel\"",
")",
... | Returns a BeaverSshTunnel object if the current config requires us to | [
"Returns",
"a",
"BeaverSshTunnel",
"object",
"if",
"the",
"current",
"config",
"requires",
"us",
"to"
] | 93941e968016c5a962dffed9e7a9f6dc1d23236c | https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/ssh_tunnel.py#L10-L16 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.