code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def bed_to_bedpe(bedfile, bedpefile, pairsbedfile=None, matesfile=None, ca=False, strand=False):
    """
    This converts the bedfile to bedpefile, assuming the reads are from CA.

    :param bedfile: input BED file with one line per read
    :param bedpefile: output BEDPE file, one line per mate pair
    :param pairsbedfile: optional output BED file with one merged span per pair
    :param matesfile: optional mates file whose 'library' header line gives the
        insert-size range used to keep only "happy" spans
    :param ca: passed through to clone_name() to control CA-style name parsing
    :param strand: if True, also emit a score placeholder plus both strands
        in the BEDPE output
    """
    fp = must_open(bedfile)
    fw = must_open(bedpefile, "w")
    if pairsbedfile:
        fwpairs = must_open(pairsbedfile, "w")
    # Group BED lines by clone (mate-pair) name; proper pairs have 2 entries.
    clones = defaultdict(list)
    for row in fp:
        b = BedLine(row)
        name = b.accn
        clonename = clone_name(name, ca=ca)
        clones[clonename].append(b)
    if matesfile:
        # NOTE(review): fp is rebound here without closing the bed handle.
        fp = open(matesfile)
        # Only the header line of the mates file is consumed.
        libraryline = next(fp)
        # 'library bes 37896 126916'
        lib, name, smin, smax = libraryline.split()
        assert lib == "library"
        smin, smax = int(smin), int(smax)
        logging.debug("Happy mates for lib {0} fall between {1} - {2}".\
                format(name, smin, smax))
    nbedpe = 0
    nspan = 0
    for clonename, blines in clones.items():
        nlines = len(blines)
        if nlines == 2:
            a, b = blines
            aseqid, astart, aend = a.seqid, a.start, a.end
            bseqid, bstart, bend = b.seqid, b.start, b.end
            # start - 1: presumably converting 1-based BedLine starts to
            # 0-based BEDPE output -- TODO confirm against BedLine.
            outcols = [aseqid, astart - 1, aend, bseqid, bstart - 1, bend, clonename]
            if strand:
                # 0 presumably fills the BEDPE score column -- TODO confirm.
                outcols.extend([0, a.strand, b.strand])
            print("\t".join(str(x) for x in outcols), file=fw)
            nbedpe += 1
        elif nlines == 1:
            # Singleton read: zero out the "b" side so the pairs logic below
            # falls back to the a-side coordinates only.
            a, = blines
            aseqid, astart, aend = a.seqid, a.start, a.end
            bseqid, bstart, bend = 0, 0, 0
        else:  # More than two lines per pair
            # NOTE(review): nothing is reset here, so the pairs block below
            # reuses a*/b* values left over from the previous iteration.
            pass
        if pairsbedfile:
            # Merge the two mate intervals into one span; bstart/bend == 0
            # marks a missing "b" side.
            start = min(astart, bstart) if bstart > 0 else astart
            end = max(aend, bend) if bend > 0 else aend
            if aseqid != bseqid:
                continue
            span = end - start + 1
            # Without a mates file every span is kept; otherwise only spans
            # within the library's happy range are written.
            if (not matesfile) or (smin <= span <= smax):
                print("\t".join(str(x) for x in \
                    (aseqid, start - 1, end, clonename)), file=fwpairs)
                nspan += 1
    fw.close()
    logging.debug("A total of {0} bedpe written to `{1}`.".\
            format(nbedpe, bedpefile))
    if pairsbedfile:
        fwpairs.close()
        logging.debug("A total of {0} spans written to `{1}`.".\
                format(nspan, pairsbedfile)) | def function[bed_to_bedpe, parameter[bedfile, bedpefile, pairsbedfile, matesfile, ca, strand]]:
constant[
This converts the bedfile to bedpefile, assuming the reads are from CA.
]
variable[fp] assign[=] call[name[must_open], parameter[name[bedfile]]]
variable[fw] assign[=] call[name[must_open], parameter[name[bedpefile], constant[w]]]
if name[pairsbedfile] begin[:]
variable[fwpairs] assign[=] call[name[must_open], parameter[name[pairsbedfile], constant[w]]]
variable[clones] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[name[row]] in starred[name[fp]] begin[:]
variable[b] assign[=] call[name[BedLine], parameter[name[row]]]
variable[name] assign[=] name[b].accn
variable[clonename] assign[=] call[name[clone_name], parameter[name[name]]]
call[call[name[clones]][name[clonename]].append, parameter[name[b]]]
if name[matesfile] begin[:]
variable[fp] assign[=] call[name[open], parameter[name[matesfile]]]
variable[libraryline] assign[=] call[name[next], parameter[name[fp]]]
<ast.Tuple object at 0x7da1b0976380> assign[=] call[name[libraryline].split, parameter[]]
assert[compare[name[lib] equal[==] constant[library]]]
<ast.Tuple object at 0x7da1b0976c50> assign[=] tuple[[<ast.Call object at 0x7da18ede44c0>, <ast.Call object at 0x7da18ede4160>]]
call[name[logging].debug, parameter[call[constant[Happy mates for lib {0} fall between {1} - {2}].format, parameter[name[name], name[smin], name[smax]]]]]
variable[nbedpe] assign[=] constant[0]
variable[nspan] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da18ede6e30>, <ast.Name object at 0x7da18ede4f40>]]] in starred[call[name[clones].items, parameter[]]] begin[:]
variable[nlines] assign[=] call[name[len], parameter[name[blines]]]
if compare[name[nlines] equal[==] constant[2]] begin[:]
<ast.Tuple object at 0x7da18ede7d60> assign[=] name[blines]
<ast.Tuple object at 0x7da18ede4c70> assign[=] tuple[[<ast.Attribute object at 0x7da18ede7c70>, <ast.Attribute object at 0x7da18ede5bd0>, <ast.Attribute object at 0x7da18ede7970>]]
<ast.Tuple object at 0x7da18ede7a00> assign[=] tuple[[<ast.Attribute object at 0x7da18ede7a90>, <ast.Attribute object at 0x7da18ede5390>, <ast.Attribute object at 0x7da18ede7d00>]]
variable[outcols] assign[=] list[[<ast.Name object at 0x7da1b094d120>, <ast.BinOp object at 0x7da1b094f1f0>, <ast.Name object at 0x7da1b094db70>, <ast.Name object at 0x7da1b094d3f0>, <ast.BinOp object at 0x7da1b094d840>, <ast.Name object at 0x7da1b094c7c0>, <ast.Name object at 0x7da1b094dea0>]]
if name[strand] begin[:]
call[name[outcols].extend, parameter[list[[<ast.Constant object at 0x7da1b094fe20>, <ast.Attribute object at 0x7da1b094dc00>, <ast.Attribute object at 0x7da1b094cb50>]]]]
call[name[print], parameter[call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da1b094ee00>]]]]
<ast.AugAssign object at 0x7da1b094e080>
if name[pairsbedfile] begin[:]
variable[start] assign[=] <ast.IfExp object at 0x7da1b094d360>
variable[end] assign[=] <ast.IfExp object at 0x7da1b094df30>
if compare[name[aseqid] not_equal[!=] name[bseqid]] begin[:]
continue
variable[span] assign[=] binary_operation[binary_operation[name[end] - name[start]] + constant[1]]
if <ast.BoolOp object at 0x7da1b094c9d0> begin[:]
call[name[print], parameter[call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da1b094d420>]]]]
<ast.AugAssign object at 0x7da1b094f160>
call[name[fw].close, parameter[]]
call[name[logging].debug, parameter[call[constant[A total of {0} bedpe written to `{1}`.].format, parameter[name[nbedpe], name[bedpefile]]]]]
if name[pairsbedfile] begin[:]
call[name[fwpairs].close, parameter[]]
call[name[logging].debug, parameter[call[constant[A total of {0} spans written to `{1}`.].format, parameter[name[nspan], name[pairsbedfile]]]]] | keyword[def] identifier[bed_to_bedpe] ( identifier[bedfile] , identifier[bedpefile] , identifier[pairsbedfile] = keyword[None] , identifier[matesfile] = keyword[None] , identifier[ca] = keyword[False] , identifier[strand] = keyword[False] ):
literal[string]
identifier[fp] = identifier[must_open] ( identifier[bedfile] )
identifier[fw] = identifier[must_open] ( identifier[bedpefile] , literal[string] )
keyword[if] identifier[pairsbedfile] :
identifier[fwpairs] = identifier[must_open] ( identifier[pairsbedfile] , literal[string] )
identifier[clones] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[row] keyword[in] identifier[fp] :
identifier[b] = identifier[BedLine] ( identifier[row] )
identifier[name] = identifier[b] . identifier[accn]
identifier[clonename] = identifier[clone_name] ( identifier[name] , identifier[ca] = identifier[ca] )
identifier[clones] [ identifier[clonename] ]. identifier[append] ( identifier[b] )
keyword[if] identifier[matesfile] :
identifier[fp] = identifier[open] ( identifier[matesfile] )
identifier[libraryline] = identifier[next] ( identifier[fp] )
identifier[lib] , identifier[name] , identifier[smin] , identifier[smax] = identifier[libraryline] . identifier[split] ()
keyword[assert] identifier[lib] == literal[string]
identifier[smin] , identifier[smax] = identifier[int] ( identifier[smin] ), identifier[int] ( identifier[smax] )
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[name] , identifier[smin] , identifier[smax] ))
identifier[nbedpe] = literal[int]
identifier[nspan] = literal[int]
keyword[for] identifier[clonename] , identifier[blines] keyword[in] identifier[clones] . identifier[items] ():
identifier[nlines] = identifier[len] ( identifier[blines] )
keyword[if] identifier[nlines] == literal[int] :
identifier[a] , identifier[b] = identifier[blines]
identifier[aseqid] , identifier[astart] , identifier[aend] = identifier[a] . identifier[seqid] , identifier[a] . identifier[start] , identifier[a] . identifier[end]
identifier[bseqid] , identifier[bstart] , identifier[bend] = identifier[b] . identifier[seqid] , identifier[b] . identifier[start] , identifier[b] . identifier[end]
identifier[outcols] =[ identifier[aseqid] , identifier[astart] - literal[int] , identifier[aend] , identifier[bseqid] , identifier[bstart] - literal[int] , identifier[bend] , identifier[clonename] ]
keyword[if] identifier[strand] :
identifier[outcols] . identifier[extend] ([ literal[int] , identifier[a] . identifier[strand] , identifier[b] . identifier[strand] ])
identifier[print] ( literal[string] . identifier[join] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[outcols] ), identifier[file] = identifier[fw] )
identifier[nbedpe] += literal[int]
keyword[elif] identifier[nlines] == literal[int] :
identifier[a] ,= identifier[blines]
identifier[aseqid] , identifier[astart] , identifier[aend] = identifier[a] . identifier[seqid] , identifier[a] . identifier[start] , identifier[a] . identifier[end]
identifier[bseqid] , identifier[bstart] , identifier[bend] = literal[int] , literal[int] , literal[int]
keyword[else] :
keyword[pass]
keyword[if] identifier[pairsbedfile] :
identifier[start] = identifier[min] ( identifier[astart] , identifier[bstart] ) keyword[if] identifier[bstart] > literal[int] keyword[else] identifier[astart]
identifier[end] = identifier[max] ( identifier[aend] , identifier[bend] ) keyword[if] identifier[bend] > literal[int] keyword[else] identifier[aend]
keyword[if] identifier[aseqid] != identifier[bseqid] :
keyword[continue]
identifier[span] = identifier[end] - identifier[start] + literal[int]
keyword[if] ( keyword[not] identifier[matesfile] ) keyword[or] ( identifier[smin] <= identifier[span] <= identifier[smax] ):
identifier[print] ( literal[string] . identifier[join] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[aseqid] , identifier[start] - literal[int] , identifier[end] , identifier[clonename] )), identifier[file] = identifier[fwpairs] )
identifier[nspan] += literal[int]
identifier[fw] . identifier[close] ()
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[nbedpe] , identifier[bedpefile] ))
keyword[if] identifier[pairsbedfile] :
identifier[fwpairs] . identifier[close] ()
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[nspan] , identifier[pairsbedfile] )) | def bed_to_bedpe(bedfile, bedpefile, pairsbedfile=None, matesfile=None, ca=False, strand=False):
"""
This converts the bedfile to bedpefile, assuming the reads are from CA.
"""
fp = must_open(bedfile)
fw = must_open(bedpefile, 'w')
if pairsbedfile:
fwpairs = must_open(pairsbedfile, 'w') # depends on [control=['if'], data=[]]
clones = defaultdict(list)
for row in fp:
b = BedLine(row)
name = b.accn
clonename = clone_name(name, ca=ca)
clones[clonename].append(b) # depends on [control=['for'], data=['row']]
if matesfile:
fp = open(matesfile)
libraryline = next(fp)
# 'library bes 37896 126916'
(lib, name, smin, smax) = libraryline.split()
assert lib == 'library'
(smin, smax) = (int(smin), int(smax))
logging.debug('Happy mates for lib {0} fall between {1} - {2}'.format(name, smin, smax)) # depends on [control=['if'], data=[]]
nbedpe = 0
nspan = 0
for (clonename, blines) in clones.items():
nlines = len(blines)
if nlines == 2:
(a, b) = blines
(aseqid, astart, aend) = (a.seqid, a.start, a.end)
(bseqid, bstart, bend) = (b.seqid, b.start, b.end)
outcols = [aseqid, astart - 1, aend, bseqid, bstart - 1, bend, clonename]
if strand:
outcols.extend([0, a.strand, b.strand]) # depends on [control=['if'], data=[]]
print('\t'.join((str(x) for x in outcols)), file=fw)
nbedpe += 1 # depends on [control=['if'], data=[]]
elif nlines == 1:
(a,) = blines
(aseqid, astart, aend) = (a.seqid, a.start, a.end)
(bseqid, bstart, bend) = (0, 0, 0) # depends on [control=['if'], data=[]]
else: # More than two lines per pair
pass
if pairsbedfile:
start = min(astart, bstart) if bstart > 0 else astart
end = max(aend, bend) if bend > 0 else aend
if aseqid != bseqid:
continue # depends on [control=['if'], data=[]]
span = end - start + 1
if not matesfile or smin <= span <= smax:
print('\t'.join((str(x) for x in (aseqid, start - 1, end, clonename))), file=fwpairs)
nspan += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
fw.close()
logging.debug('A total of {0} bedpe written to `{1}`.'.format(nbedpe, bedpefile))
if pairsbedfile:
fwpairs.close()
logging.debug('A total of {0} spans written to `{1}`.'.format(nspan, pairsbedfile)) # depends on [control=['if'], data=[]] |
def get_alerts_for(self, trigger):
    """
    Retrieves all of the alerts that were fired for the specified Trigger
    :param trigger: the trigger
    :type trigger: `pyowm.alertapi30.trigger.Trigger`
    :return: list of `pyowm.alertapi30.alert.Alert` objects
    :raises AssertionError: if *trigger* is ``None`` or its ``id`` is not a str
    """
    assert trigger is not None
    assert isinstance(trigger.id, str), "Value must be a string"
    # NOTE(review): the HTTP status code is discarded; errors are presumably
    # raised by the HTTP client itself -- confirm against http_client.
    status, data = self.http_client.get_json(
        ALERTS_URI % trigger.id,
        params={'appid': self.API_key},
        headers={'Content-Type': 'application/json'})
    # Each JSON item is parsed into an Alert object.
    return [self.alert_parser.parse_dict(item) for item in data] | def function[get_alerts_for, parameter[self, trigger]]:
constant[
Retrieves all of the alerts that were fired for the specified Trigger
:param trigger: the trigger
:type trigger: `pyowm.alertapi30.trigger.Trigger`
:return: list of `pyowm.alertapi30.alert.Alert` objects
]
assert[compare[name[trigger] is_not constant[None]]]
assert[call[name[isinstance], parameter[name[trigger].id, name[str]]]]
<ast.Tuple object at 0x7da20c6e6560> assign[=] call[name[self].http_client.get_json, parameter[binary_operation[name[ALERTS_URI] <ast.Mod object at 0x7da2590d6920> name[trigger].id]]]
return[<ast.ListComp object at 0x7da2044c02b0>] | keyword[def] identifier[get_alerts_for] ( identifier[self] , identifier[trigger] ):
literal[string]
keyword[assert] identifier[trigger] keyword[is] keyword[not] keyword[None]
keyword[assert] identifier[isinstance] ( identifier[trigger] . identifier[id] , identifier[str] ), literal[string]
identifier[status] , identifier[data] = identifier[self] . identifier[http_client] . identifier[get_json] (
identifier[ALERTS_URI] % identifier[trigger] . identifier[id] ,
identifier[params] ={ literal[string] : identifier[self] . identifier[API_key] },
identifier[headers] ={ literal[string] : literal[string] })
keyword[return] [ identifier[self] . identifier[alert_parser] . identifier[parse_dict] ( identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[data] ] | def get_alerts_for(self, trigger):
"""
Retrieves all of the alerts that were fired for the specified Trigger
:param trigger: the trigger
:type trigger: `pyowm.alertapi30.trigger.Trigger`
:return: list of `pyowm.alertapi30.alert.Alert` objects
"""
assert trigger is not None
assert isinstance(trigger.id, str), 'Value must be a string'
(status, data) = self.http_client.get_json(ALERTS_URI % trigger.id, params={'appid': self.API_key}, headers={'Content-Type': 'application/json'})
return [self.alert_parser.parse_dict(item) for item in data] |
def find_arg(self, name):
    """Find arg by normalized arg name or parameter name.

    Returns None when no arg is registered under the normalized name.
    """
    # Normalize first so lookups by either spelling hit the same key.
    name = self.normalize_name(name)
    return self.args.get(name) | def function[find_arg, parameter[self, name]]:
constant[Find arg by normalized arg name or parameter name.]
variable[name] assign[=] call[name[self].normalize_name, parameter[name[name]]]
return[call[name[self].args.get, parameter[name[name]]]] | keyword[def] identifier[find_arg] ( identifier[self] , identifier[name] ):
literal[string]
identifier[name] = identifier[self] . identifier[normalize_name] ( identifier[name] )
keyword[return] identifier[self] . identifier[args] . identifier[get] ( identifier[name] ) | def find_arg(self, name):
"""Find arg by normalized arg name or parameter name."""
name = self.normalize_name(name)
return self.args.get(name) |
def register_producer(cls, producer):
    """
    Register a default producer for events to use.
    :param producer: the default producer to dispatch events on.

    The first registration wins: if a producer is already set, the new
    one is ignored.
    """
    log.info('@Registry.register_producer `{}`'
             .format(producer.__class__.__name__))
    # Keep the existing producer if one is already registered.
    cls._producer = (cls._producer or producer) | def function[register_producer, parameter[cls, producer]]:
constant[
Register a default producer for events to use.
:param producer: the default producer to to dispatch events on.
]
call[name[log].info, parameter[call[constant[@Registry.register_producer `{}`].format, parameter[name[producer].__class__.__name__]]]]
name[cls]._producer assign[=] <ast.BoolOp object at 0x7da20c990700> | keyword[def] identifier[register_producer] ( identifier[cls] , identifier[producer] ):
literal[string]
identifier[log] . identifier[info] ( literal[string]
. identifier[format] ( identifier[producer] . identifier[__class__] . identifier[__name__] ))
identifier[cls] . identifier[_producer] =( identifier[cls] . identifier[_producer] keyword[or] identifier[producer] ) | def register_producer(cls, producer):
"""
Register a default producer for events to use.
:param producer: the default producer to to dispatch events on.
"""
log.info('@Registry.register_producer `{}`'.format(producer.__class__.__name__))
cls._producer = cls._producer or producer |
def add_label(self):
    """Add label with explanation at top of dialog window."""
    # Translated explanation text; _() is the i18n gettext wrapper.
    txt = _('Autosave files found. What would you like to do?\n\n'
            'This dialog will be shown again on next startup if any '
            'autosave files are not restored, moved or deleted.')
    label = QLabel(txt, self)
    # Wrap long text instead of widening the dialog.
    label.setWordWrap(True)
    self.layout.addWidget(label) | def function[add_label, parameter[self]]:
constant[Add label with explanation at top of dialog window.]
variable[txt] assign[=] call[name[_], parameter[constant[Autosave files found. What would you like to do?
This dialog will be shown again on next startup if any autosave files are not restored, moved or deleted.]]]
variable[label] assign[=] call[name[QLabel], parameter[name[txt], name[self]]]
call[name[label].setWordWrap, parameter[constant[True]]]
call[name[self].layout.addWidget, parameter[name[label]]] | keyword[def] identifier[add_label] ( identifier[self] ):
literal[string]
identifier[txt] = identifier[_] ( literal[string]
literal[string]
literal[string] )
identifier[label] = identifier[QLabel] ( identifier[txt] , identifier[self] )
identifier[label] . identifier[setWordWrap] ( keyword[True] )
identifier[self] . identifier[layout] . identifier[addWidget] ( identifier[label] ) | def add_label(self):
"""Add label with explanation at top of dialog window."""
txt = _('Autosave files found. What would you like to do?\n\nThis dialog will be shown again on next startup if any autosave files are not restored, moved or deleted.')
label = QLabel(txt, self)
label.setWordWrap(True)
self.layout.addWidget(label) |
def get_choice_status(self):
    """
    Returns a message field, which indicates whether choices statically
    or dynamically defined, and flag indicating whether a dynamic file
    selection loading error occurred.
    Throws an error if this is not a choice parameter.

    :return: (message, flag) tuple taken from the parameter's
        choiceInfo status block
    :raises GPException: if the parameter has no 'choiceInfo' entry
    """
    # Only choice parameters carry a 'choiceInfo' section in the DTO.
    if 'choiceInfo' not in self.dto[self.name]:
        raise GPException('not a choice parameter')
    status = self.dto[self.name]['choiceInfo']['status']
    return status['message'], status['flag'] | def function[get_choice_status, parameter[self]]:
constant[
Returns a message field, which indicates whether choices statically
or dynamically defined, and flag indicating whether a dynamic file
selection loading error occurred.
Throws an error if this is not a choice parameter.
]
if compare[constant[choiceInfo] <ast.NotIn object at 0x7da2590d7190> call[name[self].dto][name[self].name]] begin[:]
<ast.Raise object at 0x7da2041d8730>
variable[status] assign[=] call[call[call[name[self].dto][name[self].name]][constant[choiceInfo]]][constant[status]]
return[tuple[[<ast.Subscript object at 0x7da2041d9780>, <ast.Subscript object at 0x7da2041d91b0>]]] | keyword[def] identifier[get_choice_status] ( identifier[self] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[dto] [ identifier[self] . identifier[name] ]:
keyword[raise] identifier[GPException] ( literal[string] )
identifier[status] = identifier[self] . identifier[dto] [ identifier[self] . identifier[name] ][ literal[string] ][ literal[string] ]
keyword[return] identifier[status] [ literal[string] ], identifier[status] [ literal[string] ] | def get_choice_status(self):
"""
Returns a message field, which indicates whether choices statically
or dynamically defined, and flag indicating whether a dynamic file
selection loading error occurred.
Throws an error if this is not a choice parameter.
"""
if 'choiceInfo' not in self.dto[self.name]:
raise GPException('not a choice parameter') # depends on [control=['if'], data=[]]
status = self.dto[self.name]['choiceInfo']['status']
return (status['message'], status['flag']) |
def load_map(stream, name=None, check_integrity=True, check_duplicates=True):
    """
    Loads a ContainerMap configuration from a YAML document stream.
    :param stream: YAML stream.
    :type stream: file
    :param name: Name of the ContainerMap. If not provided, will be attempted to read from a ``name`` attribute on the
      document root level.
    :type name: unicode | str
    :param check_integrity: Performs a brief integrity check; default is ``True``.
    :type check_integrity: bool
    :param check_duplicates: Check for duplicate attached volumes during integrity check.
    :type check_duplicates: bool
    :return: A ContainerMap object.
    :rtype: ContainerMap
    :raises ValueError: if no map name can be determined, or the document
        root is not a mapping.
    """
    # safe_load: never executes arbitrary YAML tags from the stream.
    map_dict = yaml.safe_load(stream)
    if isinstance(map_dict, dict):
        # An explicit name argument wins; otherwise 'name' is popped from the
        # document so it does not leak into the map configuration itself.
        map_name = name or map_dict.pop('name', None)
        if not map_name:
            raise ValueError("No map name provided, and none found in YAML stream.")
        return ContainerMap(map_name, map_dict, check_integrity=check_integrity, check_duplicates=check_duplicates)
    # Non-mapping documents (lists, scalars, empty streams) are rejected.
    raise ValueError("Valid map could not be decoded.") | def function[load_map, parameter[stream, name, check_integrity, check_duplicates]]:
constant[
Loads a ContainerMap configuration from a YAML document stream.
:param stream: YAML stream.
:type stream: file
:param name: Name of the ContainerMap. If not provided, will be attempted to read from a ``name`` attribute on the
document root level.
:type name: unicode | str
:param check_integrity: Performs a brief integrity check; default is ``True``.
:type check_integrity: bool
:param check_duplicates: Check for duplicate attached volumes during integrity check.
:type check_duplicates: bool
:return: A ContainerMap object.
:rtype: ContainerMap
]
variable[map_dict] assign[=] call[name[yaml].safe_load, parameter[name[stream]]]
if call[name[isinstance], parameter[name[map_dict], name[dict]]] begin[:]
variable[map_name] assign[=] <ast.BoolOp object at 0x7da204564af0>
if <ast.UnaryOp object at 0x7da20c7c8c10> begin[:]
<ast.Raise object at 0x7da20c7c98a0>
return[call[name[ContainerMap], parameter[name[map_name], name[map_dict]]]]
<ast.Raise object at 0x7da20c7cba90> | keyword[def] identifier[load_map] ( identifier[stream] , identifier[name] = keyword[None] , identifier[check_integrity] = keyword[True] , identifier[check_duplicates] = keyword[True] ):
literal[string]
identifier[map_dict] = identifier[yaml] . identifier[safe_load] ( identifier[stream] )
keyword[if] identifier[isinstance] ( identifier[map_dict] , identifier[dict] ):
identifier[map_name] = identifier[name] keyword[or] identifier[map_dict] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] keyword[not] identifier[map_name] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[ContainerMap] ( identifier[map_name] , identifier[map_dict] , identifier[check_integrity] = identifier[check_integrity] , identifier[check_duplicates] = identifier[check_duplicates] )
keyword[raise] identifier[ValueError] ( literal[string] ) | def load_map(stream, name=None, check_integrity=True, check_duplicates=True):
"""
Loads a ContainerMap configuration from a YAML document stream.
:param stream: YAML stream.
:type stream: file
:param name: Name of the ContainerMap. If not provided, will be attempted to read from a ``name`` attribute on the
document root level.
:type name: unicode | str
:param check_integrity: Performs a brief integrity check; default is ``True``.
:type check_integrity: bool
:param check_duplicates: Check for duplicate attached volumes during integrity check.
:type check_duplicates: bool
:return: A ContainerMap object.
:rtype: ContainerMap
"""
map_dict = yaml.safe_load(stream)
if isinstance(map_dict, dict):
map_name = name or map_dict.pop('name', None)
if not map_name:
raise ValueError('No map name provided, and none found in YAML stream.') # depends on [control=['if'], data=[]]
return ContainerMap(map_name, map_dict, check_integrity=check_integrity, check_duplicates=check_duplicates) # depends on [control=['if'], data=[]]
raise ValueError('Valid map could not be decoded.') |
def close(self):
    """
    Cleans up anything from the process

    Signals worker loops to stop, joins the helper threads, then closes
    the Kafka consumer and producer if they were created.
    """
    self.logger.info("Closing Rest Service")
    # Flag checked by the worker loops so they exit before the joins below.
    self.closed = True
    # close threads
    self._close_thread(self._redis_thread, "Redis setup")
    self._close_thread(self._heartbeat_thread, "Heartbeat")
    self._close_thread(self._kafka_thread, "Kafka setup")
    self._close_thread(self._consumer_thread, "Consumer")
    # close kafka
    if self.consumer is not None:
        self.logger.debug("Closing kafka consumer")
        self.consumer.close()
    if self.producer is not None:
        self.logger.debug("Closing kafka producer")
        # Bounded timeout so shutdown cannot hang on pending sends.
        self.producer.close(timeout=10) | def function[close, parameter[self]]:
constant[
Cleans up anything from the process
]
call[name[self].logger.info, parameter[constant[Closing Rest Service]]]
name[self].closed assign[=] constant[True]
call[name[self]._close_thread, parameter[name[self]._redis_thread, constant[Redis setup]]]
call[name[self]._close_thread, parameter[name[self]._heartbeat_thread, constant[Heartbeat]]]
call[name[self]._close_thread, parameter[name[self]._kafka_thread, constant[Kafka setup]]]
call[name[self]._close_thread, parameter[name[self]._consumer_thread, constant[Consumer]]]
if compare[name[self].consumer is_not constant[None]] begin[:]
call[name[self].logger.debug, parameter[constant[Closing kafka consumer]]]
call[name[self].consumer.close, parameter[]]
if compare[name[self].producer is_not constant[None]] begin[:]
call[name[self].logger.debug, parameter[constant[Closing kafka producer]]]
call[name[self].producer.close, parameter[]] | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] )
identifier[self] . identifier[closed] = keyword[True]
identifier[self] . identifier[_close_thread] ( identifier[self] . identifier[_redis_thread] , literal[string] )
identifier[self] . identifier[_close_thread] ( identifier[self] . identifier[_heartbeat_thread] , literal[string] )
identifier[self] . identifier[_close_thread] ( identifier[self] . identifier[_kafka_thread] , literal[string] )
identifier[self] . identifier[_close_thread] ( identifier[self] . identifier[_consumer_thread] , literal[string] )
keyword[if] identifier[self] . identifier[consumer] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[consumer] . identifier[close] ()
keyword[if] identifier[self] . identifier[producer] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[producer] . identifier[close] ( identifier[timeout] = literal[int] ) | def close(self):
"""
Cleans up anything from the process
"""
self.logger.info('Closing Rest Service')
self.closed = True
# close threads
self._close_thread(self._redis_thread, 'Redis setup')
self._close_thread(self._heartbeat_thread, 'Heartbeat')
self._close_thread(self._kafka_thread, 'Kafka setup')
self._close_thread(self._consumer_thread, 'Consumer')
# close kafka
if self.consumer is not None:
self.logger.debug('Closing kafka consumer')
self.consumer.close() # depends on [control=['if'], data=[]]
if self.producer is not None:
self.logger.debug('Closing kafka producer')
self.producer.close(timeout=10) # depends on [control=['if'], data=[]] |
def get_lines(self, config_access, visited_set):
    """
    get the lines for this section
    visited_set is used to avoid visiting same section
    twice, if we've got a diamond in the @is setup

    :param config_access: accessor used to resolve keyfiles and sections
    :param visited_set: mutable set of already-visited sections; this
        section is added to it as a side effect
    :return: list of (option, values) tuples for this section and all
        sections it inherits from
    """
    if self in visited_set:
        return []
    # Copy so additions below do not mutate this section's own lines.
    lines = self.lines.copy()
    visited_set.add(self)
    for identity in self.identities:
        # Only identities with a configured keyfile contribute options.
        if config_access.get_keyfile(identity):
            lines.append(('IdentitiesOnly', ['yes']))
            # pipes.quote guards against shell-unsafe characters in the path.
            lines.append(('IdentityFile', [pipes.quote(config_access.get_keyfile(identity))]))
    # Inherit lines from each section this one is tagged with.
    for section_name in self.types:
        section = config_access.get_section(section_name)
        lines += section.get_lines(config_access, visited_set)
    return lines | def function[get_lines, parameter[self, config_access, visited_set]]:
constant[
get the lines for this section
visited_set is used to avoid visiting same section
twice, if we've got a diamond in the @is setup
]
if compare[name[self] in name[visited_set]] begin[:]
return[list[[]]]
variable[lines] assign[=] call[name[self].lines.copy, parameter[]]
call[name[visited_set].add, parameter[name[self]]]
for taget[name[identity]] in starred[name[self].identities] begin[:]
if call[name[config_access].get_keyfile, parameter[name[identity]]] begin[:]
call[name[lines].append, parameter[tuple[[<ast.Constant object at 0x7da1b09144f0>, <ast.List object at 0x7da1b0916410>]]]]
call[name[lines].append, parameter[tuple[[<ast.Constant object at 0x7da1b0914a60>, <ast.List object at 0x7da1b0916950>]]]]
for taget[name[section_name]] in starred[name[self].types] begin[:]
variable[section] assign[=] call[name[config_access].get_section, parameter[name[section_name]]]
<ast.AugAssign object at 0x7da1b0a224a0>
return[name[lines]] | keyword[def] identifier[get_lines] ( identifier[self] , identifier[config_access] , identifier[visited_set] ):
literal[string]
keyword[if] identifier[self] keyword[in] identifier[visited_set] :
keyword[return] []
identifier[lines] = identifier[self] . identifier[lines] . identifier[copy] ()
identifier[visited_set] . identifier[add] ( identifier[self] )
keyword[for] identifier[identity] keyword[in] identifier[self] . identifier[identities] :
keyword[if] identifier[config_access] . identifier[get_keyfile] ( identifier[identity] ):
identifier[lines] . identifier[append] (( literal[string] ,[ literal[string] ]))
identifier[lines] . identifier[append] (( literal[string] ,[ identifier[pipes] . identifier[quote] ( identifier[config_access] . identifier[get_keyfile] ( identifier[identity] ))]))
keyword[for] identifier[section_name] keyword[in] identifier[self] . identifier[types] :
identifier[section] = identifier[config_access] . identifier[get_section] ( identifier[section_name] )
identifier[lines] += identifier[section] . identifier[get_lines] ( identifier[config_access] , identifier[visited_set] )
keyword[return] identifier[lines] | def get_lines(self, config_access, visited_set):
"""
get the lines for this section
visited_set is used to avoid visiting same section
twice, if we've got a diamond in the @is setup
"""
if self in visited_set:
return [] # depends on [control=['if'], data=[]]
lines = self.lines.copy()
visited_set.add(self)
for identity in self.identities:
if config_access.get_keyfile(identity):
lines.append(('IdentitiesOnly', ['yes']))
lines.append(('IdentityFile', [pipes.quote(config_access.get_keyfile(identity))])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['identity']]
for section_name in self.types:
section = config_access.get_section(section_name)
lines += section.get_lines(config_access, visited_set) # depends on [control=['for'], data=['section_name']]
return lines |
def get_param_names(model_obj):
    """Return the display names of all estimated parameters.

    Parameters
    ----------
    model_obj : an instance of an MNDC object.
        Must expose `ind_var_names`, `intercept_names`, `shape_names`
        and `nest_names` (the last three may be None).

    Returns
    -------
    all_names : list of strings.
        One entry per estimated parameter, ordered as
        `['nest_parameters', 'shape_parameters', 'outside_intercepts',
        'index_coefficients']`.
    """
    # Start from a copy of the index coefficient names so the model
    # object's own list is never mutated.
    all_names = deepcopy(model_obj.ind_var_names)
    # Prepend each optional group; iterating intercepts, shapes, nests in
    # this order yields the final ordering nests / shapes / intercepts /
    # coefficients, matching the original chained prepends.
    for extra_names in (model_obj.intercept_names,
                        model_obj.shape_names,
                        model_obj.nest_names):
        if extra_names is not None:
            all_names = extra_names + all_names
    return all_names
constant[
Extracts all the names to be displayed for the estimated parameters.
Parameters
----------
model_obj : an instance of an MNDC object.
Should have the following attributes:
`['ind_var_names', 'intercept_names', 'shape_names', 'nest_names']`.
Returns
-------
all_names : list of strings.
There will be one element for each estimated parameter. The order of
the parameter names will be
`['nest_parameters', 'shape_parameters', 'outside_intercepts',
'index_coefficients']`.
]
variable[all_names] assign[=] call[name[deepcopy], parameter[name[model_obj].ind_var_names]]
if compare[name[model_obj].intercept_names is_not constant[None]] begin[:]
variable[all_names] assign[=] binary_operation[name[model_obj].intercept_names + name[all_names]]
if compare[name[model_obj].shape_names is_not constant[None]] begin[:]
variable[all_names] assign[=] binary_operation[name[model_obj].shape_names + name[all_names]]
if compare[name[model_obj].nest_names is_not constant[None]] begin[:]
variable[all_names] assign[=] binary_operation[name[model_obj].nest_names + name[all_names]]
return[name[all_names]] | keyword[def] identifier[get_param_names] ( identifier[model_obj] ):
literal[string]
identifier[all_names] = identifier[deepcopy] ( identifier[model_obj] . identifier[ind_var_names] )
keyword[if] identifier[model_obj] . identifier[intercept_names] keyword[is] keyword[not] keyword[None] :
identifier[all_names] = identifier[model_obj] . identifier[intercept_names] + identifier[all_names]
keyword[if] identifier[model_obj] . identifier[shape_names] keyword[is] keyword[not] keyword[None] :
identifier[all_names] = identifier[model_obj] . identifier[shape_names] + identifier[all_names]
keyword[if] identifier[model_obj] . identifier[nest_names] keyword[is] keyword[not] keyword[None] :
identifier[all_names] = identifier[model_obj] . identifier[nest_names] + identifier[all_names]
keyword[return] identifier[all_names] | def get_param_names(model_obj):
"""
Extracts all the names to be displayed for the estimated parameters.
Parameters
----------
model_obj : an instance of an MNDC object.
Should have the following attributes:
`['ind_var_names', 'intercept_names', 'shape_names', 'nest_names']`.
Returns
-------
all_names : list of strings.
There will be one element for each estimated parameter. The order of
the parameter names will be
`['nest_parameters', 'shape_parameters', 'outside_intercepts',
'index_coefficients']`.
"""
# Get the index coefficient names
all_names = deepcopy(model_obj.ind_var_names)
# Add the intercept names if any exist
if model_obj.intercept_names is not None:
all_names = model_obj.intercept_names + all_names # depends on [control=['if'], data=[]]
# Add the shape names if any exist
if model_obj.shape_names is not None:
all_names = model_obj.shape_names + all_names # depends on [control=['if'], data=[]]
# Add the nest names if any exist
if model_obj.nest_names is not None:
all_names = model_obj.nest_names + all_names # depends on [control=['if'], data=[]]
return all_names |
def implicit_step(self):
    """
    Integrate one step using trapezoidal method. Sets convergence and niter flags.

    Runs a Newton iteration on the discretized DAE residual: each pass
    (re)builds the augmented Jacobian ``dae.Ac``, solves for the increment
    on ``[dae.x; dae.y]``, and repeats until ``self.err`` drops below
    ``config.tol`` or ``config.maxit`` iterations are used up.

    Returns
    -------
    None
    """
    config = self.config
    system = self.system
    dae = self.system.dae
    # constant short names
    In = spdiag([1] * dae.n)  # identity block for the differential states
    h = self.h  # integration step size
    # Newton loop: iterate until the infinity-norm of the increment is
    # below tolerance or the iteration budget is exhausted.
    while self.err > config.tol and self.niter < config.maxit:
        # Decide whether the Jacobian must be rebuilt this pass:
        # periodically (every 5 time units of self.t — presumably
        # simulation time, TODO confirm), after slow convergence, or
        # when a fresh symbolic factorization is pending.
        if self.t - self.t_jac >= 5:
            dae.rebuild = True
            self.t_jac = self.t
        elif self.niter > 4:
            dae.rebuild = True
        elif dae.factorize:
            dae.rebuild = True
        # rebuild Jacobian
        # NOTE(review): the generated call strings are exec'd and mutate
        # `dae` in place (f, g and the Fx/Fy/Gx/Gy blocks); `int_fg`
        # evaluates only the residuals, skipping the Jacobian blocks.
        if dae.rebuild:
            exec(system.call.int)
            dae.rebuild = False
        else:
            exec(system.call.int_fg)
        # complete Jacobian matrix dae.Ac
        if config.method == 'euler':
            # Implicit (backward) Euler: residual q = x - x0 - h*f.
            dae.Ac = sparse(
                [[In - h * dae.Fx, dae.Gx], [-h * dae.Fy, dae.Gy]],
                'd')
            dae.q = dae.x - self.x0 - h * dae.f
        elif config.method == 'trapezoidal':
            # Trapezoidal rule: average of current f and stored f0.
            dae.Ac = sparse([[In - h * 0.5 * dae.Fx, dae.Gx],
                             [-h * 0.5 * dae.Fy, dae.Gy]], 'd')
            dae.q = dae.x - self.x0 - h * 0.5 * (dae.f + self.f0)
        # windup limiters
        dae.reset_Ac()
        # Symbolic factorization is reused across steps; only redo it
        # when flagged.
        if dae.factorize:
            self.F = self.solver.symbolic(dae.Ac)
            dae.factorize = False
        # Right-hand side: negative residual stacked as [q; g].
        self.inc = -matrix([dae.q, dae.g])
        try:
            N = self.solver.numeric(dae.Ac, self.F)
            self.solver.solve(dae.Ac, self.F, N, self.inc)
        except ArithmeticError:
            # Singular Jacobian: report suspect diagonal entries and
            # abort the step by exhausting the iteration budget.
            logger.error('Singular matrix')
            dae.check_diag(dae.Gy, 'unamey')
            dae.check_diag(dae.Fx, 'unamex')
            # force quit
            self.niter = config.maxit + 1
            break
        except ValueError:
            # Structure changed under the reused symbolic factorization;
            # request a fresh one and retry.
            # NOTE(review): `continue` skips the niter increment, so a
            # persistent failure here could loop — verify the retry
            # always resolves after refactorization.
            logger.warning('Unexpected symbolic factorization')
            dae.factorize = True
            continue
        else:
            # Apply the Newton increment to states (x) and algebraic
            # variables (y).
            inc_x = self.inc[:dae.n]
            inc_y = self.inc[dae.n:dae.m + dae.n]
            dae.x += inc_x
            dae.y += inc_y
        # Convergence measure: infinity norm of the last increment.
        self.err = max(abs(self.inc))
        if np.isnan(self.inc).any():
            logger.error('Iteration error: NaN detected.')
            self.niter = config.maxit + 1
            break
        self.niter += 1
    # Converged iff we stopped within the iteration budget.
    if self.niter <= config.maxit:
        self.convergence = True
self.convergence = True | def function[implicit_step, parameter[self]]:
constant[
Integrate one step using trapezoidal method. Sets convergence and niter flags.
Returns
-------
None
]
variable[config] assign[=] name[self].config
variable[system] assign[=] name[self].system
variable[dae] assign[=] name[self].system.dae
variable[In] assign[=] call[name[spdiag], parameter[binary_operation[list[[<ast.Constant object at 0x7da20c7966e0>]] * name[dae].n]]]
variable[h] assign[=] name[self].h
while <ast.BoolOp object at 0x7da20c794370> begin[:]
if compare[binary_operation[name[self].t - name[self].t_jac] greater_or_equal[>=] constant[5]] begin[:]
name[dae].rebuild assign[=] constant[True]
name[self].t_jac assign[=] name[self].t
if name[dae].rebuild begin[:]
call[name[exec], parameter[name[system].call.int]]
name[dae].rebuild assign[=] constant[False]
if compare[name[config].method equal[==] constant[euler]] begin[:]
name[dae].Ac assign[=] call[name[sparse], parameter[list[[<ast.List object at 0x7da20c7959f0>, <ast.List object at 0x7da20c795c30>]], constant[d]]]
name[dae].q assign[=] binary_operation[binary_operation[name[dae].x - name[self].x0] - binary_operation[name[h] * name[dae].f]]
call[name[dae].reset_Ac, parameter[]]
if name[dae].factorize begin[:]
name[self].F assign[=] call[name[self].solver.symbolic, parameter[name[dae].Ac]]
name[dae].factorize assign[=] constant[False]
name[self].inc assign[=] <ast.UnaryOp object at 0x7da20ed4a8f0>
<ast.Try object at 0x7da1b02d9480>
name[self].err assign[=] call[name[max], parameter[call[name[abs], parameter[name[self].inc]]]]
if call[call[name[np].isnan, parameter[name[self].inc]].any, parameter[]] begin[:]
call[name[logger].error, parameter[constant[Iteration error: NaN detected.]]]
name[self].niter assign[=] binary_operation[name[config].maxit + constant[1]]
break
<ast.AugAssign object at 0x7da1b2346500>
if compare[name[self].niter less_or_equal[<=] name[config].maxit] begin[:]
name[self].convergence assign[=] constant[True] | keyword[def] identifier[implicit_step] ( identifier[self] ):
literal[string]
identifier[config] = identifier[self] . identifier[config]
identifier[system] = identifier[self] . identifier[system]
identifier[dae] = identifier[self] . identifier[system] . identifier[dae]
identifier[In] = identifier[spdiag] ([ literal[int] ]* identifier[dae] . identifier[n] )
identifier[h] = identifier[self] . identifier[h]
keyword[while] identifier[self] . identifier[err] > identifier[config] . identifier[tol] keyword[and] identifier[self] . identifier[niter] < identifier[config] . identifier[maxit] :
keyword[if] identifier[self] . identifier[t] - identifier[self] . identifier[t_jac] >= literal[int] :
identifier[dae] . identifier[rebuild] = keyword[True]
identifier[self] . identifier[t_jac] = identifier[self] . identifier[t]
keyword[elif] identifier[self] . identifier[niter] > literal[int] :
identifier[dae] . identifier[rebuild] = keyword[True]
keyword[elif] identifier[dae] . identifier[factorize] :
identifier[dae] . identifier[rebuild] = keyword[True]
keyword[if] identifier[dae] . identifier[rebuild] :
identifier[exec] ( identifier[system] . identifier[call] . identifier[int] )
identifier[dae] . identifier[rebuild] = keyword[False]
keyword[else] :
identifier[exec] ( identifier[system] . identifier[call] . identifier[int_fg] )
keyword[if] identifier[config] . identifier[method] == literal[string] :
identifier[dae] . identifier[Ac] = identifier[sparse] (
[[ identifier[In] - identifier[h] * identifier[dae] . identifier[Fx] , identifier[dae] . identifier[Gx] ],[- identifier[h] * identifier[dae] . identifier[Fy] , identifier[dae] . identifier[Gy] ]],
literal[string] )
identifier[dae] . identifier[q] = identifier[dae] . identifier[x] - identifier[self] . identifier[x0] - identifier[h] * identifier[dae] . identifier[f]
keyword[elif] identifier[config] . identifier[method] == literal[string] :
identifier[dae] . identifier[Ac] = identifier[sparse] ([[ identifier[In] - identifier[h] * literal[int] * identifier[dae] . identifier[Fx] , identifier[dae] . identifier[Gx] ],
[- identifier[h] * literal[int] * identifier[dae] . identifier[Fy] , identifier[dae] . identifier[Gy] ]], literal[string] )
identifier[dae] . identifier[q] = identifier[dae] . identifier[x] - identifier[self] . identifier[x0] - identifier[h] * literal[int] *( identifier[dae] . identifier[f] + identifier[self] . identifier[f0] )
identifier[dae] . identifier[reset_Ac] ()
keyword[if] identifier[dae] . identifier[factorize] :
identifier[self] . identifier[F] = identifier[self] . identifier[solver] . identifier[symbolic] ( identifier[dae] . identifier[Ac] )
identifier[dae] . identifier[factorize] = keyword[False]
identifier[self] . identifier[inc] =- identifier[matrix] ([ identifier[dae] . identifier[q] , identifier[dae] . identifier[g] ])
keyword[try] :
identifier[N] = identifier[self] . identifier[solver] . identifier[numeric] ( identifier[dae] . identifier[Ac] , identifier[self] . identifier[F] )
identifier[self] . identifier[solver] . identifier[solve] ( identifier[dae] . identifier[Ac] , identifier[self] . identifier[F] , identifier[N] , identifier[self] . identifier[inc] )
keyword[except] identifier[ArithmeticError] :
identifier[logger] . identifier[error] ( literal[string] )
identifier[dae] . identifier[check_diag] ( identifier[dae] . identifier[Gy] , literal[string] )
identifier[dae] . identifier[check_diag] ( identifier[dae] . identifier[Fx] , literal[string] )
identifier[self] . identifier[niter] = identifier[config] . identifier[maxit] + literal[int]
keyword[break]
keyword[except] identifier[ValueError] :
identifier[logger] . identifier[warning] ( literal[string] )
identifier[dae] . identifier[factorize] = keyword[True]
keyword[continue]
keyword[else] :
identifier[inc_x] = identifier[self] . identifier[inc] [: identifier[dae] . identifier[n] ]
identifier[inc_y] = identifier[self] . identifier[inc] [ identifier[dae] . identifier[n] : identifier[dae] . identifier[m] + identifier[dae] . identifier[n] ]
identifier[dae] . identifier[x] += identifier[inc_x]
identifier[dae] . identifier[y] += identifier[inc_y]
identifier[self] . identifier[err] = identifier[max] ( identifier[abs] ( identifier[self] . identifier[inc] ))
keyword[if] identifier[np] . identifier[isnan] ( identifier[self] . identifier[inc] ). identifier[any] ():
identifier[logger] . identifier[error] ( literal[string] )
identifier[self] . identifier[niter] = identifier[config] . identifier[maxit] + literal[int]
keyword[break]
identifier[self] . identifier[niter] += literal[int]
keyword[if] identifier[self] . identifier[niter] <= identifier[config] . identifier[maxit] :
identifier[self] . identifier[convergence] = keyword[True] | def implicit_step(self):
"""
Integrate one step using trapezoidal method. Sets convergence and niter flags.
Returns
-------
None
"""
config = self.config
system = self.system
dae = self.system.dae
# constant short names
In = spdiag([1] * dae.n)
h = self.h
while self.err > config.tol and self.niter < config.maxit:
if self.t - self.t_jac >= 5:
dae.rebuild = True
self.t_jac = self.t # depends on [control=['if'], data=[]]
elif self.niter > 4:
dae.rebuild = True # depends on [control=['if'], data=[]]
elif dae.factorize:
dae.rebuild = True # depends on [control=['if'], data=[]]
# rebuild Jacobian
if dae.rebuild:
exec(system.call.int)
dae.rebuild = False # depends on [control=['if'], data=[]]
else:
exec(system.call.int_fg)
# complete Jacobian matrix dae.Ac
if config.method == 'euler':
dae.Ac = sparse([[In - h * dae.Fx, dae.Gx], [-h * dae.Fy, dae.Gy]], 'd')
dae.q = dae.x - self.x0 - h * dae.f # depends on [control=['if'], data=[]]
elif config.method == 'trapezoidal':
dae.Ac = sparse([[In - h * 0.5 * dae.Fx, dae.Gx], [-h * 0.5 * dae.Fy, dae.Gy]], 'd')
dae.q = dae.x - self.x0 - h * 0.5 * (dae.f + self.f0) # depends on [control=['if'], data=[]]
# windup limiters
dae.reset_Ac()
if dae.factorize:
self.F = self.solver.symbolic(dae.Ac)
dae.factorize = False # depends on [control=['if'], data=[]]
self.inc = -matrix([dae.q, dae.g])
try:
N = self.solver.numeric(dae.Ac, self.F)
self.solver.solve(dae.Ac, self.F, N, self.inc) # depends on [control=['try'], data=[]]
except ArithmeticError:
logger.error('Singular matrix')
dae.check_diag(dae.Gy, 'unamey')
dae.check_diag(dae.Fx, 'unamex')
# force quit
self.niter = config.maxit + 1
break # depends on [control=['except'], data=[]]
except ValueError:
logger.warning('Unexpected symbolic factorization')
dae.factorize = True
continue # depends on [control=['except'], data=[]]
else:
inc_x = self.inc[:dae.n]
inc_y = self.inc[dae.n:dae.m + dae.n]
dae.x += inc_x
dae.y += inc_y
self.err = max(abs(self.inc))
if np.isnan(self.inc).any():
logger.error('Iteration error: NaN detected.')
self.niter = config.maxit + 1
break # depends on [control=['if'], data=[]]
self.niter += 1 # depends on [control=['while'], data=[]]
if self.niter <= config.maxit:
self.convergence = True # depends on [control=['if'], data=[]] |
def copy_to_local(local_path, remote_name, remote_path, demote=True):
    """Copy a path out of a Dusty container onto the local filesystem.

    The local destination must be write-accessible by the user specified
    in mac_username. Raises RuntimeError when the remote path does not
    exist inside the named container.
    """
    if not container_path_exists(remote_name, remote_path):
        raise RuntimeError('ERROR: Path {} does not exist inside container {}.'.format(remote_path, remote_name))
    # Stage the data under a unique name in the shared cp directory so
    # concurrent copies cannot collide.
    token = str(uuid.uuid1())
    copy_path_inside_container(remote_name, remote_path,
                               os.path.join(constants.CONTAINER_CP_DIR, token))
    staged_path = os.path.join(vm_cp_path(remote_name), token)
    sync_local_path_from_vm(local_path, staged_path, demote=demote,
                            is_dir=vm_path_is_directory(staged_path))
constant[Copy a path from inside a Dusty container to a path on the
local filesystem. The path on the local filesystem must be
wrist-accessible by the user specified in mac_username.]
if <ast.UnaryOp object at 0x7da207f99090> begin[:]
<ast.Raise object at 0x7da207f9bfa0>
variable[temp_identifier] assign[=] call[name[str], parameter[call[name[uuid].uuid1, parameter[]]]]
call[name[copy_path_inside_container], parameter[name[remote_name], name[remote_path], call[name[os].path.join, parameter[name[constants].CONTAINER_CP_DIR, name[temp_identifier]]]]]
variable[vm_path] assign[=] call[name[os].path.join, parameter[call[name[vm_cp_path], parameter[name[remote_name]]], name[temp_identifier]]]
variable[is_dir] assign[=] call[name[vm_path_is_directory], parameter[name[vm_path]]]
call[name[sync_local_path_from_vm], parameter[name[local_path], name[vm_path]]] | keyword[def] identifier[copy_to_local] ( identifier[local_path] , identifier[remote_name] , identifier[remote_path] , identifier[demote] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[container_path_exists] ( identifier[remote_name] , identifier[remote_path] ):
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[remote_path] , identifier[remote_name] ))
identifier[temp_identifier] = identifier[str] ( identifier[uuid] . identifier[uuid1] ())
identifier[copy_path_inside_container] ( identifier[remote_name] , identifier[remote_path] , identifier[os] . identifier[path] . identifier[join] ( identifier[constants] . identifier[CONTAINER_CP_DIR] , identifier[temp_identifier] ))
identifier[vm_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[vm_cp_path] ( identifier[remote_name] ), identifier[temp_identifier] )
identifier[is_dir] = identifier[vm_path_is_directory] ( identifier[vm_path] )
identifier[sync_local_path_from_vm] ( identifier[local_path] , identifier[vm_path] , identifier[demote] = identifier[demote] , identifier[is_dir] = identifier[is_dir] ) | def copy_to_local(local_path, remote_name, remote_path, demote=True):
"""Copy a path from inside a Dusty container to a path on the
local filesystem. The path on the local filesystem must be
wrist-accessible by the user specified in mac_username."""
if not container_path_exists(remote_name, remote_path):
raise RuntimeError('ERROR: Path {} does not exist inside container {}.'.format(remote_path, remote_name)) # depends on [control=['if'], data=[]]
temp_identifier = str(uuid.uuid1())
copy_path_inside_container(remote_name, remote_path, os.path.join(constants.CONTAINER_CP_DIR, temp_identifier))
vm_path = os.path.join(vm_cp_path(remote_name), temp_identifier)
is_dir = vm_path_is_directory(vm_path)
sync_local_path_from_vm(local_path, vm_path, demote=demote, is_dir=is_dir) |
def rt_update_module(self, xmldict, module):
    """Refresh the docstrings of the given module's members, types and
    executables from `xmldict`.

    Returns the net number of characters added/removed by replacing the
    docstrings (the sum of the per-member index deltas).
    """
    delta = 0
    for kdecor in xmldict:
        # Keys are either "module.member" or a bare module name (which
        # addresses the module-level member docs).
        if "." in kdecor:
            modname, memname = kdecor.split(".")
        else:
            modname, memname = module.name, None
        if module.name != modname:
            # Entry belongs to some other module; skip it.
            continue
        xlist, docstart, docend = xmldict[kdecor]
        # Dispatch on where the named member lives. Types and
        # executables carry their docstring as a single block right
        # before the signature, so their doc indices must be refreshed
        # too for later updates to stay aligned.
        if memname in module.types:
            target = module.types[memname]
            target.docstring = self.to_doc(xlist, memname)
            delta += self._rt_update_docindices(target, docstart, docend)
        elif memname in module.executables:
            target = module.executables[memname]
            docs = self.to_doc(xlist, memname)
            self.process_execdocs(docs, target, kdecor, False)
            delta += self._rt_update_docindices(target, docstart, docend)
        else:
            # Not a type or executable: these are the module's own
            # member docs.
            self.process_memberdocs(self.to_doc(xlist, modname), module, False)
    return delta
constant[Updates the members, executables and types in the specified module
to have the latest docstring information from the xmldict.
]
variable[delta] assign[=] constant[0]
for taget[name[kdecor]] in starred[name[xmldict]] begin[:]
if compare[constant[.] in name[kdecor]] begin[:]
<ast.Tuple object at 0x7da20c6a8250> assign[=] call[name[kdecor].split, parameter[constant[.]]]
if compare[name[module].name equal[==] name[modname]] begin[:]
<ast.Tuple object at 0x7da20c6aa1a0> assign[=] call[name[xmldict]][name[kdecor]]
if compare[name[memname] in name[module].types] begin[:]
variable[member] assign[=] call[name[module].types][name[memname]]
variable[docs] assign[=] call[name[self].to_doc, parameter[name[xlist], name[memname]]]
name[member].docstring assign[=] name[docs]
<ast.AugAssign object at 0x7da20c6a8310>
return[name[delta]] | keyword[def] identifier[rt_update_module] ( identifier[self] , identifier[xmldict] , identifier[module] ):
literal[string]
identifier[delta] = literal[int]
keyword[for] identifier[kdecor] keyword[in] identifier[xmldict] :
keyword[if] literal[string] keyword[in] identifier[kdecor] :
identifier[modname] , identifier[memname] = identifier[kdecor] . identifier[split] ( literal[string] )
keyword[else] :
identifier[modname] , identifier[memname] = identifier[module] . identifier[name] , keyword[None]
keyword[if] identifier[module] . identifier[name] == identifier[modname] :
identifier[xlist] , identifier[docstart] , identifier[docend] = identifier[xmldict] [ identifier[kdecor] ]
keyword[if] identifier[memname] keyword[in] identifier[module] . identifier[types] :
identifier[member] = identifier[module] . identifier[types] [ identifier[memname] ]
identifier[docs] = identifier[self] . identifier[to_doc] ( identifier[xlist] , identifier[memname] )
identifier[member] . identifier[docstring] = identifier[docs]
identifier[delta] += identifier[self] . identifier[_rt_update_docindices] ( identifier[member] , identifier[docstart] , identifier[docend] )
keyword[elif] identifier[memname] keyword[in] identifier[module] . identifier[executables] :
identifier[member] = identifier[module] . identifier[executables] [ identifier[memname] ]
identifier[docs] = identifier[self] . identifier[to_doc] ( identifier[xlist] , identifier[memname] )
identifier[self] . identifier[process_execdocs] ( identifier[docs] , identifier[member] , identifier[kdecor] , keyword[False] )
identifier[delta] += identifier[self] . identifier[_rt_update_docindices] ( identifier[member] , identifier[docstart] , identifier[docend] )
keyword[else] :
identifier[docs] = identifier[self] . identifier[to_doc] ( identifier[xlist] , identifier[modname] )
identifier[self] . identifier[process_memberdocs] ( identifier[docs] , identifier[module] , keyword[False] )
keyword[return] identifier[delta] | def rt_update_module(self, xmldict, module):
"""Updates the members, executables and types in the specified module
to have the latest docstring information from the xmldict.
"""
#This keeps track of how many character were added/removed by
#updating the docstrings in xmldict.
delta = 0
for kdecor in xmldict:
if '.' in kdecor:
(modname, memname) = kdecor.split('.') # depends on [control=['if'], data=['kdecor']]
else:
(modname, memname) = (module.name, None)
if module.name == modname:
#This tag is relevant to the specified module. Continue
(xlist, docstart, docend) = xmldict[kdecor]
#We only need to check the members, types and executables
#For executables and types, we need to update the docstart and
#docend attributes since their docstrings must come as a single
#block immediately preceding the signature, so that our values
#from the updater will be correct.
if memname in module.types:
member = module.types[memname]
docs = self.to_doc(xlist, memname)
member.docstring = docs
delta += self._rt_update_docindices(member, docstart, docend) # depends on [control=['if'], data=['memname']]
elif memname in module.executables:
member = module.executables[memname]
docs = self.to_doc(xlist, memname)
self.process_execdocs(docs, member, kdecor, False)
delta += self._rt_update_docindices(member, docstart, docend) # depends on [control=['if'], data=['memname']]
else:
#Since it didn't point to anything else, it must be for the
#members of the module.
docs = self.to_doc(xlist, modname)
self.process_memberdocs(docs, module, False) # depends on [control=['if'], data=['modname']] # depends on [control=['for'], data=['kdecor']]
return delta |
def connect(self, interface, event, handler):
    """Subscribe `handler` to a DBus signal on this object's path.

    Returns the subscription id (int) from the underlying bus.
    """
    return self.bus.connect(interface, event, self.object_path, handler)
constant[Connect to a DBus signal. Returns subscription id (int).]
variable[object_path] assign[=] name[self].object_path
return[call[name[self].bus.connect, parameter[name[interface], name[event], name[object_path], name[handler]]]] | keyword[def] identifier[connect] ( identifier[self] , identifier[interface] , identifier[event] , identifier[handler] ):
literal[string]
identifier[object_path] = identifier[self] . identifier[object_path]
keyword[return] identifier[self] . identifier[bus] . identifier[connect] ( identifier[interface] , identifier[event] , identifier[object_path] , identifier[handler] ) | def connect(self, interface, event, handler):
"""Connect to a DBus signal. Returns subscription id (int)."""
object_path = self.object_path
return self.bus.connect(interface, event, object_path, handler) |
def updateDelegates(self):
    """Reset all column delegates to match the current dataframe's dtypes."""
    # Hoist the model/dataframe lookup out of the loop: the original
    # called `self.tableView.model().dataFrame()` twice on every
    # iteration even though it is loop-invariant.
    dataFrame = self.tableView.model().dataFrame()
    for index, column in enumerate(dataFrame.columns):
        self.updateDelegate(index, dataFrame[column].dtype)
constant[reset all delegates]
for taget[tuple[[<ast.Name object at 0x7da1b077b0d0>, <ast.Name object at 0x7da1b077a500>]]] in starred[call[name[enumerate], parameter[call[call[name[self].tableView.model, parameter[]].dataFrame, parameter[]].columns]]] begin[:]
variable[dtype] assign[=] call[call[call[name[self].tableView.model, parameter[]].dataFrame, parameter[]]][name[column]].dtype
call[name[self].updateDelegate, parameter[name[index], name[dtype]]] | keyword[def] identifier[updateDelegates] ( identifier[self] ):
literal[string]
keyword[for] identifier[index] , identifier[column] keyword[in] identifier[enumerate] ( identifier[self] . identifier[tableView] . identifier[model] (). identifier[dataFrame] (). identifier[columns] ):
identifier[dtype] = identifier[self] . identifier[tableView] . identifier[model] (). identifier[dataFrame] ()[ identifier[column] ]. identifier[dtype]
identifier[self] . identifier[updateDelegate] ( identifier[index] , identifier[dtype] ) | def updateDelegates(self):
"""reset all delegates"""
for (index, column) in enumerate(self.tableView.model().dataFrame().columns):
dtype = self.tableView.model().dataFrame()[column].dtype
self.updateDelegate(index, dtype) # depends on [control=['for'], data=[]] |
def get_instruction(self, idx, off=None):
    """
    Get a particular instruction by using (default) the index of the address if specified

    :param idx: index of the instruction (the position in the list of the instruction)
    :type idx: int
    :param off: address of the instruction; when given it overrides `idx`
    :type off: int
    :rtype: an :class:`Instruction` object
    """
    # `is not None` instead of `!= None`: identity is the idiomatic (and
    # operator-overload-safe) sentinel test.
    if off is not None:
        idx = self.off_to_pos(off)
    # Materialize with list() rather than the original pass-through
    # comprehension `[i for i in ...]`; a full list also preserves
    # negative-index support.
    return list(self.get_instructions())[idx]
constant[
Get a particular instruction by using (default) the index of the address if specified
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
:rtype: an :class:`Instruction` object
]
if compare[name[off] not_equal[!=] constant[None]] begin[:]
variable[idx] assign[=] call[name[self].off_to_pos, parameter[name[off]]]
return[call[<ast.ListComp object at 0x7da1b0a4a410>][name[idx]]] | keyword[def] identifier[get_instruction] ( identifier[self] , identifier[idx] , identifier[off] = keyword[None] ):
literal[string]
keyword[if] identifier[off] != keyword[None] :
identifier[idx] = identifier[self] . identifier[off_to_pos] ( identifier[off] )
keyword[return] [ identifier[i] keyword[for] identifier[i] keyword[in] identifier[self] . identifier[get_instructions] ()][ identifier[idx] ] | def get_instruction(self, idx, off=None):
"""
Get a particular instruction by using (default) the index of the address if specified
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
:rtype: an :class:`Instruction` object
"""
if off != None:
idx = self.off_to_pos(off) # depends on [control=['if'], data=['off']]
return [i for i in self.get_instructions()][idx] |
def merge(self, samples_uuid):
    """
    The method to merge the datamodels belonging to different references
    :param samples_uuid: The unique identifier metadata column name to identify the identical samples having different references
    :return: Returns the merged dataframe
    """
    # Stack every data model's metadata so samples can be grouped across
    # references by the shared uuid column.
    all_meta_data = pd.DataFrame()
    for dm in self.data_model:
        all_meta_data = pd.concat([all_meta_data, dm.meta], axis=0)
    # One list of sample names per uuid, ordered by data model.
    group = all_meta_data.groupby([samples_uuid])['sample']
    sample_sets = group.apply(list).values
    merged_df = pd.DataFrame()
    # Transpose sample_sets: one index level per data model position.
    # NOTE(review): assumes every uuid group has the same number of
    # samples as the first one — ragged groups would break zip(*...).
    multi_index = list(map(list, zip(*sample_sets)))
    multi_index_names = list(range(0, len(sample_sets[0])))
    i = 1  # NOTE(review): incremented below but never read — dead counter.
    for pair in sample_sets:
        i += 1
        numbers = list(range(0, len(pair)))
        df_temp = pd.DataFrame()
        for n in numbers:
            # data.loc[pair[n]] may not be found due to the fast loading (full_load = False)
            # NOTE(review): bare except deliberately skips samples that
            # were not fully loaded; consider narrowing to KeyError so
            # unrelated errors are not silently swallowed.
            try:
                df_temp = pd.concat([df_temp, self.data_model[n].data.loc[pair[n]]], axis=1)
            except:
                pass
        # Collapse the side-by-side columns into a single row, letting
        # bfill() fill gaps from whichever reference has the value.
        merged_df = pd.concat([merged_df, df_temp.T.bfill().iloc[[0]]], axis=0)
    # Re-index the merged frame by the per-reference sample names.
    multi_index = np.asarray(multi_index)
    multi_index = pd.MultiIndex.from_arrays(multi_index, names=multi_index_names)
    merged_df.index = multi_index
    return merged_df
constant[
The method to merge the datamodels belonging to different references
:param samples_uuid: The unique identifier metadata column name to identify the identical samples having different references
:return: Returns the merged dataframe
]
variable[all_meta_data] assign[=] call[name[pd].DataFrame, parameter[]]
for taget[name[dm]] in starred[name[self].data_model] begin[:]
variable[all_meta_data] assign[=] call[name[pd].concat, parameter[list[[<ast.Name object at 0x7da1b1adeb60>, <ast.Attribute object at 0x7da1b1add2d0>]]]]
variable[group] assign[=] call[call[name[all_meta_data].groupby, parameter[list[[<ast.Name object at 0x7da1b1a2cb80>]]]]][constant[sample]]
variable[sample_sets] assign[=] call[name[group].apply, parameter[name[list]]].values
variable[merged_df] assign[=] call[name[pd].DataFrame, parameter[]]
variable[multi_index] assign[=] call[name[list], parameter[call[name[map], parameter[name[list], call[name[zip], parameter[<ast.Starred object at 0x7da1b1a2d360>]]]]]]
variable[multi_index_names] assign[=] call[name[list], parameter[call[name[range], parameter[constant[0], call[name[len], parameter[call[name[sample_sets]][constant[0]]]]]]]]
variable[i] assign[=] constant[1]
for taget[name[pair]] in starred[name[sample_sets]] begin[:]
<ast.AugAssign object at 0x7da1b1a2ccd0>
variable[numbers] assign[=] call[name[list], parameter[call[name[range], parameter[constant[0], call[name[len], parameter[name[pair]]]]]]]
variable[df_temp] assign[=] call[name[pd].DataFrame, parameter[]]
for taget[name[n]] in starred[name[numbers]] begin[:]
<ast.Try object at 0x7da1b1a2fbe0>
variable[merged_df] assign[=] call[name[pd].concat, parameter[list[[<ast.Name object at 0x7da1b195e5c0>, <ast.Subscript object at 0x7da1b195e650>]]]]
variable[multi_index] assign[=] call[name[np].asarray, parameter[name[multi_index]]]
variable[multi_index] assign[=] call[name[pd].MultiIndex.from_arrays, parameter[name[multi_index]]]
name[merged_df].index assign[=] name[multi_index]
return[name[merged_df]] | keyword[def] identifier[merge] ( identifier[self] , identifier[samples_uuid] ):
literal[string]
identifier[all_meta_data] = identifier[pd] . identifier[DataFrame] ()
keyword[for] identifier[dm] keyword[in] identifier[self] . identifier[data_model] :
identifier[all_meta_data] = identifier[pd] . identifier[concat] ([ identifier[all_meta_data] , identifier[dm] . identifier[meta] ], identifier[axis] = literal[int] )
identifier[group] = identifier[all_meta_data] . identifier[groupby] ([ identifier[samples_uuid] ])[ literal[string] ]
identifier[sample_sets] = identifier[group] . identifier[apply] ( identifier[list] ). identifier[values]
identifier[merged_df] = identifier[pd] . identifier[DataFrame] ()
identifier[multi_index] = identifier[list] ( identifier[map] ( identifier[list] , identifier[zip] (* identifier[sample_sets] )))
identifier[multi_index_names] = identifier[list] ( identifier[range] ( literal[int] , identifier[len] ( identifier[sample_sets] [ literal[int] ])))
identifier[i] = literal[int]
keyword[for] identifier[pair] keyword[in] identifier[sample_sets] :
identifier[i] += literal[int]
identifier[numbers] = identifier[list] ( identifier[range] ( literal[int] , identifier[len] ( identifier[pair] )))
identifier[df_temp] = identifier[pd] . identifier[DataFrame] ()
keyword[for] identifier[n] keyword[in] identifier[numbers] :
keyword[try] :
identifier[df_temp] = identifier[pd] . identifier[concat] ([ identifier[df_temp] , identifier[self] . identifier[data_model] [ identifier[n] ]. identifier[data] . identifier[loc] [ identifier[pair] [ identifier[n] ]]], identifier[axis] = literal[int] )
keyword[except] :
keyword[pass]
identifier[merged_df] = identifier[pd] . identifier[concat] ([ identifier[merged_df] , identifier[df_temp] . identifier[T] . identifier[bfill] (). identifier[iloc] [[ literal[int] ]]], identifier[axis] = literal[int] )
identifier[multi_index] = identifier[np] . identifier[asarray] ( identifier[multi_index] )
identifier[multi_index] = identifier[pd] . identifier[MultiIndex] . identifier[from_arrays] ( identifier[multi_index] , identifier[names] = identifier[multi_index_names] )
identifier[merged_df] . identifier[index] = identifier[multi_index]
keyword[return] identifier[merged_df] | def merge(self, samples_uuid):
"""
The method to merge the datamodels belonging to different references
:param samples_uuid: The unique identifier metadata column name to identify the identical samples having different references
:return: Returns the merged dataframe
"""
all_meta_data = pd.DataFrame()
for dm in self.data_model:
all_meta_data = pd.concat([all_meta_data, dm.meta], axis=0) # depends on [control=['for'], data=['dm']]
group = all_meta_data.groupby([samples_uuid])['sample']
sample_sets = group.apply(list).values
merged_df = pd.DataFrame()
multi_index = list(map(list, zip(*sample_sets)))
multi_index_names = list(range(0, len(sample_sets[0])))
i = 1
for pair in sample_sets:
i += 1
numbers = list(range(0, len(pair)))
df_temp = pd.DataFrame()
for n in numbers:
try: # data.loc[pair[n]] may not be found due to the fast loading (full_load = False)
df_temp = pd.concat([df_temp, self.data_model[n].data.loc[pair[n]]], axis=1) # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['n']]
merged_df = pd.concat([merged_df, df_temp.T.bfill().iloc[[0]]], axis=0) # depends on [control=['for'], data=['pair']]
multi_index = np.asarray(multi_index)
multi_index = pd.MultiIndex.from_arrays(multi_index, names=multi_index_names)
merged_df.index = multi_index
return merged_df |
def channels_voice_agent_user_display_create(self, agent_id, user_id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/voice-api/partner_edition#open-a-users-profile-in-an-agents-browser"
api_path = "/api/v2/channels/voice/agents/{agent_id}/users/{user_id}/display.json"
api_path = api_path.format(agent_id=agent_id, user_id=user_id)
return self.call(api_path, method="POST", data=data, **kwargs) | def function[channels_voice_agent_user_display_create, parameter[self, agent_id, user_id, data]]:
constant[https://developer.zendesk.com/rest_api/docs/voice-api/partner_edition#open-a-users-profile-in-an-agents-browser]
variable[api_path] assign[=] constant[/api/v2/channels/voice/agents/{agent_id}/users/{user_id}/display.json]
variable[api_path] assign[=] call[name[api_path].format, parameter[]]
return[call[name[self].call, parameter[name[api_path]]]] | keyword[def] identifier[channels_voice_agent_user_display_create] ( identifier[self] , identifier[agent_id] , identifier[user_id] , identifier[data] ,** identifier[kwargs] ):
literal[string]
identifier[api_path] = literal[string]
identifier[api_path] = identifier[api_path] . identifier[format] ( identifier[agent_id] = identifier[agent_id] , identifier[user_id] = identifier[user_id] )
keyword[return] identifier[self] . identifier[call] ( identifier[api_path] , identifier[method] = literal[string] , identifier[data] = identifier[data] ,** identifier[kwargs] ) | def channels_voice_agent_user_display_create(self, agent_id, user_id, data, **kwargs):
"""https://developer.zendesk.com/rest_api/docs/voice-api/partner_edition#open-a-users-profile-in-an-agents-browser"""
api_path = '/api/v2/channels/voice/agents/{agent_id}/users/{user_id}/display.json'
api_path = api_path.format(agent_id=agent_id, user_id=user_id)
return self.call(api_path, method='POST', data=data, **kwargs) |
def list_apps(self, cmd=None, embed_tasks=False, embed_counts=False,
embed_deployments=False, embed_readiness=False,
embed_last_task_failure=False, embed_failures=False,
embed_task_stats=False, app_id=None, label=None, **kwargs):
"""List all apps.
:param str cmd: if passed, only show apps with a matching `cmd`
:param bool embed_tasks: embed tasks in result
:param bool embed_counts: embed all task counts
:param bool embed_deployments: embed all deployment identifier
:param bool embed_readiness: embed all readiness check results
:param bool embed_last_task_failure: embeds the last task failure
:param bool embed_failures: shorthand for embed_last_task_failure
:param bool embed_task_stats: embed task stats in result
:param str app_id: if passed, only show apps with an 'id' that matches or contains this value
:param str label: if passed, only show apps with the selected labels
:param kwargs: arbitrary search filters
:returns: list of applications
:rtype: list[:class:`marathon.models.app.MarathonApp`]
"""
params = {}
if cmd:
params['cmd'] = cmd
if app_id:
params['id'] = app_id
if label:
params['label'] = label
embed_params = {
'app.tasks': embed_tasks,
'app.counts': embed_counts,
'app.deployments': embed_deployments,
'app.readiness': embed_readiness,
'app.lastTaskFailure': embed_last_task_failure,
'app.failures': embed_failures,
'app.taskStats': embed_task_stats
}
filtered_embed_params = [k for (k, v) in embed_params.items() if v]
if filtered_embed_params:
params['embed'] = filtered_embed_params
response = self._do_request('GET', '/v2/apps', params=params)
apps = self._parse_response(
response, MarathonApp, is_list=True, resource_name='apps')
for k, v in kwargs.items():
apps = [o for o in apps if getattr(o, k) == v]
return apps | def function[list_apps, parameter[self, cmd, embed_tasks, embed_counts, embed_deployments, embed_readiness, embed_last_task_failure, embed_failures, embed_task_stats, app_id, label]]:
constant[List all apps.
:param str cmd: if passed, only show apps with a matching `cmd`
:param bool embed_tasks: embed tasks in result
:param bool embed_counts: embed all task counts
:param bool embed_deployments: embed all deployment identifier
:param bool embed_readiness: embed all readiness check results
:param bool embed_last_task_failure: embeds the last task failure
:param bool embed_failures: shorthand for embed_last_task_failure
:param bool embed_task_stats: embed task stats in result
:param str app_id: if passed, only show apps with an 'id' that matches or contains this value
:param str label: if passed, only show apps with the selected labels
:param kwargs: arbitrary search filters
:returns: list of applications
:rtype: list[:class:`marathon.models.app.MarathonApp`]
]
variable[params] assign[=] dictionary[[], []]
if name[cmd] begin[:]
call[name[params]][constant[cmd]] assign[=] name[cmd]
if name[app_id] begin[:]
call[name[params]][constant[id]] assign[=] name[app_id]
if name[label] begin[:]
call[name[params]][constant[label]] assign[=] name[label]
variable[embed_params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0f58130>, <ast.Constant object at 0x7da1b0f5ba00>, <ast.Constant object at 0x7da1b0f5bca0>, <ast.Constant object at 0x7da1b0f59b70>, <ast.Constant object at 0x7da1b0f5a9e0>, <ast.Constant object at 0x7da1b0f59690>, <ast.Constant object at 0x7da1b0f599c0>], [<ast.Name object at 0x7da1b0f5ab00>, <ast.Name object at 0x7da1b0f5aa40>, <ast.Name object at 0x7da1b0f5ab60>, <ast.Name object at 0x7da1b0f58160>, <ast.Name object at 0x7da1b0f5b6d0>, <ast.Name object at 0x7da1b0f5ae00>, <ast.Name object at 0x7da1b0f5ad70>]]
variable[filtered_embed_params] assign[=] <ast.ListComp object at 0x7da1b0f5ac80>
if name[filtered_embed_params] begin[:]
call[name[params]][constant[embed]] assign[=] name[filtered_embed_params]
variable[response] assign[=] call[name[self]._do_request, parameter[constant[GET], constant[/v2/apps]]]
variable[apps] assign[=] call[name[self]._parse_response, parameter[name[response], name[MarathonApp]]]
for taget[tuple[[<ast.Name object at 0x7da1b0f5a770>, <ast.Name object at 0x7da1b0f5b430>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:]
variable[apps] assign[=] <ast.ListComp object at 0x7da1b0f5aef0>
return[name[apps]] | keyword[def] identifier[list_apps] ( identifier[self] , identifier[cmd] = keyword[None] , identifier[embed_tasks] = keyword[False] , identifier[embed_counts] = keyword[False] ,
identifier[embed_deployments] = keyword[False] , identifier[embed_readiness] = keyword[False] ,
identifier[embed_last_task_failure] = keyword[False] , identifier[embed_failures] = keyword[False] ,
identifier[embed_task_stats] = keyword[False] , identifier[app_id] = keyword[None] , identifier[label] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[params] ={}
keyword[if] identifier[cmd] :
identifier[params] [ literal[string] ]= identifier[cmd]
keyword[if] identifier[app_id] :
identifier[params] [ literal[string] ]= identifier[app_id]
keyword[if] identifier[label] :
identifier[params] [ literal[string] ]= identifier[label]
identifier[embed_params] ={
literal[string] : identifier[embed_tasks] ,
literal[string] : identifier[embed_counts] ,
literal[string] : identifier[embed_deployments] ,
literal[string] : identifier[embed_readiness] ,
literal[string] : identifier[embed_last_task_failure] ,
literal[string] : identifier[embed_failures] ,
literal[string] : identifier[embed_task_stats]
}
identifier[filtered_embed_params] =[ identifier[k] keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[embed_params] . identifier[items] () keyword[if] identifier[v] ]
keyword[if] identifier[filtered_embed_params] :
identifier[params] [ literal[string] ]= identifier[filtered_embed_params]
identifier[response] = identifier[self] . identifier[_do_request] ( literal[string] , literal[string] , identifier[params] = identifier[params] )
identifier[apps] = identifier[self] . identifier[_parse_response] (
identifier[response] , identifier[MarathonApp] , identifier[is_list] = keyword[True] , identifier[resource_name] = literal[string] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] ():
identifier[apps] =[ identifier[o] keyword[for] identifier[o] keyword[in] identifier[apps] keyword[if] identifier[getattr] ( identifier[o] , identifier[k] )== identifier[v] ]
keyword[return] identifier[apps] | def list_apps(self, cmd=None, embed_tasks=False, embed_counts=False, embed_deployments=False, embed_readiness=False, embed_last_task_failure=False, embed_failures=False, embed_task_stats=False, app_id=None, label=None, **kwargs):
"""List all apps.
:param str cmd: if passed, only show apps with a matching `cmd`
:param bool embed_tasks: embed tasks in result
:param bool embed_counts: embed all task counts
:param bool embed_deployments: embed all deployment identifier
:param bool embed_readiness: embed all readiness check results
:param bool embed_last_task_failure: embeds the last task failure
:param bool embed_failures: shorthand for embed_last_task_failure
:param bool embed_task_stats: embed task stats in result
:param str app_id: if passed, only show apps with an 'id' that matches or contains this value
:param str label: if passed, only show apps with the selected labels
:param kwargs: arbitrary search filters
:returns: list of applications
:rtype: list[:class:`marathon.models.app.MarathonApp`]
"""
params = {}
if cmd:
params['cmd'] = cmd # depends on [control=['if'], data=[]]
if app_id:
params['id'] = app_id # depends on [control=['if'], data=[]]
if label:
params['label'] = label # depends on [control=['if'], data=[]]
embed_params = {'app.tasks': embed_tasks, 'app.counts': embed_counts, 'app.deployments': embed_deployments, 'app.readiness': embed_readiness, 'app.lastTaskFailure': embed_last_task_failure, 'app.failures': embed_failures, 'app.taskStats': embed_task_stats}
filtered_embed_params = [k for (k, v) in embed_params.items() if v]
if filtered_embed_params:
params['embed'] = filtered_embed_params # depends on [control=['if'], data=[]]
response = self._do_request('GET', '/v2/apps', params=params)
apps = self._parse_response(response, MarathonApp, is_list=True, resource_name='apps')
for (k, v) in kwargs.items():
apps = [o for o in apps if getattr(o, k) == v] # depends on [control=['for'], data=[]]
return apps |
def _handle_key_value(t_dict, key, value):
"""
Function to handle key has multi value, and return the values as list.
"""
if key in t_dict:
val = t_dict[key]
if isinstance(val, str):
val = [val]
val.append(value)
return val
return value | def function[_handle_key_value, parameter[t_dict, key, value]]:
constant[
Function to handle key has multi value, and return the values as list.
]
if compare[name[key] in name[t_dict]] begin[:]
variable[val] assign[=] call[name[t_dict]][name[key]]
if call[name[isinstance], parameter[name[val], name[str]]] begin[:]
variable[val] assign[=] list[[<ast.Name object at 0x7da20c992e00>]]
call[name[val].append, parameter[name[value]]]
return[name[val]]
return[name[value]] | keyword[def] identifier[_handle_key_value] ( identifier[t_dict] , identifier[key] , identifier[value] ):
literal[string]
keyword[if] identifier[key] keyword[in] identifier[t_dict] :
identifier[val] = identifier[t_dict] [ identifier[key] ]
keyword[if] identifier[isinstance] ( identifier[val] , identifier[str] ):
identifier[val] =[ identifier[val] ]
identifier[val] . identifier[append] ( identifier[value] )
keyword[return] identifier[val]
keyword[return] identifier[value] | def _handle_key_value(t_dict, key, value):
"""
Function to handle key has multi value, and return the values as list.
"""
if key in t_dict:
val = t_dict[key]
if isinstance(val, str):
val = [val] # depends on [control=['if'], data=[]]
val.append(value)
return val # depends on [control=['if'], data=['key', 't_dict']]
return value |
def edit_reals(
self,
id_vip,
method_bal,
reals,
reals_prioritys,
reals_weights,
alter_priority=0):
"""Execute the script 'gerador_vips' several times with options -real, -add and -del to adjust vip request reals.
:param id_vip: Identifier of the VIP. Integer value and greater than zero.
:param method_bal: method_bal.
:param reals: List of reals. Ex: [{'real_name':'Teste1', 'real_ip':'10.10.10.1'},{'real_name':'Teste2', 'real_ip':'10.10.10.2'}]
:param reals_prioritys: List of reals_priority. Ex: ['1','5','3'].
:param reals_weights: List of reals_weight. Ex: ['1','5','3'].
:param alter_priority: 1 if priority has changed and 0 if hasn't changed.
:return: None
:raise VipNaoExisteError: Request VIP not registered.
:raise InvalidParameterError: Identifier of the request is invalid or null VIP.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
:raise EnvironmentVipError: The combination of finality, client and environment is invalid.
:raise InvalidTimeoutValueError: The value of timeout is invalid.
:raise InvalidBalMethodValueError: The value of method_bal is invalid.
:raise InvalidCacheValueError: The value of cache is invalid.
:raise InvalidPersistenceValueError: The value of persistence is invalid.
:raise InvalidPriorityValueError: One of the priority values is invalid.
:raise EquipamentoNaoExisteError: The equipment associated with this Vip Request doesn't exist.
:raise IpEquipmentError: Association between equipment and ip of this Vip Request doesn't exist.
:raise IpError: IP not registered.
:raise RealServerPriorityError: Vip Request priority list has an error.
:raise RealServerWeightError: Vip Request weight list has an error.
:raise RealServerPortError: Vip Request port list has an error.
:raise RealParameterValueError: Vip Request real server parameter list has an error.
:raise RealServerScriptError: Vip Request real server script execution error.
"""
if not is_valid_int_param(id_vip):
raise InvalidParameterError(
u'The identifier of vip is invalid or was not informed.')
vip_map = dict()
vip_map['vip_id'] = id_vip
# vip_map['metodo_bal'] = method_bal
vip_map['reals'] = {'real': reals}
vip_map['reals_prioritys'] = {'reals_priority': reals_prioritys}
vip_map['reals_weights'] = {'reals_weight': reals_weights}
vip_map['alter_priority'] = alter_priority
url = 'vip/real/edit/'
code, xml = self.submit({'vip': vip_map}, 'PUT', url)
return self.response(code, xml) | def function[edit_reals, parameter[self, id_vip, method_bal, reals, reals_prioritys, reals_weights, alter_priority]]:
constant[Execute the script 'gerador_vips' several times with options -real, -add and -del to adjust vip request reals.
:param id_vip: Identifier of the VIP. Integer value and greater than zero.
:param method_bal: method_bal.
:param reals: List of reals. Ex: [{'real_name':'Teste1', 'real_ip':'10.10.10.1'},{'real_name':'Teste2', 'real_ip':'10.10.10.2'}]
:param reals_prioritys: List of reals_priority. Ex: ['1','5','3'].
:param reals_weights: List of reals_weight. Ex: ['1','5','3'].
:param alter_priority: 1 if priority has changed and 0 if hasn't changed.
:return: None
:raise VipNaoExisteError: Request VIP not registered.
:raise InvalidParameterError: Identifier of the request is invalid or null VIP.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
:raise EnvironmentVipError: The combination of finality, client and environment is invalid.
:raise InvalidTimeoutValueError: The value of timeout is invalid.
:raise InvalidBalMethodValueError: The value of method_bal is invalid.
:raise InvalidCacheValueError: The value of cache is invalid.
:raise InvalidPersistenceValueError: The value of persistence is invalid.
:raise InvalidPriorityValueError: One of the priority values is invalid.
:raise EquipamentoNaoExisteError: The equipment associated with this Vip Request doesn't exist.
:raise IpEquipmentError: Association between equipment and ip of this Vip Request doesn't exist.
:raise IpError: IP not registered.
:raise RealServerPriorityError: Vip Request priority list has an error.
:raise RealServerWeightError: Vip Request weight list has an error.
:raise RealServerPortError: Vip Request port list has an error.
:raise RealParameterValueError: Vip Request real server parameter list has an error.
:raise RealServerScriptError: Vip Request real server script execution error.
]
if <ast.UnaryOp object at 0x7da2047ebb50> begin[:]
<ast.Raise object at 0x7da2047e92a0>
variable[vip_map] assign[=] call[name[dict], parameter[]]
call[name[vip_map]][constant[vip_id]] assign[=] name[id_vip]
call[name[vip_map]][constant[reals]] assign[=] dictionary[[<ast.Constant object at 0x7da2047eb760>], [<ast.Name object at 0x7da2047eac50>]]
call[name[vip_map]][constant[reals_prioritys]] assign[=] dictionary[[<ast.Constant object at 0x7da1b2346620>], [<ast.Name object at 0x7da1b23442e0>]]
call[name[vip_map]][constant[reals_weights]] assign[=] dictionary[[<ast.Constant object at 0x7da1b2344520>], [<ast.Name object at 0x7da1b2345d80>]]
call[name[vip_map]][constant[alter_priority]] assign[=] name[alter_priority]
variable[url] assign[=] constant[vip/real/edit/]
<ast.Tuple object at 0x7da1b2346950> assign[=] call[name[self].submit, parameter[dictionary[[<ast.Constant object at 0x7da1b2347010>], [<ast.Name object at 0x7da1b23456f0>]], constant[PUT], name[url]]]
return[call[name[self].response, parameter[name[code], name[xml]]]] | keyword[def] identifier[edit_reals] (
identifier[self] ,
identifier[id_vip] ,
identifier[method_bal] ,
identifier[reals] ,
identifier[reals_prioritys] ,
identifier[reals_weights] ,
identifier[alter_priority] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[is_valid_int_param] ( identifier[id_vip] ):
keyword[raise] identifier[InvalidParameterError] (
literal[string] )
identifier[vip_map] = identifier[dict] ()
identifier[vip_map] [ literal[string] ]= identifier[id_vip]
identifier[vip_map] [ literal[string] ]={ literal[string] : identifier[reals] }
identifier[vip_map] [ literal[string] ]={ literal[string] : identifier[reals_prioritys] }
identifier[vip_map] [ literal[string] ]={ literal[string] : identifier[reals_weights] }
identifier[vip_map] [ literal[string] ]= identifier[alter_priority]
identifier[url] = literal[string]
identifier[code] , identifier[xml] = identifier[self] . identifier[submit] ({ literal[string] : identifier[vip_map] }, literal[string] , identifier[url] )
keyword[return] identifier[self] . identifier[response] ( identifier[code] , identifier[xml] ) | def edit_reals(self, id_vip, method_bal, reals, reals_prioritys, reals_weights, alter_priority=0):
"""Execute the script 'gerador_vips' several times with options -real, -add and -del to adjust vip request reals.
:param id_vip: Identifier of the VIP. Integer value and greater than zero.
:param method_bal: method_bal.
:param reals: List of reals. Ex: [{'real_name':'Teste1', 'real_ip':'10.10.10.1'},{'real_name':'Teste2', 'real_ip':'10.10.10.2'}]
:param reals_prioritys: List of reals_priority. Ex: ['1','5','3'].
:param reals_weights: List of reals_weight. Ex: ['1','5','3'].
:param alter_priority: 1 if priority has changed and 0 if hasn't changed.
:return: None
:raise VipNaoExisteError: Request VIP not registered.
:raise InvalidParameterError: Identifier of the request is invalid or null VIP.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
:raise EnvironmentVipError: The combination of finality, client and environment is invalid.
:raise InvalidTimeoutValueError: The value of timeout is invalid.
:raise InvalidBalMethodValueError: The value of method_bal is invalid.
:raise InvalidCacheValueError: The value of cache is invalid.
:raise InvalidPersistenceValueError: The value of persistence is invalid.
:raise InvalidPriorityValueError: One of the priority values is invalid.
:raise EquipamentoNaoExisteError: The equipment associated with this Vip Request doesn't exist.
:raise IpEquipmentError: Association between equipment and ip of this Vip Request doesn't exist.
:raise IpError: IP not registered.
:raise RealServerPriorityError: Vip Request priority list has an error.
:raise RealServerWeightError: Vip Request weight list has an error.
:raise RealServerPortError: Vip Request port list has an error.
:raise RealParameterValueError: Vip Request real server parameter list has an error.
:raise RealServerScriptError: Vip Request real server script execution error.
"""
if not is_valid_int_param(id_vip):
raise InvalidParameterError(u'The identifier of vip is invalid or was not informed.') # depends on [control=['if'], data=[]]
vip_map = dict()
vip_map['vip_id'] = id_vip
# vip_map['metodo_bal'] = method_bal
vip_map['reals'] = {'real': reals}
vip_map['reals_prioritys'] = {'reals_priority': reals_prioritys}
vip_map['reals_weights'] = {'reals_weight': reals_weights}
vip_map['alter_priority'] = alter_priority
url = 'vip/real/edit/'
(code, xml) = self.submit({'vip': vip_map}, 'PUT', url)
return self.response(code, xml) |
def _execute(self, session=None):
"""
Initializes all components required to run a dag for a specified date range and
calls helper method to execute the tasks.
"""
ti_status = BackfillJob._DagRunTaskStatus()
start_date = self.bf_start_date
# Get intervals between the start/end dates, which will turn into dag runs
run_dates = self.dag.get_run_dates(start_date=start_date,
end_date=self.bf_end_date)
if self.run_backwards:
tasks_that_depend_on_past = [t.task_id for t in self.dag.task_dict.values() if t.depends_on_past]
if tasks_that_depend_on_past:
raise AirflowException(
'You cannot backfill backwards because one or more tasks depend_on_past: {}'.format(
",".join(tasks_that_depend_on_past)))
run_dates = run_dates[::-1]
if len(run_dates) == 0:
self.log.info("No run dates were found for the given dates and dag interval.")
return
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
ti_status.total_runs = len(run_dates) # total dag runs in backfill
try:
remaining_dates = ti_status.total_runs
while remaining_dates > 0:
dates_to_process = [run_date for run_date in run_dates
if run_date not in ti_status.executed_dag_run_dates]
self._execute_for_run_dates(run_dates=dates_to_process,
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
remaining_dates = (
ti_status.total_runs - len(ti_status.executed_dag_run_dates)
)
err = self._collect_errors(ti_status=ti_status, session=session)
if err:
raise AirflowException(err)
if remaining_dates > 0:
self.log.info(
"max_active_runs limit for dag %s has been reached "
" - waiting for other dag runs to finish",
self.dag_id
)
time.sleep(self.delay_on_limit_secs)
except (KeyboardInterrupt, SystemExit):
self.log.warning("Backfill terminated by user.")
# TODO: we will need to terminate running task instances and set the
# state to failed.
self._set_unfinished_dag_runs_to_failed(ti_status.active_runs)
finally:
session.commit()
executor.end()
self.log.info("Backfill done. Exiting.") | def function[_execute, parameter[self, session]]:
constant[
Initializes all components required to run a dag for a specified date range and
calls helper method to execute the tasks.
]
variable[ti_status] assign[=] call[name[BackfillJob]._DagRunTaskStatus, parameter[]]
variable[start_date] assign[=] name[self].bf_start_date
variable[run_dates] assign[=] call[name[self].dag.get_run_dates, parameter[]]
if name[self].run_backwards begin[:]
variable[tasks_that_depend_on_past] assign[=] <ast.ListComp object at 0x7da1b03f80d0>
if name[tasks_that_depend_on_past] begin[:]
<ast.Raise object at 0x7da20c6c6140>
variable[run_dates] assign[=] call[name[run_dates]][<ast.Slice object at 0x7da20c6c6170>]
if compare[call[name[len], parameter[name[run_dates]]] equal[==] constant[0]] begin[:]
call[name[self].log.info, parameter[constant[No run dates were found for the given dates and dag interval.]]]
return[None]
variable[pickle_id] assign[=] constant[None]
if <ast.BoolOp object at 0x7da20c6c5390> begin[:]
variable[pickle] assign[=] call[name[DagPickle], parameter[name[self].dag]]
call[name[session].add, parameter[name[pickle]]]
call[name[session].commit, parameter[]]
variable[pickle_id] assign[=] name[pickle].id
variable[executor] assign[=] name[self].executor
call[name[executor].start, parameter[]]
name[ti_status].total_runs assign[=] call[name[len], parameter[name[run_dates]]]
<ast.Try object at 0x7da20c6c40a0>
call[name[self].log.info, parameter[constant[Backfill done. Exiting.]]] | keyword[def] identifier[_execute] ( identifier[self] , identifier[session] = keyword[None] ):
literal[string]
identifier[ti_status] = identifier[BackfillJob] . identifier[_DagRunTaskStatus] ()
identifier[start_date] = identifier[self] . identifier[bf_start_date]
identifier[run_dates] = identifier[self] . identifier[dag] . identifier[get_run_dates] ( identifier[start_date] = identifier[start_date] ,
identifier[end_date] = identifier[self] . identifier[bf_end_date] )
keyword[if] identifier[self] . identifier[run_backwards] :
identifier[tasks_that_depend_on_past] =[ identifier[t] . identifier[task_id] keyword[for] identifier[t] keyword[in] identifier[self] . identifier[dag] . identifier[task_dict] . identifier[values] () keyword[if] identifier[t] . identifier[depends_on_past] ]
keyword[if] identifier[tasks_that_depend_on_past] :
keyword[raise] identifier[AirflowException] (
literal[string] . identifier[format] (
literal[string] . identifier[join] ( identifier[tasks_that_depend_on_past] )))
identifier[run_dates] = identifier[run_dates] [::- literal[int] ]
keyword[if] identifier[len] ( identifier[run_dates] )== literal[int] :
identifier[self] . identifier[log] . identifier[info] ( literal[string] )
keyword[return]
identifier[pickle_id] = keyword[None]
keyword[if] keyword[not] identifier[self] . identifier[donot_pickle] keyword[and] identifier[self] . identifier[executor] . identifier[__class__] keyword[not] keyword[in] (
identifier[executors] . identifier[LocalExecutor] , identifier[executors] . identifier[SequentialExecutor] ):
identifier[pickle] = identifier[DagPickle] ( identifier[self] . identifier[dag] )
identifier[session] . identifier[add] ( identifier[pickle] )
identifier[session] . identifier[commit] ()
identifier[pickle_id] = identifier[pickle] . identifier[id]
identifier[executor] = identifier[self] . identifier[executor]
identifier[executor] . identifier[start] ()
identifier[ti_status] . identifier[total_runs] = identifier[len] ( identifier[run_dates] )
keyword[try] :
identifier[remaining_dates] = identifier[ti_status] . identifier[total_runs]
keyword[while] identifier[remaining_dates] > literal[int] :
identifier[dates_to_process] =[ identifier[run_date] keyword[for] identifier[run_date] keyword[in] identifier[run_dates]
keyword[if] identifier[run_date] keyword[not] keyword[in] identifier[ti_status] . identifier[executed_dag_run_dates] ]
identifier[self] . identifier[_execute_for_run_dates] ( identifier[run_dates] = identifier[dates_to_process] ,
identifier[ti_status] = identifier[ti_status] ,
identifier[executor] = identifier[executor] ,
identifier[pickle_id] = identifier[pickle_id] ,
identifier[start_date] = identifier[start_date] ,
identifier[session] = identifier[session] )
identifier[remaining_dates] =(
identifier[ti_status] . identifier[total_runs] - identifier[len] ( identifier[ti_status] . identifier[executed_dag_run_dates] )
)
identifier[err] = identifier[self] . identifier[_collect_errors] ( identifier[ti_status] = identifier[ti_status] , identifier[session] = identifier[session] )
keyword[if] identifier[err] :
keyword[raise] identifier[AirflowException] ( identifier[err] )
keyword[if] identifier[remaining_dates] > literal[int] :
identifier[self] . identifier[log] . identifier[info] (
literal[string]
literal[string] ,
identifier[self] . identifier[dag_id]
)
identifier[time] . identifier[sleep] ( identifier[self] . identifier[delay_on_limit_secs] )
keyword[except] ( identifier[KeyboardInterrupt] , identifier[SystemExit] ):
identifier[self] . identifier[log] . identifier[warning] ( literal[string] )
identifier[self] . identifier[_set_unfinished_dag_runs_to_failed] ( identifier[ti_status] . identifier[active_runs] )
keyword[finally] :
identifier[session] . identifier[commit] ()
identifier[executor] . identifier[end] ()
identifier[self] . identifier[log] . identifier[info] ( literal[string] ) | def _execute(self, session=None):
"""
Initializes all components required to run a dag for a specified date range and
calls helper method to execute the tasks.
"""
ti_status = BackfillJob._DagRunTaskStatus()
start_date = self.bf_start_date
# Get intervals between the start/end dates, which will turn into dag runs
run_dates = self.dag.get_run_dates(start_date=start_date, end_date=self.bf_end_date)
if self.run_backwards:
tasks_that_depend_on_past = [t.task_id for t in self.dag.task_dict.values() if t.depends_on_past]
if tasks_that_depend_on_past:
raise AirflowException('You cannot backfill backwards because one or more tasks depend_on_past: {}'.format(','.join(tasks_that_depend_on_past))) # depends on [control=['if'], data=[]]
run_dates = run_dates[::-1] # depends on [control=['if'], data=[]]
if len(run_dates) == 0:
self.log.info('No run dates were found for the given dates and dag interval.')
return # depends on [control=['if'], data=[]]
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (executors.LocalExecutor, executors.SequentialExecutor):
pickle = DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id # depends on [control=['if'], data=[]]
executor = self.executor
executor.start()
ti_status.total_runs = len(run_dates) # total dag runs in backfill
try:
remaining_dates = ti_status.total_runs
while remaining_dates > 0:
dates_to_process = [run_date for run_date in run_dates if run_date not in ti_status.executed_dag_run_dates]
self._execute_for_run_dates(run_dates=dates_to_process, ti_status=ti_status, executor=executor, pickle_id=pickle_id, start_date=start_date, session=session)
remaining_dates = ti_status.total_runs - len(ti_status.executed_dag_run_dates)
err = self._collect_errors(ti_status=ti_status, session=session)
if err:
raise AirflowException(err) # depends on [control=['if'], data=[]]
if remaining_dates > 0:
self.log.info('max_active_runs limit for dag %s has been reached - waiting for other dag runs to finish', self.dag_id)
time.sleep(self.delay_on_limit_secs) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=['remaining_dates']] # depends on [control=['try'], data=[]]
except (KeyboardInterrupt, SystemExit):
self.log.warning('Backfill terminated by user.')
# TODO: we will need to terminate running task instances and set the
# state to failed.
self._set_unfinished_dag_runs_to_failed(ti_status.active_runs) # depends on [control=['except'], data=[]]
finally:
session.commit()
executor.end()
self.log.info('Backfill done. Exiting.') |
def in_filter_get(self, address):
"""This method gets in-bound filters of the specified neighbor.
``address`` specifies the IP address of the neighbor.
Returns a list object containing an instance of Filter sub-class
"""
func_name = 'neighbor.in_filter.get'
param = {
neighbors.IP_ADDRESS: address,
}
return call(func_name, **param) | def function[in_filter_get, parameter[self, address]]:
constant[This method gets in-bound filters of the specified neighbor.
``address`` specifies the IP address of the neighbor.
Returns a list object containing an instance of Filter sub-class
]
variable[func_name] assign[=] constant[neighbor.in_filter.get]
variable[param] assign[=] dictionary[[<ast.Attribute object at 0x7da1b1a56800>], [<ast.Name object at 0x7da1b1a57610>]]
return[call[name[call], parameter[name[func_name]]]] | keyword[def] identifier[in_filter_get] ( identifier[self] , identifier[address] ):
literal[string]
identifier[func_name] = literal[string]
identifier[param] ={
identifier[neighbors] . identifier[IP_ADDRESS] : identifier[address] ,
}
keyword[return] identifier[call] ( identifier[func_name] ,** identifier[param] ) | def in_filter_get(self, address):
"""This method gets in-bound filters of the specified neighbor.
``address`` specifies the IP address of the neighbor.
Returns a list object containing an instance of Filter sub-class
"""
func_name = 'neighbor.in_filter.get'
param = {neighbors.IP_ADDRESS: address}
return call(func_name, **param) |
def rst_to_pypi(contents):
"""Convert the given GitHub RST contents to PyPi RST contents (since some RST directives are not available in PyPi).
Args:
contents (str): The GitHub compatible RST contents.
Returns:
str: The PyPi compatible RST contents.
"""
# The PyPi description does not support the SVG file type.
contents = contents.replace(".svg?pypi=png.from.svg", ".png")
# Convert ``<br class="title">`` to a H1 title
asterisks_length = len(PackageHelper.get_name())
asterisks = "*" * asterisks_length
title = asterisks + "\n" + PackageHelper.get_name() + "\n" + asterisks;
contents = re.sub(r"(\.\. raw\:\: html\n)(\n {2,4})(\<br class=\"title\"\>)", title, contents)
# The PyPi description does not support raw HTML
contents = re.sub(r"(\.\. raw\:\: html\n)((\n {2,4})([A-Za-z0-9<>\ =\"\/])*)*", "", contents)
return contents | def function[rst_to_pypi, parameter[contents]]:
constant[Convert the given GitHub RST contents to PyPi RST contents (since some RST directives are not available in PyPi).
Args:
contents (str): The GitHub compatible RST contents.
Returns:
str: The PyPi compatible RST contents.
]
variable[contents] assign[=] call[name[contents].replace, parameter[constant[.svg?pypi=png.from.svg], constant[.png]]]
variable[asterisks_length] assign[=] call[name[len], parameter[call[name[PackageHelper].get_name, parameter[]]]]
variable[asterisks] assign[=] binary_operation[constant[*] * name[asterisks_length]]
variable[title] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[asterisks] + constant[
]] + call[name[PackageHelper].get_name, parameter[]]] + constant[
]] + name[asterisks]]
variable[contents] assign[=] call[name[re].sub, parameter[constant[(\.\. raw\:\: html\n)(\n {2,4})(\<br class=\"title\"\>)], name[title], name[contents]]]
variable[contents] assign[=] call[name[re].sub, parameter[constant[(\.\. raw\:\: html\n)((\n {2,4})([A-Za-z0-9<>\ =\"\/])*)*], constant[], name[contents]]]
return[name[contents]] | keyword[def] identifier[rst_to_pypi] ( identifier[contents] ):
literal[string]
identifier[contents] = identifier[contents] . identifier[replace] ( literal[string] , literal[string] )
identifier[asterisks_length] = identifier[len] ( identifier[PackageHelper] . identifier[get_name] ())
identifier[asterisks] = literal[string] * identifier[asterisks_length]
identifier[title] = identifier[asterisks] + literal[string] + identifier[PackageHelper] . identifier[get_name] ()+ literal[string] + identifier[asterisks] ;
identifier[contents] = identifier[re] . identifier[sub] ( literal[string] , identifier[title] , identifier[contents] )
identifier[contents] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[contents] )
keyword[return] identifier[contents] | def rst_to_pypi(contents):
"""Convert the given GitHub RST contents to PyPi RST contents (since some RST directives are not available in PyPi).
Args:
contents (str): The GitHub compatible RST contents.
Returns:
str: The PyPi compatible RST contents.
"""
# The PyPi description does not support the SVG file type.
contents = contents.replace('.svg?pypi=png.from.svg', '.png')
# Convert ``<br class="title">`` to a H1 title
asterisks_length = len(PackageHelper.get_name())
asterisks = '*' * asterisks_length
title = asterisks + '\n' + PackageHelper.get_name() + '\n' + asterisks
contents = re.sub('(\\.\\. raw\\:\\: html\\n)(\\n {2,4})(\\<br class=\\"title\\"\\>)', title, contents)
# The PyPi description does not support raw HTML
contents = re.sub('(\\.\\. raw\\:\\: html\\n)((\\n {2,4})([A-Za-z0-9<>\\ =\\"\\/])*)*', '', contents)
return contents |
def handle_request(self):
"""
Handle one request - serve current process to one connection.
Use close_request() to disconnect this process.
"""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
try:
# we only serve once, and we want to free up the port
# for future serves.
self.socket.close()
self.process_request(request, client_address)
except SocketConnected as err:
self._serve_process(err.slaveFd, err.serverPid)
return
except Exception as err:
self.handle_error(request, client_address)
self.close_request() | def function[handle_request, parameter[self]]:
constant[
Handle one request - serve current process to one connection.
Use close_request() to disconnect this process.
]
<ast.Try object at 0x7da20c6c7df0>
if call[name[self].verify_request, parameter[name[request], name[client_address]]] begin[:]
<ast.Try object at 0x7da20c6c5b10> | keyword[def] identifier[handle_request] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[request] , identifier[client_address] = identifier[self] . identifier[get_request] ()
keyword[except] identifier[socket] . identifier[error] :
keyword[return]
keyword[if] identifier[self] . identifier[verify_request] ( identifier[request] , identifier[client_address] ):
keyword[try] :
identifier[self] . identifier[socket] . identifier[close] ()
identifier[self] . identifier[process_request] ( identifier[request] , identifier[client_address] )
keyword[except] identifier[SocketConnected] keyword[as] identifier[err] :
identifier[self] . identifier[_serve_process] ( identifier[err] . identifier[slaveFd] , identifier[err] . identifier[serverPid] )
keyword[return]
keyword[except] identifier[Exception] keyword[as] identifier[err] :
identifier[self] . identifier[handle_error] ( identifier[request] , identifier[client_address] )
identifier[self] . identifier[close_request] () | def handle_request(self):
"""
Handle one request - serve current process to one connection.
Use close_request() to disconnect this process.
"""
try:
(request, client_address) = self.get_request() # depends on [control=['try'], data=[]]
except socket.error:
return # depends on [control=['except'], data=[]]
if self.verify_request(request, client_address):
try:
# we only serve once, and we want to free up the port
# for future serves.
self.socket.close()
self.process_request(request, client_address) # depends on [control=['try'], data=[]]
except SocketConnected as err:
self._serve_process(err.slaveFd, err.serverPid)
return # depends on [control=['except'], data=['err']]
except Exception as err:
self.handle_error(request, client_address)
self.close_request() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] |
def _identifier_as_cid(self, identifier):
"""
Returns a container uuid for identifier.
If identifier is an image UUID or image tag, create a temporary
container and return its uuid.
"""
def __cname_matches(container, identifier):
return any([n for n in (container['Names'] or [])
if matches(n, '/' + identifier)])
# Determine if identifier is a container
containers = [c['Id'] for c in self.client.containers(all=True)
if (__cname_matches(c, identifier) or
matches(c['Id'], identifier + '*'))]
if len(containers) > 1:
raise SelectionMatchError(identifier, containers)
elif len(containers) == 1:
c = containers[0]
return self._clone(c)
# Determine if identifier is an image UUID
images = [i for i in set(self.client.images(all=True, quiet=True))
if i.startswith(identifier)]
if len(images) > 1:
raise SelectionMatchError(identifier, images)
elif len(images) == 1:
return self._create_temp_container(images[0])
# Match image tag.
images = util.image_by_name(identifier)
if len(images) > 1:
tags = [t for i in images for t in i['RepoTags']]
raise SelectionMatchError(identifier, tags)
elif len(images) == 1:
return self._create_temp_container(images[0]['Id'].replace("sha256:", ""))
raise MountError('{} did not match any image or container.'
''.format(identifier)) | def function[_identifier_as_cid, parameter[self, identifier]]:
constant[
Returns a container uuid for identifier.
If identifier is an image UUID or image tag, create a temporary
container and return its uuid.
]
def function[__cname_matches, parameter[container, identifier]]:
return[call[name[any], parameter[<ast.ListComp object at 0x7da2054a55d0>]]]
variable[containers] assign[=] <ast.ListComp object at 0x7da2054a7100>
if compare[call[name[len], parameter[name[containers]]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da2054a5cc0>
variable[images] assign[=] <ast.ListComp object at 0x7da2054a6590>
if compare[call[name[len], parameter[name[images]]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da2054a5330>
variable[images] assign[=] call[name[util].image_by_name, parameter[name[identifier]]]
if compare[call[name[len], parameter[name[images]]] greater[>] constant[1]] begin[:]
variable[tags] assign[=] <ast.ListComp object at 0x7da2054a5a80>
<ast.Raise object at 0x7da2054a7a60>
<ast.Raise object at 0x7da204621630> | keyword[def] identifier[_identifier_as_cid] ( identifier[self] , identifier[identifier] ):
literal[string]
keyword[def] identifier[__cname_matches] ( identifier[container] , identifier[identifier] ):
keyword[return] identifier[any] ([ identifier[n] keyword[for] identifier[n] keyword[in] ( identifier[container] [ literal[string] ] keyword[or] [])
keyword[if] identifier[matches] ( identifier[n] , literal[string] + identifier[identifier] )])
identifier[containers] =[ identifier[c] [ literal[string] ] keyword[for] identifier[c] keyword[in] identifier[self] . identifier[client] . identifier[containers] ( identifier[all] = keyword[True] )
keyword[if] ( identifier[__cname_matches] ( identifier[c] , identifier[identifier] ) keyword[or]
identifier[matches] ( identifier[c] [ literal[string] ], identifier[identifier] + literal[string] ))]
keyword[if] identifier[len] ( identifier[containers] )> literal[int] :
keyword[raise] identifier[SelectionMatchError] ( identifier[identifier] , identifier[containers] )
keyword[elif] identifier[len] ( identifier[containers] )== literal[int] :
identifier[c] = identifier[containers] [ literal[int] ]
keyword[return] identifier[self] . identifier[_clone] ( identifier[c] )
identifier[images] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[set] ( identifier[self] . identifier[client] . identifier[images] ( identifier[all] = keyword[True] , identifier[quiet] = keyword[True] ))
keyword[if] identifier[i] . identifier[startswith] ( identifier[identifier] )]
keyword[if] identifier[len] ( identifier[images] )> literal[int] :
keyword[raise] identifier[SelectionMatchError] ( identifier[identifier] , identifier[images] )
keyword[elif] identifier[len] ( identifier[images] )== literal[int] :
keyword[return] identifier[self] . identifier[_create_temp_container] ( identifier[images] [ literal[int] ])
identifier[images] = identifier[util] . identifier[image_by_name] ( identifier[identifier] )
keyword[if] identifier[len] ( identifier[images] )> literal[int] :
identifier[tags] =[ identifier[t] keyword[for] identifier[i] keyword[in] identifier[images] keyword[for] identifier[t] keyword[in] identifier[i] [ literal[string] ]]
keyword[raise] identifier[SelectionMatchError] ( identifier[identifier] , identifier[tags] )
keyword[elif] identifier[len] ( identifier[images] )== literal[int] :
keyword[return] identifier[self] . identifier[_create_temp_container] ( identifier[images] [ literal[int] ][ literal[string] ]. identifier[replace] ( literal[string] , literal[string] ))
keyword[raise] identifier[MountError] ( literal[string]
literal[string] . identifier[format] ( identifier[identifier] )) | def _identifier_as_cid(self, identifier):
"""
Returns a container uuid for identifier.
If identifier is an image UUID or image tag, create a temporary
container and return its uuid.
"""
def __cname_matches(container, identifier):
return any([n for n in container['Names'] or [] if matches(n, '/' + identifier)])
# Determine if identifier is a container
containers = [c['Id'] for c in self.client.containers(all=True) if __cname_matches(c, identifier) or matches(c['Id'], identifier + '*')]
if len(containers) > 1:
raise SelectionMatchError(identifier, containers) # depends on [control=['if'], data=[]]
elif len(containers) == 1:
c = containers[0]
return self._clone(c) # depends on [control=['if'], data=[]]
# Determine if identifier is an image UUID
images = [i for i in set(self.client.images(all=True, quiet=True)) if i.startswith(identifier)]
if len(images) > 1:
raise SelectionMatchError(identifier, images) # depends on [control=['if'], data=[]]
elif len(images) == 1:
return self._create_temp_container(images[0]) # depends on [control=['if'], data=[]]
# Match image tag.
images = util.image_by_name(identifier)
if len(images) > 1:
tags = [t for i in images for t in i['RepoTags']]
raise SelectionMatchError(identifier, tags) # depends on [control=['if'], data=[]]
elif len(images) == 1:
return self._create_temp_container(images[0]['Id'].replace('sha256:', '')) # depends on [control=['if'], data=[]]
raise MountError('{} did not match any image or container.'.format(identifier)) |
def get_event_tracking_beacons(self, event_id, **data):
"""
GET /events/:event_id/tracking_beacons/
Returns the list of :format:`tracking_beacon` for the event :event_id
"""
return self.get("/events/{0}/tracking_beacons/".format(event_id), data=data) | def function[get_event_tracking_beacons, parameter[self, event_id]]:
constant[
GET /events/:event_id/tracking_beacons/
Returns the list of :format:`tracking_beacon` for the event :event_id
]
return[call[name[self].get, parameter[call[constant[/events/{0}/tracking_beacons/].format, parameter[name[event_id]]]]]] | keyword[def] identifier[get_event_tracking_beacons] ( identifier[self] , identifier[event_id] ,** identifier[data] ):
literal[string]
keyword[return] identifier[self] . identifier[get] ( literal[string] . identifier[format] ( identifier[event_id] ), identifier[data] = identifier[data] ) | def get_event_tracking_beacons(self, event_id, **data):
"""
GET /events/:event_id/tracking_beacons/
Returns the list of :format:`tracking_beacon` for the event :event_id
"""
return self.get('/events/{0}/tracking_beacons/'.format(event_id), data=data) |
def find_lexer_class(name):
"""Lookup a lexer class by name.
Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
for module_name, lname, aliases, _, _ in itervalues(LEXERS):
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls | def function[find_lexer_class, parameter[name]]:
constant[Lookup a lexer class by name.
Return None if not found.
]
if compare[name[name] in name[_lexer_cache]] begin[:]
return[call[name[_lexer_cache]][name[name]]]
for taget[tuple[[<ast.Name object at 0x7da2044c0400>, <ast.Name object at 0x7da2044c2c80>, <ast.Name object at 0x7da2044c0a60>, <ast.Name object at 0x7da2044c27a0>, <ast.Name object at 0x7da2044c19c0>]]] in starred[call[name[itervalues], parameter[name[LEXERS]]]] begin[:]
if compare[name[name] equal[==] name[lname]] begin[:]
call[name[_load_lexers], parameter[name[module_name]]]
return[call[name[_lexer_cache]][name[name]]]
for taget[name[cls]] in starred[call[name[find_plugin_lexers], parameter[]]] begin[:]
if compare[name[cls].name equal[==] name[name]] begin[:]
return[name[cls]] | keyword[def] identifier[find_lexer_class] ( identifier[name] ):
literal[string]
keyword[if] identifier[name] keyword[in] identifier[_lexer_cache] :
keyword[return] identifier[_lexer_cache] [ identifier[name] ]
keyword[for] identifier[module_name] , identifier[lname] , identifier[aliases] , identifier[_] , identifier[_] keyword[in] identifier[itervalues] ( identifier[LEXERS] ):
keyword[if] identifier[name] == identifier[lname] :
identifier[_load_lexers] ( identifier[module_name] )
keyword[return] identifier[_lexer_cache] [ identifier[name] ]
keyword[for] identifier[cls] keyword[in] identifier[find_plugin_lexers] ():
keyword[if] identifier[cls] . identifier[name] == identifier[name] :
keyword[return] identifier[cls] | def find_lexer_class(name):
"""Lookup a lexer class by name.
Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name] # depends on [control=['if'], data=['name', '_lexer_cache']]
# lookup builtin lexers
for (module_name, lname, aliases, _, _) in itervalues(LEXERS):
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name] # depends on [control=['if'], data=['name']] # depends on [control=['for'], data=[]]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cls']] |
def _parse_field_value(line):
""" Parse the field and value from a line. """
if line.startswith(':'):
# Ignore the line
return None, None
if ':' not in line:
# Treat the entire line as the field, use empty string as value
return line, ''
# Else field is before the ':' and value is after
field, value = line.split(':', 1)
# If value starts with a space, remove it.
value = value[1:] if value.startswith(' ') else value
return field, value | def function[_parse_field_value, parameter[line]]:
constant[ Parse the field and value from a line. ]
if call[name[line].startswith, parameter[constant[:]]] begin[:]
return[tuple[[<ast.Constant object at 0x7da2044c2a70>, <ast.Constant object at 0x7da2044c0dc0>]]]
if compare[constant[:] <ast.NotIn object at 0x7da2590d7190> name[line]] begin[:]
return[tuple[[<ast.Name object at 0x7da2044c3d90>, <ast.Constant object at 0x7da2044c0760>]]]
<ast.Tuple object at 0x7da2044c12d0> assign[=] call[name[line].split, parameter[constant[:], constant[1]]]
variable[value] assign[=] <ast.IfExp object at 0x7da20e9631f0>
return[tuple[[<ast.Name object at 0x7da20e960c40>, <ast.Name object at 0x7da20e9627d0>]]] | keyword[def] identifier[_parse_field_value] ( identifier[line] ):
literal[string]
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[return] keyword[None] , keyword[None]
keyword[if] literal[string] keyword[not] keyword[in] identifier[line] :
keyword[return] identifier[line] , literal[string]
identifier[field] , identifier[value] = identifier[line] . identifier[split] ( literal[string] , literal[int] )
identifier[value] = identifier[value] [ literal[int] :] keyword[if] identifier[value] . identifier[startswith] ( literal[string] ) keyword[else] identifier[value]
keyword[return] identifier[field] , identifier[value] | def _parse_field_value(line):
""" Parse the field and value from a line. """
if line.startswith(':'):
# Ignore the line
return (None, None) # depends on [control=['if'], data=[]]
if ':' not in line:
# Treat the entire line as the field, use empty string as value
return (line, '') # depends on [control=['if'], data=['line']]
# Else field is before the ':' and value is after
(field, value) = line.split(':', 1)
# If value starts with a space, remove it.
value = value[1:] if value.startswith(' ') else value
return (field, value) |
def _ParseValueData(self, knowledge_base, value_data):
"""Parses Windows Registry value data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
value_data (object): Windows Registry value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
"""
if not isinstance(value_data, py2to3.UNICODE_TYPE):
raise errors.PreProcessFail(
'Unsupported Windows Registry value type: {0:s} for '
'artifact: {1:s}.'.format(
type(value_data), self.ARTIFACT_DEFINITION_NAME))
if not knowledge_base.GetValue('operating_system_product'):
knowledge_base.SetValue('operating_system_product', value_data) | def function[_ParseValueData, parameter[self, knowledge_base, value_data]]:
constant[Parses Windows Registry value data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
value_data (object): Windows Registry value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
]
if <ast.UnaryOp object at 0x7da2041d9b10> begin[:]
<ast.Raise object at 0x7da2041d9780>
if <ast.UnaryOp object at 0x7da20c991d20> begin[:]
call[name[knowledge_base].SetValue, parameter[constant[operating_system_product], name[value_data]]] | keyword[def] identifier[_ParseValueData] ( identifier[self] , identifier[knowledge_base] , identifier[value_data] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[value_data] , identifier[py2to3] . identifier[UNICODE_TYPE] ):
keyword[raise] identifier[errors] . identifier[PreProcessFail] (
literal[string]
literal[string] . identifier[format] (
identifier[type] ( identifier[value_data] ), identifier[self] . identifier[ARTIFACT_DEFINITION_NAME] ))
keyword[if] keyword[not] identifier[knowledge_base] . identifier[GetValue] ( literal[string] ):
identifier[knowledge_base] . identifier[SetValue] ( literal[string] , identifier[value_data] ) | def _ParseValueData(self, knowledge_base, value_data):
"""Parses Windows Registry value data for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
value_data (object): Windows Registry value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
"""
if not isinstance(value_data, py2to3.UNICODE_TYPE):
raise errors.PreProcessFail('Unsupported Windows Registry value type: {0:s} for artifact: {1:s}.'.format(type(value_data), self.ARTIFACT_DEFINITION_NAME)) # depends on [control=['if'], data=[]]
if not knowledge_base.GetValue('operating_system_product'):
knowledge_base.SetValue('operating_system_product', value_data) # depends on [control=['if'], data=[]] |
def _call_reshape_input_output(self, fn, x, extra_kwargs=None):
"""Calls `fn`, appropriately reshaping its input `x` and output."""
# Note: we take `extra_kwargs` as a dict rather than `**extra_kwargs`
# because it is possible the user provided extra kwargs would itself
# have `fn` and/or `x` as a key.
with tf.control_dependencies(self._runtime_assertions +
self._validate_sample_arg(x)):
sample_shape, static_sample_shape = self._sample_shape(x)
old_shape = tf.concat(
[
sample_shape,
self.distribution.batch_shape_tensor(),
self.event_shape_tensor(),
],
axis=0)
x_reshape = tf.reshape(x, old_shape)
result = fn(x_reshape, **extra_kwargs) if extra_kwargs else fn(x_reshape)
new_shape = tf.concat(
[
sample_shape,
self._batch_shape_unexpanded,
], axis=0)
result = tf.reshape(result, new_shape)
if (tensorshape_util.rank(static_sample_shape) is not None and
tensorshape_util.rank(self.batch_shape) is not None):
new_shape = tensorshape_util.concatenate(static_sample_shape,
self.batch_shape)
tensorshape_util.set_shape(result, new_shape)
return result | def function[_call_reshape_input_output, parameter[self, fn, x, extra_kwargs]]:
constant[Calls `fn`, appropriately reshaping its input `x` and output.]
with call[name[tf].control_dependencies, parameter[binary_operation[name[self]._runtime_assertions + call[name[self]._validate_sample_arg, parameter[name[x]]]]]] begin[:]
<ast.Tuple object at 0x7da1b0228fa0> assign[=] call[name[self]._sample_shape, parameter[name[x]]]
variable[old_shape] assign[=] call[name[tf].concat, parameter[list[[<ast.Name object at 0x7da1b02296f0>, <ast.Call object at 0x7da1b022aaa0>, <ast.Call object at 0x7da1b0229360>]]]]
variable[x_reshape] assign[=] call[name[tf].reshape, parameter[name[x], name[old_shape]]]
variable[result] assign[=] <ast.IfExp object at 0x7da1b0229870>
variable[new_shape] assign[=] call[name[tf].concat, parameter[list[[<ast.Name object at 0x7da1b022aa70>, <ast.Attribute object at 0x7da1b022b790>]]]]
variable[result] assign[=] call[name[tf].reshape, parameter[name[result], name[new_shape]]]
if <ast.BoolOp object at 0x7da1b0229a50> begin[:]
variable[new_shape] assign[=] call[name[tensorshape_util].concatenate, parameter[name[static_sample_shape], name[self].batch_shape]]
call[name[tensorshape_util].set_shape, parameter[name[result], name[new_shape]]]
return[name[result]] | keyword[def] identifier[_call_reshape_input_output] ( identifier[self] , identifier[fn] , identifier[x] , identifier[extra_kwargs] = keyword[None] ):
literal[string]
keyword[with] identifier[tf] . identifier[control_dependencies] ( identifier[self] . identifier[_runtime_assertions] +
identifier[self] . identifier[_validate_sample_arg] ( identifier[x] )):
identifier[sample_shape] , identifier[static_sample_shape] = identifier[self] . identifier[_sample_shape] ( identifier[x] )
identifier[old_shape] = identifier[tf] . identifier[concat] (
[
identifier[sample_shape] ,
identifier[self] . identifier[distribution] . identifier[batch_shape_tensor] (),
identifier[self] . identifier[event_shape_tensor] (),
],
identifier[axis] = literal[int] )
identifier[x_reshape] = identifier[tf] . identifier[reshape] ( identifier[x] , identifier[old_shape] )
identifier[result] = identifier[fn] ( identifier[x_reshape] ,** identifier[extra_kwargs] ) keyword[if] identifier[extra_kwargs] keyword[else] identifier[fn] ( identifier[x_reshape] )
identifier[new_shape] = identifier[tf] . identifier[concat] (
[
identifier[sample_shape] ,
identifier[self] . identifier[_batch_shape_unexpanded] ,
], identifier[axis] = literal[int] )
identifier[result] = identifier[tf] . identifier[reshape] ( identifier[result] , identifier[new_shape] )
keyword[if] ( identifier[tensorshape_util] . identifier[rank] ( identifier[static_sample_shape] ) keyword[is] keyword[not] keyword[None] keyword[and]
identifier[tensorshape_util] . identifier[rank] ( identifier[self] . identifier[batch_shape] ) keyword[is] keyword[not] keyword[None] ):
identifier[new_shape] = identifier[tensorshape_util] . identifier[concatenate] ( identifier[static_sample_shape] ,
identifier[self] . identifier[batch_shape] )
identifier[tensorshape_util] . identifier[set_shape] ( identifier[result] , identifier[new_shape] )
keyword[return] identifier[result] | def _call_reshape_input_output(self, fn, x, extra_kwargs=None):
"""Calls `fn`, appropriately reshaping its input `x` and output."""
# Note: we take `extra_kwargs` as a dict rather than `**extra_kwargs`
# because it is possible the user provided extra kwargs would itself
# have `fn` and/or `x` as a key.
with tf.control_dependencies(self._runtime_assertions + self._validate_sample_arg(x)):
(sample_shape, static_sample_shape) = self._sample_shape(x)
old_shape = tf.concat([sample_shape, self.distribution.batch_shape_tensor(), self.event_shape_tensor()], axis=0)
x_reshape = tf.reshape(x, old_shape)
result = fn(x_reshape, **extra_kwargs) if extra_kwargs else fn(x_reshape)
new_shape = tf.concat([sample_shape, self._batch_shape_unexpanded], axis=0)
result = tf.reshape(result, new_shape)
if tensorshape_util.rank(static_sample_shape) is not None and tensorshape_util.rank(self.batch_shape) is not None:
new_shape = tensorshape_util.concatenate(static_sample_shape, self.batch_shape)
tensorshape_util.set_shape(result, new_shape) # depends on [control=['if'], data=[]]
return result # depends on [control=['with'], data=[]] |
def _create_sparse_kvstore(kvstore):
"""Create kvstore assuming some parameters' storage types are row_sparse.
Parameters
----------
kvstore : KVStore or str
The kvstore.
Returns
-------
kvstore : KVStore
update_on_kvstore : bool. Always True.
"""
# always update on kvstore
update_on_kvstore = True
if isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
kv = kvs.create(kvstore)
else:
raise TypeError("Cannot create '%s' KVStore with row_sparse parameters. "
"The type must be KVStore or str." % kvstore)
return (kv, update_on_kvstore) | def function[_create_sparse_kvstore, parameter[kvstore]]:
constant[Create kvstore assuming some parameters' storage types are row_sparse.
Parameters
----------
kvstore : KVStore or str
The kvstore.
Returns
-------
kvstore : KVStore
update_on_kvstore : bool. Always True.
]
variable[update_on_kvstore] assign[=] constant[True]
if call[name[isinstance], parameter[name[kvstore], name[kvs].KVStore]] begin[:]
variable[kv] assign[=] name[kvstore]
return[tuple[[<ast.Name object at 0x7da1b1f06d70>, <ast.Name object at 0x7da1b1f06c50>]]] | keyword[def] identifier[_create_sparse_kvstore] ( identifier[kvstore] ):
literal[string]
identifier[update_on_kvstore] = keyword[True]
keyword[if] identifier[isinstance] ( identifier[kvstore] , identifier[kvs] . identifier[KVStore] ):
identifier[kv] = identifier[kvstore]
keyword[elif] identifier[isinstance] ( identifier[kvstore] , identifier[str] ):
identifier[kv] = identifier[kvs] . identifier[create] ( identifier[kvstore] )
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] % identifier[kvstore] )
keyword[return] ( identifier[kv] , identifier[update_on_kvstore] ) | def _create_sparse_kvstore(kvstore):
"""Create kvstore assuming some parameters' storage types are row_sparse.
Parameters
----------
kvstore : KVStore or str
The kvstore.
Returns
-------
kvstore : KVStore
update_on_kvstore : bool. Always True.
"""
# always update on kvstore
update_on_kvstore = True
if isinstance(kvstore, kvs.KVStore):
kv = kvstore # depends on [control=['if'], data=[]]
elif isinstance(kvstore, str):
kv = kvs.create(kvstore) # depends on [control=['if'], data=[]]
else:
raise TypeError("Cannot create '%s' KVStore with row_sparse parameters. The type must be KVStore or str." % kvstore)
return (kv, update_on_kvstore) |
def down(job, input_file_id, n, down_checkpoints):
    """Sort a file (or file chunk) held in the Toil file store.

    If the file is larger than the threshold ``n`` bytes, it is split at a
    midpoint and two child jobs recurse on the halves; a follow-on ``up``
    job then merges the sorted halves.  Otherwise the file is sorted
    directly and written back to the file store.
    """
    # Fetch a local copy of the input and measure it.
    local_copy = job.fileStore.readGlobalFile(input_file_id, cache=False)
    file_size = os.path.getsize(local_copy)
    if file_size <= n:
        # Small enough to sort in one go.
        job.fileStore.logToMaster("Sorting file: %s of size: %s"
                                  % (input_file_id, file_size),
                                  level=logging.CRITICAL)
        # Sort the local copy and push the result back to the file store.
        sorted_copy = job.fileStore.getLocalTempFile()
        sort(local_copy, sorted_copy)
        return job.fileStore.writeGlobalFile(sorted_copy)
    # Too large: split at the midpoint and recurse on both halves.
    job.fileStore.logToMaster("Splitting file: %s of size: %s"
                              % (input_file_id, file_size),
                              level=logging.CRITICAL)
    mid = get_midpoint(local_copy, 0, file_size)
    half_ids = []
    for lo, hi in ((0, mid + 1), (mid + 1, file_size)):
        half_path = job.fileStore.getLocalTempFile()
        with open(half_path, 'w') as out:
            copy_subrange_of_file(local_copy, lo, hi, out)
        half_ids.append(job.fileStore.writeGlobalFile(half_path))
    # Sort each half as a child job, then merge them in the follow-on.
    left = job.addChildJobFn(down, half_ids[0], n,
                             down_checkpoints=down_checkpoints,
                             memory='600M').rv()
    right = job.addChildJobFn(down, half_ids[1], n,
                              down_checkpoints=down_checkpoints,
                              memory='600M').rv()
    return job.addFollowOnJobFn(up, left, right).rv()
constant[Input is a file and a range into that file to sort and an output location in which
to write the sorted file.
If the range is larger than a threshold N the range is divided recursively and
a follow on job is then created which merges back the results. Otherwise,
the file is sorted and placed in the output.
]
variable[input_file] assign[=] call[name[job].fileStore.readGlobalFile, parameter[name[input_file_id]]]
variable[length] assign[=] call[name[os].path.getsize, parameter[name[input_file]]]
if compare[name[length] greater[>] name[n]] begin[:]
call[name[job].fileStore.logToMaster, parameter[binary_operation[constant[Splitting file: %s of size: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1e5c580>, <ast.Name object at 0x7da1b1e5ead0>]]]]]
variable[mid_point] assign[=] call[name[get_midpoint], parameter[name[input_file], constant[0], name[length]]]
variable[t1] assign[=] call[name[job].fileStore.getLocalTempFile, parameter[]]
with call[name[open], parameter[name[t1], constant[w]]] begin[:]
call[name[copy_subrange_of_file], parameter[name[input_file], constant[0], binary_operation[name[mid_point] + constant[1]], name[fH]]]
variable[t2] assign[=] call[name[job].fileStore.getLocalTempFile, parameter[]]
with call[name[open], parameter[name[t2], constant[w]]] begin[:]
call[name[copy_subrange_of_file], parameter[name[input_file], binary_operation[name[mid_point] + constant[1]], name[length], name[fH]]]
return[call[call[name[job].addFollowOnJobFn, parameter[name[up], call[call[name[job].addChildJobFn, parameter[name[down], call[name[job].fileStore.writeGlobalFile, parameter[name[t1]]], name[n]]].rv, parameter[]], call[call[name[job].addChildJobFn, parameter[name[down], call[name[job].fileStore.writeGlobalFile, parameter[name[t2]]], name[n]]].rv, parameter[]]]].rv, parameter[]]] | keyword[def] identifier[down] ( identifier[job] , identifier[input_file_id] , identifier[n] , identifier[down_checkpoints] ):
literal[string]
identifier[input_file] = identifier[job] . identifier[fileStore] . identifier[readGlobalFile] ( identifier[input_file_id] , identifier[cache] = keyword[False] )
identifier[length] = identifier[os] . identifier[path] . identifier[getsize] ( identifier[input_file] )
keyword[if] identifier[length] > identifier[n] :
identifier[job] . identifier[fileStore] . identifier[logToMaster] ( literal[string]
%( identifier[input_file_id] , identifier[length] ), identifier[level] = identifier[logging] . identifier[CRITICAL] )
identifier[mid_point] = identifier[get_midpoint] ( identifier[input_file] , literal[int] , identifier[length] )
identifier[t1] = identifier[job] . identifier[fileStore] . identifier[getLocalTempFile] ()
keyword[with] identifier[open] ( identifier[t1] , literal[string] ) keyword[as] identifier[fH] :
identifier[copy_subrange_of_file] ( identifier[input_file] , literal[int] , identifier[mid_point] + literal[int] , identifier[fH] )
identifier[t2] = identifier[job] . identifier[fileStore] . identifier[getLocalTempFile] ()
keyword[with] identifier[open] ( identifier[t2] , literal[string] ) keyword[as] identifier[fH] :
identifier[copy_subrange_of_file] ( identifier[input_file] , identifier[mid_point] + literal[int] , identifier[length] , identifier[fH] )
keyword[return] identifier[job] . identifier[addFollowOnJobFn] ( identifier[up] , identifier[job] . identifier[addChildJobFn] ( identifier[down] , identifier[job] . identifier[fileStore] . identifier[writeGlobalFile] ( identifier[t1] ), identifier[n] ,
identifier[down_checkpoints] = identifier[down_checkpoints] , identifier[memory] = literal[string] ). identifier[rv] (),
identifier[job] . identifier[addChildJobFn] ( identifier[down] , identifier[job] . identifier[fileStore] . identifier[writeGlobalFile] ( identifier[t2] ), identifier[n] ,
identifier[down_checkpoints] = identifier[down_checkpoints] ,
identifier[memory] = literal[string] ). identifier[rv] ()). identifier[rv] ()
keyword[else] :
identifier[job] . identifier[fileStore] . identifier[logToMaster] ( literal[string]
%( identifier[input_file_id] , identifier[length] ), identifier[level] = identifier[logging] . identifier[CRITICAL] )
identifier[output_file] = identifier[job] . identifier[fileStore] . identifier[getLocalTempFile] ()
identifier[sort] ( identifier[input_file] , identifier[output_file] )
keyword[return] identifier[job] . identifier[fileStore] . identifier[writeGlobalFile] ( identifier[output_file] ) | def down(job, input_file_id, n, down_checkpoints):
"""Input is a file and a range into that file to sort and an output location in which
to write the sorted file.
If the range is larger than a threshold N the range is divided recursively and
a follow on job is then created which merges back the results. Otherwise,
the file is sorted and placed in the output.
"""
# Read the file
input_file = job.fileStore.readGlobalFile(input_file_id, cache=False)
length = os.path.getsize(input_file)
if length > n:
# We will subdivide the file
job.fileStore.logToMaster('Splitting file: %s of size: %s' % (input_file_id, length), level=logging.CRITICAL)
# Split the file into two copies
mid_point = get_midpoint(input_file, 0, length)
t1 = job.fileStore.getLocalTempFile()
with open(t1, 'w') as fH:
copy_subrange_of_file(input_file, 0, mid_point + 1, fH) # depends on [control=['with'], data=['fH']]
t2 = job.fileStore.getLocalTempFile()
with open(t2, 'w') as fH:
copy_subrange_of_file(input_file, mid_point + 1, length, fH) # depends on [control=['with'], data=['fH']]
# Call the down function recursively
return job.addFollowOnJobFn(up, job.addChildJobFn(down, job.fileStore.writeGlobalFile(t1), n, down_checkpoints=down_checkpoints, memory='600M').rv(), job.addChildJobFn(down, job.fileStore.writeGlobalFile(t2), n, down_checkpoints=down_checkpoints, memory='600M').rv()).rv() # depends on [control=['if'], data=['length', 'n']]
else:
# We can sort this bit of the file
job.fileStore.logToMaster('Sorting file: %s of size: %s' % (input_file_id, length), level=logging.CRITICAL)
# Sort the copy and write back to the fileStore
output_file = job.fileStore.getLocalTempFile()
sort(input_file, output_file)
return job.fileStore.writeGlobalFile(output_file) |
def alpha_gen(x):
    """ Create a mappable function alpha to apply to each xmin in a list of xmins.
    This is essentially the slow version of fplfit/cplfit, though I bet it could
    be speeded up with a clever use of parellel_map. Not intended to be used by users.
    Docstring for the generated alpha function::
        Given a sorted data set and a minimum, returns power law MLE fit
        data is passed as a keyword parameter so that it can be vectorized
        If there is only one element, return alpha=0
    """
    # x is bound as a default argument so each call of the returned closure
    # sees the same data set without re-passing it.
    def alpha_(xmin, x=x):
        """
        Given a sorted data set and a minimum, returns power law MLE fit
        data is passed as a keyword parameter so that it can be vectorized
        If there is only one element, return alpha=0
        """
        # Keep only the points at or above the cutoff.
        tail = x[x >= xmin]
        n = tail.size
        # Fewer than two points cannot constrain the exponent.
        if n < 2:
            return 0
        # Power-law MLE: alpha = 1 + n / sum(ln(x_i / xmin)).
        return 1 + float(n) / sum(log(tail / xmin))
    return alpha_
constant[ Create a mappable function alpha to apply to each xmin in a list of xmins.
This is essentially the slow version of fplfit/cplfit, though I bet it could
be speeded up with a clever use of parellel_map. Not intended to be used by users.
Docstring for the generated alpha function::
Given a sorted data set and a minimum, returns power law MLE fit
data is passed as a keyword parameter so that it can be vectorized
If there is only one element, return alpha=0
]
def function[alpha_, parameter[xmin, x]]:
constant[
Given a sorted data set and a minimum, returns power law MLE fit
data is passed as a keyword parameter so that it can be vectorized
If there is only one element, return alpha=0
]
variable[gexmin] assign[=] compare[name[x] greater_or_equal[>=] name[xmin]]
variable[n] assign[=] call[name[np].count_nonzero, parameter[name[gexmin]]]
if compare[name[n] less[<] constant[2]] begin[:]
return[constant[0]]
variable[x] assign[=] call[name[x]][name[gexmin]]
variable[a] assign[=] binary_operation[constant[1] + binary_operation[call[name[float], parameter[name[n]]] / call[name[sum], parameter[call[name[log], parameter[binary_operation[name[x] / name[xmin]]]]]]]]
return[name[a]]
return[name[alpha_]] | keyword[def] identifier[alpha_gen] ( identifier[x] ):
literal[string]
keyword[def] identifier[alpha_] ( identifier[xmin] , identifier[x] = identifier[x] ):
literal[string]
identifier[gexmin] = identifier[x] >= identifier[xmin]
identifier[n] = identifier[np] . identifier[count_nonzero] ( identifier[gexmin] )
keyword[if] identifier[n] < literal[int] :
keyword[return] literal[int]
identifier[x] = identifier[x] [ identifier[gexmin] ]
identifier[a] = literal[int] + identifier[float] ( identifier[n] )/ identifier[sum] ( identifier[log] ( identifier[x] / identifier[xmin] ))
keyword[return] identifier[a]
keyword[return] identifier[alpha_] | def alpha_gen(x):
""" Create a mappable function alpha to apply to each xmin in a list of xmins.
This is essentially the slow version of fplfit/cplfit, though I bet it could
be speeded up with a clever use of parellel_map. Not intended to be used by users.
Docstring for the generated alpha function::
Given a sorted data set and a minimum, returns power law MLE fit
data is passed as a keyword parameter so that it can be vectorized
If there is only one element, return alpha=0
"""
def alpha_(xmin, x=x):
"""
Given a sorted data set and a minimum, returns power law MLE fit
data is passed as a keyword parameter so that it can be vectorized
If there is only one element, return alpha=0
"""
gexmin = x >= xmin
n = np.count_nonzero(gexmin)
if n < 2:
return 0 # depends on [control=['if'], data=[]]
x = x[gexmin]
a = 1 + float(n) / sum(log(x / xmin))
return a
return alpha_ |
def _image_size(image_config, type_, target_size):
"""Find the closest available size for specified image type.
Arguments:
image_config (:py:class:`dict`): The image config data.
type_ (:py:class:`str`): The type of image to create a URL
for, (``'poster'`` or ``'profile'``).
target_size (:py:class:`int`): The size of image to aim for (used
as either width or height).
"""
return min(
image_config['{}_sizes'.format(type_)],
key=lambda size: (abs(target_size - int(size[1:]))
if size.startswith('w') or size.startswith('h')
else 999),
) | def function[_image_size, parameter[image_config, type_, target_size]]:
constant[Find the closest available size for specified image type.
Arguments:
image_config (:py:class:`dict`): The image config data.
type_ (:py:class:`str`): The type of image to create a URL
for, (``'poster'`` or ``'profile'``).
target_size (:py:class:`int`): The size of image to aim for (used
as either width or height).
]
return[call[name[min], parameter[call[name[image_config]][call[constant[{}_sizes].format, parameter[name[type_]]]]]]] | keyword[def] identifier[_image_size] ( identifier[image_config] , identifier[type_] , identifier[target_size] ):
literal[string]
keyword[return] identifier[min] (
identifier[image_config] [ literal[string] . identifier[format] ( identifier[type_] )],
identifier[key] = keyword[lambda] identifier[size] :( identifier[abs] ( identifier[target_size] - identifier[int] ( identifier[size] [ literal[int] :]))
keyword[if] identifier[size] . identifier[startswith] ( literal[string] ) keyword[or] identifier[size] . identifier[startswith] ( literal[string] )
keyword[else] literal[int] ),
) | def _image_size(image_config, type_, target_size):
"""Find the closest available size for specified image type.
Arguments:
image_config (:py:class:`dict`): The image config data.
type_ (:py:class:`str`): The type of image to create a URL
for, (``'poster'`` or ``'profile'``).
target_size (:py:class:`int`): The size of image to aim for (used
as either width or height).
"""
return min(image_config['{}_sizes'.format(type_)], key=lambda size: abs(target_size - int(size[1:])) if size.startswith('w') or size.startswith('h') else 999) |
def bls_parallel_pfind(
        times, mags, errs,
        magsarefluxes=False,
        startp=0.1, # by default, search from 0.1 d to...
        endp=100.0, # ... 100.0 d -- don't search full timebase
        stepsize=1.0e-4,
        mintransitduration=0.01, # minimum transit length in phase
        maxtransitduration=0.4,  # maximum transit length in phase
        ndurations=100,
        autofreq=True, # figure out f0, nf, and df automatically
        blsobjective='likelihood',
        blsmethod='fast',
        blsoversample=5,
        blsmintransits=3,
        blsfreqfactor=10.0,
        nbestpeaks=5,
        periodepsilon=0.1, # 0.1
        sigclip=10.0,
        verbose=True,
        nworkers=None,
):
    '''Runs the Box Least Squares Fitting Search for transit-shaped signals.
    Breaks up the full frequency space into chunks and passes them to parallel
    BLS workers.
    Based on the version of BLS in Astropy 3.1:
    `astropy.stats.BoxLeastSquares`. If you don't have Astropy 3.1, this module
    will fail to import. Note that by default, this implementation of
    `bls_parallel_pfind` doesn't use the `.autoperiod()` function from
    `BoxLeastSquares` but uses the same auto frequency-grid generation as the
    functions in `periodbase.kbls`. If you want to use Astropy's implementation,
    set the value of `autofreq` kwarg to 'astropy'. The generated period array
    will then be broken up into chunks and sent to the individual workers.
    NOTE: the combined BLS spectrum produced by this function is not identical
    to that produced by running BLS in one shot for the entire frequency
    space. There are differences on the order of 1.0e-3 or so in the respective
    peak values, but peaks appear at the same frequencies for both methods. This
    is likely due to different aliasing caused by smaller chunks of the
    frequency space used by the parallel workers in this function. When in
    doubt, confirm results for this parallel implementation by comparing to
    those from the serial implementation above.
    In particular, when you want to get reliable estimates of the SNR, transit
    depth, duration, etc. that Astropy's BLS gives you, rerun `bls_serial_pfind`
    with `startp`, and `endp` close to the best period you want to characterize
    the transit at. The dict returned from that function contains a `blsmodel`
    key, which is the generated model from Astropy's BLS. Use the
    `.compute_stats()` method to calculate the required stats.
    Parameters
    ----------
    times,mags,errs : np.array
        The magnitude/flux time-series to search for transits.
    magsarefluxes : bool
        If the input measurement values in `mags` and `errs` are in fluxes, set
        this to True.
    startp,endp : float
        The minimum and maximum periods to consider for the transit search.
    stepsize : float
        The step-size in frequency to use when constructing a frequency grid for
        the period search.
    mintransitduration,maxtransitduration : float
        The minimum and maximum transitdurations (in units of phase) to consider
        for the transit search.
    ndurations : int
        The number of transit durations to use in the period-search.
    autofreq : bool or str
        If this is True, the values of `stepsize` and `nphasebins` will be
        ignored, and these, along with a frequency-grid, will be determined
        based on the following relations::
            nphasebins = int(ceil(2.0/mintransitduration))
            if nphasebins > 3000:
                nphasebins = 3000
            stepsize = 0.25*mintransitduration/(times.max()-times.min())
            minfreq = 1.0/endp
            maxfreq = 1.0/startp
            nfreq = int(ceil((maxfreq - minfreq)/stepsize))
        If this is False, you must set `startp`, `endp`, and `stepsize` as
        appropriate.
        If this is str == 'astropy', will use the
        `astropy.stats.BoxLeastSquares.autoperiod()` function to calculate the
        frequency grid instead of the kbls method.
    blsobjective : {'likelihood','snr'}
        Sets the type of objective to optimize in the `BoxLeastSquares.power()`
        function.
    blsmethod : {'fast','slow'}
        Sets the type of method to use in the `BoxLeastSquares.power()`
        function.
    blsoversample : {'likelihood','snr'}
        Sets the `oversample` kwarg for the `BoxLeastSquares.power()` function.
    blsmintransits : int
        Sets the `min_n_transits` kwarg for the `BoxLeastSquares.autoperiod()`
        function.
    blsfreqfactor : float
        Sets the `frequency_factor` kwarg for the `BoxLeastSquares.autoperiod()`
        function.
    periodepsilon : float
        The fractional difference between successive values of 'best' periods
        when sorting by periodogram power to consider them as separate periods
        (as opposed to part of the same periodogram peak). This is used to avoid
        broad peaks in the periodogram and make sure the 'best' periods returned
        are all actually independent.
    nbestpeaks : int
        The number of 'best' peaks to return from the periodogram results,
        starting from the global maximum of the periodogram peak values.
    sigclip : float or int or sequence of two floats/ints or None
        If a single float or int, a symmetric sigma-clip will be performed using
        the number provided as the sigma-multiplier to cut out from the input
        time-series.
        If a list of two ints/floats is provided, the function will perform an
        'asymmetric' sigma-clip. The first element in this list is the sigma
        value to use for fainter flux/mag values; the second element in this
        list is the sigma value to use for brighter flux/mag values. For
        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
        dimmings and greater than 3-sigma brightenings. Here the meaning of
        "dimming" and "brightening" is set by *physics* (not the magnitude
        system), which is why the `magsarefluxes` kwarg must be correctly set.
        If `sigclip` is None, no sigma-clipping will be performed, and the
        time-series (with non-finite elems removed) will be passed through to
        the output.
    verbose : bool
        If this is True, will indicate progress and details about the frequency
        grid used for the period search.
    nworkers : int or None
        The number of parallel workers to launch for period-search. If None,
        nworkers = NCPUS.
    Returns
    -------
    dict
        This function returns a dict, referred to as an `lspinfo` dict in other
        astrobase functions that operate on periodogram results. This is a
        standardized format across all astrobase period-finders, and is of the
        form below::
            {'bestperiod': the best period value in the periodogram,
             'bestlspval': the periodogram peak associated with the best period,
             'nbestpeaks': the input value of nbestpeaks,
             'nbestlspvals': nbestpeaks-size list of best period peak values,
             'nbestperiods': nbestpeaks-size list of best periods,
             'lspvals': the full array of periodogram powers,
             'frequencies': the full array of frequencies considered,
             'periods': the full array of periods considered,
             'durations': the array of durations used to run BLS,
             'blsresult': Astropy BLS result object (BoxLeastSquaresResult),
             'blsmodel': Astropy BLS BoxLeastSquares object used for work,
             'stepsize': the actual stepsize used,
             'nfreq': the actual nfreq used,
             'durations': the durations array used,
             'mintransitduration': the input mintransitduration,
             'maxtransitduration': the input maxtransitdurations,
             'method':'bls' -> the name of the period-finder method,
             'kwargs':{ dict of all of the input kwargs for record-keeping}}
    '''
    # get rid of nans first and sigclip
    stimes, smags, serrs = sigclip_magseries(times,
                                             mags,
                                             errs,
                                             magsarefluxes=magsarefluxes,
                                             sigclip=sigclip)
    # make sure there are enough points to calculate a spectrum
    if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:
        # if we're setting up everything automatically
        if isinstance(autofreq, bool) and autofreq:
            # use heuristic to figure out best timestep
            stepsize = 0.25*mintransitduration/(stimes.max()-stimes.min())
            # now figure out the frequencies to use
            minfreq = 1.0/endp
            maxfreq = 1.0/startp
            nfreq = int(npceil((maxfreq - minfreq)/stepsize))
            # say what we're using
            if verbose:
                LOGINFO('min P: %s, max P: %s, nfreq: %s, '
                        'minfreq: %s, maxfreq: %s' % (startp, endp, nfreq,
                                                      minfreq, maxfreq))
                LOGINFO('autofreq = True: using AUTOMATIC values for '
                        'freq stepsize: %s, ndurations: %s, '
                        'min transit duration: %s, max transit duration: %s' %
                        (stepsize, ndurations,
                         mintransitduration, maxtransitduration))
            use_autoperiod = False
        elif isinstance(autofreq, bool) and not autofreq:
            minfreq = 1.0/endp
            maxfreq = 1.0/startp
            nfreq = int(npceil((maxfreq - minfreq)/stepsize))
            # say what we're using
            if verbose:
                LOGINFO('min P: %s, max P: %s, nfreq: %s, '
                        'minfreq: %s, maxfreq: %s' % (startp, endp, nfreq,
                                                      minfreq, maxfreq))
                LOGINFO('autofreq = False: using PROVIDED values for '
                        'freq stepsize: %s, ndurations: %s, '
                        'min transit duration: %s, max transit duration: %s' %
                        (stepsize, ndurations,
                         mintransitduration, maxtransitduration))
            use_autoperiod = False
        elif isinstance(autofreq, str) and autofreq == 'astropy':
            use_autoperiod = True
            minfreq = 1.0/endp
            maxfreq = 1.0/startp
        else:
            LOGERROR("unknown autofreq kwarg encountered. can't continue...")
            return None
        # check the minimum frequency
        if minfreq < (1.0/(stimes.max() - stimes.min())):
            minfreq = 2.0/(stimes.max() - stimes.min())
            if verbose:
                LOGWARNING('the requested max P = %.3f is larger than '
                           'the time base of the observations = %.3f, '
                           ' will make minfreq = 2 x 1/timebase'
                           % (endp, stimes.max() - stimes.min()))
                LOGINFO('new minfreq: %s, maxfreq: %s' %
                        (minfreq, maxfreq))
        #############################
        ## NOW RUN BLS IN PARALLEL ##
        #############################
        # fix number of CPUs if needed
        if not nworkers or nworkers > NCPUS:
            nworkers = NCPUS
            if verbose:
                LOGINFO('using %s workers...' % nworkers)
        # check if autoperiod is True and get the correct period-grid
        if use_autoperiod:
            # astropy's BLS requires durations in units of time
            durations = nplinspace(mintransitduration*startp,
                                   maxtransitduration*startp,
                                   ndurations)
            # set up the correct units for the BLS model
            if magsarefluxes:
                blsmodel = BoxLeastSquares(
                    stimes*u.day,
                    smags*u.dimensionless_unscaled,
                    dy=serrs*u.dimensionless_unscaled
                )
            else:
                blsmodel = BoxLeastSquares(
                    stimes*u.day,
                    smags*u.mag,
                    dy=serrs*u.mag
                )
            periods = nparray(
                blsmodel.autoperiod(
                    durations*u.day,
                    minimum_period=startp,
                    maximum_period=endp,
                    minimum_n_transit=blsmintransits,
                    frequency_factor=blsfreqfactor
                )
            )
            frequencies = 1.0/periods
            nfreq = frequencies.size
            if verbose:
                LOGINFO(
                    "autofreq = 'astropy', used .autoperiod() with "
                    "minimum_n_transit = %s, freq_factor = %s "
                    "to generate the frequency grid" %
                    (blsmintransits, blsfreqfactor)
                )
                LOGINFO('stepsize = %s, nfreq = %s, minfreq = %.5f, '
                        'maxfreq = %.5f, ndurations = %s' %
                        (abs(frequencies[1] - frequencies[0]),
                         nfreq,
                         1.0/periods.max(),
                         1.0/periods.min(),
                         durations.size))
            del blsmodel
            del durations
        # otherwise, use kbls method
        else:
            frequencies = minfreq + nparange(nfreq)*stepsize
        # break up the tasks into chunks
        # (each worker gets csint frequencies; the last worker also absorbs
        # the csrem remainder frequencies)
        csrem = int(fmod(nfreq, nworkers))
        csint = int(float(nfreq/nworkers))
        chunk_minfreqs, chunk_nfreqs = [], []
        for x in range(nworkers):
            this_minfreqs = frequencies[x*csint]
            # handle usual nfreqs
            if x < (nworkers - 1):
                this_nfreqs = frequencies[x*csint:x*csint+csint].size
            else:
                this_nfreqs = frequencies[x*csint:x*csint+csint+csrem].size
            chunk_minfreqs.append(this_minfreqs)
            chunk_nfreqs.append(this_nfreqs)
        # populate the tasks list
        #
        # task[0] = times
        # task[1] = mags
        # task[2] = errs
        # task[3] = magsarefluxes
        # task[4] = minfreq
        # task[5] = nfreq
        # task[6] = stepsize
        # task[7] = nphasebins
        # task[8] = mintransitduration
        # task[9] = maxtransitduration
        # task[10] = blsobjective
        # task[11] = blsmethod
        # task[12] = blsoversample
        # populate the tasks list
        tasks = [(stimes, smags, serrs, magsarefluxes,
                  chunk_minf, chunk_nf, stepsize,
                  ndurations, mintransitduration, maxtransitduration,
                  blsobjective, blsmethod, blsoversample)
                 for (chunk_minf, chunk_nf)
                 in zip(chunk_minfreqs, chunk_nfreqs)]
        if verbose:
            for ind, task in enumerate(tasks):
                LOGINFO('worker %s: minfreq = %.6f, nfreqs = %s' %
                        (ind+1, task[4], task[5]))
            LOGINFO('running...')
        # return tasks
        # start the pool
        pool = Pool(nworkers)
        results = pool.map(_parallel_bls_worker, tasks)
        pool.close()
        pool.join()
        del pool
        # now concatenate the output lsp arrays
        # (pool.map preserves input order, so the concatenated powers line up
        # with the ascending-frequency chunks and hence with `frequencies`)
        lsp = npconcatenate([x['power'] for x in results])
        periods = 1.0/frequencies
        # find the nbestpeaks for the periodogram: 1. sort the lsp array
        # by highest value first 2. go down the values until we find
        # five values that are separated by at least periodepsilon in
        # period
        # make sure to get only the finite peaks in the periodogram
        # this is needed because BLS may produce infs for some peaks
        finitepeakind = npisfinite(lsp)
        finlsp = lsp[finitepeakind]
        finperiods = periods[finitepeakind]
        # make sure that finlsp has finite values before we work on it
        try:
            bestperiodind = npargmax(finlsp)
        except ValueError:
            LOGERROR('no finite periodogram values '
                     'for this mag series, skipping...')
            return {'bestperiod':npnan,
                    'bestlspval':npnan,
                    'nbestpeaks':nbestpeaks,
                    'nbestinds':None,
                    'nbestlspvals':None,
                    'nbestperiods':None,
                    'lspvals':None,
                    'periods':None,
                    'durations':None,
                    'method':'bls',
                    'blsresult':None,
                    'blsmodel':None,
                    'kwargs':{'startp':startp,
                              'endp':endp,
                              'stepsize':stepsize,
                              'mintransitduration':mintransitduration,
                              'maxtransitduration':maxtransitduration,
                              'ndurations':ndurations,
                              'blsobjective':blsobjective,
                              'blsmethod':blsmethod,
                              'blsoversample':blsoversample,
                              'autofreq':autofreq,
                              'periodepsilon':periodepsilon,
                              'nbestpeaks':nbestpeaks,
                              'sigclip':sigclip,
                              'magsarefluxes':magsarefluxes}}
        sortedlspind = npargsort(finlsp)[::-1]
        sortedlspperiods = finperiods[sortedlspind]
        sortedlspvals = finlsp[sortedlspind]
        # now get the nbestpeaks
        # NOTE: bestperiodind and the collected nbestinds index into the
        # finite-filtered arrays (finperiods/finlsp), not the full lsp array
        nbestperiods, nbestlspvals, nbestinds, peakcount = (
            [finperiods[bestperiodind]],
            [finlsp[bestperiodind]],
            [bestperiodind],
            1
        )
        prevperiod = sortedlspperiods[0]
        # find the best nbestpeaks in the lsp and their periods
        for period, lspval, ind in zip(sortedlspperiods,
                                       sortedlspvals,
                                       sortedlspind):
            if peakcount == nbestpeaks:
                break
            perioddiff = abs(period - prevperiod)
            bestperiodsdiff = [abs(period - x) for x in nbestperiods]
            # this ensures that this period is different from the last
            # period and from all the other existing best periods by
            # periodepsilon to make sure we jump to an entire different
            # peak in the periodogram
            if (perioddiff > (periodepsilon*prevperiod) and
                all(x > (periodepsilon*period)
                    for x in bestperiodsdiff)):
                nbestperiods.append(period)
                nbestlspvals.append(lspval)
                nbestinds.append(ind)
                peakcount = peakcount + 1
            prevperiod = period
        # generate the return dict
        resultdict = {
            'bestperiod':finperiods[bestperiodind],
            'bestlspval':finlsp[bestperiodind],
            'nbestpeaks':nbestpeaks,
            'nbestinds':nbestinds,
            'nbestlspvals':nbestlspvals,
            'nbestperiods':nbestperiods,
            'lspvals':lsp,
            'frequencies':frequencies,
            'periods':periods,
            'durations':[x['durations'] for x in results],
            'blsresult':[x['blsresult'] for x in results],
            'blsmodel':[x['blsmodel'] for x in results],
            'stepsize':stepsize,
            'nfreq':nfreq,
            'mintransitduration':mintransitduration,
            'maxtransitduration':maxtransitduration,
            'method':'bls',
            'kwargs':{'startp':startp,
                      'endp':endp,
                      'stepsize':stepsize,
                      'mintransitduration':mintransitduration,
                      'maxtransitduration':maxtransitduration,
                      'ndurations':ndurations,
                      'blsobjective':blsobjective,
                      'blsmethod':blsmethod,
                      'blsoversample':blsoversample,
                      'autofreq':autofreq,
                      'periodepsilon':periodepsilon,
                      'nbestpeaks':nbestpeaks,
                      'sigclip':sigclip,
                      'magsarefluxes':magsarefluxes}
        }
        return resultdict
    else:
        LOGERROR('no good detections for these times and mags, skipping...')
        return {'bestperiod':npnan,
                'bestlspval':npnan,
                'nbestinds':None,
                'nbestpeaks':nbestpeaks,
                'nbestlspvals':None,
                'nbestperiods':None,
                'lspvals':None,
                'periods':None,
                'durations':None,
                'blsresult':None,
                'blsmodel':None,
                'stepsize':stepsize,
                'nfreq':None,
                'nphasebins':None,
                'mintransitduration':mintransitduration,
                'maxtransitduration':maxtransitduration,
                'method':'bls',
                'kwargs':{'startp':startp,
                          'endp':endp,
                          'stepsize':stepsize,
                          'mintransitduration':mintransitduration,
                          'maxtransitduration':maxtransitduration,
                          'ndurations':ndurations,
                          'blsobjective':blsobjective,
                          'blsmethod':blsmethod,
                          'blsoversample':blsoversample,
                          'autofreq':autofreq,
                          'periodepsilon':periodepsilon,
                          'nbestpeaks':nbestpeaks,
                          'sigclip':sigclip,
                          'magsarefluxes':magsarefluxes}}
constant[Runs the Box Least Squares Fitting Search for transit-shaped signals.
Breaks up the full frequency space into chunks and passes them to parallel
BLS workers.
Based on the version of BLS in Astropy 3.1:
`astropy.stats.BoxLeastSquares`. If you don't have Astropy 3.1, this module
will fail to import. Note that by default, this implementation of
`bls_parallel_pfind` doesn't use the `.autoperiod()` function from
`BoxLeastSquares` but uses the same auto frequency-grid generation as the
functions in `periodbase.kbls`. If you want to use Astropy's implementation,
set the value of `autofreq` kwarg to 'astropy'. The generated period array
will then be broken up into chunks and sent to the individual workers.
NOTE: the combined BLS spectrum produced by this function is not identical
to that produced by running BLS in one shot for the entire frequency
space. There are differences on the order of 1.0e-3 or so in the respective
peak values, but peaks appear at the same frequencies for both methods. This
is likely due to different aliasing caused by smaller chunks of the
frequency space used by the parallel workers in this function. When in
doubt, confirm results for this parallel implementation by comparing to
those from the serial implementation above.
In particular, when you want to get reliable estimates of the SNR, transit
depth, duration, etc. that Astropy's BLS gives you, rerun `bls_serial_pfind`
with `startp`, and `endp` close to the best period you want to characterize
the transit at. The dict returned from that function contains a `blsmodel`
key, which is the generated model from Astropy's BLS. Use the
`.compute_stats()` method to calculate the required stats.
Parameters
----------
times,mags,errs : np.array
The magnitude/flux time-series to search for transits.
magsarefluxes : bool
If the input measurement values in `mags` and `errs` are in fluxes, set
this to True.
startp,endp : float
The minimum and maximum periods to consider for the transit search.
stepsize : float
The step-size in frequency to use when constructing a frequency grid for
the period search.
mintransitduration,maxtransitduration : float
The minimum and maximum transitdurations (in units of phase) to consider
for the transit search.
ndurations : int
The number of transit durations to use in the period-search.
autofreq : bool or str
If this is True, the values of `stepsize` and `nphasebins` will be
ignored, and these, along with a frequency-grid, will be determined
based on the following relations::
nphasebins = int(ceil(2.0/mintransitduration))
if nphasebins > 3000:
nphasebins = 3000
stepsize = 0.25*mintransitduration/(times.max()-times.min())
minfreq = 1.0/endp
maxfreq = 1.0/startp
nfreq = int(ceil((maxfreq - minfreq)/stepsize))
If this is False, you must set `startp`, `endp`, and `stepsize` as
appropriate.
If this is str == 'astropy', will use the
`astropy.stats.BoxLeastSquares.autoperiod()` function to calculate the
frequency grid instead of the kbls method.
blsobjective : {'likelihood','snr'}
Sets the type of objective to optimize in the `BoxLeastSquares.power()`
function.
blsmethod : {'fast','slow'}
Sets the type of method to use in the `BoxLeastSquares.power()`
function.
blsoversample : {'likelihood','snr'}
Sets the `oversample` kwarg for the `BoxLeastSquares.power()` function.
blsmintransits : int
Sets the `min_n_transits` kwarg for the `BoxLeastSquares.autoperiod()`
function.
blsfreqfactor : float
Sets the `frequency_factor` kwarg for the `BoxLeastSquares.autoperiod()`
function.
periodepsilon : float
The fractional difference between successive values of 'best' periods
when sorting by periodogram power to consider them as separate periods
(as opposed to part of the same periodogram peak). This is used to avoid
broad peaks in the periodogram and make sure the 'best' periods returned
are all actually independent.
nbestpeaks : int
The number of 'best' peaks to return from the periodogram results,
starting from the global maximum of the periodogram peak values.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
verbose : bool
If this is True, will indicate progress and details about the frequency
grid used for the period search.
nworkers : int or None
The number of parallel workers to launch for period-search. If None,
nworkers = NCPUS.
Returns
-------
dict
This function returns a dict, referred to as an `lspinfo` dict in other
astrobase functions that operate on periodogram results. This is a
standardized format across all astrobase period-finders, and is of the
form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'lspvals': the full array of periodogram powers,
'frequencies': the full array of frequencies considered,
'periods': the full array of periods considered,
'durations': the array of durations used to run BLS,
'blsresult': Astropy BLS result object (BoxLeastSquaresResult),
'blsmodel': Astropy BLS BoxLeastSquares object used for work,
'stepsize': the actual stepsize used,
'nfreq': the actual nfreq used,
'durations': the durations array used,
'mintransitduration': the input mintransitduration,
'maxtransitduration': the input maxtransitdurations,
'method':'bls' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
]
<ast.Tuple object at 0x7da204621f90> assign[=] call[name[sigclip_magseries], parameter[name[times], name[mags], name[errs]]]
if <ast.BoolOp object at 0x7da2046223b0> begin[:]
if <ast.BoolOp object at 0x7da204621b40> begin[:]
variable[stepsize] assign[=] binary_operation[binary_operation[constant[0.25] * name[mintransitduration]] / binary_operation[call[name[stimes].max, parameter[]] - call[name[stimes].min, parameter[]]]]
variable[minfreq] assign[=] binary_operation[constant[1.0] / name[endp]]
variable[maxfreq] assign[=] binary_operation[constant[1.0] / name[startp]]
variable[nfreq] assign[=] call[name[int], parameter[call[name[npceil], parameter[binary_operation[binary_operation[name[maxfreq] - name[minfreq]] / name[stepsize]]]]]]
if name[verbose] begin[:]
call[name[LOGINFO], parameter[binary_operation[constant[min P: %s, max P: %s, nfreq: %s, minfreq: %s, maxfreq: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c9935b0>, <ast.Name object at 0x7da20c990fa0>, <ast.Name object at 0x7da20c9925f0>, <ast.Name object at 0x7da20c991060>, <ast.Name object at 0x7da20c992500>]]]]]
call[name[LOGINFO], parameter[binary_operation[constant[autofreq = True: using AUTOMATIC values for freq stepsize: %s, ndurations: %s, min transit duration: %s, max transit duration: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c9922c0>, <ast.Name object at 0x7da20c993970>, <ast.Name object at 0x7da20c992560>, <ast.Name object at 0x7da20c9916f0>]]]]]
variable[use_autoperiod] assign[=] constant[False]
if compare[name[minfreq] less[<] binary_operation[constant[1.0] / binary_operation[call[name[stimes].max, parameter[]] - call[name[stimes].min, parameter[]]]]] begin[:]
variable[minfreq] assign[=] binary_operation[constant[2.0] / binary_operation[call[name[stimes].max, parameter[]] - call[name[stimes].min, parameter[]]]]
if name[verbose] begin[:]
call[name[LOGWARNING], parameter[binary_operation[constant[the requested max P = %.3f is larger than the time base of the observations = %.3f, will make minfreq = 2 x 1/timebase] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c9912a0>, <ast.BinOp object at 0x7da20c990850>]]]]]
call[name[LOGINFO], parameter[binary_operation[constant[new minfreq: %s, maxfreq: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c991930>, <ast.Name object at 0x7da20c993ca0>]]]]]
if <ast.BoolOp object at 0x7da20c991b70> begin[:]
variable[nworkers] assign[=] name[NCPUS]
if name[verbose] begin[:]
call[name[LOGINFO], parameter[binary_operation[constant[using %s workers...] <ast.Mod object at 0x7da2590d6920> name[nworkers]]]]
if name[use_autoperiod] begin[:]
variable[durations] assign[=] call[name[nplinspace], parameter[binary_operation[name[mintransitduration] * name[startp]], binary_operation[name[maxtransitduration] * name[startp]], name[ndurations]]]
if name[magsarefluxes] begin[:]
variable[blsmodel] assign[=] call[name[BoxLeastSquares], parameter[binary_operation[name[stimes] * name[u].day], binary_operation[name[smags] * name[u].dimensionless_unscaled]]]
variable[periods] assign[=] call[name[nparray], parameter[call[name[blsmodel].autoperiod, parameter[binary_operation[name[durations] * name[u].day]]]]]
variable[frequencies] assign[=] binary_operation[constant[1.0] / name[periods]]
variable[nfreq] assign[=] name[frequencies].size
if name[verbose] begin[:]
call[name[LOGINFO], parameter[binary_operation[constant[autofreq = 'astropy', used .autoperiod() with minimum_n_transit = %s, freq_factor = %s to generate the frequency grid] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c992140>, <ast.Name object at 0x7da20c991900>]]]]]
call[name[LOGINFO], parameter[binary_operation[constant[stepsize = %s, nfreq = %s, minfreq = %.5f, maxfreq = %.5f, ndurations = %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da20c993d00>, <ast.Name object at 0x7da20c991210>, <ast.BinOp object at 0x7da20c991330>, <ast.BinOp object at 0x7da20c992920>, <ast.Attribute object at 0x7da20c991420>]]]]]
<ast.Delete object at 0x7da20c992050>
<ast.Delete object at 0x7da18f00cbe0>
variable[csrem] assign[=] call[name[int], parameter[call[name[fmod], parameter[name[nfreq], name[nworkers]]]]]
variable[csint] assign[=] call[name[int], parameter[call[name[float], parameter[binary_operation[name[nfreq] / name[nworkers]]]]]]
<ast.Tuple object at 0x7da18f00c2e0> assign[=] tuple[[<ast.List object at 0x7da18f00c880>, <ast.List object at 0x7da18f00d060>]]
for taget[name[x]] in starred[call[name[range], parameter[name[nworkers]]]] begin[:]
variable[this_minfreqs] assign[=] call[name[frequencies]][binary_operation[name[x] * name[csint]]]
if compare[name[x] less[<] binary_operation[name[nworkers] - constant[1]]] begin[:]
variable[this_nfreqs] assign[=] call[name[frequencies]][<ast.Slice object at 0x7da18f00e860>].size
call[name[chunk_minfreqs].append, parameter[name[this_minfreqs]]]
call[name[chunk_nfreqs].append, parameter[name[this_nfreqs]]]
variable[tasks] assign[=] <ast.ListComp object at 0x7da18f00c8b0>
if name[verbose] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18f00c520>, <ast.Name object at 0x7da18f00cd60>]]] in starred[call[name[enumerate], parameter[name[tasks]]]] begin[:]
call[name[LOGINFO], parameter[binary_operation[constant[worker %s: minfreq = %.6f, nfreqs = %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da18f00ecb0>, <ast.Subscript object at 0x7da18f00ceb0>, <ast.Subscript object at 0x7da18f00fbb0>]]]]]
call[name[LOGINFO], parameter[constant[running...]]]
variable[pool] assign[=] call[name[Pool], parameter[name[nworkers]]]
variable[results] assign[=] call[name[pool].map, parameter[name[_parallel_bls_worker], name[tasks]]]
call[name[pool].close, parameter[]]
call[name[pool].join, parameter[]]
<ast.Delete object at 0x7da18f00d660>
variable[lsp] assign[=] call[name[npconcatenate], parameter[<ast.ListComp object at 0x7da18f00d330>]]
variable[periods] assign[=] binary_operation[constant[1.0] / name[frequencies]]
variable[finitepeakind] assign[=] call[name[npisfinite], parameter[name[lsp]]]
variable[finlsp] assign[=] call[name[lsp]][name[finitepeakind]]
variable[finperiods] assign[=] call[name[periods]][name[finitepeakind]]
<ast.Try object at 0x7da18f00c190>
variable[sortedlspind] assign[=] call[call[name[npargsort], parameter[name[finlsp]]]][<ast.Slice object at 0x7da18f00efb0>]
variable[sortedlspperiods] assign[=] call[name[finperiods]][name[sortedlspind]]
variable[sortedlspvals] assign[=] call[name[finlsp]][name[sortedlspind]]
<ast.Tuple object at 0x7da18f00eaa0> assign[=] tuple[[<ast.List object at 0x7da18f00c280>, <ast.List object at 0x7da18f00e980>, <ast.List object at 0x7da18f00c250>, <ast.Constant object at 0x7da18f00ff10>]]
variable[prevperiod] assign[=] call[name[sortedlspperiods]][constant[0]]
for taget[tuple[[<ast.Name object at 0x7da18f00f100>, <ast.Name object at 0x7da18f00ff40>, <ast.Name object at 0x7da18f00f5b0>]]] in starred[call[name[zip], parameter[name[sortedlspperiods], name[sortedlspvals], name[sortedlspind]]]] begin[:]
if compare[name[peakcount] equal[==] name[nbestpeaks]] begin[:]
break
variable[perioddiff] assign[=] call[name[abs], parameter[binary_operation[name[period] - name[prevperiod]]]]
variable[bestperiodsdiff] assign[=] <ast.ListComp object at 0x7da18eb57190>
if <ast.BoolOp object at 0x7da18eb546d0> begin[:]
call[name[nbestperiods].append, parameter[name[period]]]
call[name[nbestlspvals].append, parameter[name[lspval]]]
call[name[nbestinds].append, parameter[name[ind]]]
variable[peakcount] assign[=] binary_operation[name[peakcount] + constant[1]]
variable[prevperiod] assign[=] name[period]
variable[resultdict] assign[=] dictionary[[<ast.Constant object at 0x7da18eb553f0>, <ast.Constant object at 0x7da18eb57e50>, <ast.Constant object at 0x7da18eb57340>, <ast.Constant object at 0x7da18eb579a0>, <ast.Constant object at 0x7da18eb55cc0>, <ast.Constant object at 0x7da18eb55ea0>, <ast.Constant object at 0x7da18eb54ac0>, <ast.Constant object at 0x7da18eb576a0>, <ast.Constant object at 0x7da18eb57910>, <ast.Constant object at 0x7da18eb54a00>, <ast.Constant object at 0x7da18eb54430>, <ast.Constant object at 0x7da18eb55720>, <ast.Constant object at 0x7da18eb57820>, <ast.Constant object at 0x7da18eb55900>, <ast.Constant object at 0x7da18eb56c50>, <ast.Constant object at 0x7da18eb57f40>, <ast.Constant object at 0x7da18eb57940>, <ast.Constant object at 0x7da18eb54640>], [<ast.Subscript object at 0x7da18eb56320>, <ast.Subscript object at 0x7da18eb553c0>, <ast.Name object at 0x7da18eb54610>, <ast.Name object at 0x7da18eb55ab0>, <ast.Name object at 0x7da18eb54c10>, <ast.Name object at 0x7da18eb541f0>, <ast.Name object at 0x7da18eb55d20>, <ast.Name object at 0x7da18eb54160>, <ast.Name object at 0x7da18eb57970>, <ast.ListComp object at 0x7da18eb55120>, <ast.ListComp object at 0x7da18eb56e00>, <ast.ListComp object at 0x7da18eb562c0>, <ast.Name object at 0x7da18eb56110>, <ast.Name object at 0x7da18eb56b60>, <ast.Name object at 0x7da18eb54be0>, <ast.Name object at 0x7da18eb578b0>, <ast.Constant object at 0x7da18eb54a60>, <ast.Dict object at 0x7da18eb56ec0>]]
return[name[resultdict]] | keyword[def] identifier[bls_parallel_pfind] (
identifier[times] , identifier[mags] , identifier[errs] ,
identifier[magsarefluxes] = keyword[False] ,
identifier[startp] = literal[int] ,
identifier[endp] = literal[int] ,
identifier[stepsize] = literal[int] ,
identifier[mintransitduration] = literal[int] ,
identifier[maxtransitduration] = literal[int] ,
identifier[ndurations] = literal[int] ,
identifier[autofreq] = keyword[True] ,
identifier[blsobjective] = literal[string] ,
identifier[blsmethod] = literal[string] ,
identifier[blsoversample] = literal[int] ,
identifier[blsmintransits] = literal[int] ,
identifier[blsfreqfactor] = literal[int] ,
identifier[nbestpeaks] = literal[int] ,
identifier[periodepsilon] = literal[int] ,
identifier[sigclip] = literal[int] ,
identifier[verbose] = keyword[True] ,
identifier[nworkers] = keyword[None] ,
):
literal[string]
identifier[stimes] , identifier[smags] , identifier[serrs] = identifier[sigclip_magseries] ( identifier[times] ,
identifier[mags] ,
identifier[errs] ,
identifier[magsarefluxes] = identifier[magsarefluxes] ,
identifier[sigclip] = identifier[sigclip] )
keyword[if] identifier[len] ( identifier[stimes] )> literal[int] keyword[and] identifier[len] ( identifier[smags] )> literal[int] keyword[and] identifier[len] ( identifier[serrs] )> literal[int] :
keyword[if] identifier[isinstance] ( identifier[autofreq] , identifier[bool] ) keyword[and] identifier[autofreq] :
identifier[stepsize] = literal[int] * identifier[mintransitduration] /( identifier[stimes] . identifier[max] ()- identifier[stimes] . identifier[min] ())
identifier[minfreq] = literal[int] / identifier[endp]
identifier[maxfreq] = literal[int] / identifier[startp]
identifier[nfreq] = identifier[int] ( identifier[npceil] (( identifier[maxfreq] - identifier[minfreq] )/ identifier[stepsize] ))
keyword[if] identifier[verbose] :
identifier[LOGINFO] ( literal[string]
literal[string] %( identifier[startp] , identifier[endp] , identifier[nfreq] ,
identifier[minfreq] , identifier[maxfreq] ))
identifier[LOGINFO] ( literal[string]
literal[string]
literal[string] %
( identifier[stepsize] , identifier[ndurations] ,
identifier[mintransitduration] , identifier[maxtransitduration] ))
identifier[use_autoperiod] = keyword[False]
keyword[elif] identifier[isinstance] ( identifier[autofreq] , identifier[bool] ) keyword[and] keyword[not] identifier[autofreq] :
identifier[minfreq] = literal[int] / identifier[endp]
identifier[maxfreq] = literal[int] / identifier[startp]
identifier[nfreq] = identifier[int] ( identifier[npceil] (( identifier[maxfreq] - identifier[minfreq] )/ identifier[stepsize] ))
keyword[if] identifier[verbose] :
identifier[LOGINFO] ( literal[string]
literal[string] %( identifier[startp] , identifier[endp] , identifier[nfreq] ,
identifier[minfreq] , identifier[maxfreq] ))
identifier[LOGINFO] ( literal[string]
literal[string]
literal[string] %
( identifier[stepsize] , identifier[ndurations] ,
identifier[mintransitduration] , identifier[maxtransitduration] ))
identifier[use_autoperiod] = keyword[False]
keyword[elif] identifier[isinstance] ( identifier[autofreq] , identifier[str] ) keyword[and] identifier[autofreq] == literal[string] :
identifier[use_autoperiod] = keyword[True]
identifier[minfreq] = literal[int] / identifier[endp]
identifier[maxfreq] = literal[int] / identifier[startp]
keyword[else] :
identifier[LOGERROR] ( literal[string] )
keyword[return] keyword[None]
keyword[if] identifier[minfreq] <( literal[int] /( identifier[stimes] . identifier[max] ()- identifier[stimes] . identifier[min] ())):
identifier[minfreq] = literal[int] /( identifier[stimes] . identifier[max] ()- identifier[stimes] . identifier[min] ())
keyword[if] identifier[verbose] :
identifier[LOGWARNING] ( literal[string]
literal[string]
literal[string]
%( identifier[endp] , identifier[stimes] . identifier[max] ()- identifier[stimes] . identifier[min] ()))
identifier[LOGINFO] ( literal[string] %
( identifier[minfreq] , identifier[maxfreq] ))
keyword[if] keyword[not] identifier[nworkers] keyword[or] identifier[nworkers] > identifier[NCPUS] :
identifier[nworkers] = identifier[NCPUS]
keyword[if] identifier[verbose] :
identifier[LOGINFO] ( literal[string] % identifier[nworkers] )
keyword[if] identifier[use_autoperiod] :
identifier[durations] = identifier[nplinspace] ( identifier[mintransitduration] * identifier[startp] ,
identifier[maxtransitduration] * identifier[startp] ,
identifier[ndurations] )
keyword[if] identifier[magsarefluxes] :
identifier[blsmodel] = identifier[BoxLeastSquares] (
identifier[stimes] * identifier[u] . identifier[day] ,
identifier[smags] * identifier[u] . identifier[dimensionless_unscaled] ,
identifier[dy] = identifier[serrs] * identifier[u] . identifier[dimensionless_unscaled]
)
keyword[else] :
identifier[blsmodel] = identifier[BoxLeastSquares] (
identifier[stimes] * identifier[u] . identifier[day] ,
identifier[smags] * identifier[u] . identifier[mag] ,
identifier[dy] = identifier[serrs] * identifier[u] . identifier[mag]
)
identifier[periods] = identifier[nparray] (
identifier[blsmodel] . identifier[autoperiod] (
identifier[durations] * identifier[u] . identifier[day] ,
identifier[minimum_period] = identifier[startp] ,
identifier[maximum_period] = identifier[endp] ,
identifier[minimum_n_transit] = identifier[blsmintransits] ,
identifier[frequency_factor] = identifier[blsfreqfactor]
)
)
identifier[frequencies] = literal[int] / identifier[periods]
identifier[nfreq] = identifier[frequencies] . identifier[size]
keyword[if] identifier[verbose] :
identifier[LOGINFO] (
literal[string]
literal[string]
literal[string] %
( identifier[blsmintransits] , identifier[blsfreqfactor] )
)
identifier[LOGINFO] ( literal[string]
literal[string] %
( identifier[abs] ( identifier[frequencies] [ literal[int] ]- identifier[frequencies] [ literal[int] ]),
identifier[nfreq] ,
literal[int] / identifier[periods] . identifier[max] (),
literal[int] / identifier[periods] . identifier[min] (),
identifier[durations] . identifier[size] ))
keyword[del] identifier[blsmodel]
keyword[del] identifier[durations]
keyword[else] :
identifier[frequencies] = identifier[minfreq] + identifier[nparange] ( identifier[nfreq] )* identifier[stepsize]
identifier[csrem] = identifier[int] ( identifier[fmod] ( identifier[nfreq] , identifier[nworkers] ))
identifier[csint] = identifier[int] ( identifier[float] ( identifier[nfreq] / identifier[nworkers] ))
identifier[chunk_minfreqs] , identifier[chunk_nfreqs] =[],[]
keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[nworkers] ):
identifier[this_minfreqs] = identifier[frequencies] [ identifier[x] * identifier[csint] ]
keyword[if] identifier[x] <( identifier[nworkers] - literal[int] ):
identifier[this_nfreqs] = identifier[frequencies] [ identifier[x] * identifier[csint] : identifier[x] * identifier[csint] + identifier[csint] ]. identifier[size]
keyword[else] :
identifier[this_nfreqs] = identifier[frequencies] [ identifier[x] * identifier[csint] : identifier[x] * identifier[csint] + identifier[csint] + identifier[csrem] ]. identifier[size]
identifier[chunk_minfreqs] . identifier[append] ( identifier[this_minfreqs] )
identifier[chunk_nfreqs] . identifier[append] ( identifier[this_nfreqs] )
identifier[tasks] =[( identifier[stimes] , identifier[smags] , identifier[serrs] , identifier[magsarefluxes] ,
identifier[chunk_minf] , identifier[chunk_nf] , identifier[stepsize] ,
identifier[ndurations] , identifier[mintransitduration] , identifier[maxtransitduration] ,
identifier[blsobjective] , identifier[blsmethod] , identifier[blsoversample] )
keyword[for] ( identifier[chunk_minf] , identifier[chunk_nf] )
keyword[in] identifier[zip] ( identifier[chunk_minfreqs] , identifier[chunk_nfreqs] )]
keyword[if] identifier[verbose] :
keyword[for] identifier[ind] , identifier[task] keyword[in] identifier[enumerate] ( identifier[tasks] ):
identifier[LOGINFO] ( literal[string] %
( identifier[ind] + literal[int] , identifier[task] [ literal[int] ], identifier[task] [ literal[int] ]))
identifier[LOGINFO] ( literal[string] )
identifier[pool] = identifier[Pool] ( identifier[nworkers] )
identifier[results] = identifier[pool] . identifier[map] ( identifier[_parallel_bls_worker] , identifier[tasks] )
identifier[pool] . identifier[close] ()
identifier[pool] . identifier[join] ()
keyword[del] identifier[pool]
identifier[lsp] = identifier[npconcatenate] ([ identifier[x] [ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[results] ])
identifier[periods] = literal[int] / identifier[frequencies]
identifier[finitepeakind] = identifier[npisfinite] ( identifier[lsp] )
identifier[finlsp] = identifier[lsp] [ identifier[finitepeakind] ]
identifier[finperiods] = identifier[periods] [ identifier[finitepeakind] ]
keyword[try] :
identifier[bestperiodind] = identifier[npargmax] ( identifier[finlsp] )
keyword[except] identifier[ValueError] :
identifier[LOGERROR] ( literal[string]
literal[string] )
keyword[return] { literal[string] : identifier[npnan] ,
literal[string] : identifier[npnan] ,
literal[string] : identifier[nbestpeaks] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : literal[string] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] :{ literal[string] : identifier[startp] ,
literal[string] : identifier[endp] ,
literal[string] : identifier[stepsize] ,
literal[string] : identifier[mintransitduration] ,
literal[string] : identifier[maxtransitduration] ,
literal[string] : identifier[ndurations] ,
literal[string] : identifier[blsobjective] ,
literal[string] : identifier[blsmethod] ,
literal[string] : identifier[blsoversample] ,
literal[string] : identifier[autofreq] ,
literal[string] : identifier[periodepsilon] ,
literal[string] : identifier[nbestpeaks] ,
literal[string] : identifier[sigclip] ,
literal[string] : identifier[magsarefluxes] }}
identifier[sortedlspind] = identifier[npargsort] ( identifier[finlsp] )[::- literal[int] ]
identifier[sortedlspperiods] = identifier[finperiods] [ identifier[sortedlspind] ]
identifier[sortedlspvals] = identifier[finlsp] [ identifier[sortedlspind] ]
identifier[nbestperiods] , identifier[nbestlspvals] , identifier[nbestinds] , identifier[peakcount] =(
[ identifier[finperiods] [ identifier[bestperiodind] ]],
[ identifier[finlsp] [ identifier[bestperiodind] ]],
[ identifier[bestperiodind] ],
literal[int]
)
identifier[prevperiod] = identifier[sortedlspperiods] [ literal[int] ]
keyword[for] identifier[period] , identifier[lspval] , identifier[ind] keyword[in] identifier[zip] ( identifier[sortedlspperiods] ,
identifier[sortedlspvals] ,
identifier[sortedlspind] ):
keyword[if] identifier[peakcount] == identifier[nbestpeaks] :
keyword[break]
identifier[perioddiff] = identifier[abs] ( identifier[period] - identifier[prevperiod] )
identifier[bestperiodsdiff] =[ identifier[abs] ( identifier[period] - identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[nbestperiods] ]
keyword[if] ( identifier[perioddiff] >( identifier[periodepsilon] * identifier[prevperiod] ) keyword[and]
identifier[all] ( identifier[x] >( identifier[periodepsilon] * identifier[period] )
keyword[for] identifier[x] keyword[in] identifier[bestperiodsdiff] )):
identifier[nbestperiods] . identifier[append] ( identifier[period] )
identifier[nbestlspvals] . identifier[append] ( identifier[lspval] )
identifier[nbestinds] . identifier[append] ( identifier[ind] )
identifier[peakcount] = identifier[peakcount] + literal[int]
identifier[prevperiod] = identifier[period]
identifier[resultdict] ={
literal[string] : identifier[finperiods] [ identifier[bestperiodind] ],
literal[string] : identifier[finlsp] [ identifier[bestperiodind] ],
literal[string] : identifier[nbestpeaks] ,
literal[string] : identifier[nbestinds] ,
literal[string] : identifier[nbestlspvals] ,
literal[string] : identifier[nbestperiods] ,
literal[string] : identifier[lsp] ,
literal[string] : identifier[frequencies] ,
literal[string] : identifier[periods] ,
literal[string] :[ identifier[x] [ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[results] ],
literal[string] :[ identifier[x] [ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[results] ],
literal[string] :[ identifier[x] [ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[results] ],
literal[string] : identifier[stepsize] ,
literal[string] : identifier[nfreq] ,
literal[string] : identifier[mintransitduration] ,
literal[string] : identifier[maxtransitduration] ,
literal[string] : literal[string] ,
literal[string] :{ literal[string] : identifier[startp] ,
literal[string] : identifier[endp] ,
literal[string] : identifier[stepsize] ,
literal[string] : identifier[mintransitduration] ,
literal[string] : identifier[maxtransitduration] ,
literal[string] : identifier[ndurations] ,
literal[string] : identifier[blsobjective] ,
literal[string] : identifier[blsmethod] ,
literal[string] : identifier[blsoversample] ,
literal[string] : identifier[autofreq] ,
literal[string] : identifier[periodepsilon] ,
literal[string] : identifier[nbestpeaks] ,
literal[string] : identifier[sigclip] ,
literal[string] : identifier[magsarefluxes] }
}
keyword[return] identifier[resultdict]
keyword[else] :
identifier[LOGERROR] ( literal[string] )
keyword[return] { literal[string] : identifier[npnan] ,
literal[string] : identifier[npnan] ,
literal[string] : keyword[None] ,
literal[string] : identifier[nbestpeaks] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : identifier[stepsize] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : identifier[mintransitduration] ,
literal[string] : identifier[maxtransitduration] ,
literal[string] : literal[string] ,
literal[string] :{ literal[string] : identifier[startp] ,
literal[string] : identifier[endp] ,
literal[string] : identifier[stepsize] ,
literal[string] : identifier[mintransitduration] ,
literal[string] : identifier[maxtransitduration] ,
literal[string] : identifier[ndurations] ,
literal[string] : identifier[blsobjective] ,
literal[string] : identifier[blsmethod] ,
literal[string] : identifier[blsoversample] ,
literal[string] : identifier[autofreq] ,
literal[string] : identifier[periodepsilon] ,
literal[string] : identifier[nbestpeaks] ,
literal[string] : identifier[sigclip] ,
literal[string] : identifier[magsarefluxes] }} | def bls_parallel_pfind(times, mags, errs, magsarefluxes=False, startp=0.1, endp=100.0, stepsize=0.0001, mintransitduration=0.01, maxtransitduration=0.4, ndurations=100, autofreq=True, blsobjective='likelihood', blsmethod='fast', blsoversample=5, blsmintransits=3, blsfreqfactor=10.0, nbestpeaks=5, periodepsilon=0.1, sigclip=10.0, verbose=True, nworkers=None): # by default, search from 0.1 d to...
# ... 100.0 d -- don't search full timebase
# minimum transit length in phase
# maximum transit length in phase
# figure out f0, nf, and df automatically
# 0.1
'Runs the Box Least Squares Fitting Search for transit-shaped signals.\n\n Breaks up the full frequency space into chunks and passes them to parallel\n BLS workers.\n\n Based on the version of BLS in Astropy 3.1:\n `astropy.stats.BoxLeastSquares`. If you don\'t have Astropy 3.1, this module\n will fail to import. Note that by default, this implementation of\n `bls_parallel_pfind` doesn\'t use the `.autoperiod()` function from\n `BoxLeastSquares` but uses the same auto frequency-grid generation as the\n functions in `periodbase.kbls`. If you want to use Astropy\'s implementation,\n set the value of `autofreq` kwarg to \'astropy\'. The generated period array\n will then be broken up into chunks and sent to the individual workers.\n\n NOTE: the combined BLS spectrum produced by this function is not identical\n to that produced by running BLS in one shot for the entire frequency\n space. There are differences on the order of 1.0e-3 or so in the respective\n peak values, but peaks appear at the same frequencies for both methods. This\n is likely due to different aliasing caused by smaller chunks of the\n frequency space used by the parallel workers in this function. When in\n doubt, confirm results for this parallel implementation by comparing to\n those from the serial implementation above.\n\n In particular, when you want to get reliable estimates of the SNR, transit\n depth, duration, etc. that Astropy\'s BLS gives you, rerun `bls_serial_pfind`\n with `startp`, and `endp` close to the best period you want to characterize\n the transit at. The dict returned from that function contains a `blsmodel`\n key, which is the generated model from Astropy\'s BLS. 
Use the\n `.compute_stats()` method to calculate the required stats.\n\n Parameters\n ----------\n\n times,mags,errs : np.array\n The magnitude/flux time-series to search for transits.\n\n magsarefluxes : bool\n If the input measurement values in `mags` and `errs` are in fluxes, set\n this to True.\n\n startp,endp : float\n The minimum and maximum periods to consider for the transit search.\n\n stepsize : float\n The step-size in frequency to use when constructing a frequency grid for\n the period search.\n\n mintransitduration,maxtransitduration : float\n The minimum and maximum transitdurations (in units of phase) to consider\n for the transit search.\n\n ndurations : int\n The number of transit durations to use in the period-search.\n\n autofreq : bool or str\n If this is True, the values of `stepsize` and `nphasebins` will be\n ignored, and these, along with a frequency-grid, will be determined\n based on the following relations::\n\n nphasebins = int(ceil(2.0/mintransitduration))\n if nphasebins > 3000:\n nphasebins = 3000\n\n stepsize = 0.25*mintransitduration/(times.max()-times.min())\n\n minfreq = 1.0/endp\n maxfreq = 1.0/startp\n nfreq = int(ceil((maxfreq - minfreq)/stepsize))\n\n If this is False, you must set `startp`, `endp`, and `stepsize` as\n appropriate.\n\n If this is str == \'astropy\', will use the\n `astropy.stats.BoxLeastSquares.autoperiod()` function to calculate the\n frequency grid instead of the kbls method.\n\n blsobjective : {\'likelihood\',\'snr\'}\n Sets the type of objective to optimize in the `BoxLeastSquares.power()`\n function.\n\n blsmethod : {\'fast\',\'slow\'}\n Sets the type of method to use in the `BoxLeastSquares.power()`\n function.\n\n blsoversample : {\'likelihood\',\'snr\'}\n Sets the `oversample` kwarg for the `BoxLeastSquares.power()` function.\n\n blsmintransits : int\n Sets the `min_n_transits` kwarg for the `BoxLeastSquares.autoperiod()`\n function.\n\n blsfreqfactor : float\n Sets the `frequency_factor` kwarg for the 
`BoxLeastSquares.autoperiod()`\n function.\n\n periodepsilon : float\n The fractional difference between successive values of \'best\' periods\n when sorting by periodogram power to consider them as separate periods\n (as opposed to part of the same periodogram peak). This is used to avoid\n broad peaks in the periodogram and make sure the \'best\' periods returned\n are all actually independent.\n\n nbestpeaks : int\n The number of \'best\' peaks to return from the periodogram results,\n starting from the global maximum of the periodogram peak values.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n \'asymmetric\' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n "dimming" and "brightening" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n verbose : bool\n If this is True, will indicate progress and details about the frequency\n grid used for the period search.\n\n nworkers : int or None\n The number of parallel workers to launch for period-search. If None,\n nworkers = NCPUS.\n\n Returns\n -------\n\n dict\n This function returns a dict, referred to as an `lspinfo` dict in other\n astrobase functions that operate on periodogram results. 
This is a\n standardized format across all astrobase period-finders, and is of the\n form below::\n\n {\'bestperiod\': the best period value in the periodogram,\n \'bestlspval\': the periodogram peak associated with the best period,\n \'nbestpeaks\': the input value of nbestpeaks,\n \'nbestlspvals\': nbestpeaks-size list of best period peak values,\n \'nbestperiods\': nbestpeaks-size list of best periods,\n \'lspvals\': the full array of periodogram powers,\n \'frequencies\': the full array of frequencies considered,\n \'periods\': the full array of periods considered,\n \'durations\': the array of durations used to run BLS,\n \'blsresult\': Astropy BLS result object (BoxLeastSquaresResult),\n \'blsmodel\': Astropy BLS BoxLeastSquares object used for work,\n \'stepsize\': the actual stepsize used,\n \'nfreq\': the actual nfreq used,\n \'durations\': the durations array used,\n \'mintransitduration\': the input mintransitduration,\n \'maxtransitduration\': the input maxtransitdurations,\n \'method\':\'bls\' -> the name of the period-finder method,\n \'kwargs\':{ dict of all of the input kwargs for record-keeping}}\n\n '
# get rid of nans first and sigclip
(stimes, smags, serrs) = sigclip_magseries(times, mags, errs, magsarefluxes=magsarefluxes, sigclip=sigclip)
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and (len(serrs) > 9):
# if we're setting up everything automatically
if isinstance(autofreq, bool) and autofreq:
# use heuristic to figure out best timestep
stepsize = 0.25 * mintransitduration / (stimes.max() - stimes.min())
# now figure out the frequencies to use
minfreq = 1.0 / endp
maxfreq = 1.0 / startp
nfreq = int(npceil((maxfreq - minfreq) / stepsize))
# say what we're using
if verbose:
LOGINFO('min P: %s, max P: %s, nfreq: %s, minfreq: %s, maxfreq: %s' % (startp, endp, nfreq, minfreq, maxfreq))
LOGINFO('autofreq = True: using AUTOMATIC values for freq stepsize: %s, ndurations: %s, min transit duration: %s, max transit duration: %s' % (stepsize, ndurations, mintransitduration, maxtransitduration)) # depends on [control=['if'], data=[]]
use_autoperiod = False # depends on [control=['if'], data=[]]
elif isinstance(autofreq, bool) and (not autofreq):
minfreq = 1.0 / endp
maxfreq = 1.0 / startp
nfreq = int(npceil((maxfreq - minfreq) / stepsize))
# say what we're using
if verbose:
LOGINFO('min P: %s, max P: %s, nfreq: %s, minfreq: %s, maxfreq: %s' % (startp, endp, nfreq, minfreq, maxfreq))
LOGINFO('autofreq = False: using PROVIDED values for freq stepsize: %s, ndurations: %s, min transit duration: %s, max transit duration: %s' % (stepsize, ndurations, mintransitduration, maxtransitduration)) # depends on [control=['if'], data=[]]
use_autoperiod = False # depends on [control=['if'], data=[]]
elif isinstance(autofreq, str) and autofreq == 'astropy':
use_autoperiod = True
minfreq = 1.0 / endp
maxfreq = 1.0 / startp # depends on [control=['if'], data=[]]
else:
LOGERROR("unknown autofreq kwarg encountered. can't continue...")
return None
# check the minimum frequency
if minfreq < 1.0 / (stimes.max() - stimes.min()):
minfreq = 2.0 / (stimes.max() - stimes.min())
if verbose:
LOGWARNING('the requested max P = %.3f is larger than the time base of the observations = %.3f, will make minfreq = 2 x 1/timebase' % (endp, stimes.max() - stimes.min()))
LOGINFO('new minfreq: %s, maxfreq: %s' % (minfreq, maxfreq)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['minfreq']]
#############################
## NOW RUN BLS IN PARALLEL ##
#############################
# fix number of CPUs if needed
if not nworkers or nworkers > NCPUS:
nworkers = NCPUS
if verbose:
LOGINFO('using %s workers...' % nworkers) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# check if autoperiod is True and get the correct period-grid
if use_autoperiod:
# astropy's BLS requires durations in units of time
durations = nplinspace(mintransitduration * startp, maxtransitduration * startp, ndurations)
# set up the correct units for the BLS model
if magsarefluxes:
blsmodel = BoxLeastSquares(stimes * u.day, smags * u.dimensionless_unscaled, dy=serrs * u.dimensionless_unscaled) # depends on [control=['if'], data=[]]
else:
blsmodel = BoxLeastSquares(stimes * u.day, smags * u.mag, dy=serrs * u.mag)
periods = nparray(blsmodel.autoperiod(durations * u.day, minimum_period=startp, maximum_period=endp, minimum_n_transit=blsmintransits, frequency_factor=blsfreqfactor))
frequencies = 1.0 / periods
nfreq = frequencies.size
if verbose:
LOGINFO("autofreq = 'astropy', used .autoperiod() with minimum_n_transit = %s, freq_factor = %s to generate the frequency grid" % (blsmintransits, blsfreqfactor))
LOGINFO('stepsize = %s, nfreq = %s, minfreq = %.5f, maxfreq = %.5f, ndurations = %s' % (abs(frequencies[1] - frequencies[0]), nfreq, 1.0 / periods.max(), 1.0 / periods.min(), durations.size)) # depends on [control=['if'], data=[]]
del blsmodel
del durations # depends on [control=['if'], data=[]]
else:
# otherwise, use kbls method
frequencies = minfreq + nparange(nfreq) * stepsize
# break up the tasks into chunks
csrem = int(fmod(nfreq, nworkers))
csint = int(float(nfreq / nworkers))
(chunk_minfreqs, chunk_nfreqs) = ([], [])
for x in range(nworkers):
this_minfreqs = frequencies[x * csint]
# handle usual nfreqs
if x < nworkers - 1:
this_nfreqs = frequencies[x * csint:x * csint + csint].size # depends on [control=['if'], data=['x']]
else:
this_nfreqs = frequencies[x * csint:x * csint + csint + csrem].size
chunk_minfreqs.append(this_minfreqs)
chunk_nfreqs.append(this_nfreqs) # depends on [control=['for'], data=['x']]
# populate the tasks list
#
# task[0] = times
# task[1] = mags
# task[2] = errs
# task[3] = magsarefluxes
# task[4] = minfreq
# task[5] = nfreq
# task[6] = stepsize
# task[7] = nphasebins
# task[8] = mintransitduration
# task[9] = maxtransitduration
# task[10] = blsobjective
# task[11] = blsmethod
# task[12] = blsoversample
# populate the tasks list
tasks = [(stimes, smags, serrs, magsarefluxes, chunk_minf, chunk_nf, stepsize, ndurations, mintransitduration, maxtransitduration, blsobjective, blsmethod, blsoversample) for (chunk_minf, chunk_nf) in zip(chunk_minfreqs, chunk_nfreqs)]
if verbose:
for (ind, task) in enumerate(tasks):
LOGINFO('worker %s: minfreq = %.6f, nfreqs = %s' % (ind + 1, task[4], task[5])) # depends on [control=['for'], data=[]]
LOGINFO('running...') # depends on [control=['if'], data=[]]
# return tasks
# start the pool
pool = Pool(nworkers)
results = pool.map(_parallel_bls_worker, tasks)
pool.close()
pool.join()
del pool
# now concatenate the output lsp arrays
lsp = npconcatenate([x['power'] for x in results])
periods = 1.0 / frequencies
# find the nbestpeaks for the periodogram: 1. sort the lsp array
# by highest value first 2. go down the values until we find
# five values that are separated by at least periodepsilon in
# period
# make sure to get only the finite peaks in the periodogram
# this is needed because BLS may produce infs for some peaks
finitepeakind = npisfinite(lsp)
finlsp = lsp[finitepeakind]
finperiods = periods[finitepeakind]
# make sure that finlsp has finite values before we work on it
try:
bestperiodind = npargmax(finlsp) # depends on [control=['try'], data=[]]
except ValueError:
LOGERROR('no finite periodogram values for this mag series, skipping...')
return {'bestperiod': npnan, 'bestlspval': npnan, 'nbestpeaks': nbestpeaks, 'nbestinds': None, 'nbestlspvals': None, 'nbestperiods': None, 'lspvals': None, 'periods': None, 'durations': None, 'method': 'bls', 'blsresult': None, 'blsmodel': None, 'kwargs': {'startp': startp, 'endp': endp, 'stepsize': stepsize, 'mintransitduration': mintransitduration, 'maxtransitduration': maxtransitduration, 'ndurations': ndurations, 'blsobjective': blsobjective, 'blsmethod': blsmethod, 'blsoversample': blsoversample, 'autofreq': autofreq, 'periodepsilon': periodepsilon, 'nbestpeaks': nbestpeaks, 'sigclip': sigclip, 'magsarefluxes': magsarefluxes}} # depends on [control=['except'], data=[]]
sortedlspind = npargsort(finlsp)[::-1]
sortedlspperiods = finperiods[sortedlspind]
sortedlspvals = finlsp[sortedlspind]
# now get the nbestpeaks
(nbestperiods, nbestlspvals, nbestinds, peakcount) = ([finperiods[bestperiodind]], [finlsp[bestperiodind]], [bestperiodind], 1)
prevperiod = sortedlspperiods[0]
# find the best nbestpeaks in the lsp and their periods
for (period, lspval, ind) in zip(sortedlspperiods, sortedlspvals, sortedlspind):
if peakcount == nbestpeaks:
break # depends on [control=['if'], data=[]]
perioddiff = abs(period - prevperiod)
bestperiodsdiff = [abs(period - x) for x in nbestperiods]
# this ensures that this period is different from the last
# period and from all the other existing best periods by
# periodepsilon to make sure we jump to an entire different
# peak in the periodogram
if perioddiff > periodepsilon * prevperiod and all((x > periodepsilon * period for x in bestperiodsdiff)):
nbestperiods.append(period)
nbestlspvals.append(lspval)
nbestinds.append(ind)
peakcount = peakcount + 1 # depends on [control=['if'], data=[]]
prevperiod = period # depends on [control=['for'], data=[]]
# generate the return dict
resultdict = {'bestperiod': finperiods[bestperiodind], 'bestlspval': finlsp[bestperiodind], 'nbestpeaks': nbestpeaks, 'nbestinds': nbestinds, 'nbestlspvals': nbestlspvals, 'nbestperiods': nbestperiods, 'lspvals': lsp, 'frequencies': frequencies, 'periods': periods, 'durations': [x['durations'] for x in results], 'blsresult': [x['blsresult'] for x in results], 'blsmodel': [x['blsmodel'] for x in results], 'stepsize': stepsize, 'nfreq': nfreq, 'mintransitduration': mintransitduration, 'maxtransitduration': maxtransitduration, 'method': 'bls', 'kwargs': {'startp': startp, 'endp': endp, 'stepsize': stepsize, 'mintransitduration': mintransitduration, 'maxtransitduration': maxtransitduration, 'ndurations': ndurations, 'blsobjective': blsobjective, 'blsmethod': blsmethod, 'blsoversample': blsoversample, 'autofreq': autofreq, 'periodepsilon': periodepsilon, 'nbestpeaks': nbestpeaks, 'sigclip': sigclip, 'magsarefluxes': magsarefluxes}}
return resultdict # depends on [control=['if'], data=[]]
else:
LOGERROR('no good detections for these times and mags, skipping...')
return {'bestperiod': npnan, 'bestlspval': npnan, 'nbestinds': None, 'nbestpeaks': nbestpeaks, 'nbestlspvals': None, 'nbestperiods': None, 'lspvals': None, 'periods': None, 'durations': None, 'blsresult': None, 'blsmodel': None, 'stepsize': stepsize, 'nfreq': None, 'nphasebins': None, 'mintransitduration': mintransitduration, 'maxtransitduration': maxtransitduration, 'method': 'bls', 'kwargs': {'startp': startp, 'endp': endp, 'stepsize': stepsize, 'mintransitduration': mintransitduration, 'maxtransitduration': maxtransitduration, 'ndurations': ndurations, 'blsobjective': blsobjective, 'blsmethod': blsmethod, 'blsoversample': blsoversample, 'autofreq': autofreq, 'periodepsilon': periodepsilon, 'nbestpeaks': nbestpeaks, 'sigclip': sigclip, 'magsarefluxes': magsarefluxes}} |
def get_port_channel_detail_output_lacp_aggregator_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_channel_detail = ET.Element("get_port_channel_detail")
config = get_port_channel_detail
output = ET.SubElement(get_port_channel_detail, "output")
lacp = ET.SubElement(output, "lacp")
aggregator_type = ET.SubElement(lacp, "aggregator-type")
aggregator_type.text = kwargs.pop('aggregator_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[get_port_channel_detail_output_lacp_aggregator_type, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get_port_channel_detail] assign[=] call[name[ET].Element, parameter[constant[get_port_channel_detail]]]
variable[config] assign[=] name[get_port_channel_detail]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_port_channel_detail], constant[output]]]
variable[lacp] assign[=] call[name[ET].SubElement, parameter[name[output], constant[lacp]]]
variable[aggregator_type] assign[=] call[name[ET].SubElement, parameter[name[lacp], constant[aggregator-type]]]
name[aggregator_type].text assign[=] call[name[kwargs].pop, parameter[constant[aggregator_type]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[get_port_channel_detail_output_lacp_aggregator_type] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get_port_channel_detail] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get_port_channel_detail]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_port_channel_detail] , literal[string] )
identifier[lacp] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[aggregator_type] = identifier[ET] . identifier[SubElement] ( identifier[lacp] , literal[string] )
identifier[aggregator_type] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def get_port_channel_detail_output_lacp_aggregator_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get_port_channel_detail = ET.Element('get_port_channel_detail')
config = get_port_channel_detail
output = ET.SubElement(get_port_channel_detail, 'output')
lacp = ET.SubElement(output, 'lacp')
aggregator_type = ET.SubElement(lacp, 'aggregator-type')
aggregator_type.text = kwargs.pop('aggregator_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def voronoi(df, projection=None, edgecolor='black',
            clip=None,
            hue=None, scheme=None, k=5, cmap='viridis', categorical=False, vmin=None, vmax=None,
            legend=False, legend_kwargs=None, legend_labels=None,
            extent=None, figsize=(8, 6), ax=None,
            **kwargs):
    """
    Geospatial Voronoi diagram.
    Parameters
    ----------
    df : GeoDataFrame
        The data being plotted.
    projection : geoplot.crs object instance, optional
        A geographic projection. For more information refer to `the tutorial page on projections
        <https://nbviewer.jupyter.org/github/ResidentMario/geoplot/blob/master/notebooks/tutorials/Projections.ipynb>`_.
    hue : None, Series, GeoSeries, iterable, or str, optional
        Applies a colormap to the output points.
    categorical : boolean, optional
        Set to ``True`` if ``hue`` references a categorical variable, and ``False`` (the default) otherwise. Ignored
        if ``hue`` is left unspecified.
    scheme : None or {"quantiles"|"equal_interval"|"fisher_jenks"}, optional
        Controls how the colormap bin edges are determined. Ignored if ``hue`` is left unspecified.
    k : int or None, optional
        Ignored if ``hue`` is left unspecified. Otherwise, if ``categorical`` is False, controls how many colors to
        use (5 is the default). If set to ``None``, a continuous colormap will be used.
    cmap : matplotlib color, optional
        The `matplotlib colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_ to be used.
        Ignored if ``hue`` is left unspecified.
    vmin : float, optional
        Values below this level will be colored the same threshold value. Defaults to the dataset minimum. Ignored
        if ``hue`` is left unspecified.
    vmax : float, optional
        Values above this level will be colored the same threshold value. Defaults to the dataset maximum. Ignored
        if ``hue`` is left unspecified.
    legend : boolean, optional
        Whether or not to include a legend. Ignored if neither a ``hue`` nor a ``scale`` is specified.
    legend_values : list, optional
        The values to use in the legend. Defaults to equal intervals. For more information see `the Gallery demo
        <https://residentmario.github.io/geoplot/examples/largest-cities-usa.html>`_.
    legend_labels : list, optional
        The names to use in the legend. Defaults to the variable values. For more information see `the Gallery demo
        <https://residentmario.github.io/geoplot/examples/largest-cities-usa.html>`_.
    legend_kwargs : dict, optional
        Keyword arguments to be passed to `the underlying legend <http://matplotlib.org/users/legend_guide.html>`_.
    extent : None or (minx, maxx, miny, maxy), optional
        Used to control plot x-axis and y-axis limits manually.
    figsize : tuple, optional
        An (x, y) tuple passed to ``matplotlib.figure`` which sets the size, in inches, of the resultant plot.
    ax : AxesSubplot or GeoAxesSubplot instance, optional
        A ``matplotlib.axes.AxesSubplot`` or ``cartopy.mpl.geoaxes.GeoAxesSubplot`` instance. Defaults to a new axis.
    kwargs: dict, optional
        Keyword arguments to be passed to the underlying ``matplotlib`` `Line2D objects
        <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_.
    Returns
    -------
    AxesSubplot or GeoAxesSubplot instance
        The axis object with the plot on it.
    Examples
    --------
    The neighborhood closest to a point in space is known as its `Voronoi region
    <https://en.wikipedia.org/wiki/Voronoi_diagram>`_. Every point in a dataset has a Voronoi region, which may be
    either a closed polygon (for inliers) or open infinite region (for points on the edge of the distribution). A
    Voronoi diagram works by dividing a space filled with points into such regions and plotting the result. Voronoi
    plots allow efficient assessment of the *density* of points in different spaces, and when combined with a
    colormap can be quite informative of overall trends in the dataset.
    The ``geoplot`` ``voronoi`` is a spatially aware application of this technique. It compares well with the more
    well-known ``choropleth``, which has the advantage of using meaningful regions, but the disadvantage of having
    defined those regions beforehand. ``voronoi`` has fewer requirements and may perform better when the number of
    observations is small. Compare also with the quadtree technique available in ``aggplot``.
    A basic ``voronoi`` specified data and, optionally, a projection. We overlay geometry to aid interpretability.
    .. code-block:: python
        ax = gplt.voronoi(injurious_collisions.head(1000))
        gplt.polyplot(boroughs, ax=ax)
    .. image:: ../figures/voronoi/voronoi-simple.png
    ``hue`` parameterizes the color, and ``cmap`` controls the colormap.
    .. code-block:: python
        ax = gplt.voronoi(injurious_collisions.head(1000), hue='NUMBER OF PERSONS INJURED', cmap='Reds')
        gplt.polyplot(boroughs, ax=ax)
    .. image:: ../figures/voronoi/voronoi-cmap.png
    Add a ``clip`` of iterable geometries to trim the ``voronoi`` against local geography.
    .. code-block:: python
        ax = gplt.voronoi(injurious_collisions.head(1000), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
                          clip=boroughs.geometry)
        gplt.polyplot(boroughs, ax=ax)
    .. image:: ../figures/voronoi/voronoi-clip.png
    ``legend`` adds a a ``matplotlib`` `Legend
    <http://matplotlib.org/api/legend_api.html#matplotlib.legend.Legend>`_. This can be tuned even further using the
    ``legend_kwargs`` argument. Other keyword parameters are passed to the underlying ``matplotlib`` `Polygon patches
    <http://matplotlib.org/api/patches_api.html#matplotlib.patches.Polygon>`_.
    .. code-block:: python
        ax = gplt.voronoi(injurious_collisions.head(1000), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
                          clip=boroughs.geometry,
                          legend=True, legend_kwargs={'loc': 'upper left'},
                          linewidth=0.5, edgecolor='white',
                          )
        gplt.polyplot(boroughs, ax=ax)
    .. image:: ../figures/voronoi/voronoi-kwargs.png
    Change the number of bins by specifying an alternative ``k`` value. To use a continuous colormap, explicitly
    specify ``k=None``. You can change the binning sceme with ``scheme``. The default is ``quantile``, which bins
    observations into classes of different sizes but the same numbers of observations. ``equal_interval`` will
    creates bins that are the same size, but potentially containing different numbers of observations. The more
    complicated ``fisher_jenks`` scheme is an intermediate between the two.
    .. code-block:: python
        ax = gplt.voronoi(injurious_collisions.head(1000),
                          hue='NUMBER OF PERSONS INJURED', cmap='Reds', k=5, scheme='fisher_jenks',
                          clip=boroughs.geometry,
                          legend=True, legend_kwargs={'loc': 'upper left'},
                          linewidth=0.5, edgecolor='white',
                          )
        gplt.polyplot(boroughs, ax=ax)
    .. image:: ../figures/voronoi/voronoi-scheme.png
    If your variable of interest is already `categorical
    <http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_, specify ``categorical=True`` to
    use the labels in your dataset directly.
    .. code-block:: python
        ax = gplt.voronoi(injurious_collisions.head(1000), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
                          edgecolor='white', clip=boroughs.geometry,
                          linewidth=0.5, categorical=True
                          )
        gplt.polyplot(boroughs, linewidth=1, ax=ax)
    .. image:: ../figures/voronoi/voronoi-multiparty.png
    """
    # Initialize the figure (called for its side effect of creating/sizing
    # the figure; the returned handle is not needed here).
    _init_figure(ax, figsize)
    if projection:
        # Properly set up the projection, centering it on the data's centroid.
        projection = projection.load(df, {
            'central_longitude': lambda df: np.mean(np.array([p.x for p in df.geometry.centroid])),
            'central_latitude': lambda df: np.mean(np.array([p.y for p in df.geometry.centroid]))
        })
        # Set up the axis.
        if not ax:
            ax = plt.subplot(111, projection=projection)
    else:
        if not ax:
            ax = plt.gca()
    # Clean up patches.
    _lay_out_axes(ax, projection)
    # Immediately return if input geometry is empty.
    if len(df.geometry) == 0:
        return ax
    # Set extent from the data's centroid bounding box.
    xs, ys = [p.x for p in df.geometry.centroid], [p.y for p in df.geometry.centroid]
    extrema = np.min(xs), np.max(xs), np.min(ys), np.max(ys)
    _set_extent(ax, projection, extent, extrema)
    # Validate hue input.
    hue = _validate_hue(df, hue)
    # Generate the coloring information, if needed. Follows one of two schemes, categorical or continuous,
    # based on whether or not ``k`` is specified (``hue`` must be specified for either to work).
    if k is not None:
        # Categorical colormap code path.
        categorical, k, scheme = _validate_buckets(categorical, k, scheme)
        if hue is not None:
            cmap, categories, hue_values = _discrete_colorize(categorical, hue, scheme, k, cmap, vmin, vmax)
            colors = [cmap.to_rgba(v) for v in hue_values]
        else:
            colors = ['None']*len(df)
    elif k is None and hue is not None:
        # Continuous colormap code path.
        hue_values = hue
        cmap = _continuous_colormap(hue_values, cmap, vmin, vmax)
        colors = [cmap.to_rgba(v) for v in hue_values]
    elif 'facecolor' in kwargs:
        colors = [kwargs.pop('facecolor')]*len(df)
    else:
        colors = ['None']*len(df)
    # Finally we draw the features.
    geoms = _build_voronoi_polygons(df)
    if projection:
        for color, geom in zip(colors, geoms):
            features = ShapelyFeature([geom], ccrs.PlateCarree())
            ax.add_feature(features, facecolor=color, edgecolor=edgecolor, **kwargs)
        if clip is not None:
            # Mask the diagram outside the clip geometry with a white overlay.
            clip_geom = _get_clip(ax.get_extent(crs=ccrs.PlateCarree()), clip)
            feature = ShapelyFeature([clip_geom], ccrs.PlateCarree())
            ax.add_feature(feature, facecolor=(1,1,1), linewidth=0, zorder=100)
    else:
        for color, geom in zip(colors, geoms):
            feature = descartes.PolygonPatch(geom, facecolor=color, edgecolor=edgecolor, **kwargs)
            ax.add_patch(feature)
        if clip is not None:
            clip_geom = _get_clip(ax.get_xlim() + ax.get_ylim(), clip)
            ax = polyplot(gpd.GeoSeries(clip_geom), facecolor='white', linewidth=0, zorder=100,
                          extent=ax.get_xlim() + ax.get_ylim(), ax=ax)
    # Add a legend, if appropriate. The categorical branch must also require
    # ``hue``: with hue=None no bins were computed (``categories``/``cmap``
    # were never assigned above), and the unguarded call raised a NameError
    # whenever legend=True was combined with the default k=5 and no hue.
    if legend and k is not None and hue is not None:
        _paint_hue_legend(ax, categories, cmap, legend_labels, legend_kwargs, figure=True)
    elif legend and k is None and hue is not None:
        _paint_colorbar_legend(ax, hue_values, cmap, legend_kwargs)
    return ax
constant[
Geospatial Voronoi diagram.
Parameters
----------
df : GeoDataFrame
The data being plotted.
projection : geoplot.crs object instance, optional
A geographic projection. For more information refer to `the tutorial page on projections
<https://nbviewer.jupyter.org/github/ResidentMario/geoplot/blob/master/notebooks/tutorials/Projections.ipynb>`_.
hue : None, Series, GeoSeries, iterable, or str, optional
Applies a colormap to the output points.
categorical : boolean, optional
Set to ``True`` if ``hue`` references a categorical variable, and ``False`` (the default) otherwise. Ignored
if ``hue`` is left unspecified.
scheme : None or {"quantiles"|"equal_interval"|"fisher_jenks"}, optional
Controls how the colormap bin edges are determined. Ignored if ``hue`` is left unspecified.
k : int or None, optional
Ignored if ``hue`` is left unspecified. Otherwise, if ``categorical`` is False, controls how many colors to
use (5 is the default). If set to ``None``, a continuous colormap will be used.
cmap : matplotlib color, optional
The `matplotlib colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_ to be used.
Ignored if ``hue`` is left unspecified.
vmin : float, optional
Values below this level will be colored the same threshold value. Defaults to the dataset minimum. Ignored
if ``hue`` is left unspecified.
vmax : float, optional
Values above this level will be colored the same threshold value. Defaults to the dataset maximum. Ignored
if ``hue`` is left unspecified.
legend : boolean, optional
Whether or not to include a legend. Ignored if neither a ``hue`` nor a ``scale`` is specified.
legend_values : list, optional
The values to use in the legend. Defaults to equal intervals. For more information see `the Gallery demo
<https://residentmario.github.io/geoplot/examples/largest-cities-usa.html>`_.
legend_labels : list, optional
The names to use in the legend. Defaults to the variable values. For more information see `the Gallery demo
<https://residentmario.github.io/geoplot/examples/largest-cities-usa.html>`_.
legend_kwargs : dict, optional
Keyword arguments to be passed to `the underlying legend <http://matplotlib.org/users/legend_guide.html>`_.
extent : None or (minx, maxx, miny, maxy), optional
Used to control plot x-axis and y-axis limits manually.
figsize : tuple, optional
An (x, y) tuple passed to ``matplotlib.figure`` which sets the size, in inches, of the resultant plot.
ax : AxesSubplot or GeoAxesSubplot instance, optional
A ``matplotlib.axes.AxesSubplot`` or ``cartopy.mpl.geoaxes.GeoAxesSubplot`` instance. Defaults to a new axis.
kwargs: dict, optional
Keyword arguments to be passed to the underlying ``matplotlib`` `Line2D objects
<http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_.
Returns
-------
AxesSubplot or GeoAxesSubplot instance
The axis object with the plot on it.
Examples
--------
The neighborhood closest to a point in space is known as its `Voronoi region
<https://en.wikipedia.org/wiki/Voronoi_diagram>`_. Every point in a dataset has a Voronoi region, which may be
either a closed polygon (for inliers) or open infinite region (for points on the edge of the distribution). A
Voronoi diagram works by dividing a space filled with points into such regions and plotting the result. Voronoi
plots allow efficient assessmelt of the *density* of points in different spaces, and when combined with a
colormap can be quite informative of overall trends in the dataset.
The ``geoplot`` ``voronoi`` is a spatially aware application of this technique. It compares well with the more
well-known ``choropleth``, which has the advantage of using meaningful regions, but the disadvantage of having
defined those regions beforehand. ``voronoi`` has fewer requirements and may perform better when the number of
observations is small. Compare also with the quadtree technique available in ``aggplot``.
A basic ``voronoi`` specified data and, optionally, a projection. We overlay geometry to aid interpretability.
.. code-block:: python
ax = gplt.voronoi(injurious_collisions.head(1000))
gplt.polyplot(boroughs, ax=ax)
.. image:: ../figures/voronoi/voronoi-simple.png
``hue`` parameterizes the color, and ``cmap`` controls the colormap.
.. code-block:: python
ax = gplt.voronoi(injurious_collisions.head(1000), hue='NUMBER OF PERSONS INJURED', cmap='Reds')
gplt.polyplot(boroughs, ax=ax)
.. image:: ../figures/voronoi/voronoi-cmap.png
Add a ``clip`` of iterable geometries to trim the ``voronoi`` against local geography.
.. code-block:: python
ax = gplt.voronoi(injurious_collisions.head(1000), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
clip=boroughs.geometry)
gplt.polyplot(boroughs, ax=ax)
.. image:: ../figures/voronoi/voronoi-clip.png
``legend`` adds a a ``matplotlib`` `Legend
<http://matplotlib.org/api/legend_api.html#matplotlib.legend.Legend>`_. This can be tuned even further using the
``legend_kwargs`` argument. Other keyword parameters are passed to the underlying ``matplotlib`` `Polygon patches
<http://matplotlib.org/api/patches_api.html#matplotlib.patches.Polygon>`_.
.. code-block:: python
ax = gplt.voronoi(injurious_collisions.head(1000), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
clip=boroughs.geometry,
legend=True, legend_kwargs={'loc': 'upper left'},
linewidth=0.5, edgecolor='white',
)
gplt.polyplot(boroughs, ax=ax)
.. image:: ../figures/voronoi/voronoi-kwargs.png
Change the number of bins by specifying an alternative ``k`` value. To use a continuous colormap, explicitly
specify ``k=None``. You can change the binning sceme with ``scheme``. The default is ``quantile``, which bins
observations into classes of different sizes but the same numbers of observations. ``equal_interval`` will
creates bins that are the same size, but potentially containing different numbers of observations. The more
complicated ``fisher_jenks`` scheme is an intermediate between the two.
.. code-block:: python
ax = gplt.voronoi(injurious_collisions.head(1000),
hue='NUMBER OF PERSONS INJURED', cmap='Reds', k=5, scheme='fisher_jenks',
clip=boroughs.geometry,
legend=True, legend_kwargs={'loc': 'upper left'},
linewidth=0.5, edgecolor='white',
)
gplt.polyplot(boroughs, ax=ax)
.. image:: ../figures/voronoi/voronoi-scheme.png
If your variable of interest is already `categorical
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_, specify ``categorical=True`` to
use the labels in your dataset directly.
.. code-block:: python
ax = gplt.voronoi(injurious_collisions.head(1000), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
edgecolor='white', clip=boroughs.geometry,
linewidth=0.5, categorical=True
)
gplt.polyplot(boroughs, linewidth=1, ax=ax)
.. image:: ../figures/voronoi/voronoi-multiparty.png
]
variable[fig] assign[=] call[name[_init_figure], parameter[name[ax], name[figsize]]]
if name[projection] begin[:]
variable[projection] assign[=] call[name[projection].load, parameter[name[df], dictionary[[<ast.Constant object at 0x7da2044c03d0>, <ast.Constant object at 0x7da2044c13c0>], [<ast.Lambda object at 0x7da2044c2ec0>, <ast.Lambda object at 0x7da20cabff70>]]]]
if <ast.UnaryOp object at 0x7da20cabcd60> begin[:]
variable[ax] assign[=] call[name[plt].subplot, parameter[constant[111]]]
call[name[_lay_out_axes], parameter[name[ax], name[projection]]]
if compare[call[name[len], parameter[name[df].geometry]] equal[==] constant[0]] begin[:]
return[name[ax]]
<ast.Tuple object at 0x7da20cabce20> assign[=] tuple[[<ast.ListComp object at 0x7da20cabf160>, <ast.ListComp object at 0x7da20cabc4f0>]]
variable[extrema] assign[=] tuple[[<ast.Call object at 0x7da207f99900>, <ast.Call object at 0x7da207f9aa10>, <ast.Call object at 0x7da207f99ed0>, <ast.Call object at 0x7da207f9b700>]]
call[name[_set_extent], parameter[name[ax], name[projection], name[extent], name[extrema]]]
variable[hue] assign[=] call[name[_validate_hue], parameter[name[df], name[hue]]]
if compare[name[k] is_not constant[None]] begin[:]
<ast.Tuple object at 0x7da207f99600> assign[=] call[name[_validate_buckets], parameter[name[categorical], name[k], name[scheme]]]
if compare[name[hue] is_not constant[None]] begin[:]
<ast.Tuple object at 0x7da207f98310> assign[=] call[name[_discrete_colorize], parameter[name[categorical], name[hue], name[scheme], name[k], name[cmap], name[vmin], name[vmax]]]
variable[colors] assign[=] <ast.ListComp object at 0x7da207f99960>
variable[geoms] assign[=] call[name[_build_voronoi_polygons], parameter[name[df]]]
if name[projection] begin[:]
for taget[tuple[[<ast.Name object at 0x7da207f98640>, <ast.Name object at 0x7da207f997e0>]]] in starred[call[name[zip], parameter[name[colors], name[geoms]]]] begin[:]
variable[features] assign[=] call[name[ShapelyFeature], parameter[list[[<ast.Name object at 0x7da207f99390>]], call[name[ccrs].PlateCarree, parameter[]]]]
call[name[ax].add_feature, parameter[name[features]]]
if compare[name[clip] is_not constant[None]] begin[:]
variable[clip_geom] assign[=] call[name[_get_clip], parameter[call[name[ax].get_extent, parameter[]], name[clip]]]
variable[feature] assign[=] call[name[ShapelyFeature], parameter[list[[<ast.Name object at 0x7da20c795e10>]], call[name[ccrs].PlateCarree, parameter[]]]]
call[name[ax].add_feature, parameter[name[feature]]]
if <ast.BoolOp object at 0x7da20c796a40> begin[:]
call[name[_paint_hue_legend], parameter[name[ax], name[categories], name[cmap], name[legend_labels], name[legend_kwargs]]]
return[name[ax]] | keyword[def] identifier[voronoi] ( identifier[df] , identifier[projection] = keyword[None] , identifier[edgecolor] = literal[string] ,
identifier[clip] = keyword[None] ,
identifier[hue] = keyword[None] , identifier[scheme] = keyword[None] , identifier[k] = literal[int] , identifier[cmap] = literal[string] , identifier[categorical] = keyword[False] , identifier[vmin] = keyword[None] , identifier[vmax] = keyword[None] ,
identifier[legend] = keyword[False] , identifier[legend_kwargs] = keyword[None] , identifier[legend_labels] = keyword[None] ,
identifier[extent] = keyword[None] , identifier[figsize] =( literal[int] , literal[int] ), identifier[ax] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
identifier[fig] = identifier[_init_figure] ( identifier[ax] , identifier[figsize] )
keyword[if] identifier[projection] :
identifier[projection] = identifier[projection] . identifier[load] ( identifier[df] ,{
literal[string] : keyword[lambda] identifier[df] : identifier[np] . identifier[mean] ( identifier[np] . identifier[array] ([ identifier[p] . identifier[x] keyword[for] identifier[p] keyword[in] identifier[df] . identifier[geometry] . identifier[centroid] ])),
literal[string] : keyword[lambda] identifier[df] : identifier[np] . identifier[mean] ( identifier[np] . identifier[array] ([ identifier[p] . identifier[y] keyword[for] identifier[p] keyword[in] identifier[df] . identifier[geometry] . identifier[centroid] ]))
})
keyword[if] keyword[not] identifier[ax] :
identifier[ax] = identifier[plt] . identifier[subplot] ( literal[int] , identifier[projection] = identifier[projection] )
keyword[else] :
keyword[if] keyword[not] identifier[ax] :
identifier[ax] = identifier[plt] . identifier[gca] ()
identifier[_lay_out_axes] ( identifier[ax] , identifier[projection] )
keyword[if] identifier[len] ( identifier[df] . identifier[geometry] )== literal[int] :
keyword[return] identifier[ax]
identifier[xs] , identifier[ys] =[ identifier[p] . identifier[x] keyword[for] identifier[p] keyword[in] identifier[df] . identifier[geometry] . identifier[centroid] ],[ identifier[p] . identifier[y] keyword[for] identifier[p] keyword[in] identifier[df] . identifier[geometry] . identifier[centroid] ]
identifier[extrema] = identifier[np] . identifier[min] ( identifier[xs] ), identifier[np] . identifier[max] ( identifier[xs] ), identifier[np] . identifier[min] ( identifier[ys] ), identifier[np] . identifier[max] ( identifier[ys] )
identifier[_set_extent] ( identifier[ax] , identifier[projection] , identifier[extent] , identifier[extrema] )
identifier[hue] = identifier[_validate_hue] ( identifier[df] , identifier[hue] )
keyword[if] identifier[k] keyword[is] keyword[not] keyword[None] :
identifier[categorical] , identifier[k] , identifier[scheme] = identifier[_validate_buckets] ( identifier[categorical] , identifier[k] , identifier[scheme] )
keyword[if] identifier[hue] keyword[is] keyword[not] keyword[None] :
identifier[cmap] , identifier[categories] , identifier[hue_values] = identifier[_discrete_colorize] ( identifier[categorical] , identifier[hue] , identifier[scheme] , identifier[k] , identifier[cmap] , identifier[vmin] , identifier[vmax] )
identifier[colors] =[ identifier[cmap] . identifier[to_rgba] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[hue_values] ]
keyword[else] :
identifier[colors] =[ literal[string] ]* identifier[len] ( identifier[df] )
keyword[elif] identifier[k] keyword[is] keyword[None] keyword[and] identifier[hue] keyword[is] keyword[not] keyword[None] :
identifier[hue_values] = identifier[hue]
identifier[cmap] = identifier[_continuous_colormap] ( identifier[hue_values] , identifier[cmap] , identifier[vmin] , identifier[vmax] )
identifier[colors] =[ identifier[cmap] . identifier[to_rgba] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[hue_values] ]
keyword[elif] literal[string] keyword[in] identifier[kwargs] :
identifier[colors] =[ identifier[kwargs] . identifier[pop] ( literal[string] )]* identifier[len] ( identifier[df] )
keyword[else] :
identifier[colors] =[ literal[string] ]* identifier[len] ( identifier[df] )
identifier[geoms] = identifier[_build_voronoi_polygons] ( identifier[df] )
keyword[if] identifier[projection] :
keyword[for] identifier[color] , identifier[geom] keyword[in] identifier[zip] ( identifier[colors] , identifier[geoms] ):
identifier[features] = identifier[ShapelyFeature] ([ identifier[geom] ], identifier[ccrs] . identifier[PlateCarree] ())
identifier[ax] . identifier[add_feature] ( identifier[features] , identifier[facecolor] = identifier[color] , identifier[edgecolor] = identifier[edgecolor] ,** identifier[kwargs] )
keyword[if] identifier[clip] keyword[is] keyword[not] keyword[None] :
identifier[clip_geom] = identifier[_get_clip] ( identifier[ax] . identifier[get_extent] ( identifier[crs] = identifier[ccrs] . identifier[PlateCarree] ()), identifier[clip] )
identifier[feature] = identifier[ShapelyFeature] ([ identifier[clip_geom] ], identifier[ccrs] . identifier[PlateCarree] ())
identifier[ax] . identifier[add_feature] ( identifier[feature] , identifier[facecolor] =( literal[int] , literal[int] , literal[int] ), identifier[linewidth] = literal[int] , identifier[zorder] = literal[int] )
keyword[else] :
keyword[for] identifier[color] , identifier[geom] keyword[in] identifier[zip] ( identifier[colors] , identifier[geoms] ):
identifier[feature] = identifier[descartes] . identifier[PolygonPatch] ( identifier[geom] , identifier[facecolor] = identifier[color] , identifier[edgecolor] = identifier[edgecolor] ,** identifier[kwargs] )
identifier[ax] . identifier[add_patch] ( identifier[feature] )
keyword[if] identifier[clip] keyword[is] keyword[not] keyword[None] :
identifier[clip_geom] = identifier[_get_clip] ( identifier[ax] . identifier[get_xlim] ()+ identifier[ax] . identifier[get_ylim] (), identifier[clip] )
identifier[ax] = identifier[polyplot] ( identifier[gpd] . identifier[GeoSeries] ( identifier[clip_geom] ), identifier[facecolor] = literal[string] , identifier[linewidth] = literal[int] , identifier[zorder] = literal[int] ,
identifier[extent] = identifier[ax] . identifier[get_xlim] ()+ identifier[ax] . identifier[get_ylim] (), identifier[ax] = identifier[ax] )
keyword[if] identifier[legend] keyword[and] identifier[k] keyword[is] keyword[not] keyword[None] :
identifier[_paint_hue_legend] ( identifier[ax] , identifier[categories] , identifier[cmap] , identifier[legend_labels] , identifier[legend_kwargs] , identifier[figure] = keyword[True] )
keyword[elif] identifier[legend] keyword[and] identifier[k] keyword[is] keyword[None] keyword[and] identifier[hue] keyword[is] keyword[not] keyword[None] :
identifier[_paint_colorbar_legend] ( identifier[ax] , identifier[hue_values] , identifier[cmap] , identifier[legend_kwargs] )
keyword[return] identifier[ax] | def voronoi(df, projection=None, edgecolor='black', clip=None, hue=None, scheme=None, k=5, cmap='viridis', categorical=False, vmin=None, vmax=None, legend=False, legend_kwargs=None, legend_labels=None, extent=None, figsize=(8, 6), ax=None, **kwargs):
"""
Geospatial Voronoi diagram.
Parameters
----------
df : GeoDataFrame
The data being plotted.
projection : geoplot.crs object instance, optional
A geographic projection. For more information refer to `the tutorial page on projections
<https://nbviewer.jupyter.org/github/ResidentMario/geoplot/blob/master/notebooks/tutorials/Projections.ipynb>`_.
hue : None, Series, GeoSeries, iterable, or str, optional
Applies a colormap to the output points.
categorical : boolean, optional
Set to ``True`` if ``hue`` references a categorical variable, and ``False`` (the default) otherwise. Ignored
if ``hue`` is left unspecified.
scheme : None or {"quantiles"|"equal_interval"|"fisher_jenks"}, optional
Controls how the colormap bin edges are determined. Ignored if ``hue`` is left unspecified.
k : int or None, optional
Ignored if ``hue`` is left unspecified. Otherwise, if ``categorical`` is False, controls how many colors to
use (5 is the default). If set to ``None``, a continuous colormap will be used.
cmap : matplotlib color, optional
The `matplotlib colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_ to be used.
Ignored if ``hue`` is left unspecified.
vmin : float, optional
Values below this level will be colored the same threshold value. Defaults to the dataset minimum. Ignored
if ``hue`` is left unspecified.
vmax : float, optional
Values above this level will be colored the same threshold value. Defaults to the dataset maximum. Ignored
if ``hue`` is left unspecified.
legend : boolean, optional
Whether or not to include a legend. Ignored if neither a ``hue`` nor a ``scale`` is specified.
legend_values : list, optional
The values to use in the legend. Defaults to equal intervals. For more information see `the Gallery demo
<https://residentmario.github.io/geoplot/examples/largest-cities-usa.html>`_.
legend_labels : list, optional
The names to use in the legend. Defaults to the variable values. For more information see `the Gallery demo
<https://residentmario.github.io/geoplot/examples/largest-cities-usa.html>`_.
legend_kwargs : dict, optional
Keyword arguments to be passed to `the underlying legend <http://matplotlib.org/users/legend_guide.html>`_.
extent : None or (minx, maxx, miny, maxy), optional
Used to control plot x-axis and y-axis limits manually.
figsize : tuple, optional
An (x, y) tuple passed to ``matplotlib.figure`` which sets the size, in inches, of the resultant plot.
ax : AxesSubplot or GeoAxesSubplot instance, optional
A ``matplotlib.axes.AxesSubplot`` or ``cartopy.mpl.geoaxes.GeoAxesSubplot`` instance. Defaults to a new axis.
kwargs: dict, optional
Keyword arguments to be passed to the underlying ``matplotlib`` `Line2D objects
<http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_.
Returns
-------
AxesSubplot or GeoAxesSubplot instance
The axis object with the plot on it.
Examples
--------
The neighborhood closest to a point in space is known as its `Voronoi region
<https://en.wikipedia.org/wiki/Voronoi_diagram>`_. Every point in a dataset has a Voronoi region, which may be
either a closed polygon (for inliers) or open infinite region (for points on the edge of the distribution). A
Voronoi diagram works by dividing a space filled with points into such regions and plotting the result. Voronoi
plots allow efficient assessmelt of the *density* of points in different spaces, and when combined with a
colormap can be quite informative of overall trends in the dataset.
The ``geoplot`` ``voronoi`` is a spatially aware application of this technique. It compares well with the more
well-known ``choropleth``, which has the advantage of using meaningful regions, but the disadvantage of having
defined those regions beforehand. ``voronoi`` has fewer requirements and may perform better when the number of
observations is small. Compare also with the quadtree technique available in ``aggplot``.
A basic ``voronoi`` specified data and, optionally, a projection. We overlay geometry to aid interpretability.
.. code-block:: python
ax = gplt.voronoi(injurious_collisions.head(1000))
gplt.polyplot(boroughs, ax=ax)
.. image:: ../figures/voronoi/voronoi-simple.png
``hue`` parameterizes the color, and ``cmap`` controls the colormap.
.. code-block:: python
ax = gplt.voronoi(injurious_collisions.head(1000), hue='NUMBER OF PERSONS INJURED', cmap='Reds')
gplt.polyplot(boroughs, ax=ax)
.. image:: ../figures/voronoi/voronoi-cmap.png
Add a ``clip`` of iterable geometries to trim the ``voronoi`` against local geography.
.. code-block:: python
ax = gplt.voronoi(injurious_collisions.head(1000), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
clip=boroughs.geometry)
gplt.polyplot(boroughs, ax=ax)
.. image:: ../figures/voronoi/voronoi-clip.png
``legend`` adds a a ``matplotlib`` `Legend
<http://matplotlib.org/api/legend_api.html#matplotlib.legend.Legend>`_. This can be tuned even further using the
``legend_kwargs`` argument. Other keyword parameters are passed to the underlying ``matplotlib`` `Polygon patches
<http://matplotlib.org/api/patches_api.html#matplotlib.patches.Polygon>`_.
.. code-block:: python
ax = gplt.voronoi(injurious_collisions.head(1000), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
clip=boroughs.geometry,
legend=True, legend_kwargs={'loc': 'upper left'},
linewidth=0.5, edgecolor='white',
)
gplt.polyplot(boroughs, ax=ax)
.. image:: ../figures/voronoi/voronoi-kwargs.png
Change the number of bins by specifying an alternative ``k`` value. To use a continuous colormap, explicitly
specify ``k=None``. You can change the binning sceme with ``scheme``. The default is ``quantile``, which bins
observations into classes of different sizes but the same numbers of observations. ``equal_interval`` will
creates bins that are the same size, but potentially containing different numbers of observations. The more
complicated ``fisher_jenks`` scheme is an intermediate between the two.
.. code-block:: python
ax = gplt.voronoi(injurious_collisions.head(1000),
hue='NUMBER OF PERSONS INJURED', cmap='Reds', k=5, scheme='fisher_jenks',
clip=boroughs.geometry,
legend=True, legend_kwargs={'loc': 'upper left'},
linewidth=0.5, edgecolor='white',
)
gplt.polyplot(boroughs, ax=ax)
.. image:: ../figures/voronoi/voronoi-scheme.png
If your variable of interest is already `categorical
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_, specify ``categorical=True`` to
use the labels in your dataset directly.
.. code-block:: python
ax = gplt.voronoi(injurious_collisions.head(1000), hue='NUMBER OF PERSONS INJURED', cmap='Reds',
edgecolor='white', clip=boroughs.geometry,
linewidth=0.5, categorical=True
)
gplt.polyplot(boroughs, linewidth=1, ax=ax)
.. image:: ../figures/voronoi/voronoi-multiparty.png
"""
# Initialize the figure.
fig = _init_figure(ax, figsize)
if projection:
# Properly set up the projection.
projection = projection.load(df, {'central_longitude': lambda df: np.mean(np.array([p.x for p in df.geometry.centroid])), 'central_latitude': lambda df: np.mean(np.array([p.y for p in df.geometry.centroid]))})
# Set up the axis.
if not ax:
ax = plt.subplot(111, projection=projection) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif not ax:
ax = plt.gca() # depends on [control=['if'], data=[]]
# Clean up patches.
_lay_out_axes(ax, projection)
# Immediately return if input geometry is empty.
if len(df.geometry) == 0:
return ax # depends on [control=['if'], data=[]]
# Set extent.
(xs, ys) = ([p.x for p in df.geometry.centroid], [p.y for p in df.geometry.centroid])
extrema = (np.min(xs), np.max(xs), np.min(ys), np.max(ys))
_set_extent(ax, projection, extent, extrema)
# Validate hue input.
hue = _validate_hue(df, hue)
# Generate the coloring information, if needed. Follows one of two schemes, categorical or continuous,
# based on whether or not ``k`` is specified (``hue`` must be specified for either to work).
if k is not None:
# Categorical colormap code path.
(categorical, k, scheme) = _validate_buckets(categorical, k, scheme)
if hue is not None:
(cmap, categories, hue_values) = _discrete_colorize(categorical, hue, scheme, k, cmap, vmin, vmax)
colors = [cmap.to_rgba(v) for v in hue_values] # depends on [control=['if'], data=['hue']]
else:
colors = ['None'] * len(df) # depends on [control=['if'], data=['k']]
elif k is None and hue is not None:
# Continuous colormap code path.
hue_values = hue
cmap = _continuous_colormap(hue_values, cmap, vmin, vmax)
colors = [cmap.to_rgba(v) for v in hue_values] # depends on [control=['if'], data=[]]
elif 'facecolor' in kwargs:
colors = [kwargs.pop('facecolor')] * len(df) # depends on [control=['if'], data=['kwargs']]
else:
colors = ['None'] * len(df)
# Finally we draw the features.
geoms = _build_voronoi_polygons(df)
if projection:
for (color, geom) in zip(colors, geoms):
features = ShapelyFeature([geom], ccrs.PlateCarree())
ax.add_feature(features, facecolor=color, edgecolor=edgecolor, **kwargs) # depends on [control=['for'], data=[]]
if clip is not None:
clip_geom = _get_clip(ax.get_extent(crs=ccrs.PlateCarree()), clip)
feature = ShapelyFeature([clip_geom], ccrs.PlateCarree())
ax.add_feature(feature, facecolor=(1, 1, 1), linewidth=0, zorder=100) # depends on [control=['if'], data=['clip']] # depends on [control=['if'], data=[]]
else:
for (color, geom) in zip(colors, geoms):
feature = descartes.PolygonPatch(geom, facecolor=color, edgecolor=edgecolor, **kwargs)
ax.add_patch(feature) # depends on [control=['for'], data=[]]
if clip is not None:
clip_geom = _get_clip(ax.get_xlim() + ax.get_ylim(), clip)
ax = polyplot(gpd.GeoSeries(clip_geom), facecolor='white', linewidth=0, zorder=100, extent=ax.get_xlim() + ax.get_ylim(), ax=ax) # depends on [control=['if'], data=['clip']]
# Add a legend, if appropriate.
if legend and k is not None:
_paint_hue_legend(ax, categories, cmap, legend_labels, legend_kwargs, figure=True) # depends on [control=['if'], data=[]]
elif legend and k is None and (hue is not None):
_paint_colorbar_legend(ax, hue_values, cmap, legend_kwargs) # depends on [control=['if'], data=[]]
return ax |
def nl_groups(self, value):
"""Group setter."""
self.bytearray[self._get_slicers(3)] = bytearray(c_uint32(value or 0)) | def function[nl_groups, parameter[self, value]]:
constant[Group setter.]
call[name[self].bytearray][call[name[self]._get_slicers, parameter[constant[3]]]] assign[=] call[name[bytearray], parameter[call[name[c_uint32], parameter[<ast.BoolOp object at 0x7da1b2609c60>]]]] | keyword[def] identifier[nl_groups] ( identifier[self] , identifier[value] ):
literal[string]
identifier[self] . identifier[bytearray] [ identifier[self] . identifier[_get_slicers] ( literal[int] )]= identifier[bytearray] ( identifier[c_uint32] ( identifier[value] keyword[or] literal[int] )) | def nl_groups(self, value):
"""Group setter."""
self.bytearray[self._get_slicers(3)] = bytearray(c_uint32(value or 0)) |
def to_python_(self, table_name: str="data") -> list:
"""Convert the main dataframe to python a python list
:param table_name: python variable name, defaults to "data"
:param table_name: str, optional
:return: a python list of lists with the data
:rtype: str
:example: ``ds.to_python_("myvar")``
"""
try:
renderer = pytablewriter.PythonCodeTableWriter
data = self._build_export(renderer, table_name)
return data
except Exception as e:
self.err(e, "Can not convert data to python list") | def function[to_python_, parameter[self, table_name]]:
constant[Convert the main dataframe to python a python list
:param table_name: python variable name, defaults to "data"
:param table_name: str, optional
:return: a python list of lists with the data
:rtype: str
:example: ``ds.to_python_("myvar")``
]
<ast.Try object at 0x7da1b25249d0> | keyword[def] identifier[to_python_] ( identifier[self] , identifier[table_name] : identifier[str] = literal[string] )-> identifier[list] :
literal[string]
keyword[try] :
identifier[renderer] = identifier[pytablewriter] . identifier[PythonCodeTableWriter]
identifier[data] = identifier[self] . identifier[_build_export] ( identifier[renderer] , identifier[table_name] )
keyword[return] identifier[data]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[err] ( identifier[e] , literal[string] ) | def to_python_(self, table_name: str='data') -> list:
"""Convert the main dataframe to python a python list
:param table_name: python variable name, defaults to "data"
:param table_name: str, optional
:return: a python list of lists with the data
:rtype: str
:example: ``ds.to_python_("myvar")``
"""
try:
renderer = pytablewriter.PythonCodeTableWriter
data = self._build_export(renderer, table_name)
return data # depends on [control=['try'], data=[]]
except Exception as e:
self.err(e, 'Can not convert data to python list') # depends on [control=['except'], data=['e']] |
def set_hint_style(self, hint_style):
"""Changes the :ref:`HINT_STYLE` for the font options object.
This controls whether to fit font outlines to the pixel grid,
and if so, whether to optimize for fidelity or contrast.
"""
cairo.cairo_font_options_set_hint_style(self._pointer, hint_style)
self._check_status() | def function[set_hint_style, parameter[self, hint_style]]:
constant[Changes the :ref:`HINT_STYLE` for the font options object.
This controls whether to fit font outlines to the pixel grid,
and if so, whether to optimize for fidelity or contrast.
]
call[name[cairo].cairo_font_options_set_hint_style, parameter[name[self]._pointer, name[hint_style]]]
call[name[self]._check_status, parameter[]] | keyword[def] identifier[set_hint_style] ( identifier[self] , identifier[hint_style] ):
literal[string]
identifier[cairo] . identifier[cairo_font_options_set_hint_style] ( identifier[self] . identifier[_pointer] , identifier[hint_style] )
identifier[self] . identifier[_check_status] () | def set_hint_style(self, hint_style):
"""Changes the :ref:`HINT_STYLE` for the font options object.
This controls whether to fit font outlines to the pixel grid,
and if so, whether to optimize for fidelity or contrast.
"""
cairo.cairo_font_options_set_hint_style(self._pointer, hint_style)
self._check_status() |
def concat_t_vars(self):
"""
Concatenate ``self.t`` with ``self.vars`` and output a single matrix
for data dump
:return matrix: concatenated matrix with ``self.t`` as the 0-th column
"""
logger.warning('This function is deprecated and replaced by `concat_t_vars_np`.')
out = np.array([])
if len(self.t) == 0:
return out
out = np.ndarray(shape=(0, self.vars[0].size[0] + 1))
for t, var in zip(self.t, self.vars):
line = [[t]]
line[0].extend(list(var))
out = np.append(out, line, axis=0)
return out | def function[concat_t_vars, parameter[self]]:
constant[
Concatenate ``self.t`` with ``self.vars`` and output a single matrix
for data dump
:return matrix: concatenated matrix with ``self.t`` as the 0-th column
]
call[name[logger].warning, parameter[constant[This function is deprecated and replaced by `concat_t_vars_np`.]]]
variable[out] assign[=] call[name[np].array, parameter[list[[]]]]
if compare[call[name[len], parameter[name[self].t]] equal[==] constant[0]] begin[:]
return[name[out]]
variable[out] assign[=] call[name[np].ndarray, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20cabe4d0>, <ast.Name object at 0x7da20cabd270>]]] in starred[call[name[zip], parameter[name[self].t, name[self].vars]]] begin[:]
variable[line] assign[=] list[[<ast.List object at 0x7da20cabf580>]]
call[call[name[line]][constant[0]].extend, parameter[call[name[list], parameter[name[var]]]]]
variable[out] assign[=] call[name[np].append, parameter[name[out], name[line]]]
return[name[out]] | keyword[def] identifier[concat_t_vars] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[warning] ( literal[string] )
identifier[out] = identifier[np] . identifier[array] ([])
keyword[if] identifier[len] ( identifier[self] . identifier[t] )== literal[int] :
keyword[return] identifier[out]
identifier[out] = identifier[np] . identifier[ndarray] ( identifier[shape] =( literal[int] , identifier[self] . identifier[vars] [ literal[int] ]. identifier[size] [ literal[int] ]+ literal[int] ))
keyword[for] identifier[t] , identifier[var] keyword[in] identifier[zip] ( identifier[self] . identifier[t] , identifier[self] . identifier[vars] ):
identifier[line] =[[ identifier[t] ]]
identifier[line] [ literal[int] ]. identifier[extend] ( identifier[list] ( identifier[var] ))
identifier[out] = identifier[np] . identifier[append] ( identifier[out] , identifier[line] , identifier[axis] = literal[int] )
keyword[return] identifier[out] | def concat_t_vars(self):
"""
Concatenate ``self.t`` with ``self.vars`` and output a single matrix
for data dump
:return matrix: concatenated matrix with ``self.t`` as the 0-th column
"""
logger.warning('This function is deprecated and replaced by `concat_t_vars_np`.')
out = np.array([])
if len(self.t) == 0:
return out # depends on [control=['if'], data=[]]
out = np.ndarray(shape=(0, self.vars[0].size[0] + 1))
for (t, var) in zip(self.t, self.vars):
line = [[t]]
line[0].extend(list(var))
out = np.append(out, line, axis=0) # depends on [control=['for'], data=[]]
return out |
def to_table_data(self):
"""
:raises ValueError:
:raises pytablereader.error.ValidationError:
"""
self._validate_source_data()
for table_key, json_records in six.iteritems(self._buffer):
self._loader.inc_table_count()
self._table_key = table_key
yield TableData(
self._make_table_name(),
["key", "value"],
[record for record in json_records.items()],
dp_extractor=self._loader.dp_extractor,
type_hints=self._extract_type_hints(),
) | def function[to_table_data, parameter[self]]:
constant[
:raises ValueError:
:raises pytablereader.error.ValidationError:
]
call[name[self]._validate_source_data, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0fdb3d0>, <ast.Name object at 0x7da1b0fdbd30>]]] in starred[call[name[six].iteritems, parameter[name[self]._buffer]]] begin[:]
call[name[self]._loader.inc_table_count, parameter[]]
name[self]._table_key assign[=] name[table_key]
<ast.Yield object at 0x7da1b0fda6e0> | keyword[def] identifier[to_table_data] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_validate_source_data] ()
keyword[for] identifier[table_key] , identifier[json_records] keyword[in] identifier[six] . identifier[iteritems] ( identifier[self] . identifier[_buffer] ):
identifier[self] . identifier[_loader] . identifier[inc_table_count] ()
identifier[self] . identifier[_table_key] = identifier[table_key]
keyword[yield] identifier[TableData] (
identifier[self] . identifier[_make_table_name] (),
[ literal[string] , literal[string] ],
[ identifier[record] keyword[for] identifier[record] keyword[in] identifier[json_records] . identifier[items] ()],
identifier[dp_extractor] = identifier[self] . identifier[_loader] . identifier[dp_extractor] ,
identifier[type_hints] = identifier[self] . identifier[_extract_type_hints] (),
) | def to_table_data(self):
"""
:raises ValueError:
:raises pytablereader.error.ValidationError:
"""
self._validate_source_data()
for (table_key, json_records) in six.iteritems(self._buffer):
self._loader.inc_table_count()
self._table_key = table_key
yield TableData(self._make_table_name(), ['key', 'value'], [record for record in json_records.items()], dp_extractor=self._loader.dp_extractor, type_hints=self._extract_type_hints()) # depends on [control=['for'], data=[]] |
def _load_start_paths(self):
" Start the Read-Eval-Print Loop. "
if self._startup_paths:
for path in self._startup_paths:
if os.path.exists(path):
with open(path, 'rb') as f:
code = compile(f.read(), path, 'exec')
six.exec_(code, self.get_globals(), self.get_locals())
else:
output = self.app.output
output.write('WARNING | File not found: {}\n\n'.format(path)) | def function[_load_start_paths, parameter[self]]:
constant[ Start the Read-Eval-Print Loop. ]
if name[self]._startup_paths begin[:]
for taget[name[path]] in starred[name[self]._startup_paths] begin[:]
if call[name[os].path.exists, parameter[name[path]]] begin[:]
with call[name[open], parameter[name[path], constant[rb]]] begin[:]
variable[code] assign[=] call[name[compile], parameter[call[name[f].read, parameter[]], name[path], constant[exec]]]
call[name[six].exec_, parameter[name[code], call[name[self].get_globals, parameter[]], call[name[self].get_locals, parameter[]]]] | keyword[def] identifier[_load_start_paths] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_startup_paths] :
keyword[for] identifier[path] keyword[in] identifier[self] . identifier[_startup_paths] :
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] :
identifier[code] = identifier[compile] ( identifier[f] . identifier[read] (), identifier[path] , literal[string] )
identifier[six] . identifier[exec_] ( identifier[code] , identifier[self] . identifier[get_globals] (), identifier[self] . identifier[get_locals] ())
keyword[else] :
identifier[output] = identifier[self] . identifier[app] . identifier[output]
identifier[output] . identifier[write] ( literal[string] . identifier[format] ( identifier[path] )) | def _load_start_paths(self):
""" Start the Read-Eval-Print Loop. """
if self._startup_paths:
for path in self._startup_paths:
if os.path.exists(path):
with open(path, 'rb') as f:
code = compile(f.read(), path, 'exec')
six.exec_(code, self.get_globals(), self.get_locals()) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
else:
output = self.app.output
output.write('WARNING | File not found: {}\n\n'.format(path)) # depends on [control=['for'], data=['path']] # depends on [control=['if'], data=[]] |
def get_log_slice(db, job_id, start, stop):
    """
    Get a slice of the calculation log as a JSON list of rows
    :param db:
        a :class:`openquake.server.dbapi.Db` instance
    :param job_id:
        a job ID
    :param start:
        start of the slice
    :param stop:
        end of the slice (the last element is excluded)
    """
    # start/stop may arrive as strings (e.g. from an HTTP request); normalize.
    start = int(start)
    stop = int(stop)
    # stop == 0 means "to the end": LIMIT -1 disables the limit in SQLite.
    limit = -1 if stop == 0 else stop - start
    # NOTE(review): '?x' / '?s' look like the dbapi's own placeholder syntax —
    # confirm against openquake.server.dbapi before touching this query.
    logs = db('SELECT * FROM log WHERE job_id=?x '
              'ORDER BY id LIMIT ?s OFFSET ?s',
              job_id, limit, start)
    # NB: .isoformat() returns a string like '2016-08-29T15:42:34.984756'
    # we consider only the first 22 characters, i.e. '2016-08-29T15:42:34.98'
    return [[log.timestamp.isoformat()[:22], log.level,
             log.process, log.message] for log in logs] | def function[get_log_slice, parameter[db, job_id, start, stop]]:
constant[
Get a slice of the calculation log as a JSON list of rows
:param db:
a :class:`openquake.server.dbapi.Db` instance
:param job_id:
a job ID
:param start:
start of the slice
:param stop:
end of the slice (the last element is excluded)
]
variable[start] assign[=] call[name[int], parameter[name[start]]]
variable[stop] assign[=] call[name[int], parameter[name[stop]]]
variable[limit] assign[=] <ast.IfExp object at 0x7da207f02830>
variable[logs] assign[=] call[name[db], parameter[constant[SELECT * FROM log WHERE job_id=?x ORDER BY id LIMIT ?s OFFSET ?s], name[job_id], name[limit], name[start]]]
return[<ast.ListComp object at 0x7da207f03190>] | keyword[def] identifier[get_log_slice] ( identifier[db] , identifier[job_id] , identifier[start] , identifier[stop] ):
literal[string]
identifier[start] = identifier[int] ( identifier[start] )
identifier[stop] = identifier[int] ( identifier[stop] )
identifier[limit] =- literal[int] keyword[if] identifier[stop] == literal[int] keyword[else] identifier[stop] - identifier[start]
identifier[logs] = identifier[db] ( literal[string]
literal[string] ,
identifier[job_id] , identifier[limit] , identifier[start] )
keyword[return] [[ identifier[log] . identifier[timestamp] . identifier[isoformat] ()[: literal[int] ], identifier[log] . identifier[level] ,
identifier[log] . identifier[process] , identifier[log] . identifier[message] ] keyword[for] identifier[log] keyword[in] identifier[logs] ] | def get_log_slice(db, job_id, start, stop):
"""
Get a slice of the calculation log as a JSON list of rows
:param db:
a :class:`openquake.server.dbapi.Db` instance
:param job_id:
a job ID
:param start:
start of the slice
:param stop:
end of the slice (the last element is excluded)
"""
start = int(start)
stop = int(stop)
limit = -1 if stop == 0 else stop - start
logs = db('SELECT * FROM log WHERE job_id=?x ORDER BY id LIMIT ?s OFFSET ?s', job_id, limit, start)
# NB: .isoformat() returns a string like '2016-08-29T15:42:34.984756'
# we consider only the first 22 characters, i.e. '2016-08-29T15:42:34.98'
return [[log.timestamp.isoformat()[:22], log.level, log.process, log.message] for log in logs] |
def assign_to_topic_partition(self, topic_partition=None):
    """Assign a list of TopicPartitions to this consumer.
    - ``partitions`` (list of `TopicPartition`): Assignment for this instance.
    """
    # Accept a single TopicPartition as a convenience; normalize to the list
    # form expected by the underlying consumer's assign().
    if isinstance(topic_partition, TopicPartition):
        topic_partition = [topic_partition]
    # Only (re)assign when not already assigned — presumably to avoid a
    # redundant reassignment; semantics live in self._is_assigned (verify).
    if not self._is_assigned(topic_partition):
        self.consumer.assign(topic_partition) | def function[assign_to_topic_partition, parameter[self, topic_partition]]:
constant[Assign a list of TopicPartitions to this consumer.
- ``partitions`` (list of `TopicPartition`): Assignment for this instance.
]
if call[name[isinstance], parameter[name[topic_partition], name[TopicPartition]]] begin[:]
variable[topic_partition] assign[=] list[[<ast.Name object at 0x7da1b0f2c0d0>]]
if <ast.UnaryOp object at 0x7da1b0f2c130> begin[:]
call[name[self].consumer.assign, parameter[name[topic_partition]]] | keyword[def] identifier[assign_to_topic_partition] ( identifier[self] , identifier[topic_partition] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[topic_partition] , identifier[TopicPartition] ):
identifier[topic_partition] =[ identifier[topic_partition] ]
keyword[if] keyword[not] identifier[self] . identifier[_is_assigned] ( identifier[topic_partition] ):
identifier[self] . identifier[consumer] . identifier[assign] ( identifier[topic_partition] ) | def assign_to_topic_partition(self, topic_partition=None):
"""Assign a list of TopicPartitions to this consumer.
- ``partitions`` (list of `TopicPartition`): Assignment for this instance.
"""
if isinstance(topic_partition, TopicPartition):
topic_partition = [topic_partition] # depends on [control=['if'], data=[]]
if not self._is_assigned(topic_partition):
self.consumer.assign(topic_partition) # depends on [control=['if'], data=[]] |
def prep_input(self, read_list):
    "Prepare the list of files or text content objects to be read."
    logger.info('Prepping input for sparser.')
    # Paths of the nxml files staged in self.tmp_dir for Sparser to consume.
    self.file_list = []
    for content in read_list:
        # _check_content returns a reason string when the text fails the
        # quality check, or None when the content is acceptable.
        quality_issue = self._check_content(content.get_text())
        if quality_issue is not None:
            # NOTE(review): '%d' assumes get_id() returns an int — verify.
            logger.warning("Skipping %d due to: %s"
                           % (content.get_id(), quality_issue))
            continue
        if content.is_format('nxml'):
            # If it is already an nxml, we just need to adjust the
            # name a bit, if anything.
            # NOTE(review): the 'PMC' prefix suggests Sparser keys on
            # PMC-style filenames — confirm before changing.
            if not content.get_filename().startswith('PMC'):
                content.change_id('PMC' + str(content.get_id()))
            fpath = content.copy_to(self.tmp_dir)
            self.file_list.append(fpath)
        elif content.is_format('txt', 'text'):
            # Otherwise we need to frame the content in xml and put it
            # in a new file with the appropriate name.
            nxml_str = sparser.make_nxml_from_text(content.get_text())
            new_content = Content.from_string('PMC' + str(content.get_id()),
                                              'nxml', nxml_str)
            fpath = new_content.copy_to(self.tmp_dir)
            self.file_list.append(fpath)
        else:
            raise SparserError("Unrecognized format %s."
                               % content.format)
    return | def function[prep_input, parameter[self, read_list]]:
constant[Prepare the list of files or text content objects to be read.]
call[name[logger].info, parameter[constant[Prepping input for sparser.]]]
name[self].file_list assign[=] list[[]]
for taget[name[content]] in starred[name[read_list]] begin[:]
variable[quality_issue] assign[=] call[name[self]._check_content, parameter[call[name[content].get_text, parameter[]]]]
if compare[name[quality_issue] is_not constant[None]] begin[:]
call[name[logger].warning, parameter[binary_operation[constant[Skipping %d due to: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da20c991de0>, <ast.Name object at 0x7da20c990e80>]]]]]
continue
if call[name[content].is_format, parameter[constant[nxml]]] begin[:]
if <ast.UnaryOp object at 0x7da20c991000> begin[:]
call[name[content].change_id, parameter[binary_operation[constant[PMC] + call[name[str], parameter[call[name[content].get_id, parameter[]]]]]]]
variable[fpath] assign[=] call[name[content].copy_to, parameter[name[self].tmp_dir]]
call[name[self].file_list.append, parameter[name[fpath]]]
return[None] | keyword[def] identifier[prep_input] ( identifier[self] , identifier[read_list] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] )
identifier[self] . identifier[file_list] =[]
keyword[for] identifier[content] keyword[in] identifier[read_list] :
identifier[quality_issue] = identifier[self] . identifier[_check_content] ( identifier[content] . identifier[get_text] ())
keyword[if] identifier[quality_issue] keyword[is] keyword[not] keyword[None] :
identifier[logger] . identifier[warning] ( literal[string]
%( identifier[content] . identifier[get_id] (), identifier[quality_issue] ))
keyword[continue]
keyword[if] identifier[content] . identifier[is_format] ( literal[string] ):
keyword[if] keyword[not] identifier[content] . identifier[get_filename] (). identifier[startswith] ( literal[string] ):
identifier[content] . identifier[change_id] ( literal[string] + identifier[str] ( identifier[content] . identifier[get_id] ()))
identifier[fpath] = identifier[content] . identifier[copy_to] ( identifier[self] . identifier[tmp_dir] )
identifier[self] . identifier[file_list] . identifier[append] ( identifier[fpath] )
keyword[elif] identifier[content] . identifier[is_format] ( literal[string] , literal[string] ):
identifier[nxml_str] = identifier[sparser] . identifier[make_nxml_from_text] ( identifier[content] . identifier[get_text] ())
identifier[new_content] = identifier[Content] . identifier[from_string] ( literal[string] + identifier[str] ( identifier[content] . identifier[get_id] ()),
literal[string] , identifier[nxml_str] )
identifier[fpath] = identifier[new_content] . identifier[copy_to] ( identifier[self] . identifier[tmp_dir] )
identifier[self] . identifier[file_list] . identifier[append] ( identifier[fpath] )
keyword[else] :
keyword[raise] identifier[SparserError] ( literal[string]
% identifier[content] . identifier[format] )
keyword[return] | def prep_input(self, read_list):
"""Prepare the list of files or text content objects to be read."""
logger.info('Prepping input for sparser.')
self.file_list = []
for content in read_list:
quality_issue = self._check_content(content.get_text())
if quality_issue is not None:
logger.warning('Skipping %d due to: %s' % (content.get_id(), quality_issue))
continue # depends on [control=['if'], data=['quality_issue']]
if content.is_format('nxml'):
# If it is already an nxml, we just need to adjust the
# name a bit, if anything.
if not content.get_filename().startswith('PMC'):
content.change_id('PMC' + str(content.get_id())) # depends on [control=['if'], data=[]]
fpath = content.copy_to(self.tmp_dir)
self.file_list.append(fpath) # depends on [control=['if'], data=[]]
elif content.is_format('txt', 'text'):
# Otherwise we need to frame the content in xml and put it
# in a new file with the appropriate name.
nxml_str = sparser.make_nxml_from_text(content.get_text())
new_content = Content.from_string('PMC' + str(content.get_id()), 'nxml', nxml_str)
fpath = new_content.copy_to(self.tmp_dir)
self.file_list.append(fpath) # depends on [control=['if'], data=[]]
else:
raise SparserError('Unrecognized format %s.' % content.format) # depends on [control=['for'], data=['content']]
return |
def get_resources(cls):
    """Returns Ext Resources."""
    # Controller backed by the core plugin looked up from the directory.
    job_controller = JobsController(
        directory.get_plugin())
    resources = []
    # Expose the Jobs collection under its extension alias.
    resources.append(extensions.ResourceExtension(
        Jobs.get_alias(),
        job_controller))
    return resources | def function[get_resources, parameter[cls]]:
constant[Returns Ext Resources.]
variable[job_controller] assign[=] call[name[JobsController], parameter[call[name[directory].get_plugin, parameter[]]]]
variable[resources] assign[=] list[[]]
call[name[resources].append, parameter[call[name[extensions].ResourceExtension, parameter[call[name[Jobs].get_alias, parameter[]], name[job_controller]]]]]
return[name[resources]] | keyword[def] identifier[get_resources] ( identifier[cls] ):
literal[string]
identifier[job_controller] = identifier[JobsController] (
identifier[directory] . identifier[get_plugin] ())
identifier[resources] =[]
identifier[resources] . identifier[append] ( identifier[extensions] . identifier[ResourceExtension] (
identifier[Jobs] . identifier[get_alias] (),
identifier[job_controller] ))
keyword[return] identifier[resources] | def get_resources(cls):
"""Returns Ext Resources."""
job_controller = JobsController(directory.get_plugin())
resources = []
resources.append(extensions.ResourceExtension(Jobs.get_alias(), job_controller))
return resources |
def python_sidebar_navigation(python_input):
    """
    Create the `Layout` showing the navigation information for the sidebar.
    """
    # Fragment factory: (style-class, text) pairs rendered by the control.
    def get_text_fragments():
        tokens = []
        # Show navigation info.
        tokens.extend([
            ('class:sidebar', ' '),
            ('class:sidebar.key', '[Arrows]'),
            ('class:sidebar', ' '),
            ('class:sidebar.description', 'Navigate'),
            ('class:sidebar', ' '),
            ('class:sidebar.key', '[Enter]'),
            ('class:sidebar', ' '),
            ('class:sidebar.description', 'Hide menu'),
        ])
        return tokens
    # NOTE(review): the fixed width (43) presumably matches the sidebar's
    # width elsewhere in the layout — confirm before changing.
    return Window(
        FormattedTextControl(get_text_fragments),
        style='class:sidebar',
        width=Dimension.exact(43),
        height=Dimension.exact(1)) | def function[python_sidebar_navigation, parameter[python_input]]:
constant[
Create the `Layout` showing the navigation information for the sidebar.
]
def function[get_text_fragments, parameter[]]:
variable[tokens] assign[=] list[[]]
call[name[tokens].extend, parameter[list[[<ast.Tuple object at 0x7da1b0855ed0>, <ast.Tuple object at 0x7da1b0856bc0>, <ast.Tuple object at 0x7da1b0857c40>, <ast.Tuple object at 0x7da1b0854df0>, <ast.Tuple object at 0x7da1b0854100>, <ast.Tuple object at 0x7da1b0854070>, <ast.Tuple object at 0x7da1b0857fa0>, <ast.Tuple object at 0x7da1b0857eb0>]]]]
return[name[tokens]]
return[call[name[Window], parameter[call[name[FormattedTextControl], parameter[name[get_text_fragments]]]]]] | keyword[def] identifier[python_sidebar_navigation] ( identifier[python_input] ):
literal[string]
keyword[def] identifier[get_text_fragments] ():
identifier[tokens] =[]
identifier[tokens] . identifier[extend] ([
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
])
keyword[return] identifier[tokens]
keyword[return] identifier[Window] (
identifier[FormattedTextControl] ( identifier[get_text_fragments] ),
identifier[style] = literal[string] ,
identifier[width] = identifier[Dimension] . identifier[exact] ( literal[int] ),
identifier[height] = identifier[Dimension] . identifier[exact] ( literal[int] )) | def python_sidebar_navigation(python_input):
"""
Create the `Layout` showing the navigation information for the sidebar.
"""
def get_text_fragments():
tokens = []
# Show navigation info.
tokens.extend([('class:sidebar', ' '), ('class:sidebar.key', '[Arrows]'), ('class:sidebar', ' '), ('class:sidebar.description', 'Navigate'), ('class:sidebar', ' '), ('class:sidebar.key', '[Enter]'), ('class:sidebar', ' '), ('class:sidebar.description', 'Hide menu')])
return tokens
return Window(FormattedTextControl(get_text_fragments), style='class:sidebar', width=Dimension.exact(43), height=Dimension.exact(1)) |
def OnItemSelected(self, event):
    """Item selection event handler"""
    # Index of the selected list row becomes the new table index.
    value = event.m_itemIndex
    self.startIndex = value
    # Guard flag set around the table-switch command — presumably so other
    # handlers can ignore events generated by the switch; TODO confirm.
    self.switching = True
    post_command_event(self, self.GridActionTableSwitchMsg, newtable=value)
    self.switching = False
    # Let wx continue default processing of the selection event.
    event.Skip() | def function[OnItemSelected, parameter[self, event]]:
constant[Item selection event handler]
variable[value] assign[=] name[event].m_itemIndex
name[self].startIndex assign[=] name[value]
name[self].switching assign[=] constant[True]
call[name[post_command_event], parameter[name[self], name[self].GridActionTableSwitchMsg]]
name[self].switching assign[=] constant[False]
call[name[event].Skip, parameter[]] | keyword[def] identifier[OnItemSelected] ( identifier[self] , identifier[event] ):
literal[string]
identifier[value] = identifier[event] . identifier[m_itemIndex]
identifier[self] . identifier[startIndex] = identifier[value]
identifier[self] . identifier[switching] = keyword[True]
identifier[post_command_event] ( identifier[self] , identifier[self] . identifier[GridActionTableSwitchMsg] , identifier[newtable] = identifier[value] )
identifier[self] . identifier[switching] = keyword[False]
identifier[event] . identifier[Skip] () | def OnItemSelected(self, event):
"""Item selection event handler"""
value = event.m_itemIndex
self.startIndex = value
self.switching = True
post_command_event(self, self.GridActionTableSwitchMsg, newtable=value)
self.switching = False
event.Skip() |
def get_urls(self):
    """
    Add the entries view to urls.
    """
    urls = super(FormAdmin, self).get_urls()
    # Custom views are placed before the default admin urls so that they
    # are matched first. Each view is wrapped in admin_view for auth.
    extra_urls = [
        re_path("^(?P<form_id>\d+)/entries/$",
                self.admin_site.admin_view(self.entries_view),
                name="form_entries"),
        re_path("^(?P<form_id>\d+)/entries/show/$",
                self.admin_site.admin_view(self.entries_view),
                {"show": True}, name="form_entries_show"),
        re_path("^(?P<form_id>\d+)/entries/export/$",
                self.admin_site.admin_view(self.entries_view),
                {"export": True}, name="form_entries_export"),
        re_path("^file/(?P<field_entry_id>\d+)/$",
                self.admin_site.admin_view(self.file_view),
                name="form_file"),
    ]
    return extra_urls + urls | def function[get_urls, parameter[self]]:
constant[
Add the entries view to urls.
]
variable[urls] assign[=] call[call[name[super], parameter[name[FormAdmin], name[self]]].get_urls, parameter[]]
variable[extra_urls] assign[=] list[[<ast.Call object at 0x7da1b20b5bd0>, <ast.Call object at 0x7da1b20b4f40>, <ast.Call object at 0x7da1b20b4a60>, <ast.Call object at 0x7da20e961720>]]
return[binary_operation[name[extra_urls] + name[urls]]] | keyword[def] identifier[get_urls] ( identifier[self] ):
literal[string]
identifier[urls] = identifier[super] ( identifier[FormAdmin] , identifier[self] ). identifier[get_urls] ()
identifier[extra_urls] =[
identifier[re_path] ( literal[string] ,
identifier[self] . identifier[admin_site] . identifier[admin_view] ( identifier[self] . identifier[entries_view] ),
identifier[name] = literal[string] ),
identifier[re_path] ( literal[string] ,
identifier[self] . identifier[admin_site] . identifier[admin_view] ( identifier[self] . identifier[entries_view] ),
{ literal[string] : keyword[True] }, identifier[name] = literal[string] ),
identifier[re_path] ( literal[string] ,
identifier[self] . identifier[admin_site] . identifier[admin_view] ( identifier[self] . identifier[entries_view] ),
{ literal[string] : keyword[True] }, identifier[name] = literal[string] ),
identifier[re_path] ( literal[string] ,
identifier[self] . identifier[admin_site] . identifier[admin_view] ( identifier[self] . identifier[file_view] ),
identifier[name] = literal[string] ),
]
keyword[return] identifier[extra_urls] + identifier[urls] | def get_urls(self):
"""
Add the entries view to urls.
"""
urls = super(FormAdmin, self).get_urls()
extra_urls = [re_path('^(?P<form_id>\\d+)/entries/$', self.admin_site.admin_view(self.entries_view), name='form_entries'), re_path('^(?P<form_id>\\d+)/entries/show/$', self.admin_site.admin_view(self.entries_view), {'show': True}, name='form_entries_show'), re_path('^(?P<form_id>\\d+)/entries/export/$', self.admin_site.admin_view(self.entries_view), {'export': True}, name='form_entries_export'), re_path('^file/(?P<field_entry_id>\\d+)/$', self.admin_site.admin_view(self.file_view), name='form_file')]
return extra_urls + urls |
def get_community_trends(self, indicator_type=None, days_back=None):
    """
    Find indicators that are trending in the community.
    :param indicator_type: A type of indicator to filter by. If ``None``, will get all types of indicators except
        for MALWARE and CVEs (this convention is for parity with the corresponding view on the Dashboard).
    :param days_back: The number of days back to search. Any integer between 1 and 30 is allowed.
    :return: A list of |Indicator| objects.
    """
    # None values are forwarded as-is — presumably the client drops them or
    # the API treats a None filter as "no filter"; verify in self._client.
    params = {
        'type': indicator_type,
        'daysBack': days_back
    }
    resp = self._client.get("indicators/community-trending", params=params)
    body = resp.json()
    # parse items in response as indicators
    return [Indicator.from_dict(indicator) for indicator in body] | def function[get_community_trends, parameter[self, indicator_type, days_back]]:
constant[
Find indicators that are trending in the community.
:param indicator_type: A type of indicator to filter by. If ``None``, will get all types of indicators except
for MALWARE and CVEs (this convention is for parity with the corresponding view on the Dashboard).
:param days_back: The number of days back to search. Any integer between 1 and 30 is allowed.
:return: A list of |Indicator| objects.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20c794e80>, <ast.Constant object at 0x7da20c794460>], [<ast.Name object at 0x7da20c7957b0>, <ast.Name object at 0x7da20c795c00>]]
variable[resp] assign[=] call[name[self]._client.get, parameter[constant[indicators/community-trending]]]
variable[body] assign[=] call[name[resp].json, parameter[]]
return[<ast.ListComp object at 0x7da1b23467a0>] | keyword[def] identifier[get_community_trends] ( identifier[self] , identifier[indicator_type] = keyword[None] , identifier[days_back] = keyword[None] ):
literal[string]
identifier[params] ={
literal[string] : identifier[indicator_type] ,
literal[string] : identifier[days_back]
}
identifier[resp] = identifier[self] . identifier[_client] . identifier[get] ( literal[string] , identifier[params] = identifier[params] )
identifier[body] = identifier[resp] . identifier[json] ()
keyword[return] [ identifier[Indicator] . identifier[from_dict] ( identifier[indicator] ) keyword[for] identifier[indicator] keyword[in] identifier[body] ] | def get_community_trends(self, indicator_type=None, days_back=None):
"""
Find indicators that are trending in the community.
:param indicator_type: A type of indicator to filter by. If ``None``, will get all types of indicators except
for MALWARE and CVEs (this convention is for parity with the corresponding view on the Dashboard).
:param days_back: The number of days back to search. Any integer between 1 and 30 is allowed.
:return: A list of |Indicator| objects.
"""
params = {'type': indicator_type, 'daysBack': days_back}
resp = self._client.get('indicators/community-trending', params=params)
body = resp.json()
# parse items in response as indicators
return [Indicator.from_dict(indicator) for indicator in body] |
def funnel(self, steps, timeframe=None, timezone=None, max_age=None, all_keys=False):
    """ Performs a Funnel query
    Returns an object containing the results for each step of the funnel.
    :param steps: array of dictionaries, one for each step. example:
        [{"event_collection":"signup","actor_property":"user.id"},
        {"event_collection":"purchase","actor_property:"user.id"}]
    :param timeframe: string or dict, the timeframe in which the events
        happened example: "previous_7_days"
    :param timezone: int, the timezone you'd like to use for the timeframe
        and interval in seconds
    :param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
        willing to trade for increased query performance, in seconds
    :all_keys: set to true to return all keys on response (i.e. "result", "actors", "steps")
    """
    # Build the analysis parameters; presumably get_params drops None
    # values so only supplied options reach the API — TODO confirm.
    params = self.get_params(
        steps=steps,
        timeframe=timeframe,
        timezone=timezone,
        max_age=max_age,
    )
    return self.api.query("funnel", params, all_keys=all_keys) | def function[funnel, parameter[self, steps, timeframe, timezone, max_age, all_keys]]:
constant[ Performs a Funnel query
Returns an object containing the results for each step of the funnel.
:param steps: array of dictionaries, one for each step. example:
[{"event_collection":"signup","actor_property":"user.id"},
{"event_collection":"purchase","actor_property:"user.id"}]
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
:all_keys: set to true to return all keys on response (i.e. "result", "actors", "steps")
]
variable[params] assign[=] call[name[self].get_params, parameter[]]
return[call[name[self].api.query, parameter[constant[funnel], name[params]]]] | keyword[def] identifier[funnel] ( identifier[self] , identifier[steps] , identifier[timeframe] = keyword[None] , identifier[timezone] = keyword[None] , identifier[max_age] = keyword[None] , identifier[all_keys] = keyword[False] ):
literal[string]
identifier[params] = identifier[self] . identifier[get_params] (
identifier[steps] = identifier[steps] ,
identifier[timeframe] = identifier[timeframe] ,
identifier[timezone] = identifier[timezone] ,
identifier[max_age] = identifier[max_age] ,
)
keyword[return] identifier[self] . identifier[api] . identifier[query] ( literal[string] , identifier[params] , identifier[all_keys] = identifier[all_keys] ) | def funnel(self, steps, timeframe=None, timezone=None, max_age=None, all_keys=False):
""" Performs a Funnel query
Returns an object containing the results for each step of the funnel.
:param steps: array of dictionaries, one for each step. example:
[{"event_collection":"signup","actor_property":"user.id"},
{"event_collection":"purchase","actor_property:"user.id"}]
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
:all_keys: set to true to return all keys on response (i.e. "result", "actors", "steps")
"""
params = self.get_params(steps=steps, timeframe=timeframe, timezone=timezone, max_age=max_age)
return self.api.query('funnel', params, all_keys=all_keys) |
def sphinxify(docstring, context, buildername='html'):
    """
    Runs Sphinx on a docstring and outputs the processed documentation.
    Parameters
    ----------
    docstring : str
        a ReST-formatted docstring
    context : dict
        Variables to be passed to the layout template to control how its
        rendered (through the Sphinx variable *html_context*).
    buildername:  str
        It can be either `html` or `text`.
    Returns
    -------
    An Sphinx-processed string, in either HTML or plain text format, depending
    on the value of `buildername`
    """
    # Sphinx builds from a throw-away source dir holding one docstring.rst.
    srcdir = mkdtemp()
    srcdir = encoding.to_unicode_from_fs(srcdir)
    destdir = osp.join(srcdir, '_build')
    rst_name = osp.join(srcdir, 'docstring.rst')
    if buildername == 'html':
        suffix = '.html'
    else:
        suffix = '.txt'
    output_name = osp.join(destdir, 'docstring' + suffix)
    # This is needed so users can type \\ on latex eqnarray envs inside raw
    # docstrings
    if context['right_sphinx_version'] and context['math_on']:
        docstring = docstring.replace('\\\\', '\\\\\\\\')
    # Add a class to several characters on the argspec. This way we can
    # highlight them using css, in a similar way to what IPython does.
    # NOTE: Before doing this, we escape common html chars so that they
    # don't interfere with the rest of html present in the page
    argspec = escape(context['argspec'])
    for char in ['=', ',', '(', ')', '*', '**']:
        argspec = argspec.replace(char,
               '<span class="argspec-highlight">' + char + '</span>')
    context['argspec'] = argspec
    # Write the docstring into the temporary rst source file.
    doc_file = codecs.open(rst_name, 'w', encoding='utf-8')
    doc_file.write(docstring)
    doc_file.close()
    # NOTE(review): temp_confdir is hard-wired to False, so the mkdtemp
    # branch below (and the matching rmtree at the end) is dead code.
    temp_confdir = False
    if temp_confdir:
        # TODO: This may be inefficient. Find a faster way to do it.
        confdir = mkdtemp()
        confdir = encoding.to_unicode_from_fs(confdir)
        generate_configuration(confdir)
    else:
        confdir = osp.join(get_module_source_path('spyder.plugins.help.utils'))
    confoverrides = {'html_context': context}
    doctreedir = osp.join(srcdir, 'doctrees')
    sphinx_app = Sphinx(srcdir, confdir, destdir, doctreedir, buildername,
                        confoverrides, status=None, warning=None,
                        freshenv=True, warningiserror=False, tags=None)
    try:
        sphinx_app.build(None, [rst_name])
    except SystemMessage:
        output = _("It was not possible to generate rich text help for this "
                    "object.</br>"
                    "Please see it in plain text.")
        return warning(output)
    # TODO: Investigate if this is necessary/important for us
    if osp.exists(output_name):
        output = codecs.open(output_name, 'r', encoding='utf-8').read()
        output = output.replace('<pre>', '<pre class="literal-block">')
    else:
        output = _("It was not possible to generate rich text help for this "
                    "object.</br>"
                    "Please see it in plain text.")
        return warning(output)
    if temp_confdir:
        shutil.rmtree(confdir, ignore_errors=True)
    shutil.rmtree(srcdir, ignore_errors=True)
    return output | def function[sphinxify, parameter[docstring, context, buildername]]:
constant[
Runs Sphinx on a docstring and outputs the processed documentation.
Parameters
----------
docstring : str
a ReST-formatted docstring
context : dict
Variables to be passed to the layout template to control how its
rendered (through the Sphinx variable *html_context*).
buildername: str
It can be either `html` or `text`.
Returns
-------
An Sphinx-processed string, in either HTML or plain text format, depending
on the value of `buildername`
]
variable[srcdir] assign[=] call[name[mkdtemp], parameter[]]
variable[srcdir] assign[=] call[name[encoding].to_unicode_from_fs, parameter[name[srcdir]]]
variable[destdir] assign[=] call[name[osp].join, parameter[name[srcdir], constant[_build]]]
variable[rst_name] assign[=] call[name[osp].join, parameter[name[srcdir], constant[docstring.rst]]]
if compare[name[buildername] equal[==] constant[html]] begin[:]
variable[suffix] assign[=] constant[.html]
variable[output_name] assign[=] call[name[osp].join, parameter[name[destdir], binary_operation[constant[docstring] + name[suffix]]]]
if <ast.BoolOp object at 0x7da20c7c9a50> begin[:]
variable[docstring] assign[=] call[name[docstring].replace, parameter[constant[\\], constant[\\\\]]]
variable[argspec] assign[=] call[name[escape], parameter[call[name[context]][constant[argspec]]]]
for taget[name[char]] in starred[list[[<ast.Constant object at 0x7da2054a64a0>, <ast.Constant object at 0x7da2054a5840>, <ast.Constant object at 0x7da2054a5e70>, <ast.Constant object at 0x7da2054a6c20>, <ast.Constant object at 0x7da2054a7670>, <ast.Constant object at 0x7da2054a6530>]]] begin[:]
variable[argspec] assign[=] call[name[argspec].replace, parameter[name[char], binary_operation[binary_operation[constant[<span class="argspec-highlight">] + name[char]] + constant[</span>]]]]
call[name[context]][constant[argspec]] assign[=] name[argspec]
variable[doc_file] assign[=] call[name[codecs].open, parameter[name[rst_name], constant[w]]]
call[name[doc_file].write, parameter[name[docstring]]]
call[name[doc_file].close, parameter[]]
variable[temp_confdir] assign[=] constant[False]
if name[temp_confdir] begin[:]
variable[confdir] assign[=] call[name[mkdtemp], parameter[]]
variable[confdir] assign[=] call[name[encoding].to_unicode_from_fs, parameter[name[confdir]]]
call[name[generate_configuration], parameter[name[confdir]]]
variable[confoverrides] assign[=] dictionary[[<ast.Constant object at 0x7da2054a57e0>], [<ast.Name object at 0x7da2054a5ae0>]]
variable[doctreedir] assign[=] call[name[osp].join, parameter[name[srcdir], constant[doctrees]]]
variable[sphinx_app] assign[=] call[name[Sphinx], parameter[name[srcdir], name[confdir], name[destdir], name[doctreedir], name[buildername], name[confoverrides]]]
<ast.Try object at 0x7da2054a49a0>
if call[name[osp].exists, parameter[name[output_name]]] begin[:]
variable[output] assign[=] call[call[name[codecs].open, parameter[name[output_name], constant[r]]].read, parameter[]]
variable[output] assign[=] call[name[output].replace, parameter[constant[<pre>], constant[<pre class="literal-block">]]]
if name[temp_confdir] begin[:]
call[name[shutil].rmtree, parameter[name[confdir]]]
call[name[shutil].rmtree, parameter[name[srcdir]]]
return[name[output]] | keyword[def] identifier[sphinxify] ( identifier[docstring] , identifier[context] , identifier[buildername] = literal[string] ):
literal[string]
identifier[srcdir] = identifier[mkdtemp] ()
identifier[srcdir] = identifier[encoding] . identifier[to_unicode_from_fs] ( identifier[srcdir] )
identifier[destdir] = identifier[osp] . identifier[join] ( identifier[srcdir] , literal[string] )
identifier[rst_name] = identifier[osp] . identifier[join] ( identifier[srcdir] , literal[string] )
keyword[if] identifier[buildername] == literal[string] :
identifier[suffix] = literal[string]
keyword[else] :
identifier[suffix] = literal[string]
identifier[output_name] = identifier[osp] . identifier[join] ( identifier[destdir] , literal[string] + identifier[suffix] )
keyword[if] identifier[context] [ literal[string] ] keyword[and] identifier[context] [ literal[string] ]:
identifier[docstring] = identifier[docstring] . identifier[replace] ( literal[string] , literal[string] )
identifier[argspec] = identifier[escape] ( identifier[context] [ literal[string] ])
keyword[for] identifier[char] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[argspec] = identifier[argspec] . identifier[replace] ( identifier[char] ,
literal[string] + identifier[char] + literal[string] )
identifier[context] [ literal[string] ]= identifier[argspec]
identifier[doc_file] = identifier[codecs] . identifier[open] ( identifier[rst_name] , literal[string] , identifier[encoding] = literal[string] )
identifier[doc_file] . identifier[write] ( identifier[docstring] )
identifier[doc_file] . identifier[close] ()
identifier[temp_confdir] = keyword[False]
keyword[if] identifier[temp_confdir] :
identifier[confdir] = identifier[mkdtemp] ()
identifier[confdir] = identifier[encoding] . identifier[to_unicode_from_fs] ( identifier[confdir] )
identifier[generate_configuration] ( identifier[confdir] )
keyword[else] :
identifier[confdir] = identifier[osp] . identifier[join] ( identifier[get_module_source_path] ( literal[string] ))
identifier[confoverrides] ={ literal[string] : identifier[context] }
identifier[doctreedir] = identifier[osp] . identifier[join] ( identifier[srcdir] , literal[string] )
identifier[sphinx_app] = identifier[Sphinx] ( identifier[srcdir] , identifier[confdir] , identifier[destdir] , identifier[doctreedir] , identifier[buildername] ,
identifier[confoverrides] , identifier[status] = keyword[None] , identifier[warning] = keyword[None] ,
identifier[freshenv] = keyword[True] , identifier[warningiserror] = keyword[False] , identifier[tags] = keyword[None] )
keyword[try] :
identifier[sphinx_app] . identifier[build] ( keyword[None] ,[ identifier[rst_name] ])
keyword[except] identifier[SystemMessage] :
identifier[output] = identifier[_] ( literal[string]
literal[string]
literal[string] )
keyword[return] identifier[warning] ( identifier[output] )
keyword[if] identifier[osp] . identifier[exists] ( identifier[output_name] ):
identifier[output] = identifier[codecs] . identifier[open] ( identifier[output_name] , literal[string] , identifier[encoding] = literal[string] ). identifier[read] ()
identifier[output] = identifier[output] . identifier[replace] ( literal[string] , literal[string] )
keyword[else] :
identifier[output] = identifier[_] ( literal[string]
literal[string]
literal[string] )
keyword[return] identifier[warning] ( identifier[output] )
keyword[if] identifier[temp_confdir] :
identifier[shutil] . identifier[rmtree] ( identifier[confdir] , identifier[ignore_errors] = keyword[True] )
identifier[shutil] . identifier[rmtree] ( identifier[srcdir] , identifier[ignore_errors] = keyword[True] )
keyword[return] identifier[output] | def sphinxify(docstring, context, buildername='html'):
"""
Runs Sphinx on a docstring and outputs the processed documentation.
Parameters
----------
docstring : str
a ReST-formatted docstring
context : dict
Variables to be passed to the layout template to control how its
rendered (through the Sphinx variable *html_context*).
buildername: str
It can be either `html` or `text`.
Returns
-------
An Sphinx-processed string, in either HTML or plain text format, depending
on the value of `buildername`
"""
srcdir = mkdtemp()
srcdir = encoding.to_unicode_from_fs(srcdir)
destdir = osp.join(srcdir, '_build')
rst_name = osp.join(srcdir, 'docstring.rst')
if buildername == 'html':
suffix = '.html' # depends on [control=['if'], data=[]]
else:
suffix = '.txt'
output_name = osp.join(destdir, 'docstring' + suffix)
# This is needed so users can type \\ on latex eqnarray envs inside raw
# docstrings
if context['right_sphinx_version'] and context['math_on']:
docstring = docstring.replace('\\\\', '\\\\\\\\') # depends on [control=['if'], data=[]]
# Add a class to several characters on the argspec. This way we can
# highlight them using css, in a similar way to what IPython does.
# NOTE: Before doing this, we escape common html chars so that they
# don't interfere with the rest of html present in the page
argspec = escape(context['argspec'])
for char in ['=', ',', '(', ')', '*', '**']:
argspec = argspec.replace(char, '<span class="argspec-highlight">' + char + '</span>') # depends on [control=['for'], data=['char']]
context['argspec'] = argspec
doc_file = codecs.open(rst_name, 'w', encoding='utf-8')
doc_file.write(docstring)
doc_file.close()
temp_confdir = False
if temp_confdir:
# TODO: This may be inefficient. Find a faster way to do it.
confdir = mkdtemp()
confdir = encoding.to_unicode_from_fs(confdir)
generate_configuration(confdir) # depends on [control=['if'], data=[]]
else:
confdir = osp.join(get_module_source_path('spyder.plugins.help.utils'))
confoverrides = {'html_context': context}
doctreedir = osp.join(srcdir, 'doctrees')
sphinx_app = Sphinx(srcdir, confdir, destdir, doctreedir, buildername, confoverrides, status=None, warning=None, freshenv=True, warningiserror=False, tags=None)
try:
sphinx_app.build(None, [rst_name]) # depends on [control=['try'], data=[]]
except SystemMessage:
output = _('It was not possible to generate rich text help for this object.</br>Please see it in plain text.')
return warning(output) # depends on [control=['except'], data=[]]
# TODO: Investigate if this is necessary/important for us
if osp.exists(output_name):
output = codecs.open(output_name, 'r', encoding='utf-8').read()
output = output.replace('<pre>', '<pre class="literal-block">') # depends on [control=['if'], data=[]]
else:
output = _('It was not possible to generate rich text help for this object.</br>Please see it in plain text.')
return warning(output)
if temp_confdir:
shutil.rmtree(confdir, ignore_errors=True) # depends on [control=['if'], data=[]]
shutil.rmtree(srcdir, ignore_errors=True)
return output |
def process_fastq_rich(fastq, **kwargs):
"""Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
"""
logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, "fastq"):
try:
read_info = info_to_dict(record.description)
res.append(
(nanomath.ave_qual(record.letter_annotations["phred_quality"]),
len(record),
read_info["ch"],
read_info["start_time"],
read_info["runid"]))
except KeyError:
logging.error("Nanoget: keyerror when processing record {}".format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
record.description))
df = pd.DataFrame(
data=res,
columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
df["channelIDs"] = df["channelIDs"].astype("int64")
return ut.reduce_memory_usage(df) | def function[process_fastq_rich, parameter[fastq]]:
constant[Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
]
call[name[logging].info, parameter[constant[Nanoget: Starting to collect statistics from rich fastq file.]]]
variable[inputfastq] assign[=] call[name[handle_compressed_input], parameter[name[fastq]]]
variable[res] assign[=] list[[]]
for taget[name[record]] in starred[call[name[SeqIO].parse, parameter[name[inputfastq], constant[fastq]]]] begin[:]
<ast.Try object at 0x7da20eb2ae60>
variable[df] assign[=] call[call[name[pd].DataFrame, parameter[]].dropna, parameter[]]
call[name[df]][constant[channelIDs]] assign[=] call[call[name[df]][constant[channelIDs]].astype, parameter[constant[int64]]]
return[call[name[ut].reduce_memory_usage, parameter[name[df]]]] | keyword[def] identifier[process_fastq_rich] ( identifier[fastq] ,** identifier[kwargs] ):
literal[string]
identifier[logging] . identifier[info] ( literal[string] )
identifier[inputfastq] = identifier[handle_compressed_input] ( identifier[fastq] )
identifier[res] =[]
keyword[for] identifier[record] keyword[in] identifier[SeqIO] . identifier[parse] ( identifier[inputfastq] , literal[string] ):
keyword[try] :
identifier[read_info] = identifier[info_to_dict] ( identifier[record] . identifier[description] )
identifier[res] . identifier[append] (
( identifier[nanomath] . identifier[ave_qual] ( identifier[record] . identifier[letter_annotations] [ literal[string] ]),
identifier[len] ( identifier[record] ),
identifier[read_info] [ literal[string] ],
identifier[read_info] [ literal[string] ],
identifier[read_info] [ literal[string] ]))
keyword[except] identifier[KeyError] :
identifier[logging] . identifier[error] ( literal[string] . identifier[format] ( identifier[record] . identifier[description] ))
identifier[sys] . identifier[exit] ( literal[string] . identifier[format] (
identifier[record] . identifier[description] ))
identifier[df] = identifier[pd] . identifier[DataFrame] (
identifier[data] = identifier[res] ,
identifier[columns] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]). identifier[dropna] ()
identifier[df] [ literal[string] ]= identifier[df] [ literal[string] ]. identifier[astype] ( literal[string] )
keyword[return] identifier[ut] . identifier[reduce_memory_usage] ( identifier[df] ) | def process_fastq_rich(fastq, **kwargs):
"""Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
"""
logging.info('Nanoget: Starting to collect statistics from rich fastq file.')
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, 'fastq'):
try:
read_info = info_to_dict(record.description)
res.append((nanomath.ave_qual(record.letter_annotations['phred_quality']), len(record), read_info['ch'], read_info['start_time'], read_info['runid'])) # depends on [control=['try'], data=[]]
except KeyError:
logging.error('Nanoget: keyerror when processing record {}'.format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(record.description)) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['record']]
df = pd.DataFrame(data=res, columns=['quals', 'lengths', 'channelIDs', 'timestamp', 'runIDs']).dropna()
df['channelIDs'] = df['channelIDs'].astype('int64')
return ut.reduce_memory_usage(df) |
def notify(self, msg, color='green', notify='true', message_format='text'):
"""Send notification to specified HipChat room"""
self.message_dict = {
'message': msg,
'color': color,
'notify': notify,
'message_format': message_format,
}
if not self.debug:
return requests.post(
self.notification_url,
json.dumps(self.message_dict),
headers=self.headers
)
else:
print('HipChat message: <{}>'.format(msg))
return [] | def function[notify, parameter[self, msg, color, notify, message_format]]:
constant[Send notification to specified HipChat room]
name[self].message_dict assign[=] dictionary[[<ast.Constant object at 0x7da1b2726050>, <ast.Constant object at 0x7da1b2727520>, <ast.Constant object at 0x7da1b2726ce0>, <ast.Constant object at 0x7da1b2725630>], [<ast.Name object at 0x7da1b2727e80>, <ast.Name object at 0x7da1b27262f0>, <ast.Name object at 0x7da1b2726a70>, <ast.Name object at 0x7da1b2726710>]]
if <ast.UnaryOp object at 0x7da1b2725660> begin[:]
return[call[name[requests].post, parameter[name[self].notification_url, call[name[json].dumps, parameter[name[self].message_dict]]]]] | keyword[def] identifier[notify] ( identifier[self] , identifier[msg] , identifier[color] = literal[string] , identifier[notify] = literal[string] , identifier[message_format] = literal[string] ):
literal[string]
identifier[self] . identifier[message_dict] ={
literal[string] : identifier[msg] ,
literal[string] : identifier[color] ,
literal[string] : identifier[notify] ,
literal[string] : identifier[message_format] ,
}
keyword[if] keyword[not] identifier[self] . identifier[debug] :
keyword[return] identifier[requests] . identifier[post] (
identifier[self] . identifier[notification_url] ,
identifier[json] . identifier[dumps] ( identifier[self] . identifier[message_dict] ),
identifier[headers] = identifier[self] . identifier[headers]
)
keyword[else] :
identifier[print] ( literal[string] . identifier[format] ( identifier[msg] ))
keyword[return] [] | def notify(self, msg, color='green', notify='true', message_format='text'):
"""Send notification to specified HipChat room"""
self.message_dict = {'message': msg, 'color': color, 'notify': notify, 'message_format': message_format}
if not self.debug:
return requests.post(self.notification_url, json.dumps(self.message_dict), headers=self.headers) # depends on [control=['if'], data=[]]
else:
print('HipChat message: <{}>'.format(msg))
return [] |
def send_method_request(self, method: str, method_params: dict) -> dict:
"""
Sends user-defined method and method params
"""
url = '/'.join((self.METHOD_URL, method))
method_params['v'] = self.API_VERSION
if self._access_token:
method_params['access_token'] = self._access_token
response = self.post(url, method_params, timeout=10)
response.raise_for_status()
return json.loads(response.text) | def function[send_method_request, parameter[self, method, method_params]]:
constant[
Sends user-defined method and method params
]
variable[url] assign[=] call[constant[/].join, parameter[tuple[[<ast.Attribute object at 0x7da2054a6800>, <ast.Name object at 0x7da2054a5120>]]]]
call[name[method_params]][constant[v]] assign[=] name[self].API_VERSION
if name[self]._access_token begin[:]
call[name[method_params]][constant[access_token]] assign[=] name[self]._access_token
variable[response] assign[=] call[name[self].post, parameter[name[url], name[method_params]]]
call[name[response].raise_for_status, parameter[]]
return[call[name[json].loads, parameter[name[response].text]]] | keyword[def] identifier[send_method_request] ( identifier[self] , identifier[method] : identifier[str] , identifier[method_params] : identifier[dict] )-> identifier[dict] :
literal[string]
identifier[url] = literal[string] . identifier[join] (( identifier[self] . identifier[METHOD_URL] , identifier[method] ))
identifier[method_params] [ literal[string] ]= identifier[self] . identifier[API_VERSION]
keyword[if] identifier[self] . identifier[_access_token] :
identifier[method_params] [ literal[string] ]= identifier[self] . identifier[_access_token]
identifier[response] = identifier[self] . identifier[post] ( identifier[url] , identifier[method_params] , identifier[timeout] = literal[int] )
identifier[response] . identifier[raise_for_status] ()
keyword[return] identifier[json] . identifier[loads] ( identifier[response] . identifier[text] ) | def send_method_request(self, method: str, method_params: dict) -> dict:
"""
Sends user-defined method and method params
"""
url = '/'.join((self.METHOD_URL, method))
method_params['v'] = self.API_VERSION
if self._access_token:
method_params['access_token'] = self._access_token # depends on [control=['if'], data=[]]
response = self.post(url, method_params, timeout=10)
response.raise_for_status()
return json.loads(response.text) |
def prepare(self, session, event):
"""Prepare phase for session.
:param session: sqlalchemy session
"""
if not event:
self.logger.warn("event empty!")
return
sp_key, sp_hkey = self._keygen(session)
def _pk(obj):
pk_values = tuple(getattr(obj, c.name)
for c in obj.__mapper__.primary_key)
if len(pk_values) == 1:
return pk_values[0]
return pk_values
def _get_dump_value(value):
if hasattr(value, '__mapper__'):
return _pk(value)
return value
pickled_event = {
k: pickle.dumps({_get_dump_value(obj) for obj in objs})
for k, objs in event.items()}
with self.r.pipeline(transaction=False) as p:
p.sadd(sp_key, session.meepo_unique_id)
p.hmset(sp_hkey, pickled_event)
p.execute() | def function[prepare, parameter[self, session, event]]:
constant[Prepare phase for session.
:param session: sqlalchemy session
]
if <ast.UnaryOp object at 0x7da18eb57a00> begin[:]
call[name[self].logger.warn, parameter[constant[event empty!]]]
return[None]
<ast.Tuple object at 0x7da18eb56740> assign[=] call[name[self]._keygen, parameter[name[session]]]
def function[_pk, parameter[obj]]:
variable[pk_values] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da18eb54610>]]
if compare[call[name[len], parameter[name[pk_values]]] equal[==] constant[1]] begin[:]
return[call[name[pk_values]][constant[0]]]
return[name[pk_values]]
def function[_get_dump_value, parameter[value]]:
if call[name[hasattr], parameter[name[value], constant[__mapper__]]] begin[:]
return[call[name[_pk], parameter[name[value]]]]
return[name[value]]
variable[pickled_event] assign[=] <ast.DictComp object at 0x7da18eb54c40>
with call[name[self].r.pipeline, parameter[]] begin[:]
call[name[p].sadd, parameter[name[sp_key], name[session].meepo_unique_id]]
call[name[p].hmset, parameter[name[sp_hkey], name[pickled_event]]]
call[name[p].execute, parameter[]] | keyword[def] identifier[prepare] ( identifier[self] , identifier[session] , identifier[event] ):
literal[string]
keyword[if] keyword[not] identifier[event] :
identifier[self] . identifier[logger] . identifier[warn] ( literal[string] )
keyword[return]
identifier[sp_key] , identifier[sp_hkey] = identifier[self] . identifier[_keygen] ( identifier[session] )
keyword[def] identifier[_pk] ( identifier[obj] ):
identifier[pk_values] = identifier[tuple] ( identifier[getattr] ( identifier[obj] , identifier[c] . identifier[name] )
keyword[for] identifier[c] keyword[in] identifier[obj] . identifier[__mapper__] . identifier[primary_key] )
keyword[if] identifier[len] ( identifier[pk_values] )== literal[int] :
keyword[return] identifier[pk_values] [ literal[int] ]
keyword[return] identifier[pk_values]
keyword[def] identifier[_get_dump_value] ( identifier[value] ):
keyword[if] identifier[hasattr] ( identifier[value] , literal[string] ):
keyword[return] identifier[_pk] ( identifier[value] )
keyword[return] identifier[value]
identifier[pickled_event] ={
identifier[k] : identifier[pickle] . identifier[dumps] ({ identifier[_get_dump_value] ( identifier[obj] ) keyword[for] identifier[obj] keyword[in] identifier[objs] })
keyword[for] identifier[k] , identifier[objs] keyword[in] identifier[event] . identifier[items] ()}
keyword[with] identifier[self] . identifier[r] . identifier[pipeline] ( identifier[transaction] = keyword[False] ) keyword[as] identifier[p] :
identifier[p] . identifier[sadd] ( identifier[sp_key] , identifier[session] . identifier[meepo_unique_id] )
identifier[p] . identifier[hmset] ( identifier[sp_hkey] , identifier[pickled_event] )
identifier[p] . identifier[execute] () | def prepare(self, session, event):
"""Prepare phase for session.
:param session: sqlalchemy session
"""
if not event:
self.logger.warn('event empty!')
return # depends on [control=['if'], data=[]]
(sp_key, sp_hkey) = self._keygen(session)
def _pk(obj):
pk_values = tuple((getattr(obj, c.name) for c in obj.__mapper__.primary_key))
if len(pk_values) == 1:
return pk_values[0] # depends on [control=['if'], data=[]]
return pk_values
def _get_dump_value(value):
if hasattr(value, '__mapper__'):
return _pk(value) # depends on [control=['if'], data=[]]
return value
pickled_event = {k: pickle.dumps({_get_dump_value(obj) for obj in objs}) for (k, objs) in event.items()}
with self.r.pipeline(transaction=False) as p:
p.sadd(sp_key, session.meepo_unique_id)
p.hmset(sp_hkey, pickled_event)
p.execute() # depends on [control=['with'], data=['p']] |
def _updateModelDBResults(self):
""" Retrieves the current results and updates the model's record in
the Model database.
"""
# -----------------------------------------------------------------------
# Get metrics
metrics = self._getMetrics()
# -----------------------------------------------------------------------
# Extract report metrics that match the requested report REs
reportDict = dict([(k,metrics[k]) for k in self._reportMetricLabels])
# -----------------------------------------------------------------------
# Extract the report item that matches the optimize key RE
# TODO cache optimizedMetricLabel sooner
metrics = self._getMetrics()
optimizeDict = dict()
if self._optimizeKeyPattern is not None:
optimizeDict[self._optimizedMetricLabel] = \
metrics[self._optimizedMetricLabel]
# -----------------------------------------------------------------------
# Update model results
results = json.dumps((metrics , optimizeDict))
self._jobsDAO.modelUpdateResults(self._modelID, results=results,
metricValue=optimizeDict.values()[0],
numRecords=(self._currentRecordIndex + 1))
self._logger.debug(
"Model Results: modelID=%s; numRecords=%s; results=%s" % \
(self._modelID, self._currentRecordIndex + 1, results))
return | def function[_updateModelDBResults, parameter[self]]:
constant[ Retrieves the current results and updates the model's record in
the Model database.
]
variable[metrics] assign[=] call[name[self]._getMetrics, parameter[]]
variable[reportDict] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da18dc99e40>]]
variable[metrics] assign[=] call[name[self]._getMetrics, parameter[]]
variable[optimizeDict] assign[=] call[name[dict], parameter[]]
if compare[name[self]._optimizeKeyPattern is_not constant[None]] begin[:]
call[name[optimizeDict]][name[self]._optimizedMetricLabel] assign[=] call[name[metrics]][name[self]._optimizedMetricLabel]
variable[results] assign[=] call[name[json].dumps, parameter[tuple[[<ast.Name object at 0x7da20c6c4c40>, <ast.Name object at 0x7da20c6c4c70>]]]]
call[name[self]._jobsDAO.modelUpdateResults, parameter[name[self]._modelID]]
call[name[self]._logger.debug, parameter[binary_operation[constant[Model Results: modelID=%s; numRecords=%s; results=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c6c7cd0>, <ast.BinOp object at 0x7da20c6c7fa0>, <ast.Name object at 0x7da20c6c67d0>]]]]]
return[None] | keyword[def] identifier[_updateModelDBResults] ( identifier[self] ):
literal[string]
identifier[metrics] = identifier[self] . identifier[_getMetrics] ()
identifier[reportDict] = identifier[dict] ([( identifier[k] , identifier[metrics] [ identifier[k] ]) keyword[for] identifier[k] keyword[in] identifier[self] . identifier[_reportMetricLabels] ])
identifier[metrics] = identifier[self] . identifier[_getMetrics] ()
identifier[optimizeDict] = identifier[dict] ()
keyword[if] identifier[self] . identifier[_optimizeKeyPattern] keyword[is] keyword[not] keyword[None] :
identifier[optimizeDict] [ identifier[self] . identifier[_optimizedMetricLabel] ]= identifier[metrics] [ identifier[self] . identifier[_optimizedMetricLabel] ]
identifier[results] = identifier[json] . identifier[dumps] (( identifier[metrics] , identifier[optimizeDict] ))
identifier[self] . identifier[_jobsDAO] . identifier[modelUpdateResults] ( identifier[self] . identifier[_modelID] , identifier[results] = identifier[results] ,
identifier[metricValue] = identifier[optimizeDict] . identifier[values] ()[ literal[int] ],
identifier[numRecords] =( identifier[self] . identifier[_currentRecordIndex] + literal[int] ))
identifier[self] . identifier[_logger] . identifier[debug] (
literal[string] %( identifier[self] . identifier[_modelID] , identifier[self] . identifier[_currentRecordIndex] + literal[int] , identifier[results] ))
keyword[return] | def _updateModelDBResults(self):
""" Retrieves the current results and updates the model's record in
the Model database.
"""
# -----------------------------------------------------------------------
# Get metrics
metrics = self._getMetrics()
# -----------------------------------------------------------------------
# Extract report metrics that match the requested report REs
reportDict = dict([(k, metrics[k]) for k in self._reportMetricLabels])
# -----------------------------------------------------------------------
# Extract the report item that matches the optimize key RE
# TODO cache optimizedMetricLabel sooner
metrics = self._getMetrics()
optimizeDict = dict()
if self._optimizeKeyPattern is not None:
optimizeDict[self._optimizedMetricLabel] = metrics[self._optimizedMetricLabel] # depends on [control=['if'], data=[]]
# -----------------------------------------------------------------------
# Update model results
results = json.dumps((metrics, optimizeDict))
self._jobsDAO.modelUpdateResults(self._modelID, results=results, metricValue=optimizeDict.values()[0], numRecords=self._currentRecordIndex + 1)
self._logger.debug('Model Results: modelID=%s; numRecords=%s; results=%s' % (self._modelID, self._currentRecordIndex + 1, results))
return |
def callback_url(self, request):
"""
the url to go back after the external service call
:param request: contains the current session
:type request: dict
:rtype: string
"""
service = self.service.split('Service')[1].lower()
return_to = '{service}_callback'.format(service=service)
return '%s://%s%s' % (request.scheme, request.get_host(), reverse(return_to)) | def function[callback_url, parameter[self, request]]:
constant[
the url to go back after the external service call
:param request: contains the current session
:type request: dict
:rtype: string
]
variable[service] assign[=] call[call[call[name[self].service.split, parameter[constant[Service]]]][constant[1]].lower, parameter[]]
variable[return_to] assign[=] call[constant[{service}_callback].format, parameter[]]
return[binary_operation[constant[%s://%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b08c9bd0>, <ast.Call object at 0x7da18ede6560>, <ast.Call object at 0x7da18ede7310>]]]] | keyword[def] identifier[callback_url] ( identifier[self] , identifier[request] ):
literal[string]
identifier[service] = identifier[self] . identifier[service] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[lower] ()
identifier[return_to] = literal[string] . identifier[format] ( identifier[service] = identifier[service] )
keyword[return] literal[string] %( identifier[request] . identifier[scheme] , identifier[request] . identifier[get_host] (), identifier[reverse] ( identifier[return_to] )) | def callback_url(self, request):
"""
the url to go back after the external service call
:param request: contains the current session
:type request: dict
:rtype: string
"""
service = self.service.split('Service')[1].lower()
return_to = '{service}_callback'.format(service=service)
return '%s://%s%s' % (request.scheme, request.get_host(), reverse(return_to)) |
def group_is_client_group(self) -> bool:
"""
Returns: True if this group is a client group
"""
# TODO create test
first_unit = self.get_unit_by_index(1)
if first_unit:
return first_unit.skill == 'Client'
return False | def function[group_is_client_group, parameter[self]]:
constant[
Returns: True if this group is a client group
]
variable[first_unit] assign[=] call[name[self].get_unit_by_index, parameter[constant[1]]]
if name[first_unit] begin[:]
return[compare[name[first_unit].skill equal[==] constant[Client]]]
return[constant[False]] | keyword[def] identifier[group_is_client_group] ( identifier[self] )-> identifier[bool] :
literal[string]
identifier[first_unit] = identifier[self] . identifier[get_unit_by_index] ( literal[int] )
keyword[if] identifier[first_unit] :
keyword[return] identifier[first_unit] . identifier[skill] == literal[string]
keyword[return] keyword[False] | def group_is_client_group(self) -> bool:
"""
Returns: True if this group is a client group
"""
# TODO create test
first_unit = self.get_unit_by_index(1)
if first_unit:
return first_unit.skill == 'Client' # depends on [control=['if'], data=[]]
return False |
def eval_grad(self):
"""Compute gradient in Fourier domain."""
# Compute D X - S
self.Ryf[:] = self.eval_Rf(self.Yf)
# Map to spatial domain to multiply by mask
Ry = sl.irfftn(self.Ryf, self.cri.Nv, self.cri.axisN)
# Multiply by mask
self.WRy[:] = (self.W**2) * Ry
# Map back to frequency domain
WRyf = sl.rfftn(self.WRy, self.cri.Nv, self.cri.axisN)
gradf = np.conj(self.Df) * WRyf
# Multiple channel signal, multiple channel dictionary
if self.cri.Cd > 1:
gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)
return gradf | def function[eval_grad, parameter[self]]:
constant[Compute gradient in Fourier domain.]
call[name[self].Ryf][<ast.Slice object at 0x7da1b06c7dc0>] assign[=] call[name[self].eval_Rf, parameter[name[self].Yf]]
variable[Ry] assign[=] call[name[sl].irfftn, parameter[name[self].Ryf, name[self].cri.Nv, name[self].cri.axisN]]
call[name[self].WRy][<ast.Slice object at 0x7da1b06c7970>] assign[=] binary_operation[binary_operation[name[self].W ** constant[2]] * name[Ry]]
variable[WRyf] assign[=] call[name[sl].rfftn, parameter[name[self].WRy, name[self].cri.Nv, name[self].cri.axisN]]
variable[gradf] assign[=] binary_operation[call[name[np].conj, parameter[name[self].Df]] * name[WRyf]]
if compare[name[self].cri.Cd greater[>] constant[1]] begin[:]
variable[gradf] assign[=] call[name[np].sum, parameter[name[gradf]]]
return[name[gradf]] | keyword[def] identifier[eval_grad] ( identifier[self] ):
literal[string]
identifier[self] . identifier[Ryf] [:]= identifier[self] . identifier[eval_Rf] ( identifier[self] . identifier[Yf] )
identifier[Ry] = identifier[sl] . identifier[irfftn] ( identifier[self] . identifier[Ryf] , identifier[self] . identifier[cri] . identifier[Nv] , identifier[self] . identifier[cri] . identifier[axisN] )
identifier[self] . identifier[WRy] [:]=( identifier[self] . identifier[W] ** literal[int] )* identifier[Ry]
identifier[WRyf] = identifier[sl] . identifier[rfftn] ( identifier[self] . identifier[WRy] , identifier[self] . identifier[cri] . identifier[Nv] , identifier[self] . identifier[cri] . identifier[axisN] )
identifier[gradf] = identifier[np] . identifier[conj] ( identifier[self] . identifier[Df] )* identifier[WRyf]
keyword[if] identifier[self] . identifier[cri] . identifier[Cd] > literal[int] :
identifier[gradf] = identifier[np] . identifier[sum] ( identifier[gradf] , identifier[axis] = identifier[self] . identifier[cri] . identifier[axisC] , identifier[keepdims] = keyword[True] )
keyword[return] identifier[gradf] | def eval_grad(self):
"""Compute gradient in Fourier domain."""
# Compute D X - S
self.Ryf[:] = self.eval_Rf(self.Yf)
# Map to spatial domain to multiply by mask
Ry = sl.irfftn(self.Ryf, self.cri.Nv, self.cri.axisN)
# Multiply by mask
self.WRy[:] = self.W ** 2 * Ry
# Map back to frequency domain
WRyf = sl.rfftn(self.WRy, self.cri.Nv, self.cri.axisN)
gradf = np.conj(self.Df) * WRyf
# Multiple channel signal, multiple channel dictionary
if self.cri.Cd > 1:
gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True) # depends on [control=['if'], data=[]]
return gradf |
def parameter(self, component_id, location, component=None, **kwargs):
""" Add a parameter which can be referenced.
:param str param_id: identifier by which parameter may be referenced.
:param str location: location of the parameter.
:param dict component: parameter fields.
:param dict kwargs: plugin-specific arguments
"""
if component_id in self._parameters:
raise DuplicateComponentNameError(
'Another parameter with name "{}" is already registered.'.format(
component_id
)
)
component = component or {}
ret = component.copy()
ret.setdefault("name", component_id)
ret["in"] = location
# Execute all helpers from plugins
for plugin in self._plugins:
try:
ret.update(plugin.parameter_helper(component, **kwargs) or {})
except PluginMethodNotImplementedError:
continue
self._parameters[component_id] = ret
return self | def function[parameter, parameter[self, component_id, location, component]]:
constant[ Add a parameter which can be referenced.
:param str param_id: identifier by which parameter may be referenced.
:param str location: location of the parameter.
:param dict component: parameter fields.
:param dict kwargs: plugin-specific arguments
]
if compare[name[component_id] in name[self]._parameters] begin[:]
<ast.Raise object at 0x7da1b172e950>
variable[component] assign[=] <ast.BoolOp object at 0x7da1b172e050>
variable[ret] assign[=] call[name[component].copy, parameter[]]
call[name[ret].setdefault, parameter[constant[name], name[component_id]]]
call[name[ret]][constant[in]] assign[=] name[location]
for taget[name[plugin]] in starred[name[self]._plugins] begin[:]
<ast.Try object at 0x7da1b17de590>
call[name[self]._parameters][name[component_id]] assign[=] name[ret]
return[name[self]] | keyword[def] identifier[parameter] ( identifier[self] , identifier[component_id] , identifier[location] , identifier[component] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[component_id] keyword[in] identifier[self] . identifier[_parameters] :
keyword[raise] identifier[DuplicateComponentNameError] (
literal[string] . identifier[format] (
identifier[component_id]
)
)
identifier[component] = identifier[component] keyword[or] {}
identifier[ret] = identifier[component] . identifier[copy] ()
identifier[ret] . identifier[setdefault] ( literal[string] , identifier[component_id] )
identifier[ret] [ literal[string] ]= identifier[location]
keyword[for] identifier[plugin] keyword[in] identifier[self] . identifier[_plugins] :
keyword[try] :
identifier[ret] . identifier[update] ( identifier[plugin] . identifier[parameter_helper] ( identifier[component] ,** identifier[kwargs] ) keyword[or] {})
keyword[except] identifier[PluginMethodNotImplementedError] :
keyword[continue]
identifier[self] . identifier[_parameters] [ identifier[component_id] ]= identifier[ret]
keyword[return] identifier[self] | def parameter(self, component_id, location, component=None, **kwargs):
""" Add a parameter which can be referenced.
:param str param_id: identifier by which parameter may be referenced.
:param str location: location of the parameter.
:param dict component: parameter fields.
:param dict kwargs: plugin-specific arguments
"""
if component_id in self._parameters:
raise DuplicateComponentNameError('Another parameter with name "{}" is already registered.'.format(component_id)) # depends on [control=['if'], data=['component_id']]
component = component or {}
ret = component.copy()
ret.setdefault('name', component_id)
ret['in'] = location
# Execute all helpers from plugins
for plugin in self._plugins:
try:
ret.update(plugin.parameter_helper(component, **kwargs) or {}) # depends on [control=['try'], data=[]]
except PluginMethodNotImplementedError:
continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['plugin']]
self._parameters[component_id] = ret
return self |
def save_report(session):
'''
Saves the session to a temp file, and returns that path.
Also prunes the number of reports to 10 so there aren't loads building up.
'''
# prune this folder to contain the last 10 sessions
previous_reports = glob.glob(os.path.join(report_dir(), '*.pyireport'))
previous_reports.sort(reverse=True)
while len(previous_reports) > 10:
report_file = previous_reports.pop()
os.remove(report_file)
identifier = time.strftime('%Y-%m-%dT%H-%M-%S', time.localtime(session.start_time))
path = os.path.join(
report_dir(),
identifier + '.pyireport'
)
session.save(path)
return path, identifier | def function[save_report, parameter[session]]:
constant[
Saves the session to a temp file, and returns that path.
Also prunes the number of reports to 10 so there aren't loads building up.
]
variable[previous_reports] assign[=] call[name[glob].glob, parameter[call[name[os].path.join, parameter[call[name[report_dir], parameter[]], constant[*.pyireport]]]]]
call[name[previous_reports].sort, parameter[]]
while compare[call[name[len], parameter[name[previous_reports]]] greater[>] constant[10]] begin[:]
variable[report_file] assign[=] call[name[previous_reports].pop, parameter[]]
call[name[os].remove, parameter[name[report_file]]]
variable[identifier] assign[=] call[name[time].strftime, parameter[constant[%Y-%m-%dT%H-%M-%S], call[name[time].localtime, parameter[name[session].start_time]]]]
variable[path] assign[=] call[name[os].path.join, parameter[call[name[report_dir], parameter[]], binary_operation[name[identifier] + constant[.pyireport]]]]
call[name[session].save, parameter[name[path]]]
return[tuple[[<ast.Name object at 0x7da18f7205b0>, <ast.Name object at 0x7da18f720d60>]]] | keyword[def] identifier[save_report] ( identifier[session] ):
literal[string]
identifier[previous_reports] = identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[report_dir] (), literal[string] ))
identifier[previous_reports] . identifier[sort] ( identifier[reverse] = keyword[True] )
keyword[while] identifier[len] ( identifier[previous_reports] )> literal[int] :
identifier[report_file] = identifier[previous_reports] . identifier[pop] ()
identifier[os] . identifier[remove] ( identifier[report_file] )
identifier[identifier] = identifier[time] . identifier[strftime] ( literal[string] , identifier[time] . identifier[localtime] ( identifier[session] . identifier[start_time] ))
identifier[path] = identifier[os] . identifier[path] . identifier[join] (
identifier[report_dir] (),
identifier[identifier] + literal[string]
)
identifier[session] . identifier[save] ( identifier[path] )
keyword[return] identifier[path] , identifier[identifier] | def save_report(session):
"""
Saves the session to a temp file, and returns that path.
Also prunes the number of reports to 10 so there aren't loads building up.
"""
# prune this folder to contain the last 10 sessions
previous_reports = glob.glob(os.path.join(report_dir(), '*.pyireport'))
previous_reports.sort(reverse=True)
while len(previous_reports) > 10:
report_file = previous_reports.pop()
os.remove(report_file) # depends on [control=['while'], data=[]]
identifier = time.strftime('%Y-%m-%dT%H-%M-%S', time.localtime(session.start_time))
path = os.path.join(report_dir(), identifier + '.pyireport')
session.save(path)
return (path, identifier) |
def new_job_file(frontier, job_conf_file):
'''Returns new Job.'''
logging.info("loading %s", job_conf_file)
with open(job_conf_file) as f:
job_conf = yaml.safe_load(f)
return new_job(frontier, job_conf) | def function[new_job_file, parameter[frontier, job_conf_file]]:
constant[Returns new Job.]
call[name[logging].info, parameter[constant[loading %s], name[job_conf_file]]]
with call[name[open], parameter[name[job_conf_file]]] begin[:]
variable[job_conf] assign[=] call[name[yaml].safe_load, parameter[name[f]]]
return[call[name[new_job], parameter[name[frontier], name[job_conf]]]] | keyword[def] identifier[new_job_file] ( identifier[frontier] , identifier[job_conf_file] ):
literal[string]
identifier[logging] . identifier[info] ( literal[string] , identifier[job_conf_file] )
keyword[with] identifier[open] ( identifier[job_conf_file] ) keyword[as] identifier[f] :
identifier[job_conf] = identifier[yaml] . identifier[safe_load] ( identifier[f] )
keyword[return] identifier[new_job] ( identifier[frontier] , identifier[job_conf] ) | def new_job_file(frontier, job_conf_file):
"""Returns new Job."""
logging.info('loading %s', job_conf_file)
with open(job_conf_file) as f:
job_conf = yaml.safe_load(f)
return new_job(frontier, job_conf) # depends on [control=['with'], data=['f']] |
def QA_util_date_int2str(int_date):
"""
类型datetime.datatime
:param date: int 8位整数
:return: 类型str
"""
date = str(int_date)
if len(date) == 8:
return str(date[0:4] + '-' + date[4:6] + '-' + date[6:8])
elif len(date) == 10:
return date | def function[QA_util_date_int2str, parameter[int_date]]:
constant[
类型datetime.datatime
:param date: int 8位整数
:return: 类型str
]
variable[date] assign[=] call[name[str], parameter[name[int_date]]]
if compare[call[name[len], parameter[name[date]]] equal[==] constant[8]] begin[:]
return[call[name[str], parameter[binary_operation[binary_operation[binary_operation[binary_operation[call[name[date]][<ast.Slice object at 0x7da1b1f25600>] + constant[-]] + call[name[date]][<ast.Slice object at 0x7da1b1f27f70>]] + constant[-]] + call[name[date]][<ast.Slice object at 0x7da1b1f25990>]]]]] | keyword[def] identifier[QA_util_date_int2str] ( identifier[int_date] ):
literal[string]
identifier[date] = identifier[str] ( identifier[int_date] )
keyword[if] identifier[len] ( identifier[date] )== literal[int] :
keyword[return] identifier[str] ( identifier[date] [ literal[int] : literal[int] ]+ literal[string] + identifier[date] [ literal[int] : literal[int] ]+ literal[string] + identifier[date] [ literal[int] : literal[int] ])
keyword[elif] identifier[len] ( identifier[date] )== literal[int] :
keyword[return] identifier[date] | def QA_util_date_int2str(int_date):
"""
类型datetime.datatime
:param date: int 8位整数
:return: 类型str
"""
date = str(int_date)
if len(date) == 8:
return str(date[0:4] + '-' + date[4:6] + '-' + date[6:8]) # depends on [control=['if'], data=[]]
elif len(date) == 10:
return date # depends on [control=['if'], data=[]] |
def import_authors(self, tree):
"""
Retrieve all the authors used in posts
and convert it to new or existing author and
return the conversion.
"""
self.write_out(self.style.STEP('- Importing authors\n'))
post_authors = set()
for item in tree.findall('channel/item'):
post_type = item.find('{%s}post_type' % WP_NS).text
if post_type == 'post':
post_authors.add(item.find(
'{http://purl.org/dc/elements/1.1/}creator').text)
self.write_out('> %i authors found.\n' % len(post_authors))
authors = {}
for post_author in post_authors:
if self.default_author:
authors[post_author] = self.default_author
else:
authors[post_author] = self.migrate_author(
post_author.replace(' ', '-'))
return authors | def function[import_authors, parameter[self, tree]]:
constant[
Retrieve all the authors used in posts
and convert it to new or existing author and
return the conversion.
]
call[name[self].write_out, parameter[call[name[self].style.STEP, parameter[constant[- Importing authors
]]]]]
variable[post_authors] assign[=] call[name[set], parameter[]]
for taget[name[item]] in starred[call[name[tree].findall, parameter[constant[channel/item]]]] begin[:]
variable[post_type] assign[=] call[name[item].find, parameter[binary_operation[constant[{%s}post_type] <ast.Mod object at 0x7da2590d6920> name[WP_NS]]]].text
if compare[name[post_type] equal[==] constant[post]] begin[:]
call[name[post_authors].add, parameter[call[name[item].find, parameter[constant[{http://purl.org/dc/elements/1.1/}creator]]].text]]
call[name[self].write_out, parameter[binary_operation[constant[> %i authors found.
] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[post_authors]]]]]]
variable[authors] assign[=] dictionary[[], []]
for taget[name[post_author]] in starred[name[post_authors]] begin[:]
if name[self].default_author begin[:]
call[name[authors]][name[post_author]] assign[=] name[self].default_author
return[name[authors]] | keyword[def] identifier[import_authors] ( identifier[self] , identifier[tree] ):
literal[string]
identifier[self] . identifier[write_out] ( identifier[self] . identifier[style] . identifier[STEP] ( literal[string] ))
identifier[post_authors] = identifier[set] ()
keyword[for] identifier[item] keyword[in] identifier[tree] . identifier[findall] ( literal[string] ):
identifier[post_type] = identifier[item] . identifier[find] ( literal[string] % identifier[WP_NS] ). identifier[text]
keyword[if] identifier[post_type] == literal[string] :
identifier[post_authors] . identifier[add] ( identifier[item] . identifier[find] (
literal[string] ). identifier[text] )
identifier[self] . identifier[write_out] ( literal[string] % identifier[len] ( identifier[post_authors] ))
identifier[authors] ={}
keyword[for] identifier[post_author] keyword[in] identifier[post_authors] :
keyword[if] identifier[self] . identifier[default_author] :
identifier[authors] [ identifier[post_author] ]= identifier[self] . identifier[default_author]
keyword[else] :
identifier[authors] [ identifier[post_author] ]= identifier[self] . identifier[migrate_author] (
identifier[post_author] . identifier[replace] ( literal[string] , literal[string] ))
keyword[return] identifier[authors] | def import_authors(self, tree):
"""
Retrieve all the authors used in posts
and convert it to new or existing author and
return the conversion.
"""
self.write_out(self.style.STEP('- Importing authors\n'))
post_authors = set()
for item in tree.findall('channel/item'):
post_type = item.find('{%s}post_type' % WP_NS).text
if post_type == 'post':
post_authors.add(item.find('{http://purl.org/dc/elements/1.1/}creator').text) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
self.write_out('> %i authors found.\n' % len(post_authors))
authors = {}
for post_author in post_authors:
if self.default_author:
authors[post_author] = self.default_author # depends on [control=['if'], data=[]]
else:
authors[post_author] = self.migrate_author(post_author.replace(' ', '-')) # depends on [control=['for'], data=['post_author']]
return authors |
def periodic_send(self, content, interval, title=''):
"""
发送周期消息
:param content: (必填|str) - 需要发送的消息内容
:param interval: (必填|int|datetime.timedelta) - 发送消息间隔时间,支持 datetime.timedelta 或 integer 表示的秒数
:param title: (选填|str) - 需要发送的消息标题
:return: * status:发送状态,True 发送成,False 发送失败
* message:发送失败详情
"""
url = '{0}periodic_message'.format(self.remote)
if isinstance(interval, datetime.timedelta):
interval = int(interval.total_seconds())
if not isinstance(interval, int):
raise ValueError
data = self._wrap_post_data(title=title, content=content, interval=interval)
res = requests.post(url, data, timeout=self.timeout)
if res.status_code == requests.codes.ok:
res_data = json.loads(self._convert_bytes(res.content))
if res_data.get('status') == STATUS_SUCCESS:
return True, res_data.get('message')
return False, res_data.get('message')
res.raise_for_status()
return False, 'Request or Response Error' | def function[periodic_send, parameter[self, content, interval, title]]:
constant[
发送周期消息
:param content: (必填|str) - 需要发送的消息内容
:param interval: (必填|int|datetime.timedelta) - 发送消息间隔时间,支持 datetime.timedelta 或 integer 表示的秒数
:param title: (选填|str) - 需要发送的消息标题
:return: * status:发送状态,True 发送成,False 发送失败
* message:发送失败详情
]
variable[url] assign[=] call[constant[{0}periodic_message].format, parameter[name[self].remote]]
if call[name[isinstance], parameter[name[interval], name[datetime].timedelta]] begin[:]
variable[interval] assign[=] call[name[int], parameter[call[name[interval].total_seconds, parameter[]]]]
if <ast.UnaryOp object at 0x7da1b0e8c460> begin[:]
<ast.Raise object at 0x7da1b0e8c3d0>
variable[data] assign[=] call[name[self]._wrap_post_data, parameter[]]
variable[res] assign[=] call[name[requests].post, parameter[name[url], name[data]]]
if compare[name[res].status_code equal[==] name[requests].codes.ok] begin[:]
variable[res_data] assign[=] call[name[json].loads, parameter[call[name[self]._convert_bytes, parameter[name[res].content]]]]
if compare[call[name[res_data].get, parameter[constant[status]]] equal[==] name[STATUS_SUCCESS]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b0e8ebf0>, <ast.Call object at 0x7da1b0e8c610>]]]
return[tuple[[<ast.Constant object at 0x7da1b0f3a230>, <ast.Call object at 0x7da1b0f39b10>]]]
call[name[res].raise_for_status, parameter[]]
return[tuple[[<ast.Constant object at 0x7da1b0e25e10>, <ast.Constant object at 0x7da1b0e26e90>]]] | keyword[def] identifier[periodic_send] ( identifier[self] , identifier[content] , identifier[interval] , identifier[title] = literal[string] ):
literal[string]
identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[remote] )
keyword[if] identifier[isinstance] ( identifier[interval] , identifier[datetime] . identifier[timedelta] ):
identifier[interval] = identifier[int] ( identifier[interval] . identifier[total_seconds] ())
keyword[if] keyword[not] identifier[isinstance] ( identifier[interval] , identifier[int] ):
keyword[raise] identifier[ValueError]
identifier[data] = identifier[self] . identifier[_wrap_post_data] ( identifier[title] = identifier[title] , identifier[content] = identifier[content] , identifier[interval] = identifier[interval] )
identifier[res] = identifier[requests] . identifier[post] ( identifier[url] , identifier[data] , identifier[timeout] = identifier[self] . identifier[timeout] )
keyword[if] identifier[res] . identifier[status_code] == identifier[requests] . identifier[codes] . identifier[ok] :
identifier[res_data] = identifier[json] . identifier[loads] ( identifier[self] . identifier[_convert_bytes] ( identifier[res] . identifier[content] ))
keyword[if] identifier[res_data] . identifier[get] ( literal[string] )== identifier[STATUS_SUCCESS] :
keyword[return] keyword[True] , identifier[res_data] . identifier[get] ( literal[string] )
keyword[return] keyword[False] , identifier[res_data] . identifier[get] ( literal[string] )
identifier[res] . identifier[raise_for_status] ()
keyword[return] keyword[False] , literal[string] | def periodic_send(self, content, interval, title=''):
"""
发送周期消息
:param content: (必填|str) - 需要发送的消息内容
:param interval: (必填|int|datetime.timedelta) - 发送消息间隔时间,支持 datetime.timedelta 或 integer 表示的秒数
:param title: (选填|str) - 需要发送的消息标题
:return: * status:发送状态,True 发送成,False 发送失败
* message:发送失败详情
"""
url = '{0}periodic_message'.format(self.remote)
if isinstance(interval, datetime.timedelta):
interval = int(interval.total_seconds()) # depends on [control=['if'], data=[]]
if not isinstance(interval, int):
raise ValueError # depends on [control=['if'], data=[]]
data = self._wrap_post_data(title=title, content=content, interval=interval)
res = requests.post(url, data, timeout=self.timeout)
if res.status_code == requests.codes.ok:
res_data = json.loads(self._convert_bytes(res.content))
if res_data.get('status') == STATUS_SUCCESS:
return (True, res_data.get('message')) # depends on [control=['if'], data=[]]
return (False, res_data.get('message')) # depends on [control=['if'], data=[]]
res.raise_for_status()
return (False, 'Request or Response Error') |
def auth(username, password):
'''
Simple Django auth
'''
django_auth_path = __opts__['django_auth_path']
if django_auth_path not in sys.path:
sys.path.append(django_auth_path)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', __opts__['django_auth_settings'])
__django_auth_setup()
if not is_connection_usable():
connection.close()
import django.contrib.auth # pylint: disable=import-error,3rd-party-module-not-gated
user = django.contrib.auth.authenticate(username=username, password=password)
if user is not None:
if user.is_active:
log.debug('Django authentication successful')
return True
else:
log.debug('Django authentication: the password is valid but the account is disabled.')
else:
log.debug('Django authentication failed.')
return False | def function[auth, parameter[username, password]]:
constant[
Simple Django auth
]
variable[django_auth_path] assign[=] call[name[__opts__]][constant[django_auth_path]]
if compare[name[django_auth_path] <ast.NotIn object at 0x7da2590d7190> name[sys].path] begin[:]
call[name[sys].path.append, parameter[name[django_auth_path]]]
call[name[os].environ.setdefault, parameter[constant[DJANGO_SETTINGS_MODULE], call[name[__opts__]][constant[django_auth_settings]]]]
call[name[__django_auth_setup], parameter[]]
if <ast.UnaryOp object at 0x7da207f981c0> begin[:]
call[name[connection].close, parameter[]]
import module[django.contrib.auth]
variable[user] assign[=] call[name[django].contrib.auth.authenticate, parameter[]]
if compare[name[user] is_not constant[None]] begin[:]
if name[user].is_active begin[:]
call[name[log].debug, parameter[constant[Django authentication successful]]]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[auth] ( identifier[username] , identifier[password] ):
literal[string]
identifier[django_auth_path] = identifier[__opts__] [ literal[string] ]
keyword[if] identifier[django_auth_path] keyword[not] keyword[in] identifier[sys] . identifier[path] :
identifier[sys] . identifier[path] . identifier[append] ( identifier[django_auth_path] )
identifier[os] . identifier[environ] . identifier[setdefault] ( literal[string] , identifier[__opts__] [ literal[string] ])
identifier[__django_auth_setup] ()
keyword[if] keyword[not] identifier[is_connection_usable] ():
identifier[connection] . identifier[close] ()
keyword[import] identifier[django] . identifier[contrib] . identifier[auth]
identifier[user] = identifier[django] . identifier[contrib] . identifier[auth] . identifier[authenticate] ( identifier[username] = identifier[username] , identifier[password] = identifier[password] )
keyword[if] identifier[user] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[user] . identifier[is_active] :
identifier[log] . identifier[debug] ( literal[string] )
keyword[return] keyword[True]
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] )
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] )
keyword[return] keyword[False] | def auth(username, password):
"""
Simple Django auth
"""
django_auth_path = __opts__['django_auth_path']
if django_auth_path not in sys.path:
sys.path.append(django_auth_path) # depends on [control=['if'], data=['django_auth_path']]
os.environ.setdefault('DJANGO_SETTINGS_MODULE', __opts__['django_auth_settings'])
__django_auth_setup()
if not is_connection_usable():
connection.close() # depends on [control=['if'], data=[]]
import django.contrib.auth # pylint: disable=import-error,3rd-party-module-not-gated
user = django.contrib.auth.authenticate(username=username, password=password)
if user is not None:
if user.is_active:
log.debug('Django authentication successful')
return True # depends on [control=['if'], data=[]]
else:
log.debug('Django authentication: the password is valid but the account is disabled.') # depends on [control=['if'], data=['user']]
else:
log.debug('Django authentication failed.')
return False |
def learn(self, features, labels):
""" Fits the classifier
If it's state is empty, the classifier is fitted, if not
the classifier is partially fitted.
See sklearn's SGDClassifier fit and partial_fit methods.
Args:
features (:obj:`list` of :obj:`list` of :obj:`float`)
labels (:obj:`list` of :obj:`str`): Labels for each set of features.
New features are learnt.
"""
labels = np.ravel(labels)
self.__learn_labels(labels)
if len(labels) == 0:
return
labels = self.labels.transform(labels)
if self.feature_length > 0 and hasattr(self.clf, 'partial_fit'):
# FIXME? check docs, may need to pass class=[...]
self.clf = self.clf.partial_fit(features, labels)
else:
self.clf = self.clf.fit(features, labels)
self.feature_length = len(features[0]) | def function[learn, parameter[self, features, labels]]:
constant[ Fits the classifier
If it's state is empty, the classifier is fitted, if not
the classifier is partially fitted.
See sklearn's SGDClassifier fit and partial_fit methods.
Args:
features (:obj:`list` of :obj:`list` of :obj:`float`)
labels (:obj:`list` of :obj:`str`): Labels for each set of features.
New features are learnt.
]
variable[labels] assign[=] call[name[np].ravel, parameter[name[labels]]]
call[name[self].__learn_labels, parameter[name[labels]]]
if compare[call[name[len], parameter[name[labels]]] equal[==] constant[0]] begin[:]
return[None]
variable[labels] assign[=] call[name[self].labels.transform, parameter[name[labels]]]
if <ast.BoolOp object at 0x7da1b05f3af0> begin[:]
name[self].clf assign[=] call[name[self].clf.partial_fit, parameter[name[features], name[labels]]] | keyword[def] identifier[learn] ( identifier[self] , identifier[features] , identifier[labels] ):
literal[string]
identifier[labels] = identifier[np] . identifier[ravel] ( identifier[labels] )
identifier[self] . identifier[__learn_labels] ( identifier[labels] )
keyword[if] identifier[len] ( identifier[labels] )== literal[int] :
keyword[return]
identifier[labels] = identifier[self] . identifier[labels] . identifier[transform] ( identifier[labels] )
keyword[if] identifier[self] . identifier[feature_length] > literal[int] keyword[and] identifier[hasattr] ( identifier[self] . identifier[clf] , literal[string] ):
identifier[self] . identifier[clf] = identifier[self] . identifier[clf] . identifier[partial_fit] ( identifier[features] , identifier[labels] )
keyword[else] :
identifier[self] . identifier[clf] = identifier[self] . identifier[clf] . identifier[fit] ( identifier[features] , identifier[labels] )
identifier[self] . identifier[feature_length] = identifier[len] ( identifier[features] [ literal[int] ]) | def learn(self, features, labels):
""" Fits the classifier
If it's state is empty, the classifier is fitted, if not
the classifier is partially fitted.
See sklearn's SGDClassifier fit and partial_fit methods.
Args:
features (:obj:`list` of :obj:`list` of :obj:`float`)
labels (:obj:`list` of :obj:`str`): Labels for each set of features.
New features are learnt.
"""
labels = np.ravel(labels)
self.__learn_labels(labels)
if len(labels) == 0:
return # depends on [control=['if'], data=[]]
labels = self.labels.transform(labels)
if self.feature_length > 0 and hasattr(self.clf, 'partial_fit'):
# FIXME? check docs, may need to pass class=[...]
self.clf = self.clf.partial_fit(features, labels) # depends on [control=['if'], data=[]]
else:
self.clf = self.clf.fit(features, labels)
self.feature_length = len(features[0]) |
def add_user_to_group(iam_client, user, group, quiet = False):
"""
Add an IAM user to an IAM group
:param iam_client:
:param group:
:param user:
:param user_info:
:param dry_run:
:return:
"""
if not quiet:
printInfo('Adding user to group %s...' % group)
iam_client.add_user_to_group(GroupName = group, UserName = user) | def function[add_user_to_group, parameter[iam_client, user, group, quiet]]:
constant[
Add an IAM user to an IAM group
:param iam_client:
:param group:
:param user:
:param user_info:
:param dry_run:
:return:
]
if <ast.UnaryOp object at 0x7da1b265ef80> begin[:]
call[name[printInfo], parameter[binary_operation[constant[Adding user to group %s...] <ast.Mod object at 0x7da2590d6920> name[group]]]]
call[name[iam_client].add_user_to_group, parameter[]] | keyword[def] identifier[add_user_to_group] ( identifier[iam_client] , identifier[user] , identifier[group] , identifier[quiet] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[quiet] :
identifier[printInfo] ( literal[string] % identifier[group] )
identifier[iam_client] . identifier[add_user_to_group] ( identifier[GroupName] = identifier[group] , identifier[UserName] = identifier[user] ) | def add_user_to_group(iam_client, user, group, quiet=False):
"""
Add an IAM user to an IAM group
:param iam_client:
:param group:
:param user:
:param user_info:
:param dry_run:
:return:
"""
if not quiet:
printInfo('Adding user to group %s...' % group) # depends on [control=['if'], data=[]]
iam_client.add_user_to_group(GroupName=group, UserName=user) |
def check_config_mode(self, check_string=")#", pattern=r"[#\$]"):
"""Checks if the device is in configuration mode or not.
IOS-XR, unfortunately, does this:
RP/0/RSP0/CPU0:BNG(admin)#
"""
self.write_channel(self.RETURN)
output = self.read_until_pattern(pattern=pattern)
# Strip out (admin) so we don't get a false positive with (admin)#
# (admin-config)# would still match.
output = output.replace("(admin)", "")
return check_string in output | def function[check_config_mode, parameter[self, check_string, pattern]]:
constant[Checks if the device is in configuration mode or not.
IOS-XR, unfortunately, does this:
RP/0/RSP0/CPU0:BNG(admin)#
]
call[name[self].write_channel, parameter[name[self].RETURN]]
variable[output] assign[=] call[name[self].read_until_pattern, parameter[]]
variable[output] assign[=] call[name[output].replace, parameter[constant[(admin)], constant[]]]
return[compare[name[check_string] in name[output]]] | keyword[def] identifier[check_config_mode] ( identifier[self] , identifier[check_string] = literal[string] , identifier[pattern] = literal[string] ):
literal[string]
identifier[self] . identifier[write_channel] ( identifier[self] . identifier[RETURN] )
identifier[output] = identifier[self] . identifier[read_until_pattern] ( identifier[pattern] = identifier[pattern] )
identifier[output] = identifier[output] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[check_string] keyword[in] identifier[output] | def check_config_mode(self, check_string=')#', pattern='[#\\$]'):
"""Checks if the device is in configuration mode or not.
IOS-XR, unfortunately, does this:
RP/0/RSP0/CPU0:BNG(admin)#
"""
self.write_channel(self.RETURN)
output = self.read_until_pattern(pattern=pattern)
# Strip out (admin) so we don't get a false positive with (admin)#
# (admin-config)# would still match.
output = output.replace('(admin)', '')
return check_string in output |
def updateAccountResponse(self, subject, person, vendorSpecific=None):
"""CNIdentity.updateAccount(session, person) → Subject
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.updateAccount.
Args:
subject:
person:
vendorSpecific:
Returns:
"""
mmp_dict = {'person': ('person.xml', person.toxml('utf-8'))}
return self.PUT(['accounts', subject], fields=mmp_dict, headers=vendorSpecific) | def function[updateAccountResponse, parameter[self, subject, person, vendorSpecific]]:
constant[CNIdentity.updateAccount(session, person) → Subject
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.updateAccount.
Args:
subject:
person:
vendorSpecific:
Returns:
]
variable[mmp_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b1a2f6d0>], [<ast.Tuple object at 0x7da1b1a2eda0>]]
return[call[name[self].PUT, parameter[list[[<ast.Constant object at 0x7da1b1a2f0d0>, <ast.Name object at 0x7da1b1a2e4a0>]]]]] | keyword[def] identifier[updateAccountResponse] ( identifier[self] , identifier[subject] , identifier[person] , identifier[vendorSpecific] = keyword[None] ):
literal[string]
identifier[mmp_dict] ={ literal[string] :( literal[string] , identifier[person] . identifier[toxml] ( literal[string] ))}
keyword[return] identifier[self] . identifier[PUT] ([ literal[string] , identifier[subject] ], identifier[fields] = identifier[mmp_dict] , identifier[headers] = identifier[vendorSpecific] ) | def updateAccountResponse(self, subject, person, vendorSpecific=None):
"""CNIdentity.updateAccount(session, person) → Subject
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.updateAccount.
Args:
subject:
person:
vendorSpecific:
Returns:
"""
mmp_dict = {'person': ('person.xml', person.toxml('utf-8'))}
return self.PUT(['accounts', subject], fields=mmp_dict, headers=vendorSpecific) |
def delete_dataset_cache(*filenames):
    """
    Delete the cache (converted files) for a dataset.

    Parameters
    ----------
    filenames: str
        Filenames of files to delete
    """
    # Resolve each name to its on-disk cache location and remove the ones
    # that actually exist; names with no cached file are silently skipped.
    for raw_name in filenames:
        cache_path = config.get_data_path(path_string(raw_name))
        if os.path.exists(cache_path):
            os.remove(cache_path)
constant[
Delete the cache (converted files) for a dataset.
Parameters
----------
filenames: str
Filenames of files to delete
]
for taget[name[filename]] in starred[name[filenames]] begin[:]
variable[filename] assign[=] call[name[path_string], parameter[name[filename]]]
variable[path] assign[=] call[name[config].get_data_path, parameter[name[filename]]]
if call[name[os].path.exists, parameter[name[path]]] begin[:]
call[name[os].remove, parameter[name[path]]] | keyword[def] identifier[delete_dataset_cache] (* identifier[filenames] ):
literal[string]
keyword[for] identifier[filename] keyword[in] identifier[filenames] :
identifier[filename] = identifier[path_string] ( identifier[filename] )
identifier[path] = identifier[config] . identifier[get_data_path] ( identifier[filename] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
identifier[os] . identifier[remove] ( identifier[path] ) | def delete_dataset_cache(*filenames):
"""
Delete the cache (converted files) for a dataset.
Parameters
----------
filenames: str
Filenames of files to delete
"""
for filename in filenames:
filename = path_string(filename)
path = config.get_data_path(filename)
if os.path.exists(path):
os.remove(path) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filename']] |
def init(self):
    """
    Use the location_code to perform a geolookup and find the closest
    station. If the location is a pws or icao station ID, no lookup is
    performed and the code itself becomes the station id.
    """
    try:
        # A code like 'pws:XYZ' or 'icao:KSFO' already names a station.
        is_station_id = any(
            self.location_code.partition(kind + ':')[-1]
            for kind in ('pws', 'icao')
        )
    except AttributeError:
        # Numeric or other non-string codes can't be station IDs;
        # fall through to a geolookup below.
        is_station_id = False
    if is_station_id:
        self.station_id = self.location_code
        return
    self.get_station_id()
constant[
Use the location_code to perform a geolookup and find the closest
station. If the location is a pws or icao station ID, no lookup will be
peformed.
]
<ast.Try object at 0x7da204567520>
call[name[self].get_station_id, parameter[]] | keyword[def] identifier[init] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[for] identifier[no_lookup] keyword[in] ( literal[string] , literal[string] ):
identifier[sid] = identifier[self] . identifier[location_code] . identifier[partition] ( identifier[no_lookup] + literal[string] )[- literal[int] ]
keyword[if] identifier[sid] :
identifier[self] . identifier[station_id] = identifier[self] . identifier[location_code]
keyword[return]
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[self] . identifier[get_station_id] () | def init(self):
"""
Use the location_code to perform a geolookup and find the closest
station. If the location is a pws or icao station ID, no lookup will be
peformed.
"""
try:
for no_lookup in ('pws', 'icao'):
sid = self.location_code.partition(no_lookup + ':')[-1]
if sid:
self.station_id = self.location_code
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['no_lookup']] # depends on [control=['try'], data=[]]
except AttributeError:
# Numeric or some other type, either way we'll just stringify
# it below and perform a lookup.
pass # depends on [control=['except'], data=[]]
self.get_station_id() |
def connectionMade(self):
    """
    Register this new connection with the dispatcher and notify the peer.

    A fresh uid is stamped on the transport, the transport is handed to
    the dispatcher (which returns this connection's address), and a
    "setup_connection" notice carrying that address is sent back so the
    client can use it as a return address for future communications.
    """
    transport = self.transport
    transport.uid = str(uuid.uuid1())
    guid = self.dispatcher.add(transport)
    self.guid = guid
    self.dispatcher.send(guid, {'setup_connection': guid})
constant[
establish the address of this new connection and add it to the list of
sockets managed by the dispatcher
reply to the transport with a "setup_connection" notice
containing the recipient's address for use by the client as a return
address for future communications
]
name[self].transport.uid assign[=] call[name[str], parameter[call[name[uuid].uuid1, parameter[]]]]
name[self].guid assign[=] call[name[self].dispatcher.add, parameter[name[self].transport]]
call[name[self].dispatcher.send, parameter[name[self].guid, dictionary[[<ast.Constant object at 0x7da20e954190>], [<ast.Attribute object at 0x7da20e955c90>]]]] | keyword[def] identifier[connectionMade] ( identifier[self] ):
literal[string]
identifier[self] . identifier[transport] . identifier[uid] = identifier[str] ( identifier[uuid] . identifier[uuid1] ())
identifier[self] . identifier[guid] = identifier[self] . identifier[dispatcher] . identifier[add] ( identifier[self] . identifier[transport] )
identifier[self] . identifier[dispatcher] . identifier[send] ( identifier[self] . identifier[guid] ,{ literal[string] : identifier[self] . identifier[guid] }) | def connectionMade(self):
"""
establish the address of this new connection and add it to the list of
sockets managed by the dispatcher
reply to the transport with a "setup_connection" notice
containing the recipient's address for use by the client as a return
address for future communications
"""
self.transport.uid = str(uuid.uuid1())
self.guid = self.dispatcher.add(self.transport)
self.dispatcher.send(self.guid, {'setup_connection': self.guid}) |
def get_single_poll_submission(self, id, poll_id, poll_session_id):
    """
    Get a single poll submission.
    Returns the poll submission with the given id
    """
    # All three identifiers are required path parameters; this endpoint
    # takes no query params or form data.
    path = {
        "poll_id": poll_id,
        "poll_session_id": poll_session_id,
        "id": id,
    }
    data = {}
    params = {}
    self.logger.debug("GET /api/v1/polls/{poll_id}/poll_sessions/{poll_session_id}/poll_submissions/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/polls/{poll_id}/poll_sessions/{poll_session_id}/poll_submissions/{id}".format(**path), data=data, params=params, no_data=True)
constant[
Get a single poll submission.
Returns the poll submission with the given id
]
variable[path] assign[=] dictionary[[], []]
variable[data] assign[=] dictionary[[], []]
variable[params] assign[=] dictionary[[], []]
constant[ID]
call[name[path]][constant[poll_id]] assign[=] name[poll_id]
constant[ID]
call[name[path]][constant[poll_session_id]] assign[=] name[poll_session_id]
constant[ID]
call[name[path]][constant[id]] assign[=] name[id]
call[name[self].logger.debug, parameter[call[constant[GET /api/v1/polls/{poll_id}/poll_sessions/{poll_session_id}/poll_submissions/{id} with query params: {params} and form data: {data}].format, parameter[]]]]
return[call[name[self].generic_request, parameter[constant[GET], call[constant[/api/v1/polls/{poll_id}/poll_sessions/{poll_session_id}/poll_submissions/{id}].format, parameter[]]]]] | keyword[def] identifier[get_single_poll_submission] ( identifier[self] , identifier[id] , identifier[poll_id] , identifier[poll_session_id] ):
literal[string]
identifier[path] ={}
identifier[data] ={}
identifier[params] ={}
literal[string]
identifier[path] [ literal[string] ]= identifier[poll_id]
literal[string]
identifier[path] [ literal[string] ]= identifier[poll_session_id]
literal[string]
identifier[path] [ literal[string] ]= identifier[id]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[params] = identifier[params] , identifier[data] = identifier[data] ,** identifier[path] ))
keyword[return] identifier[self] . identifier[generic_request] ( literal[string] , literal[string] . identifier[format] (** identifier[path] ), identifier[data] = identifier[data] , identifier[params] = identifier[params] , identifier[no_data] = keyword[True] ) | def get_single_poll_submission(self, id, poll_id, poll_session_id):
"""
Get a single poll submission.
Returns the poll submission with the given id
"""
path = {}
data = {}
params = {} # REQUIRED - PATH - poll_id
'ID'
path['poll_id'] = poll_id # REQUIRED - PATH - poll_session_id
'ID'
path['poll_session_id'] = poll_session_id # REQUIRED - PATH - id
'ID'
path['id'] = id
self.logger.debug('GET /api/v1/polls/{poll_id}/poll_sessions/{poll_session_id}/poll_submissions/{id} with query params: {params} and form data: {data}'.format(params=params, data=data, **path))
return self.generic_request('GET', '/api/v1/polls/{poll_id}/poll_sessions/{poll_session_id}/poll_submissions/{id}'.format(**path), data=data, params=params, no_data=True) |
def uptime():
    """Returns uptime in seconds if even remotely possible, or None if not."""
    # A cached boot time short-circuits every platform-specific probe.
    if __boottime is not None:
        return time.time() - __boottime
    platform_handlers = {
        'amiga': _uptime_amiga,
        'aros12': _uptime_amiga,
        'beos5': _uptime_beos,
        'cygwin': _uptime_linux,
        'darwin': _uptime_osx,
        'haiku1': _uptime_beos,
        'linux': _uptime_linux,
        'linux-armv71': _uptime_linux,
        'linux2': _uptime_linux,
        'mac': _uptime_mac,
        'minix3': _uptime_minix,
        'riscos': _uptime_riscos,
        'sunos5': _uptime_solaris,
        'syllable': _uptime_syllable,
        'win32': _uptime_windows,
        'wince': _uptime_windows,
    }
    # Try the platform-matched probe first, then every other strategy in
    # the same order as the original or-chain until one returns a truthy
    # uptime; if all fail, the last probe's (falsy) result is returned.
    fallbacks = (
        _uptime_bsd, _uptime_plan9, _uptime_linux, _uptime_windows,
        _uptime_solaris, _uptime_beos, _uptime_amiga, _uptime_riscos,
        _uptime_posix, _uptime_syllable, _uptime_mac, _uptime_osx,
    )
    result = platform_handlers.get(sys.platform, _uptime_bsd)()
    for probe in fallbacks:
        if result:
            return result
        result = probe()
    return result
constant[Returns uptime in seconds if even remotely possible, or None if not.]
if compare[name[__boottime] is_not constant[None]] begin[:]
return[binary_operation[call[name[time].time, parameter[]] - name[__boottime]]]
return[<ast.BoolOp object at 0x7da1aff02170>] | keyword[def] identifier[uptime] ():
literal[string]
keyword[if] identifier[__boottime] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[time] . identifier[time] ()- identifier[__boottime]
keyword[return] { literal[string] : identifier[_uptime_amiga] ,
literal[string] : identifier[_uptime_amiga] ,
literal[string] : identifier[_uptime_beos] ,
literal[string] : identifier[_uptime_linux] ,
literal[string] : identifier[_uptime_osx] ,
literal[string] : identifier[_uptime_beos] ,
literal[string] : identifier[_uptime_linux] ,
literal[string] : identifier[_uptime_linux] ,
literal[string] : identifier[_uptime_linux] ,
literal[string] : identifier[_uptime_mac] ,
literal[string] : identifier[_uptime_minix] ,
literal[string] : identifier[_uptime_riscos] ,
literal[string] : identifier[_uptime_solaris] ,
literal[string] : identifier[_uptime_syllable] ,
literal[string] : identifier[_uptime_windows] ,
literal[string] : identifier[_uptime_windows] }. identifier[get] ( identifier[sys] . identifier[platform] , identifier[_uptime_bsd] )() keyword[or] identifier[_uptime_bsd] () keyword[or] identifier[_uptime_plan9] () keyword[or] identifier[_uptime_linux] () keyword[or] identifier[_uptime_windows] () keyword[or] identifier[_uptime_solaris] () keyword[or] identifier[_uptime_beos] () keyword[or] identifier[_uptime_amiga] () keyword[or] identifier[_uptime_riscos] () keyword[or] identifier[_uptime_posix] () keyword[or] identifier[_uptime_syllable] () keyword[or] identifier[_uptime_mac] () keyword[or] identifier[_uptime_osx] () | def uptime():
"""Returns uptime in seconds if even remotely possible, or None if not."""
if __boottime is not None:
return time.time() - __boottime # depends on [control=['if'], data=['__boottime']]
return {'amiga': _uptime_amiga, 'aros12': _uptime_amiga, 'beos5': _uptime_beos, 'cygwin': _uptime_linux, 'darwin': _uptime_osx, 'haiku1': _uptime_beos, 'linux': _uptime_linux, 'linux-armv71': _uptime_linux, 'linux2': _uptime_linux, 'mac': _uptime_mac, 'minix3': _uptime_minix, 'riscos': _uptime_riscos, 'sunos5': _uptime_solaris, 'syllable': _uptime_syllable, 'win32': _uptime_windows, 'wince': _uptime_windows}.get(sys.platform, _uptime_bsd)() or _uptime_bsd() or _uptime_plan9() or _uptime_linux() or _uptime_windows() or _uptime_solaris() or _uptime_beos() or _uptime_amiga() or _uptime_riscos() or _uptime_posix() or _uptime_syllable() or _uptime_mac() or _uptime_osx() |
def _build_tree(self):
    """
    Builds the tree finding an augmenting path. Alternates along
    matched and unmatched edges between X and Y. The paths are
    stored in _pred (new predecessor of nodes in Y), and
    self._x and self._y

    Returns:
        tuple: ``(_pred, _ready, istar, j, mu)`` where ``istar`` is the
        unassigned row the search started from, ``j`` is the unassigned
        column that terminates the augmenting path, ``mu`` is the length
        of the shortest path found, ``_pred`` maps each column to its
        predecessor row and ``_ready`` flags the visited columns whose
        prices get updated during augmentation.
    """
    # find unassigned i* (per the augmenting-path scheme, unassigned
    # entries of _x are the minimal ones — TODO confirm the -1 convention
    # against the caller)
    istar = np.argmin(self._x)
    # compute reduced distances from i* to every column
    self._d = self.c[istar] - self._v
    # FIX: the original used np.int / np.bool, aliases that were
    # deprecated in NumPy 1.20 and removed in 1.24; the builtins are
    # exact drop-in replacements, so behaviour is unchanged.
    _pred = np.zeros(self.n, dtype=int) + istar
    # initialize sets
    # READY: set of nodes visited and in the path (whose price gets
    # updated in augment)
    # SCAN: set of nodes at the bottom of the tree, which we need to
    # look at
    # T0DO: unvisited nodes
    _ready = np.zeros(self.n, dtype=bool)
    _scan = np.zeros(self.n, dtype=bool)
    _todo = np.zeros(self.n, dtype=bool) + True
    while True:
        # populate scan with minimum reduced distances
        if True not in _scan:
            mu = np.min(self._d[_todo])
            _scan[self._d == mu] = True
            _todo[_scan] = False
            # an unassigned (-1) column in the frontier ends the search
            j = np.argmin(self._y * _scan)
            if self._y[j] == -1 and _scan[j]:
                return _pred, _ready, istar, j, mu
        # pick jstar from scan (scan always has at least 1)
        _jstar = np.argmax(_scan)
        # pick i associated with jstar
        i = self._y[_jstar]
        _scan[_jstar] = False
        _ready[_jstar] = True
        # find shorter distances by relaxing through row i
        newdists = mu + self.cred[i, :]
        shorter = np.logical_and(newdists < self._d, _todo)
        # update distances
        self._d[shorter] = newdists[shorter]
        # update predecessors
        _pred[shorter] = i
        # columns that reached distance mu join the frontier; an
        # unassigned one terminates the augmenting path immediately
        for j in np.nonzero(np.logical_and(self._d == mu, _todo))[0]:
            if self._y[j] == -1:
                return _pred, _ready, istar, j, mu
            _scan[j] = True
            _todo[j] = False
constant[
Builds the tree finding an augmenting path. Alternates along
matched and unmatched edges between X and Y. The paths are
stored in _pred (new predecessor of nodes in Y), and
self._x and self._y
]
variable[istar] assign[=] call[name[np].argmin, parameter[name[self]._x]]
name[self]._d assign[=] binary_operation[call[name[self].c][name[istar]] - name[self]._v]
variable[_pred] assign[=] binary_operation[call[name[np].zeros, parameter[name[self].n]] + name[istar]]
variable[_ready] assign[=] call[name[np].zeros, parameter[name[self].n]]
variable[_scan] assign[=] call[name[np].zeros, parameter[name[self].n]]
variable[_todo] assign[=] binary_operation[call[name[np].zeros, parameter[name[self].n]] + constant[True]]
while constant[True] begin[:]
if compare[constant[True] <ast.NotIn object at 0x7da2590d7190> name[_scan]] begin[:]
variable[mu] assign[=] call[name[np].min, parameter[call[name[self]._d][name[_todo]]]]
call[name[_scan]][compare[name[self]._d equal[==] name[mu]]] assign[=] constant[True]
call[name[_todo]][name[_scan]] assign[=] constant[False]
variable[j] assign[=] call[name[np].argmin, parameter[binary_operation[name[self]._y * name[_scan]]]]
if <ast.BoolOp object at 0x7da18f812f80> begin[:]
return[tuple[[<ast.Name object at 0x7da18f812230>, <ast.Name object at 0x7da18f813a90>, <ast.Name object at 0x7da18f810190>, <ast.Name object at 0x7da18f810ee0>, <ast.Name object at 0x7da18f810cd0>]]]
variable[_jstar] assign[=] call[name[np].argmax, parameter[name[_scan]]]
variable[i] assign[=] call[name[self]._y][name[_jstar]]
call[name[_scan]][name[_jstar]] assign[=] constant[False]
call[name[_ready]][name[_jstar]] assign[=] constant[True]
variable[newdists] assign[=] binary_operation[name[mu] + call[name[self].cred][tuple[[<ast.Name object at 0x7da18f8110f0>, <ast.Slice object at 0x7da18f812b90>]]]]
variable[shorter] assign[=] call[name[np].logical_and, parameter[compare[name[newdists] less[<] name[self]._d], name[_todo]]]
call[name[self]._d][name[shorter]] assign[=] call[name[newdists]][name[shorter]]
call[name[_pred]][name[shorter]] assign[=] name[i]
for taget[name[j]] in starred[call[call[name[np].nonzero, parameter[call[name[np].logical_and, parameter[compare[name[self]._d equal[==] name[mu]], name[_todo]]]]]][constant[0]]] begin[:]
if compare[call[name[self]._y][name[j]] equal[==] <ast.UnaryOp object at 0x7da2045678b0>] begin[:]
return[tuple[[<ast.Name object at 0x7da204564640>, <ast.Name object at 0x7da204567a60>, <ast.Name object at 0x7da204567100>, <ast.Name object at 0x7da204564b20>, <ast.Name object at 0x7da204564280>]]]
call[name[_scan]][name[j]] assign[=] constant[True]
call[name[_todo]][name[j]] assign[=] constant[False] | keyword[def] identifier[_build_tree] ( identifier[self] ):
literal[string]
identifier[istar] = identifier[np] . identifier[argmin] ( identifier[self] . identifier[_x] )
identifier[self] . identifier[_d] = identifier[self] . identifier[c] [ identifier[istar] ]- identifier[self] . identifier[_v]
identifier[_pred] = identifier[np] . identifier[zeros] ( identifier[self] . identifier[n] , identifier[dtype] = identifier[np] . identifier[int] )+ identifier[istar]
identifier[_ready] = identifier[np] . identifier[zeros] ( identifier[self] . identifier[n] , identifier[dtype] = identifier[np] . identifier[bool] )
identifier[_scan] = identifier[np] . identifier[zeros] ( identifier[self] . identifier[n] , identifier[dtype] = identifier[np] . identifier[bool] )
identifier[_todo] = identifier[np] . identifier[zeros] ( identifier[self] . identifier[n] , identifier[dtype] = identifier[np] . identifier[bool] )+ keyword[True]
keyword[while] keyword[True] :
keyword[if] keyword[True] keyword[not] keyword[in] identifier[_scan] :
identifier[mu] = identifier[np] . identifier[min] ( identifier[self] . identifier[_d] [ identifier[_todo] ])
identifier[_scan] [ identifier[self] . identifier[_d] == identifier[mu] ]= keyword[True]
identifier[_todo] [ identifier[_scan] ]= keyword[False]
identifier[j] = identifier[np] . identifier[argmin] ( identifier[self] . identifier[_y] * identifier[_scan] )
keyword[if] identifier[self] . identifier[_y] [ identifier[j] ]==- literal[int] keyword[and] identifier[_scan] [ identifier[j] ]:
keyword[return] identifier[_pred] , identifier[_ready] , identifier[istar] , identifier[j] , identifier[mu]
identifier[_jstar] = identifier[np] . identifier[argmax] ( identifier[_scan] )
identifier[i] = identifier[self] . identifier[_y] [ identifier[_jstar] ]
identifier[_scan] [ identifier[_jstar] ]= keyword[False]
identifier[_ready] [ identifier[_jstar] ]= keyword[True]
identifier[newdists] = identifier[mu] + identifier[self] . identifier[cred] [ identifier[i] ,:]
identifier[shorter] = identifier[np] . identifier[logical_and] ( identifier[newdists] < identifier[self] . identifier[_d] , identifier[_todo] )
identifier[self] . identifier[_d] [ identifier[shorter] ]= identifier[newdists] [ identifier[shorter] ]
identifier[_pred] [ identifier[shorter] ]= identifier[i]
keyword[for] identifier[j] keyword[in] identifier[np] . identifier[nonzero] ( identifier[np] . identifier[logical_and] ( identifier[self] . identifier[_d] == identifier[mu] , identifier[_todo] ))[ literal[int] ]:
keyword[if] identifier[self] . identifier[_y] [ identifier[j] ]==- literal[int] :
keyword[return] identifier[_pred] , identifier[_ready] , identifier[istar] , identifier[j] , identifier[mu]
identifier[_scan] [ identifier[j] ]= keyword[True]
identifier[_todo] [ identifier[j] ]= keyword[False] | def _build_tree(self):
"""
Builds the tree finding an augmenting path. Alternates along
matched and unmatched edges between X and Y. The paths are
stored in _pred (new predecessor of nodes in Y), and
self._x and self._y
"""
#find unassigned i*
istar = np.argmin(self._x)
#compute distances
self._d = self.c[istar] - self._v
_pred = np.zeros(self.n, dtype=np.int) + istar
#initialize sets
#READY: set of nodes visited and in the path (whose price gets
#updated in augment)
#SCAN: set of nodes at the bottom of the tree, which we need to
#look at
#T0DO: unvisited nodes
_ready = np.zeros(self.n, dtype=np.bool)
_scan = np.zeros(self.n, dtype=np.bool)
_todo = np.zeros(self.n, dtype=np.bool) + True
while True:
#populate scan with minimum reduced distances
if True not in _scan:
mu = np.min(self._d[_todo])
_scan[self._d == mu] = True
_todo[_scan] = False
j = np.argmin(self._y * _scan)
if self._y[j] == -1 and _scan[j]:
return (_pred, _ready, istar, j, mu) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['_scan']]
#pick jstar from scan (scan always has at least 1)
_jstar = np.argmax(_scan)
#pick i associated with jstar
i = self._y[_jstar]
_scan[_jstar] = False
_ready[_jstar] = True
#find shorter distances
newdists = mu + self.cred[i, :]
shorter = np.logical_and(newdists < self._d, _todo)
#update distances
self._d[shorter] = newdists[shorter]
#update predecessors
_pred[shorter] = i
for j in np.nonzero(np.logical_and(self._d == mu, _todo))[0]:
if self._y[j] == -1:
return (_pred, _ready, istar, j, mu) # depends on [control=['if'], data=[]]
_scan[j] = True
_todo[j] = False # depends on [control=['for'], data=['j']] # depends on [control=['while'], data=[]] |
def single_val(self):
    """Return the relative error of the worst point that might make the
    data non-symmetric.

    Returns:
        tuple: ``(sv_t, sv_p)`` — two relative-error values computed by
        ``self._sv``.
    """
    # NOTE(review): both values are computed from self._tdsphere; given
    # the sv_t/sv_p naming, the second call presumably should use a phi
    # counterpart (e.g. self._pdsphere) — confirm against the rest of the
    # class before relying on sv_p.
    sv_t = self._sv(self._tdsphere)
    sv_p = self._sv(self._tdsphere)
    return (sv_t, sv_p)
constant[return relative error of worst point that might make the data none
symmetric.
]
variable[sv_t] assign[=] call[name[self]._sv, parameter[name[self]._tdsphere]]
variable[sv_p] assign[=] call[name[self]._sv, parameter[name[self]._tdsphere]]
return[tuple[[<ast.Name object at 0x7da20c6ab880>, <ast.Name object at 0x7da20c6ab7f0>]]] | keyword[def] identifier[single_val] ( identifier[self] ):
literal[string]
identifier[sv_t] = identifier[self] . identifier[_sv] ( identifier[self] . identifier[_tdsphere] )
identifier[sv_p] = identifier[self] . identifier[_sv] ( identifier[self] . identifier[_tdsphere] )
keyword[return] ( identifier[sv_t] , identifier[sv_p] ) | def single_val(self):
"""return relative error of worst point that might make the data none
symmetric.
"""
sv_t = self._sv(self._tdsphere)
sv_p = self._sv(self._tdsphere)
return (sv_t, sv_p) |
def visit_AsyncFunctionDef(self, node):
    """Visit an async function node."""
    # Resolve the raw AST node; unresolvable nodes are ignored.
    resolved = self.get_function_node(node)
    if resolved is None:
        return
    resolved._async = True
constant[Visit an async function node.]
variable[node] assign[=] call[name[self].get_function_node, parameter[name[node]]]
if compare[name[node] is_not constant[None]] begin[:]
name[node]._async assign[=] constant[True] | keyword[def] identifier[visit_AsyncFunctionDef] ( identifier[self] , identifier[node] ):
literal[string]
identifier[node] = identifier[self] . identifier[get_function_node] ( identifier[node] )
keyword[if] identifier[node] keyword[is] keyword[not] keyword[None] :
identifier[node] . identifier[_async] = keyword[True] | def visit_AsyncFunctionDef(self, node):
"""Visit an async function node."""
node = self.get_function_node(node)
if node is not None:
node._async = True # depends on [control=['if'], data=['node']] |
def add_from_dataframe(self, dataframe, data_type_id, name, description):
    """
    Serialize the specified DataFrame and upload it as a new dataset.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        Data to serialize.
    data_type_id : str
        Format to serialize to.
        Supported formats are:
            'PlainText'
            'GenericCSV'
            'GenericTSV'
            'GenericCSVNoHeader'
            'GenericTSVNoHeader'
        See the azureml.DataTypeIds class for constants.
    name : str
        Name for the new dataset.
    description : str
        Description for the new dataset.

    Returns
    -------
    SourceDataset
        Dataset that was just created.
        Use open(), read_as_binary(), read_as_text() or to_dataframe() on
        the dataset object to get its contents as a stream, bytes, str or
        pandas DataFrame.
    """
    # Validate all inputs up front so nothing is serialized on bad calls.
    _not_none('dataframe', dataframe)
    _not_none_or_empty('data_type_id', data_type_id)
    _not_none_or_empty('name', name)
    _not_none_or_empty('description', description)
    # BytesIO is a context manager, so the buffer is closed even when
    # serialization fails — the same guarantee as a try/finally.
    with BytesIO() as buffer:
        serialize_dataframe(buffer, data_type_id, dataframe)
        raw_data = buffer.getvalue()
    return self._upload(raw_data, data_type_id, name, description)
constant[
Serialize the specified DataFrame and upload it as a new dataset.
Parameters
----------
dataframe : pandas.DataFrame
Data to serialize.
data_type_id : str
Format to serialize to.
Supported formats are:
'PlainText'
'GenericCSV'
'GenericTSV'
'GenericCSVNoHeader'
'GenericTSVNoHeader'
See the azureml.DataTypeIds class for constants.
name : str
Name for the new dataset.
description : str
Description for the new dataset.
Returns
-------
SourceDataset
Dataset that was just created.
Use open(), read_as_binary(), read_as_text() or to_dataframe() on
the dataset object to get its contents as a stream, bytes, str or
pandas DataFrame.
]
call[name[_not_none], parameter[constant[dataframe], name[dataframe]]]
call[name[_not_none_or_empty], parameter[constant[data_type_id], name[data_type_id]]]
call[name[_not_none_or_empty], parameter[constant[name], name[name]]]
call[name[_not_none_or_empty], parameter[constant[description], name[description]]]
<ast.Try object at 0x7da20c6e7c40>
return[call[name[self]._upload, parameter[name[raw_data], name[data_type_id], name[name], name[description]]]] | keyword[def] identifier[add_from_dataframe] ( identifier[self] , identifier[dataframe] , identifier[data_type_id] , identifier[name] , identifier[description] ):
literal[string]
identifier[_not_none] ( literal[string] , identifier[dataframe] )
identifier[_not_none_or_empty] ( literal[string] , identifier[data_type_id] )
identifier[_not_none_or_empty] ( literal[string] , identifier[name] )
identifier[_not_none_or_empty] ( literal[string] , identifier[description] )
keyword[try] :
identifier[output] = identifier[BytesIO] ()
identifier[serialize_dataframe] ( identifier[output] , identifier[data_type_id] , identifier[dataframe] )
identifier[raw_data] = identifier[output] . identifier[getvalue] ()
keyword[finally] :
identifier[output] . identifier[close] ()
keyword[return] identifier[self] . identifier[_upload] ( identifier[raw_data] , identifier[data_type_id] , identifier[name] , identifier[description] ) | def add_from_dataframe(self, dataframe, data_type_id, name, description):
"""
Serialize the specified DataFrame and upload it as a new dataset.
Parameters
----------
dataframe : pandas.DataFrame
Data to serialize.
data_type_id : str
Format to serialize to.
Supported formats are:
'PlainText'
'GenericCSV'
'GenericTSV'
'GenericCSVNoHeader'
'GenericTSVNoHeader'
See the azureml.DataTypeIds class for constants.
name : str
Name for the new dataset.
description : str
Description for the new dataset.
Returns
-------
SourceDataset
Dataset that was just created.
Use open(), read_as_binary(), read_as_text() or to_dataframe() on
the dataset object to get its contents as a stream, bytes, str or
pandas DataFrame.
"""
_not_none('dataframe', dataframe)
_not_none_or_empty('data_type_id', data_type_id)
_not_none_or_empty('name', name)
_not_none_or_empty('description', description)
try:
output = BytesIO()
serialize_dataframe(output, data_type_id, dataframe)
raw_data = output.getvalue() # depends on [control=['try'], data=[]]
finally:
output.close()
return self._upload(raw_data, data_type_id, name, description) |
def modification_time(self):
    """dfdatetime.DateTimeValues: modification time or None if not available."""
    raw = self._fsntfs_file_entry.get_modification_time_as_integer()
    return dfdatetime_filetime.Filetime(timestamp=raw)
constant[dfdatetime.DateTimeValues: modification time or None if not available.]
variable[timestamp] assign[=] call[name[self]._fsntfs_file_entry.get_modification_time_as_integer, parameter[]]
return[call[name[dfdatetime_filetime].Filetime, parameter[]]] | keyword[def] identifier[modification_time] ( identifier[self] ):
literal[string]
identifier[timestamp] = identifier[self] . identifier[_fsntfs_file_entry] . identifier[get_modification_time_as_integer] ()
keyword[return] identifier[dfdatetime_filetime] . identifier[Filetime] ( identifier[timestamp] = identifier[timestamp] ) | def modification_time(self):
"""dfdatetime.DateTimeValues: modification time or None if not available."""
timestamp = self._fsntfs_file_entry.get_modification_time_as_integer()
return dfdatetime_filetime.Filetime(timestamp=timestamp) |
def shouts(self, group, format, *args):
    """
    Send a formatted string to every peer in a named group.
    """
    # Thin ctypes pass-through; self._as_parameter_ is the zyre handle.
    handle = self._as_parameter_
    return lib.zyre_shouts(handle, group, format, *args)
constant[
Send formatted string to a named group
]
return[call[name[lib].zyre_shouts, parameter[name[self]._as_parameter_, name[group], name[format], <ast.Starred object at 0x7da1b09130d0>]]] | keyword[def] identifier[shouts] ( identifier[self] , identifier[group] , identifier[format] ,* identifier[args] ):
literal[string]
keyword[return] identifier[lib] . identifier[zyre_shouts] ( identifier[self] . identifier[_as_parameter_] , identifier[group] , identifier[format] ,* identifier[args] ) | def shouts(self, group, format, *args):
"""
Send formatted string to a named group
"""
return lib.zyre_shouts(self._as_parameter_, group, format, *args) |
def get_edge_by_index(self, source_index: int, target_index: int) -> Optional[Edge]:
    """
    Returns the edge connecting the nodes with the specified indices if such an edge exists.

    Arguments:
        source_index (int): The index of one of the endpoints of queried edge.
        target_index (int): The index of the other endpoint of the queried edge.

    Returns:
        The edge connecting the nodes with the specified indices
        or `None` if no such node exists.
    """
    # Edges are stored under one orientation only, so probe both.
    lookup = self._edges.get
    forward = lookup((source_index, target_index))
    if forward is None:
        return lookup((target_index, source_index))
    return forward
constant[
Returns the edge connecting the nodes with the specified indices if such an edge exists.
Arguments:
source_index (int): The index of one of the endpoints of queried edge.
target_index (int): The index of the other endpoint of the queried edge.
Returns:
The edge connecting the nodes with the specified indices
or `None` if no such node exists.
]
variable[edge] assign[=] call[name[self]._edges.get, parameter[tuple[[<ast.Name object at 0x7da1b27a8b20>, <ast.Name object at 0x7da1b27a8a90>]]]]
if compare[name[edge] is_not constant[None]] begin[:]
return[name[edge]]
return[call[name[self]._edges.get, parameter[tuple[[<ast.Name object at 0x7da1b2715ed0>, <ast.Name object at 0x7da1b2716c20>]]]]] | keyword[def] identifier[get_edge_by_index] ( identifier[self] , identifier[source_index] : identifier[int] , identifier[target_index] : identifier[int] )-> identifier[Optional] [ identifier[Edge] ]:
literal[string]
identifier[edge] = identifier[self] . identifier[_edges] . identifier[get] (( identifier[source_index] , identifier[target_index] ))
keyword[if] identifier[edge] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[edge]
keyword[return] identifier[self] . identifier[_edges] . identifier[get] (( identifier[target_index] , identifier[source_index] )) | def get_edge_by_index(self, source_index: int, target_index: int) -> Optional[Edge]:
"""
Returns the edge connecting the nodes with the specified indices if such an edge exists.
Arguments:
source_index (int): The index of one of the endpoints of queried edge.
target_index (int): The index of the other endpoint of the queried edge.
Returns:
The edge connecting the nodes with the specified indices
or `None` if no such node exists.
"""
edge = self._edges.get((source_index, target_index))
if edge is not None:
return edge # depends on [control=['if'], data=['edge']]
return self._edges.get((target_index, source_index)) |
def get(self, *index):
"""
Get the instance with the specified index.
Returns:
The corresponding instance.
"""
assert self.wrapFunction is not None
if len(index) == 1 and isinstance(index[0], (tuple, list)):
index = index[0]
if len(index) == 0:
return self.wrapFunction(self._impl.get())
else:
return self.wrapFunction(self._impl.get(Tuple(index)._impl)) | def function[get, parameter[self]]:
constant[
Get the instance with the specified index.
Returns:
The corresponding instance.
]
assert[compare[name[self].wrapFunction is_not constant[None]]]
if <ast.BoolOp object at 0x7da18f58ee30> begin[:]
variable[index] assign[=] call[name[index]][constant[0]]
if compare[call[name[len], parameter[name[index]]] equal[==] constant[0]] begin[:]
return[call[name[self].wrapFunction, parameter[call[name[self]._impl.get, parameter[]]]]] | keyword[def] identifier[get] ( identifier[self] ,* identifier[index] ):
literal[string]
keyword[assert] identifier[self] . identifier[wrapFunction] keyword[is] keyword[not] keyword[None]
keyword[if] identifier[len] ( identifier[index] )== literal[int] keyword[and] identifier[isinstance] ( identifier[index] [ literal[int] ],( identifier[tuple] , identifier[list] )):
identifier[index] = identifier[index] [ literal[int] ]
keyword[if] identifier[len] ( identifier[index] )== literal[int] :
keyword[return] identifier[self] . identifier[wrapFunction] ( identifier[self] . identifier[_impl] . identifier[get] ())
keyword[else] :
keyword[return] identifier[self] . identifier[wrapFunction] ( identifier[self] . identifier[_impl] . identifier[get] ( identifier[Tuple] ( identifier[index] ). identifier[_impl] )) | def get(self, *index):
"""
Get the instance with the specified index.
Returns:
The corresponding instance.
"""
assert self.wrapFunction is not None
if len(index) == 1 and isinstance(index[0], (tuple, list)):
index = index[0] # depends on [control=['if'], data=[]]
if len(index) == 0:
return self.wrapFunction(self._impl.get()) # depends on [control=['if'], data=[]]
else:
return self.wrapFunction(self._impl.get(Tuple(index)._impl)) |
def fasta(args):
"""
%prog fasta fastqfiles
Convert fastq to fasta and qual file.
"""
p = OptionParser(fasta.__doc__)
p.add_option("--seqtk", default=False, action="store_true",
help="Use seqtk to convert")
p.set_outdir()
p.set_outfile(outfile=None)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fastqfiles = args
outdir = opts.outdir
if outdir and outdir != ".":
mkdir(outdir)
fastqfile = fastqfiles[0]
pf = op.basename(fastqfile)
gzinput = pf.endswith(".gz")
if gzinput:
pf = pf.rsplit(".", 1)[0]
pf, sf = pf.rsplit(".", 1)
if sf not in ("fq", "fastq"):
logging.debug("Assumed FASTA: suffix not `fq` or `fastq`")
return fastqfile, None
fastafile, qualfile = pf + ".fasta", pf + ".qual"
outfile = opts.outfile or fastafile
outfile = op.join(outdir, outfile)
if opts.seqtk:
if need_update(fastqfiles, outfile):
for i, fastqfile in enumerate(fastqfiles):
cmd = "seqtk seq -A {0} -L 30 -l 70".format(fastqfile)
# First one creates file, following ones append to it
sh(cmd, outfile=outfile, append=i)
else:
logging.debug("Outfile `{0}` already exists.".format(outfile))
return outfile, None
for fastqfile in fastqfiles:
SeqIO.convert(fastqfile, "fastq", fastafile, "fasta")
SeqIO.convert(fastqfile, "fastq", qualfile, "qual")
return fastafile, qualfile | def function[fasta, parameter[args]]:
constant[
%prog fasta fastqfiles
Convert fastq to fasta and qual file.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[fasta].__doc__]]
call[name[p].add_option, parameter[constant[--seqtk]]]
call[name[p].set_outdir, parameter[]]
call[name[p].set_outfile, parameter[]]
<ast.Tuple object at 0x7da2044c0af0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] less[<] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da2044c2050>]]
variable[fastqfiles] assign[=] name[args]
variable[outdir] assign[=] name[opts].outdir
if <ast.BoolOp object at 0x7da2044c3370> begin[:]
call[name[mkdir], parameter[name[outdir]]]
variable[fastqfile] assign[=] call[name[fastqfiles]][constant[0]]
variable[pf] assign[=] call[name[op].basename, parameter[name[fastqfile]]]
variable[gzinput] assign[=] call[name[pf].endswith, parameter[constant[.gz]]]
if name[gzinput] begin[:]
variable[pf] assign[=] call[call[name[pf].rsplit, parameter[constant[.], constant[1]]]][constant[0]]
<ast.Tuple object at 0x7da2044c0c40> assign[=] call[name[pf].rsplit, parameter[constant[.], constant[1]]]
if compare[name[sf] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da2044c1540>, <ast.Constant object at 0x7da2044c3700>]]] begin[:]
call[name[logging].debug, parameter[constant[Assumed FASTA: suffix not `fq` or `fastq`]]]
return[tuple[[<ast.Name object at 0x7da2044c1bd0>, <ast.Constant object at 0x7da2044c2980>]]]
<ast.Tuple object at 0x7da2044c3fa0> assign[=] tuple[[<ast.BinOp object at 0x7da2044c1a20>, <ast.BinOp object at 0x7da2044c09a0>]]
variable[outfile] assign[=] <ast.BoolOp object at 0x7da2044c0bb0>
variable[outfile] assign[=] call[name[op].join, parameter[name[outdir], name[outfile]]]
if name[opts].seqtk begin[:]
if call[name[need_update], parameter[name[fastqfiles], name[outfile]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da2044c2140>, <ast.Name object at 0x7da2044c33a0>]]] in starred[call[name[enumerate], parameter[name[fastqfiles]]]] begin[:]
variable[cmd] assign[=] call[constant[seqtk seq -A {0} -L 30 -l 70].format, parameter[name[fastqfile]]]
call[name[sh], parameter[name[cmd]]]
return[tuple[[<ast.Name object at 0x7da2044c35b0>, <ast.Constant object at 0x7da2044c28c0>]]]
for taget[name[fastqfile]] in starred[name[fastqfiles]] begin[:]
call[name[SeqIO].convert, parameter[name[fastqfile], constant[fastq], name[fastafile], constant[fasta]]]
call[name[SeqIO].convert, parameter[name[fastqfile], constant[fastq], name[qualfile], constant[qual]]]
return[tuple[[<ast.Name object at 0x7da2044c3670>, <ast.Name object at 0x7da2044c00d0>]]] | keyword[def] identifier[fasta] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[fasta] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[set_outdir] ()
identifier[p] . identifier[set_outfile] ( identifier[outfile] = keyword[None] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )< literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[fastqfiles] = identifier[args]
identifier[outdir] = identifier[opts] . identifier[outdir]
keyword[if] identifier[outdir] keyword[and] identifier[outdir] != literal[string] :
identifier[mkdir] ( identifier[outdir] )
identifier[fastqfile] = identifier[fastqfiles] [ literal[int] ]
identifier[pf] = identifier[op] . identifier[basename] ( identifier[fastqfile] )
identifier[gzinput] = identifier[pf] . identifier[endswith] ( literal[string] )
keyword[if] identifier[gzinput] :
identifier[pf] = identifier[pf] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[pf] , identifier[sf] = identifier[pf] . identifier[rsplit] ( literal[string] , literal[int] )
keyword[if] identifier[sf] keyword[not] keyword[in] ( literal[string] , literal[string] ):
identifier[logging] . identifier[debug] ( literal[string] )
keyword[return] identifier[fastqfile] , keyword[None]
identifier[fastafile] , identifier[qualfile] = identifier[pf] + literal[string] , identifier[pf] + literal[string]
identifier[outfile] = identifier[opts] . identifier[outfile] keyword[or] identifier[fastafile]
identifier[outfile] = identifier[op] . identifier[join] ( identifier[outdir] , identifier[outfile] )
keyword[if] identifier[opts] . identifier[seqtk] :
keyword[if] identifier[need_update] ( identifier[fastqfiles] , identifier[outfile] ):
keyword[for] identifier[i] , identifier[fastqfile] keyword[in] identifier[enumerate] ( identifier[fastqfiles] ):
identifier[cmd] = literal[string] . identifier[format] ( identifier[fastqfile] )
identifier[sh] ( identifier[cmd] , identifier[outfile] = identifier[outfile] , identifier[append] = identifier[i] )
keyword[else] :
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[outfile] ))
keyword[return] identifier[outfile] , keyword[None]
keyword[for] identifier[fastqfile] keyword[in] identifier[fastqfiles] :
identifier[SeqIO] . identifier[convert] ( identifier[fastqfile] , literal[string] , identifier[fastafile] , literal[string] )
identifier[SeqIO] . identifier[convert] ( identifier[fastqfile] , literal[string] , identifier[qualfile] , literal[string] )
keyword[return] identifier[fastafile] , identifier[qualfile] | def fasta(args):
"""
%prog fasta fastqfiles
Convert fastq to fasta and qual file.
"""
p = OptionParser(fasta.__doc__)
p.add_option('--seqtk', default=False, action='store_true', help='Use seqtk to convert')
p.set_outdir()
p.set_outfile(outfile=None)
(opts, args) = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
fastqfiles = args
outdir = opts.outdir
if outdir and outdir != '.':
mkdir(outdir) # depends on [control=['if'], data=[]]
fastqfile = fastqfiles[0]
pf = op.basename(fastqfile)
gzinput = pf.endswith('.gz')
if gzinput:
pf = pf.rsplit('.', 1)[0] # depends on [control=['if'], data=[]]
(pf, sf) = pf.rsplit('.', 1)
if sf not in ('fq', 'fastq'):
logging.debug('Assumed FASTA: suffix not `fq` or `fastq`')
return (fastqfile, None) # depends on [control=['if'], data=[]]
(fastafile, qualfile) = (pf + '.fasta', pf + '.qual')
outfile = opts.outfile or fastafile
outfile = op.join(outdir, outfile)
if opts.seqtk:
if need_update(fastqfiles, outfile):
for (i, fastqfile) in enumerate(fastqfiles):
cmd = 'seqtk seq -A {0} -L 30 -l 70'.format(fastqfile)
# First one creates file, following ones append to it
sh(cmd, outfile=outfile, append=i) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
logging.debug('Outfile `{0}` already exists.'.format(outfile))
return (outfile, None) # depends on [control=['if'], data=[]]
for fastqfile in fastqfiles:
SeqIO.convert(fastqfile, 'fastq', fastafile, 'fasta')
SeqIO.convert(fastqfile, 'fastq', qualfile, 'qual') # depends on [control=['for'], data=['fastqfile']]
return (fastafile, qualfile) |
def update(context, id, etag, name, component_types,
label, next_topic_id, active, product_id, data):
"""update(context, id, etag, name, label, next_topic_id, active,
product_id, data)
Update a Topic.
>>> dcictl topic-update [OPTIONS]
:param string id: ID of the Topic [required]
:param string etag: Entity tag of the Topic resource [required]
:param string name: Name of the Topic
:param string component_types: list of component types separated by commas
:param string label: Label of the Topic
:param string data: JSON data to pass during remote CI update
:param boolean active: Set the topic in the active state
:param string product_id: The product the topic belongs to
:param string next_topic_id: The ID of the next topic for upgrades
"""
if component_types:
component_types = component_types.split(',')
result = topic.update(context, id=id, etag=etag, name=name,
component_types=component_types,
label=label, next_topic_id=next_topic_id,
state=utils.active_string(active),
product_id=product_id, data=data)
utils.format_output(result, context.format) | def function[update, parameter[context, id, etag, name, component_types, label, next_topic_id, active, product_id, data]]:
constant[update(context, id, etag, name, label, next_topic_id, active,
product_id, data)
Update a Topic.
>>> dcictl topic-update [OPTIONS]
:param string id: ID of the Topic [required]
:param string etag: Entity tag of the Topic resource [required]
:param string name: Name of the Topic
:param string component_types: list of component types separated by commas
:param string label: Label of the Topic
:param string data: JSON data to pass during remote CI update
:param boolean active: Set the topic in the active state
:param string product_id: The product the topic belongs to
:param string next_topic_id: The ID of the next topic for upgrades
]
if name[component_types] begin[:]
variable[component_types] assign[=] call[name[component_types].split, parameter[constant[,]]]
variable[result] assign[=] call[name[topic].update, parameter[name[context]]]
call[name[utils].format_output, parameter[name[result], name[context].format]] | keyword[def] identifier[update] ( identifier[context] , identifier[id] , identifier[etag] , identifier[name] , identifier[component_types] ,
identifier[label] , identifier[next_topic_id] , identifier[active] , identifier[product_id] , identifier[data] ):
literal[string]
keyword[if] identifier[component_types] :
identifier[component_types] = identifier[component_types] . identifier[split] ( literal[string] )
identifier[result] = identifier[topic] . identifier[update] ( identifier[context] , identifier[id] = identifier[id] , identifier[etag] = identifier[etag] , identifier[name] = identifier[name] ,
identifier[component_types] = identifier[component_types] ,
identifier[label] = identifier[label] , identifier[next_topic_id] = identifier[next_topic_id] ,
identifier[state] = identifier[utils] . identifier[active_string] ( identifier[active] ),
identifier[product_id] = identifier[product_id] , identifier[data] = identifier[data] )
identifier[utils] . identifier[format_output] ( identifier[result] , identifier[context] . identifier[format] ) | def update(context, id, etag, name, component_types, label, next_topic_id, active, product_id, data):
"""update(context, id, etag, name, label, next_topic_id, active,
product_id, data)
Update a Topic.
>>> dcictl topic-update [OPTIONS]
:param string id: ID of the Topic [required]
:param string etag: Entity tag of the Topic resource [required]
:param string name: Name of the Topic
:param string component_types: list of component types separated by commas
:param string label: Label of the Topic
:param string data: JSON data to pass during remote CI update
:param boolean active: Set the topic in the active state
:param string product_id: The product the topic belongs to
:param string next_topic_id: The ID of the next topic for upgrades
"""
if component_types:
component_types = component_types.split(',') # depends on [control=['if'], data=[]]
result = topic.update(context, id=id, etag=etag, name=name, component_types=component_types, label=label, next_topic_id=next_topic_id, state=utils.active_string(active), product_id=product_id, data=data)
utils.format_output(result, context.format) |
def func2md(self, func, clsname=None, names=None, depth=3):
"""Takes a function (or method) and documents it.
Args:
clsname (str, optional): class name to prepend to funcname.
depth (int, optional): number of ### to append to function name
"""
section = "#" * depth
if names is None:
names = [func.__name__]
funcname = ", ".join(names)
escfuncname = ", ".join(["`%s`" % funcname if funcname.startswith("_") else funcname for funcname in names])
header = "%s%s" % ("%s." % clsname if clsname else "", escfuncname)
path = self.get_src_path(func)
doc = self.doc2md(func)
args, kwargs = [], []
spec = getargspec(func)
vargsname, kwargsname = spec.varargs, spec.keywords
vargs = list(make_iter(spec.args)) if spec.args else []
defaults = list(make_iter(spec.defaults)) if spec.defaults else []
while vargs:
if vargs and vargs[0] == "self":
args.append(vargs.pop(0))
elif len(vargs) > len(defaults):
args.append(vargs.pop(0))
else:
default = defaults.pop(0)
if isinstance(default, str):
default = "\"%s\"" % default
else:
default = "%s" % str(default)
kwargs.append((vargs.pop(0), default))
if args:
args = ", ".join("%s" % arg for arg in args)
if kwargs:
kwargs = ", ".join("%s=%s" % kwarg for kwarg in kwargs)
if args:
kwargs = ", " + kwargs
if vargsname:
vargsname = "*%s" % vargsname
if args or kwargs:
vargsname = ", " + vargsname
if kwargsname:
kwargsname = "**%s" % kwargsname
if args or kwargs or vargsname:
kwargsname = ", " + kwargsname
_FUNCDEF = "{funcname}({args}{kwargs}{vargs}{vkwargs})"
funcdef = _FUNCDEF.format(funcname=funcname,
args=args or "",
kwargs=kwargs or "",
vargs=vargsname or "",
vkwargs=kwargsname or "")
# split the function definition if it is too long
lmax = 90
if len(funcdef) > lmax:
# wrap in the args list
split = funcdef.split("(", 1)
# we gradually build the string again
rest = split[1]
args = rest.split(", ")
funcname = "(".join(split[:1]) + "("
lline = len(funcname)
parts = []
for arg in args:
larg = len(arg)
if larg > lmax - 5:
# not much to do if arg is so long
parts.append(arg)
elif lline + larg > lmax:
# the next arg is too long, break the line
parts.append("\\\n " + arg)
lline = 0
else:
parts.append(arg)
lline += len(parts[-1])
funcdef = funcname + ", ".join(parts)
# build the signature
string = FUNC_TEMPLATE.format(section=section,
header=header,
funcdef=funcdef,
path=path,
doc=doc if doc else "*No documentation found.*")
return string | def function[func2md, parameter[self, func, clsname, names, depth]]:
constant[Takes a function (or method) and documents it.
Args:
clsname (str, optional): class name to prepend to funcname.
depth (int, optional): number of ### to append to function name
]
variable[section] assign[=] binary_operation[constant[#] * name[depth]]
if compare[name[names] is constant[None]] begin[:]
variable[names] assign[=] list[[<ast.Attribute object at 0x7da20e9550c0>]]
variable[funcname] assign[=] call[constant[, ].join, parameter[name[names]]]
variable[escfuncname] assign[=] call[constant[, ].join, parameter[<ast.ListComp object at 0x7da20e954bb0>]]
variable[header] assign[=] binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.IfExp object at 0x7da20e956110>, <ast.Name object at 0x7da20e957df0>]]]
variable[path] assign[=] call[name[self].get_src_path, parameter[name[func]]]
variable[doc] assign[=] call[name[self].doc2md, parameter[name[func]]]
<ast.Tuple object at 0x7da20e955330> assign[=] tuple[[<ast.List object at 0x7da20e957fa0>, <ast.List object at 0x7da20e957340>]]
variable[spec] assign[=] call[name[getargspec], parameter[name[func]]]
<ast.Tuple object at 0x7da20e9572b0> assign[=] tuple[[<ast.Attribute object at 0x7da20e954fd0>, <ast.Attribute object at 0x7da20e956fe0>]]
variable[vargs] assign[=] <ast.IfExp object at 0x7da20e955ae0>
variable[defaults] assign[=] <ast.IfExp object at 0x7da20e957580>
while name[vargs] begin[:]
if <ast.BoolOp object at 0x7da20e957730> begin[:]
call[name[args].append, parameter[call[name[vargs].pop, parameter[constant[0]]]]]
if name[args] begin[:]
variable[args] assign[=] call[constant[, ].join, parameter[<ast.GeneratorExp object at 0x7da18c4cd960>]]
if name[kwargs] begin[:]
variable[kwargs] assign[=] call[constant[, ].join, parameter[<ast.GeneratorExp object at 0x7da18c4cdfc0>]]
if name[args] begin[:]
variable[kwargs] assign[=] binary_operation[constant[, ] + name[kwargs]]
if name[vargsname] begin[:]
variable[vargsname] assign[=] binary_operation[constant[*%s] <ast.Mod object at 0x7da2590d6920> name[vargsname]]
if <ast.BoolOp object at 0x7da18c4cf610> begin[:]
variable[vargsname] assign[=] binary_operation[constant[, ] + name[vargsname]]
if name[kwargsname] begin[:]
variable[kwargsname] assign[=] binary_operation[constant[**%s] <ast.Mod object at 0x7da2590d6920> name[kwargsname]]
if <ast.BoolOp object at 0x7da18c4ce9b0> begin[:]
variable[kwargsname] assign[=] binary_operation[constant[, ] + name[kwargsname]]
variable[_FUNCDEF] assign[=] constant[{funcname}({args}{kwargs}{vargs}{vkwargs})]
variable[funcdef] assign[=] call[name[_FUNCDEF].format, parameter[]]
variable[lmax] assign[=] constant[90]
if compare[call[name[len], parameter[name[funcdef]]] greater[>] name[lmax]] begin[:]
variable[split] assign[=] call[name[funcdef].split, parameter[constant[(], constant[1]]]
variable[rest] assign[=] call[name[split]][constant[1]]
variable[args] assign[=] call[name[rest].split, parameter[constant[, ]]]
variable[funcname] assign[=] binary_operation[call[constant[(].join, parameter[call[name[split]][<ast.Slice object at 0x7da18c4cf670>]]] + constant[(]]
variable[lline] assign[=] call[name[len], parameter[name[funcname]]]
variable[parts] assign[=] list[[]]
for taget[name[arg]] in starred[name[args]] begin[:]
variable[larg] assign[=] call[name[len], parameter[name[arg]]]
if compare[name[larg] greater[>] binary_operation[name[lmax] - constant[5]]] begin[:]
call[name[parts].append, parameter[name[arg]]]
<ast.AugAssign object at 0x7da18c4ce260>
variable[funcdef] assign[=] binary_operation[name[funcname] + call[constant[, ].join, parameter[name[parts]]]]
variable[string] assign[=] call[name[FUNC_TEMPLATE].format, parameter[]]
return[name[string]] | keyword[def] identifier[func2md] ( identifier[self] , identifier[func] , identifier[clsname] = keyword[None] , identifier[names] = keyword[None] , identifier[depth] = literal[int] ):
literal[string]
identifier[section] = literal[string] * identifier[depth]
keyword[if] identifier[names] keyword[is] keyword[None] :
identifier[names] =[ identifier[func] . identifier[__name__] ]
identifier[funcname] = literal[string] . identifier[join] ( identifier[names] )
identifier[escfuncname] = literal[string] . identifier[join] ([ literal[string] % identifier[funcname] keyword[if] identifier[funcname] . identifier[startswith] ( literal[string] ) keyword[else] identifier[funcname] keyword[for] identifier[funcname] keyword[in] identifier[names] ])
identifier[header] = literal[string] %( literal[string] % identifier[clsname] keyword[if] identifier[clsname] keyword[else] literal[string] , identifier[escfuncname] )
identifier[path] = identifier[self] . identifier[get_src_path] ( identifier[func] )
identifier[doc] = identifier[self] . identifier[doc2md] ( identifier[func] )
identifier[args] , identifier[kwargs] =[],[]
identifier[spec] = identifier[getargspec] ( identifier[func] )
identifier[vargsname] , identifier[kwargsname] = identifier[spec] . identifier[varargs] , identifier[spec] . identifier[keywords]
identifier[vargs] = identifier[list] ( identifier[make_iter] ( identifier[spec] . identifier[args] )) keyword[if] identifier[spec] . identifier[args] keyword[else] []
identifier[defaults] = identifier[list] ( identifier[make_iter] ( identifier[spec] . identifier[defaults] )) keyword[if] identifier[spec] . identifier[defaults] keyword[else] []
keyword[while] identifier[vargs] :
keyword[if] identifier[vargs] keyword[and] identifier[vargs] [ literal[int] ]== literal[string] :
identifier[args] . identifier[append] ( identifier[vargs] . identifier[pop] ( literal[int] ))
keyword[elif] identifier[len] ( identifier[vargs] )> identifier[len] ( identifier[defaults] ):
identifier[args] . identifier[append] ( identifier[vargs] . identifier[pop] ( literal[int] ))
keyword[else] :
identifier[default] = identifier[defaults] . identifier[pop] ( literal[int] )
keyword[if] identifier[isinstance] ( identifier[default] , identifier[str] ):
identifier[default] = literal[string] % identifier[default]
keyword[else] :
identifier[default] = literal[string] % identifier[str] ( identifier[default] )
identifier[kwargs] . identifier[append] (( identifier[vargs] . identifier[pop] ( literal[int] ), identifier[default] ))
keyword[if] identifier[args] :
identifier[args] = literal[string] . identifier[join] ( literal[string] % identifier[arg] keyword[for] identifier[arg] keyword[in] identifier[args] )
keyword[if] identifier[kwargs] :
identifier[kwargs] = literal[string] . identifier[join] ( literal[string] % identifier[kwarg] keyword[for] identifier[kwarg] keyword[in] identifier[kwargs] )
keyword[if] identifier[args] :
identifier[kwargs] = literal[string] + identifier[kwargs]
keyword[if] identifier[vargsname] :
identifier[vargsname] = literal[string] % identifier[vargsname]
keyword[if] identifier[args] keyword[or] identifier[kwargs] :
identifier[vargsname] = literal[string] + identifier[vargsname]
keyword[if] identifier[kwargsname] :
identifier[kwargsname] = literal[string] % identifier[kwargsname]
keyword[if] identifier[args] keyword[or] identifier[kwargs] keyword[or] identifier[vargsname] :
identifier[kwargsname] = literal[string] + identifier[kwargsname]
identifier[_FUNCDEF] = literal[string]
identifier[funcdef] = identifier[_FUNCDEF] . identifier[format] ( identifier[funcname] = identifier[funcname] ,
identifier[args] = identifier[args] keyword[or] literal[string] ,
identifier[kwargs] = identifier[kwargs] keyword[or] literal[string] ,
identifier[vargs] = identifier[vargsname] keyword[or] literal[string] ,
identifier[vkwargs] = identifier[kwargsname] keyword[or] literal[string] )
identifier[lmax] = literal[int]
keyword[if] identifier[len] ( identifier[funcdef] )> identifier[lmax] :
identifier[split] = identifier[funcdef] . identifier[split] ( literal[string] , literal[int] )
identifier[rest] = identifier[split] [ literal[int] ]
identifier[args] = identifier[rest] . identifier[split] ( literal[string] )
identifier[funcname] = literal[string] . identifier[join] ( identifier[split] [: literal[int] ])+ literal[string]
identifier[lline] = identifier[len] ( identifier[funcname] )
identifier[parts] =[]
keyword[for] identifier[arg] keyword[in] identifier[args] :
identifier[larg] = identifier[len] ( identifier[arg] )
keyword[if] identifier[larg] > identifier[lmax] - literal[int] :
identifier[parts] . identifier[append] ( identifier[arg] )
keyword[elif] identifier[lline] + identifier[larg] > identifier[lmax] :
identifier[parts] . identifier[append] ( literal[string] + identifier[arg] )
identifier[lline] = literal[int]
keyword[else] :
identifier[parts] . identifier[append] ( identifier[arg] )
identifier[lline] += identifier[len] ( identifier[parts] [- literal[int] ])
identifier[funcdef] = identifier[funcname] + literal[string] . identifier[join] ( identifier[parts] )
identifier[string] = identifier[FUNC_TEMPLATE] . identifier[format] ( identifier[section] = identifier[section] ,
identifier[header] = identifier[header] ,
identifier[funcdef] = identifier[funcdef] ,
identifier[path] = identifier[path] ,
identifier[doc] = identifier[doc] keyword[if] identifier[doc] keyword[else] literal[string] )
keyword[return] identifier[string] | def func2md(self, func, clsname=None, names=None, depth=3):
"""Takes a function (or method) and documents it.
Args:
clsname (str, optional): class name to prepend to funcname.
depth (int, optional): number of ### to append to function name
"""
section = '#' * depth
if names is None:
names = [func.__name__] # depends on [control=['if'], data=['names']]
funcname = ', '.join(names)
escfuncname = ', '.join(['`%s`' % funcname if funcname.startswith('_') else funcname for funcname in names])
header = '%s%s' % ('%s.' % clsname if clsname else '', escfuncname)
path = self.get_src_path(func)
doc = self.doc2md(func)
(args, kwargs) = ([], [])
spec = getargspec(func)
(vargsname, kwargsname) = (spec.varargs, spec.keywords)
vargs = list(make_iter(spec.args)) if spec.args else []
defaults = list(make_iter(spec.defaults)) if spec.defaults else []
while vargs:
if vargs and vargs[0] == 'self':
args.append(vargs.pop(0)) # depends on [control=['if'], data=[]]
elif len(vargs) > len(defaults):
args.append(vargs.pop(0)) # depends on [control=['if'], data=[]]
else:
default = defaults.pop(0)
if isinstance(default, str):
default = '"%s"' % default # depends on [control=['if'], data=[]]
else:
default = '%s' % str(default)
kwargs.append((vargs.pop(0), default)) # depends on [control=['while'], data=[]]
if args:
args = ', '.join(('%s' % arg for arg in args)) # depends on [control=['if'], data=[]]
if kwargs:
kwargs = ', '.join(('%s=%s' % kwarg for kwarg in kwargs))
if args:
kwargs = ', ' + kwargs # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if vargsname:
vargsname = '*%s' % vargsname
if args or kwargs:
vargsname = ', ' + vargsname # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if kwargsname:
kwargsname = '**%s' % kwargsname
if args or kwargs or vargsname:
kwargsname = ', ' + kwargsname # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
_FUNCDEF = '{funcname}({args}{kwargs}{vargs}{vkwargs})'
funcdef = _FUNCDEF.format(funcname=funcname, args=args or '', kwargs=kwargs or '', vargs=vargsname or '', vkwargs=kwargsname or '')
# split the function definition if it is too long
lmax = 90
if len(funcdef) > lmax:
# wrap in the args list
split = funcdef.split('(', 1)
# we gradually build the string again
rest = split[1]
args = rest.split(', ')
funcname = '('.join(split[:1]) + '('
lline = len(funcname)
parts = []
for arg in args:
larg = len(arg)
if larg > lmax - 5:
# not much to do if arg is so long
parts.append(arg) # depends on [control=['if'], data=[]]
elif lline + larg > lmax:
# the next arg is too long, break the line
parts.append('\\\n ' + arg)
lline = 0 # depends on [control=['if'], data=[]]
else:
parts.append(arg)
lline += len(parts[-1]) # depends on [control=['for'], data=['arg']]
funcdef = funcname + ', '.join(parts) # depends on [control=['if'], data=['lmax']]
# build the signature
string = FUNC_TEMPLATE.format(section=section, header=header, funcdef=funcdef, path=path, doc=doc if doc else '*No documentation found.*')
return string |
def inv(self):
"""The inverse translation"""
result = Translation(-self.t)
result._cache_inv = self
return result | def function[inv, parameter[self]]:
constant[The inverse translation]
variable[result] assign[=] call[name[Translation], parameter[<ast.UnaryOp object at 0x7da1b26ad600>]]
name[result]._cache_inv assign[=] name[self]
return[name[result]] | keyword[def] identifier[inv] ( identifier[self] ):
literal[string]
identifier[result] = identifier[Translation] (- identifier[self] . identifier[t] )
identifier[result] . identifier[_cache_inv] = identifier[self]
keyword[return] identifier[result] | def inv(self):
"""The inverse translation"""
result = Translation(-self.t)
result._cache_inv = self
return result |
def add_user(self,
             username,
             email,
             directoryId=1,
             password=None,
             fullname=None,
             notify=False,
             active=True,
             ignore_existing=False,
             application_keys=None,
             ):
    """Create a new JIRA user.

    :param username: username for the new account
    :type username: str
    :param email: email address for the new account
    :type email: str
    :param directoryId: directory the new user should belong to (Default: 1)
    :type directoryId: int
    :param password: optional password for the new account
    :type password: Optional[str]
    :param fullname: optional display name; defaults to ``username``
    :type fullname: Optional[str]
    :param notify: whether to send a notification to the new user (Default: False)
    :type notify: bool
    :param active: whether the account should be active on creation (Default: True)
    :type active: bool
    :param ignore_existing: treat an already-existing username as success
        instead of raising (Default: False)
    :type ignore_existing: bool
    :param application_keys: keys of products the user should have access to
    :type application_keys: Optional[list]
    :return: True if the user was created (or already existed and
        ``ignore_existing`` was set)
    :rtype: bool
    :raises JIRAError: if the username already exists and ``ignore_existing``
        is False
    """
    display_name = fullname if fullname else username
    # TODO(ssbarnea): default the directoryID to the first directory in jira
    # instead of 1, which is the internal one.
    url = self._options['server'] + '/rest/api/latest/user'
    # Payload layout based on
    # https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
    fields = OrderedDict()
    fields['displayName'] = display_name
    fields['emailAddress'] = email
    fields['name'] = username
    if password:
        fields['password'] = password
    if notify:
        fields['notification'] = 'True'
    if application_keys is not None:
        fields['applicationKeys'] = application_keys
    try:
        self._session.post(url, data=json.dumps(fields))
    except JIRAError as exc:
        errors = exc.response.json()['errors']
        if (ignore_existing and errors.get('username') ==
                'A user with that username already exists.'):
            return True
        raise exc
    return True
constant[Create a new JIRA user.
:param username: the username of the new user
:type username: str
:param email: email address of the new user
:type email: str
:param directoryId: The directory ID the new user should be a part of (Default: 1)
:type directoryId: int
:param password: Optional, the password for the new user
:type password: Optional[str]
:param fullname: Optional, the full name of the new user
:type fullname: Optional[str]
:param notify: Whether or not to send a notification to the new user. (Default: False)
:type notify: bool
:param active: Whether or not to make the new user active upon creation. (Default: True)
:type active: bool
:param ignore_existing: Whether or not to ignore and existing user. (Default: False)
:type ignore_existing: bool
:param applicationKeys: Keys of products user should have access to
:type applicationKeys: Optional[list]
:return: Whether or not the user creation was successful.
:rtype: bool
:raises JIRAError: If username already exists and `ignore_existing` has not been set to `True`.
]
if <ast.UnaryOp object at 0x7da1b21898d0> begin[:]
variable[fullname] assign[=] name[username]
variable[url] assign[=] binary_operation[call[name[self]._options][constant[server]] + constant[/rest/api/latest/user]]
variable[x] assign[=] call[name[OrderedDict], parameter[]]
call[name[x]][constant[displayName]] assign[=] name[fullname]
call[name[x]][constant[emailAddress]] assign[=] name[email]
call[name[x]][constant[name]] assign[=] name[username]
if name[password] begin[:]
call[name[x]][constant[password]] assign[=] name[password]
if name[notify] begin[:]
call[name[x]][constant[notification]] assign[=] constant[True]
if compare[name[application_keys] is_not constant[None]] begin[:]
call[name[x]][constant[applicationKeys]] assign[=] name[application_keys]
variable[payload] assign[=] call[name[json].dumps, parameter[name[x]]]
<ast.Try object at 0x7da1b2189bd0>
return[constant[True]] | keyword[def] identifier[add_user] ( identifier[self] ,
identifier[username] ,
identifier[email] ,
identifier[directoryId] = literal[int] ,
identifier[password] = keyword[None] ,
identifier[fullname] = keyword[None] ,
identifier[notify] = keyword[False] ,
identifier[active] = keyword[True] ,
identifier[ignore_existing] = keyword[False] ,
identifier[application_keys] = keyword[None] ,
):
literal[string]
keyword[if] keyword[not] identifier[fullname] :
identifier[fullname] = identifier[username]
identifier[url] = identifier[self] . identifier[_options] [ literal[string] ]+ literal[string]
identifier[x] = identifier[OrderedDict] ()
identifier[x] [ literal[string] ]= identifier[fullname]
identifier[x] [ literal[string] ]= identifier[email]
identifier[x] [ literal[string] ]= identifier[username]
keyword[if] identifier[password] :
identifier[x] [ literal[string] ]= identifier[password]
keyword[if] identifier[notify] :
identifier[x] [ literal[string] ]= literal[string]
keyword[if] identifier[application_keys] keyword[is] keyword[not] keyword[None] :
identifier[x] [ literal[string] ]= identifier[application_keys]
identifier[payload] = identifier[json] . identifier[dumps] ( identifier[x] )
keyword[try] :
identifier[self] . identifier[_session] . identifier[post] ( identifier[url] , identifier[data] = identifier[payload] )
keyword[except] identifier[JIRAError] keyword[as] identifier[e] :
identifier[err] = identifier[e] . identifier[response] . identifier[json] ()[ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[err] keyword[and] identifier[err] [ literal[string] ]== literal[string] keyword[and] identifier[ignore_existing] :
keyword[return] keyword[True]
keyword[raise] identifier[e]
keyword[return] keyword[True] | def add_user(self, username, email, directoryId=1, password=None, fullname=None, notify=False, active=True, ignore_existing=False, application_keys=None):
"""Create a new JIRA user.
:param username: the username of the new user
:type username: str
:param email: email address of the new user
:type email: str
:param directoryId: The directory ID the new user should be a part of (Default: 1)
:type directoryId: int
:param password: Optional, the password for the new user
:type password: Optional[str]
:param fullname: Optional, the full name of the new user
:type fullname: Optional[str]
:param notify: Whether or not to send a notification to the new user. (Default: False)
:type notify: bool
:param active: Whether or not to make the new user active upon creation. (Default: True)
:type active: bool
:param ignore_existing: Whether or not to ignore and existing user. (Default: False)
:type ignore_existing: bool
:param applicationKeys: Keys of products user should have access to
:type applicationKeys: Optional[list]
:return: Whether or not the user creation was successful.
:rtype: bool
:raises JIRAError: If username already exists and `ignore_existing` has not been set to `True`.
"""
if not fullname:
fullname = username # depends on [control=['if'], data=[]]
# TODO(ssbarnea): default the directoryID to the first directory in jira instead
# of 1 which is the internal one.
url = self._options['server'] + '/rest/api/latest/user'
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
x = OrderedDict()
x['displayName'] = fullname
x['emailAddress'] = email
x['name'] = username
if password:
x['password'] = password # depends on [control=['if'], data=[]]
if notify:
x['notification'] = 'True' # depends on [control=['if'], data=[]]
if application_keys is not None:
x['applicationKeys'] = application_keys # depends on [control=['if'], data=['application_keys']]
payload = json.dumps(x)
try:
self._session.post(url, data=payload) # depends on [control=['try'], data=[]]
except JIRAError as e:
err = e.response.json()['errors']
if 'username' in err and err['username'] == 'A user with that username already exists.' and ignore_existing:
return True # depends on [control=['if'], data=[]]
raise e # depends on [control=['except'], data=['e']]
return True |
def update_from_sam(self, sam, sam_reader):
    '''Updates graph info from a pysam.AlignedSegment object'''
    # Only pairs where both mates map, and to *different* references,
    # contribute a link to the graph.
    skip = (sam.is_unmapped
            or sam.mate_is_unmapped
            or sam.reference_id == sam.next_reference_id)
    if skip:
        return
    current = link.Link(sam, sam_reader, self.ref_lengths)
    read_name = sam.query_name
    pending = self.partial_links.get(read_name)
    if pending is None:
        # First mate seen for this read; wait for its partner.
        self.partial_links[read_name] = current
    else:
        # Second mate: combine with the stored half and file the
        # completed link under the sorted pair of reference names.
        current.merge(pending)
        del self.partial_links[read_name]
        key = tuple(sorted((current.refnames[0], current.refnames[1])))
        current.sort()
        self.links.setdefault(key, []).append(current)
constant[Updates graph info from a pysam.AlignedSegment object]
if <ast.BoolOp object at 0x7da18fe92320> begin[:]
return[None]
variable[new_link] assign[=] call[name[link].Link, parameter[name[sam], name[sam_reader], name[self].ref_lengths]]
variable[read_name] assign[=] name[sam].query_name
if compare[name[read_name] in name[self].partial_links] begin[:]
call[name[new_link].merge, parameter[call[name[self].partial_links][name[read_name]]]]
<ast.Delete object at 0x7da18fe93be0>
variable[key] assign[=] call[name[tuple], parameter[call[name[sorted], parameter[tuple[[<ast.Subscript object at 0x7da18fe91ae0>, <ast.Subscript object at 0x7da18fe939d0>]]]]]]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[self].links] begin[:]
call[name[self].links][name[key]] assign[=] list[[]]
call[name[new_link].sort, parameter[]]
call[call[name[self].links][name[key]].append, parameter[name[new_link]]] | keyword[def] identifier[update_from_sam] ( identifier[self] , identifier[sam] , identifier[sam_reader] ):
literal[string]
keyword[if] identifier[sam] . identifier[is_unmapped] keyword[or] identifier[sam] . identifier[mate_is_unmapped] keyword[or] ( identifier[sam] . identifier[reference_id] == identifier[sam] . identifier[next_reference_id] ):
keyword[return]
identifier[new_link] = identifier[link] . identifier[Link] ( identifier[sam] , identifier[sam_reader] , identifier[self] . identifier[ref_lengths] )
identifier[read_name] = identifier[sam] . identifier[query_name]
keyword[if] identifier[read_name] keyword[in] identifier[self] . identifier[partial_links] :
identifier[new_link] . identifier[merge] ( identifier[self] . identifier[partial_links] [ identifier[read_name] ])
keyword[del] identifier[self] . identifier[partial_links] [ identifier[read_name] ]
identifier[key] = identifier[tuple] ( identifier[sorted] (( identifier[new_link] . identifier[refnames] [ literal[int] ], identifier[new_link] . identifier[refnames] [ literal[int] ])))
keyword[if] identifier[key] keyword[not] keyword[in] identifier[self] . identifier[links] :
identifier[self] . identifier[links] [ identifier[key] ]=[]
identifier[new_link] . identifier[sort] ()
identifier[self] . identifier[links] [ identifier[key] ]. identifier[append] ( identifier[new_link] )
keyword[else] :
identifier[self] . identifier[partial_links] [ identifier[read_name] ]= identifier[new_link] | def update_from_sam(self, sam, sam_reader):
"""Updates graph info from a pysam.AlignedSegment object"""
if sam.is_unmapped or sam.mate_is_unmapped or sam.reference_id == sam.next_reference_id:
return # depends on [control=['if'], data=[]]
new_link = link.Link(sam, sam_reader, self.ref_lengths)
read_name = sam.query_name
if read_name in self.partial_links:
new_link.merge(self.partial_links[read_name])
del self.partial_links[read_name]
key = tuple(sorted((new_link.refnames[0], new_link.refnames[1])))
if key not in self.links:
self.links[key] = [] # depends on [control=['if'], data=['key']]
new_link.sort()
self.links[key].append(new_link) # depends on [control=['if'], data=['read_name']]
else:
self.partial_links[read_name] = new_link |
def fastaWrite(fileHandleOrFile, name, seq, mode="w"):
    """Write a single fasta record to a file or file handle.

    :param fileHandleOrFile: an open file handle, or a path string (the
        file is opened with *mode* and closed before returning)
    :param name: sequence header, written after ">"
    :param seq: sequence string; only ASCII letters and "-" are allowed
    :param mode: file mode used when *fileHandleOrFile* is a path
    :raises RuntimeError: if *seq* is not a string or contains characters
        outside the allowed alphabet
    """
    fileHandle = _getFileHandle(fileHandleOrFile, mode)
    valid_chars = set(string.ascii_letters + "-")
    # Validate with explicit raises rather than assert-in-try, so the
    # checks survive running Python with optimizations (-O) and we avoid
    # the Python-2-only `unicode` name.
    if not isinstance(seq, str):
        raise RuntimeError("Sequence is not unicode or string")
    bad_chars = {x for x in seq if x not in valid_chars}
    if bad_chars:
        raise RuntimeError("Invalid FASTA character(s) see in fasta sequence: {}".format(bad_chars))
    fileHandle.write(">%s\n" % name)
    # Wrap the sequence at 100 characters per line.
    chunkSize = 100
    for i in range(0, len(seq), chunkSize):  # range, not Py2-only xrange
        fileHandle.write("%s\n" % seq[i:i + chunkSize])
    # Only close handles we opened ourselves (i.e. a path was passed in).
    if isinstance(fileHandleOrFile, str):
        fileHandle.close()
constant[Writes out fasta file
]
variable[fileHandle] assign[=] call[name[_getFileHandle], parameter[name[fileHandleOrFile], name[mode]]]
variable[valid_chars] assign[=] <ast.SetComp object at 0x7da204564a30>
<ast.Try object at 0x7da2045665f0>
<ast.Try object at 0x7da1b26af820>
call[name[fileHandle].write, parameter[binary_operation[constant[>%s
] <ast.Mod object at 0x7da2590d6920> name[name]]]]
variable[chunkSize] assign[=] constant[100]
for taget[name[i]] in starred[call[name[xrange], parameter[constant[0], call[name[len], parameter[name[seq]]], name[chunkSize]]]] begin[:]
call[name[fileHandle].write, parameter[binary_operation[constant[%s
] <ast.Mod object at 0x7da2590d6920> call[name[seq]][<ast.Slice object at 0x7da207f03eb0>]]]]
if call[name[isinstance], parameter[name[fileHandleOrFile], constant[].__class__]] begin[:]
call[name[fileHandle].close, parameter[]] | keyword[def] identifier[fastaWrite] ( identifier[fileHandleOrFile] , identifier[name] , identifier[seq] , identifier[mode] = literal[string] ):
literal[string]
identifier[fileHandle] = identifier[_getFileHandle] ( identifier[fileHandleOrFile] , identifier[mode] )
identifier[valid_chars] ={ identifier[x] keyword[for] identifier[x] keyword[in] identifier[string] . identifier[ascii_letters] + literal[string] }
keyword[try] :
keyword[assert] identifier[any] ([ identifier[isinstance] ( identifier[seq] , identifier[unicode] ), identifier[isinstance] ( identifier[seq] , identifier[str] )])
keyword[except] identifier[AssertionError] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[try] :
keyword[assert] identifier[all] ( identifier[x] keyword[in] identifier[valid_chars] keyword[for] identifier[x] keyword[in] identifier[seq] )
keyword[except] identifier[AssertionError] :
identifier[bad_chars] ={ identifier[x] keyword[for] identifier[x] keyword[in] identifier[seq] keyword[if] identifier[x] keyword[not] keyword[in] identifier[valid_chars] }
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[bad_chars] ))
identifier[fileHandle] . identifier[write] ( literal[string] % identifier[name] )
identifier[chunkSize] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( literal[int] , identifier[len] ( identifier[seq] ), identifier[chunkSize] ):
identifier[fileHandle] . identifier[write] ( literal[string] % identifier[seq] [ identifier[i] : identifier[i] + identifier[chunkSize] ])
keyword[if] identifier[isinstance] ( identifier[fileHandleOrFile] , literal[string] . identifier[__class__] ):
identifier[fileHandle] . identifier[close] () | def fastaWrite(fileHandleOrFile, name, seq, mode='w'):
"""Writes out fasta file
"""
fileHandle = _getFileHandle(fileHandleOrFile, mode)
valid_chars = {x for x in string.ascii_letters + '-'}
try:
assert any([isinstance(seq, unicode), isinstance(seq, str)]) # depends on [control=['try'], data=[]]
except AssertionError:
raise RuntimeError('Sequence is not unicode or string') # depends on [control=['except'], data=[]]
try:
assert all((x in valid_chars for x in seq)) # depends on [control=['try'], data=[]]
except AssertionError:
bad_chars = {x for x in seq if x not in valid_chars}
raise RuntimeError('Invalid FASTA character(s) see in fasta sequence: {}'.format(bad_chars)) # depends on [control=['except'], data=[]]
fileHandle.write('>%s\n' % name)
chunkSize = 100
for i in xrange(0, len(seq), chunkSize):
fileHandle.write('%s\n' % seq[i:i + chunkSize]) # depends on [control=['for'], data=['i']]
if isinstance(fileHandleOrFile, ''.__class__):
fileHandle.close() # depends on [control=['if'], data=[]] |
def set_value(hive,
              key,
              vname=None,
              vdata=None,
              vtype='REG_SZ',
              use_32bit_registry=False,
              volatile=False):
    '''
    Set a value in the Windows registry.

    If ``vname`` is given, the data is stored under that value name;
    otherwise it becomes the (Default) value of ``key``.

    Args:
        hive (str): Hive name. One of HKEY_LOCAL_MACHINE/HKLM,
            HKEY_CURRENT_USER/HKCU, HKEY_USER/HKU, HKEY_CLASSES_ROOT/HKCR
            or HKEY_CURRENT_CONFIG/HKCC.
        key (str): Path-like key under the hive. Created if missing.
        vname (str): Value name to set. When omitted, the key's (Default)
            value is set instead.
        vdata (str, int, list, bytes): Data to store. The expected Python
            type follows ``vtype``: REG_BINARY -> bytes (str is converted),
            REG_DWORD/REG_QWORD -> int, REG_SZ/REG_EXPAND_SZ -> str,
            REG_MULTI_SZ -> list of str. Optional; the key is still created
            without it. The (Default) value is always REG_SZ.
        vtype (str): The registry value type (see ``vdata``).
        use_32bit_registry (bool): Write to the 32bit registry view on
            64bit installations; ignored on 32bit machines.
        volatile (bool): Create the key volatile (not persisted across a
            reset/shutdown). Only has an effect when the key is created.

    Returns:
        bool: True if the value was written, otherwise False.

    Raises:
        CommandExecutionError: If ``hive`` is not a recognized hive name.

    Usage:

    .. code-block:: python

        import salt.utils.win_reg
        winreg.set_value(hive='HKLM', key='SOFTWARE\\\\Salt', vname='version', vdata='2015.5.2')
    '''
    hive_u = _to_unicode(hive)
    key_u = _to_unicode(key)
    vname_u = _to_unicode(vname)
    vtype_u = _to_unicode(vtype)

    registry = Registry()
    try:
        hkey = registry.hkeys[hive_u]
    except KeyError:
        raise CommandExecutionError('Invalid Hive: {0}'.format(hive_u))

    vtype_value = registry.vtype[vtype_u]
    access_mask = registry.registry_32[use_32bit_registry] | win32con.KEY_ALL_ACCESS
    vdata_cast = cast_vdata(vdata=vdata, vtype=vtype_u)

    option_name = 'REG_OPTION_VOLATILE' if volatile else 'REG_OPTION_NON_VOLATILE'
    create_options = registry.opttype[option_name]

    handle = None
    try:
        handle, _ = win32api.RegCreateKeyEx(hkey, key_u, access_mask,
                                            Options=create_options)
        win32api.RegSetValueEx(handle, vname_u, 0, vtype_value, vdata_cast)
        win32api.RegFlushKey(handle)
        # Tell running applications the registry changed.
        broadcast_change()
        return True
    except (win32api.error, SystemError, ValueError, TypeError):  # pylint: disable=E0602
        log.exception('Encountered error setting registry value')
        return False
    finally:
        # Always release the key handle, on success or failure.
        if handle:
            win32api.RegCloseKey(handle)
constant[
Sets a value in the registry. If ``vname`` is passed, it will be the value
for that value name, otherwise it will be the default value for the
specified key
Args:
hive (str):
The name of the hive. Can be one of the following
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
key (str):
The key (looks like a path) to the value name.
vname (str):
The value name. These are the individual name/data pairs under the
key. If not passed, the key (Default) value will be set.
vdata (str, int, list, bytes):
The value you'd like to set. If a value name (vname) is passed, this
will be the data for that value name. If not, this will be the
(Default) value for the key.
The type of data this parameter expects is determined by the value
type specified in ``vtype``. The correspondence is as follows:
- REG_BINARY: Binary data (str in Py2, bytes in Py3)
- REG_DWORD: int
- REG_EXPAND_SZ: str
- REG_MULTI_SZ: list of str
- REG_QWORD: int
- REG_SZ: str
.. note::
When setting REG_BINARY, string data will be converted to
binary. You can pass base64 encoded using the ``binascii``
built-in module. Use ``binascii.b2a_base64('your data')``
.. note::
The type for the (Default) value is always REG_SZ and cannot be
changed.
.. note::
This parameter is optional. If not passed, the Key will be
created with no associated item/value pairs.
vtype (str):
The value type. The possible values of the vtype parameter are
indicated above in the description of the vdata parameter.
use_32bit_registry (bool):
Sets the 32bit portion of the registry on 64bit installations. On
32bit machines this is ignored.
volatile (bool):
When this parameter has a value of True, the registry key will be
made volatile (i.e. it will not persist beyond a system reset or
shutdown). This parameter only has an effect when a key is being
created and at no other time.
Returns:
bool: True if successful, otherwise False
Usage:
This will set the version value to 2015.5.2 in the SOFTWARE\Salt key in
the HKEY_LOCAL_MACHINE hive
.. code-block:: python
import salt.utils.win_reg
winreg.set_value(hive='HKLM', key='SOFTWARE\Salt', vname='version', vdata='2015.5.2')
Usage:
This function is strict about the type of vdata. For instance this
example will fail because vtype has a value of REG_SZ and vdata has a
type of int (as opposed to str as expected).
.. code-block:: python
import salt.utils.win_reg
winreg.set_value(hive='HKLM', key='SOFTWARE\Salt', vname='str_data', vdata=1.2)
Usage:
In this next example vdata is properly quoted and should succeed.
.. code-block:: python
import salt.utils.win_reg
winreg.set_value(hive='HKLM', key='SOFTWARE\Salt', vname='str_data', vdata='1.2')
Usage:
This is an example of using vtype REG_BINARY. Both ``set_value``
commands will set the same value ``Salty Test``
.. code-block:: python
import salt.utils.win_reg
winreg.set_value(hive='HKLM', key='SOFTWARE\Salt', vname='bin_data', vdata='Salty Test', vtype='REG_BINARY')
import binascii
bin_data = binascii.b2a_base64('Salty Test')
winreg.set_value(hive='HKLM', key='SOFTWARE\Salt', vname='bin_data_encoded', vdata=bin_data, vtype='REG_BINARY')
Usage:
An example using vtype REG_MULTI_SZ is as follows:
.. code-block:: python
import salt.utils.win_reg
winreg.set_value(hive='HKLM', key='SOFTWARE\Salt', vname='list_data', vdata=['Salt', 'is', 'great'], vtype='REG_MULTI_SZ')
]
variable[local_hive] assign[=] call[name[_to_unicode], parameter[name[hive]]]
variable[local_key] assign[=] call[name[_to_unicode], parameter[name[key]]]
variable[local_vname] assign[=] call[name[_to_unicode], parameter[name[vname]]]
variable[local_vtype] assign[=] call[name[_to_unicode], parameter[name[vtype]]]
variable[registry] assign[=] call[name[Registry], parameter[]]
<ast.Try object at 0x7da1b2007580>
variable[vtype_value] assign[=] call[name[registry].vtype][name[local_vtype]]
variable[access_mask] assign[=] binary_operation[call[name[registry].registry_32][name[use_32bit_registry]] <ast.BitOr object at 0x7da2590d6aa0> name[win32con].KEY_ALL_ACCESS]
variable[local_vdata] assign[=] call[name[cast_vdata], parameter[]]
if name[volatile] begin[:]
variable[create_options] assign[=] call[name[registry].opttype][constant[REG_OPTION_VOLATILE]]
variable[handle] assign[=] constant[None]
<ast.Try object at 0x7da1b1f83610> | keyword[def] identifier[set_value] ( identifier[hive] ,
identifier[key] ,
identifier[vname] = keyword[None] ,
identifier[vdata] = keyword[None] ,
identifier[vtype] = literal[string] ,
identifier[use_32bit_registry] = keyword[False] ,
identifier[volatile] = keyword[False] ):
literal[string]
identifier[local_hive] = identifier[_to_unicode] ( identifier[hive] )
identifier[local_key] = identifier[_to_unicode] ( identifier[key] )
identifier[local_vname] = identifier[_to_unicode] ( identifier[vname] )
identifier[local_vtype] = identifier[_to_unicode] ( identifier[vtype] )
identifier[registry] = identifier[Registry] ()
keyword[try] :
identifier[hkey] = identifier[registry] . identifier[hkeys] [ identifier[local_hive] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[CommandExecutionError] ( literal[string] . identifier[format] ( identifier[local_hive] ))
identifier[vtype_value] = identifier[registry] . identifier[vtype] [ identifier[local_vtype] ]
identifier[access_mask] = identifier[registry] . identifier[registry_32] [ identifier[use_32bit_registry] ]| identifier[win32con] . identifier[KEY_ALL_ACCESS]
identifier[local_vdata] = identifier[cast_vdata] ( identifier[vdata] = identifier[vdata] , identifier[vtype] = identifier[local_vtype] )
keyword[if] identifier[volatile] :
identifier[create_options] = identifier[registry] . identifier[opttype] [ literal[string] ]
keyword[else] :
identifier[create_options] = identifier[registry] . identifier[opttype] [ literal[string] ]
identifier[handle] = keyword[None]
keyword[try] :
identifier[handle] , identifier[_] = identifier[win32api] . identifier[RegCreateKeyEx] ( identifier[hkey] , identifier[local_key] , identifier[access_mask] ,
identifier[Options] = identifier[create_options] )
identifier[win32api] . identifier[RegSetValueEx] ( identifier[handle] , identifier[local_vname] , literal[int] , identifier[vtype_value] , identifier[local_vdata] )
identifier[win32api] . identifier[RegFlushKey] ( identifier[handle] )
identifier[broadcast_change] ()
keyword[return] keyword[True]
keyword[except] ( identifier[win32api] . identifier[error] , identifier[SystemError] , identifier[ValueError] , identifier[TypeError] ):
identifier[log] . identifier[exception] ( literal[string] )
keyword[return] keyword[False]
keyword[finally] :
keyword[if] identifier[handle] :
identifier[win32api] . identifier[RegCloseKey] ( identifier[handle] ) | def set_value(hive, key, vname=None, vdata=None, vtype='REG_SZ', use_32bit_registry=False, volatile=False):
"""
Sets a value in the registry. If ``vname`` is passed, it will be the value
for that value name, otherwise it will be the default value for the
specified key
Args:
hive (str):
The name of the hive. Can be one of the following
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
key (str):
The key (looks like a path) to the value name.
vname (str):
The value name. These are the individual name/data pairs under the
key. If not passed, the key (Default) value will be set.
vdata (str, int, list, bytes):
The value you'd like to set. If a value name (vname) is passed, this
will be the data for that value name. If not, this will be the
(Default) value for the key.
The type of data this parameter expects is determined by the value
type specified in ``vtype``. The correspondence is as follows:
- REG_BINARY: Binary data (str in Py2, bytes in Py3)
- REG_DWORD: int
- REG_EXPAND_SZ: str
- REG_MULTI_SZ: list of str
- REG_QWORD: int
- REG_SZ: str
.. note::
When setting REG_BINARY, string data will be converted to
binary. You can pass base64 encoded using the ``binascii``
built-in module. Use ``binascii.b2a_base64('your data')``
.. note::
The type for the (Default) value is always REG_SZ and cannot be
changed.
.. note::
This parameter is optional. If not passed, the Key will be
created with no associated item/value pairs.
vtype (str):
The value type. The possible values of the vtype parameter are
indicated above in the description of the vdata parameter.
use_32bit_registry (bool):
Sets the 32bit portion of the registry on 64bit installations. On
32bit machines this is ignored.
volatile (bool):
When this parameter has a value of True, the registry key will be
made volatile (i.e. it will not persist beyond a system reset or
shutdown). This parameter only has an effect when a key is being
created and at no other time.
Returns:
bool: True if successful, otherwise False
Usage:
This will set the version value to 2015.5.2 in the SOFTWARE\\Salt key in
the HKEY_LOCAL_MACHINE hive
.. code-block:: python
import salt.utils.win_reg
winreg.set_value(hive='HKLM', key='SOFTWARE\\Salt', vname='version', vdata='2015.5.2')
Usage:
This function is strict about the type of vdata. For instance this
example will fail because vtype has a value of REG_SZ and vdata has a
type of int (as opposed to str as expected).
.. code-block:: python
import salt.utils.win_reg
winreg.set_value(hive='HKLM', key='SOFTWARE\\Salt', vname='str_data', vdata=1.2)
Usage:
In this next example vdata is properly quoted and should succeed.
.. code-block:: python
import salt.utils.win_reg
winreg.set_value(hive='HKLM', key='SOFTWARE\\Salt', vname='str_data', vdata='1.2')
Usage:
This is an example of using vtype REG_BINARY. Both ``set_value``
commands will set the same value ``Salty Test``
.. code-block:: python
import salt.utils.win_reg
winreg.set_value(hive='HKLM', key='SOFTWARE\\Salt', vname='bin_data', vdata='Salty Test', vtype='REG_BINARY')
import binascii
bin_data = binascii.b2a_base64('Salty Test')
winreg.set_value(hive='HKLM', key='SOFTWARE\\Salt', vname='bin_data_encoded', vdata=bin_data, vtype='REG_BINARY')
Usage:
An example using vtype REG_MULTI_SZ is as follows:
.. code-block:: python
import salt.utils.win_reg
winreg.set_value(hive='HKLM', key='SOFTWARE\\Salt', vname='list_data', vdata=['Salt', 'is', 'great'], vtype='REG_MULTI_SZ')
"""
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)
local_vname = _to_unicode(vname)
local_vtype = _to_unicode(vtype)
registry = Registry()
try:
hkey = registry.hkeys[local_hive] # depends on [control=['try'], data=[]]
except KeyError:
raise CommandExecutionError('Invalid Hive: {0}'.format(local_hive)) # depends on [control=['except'], data=[]]
vtype_value = registry.vtype[local_vtype]
access_mask = registry.registry_32[use_32bit_registry] | win32con.KEY_ALL_ACCESS
local_vdata = cast_vdata(vdata=vdata, vtype=local_vtype)
if volatile:
create_options = registry.opttype['REG_OPTION_VOLATILE'] # depends on [control=['if'], data=[]]
else:
create_options = registry.opttype['REG_OPTION_NON_VOLATILE']
handle = None
try:
(handle, _) = win32api.RegCreateKeyEx(hkey, local_key, access_mask, Options=create_options)
win32api.RegSetValueEx(handle, local_vname, 0, vtype_value, local_vdata)
win32api.RegFlushKey(handle)
broadcast_change()
return True # depends on [control=['try'], data=[]]
except (win32api.error, SystemError, ValueError, TypeError): # pylint: disable=E0602
log.exception('Encountered error setting registry value')
return False # depends on [control=['except'], data=[]]
finally:
if handle:
win32api.RegCloseKey(handle) # depends on [control=['if'], data=[]] |
def _add_timedeltalike_scalar(self, other):
    """
    Add a timedelta-like scalar to the underlying i8 values.

    Returns the result as an int64 ("i8") ndarray view; NaT positions
    in the input are preserved as iNaT in the output.
    """
    if isna(other):
        # np.timedelta64("NaT") is not recognized by delta_to_nanoseconds,
        # so short-circuit with an all-NaT result of matching length.
        return np.full(len(self), iNaT, dtype='i8')

    nanos = delta_to_nanoseconds(other)
    # Overflow-checked addition; entries flagged by self._isnan are masked.
    summed = checked_add_with_arr(self.asi8, nanos,
                                  arr_mask=self._isnan).view('i8')
    masked = self._maybe_mask_results(summed)
    return masked.view('i8')
constant[
Add a delta of a timedeltalike
return the i8 result view
]
if call[name[isna], parameter[name[other]]] begin[:]
variable[new_values] assign[=] call[name[np].empty, parameter[call[name[len], parameter[name[self]]]]]
call[name[new_values]][<ast.Slice object at 0x7da18ede6920>] assign[=] name[iNaT]
return[name[new_values]]
variable[inc] assign[=] call[name[delta_to_nanoseconds], parameter[name[other]]]
variable[new_values] assign[=] call[call[name[checked_add_with_arr], parameter[name[self].asi8, name[inc]]].view, parameter[constant[i8]]]
variable[new_values] assign[=] call[name[self]._maybe_mask_results, parameter[name[new_values]]]
return[call[name[new_values].view, parameter[constant[i8]]]] | keyword[def] identifier[_add_timedeltalike_scalar] ( identifier[self] , identifier[other] ):
literal[string]
keyword[if] identifier[isna] ( identifier[other] ):
identifier[new_values] = identifier[np] . identifier[empty] ( identifier[len] ( identifier[self] ), identifier[dtype] = literal[string] )
identifier[new_values] [:]= identifier[iNaT]
keyword[return] identifier[new_values]
identifier[inc] = identifier[delta_to_nanoseconds] ( identifier[other] )
identifier[new_values] = identifier[checked_add_with_arr] ( identifier[self] . identifier[asi8] , identifier[inc] ,
identifier[arr_mask] = identifier[self] . identifier[_isnan] ). identifier[view] ( literal[string] )
identifier[new_values] = identifier[self] . identifier[_maybe_mask_results] ( identifier[new_values] )
keyword[return] identifier[new_values] . identifier[view] ( literal[string] ) | def _add_timedeltalike_scalar(self, other):
"""
Add a delta of a timedeltalike
return the i8 result view
"""
if isna(other):
# i.e np.timedelta64("NaT"), not recognized by delta_to_nanoseconds
new_values = np.empty(len(self), dtype='i8')
new_values[:] = iNaT
return new_values # depends on [control=['if'], data=[]]
inc = delta_to_nanoseconds(other)
new_values = checked_add_with_arr(self.asi8, inc, arr_mask=self._isnan).view('i8')
new_values = self._maybe_mask_results(new_values)
return new_values.view('i8') |
def _init_file(self):
    """
    Initialize a new password file and set the reference password.
    """
    self.keyring_key = self._get_new_password()
    # Store a known reference entry first; subsequent accesses compare
    # against it to check that the password provided matches.
    self.set_password('keyring-setting',
                      'password reference',
                      'password reference value')
    # Persist file-format metadata (scheme first, then version).
    for setting, value in (('scheme', self.scheme),
                           ('version', self.version)):
        self._write_config_value('keyring-setting', setting, value)
constant[
Initialize a new password file and set the reference password.
]
name[self].keyring_key assign[=] call[name[self]._get_new_password, parameter[]]
call[name[self].set_password, parameter[constant[keyring-setting], constant[password reference], constant[password reference value]]]
call[name[self]._write_config_value, parameter[constant[keyring-setting], constant[scheme], name[self].scheme]]
call[name[self]._write_config_value, parameter[constant[keyring-setting], constant[version], name[self].version]] | keyword[def] identifier[_init_file] ( identifier[self] ):
literal[string]
identifier[self] . identifier[keyring_key] = identifier[self] . identifier[_get_new_password] ()
identifier[self] . identifier[set_password] ( literal[string] ,
literal[string] ,
literal[string] )
identifier[self] . identifier[_write_config_value] ( literal[string] ,
literal[string] ,
identifier[self] . identifier[scheme] )
identifier[self] . identifier[_write_config_value] ( literal[string] ,
literal[string] ,
identifier[self] . identifier[version] ) | def _init_file(self):
"""
Initialize a new password file and set the reference password.
"""
self.keyring_key = self._get_new_password()
# set a reference password, used to check that the password provided
# matches for subsequent checks.
self.set_password('keyring-setting', 'password reference', 'password reference value')
self._write_config_value('keyring-setting', 'scheme', self.scheme)
self._write_config_value('keyring-setting', 'version', self.version) |
def _get_class_handlers(cls, signal_name, instance):
"""Returns the handlers registered at class level.
"""
handlers = cls._signal_handlers_sorted[signal_name]
return [getattr(instance, hname) for hname in handlers] | def function[_get_class_handlers, parameter[cls, signal_name, instance]]:
constant[Returns the handlers registered at class level.
]
variable[handlers] assign[=] call[name[cls]._signal_handlers_sorted][name[signal_name]]
return[<ast.ListComp object at 0x7da1b1045690>] | keyword[def] identifier[_get_class_handlers] ( identifier[cls] , identifier[signal_name] , identifier[instance] ):
literal[string]
identifier[handlers] = identifier[cls] . identifier[_signal_handlers_sorted] [ identifier[signal_name] ]
keyword[return] [ identifier[getattr] ( identifier[instance] , identifier[hname] ) keyword[for] identifier[hname] keyword[in] identifier[handlers] ] | def _get_class_handlers(cls, signal_name, instance):
"""Returns the handlers registered at class level.
"""
handlers = cls._signal_handlers_sorted[signal_name]
return [getattr(instance, hname) for hname in handlers] |
def add_listener_destinations(self, server_id, listener_urls, owned=True):
    """
    Register WBEM listeners to be the target of indications sent by a
    WBEM server.

    This function automatically creates a listener destination instance
    (of CIM class "CIM_ListenerDestinationCIMXML") for each specified
    listener URL in the Interop namespace of the specified WBEM server.

    The form of the `Name` property of the created destination instance is:

      ``"pywbemdestination:" {ownership} ":" {subscription_manager_id} ":"
      {guid}``

    where ``{ownership}`` is ``"owned"`` or ``"permanent"`` dependent on
    the `owned` argument; ``{subscription_manager_id}`` is the
    subscription manager ID; and ``{guid}`` is a globally unique
    identifier.

    Owned listener destinations are added or updated conditionally: If the
    listener destination instance to be added is already registered with
    this subscription manager and has the same property values, it is not
    created or modified. If it has the same path but different property
    values, it is modified to get the desired property values. If an
    instance with this path does not exist yet (the normal case), it is
    created.

    Permanent listener destinations are created unconditionally, and it is
    up to the user to ensure that such an instance does not exist yet.

    Parameters:

      server_id (:term:`string`):
        The server ID of the WBEM server, returned by
        :meth:`~pywbem.WBEMSubscriptionManager.add_server`.

      listener_urls (:term:`string` or list of :term:`string`):
        The URL or URLs of the WBEM listeners to be registered.
        Each listener URL string must have the format
        ``[{scheme}://]{host}:{port}`` where scheme is ``http`` (default)
        or ``https``; note that the port is required in listener URLs.
        See :class:`~pywbem.WBEMConnection` for examples of valid URLs,
        with the caveat that the port in server URLs is optional.

      owned (:class:`py:bool`):
        Defines the ownership type of the created listener destination
        instances: If `True`, they will be owned. Otherwise, they will be
        permanent. See :ref:`WBEMSubscriptionManager` for details about
        these ownership types.

    Returns:

        :class:`py:list` of :class:`~pywbem.CIMInstance`: The created
        listener destination instances for the defined listener URLs.

    Raises:

        Exceptions raised by :class:`~pywbem.WBEMConnection`.
    """
    # server_id is validated in the _create_destination() call below.

    if isinstance(listener_urls, list):
        # Register each URL recursively. Bug fix: `owned` must be
        # propagated here; previously the recursive call dropped it, so
        # permanent (owned=False) destinations given as a list were
        # silently registered as owned ones.
        dest_insts = []
        for listener_url in listener_urls:
            dest_insts.extend(
                self.add_listener_destinations(server_id, listener_url,
                                               owned))
        return dest_insts

    # Single listener URL.
    dest_inst = self._create_destination(server_id, listener_urls, owned)
    return [dest_inst]
constant[
Register WBEM listeners to be the target of indications sent by a
WBEM server.
This function automatically creates a listener destination instance
(of CIM class "CIM_ListenerDestinationCIMXML") for each specified
listener URL in the Interop namespace of the specified WBEM server.
The form of the `Name` property of the created destination instance is:
``"pywbemdestination:" {ownership} ":" {subscription_manager_id} ":"
{guid}``
where ``{ownership}`` is ``"owned"`` or ``"permanent"`` dependent on
the `owned` argument; ``{subscription_manager_id}`` is the
subscription manager ID; and ``{guid}`` is a globally unique
identifier.
Owned listener destinations are added or updated conditionally: If the
listener destination instance to be added is already registered with
this subscription manager and has the same property values, it is not
created or modified. If it has the same path but different property
values, it is modified to get the desired property values. If an
instance with this path does not exist yet (the normal case), it is
created.
Permanent listener destinations are created unconditionally, and it is
up to the user to ensure that such an instance does not exist yet.
Parameters:
server_id (:term:`string`):
The server ID of the WBEM server, returned by
:meth:`~pywbem.WBEMSubscriptionManager.add_server`.
listener_urls (:term:`string` or list of :term:`string`):
The URL or URLs of the WBEM listeners to be registered.
The WBEM listener may be a :class:`~pywbem.WBEMListener` object or
any external WBEM listener.
Each listener URL string must have the format:
``[{scheme}://]{host}:{port}``
The following URL schemes are supported:
* ``https``: Causes HTTPS to be used.
* ``http``: Causes HTTP to be used. This is the default
The host can be specified in any of the usual formats:
* a short or fully qualified DNS hostname
* a literal (= dotted) IPv4 address
* a literal IPv6 address, formatted as defined in :term:`RFC3986`
with the extensions for zone identifiers as defined in
:term:`RFC6874`, supporting ``-`` (minus) for the delimiter
before the zone ID string, as an additional choice to ``%25``.
Note that the port is required in listener URLs.
See :class:`~pywbem.WBEMConnection` for examples of valid URLs,
with the caveat that the port in server URLs is optional.
owned (:class:`py:bool`):
Defines the ownership type of the created listener destination
instances: If `True`, they will be owned. Otherwise, they will be
permanent. See :ref:`WBEMSubscriptionManager` for details about
these ownership types.
Returns:
:class:`py:list` of :class:`~pywbem.CIMInstance`: The created
listener destination instances for the defined listener URLs.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
]
if call[name[isinstance], parameter[name[listener_urls], name[list]]] begin[:]
variable[dest_insts] assign[=] list[[]]
for taget[name[listener_url]] in starred[name[listener_urls]] begin[:]
variable[new_dest_insts] assign[=] call[name[self].add_listener_destinations, parameter[name[server_id], name[listener_url]]]
call[name[dest_insts].extend, parameter[name[new_dest_insts]]]
return[name[dest_insts]]
variable[listener_url] assign[=] name[listener_urls]
variable[dest_inst] assign[=] call[name[self]._create_destination, parameter[name[server_id], name[listener_url], name[owned]]]
return[list[[<ast.Name object at 0x7da212db5030>]]] | keyword[def] identifier[add_listener_destinations] ( identifier[self] , identifier[server_id] , identifier[listener_urls] , identifier[owned] = keyword[True] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[listener_urls] , identifier[list] ):
identifier[dest_insts] =[]
keyword[for] identifier[listener_url] keyword[in] identifier[listener_urls] :
identifier[new_dest_insts] = identifier[self] . identifier[add_listener_destinations] (
identifier[server_id] , identifier[listener_url] )
identifier[dest_insts] . identifier[extend] ( identifier[new_dest_insts] )
keyword[return] identifier[dest_insts]
identifier[listener_url] = identifier[listener_urls]
identifier[dest_inst] = identifier[self] . identifier[_create_destination] ( identifier[server_id] , identifier[listener_url] , identifier[owned] )
keyword[return] [ identifier[dest_inst] ] | def add_listener_destinations(self, server_id, listener_urls, owned=True):
"""
Register WBEM listeners to be the target of indications sent by a
WBEM server.
This function automatically creates a listener destination instance
(of CIM class "CIM_ListenerDestinationCIMXML") for each specified
listener URL in the Interop namespace of the specified WBEM server.
The form of the `Name` property of the created destination instance is:
``"pywbemdestination:" {ownership} ":" {subscription_manager_id} ":"
{guid}``
where ``{ownership}`` is ``"owned"`` or ``"permanent"`` dependent on
the `owned` argument; ``{subscription_manager_id}`` is the
subscription manager ID; and ``{guid}`` is a globally unique
identifier.
Owned listener destinations are added or updated conditionally: If the
listener destination instance to be added is already registered with
this subscription manager and has the same property values, it is not
created or modified. If it has the same path but different property
values, it is modified to get the desired property values. If an
instance with this path does not exist yet (the normal case), it is
created.
Permanent listener destinations are created unconditionally, and it is
up to the user to ensure that such an instance does not exist yet.
Parameters:
server_id (:term:`string`):
The server ID of the WBEM server, returned by
:meth:`~pywbem.WBEMSubscriptionManager.add_server`.
listener_urls (:term:`string` or list of :term:`string`):
The URL or URLs of the WBEM listeners to be registered.
The WBEM listener may be a :class:`~pywbem.WBEMListener` object or
any external WBEM listener.
Each listener URL string must have the format:
``[{scheme}://]{host}:{port}``
The following URL schemes are supported:
* ``https``: Causes HTTPS to be used.
* ``http``: Causes HTTP to be used. This is the default
The host can be specified in any of the usual formats:
* a short or fully qualified DNS hostname
* a literal (= dotted) IPv4 address
* a literal IPv6 address, formatted as defined in :term:`RFC3986`
with the extensions for zone identifiers as defined in
:term:`RFC6874`, supporting ``-`` (minus) for the delimiter
before the zone ID string, as an additional choice to ``%25``.
Note that the port is required in listener URLs.
See :class:`~pywbem.WBEMConnection` for examples of valid URLs,
with the caveat that the port in server URLs is optional.
owned (:class:`py:bool`):
Defines the ownership type of the created listener destination
instances: If `True`, they will be owned. Otherwise, they will be
permanent. See :ref:`WBEMSubscriptionManager` for details about
these ownership types.
Returns:
:class:`py:list` of :class:`~pywbem.CIMInstance`: The created
listener destination instances for the defined listener URLs.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
"""
# server_id is validated in _create_...() method.
# If list, recursively call this function with each list item.
if isinstance(listener_urls, list):
dest_insts = []
for listener_url in listener_urls:
new_dest_insts = self.add_listener_destinations(server_id, listener_url)
dest_insts.extend(new_dest_insts) # depends on [control=['for'], data=['listener_url']]
return dest_insts # depends on [control=['if'], data=[]]
# Here, the variable will be a single list item.
listener_url = listener_urls
dest_inst = self._create_destination(server_id, listener_url, owned)
return [dest_inst] |
def run_query(method, params, **kwargs):
    '''
    Send Zabbix API call

    Args:
        method: actual operation to perform via the API
        params: parameters required for specific method

    optional kwargs:
        _connection_user: zabbix user (can also be set in opts or pillar, see module's docstring)
        _connection_password: zabbix password (can also be set in opts or pillar, see module's docstring)
        _connection_url: url of zabbix frontend (can also be set in opts or pillar, see module's docstring)

        all optional template.get parameters: keyword argument names depends on your zabbix version, see:
        https://www.zabbix.com/documentation/2.4/manual/api/reference/

    Returns:
        Response from Zabbix API

    CLI Example:
    .. code-block:: bash

        salt '*' zabbix.run_query proxy.create '{"host": "zabbixproxy.domain.com", "status": "5"}'
    '''
    conn_args = _login(**kwargs)
    ret = {}
    if not conn_args:
        # Login failed; historical behavior is to return an empty dict.
        return ret
    try:
        params = _params_extend(params, **kwargs)
        ret = _query(method, params, conn_args['url'], conn_args['auth'])
        # Truthy results (including True) are returned as-is; falsy ones
        # (including an explicit False) collapse to False.
        return ret['result'] or False
    except KeyError:
        # The response had no 'result' key (or an internal lookup failed
        # mid-query): return whatever response dict we have.
        return ret
constant[
Send Zabbix API call
Args:
method: actual operation to perform via the API
params: parameters required for specific method
optional kwargs:
_connection_user: zabbix user (can also be set in opts or pillar, see module's docstring)
_connection_password: zabbix password (can also be set in opts or pillar, see module's docstring)
_connection_url: url of zabbix frontend (can also be set in opts or pillar, see module's docstring)
all optional template.get parameters: keyword argument names depends on your zabbix version, see:
https://www.zabbix.com/documentation/2.4/manual/api/reference/
Returns:
Response from Zabbix API
CLI Example:
.. code-block:: bash
salt '*' zabbix.run_query proxy.create '{"host": "zabbixproxy.domain.com", "status": "5"}'
]
variable[conn_args] assign[=] call[name[_login], parameter[]]
variable[ret] assign[=] dictionary[[], []]
<ast.Try object at 0x7da20c7c9060> | keyword[def] identifier[run_query] ( identifier[method] , identifier[params] ,** identifier[kwargs] ):
literal[string]
identifier[conn_args] = identifier[_login] (** identifier[kwargs] )
identifier[ret] ={}
keyword[try] :
keyword[if] identifier[conn_args] :
identifier[method] = identifier[method]
identifier[params] = identifier[params]
identifier[params] = identifier[_params_extend] ( identifier[params] ,** identifier[kwargs] )
identifier[ret] = identifier[_query] ( identifier[method] , identifier[params] , identifier[conn_args] [ literal[string] ], identifier[conn_args] [ literal[string] ])
keyword[if] identifier[isinstance] ( identifier[ret] [ literal[string] ], identifier[bool] ):
keyword[return] identifier[ret] [ literal[string] ]
keyword[return] identifier[ret] [ literal[string] ] keyword[if] identifier[ret] [ literal[string] ] keyword[else] keyword[False]
keyword[else] :
keyword[raise] identifier[KeyError]
keyword[except] identifier[KeyError] :
keyword[return] identifier[ret] | def run_query(method, params, **kwargs):
"""
Send Zabbix API call
Args:
method: actual operation to perform via the API
params: parameters required for specific method
optional kwargs:
_connection_user: zabbix user (can also be set in opts or pillar, see module's docstring)
_connection_password: zabbix password (can also be set in opts or pillar, see module's docstring)
_connection_url: url of zabbix frontend (can also be set in opts or pillar, see module's docstring)
all optional template.get parameters: keyword argument names depends on your zabbix version, see:
https://www.zabbix.com/documentation/2.4/manual/api/reference/
Returns:
Response from Zabbix API
CLI Example:
.. code-block:: bash
salt '*' zabbix.run_query proxy.create '{"host": "zabbixproxy.domain.com", "status": "5"}'
"""
conn_args = _login(**kwargs)
ret = {}
try:
if conn_args:
method = method
params = params
params = _params_extend(params, **kwargs)
ret = _query(method, params, conn_args['url'], conn_args['auth'])
if isinstance(ret['result'], bool):
return ret['result'] # depends on [control=['if'], data=[]]
return ret['result'] if ret['result'] else False # depends on [control=['if'], data=[]]
else:
raise KeyError # depends on [control=['try'], data=[]]
except KeyError:
return ret # depends on [control=['except'], data=[]] |
def add_step(step_name, func):
    """
    Add a step function to Orca.

    The function's argument names and keyword argument values
    will be matched to registered variables when the function
    needs to be evaluated by Orca.
    The argument name "iter_var" may be used to have the current
    iteration variable injected.

    Parameters
    ----------
    step_name : str
    func : callable

    """
    # Guard clause: reject non-callables up front.
    if not isinstance(func, Callable):
        raise TypeError('func must be a callable')
    logger.debug('registering step {!r}'.format(step_name))
    _STEPS[step_name] = _StepFuncWrapper(step_name, func)
constant[
Add a step function to Orca.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
Parameters
----------
step_name : str
func : callable
]
if call[name[isinstance], parameter[name[func], name[Callable]]] begin[:]
call[name[logger].debug, parameter[call[constant[registering step {!r}].format, parameter[name[step_name]]]]]
call[name[_STEPS]][name[step_name]] assign[=] call[name[_StepFuncWrapper], parameter[name[step_name], name[func]]] | keyword[def] identifier[add_step] ( identifier[step_name] , identifier[func] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[func] , identifier[Callable] ):
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[step_name] ))
identifier[_STEPS] [ identifier[step_name] ]= identifier[_StepFuncWrapper] ( identifier[step_name] , identifier[func] )
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] ) | def add_step(step_name, func):
"""
Add a step function to Orca.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
Parameters
----------
step_name : str
func : callable
"""
if isinstance(func, Callable):
logger.debug('registering step {!r}'.format(step_name))
_STEPS[step_name] = _StepFuncWrapper(step_name, func) # depends on [control=['if'], data=[]]
else:
raise TypeError('func must be a callable') |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.